file_name
large_stringlengths
4
140
prefix
large_stringlengths
0
39k
suffix
large_stringlengths
0
36.1k
middle
large_stringlengths
0
29.4k
fim_type
large_stringclasses
4 values
stat.go
package stat import ( "fmt" "strconv" "sync" "time" "github.com/smartwalle/container/smap" "math" kdb "github.com/sv/kdbgo" ) type Response struct { Sym string Qid string Accountname string Time time.Time Entrustno int32 Stockcode string Askprice float64 Askvol int32 Bidprice float64 Bidvol int32 Withdraw int32 Status int32 } type ResponseInt64 struct { sync.Mutex Sym string Qid string Accountname string Time time.Time Entrustno int64 Stockcode string Askprice float64 Askvol int64 Bidprice float64 Bidvol int64 Withdraw int64 Status int64 } type STK struct { sync.Mutex SpaceStk SpaceSTK ProfitStk ProfitSTK orderArray []*Response } //仓位统计 type SpaceSTK struct { Sym string Accountname string Stockcode string SpaceVol int32 //仓位 OnlineProfit float64 //浮动盈利 AvgPrice float64 //均价 } //利润统计 type ProfitSTK struct { Sym string Accountname string Stockcode string PastProfit float64 //已成利润 BidCount int32 //交易笔数 BidNum int32 //股数 BidMoneySum float64 //交易额 TotalTax float64 //总费用 } type Market struct { Sym string Time time.Time SzWindCode string NActionDay int32 NTime int32 NStatus int32 NPreClose int32 NOpen int32 NHigh int32 NLow int32 NMatch int32 NAskPrice1 int32 NAskPrice2 int32 NAskPrice3 int32 NAskPrice4 int32 NAskPrice5 float32 NAskPrice6 float32 NAskPrice7 float32 NAskPrice8 float32 NAskPrice9 float32 NAskPrice10 float32 NAskVol1 int32 NAskVol2 int32 NAskVol3 int32 NAskVol4 int32 NAskVol5 int32 NAskVol6 int32 NAskVol7 int32 NAskVol8 int32 NAskVol9 int32 NAskVol10 int32 NBidPrice1 float32 NBidPrice2 float32 NBidPrice3 float32 NBidPrice4 float32 NBidPrice5 float32 NBidPrice6 float32 NBidPrice7 float32 NBidPrice8 float32 NBidPrice9 float32 NBidPrice10 float32 NBidVol1 int32 NBidVol2 int32 NBidVol3 int32 NBidVol4 int32 NBidVol5 int32 NBidVol6 int32 NBidVol7 int32 NBidVol8 int32 NBidVol9 int32 NBidVol10 int32 NNumTrades int32 IVolume int32 Turnover int32 NTotalBidVol int32 NTotalAskVol int32 NWeightedAvgBidPrice int32 NWeightedAvgAskPrice int32 NIOPV int32 NYieldToMaturity 
int32 NHighLimited int32 NLowLimited int32 NSyl1 int32 NSyl2 int32 NSD2 int32 } // Map<String,Map<String,Map<String,STK>>> 仓位的统计存放容器 var mapResult smap.Map = smap.New(true) var marketChan chan int = make(chan int) var orderChan chan int = make(chan int) var tChan chan int = make(chan int) func DoMain() { //nmatch是市价 fmt.Println("==stat=main===") SelectTransaction() go GetMarket() go GetTransaction() printMap() // <-marketChan fmt.Println("==stat=over===") } func SelectTransaction() { fmt.Println("==SelectTransaction==") var con *kdb.KDBConn var err error con, err = kdb.DialKDB("139.224.9.75", 52800, "") // con, err = kdb.DialKDB("139.196.77.165", 5033, "") if err != nil { fmt.Printf("Failed to connect kdb: %s", err.Error()) return } res, err := con.Call("0!select from response") if err != nil { fmt.Println("Subscribe: %s", err.Error()) return } // ignore type print output // fmt.Println("res:", res) table := res.Data.(kdb.Table) // fmt.Println("table:", table) for i := 0; i < int(table.Data[0].Len()); i++ { kline_data := &Response{} kline_data2 := &ResponseInt64{} err := kdb.UnmarshalDict(table.Index(i), kline_data) if err != nil { fmt.Println("Failed to unmrshall dict ", err) continue } err2 := kdb.UnmarshalDict(table.Index(i), kline_data2) if err2 != nil { fmt.Println("Failed to unmrshall dict ", err2) continue } if kline_data.Askvol == 0 && kline_data2.Askvol != 0 { kline_data.Askvol = int32(kline_data2.Askvol) kline_data.Withdraw = int32(kline_data2.Withdraw) kline_data.Status = int32(kline_data2.Status) kline_data.Bidvol = int32(kline_data2.Bidvol) kline_data.Entrustno = int32(kline_data2.Entrustno) } handleData(kline_data) } // fmt.Println("==SelectTransaction is over ==") } //按照 用户->账户->股票 结构初始化map容器下每一个STK统计对象。每个STK对应的是哪个用户下哪个账户的哪个票。然后新订单来了,拿对应的STK来做统计 func handleData(kline_data *Response) { fmt.Println("select:", kline_data) user := kline_data.Sym account := kline_data.Accountname stat := &STK{} p := ProfitSTK{} s := SpaceSTK{} stat.ProfitStk = p 
stat.SpaceStk = s arr := []*Response{} stat.orderArray = arr stat.ProfitStk.Sym = kline_data.Sym stat.ProfitStk.Accountname = kline_data.Accountname stat.ProfitStk.Stockcode = kline_data.Stockcode stat.SpaceStk.Sym = kline_data.Sym stat.SpaceStk.Accountname = kline_data.Accountname stat.SpaceStk.Stockcode = kline_data.Stockcode var acc_map smap.Map if mapResult.Exists(user) { acc_map = (mapResult.Value(user)).(smap.Map) if acc_map.Exists(account) { stock_map := acc_map.Value(account).(smap.Map) if stock_map.Exists(kline_data.Stockcode) { stat = (stock_map.Value(kline_data.Stockcode)).(*STK) } else { stock_map.Set(kline_data.Stockcode, stat) } } else { stock_map := smap.New(true) stock_map.Set(kline_data.Stockcode, stat) acc_map.Set(account, stock_map) } } else { stock_map := smap.New(true) stock_map.Set(kline_data.Stockcode, stat) acc_map = smap.New(true) acc_map.Set(account, stock_map) mapResult.Set(user, acc_map) } DoCalculateSTK(kline_data, stat) } func GetTransaction() { for { var con *kdb.KDBConn var err error con, err = kdb.DialKDB("127.0.0.1", 3900, "") // con, err = kdb.DialKDB("139.196.77.165", 5033, "") if err != nil { fmt.Printf("Failed to connect kdb: %s", err.Error()) return } err = con.AsyncCall(".u.sub", &kdb.K{-kdb.KS, kdb.NONE, "response"}, &kdb.K{-kdb.KS, kdb.NONE, ""}) if err != nil { fmt.Println("Subscribe: %s", err.Error()) return } // ignore type print output res, _, err := con.ReadMessage() if err != nil { fmt.Println("Error processing message: ", err.Error()) return } data_list := res.Data.([]*kdb.K) fmt.Println("data_list:", data_list) table := data_list[2].Data.(kdb.Table) fmt.Println("table:", table) for i := 0; i < int(table.Data[0].Len()); i++ { kline_data := &Response{} kline_data2 := &ResponseInt64{} err := kdb.UnmarshalDict(table.Index(i), kline_data) if err != nil { fmt.Println("Failed to unmrshall dict ", err) continue } err2 := kdb.UnmarshalDict(table.Index(i), kline_data2) if err2 != nil { fmt.Println("Failed to unmrshall dict ", 
err2) continue } // fmt.Println("get:", kline_data) // fmt.Println("get2:", kline_data2) if kline_data.Askvol == 0 && kline_data2.Askvol != 0 { kline_data.Askvol = int32(kline_data2.Askvol) kline_data.Withdraw = int32(kline_data2.Withdraw) kline_data.Status = int32(kline_data2.Status) kline_data.Bidvol = int32(kline_data2.Bidvol) } handleData(kline_data) } } } //获取行情数据来统计map内每个票的浮动盈亏 func GetMarket() { for { fmt.Println("==GetMarket==", time.Now()) var con *kdb.KDBConn var err error // con, err = kdb.DialKDB("10.0.0.71", 5010, "") con, err = kdb.DialKDB("139.196.77.165", 5031, "") if err != nil { fmt.Printf("Failed to connect kdb: %s", err.Error()) return } err = con.AsyncCall(".u.sub", &kdb.K{-kdb.KS, kdb.NONE, "Market"}, &kdb.K{-kdb.KS, kdb.NONE, ""}) if err != nil { fmt.Println("Subscribe: %s", err.Error()) return } res, _, err := con.ReadMessage() if err != nil { fmt.Println("Error processing message: ", err.Error()) return } data_list := res.Data.([]*kdb.K) table := data_list[2].Data.(kdb.Table) for i := 0; i < int(table.Data[0].Len()); i++ { kline_data := &Market{} err := kdb.UnmarshalDict(table.Index(i), kline_data) if err != nil { fmt.Println("Failed to unmrshall dict ", err) continue } fmt.Println("getMarket:", kline_data) for _, user_map := range mapResult.Values() { for _, account_map := range (user_map.(smap.Map)).Values() { for _, stock_map := range (account_map.(smap.Map)).Values() { stat := stock_map.(*STK) if stat.SpaceStk.Stockcode == kline_data.Sym { DoRefresh(float64(kline_data.NMatch/10000), stat) } } } } } } marketChan <- 0 } //再算每个订单之前,要判断是不是增量。算完之后,把订单存到数组 func DoCalculateSTK(newOrder *Response, stk *STK) { fmt.Println("---DoCalculateSTK newOrder:", newOrder) fmt.Println("---DoCalculateSTK stk:", stk) // //清除 // stk.SpaceStk.AvgPrice = 0 // stk.SpaceStk.OnlineProfit = 0 // stk.SpaceStk.SpaceVol = 0 // stk.ProfitStk.BidCount = 0 // stk.ProfitStk.BidMoneySum = 0 // stk.ProfitStk.BidNum = 0 // stk.ProfitStk.PastProfit = 0 // 
stk.ProfitStk.TotalTax = 0 // //之前的全部统计一遍 // for _, order := range stk.orderArray { // if order.Bidvol != 0 && (order.Status == 2 || order.Status == 5 || order.Status == 4) { // CalculateSingle(order, stk) // } // } //先统计新订单,再更新订单数组 if newOrder.Status == 4 { CalculateSingle(newOrder, stk) var index int flag := false for i, order := range stk.orderArray { // fmt.Println("iiiii ", i) if newOrder.Entrustno == order.Entrustno && order.Status != 4 { index = i flag = true break } } if flag { updateArray(stk, index, newOrder) } else { stk.orderArray = append(stk.orderArray, newOrder) } } else if newOrder.Status == 2 || newOrder.Status == 5 { var index int flag := false for i, order := range stk.orderArray { if newOrder.Entrustno == order.Entrustno && order.Status != 4 { //算增量 fmt.Println("---算增量----") x := &Response{} x.Bidvol = newOrder.Bidvol - order.Bidvol x.Bidprice = (newOrder.Bidprice*float64(newOrder.Bidvol) - order.Bidprice*float64(order.Bidvol)) / float64(newOrder.Bidvol-order.Bidvol) CalculateSingle(x, stk) index = i flag = true break } } if flag { updateArray(stk, index, newOrder) } else { CalculateSingle(newOrder, stk) stk.orderArray = append(stk.orderArray, newOrder) } } else { stk.orderArray = append(stk.orderArray, newOrder) } } func CalculateSingle(newOrder *Response, stat *STK) { fmt.Println("CalculateSingle--- vol:", newOrder.Bidvol, " price:", newOrder.Bidprice, " status:", newOrder.Status) stat.Lock() //StaticsResult为实时统计对象,每一个交易完成,刷下统计 if newOrder.Bidvol != 0 { //每次买入刷新均价。然后每次实时价格减去均价不断出现浮动盈利 //算仓位 不管买还是卖,仓位都是相加减 var spaceTemp int32 = stat.SpaceStk.SpaceVol //临时对象记录下之前的仓位量 var avgTemp float64 = stat.SpaceStk.AvgPrice //临时对象记录下之前的均价 //卖的大于原有仓位 var flag bool = false if AbsInt(newOrder.Bidvol) >= AbsInt(stat.SpaceStk.SpaceVol) { flag = true } stat.SpaceStk.SpaceVol = stat.SpaceStk.SpaceVol + newOrder.Bidvol fmt.Println("算仓位", stat.SpaceStk.SpaceVol) if newOrder.Bidvol > 0 { //算均价 if spaceTemp < 0 { if flag { stat.SpaceStk.AvgPrice = 
math.Abs(newOrder.Bidprice) } } else { stat.SpaceStk.AvgPrice = math.Abs((stat.SpaceStk.AvgPrice*(float64(spaceTemp)) + newOrder.Bidprice*float64(newOrder.Bidvol)) / float64(stat.SpaceStk.SpaceVol)) } } else { if spaceTemp > 0 { if flag { stat.SpaceStk.AvgPrice = math.Abs(newOrder.Bidprice) } } else { stat.SpaceStk.AvgPrice = math.Abs((stat.SpaceStk.AvgPrice*(float64(spaceTemp)) + newOrder.Bidprice*float64(newOrder.Bidvol)) / float64(stat.SpaceStk.SpaceVol)) } } fmt.Println("算均价", stat.SpaceStk.AvgPrice) //算费用 买是万三 卖是千一加上万三 var stattax float64 if newOrder.Bidvol > 0 { stattax = math.Abs(float64(newOrder.Bidprice*float64(newOrder.Bidvol))) * 3 / 10000 } else { stattax = math.Abs(float64(newOrder.Bidprice*float64(newOrder.Bidvol))) * 13 / 10000 } fmt.Println("之前费用", stat.ProfitStk.TotalTax, " 本次费用 ", stattax) stat.ProfitStk.TotalTax = stat.ProfitStk.TotalTax + stattax stat.ProfitStk.TotalTax = Float64Fmt(stat.ProfitStk.TotalTax, 2) fmt.Println("算费用", stat.ProfitStk.TotalTax) //算利润 var soldNum int32 = AbsInt(newOrder.Bidvol) //本笔卖出的量 if flag { //卖的大于原有仓位 soldNum = AbsInt(spaceTemp) } else { soldNum = AbsInt(newOrder.Bidvol) } if newOrder.Bidvol > 0 { if spaceTemp < 0 { g := (avgTemp - newOrder.Bidprice) * float64(soldNum) fmt.Println("ggggggggggggggain:", g, "soldNum", soldNum) stat.ProfitStk.PastProfit = stat.ProfitStk.PastProfit + g - stattax } else { stat.ProfitStk.PastProfit = stat.ProfitStk.PastProfit - stattax } } else if newOrder.Bidvol < 0 { if spaceTemp > 0 { g := (newOrder.Bidprice - avgTemp) * float64(soldNum) fmt.Println("ggggggggggggggain:", g, "soldNum", soldNum) stat.ProfitStk.PastProfit = stat.ProfitStk.PastProfit + g - stattax } else { stat.ProfitStk.PastProfit = stat.ProfitStk.PastProfit - stattax } } stat.ProfitStk.PastProfit = Float64Fmt(stat.ProfitStk.PastProfit, 2) fmt.Println("算利润", stat.ProfitStk.PastProfit) //算交易笔数 stat.ProfitStk.BidCount = stat.ProfitStk.BidCount + 1 //算交易股数 // fmt.Println("AbsInt(stat.ProfitStk.BidNum) ::", 
AbsInt(stat.ProfitStk.BidNum), " soldNum", soldNum) stat.ProfitStk.BidNum = stat.ProfitStk.BidNum + AbsInt(newOrder.Bidvol) fmt.Println("after stat.ProfitStk.BidNum ", stat.ProfitStk.BidNum) //算交易额 stat.ProfitStk.BidMoneySum = stat.ProfitStk.BidMoneySum + math.Abs(float64(AbsInt(newOrder.Bidvol))*newOrder.Bidprice) } stat.Unlock() } func DoRefresh(nMatch float64, stat *STK) { stat.Lock() //非交易统计,每次实时价格减去均价和费用不断出现浮动盈利 stat.SpaceStk.OnlineProfit = (float64(stat.SpaceStk.SpaceVol) * (nMatch - stat.SpaceStk.AvgPrice)) - (math.Abs(float64(nMatch*float64(stat.SpaceStk.SpaceVol))) * 13 / 10000) stat.SpaceStk.OnlineProfit = Float64Fmt(stat.SpaceStk.OnlineProfit, 64) stat.Unlock() } func printMap() { for { // fmt.Println("map:::", mapResult) fmt.Println("用户 账户 票 仓位 均价 浮盈 利润 笔数 股数 交易额 费用 ") for _, user_map := range mapResult.Values() { //累积每个用户的总浮动盈亏和 总利润 var totalOnlineProfit float64 var totalProfit float64 for _, account_map := range (user_map.(smap.Map)).Values() { for _, stock_map := range (account_map.(smap.Map)).Values() { stat := stock_map.(*STK) totalOnlineProfit = totalOnlineProfit + stat.SpaceStk.OnlineProfit totalProfit = totalProfit + stat.ProfitStk.PastProfit fmt.Println(stat.SpaceStk.Sym, " ", stat.SpaceStk.Accountname, " ", stat.SpaceStk.Stockcode, " ", stat.SpaceStk.SpaceVol, " ", stat.SpaceStk.AvgPrice, " ", stat.SpaceStk.OnlineProfit, " ", stat.ProfitStk.PastProfit, " ", stat.ProfitStk.BidCount, " ", stat.ProfitStk.BidNum, " ", stat.ProfitStk.BidMoneySum, " ", stat.ProfitStk.TotalTax) } } fmt.Println("总浮动盈亏:", totalOnlineProfit, "总利润:", totalProfit) } time.Sleep(time.Second * 20) } } // //func Abs(f float64) float64 { // if f < 0 {
// return float64(-f) // } // return float64(f) //} func AbsInt(f int32) int32 { if f < 0 { return int32(-f) } return int32(f) } func Float64Fmt(f float64, prec int) float64 { a := strconv.FormatFloat(f, 'f', prec, 64) ff, err := strconv.ParseFloat(a, 64) if err != nil { fmt.Println(err) } return ff } func updateArray(stk *STK, index int, newOrder *Response) { //去除原先的同一委托订单 加入新的订单放入末尾 fmt.Println("stk: %v index %v neworder %v", stk, index, newOrder) if len(stk.orderArray) == 0 { stk.orderArray = append(stk.orderArray, newOrder) } else { if index == len(stk.orderArray)-1 { stk.orderArray[index] = newOrder } else { stk.orderArray = append(stk.orderArray[:index], stk.orderArray[index+1:]...) stk.orderArray = append(stk.orderArray, newOrder) } } }
random_line_split
stat.go
package stat import ( "fmt" "strconv" "sync" "time" "github.com/smartwalle/container/smap" "math" kdb "github.com/sv/kdbgo" ) type Response struct { Sym string Qid string Accountname string Time time.Time Entrustno int32 Stockcode string Askprice float64 Askvol int32 Bidprice float64 Bidvol int32 Withdraw int32 Status int32 } type ResponseInt64 struct { sync.Mutex Sym string Qid string Accountname string Time time.Time Entrustno int64 Stockcode string Askprice float64 Askvol int64 Bidprice float64 Bidvol int64 Withdraw int64 Status int64 } type STK struct { sync.Mutex SpaceStk SpaceSTK ProfitStk ProfitSTK orderArray []*Response } //仓位统计 type SpaceSTK struct { Sym string Accountname string Stockcode string SpaceVol int32 //仓位 OnlineProfit float64 //浮动盈利 AvgPrice float64 //均价 } //利润统计 type ProfitSTK struct { Sym string Accountname string Stockcode string PastProfit float64 //已成利润 BidCount int32 //交易笔数 BidNum int32 //股数 BidMoneySum float64 //交易额 TotalTax float64 //总费用 } type Market struct { Sym string Time time.Time SzWindCode string NActionDay int32 NTime int32 NStatus int32 NPreClose int32 NOpen int32 NHigh int32 NLow int32 NMatch int32 NAskPrice1 int32 NAskPrice2 int32 NAskPrice3 int32 NAskPrice4 int32 NAskPrice5 float32 NAskPrice6 float32 NAskPrice7 float32 NAskPrice8 float32 NAskPrice9 float32 NAskPrice10 float32 NAskVol1 int32 NAskVol2 int32 NAskVol3 int32 NAskVol4 int32 NAskVol5 int32 NAskVol6 int32 NAskVol7 int32 NAskVol8 int32 NAskVol9 int32 NAskVol10 int32 NBidPrice1 float32 NBidPrice2 float32 NBidPrice3 float32 NBidPrice4 float32 NBidPrice5 float32 NBidPrice6 float32 NBidPrice7 float32 NBidPrice8 float32 NBidPrice9 float32 NBidPrice10 float32 NBidVol1 int32 NBidVol2 int32 NBidVol3 int32 NBidVol4 int32 NBidVol5 int32 NBidVol6 int32 NBidVol7 int32 NBidVol8 int32 NBidVol9 int32 NBidVol10 int32 NNumTrades int32 IVolume int32 Turnover int32 NTotalBidVol int32 NTotalAskVol int32 NWeightedAvgBidPrice int32 NWeightedAvgAskPrice int32 NIOPV int32 NYieldToMaturity 
int32 NHighLimited int32 NLowLimited int32 NSyl1 int32 NSyl2 int32 NSD2 int32 } // Map<String,Map<String,Map<String,STK>>> 仓位的统计存放容器 var mapResult smap.Map = smap.New(true) var marketChan chan int = make(chan int) var orderChan chan int = make(chan int) var tChan chan int = make(chan int) func DoMain() { //nmatch是市价 fmt.Println("==stat=main===") SelectTransaction() go GetMarket() go GetTransaction() printMap() // <-marketChan fmt.Println("==stat=over===") } func SelectTransaction() { fmt.Println("==SelectTransaction==") var con *kdb.KDBConn var err error con, err = kdb.DialKDB("139.224.9.75", 52800, "") // con, err = kdb.DialKDB("139.196.77.165", 5033, "") if err != nil { fmt.Printf("Failed to connect kdb: %s", err.Error()) return } res, err := con.Call("0!select from response") if err != nil { fmt.Println("Subscribe: %s", err.Error()) return } // ignore type print output // fmt.Println("res:", res) table := res.Data.(kdb.Table) // fmt.Println("table:", table) for i := 0; i < int(table.Data[0].Len()); i++ { kline_data := &Response{} kline_data2 := &ResponseInt64{} err := kdb.UnmarshalDict(table.Index(i), kline_data) if err != nil { fmt.Println("Failed to unmrshall dict ", err) continue } err2 := kdb.UnmarshalDict(table.Index(i), kline_data2) if err2 != nil { fmt.Println("Failed to unmrshall dict ", err2) continue } if kline_data.Askvol == 0 && kline_data2.Askvol != 0 { kline_data.Askvol = int32(kline_data2.Askvol) kline_data.Withdraw = int32(kline_data2.Withdraw) kline_data.Status = int32(kline_data2.Status) kline_data.Bidvol = int32(kline_data2.Bidvol) kline_data.Entrustno = int32(kline_data2.Entrustno) } handleData(kline_data) } // fmt.Println("==SelectTransaction is over ==") } //按照 用户->账户->股票 结构初始化map容器下每一个STK统计对象。每个STK对应的是哪个用户下哪个账户的哪个票。然后新订单来了,拿对应的STK来做统计 func handleData(kline_data *Response) { fmt.Println("select:", kline_data) user := kline_data.Sym account := kline_data.Accountname stat := &STK{} p := ProfitSTK{} s := SpaceSTK{} stat.ProfitStk = p 
stat.SpaceStk = s arr := []*Response{} stat.orderArray = arr stat.ProfitStk.Sym = kline_data.Sym stat.ProfitStk.Accountname = kline_data.Accountname stat.ProfitStk.Stockcode = kline_data.Stockcode stat.SpaceStk.Sym = kline_data.Sym stat.SpaceStk.Accountname = kline_data.Accountname stat.SpaceStk.Stockcode = kline_data.Stockcode var acc_map smap.Map if mapResult.Exists(user) { acc_map = (mapResult.Value(user)).(smap.Map) if acc_map.Exists(account) { stock_map := acc_map.Value(account).(smap.Map) if stock_map.Exists(kline_data.Stockcode) { stat = (stock_map.Value(kline_data.Stockcode)).(*STK) } else { stock_map.Set(kline_data.Stockcode, stat) } } else { stock_map := smap.New(true) stock_map.Set(kline_data.Stockcode, stat) acc_map.Set(account, stock_map) } } else { stock_map := smap.New(true) stock_map.Set(kline_data.Stockcode, stat) acc_map = smap.New(true) acc_map.Set(account, stock_map) mapResult.Set(user, acc_map) } DoCalculateSTK(kline_data, stat) } func GetTransaction() { for { var con *kdb.KDBConn var err error con, err = kdb.DialKDB("127.0.0.1", 3900, "") // con, err = kdb.DialKDB("139.196.77.165", 5033, "") if err != nil { fmt.Printf("Failed to connect kdb: %s", err.Error()) return } err = con.AsyncCall(".u.sub", &kdb.K{-kdb.KS, kdb.NONE, "response"}, &kdb.K{-kdb.KS, kdb.NONE, ""}) if err != nil { fmt.Println("Subscribe: %s", err.Error()) return } // ignore type print output res, _, err := con.ReadMessage() if err != nil { fmt.Println("Error processing message: ", err.Error()) return } data_list := res.Data.([]*kdb.K) fmt.Println("data_list:", data_list) table := data_list[2].Data.(kdb.Table) fmt.Println("table:", table) for i := 0; i < int(table.Data[0].Len()); i++ { kline_data := &Response{} kline_data2 := &ResponseInt64{} err := kdb.UnmarshalDict(table.Index(i), kline_data) if err != nil { fmt.Println("Failed to unmrshall dict ", err) continue } err2 := kdb.UnmarshalDict(table.Index(i), kline_data2) if err2 != nil { fmt.Println("Failed to unmrshall dict ", 
err2) continue } // fmt.Println("get:", kline_data) // fmt.Println("get2:", kline_data2) if kline_data.Askvol == 0 && kline_data2.Askvol != 0 { kline_data.Askvol = int32(kline_data2.Askvol) kline_data.Withdraw = int32(kline_data2.Withdraw) kline_data.Status = int32(kline_data2.Status) kline_data.Bidvol = int32(kline_data2.Bidvol) } handleData(kline_data) } } } //获取行情数据来统计map内每个票的浮动盈亏 func GetMarket() { for { fmt.Println("==GetMarket==", time.Now()) var con *kdb.KDBConn var err error // con, err = kdb.DialKDB("10.0.0.71", 5010, "") con, err = kdb.DialKDB("139.196.77.165", 5031, "") if err != nil { fmt.Printf("Failed to connect kdb: %s", err.Error()) return } err = con.AsyncCall(".u.sub", &kdb.K{-kdb.KS, kdb.NONE, "Market"}, &kdb.K{-kdb.KS, kdb.NONE, ""}) if err != nil { fmt.Println("Subscribe: %s", err.Error()) return } res, _, err := con.ReadMessage() if err != nil { fmt.Println("Error processing message: ", err.Error()) return } data_list := res.Data.([]*kdb.K) table := data_list[2].Data.(kdb.Table) for i := 0; i < int(table.Data[0].Len()); i++ { kline_data := &Market{} err := kdb.UnmarshalDict(table.Index(i), kline_data) if err != nil { fmt.Println("Failed to unmrshall dict ", err) continue } fmt.Println("getMarket:", kline_data) for _, user_map := range mapResult.Values() { for _, account_map := range (user_map.(smap.Map)).Values() { for _, stock_map := range (account_map.(smap.Map)).Values() { stat := stock_map.(*STK) if stat.SpaceStk.Stockcode == kline_data.Sym { DoRefresh(float64(kline_data.NMatch/10000), stat) } } } } } } marketChan <- 0 } //再算每个订单之前,要判断是不是增量。算完之后,把订单存到数组 func DoCalculateSTK(newOrder *Response, stk *STK) { fmt.Println("---DoCalculateSTK newOrder:", newOrder) fmt.Println("---DoCalculateSTK stk:", stk) // //清除 // stk.SpaceStk.AvgPrice = 0 // stk.SpaceStk.OnlineProfit = 0 // stk.SpaceStk.SpaceVol = 0 // stk.ProfitStk.BidCount = 0 // stk.ProfitStk.BidMoneySum = 0 // stk.ProfitStk.BidNum = 0 // stk.ProfitStk.PastProfit = 0 // 
stk.ProfitStk.TotalTax = 0 // //之前的全部统计一遍 // for _, order := range stk.orderArray { // if order.Bidvol != 0 && (order.Status == 2 || order.Status == 5 || order.Status == 4) { // CalculateSingle(order, stk) // } // } //先统计新订单,再更新订单数组 if newOrder.Status == 4 { CalculateSingle(newOrder, stk) var index int flag := false for i, order := range stk.orderArray { // fmt.Println("iiiii ", i) if newOrder.Entrustno == order.Entrustno && order.Status != 4 { index = i flag = true break } } if flag { updateArray(stk, index, newOrder) } else { stk.orderArray = append(stk.orderArray, newOrder) } } else if newOrder.Status == 2 || newOrder.Status == 5 { var index int flag := false for i, order := range stk.orderArray { if newOrder.Entrustno == order.Entrustno && order.Status != 4 { //算增量 fmt.Println("---算增量----") x := &Response{} x.Bidvol = newOrder.Bidvol - order.Bidvol x.Bidprice = (newOrder.Bidprice*float64(newOrder.Bidvol) - order.Bidprice*float64(order.Bidvol)) / float64(newOrder.Bidvol-order.Bidvol) CalculateSingle(x, stk) index = i flag = true break } } if flag { updateArray(stk, index, newOrder) } else { CalculateSingle(newOrder, stk) stk.orderArray = append(stk.orderArray, newOrder) } } else { stk.orderArray = append(stk.orderArray, newOrder) } } func CalculateSingle(newOrder *Response, stat *STK) { fmt.Println("CalculateSingle--- vol:", newOrder.Bidvol, " price:", newOrder.Bidprice, " status:", newOrder.Status) stat.Lock() //StaticsResult为实时统计对象,每一个交易完成,刷下统计 if newOrder.Bidvol != 0 { //每次买入刷新均价。然后每次实时价格减去均价不断出现浮动盈利 //算仓位 不管买还是卖,仓位都是相加减 var spaceTemp int32 = stat.SpaceStk.SpaceVol //临时对象记录下之前的仓位量 var avgTemp float64 = stat.SpaceStk.AvgPr
alues() { for _, stock_map := range (account_map.(smap.Map)).Values() { stat := stock_map.(*STK) totalOnlineProfit = totalOnlineProfit + stat.SpaceStk.OnlineProfit totalProfit = totalProfit + stat.ProfitStk.PastProfit fmt.Println(stat.SpaceStk.Sym, " ", stat.SpaceStk.Accountname, " ", stat.SpaceStk.Stockcode, " ", stat.SpaceStk.SpaceVol, " ", stat.SpaceStk.AvgPrice, " ", stat.SpaceStk.OnlineProfit, " ", stat.ProfitStk.PastProfit, " ", stat.ProfitStk.BidCount, " ", stat.ProfitStk.BidNum, " ", stat.ProfitStk.BidMoneySum, " ", stat.ProfitStk.TotalTax) } } fmt.Println("总浮动盈亏:", totalOnlineProfit, "总利润:", totalProfit) } time.Sleep(time.Second * 20) } } // //func Abs(f float64) float64 { // if f < 0 { // return float64(-f) // } // return float64(f) //} func AbsInt(f int32) int32 { if f < 0 { return int32(-f) } return int32(f) } func Float64Fmt(f float64, prec int) float64 { a := strconv.FormatFloat(f, 'f', prec, 64) ff, err := strconv.ParseFloat(a, 64) if err != nil { fmt.Println(err) } return ff } func updateArray(stk *STK, index int, newOrder *Response) { //去除原先的同一委托订单 加入新的订单放入末尾 fmt.Println("stk: %v index %v neworder %v", stk, index, newOrder) if len(stk.orderArray) == 0 { stk.orderArray = append(stk.orderArray, newOrder) } else { if index == len(stk.orderArray)-1 { stk.orderArray[index] = newOrder } else { stk.orderArray = append(stk.orderArray[:index], stk.orderArray[index+1:]...) stk.orderArray = append(stk.orderArray, newOrder) } } }
ice //临时对象记录下之前的均价 //卖的大于原有仓位 var flag bool = false if AbsInt(newOrder.Bidvol) >= AbsInt(stat.SpaceStk.SpaceVol) { flag = true } stat.SpaceStk.SpaceVol = stat.SpaceStk.SpaceVol + newOrder.Bidvol fmt.Println("算仓位", stat.SpaceStk.SpaceVol) if newOrder.Bidvol > 0 { //算均价 if spaceTemp < 0 { if flag { stat.SpaceStk.AvgPrice = math.Abs(newOrder.Bidprice) } } else { stat.SpaceStk.AvgPrice = math.Abs((stat.SpaceStk.AvgPrice*(float64(spaceTemp)) + newOrder.Bidprice*float64(newOrder.Bidvol)) / float64(stat.SpaceStk.SpaceVol)) } } else { if spaceTemp > 0 { if flag { stat.SpaceStk.AvgPrice = math.Abs(newOrder.Bidprice) } } else { stat.SpaceStk.AvgPrice = math.Abs((stat.SpaceStk.AvgPrice*(float64(spaceTemp)) + newOrder.Bidprice*float64(newOrder.Bidvol)) / float64(stat.SpaceStk.SpaceVol)) } } fmt.Println("算均价", stat.SpaceStk.AvgPrice) //算费用 买是万三 卖是千一加上万三 var stattax float64 if newOrder.Bidvol > 0 { stattax = math.Abs(float64(newOrder.Bidprice*float64(newOrder.Bidvol))) * 3 / 10000 } else { stattax = math.Abs(float64(newOrder.Bidprice*float64(newOrder.Bidvol))) * 13 / 10000 } fmt.Println("之前费用", stat.ProfitStk.TotalTax, " 本次费用 ", stattax) stat.ProfitStk.TotalTax = stat.ProfitStk.TotalTax + stattax stat.ProfitStk.TotalTax = Float64Fmt(stat.ProfitStk.TotalTax, 2) fmt.Println("算费用", stat.ProfitStk.TotalTax) //算利润 var soldNum int32 = AbsInt(newOrder.Bidvol) //本笔卖出的量 if flag { //卖的大于原有仓位 soldNum = AbsInt(spaceTemp) } else { soldNum = AbsInt(newOrder.Bidvol) } if newOrder.Bidvol > 0 { if spaceTemp < 0 { g := (avgTemp - newOrder.Bidprice) * float64(soldNum) fmt.Println("ggggggggggggggain:", g, "soldNum", soldNum) stat.ProfitStk.PastProfit = stat.ProfitStk.PastProfit + g - stattax } else { stat.ProfitStk.PastProfit = stat.ProfitStk.PastProfit - stattax } } else if newOrder.Bidvol < 0 { if spaceTemp > 0 { g := (newOrder.Bidprice - avgTemp) * float64(soldNum) fmt.Println("ggggggggggggggain:", g, "soldNum", soldNum) stat.ProfitStk.PastProfit = stat.ProfitStk.PastProfit + g - stattax } else 
{ stat.ProfitStk.PastProfit = stat.ProfitStk.PastProfit - stattax } } stat.ProfitStk.PastProfit = Float64Fmt(stat.ProfitStk.PastProfit, 2) fmt.Println("算利润", stat.ProfitStk.PastProfit) //算交易笔数 stat.ProfitStk.BidCount = stat.ProfitStk.BidCount + 1 //算交易股数 // fmt.Println("AbsInt(stat.ProfitStk.BidNum) ::", AbsInt(stat.ProfitStk.BidNum), " soldNum", soldNum) stat.ProfitStk.BidNum = stat.ProfitStk.BidNum + AbsInt(newOrder.Bidvol) fmt.Println("after stat.ProfitStk.BidNum ", stat.ProfitStk.BidNum) //算交易额 stat.ProfitStk.BidMoneySum = stat.ProfitStk.BidMoneySum + math.Abs(float64(AbsInt(newOrder.Bidvol))*newOrder.Bidprice) } stat.Unlock() } func DoRefresh(nMatch float64, stat *STK) { stat.Lock() //非交易统计,每次实时价格减去均价和费用不断出现浮动盈利 stat.SpaceStk.OnlineProfit = (float64(stat.SpaceStk.SpaceVol) * (nMatch - stat.SpaceStk.AvgPrice)) - (math.Abs(float64(nMatch*float64(stat.SpaceStk.SpaceVol))) * 13 / 10000) stat.SpaceStk.OnlineProfit = Float64Fmt(stat.SpaceStk.OnlineProfit, 64) stat.Unlock() } func printMap() { for { // fmt.Println("map:::", mapResult) fmt.Println("用户 账户 票 仓位 均价 浮盈 利润 笔数 股数 交易额 费用 ") for _, user_map := range mapResult.Values() { //累积每个用户的总浮动盈亏和 总利润 var totalOnlineProfit float64 var totalProfit float64 for _, account_map := range (user_map.(smap.Map)).V
identifier_body
stat.go
package stat import ( "fmt" "strconv" "sync" "time" "github.com/smartwalle/container/smap" "math" kdb "github.com/sv/kdbgo" ) type Response struct { Sym string Qid string Accountname string Time time.Time Entrustno int32 Stockcode string Askprice float64 Askvol int32 Bidprice float64 Bidvol int32 Withdraw int32 Status int32 } type ResponseInt64 struct { sync.Mutex Sym string Qid string Accountname string Time time.Time Entrustno int64 Stockcode string Askprice float64 Askvol int64 Bidprice float64 Bidvol int64 Withdraw int64 Status int64 } type STK struct { sync.Mutex SpaceStk SpaceSTK ProfitStk ProfitSTK orderArray []*Response } //仓位统计 type SpaceSTK struct { Sym string Accountname string Stockcode string SpaceVol int32 //仓位 OnlineProfit float64 //浮动盈利 AvgPrice float64 //均价 } //利润统计 type ProfitSTK struct { Sym string Accountname string Stockcode string PastProfit float64 //已成利润 BidCount int32 //交易笔数 BidNum int32 //股数 BidMoneySum float64 //交易额 TotalTax float64 //总费用 } type Market struct { Sym string Time time.Time SzWindCode string NActionDay int32 NTime int32 NStatus int32 NPreClose int32 NOpen int32 NHigh int32 NLow int32 NMatch int32 NAskPrice1 int32 NAskPrice2 int32 NAskPrice3 int32 NAskPrice4 int32 NAskPrice5 float32 NAskPrice6 float32 NAskPrice7 float32 NAskPrice8 float32 NAskPrice9 float32 NAskPrice10 float32 NAskVol1 int32 NAskVol2 int32 NAskVol3 int32 NAskVol4 int32 NAskVol5 int32 NAskVol6 int32 NAskVol7 int32 NAskVol8 int32 NAskVol9 int32 NAskVol10 int32 NBidPrice1 float32 NBidPrice2 float32 NBidPrice3 float32 NBidPrice4 float32 NBidPrice5 float32 NBidPrice6 float32 NBidPrice7 float32 NBidPrice8 float32 NBidPrice9 float32 NBidPrice10 float32 NBidVol1 int32 NBidVol2 int32 NBidVol3 int32 NBidVol4 int32 NBidVol5 int32 NBidVol6 int32 NBidVol7 int32 NBidVol8 int32 NBidVol9 int32 NBidVol10 int32 NNumTrades int32 IVolume int32 Turnover int32 NTotalBidVol int32 NTotalAskVol int32 NWeightedAvgBidPrice int32 NWeightedAvgAskPrice int32 NIOPV int32 NYieldToMaturity 
int32 NHighLimited int32 NLowLimited int32 NSyl1 int32 NSyl2 int32 NSD2 int32 } // Map<String,Map<String,Map<String,STK>>> 仓位的统计存放容器 var mapResult smap.Map = smap.New(true) var marketChan chan int = make(chan int) var orderChan chan int = make(chan int) var tChan chan int = make(chan int) func DoMain() { //nmatch是市价 fmt.Println("==stat=main===") SelectTransaction() go GetMarket() go GetTransaction() printMap() // <-marketChan fmt.Println("==stat=over===") } func SelectTransaction() { fmt.Println("==SelectTransaction==") var con *kdb.KDBConn var err error con, err = kdb.DialKDB("139.224.9.75", 52800, "") // con, err = kdb.DialKDB("139.196.77.165", 5033, "") if err != nil { fmt.Printf("Failed to connect kdb: %s", err.Error()) return } res, err := con.Call("0!select from response") if err != nil { fmt.Println("Subscribe: %s", err.Error()) return } // ignore type print output // fmt.Println("res:", res) table := res.Data.(kdb.Table) // fmt.Println("table:", table) for i := 0; i < int(table.Data[0].Len()); i++ { kline_data := &Response{} kline_data2 := &ResponseInt64{} err := kdb.UnmarshalDict(table.Index(i), kline_data) if err != nil { fmt.Println("Failed to unmrshall dict ", err) continue } err2 := kdb.UnmarshalDict(table.Index(i), kline_data2) if err2 != nil { fmt.Println("Failed to unmrshall dict ", err2) continue } if kline_data.Askvol == 0 && kline_data2.Askvol != 0 { kline_data.Askvol = int32(kline_data2.Askvol) kline_data.Withdraw = int32(kline_data2.Withdraw) kline_data.Status = int32(kline_data2.Status) kline_data.Bidvol = int32(kline_data2.Bidvol) kline_data.Entrustno = int32(kline_data2.Entrustno) } handleData(kline_data) } // fmt.Println("==SelectTransaction is over ==") } //按照 用户->账户->股票 结构初始化map容器下每一个STK统计对象。每个STK对应的是哪个用户下哪个账户的哪个票。然后新订单来了,拿对应的STK来做统计 func handleData(kline_data *Response) { fmt.Println("select:", kline_data) user := kline_data.Sym account := kline_data.Accountname stat := &STK{} p := ProfitSTK{} s := SpaceSTK{} stat.ProfitStk = p 
stat.SpaceStk = s arr := []*Response{} stat.orderArray = arr stat.ProfitStk.Sym = kline_data.Sym stat.ProfitStk.Accountname = kline_data.Accountname stat.ProfitStk.Stockcode = kline_data.Stockcode stat.SpaceStk.Sym = kline_data.Sym stat.SpaceStk.Accountname = kline_data.Accountname stat.SpaceStk.Stockcode = kline_data.Stockcode var acc_map smap.Map if mapResult.Exists(user) { acc_map = (mapResult.Value(user)).(smap.Map) if acc_map.Exists(account) { stock_map := acc_map.Value(account).(smap.Map) if stock_map.Exists(kline_data.Stockcode) { stat = (stock_map.Value(kline_data.Stockcode)).(*STK) } else { stock_map.Set(kline_data.Stockcode, stat) } } else { stock_map := smap.New(true) stock_map.Set(kline_data.Stockcode, stat) acc_map.Set(account, stock_map) } } else { stock_map := smap.New(true) stock_map.Set(kline_data.Stockcode, stat) acc_map = smap.New(true) acc_map.Set(account, stock_map) mapResult.Set(user, acc_map) } DoCalculateSTK(kline_data, stat) } func GetTransaction() { for { var con *kdb.KDBConn var err error con, err = kdb.DialKDB("127.0.0.1", 3900, "") // con, err = kdb.DialKDB("139.196.77.165", 5033, "") if err != nil { fmt.Printf("Failed to connect kdb: %s", err.Error()) return } err = con.AsyncCall(".u.sub", &kdb.K{-kdb.KS, kdb.NONE, "response"}, &kdb.K{-kdb.KS, kdb.NONE, ""}) if err != nil { fmt.Println("Subscribe: %s", err.Error()) return } // ignore type print output res, _, err := con.ReadMessage() if err != nil { fmt.Println("Error processing message: ", err.Error()) return } data_list := res.Data.([]*kdb.K) fmt.Println("data_list:", data_list) table := data_list[2].Data.(kdb.Table) fmt.Println("table:", table) for i := 0; i < int(table.Data[0].Len()); i++ { kline_data := &Response{} kline_data2 := &ResponseInt64{} err := kdb.UnmarshalDict(table.Index(i), kline_data) if err != nil { fmt.Println("Failed to unmrshall dict ", err) continue } err2 := kdb.UnmarshalDict(table.Index(i), kline_data2) if err2 != nil { fmt.Println("Failed to unmrshall dict ", 
err2) continue } // fmt.Println("get:", kline_data) // fmt.Println("get2:", kline_data2) if kline_data.Askvol == 0 && kline_data2.Askvol != 0 { kline_data.Askvol = int32(kline_data2.Askvol) kline_data.Withdraw = int32(kline_data2.Withdraw) kline_data.Status = int32(kline_data2.Status) kline_data.Bidvol = int32(kline_data2.Bidvol) } handleData(kline_data) } } } //获取行情数据来统计map内每个票的浮动盈亏 func GetMarket() { for { fmt.Println("==GetMarket==", time.Now()) var con *kdb.KDBConn var err error // con, err = kdb.DialKDB("10.0.0.71", 5010, "") con, err = kdb.DialKDB("139.196.77.165", 5031, "") if err != nil { fmt.Printf("Fa
onnect kdb: %s", err.Error()) return } err = con.AsyncCall(".u.sub", &kdb.K{-kdb.KS, kdb.NONE, "Market"}, &kdb.K{-kdb.KS, kdb.NONE, ""}) if err != nil { fmt.Println("Subscribe: %s", err.Error()) return } res, _, err := con.ReadMessage() if err != nil { fmt.Println("Error processing message: ", err.Error()) return } data_list := res.Data.([]*kdb.K) table := data_list[2].Data.(kdb.Table) for i := 0; i < int(table.Data[0].Len()); i++ { kline_data := &Market{} err := kdb.UnmarshalDict(table.Index(i), kline_data) if err != nil { fmt.Println("Failed to unmrshall dict ", err) continue } fmt.Println("getMarket:", kline_data) for _, user_map := range mapResult.Values() { for _, account_map := range (user_map.(smap.Map)).Values() { for _, stock_map := range (account_map.(smap.Map)).Values() { stat := stock_map.(*STK) if stat.SpaceStk.Stockcode == kline_data.Sym { DoRefresh(float64(kline_data.NMatch/10000), stat) } } } } } } marketChan <- 0 } //再算每个订单之前,要判断是不是增量。算完之后,把订单存到数组 func DoCalculateSTK(newOrder *Response, stk *STK) { fmt.Println("---DoCalculateSTK newOrder:", newOrder) fmt.Println("---DoCalculateSTK stk:", stk) // //清除 // stk.SpaceStk.AvgPrice = 0 // stk.SpaceStk.OnlineProfit = 0 // stk.SpaceStk.SpaceVol = 0 // stk.ProfitStk.BidCount = 0 // stk.ProfitStk.BidMoneySum = 0 // stk.ProfitStk.BidNum = 0 // stk.ProfitStk.PastProfit = 0 // stk.ProfitStk.TotalTax = 0 // //之前的全部统计一遍 // for _, order := range stk.orderArray { // if order.Bidvol != 0 && (order.Status == 2 || order.Status == 5 || order.Status == 4) { // CalculateSingle(order, stk) // } // } //先统计新订单,再更新订单数组 if newOrder.Status == 4 { CalculateSingle(newOrder, stk) var index int flag := false for i, order := range stk.orderArray { // fmt.Println("iiiii ", i) if newOrder.Entrustno == order.Entrustno && order.Status != 4 { index = i flag = true break } } if flag { updateArray(stk, index, newOrder) } else { stk.orderArray = append(stk.orderArray, newOrder) } } else if newOrder.Status == 2 || newOrder.Status == 5 { var 
index int flag := false for i, order := range stk.orderArray { if newOrder.Entrustno == order.Entrustno && order.Status != 4 { //算增量 fmt.Println("---算增量----") x := &Response{} x.Bidvol = newOrder.Bidvol - order.Bidvol x.Bidprice = (newOrder.Bidprice*float64(newOrder.Bidvol) - order.Bidprice*float64(order.Bidvol)) / float64(newOrder.Bidvol-order.Bidvol) CalculateSingle(x, stk) index = i flag = true break } } if flag { updateArray(stk, index, newOrder) } else { CalculateSingle(newOrder, stk) stk.orderArray = append(stk.orderArray, newOrder) } } else { stk.orderArray = append(stk.orderArray, newOrder) } } func CalculateSingle(newOrder *Response, stat *STK) { fmt.Println("CalculateSingle--- vol:", newOrder.Bidvol, " price:", newOrder.Bidprice, " status:", newOrder.Status) stat.Lock() //StaticsResult为实时统计对象,每一个交易完成,刷下统计 if newOrder.Bidvol != 0 { //每次买入刷新均价。然后每次实时价格减去均价不断出现浮动盈利 //算仓位 不管买还是卖,仓位都是相加减 var spaceTemp int32 = stat.SpaceStk.SpaceVol //临时对象记录下之前的仓位量 var avgTemp float64 = stat.SpaceStk.AvgPrice //临时对象记录下之前的均价 //卖的大于原有仓位 var flag bool = false if AbsInt(newOrder.Bidvol) >= AbsInt(stat.SpaceStk.SpaceVol) { flag = true } stat.SpaceStk.SpaceVol = stat.SpaceStk.SpaceVol + newOrder.Bidvol fmt.Println("算仓位", stat.SpaceStk.SpaceVol) if newOrder.Bidvol > 0 { //算均价 if spaceTemp < 0 { if flag { stat.SpaceStk.AvgPrice = math.Abs(newOrder.Bidprice) } } else { stat.SpaceStk.AvgPrice = math.Abs((stat.SpaceStk.AvgPrice*(float64(spaceTemp)) + newOrder.Bidprice*float64(newOrder.Bidvol)) / float64(stat.SpaceStk.SpaceVol)) } } else { if spaceTemp > 0 { if flag { stat.SpaceStk.AvgPrice = math.Abs(newOrder.Bidprice) } } else { stat.SpaceStk.AvgPrice = math.Abs((stat.SpaceStk.AvgPrice*(float64(spaceTemp)) + newOrder.Bidprice*float64(newOrder.Bidvol)) / float64(stat.SpaceStk.SpaceVol)) } } fmt.Println("算均价", stat.SpaceStk.AvgPrice) //算费用 买是万三 卖是千一加上万三 var stattax float64 if newOrder.Bidvol > 0 { stattax = math.Abs(float64(newOrder.Bidprice*float64(newOrder.Bidvol))) * 3 / 10000 } else { 
stattax = math.Abs(float64(newOrder.Bidprice*float64(newOrder.Bidvol))) * 13 / 10000 } fmt.Println("之前费用", stat.ProfitStk.TotalTax, " 本次费用 ", stattax) stat.ProfitStk.TotalTax = stat.ProfitStk.TotalTax + stattax stat.ProfitStk.TotalTax = Float64Fmt(stat.ProfitStk.TotalTax, 2) fmt.Println("算费用", stat.ProfitStk.TotalTax) //算利润 var soldNum int32 = AbsInt(newOrder.Bidvol) //本笔卖出的量 if flag { //卖的大于原有仓位 soldNum = AbsInt(spaceTemp) } else { soldNum = AbsInt(newOrder.Bidvol) } if newOrder.Bidvol > 0 { if spaceTemp < 0 { g := (avgTemp - newOrder.Bidprice) * float64(soldNum) fmt.Println("ggggggggggggggain:", g, "soldNum", soldNum) stat.ProfitStk.PastProfit = stat.ProfitStk.PastProfit + g - stattax } else { stat.ProfitStk.PastProfit = stat.ProfitStk.PastProfit - stattax } } else if newOrder.Bidvol < 0 { if spaceTemp > 0 { g := (newOrder.Bidprice - avgTemp) * float64(soldNum) fmt.Println("ggggggggggggggain:", g, "soldNum", soldNum) stat.ProfitStk.PastProfit = stat.ProfitStk.PastProfit + g - stattax } else { stat.ProfitStk.PastProfit = stat.ProfitStk.PastProfit - stattax } } stat.ProfitStk.PastProfit = Float64Fmt(stat.ProfitStk.PastProfit, 2) fmt.Println("算利润", stat.ProfitStk.PastProfit) //算交易笔数 stat.ProfitStk.BidCount = stat.ProfitStk.BidCount + 1 //算交易股数 // fmt.Println("AbsInt(stat.ProfitStk.BidNum) ::", AbsInt(stat.ProfitStk.BidNum), " soldNum", soldNum) stat.ProfitStk.BidNum = stat.ProfitStk.BidNum + AbsInt(newOrder.Bidvol) fmt.Println("after stat.ProfitStk.BidNum ", stat.ProfitStk.BidNum) //算交易额 stat.ProfitStk.BidMoneySum = stat.ProfitStk.BidMoneySum + math.Abs(float64(AbsInt(newOrder.Bidvol))*newOrder.Bidprice) } stat.Unlock() } func DoRefresh(nMatch float64, stat *STK) { stat.Lock() //非交易统计,每次实时价格减去均价和费用不断出现浮动盈利 stat.SpaceStk.OnlineProfit = (float64(stat.SpaceStk.SpaceVol) * (nMatch - stat.SpaceStk.AvgPrice)) - (math.Abs(float64(nMatch*float64(stat.SpaceStk.SpaceVol))) * 13 / 10000) stat.SpaceStk.OnlineProfit = Float64Fmt(stat.SpaceStk.OnlineProfit, 64) stat.Unlock() } 
func printMap() { for { // fmt.Println("map:::", mapResult) fmt.Println("用户 账户 票 仓位 均价 浮盈 利润 笔数 股数 交易额 费用 ") for _, user_map := range mapResult.Values() { //累积每个用户的总浮动盈亏和 总利润 var totalOnlineProfit float64 var totalProfit float64 for _, account_map := range (user_map.(smap.Map)).Values() { for _, stock_map := range (account_map.(smap.Map)).Values() { stat := stock_map.(*STK) totalOnlineProfit = totalOnlineProfit + stat.SpaceStk.OnlineProfit totalProfit = totalProfit + stat.ProfitStk.PastProfit fmt.Println(stat.SpaceStk.Sym, " ", stat.SpaceStk.Accountname, " ", stat.SpaceStk.Stockcode, " ", stat.SpaceStk.SpaceVol, " ", stat.SpaceStk.AvgPrice, " ", stat.SpaceStk.OnlineProfit, " ", stat.ProfitStk.PastProfit, " ", stat.ProfitStk.BidCount, " ", stat.ProfitStk.BidNum, " ", stat.ProfitStk.BidMoneySum, " ", stat.ProfitStk.TotalTax) } } fmt.Println("总浮动盈亏:", totalOnlineProfit, "总利润:", totalProfit) } time.Sleep(time.Second * 20) } } // //func Abs(f float64) float64 { // if f < 0 { // return float64(-f) // } // return float64(f) //} func AbsInt(f int32) int32 { if f < 0 { return int32(-f) } return int32(f) } func Float64Fmt(f float64, prec int) float64 { a := strconv.FormatFloat(f, 'f', prec, 64) ff, err := strconv.ParseFloat(a, 64) if err != nil { fmt.Println(err) } return ff } func updateArray(stk *STK, index int, newOrder *Response) { //去除原先的同一委托订单 加入新的订单放入末尾 fmt.Println("stk: %v index %v neworder %v", stk, index, newOrder) if len(stk.orderArray) == 0 { stk.orderArray = append(stk.orderArray, newOrder) } else { if index == len(stk.orderArray)-1 { stk.orderArray[index] = newOrder } else { stk.orderArray = append(stk.orderArray[:index], stk.orderArray[index+1:]...) stk.orderArray = append(stk.orderArray, newOrder) } } }
iled to c
identifier_name
expdescription.py
#!/usr/bin/env python ############################################################################## ## ## This file is part of Sardana ## ## http://www.sardana-controls.org/ ## ## Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain ## ## Sardana is free software: you can redistribute it and/or modify ## it under the terms of the GNU Lesser General Public License as published by ## the Free Software Foundation, either version 3 of the License, or ## (at your option) any later version. ## ## Sardana is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU Lesser General Public License for more details. ## ## You should have received a copy of the GNU Lesser General Public License ## along with Sardana. If not, see <http://www.gnu.org/licenses/>. ## ############################################################################## """This module provides widget for configuring the data acquisition and display of an experiment""" __all__ = ["ExpDescriptionEditor"] from taurus.external.qt import Qt import copy import taurus import taurus.core from taurus.qt.qtgui.base import TaurusBaseWidget from taurus.qt.qtgui import resource from sardana.taurus.qt.qtcore.tango.sardana.model import SardanaBaseProxyModel, SardanaTypeTreeItem from taurus.qt.qtgui.util.ui import UILoadable ## Using a plain model and filtering and checking 'Acquirable' in item.itemData().interfaces is more elegant, but things don't get properly sorted... 
#from taurus.qt.qtcore.tango.sardana.model import SardanaElementPlainModel class SardanaAcquirableProxyModel(SardanaBaseProxyModel): # ALLOWED_TYPES = 'Acquirable' # # def filterAcceptsRow(self, sourceRow, sourceParent): # sourceModel = self.sourceModel() # idx = sourceModel.index(sourceRow, 0, sourceParent) # item = idx.internalPointer() # return 'Acquirable' in item.itemData().interfaces # ALLOWED_TYPES = ['Motor', 'CTExpChannel', 'ZeroDExpChannel', 'OneDExpChannel', # 'TwoDExpChannel', 'ComChannel', 'IORegister', 'PseudoMotor', # 'PseudoCounter'] from sardana.sardanadefs import ElementType, TYPE_ACQUIRABLE_ELEMENTS ALLOWED_TYPES = [ElementType[t] for t in TYPE_ACQUIRABLE_ELEMENTS] def filterAcceptsRow(self, sourceRow, sourceParent): sourceModel = self.sourceModel() idx = sourceModel.index(sourceRow, 0, sourceParent) treeItem = idx.internalPointer() if isinstance(treeItem, SardanaTypeTreeItem): return treeItem.itemData() in self.ALLOWED_TYPES return True @UILoadable(with_ui='ui') class ExpDescriptionEditor(Qt.QWidget, TaurusBaseWidget): ''' A widget for editing the configuration of a experiment (measurement groups, plot and storage parameters, etc). It receives a Sardana Door name as its model and gets/sets the configuration using the `ExperimentConfiguration` environmental variable for that Door. 
''' def __init__(self, parent=None, door=None, plotsButton=True): Qt.QWidget.__init__(self, parent) TaurusBaseWidget.__init__(self, 'ExpDescriptionEditor') self.loadUi() self.ui.buttonBox.setStandardButtons(Qt.QDialogButtonBox.Reset | Qt.QDialogButtonBox.Apply) newperspectivesDict = copy.deepcopy(self.ui.sardanaElementTree.KnownPerspectives) #newperspectivesDict[self.ui.sardanaElementTree.DftPerspective]['model'] = [SardanaAcquirableProxyModel, SardanaElementPlainModel] newperspectivesDict[self.ui.sardanaElementTree.DftPerspective]['model'][0] = SardanaAcquirableProxyModel self.ui.sardanaElementTree.KnownPerspectives = newperspectivesDict #assign a copy because if just a key of this class memberwas modified, all instances of this class would be affected self.ui.sardanaElementTree._setPerspective(self.ui.sardanaElementTree.DftPerspective) self._localConfig = None self._originalConfiguration = None self._dirty = False self._dirtyMntGrps = set() self.connect(self.ui.activeMntGrpCB, Qt.SIGNAL('activated (QString)'), self.changeActiveMntGrp) self.connect(self.ui.createMntGrpBT, Qt.SIGNAL('clicked ()'), self.createMntGrp) self.connect(self.ui.deleteMntGrpBT, Qt.SIGNAL('clicked ()'), self.deleteMntGrp) self.connect(self.ui.compressionCB, Qt.SIGNAL('currentIndexChanged (int)'), self.onCompressionCBChanged) self.connect(self.ui.pathLE, Qt.SIGNAL('textEdited (QString)'), self.onPathLEEdited) self.connect(self.ui.filenameLE, Qt.SIGNAL('textEdited (QString)'), self.onFilenameLEEdited) self.connect(self.ui.channelEditor.getQModel(), Qt.SIGNAL('dataChanged (QModelIndex, QModelIndex)'), self._updateButtonBox) self.connect(self.ui.channelEditor.getQModel(), Qt.SIGNAL('modelReset ()'), self._updateButtonBox) preScanList = self.ui.preScanList self.connect(preScanList, Qt.SIGNAL('dataChanged'), self.onPreScanSnapshotChanged) #TODO: For Taurus 4 compatibility if hasattr(preScanList, "dataChangedSignal"): preScanList.dataChangedSignal.connect(self.onPreScanSnapshotChanged) 
self.connect(self.ui.choosePathBT, Qt.SIGNAL('clicked ()'), self.onChooseScanDirButtonClicked) self.__plotManager = None icon = resource.getIcon(":/actions/view.svg") self.togglePlotsAction = Qt.QAction(icon, "Show/Hide plots", self) self.togglePlotsAction.setCheckable(True) self.togglePlotsAction.setChecked(False) self.togglePlotsAction.setEnabled(plotsButton) self.addAction(self.togglePlotsAction) self.connect(self.togglePlotsAction, Qt.SIGNAL("toggled(bool)"), self.onPlotsButtonToggled) self.ui.plotsButton.setDefaultAction(self.togglePlotsAction) if door is not None: self.setModel(door) self.connect(self.ui.buttonBox, Qt.SIGNAL("clicked(QAbstractButton *)"), self.onDialogButtonClicked) #Taurus Configuration properties and delegates self.registerConfigDelegate(self.ui.channelEditor) def getModelClass(self): '''reimplemented from :class:`TaurusBaseWidget`''' return taurus.core.taurusdevice.TaurusDevice def onChooseScanDirButtonClicked(self): ret = Qt.QFileDialog.getExistingDirectory (self, 'Choose directory for saving files', self.ui.pathLE.text()) if ret: self.ui.pathLE.setText(ret) self.ui.pathLE.emit(Qt.SIGNAL('textEdited (QString)'), ret) def onDialogButtonClicked(self, button): role = self.ui.buttonBox.buttonRole(button) if role == Qt.QDialogButtonBox.ApplyRole: self.writeExperimentConfiguration(ask=False) elif role == Qt.QDialogButtonBox.ResetRole: self._reloadConf() def closeEvent(self, event): '''This event handler receives widget close events''' if self.isDataChanged(): self.writeExperimentConfiguration(ask=True) Qt.QWidget.closeEvent(self, event) def setModel(self, model): '''reimplemented from :class:`TaurusBaseWidget`''' TaurusBaseWidget.setModel(self, model) self._reloadConf(force=True) #set the model of some child widgets door = self.getModelObj() if door is None: return tghost = taurus.Database().getNormalName() #@todo: get the tghost from the door model instead msname = door.macro_server.getFullName() self.ui.taurusModelTree.setModel(tghost) 
self.ui.sardanaElementTree.setModel(msname) def _reloadConf(self, force=False): if not force and self.isDataChanged(): op = Qt.QMessageBox.question(self, "Reload info from door", "If you reload, all current experiment configuration changes will be lost. Reload?", Qt.QMessageBox.Yes | Qt.QMessageBox.Cancel) if op != Qt.QMessageBox.Yes: return door = self.getModelObj() if door is None: return conf = door.getExperimentConfiguration() self._originalConfiguration = copy.deepcopy(conf) self.setLocalConfig(conf) self._setDirty(False) self._dirtyMntGrps = set() #set a list of available channels avail_channels = {} for ch_info in door.macro_server.getExpChannelElements().values(): avail_channels[ch_info.full_name] = ch_info.getData() self.ui.channelEditor.getQModel().setAvailableChannels(avail_channels) def _setDirty(self, dirty): self._dirty = dirty self._updateButtonBox() def isDataChanged(self): """Tells if the local data has been modified since it was last refreshed :return: (bool) True if he local data has been modified since it was last refreshed """ return bool(self._dirty or self.ui.channelEditor.getQModel().isDataChanged() or self._dirtyMntGrps) def _updateButtonBox(self, *args, **kwargs): self.ui.buttonBox.setEnabled(self.isDataChanged()) def getLocalConfig(self): return self._localConfig def setLocalConfig(self, conf): '''gets a ExpDescription dictionary and sets up the widget''' self._localConfig = conf #set the Channel Editor activeMntGrpName = self._localConfig['ActiveMntGrp'] or '' if activeMntGrpName in self._localConfig['MntGrpConfigs']: mgconfig = self._localConfig['MntGrpConfigs'][activeMntGrpName] self.ui.channelEditor.getQModel().setDataSource(mgconfig) #set the measurement group ComboBox self.ui.activeMntGrpCB.clear() mntGrpLabels = [] for _, mntGrpConf in self._localConfig['MntGrpConfigs'].items(): # get labels to visualize names with lower and upper case mntGrpLabels.append(mntGrpConf['label']) self.ui.activeMntGrpCB.addItems(sorted(mntGrpLabels)) 
idx = self.ui.activeMntGrpCB.findText(activeMntGrpName, # case insensitive find Qt.Qt.MatchFixedString) self.ui.activeMntGrpCB.setCurrentIndex(idx) #set the system snapshot list psl = self._localConfig.get('PreScanSnapshot') #I get it before clearing because clear() changes the _localConfig # TODO: For Taurus 4 compatibility psl_fullname = [] for name, display in psl: psl_fullname.append(("tango://%s" % name, display)) self.ui.preScanList.clear() self.ui.preScanList.addModels(psl_fullname) #other settings self.ui.filenameLE.setText(", ".join(self._localConfig['ScanFile'])) self.ui.pathLE.setText(self._localConfig['ScanDir'] or '') self.ui.compressionCB.setCurrentIndex(self._localConfig['DataCompressionRank'] + 1) def writeExperimentConfiguration(self, ask=True): '''sends the current local configuration to the door :param ask: (bool) If True (default) prompts the user before saving. ''' if ask: op = Qt.QMessageBox.question(self, "Save configuration?", 'Do you want to save the current configuration?\n(if not, any changes will be lost)', Qt.QMessageBox.Yes | Qt.QMessageBox.No) if op != Qt.QMessageBox.Yes: return False conf = self.getLocalConfig() #make sure that no empty measurement groups are written for mgname, mgconfig in conf.get('MntGrpConfigs', {}).items(): if mgconfig is not None and not mgconfig.get('controllers'): mglabel = mgconfig['label'] Qt.QMessageBox.information(self, "Empty Measurement group", "The measurement group '%s' is empty. 
Fill it (or delete it) before applying" % mglabel, Qt.QMessageBox.Ok) self.changeActiveMntGrp(mgname) return False #check if the currently displayed mntgrp is changed if self.ui.channelEditor.getQModel().isDataChanged(): self._dirtyMntGrps.add(self._localConfig['ActiveMntGrp']) door = self.getModelObj() door.setExperimentConfiguration(conf, mnt_grps=self._dirtyMntGrps) self._originalConfiguration = copy.deepcopy(conf) self._dirtyMntGrps = set() self.ui.channelEditor.getQModel().setDataChanged(False) self._setDirty(False) self.emit(Qt.SIGNAL('experimentConfigurationChanged'), copy.deepcopy(conf)) return True def changeActiveMntGrp(self, activeMntGrpName): activeMntGrpName = str(activeMntGrpName) if self._localConfig is None: return if activeMntGrpName == self._localConfig['ActiveMntGrp']: return #nothing changed if activeMntGrpName not in self._localConfig['MntGrpConfigs']: raise KeyError('Unknown measurement group "%s"' % activeMntGrpName) #add the previous measurement group to the list of "dirty" groups if something was changed if self.ui.channelEditor.getQModel().isDataChanged(): self._dirtyMntGrps.add(self._localConfig['ActiveMntGrp']) self._localConfig['ActiveMntGrp'] = activeMntGrpName i = self.ui.activeMntGrpCB.findText(activeMntGrpName, # case insensitive find Qt.Qt.MatchFixedString) self.ui.activeMntGrpCB.setCurrentIndex(i) mgconfig = self._localConfig['MntGrpConfigs'][activeMntGrpName] self.ui.channelEditor.getQModel().setDataSource(mgconfig) self._setDirty(True) def createMntGrp(self): '''creates a new Measurement Group''' if self._localConfig is None: return mntGrpName, ok = Qt.QInputDialog.getText(self, "New Measurement Group", "Enter a name for the new measurement Group") if not ok: return mntGrpName = str(mntGrpName) #check that the given name is not an existing pool element ms = self.getModelObj().macro_server poolElementNames = [v.name for v in ms.getElementsWithInterface("PoolElement").values()] while mntGrpName in poolElementNames: 
Qt.QMessageBox.warning(self, "Cannot create Measurement group", "The name '%s' already is used for another pool element. Please Choose a different one." % mntGrpName, Qt.QMessageBox.Ok) mntGrpName, ok = Qt.QInputDialog.getText(self, "New Measurement Group", "Enter a name for the new measurement Group", Qt.QLineEdit.Normal, mntGrpName) if not ok: return mntGrpName = str(mntGrpName) #check that the measurement group is not already in the localConfig if mntGrpName in self._localConfig['MntGrpConfigs']: Qt.QMessageBox.warning(self, "%s already exists" % mntGrpName, 'A measurement group named "%s" already exists. A new one will not be created' % mntGrpName) return #add an empty configuration dictionary to the local config mgconfig = {'label': mntGrpName, 'controllers':{} } self._localConfig['MntGrpConfigs'][mntGrpName] = mgconfig #add the new measurement group to the list of "dirty" groups self._dirtyMntGrps.add(mntGrpName) #add the name to the combobox self.ui.activeMntGrpCB.addItem(mntGrpName) #make it the Active MntGrp self.changeActiveMntGrp(mntGrpName)
"Remove the measurement group '%s'?" % activeMntGrpName, Qt.QMessageBox.Yes | Qt.QMessageBox.Cancel) if op != Qt.QMessageBox.Yes: return currentIndex = self.ui.activeMntGrpCB.currentIndex() if self._localConfig is None: return if activeMntGrpName not in self._localConfig['MntGrpConfigs']: raise KeyError('Unknown measurement group "%s"' % activeMntGrpName) #add the current measurement group to the list of "dirty" groups self._dirtyMntGrps.add(activeMntGrpName) self._localConfig['MntGrpConfigs'][activeMntGrpName] = None self.ui.activeMntGrpCB.setCurrentIndex(-1) self.ui.activeMntGrpCB.removeItem(currentIndex) self.ui.channelEditor.getQModel().setDataSource({}) self._setDirty(True) def onCompressionCBChanged(self, idx): if self._localConfig is None: return self._localConfig['DataCompressionRank'] = idx - 1 self._setDirty(True) def onPathLEEdited(self, text): self._localConfig['ScanDir'] = str(text) self._setDirty(True) def onFilenameLEEdited(self, text): self._localConfig['ScanFile'] = [v.strip() for v in str(text).split(',')] self._setDirty(True) def onPreScanSnapshotChanged(self, items): door = self.getModelObj() ms = door.macro_server preScanList = [] for e in items: nfo = ms.getElementInfo(e.src) if nfo is None: full_name = e.src; display = e.display else: full_name = nfo.full_name; display = nfo.name # TODO: For Taurus 4 compatibility preScanList.append((full_name.lstrip("tango://"), display)) self._localConfig['PreScanSnapshot'] = preScanList self._setDirty(True) def onPlotsButtonToggled(self, checked): if checked: from taurus.qt.qtgui.taurusgui.macrolistener import \ DynamicPlotManager self.__plotManager = DynamicPlotManager(self) self.__plotManager.setModel(self.getModelName()) self.connect(self, Qt.SIGNAL('experimentConfigurationChanged'), self.__plotManager.onExpConfChanged) else: self.disconnect(self, Qt.SIGNAL('experimentConfigurationChanged'), self.__plotManager.onExpConfChanged) self.__plotManager.removePanels() self.__plotManager.setModel(None) 
self.__plotManager = None def demo(model=None): """Experiment configuration""" #w = main_ChannelEditor() w = ExpDescriptionEditor() if model is None: from sardana.taurus.qt.qtgui.extra_macroexecutor import \ TaurusMacroConfigurationDialog dialog = TaurusMacroConfigurationDialog(w) accept = dialog.exec_() if accept: model = str(dialog.doorComboBox.currentText()) if model is not None: w.setModel(model) return w def main(): import sys import taurus.qt.qtgui.application Application = taurus.qt.qtgui.application.TaurusApplication app = Application.instance() owns_app = app is None if owns_app: app = Application(app_name="Exp. Description demo", app_version="1.0", org_domain="Sardana", org_name="Tango community") args = app.get_command_line_args() if len(args) == 1: w = demo(model=args[0]) else: w = demo() w.show() if owns_app: sys.exit(app.exec_()) else: return w if __name__ == "__main__": main()
def deleteMntGrp(self): '''creates a new Measurement Group''' activeMntGrpName = str(self.ui.activeMntGrpCB.currentText()) op = Qt.QMessageBox.question(self, "Delete Measurement Group",
random_line_split
expdescription.py
#!/usr/bin/env python ############################################################################## ## ## This file is part of Sardana ## ## http://www.sardana-controls.org/ ## ## Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain ## ## Sardana is free software: you can redistribute it and/or modify ## it under the terms of the GNU Lesser General Public License as published by ## the Free Software Foundation, either version 3 of the License, or ## (at your option) any later version. ## ## Sardana is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU Lesser General Public License for more details. ## ## You should have received a copy of the GNU Lesser General Public License ## along with Sardana. If not, see <http://www.gnu.org/licenses/>. ## ############################################################################## """This module provides widget for configuring the data acquisition and display of an experiment""" __all__ = ["ExpDescriptionEditor"] from taurus.external.qt import Qt import copy import taurus import taurus.core from taurus.qt.qtgui.base import TaurusBaseWidget from taurus.qt.qtgui import resource from sardana.taurus.qt.qtcore.tango.sardana.model import SardanaBaseProxyModel, SardanaTypeTreeItem from taurus.qt.qtgui.util.ui import UILoadable ## Using a plain model and filtering and checking 'Acquirable' in item.itemData().interfaces is more elegant, but things don't get properly sorted... 
#from taurus.qt.qtcore.tango.sardana.model import SardanaElementPlainModel class SardanaAcquirableProxyModel(SardanaBaseProxyModel): # ALLOWED_TYPES = 'Acquirable' # # def filterAcceptsRow(self, sourceRow, sourceParent): # sourceModel = self.sourceModel() # idx = sourceModel.index(sourceRow, 0, sourceParent) # item = idx.internalPointer() # return 'Acquirable' in item.itemData().interfaces # ALLOWED_TYPES = ['Motor', 'CTExpChannel', 'ZeroDExpChannel', 'OneDExpChannel', # 'TwoDExpChannel', 'ComChannel', 'IORegister', 'PseudoMotor', # 'PseudoCounter'] from sardana.sardanadefs import ElementType, TYPE_ACQUIRABLE_ELEMENTS ALLOWED_TYPES = [ElementType[t] for t in TYPE_ACQUIRABLE_ELEMENTS] def filterAcceptsRow(self, sourceRow, sourceParent): sourceModel = self.sourceModel() idx = sourceModel.index(sourceRow, 0, sourceParent) treeItem = idx.internalPointer() if isinstance(treeItem, SardanaTypeTreeItem): return treeItem.itemData() in self.ALLOWED_TYPES return True @UILoadable(with_ui='ui') class
(Qt.QWidget, TaurusBaseWidget): ''' A widget for editing the configuration of a experiment (measurement groups, plot and storage parameters, etc). It receives a Sardana Door name as its model and gets/sets the configuration using the `ExperimentConfiguration` environmental variable for that Door. ''' def __init__(self, parent=None, door=None, plotsButton=True): Qt.QWidget.__init__(self, parent) TaurusBaseWidget.__init__(self, 'ExpDescriptionEditor') self.loadUi() self.ui.buttonBox.setStandardButtons(Qt.QDialogButtonBox.Reset | Qt.QDialogButtonBox.Apply) newperspectivesDict = copy.deepcopy(self.ui.sardanaElementTree.KnownPerspectives) #newperspectivesDict[self.ui.sardanaElementTree.DftPerspective]['model'] = [SardanaAcquirableProxyModel, SardanaElementPlainModel] newperspectivesDict[self.ui.sardanaElementTree.DftPerspective]['model'][0] = SardanaAcquirableProxyModel self.ui.sardanaElementTree.KnownPerspectives = newperspectivesDict #assign a copy because if just a key of this class memberwas modified, all instances of this class would be affected self.ui.sardanaElementTree._setPerspective(self.ui.sardanaElementTree.DftPerspective) self._localConfig = None self._originalConfiguration = None self._dirty = False self._dirtyMntGrps = set() self.connect(self.ui.activeMntGrpCB, Qt.SIGNAL('activated (QString)'), self.changeActiveMntGrp) self.connect(self.ui.createMntGrpBT, Qt.SIGNAL('clicked ()'), self.createMntGrp) self.connect(self.ui.deleteMntGrpBT, Qt.SIGNAL('clicked ()'), self.deleteMntGrp) self.connect(self.ui.compressionCB, Qt.SIGNAL('currentIndexChanged (int)'), self.onCompressionCBChanged) self.connect(self.ui.pathLE, Qt.SIGNAL('textEdited (QString)'), self.onPathLEEdited) self.connect(self.ui.filenameLE, Qt.SIGNAL('textEdited (QString)'), self.onFilenameLEEdited) self.connect(self.ui.channelEditor.getQModel(), Qt.SIGNAL('dataChanged (QModelIndex, QModelIndex)'), self._updateButtonBox) self.connect(self.ui.channelEditor.getQModel(), Qt.SIGNAL('modelReset ()'), 
self._updateButtonBox) preScanList = self.ui.preScanList self.connect(preScanList, Qt.SIGNAL('dataChanged'), self.onPreScanSnapshotChanged) #TODO: For Taurus 4 compatibility if hasattr(preScanList, "dataChangedSignal"): preScanList.dataChangedSignal.connect(self.onPreScanSnapshotChanged) self.connect(self.ui.choosePathBT, Qt.SIGNAL('clicked ()'), self.onChooseScanDirButtonClicked) self.__plotManager = None icon = resource.getIcon(":/actions/view.svg") self.togglePlotsAction = Qt.QAction(icon, "Show/Hide plots", self) self.togglePlotsAction.setCheckable(True) self.togglePlotsAction.setChecked(False) self.togglePlotsAction.setEnabled(plotsButton) self.addAction(self.togglePlotsAction) self.connect(self.togglePlotsAction, Qt.SIGNAL("toggled(bool)"), self.onPlotsButtonToggled) self.ui.plotsButton.setDefaultAction(self.togglePlotsAction) if door is not None: self.setModel(door) self.connect(self.ui.buttonBox, Qt.SIGNAL("clicked(QAbstractButton *)"), self.onDialogButtonClicked) #Taurus Configuration properties and delegates self.registerConfigDelegate(self.ui.channelEditor) def getModelClass(self): '''reimplemented from :class:`TaurusBaseWidget`''' return taurus.core.taurusdevice.TaurusDevice def onChooseScanDirButtonClicked(self): ret = Qt.QFileDialog.getExistingDirectory (self, 'Choose directory for saving files', self.ui.pathLE.text()) if ret: self.ui.pathLE.setText(ret) self.ui.pathLE.emit(Qt.SIGNAL('textEdited (QString)'), ret) def onDialogButtonClicked(self, button): role = self.ui.buttonBox.buttonRole(button) if role == Qt.QDialogButtonBox.ApplyRole: self.writeExperimentConfiguration(ask=False) elif role == Qt.QDialogButtonBox.ResetRole: self._reloadConf() def closeEvent(self, event): '''This event handler receives widget close events''' if self.isDataChanged(): self.writeExperimentConfiguration(ask=True) Qt.QWidget.closeEvent(self, event) def setModel(self, model): '''reimplemented from :class:`TaurusBaseWidget`''' TaurusBaseWidget.setModel(self, model) 
self._reloadConf(force=True) #set the model of some child widgets door = self.getModelObj() if door is None: return tghost = taurus.Database().getNormalName() #@todo: get the tghost from the door model instead msname = door.macro_server.getFullName() self.ui.taurusModelTree.setModel(tghost) self.ui.sardanaElementTree.setModel(msname) def _reloadConf(self, force=False): if not force and self.isDataChanged(): op = Qt.QMessageBox.question(self, "Reload info from door", "If you reload, all current experiment configuration changes will be lost. Reload?", Qt.QMessageBox.Yes | Qt.QMessageBox.Cancel) if op != Qt.QMessageBox.Yes: return door = self.getModelObj() if door is None: return conf = door.getExperimentConfiguration() self._originalConfiguration = copy.deepcopy(conf) self.setLocalConfig(conf) self._setDirty(False) self._dirtyMntGrps = set() #set a list of available channels avail_channels = {} for ch_info in door.macro_server.getExpChannelElements().values(): avail_channels[ch_info.full_name] = ch_info.getData() self.ui.channelEditor.getQModel().setAvailableChannels(avail_channels) def _setDirty(self, dirty): self._dirty = dirty self._updateButtonBox() def isDataChanged(self): """Tells if the local data has been modified since it was last refreshed :return: (bool) True if he local data has been modified since it was last refreshed """ return bool(self._dirty or self.ui.channelEditor.getQModel().isDataChanged() or self._dirtyMntGrps) def _updateButtonBox(self, *args, **kwargs): self.ui.buttonBox.setEnabled(self.isDataChanged()) def getLocalConfig(self): return self._localConfig def setLocalConfig(self, conf): '''gets a ExpDescription dictionary and sets up the widget''' self._localConfig = conf #set the Channel Editor activeMntGrpName = self._localConfig['ActiveMntGrp'] or '' if activeMntGrpName in self._localConfig['MntGrpConfigs']: mgconfig = self._localConfig['MntGrpConfigs'][activeMntGrpName] self.ui.channelEditor.getQModel().setDataSource(mgconfig) #set the 
measurement group ComboBox self.ui.activeMntGrpCB.clear() mntGrpLabels = [] for _, mntGrpConf in self._localConfig['MntGrpConfigs'].items(): # get labels to visualize names with lower and upper case mntGrpLabels.append(mntGrpConf['label']) self.ui.activeMntGrpCB.addItems(sorted(mntGrpLabels)) idx = self.ui.activeMntGrpCB.findText(activeMntGrpName, # case insensitive find Qt.Qt.MatchFixedString) self.ui.activeMntGrpCB.setCurrentIndex(idx) #set the system snapshot list psl = self._localConfig.get('PreScanSnapshot') #I get it before clearing because clear() changes the _localConfig # TODO: For Taurus 4 compatibility psl_fullname = [] for name, display in psl: psl_fullname.append(("tango://%s" % name, display)) self.ui.preScanList.clear() self.ui.preScanList.addModels(psl_fullname) #other settings self.ui.filenameLE.setText(", ".join(self._localConfig['ScanFile'])) self.ui.pathLE.setText(self._localConfig['ScanDir'] or '') self.ui.compressionCB.setCurrentIndex(self._localConfig['DataCompressionRank'] + 1) def writeExperimentConfiguration(self, ask=True): '''sends the current local configuration to the door :param ask: (bool) If True (default) prompts the user before saving. ''' if ask: op = Qt.QMessageBox.question(self, "Save configuration?", 'Do you want to save the current configuration?\n(if not, any changes will be lost)', Qt.QMessageBox.Yes | Qt.QMessageBox.No) if op != Qt.QMessageBox.Yes: return False conf = self.getLocalConfig() #make sure that no empty measurement groups are written for mgname, mgconfig in conf.get('MntGrpConfigs', {}).items(): if mgconfig is not None and not mgconfig.get('controllers'): mglabel = mgconfig['label'] Qt.QMessageBox.information(self, "Empty Measurement group", "The measurement group '%s' is empty. 
Fill it (or delete it) before applying" % mglabel, Qt.QMessageBox.Ok) self.changeActiveMntGrp(mgname) return False #check if the currently displayed mntgrp is changed if self.ui.channelEditor.getQModel().isDataChanged(): self._dirtyMntGrps.add(self._localConfig['ActiveMntGrp']) door = self.getModelObj() door.setExperimentConfiguration(conf, mnt_grps=self._dirtyMntGrps) self._originalConfiguration = copy.deepcopy(conf) self._dirtyMntGrps = set() self.ui.channelEditor.getQModel().setDataChanged(False) self._setDirty(False) self.emit(Qt.SIGNAL('experimentConfigurationChanged'), copy.deepcopy(conf)) return True def changeActiveMntGrp(self, activeMntGrpName): activeMntGrpName = str(activeMntGrpName) if self._localConfig is None: return if activeMntGrpName == self._localConfig['ActiveMntGrp']: return #nothing changed if activeMntGrpName not in self._localConfig['MntGrpConfigs']: raise KeyError('Unknown measurement group "%s"' % activeMntGrpName) #add the previous measurement group to the list of "dirty" groups if something was changed if self.ui.channelEditor.getQModel().isDataChanged(): self._dirtyMntGrps.add(self._localConfig['ActiveMntGrp']) self._localConfig['ActiveMntGrp'] = activeMntGrpName i = self.ui.activeMntGrpCB.findText(activeMntGrpName, # case insensitive find Qt.Qt.MatchFixedString) self.ui.activeMntGrpCB.setCurrentIndex(i) mgconfig = self._localConfig['MntGrpConfigs'][activeMntGrpName] self.ui.channelEditor.getQModel().setDataSource(mgconfig) self._setDirty(True) def createMntGrp(self): '''creates a new Measurement Group''' if self._localConfig is None: return mntGrpName, ok = Qt.QInputDialog.getText(self, "New Measurement Group", "Enter a name for the new measurement Group") if not ok: return mntGrpName = str(mntGrpName) #check that the given name is not an existing pool element ms = self.getModelObj().macro_server poolElementNames = [v.name for v in ms.getElementsWithInterface("PoolElement").values()] while mntGrpName in poolElementNames: 
Qt.QMessageBox.warning(self, "Cannot create Measurement group", "The name '%s' already is used for another pool element. Please Choose a different one." % mntGrpName, Qt.QMessageBox.Ok) mntGrpName, ok = Qt.QInputDialog.getText(self, "New Measurement Group", "Enter a name for the new measurement Group", Qt.QLineEdit.Normal, mntGrpName) if not ok: return mntGrpName = str(mntGrpName) #check that the measurement group is not already in the localConfig if mntGrpName in self._localConfig['MntGrpConfigs']: Qt.QMessageBox.warning(self, "%s already exists" % mntGrpName, 'A measurement group named "%s" already exists. A new one will not be created' % mntGrpName) return #add an empty configuration dictionary to the local config mgconfig = {'label': mntGrpName, 'controllers':{} } self._localConfig['MntGrpConfigs'][mntGrpName] = mgconfig #add the new measurement group to the list of "dirty" groups self._dirtyMntGrps.add(mntGrpName) #add the name to the combobox self.ui.activeMntGrpCB.addItem(mntGrpName) #make it the Active MntGrp self.changeActiveMntGrp(mntGrpName) def deleteMntGrp(self): '''creates a new Measurement Group''' activeMntGrpName = str(self.ui.activeMntGrpCB.currentText()) op = Qt.QMessageBox.question(self, "Delete Measurement Group", "Remove the measurement group '%s'?" 
% activeMntGrpName, Qt.QMessageBox.Yes | Qt.QMessageBox.Cancel) if op != Qt.QMessageBox.Yes: return currentIndex = self.ui.activeMntGrpCB.currentIndex() if self._localConfig is None: return if activeMntGrpName not in self._localConfig['MntGrpConfigs']: raise KeyError('Unknown measurement group "%s"' % activeMntGrpName) #add the current measurement group to the list of "dirty" groups self._dirtyMntGrps.add(activeMntGrpName) self._localConfig['MntGrpConfigs'][activeMntGrpName] = None self.ui.activeMntGrpCB.setCurrentIndex(-1) self.ui.activeMntGrpCB.removeItem(currentIndex) self.ui.channelEditor.getQModel().setDataSource({}) self._setDirty(True) def onCompressionCBChanged(self, idx): if self._localConfig is None: return self._localConfig['DataCompressionRank'] = idx - 1 self._setDirty(True) def onPathLEEdited(self, text): self._localConfig['ScanDir'] = str(text) self._setDirty(True) def onFilenameLEEdited(self, text): self._localConfig['ScanFile'] = [v.strip() for v in str(text).split(',')] self._setDirty(True) def onPreScanSnapshotChanged(self, items): door = self.getModelObj() ms = door.macro_server preScanList = [] for e in items: nfo = ms.getElementInfo(e.src) if nfo is None: full_name = e.src; display = e.display else: full_name = nfo.full_name; display = nfo.name # TODO: For Taurus 4 compatibility preScanList.append((full_name.lstrip("tango://"), display)) self._localConfig['PreScanSnapshot'] = preScanList self._setDirty(True) def onPlotsButtonToggled(self, checked): if checked: from taurus.qt.qtgui.taurusgui.macrolistener import \ DynamicPlotManager self.__plotManager = DynamicPlotManager(self) self.__plotManager.setModel(self.getModelName()) self.connect(self, Qt.SIGNAL('experimentConfigurationChanged'), self.__plotManager.onExpConfChanged) else: self.disconnect(self, Qt.SIGNAL('experimentConfigurationChanged'), self.__plotManager.onExpConfChanged) self.__plotManager.removePanels() self.__plotManager.setModel(None) self.__plotManager = None def 
demo(model=None): """Experiment configuration""" #w = main_ChannelEditor() w = ExpDescriptionEditor() if model is None: from sardana.taurus.qt.qtgui.extra_macroexecutor import \ TaurusMacroConfigurationDialog dialog = TaurusMacroConfigurationDialog(w) accept = dialog.exec_() if accept: model = str(dialog.doorComboBox.currentText()) if model is not None: w.setModel(model) return w def main(): import sys import taurus.qt.qtgui.application Application = taurus.qt.qtgui.application.TaurusApplication app = Application.instance() owns_app = app is None if owns_app: app = Application(app_name="Exp. Description demo", app_version="1.0", org_domain="Sardana", org_name="Tango community") args = app.get_command_line_args() if len(args) == 1: w = demo(model=args[0]) else: w = demo() w.show() if owns_app: sys.exit(app.exec_()) else: return w if __name__ == "__main__": main()
ExpDescriptionEditor
identifier_name
expdescription.py
#!/usr/bin/env python ############################################################################## ## ## This file is part of Sardana ## ## http://www.sardana-controls.org/ ## ## Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain ## ## Sardana is free software: you can redistribute it and/or modify ## it under the terms of the GNU Lesser General Public License as published by ## the Free Software Foundation, either version 3 of the License, or ## (at your option) any later version. ## ## Sardana is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU Lesser General Public License for more details. ## ## You should have received a copy of the GNU Lesser General Public License ## along with Sardana. If not, see <http://www.gnu.org/licenses/>. ## ############################################################################## """This module provides widget for configuring the data acquisition and display of an experiment""" __all__ = ["ExpDescriptionEditor"] from taurus.external.qt import Qt import copy import taurus import taurus.core from taurus.qt.qtgui.base import TaurusBaseWidget from taurus.qt.qtgui import resource from sardana.taurus.qt.qtcore.tango.sardana.model import SardanaBaseProxyModel, SardanaTypeTreeItem from taurus.qt.qtgui.util.ui import UILoadable ## Using a plain model and filtering and checking 'Acquirable' in item.itemData().interfaces is more elegant, but things don't get properly sorted... 
#from taurus.qt.qtcore.tango.sardana.model import SardanaElementPlainModel class SardanaAcquirableProxyModel(SardanaBaseProxyModel): # ALLOWED_TYPES = 'Acquirable' # # def filterAcceptsRow(self, sourceRow, sourceParent): # sourceModel = self.sourceModel() # idx = sourceModel.index(sourceRow, 0, sourceParent) # item = idx.internalPointer() # return 'Acquirable' in item.itemData().interfaces # ALLOWED_TYPES = ['Motor', 'CTExpChannel', 'ZeroDExpChannel', 'OneDExpChannel', # 'TwoDExpChannel', 'ComChannel', 'IORegister', 'PseudoMotor', # 'PseudoCounter'] from sardana.sardanadefs import ElementType, TYPE_ACQUIRABLE_ELEMENTS ALLOWED_TYPES = [ElementType[t] for t in TYPE_ACQUIRABLE_ELEMENTS] def filterAcceptsRow(self, sourceRow, sourceParent): sourceModel = self.sourceModel() idx = sourceModel.index(sourceRow, 0, sourceParent) treeItem = idx.internalPointer() if isinstance(treeItem, SardanaTypeTreeItem): return treeItem.itemData() in self.ALLOWED_TYPES return True @UILoadable(with_ui='ui') class ExpDescriptionEditor(Qt.QWidget, TaurusBaseWidget): ''' A widget for editing the configuration of a experiment (measurement groups, plot and storage parameters, etc). It receives a Sardana Door name as its model and gets/sets the configuration using the `ExperimentConfiguration` environmental variable for that Door. 
''' def __init__(self, parent=None, door=None, plotsButton=True): Qt.QWidget.__init__(self, parent) TaurusBaseWidget.__init__(self, 'ExpDescriptionEditor') self.loadUi() self.ui.buttonBox.setStandardButtons(Qt.QDialogButtonBox.Reset | Qt.QDialogButtonBox.Apply) newperspectivesDict = copy.deepcopy(self.ui.sardanaElementTree.KnownPerspectives) #newperspectivesDict[self.ui.sardanaElementTree.DftPerspective]['model'] = [SardanaAcquirableProxyModel, SardanaElementPlainModel] newperspectivesDict[self.ui.sardanaElementTree.DftPerspective]['model'][0] = SardanaAcquirableProxyModel self.ui.sardanaElementTree.KnownPerspectives = newperspectivesDict #assign a copy because if just a key of this class memberwas modified, all instances of this class would be affected self.ui.sardanaElementTree._setPerspective(self.ui.sardanaElementTree.DftPerspective) self._localConfig = None self._originalConfiguration = None self._dirty = False self._dirtyMntGrps = set() self.connect(self.ui.activeMntGrpCB, Qt.SIGNAL('activated (QString)'), self.changeActiveMntGrp) self.connect(self.ui.createMntGrpBT, Qt.SIGNAL('clicked ()'), self.createMntGrp) self.connect(self.ui.deleteMntGrpBT, Qt.SIGNAL('clicked ()'), self.deleteMntGrp) self.connect(self.ui.compressionCB, Qt.SIGNAL('currentIndexChanged (int)'), self.onCompressionCBChanged) self.connect(self.ui.pathLE, Qt.SIGNAL('textEdited (QString)'), self.onPathLEEdited) self.connect(self.ui.filenameLE, Qt.SIGNAL('textEdited (QString)'), self.onFilenameLEEdited) self.connect(self.ui.channelEditor.getQModel(), Qt.SIGNAL('dataChanged (QModelIndex, QModelIndex)'), self._updateButtonBox) self.connect(self.ui.channelEditor.getQModel(), Qt.SIGNAL('modelReset ()'), self._updateButtonBox) preScanList = self.ui.preScanList self.connect(preScanList, Qt.SIGNAL('dataChanged'), self.onPreScanSnapshotChanged) #TODO: For Taurus 4 compatibility if hasattr(preScanList, "dataChangedSignal"): preScanList.dataChangedSignal.connect(self.onPreScanSnapshotChanged) 
self.connect(self.ui.choosePathBT, Qt.SIGNAL('clicked ()'), self.onChooseScanDirButtonClicked) self.__plotManager = None icon = resource.getIcon(":/actions/view.svg") self.togglePlotsAction = Qt.QAction(icon, "Show/Hide plots", self) self.togglePlotsAction.setCheckable(True) self.togglePlotsAction.setChecked(False) self.togglePlotsAction.setEnabled(plotsButton) self.addAction(self.togglePlotsAction) self.connect(self.togglePlotsAction, Qt.SIGNAL("toggled(bool)"), self.onPlotsButtonToggled) self.ui.plotsButton.setDefaultAction(self.togglePlotsAction) if door is not None: self.setModel(door) self.connect(self.ui.buttonBox, Qt.SIGNAL("clicked(QAbstractButton *)"), self.onDialogButtonClicked) #Taurus Configuration properties and delegates self.registerConfigDelegate(self.ui.channelEditor) def getModelClass(self): '''reimplemented from :class:`TaurusBaseWidget`''' return taurus.core.taurusdevice.TaurusDevice def onChooseScanDirButtonClicked(self): ret = Qt.QFileDialog.getExistingDirectory (self, 'Choose directory for saving files', self.ui.pathLE.text()) if ret: self.ui.pathLE.setText(ret) self.ui.pathLE.emit(Qt.SIGNAL('textEdited (QString)'), ret) def onDialogButtonClicked(self, button): role = self.ui.buttonBox.buttonRole(button) if role == Qt.QDialogButtonBox.ApplyRole: self.writeExperimentConfiguration(ask=False) elif role == Qt.QDialogButtonBox.ResetRole: self._reloadConf() def closeEvent(self, event): '''This event handler receives widget close events''' if self.isDataChanged(): self.writeExperimentConfiguration(ask=True) Qt.QWidget.closeEvent(self, event) def setModel(self, model): '''reimplemented from :class:`TaurusBaseWidget`''' TaurusBaseWidget.setModel(self, model) self._reloadConf(force=True) #set the model of some child widgets door = self.getModelObj() if door is None: return tghost = taurus.Database().getNormalName() #@todo: get the tghost from the door model instead msname = door.macro_server.getFullName() self.ui.taurusModelTree.setModel(tghost) 
self.ui.sardanaElementTree.setModel(msname) def _reloadConf(self, force=False): if not force and self.isDataChanged(): op = Qt.QMessageBox.question(self, "Reload info from door", "If you reload, all current experiment configuration changes will be lost. Reload?", Qt.QMessageBox.Yes | Qt.QMessageBox.Cancel) if op != Qt.QMessageBox.Yes: return door = self.getModelObj() if door is None: return conf = door.getExperimentConfiguration() self._originalConfiguration = copy.deepcopy(conf) self.setLocalConfig(conf) self._setDirty(False) self._dirtyMntGrps = set() #set a list of available channels avail_channels = {} for ch_info in door.macro_server.getExpChannelElements().values(): avail_channels[ch_info.full_name] = ch_info.getData() self.ui.channelEditor.getQModel().setAvailableChannels(avail_channels) def _setDirty(self, dirty): self._dirty = dirty self._updateButtonBox() def isDataChanged(self): """Tells if the local data has been modified since it was last refreshed :return: (bool) True if he local data has been modified since it was last refreshed """ return bool(self._dirty or self.ui.channelEditor.getQModel().isDataChanged() or self._dirtyMntGrps) def _updateButtonBox(self, *args, **kwargs): self.ui.buttonBox.setEnabled(self.isDataChanged()) def getLocalConfig(self): return self._localConfig def setLocalConfig(self, conf): '''gets a ExpDescription dictionary and sets up the widget''' self._localConfig = conf #set the Channel Editor activeMntGrpName = self._localConfig['ActiveMntGrp'] or '' if activeMntGrpName in self._localConfig['MntGrpConfigs']: mgconfig = self._localConfig['MntGrpConfigs'][activeMntGrpName] self.ui.channelEditor.getQModel().setDataSource(mgconfig) #set the measurement group ComboBox self.ui.activeMntGrpCB.clear() mntGrpLabels = [] for _, mntGrpConf in self._localConfig['MntGrpConfigs'].items(): # get labels to visualize names with lower and upper case mntGrpLabels.append(mntGrpConf['label']) self.ui.activeMntGrpCB.addItems(sorted(mntGrpLabels)) 
idx = self.ui.activeMntGrpCB.findText(activeMntGrpName, # case insensitive find Qt.Qt.MatchFixedString) self.ui.activeMntGrpCB.setCurrentIndex(idx) #set the system snapshot list psl = self._localConfig.get('PreScanSnapshot') #I get it before clearing because clear() changes the _localConfig # TODO: For Taurus 4 compatibility psl_fullname = [] for name, display in psl: psl_fullname.append(("tango://%s" % name, display)) self.ui.preScanList.clear() self.ui.preScanList.addModels(psl_fullname) #other settings self.ui.filenameLE.setText(", ".join(self._localConfig['ScanFile'])) self.ui.pathLE.setText(self._localConfig['ScanDir'] or '') self.ui.compressionCB.setCurrentIndex(self._localConfig['DataCompressionRank'] + 1) def writeExperimentConfiguration(self, ask=True): '''sends the current local configuration to the door :param ask: (bool) If True (default) prompts the user before saving. ''' if ask: op = Qt.QMessageBox.question(self, "Save configuration?", 'Do you want to save the current configuration?\n(if not, any changes will be lost)', Qt.QMessageBox.Yes | Qt.QMessageBox.No) if op != Qt.QMessageBox.Yes: return False conf = self.getLocalConfig() #make sure that no empty measurement groups are written for mgname, mgconfig in conf.get('MntGrpConfigs', {}).items(): if mgconfig is not None and not mgconfig.get('controllers'): mglabel = mgconfig['label'] Qt.QMessageBox.information(self, "Empty Measurement group", "The measurement group '%s' is empty. 
Fill it (or delete it) before applying" % mglabel, Qt.QMessageBox.Ok) self.changeActiveMntGrp(mgname) return False #check if the currently displayed mntgrp is changed if self.ui.channelEditor.getQModel().isDataChanged(): self._dirtyMntGrps.add(self._localConfig['ActiveMntGrp']) door = self.getModelObj() door.setExperimentConfiguration(conf, mnt_grps=self._dirtyMntGrps) self._originalConfiguration = copy.deepcopy(conf) self._dirtyMntGrps = set() self.ui.channelEditor.getQModel().setDataChanged(False) self._setDirty(False) self.emit(Qt.SIGNAL('experimentConfigurationChanged'), copy.deepcopy(conf)) return True def changeActiveMntGrp(self, activeMntGrpName): activeMntGrpName = str(activeMntGrpName) if self._localConfig is None: return if activeMntGrpName == self._localConfig['ActiveMntGrp']: return #nothing changed if activeMntGrpName not in self._localConfig['MntGrpConfigs']: raise KeyError('Unknown measurement group "%s"' % activeMntGrpName) #add the previous measurement group to the list of "dirty" groups if something was changed if self.ui.channelEditor.getQModel().isDataChanged(): self._dirtyMntGrps.add(self._localConfig['ActiveMntGrp']) self._localConfig['ActiveMntGrp'] = activeMntGrpName i = self.ui.activeMntGrpCB.findText(activeMntGrpName, # case insensitive find Qt.Qt.MatchFixedString) self.ui.activeMntGrpCB.setCurrentIndex(i) mgconfig = self._localConfig['MntGrpConfigs'][activeMntGrpName] self.ui.channelEditor.getQModel().setDataSource(mgconfig) self._setDirty(True) def createMntGrp(self):
def deleteMntGrp(self): '''creates a new Measurement Group''' activeMntGrpName = str(self.ui.activeMntGrpCB.currentText()) op = Qt.QMessageBox.question(self, "Delete Measurement Group", "Remove the measurement group '%s'?" % activeMntGrpName, Qt.QMessageBox.Yes | Qt.QMessageBox.Cancel) if op != Qt.QMessageBox.Yes: return currentIndex = self.ui.activeMntGrpCB.currentIndex() if self._localConfig is None: return if activeMntGrpName not in self._localConfig['MntGrpConfigs']: raise KeyError('Unknown measurement group "%s"' % activeMntGrpName) #add the current measurement group to the list of "dirty" groups self._dirtyMntGrps.add(activeMntGrpName) self._localConfig['MntGrpConfigs'][activeMntGrpName] = None self.ui.activeMntGrpCB.setCurrentIndex(-1) self.ui.activeMntGrpCB.removeItem(currentIndex) self.ui.channelEditor.getQModel().setDataSource({}) self._setDirty(True) def onCompressionCBChanged(self, idx): if self._localConfig is None: return self._localConfig['DataCompressionRank'] = idx - 1 self._setDirty(True) def onPathLEEdited(self, text): self._localConfig['ScanDir'] = str(text) self._setDirty(True) def onFilenameLEEdited(self, text): self._localConfig['ScanFile'] = [v.strip() for v in str(text).split(',')] self._setDirty(True) def onPreScanSnapshotChanged(self, items): door = self.getModelObj() ms = door.macro_server preScanList = [] for e in items: nfo = ms.getElementInfo(e.src) if nfo is None: full_name = e.src; display = e.display else: full_name = nfo.full_name; display = nfo.name # TODO: For Taurus 4 compatibility preScanList.append((full_name.lstrip("tango://"), display)) self._localConfig['PreScanSnapshot'] = preScanList self._setDirty(True) def onPlotsButtonToggled(self, checked): if checked: from taurus.qt.qtgui.taurusgui.macrolistener import \ DynamicPlotManager self.__plotManager = DynamicPlotManager(self) self.__plotManager.setModel(self.getModelName()) self.connect(self, Qt.SIGNAL('experimentConfigurationChanged'), self.__plotManager.onExpConfChanged) 
else: self.disconnect(self, Qt.SIGNAL('experimentConfigurationChanged'), self.__plotManager.onExpConfChanged) self.__plotManager.removePanels() self.__plotManager.setModel(None) self.__plotManager = None def demo(model=None): """Experiment configuration""" #w = main_ChannelEditor() w = ExpDescriptionEditor() if model is None: from sardana.taurus.qt.qtgui.extra_macroexecutor import \ TaurusMacroConfigurationDialog dialog = TaurusMacroConfigurationDialog(w) accept = dialog.exec_() if accept: model = str(dialog.doorComboBox.currentText()) if model is not None: w.setModel(model) return w def main(): import sys import taurus.qt.qtgui.application Application = taurus.qt.qtgui.application.TaurusApplication app = Application.instance() owns_app = app is None if owns_app: app = Application(app_name="Exp. Description demo", app_version="1.0", org_domain="Sardana", org_name="Tango community") args = app.get_command_line_args() if len(args) == 1: w = demo(model=args[0]) else: w = demo() w.show() if owns_app: sys.exit(app.exec_()) else: return w if __name__ == "__main__": main()
'''creates a new Measurement Group''' if self._localConfig is None: return mntGrpName, ok = Qt.QInputDialog.getText(self, "New Measurement Group", "Enter a name for the new measurement Group") if not ok: return mntGrpName = str(mntGrpName) #check that the given name is not an existing pool element ms = self.getModelObj().macro_server poolElementNames = [v.name for v in ms.getElementsWithInterface("PoolElement").values()] while mntGrpName in poolElementNames: Qt.QMessageBox.warning(self, "Cannot create Measurement group", "The name '%s' already is used for another pool element. Please Choose a different one." % mntGrpName, Qt.QMessageBox.Ok) mntGrpName, ok = Qt.QInputDialog.getText(self, "New Measurement Group", "Enter a name for the new measurement Group", Qt.QLineEdit.Normal, mntGrpName) if not ok: return mntGrpName = str(mntGrpName) #check that the measurement group is not already in the localConfig if mntGrpName in self._localConfig['MntGrpConfigs']: Qt.QMessageBox.warning(self, "%s already exists" % mntGrpName, 'A measurement group named "%s" already exists. A new one will not be created' % mntGrpName) return #add an empty configuration dictionary to the local config mgconfig = {'label': mntGrpName, 'controllers':{} } self._localConfig['MntGrpConfigs'][mntGrpName] = mgconfig #add the new measurement group to the list of "dirty" groups self._dirtyMntGrps.add(mntGrpName) #add the name to the combobox self.ui.activeMntGrpCB.addItem(mntGrpName) #make it the Active MntGrp self.changeActiveMntGrp(mntGrpName)
identifier_body
expdescription.py
#!/usr/bin/env python ############################################################################## ## ## This file is part of Sardana ## ## http://www.sardana-controls.org/ ## ## Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain ## ## Sardana is free software: you can redistribute it and/or modify ## it under the terms of the GNU Lesser General Public License as published by ## the Free Software Foundation, either version 3 of the License, or ## (at your option) any later version. ## ## Sardana is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU Lesser General Public License for more details. ## ## You should have received a copy of the GNU Lesser General Public License ## along with Sardana. If not, see <http://www.gnu.org/licenses/>. ## ############################################################################## """This module provides widget for configuring the data acquisition and display of an experiment""" __all__ = ["ExpDescriptionEditor"] from taurus.external.qt import Qt import copy import taurus import taurus.core from taurus.qt.qtgui.base import TaurusBaseWidget from taurus.qt.qtgui import resource from sardana.taurus.qt.qtcore.tango.sardana.model import SardanaBaseProxyModel, SardanaTypeTreeItem from taurus.qt.qtgui.util.ui import UILoadable ## Using a plain model and filtering and checking 'Acquirable' in item.itemData().interfaces is more elegant, but things don't get properly sorted... 
#from taurus.qt.qtcore.tango.sardana.model import SardanaElementPlainModel class SardanaAcquirableProxyModel(SardanaBaseProxyModel): # ALLOWED_TYPES = 'Acquirable' # # def filterAcceptsRow(self, sourceRow, sourceParent): # sourceModel = self.sourceModel() # idx = sourceModel.index(sourceRow, 0, sourceParent) # item = idx.internalPointer() # return 'Acquirable' in item.itemData().interfaces # ALLOWED_TYPES = ['Motor', 'CTExpChannel', 'ZeroDExpChannel', 'OneDExpChannel', # 'TwoDExpChannel', 'ComChannel', 'IORegister', 'PseudoMotor', # 'PseudoCounter'] from sardana.sardanadefs import ElementType, TYPE_ACQUIRABLE_ELEMENTS ALLOWED_TYPES = [ElementType[t] for t in TYPE_ACQUIRABLE_ELEMENTS] def filterAcceptsRow(self, sourceRow, sourceParent): sourceModel = self.sourceModel() idx = sourceModel.index(sourceRow, 0, sourceParent) treeItem = idx.internalPointer() if isinstance(treeItem, SardanaTypeTreeItem): return treeItem.itemData() in self.ALLOWED_TYPES return True @UILoadable(with_ui='ui') class ExpDescriptionEditor(Qt.QWidget, TaurusBaseWidget): ''' A widget for editing the configuration of a experiment (measurement groups, plot and storage parameters, etc). It receives a Sardana Door name as its model and gets/sets the configuration using the `ExperimentConfiguration` environmental variable for that Door. 
''' def __init__(self, parent=None, door=None, plotsButton=True): Qt.QWidget.__init__(self, parent) TaurusBaseWidget.__init__(self, 'ExpDescriptionEditor') self.loadUi() self.ui.buttonBox.setStandardButtons(Qt.QDialogButtonBox.Reset | Qt.QDialogButtonBox.Apply) newperspectivesDict = copy.deepcopy(self.ui.sardanaElementTree.KnownPerspectives) #newperspectivesDict[self.ui.sardanaElementTree.DftPerspective]['model'] = [SardanaAcquirableProxyModel, SardanaElementPlainModel] newperspectivesDict[self.ui.sardanaElementTree.DftPerspective]['model'][0] = SardanaAcquirableProxyModel self.ui.sardanaElementTree.KnownPerspectives = newperspectivesDict #assign a copy because if just a key of this class memberwas modified, all instances of this class would be affected self.ui.sardanaElementTree._setPerspective(self.ui.sardanaElementTree.DftPerspective) self._localConfig = None self._originalConfiguration = None self._dirty = False self._dirtyMntGrps = set() self.connect(self.ui.activeMntGrpCB, Qt.SIGNAL('activated (QString)'), self.changeActiveMntGrp) self.connect(self.ui.createMntGrpBT, Qt.SIGNAL('clicked ()'), self.createMntGrp) self.connect(self.ui.deleteMntGrpBT, Qt.SIGNAL('clicked ()'), self.deleteMntGrp) self.connect(self.ui.compressionCB, Qt.SIGNAL('currentIndexChanged (int)'), self.onCompressionCBChanged) self.connect(self.ui.pathLE, Qt.SIGNAL('textEdited (QString)'), self.onPathLEEdited) self.connect(self.ui.filenameLE, Qt.SIGNAL('textEdited (QString)'), self.onFilenameLEEdited) self.connect(self.ui.channelEditor.getQModel(), Qt.SIGNAL('dataChanged (QModelIndex, QModelIndex)'), self._updateButtonBox) self.connect(self.ui.channelEditor.getQModel(), Qt.SIGNAL('modelReset ()'), self._updateButtonBox) preScanList = self.ui.preScanList self.connect(preScanList, Qt.SIGNAL('dataChanged'), self.onPreScanSnapshotChanged) #TODO: For Taurus 4 compatibility if hasattr(preScanList, "dataChangedSignal"): preScanList.dataChangedSignal.connect(self.onPreScanSnapshotChanged) 
self.connect(self.ui.choosePathBT, Qt.SIGNAL('clicked ()'), self.onChooseScanDirButtonClicked) self.__plotManager = None icon = resource.getIcon(":/actions/view.svg") self.togglePlotsAction = Qt.QAction(icon, "Show/Hide plots", self) self.togglePlotsAction.setCheckable(True) self.togglePlotsAction.setChecked(False) self.togglePlotsAction.setEnabled(plotsButton) self.addAction(self.togglePlotsAction) self.connect(self.togglePlotsAction, Qt.SIGNAL("toggled(bool)"), self.onPlotsButtonToggled) self.ui.plotsButton.setDefaultAction(self.togglePlotsAction) if door is not None: self.setModel(door) self.connect(self.ui.buttonBox, Qt.SIGNAL("clicked(QAbstractButton *)"), self.onDialogButtonClicked) #Taurus Configuration properties and delegates self.registerConfigDelegate(self.ui.channelEditor) def getModelClass(self): '''reimplemented from :class:`TaurusBaseWidget`''' return taurus.core.taurusdevice.TaurusDevice def onChooseScanDirButtonClicked(self): ret = Qt.QFileDialog.getExistingDirectory (self, 'Choose directory for saving files', self.ui.pathLE.text()) if ret: self.ui.pathLE.setText(ret) self.ui.pathLE.emit(Qt.SIGNAL('textEdited (QString)'), ret) def onDialogButtonClicked(self, button): role = self.ui.buttonBox.buttonRole(button) if role == Qt.QDialogButtonBox.ApplyRole: self.writeExperimentConfiguration(ask=False) elif role == Qt.QDialogButtonBox.ResetRole: self._reloadConf() def closeEvent(self, event): '''This event handler receives widget close events''' if self.isDataChanged(): self.writeExperimentConfiguration(ask=True) Qt.QWidget.closeEvent(self, event) def setModel(self, model): '''reimplemented from :class:`TaurusBaseWidget`''' TaurusBaseWidget.setModel(self, model) self._reloadConf(force=True) #set the model of some child widgets door = self.getModelObj() if door is None: return tghost = taurus.Database().getNormalName() #@todo: get the tghost from the door model instead msname = door.macro_server.getFullName() self.ui.taurusModelTree.setModel(tghost) 
self.ui.sardanaElementTree.setModel(msname) def _reloadConf(self, force=False): if not force and self.isDataChanged():
door = self.getModelObj() if door is None: return conf = door.getExperimentConfiguration() self._originalConfiguration = copy.deepcopy(conf) self.setLocalConfig(conf) self._setDirty(False) self._dirtyMntGrps = set() #set a list of available channels avail_channels = {} for ch_info in door.macro_server.getExpChannelElements().values(): avail_channels[ch_info.full_name] = ch_info.getData() self.ui.channelEditor.getQModel().setAvailableChannels(avail_channels) def _setDirty(self, dirty): self._dirty = dirty self._updateButtonBox() def isDataChanged(self): """Tells if the local data has been modified since it was last refreshed :return: (bool) True if he local data has been modified since it was last refreshed """ return bool(self._dirty or self.ui.channelEditor.getQModel().isDataChanged() or self._dirtyMntGrps) def _updateButtonBox(self, *args, **kwargs): self.ui.buttonBox.setEnabled(self.isDataChanged()) def getLocalConfig(self): return self._localConfig def setLocalConfig(self, conf): '''gets a ExpDescription dictionary and sets up the widget''' self._localConfig = conf #set the Channel Editor activeMntGrpName = self._localConfig['ActiveMntGrp'] or '' if activeMntGrpName in self._localConfig['MntGrpConfigs']: mgconfig = self._localConfig['MntGrpConfigs'][activeMntGrpName] self.ui.channelEditor.getQModel().setDataSource(mgconfig) #set the measurement group ComboBox self.ui.activeMntGrpCB.clear() mntGrpLabels = [] for _, mntGrpConf in self._localConfig['MntGrpConfigs'].items(): # get labels to visualize names with lower and upper case mntGrpLabels.append(mntGrpConf['label']) self.ui.activeMntGrpCB.addItems(sorted(mntGrpLabels)) idx = self.ui.activeMntGrpCB.findText(activeMntGrpName, # case insensitive find Qt.Qt.MatchFixedString) self.ui.activeMntGrpCB.setCurrentIndex(idx) #set the system snapshot list psl = self._localConfig.get('PreScanSnapshot') #I get it before clearing because clear() changes the _localConfig # TODO: For Taurus 4 compatibility psl_fullname = [] 
for name, display in psl: psl_fullname.append(("tango://%s" % name, display)) self.ui.preScanList.clear() self.ui.preScanList.addModels(psl_fullname) #other settings self.ui.filenameLE.setText(", ".join(self._localConfig['ScanFile'])) self.ui.pathLE.setText(self._localConfig['ScanDir'] or '') self.ui.compressionCB.setCurrentIndex(self._localConfig['DataCompressionRank'] + 1) def writeExperimentConfiguration(self, ask=True): '''sends the current local configuration to the door :param ask: (bool) If True (default) prompts the user before saving. ''' if ask: op = Qt.QMessageBox.question(self, "Save configuration?", 'Do you want to save the current configuration?\n(if not, any changes will be lost)', Qt.QMessageBox.Yes | Qt.QMessageBox.No) if op != Qt.QMessageBox.Yes: return False conf = self.getLocalConfig() #make sure that no empty measurement groups are written for mgname, mgconfig in conf.get('MntGrpConfigs', {}).items(): if mgconfig is not None and not mgconfig.get('controllers'): mglabel = mgconfig['label'] Qt.QMessageBox.information(self, "Empty Measurement group", "The measurement group '%s' is empty. 
Fill it (or delete it) before applying" % mglabel, Qt.QMessageBox.Ok) self.changeActiveMntGrp(mgname) return False #check if the currently displayed mntgrp is changed if self.ui.channelEditor.getQModel().isDataChanged(): self._dirtyMntGrps.add(self._localConfig['ActiveMntGrp']) door = self.getModelObj() door.setExperimentConfiguration(conf, mnt_grps=self._dirtyMntGrps) self._originalConfiguration = copy.deepcopy(conf) self._dirtyMntGrps = set() self.ui.channelEditor.getQModel().setDataChanged(False) self._setDirty(False) self.emit(Qt.SIGNAL('experimentConfigurationChanged'), copy.deepcopy(conf)) return True def changeActiveMntGrp(self, activeMntGrpName): activeMntGrpName = str(activeMntGrpName) if self._localConfig is None: return if activeMntGrpName == self._localConfig['ActiveMntGrp']: return #nothing changed if activeMntGrpName not in self._localConfig['MntGrpConfigs']: raise KeyError('Unknown measurement group "%s"' % activeMntGrpName) #add the previous measurement group to the list of "dirty" groups if something was changed if self.ui.channelEditor.getQModel().isDataChanged(): self._dirtyMntGrps.add(self._localConfig['ActiveMntGrp']) self._localConfig['ActiveMntGrp'] = activeMntGrpName i = self.ui.activeMntGrpCB.findText(activeMntGrpName, # case insensitive find Qt.Qt.MatchFixedString) self.ui.activeMntGrpCB.setCurrentIndex(i) mgconfig = self._localConfig['MntGrpConfigs'][activeMntGrpName] self.ui.channelEditor.getQModel().setDataSource(mgconfig) self._setDirty(True) def createMntGrp(self): '''creates a new Measurement Group''' if self._localConfig is None: return mntGrpName, ok = Qt.QInputDialog.getText(self, "New Measurement Group", "Enter a name for the new measurement Group") if not ok: return mntGrpName = str(mntGrpName) #check that the given name is not an existing pool element ms = self.getModelObj().macro_server poolElementNames = [v.name for v in ms.getElementsWithInterface("PoolElement").values()] while mntGrpName in poolElementNames: 
Qt.QMessageBox.warning(self, "Cannot create Measurement group", "The name '%s' already is used for another pool element. Please Choose a different one." % mntGrpName, Qt.QMessageBox.Ok) mntGrpName, ok = Qt.QInputDialog.getText(self, "New Measurement Group", "Enter a name for the new measurement Group", Qt.QLineEdit.Normal, mntGrpName) if not ok: return mntGrpName = str(mntGrpName) #check that the measurement group is not already in the localConfig if mntGrpName in self._localConfig['MntGrpConfigs']: Qt.QMessageBox.warning(self, "%s already exists" % mntGrpName, 'A measurement group named "%s" already exists. A new one will not be created' % mntGrpName) return #add an empty configuration dictionary to the local config mgconfig = {'label': mntGrpName, 'controllers':{} } self._localConfig['MntGrpConfigs'][mntGrpName] = mgconfig #add the new measurement group to the list of "dirty" groups self._dirtyMntGrps.add(mntGrpName) #add the name to the combobox self.ui.activeMntGrpCB.addItem(mntGrpName) #make it the Active MntGrp self.changeActiveMntGrp(mntGrpName) def deleteMntGrp(self): '''creates a new Measurement Group''' activeMntGrpName = str(self.ui.activeMntGrpCB.currentText()) op = Qt.QMessageBox.question(self, "Delete Measurement Group", "Remove the measurement group '%s'?" 
% activeMntGrpName, Qt.QMessageBox.Yes | Qt.QMessageBox.Cancel) if op != Qt.QMessageBox.Yes: return currentIndex = self.ui.activeMntGrpCB.currentIndex() if self._localConfig is None: return if activeMntGrpName not in self._localConfig['MntGrpConfigs']: raise KeyError('Unknown measurement group "%s"' % activeMntGrpName) #add the current measurement group to the list of "dirty" groups self._dirtyMntGrps.add(activeMntGrpName) self._localConfig['MntGrpConfigs'][activeMntGrpName] = None self.ui.activeMntGrpCB.setCurrentIndex(-1) self.ui.activeMntGrpCB.removeItem(currentIndex) self.ui.channelEditor.getQModel().setDataSource({}) self._setDirty(True) def onCompressionCBChanged(self, idx): if self._localConfig is None: return self._localConfig['DataCompressionRank'] = idx - 1 self._setDirty(True) def onPathLEEdited(self, text): self._localConfig['ScanDir'] = str(text) self._setDirty(True) def onFilenameLEEdited(self, text): self._localConfig['ScanFile'] = [v.strip() for v in str(text).split(',')] self._setDirty(True) def onPreScanSnapshotChanged(self, items): door = self.getModelObj() ms = door.macro_server preScanList = [] for e in items: nfo = ms.getElementInfo(e.src) if nfo is None: full_name = e.src; display = e.display else: full_name = nfo.full_name; display = nfo.name # TODO: For Taurus 4 compatibility preScanList.append((full_name.lstrip("tango://"), display)) self._localConfig['PreScanSnapshot'] = preScanList self._setDirty(True) def onPlotsButtonToggled(self, checked): if checked: from taurus.qt.qtgui.taurusgui.macrolistener import \ DynamicPlotManager self.__plotManager = DynamicPlotManager(self) self.__plotManager.setModel(self.getModelName()) self.connect(self, Qt.SIGNAL('experimentConfigurationChanged'), self.__plotManager.onExpConfChanged) else: self.disconnect(self, Qt.SIGNAL('experimentConfigurationChanged'), self.__plotManager.onExpConfChanged) self.__plotManager.removePanels() self.__plotManager.setModel(None) self.__plotManager = None def 
demo(model=None): """Experiment configuration""" #w = main_ChannelEditor() w = ExpDescriptionEditor() if model is None: from sardana.taurus.qt.qtgui.extra_macroexecutor import \ TaurusMacroConfigurationDialog dialog = TaurusMacroConfigurationDialog(w) accept = dialog.exec_() if accept: model = str(dialog.doorComboBox.currentText()) if model is not None: w.setModel(model) return w def main(): import sys import taurus.qt.qtgui.application Application = taurus.qt.qtgui.application.TaurusApplication app = Application.instance() owns_app = app is None if owns_app: app = Application(app_name="Exp. Description demo", app_version="1.0", org_domain="Sardana", org_name="Tango community") args = app.get_command_line_args() if len(args) == 1: w = demo(model=args[0]) else: w = demo() w.show() if owns_app: sys.exit(app.exec_()) else: return w if __name__ == "__main__": main()
op = Qt.QMessageBox.question(self, "Reload info from door", "If you reload, all current experiment configuration changes will be lost. Reload?", Qt.QMessageBox.Yes | Qt.QMessageBox.Cancel) if op != Qt.QMessageBox.Yes: return
conditional_block
a1.py
"""CSCA08 Assignment 1, Fall 2017 I hereby agree that the work contained herein is solely my work and that I have not received any external help from my peers, nor have I used any resources not directly supplied by the course in order to complete this assignment. I have not looked at anyone else's solution, and no one has looked at mine. I understand that by adding my name to this file, I am making a formal declaration, and any subsequent discovery of plagiarism or other academic misconduct could result in a charge of perjury in addition to other charges under the academic code of conduct of the University of Toronto Scarborough Campus Name: Hyebeen Jung UtorID: junghyeb Student Number: 1004346512 Date: November 12, 2017 """ def pair_genes(first_gene, second_gene): ''' (str, str) -> bool Genes can be paired together by allowing the nucleotides from the first gene to pair-bond with the nucleotides from the second gene. Guanine will pair with cytosine, and adenine will pair with thymine. Genes can also pair in either direction. This function takes in two string representations of genes and returns a boolean that indicates whether the two genes can pair or not. 
REQ: genes must be consisted of letters {A, G, C, T} >>> pair_genes("TCAG", "AGTC") True >>> pair_genes("TCAG", "CTGA") True >>> pair_genes("TCAG", "CCAG") False ''' # declare a boolean that indicates whether the two genes are pairable can_pair = False # create a sample of gene that can pair sample_gene = "" for nucleotide in first_gene: if (nucleotide == "A"): sample_gene += "T" elif (nucleotide == "T"): sample_gene += "A" elif (nucleotide == "G"): sample_gene += "C" else: sample_gene += "G" # check if the sample gene matches the second gene if (second_gene == sample_gene): can_pair = True # genes can also pair either direction if (second_gene[::-1] == sample_gene): can_pair = True # returns the boolean that indicates whether the two genes can pair return can_pair def zip_length(gene): ''' (str) -> int Genes can partially pair with itself in a process called zipping. Zipping occurs when at either end of a gene can form a pair bond, and continues until the pair of nucleotides can no longer form a bond. Guanines pair with cytosines, and adenines pair with thymines. This function returns an integer value that indicates the maximum number of nucleotides pairs that the gene can zip. 
REQ: genes must be consisted of letters {A, G, C, T} >>> zip_length("AGTCTCGCT") 2 >>> zip_length("AGTCTCGAG") 0 ''' # declare a variable that counts the zip length zip_length_count = 0 # for loop that is in charge of each nucleotides from the left for left_index in range(len(gene)): # declare a variable that is in charge of the indices of # each nucleotides from the right right_index = len(gene) - 1 - left_index # checks if either end of the gene can form a bond if (gene[left_index] == "A" and gene[right_index] == "T"): zip_length_count += 1 elif (gene[left_index] == "C" and gene[right_index] == "G"): zip_length_count += 1 elif (gene[left_index] == "G" and gene[right_index] == "C"): zip_length_count += 1 elif (gene[left_index] == "T" and gene[right_index] == "A"): zip_length_count += 1 # once the gene can no longer zip, # returns the zip length right away else: return zip_length_count def splice_gene(source, destination, start_anchor, end_anchor): ''' (list, list, str, str) -> None This function performs splicing of gene sequences. Splicing of genes can be done by taking a nucleotide sequence from one gene and replace it with a nucleotide sequence from another. First, find the anchor sequences, which are the sequences found within the starting and end anchor given by the user (anchors can be found from either end of the gene). Then, if the starting anchor and the end anchor is found in both genes, the anchor sequence extracted from the source (the first gene) replaces the anchor sequence from the destination (the second gene). If the anchor is not found in both genes, the splice or the mutation does not occur. 
REQ: the anchors must be consisted of letters {A, G, C, T} ''' # convert the source gene into a string source_gene = "" for i in range(len(source)): source_gene += source[i] # convert the destination gene into a string destination_gene = "" for j in range(len(destination)): destination_gene += destination[j] # find the index of start and end anchor from the source source_start_anchor = source_gene.find(start_anchor) source_end_anchor = source_gene.find(end_anchor, source_start_anchor) # start and end anchor can be found in reverse order if (source_start_anchor == -1 and source_end_anchor == -1): source_start_anchor = source_gene.rfind(start_anchor) source_end_anchor = source_gene.rfind(end_anchor, source_start_anchor) # find the index of start and end anchor from the destination destination_start_anchor = destination_gene.find(start_anchor) destination_end_anchor = destination_gene.find(end_anchor, destination_start_anchor) # start and end anchor can be found in reverse order if (destination_start_anchor == -1 and destination_end_anchor == -1):
# check if the indices are found in source gene if (source_start_anchor != -1 and source_end_anchor != -1): # check if the indices are found in destination gene if (destination_start_anchor != -1 and destination_end_anchor != -1): # for loop to find the anchor sequence from the source source_anchor_sequence = "" for i in range(source_start_anchor, source_end_anchor + len(end_anchor)): source_anchor_sequence += source[i] # remove the nucleotides of anchor sequence from the source count = 0 while (count < len(source_anchor_sequence)): del source[source_start_anchor] count += 1 # for loop to find the anchor sequence from the destination destination_anchor_sequence = "" for i in range(destination_start_anchor, destination_end_anchor + len(end_anchor)): destination_anchor_sequence += destination[i] # remove the nucleotides from start and end anchor from # the destination count = 0 while (count < len(destination_anchor_sequence)): del destination[destination_start_anchor] count += 1 # splice the anchor sequence into the destination for l in range(len(source_anchor_sequence) - 1, -1, -1): destination.insert(destination_start_anchor, source_anchor_sequence[l]) # if not found, splice does not occur else: pass # if not found, splice does not occur else: pass def match_mask(gene, mask): ''' (str, str) -> int This function creates a mask to find a specific pattern in the gene. Masks can pair with parts of genes, but does not necessarily pair with the entire gene. Masks can be consisted of multis which are the special nucleotides, represented inside square brackets, that can mimic the bonding behaviour of multiple nucleotides. It can also create a nucleotide that is capapble of pairing with any other nucleotide, called stars. In addition, if there are repeated sequences of nucleotides in masks, it can be denoted by using numbers. 
An example of a mask would be [AG]C3*, which can be paired with any gene sequences that starts with T or C, followed by three G, followed by any other nucleotides. This function will take in a string representation of a gene, and a mask, and returns the index of the first nucleotide in the sequence that is matched by the given mask. If it is not found anywhere in the sequence, it returns -1. REQ: masks are strings consisted of '[', ']', numbers, and '*' REQ: the letters inside the square brackets, should be consisted of letters {A, G, C, T} REQ: masks cannot start with integers >>> match_mask("CCCAGGGGTT", "[TC]G") 3 >>> match_mask("CCCAGGGGTT", "*") 0 >>> match_mask("CCCCGGGG", "A") -1 ''' # declare a variable for set of nucleotides and integers nucleotides = "AGCT" numbers = "123456789" # declare a variable that is in charge of keeping the index # of the first nucleotide in gene that matches mask match_index = -1 # declare variables that make a gene sequence from the mask # four mask off gene sequences since there are four types of nucleotides mask_off = [] # for loop to read through the mask for index in range(len(mask)): # star is a special character that can pair with any nucleotides if (mask[index] == "*"): mask_off.append(nucleotides) # nucleotides pair with specific nucleotides elif (mask[index] in nucleotides): # adenine pairs with thymine if (mask[index] == "A"): mask_off.append("T") # thymine pairs with adenine elif (mask[index] == "T"): mask_off.append("A") # cytosine pairs with guanine elif (mask[index] == "C"): mask_off.append("G") # guanine pairs with cytosine else: mask_off.append("C") # repeated sequences of nucleotides can be denoted using numbers elif (mask[index] in numbers): # multiple adenines if (mask[index - 1] == "A"): mask_off.extend("A" * int(mask[index])) # multiple thymines elif (mask[index - 1] == "T"): mask_off.extend("T" * int(mask[index])) # multiple cytosine elif (mask[index - 1] == "C"): mask_off.extend("C" * int(mask[index])) # 
multiple guanine else: mask_off.extend("G" * int(mask[index])) # masks can have special nucleotides called multis else: mask_start = mask.find("[") mask_end = mask.rfind("]") # for loop to get the multis inside the bracket multis = "" # get the multi from the mask for m in range(index + 1, mask_end): multis += mask[m] # convert multis into pairable nucleotides multi_pair = "" # for loop to go through the multis for multi in range(len(multis)): # adenine pairs with thymine if (multis[multi] == "A"): multi_pair += "T" # thymine pairs with adenine elif (multis[multi] == "T"): multi_pair += "A" # cytosine pairs with guanine elif (multis[multi] == "C"): multi_pair += "G" # guanine pairs with cytosine else: multi_pair += "C" # add multi_pair to the mask off list mask_off.append(multi_pair) # remove multis from the mask for i in range(mask_end + 1): del mask_off[mask_start+1] # declare a boolean that indicates whether the gene matches the mask match = False # temporary counter g = 0 # while loop to go through the gene while matching index is not found while(not match and g < len(gene)): # if the element in mask off gene is a single letter if (len(mask_off[g]) == 1): # check if the nucleotide is the same as the elemnet # in mask off gene if (mask_off[g] == gene[g]): match_index = g match = True # if the element in mask off gene is not a single letter else: if (gene[g] in mask_off[g]): match_index = g match = True g += 1 # return the the first matching index of the gene return match_index def process_gene_file(file_handle, gene, mask): ''' (io.TextIOWrapper, str, str) -> tuple Takes in a file handle for a file containing one gene per line, a string representing a gene and a string representing a mask. Then, it returns a tuple (p, m, z) where p is the first gene that can pair with the input gene string, m is the first gene that matches the mask, and z is the longest gene zip found up in any gene up to and including the point where both p and m were found. 
If no genes match the given gene or mask, -1 is returned in place of p or m. REQ: file should not be empty REQ: genes must be consisted of letters {A, G, C, T} REQ: masks are strings consisted of '[', ']', numbers, and '*' REQ: the letters inside the square brackets, should be consisted of letters {A, G, C, T} REQ: masks cannot start with integers '''
destination_start_anchor = destination_gene.rfind(start_anchor) destination_end_anchor = destination_gene.rfind( end_anchor, destination_start_anchor)
conditional_block
a1.py
"""CSCA08 Assignment 1, Fall 2017 I hereby agree that the work contained herein is solely my work and that I have not received any external help from my peers, nor have I used any resources not directly supplied by the course in order to complete this assignment. I have not looked at anyone else's solution, and no one has looked at mine. I understand that by adding my name to this file, I am making a formal declaration, and any subsequent discovery of plagiarism or other academic misconduct could result in a charge of perjury in addition to other charges under the academic code of conduct of the University of Toronto Scarborough Campus Name: Hyebeen Jung UtorID: junghyeb Student Number: 1004346512 Date: November 12, 2017 """ def pair_genes(first_gene, second_gene): ''' (str, str) -> bool Genes can be paired together by allowing the nucleotides from the first gene to pair-bond with the nucleotides from the second gene. Guanine will pair with cytosine, and adenine will pair with thymine. Genes can also pair in either direction. This function takes in two string representations of genes and returns a boolean that indicates whether the two genes can pair or not. 
REQ: genes must be consisted of letters {A, G, C, T} >>> pair_genes("TCAG", "AGTC") True >>> pair_genes("TCAG", "CTGA") True >>> pair_genes("TCAG", "CCAG") False ''' # declare a boolean that indicates whether the two genes are pairable can_pair = False # create a sample of gene that can pair sample_gene = "" for nucleotide in first_gene: if (nucleotide == "A"): sample_gene += "T" elif (nucleotide == "T"): sample_gene += "A" elif (nucleotide == "G"): sample_gene += "C" else: sample_gene += "G" # check if the sample gene matches the second gene if (second_gene == sample_gene): can_pair = True # genes can also pair either direction if (second_gene[::-1] == sample_gene): can_pair = True # returns the boolean that indicates whether the two genes can pair return can_pair def zip_length(gene): ''' (str) -> int Genes can partially pair with itself in a process called zipping. Zipping occurs when at either end of a gene can form a pair bond, and continues until the pair of nucleotides can no longer form a bond. Guanines pair with cytosines, and adenines pair with thymines. This function returns an integer value that indicates the maximum number of nucleotides pairs that the gene can zip. 
REQ: genes must be consisted of letters {A, G, C, T} >>> zip_length("AGTCTCGCT") 2 >>> zip_length("AGTCTCGAG") 0 ''' # declare a variable that counts the zip length zip_length_count = 0 # for loop that is in charge of each nucleotides from the left for left_index in range(len(gene)): # declare a variable that is in charge of the indices of # each nucleotides from the right right_index = len(gene) - 1 - left_index # checks if either end of the gene can form a bond if (gene[left_index] == "A" and gene[right_index] == "T"): zip_length_count += 1 elif (gene[left_index] == "C" and gene[right_index] == "G"): zip_length_count += 1 elif (gene[left_index] == "G" and gene[right_index] == "C"): zip_length_count += 1 elif (gene[left_index] == "T" and gene[right_index] == "A"): zip_length_count += 1 # once the gene can no longer zip, # returns the zip length right away else: return zip_length_count def splice_gene(source, destination, start_anchor, end_anchor): ''' (list, list, str, str) -> None This function performs splicing of gene sequences. Splicing of genes can be done by taking a nucleotide sequence from one gene and replace it with a nucleotide sequence from another. First, find the anchor sequences, which are the sequences found within the starting and end anchor given by the user (anchors can be found from either end of the gene). Then, if the starting anchor and the end anchor is found in both genes, the anchor sequence extracted from the source (the first gene) replaces the anchor sequence from the destination (the second gene). If the anchor is not found in both genes, the splice or the mutation does not occur. 
REQ: the anchors must be consisted of letters {A, G, C, T} ''' # convert the source gene into a string source_gene = "" for i in range(len(source)): source_gene += source[i] # convert the destination gene into a string destination_gene = "" for j in range(len(destination)): destination_gene += destination[j] # find the index of start and end anchor from the source source_start_anchor = source_gene.find(start_anchor) source_end_anchor = source_gene.find(end_anchor, source_start_anchor) # start and end anchor can be found in reverse order if (source_start_anchor == -1 and source_end_anchor == -1): source_start_anchor = source_gene.rfind(start_anchor) source_end_anchor = source_gene.rfind(end_anchor, source_start_anchor) # find the index of start and end anchor from the destination destination_start_anchor = destination_gene.find(start_anchor) destination_end_anchor = destination_gene.find(end_anchor, destination_start_anchor) # start and end anchor can be found in reverse order if (destination_start_anchor == -1 and destination_end_anchor == -1): destination_start_anchor = destination_gene.rfind(start_anchor) destination_end_anchor = destination_gene.rfind( end_anchor, destination_start_anchor) # check if the indices are found in source gene if (source_start_anchor != -1 and source_end_anchor != -1): # check if the indices are found in destination gene if (destination_start_anchor != -1 and destination_end_anchor != -1): # for loop to find the anchor sequence from the source source_anchor_sequence = "" for i in range(source_start_anchor, source_end_anchor + len(end_anchor)): source_anchor_sequence += source[i] # remove the nucleotides of anchor sequence from the source count = 0 while (count < len(source_anchor_sequence)): del source[source_start_anchor] count += 1 # for loop to find the anchor sequence from the destination destination_anchor_sequence = "" for i in range(destination_start_anchor, destination_end_anchor + len(end_anchor)): destination_anchor_sequence 
+= destination[i] # remove the nucleotides from start and end anchor from # the destination count = 0 while (count < len(destination_anchor_sequence)): del destination[destination_start_anchor] count += 1 # splice the anchor sequence into the destination for l in range(len(source_anchor_sequence) - 1, -1, -1): destination.insert(destination_start_anchor, source_anchor_sequence[l]) # if not found, splice does not occur else: pass # if not found, splice does not occur else: pass def match_mask(gene, mask): ''' (str, str) -> int This function creates a mask to find a specific pattern in the gene. Masks can pair with parts of genes, but does not necessarily pair with the entire gene. Masks can be consisted of multis which are the special nucleotides, represented inside square brackets, that can mimic the bonding behaviour of multiple nucleotides. It can also create a nucleotide that is capapble of pairing with any other nucleotide, called stars. In addition, if there are repeated sequences of nucleotides in masks, it can be denoted by using numbers. An example of a mask would be [AG]C3*, which can be paired with any gene sequences that starts with T or C, followed by three G, followed by any other nucleotides. This function will take in a string representation of a gene, and a mask, and returns the index of the first nucleotide in the sequence that is matched by the given mask. If it is not found anywhere in the sequence, it returns -1. 
REQ: masks are strings consisted of '[', ']', numbers, and '*' REQ: the letters inside the square brackets, should be consisted of letters {A, G, C, T} REQ: masks cannot start with integers >>> match_mask("CCCAGGGGTT", "[TC]G") 3 >>> match_mask("CCCAGGGGTT", "*") 0 >>> match_mask("CCCCGGGG", "A") -1 ''' # declare a variable for set of nucleotides and integers nucleotides = "AGCT" numbers = "123456789" # declare a variable that is in charge of keeping the index # of the first nucleotide in gene that matches mask match_index = -1 # declare variables that make a gene sequence from the mask # four mask off gene sequences since there are four types of nucleotides mask_off = [] # for loop to read through the mask for index in range(len(mask)): # star is a special character that can pair with any nucleotides if (mask[index] == "*"): mask_off.append(nucleotides) # nucleotides pair with specific nucleotides elif (mask[index] in nucleotides): # adenine pairs with thymine if (mask[index] == "A"): mask_off.append("T") # thymine pairs with adenine elif (mask[index] == "T"): mask_off.append("A") # cytosine pairs with guanine elif (mask[index] == "C"): mask_off.append("G") # guanine pairs with cytosine else: mask_off.append("C") # repeated sequences of nucleotides can be denoted using numbers elif (mask[index] in numbers): # multiple adenines if (mask[index - 1] == "A"): mask_off.extend("A" * int(mask[index])) # multiple thymines elif (mask[index - 1] == "T"): mask_off.extend("T" * int(mask[index])) # multiple cytosine elif (mask[index - 1] == "C"): mask_off.extend("C" * int(mask[index])) # multiple guanine else: mask_off.extend("G" * int(mask[index])) # masks can have special nucleotides called multis else: mask_start = mask.find("[") mask_end = mask.rfind("]") # for loop to get the multis inside the bracket multis = "" # get the multi from the mask for m in range(index + 1, mask_end): multis += mask[m] # convert multis into pairable nucleotides multi_pair = "" # for loop to go 
through the multis for multi in range(len(multis)): # adenine pairs with thymine if (multis[multi] == "A"): multi_pair += "T" # thymine pairs with adenine elif (multis[multi] == "T"): multi_pair += "A" # cytosine pairs with guanine elif (multis[multi] == "C"): multi_pair += "G" # guanine pairs with cytosine else: multi_pair += "C" # add multi_pair to the mask off list mask_off.append(multi_pair) # remove multis from the mask for i in range(mask_end + 1): del mask_off[mask_start+1] # declare a boolean that indicates whether the gene matches the mask match = False # temporary counter g = 0 # while loop to go through the gene while matching index is not found while(not match and g < len(gene)): # if the element in mask off gene is a single letter if (len(mask_off[g]) == 1): # check if the nucleotide is the same as the elemnet # in mask off gene if (mask_off[g] == gene[g]): match_index = g match = True # if the element in mask off gene is not a single letter else: if (gene[g] in mask_off[g]): match_index = g match = True g += 1 # return the the first matching index of the gene return match_index def
(file_handle, gene, mask): ''' (io.TextIOWrapper, str, str) -> tuple Takes in a file handle for a file containing one gene per line, a string representing a gene and a string representing a mask. Then, it returns a tuple (p, m, z) where p is the first gene that can pair with the input gene string, m is the first gene that matches the mask, and z is the longest gene zip found up in any gene up to and including the point where both p and m were found. If no genes match the given gene or mask, -1 is returned in place of p or m. REQ: file should not be empty REQ: genes must be consisted of letters {A, G, C, T} REQ: masks are strings consisted of '[', ']', numbers, and '*' REQ: the letters inside the square brackets, should be consisted of letters {A, G, C, T} REQ: masks cannot start with integers '''
process_gene_file
identifier_name
a1.py
"""CSCA08 Assignment 1, Fall 2017 I hereby agree that the work contained herein is solely my work and that I have not received any external help from my peers, nor have I used any resources not directly supplied by the course in order to complete this assignment. I have not looked at anyone else's solution, and no one has looked at mine. I understand that by adding my name to this file, I am making a formal declaration, and any subsequent discovery of plagiarism or other academic misconduct could result in a charge of perjury in addition to other charges under the academic code of conduct of the University of Toronto Scarborough Campus Name: Hyebeen Jung UtorID: junghyeb Student Number: 1004346512 Date: November 12, 2017 """ def pair_genes(first_gene, second_gene): ''' (str, str) -> bool Genes can be paired together by allowing the nucleotides from the first gene to pair-bond with the nucleotides from the second gene. Guanine will pair with cytosine, and adenine will pair with thymine. Genes can also pair in either direction. This function takes in two string representations of genes and returns a boolean that indicates whether the two genes can pair or not. 
REQ: genes must be consisted of letters {A, G, C, T} >>> pair_genes("TCAG", "AGTC") True >>> pair_genes("TCAG", "CTGA") True >>> pair_genes("TCAG", "CCAG") False ''' # declare a boolean that indicates whether the two genes are pairable can_pair = False # create a sample of gene that can pair sample_gene = "" for nucleotide in first_gene: if (nucleotide == "A"): sample_gene += "T" elif (nucleotide == "T"): sample_gene += "A" elif (nucleotide == "G"): sample_gene += "C" else: sample_gene += "G" # check if the sample gene matches the second gene if (second_gene == sample_gene): can_pair = True # genes can also pair either direction if (second_gene[::-1] == sample_gene): can_pair = True # returns the boolean that indicates whether the two genes can pair return can_pair def zip_length(gene): ''' (str) -> int Genes can partially pair with itself in a process called zipping. Zipping occurs when at either end of a gene can form a pair bond, and continues until the pair of nucleotides can no longer form a bond. Guanines pair with cytosines, and adenines pair with thymines. This function returns an integer value that indicates the maximum number of nucleotides pairs that the gene can zip. 
REQ: genes must be consisted of letters {A, G, C, T} >>> zip_length("AGTCTCGCT") 2 >>> zip_length("AGTCTCGAG") 0 ''' # declare a variable that counts the zip length zip_length_count = 0 # for loop that is in charge of each nucleotides from the left for left_index in range(len(gene)): # declare a variable that is in charge of the indices of # each nucleotides from the right right_index = len(gene) - 1 - left_index # checks if either end of the gene can form a bond if (gene[left_index] == "A" and gene[right_index] == "T"): zip_length_count += 1 elif (gene[left_index] == "C" and gene[right_index] == "G"): zip_length_count += 1 elif (gene[left_index] == "G" and gene[right_index] == "C"): zip_length_count += 1 elif (gene[left_index] == "T" and gene[right_index] == "A"): zip_length_count += 1 # once the gene can no longer zip, # returns the zip length right away else: return zip_length_count def splice_gene(source, destination, start_anchor, end_anchor):
def match_mask(gene, mask): ''' (str, str) -> int This function creates a mask to find a specific pattern in the gene. Masks can pair with parts of genes, but does not necessarily pair with the entire gene. Masks can be consisted of multis which are the special nucleotides, represented inside square brackets, that can mimic the bonding behaviour of multiple nucleotides. It can also create a nucleotide that is capapble of pairing with any other nucleotide, called stars. In addition, if there are repeated sequences of nucleotides in masks, it can be denoted by using numbers. An example of a mask would be [AG]C3*, which can be paired with any gene sequences that starts with T or C, followed by three G, followed by any other nucleotides. This function will take in a string representation of a gene, and a mask, and returns the index of the first nucleotide in the sequence that is matched by the given mask. If it is not found anywhere in the sequence, it returns -1. REQ: masks are strings consisted of '[', ']', numbers, and '*' REQ: the letters inside the square brackets, should be consisted of letters {A, G, C, T} REQ: masks cannot start with integers >>> match_mask("CCCAGGGGTT", "[TC]G") 3 >>> match_mask("CCCAGGGGTT", "*") 0 >>> match_mask("CCCCGGGG", "A") -1 ''' # declare a variable for set of nucleotides and integers nucleotides = "AGCT" numbers = "123456789" # declare a variable that is in charge of keeping the index # of the first nucleotide in gene that matches mask match_index = -1 # declare variables that make a gene sequence from the mask # four mask off gene sequences since there are four types of nucleotides mask_off = [] # for loop to read through the mask for index in range(len(mask)): # star is a special character that can pair with any nucleotides if (mask[index] == "*"): mask_off.append(nucleotides) # nucleotides pair with specific nucleotides elif (mask[index] in nucleotides): # adenine pairs with thymine if (mask[index] == "A"): mask_off.append("T") # 
thymine pairs with adenine elif (mask[index] == "T"): mask_off.append("A") # cytosine pairs with guanine elif (mask[index] == "C"): mask_off.append("G") # guanine pairs with cytosine else: mask_off.append("C") # repeated sequences of nucleotides can be denoted using numbers elif (mask[index] in numbers): # multiple adenines if (mask[index - 1] == "A"): mask_off.extend("A" * int(mask[index])) # multiple thymines elif (mask[index - 1] == "T"): mask_off.extend("T" * int(mask[index])) # multiple cytosine elif (mask[index - 1] == "C"): mask_off.extend("C" * int(mask[index])) # multiple guanine else: mask_off.extend("G" * int(mask[index])) # masks can have special nucleotides called multis else: mask_start = mask.find("[") mask_end = mask.rfind("]") # for loop to get the multis inside the bracket multis = "" # get the multi from the mask for m in range(index + 1, mask_end): multis += mask[m] # convert multis into pairable nucleotides multi_pair = "" # for loop to go through the multis for multi in range(len(multis)): # adenine pairs with thymine if (multis[multi] == "A"): multi_pair += "T" # thymine pairs with adenine elif (multis[multi] == "T"): multi_pair += "A" # cytosine pairs with guanine elif (multis[multi] == "C"): multi_pair += "G" # guanine pairs with cytosine else: multi_pair += "C" # add multi_pair to the mask off list mask_off.append(multi_pair) # remove multis from the mask for i in range(mask_end + 1): del mask_off[mask_start+1] # declare a boolean that indicates whether the gene matches the mask match = False # temporary counter g = 0 # while loop to go through the gene while matching index is not found while(not match and g < len(gene)): # if the element in mask off gene is a single letter if (len(mask_off[g]) == 1): # check if the nucleotide is the same as the elemnet # in mask off gene if (mask_off[g] == gene[g]): match_index = g match = True # if the element in mask off gene is not a single letter else: if (gene[g] in mask_off[g]): match_index = g 
match = True g += 1 # return the the first matching index of the gene return match_index def process_gene_file(file_handle, gene, mask): ''' (io.TextIOWrapper, str, str) -> tuple Takes in a file handle for a file containing one gene per line, a string representing a gene and a string representing a mask. Then, it returns a tuple (p, m, z) where p is the first gene that can pair with the input gene string, m is the first gene that matches the mask, and z is the longest gene zip found up in any gene up to and including the point where both p and m were found. If no genes match the given gene or mask, -1 is returned in place of p or m. REQ: file should not be empty REQ: genes must be consisted of letters {A, G, C, T} REQ: masks are strings consisted of '[', ']', numbers, and '*' REQ: the letters inside the square brackets, should be consisted of letters {A, G, C, T} REQ: masks cannot start with integers '''
''' (list, list, str, str) -> None This function performs splicing of gene sequences. Splicing of genes can be done by taking a nucleotide sequence from one gene and replace it with a nucleotide sequence from another. First, find the anchor sequences, which are the sequences found within the starting and end anchor given by the user (anchors can be found from either end of the gene). Then, if the starting anchor and the end anchor is found in both genes, the anchor sequence extracted from the source (the first gene) replaces the anchor sequence from the destination (the second gene). If the anchor is not found in both genes, the splice or the mutation does not occur. REQ: the anchors must be consisted of letters {A, G, C, T} ''' # convert the source gene into a string source_gene = "" for i in range(len(source)): source_gene += source[i] # convert the destination gene into a string destination_gene = "" for j in range(len(destination)): destination_gene += destination[j] # find the index of start and end anchor from the source source_start_anchor = source_gene.find(start_anchor) source_end_anchor = source_gene.find(end_anchor, source_start_anchor) # start and end anchor can be found in reverse order if (source_start_anchor == -1 and source_end_anchor == -1): source_start_anchor = source_gene.rfind(start_anchor) source_end_anchor = source_gene.rfind(end_anchor, source_start_anchor) # find the index of start and end anchor from the destination destination_start_anchor = destination_gene.find(start_anchor) destination_end_anchor = destination_gene.find(end_anchor, destination_start_anchor) # start and end anchor can be found in reverse order if (destination_start_anchor == -1 and destination_end_anchor == -1): destination_start_anchor = destination_gene.rfind(start_anchor) destination_end_anchor = destination_gene.rfind( end_anchor, destination_start_anchor) # check if the indices are found in source gene if (source_start_anchor != -1 and source_end_anchor != -1): # 
check if the indices are found in destination gene if (destination_start_anchor != -1 and destination_end_anchor != -1): # for loop to find the anchor sequence from the source source_anchor_sequence = "" for i in range(source_start_anchor, source_end_anchor + len(end_anchor)): source_anchor_sequence += source[i] # remove the nucleotides of anchor sequence from the source count = 0 while (count < len(source_anchor_sequence)): del source[source_start_anchor] count += 1 # for loop to find the anchor sequence from the destination destination_anchor_sequence = "" for i in range(destination_start_anchor, destination_end_anchor + len(end_anchor)): destination_anchor_sequence += destination[i] # remove the nucleotides from start and end anchor from # the destination count = 0 while (count < len(destination_anchor_sequence)): del destination[destination_start_anchor] count += 1 # splice the anchor sequence into the destination for l in range(len(source_anchor_sequence) - 1, -1, -1): destination.insert(destination_start_anchor, source_anchor_sequence[l]) # if not found, splice does not occur else: pass # if not found, splice does not occur else: pass
identifier_body
a1.py
"""CSCA08 Assignment 1, Fall 2017 I hereby agree that the work contained herein is solely my work and that I have not received any external help from my peers, nor have I used any resources not directly supplied by the course in order to complete this assignment. I have not looked at anyone else's solution, and no one has looked at mine. I understand that by adding my name to this file, I am making a formal declaration, and any subsequent discovery of plagiarism or other academic misconduct could result in a charge of perjury in addition to other charges under the academic code of conduct of the University of Toronto Scarborough Campus Name: Hyebeen Jung UtorID: junghyeb Student Number: 1004346512 Date: November 12, 2017 """ def pair_genes(first_gene, second_gene): ''' (str, str) -> bool Genes can be paired together by allowing the nucleotides from the first gene to pair-bond with the nucleotides from the second gene. Guanine will pair with cytosine, and adenine will pair with thymine. Genes can also pair in either direction. This function takes in two string representations of genes and returns a boolean that indicates whether the two genes can pair or not. 
REQ: genes must be consisted of letters {A, G, C, T} >>> pair_genes("TCAG", "AGTC") True >>> pair_genes("TCAG", "CTGA") True >>> pair_genes("TCAG", "CCAG") False ''' # declare a boolean that indicates whether the two genes are pairable can_pair = False # create a sample of gene that can pair sample_gene = "" for nucleotide in first_gene: if (nucleotide == "A"): sample_gene += "T" elif (nucleotide == "T"): sample_gene += "A" elif (nucleotide == "G"): sample_gene += "C" else: sample_gene += "G" # check if the sample gene matches the second gene if (second_gene == sample_gene): can_pair = True # genes can also pair either direction if (second_gene[::-1] == sample_gene): can_pair = True # returns the boolean that indicates whether the two genes can pair return can_pair def zip_length(gene): ''' (str) -> int Genes can partially pair with itself in a process called zipping. Zipping occurs when at either end of a gene can form a pair bond, and continues until the pair of nucleotides can no longer form a bond. Guanines pair with cytosines, and adenines pair with thymines.
>>> zip_length("AGTCTCGCT") 2 >>> zip_length("AGTCTCGAG") 0 ''' # declare a variable that counts the zip length zip_length_count = 0 # for loop that is in charge of each nucleotides from the left for left_index in range(len(gene)): # declare a variable that is in charge of the indices of # each nucleotides from the right right_index = len(gene) - 1 - left_index # checks if either end of the gene can form a bond if (gene[left_index] == "A" and gene[right_index] == "T"): zip_length_count += 1 elif (gene[left_index] == "C" and gene[right_index] == "G"): zip_length_count += 1 elif (gene[left_index] == "G" and gene[right_index] == "C"): zip_length_count += 1 elif (gene[left_index] == "T" and gene[right_index] == "A"): zip_length_count += 1 # once the gene can no longer zip, # returns the zip length right away else: return zip_length_count def splice_gene(source, destination, start_anchor, end_anchor): ''' (list, list, str, str) -> None This function performs splicing of gene sequences. Splicing of genes can be done by taking a nucleotide sequence from one gene and replace it with a nucleotide sequence from another. First, find the anchor sequences, which are the sequences found within the starting and end anchor given by the user (anchors can be found from either end of the gene). Then, if the starting anchor and the end anchor is found in both genes, the anchor sequence extracted from the source (the first gene) replaces the anchor sequence from the destination (the second gene). If the anchor is not found in both genes, the splice or the mutation does not occur. 
REQ: the anchors must be consisted of letters {A, G, C, T} ''' # convert the source gene into a string source_gene = "" for i in range(len(source)): source_gene += source[i] # convert the destination gene into a string destination_gene = "" for j in range(len(destination)): destination_gene += destination[j] # find the index of start and end anchor from the source source_start_anchor = source_gene.find(start_anchor) source_end_anchor = source_gene.find(end_anchor, source_start_anchor) # start and end anchor can be found in reverse order if (source_start_anchor == -1 and source_end_anchor == -1): source_start_anchor = source_gene.rfind(start_anchor) source_end_anchor = source_gene.rfind(end_anchor, source_start_anchor) # find the index of start and end anchor from the destination destination_start_anchor = destination_gene.find(start_anchor) destination_end_anchor = destination_gene.find(end_anchor, destination_start_anchor) # start and end anchor can be found in reverse order if (destination_start_anchor == -1 and destination_end_anchor == -1): destination_start_anchor = destination_gene.rfind(start_anchor) destination_end_anchor = destination_gene.rfind( end_anchor, destination_start_anchor) # check if the indices are found in source gene if (source_start_anchor != -1 and source_end_anchor != -1): # check if the indices are found in destination gene if (destination_start_anchor != -1 and destination_end_anchor != -1): # for loop to find the anchor sequence from the source source_anchor_sequence = "" for i in range(source_start_anchor, source_end_anchor + len(end_anchor)): source_anchor_sequence += source[i] # remove the nucleotides of anchor sequence from the source count = 0 while (count < len(source_anchor_sequence)): del source[source_start_anchor] count += 1 # for loop to find the anchor sequence from the destination destination_anchor_sequence = "" for i in range(destination_start_anchor, destination_end_anchor + len(end_anchor)): destination_anchor_sequence 
+= destination[i] # remove the nucleotides from start and end anchor from # the destination count = 0 while (count < len(destination_anchor_sequence)): del destination[destination_start_anchor] count += 1 # splice the anchor sequence into the destination for l in range(len(source_anchor_sequence) - 1, -1, -1): destination.insert(destination_start_anchor, source_anchor_sequence[l]) # if not found, splice does not occur else: pass # if not found, splice does not occur else: pass def match_mask(gene, mask): ''' (str, str) -> int This function creates a mask to find a specific pattern in the gene. Masks can pair with parts of genes, but does not necessarily pair with the entire gene. Masks can be consisted of multis which are the special nucleotides, represented inside square brackets, that can mimic the bonding behaviour of multiple nucleotides. It can also create a nucleotide that is capapble of pairing with any other nucleotide, called stars. In addition, if there are repeated sequences of nucleotides in masks, it can be denoted by using numbers. An example of a mask would be [AG]C3*, which can be paired with any gene sequences that starts with T or C, followed by three G, followed by any other nucleotides. This function will take in a string representation of a gene, and a mask, and returns the index of the first nucleotide in the sequence that is matched by the given mask. If it is not found anywhere in the sequence, it returns -1. 
REQ: masks are strings consisted of '[', ']', numbers, and '*' REQ: the letters inside the square brackets, should be consisted of letters {A, G, C, T} REQ: masks cannot start with integers >>> match_mask("CCCAGGGGTT", "[TC]G") 3 >>> match_mask("CCCAGGGGTT", "*") 0 >>> match_mask("CCCCGGGG", "A") -1 ''' # declare a variable for set of nucleotides and integers nucleotides = "AGCT" numbers = "123456789" # declare a variable that is in charge of keeping the index # of the first nucleotide in gene that matches mask match_index = -1 # declare variables that make a gene sequence from the mask # four mask off gene sequences since there are four types of nucleotides mask_off = [] # for loop to read through the mask for index in range(len(mask)): # star is a special character that can pair with any nucleotides if (mask[index] == "*"): mask_off.append(nucleotides) # nucleotides pair with specific nucleotides elif (mask[index] in nucleotides): # adenine pairs with thymine if (mask[index] == "A"): mask_off.append("T") # thymine pairs with adenine elif (mask[index] == "T"): mask_off.append("A") # cytosine pairs with guanine elif (mask[index] == "C"): mask_off.append("G") # guanine pairs with cytosine else: mask_off.append("C") # repeated sequences of nucleotides can be denoted using numbers elif (mask[index] in numbers): # multiple adenines if (mask[index - 1] == "A"): mask_off.extend("A" * int(mask[index])) # multiple thymines elif (mask[index - 1] == "T"): mask_off.extend("T" * int(mask[index])) # multiple cytosine elif (mask[index - 1] == "C"): mask_off.extend("C" * int(mask[index])) # multiple guanine else: mask_off.extend("G" * int(mask[index])) # masks can have special nucleotides called multis else: mask_start = mask.find("[") mask_end = mask.rfind("]") # for loop to get the multis inside the bracket multis = "" # get the multi from the mask for m in range(index + 1, mask_end): multis += mask[m] # convert multis into pairable nucleotides multi_pair = "" # for loop to go 
through the multis for multi in range(len(multis)): # adenine pairs with thymine if (multis[multi] == "A"): multi_pair += "T" # thymine pairs with adenine elif (multis[multi] == "T"): multi_pair += "A" # cytosine pairs with guanine elif (multis[multi] == "C"): multi_pair += "G" # guanine pairs with cytosine else: multi_pair += "C" # add multi_pair to the mask off list mask_off.append(multi_pair) # remove multis from the mask for i in range(mask_end + 1): del mask_off[mask_start+1] # declare a boolean that indicates whether the gene matches the mask match = False # temporary counter g = 0 # while loop to go through the gene while matching index is not found while(not match and g < len(gene)): # if the element in mask off gene is a single letter if (len(mask_off[g]) == 1): # check if the nucleotide is the same as the elemnet # in mask off gene if (mask_off[g] == gene[g]): match_index = g match = True # if the element in mask off gene is not a single letter else: if (gene[g] in mask_off[g]): match_index = g match = True g += 1 # return the the first matching index of the gene return match_index def process_gene_file(file_handle, gene, mask): ''' (io.TextIOWrapper, str, str) -> tuple Takes in a file handle for a file containing one gene per line, a string representing a gene and a string representing a mask. Then, it returns a tuple (p, m, z) where p is the first gene that can pair with the input gene string, m is the first gene that matches the mask, and z is the longest gene zip found up in any gene up to and including the point where both p and m were found. If no genes match the given gene or mask, -1 is returned in place of p or m. REQ: file should not be empty REQ: genes must be consisted of letters {A, G, C, T} REQ: masks are strings consisted of '[', ']', numbers, and '*' REQ: the letters inside the square brackets, should be consisted of letters {A, G, C, T} REQ: masks cannot start with integers '''
This function returns an integer value that indicates the maximum number of nucleotides pairs that the gene can zip. REQ: genes must be consisted of letters {A, G, C, T}
random_line_split
runner.rs
use graph::Graph; use modules; use num_cpus; use runtime::{Environment, Runtime}; use std::cmp; use std::collections::{HashMap, HashSet}; use std::error::Error; use std::path::{Path, PathBuf}; use std::rc::Rc; use std::sync::mpsc; use std::thread; use task::Task; use term; #[derive(Clone)] pub struct EnvironmentSpec { /// Script path. path: PathBuf, /// Script directory. directory: PathBuf, /// Module include paths. include_paths: Vec<PathBuf>, /// Global environment variables. variables: Vec<(String, String)>, /// Indicates if actually running tasks should be skipped. dry_run: bool, /// Indicates if up-to-date tasks should be run anyway. always_run: bool, /// Indicates task errors should be ignored. keep_going: bool, } impl EnvironmentSpec { /// Creates an environment from the environment specification. pub fn create(&self) -> Result<Runtime, Box<Error>> { // Prepare a new environment. let environment = try!(Environment::new(self.path.clone())); let runtime = Runtime::new(environment); // Open standard library functions. runtime.state().open_libs(); // Register modules. modules::register_all(&runtime); // Set include paths. for path in &self.include_paths { runtime.include_path(&path); } // Set the OS runtime.state().push_string(if cfg!(windows) { "windows" } else { "unix" }); runtime.state().set_global("OS"); // Set configured variables. for &(ref name, ref value) in &self.variables { runtime.state().push(value.clone()); runtime.state().set_global(&name); } // Load the script. try!(runtime.load()); Ok(runtime) } } /// A task runner object that holds the state for defined tasks, dependencies, and the scripting /// runtime. pub struct Runner { /// The current DAG for tasks. graph: Graph, /// The number of threads to use. jobs: usize, /// Environment specification. spec: EnvironmentSpec, /// Runtime local owned by the master thread. runtime: Option<Runtime>, } impl Runner { /// Creates a new runner instance. 
pub fn new<P: Into<PathBuf>>(path: P) -> Result<Runner, Box<Error>> { // By default, set the number of jobs to be one less than the number of available CPU cores. let jobs = cmp::max(1, num_cpus::get() - 1); let path = path.into(); let directory: PathBuf = match path.parent() { Some(path) => path.into(), None => { return Err("failed to parse script directory".into()); } }; Ok(Runner { graph: Graph::new(), jobs: jobs as usize, spec: EnvironmentSpec { path: path.into(), directory: directory, include_paths: Vec::new(), variables: Vec::new(), dry_run: false, always_run: false, keep_going: false, }, runtime: None, }) } pub fn path(&self) -> &Path { &self.spec.path } pub fn directory(&self) -> &Path { &self.spec.directory } /// Sets "dry run" mode. /// /// When in "dry run" mode, running tasks will operate as normal, except that no task's actions /// will be actually run. pub fn dry_run(&mut self) { self.spec.dry_run = true; } /// Run all tasks even if they are up-to-date. pub fn always_run(&mut self) { self.spec.always_run = true; } /// Run all tasks even if they throw errors. pub fn keep_going(&mut self) { self.spec.keep_going = true; } /// Sets the number of threads to use to run tasks. pub fn jobs(&mut self, jobs: usize) { self.jobs = jobs; } /// Adds a path to Lua's require path for modules. pub fn include_path<P: Into<PathBuf>>(&mut self, path: P) { self.spec.include_paths.push(path.into()); } /// Sets a variable value. pub fn set_var<S: AsRef<str>, V: Into<String>>(&mut self, name: S, value: V) { self.spec.variables.push((name.as_ref().to_string(), value.into())); } /// Load the script. pub fn load(&mut self) -> Result<(), Box<Error>> { if self.runtime.is_none() { self.runtime = Some(try!(self.spec.create())); } Ok(()) } /// Prints the list of named tasks for a script. 
pub fn print_task_list(&mut self) { let mut tasks = self.runtime().environment().tasks(); tasks.sort_by(|a, b| a.name().cmp(b.name())); let mut out = term::stdout().unwrap(); println!("Available tasks:"); for task in tasks { out.fg(term::color::BRIGHT_GREEN).unwrap(); write!(out, " {:16}", task.name()).unwrap(); out.reset().unwrap(); if let Some(ref description) = task.description() { write!(out, "{}", description).unwrap(); } writeln!(out, "").unwrap(); } if let Some(ref default) = self.runtime().environment().default_task() { println!(""); println!("Default task: {}", default); } } /// Run the default task. pub fn run_default(&mut self) -> Result<(), Box<Error>> { if let Some(ref name) = self.runtime().environment().default_task() { let tasks = vec![name]; self.run(&tasks) } else { Err("no default task defined".into()) } } /// Runs the specified list of tasks. /// /// Tasks are run in parallel when possible during execution. The maximum number of parallel /// jobs can be set with the `jobs()` method. pub fn run<S: AsRef<str>>(&mut self, tasks: &[S]) -> Result<(), Box<Error>> { // Resolve all tasks given. for task in tasks { try!(self.resolve_task(task)); } // Determine the schedule of tasks to execute. let mut queue = try!(self.graph.solve(!self.spec.always_run)); let task_count = queue.len(); let thread_count = cmp::min(self.jobs, task_count); debug!("running {} task(s) across {} thread(s)", task_count, thread_count); // Spawn one thread for each job. let mut threads = Vec::new(); let mut free_threads: HashSet<usize> = HashSet::new(); let mut channels = Vec::new(); let (sender, receiver) = mpsc::channel::<Result<usize, usize>>(); // Spawn `jobs` number of threads (but no more than the task count!). 
for thread_id in 0..thread_count { let spec = self.spec.clone(); let thread_sender = sender.clone(); let (parent_sender, thread_receiver) = mpsc::sync_channel::<(String, usize)>(0); channels.push(parent_sender); threads.push(thread::spawn(move || { // Prepare a new runtime. let runtime = spec.create().unwrap_or_else(|e| { error!("{}", e); panic!(); }); if thread_sender.send(Ok(thread_id)).is_err() { trace!("thread {} failed to send channel", thread_id); } // Begin executing tasks! while let Ok((name, task_id)) = thread_receiver.recv() { info!("running task '{}' ({} of {})", name, task_id, task_count); // Lookup the task to run. let task = { // Lookup the task to run. if let Some(task) = runtime.environment().get_task(&name) { task as Rc<Task> } // Find a rule that matches the task name. else if let Some(rule) = runtime.environment() .rules() .iter() .find(|rule| rule.matches(&name)) { Rc::new(rule.create_task(name).unwrap()) as Rc<Task> } // No matching task. else { panic!("no matching task or rule for '{}'", name); } }; // Check for dry run. if !spec.dry_run { if let Err(e) = task.run() { // If we ought to keep going, just issue a warning. if spec.keep_going { warn!("ignoring error: {}", e); } else { error!("{}", e); thread_sender.send(Err(thread_id)).unwrap(); return; } } } else { info!("would run task '{}'", task.name()); } if thread_sender.send(Ok(thread_id)).is_err() { trace!("thread {} failed to send channel", thread_id); break; } } })) } drop(sender); // Keep track of tasks completed and tasks in progress. let mut completed_tasks: HashSet<String> = HashSet::new(); let mut current_tasks: HashMap<usize, String> = HashMap::new(); let all_tasks: HashSet<String> = queue.iter().map(|s| s.name().to_string()).collect(); while !queue.is_empty() || !current_tasks.is_empty() { // Wait for a thread to request a task. let result = receiver.recv().unwrap(); // If the thread sent an error, we should stop everything if keep_going isn't enabled. 
if let Err(thread_id) = result { debug!("thread {} errored, waiting for remaining tasks...", thread_id); return Err("not all tasks completed successfully".into()); } let thread_id = result.unwrap(); free_threads.insert(thread_id); trace!("thread {} is idle", thread_id); // If the thread was previously running a task, mark it as completed. if let Some(task) = current_tasks.remove(&thread_id) { trace!("task '{}' completed", task); completed_tasks.insert(task); } // Attempt to schedule more tasks to run. The most we can schedule is the number of free // threads, but it is limited by the number of tasks that have their dependencies already // finished. 'schedule: for _ in 0..free_threads.len() { // If the queue is empty, we are done. if queue.is_empty() { break; } // Check the next task in the queue. If any of its dependencies have not yet been // completed, we cannot schedule it yet. for dependency in queue.front().unwrap().dependencies() { // Check that the dependency needs scheduled at all (some are already satisfied), // and that it hasn't already finished. if all_tasks.contains(dependency) && !completed_tasks.contains(dependency) { // We can't run the next task, so we're done scheduling for now until another // thread finishes. break 'schedule; } } // Get the available task from the queue. let task = queue.front().unwrap().clone(); // Pick a free thread to run the task in. if let Some(thread_id) = free_threads.iter().next().map(|t| *t) { trace!("scheduling task '{}' on thread {}", task.name(), thread_id); let data = (task.name().to_string(), task_count - queue.len() + 1); // Send the task name. if channels[thread_id].send(data).is_ok() { current_tasks.insert(thread_id, task.name().to_string()); free_threads.remove(&thread_id); // Scheduling was successful, so remove the task frome the queue. queue.pop_front().unwrap(); } else { trace!("failed to send channel to thread {}", thread_id); } } else { // We can schedule now, but there aren't any free threads. 
😢 break; } } } // Close the input and wait for any remaining threads to finish. drop(channels); for (thread_id, thread) in threads.into_iter().enumerate() { if let Err(e) = thread.join() { trace!("thread {} closed with panic: {:?}", thread_id, e); } } info!("all tasks up to date"); Ok(()) } fn resolve_task<S: AsRef<str>>(&mut self, name: S) -> Result<(), Box<Error>> { if !self.graph.contains(&name) { // Lookup the task to run. if let Some(task) = self.runtime().environment().get_task(&name) { debug!("task '{}' matches named task", name.as_ref()); self.graph.insert(task.clone()); } // Find a rule that matches the task name. else if let Some(rule) = self.runtime() .environment() .rules() .iter() .find(|rule| rule.matches(&name)) { debug!("task '{}' matches rule '{}'", name.as_ref(), rule.pattern); // Create a task for the rule and insert it in the graph. self.graph.insert(Rc::new(rule.create_task(name.as_ref()).unwrap())); } // No matching task. else {
} for dependency in self.graph.get(name).unwrap().dependencies() { if !self.graph.contains(dependency) { try!(self.resolve_task(dependency)); } } Ok(()) } fn runtime(&self) -> Runtime { self.runtime.as_ref().unwrap().clone() } }
return Err(format!("no matching task or rule for '{}'", name.as_ref()).into()); }
conditional_block
runner.rs
use graph::Graph; use modules; use num_cpus; use runtime::{Environment, Runtime}; use std::cmp; use std::collections::{HashMap, HashSet}; use std::error::Error; use std::path::{Path, PathBuf}; use std::rc::Rc; use std::sync::mpsc; use std::thread; use task::Task; use term; #[derive(Clone)] pub struct EnvironmentSpec { /// Script path. path: PathBuf, /// Script directory. directory: PathBuf, /// Module include paths. include_paths: Vec<PathBuf>, /// Global environment variables. variables: Vec<(String, String)>, /// Indicates if actually running tasks should be skipped. dry_run: bool, /// Indicates if up-to-date tasks should be run anyway. always_run: bool, /// Indicates task errors should be ignored. keep_going: bool, } impl EnvironmentSpec { /// Creates an environment from the environment specification. pub fn create(&self) -> Result<Runtime, Box<Error>> { // Prepare a new environment. let environment = try!(Environment::new(self.path.clone())); let runtime = Runtime::new(environment); // Open standard library functions. runtime.state().open_libs(); // Register modules. modules::register_all(&runtime); // Set include paths. for path in &self.include_paths { runtime.include_path(&path); } // Set the OS runtime.state().push_string(if cfg!(windows) { "windows" } else { "unix" }); runtime.state().set_global("OS"); // Set configured variables. for &(ref name, ref value) in &self.variables { runtime.state().push(value.clone()); runtime.state().set_global(&name); } // Load the script. try!(runtime.load()); Ok(runtime) } } /// A task runner object that holds the state for defined tasks, dependencies, and the scripting /// runtime. pub struct Runner { /// The current DAG for tasks. graph: Graph, /// The number of threads to use. jobs: usize, /// Environment specification. spec: EnvironmentSpec, /// Runtime local owned by the master thread. runtime: Option<Runtime>, } impl Runner { /// Creates a new runner instance. 
pub fn new<P: Into<PathBuf>>(path: P) -> Result<Runner, Box<Error>> { // By default, set the number of jobs to be one less than the number of available CPU cores. let jobs = cmp::max(1, num_cpus::get() - 1); let path = path.into(); let directory: PathBuf = match path.parent() { Some(path) => path.into(), None => { return Err("failed to parse script directory".into()); } }; Ok(Runner { graph: Graph::new(), jobs: jobs as usize, spec: EnvironmentSpec { path: path.into(), directory: directory, include_paths: Vec::new(), variables: Vec::new(), dry_run: false, always_run: false, keep_going: false, }, runtime: None, }) } pub fn path(&self) -> &Path { &self.spec.path } pub fn directory(&self) -> &Path { &self.spec.directory } /// Sets "dry run" mode. /// /// When in "dry run" mode, running tasks will operate as normal, except that no task's actions /// will be actually run. pub fn dry_run(&mut self) { self.spec.dry_run = true; } /// Run all tasks even if they are up-to-date. pub fn
(&mut self) { self.spec.always_run = true; } /// Run all tasks even if they throw errors. pub fn keep_going(&mut self) { self.spec.keep_going = true; } /// Sets the number of threads to use to run tasks. pub fn jobs(&mut self, jobs: usize) { self.jobs = jobs; } /// Adds a path to Lua's require path for modules. pub fn include_path<P: Into<PathBuf>>(&mut self, path: P) { self.spec.include_paths.push(path.into()); } /// Sets a variable value. pub fn set_var<S: AsRef<str>, V: Into<String>>(&mut self, name: S, value: V) { self.spec.variables.push((name.as_ref().to_string(), value.into())); } /// Load the script. pub fn load(&mut self) -> Result<(), Box<Error>> { if self.runtime.is_none() { self.runtime = Some(try!(self.spec.create())); } Ok(()) } /// Prints the list of named tasks for a script. pub fn print_task_list(&mut self) { let mut tasks = self.runtime().environment().tasks(); tasks.sort_by(|a, b| a.name().cmp(b.name())); let mut out = term::stdout().unwrap(); println!("Available tasks:"); for task in tasks { out.fg(term::color::BRIGHT_GREEN).unwrap(); write!(out, " {:16}", task.name()).unwrap(); out.reset().unwrap(); if let Some(ref description) = task.description() { write!(out, "{}", description).unwrap(); } writeln!(out, "").unwrap(); } if let Some(ref default) = self.runtime().environment().default_task() { println!(""); println!("Default task: {}", default); } } /// Run the default task. pub fn run_default(&mut self) -> Result<(), Box<Error>> { if let Some(ref name) = self.runtime().environment().default_task() { let tasks = vec![name]; self.run(&tasks) } else { Err("no default task defined".into()) } } /// Runs the specified list of tasks. /// /// Tasks are run in parallel when possible during execution. The maximum number of parallel /// jobs can be set with the `jobs()` method. pub fn run<S: AsRef<str>>(&mut self, tasks: &[S]) -> Result<(), Box<Error>> { // Resolve all tasks given. 
for task in tasks { try!(self.resolve_task(task)); } // Determine the schedule of tasks to execute. let mut queue = try!(self.graph.solve(!self.spec.always_run)); let task_count = queue.len(); let thread_count = cmp::min(self.jobs, task_count); debug!("running {} task(s) across {} thread(s)", task_count, thread_count); // Spawn one thread for each job. let mut threads = Vec::new(); let mut free_threads: HashSet<usize> = HashSet::new(); let mut channels = Vec::new(); let (sender, receiver) = mpsc::channel::<Result<usize, usize>>(); // Spawn `jobs` number of threads (but no more than the task count!). for thread_id in 0..thread_count { let spec = self.spec.clone(); let thread_sender = sender.clone(); let (parent_sender, thread_receiver) = mpsc::sync_channel::<(String, usize)>(0); channels.push(parent_sender); threads.push(thread::spawn(move || { // Prepare a new runtime. let runtime = spec.create().unwrap_or_else(|e| { error!("{}", e); panic!(); }); if thread_sender.send(Ok(thread_id)).is_err() { trace!("thread {} failed to send channel", thread_id); } // Begin executing tasks! while let Ok((name, task_id)) = thread_receiver.recv() { info!("running task '{}' ({} of {})", name, task_id, task_count); // Lookup the task to run. let task = { // Lookup the task to run. if let Some(task) = runtime.environment().get_task(&name) { task as Rc<Task> } // Find a rule that matches the task name. else if let Some(rule) = runtime.environment() .rules() .iter() .find(|rule| rule.matches(&name)) { Rc::new(rule.create_task(name).unwrap()) as Rc<Task> } // No matching task. else { panic!("no matching task or rule for '{}'", name); } }; // Check for dry run. if !spec.dry_run { if let Err(e) = task.run() { // If we ought to keep going, just issue a warning. 
if spec.keep_going { warn!("ignoring error: {}", e); } else { error!("{}", e); thread_sender.send(Err(thread_id)).unwrap(); return; } } } else { info!("would run task '{}'", task.name()); } if thread_sender.send(Ok(thread_id)).is_err() { trace!("thread {} failed to send channel", thread_id); break; } } })) } drop(sender); // Keep track of tasks completed and tasks in progress. let mut completed_tasks: HashSet<String> = HashSet::new(); let mut current_tasks: HashMap<usize, String> = HashMap::new(); let all_tasks: HashSet<String> = queue.iter().map(|s| s.name().to_string()).collect(); while !queue.is_empty() || !current_tasks.is_empty() { // Wait for a thread to request a task. let result = receiver.recv().unwrap(); // If the thread sent an error, we should stop everything if keep_going isn't enabled. if let Err(thread_id) = result { debug!("thread {} errored, waiting for remaining tasks...", thread_id); return Err("not all tasks completed successfully".into()); } let thread_id = result.unwrap(); free_threads.insert(thread_id); trace!("thread {} is idle", thread_id); // If the thread was previously running a task, mark it as completed. if let Some(task) = current_tasks.remove(&thread_id) { trace!("task '{}' completed", task); completed_tasks.insert(task); } // Attempt to schedule more tasks to run. The most we can schedule is the number of free // threads, but it is limited by the number of tasks that have their dependencies already // finished. 'schedule: for _ in 0..free_threads.len() { // If the queue is empty, we are done. if queue.is_empty() { break; } // Check the next task in the queue. If any of its dependencies have not yet been // completed, we cannot schedule it yet. for dependency in queue.front().unwrap().dependencies() { // Check that the dependency needs scheduled at all (some are already satisfied), // and that it hasn't already finished. 
if all_tasks.contains(dependency) && !completed_tasks.contains(dependency) { // We can't run the next task, so we're done scheduling for now until another // thread finishes. break 'schedule; } } // Get the available task from the queue. let task = queue.front().unwrap().clone(); // Pick a free thread to run the task in. if let Some(thread_id) = free_threads.iter().next().map(|t| *t) { trace!("scheduling task '{}' on thread {}", task.name(), thread_id); let data = (task.name().to_string(), task_count - queue.len() + 1); // Send the task name. if channels[thread_id].send(data).is_ok() { current_tasks.insert(thread_id, task.name().to_string()); free_threads.remove(&thread_id); // Scheduling was successful, so remove the task frome the queue. queue.pop_front().unwrap(); } else { trace!("failed to send channel to thread {}", thread_id); } } else { // We can schedule now, but there aren't any free threads. 😢 break; } } } // Close the input and wait for any remaining threads to finish. drop(channels); for (thread_id, thread) in threads.into_iter().enumerate() { if let Err(e) = thread.join() { trace!("thread {} closed with panic: {:?}", thread_id, e); } } info!("all tasks up to date"); Ok(()) } fn resolve_task<S: AsRef<str>>(&mut self, name: S) -> Result<(), Box<Error>> { if !self.graph.contains(&name) { // Lookup the task to run. if let Some(task) = self.runtime().environment().get_task(&name) { debug!("task '{}' matches named task", name.as_ref()); self.graph.insert(task.clone()); } // Find a rule that matches the task name. else if let Some(rule) = self.runtime() .environment() .rules() .iter() .find(|rule| rule.matches(&name)) { debug!("task '{}' matches rule '{}'", name.as_ref(), rule.pattern); // Create a task for the rule and insert it in the graph. self.graph.insert(Rc::new(rule.create_task(name.as_ref()).unwrap())); } // No matching task. 
else { return Err(format!("no matching task or rule for '{}'", name.as_ref()).into()); } } for dependency in self.graph.get(name).unwrap().dependencies() { if !self.graph.contains(dependency) { try!(self.resolve_task(dependency)); } } Ok(()) } fn runtime(&self) -> Runtime { self.runtime.as_ref().unwrap().clone() } }
always_run
identifier_name
runner.rs
use graph::Graph; use modules; use num_cpus; use runtime::{Environment, Runtime}; use std::cmp; use std::collections::{HashMap, HashSet}; use std::error::Error; use std::path::{Path, PathBuf}; use std::rc::Rc; use std::sync::mpsc; use std::thread; use task::Task; use term; #[derive(Clone)] pub struct EnvironmentSpec { /// Script path. path: PathBuf, /// Script directory. directory: PathBuf, /// Module include paths. include_paths: Vec<PathBuf>, /// Global environment variables. variables: Vec<(String, String)>, /// Indicates if actually running tasks should be skipped. dry_run: bool, /// Indicates if up-to-date tasks should be run anyway. always_run: bool, /// Indicates task errors should be ignored. keep_going: bool, } impl EnvironmentSpec { /// Creates an environment from the environment specification. pub fn create(&self) -> Result<Runtime, Box<Error>> { // Prepare a new environment. let environment = try!(Environment::new(self.path.clone())); let runtime = Runtime::new(environment); // Open standard library functions. runtime.state().open_libs(); // Register modules. modules::register_all(&runtime); // Set include paths. for path in &self.include_paths { runtime.include_path(&path); } // Set the OS runtime.state().push_string(if cfg!(windows) { "windows" } else { "unix" }); runtime.state().set_global("OS"); // Set configured variables. for &(ref name, ref value) in &self.variables { runtime.state().push(value.clone()); runtime.state().set_global(&name); } // Load the script. try!(runtime.load()); Ok(runtime) } } /// A task runner object that holds the state for defined tasks, dependencies, and the scripting /// runtime. pub struct Runner { /// The current DAG for tasks. graph: Graph, /// The number of threads to use. jobs: usize, /// Environment specification. spec: EnvironmentSpec, /// Runtime local owned by the master thread. runtime: Option<Runtime>, } impl Runner { /// Creates a new runner instance. 
pub fn new<P: Into<PathBuf>>(path: P) -> Result<Runner, Box<Error>> { // By default, set the number of jobs to be one less than the number of available CPU cores. let jobs = cmp::max(1, num_cpus::get() - 1); let path = path.into(); let directory: PathBuf = match path.parent() { Some(path) => path.into(), None => { return Err("failed to parse script directory".into()); } }; Ok(Runner { graph: Graph::new(), jobs: jobs as usize, spec: EnvironmentSpec { path: path.into(), directory: directory, include_paths: Vec::new(), variables: Vec::new(), dry_run: false, always_run: false, keep_going: false, }, runtime: None, }) } pub fn path(&self) -> &Path { &self.spec.path } pub fn directory(&self) -> &Path { &self.spec.directory } /// Sets "dry run" mode. /// /// When in "dry run" mode, running tasks will operate as normal, except that no task's actions /// will be actually run. pub fn dry_run(&mut self) { self.spec.dry_run = true; } /// Run all tasks even if they are up-to-date. pub fn always_run(&mut self)
/// Run all tasks even if they throw errors. pub fn keep_going(&mut self) { self.spec.keep_going = true; } /// Sets the number of threads to use to run tasks. pub fn jobs(&mut self, jobs: usize) { self.jobs = jobs; } /// Adds a path to Lua's require path for modules. pub fn include_path<P: Into<PathBuf>>(&mut self, path: P) { self.spec.include_paths.push(path.into()); } /// Sets a variable value. pub fn set_var<S: AsRef<str>, V: Into<String>>(&mut self, name: S, value: V) { self.spec.variables.push((name.as_ref().to_string(), value.into())); } /// Load the script. pub fn load(&mut self) -> Result<(), Box<Error>> { if self.runtime.is_none() { self.runtime = Some(try!(self.spec.create())); } Ok(()) } /// Prints the list of named tasks for a script. pub fn print_task_list(&mut self) { let mut tasks = self.runtime().environment().tasks(); tasks.sort_by(|a, b| a.name().cmp(b.name())); let mut out = term::stdout().unwrap(); println!("Available tasks:"); for task in tasks { out.fg(term::color::BRIGHT_GREEN).unwrap(); write!(out, " {:16}", task.name()).unwrap(); out.reset().unwrap(); if let Some(ref description) = task.description() { write!(out, "{}", description).unwrap(); } writeln!(out, "").unwrap(); } if let Some(ref default) = self.runtime().environment().default_task() { println!(""); println!("Default task: {}", default); } } /// Run the default task. pub fn run_default(&mut self) -> Result<(), Box<Error>> { if let Some(ref name) = self.runtime().environment().default_task() { let tasks = vec![name]; self.run(&tasks) } else { Err("no default task defined".into()) } } /// Runs the specified list of tasks. /// /// Tasks are run in parallel when possible during execution. The maximum number of parallel /// jobs can be set with the `jobs()` method. pub fn run<S: AsRef<str>>(&mut self, tasks: &[S]) -> Result<(), Box<Error>> { // Resolve all tasks given. for task in tasks { try!(self.resolve_task(task)); } // Determine the schedule of tasks to execute. 
let mut queue = try!(self.graph.solve(!self.spec.always_run)); let task_count = queue.len(); let thread_count = cmp::min(self.jobs, task_count); debug!("running {} task(s) across {} thread(s)", task_count, thread_count); // Spawn one thread for each job. let mut threads = Vec::new(); let mut free_threads: HashSet<usize> = HashSet::new(); let mut channels = Vec::new(); let (sender, receiver) = mpsc::channel::<Result<usize, usize>>(); // Spawn `jobs` number of threads (but no more than the task count!). for thread_id in 0..thread_count { let spec = self.spec.clone(); let thread_sender = sender.clone(); let (parent_sender, thread_receiver) = mpsc::sync_channel::<(String, usize)>(0); channels.push(parent_sender); threads.push(thread::spawn(move || { // Prepare a new runtime. let runtime = spec.create().unwrap_or_else(|e| { error!("{}", e); panic!(); }); if thread_sender.send(Ok(thread_id)).is_err() { trace!("thread {} failed to send channel", thread_id); } // Begin executing tasks! while let Ok((name, task_id)) = thread_receiver.recv() { info!("running task '{}' ({} of {})", name, task_id, task_count); // Lookup the task to run. let task = { // Lookup the task to run. if let Some(task) = runtime.environment().get_task(&name) { task as Rc<Task> } // Find a rule that matches the task name. else if let Some(rule) = runtime.environment() .rules() .iter() .find(|rule| rule.matches(&name)) { Rc::new(rule.create_task(name).unwrap()) as Rc<Task> } // No matching task. else { panic!("no matching task or rule for '{}'", name); } }; // Check for dry run. if !spec.dry_run { if let Err(e) = task.run() { // If we ought to keep going, just issue a warning. 
if spec.keep_going { warn!("ignoring error: {}", e); } else { error!("{}", e); thread_sender.send(Err(thread_id)).unwrap(); return; } } } else { info!("would run task '{}'", task.name()); } if thread_sender.send(Ok(thread_id)).is_err() { trace!("thread {} failed to send channel", thread_id); break; } } })) } drop(sender); // Keep track of tasks completed and tasks in progress. let mut completed_tasks: HashSet<String> = HashSet::new(); let mut current_tasks: HashMap<usize, String> = HashMap::new(); let all_tasks: HashSet<String> = queue.iter().map(|s| s.name().to_string()).collect(); while !queue.is_empty() || !current_tasks.is_empty() { // Wait for a thread to request a task. let result = receiver.recv().unwrap(); // If the thread sent an error, we should stop everything if keep_going isn't enabled. if let Err(thread_id) = result { debug!("thread {} errored, waiting for remaining tasks...", thread_id); return Err("not all tasks completed successfully".into()); } let thread_id = result.unwrap(); free_threads.insert(thread_id); trace!("thread {} is idle", thread_id); // If the thread was previously running a task, mark it as completed. if let Some(task) = current_tasks.remove(&thread_id) { trace!("task '{}' completed", task); completed_tasks.insert(task); } // Attempt to schedule more tasks to run. The most we can schedule is the number of free // threads, but it is limited by the number of tasks that have their dependencies already // finished. 'schedule: for _ in 0..free_threads.len() { // If the queue is empty, we are done. if queue.is_empty() { break; } // Check the next task in the queue. If any of its dependencies have not yet been // completed, we cannot schedule it yet. for dependency in queue.front().unwrap().dependencies() { // Check that the dependency needs scheduled at all (some are already satisfied), // and that it hasn't already finished. 
if all_tasks.contains(dependency) && !completed_tasks.contains(dependency) { // We can't run the next task, so we're done scheduling for now until another // thread finishes. break 'schedule; } } // Get the available task from the queue. let task = queue.front().unwrap().clone(); // Pick a free thread to run the task in. if let Some(thread_id) = free_threads.iter().next().map(|t| *t) { trace!("scheduling task '{}' on thread {}", task.name(), thread_id); let data = (task.name().to_string(), task_count - queue.len() + 1); // Send the task name. if channels[thread_id].send(data).is_ok() { current_tasks.insert(thread_id, task.name().to_string()); free_threads.remove(&thread_id); // Scheduling was successful, so remove the task frome the queue. queue.pop_front().unwrap(); } else { trace!("failed to send channel to thread {}", thread_id); } } else { // We can schedule now, but there aren't any free threads. 😢 break; } } } // Close the input and wait for any remaining threads to finish. drop(channels); for (thread_id, thread) in threads.into_iter().enumerate() { if let Err(e) = thread.join() { trace!("thread {} closed with panic: {:?}", thread_id, e); } } info!("all tasks up to date"); Ok(()) } fn resolve_task<S: AsRef<str>>(&mut self, name: S) -> Result<(), Box<Error>> { if !self.graph.contains(&name) { // Lookup the task to run. if let Some(task) = self.runtime().environment().get_task(&name) { debug!("task '{}' matches named task", name.as_ref()); self.graph.insert(task.clone()); } // Find a rule that matches the task name. else if let Some(rule) = self.runtime() .environment() .rules() .iter() .find(|rule| rule.matches(&name)) { debug!("task '{}' matches rule '{}'", name.as_ref(), rule.pattern); // Create a task for the rule and insert it in the graph. self.graph.insert(Rc::new(rule.create_task(name.as_ref()).unwrap())); } // No matching task. 
else { return Err(format!("no matching task or rule for '{}'", name.as_ref()).into()); } } for dependency in self.graph.get(name).unwrap().dependencies() { if !self.graph.contains(dependency) { try!(self.resolve_task(dependency)); } } Ok(()) } fn runtime(&self) -> Runtime { self.runtime.as_ref().unwrap().clone() } }
{ self.spec.always_run = true; }
identifier_body
runner.rs
use graph::Graph; use modules; use num_cpus; use runtime::{Environment, Runtime}; use std::cmp; use std::collections::{HashMap, HashSet}; use std::error::Error; use std::path::{Path, PathBuf}; use std::rc::Rc; use std::sync::mpsc; use std::thread; use task::Task; use term; #[derive(Clone)] pub struct EnvironmentSpec { /// Script path. path: PathBuf, /// Script directory. directory: PathBuf, /// Module include paths. include_paths: Vec<PathBuf>, /// Global environment variables. variables: Vec<(String, String)>, /// Indicates if actually running tasks should be skipped. dry_run: bool, /// Indicates if up-to-date tasks should be run anyway. always_run: bool, /// Indicates task errors should be ignored. keep_going: bool, } impl EnvironmentSpec { /// Creates an environment from the environment specification. pub fn create(&self) -> Result<Runtime, Box<Error>> { // Prepare a new environment. let environment = try!(Environment::new(self.path.clone())); let runtime = Runtime::new(environment); // Open standard library functions. runtime.state().open_libs(); // Register modules. modules::register_all(&runtime); // Set include paths. for path in &self.include_paths { runtime.include_path(&path); } // Set the OS runtime.state().push_string(if cfg!(windows) { "windows" } else { "unix" }); runtime.state().set_global("OS"); // Set configured variables. for &(ref name, ref value) in &self.variables { runtime.state().push(value.clone()); runtime.state().set_global(&name); } // Load the script. try!(runtime.load()); Ok(runtime) } } /// A task runner object that holds the state for defined tasks, dependencies, and the scripting /// runtime. pub struct Runner { /// The current DAG for tasks. graph: Graph, /// The number of threads to use. jobs: usize, /// Environment specification. spec: EnvironmentSpec, /// Runtime local owned by the master thread. runtime: Option<Runtime>, } impl Runner { /// Creates a new runner instance. 
pub fn new<P: Into<PathBuf>>(path: P) -> Result<Runner, Box<Error>> { // By default, set the number of jobs to be one less than the number of available CPU cores. let jobs = cmp::max(1, num_cpus::get() - 1); let path = path.into(); let directory: PathBuf = match path.parent() { Some(path) => path.into(), None => { return Err("failed to parse script directory".into()); } }; Ok(Runner { graph: Graph::new(), jobs: jobs as usize, spec: EnvironmentSpec { path: path.into(), directory: directory, include_paths: Vec::new(), variables: Vec::new(), dry_run: false, always_run: false, keep_going: false, }, runtime: None, }) } pub fn path(&self) -> &Path { &self.spec.path } pub fn directory(&self) -> &Path { &self.spec.directory } /// Sets "dry run" mode. /// /// When in "dry run" mode, running tasks will operate as normal, except that no task's actions /// will be actually run. pub fn dry_run(&mut self) { self.spec.dry_run = true; } /// Run all tasks even if they are up-to-date. pub fn always_run(&mut self) { self.spec.always_run = true; } /// Run all tasks even if they throw errors. pub fn keep_going(&mut self) { self.spec.keep_going = true; } /// Sets the number of threads to use to run tasks. pub fn jobs(&mut self, jobs: usize) { self.jobs = jobs; } /// Adds a path to Lua's require path for modules. pub fn include_path<P: Into<PathBuf>>(&mut self, path: P) { self.spec.include_paths.push(path.into()); } /// Sets a variable value. pub fn set_var<S: AsRef<str>, V: Into<String>>(&mut self, name: S, value: V) { self.spec.variables.push((name.as_ref().to_string(), value.into())); } /// Load the script. pub fn load(&mut self) -> Result<(), Box<Error>> { if self.runtime.is_none() { self.runtime = Some(try!(self.spec.create())); } Ok(()) } /// Prints the list of named tasks for a script. 
pub fn print_task_list(&mut self) { let mut tasks = self.runtime().environment().tasks(); tasks.sort_by(|a, b| a.name().cmp(b.name())); let mut out = term::stdout().unwrap(); println!("Available tasks:"); for task in tasks { out.fg(term::color::BRIGHT_GREEN).unwrap(); write!(out, " {:16}", task.name()).unwrap(); out.reset().unwrap(); if let Some(ref description) = task.description() { write!(out, "{}", description).unwrap(); } writeln!(out, "").unwrap(); } if let Some(ref default) = self.runtime().environment().default_task() { println!(""); println!("Default task: {}", default); } } /// Run the default task. pub fn run_default(&mut self) -> Result<(), Box<Error>> { if let Some(ref name) = self.runtime().environment().default_task() { let tasks = vec![name]; self.run(&tasks) } else { Err("no default task defined".into()) } } /// Runs the specified list of tasks. /// /// Tasks are run in parallel when possible during execution. The maximum number of parallel /// jobs can be set with the `jobs()` method. pub fn run<S: AsRef<str>>(&mut self, tasks: &[S]) -> Result<(), Box<Error>> { // Resolve all tasks given. for task in tasks { try!(self.resolve_task(task)); } // Determine the schedule of tasks to execute. let mut queue = try!(self.graph.solve(!self.spec.always_run)); let task_count = queue.len(); let thread_count = cmp::min(self.jobs, task_count); debug!("running {} task(s) across {} thread(s)", task_count, thread_count); // Spawn one thread for each job. let mut threads = Vec::new(); let mut free_threads: HashSet<usize> = HashSet::new(); let mut channels = Vec::new(); let (sender, receiver) = mpsc::channel::<Result<usize, usize>>(); // Spawn `jobs` number of threads (but no more than the task count!). 
for thread_id in 0..thread_count { let spec = self.spec.clone(); let thread_sender = sender.clone(); let (parent_sender, thread_receiver) = mpsc::sync_channel::<(String, usize)>(0); channels.push(parent_sender); threads.push(thread::spawn(move || { // Prepare a new runtime. let runtime = spec.create().unwrap_or_else(|e| { error!("{}", e); panic!(); }); if thread_sender.send(Ok(thread_id)).is_err() { trace!("thread {} failed to send channel", thread_id); } // Begin executing tasks! while let Ok((name, task_id)) = thread_receiver.recv() { info!("running task '{}' ({} of {})", name, task_id, task_count); // Lookup the task to run. let task = { // Lookup the task to run. if let Some(task) = runtime.environment().get_task(&name) { task as Rc<Task> } // Find a rule that matches the task name. else if let Some(rule) = runtime.environment() .rules() .iter() .find(|rule| rule.matches(&name)) { Rc::new(rule.create_task(name).unwrap()) as Rc<Task> } // No matching task. else { panic!("no matching task or rule for '{}'", name); } }; // Check for dry run. if !spec.dry_run { if let Err(e) = task.run() { // If we ought to keep going, just issue a warning. if spec.keep_going { warn!("ignoring error: {}", e); } else { error!("{}", e); thread_sender.send(Err(thread_id)).unwrap(); return; } } } else { info!("would run task '{}'", task.name()); } if thread_sender.send(Ok(thread_id)).is_err() { trace!("thread {} failed to send channel", thread_id); break; } } })) } drop(sender); // Keep track of tasks completed and tasks in progress. let mut completed_tasks: HashSet<String> = HashSet::new(); let mut current_tasks: HashMap<usize, String> = HashMap::new(); let all_tasks: HashSet<String> = queue.iter().map(|s| s.name().to_string()).collect(); while !queue.is_empty() || !current_tasks.is_empty() { // Wait for a thread to request a task. let result = receiver.recv().unwrap(); // If the thread sent an error, we should stop everything if keep_going isn't enabled. 
if let Err(thread_id) = result { debug!("thread {} errored, waiting for remaining tasks...", thread_id); return Err("not all tasks completed successfully".into()); } let thread_id = result.unwrap(); free_threads.insert(thread_id); trace!("thread {} is idle", thread_id); // If the thread was previously running a task, mark it as completed. if let Some(task) = current_tasks.remove(&thread_id) { trace!("task '{}' completed", task); completed_tasks.insert(task); } // Attempt to schedule more tasks to run. The most we can schedule is the number of free // threads, but it is limited by the number of tasks that have their dependencies already // finished. 'schedule: for _ in 0..free_threads.len() { // If the queue is empty, we are done. if queue.is_empty() { break; } // Check the next task in the queue. If any of its dependencies have not yet been // completed, we cannot schedule it yet. for dependency in queue.front().unwrap().dependencies() { // Check that the dependency needs scheduled at all (some are already satisfied), // and that it hasn't already finished. if all_tasks.contains(dependency) && !completed_tasks.contains(dependency) { // We can't run the next task, so we're done scheduling for now until another // thread finishes. break 'schedule; } } // Get the available task from the queue. let task = queue.front().unwrap().clone(); // Pick a free thread to run the task in. if let Some(thread_id) = free_threads.iter().next().map(|t| *t) { trace!("scheduling task '{}' on thread {}", task.name(), thread_id); let data = (task.name().to_string(), task_count - queue.len() + 1); // Send the task name. if channels[thread_id].send(data).is_ok() {
// Scheduling was successful, so remove the task frome the queue. queue.pop_front().unwrap(); } else { trace!("failed to send channel to thread {}", thread_id); } } else { // We can schedule now, but there aren't any free threads. 😢 break; } } } // Close the input and wait for any remaining threads to finish. drop(channels); for (thread_id, thread) in threads.into_iter().enumerate() { if let Err(e) = thread.join() { trace!("thread {} closed with panic: {:?}", thread_id, e); } } info!("all tasks up to date"); Ok(()) } fn resolve_task<S: AsRef<str>>(&mut self, name: S) -> Result<(), Box<Error>> { if !self.graph.contains(&name) { // Lookup the task to run. if let Some(task) = self.runtime().environment().get_task(&name) { debug!("task '{}' matches named task", name.as_ref()); self.graph.insert(task.clone()); } // Find a rule that matches the task name. else if let Some(rule) = self.runtime() .environment() .rules() .iter() .find(|rule| rule.matches(&name)) { debug!("task '{}' matches rule '{}'", name.as_ref(), rule.pattern); // Create a task for the rule and insert it in the graph. self.graph.insert(Rc::new(rule.create_task(name.as_ref()).unwrap())); } // No matching task. else { return Err(format!("no matching task or rule for '{}'", name.as_ref()).into()); } } for dependency in self.graph.get(name).unwrap().dependencies() { if !self.graph.contains(dependency) { try!(self.resolve_task(dependency)); } } Ok(()) } fn runtime(&self) -> Runtime { self.runtime.as_ref().unwrap().clone() } }
current_tasks.insert(thread_id, task.name().to_string()); free_threads.remove(&thread_id);
random_line_split
assembly_finishing_objects.py
# -*- coding: utf-8 -*- """ @author : c_georgescu """ class Sequence: def __init__(self, index, mums, step, big_enough): self.contigs = [] self.discarded_contigs = [] tmp = Sequence.sort_mums(index, mums) i = 0 while i < len(tmp): self.contigs.append(Contig(i+1, tmp[i], step, big_enough)) i += 1 self.contigs.sort(key = lambda c: c.first_mum) print "The order in which the contigs should be concatenated is :\n" + str([(c.id, len(c.mum_sequences)) for c in self.contigs]) i = 0 while (i < len(self.contigs)): if (len(self.contigs[i].mum_sequences) == 0): self.discarded_contigs.append(self.contigs.pop(i)) continue i += 1 # setting the first contig orientation if (self.contigs[0].mum_sequences[0].mums[0][5] == 1): self.contigs[0].futur = 1 else: self.contigs[0].futur = 0 self.orientate(0, 0, 0, float("inf"), False, step) print "The order in which the contigs should be concatenated is :\n" + str([(c.id, c.futur) for c in self.contigs]) def orientate(self, current, orientation, start, end, recursion, step): while current < len(self.contigs): c = self.contigs[current] if (c.first_mum == float("inf")): # if arrived at the end where contigs to be discarded or appended to the end are return if (c.mum_sequences[0].start > end): return # arrived at the end of the gap that called this recursion j = 0 if (c.futur == None or current == 0): j = c.search_true_first_sequence(start, 5*step) # info needed for verify height too, not only for setting futur if (c.futur == None): # searching if there is remaining noise that has inverted contigs if ((current + 1 < sum(k > 0 for k in [len(cont.mum_sequences) for cont in self.contigs]))): # are they others contigs left after it if self.contigs[current+1].search_true_first_sequence(start, step)==len(self.contigs[current+1].mum_sequences): pass elif (j != len(c.mum_sequences) # they are useful mum_sequences left and c.mum_sequences[j].start > self.contigs[current+1].mum_sequences[self.contigs[current+1].search_true_first_sequence(start, 
step)].start): # could it fit after the next contig self.swap_contigs(current, current + 1) continue # restart the loop if(j == len(c.mum_sequences)): # all mum_sequences fit on the already treated part, so we discard it current += 1 c.futur = None continue # restart the loop res = c.verify_heights(j, self.contigs[0].id) if (res == 0 or res == 1): #### maybe : find between which seqs is the vertical gap and use the orientaton of the one at the start to set orientation if (len(c.mum_sequences) > j+1): # change to a while? in case 0 1 0 gap 1 if ((c.mum_sequences[j+1].start - c.mum_sequences[j].end > step) and (c.mum_sequences[j] != c.mum_sequences[j+1])): c.futur = 0 if (c.futur == None): if c.mum_sequences[j].orientation == orientation: c.futur = 0 else: c.futur = 1 if (res == 1): # rolling case print "\n\nHERE\n\n" # search for vertical gap between a and b # while (compare_heights(a, b)): ## TODO need a function to check when it goes from top to bottom gap, if direct/direct, fisrt higher, if reverse/reverse, first lower res = c.find_rolling_gap() # call recursion on current + 1 with start = a.end and end = b.start # if (c.futur == 0): # if (c.mum_sequences[res].orientation == 0): # orientation = 1 # else: # orientation = 0 # else: if (c.futur == 1): if (c.mum_sequences[res].orientation == 0): orientation = 1 else: orientation = 0 else: orientation = c.mum_sequences[res].orientation self.orientate(current + 1, orientation, c.mum_sequences[res].end, c.mum_sequences[res+1].start, True, step) ### TODO restaurer l'orientation!! 
à celle de b if (c.futur == 1): if (c.mum_sequences[res+1].orientation == 0): orientation = 1 else: orientation = 0 else: orientation = c.mum_sequences[res+1].orientation current += 1 continue elif (res == 2): c.futur = 1 if c.mum_sequences[j].orientation == 0: # since it will be reversed orientation = 1 else: orientation = 0 j += 1 while (j < len(c.mum_sequences)): if (c.mum_sequences[j].start < start - step): j += 1 continue # not to change the start value if (c.mum_sequences[j].start - c.mum_sequences[j-1].end < step): # maybe n times step? if (c.futur == 0): # the contig is in the good orientation, so .orientation is true orientation = c.mum_sequences[j].orientation elif c.mum_sequences[j].orientation == 0: # the contig will be reversed, so the actual orientation is the opposite of .orientation orientation = 1 else: orientation = 0 else: # a gap can only happen when the two sequences around it are in different orientations ###### FALSE when there's need to roll, and it is of lower height if (c.compare_heights(j-1, j)): #c.mum_sequences[j-1].height < c.mum_sequences[j].height): # good if (not recursion): if orientation == 0: orientation = 1 # else: # orientation = 0 else: break # print "\n\n\n\n\n\nINVERSED CONCATENATION ORDER\n\n\n\n\n\n" tmp_start = c.get_position_before_gap(step) self.orientate(current + 1, orientation, tmp_start, c.mum_sequences[j].start, True, step) # search from here to which contig of the list, the first mums start are smaller than rev, and reverse that length rev = self.search_rev(current+1, c.mum_sequences[j].start) # self.contigs[current + 1: rev] = reversed(self.contigs[current + 1:rev]) #################### tmp = None if (c.mum_sequences[j-1].orientation == 1): # if the first mum sequence is reverse if (c.futur == 0): tmp = current else: tmp = current + 1 else: if (c.futur == 0): tmp = current +1 else: tmp = current self.contigs[tmp : rev] = reversed(self.contigs[tmp : rev]) #################### orientation = 0 current = rev-1 j += 1 
break # because a gap means that j is the last mum_sequence of the list # else: # this one should be reversed, unless there's translocation # if c.futur == 0: # print "/!\\problem HEREEEE" # unless on the starting contig for which rolling might happen # else: # print "\n\n\n\n\nHERE\n\n\n\n" j += 1 # start = c.get_position_before_gap(step) start = c.mum_sequences[j-1].end current += 1 @staticmethod def sort_mums(index, mums): mums_of_contigs = [[] for v in xrange(len(index.graph.vertices)-1)] for m in mums: mums_of_contigs[m[4].id - 1].append(m) return mums_of_contigs def search_rev(self, i, limit): while(i < len(self.contigs) and self.contigs[i].mum_sequences[0].start < limit): i += 1 return i def swap_contigs(self, first, second): tmp_contig = self.contigs[first] self.contigs[first] = self.contigs[second] self.contigs[second] = tmp_contig class Contig: def __init__(self, n, mums, step, big_enough): self.id = n self.mum_sequences = [] if (len(mums) > 1): # print "kept : " + str(self.id) + " " + str(mums[0][2]) # mums = Contig.clean(mums, step) mums.sort(key = lambda info: info[0]) Contig.clean(mums, step) if (len(mums) == 0 or (len(mums) == 1 and mums[0][2] < big_enough)): # if only 1 mum, might false results print "Discarding this conting or appending it to the end of the sequence" self.futur = -1 # removal self.first_mum = float("inf") else: self.futur = None self.make_mum_sequences(mums) # each element is a list of mums which follow in the same order, two following elemnts are in different orientation self.first_mum = self.mum_sequences[0].mums[0][0] def make_mum_sequences(self, mums): i = 1 orientation = mums[0][5]
while(i < len(mums)): if (mums[i][5] != orientation): self.mum_sequences.append(Mum_sequence(orientation, mums[j:i])) orientation = mums[i][5] j = i elif (abs(mums[i-1][1] - mums[i][1]) > 2000000): # arbitrary value that is much bigger than any small jump that could happen, but smaller than the size of the genome self.mum_sequences.append(Mum_sequence(orientation, mums[j:i])) orientation = mums[i][5] j = i i += 1 # if (i != 0): self.mum_sequences.append(Mum_sequence(orientation, mums[j:i])) @staticmethod def clean(mums, step): continuating = [False for i in mums] for i in xrange(len(mums)): ### currently this removes inversions that would span only 1 mum if (continuating[i] is False): if i != 0: # if NOT the first one --> check continuity with previous for j in xrange(i): # searching if any mum before it continues it if mums[i][5] == 0: if ((mums[j][1] + mums[j][2] < mums[i][1] + step) and (mums[j][1] + mums[j][2] > mums[i][1] - step) and (mums[j][0] + mums[j][2] < mums[i][0] + step) and (mums[j][0] + mums[j][2] > mums[i][0] - step)): continuating[j] = True continuating[j] = True break else: if ((mums[j][1] < mums[i][1] + mums[i][2] + step) and (mums[j][1] > mums[i][1] + mums[i][2] - step) and (mums[j][0] + mums[j][2] < mums[i][0] + step) and (mums[j][0] + mums[j][2] > mums[i][0] - step)): continuating[j] = True continuating[i] = True break if i != len(mums): # if NOT the last one --> check continuity with next for j in xrange(i+1, len(mums)): # searching if any mum after it continues it if mums[i][5] == 0: if ((mums[j][1] > mums[i][1] + mums[i][2] - step) and (mums[j][1] < mums[i][1] + mums[i][2] + step) and (mums[j][0] > mums[i][0] + mums[i][2] - step) and (mums[j][0] < mums[i][0] + mums[i][2] + step)): continuating[j] = True continuating[i] = True break else: if ((mums[j][1] + mums[j][2] > mums[i][1] - step) and (mums[j][1] + mums[j][2] < mums[i][1] + step) and (mums[j][0] < mums[i][0] + mums[i][2] + step) and (mums[j][0] > mums[i][0] + mums[i][2] - step)): 
continuating[j] = True continuating[i] = True break i = 0 while (i < len(mums)): if (continuating[i] is False): continuating.pop(i) mums.pop(i) # print "Removing a mum of size " + str(tmp[2]) continue i += 1 # return mums def verify_heights(self, j, first_s_id): i = j + 1 start_height = self.mum_sequences[j].height current_height = start_height restarted = False while (i < len(self.mum_sequences)): if (self.mum_sequences[i].height > current_height): # current_height = self.mum_sequences[i].height ## ADDED THIS i += 1 continue elif (self.mum_sequences[i].height < start_height and not restarted and first_s_id == self.id): restarted = True current_height = self.mum_sequences[i].height else: if (i+1 == len(self.mum_sequences)): return 2 # need to reverse this contig return -1 # error somewhere i += 1 if (restarted == True): return 1 # will need to roll return 0 # linear (but might still have a gap and two different orientations) def search_true_first_sequence(self, start, step): j = 0 while (j < len(self.mum_sequences) and (self.mum_sequences[j].start < start - step)): j += 1 return j def get_position_before_gap(self, step): i = 1 position = self.mum_sequences[0].end while (i < len(self.mum_sequences)): if (self.mum_sequences[i].start > position + step): return position else: position = self.mum_sequences[i].end i += 1 return position def compare_heights(self, i, j): # True = i lower than j, False = i higher than j, with regards to futur orientation if (self.mum_sequences[i].height < self.mum_sequences[j].height): if (self.futur == 0): return True else: return False if (self.futur == 0): return False return True def find_rolling_gap(self): down = float("inf") up = 0 i = -1 j = -1 k = 0 ## need to find highest and lowest seq of mums while (k < len(self.mum_sequences)): if self.mum_sequences[k].height > up: up = self.mum_sequences[k].height j = k if self.mum_sequences[k].height < down: down = self.mum_sequences[k].height i = k k += 1 if (i < j): return i else: return j 
class Mum_sequence: def __init__(self, orientation, mums): self.orientation = orientation # 0 or 1, direct or reverse self.mums = mums self.start = mums[0][0] self.end = mums[len(mums)-1][0] + mums[len(mums)-1][2] self.height = None self.calculate_height(mums) def calculate_height(self, mums): # receives a list of successive mums all direct or reverse tmp = 0 i = 0 for mum in mums: i += 1 tmp += mum[1] try: self.height = tmp/i except ZeroDivisionError: self.height = 0 print "zero div"
j = 0 if (len(mums) == 1): self.mum_sequences.append(Mum_sequence(orientation, mums)) return
random_line_split
assembly_finishing_objects.py
# -*- coding: utf-8 -*- """ @author : c_georgescu """ class Sequence: def __init__(self, index, mums, step, big_enough): self.contigs = [] self.discarded_contigs = [] tmp = Sequence.sort_mums(index, mums) i = 0 while i < len(tmp): self.contigs.append(Contig(i+1, tmp[i], step, big_enough)) i += 1 self.contigs.sort(key = lambda c: c.first_mum) print "The order in which the contigs should be concatenated is :\n" + str([(c.id, len(c.mum_sequences)) for c in self.contigs]) i = 0 while (i < len(self.contigs)): if (len(self.contigs[i].mum_sequences) == 0): self.discarded_contigs.append(self.contigs.pop(i)) continue i += 1 # setting the first contig orientation if (self.contigs[0].mum_sequences[0].mums[0][5] == 1): self.contigs[0].futur = 1 else: self.contigs[0].futur = 0 self.orientate(0, 0, 0, float("inf"), False, step) print "The order in which the contigs should be concatenated is :\n" + str([(c.id, c.futur) for c in self.contigs]) def orientate(self, current, orientation, start, end, recursion, step): while current < len(self.contigs): c = self.contigs[current] if (c.first_mum == float("inf")): # if arrived at the end where contigs to be discarded or appended to the end are return if (c.mum_sequences[0].start > end): return # arrived at the end of the gap that called this recursion j = 0 if (c.futur == None or current == 0): j = c.search_true_first_sequence(start, 5*step) # info needed for verify height too, not only for setting futur if (c.futur == None): # searching if there is remaining noise that has inverted contigs if ((current + 1 < sum(k > 0 for k in [len(cont.mum_sequences) for cont in self.contigs]))): # are they others contigs left after it if self.contigs[current+1].search_true_first_sequence(start, step)==len(self.contigs[current+1].mum_sequences): pass elif (j != len(c.mum_sequences) # they are useful mum_sequences left and c.mum_sequences[j].start > self.contigs[current+1].mum_sequences[self.contigs[current+1].search_true_first_sequence(start, 
step)].start): # could it fit after the next contig self.swap_contigs(current, current + 1) continue # restart the loop if(j == len(c.mum_sequences)): # all mum_sequences fit on the already treated part, so we discard it current += 1 c.futur = None continue # restart the loop res = c.verify_heights(j, self.contigs[0].id) if (res == 0 or res == 1): #### maybe : find between which seqs is the vertical gap and use the orientaton of the one at the start to set orientation if (len(c.mum_sequences) > j+1): # change to a while? in case 0 1 0 gap 1 if ((c.mum_sequences[j+1].start - c.mum_sequences[j].end > step) and (c.mum_sequences[j] != c.mum_sequences[j+1])): c.futur = 0 if (c.futur == None): if c.mum_sequences[j].orientation == orientation: c.futur = 0 else: c.futur = 1 if (res == 1): # rolling case print "\n\nHERE\n\n" # search for vertical gap between a and b # while (compare_heights(a, b)): ## TODO need a function to check when it goes from top to bottom gap, if direct/direct, fisrt higher, if reverse/reverse, first lower res = c.find_rolling_gap() # call recursion on current + 1 with start = a.end and end = b.start # if (c.futur == 0): # if (c.mum_sequences[res].orientation == 0): # orientation = 1 # else: # orientation = 0 # else: if (c.futur == 1): if (c.mum_sequences[res].orientation == 0): orientation = 1 else: orientation = 0 else: orientation = c.mum_sequences[res].orientation self.orientate(current + 1, orientation, c.mum_sequences[res].end, c.mum_sequences[res+1].start, True, step) ### TODO restaurer l'orientation!! 
à celle de b if (c.futur == 1): if (c.mum_sequences[res+1].orientation == 0): orientation = 1 else: orientation = 0 else: orientation = c.mum_sequences[res+1].orientation current += 1 continue elif (res == 2): c.futur = 1 if c.mum_sequences[j].orientation == 0: # since it will be reversed orientation = 1 else: orientation = 0 j += 1 while (j < len(c.mum_sequences)): if (c.mum_sequences[j].start < start - step): j += 1 continue # not to change the start value if (c.mum_sequences[j].start - c.mum_sequences[j-1].end < step): # maybe n times step? if (c.futur == 0): # the contig is in the good orientation, so .orientation is true orientation = c.mum_sequences[j].orientation elif c.mum_sequences[j].orientation == 0: # the contig will be reversed, so the actual orientation is the opposite of .orientation orientation = 1 else: orientation = 0 else: # a gap can only happen when the two sequences around it are in different orientations ###### FALSE when there's need to roll, and it is of lower height if (c.compare_heights(j-1, j)): #c.mum_sequences[j-1].height < c.mum_sequences[j].height): # good if (not recursion): if orientation == 0: orientation = 1 # else: # orientation = 0 else: break # print "\n\n\n\n\n\nINVERSED CONCATENATION ORDER\n\n\n\n\n\n" tmp_start = c.get_position_before_gap(step) self.orientate(current + 1, orientation, tmp_start, c.mum_sequences[j].start, True, step) # search from here to which contig of the list, the first mums start are smaller than rev, and reverse that length rev = self.search_rev(current+1, c.mum_sequences[j].start) # self.contigs[current + 1: rev] = reversed(self.contigs[current + 1:rev]) #################### tmp = None if (c.mum_sequences[j-1].orientation == 1): # if the first mum sequence is reverse if (c.futur == 0): tmp = current else: tmp = current + 1 else: if (c.futur == 0): tmp = current +1 else: tmp = current self.contigs[tmp : rev] = reversed(self.contigs[tmp : rev]) #################### orientation = 0 current = rev-1 j += 1 
break # because a gap means that j is the last mum_sequence of the list # else: # this one should be reversed, unless there's translocation # if c.futur == 0: # print "/!\\problem HEREEEE" # unless on the starting contig for which rolling might happen # else: # print "\n\n\n\n\nHERE\n\n\n\n" j += 1 # start = c.get_position_before_gap(step) start = c.mum_sequences[j-1].end current += 1 @staticmethod def sort_mums(index, mums): mums_of_contigs = [[] for v in xrange(len(index.graph.vertices)-1)] for m in mums: mums_of_contigs[m[4].id - 1].append(m) return mums_of_contigs def search_rev(self, i, limit): while(i < len(self.contigs) and self.contigs[i].mum_sequences[0].start < limit): i += 1 return i def swap_contigs(self, first, second): tmp_contig = self.contigs[first] self.contigs[first] = self.contigs[second] self.contigs[second] = tmp_contig class Contig: def __init__(self, n, mums, step, big_enough): self.id = n self.mum_sequences = [] if (len(mums) > 1): # print "kept : " + str(self.id) + " " + str(mums[0][2]) # mums = Contig.clean(mums, step) mums.sort(key = lambda info: info[0]) Contig.clean(mums, step) if (len(mums) == 0 or (len(mums) == 1 and mums[0][2] < big_enough)): # if only 1 mum, might false results print "Discarding this conting or appending it to the end of the sequence" self.futur = -1 # removal self.first_mum = float("inf") else: self.futur = None self.make_mum_sequences(mums) # each element is a list of mums which follow in the same order, two following elemnts are in different orientation self.first_mum = self.mum_sequences[0].mums[0][0] def make_mum_sequences(self, mums): i = 1 orientation = mums[0][5] j = 0 if (len(mums) == 1): self.mum_sequences.append(Mum_sequence(orientation, mums)) return while(i < len(mums)): if (mums[i][5] != orientation): self.mum_sequences.append(Mum_sequence(orientation, mums[j:i])) orientation = mums[i][5] j = i elif (abs(mums[i-1][1] - mums[i][1]) > 2000000): # arbitrary value that is much bigger than any small jump 
that could happen, but smaller than the size of the genome self.mum_sequences.append(Mum_sequence(orientation, mums[j:i])) orientation = mums[i][5] j = i i += 1 # if (i != 0): self.mum_sequences.append(Mum_sequence(orientation, mums[j:i])) @staticmethod def clean(mums, step): continuating = [False for i in mums] for i in xrange(len(mums)): ### currently this removes inversions that would span only 1 mum if (continuating[i] is False): if i != 0: # if NOT the first one --> check continuity with previous for j in xrange(i): # searching if any mum before it continues it if mums[i][5] == 0: if ((mums[j][1] + mums[j][2] < mums[i][1] + step) and (mums[j][1] + mums[j][2] > mums[i][1] - step) and (mums[j][0] + mums[j][2] < mums[i][0] + step) and (mums[j][0] + mums[j][2] > mums[i][0] - step)): continuating[j] = True continuating[j] = True break else: if ((mums[j][1] < mums[i][1] + mums[i][2] + step) and (mums[j][1] > mums[i][1] + mums[i][2] - step) and (mums[j][0] + mums[j][2] < mums[i][0] + step) and (mums[j][0] + mums[j][2] > mums[i][0] - step)): continuating[j] = True continuating[i] = True break if i != len(mums): # if NOT the last one --> check continuity with next for j in xrange(i+1, len(mums)): # searching if any mum after it continues it if mums[i][5] == 0: if ((mums[j][1] > mums[i][1] + mums[i][2] - step) and (mums[j][1] < mums[i][1] + mums[i][2] + step) and (mums[j][0] > mums[i][0] + mums[i][2] - step) and (mums[j][0] < mums[i][0] + mums[i][2] + step)): continuating[j] = True continuating[i] = True break else: if ((mums[j][1] + mums[j][2] > mums[i][1] - step) and (mums[j][1] + mums[j][2] < mums[i][1] + step) and (mums[j][0] < mums[i][0] + mums[i][2] + step) and (mums[j][0] > mums[i][0] + mums[i][2] - step)): continuating[j] = True continuating[i] = True break i = 0 while (i < len(mums)): if (continuating[i] is False): continuating.pop(i) mums.pop(i) # print "Removing a mum of size " + str(tmp[2]) continue i += 1 # return mums def verify_heights(self, j, 
first_s_id): i = j + 1 start_height = self.mum_sequences[j].height current_height = start_height restarted = False while (i < len(self.mum_sequences)): if (self.mum_sequences[i].height > current_height): # current_height = self.mum_sequences[i].height ## ADDED THIS i += 1 continue elif (self.mum_sequences[i].height < start_height and not restarted and first_s_id == self.id): restarted = True current_height = self.mum_sequences[i].height else: if (i+1 == len(self.mum_sequences)): return 2 # need to reverse this contig return -1 # error somewhere i += 1 if (restarted == True): return 1 # will need to roll return 0 # linear (but might still have a gap and two different orientations) def search_true_first_sequence(self, start, step): j = 0 while (j < len(self.mum_sequences) and (self.mum_sequences[j].start < start - step)): j += 1 return j def get_position_before_gap(self, step): i = 1 position = self.mum_sequences[0].end while (i < len(self.mum_sequences)): if (self.mum_sequences[i].start > position + step): return position else: position = self.mum_sequences[i].end i += 1 return position def compare_heights(self, i, j): # True = i lower than j, False = i higher than j, with regards to futur orientation if (self.mum_sequences[i].height < self.mum_sequences[j].height): if (self.futur == 0): return True else: return False if (self.futur == 0): return False return True def f
self): down = float("inf") up = 0 i = -1 j = -1 k = 0 ## need to find highest and lowest seq of mums while (k < len(self.mum_sequences)): if self.mum_sequences[k].height > up: up = self.mum_sequences[k].height j = k if self.mum_sequences[k].height < down: down = self.mum_sequences[k].height i = k k += 1 if (i < j): return i else: return j class Mum_sequence: def __init__(self, orientation, mums): self.orientation = orientation # 0 or 1, direct or reverse self.mums = mums self.start = mums[0][0] self.end = mums[len(mums)-1][0] + mums[len(mums)-1][2] self.height = None self.calculate_height(mums) def calculate_height(self, mums): # receives a list of successive mums all direct or reverse tmp = 0 i = 0 for mum in mums: i += 1 tmp += mum[1] try: self.height = tmp/i except ZeroDivisionError: self.height = 0 print "zero div"
ind_rolling_gap(
identifier_name
assembly_finishing_objects.py
# -*- coding: utf-8 -*- """ @author : c_georgescu """ class Sequence: def __init__(self, index, mums, step, big_enough): self.contigs = [] self.discarded_contigs = [] tmp = Sequence.sort_mums(index, mums) i = 0 while i < len(tmp): self.contigs.append(Contig(i+1, tmp[i], step, big_enough)) i += 1 self.contigs.sort(key = lambda c: c.first_mum) print "The order in which the contigs should be concatenated is :\n" + str([(c.id, len(c.mum_sequences)) for c in self.contigs]) i = 0 while (i < len(self.contigs)): if (len(self.contigs[i].mum_sequences) == 0): self.discarded_contigs.append(self.contigs.pop(i)) continue i += 1 # setting the first contig orientation if (self.contigs[0].mum_sequences[0].mums[0][5] == 1): self.contigs[0].futur = 1 else: self.contigs[0].futur = 0 self.orientate(0, 0, 0, float("inf"), False, step) print "The order in which the contigs should be concatenated is :\n" + str([(c.id, c.futur) for c in self.contigs]) def orientate(self, current, orientation, start, end, recursion, step): while current < len(self.contigs): c = self.contigs[current] if (c.first_mum == float("inf")): # if arrived at the end where contigs to be discarded or appended to the end are return if (c.mum_sequences[0].start > end): return # arrived at the end of the gap that called this recursion j = 0 if (c.futur == None or current == 0): j = c.search_true_first_sequence(start, 5*step) # info needed for verify height too, not only for setting futur if (c.futur == None): # searching if there is remaining noise that has inverted contigs if ((current + 1 < sum(k > 0 for k in [len(cont.mum_sequences) for cont in self.contigs]))): # are they others contigs left after it if self.contigs[current+1].search_true_first_sequence(start, step)==len(self.contigs[current+1].mum_sequences): pass elif (j != len(c.mum_sequences) # they are useful mum_sequences left and c.mum_sequences[j].start > self.contigs[current+1].mum_sequences[self.contigs[current+1].search_true_first_sequence(start, 
step)].start): # could it fit after the next contig self.swap_contigs(current, current + 1) continue # restart the loop if(j == len(c.mum_sequences)): # all mum_sequences fit on the already treated part, so we discard it current += 1 c.futur = None continue # restart the loop res = c.verify_heights(j, self.contigs[0].id) if (res == 0 or res == 1): #### maybe : find between which seqs is the vertical gap and use the orientaton of the one at the start to set orientation if (len(c.mum_sequences) > j+1): # change to a while? in case 0 1 0 gap 1 if ((c.mum_sequences[j+1].start - c.mum_sequences[j].end > step) and (c.mum_sequences[j] != c.mum_sequences[j+1])): c.futur = 0 if (c.futur == None): if c.mum_sequences[j].orientation == orientation: c.futur = 0 else: c.futur = 1 if (res == 1): # rolling case print "\n\nHERE\n\n" # search for vertical gap between a and b # while (compare_heights(a, b)): ## TODO need a function to check when it goes from top to bottom gap, if direct/direct, fisrt higher, if reverse/reverse, first lower res = c.find_rolling_gap() # call recursion on current + 1 with start = a.end and end = b.start # if (c.futur == 0): # if (c.mum_sequences[res].orientation == 0): # orientation = 1 # else: # orientation = 0 # else: if (c.futur == 1): if (c.mum_sequences[res].orientation == 0): orientation = 1 else: orientation = 0 else: orientation = c.mum_sequences[res].orientation self.orientate(current + 1, orientation, c.mum_sequences[res].end, c.mum_sequences[res+1].start, True, step) ### TODO restaurer l'orientation!! 
à celle de b if (c.futur == 1): if (c.mum_sequences[res+1].orientation == 0): orientation = 1 else: orientation = 0 else: orientation = c.mum_sequences[res+1].orientation current += 1 continue elif (res == 2): c.futur = 1 if c.mum_sequences[j].orientation == 0: # since it will be reversed orientation = 1 else: orientation = 0 j += 1 while (j < len(c.mum_sequences)): if (c.mum_sequences[j].start < start - step): j += 1 continue # not to change the start value if (c.mum_sequences[j].start - c.mum_sequences[j-1].end < step): # maybe n times step? if (c.futur == 0): # the contig is in the good orientation, so .orientation is true orientation = c.mum_sequences[j].orientation elif c.mum_sequences[j].orientation == 0: # the contig will be reversed, so the actual orientation is the opposite of .orientation orientation = 1 else: orientation = 0 else: # a gap can only happen when the two sequences around it are in different orientations ###### FALSE when there's need to roll, and it is of lower height if (c.compare_heights(j-1, j)): #c.mum_sequences[j-1].height < c.mum_sequences[j].height): # good if (not recursion): if orientation == 0: orientation = 1 # else: # orientation = 0 else: break # print "\n\n\n\n\n\nINVERSED CONCATENATION ORDER\n\n\n\n\n\n" tmp_start = c.get_position_before_gap(step) self.orientate(current + 1, orientation, tmp_start, c.mum_sequences[j].start, True, step) # search from here to which contig of the list, the first mums start are smaller than rev, and reverse that length rev = self.search_rev(current+1, c.mum_sequences[j].start) # self.contigs[current + 1: rev] = reversed(self.contigs[current + 1:rev]) #################### tmp = None if (c.mum_sequences[j-1].orientation == 1): # if the first mum sequence is reverse if (c.futur == 0): tmp = current else: tmp = current + 1 else: if (c.futur == 0): tmp = current +1 else: tmp = current self.contigs[tmp : rev] = reversed(self.contigs[tmp : rev]) #################### orientation = 0 current = rev-1 j += 1 
break # because a gap means that j is the last mum_sequence of the list # else: # this one should be reversed, unless there's translocation # if c.futur == 0: # print "/!\\problem HEREEEE" # unless on the starting contig for which rolling might happen # else: # print "\n\n\n\n\nHERE\n\n\n\n" j += 1 # start = c.get_position_before_gap(step) start = c.mum_sequences[j-1].end current += 1 @staticmethod def sort_mums(index, mums): mums_of_contigs = [[] for v in xrange(len(index.graph.vertices)-1)] for m in mums: mums_of_contigs[m[4].id - 1].append(m) return mums_of_contigs def search_rev(self, i, limit): while(i < len(self.contigs) and self.contigs[i].mum_sequences[0].start < limit): i += 1 return i def swap_contigs(self, first, second): tmp_contig = self.contigs[first] self.contigs[first] = self.contigs[second] self.contigs[second] = tmp_contig class Contig: def __init__(self, n, mums, step, big_enough): self.id = n self.mum_sequences = [] if (len(mums) > 1): # print "kept : " + str(self.id) + " " + str(mums[0][2]) # mums = Contig.clean(mums, step) mums.sort(key = lambda info: info[0]) Contig.clean(mums, step) if (len(mums) == 0 or (len(mums) == 1 and mums[0][2] < big_enough)): # if only 1 mum, might false results print "Discarding this conting or appending it to the end of the sequence" self.futur = -1 # removal self.first_mum = float("inf") else: self.futur = None self.make_mum_sequences(mums) # each element is a list of mums which follow in the same order, two following elemnts are in different orientation self.first_mum = self.mum_sequences[0].mums[0][0] def make_mum_sequences(self, mums): i = 1 orientation = mums[0][5] j = 0 if (len(mums) == 1): self.mum_sequences.append(Mum_sequence(orientation, mums)) return while(i < len(mums)): if (mums[i][5] != orientation): self.mum_sequences.append(Mum_sequence(orientation, mums[j:i])) orientation = mums[i][5] j = i elif (abs(mums[i-1][1] - mums[i][1]) > 2000000): # arbitrary value that is much bigger than any small jump 
that could happen, but smaller than the size of the genome self.mum_sequences.append(Mum_sequence(orientation, mums[j:i])) orientation = mums[i][5] j = i i += 1 # if (i != 0): self.mum_sequences.append(Mum_sequence(orientation, mums[j:i])) @staticmethod def clean(mums, step): continuating = [False for i in mums] for i in xrange(len(mums)): ### currently this removes inversions that would span only 1 mum if (continuating[i] is False): if i != 0: # if NOT the first one --> check continuity with previous for j in xrange(i): # searching if any mum before it continues it if mums[i][5] == 0: if ((mums[j][1] + mums[j][2] < mums[i][1] + step) and (mums[j][1] + mums[j][2] > mums[i][1] - step) and (mums[j][0] + mums[j][2] < mums[i][0] + step) and (mums[j][0] + mums[j][2] > mums[i][0] - step)): continuating[j] = True continuating[j] = True break else: if ((mums[j][1] < mums[i][1] + mums[i][2] + step) and (mums[j][1] > mums[i][1] + mums[i][2] - step) and (mums[j][0] + mums[j][2] < mums[i][0] + step) and (mums[j][0] + mums[j][2] > mums[i][0] - step)): continuating[j] = True continuating[i] = True break if i != len(mums): # if NOT the last one --> check continuity with next for j in xrange(i+1, len(mums)): # searching if any mum after it continues it if mums[i][5] == 0: if ((mums[j][1] > mums[i][1] + mums[i][2] - step) and (mums[j][1] < mums[i][1] + mums[i][2] + step) and (mums[j][0] > mums[i][0] + mums[i][2] - step) and (mums[j][0] < mums[i][0] + mums[i][2] + step)): continuating[j] = True continuating[i] = True break else: if ((mums[j][1] + mums[j][2] > mums[i][1] - step) and (mums[j][1] + mums[j][2] < mums[i][1] + step) and (mums[j][0] < mums[i][0] + mums[i][2] + step) and (mums[j][0] > mums[i][0] + mums[i][2] - step)): continuating[j] = True continuating[i] = True break i = 0 while (i < len(mums)): if (continuating[i] is False): continuating.pop(i) mums.pop(i) # print "Removing a mum of size " + str(tmp[2]) continue i += 1 # return mums def verify_heights(self, j, 
first_s_id): i
def search_true_first_sequence(self, start, step): j = 0 while (j < len(self.mum_sequences) and (self.mum_sequences[j].start < start - step)): j += 1 return j def get_position_before_gap(self, step): i = 1 position = self.mum_sequences[0].end while (i < len(self.mum_sequences)): if (self.mum_sequences[i].start > position + step): return position else: position = self.mum_sequences[i].end i += 1 return position def compare_heights(self, i, j): # True = i lower than j, False = i higher than j, with regards to futur orientation if (self.mum_sequences[i].height < self.mum_sequences[j].height): if (self.futur == 0): return True else: return False if (self.futur == 0): return False return True def find_rolling_gap(self): down = float("inf") up = 0 i = -1 j = -1 k = 0 ## need to find highest and lowest seq of mums while (k < len(self.mum_sequences)): if self.mum_sequences[k].height > up: up = self.mum_sequences[k].height j = k if self.mum_sequences[k].height < down: down = self.mum_sequences[k].height i = k k += 1 if (i < j): return i else: return j class Mum_sequence: def __init__(self, orientation, mums): self.orientation = orientation # 0 or 1, direct or reverse self.mums = mums self.start = mums[0][0] self.end = mums[len(mums)-1][0] + mums[len(mums)-1][2] self.height = None self.calculate_height(mums) def calculate_height(self, mums): # receives a list of successive mums all direct or reverse tmp = 0 i = 0 for mum in mums: i += 1 tmp += mum[1] try: self.height = tmp/i except ZeroDivisionError: self.height = 0 print "zero div"
= j + 1 start_height = self.mum_sequences[j].height current_height = start_height restarted = False while (i < len(self.mum_sequences)): if (self.mum_sequences[i].height > current_height): # current_height = self.mum_sequences[i].height ## ADDED THIS i += 1 continue elif (self.mum_sequences[i].height < start_height and not restarted and first_s_id == self.id): restarted = True current_height = self.mum_sequences[i].height else: if (i+1 == len(self.mum_sequences)): return 2 # need to reverse this contig return -1 # error somewhere i += 1 if (restarted == True): return 1 # will need to roll return 0 # linear (but might still have a gap and two different orientations)
identifier_body
assembly_finishing_objects.py
# -*- coding: utf-8 -*- """ @author : c_georgescu """ class Sequence: def __init__(self, index, mums, step, big_enough): self.contigs = [] self.discarded_contigs = [] tmp = Sequence.sort_mums(index, mums) i = 0 while i < len(tmp): self.contigs.append(Contig(i+1, tmp[i], step, big_enough)) i += 1 self.contigs.sort(key = lambda c: c.first_mum) print "The order in which the contigs should be concatenated is :\n" + str([(c.id, len(c.mum_sequences)) for c in self.contigs]) i = 0 while (i < len(self.contigs)): if (len(self.contigs[i].mum_sequences) == 0): self.discarded_contigs.append(self.contigs.pop(i)) continue i += 1 # setting the first contig orientation if (self.contigs[0].mum_sequences[0].mums[0][5] == 1): self.contigs[0].futur = 1 else: self.contigs[0].futur = 0 self.orientate(0, 0, 0, float("inf"), False, step) print "The order in which the contigs should be concatenated is :\n" + str([(c.id, c.futur) for c in self.contigs]) def orientate(self, current, orientation, start, end, recursion, step): while current < len(self.contigs): c = self.contigs[current] if (c.first_mum == float("inf")): # if arrived at the end where contigs to be discarded or appended to the end are return if (c.mum_sequences[0].start > end): return # arrived at the end of the gap that called this recursion j = 0 if (c.futur == None or current == 0): j = c.search_true_first_sequence(start, 5*step) # info needed for verify height too, not only for setting futur if (c.futur == None): # searching if there is remaining noise that has inverted contigs if ((current + 1 < sum(k > 0 for k in [len(cont.mum_sequences) for cont in self.contigs]))): # are they others contigs left after it if self.contigs[current+1].search_true_first_sequence(start, step)==len(self.contigs[current+1].mum_sequences): pass elif (j != len(c.mum_sequences) # they are useful mum_sequences left and c.mum_sequences[j].start > self.contigs[current+1].mum_sequences[self.contigs[current+1].search_true_first_sequence(start, 
step)].start): # could it fit after the next contig self.swap_contigs(current, current + 1) continue # restart the loop if(j == len(c.mum_sequences)): # all mum_sequences fit on the already treated part, so we discard it current += 1 c.futur = None continue # restart the loop res = c.verify_heights(j, self.contigs[0].id) if (res == 0 or res == 1): #### maybe : find between which seqs is the vertical gap and use the orientaton of the one at the start to set orientation if (len(c.mum_sequences) > j+1): # change to a while? in case 0 1 0 gap 1 if ((c.mum_sequences[j+1].start - c.mum_sequences[j].end > step) and (c.mum_sequences[j] != c.mum_sequences[j+1])): c.futur = 0 if (c.futur == None): if c.mum_sequences[j].orientation == orientation: c.futur = 0 else:
if (res == 1): # rolling case print "\n\nHERE\n\n" # search for vertical gap between a and b # while (compare_heights(a, b)): ## TODO need a function to check when it goes from top to bottom gap, if direct/direct, fisrt higher, if reverse/reverse, first lower res = c.find_rolling_gap() # call recursion on current + 1 with start = a.end and end = b.start # if (c.futur == 0): # if (c.mum_sequences[res].orientation == 0): # orientation = 1 # else: # orientation = 0 # else: if (c.futur == 1): if (c.mum_sequences[res].orientation == 0): orientation = 1 else: orientation = 0 else: orientation = c.mum_sequences[res].orientation self.orientate(current + 1, orientation, c.mum_sequences[res].end, c.mum_sequences[res+1].start, True, step) ### TODO restaurer l'orientation!! à celle de b if (c.futur == 1): if (c.mum_sequences[res+1].orientation == 0): orientation = 1 else: orientation = 0 else: orientation = c.mum_sequences[res+1].orientation current += 1 continue elif (res == 2): c.futur = 1 if c.mum_sequences[j].orientation == 0: # since it will be reversed orientation = 1 else: orientation = 0 j += 1 while (j < len(c.mum_sequences)): if (c.mum_sequences[j].start < start - step): j += 1 continue # not to change the start value if (c.mum_sequences[j].start - c.mum_sequences[j-1].end < step): # maybe n times step? 
if (c.futur == 0): # the contig is in the good orientation, so .orientation is true orientation = c.mum_sequences[j].orientation elif c.mum_sequences[j].orientation == 0: # the contig will be reversed, so the actual orientation is the opposite of .orientation orientation = 1 else: orientation = 0 else: # a gap can only happen when the two sequences around it are in different orientations ###### FALSE when there's need to roll, and it is of lower height if (c.compare_heights(j-1, j)): #c.mum_sequences[j-1].height < c.mum_sequences[j].height): # good if (not recursion): if orientation == 0: orientation = 1 # else: # orientation = 0 else: break # print "\n\n\n\n\n\nINVERSED CONCATENATION ORDER\n\n\n\n\n\n" tmp_start = c.get_position_before_gap(step) self.orientate(current + 1, orientation, tmp_start, c.mum_sequences[j].start, True, step) # search from here to which contig of the list, the first mums start are smaller than rev, and reverse that length rev = self.search_rev(current+1, c.mum_sequences[j].start) # self.contigs[current + 1: rev] = reversed(self.contigs[current + 1:rev]) #################### tmp = None if (c.mum_sequences[j-1].orientation == 1): # if the first mum sequence is reverse if (c.futur == 0): tmp = current else: tmp = current + 1 else: if (c.futur == 0): tmp = current +1 else: tmp = current self.contigs[tmp : rev] = reversed(self.contigs[tmp : rev]) #################### orientation = 0 current = rev-1 j += 1 break # because a gap means that j is the last mum_sequence of the list # else: # this one should be reversed, unless there's translocation # if c.futur == 0: # print "/!\\problem HEREEEE" # unless on the starting contig for which rolling might happen # else: # print "\n\n\n\n\nHERE\n\n\n\n" j += 1 # start = c.get_position_before_gap(step) start = c.mum_sequences[j-1].end current += 1 @staticmethod def sort_mums(index, mums): mums_of_contigs = [[] for v in xrange(len(index.graph.vertices)-1)] for m in mums: mums_of_contigs[m[4].id - 
1].append(m) return mums_of_contigs def search_rev(self, i, limit): while(i < len(self.contigs) and self.contigs[i].mum_sequences[0].start < limit): i += 1 return i def swap_contigs(self, first, second): tmp_contig = self.contigs[first] self.contigs[first] = self.contigs[second] self.contigs[second] = tmp_contig class Contig: def __init__(self, n, mums, step, big_enough): self.id = n self.mum_sequences = [] if (len(mums) > 1): # print "kept : " + str(self.id) + " " + str(mums[0][2]) # mums = Contig.clean(mums, step) mums.sort(key = lambda info: info[0]) Contig.clean(mums, step) if (len(mums) == 0 or (len(mums) == 1 and mums[0][2] < big_enough)): # if only 1 mum, might false results print "Discarding this conting or appending it to the end of the sequence" self.futur = -1 # removal self.first_mum = float("inf") else: self.futur = None self.make_mum_sequences(mums) # each element is a list of mums which follow in the same order, two following elemnts are in different orientation self.first_mum = self.mum_sequences[0].mums[0][0] def make_mum_sequences(self, mums): i = 1 orientation = mums[0][5] j = 0 if (len(mums) == 1): self.mum_sequences.append(Mum_sequence(orientation, mums)) return while(i < len(mums)): if (mums[i][5] != orientation): self.mum_sequences.append(Mum_sequence(orientation, mums[j:i])) orientation = mums[i][5] j = i elif (abs(mums[i-1][1] - mums[i][1]) > 2000000): # arbitrary value that is much bigger than any small jump that could happen, but smaller than the size of the genome self.mum_sequences.append(Mum_sequence(orientation, mums[j:i])) orientation = mums[i][5] j = i i += 1 # if (i != 0): self.mum_sequences.append(Mum_sequence(orientation, mums[j:i])) @staticmethod def clean(mums, step): continuating = [False for i in mums] for i in xrange(len(mums)): ### currently this removes inversions that would span only 1 mum if (continuating[i] is False): if i != 0: # if NOT the first one --> check continuity with previous for j in xrange(i): # searching if 
any mum before it continues it if mums[i][5] == 0: if ((mums[j][1] + mums[j][2] < mums[i][1] + step) and (mums[j][1] + mums[j][2] > mums[i][1] - step) and (mums[j][0] + mums[j][2] < mums[i][0] + step) and (mums[j][0] + mums[j][2] > mums[i][0] - step)): continuating[j] = True continuating[j] = True break else: if ((mums[j][1] < mums[i][1] + mums[i][2] + step) and (mums[j][1] > mums[i][1] + mums[i][2] - step) and (mums[j][0] + mums[j][2] < mums[i][0] + step) and (mums[j][0] + mums[j][2] > mums[i][0] - step)): continuating[j] = True continuating[i] = True break if i != len(mums): # if NOT the last one --> check continuity with next for j in xrange(i+1, len(mums)): # searching if any mum after it continues it if mums[i][5] == 0: if ((mums[j][1] > mums[i][1] + mums[i][2] - step) and (mums[j][1] < mums[i][1] + mums[i][2] + step) and (mums[j][0] > mums[i][0] + mums[i][2] - step) and (mums[j][0] < mums[i][0] + mums[i][2] + step)): continuating[j] = True continuating[i] = True break else: if ((mums[j][1] + mums[j][2] > mums[i][1] - step) and (mums[j][1] + mums[j][2] < mums[i][1] + step) and (mums[j][0] < mums[i][0] + mums[i][2] + step) and (mums[j][0] > mums[i][0] + mums[i][2] - step)): continuating[j] = True continuating[i] = True break i = 0 while (i < len(mums)): if (continuating[i] is False): continuating.pop(i) mums.pop(i) # print "Removing a mum of size " + str(tmp[2]) continue i += 1 # return mums def verify_heights(self, j, first_s_id): i = j + 1 start_height = self.mum_sequences[j].height current_height = start_height restarted = False while (i < len(self.mum_sequences)): if (self.mum_sequences[i].height > current_height): # current_height = self.mum_sequences[i].height ## ADDED THIS i += 1 continue elif (self.mum_sequences[i].height < start_height and not restarted and first_s_id == self.id): restarted = True current_height = self.mum_sequences[i].height else: if (i+1 == len(self.mum_sequences)): return 2 # need to reverse this contig return -1 # error somewhere i 
+= 1 if (restarted == True): return 1 # will need to roll return 0 # linear (but might still have a gap and two different orientations) def search_true_first_sequence(self, start, step): j = 0 while (j < len(self.mum_sequences) and (self.mum_sequences[j].start < start - step)): j += 1 return j def get_position_before_gap(self, step): i = 1 position = self.mum_sequences[0].end while (i < len(self.mum_sequences)): if (self.mum_sequences[i].start > position + step): return position else: position = self.mum_sequences[i].end i += 1 return position def compare_heights(self, i, j): # True = i lower than j, False = i higher than j, with regards to futur orientation if (self.mum_sequences[i].height < self.mum_sequences[j].height): if (self.futur == 0): return True else: return False if (self.futur == 0): return False return True def find_rolling_gap(self): down = float("inf") up = 0 i = -1 j = -1 k = 0 ## need to find highest and lowest seq of mums while (k < len(self.mum_sequences)): if self.mum_sequences[k].height > up: up = self.mum_sequences[k].height j = k if self.mum_sequences[k].height < down: down = self.mum_sequences[k].height i = k k += 1 if (i < j): return i else: return j class Mum_sequence: def __init__(self, orientation, mums): self.orientation = orientation # 0 or 1, direct or reverse self.mums = mums self.start = mums[0][0] self.end = mums[len(mums)-1][0] + mums[len(mums)-1][2] self.height = None self.calculate_height(mums) def calculate_height(self, mums): # receives a list of successive mums all direct or reverse tmp = 0 i = 0 for mum in mums: i += 1 tmp += mum[1] try: self.height = tmp/i except ZeroDivisionError: self.height = 0 print "zero div"
c.futur = 1
conditional_block
Ch11. Ex.py
"""1. Write a program that allows the user to enter a string. It then prints a table of the letters of the alphabet in alphabetical order which occur in the string together with the number of times each letter occurs. Case should be ignored. A sample run of the program might look like this: Please enter a sentence: ThiS is a String with Upper and lower case Letters. a 3 c 1 d 1 e 5 g 1 h 2 i 4 l 2 n 2 o 1 p 2 r 4 s 5 t 5 u 1 w 2 $ """ def create_dict(str): my_dict = {} for letter in str: if letter.isalpha(): letter = letter.lower() if letter not in my_dict: my_dict[letter] = 1 else: my_dict[letter] += 1 return my_dict def print_dict(my_dict): keys = list(my_dict.keys()) keys.sort() for key in keys: print(key, " ", my_dict[key]) def main(): text = input("Please enter a sentence: ") chars = create_dict(text) print_dict(chars) if __name__ == "__main__": main() """2. Write a program that will function as a grade book, allowing a user (a professor or teacher) to enter the class roster for a course, along with each student’s cumulative grade. It then prints the class roster along with the average cumulative grade. Grades are on a 0-100 percentage scale. Use 2 lists (grades and students) and the enumerate function in your solution. A test run of this program would yield the following: # this is the first batch of input the user would enter Chris Jesse Sally # this is the second batch of input the user would enter Grade for Chris: 90 Grade for Jesse: 80 Grade for Sally: 70 # below is what your program should output Class roster: Chris (90.0) Jesse (80.0) Sally (70.0) Average grade: 80.0 """ import sys sys.setExecutionLimit(70000) students = [] grades = [] total_score = 0.0 name = input("Enter the name of a student. (When finished, enter nothing)") while (name != ""): students += [name] name = input("Enter the name of a student. 
(When finished, enter nothing)") for i in range(len(students)): score = float(input("Grade for {0}:".format(students[i]))) grades += [score] print("Class roster:") for index, student in enumerate(students): total_score += grades[index] print("{0} ({1:.1})".format(student, grades[index])) print("\nAverage grade:", (total_score / len(students))) #3. Implement the functionality of the above program using a dictionary instead of a list. import sys sys.setExecutionLimit(70000) students = {} total_score = 0.0 name = input("Enter the name of a student. (When finished, enter nothing)") while (name != ""): students[name] = 0.0 name = input("Enter the name of a student. (When finished, enter nothing)") print("Class roster:") for student in students.keys(): score = float(input("Grade for {0}:".format(student))) students[student] = score total_score += students[student] print("{0} ({1:.1})".format(student, students[student])) print("\nAverage grade:", (total_score / len(students))) """4. Make a dictionary where the key is a worker’s name, and the value is a list where the first element is the clock in time, second element is the clock out time, and the third element is the total hours worked that day. Each worker’s list starts at [0, 0, 0]. Create functions for clock_in and clock_out. clock_in takes the dictionary of workers, the name of the worker, and the clock in time as parameters. When the worker clocks in, enter and save their clock in time as the first element in the associated list value. clock_out takes the same parameters, but with a clock out time instead of clock in time. When the worker clocks out, enter and save their clock out time and calculate the hours worked for that day and store it as the third element in the list. To make this program a little easier, we’re entering the clock in and clock out times as integers. 
As a bonus mission, try adding the times as strings representing the 24 hour clock (e.g., "08:00"), and then figure out how to calculate the time worked. And you can do this exercise either by aliasing or copying the dictionary.""" def clock_in(worker_dict, name, clock_in_time): worker_dict[name][0] = clock_in_time def clock_out(worker_dict, name, clock_out_time): worker_dict[name][1] = clock_out_time worker_dict[name][2] = clock_out_time - worker_dict[name][0] def main(): workers = {"George Spelvin": [0,0,0], "Jane Doe": [0,0,0], "John Smith": [0,0,0]} print(workers.get("George Spelvin")) # should print [0,0,0] clock_in(workers, "George Spelvin", 8) clock_out(workers, "George Spelvin", 17) print(workers.get("George Spelvin")) # should print [8, 17, 9] if __name__ == "__main__": main() """5. Here’s a table of English to Pirate translations: English Pirate sir matey hotel fleabag inn student swabbie boy matey madam proud beauty professor foul blaggart restaurant galley your yer excuse arr students swabbies are be lawyer foul blaggart restroom th’ head my me hello avast is be man matey Write a program that asks the user for a sentence in English and then translates that sentence to Pirate.""" from test import testEqual def translate(text): # your code here! 
pirate_text = "" my_dict = {"sir" : "matey", "hotel" : "fleabag inn", "student" : "swabbie", "boy" : "matey", "madam" : "proud beauty", "professor" : "foul blaggart", "restaurant" : "galley", "your" : "yer", "excuse" : "arr", "students" : "swabbies", "are" : "be", "lawyer" : "foul blaggart", "restroom" : "head", "my" : "me", "the" : "th'", "hello" : "avast", "is" : "be", "man" : "matey"} word_list = text.split() index = 0 pirate_words = [] for word in word_list: char = "" pirate_word = "" if (word.isalpha() == False): char = word[len(word)-1] new_word = word[:len(word)-1] else: new_word = word #print("!"+new_word+"!") if new_word in my_dict: pirate_word = my_dict[new_word] else: pirate_word = new_word pirate_word += char index += 1 pirate_words += [pirate_word] pirate_text = " ".join(pirate_words) #print(my_dict, "\n\n") print(pirate_text) return pirate_text text = "hello my man, please excuse your professor to the restroom!" testEqual(translate(text), "avast me matey, please arr yer foul blaggart to th' head!") """6. Give the Python interpreter’s response to each of the following from a continuous interpreter session: >>> d = {'apples': 15, 'bananas': 35, 'grapes': 12} >>> d['bananas'] >>> d['oranges'] = 20 >>> len(d) >>> 'grapes' in d >>> d['pears'] >>> d.get('pears', 0) >>> fruits = d.keys() >>> sorted(fruits) >>> print(fruits) >>> del d['apples'] >>> 'apples' in d Be sure you understand why you get each result. """ from test import testEqual # Note: The pass is a placeholder to allow # the code to compile. Remove it when you # begin coding. def set_inventory(inventory, fruit, quantity=0): inventory[fruit] = quantity # make these tests work... 
new_inventory = {} set_inventory(new_inventory, 'strawberries', 10) testEqual('strawberries' in new_inventory, True) testEqual(new_inventory['strawberries'], 10) set_inventory(new_inventory, 'strawberries', 25) testEqual(new_inventory['strawberries'] , 25) """Weekly Graded Assignment Write a sort_contacts function that takes a dictionary of contacts as a parameter and returns a sorted list of those contacts, where each contact is a tuple. The contacts dictionary that will be passed into the function has the contact name as its key, and the value is a tuple containing the phone number and email for the contact. contacts = {name: (phone, email), name: (phone, email), etc.} The sort_contacts function should then create a new, sorted (by last name) list of tuples representing all of the contact info (one tuple for each contact) that was in the dictionary. It should then return this list to the calling function. For example, given a dictionary argument of: {"Horney, Karen": ("1-541-656-3010", "karen@psychoanalysis.com"), "Welles, Orson": ("1-312-720-8888", "orson@notlive.com"), "Freud, Anna": ("1-541-754-3010", "anna@psychoanalysis.com")} sort_contacts should return this: [('Freud, Anna', '1-541-754-3010', 'anna@psychoanalysis.com'), ('Horney, Karen', '1-541-656-3010', 'karen@psychoanalysis.com'), ('Welles, Orson', '1-312-720-8888', 'orson@notlive.com')] """ # Create sort_contacts function def sort_contacts(
"""Sorts a dictionary of contacts""" key_list = list(contacts.keys()) #get keys key_list.sort() #sort key_list sorted_list = [] #initialize sorted list for key in key_list: contact = (key, contacts[key][0], contacts[key][1]) #create tuple sorted_list += [contact] #add tuple to list return(sorted_list) # The code below is just for your testing purposes. Make sure you pass all the tests. # In Vocareum, only put code for the sort_contacts function above from test import testEqual testEqual(sort_contacts({"Horney, Karen": ("1-541-656-3010", "karen@psychoanalysis.com"), "Welles, Orson": ("1-312-720-8888", "orson@notlive.com"), "Freud, Anna": ("1-541-754-3010", "anna@psychoanalysis.com")}), [('Freud, Anna', '1-541-754-3010', 'anna@psychoanalysis.com'), ('Horney, Karen', '1-541-656-3010', 'karen@psychoanalysis.com'), ('Welles, Orson', '1-312-720-8888', 'orson@notlive.com')]) testEqual(sort_contacts({"Summitt, Pat": ("1-865-355-4320", "pat@greatcoaches.com"), "Rudolph, Wilma": ("1-410-5313-584", "wilma@olympians.com")}), [('Rudolph, Wilma', '1-410-5313-584', 'wilma@olympians.com'), ('Summitt, Pat', '1-865-355-4320', 'pat@greatcoaches.com')]) testEqual(sort_contacts({"Dinesen, Isak": ("1-718-939-2548", "isak@storytellers.com")}), [('Dinesen, Isak', '1-718-939-2548', 'isak@storytellers.com')]) testEqual(sort_contacts({"Rimbaud, Arthur": ("1-636-555-5555", "arthur@notlive.com"), "Swinton, Tilda": ("1-917-222-2222", "tilda@greatActors.com"), "Almodovar, Pedro": ("1-990-622-3892", "pedro@filmbuffs.com"), "Kandinsky, Wassily": ("1-333-555-9999", "kandinsky@painters.com")}), [('Almodovar, Pedro', '1-990-622-3892', 'pedro@filmbuffs.com'), ('Kandinsky, Wassily', '1-333-555-9999', 'kandinsky@painters.com'), ('Rimbaud, Arthur', '1-636-555-5555', 'arthur@notlive.com'), ('Swinton, Tilda', '1-917-222-2222', 'tilda@greatActors.com')])
contacts):
identifier_name
Ch11. Ex.py
"""1. Write a program that allows the user to enter a string. It then prints a table of the letters of the alphabet in alphabetical order which occur in the string together with the number of times each letter occurs. Case should be ignored. A sample run of the program might look like this: Please enter a sentence: ThiS is a String with Upper and lower case Letters. a 3 c 1 d 1 e 5 g 1 h 2 i 4 l 2 n 2 o 1 p 2 r 4 s 5 t 5 u 1 w 2 $ """ def create_dict(str): my_dict = {} for letter in str: if letter.isalpha(): letter = letter.lower() if letter not in my_dict: my_dict[letter] = 1 else: my_dict[letter] += 1 return my_dict def print_dict(my_dict): keys = list(my_dict.keys()) keys.sort() for key in keys: print(key, " ", my_dict[key]) def main(): text = input("Please enter a sentence: ") chars = create_dict(text) print_dict(chars) if __name__ == "__main__": main() """2. Write a program that will function as a grade book, allowing a user (a professor or teacher) to enter the class roster for a course, along with each student’s cumulative grade. It then prints the class roster along with the average cumulative grade. Grades are on a 0-100 percentage scale. Use 2 lists (grades and students) and the enumerate function in your solution. A test run of this program would yield the following: # this is the first batch of input the user would enter Chris Jesse Sally # this is the second batch of input the user would enter Grade for Chris: 90 Grade for Jesse: 80 Grade for Sally: 70 # below is what your program should output Class roster: Chris (90.0) Jesse (80.0) Sally (70.0) Average grade: 80.0 """ import sys sys.setExecutionLimit(70000) students = [] grades = [] total_score = 0.0 name = input("Enter the name of a student. (When finished, enter nothing)") while (name != ""): students += [name] name = input("Enter the name of a student. 
(When finished, enter nothing)") for i in range(len(students)): score = float(input("Grade for {0}:".format(students[i]))) grades += [score] print("Class roster:") for index, student in enumerate(students): to
print("\nAverage grade:", (total_score / len(students))) #3. Implement the functionality of the above program using a dictionary instead of a list. import sys sys.setExecutionLimit(70000) students = {} total_score = 0.0 name = input("Enter the name of a student. (When finished, enter nothing)") while (name != ""): students[name] = 0.0 name = input("Enter the name of a student. (When finished, enter nothing)") print("Class roster:") for student in students.keys(): score = float(input("Grade for {0}:".format(student))) students[student] = score total_score += students[student] print("{0} ({1:.1})".format(student, students[student])) print("\nAverage grade:", (total_score / len(students))) """4. Make a dictionary where the key is a worker’s name, and the value is a list where the first element is the clock in time, second element is the clock out time, and the third element is the total hours worked that day. Each worker’s list starts at [0, 0, 0]. Create functions for clock_in and clock_out. clock_in takes the dictionary of workers, the name of the worker, and the clock in time as parameters. When the worker clocks in, enter and save their clock in time as the first element in the associated list value. clock_out takes the same parameters, but with a clock out time instead of clock in time. When the worker clocks out, enter and save their clock out time and calculate the hours worked for that day and store it as the third element in the list. To make this program a little easier, we’re entering the clock in and clock out times as integers. As a bonus mission, try adding the times as strings representing the 24 hour clock (e.g., "08:00"), and then figure out how to calculate the time worked. 
And you can do this exercise either by aliasing or copying the dictionary.""" def clock_in(worker_dict, name, clock_in_time): worker_dict[name][0] = clock_in_time def clock_out(worker_dict, name, clock_out_time): worker_dict[name][1] = clock_out_time worker_dict[name][2] = clock_out_time - worker_dict[name][0] def main(): workers = {"George Spelvin": [0,0,0], "Jane Doe": [0,0,0], "John Smith": [0,0,0]} print(workers.get("George Spelvin")) # should print [0,0,0] clock_in(workers, "George Spelvin", 8) clock_out(workers, "George Spelvin", 17) print(workers.get("George Spelvin")) # should print [8, 17, 9] if __name__ == "__main__": main() """5. Here’s a table of English to Pirate translations: English Pirate sir matey hotel fleabag inn student swabbie boy matey madam proud beauty professor foul blaggart restaurant galley your yer excuse arr students swabbies are be lawyer foul blaggart restroom th’ head my me hello avast is be man matey Write a program that asks the user for a sentence in English and then translates that sentence to Pirate.""" from test import testEqual def translate(text): # your code here! 
pirate_text = "" my_dict = {"sir" : "matey", "hotel" : "fleabag inn", "student" : "swabbie", "boy" : "matey", "madam" : "proud beauty", "professor" : "foul blaggart", "restaurant" : "galley", "your" : "yer", "excuse" : "arr", "students" : "swabbies", "are" : "be", "lawyer" : "foul blaggart", "restroom" : "head", "my" : "me", "the" : "th'", "hello" : "avast", "is" : "be", "man" : "matey"} word_list = text.split() index = 0 pirate_words = [] for word in word_list: char = "" pirate_word = "" if (word.isalpha() == False): char = word[len(word)-1] new_word = word[:len(word)-1] else: new_word = word #print("!"+new_word+"!") if new_word in my_dict: pirate_word = my_dict[new_word] else: pirate_word = new_word pirate_word += char index += 1 pirate_words += [pirate_word] pirate_text = " ".join(pirate_words) #print(my_dict, "\n\n") print(pirate_text) return pirate_text text = "hello my man, please excuse your professor to the restroom!" testEqual(translate(text), "avast me matey, please arr yer foul blaggart to th' head!") """6. Give the Python interpreter’s response to each of the following from a continuous interpreter session: >>> d = {'apples': 15, 'bananas': 35, 'grapes': 12} >>> d['bananas'] >>> d['oranges'] = 20 >>> len(d) >>> 'grapes' in d >>> d['pears'] >>> d.get('pears', 0) >>> fruits = d.keys() >>> sorted(fruits) >>> print(fruits) >>> del d['apples'] >>> 'apples' in d Be sure you understand why you get each result. """ from test import testEqual # Note: The pass is a placeholder to allow # the code to compile. Remove it when you # begin coding. def set_inventory(inventory, fruit, quantity=0): inventory[fruit] = quantity # make these tests work... 
new_inventory = {} set_inventory(new_inventory, 'strawberries', 10) testEqual('strawberries' in new_inventory, True) testEqual(new_inventory['strawberries'], 10) set_inventory(new_inventory, 'strawberries', 25) testEqual(new_inventory['strawberries'] , 25) """Weekly Graded Assignment Write a sort_contacts function that takes a dictionary of contacts as a parameter and returns a sorted list of those contacts, where each contact is a tuple. The contacts dictionary that will be passed into the function has the contact name as its key, and the value is a tuple containing the phone number and email for the contact. contacts = {name: (phone, email), name: (phone, email), etc.} The sort_contacts function should then create a new, sorted (by last name) list of tuples representing all of the contact info (one tuple for each contact) that was in the dictionary. It should then return this list to the calling function. For example, given a dictionary argument of: {"Horney, Karen": ("1-541-656-3010", "karen@psychoanalysis.com"), "Welles, Orson": ("1-312-720-8888", "orson@notlive.com"), "Freud, Anna": ("1-541-754-3010", "anna@psychoanalysis.com")} sort_contacts should return this: [('Freud, Anna', '1-541-754-3010', 'anna@psychoanalysis.com'), ('Horney, Karen', '1-541-656-3010', 'karen@psychoanalysis.com'), ('Welles, Orson', '1-312-720-8888', 'orson@notlive.com')] """ # Create sort_contacts function def sort_contacts(contacts): """Sorts a dictionary of contacts""" key_list = list(contacts.keys()) #get keys key_list.sort() #sort key_list sorted_list = [] #initialize sorted list for key in key_list: contact = (key, contacts[key][0], contacts[key][1]) #create tuple sorted_list += [contact] #add tuple to list return(sorted_list) # The code below is just for your testing purposes. Make sure you pass all the tests. 
# In Vocareum, only put code for the sort_contacts function above from test import testEqual testEqual(sort_contacts({"Horney, Karen": ("1-541-656-3010", "karen@psychoanalysis.com"), "Welles, Orson": ("1-312-720-8888", "orson@notlive.com"), "Freud, Anna": ("1-541-754-3010", "anna@psychoanalysis.com")}), [('Freud, Anna', '1-541-754-3010', 'anna@psychoanalysis.com'), ('Horney, Karen', '1-541-656-3010', 'karen@psychoanalysis.com'), ('Welles, Orson', '1-312-720-8888', 'orson@notlive.com')]) testEqual(sort_contacts({"Summitt, Pat": ("1-865-355-4320", "pat@greatcoaches.com"), "Rudolph, Wilma": ("1-410-5313-584", "wilma@olympians.com")}), [('Rudolph, Wilma', '1-410-5313-584', 'wilma@olympians.com'), ('Summitt, Pat', '1-865-355-4320', 'pat@greatcoaches.com')]) testEqual(sort_contacts({"Dinesen, Isak": ("1-718-939-2548", "isak@storytellers.com")}), [('Dinesen, Isak', '1-718-939-2548', 'isak@storytellers.com')]) testEqual(sort_contacts({"Rimbaud, Arthur": ("1-636-555-5555", "arthur@notlive.com"), "Swinton, Tilda": ("1-917-222-2222", "tilda@greatActors.com"), "Almodovar, Pedro": ("1-990-622-3892", "pedro@filmbuffs.com"), "Kandinsky, Wassily": ("1-333-555-9999", "kandinsky@painters.com")}), [('Almodovar, Pedro', '1-990-622-3892', 'pedro@filmbuffs.com'), ('Kandinsky, Wassily', '1-333-555-9999', 'kandinsky@painters.com'), ('Rimbaud, Arthur', '1-636-555-5555', 'arthur@notlive.com'), ('Swinton, Tilda', '1-917-222-2222', 'tilda@greatActors.com')])
tal_score += grades[index] print("{0} ({1:.1})".format(student, grades[index]))
conditional_block
Ch11. Ex.py
"""1. Write a program that allows the user to enter a string. It then prints a table of the letters of the alphabet in alphabetical order which occur in the string together with the number of times each letter occurs. Case should be ignored. A sample run of the program might look like this: Please enter a sentence: ThiS is a String with Upper and lower case Letters. a 3 c 1 d 1 e 5 g 1 h 2 i 4 l 2 n 2 o 1 p 2 r 4 s 5 t 5 u 1 w 2 $ """ def create_dict(str): my_dict = {} for letter in str: if letter.isalpha(): letter = letter.lower() if letter not in my_dict: my_dict[letter] = 1 else: my_dict[letter] += 1 return my_dict def print_dict(my_dict): keys = list(my_dict.keys()) keys.sort() for key in keys: print(key, " ", my_dict[key]) def main():
if __name__ == "__main__": main() """2. Write a program that will function as a grade book, allowing a user (a professor or teacher) to enter the class roster for a course, along with each student’s cumulative grade. It then prints the class roster along with the average cumulative grade. Grades are on a 0-100 percentage scale. Use 2 lists (grades and students) and the enumerate function in your solution. A test run of this program would yield the following: # this is the first batch of input the user would enter Chris Jesse Sally # this is the second batch of input the user would enter Grade for Chris: 90 Grade for Jesse: 80 Grade for Sally: 70 # below is what your program should output Class roster: Chris (90.0) Jesse (80.0) Sally (70.0) Average grade: 80.0 """ import sys sys.setExecutionLimit(70000) students = [] grades = [] total_score = 0.0 name = input("Enter the name of a student. (When finished, enter nothing)") while (name != ""): students += [name] name = input("Enter the name of a student. (When finished, enter nothing)") for i in range(len(students)): score = float(input("Grade for {0}:".format(students[i]))) grades += [score] print("Class roster:") for index, student in enumerate(students): total_score += grades[index] print("{0} ({1:.1})".format(student, grades[index])) print("\nAverage grade:", (total_score / len(students))) #3. Implement the functionality of the above program using a dictionary instead of a list. import sys sys.setExecutionLimit(70000) students = {} total_score = 0.0 name = input("Enter the name of a student. (When finished, enter nothing)") while (name != ""): students[name] = 0.0 name = input("Enter the name of a student. 
(When finished, enter nothing)") print("Class roster:") for student in students.keys(): score = float(input("Grade for {0}:".format(student))) students[student] = score total_score += students[student] print("{0} ({1:.1})".format(student, students[student])) print("\nAverage grade:", (total_score / len(students))) """4. Make a dictionary where the key is a worker’s name, and the value is a list where the first element is the clock in time, second element is the clock out time, and the third element is the total hours worked that day. Each worker’s list starts at [0, 0, 0]. Create functions for clock_in and clock_out. clock_in takes the dictionary of workers, the name of the worker, and the clock in time as parameters. When the worker clocks in, enter and save their clock in time as the first element in the associated list value. clock_out takes the same parameters, but with a clock out time instead of clock in time. When the worker clocks out, enter and save their clock out time and calculate the hours worked for that day and store it as the third element in the list. To make this program a little easier, we’re entering the clock in and clock out times as integers. As a bonus mission, try adding the times as strings representing the 24 hour clock (e.g., "08:00"), and then figure out how to calculate the time worked. And you can do this exercise either by aliasing or copying the dictionary.""" def clock_in(worker_dict, name, clock_in_time): worker_dict[name][0] = clock_in_time def clock_out(worker_dict, name, clock_out_time): worker_dict[name][1] = clock_out_time worker_dict[name][2] = clock_out_time - worker_dict[name][0] def main(): workers = {"George Spelvin": [0,0,0], "Jane Doe": [0,0,0], "John Smith": [0,0,0]} print(workers.get("George Spelvin")) # should print [0,0,0] clock_in(workers, "George Spelvin", 8) clock_out(workers, "George Spelvin", 17) print(workers.get("George Spelvin")) # should print [8, 17, 9] if __name__ == "__main__": main() """5. 
Here’s a table of English to Pirate translations: English Pirate sir matey hotel fleabag inn student swabbie boy matey madam proud beauty professor foul blaggart restaurant galley your yer excuse arr students swabbies are be lawyer foul blaggart restroom th’ head my me hello avast is be man matey Write a program that asks the user for a sentence in English and then translates that sentence to Pirate.""" from test import testEqual def translate(text): # your code here! pirate_text = "" my_dict = {"sir" : "matey", "hotel" : "fleabag inn", "student" : "swabbie", "boy" : "matey", "madam" : "proud beauty", "professor" : "foul blaggart", "restaurant" : "galley", "your" : "yer", "excuse" : "arr", "students" : "swabbies", "are" : "be", "lawyer" : "foul blaggart", "restroom" : "head", "my" : "me", "the" : "th'", "hello" : "avast", "is" : "be", "man" : "matey"} word_list = text.split() index = 0 pirate_words = [] for word in word_list: char = "" pirate_word = "" if (word.isalpha() == False): char = word[len(word)-1] new_word = word[:len(word)-1] else: new_word = word #print("!"+new_word+"!") if new_word in my_dict: pirate_word = my_dict[new_word] else: pirate_word = new_word pirate_word += char index += 1 pirate_words += [pirate_word] pirate_text = " ".join(pirate_words) #print(my_dict, "\n\n") print(pirate_text) return pirate_text text = "hello my man, please excuse your professor to the restroom!" testEqual(translate(text), "avast me matey, please arr yer foul blaggart to th' head!") """6. Give the Python interpreter’s response to each of the following from a continuous interpreter session: >>> d = {'apples': 15, 'bananas': 35, 'grapes': 12} >>> d['bananas'] >>> d['oranges'] = 20 >>> len(d) >>> 'grapes' in d >>> d['pears'] >>> d.get('pears', 0) >>> fruits = d.keys() >>> sorted(fruits) >>> print(fruits) >>> del d['apples'] >>> 'apples' in d Be sure you understand why you get each result. 
""" from test import testEqual # Note: The pass is a placeholder to allow # the code to compile. Remove it when you # begin coding. def set_inventory(inventory, fruit, quantity=0): inventory[fruit] = quantity # make these tests work... new_inventory = {} set_inventory(new_inventory, 'strawberries', 10) testEqual('strawberries' in new_inventory, True) testEqual(new_inventory['strawberries'], 10) set_inventory(new_inventory, 'strawberries', 25) testEqual(new_inventory['strawberries'] , 25) """Weekly Graded Assignment Write a sort_contacts function that takes a dictionary of contacts as a parameter and returns a sorted list of those contacts, where each contact is a tuple. The contacts dictionary that will be passed into the function has the contact name as its key, and the value is a tuple containing the phone number and email for the contact. contacts = {name: (phone, email), name: (phone, email), etc.} The sort_contacts function should then create a new, sorted (by last name) list of tuples representing all of the contact info (one tuple for each contact) that was in the dictionary. It should then return this list to the calling function. 
For example, given a dictionary argument of: {"Horney, Karen": ("1-541-656-3010", "karen@psychoanalysis.com"), "Welles, Orson": ("1-312-720-8888", "orson@notlive.com"), "Freud, Anna": ("1-541-754-3010", "anna@psychoanalysis.com")} sort_contacts should return this: [('Freud, Anna', '1-541-754-3010', 'anna@psychoanalysis.com'), ('Horney, Karen', '1-541-656-3010', 'karen@psychoanalysis.com'), ('Welles, Orson', '1-312-720-8888', 'orson@notlive.com')] """ # Create sort_contacts function def sort_contacts(contacts): """Sorts a dictionary of contacts""" key_list = list(contacts.keys()) #get keys key_list.sort() #sort key_list sorted_list = [] #initialize sorted list for key in key_list: contact = (key, contacts[key][0], contacts[key][1]) #create tuple sorted_list += [contact] #add tuple to list return(sorted_list) # The code below is just for your testing purposes. Make sure you pass all the tests. # In Vocareum, only put code for the sort_contacts function above from test import testEqual testEqual(sort_contacts({"Horney, Karen": ("1-541-656-3010", "karen@psychoanalysis.com"), "Welles, Orson": ("1-312-720-8888", "orson@notlive.com"), "Freud, Anna": ("1-541-754-3010", "anna@psychoanalysis.com")}), [('Freud, Anna', '1-541-754-3010', 'anna@psychoanalysis.com'), ('Horney, Karen', '1-541-656-3010', 'karen@psychoanalysis.com'), ('Welles, Orson', '1-312-720-8888', 'orson@notlive.com')]) testEqual(sort_contacts({"Summitt, Pat": ("1-865-355-4320", "pat@greatcoaches.com"), "Rudolph, Wilma": ("1-410-5313-584", "wilma@olympians.com")}), [('Rudolph, Wilma', '1-410-5313-584', 'wilma@olympians.com'), ('Summitt, Pat', '1-865-355-4320', 'pat@greatcoaches.com')]) testEqual(sort_contacts({"Dinesen, Isak": ("1-718-939-2548", "isak@storytellers.com")}), [('Dinesen, Isak', '1-718-939-2548', 'isak@storytellers.com')]) testEqual(sort_contacts({"Rimbaud, Arthur": ("1-636-555-5555", "arthur@notlive.com"), "Swinton, Tilda": ("1-917-222-2222", "tilda@greatActors.com"), "Almodovar, Pedro": 
("1-990-622-3892", "pedro@filmbuffs.com"), "Kandinsky, Wassily": ("1-333-555-9999", "kandinsky@painters.com")}), [('Almodovar, Pedro', '1-990-622-3892', 'pedro@filmbuffs.com'), ('Kandinsky, Wassily', '1-333-555-9999', 'kandinsky@painters.com'), ('Rimbaud, Arthur', '1-636-555-5555', 'arthur@notlive.com'), ('Swinton, Tilda', '1-917-222-2222', 'tilda@greatActors.com')])
text = input("Please enter a sentence: ") chars = create_dict(text) print_dict(chars)
identifier_body
Ch11. Ex.py
"""1. Write a program that allows the user to enter a string. It then prints a table of the letters of the alphabet in alphabetical order which occur in the string together with the number of times each letter occurs. Case should be ignored. A sample run of the program might look like this: Please enter a sentence: ThiS is a String with Upper and lower case Letters. a 3 c 1 d 1 e 5 g 1 h 2 i 4 l 2 n 2 o 1 p 2 r 4 s 5 t 5 u 1 w 2 $ """ def create_dict(str): my_dict = {} for letter in str: if letter.isalpha(): letter = letter.lower() if letter not in my_dict: my_dict[letter] = 1 else: my_dict[letter] += 1 return my_dict def print_dict(my_dict): keys = list(my_dict.keys()) keys.sort() for key in keys: print(key, " ", my_dict[key]) def main(): text = input("Please enter a sentence: ") chars = create_dict(text) print_dict(chars) if __name__ == "__main__": main() """2. Write a program that will function as a grade book, allowing a user (a professor or teacher) to enter the class roster for a course, along with each student’s cumulative grade. It then prints the class roster along with the average cumulative grade. Grades are on a 0-100 percentage scale. Use 2 lists (grades and students) and the enumerate function in your solution. A test run of this program would yield the following: # this is the first batch of input the user would enter Chris Jesse Sally # this is the second batch of input the user would enter Grade for Chris: 90 Grade for Jesse: 80 Grade for Sally: 70 # below is what your program should output Class roster: Chris (90.0) Jesse (80.0) Sally (70.0) Average grade: 80.0 """ import sys sys.setExecutionLimit(70000)
students = [] grades = [] total_score = 0.0 name = input("Enter the name of a student. (When finished, enter nothing)") while (name != ""): students += [name] name = input("Enter the name of a student. (When finished, enter nothing)") for i in range(len(students)): score = float(input("Grade for {0}:".format(students[i]))) grades += [score] print("Class roster:") for index, student in enumerate(students): total_score += grades[index] print("{0} ({1:.1})".format(student, grades[index])) print("\nAverage grade:", (total_score / len(students))) #3. Implement the functionality of the above program using a dictionary instead of a list. import sys sys.setExecutionLimit(70000) students = {} total_score = 0.0 name = input("Enter the name of a student. (When finished, enter nothing)") while (name != ""): students[name] = 0.0 name = input("Enter the name of a student. (When finished, enter nothing)") print("Class roster:") for student in students.keys(): score = float(input("Grade for {0}:".format(student))) students[student] = score total_score += students[student] print("{0} ({1:.1})".format(student, students[student])) print("\nAverage grade:", (total_score / len(students))) """4. Make a dictionary where the key is a worker’s name, and the value is a list where the first element is the clock in time, second element is the clock out time, and the third element is the total hours worked that day. Each worker’s list starts at [0, 0, 0]. Create functions for clock_in and clock_out. clock_in takes the dictionary of workers, the name of the worker, and the clock in time as parameters. When the worker clocks in, enter and save their clock in time as the first element in the associated list value. clock_out takes the same parameters, but with a clock out time instead of clock in time. When the worker clocks out, enter and save their clock out time and calculate the hours worked for that day and store it as the third element in the list. 
To make this program a little easier, we’re entering the clock in and clock out times as integers. As a bonus mission, try adding the times as strings representing the 24 hour clock (e.g., "08:00"), and then figure out how to calculate the time worked. And you can do this exercise either by aliasing or copying the dictionary.""" def clock_in(worker_dict, name, clock_in_time): worker_dict[name][0] = clock_in_time def clock_out(worker_dict, name, clock_out_time): worker_dict[name][1] = clock_out_time worker_dict[name][2] = clock_out_time - worker_dict[name][0] def main(): workers = {"George Spelvin": [0,0,0], "Jane Doe": [0,0,0], "John Smith": [0,0,0]} print(workers.get("George Spelvin")) # should print [0,0,0] clock_in(workers, "George Spelvin", 8) clock_out(workers, "George Spelvin", 17) print(workers.get("George Spelvin")) # should print [8, 17, 9] if __name__ == "__main__": main() """5. Here’s a table of English to Pirate translations: English Pirate sir matey hotel fleabag inn student swabbie boy matey madam proud beauty professor foul blaggart restaurant galley your yer excuse arr students swabbies are be lawyer foul blaggart restroom th’ head my me hello avast is be man matey Write a program that asks the user for a sentence in English and then translates that sentence to Pirate.""" from test import testEqual def translate(text): # your code here! 
pirate_text = "" my_dict = {"sir" : "matey", "hotel" : "fleabag inn", "student" : "swabbie", "boy" : "matey", "madam" : "proud beauty", "professor" : "foul blaggart", "restaurant" : "galley", "your" : "yer", "excuse" : "arr", "students" : "swabbies", "are" : "be", "lawyer" : "foul blaggart", "restroom" : "head", "my" : "me", "the" : "th'", "hello" : "avast", "is" : "be", "man" : "matey"} word_list = text.split() index = 0 pirate_words = [] for word in word_list: char = "" pirate_word = "" if (word.isalpha() == False): char = word[len(word)-1] new_word = word[:len(word)-1] else: new_word = word #print("!"+new_word+"!") if new_word in my_dict: pirate_word = my_dict[new_word] else: pirate_word = new_word pirate_word += char index += 1 pirate_words += [pirate_word] pirate_text = " ".join(pirate_words) #print(my_dict, "\n\n") print(pirate_text) return pirate_text text = "hello my man, please excuse your professor to the restroom!" testEqual(translate(text), "avast me matey, please arr yer foul blaggart to th' head!") """6. Give the Python interpreter’s response to each of the following from a continuous interpreter session: >>> d = {'apples': 15, 'bananas': 35, 'grapes': 12} >>> d['bananas'] >>> d['oranges'] = 20 >>> len(d) >>> 'grapes' in d >>> d['pears'] >>> d.get('pears', 0) >>> fruits = d.keys() >>> sorted(fruits) >>> print(fruits) >>> del d['apples'] >>> 'apples' in d Be sure you understand why you get each result. """ from test import testEqual # Note: The pass is a placeholder to allow # the code to compile. Remove it when you # begin coding. def set_inventory(inventory, fruit, quantity=0): inventory[fruit] = quantity # make these tests work... 
new_inventory = {} set_inventory(new_inventory, 'strawberries', 10) testEqual('strawberries' in new_inventory, True) testEqual(new_inventory['strawberries'], 10) set_inventory(new_inventory, 'strawberries', 25) testEqual(new_inventory['strawberries'] , 25) """Weekly Graded Assignment Write a sort_contacts function that takes a dictionary of contacts as a parameter and returns a sorted list of those contacts, where each contact is a tuple. The contacts dictionary that will be passed into the function has the contact name as its key, and the value is a tuple containing the phone number and email for the contact. contacts = {name: (phone, email), name: (phone, email), etc.} The sort_contacts function should then create a new, sorted (by last name) list of tuples representing all of the contact info (one tuple for each contact) that was in the dictionary. It should then return this list to the calling function. For example, given a dictionary argument of: {"Horney, Karen": ("1-541-656-3010", "karen@psychoanalysis.com"), "Welles, Orson": ("1-312-720-8888", "orson@notlive.com"), "Freud, Anna": ("1-541-754-3010", "anna@psychoanalysis.com")} sort_contacts should return this: [('Freud, Anna', '1-541-754-3010', 'anna@psychoanalysis.com'), ('Horney, Karen', '1-541-656-3010', 'karen@psychoanalysis.com'), ('Welles, Orson', '1-312-720-8888', 'orson@notlive.com')] """ # Create sort_contacts function def sort_contacts(contacts): """Sorts a dictionary of contacts""" key_list = list(contacts.keys()) #get keys key_list.sort() #sort key_list sorted_list = [] #initialize sorted list for key in key_list: contact = (key, contacts[key][0], contacts[key][1]) #create tuple sorted_list += [contact] #add tuple to list return(sorted_list) # The code below is just for your testing purposes. Make sure you pass all the tests. 
# In Vocareum, only put code for the sort_contacts function above from test import testEqual testEqual(sort_contacts({"Horney, Karen": ("1-541-656-3010", "karen@psychoanalysis.com"), "Welles, Orson": ("1-312-720-8888", "orson@notlive.com"), "Freud, Anna": ("1-541-754-3010", "anna@psychoanalysis.com")}), [('Freud, Anna', '1-541-754-3010', 'anna@psychoanalysis.com'), ('Horney, Karen', '1-541-656-3010', 'karen@psychoanalysis.com'), ('Welles, Orson', '1-312-720-8888', 'orson@notlive.com')]) testEqual(sort_contacts({"Summitt, Pat": ("1-865-355-4320", "pat@greatcoaches.com"), "Rudolph, Wilma": ("1-410-5313-584", "wilma@olympians.com")}), [('Rudolph, Wilma', '1-410-5313-584', 'wilma@olympians.com'), ('Summitt, Pat', '1-865-355-4320', 'pat@greatcoaches.com')]) testEqual(sort_contacts({"Dinesen, Isak": ("1-718-939-2548", "isak@storytellers.com")}), [('Dinesen, Isak', '1-718-939-2548', 'isak@storytellers.com')]) testEqual(sort_contacts({"Rimbaud, Arthur": ("1-636-555-5555", "arthur@notlive.com"), "Swinton, Tilda": ("1-917-222-2222", "tilda@greatActors.com"), "Almodovar, Pedro": ("1-990-622-3892", "pedro@filmbuffs.com"), "Kandinsky, Wassily": ("1-333-555-9999", "kandinsky@painters.com")}), [('Almodovar, Pedro', '1-990-622-3892', 'pedro@filmbuffs.com'), ('Kandinsky, Wassily', '1-333-555-9999', 'kandinsky@painters.com'), ('Rimbaud, Arthur', '1-636-555-5555', 'arthur@notlive.com'), ('Swinton, Tilda', '1-917-222-2222', 'tilda@greatActors.com')])
random_line_split
train_folds.py
import os, time mydir = os.path.split(os.path.abspath(__file__))[0] homepath = os.path.expanduser(os.getenv('USERPROFILE')) #print('Meu diretorio: {}'.format(mydir)) #print('Minha Home: {}'.format(homepath)) list_lib = [r'{}\anaconda3\envs\arc105'.format(homepath), \ r'{}\anaconda3\envs\arc105\Library\mingw-w64\bin'.format(homepath), \ r'{}\anaconda3\envs\arc105\Library\usr\bin'.format(homepath), \ r'{}\anaconda3\envs\arc105\Library\bin'.format(homepath), \ r'{}\anaconda3\envs\arc105\Scripts'.format(homepath), \ r'{}\anaconda3\envs\arc105\bin'.format(homepath), \ r'{}\anaconda3\condabin'.format(homepath)] for i in list_lib: os.environ['PATH'] = '%s;%s' % (i, os.environ['PATH']) os.environ['CUDA_LAUNCH_BLOCKING'] = '1' import numpy as np #print(np.__version__) import argparse, copy, shutil import random from torch.utils.tensorboard import SummaryWriter import torch.optim as optim #import network_factory as factory import network_factory_refactor_speed as factory import dataloader import torch.nn as nn import torch from torch.optim.lr_scheduler import StepLR from skimage.io import imsave import numpy as np import logging # Fix seed's for reproducibility random.seed(42) torch.manual_seed(42) def get_indices(id_fold, data): # Instantiate Folds list_folds = np.array(range(5)) # Roll in axis tmp_set = np.roll(list_folds,id_fold) indices_train = [] indices_val = [] indices_test = [] # Train for i in tmp_set[:3]: indices_train += list(data[data[:,1].astype(int) == i][:,0]) # Val for i in tmp_set[3:4]: indices_val += list(data[data[:,1].astype(int) == i][:,0]) # Test for i in tmp_set[4:]: indices_test += list(data[data[:,1].astype(int) == i][:,0]) #print(indices_train) #print(indices_val) #print(indices_test) indices = {} indices['train'] = indices_train indices['val'] = indices_val indices['test'] = indices_test return indices def main(): parser = argparse.ArgumentParser(description='Semantic Segmentation General') parser.add_argument('--dataset_path', type=str, 
required=True, help='Path to dataset') parser.add_argument('--inRasterReference', type=str, required=True, help='Path to inRasterReference') parser.add_argument('--output_path', type=str, required=True, help='Path to folder where models and stats will be saved') parser.add_argument('--batch', type=int, required=True, help='Batch Size') parser.add_argument('--epochs', type=int, required=True, help='Number of epochs') parser.add_argument('--learning_rate', type=float, required=False, default=0.001, help='Learning rate. Default:0.001') parser.add_argument('--network_type', type=str, required=True, help = 'Choose network type') parser.add_argument('--optimizer_type', type=str, required=False, default='adam', help = 'Optimizer: adam, sgd') parser.add_argument('--early_stop', type=int, required=True, help='Number of epochs to activate early stop.') parser.add_argument('--fine_tunning_imagenet', type= bool, required=False, default=False, help='set fine tunning on imagenet.') parser.add_argument('--feature_extract', type= bool, required=False, default=False, help='Train just the classifier.') parser.add_argument('--only_top_layers', type= str, required=False, default='True', help='Train only the top layers (classifier).') parser.add_argument('--ignore_zero', type= bool, required=False, default=True, help='Ignore class 0 (background).') parser.add_argument('--modelpath', type=str, required=False, default=False, help='Ignore class 0 (background).') parser.add_argument('--isRGB', type=str, required=False, default='False', help='Ignore class 0 (background).') parser.add_argument('--use_weight_decay', type=str, required=False, default='False', help='Use weight_decay.') args = parser.parse_args() dataset_dir = args.dataset_path inRasterReference = args.inRasterReference out_dir = args.output_path batch_size = args.batch epochs = args.epochs learning_rate = args.learning_rate net_type = args.network_type opt_type = args.optimizer_type fine_tunning = args.fine_tunning_imagenet 
early_stop = args.early_stop feature_extract = args.feature_extract only_top_layers = args.only_top_layers ignore_zero = args.ignore_zero modelpath = args.modelpath isRGB = True if args.isRGB == 'True' else False use_weight_decay = True if args.use_weight_decay == 'True' else False print(args) # Get classes from mask list_classes = factory.get_classes(inRasterReference, ignore_zero) num_classes = len(list_classes) #Delete base_output = os.path.join(out_dir, '2_Segmentation_BASE') if os.path.exists(base_output): shutil.rmtree(base_output, ignore_errors=True) #Recreate folders os.makedirs(base_output, exist_ok=True) #if (not os.path.exists(out_dir)): # os.makedirs(out_dir) num_folds = 5 #data = np.genfromtxt('code/config/stratified_folds', dtype=str, delimiter=',') data = np.genfromtxt(os.path.join(dataset_dir.split('raw_data')[0],'stratified_folds'), dtype=str, delimiter=',') #for id_fold in range(num_folds): for id_fold in range(num_folds): print ('.......Creating model.......') device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if modelpath: print('Loading model in: {}'.format(modelpath)) model = torch.load(modelpath) else: print('Creating a new model: {}'.format(net_type)) model = factory.model_factory(net_type, num_classes, feature_extract, fine_tunning, isRGB) # Use multiples GPUS #if torch.cuda.device_count() > 1: # print("Let's use", torch.cuda.device_count(), "GPUs!") # batch_size = int(torch.cuda.device_count())*batch_size # model = nn.DataParallel(model, device_ids=[0, 1]) model = model.to(device) print(model) print ('......Model created.......') indices = get_indices(id_fold, data) print ('......Creating dataloader......') dataloaders_dict = {} dataset = dataloader.RS_Loader_Semantic(dataset_dir, num_classes, mode='train', indices=indices['train'], isRGB=isRGB) dataloaders_dict['train'] = torch.utils.data.DataLoader(dataset,
num_workers=4, drop_last=True) dataset_val = dataloader.RS_Loader_Semantic(dataset_dir, num_classes, mode='val', indices=indices['val'], isRGB=isRGB) dataloaders_dict['val'] = torch.utils.data.DataLoader(dataset_val, batch_size=batch_size, num_workers=4) #Compute mean,std from training data mean, std, transform_train = dataloader.get_transforms(dataloaders_dict['train'], 'train', mean=None, std=None) dataloaders_dict['train'].dataset.transform = transform_train dataloaders_dict['train'].dataset.mean = mean dataloaders_dict['train'].dataset.std = std dataloaders_dict['train'].dataset.iscomputed = True _, _, transform_val = dataloader.get_transforms(dataloaders_dict['val'], 'val', mean=mean, std=std) dataloaders_dict['val'].dataset.transform = transform_val dataloaders_dict['val'].dataset.mean = mean dataloaders_dict['val'].dataset.std = std print ('......Dataloader created......') print(dataloaders_dict['train'].dataset.mean) print(dataloaders_dict['train'].dataset.std) #print(only_top_layers) # FOr default all parameters have requires_grad = True # So, unselect all layers from backbone if only top is needed if only_top_layers == 'True': print('TRAINING: ONLY TOP LAYERS') # Freeze backbone parameters for param in model.backbone.parameters(): param.requires_grad = False else: print('TRAINING FULL LAYERS') # Show trainable layers for name,param in model.named_parameters(): if param.requires_grad == True: print("\t",name) # Get parameters to pass to optimizer params_to_update = model.parameters() """ print("Params to learn:") if feature_extract: params_to_update = [] for name,param in model.named_parameters(): if param.requires_grad == True: params_to_update.append(param) print("\t",name) else: for name,param in model.named_parameters(): if param.requires_grad == True: print("\t",name) """ #self.weight_class = 1. / np.unique(np.array(self.list_labels), return_counts=True)[1] #self.samples_weights = self.weight_class[self. 
list_labels] #criterion = nn.CrossEntropyLoss(weight=class_weights) #defining optimizer and loss scheduler = None if opt_type == 'adam': optimizer = optim.Adam(params_to_update, lr=learning_rate) if use_weight_decay: scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') else: # Optimizer optimizer = optim.SGD(params_to_update, lr=learning_rate, momentum=0.9) if use_weight_decay: scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') if ignore_zero: print('Ignoring Index Zero') #criterion = nn.CrossEntropyLoss(ignore_index=254, weight=torch.from_numpy(dataloaders_dict['train'].dataset.class_weights).float().cuda()) criterion = nn.CrossEntropyLoss(ignore_index=254, weight=torch.from_numpy(dataloaders_dict['train'].dataset.class_weights).float().to(device)) else: criterion = nn.CrossEntropyLoss() tensor_board = SummaryWriter(log_dir = base_output) final_model, val_history = factory.train(model, dataloaders_dict, criterion, optimizer, epochs, early_stop, tensor_board, net_type, scheduler, opt_type, list_classes, use_weight_decay) #if fine_tunning: # torch.save(final_model, os.path.join(out_dir, net_type + '_final_model_ft')) # final_stats_file = open (os.path.join(out_dir, net_type + '_finalstats_ft.txt'), 'w') #else: # torch.save(final_model, os.path.join(out_dir, net_type + '_final_model')) # final_stats_file = open (os.path.join(out_dir, net_type + '_finalstats.txt'), 'w') #factory.final_eval(model, dataloaders_dict, batch_size, final_stats_file) # Save mean and std final_model.mean = copy.deepcopy(mean) final_model.std = copy.deepcopy(std) final_model.num_classes = copy.deepcopy(num_classes) torch.save(final_model, os.path.join(base_output, net_type + '_final_model_ft_fold_{}'.format(id_fold))) if __name__ == '__main__': main()
batch_size=batch_size,
random_line_split
train_folds.py
import os, time mydir = os.path.split(os.path.abspath(__file__))[0] homepath = os.path.expanduser(os.getenv('USERPROFILE')) #print('Meu diretorio: {}'.format(mydir)) #print('Minha Home: {}'.format(homepath)) list_lib = [r'{}\anaconda3\envs\arc105'.format(homepath), \ r'{}\anaconda3\envs\arc105\Library\mingw-w64\bin'.format(homepath), \ r'{}\anaconda3\envs\arc105\Library\usr\bin'.format(homepath), \ r'{}\anaconda3\envs\arc105\Library\bin'.format(homepath), \ r'{}\anaconda3\envs\arc105\Scripts'.format(homepath), \ r'{}\anaconda3\envs\arc105\bin'.format(homepath), \ r'{}\anaconda3\condabin'.format(homepath)] for i in list_lib: os.environ['PATH'] = '%s;%s' % (i, os.environ['PATH']) os.environ['CUDA_LAUNCH_BLOCKING'] = '1' import numpy as np #print(np.__version__) import argparse, copy, shutil import random from torch.utils.tensorboard import SummaryWriter import torch.optim as optim #import network_factory as factory import network_factory_refactor_speed as factory import dataloader import torch.nn as nn import torch from torch.optim.lr_scheduler import StepLR from skimage.io import imsave import numpy as np import logging # Fix seed's for reproducibility random.seed(42) torch.manual_seed(42) def get_indices(id_fold, data): # Instantiate Folds
def main(): parser = argparse.ArgumentParser(description='Semantic Segmentation General') parser.add_argument('--dataset_path', type=str, required=True, help='Path to dataset') parser.add_argument('--inRasterReference', type=str, required=True, help='Path to inRasterReference') parser.add_argument('--output_path', type=str, required=True, help='Path to folder where models and stats will be saved') parser.add_argument('--batch', type=int, required=True, help='Batch Size') parser.add_argument('--epochs', type=int, required=True, help='Number of epochs') parser.add_argument('--learning_rate', type=float, required=False, default=0.001, help='Learning rate. Default:0.001') parser.add_argument('--network_type', type=str, required=True, help = 'Choose network type') parser.add_argument('--optimizer_type', type=str, required=False, default='adam', help = 'Optimizer: adam, sgd') parser.add_argument('--early_stop', type=int, required=True, help='Number of epochs to activate early stop.') parser.add_argument('--fine_tunning_imagenet', type= bool, required=False, default=False, help='set fine tunning on imagenet.') parser.add_argument('--feature_extract', type= bool, required=False, default=False, help='Train just the classifier.') parser.add_argument('--only_top_layers', type= str, required=False, default='True', help='Train only the top layers (classifier).') parser.add_argument('--ignore_zero', type= bool, required=False, default=True, help='Ignore class 0 (background).') parser.add_argument('--modelpath', type=str, required=False, default=False, help='Ignore class 0 (background).') parser.add_argument('--isRGB', type=str, required=False, default='False', help='Ignore class 0 (background).') parser.add_argument('--use_weight_decay', type=str, required=False, default='False', help='Use weight_decay.') args = parser.parse_args() dataset_dir = args.dataset_path inRasterReference = args.inRasterReference out_dir = args.output_path batch_size = args.batch epochs = args.epochs 
learning_rate = args.learning_rate net_type = args.network_type opt_type = args.optimizer_type fine_tunning = args.fine_tunning_imagenet early_stop = args.early_stop feature_extract = args.feature_extract only_top_layers = args.only_top_layers ignore_zero = args.ignore_zero modelpath = args.modelpath isRGB = True if args.isRGB == 'True' else False use_weight_decay = True if args.use_weight_decay == 'True' else False print(args) # Get classes from mask list_classes = factory.get_classes(inRasterReference, ignore_zero) num_classes = len(list_classes) #Delete base_output = os.path.join(out_dir, '2_Segmentation_BASE') if os.path.exists(base_output): shutil.rmtree(base_output, ignore_errors=True) #Recreate folders os.makedirs(base_output, exist_ok=True) #if (not os.path.exists(out_dir)): # os.makedirs(out_dir) num_folds = 5 #data = np.genfromtxt('code/config/stratified_folds', dtype=str, delimiter=',') data = np.genfromtxt(os.path.join(dataset_dir.split('raw_data')[0],'stratified_folds'), dtype=str, delimiter=',') #for id_fold in range(num_folds): for id_fold in range(num_folds): print ('.......Creating model.......') device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if modelpath: print('Loading model in: {}'.format(modelpath)) model = torch.load(modelpath) else: print('Creating a new model: {}'.format(net_type)) model = factory.model_factory(net_type, num_classes, feature_extract, fine_tunning, isRGB) # Use multiples GPUS #if torch.cuda.device_count() > 1: # print("Let's use", torch.cuda.device_count(), "GPUs!") # batch_size = int(torch.cuda.device_count())*batch_size # model = nn.DataParallel(model, device_ids=[0, 1]) model = model.to(device) print(model) print ('......Model created.......') indices = get_indices(id_fold, data) print ('......Creating dataloader......') dataloaders_dict = {} dataset = dataloader.RS_Loader_Semantic(dataset_dir, num_classes, mode='train', indices=indices['train'], isRGB=isRGB) dataloaders_dict['train'] = 
torch.utils.data.DataLoader(dataset, batch_size=batch_size, num_workers=4, drop_last=True) dataset_val = dataloader.RS_Loader_Semantic(dataset_dir, num_classes, mode='val', indices=indices['val'], isRGB=isRGB) dataloaders_dict['val'] = torch.utils.data.DataLoader(dataset_val, batch_size=batch_size, num_workers=4) #Compute mean,std from training data mean, std, transform_train = dataloader.get_transforms(dataloaders_dict['train'], 'train', mean=None, std=None) dataloaders_dict['train'].dataset.transform = transform_train dataloaders_dict['train'].dataset.mean = mean dataloaders_dict['train'].dataset.std = std dataloaders_dict['train'].dataset.iscomputed = True _, _, transform_val = dataloader.get_transforms(dataloaders_dict['val'], 'val', mean=mean, std=std) dataloaders_dict['val'].dataset.transform = transform_val dataloaders_dict['val'].dataset.mean = mean dataloaders_dict['val'].dataset.std = std print ('......Dataloader created......') print(dataloaders_dict['train'].dataset.mean) print(dataloaders_dict['train'].dataset.std) #print(only_top_layers) # FOr default all parameters have requires_grad = True # So, unselect all layers from backbone if only top is needed if only_top_layers == 'True': print('TRAINING: ONLY TOP LAYERS') # Freeze backbone parameters for param in model.backbone.parameters(): param.requires_grad = False else: print('TRAINING FULL LAYERS') # Show trainable layers for name,param in model.named_parameters(): if param.requires_grad == True: print("\t",name) # Get parameters to pass to optimizer params_to_update = model.parameters() """ print("Params to learn:") if feature_extract: params_to_update = [] for name,param in model.named_parameters(): if param.requires_grad == True: params_to_update.append(param) print("\t",name) else: for name,param in model.named_parameters(): if param.requires_grad == True: print("\t",name) """ #self.weight_class = 1. 
/ np.unique(np.array(self.list_labels), return_counts=True)[1] #self.samples_weights = self.weight_class[self. list_labels] #criterion = nn.CrossEntropyLoss(weight=class_weights) #defining optimizer and loss scheduler = None if opt_type == 'adam': optimizer = optim.Adam(params_to_update, lr=learning_rate) if use_weight_decay: scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') else: # Optimizer optimizer = optim.SGD(params_to_update, lr=learning_rate, momentum=0.9) if use_weight_decay: scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') if ignore_zero: print('Ignoring Index Zero') #criterion = nn.CrossEntropyLoss(ignore_index=254, weight=torch.from_numpy(dataloaders_dict['train'].dataset.class_weights).float().cuda()) criterion = nn.CrossEntropyLoss(ignore_index=254, weight=torch.from_numpy(dataloaders_dict['train'].dataset.class_weights).float().to(device)) else: criterion = nn.CrossEntropyLoss() tensor_board = SummaryWriter(log_dir = base_output) final_model, val_history = factory.train(model, dataloaders_dict, criterion, optimizer, epochs, early_stop, tensor_board, net_type, scheduler, opt_type, list_classes, use_weight_decay) #if fine_tunning: # torch.save(final_model, os.path.join(out_dir, net_type + '_final_model_ft')) # final_stats_file = open (os.path.join(out_dir, net_type + '_finalstats_ft.txt'), 'w') #else: # torch.save(final_model, os.path.join(out_dir, net_type + '_final_model')) # final_stats_file = open (os.path.join(out_dir, net_type + '_finalstats.txt'), 'w') #factory.final_eval(model, dataloaders_dict, batch_size, final_stats_file) # Save mean and std final_model.mean = copy.deepcopy(mean) final_model.std = copy.deepcopy(std) final_model.num_classes = copy.deepcopy(num_classes) torch.save(final_model, os.path.join(base_output, net_type + '_final_model_ft_fold_{}'.format(id_fold))) if __name__ == '__main__': main()
list_folds = np.array(range(5)) # Roll in axis tmp_set = np.roll(list_folds,id_fold) indices_train = [] indices_val = [] indices_test = [] # Train for i in tmp_set[:3]: indices_train += list(data[data[:,1].astype(int) == i][:,0]) # Val for i in tmp_set[3:4]: indices_val += list(data[data[:,1].astype(int) == i][:,0]) # Test for i in tmp_set[4:]: indices_test += list(data[data[:,1].astype(int) == i][:,0]) #print(indices_train) #print(indices_val) #print(indices_test) indices = {} indices['train'] = indices_train indices['val'] = indices_val indices['test'] = indices_test return indices
identifier_body
train_folds.py
import os, time mydir = os.path.split(os.path.abspath(__file__))[0] homepath = os.path.expanduser(os.getenv('USERPROFILE')) #print('Meu diretorio: {}'.format(mydir)) #print('Minha Home: {}'.format(homepath)) list_lib = [r'{}\anaconda3\envs\arc105'.format(homepath), \ r'{}\anaconda3\envs\arc105\Library\mingw-w64\bin'.format(homepath), \ r'{}\anaconda3\envs\arc105\Library\usr\bin'.format(homepath), \ r'{}\anaconda3\envs\arc105\Library\bin'.format(homepath), \ r'{}\anaconda3\envs\arc105\Scripts'.format(homepath), \ r'{}\anaconda3\envs\arc105\bin'.format(homepath), \ r'{}\anaconda3\condabin'.format(homepath)] for i in list_lib: os.environ['PATH'] = '%s;%s' % (i, os.environ['PATH']) os.environ['CUDA_LAUNCH_BLOCKING'] = '1' import numpy as np #print(np.__version__) import argparse, copy, shutil import random from torch.utils.tensorboard import SummaryWriter import torch.optim as optim #import network_factory as factory import network_factory_refactor_speed as factory import dataloader import torch.nn as nn import torch from torch.optim.lr_scheduler import StepLR from skimage.io import imsave import numpy as np import logging # Fix seed's for reproducibility random.seed(42) torch.manual_seed(42) def get_indices(id_fold, data): # Instantiate Folds list_folds = np.array(range(5)) # Roll in axis tmp_set = np.roll(list_folds,id_fold) indices_train = [] indices_val = [] indices_test = [] # Train for i in tmp_set[:3]: indices_train += list(data[data[:,1].astype(int) == i][:,0]) # Val for i in tmp_set[3:4]:
# Test for i in tmp_set[4:]: indices_test += list(data[data[:,1].astype(int) == i][:,0]) #print(indices_train) #print(indices_val) #print(indices_test) indices = {} indices['train'] = indices_train indices['val'] = indices_val indices['test'] = indices_test return indices def main(): parser = argparse.ArgumentParser(description='Semantic Segmentation General') parser.add_argument('--dataset_path', type=str, required=True, help='Path to dataset') parser.add_argument('--inRasterReference', type=str, required=True, help='Path to inRasterReference') parser.add_argument('--output_path', type=str, required=True, help='Path to folder where models and stats will be saved') parser.add_argument('--batch', type=int, required=True, help='Batch Size') parser.add_argument('--epochs', type=int, required=True, help='Number of epochs') parser.add_argument('--learning_rate', type=float, required=False, default=0.001, help='Learning rate. Default:0.001') parser.add_argument('--network_type', type=str, required=True, help = 'Choose network type') parser.add_argument('--optimizer_type', type=str, required=False, default='adam', help = 'Optimizer: adam, sgd') parser.add_argument('--early_stop', type=int, required=True, help='Number of epochs to activate early stop.') parser.add_argument('--fine_tunning_imagenet', type= bool, required=False, default=False, help='set fine tunning on imagenet.') parser.add_argument('--feature_extract', type= bool, required=False, default=False, help='Train just the classifier.') parser.add_argument('--only_top_layers', type= str, required=False, default='True', help='Train only the top layers (classifier).') parser.add_argument('--ignore_zero', type= bool, required=False, default=True, help='Ignore class 0 (background).') parser.add_argument('--modelpath', type=str, required=False, default=False, help='Ignore class 0 (background).') parser.add_argument('--isRGB', type=str, required=False, default='False', help='Ignore class 0 (background).') 
parser.add_argument('--use_weight_decay', type=str, required=False, default='False', help='Use weight_decay.') args = parser.parse_args() dataset_dir = args.dataset_path inRasterReference = args.inRasterReference out_dir = args.output_path batch_size = args.batch epochs = args.epochs learning_rate = args.learning_rate net_type = args.network_type opt_type = args.optimizer_type fine_tunning = args.fine_tunning_imagenet early_stop = args.early_stop feature_extract = args.feature_extract only_top_layers = args.only_top_layers ignore_zero = args.ignore_zero modelpath = args.modelpath isRGB = True if args.isRGB == 'True' else False use_weight_decay = True if args.use_weight_decay == 'True' else False print(args) # Get classes from mask list_classes = factory.get_classes(inRasterReference, ignore_zero) num_classes = len(list_classes) #Delete base_output = os.path.join(out_dir, '2_Segmentation_BASE') if os.path.exists(base_output): shutil.rmtree(base_output, ignore_errors=True) #Recreate folders os.makedirs(base_output, exist_ok=True) #if (not os.path.exists(out_dir)): # os.makedirs(out_dir) num_folds = 5 #data = np.genfromtxt('code/config/stratified_folds', dtype=str, delimiter=',') data = np.genfromtxt(os.path.join(dataset_dir.split('raw_data')[0],'stratified_folds'), dtype=str, delimiter=',') #for id_fold in range(num_folds): for id_fold in range(num_folds): print ('.......Creating model.......') device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if modelpath: print('Loading model in: {}'.format(modelpath)) model = torch.load(modelpath) else: print('Creating a new model: {}'.format(net_type)) model = factory.model_factory(net_type, num_classes, feature_extract, fine_tunning, isRGB) # Use multiples GPUS #if torch.cuda.device_count() > 1: # print("Let's use", torch.cuda.device_count(), "GPUs!") # batch_size = int(torch.cuda.device_count())*batch_size # model = nn.DataParallel(model, device_ids=[0, 1]) model = model.to(device) print(model) print 
('......Model created.......') indices = get_indices(id_fold, data) print ('......Creating dataloader......') dataloaders_dict = {} dataset = dataloader.RS_Loader_Semantic(dataset_dir, num_classes, mode='train', indices=indices['train'], isRGB=isRGB) dataloaders_dict['train'] = torch.utils.data.DataLoader(dataset, batch_size=batch_size, num_workers=4, drop_last=True) dataset_val = dataloader.RS_Loader_Semantic(dataset_dir, num_classes, mode='val', indices=indices['val'], isRGB=isRGB) dataloaders_dict['val'] = torch.utils.data.DataLoader(dataset_val, batch_size=batch_size, num_workers=4) #Compute mean,std from training data mean, std, transform_train = dataloader.get_transforms(dataloaders_dict['train'], 'train', mean=None, std=None) dataloaders_dict['train'].dataset.transform = transform_train dataloaders_dict['train'].dataset.mean = mean dataloaders_dict['train'].dataset.std = std dataloaders_dict['train'].dataset.iscomputed = True _, _, transform_val = dataloader.get_transforms(dataloaders_dict['val'], 'val', mean=mean, std=std) dataloaders_dict['val'].dataset.transform = transform_val dataloaders_dict['val'].dataset.mean = mean dataloaders_dict['val'].dataset.std = std print ('......Dataloader created......') print(dataloaders_dict['train'].dataset.mean) print(dataloaders_dict['train'].dataset.std) #print(only_top_layers) # FOr default all parameters have requires_grad = True # So, unselect all layers from backbone if only top is needed if only_top_layers == 'True': print('TRAINING: ONLY TOP LAYERS') # Freeze backbone parameters for param in model.backbone.parameters(): param.requires_grad = False else: print('TRAINING FULL LAYERS') # Show trainable layers for name,param in model.named_parameters(): if param.requires_grad == True: print("\t",name) # Get parameters to pass to optimizer params_to_update = model.parameters() """ print("Params to learn:") if feature_extract: params_to_update = [] for name,param in model.named_parameters(): if param.requires_grad == 
True: params_to_update.append(param) print("\t",name) else: for name,param in model.named_parameters(): if param.requires_grad == True: print("\t",name) """ #self.weight_class = 1. / np.unique(np.array(self.list_labels), return_counts=True)[1] #self.samples_weights = self.weight_class[self. list_labels] #criterion = nn.CrossEntropyLoss(weight=class_weights) #defining optimizer and loss scheduler = None if opt_type == 'adam': optimizer = optim.Adam(params_to_update, lr=learning_rate) if use_weight_decay: scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') else: # Optimizer optimizer = optim.SGD(params_to_update, lr=learning_rate, momentum=0.9) if use_weight_decay: scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') if ignore_zero: print('Ignoring Index Zero') #criterion = nn.CrossEntropyLoss(ignore_index=254, weight=torch.from_numpy(dataloaders_dict['train'].dataset.class_weights).float().cuda()) criterion = nn.CrossEntropyLoss(ignore_index=254, weight=torch.from_numpy(dataloaders_dict['train'].dataset.class_weights).float().to(device)) else: criterion = nn.CrossEntropyLoss() tensor_board = SummaryWriter(log_dir = base_output) final_model, val_history = factory.train(model, dataloaders_dict, criterion, optimizer, epochs, early_stop, tensor_board, net_type, scheduler, opt_type, list_classes, use_weight_decay) #if fine_tunning: # torch.save(final_model, os.path.join(out_dir, net_type + '_final_model_ft')) # final_stats_file = open (os.path.join(out_dir, net_type + '_finalstats_ft.txt'), 'w') #else: # torch.save(final_model, os.path.join(out_dir, net_type + '_final_model')) # final_stats_file = open (os.path.join(out_dir, net_type + '_finalstats.txt'), 'w') #factory.final_eval(model, dataloaders_dict, batch_size, final_stats_file) # Save mean and std final_model.mean = copy.deepcopy(mean) final_model.std = copy.deepcopy(std) final_model.num_classes = copy.deepcopy(num_classes) torch.save(final_model, os.path.join(base_output, 
net_type + '_final_model_ft_fold_{}'.format(id_fold))) if __name__ == '__main__': main()
indices_val += list(data[data[:,1].astype(int) == i][:,0])
conditional_block
train_folds.py
import os, time mydir = os.path.split(os.path.abspath(__file__))[0] homepath = os.path.expanduser(os.getenv('USERPROFILE')) #print('Meu diretorio: {}'.format(mydir)) #print('Minha Home: {}'.format(homepath)) list_lib = [r'{}\anaconda3\envs\arc105'.format(homepath), \ r'{}\anaconda3\envs\arc105\Library\mingw-w64\bin'.format(homepath), \ r'{}\anaconda3\envs\arc105\Library\usr\bin'.format(homepath), \ r'{}\anaconda3\envs\arc105\Library\bin'.format(homepath), \ r'{}\anaconda3\envs\arc105\Scripts'.format(homepath), \ r'{}\anaconda3\envs\arc105\bin'.format(homepath), \ r'{}\anaconda3\condabin'.format(homepath)] for i in list_lib: os.environ['PATH'] = '%s;%s' % (i, os.environ['PATH']) os.environ['CUDA_LAUNCH_BLOCKING'] = '1' import numpy as np #print(np.__version__) import argparse, copy, shutil import random from torch.utils.tensorboard import SummaryWriter import torch.optim as optim #import network_factory as factory import network_factory_refactor_speed as factory import dataloader import torch.nn as nn import torch from torch.optim.lr_scheduler import StepLR from skimage.io import imsave import numpy as np import logging # Fix seed's for reproducibility random.seed(42) torch.manual_seed(42) def
(id_fold, data): # Instantiate Folds list_folds = np.array(range(5)) # Roll in axis tmp_set = np.roll(list_folds,id_fold) indices_train = [] indices_val = [] indices_test = [] # Train for i in tmp_set[:3]: indices_train += list(data[data[:,1].astype(int) == i][:,0]) # Val for i in tmp_set[3:4]: indices_val += list(data[data[:,1].astype(int) == i][:,0]) # Test for i in tmp_set[4:]: indices_test += list(data[data[:,1].astype(int) == i][:,0]) #print(indices_train) #print(indices_val) #print(indices_test) indices = {} indices['train'] = indices_train indices['val'] = indices_val indices['test'] = indices_test return indices def main(): parser = argparse.ArgumentParser(description='Semantic Segmentation General') parser.add_argument('--dataset_path', type=str, required=True, help='Path to dataset') parser.add_argument('--inRasterReference', type=str, required=True, help='Path to inRasterReference') parser.add_argument('--output_path', type=str, required=True, help='Path to folder where models and stats will be saved') parser.add_argument('--batch', type=int, required=True, help='Batch Size') parser.add_argument('--epochs', type=int, required=True, help='Number of epochs') parser.add_argument('--learning_rate', type=float, required=False, default=0.001, help='Learning rate. 
Default:0.001') parser.add_argument('--network_type', type=str, required=True, help = 'Choose network type') parser.add_argument('--optimizer_type', type=str, required=False, default='adam', help = 'Optimizer: adam, sgd') parser.add_argument('--early_stop', type=int, required=True, help='Number of epochs to activate early stop.') parser.add_argument('--fine_tunning_imagenet', type= bool, required=False, default=False, help='set fine tunning on imagenet.') parser.add_argument('--feature_extract', type= bool, required=False, default=False, help='Train just the classifier.') parser.add_argument('--only_top_layers', type= str, required=False, default='True', help='Train only the top layers (classifier).') parser.add_argument('--ignore_zero', type= bool, required=False, default=True, help='Ignore class 0 (background).') parser.add_argument('--modelpath', type=str, required=False, default=False, help='Ignore class 0 (background).') parser.add_argument('--isRGB', type=str, required=False, default='False', help='Ignore class 0 (background).') parser.add_argument('--use_weight_decay', type=str, required=False, default='False', help='Use weight_decay.') args = parser.parse_args() dataset_dir = args.dataset_path inRasterReference = args.inRasterReference out_dir = args.output_path batch_size = args.batch epochs = args.epochs learning_rate = args.learning_rate net_type = args.network_type opt_type = args.optimizer_type fine_tunning = args.fine_tunning_imagenet early_stop = args.early_stop feature_extract = args.feature_extract only_top_layers = args.only_top_layers ignore_zero = args.ignore_zero modelpath = args.modelpath isRGB = True if args.isRGB == 'True' else False use_weight_decay = True if args.use_weight_decay == 'True' else False print(args) # Get classes from mask list_classes = factory.get_classes(inRasterReference, ignore_zero) num_classes = len(list_classes) #Delete base_output = os.path.join(out_dir, '2_Segmentation_BASE') if os.path.exists(base_output): 
shutil.rmtree(base_output, ignore_errors=True) #Recreate folders os.makedirs(base_output, exist_ok=True) #if (not os.path.exists(out_dir)): # os.makedirs(out_dir) num_folds = 5 #data = np.genfromtxt('code/config/stratified_folds', dtype=str, delimiter=',') data = np.genfromtxt(os.path.join(dataset_dir.split('raw_data')[0],'stratified_folds'), dtype=str, delimiter=',') #for id_fold in range(num_folds): for id_fold in range(num_folds): print ('.......Creating model.......') device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if modelpath: print('Loading model in: {}'.format(modelpath)) model = torch.load(modelpath) else: print('Creating a new model: {}'.format(net_type)) model = factory.model_factory(net_type, num_classes, feature_extract, fine_tunning, isRGB) # Use multiples GPUS #if torch.cuda.device_count() > 1: # print("Let's use", torch.cuda.device_count(), "GPUs!") # batch_size = int(torch.cuda.device_count())*batch_size # model = nn.DataParallel(model, device_ids=[0, 1]) model = model.to(device) print(model) print ('......Model created.......') indices = get_indices(id_fold, data) print ('......Creating dataloader......') dataloaders_dict = {} dataset = dataloader.RS_Loader_Semantic(dataset_dir, num_classes, mode='train', indices=indices['train'], isRGB=isRGB) dataloaders_dict['train'] = torch.utils.data.DataLoader(dataset, batch_size=batch_size, num_workers=4, drop_last=True) dataset_val = dataloader.RS_Loader_Semantic(dataset_dir, num_classes, mode='val', indices=indices['val'], isRGB=isRGB) dataloaders_dict['val'] = torch.utils.data.DataLoader(dataset_val, batch_size=batch_size, num_workers=4) #Compute mean,std from training data mean, std, transform_train = dataloader.get_transforms(dataloaders_dict['train'], 'train', mean=None, std=None) dataloaders_dict['train'].dataset.transform = transform_train dataloaders_dict['train'].dataset.mean = mean dataloaders_dict['train'].dataset.std = std dataloaders_dict['train'].dataset.iscomputed = True 
_, _, transform_val = dataloader.get_transforms(dataloaders_dict['val'], 'val', mean=mean, std=std) dataloaders_dict['val'].dataset.transform = transform_val dataloaders_dict['val'].dataset.mean = mean dataloaders_dict['val'].dataset.std = std print ('......Dataloader created......') print(dataloaders_dict['train'].dataset.mean) print(dataloaders_dict['train'].dataset.std) #print(only_top_layers) # FOr default all parameters have requires_grad = True # So, unselect all layers from backbone if only top is needed if only_top_layers == 'True': print('TRAINING: ONLY TOP LAYERS') # Freeze backbone parameters for param in model.backbone.parameters(): param.requires_grad = False else: print('TRAINING FULL LAYERS') # Show trainable layers for name,param in model.named_parameters(): if param.requires_grad == True: print("\t",name) # Get parameters to pass to optimizer params_to_update = model.parameters() """ print("Params to learn:") if feature_extract: params_to_update = [] for name,param in model.named_parameters(): if param.requires_grad == True: params_to_update.append(param) print("\t",name) else: for name,param in model.named_parameters(): if param.requires_grad == True: print("\t",name) """ #self.weight_class = 1. / np.unique(np.array(self.list_labels), return_counts=True)[1] #self.samples_weights = self.weight_class[self. 
list_labels] #criterion = nn.CrossEntropyLoss(weight=class_weights) #defining optimizer and loss scheduler = None if opt_type == 'adam': optimizer = optim.Adam(params_to_update, lr=learning_rate) if use_weight_decay: scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') else: # Optimizer optimizer = optim.SGD(params_to_update, lr=learning_rate, momentum=0.9) if use_weight_decay: scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') if ignore_zero: print('Ignoring Index Zero') #criterion = nn.CrossEntropyLoss(ignore_index=254, weight=torch.from_numpy(dataloaders_dict['train'].dataset.class_weights).float().cuda()) criterion = nn.CrossEntropyLoss(ignore_index=254, weight=torch.from_numpy(dataloaders_dict['train'].dataset.class_weights).float().to(device)) else: criterion = nn.CrossEntropyLoss() tensor_board = SummaryWriter(log_dir = base_output) final_model, val_history = factory.train(model, dataloaders_dict, criterion, optimizer, epochs, early_stop, tensor_board, net_type, scheduler, opt_type, list_classes, use_weight_decay) #if fine_tunning: # torch.save(final_model, os.path.join(out_dir, net_type + '_final_model_ft')) # final_stats_file = open (os.path.join(out_dir, net_type + '_finalstats_ft.txt'), 'w') #else: # torch.save(final_model, os.path.join(out_dir, net_type + '_final_model')) # final_stats_file = open (os.path.join(out_dir, net_type + '_finalstats.txt'), 'w') #factory.final_eval(model, dataloaders_dict, batch_size, final_stats_file) # Save mean and std final_model.mean = copy.deepcopy(mean) final_model.std = copy.deepcopy(std) final_model.num_classes = copy.deepcopy(num_classes) torch.save(final_model, os.path.join(base_output, net_type + '_final_model_ft_fold_{}'.format(id_fold))) if __name__ == '__main__': main()
get_indices
identifier_name
backtrace.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! As always, windows has something very different than unix, we mainly want //! to avoid having to depend too much on libunwind for windows. //! //! If you google around, you'll find a fair bit of references to built-in //! functions to get backtraces on windows. It turns out that most of these are //! in an external library called dbghelp. I was unable to find this library //! via `-ldbghelp`, but it is apparently normal to do the `dlopen` equivalent //! of it. //! //! You'll also find that there's a function called CaptureStackBackTrace //! mentioned frequently (which is also easy to use), but sadly I didn't have a //! copy of that function in my mingw install (maybe it was broken?). Instead, //! this takes the route of using StackWalk64 in order to walk the stack. 
#![allow(dead_code)] use prelude::v1::*; use io::prelude::*; use dynamic_lib::DynamicLibrary; use ffi::CStr; use intrinsics; use io; use libc; use mem; use path::Path; use ptr; use str; use sync::{StaticMutex, MUTEX_INIT}; use sys_common::backtrace::*; #[allow(non_snake_case)] extern "system" { fn GetCurrentProcess() -> libc::HANDLE; fn GetCurrentThread() -> libc::HANDLE; fn RtlCaptureContext(ctx: *mut arch::CONTEXT); } type SymFromAddrFn = extern "system" fn(libc::HANDLE, u64, *mut u64, *mut SYMBOL_INFO) -> libc::BOOL; type SymInitializeFn = extern "system" fn(libc::HANDLE, *mut libc::c_void, libc::BOOL) -> libc::BOOL; type SymCleanupFn = extern "system" fn(libc::HANDLE) -> libc::BOOL; type StackWalk64Fn = extern "system" fn(libc::DWORD, libc::HANDLE, libc::HANDLE, *mut STACKFRAME64, *mut arch::CONTEXT, *mut libc::c_void, *mut libc::c_void, *mut libc::c_void, *mut libc::c_void) -> libc::BOOL; const MAX_SYM_NAME: usize = 2000; const IMAGE_FILE_MACHINE_I386: libc::DWORD = 0x014c; const IMAGE_FILE_MACHINE_IA64: libc::DWORD = 0x0200; const IMAGE_FILE_MACHINE_AMD64: libc::DWORD = 0x8664; #[repr(C)] struct SYMBOL_INFO { SizeOfStruct: libc::c_ulong, TypeIndex: libc::c_ulong, Reserved: [u64; 2], Index: libc::c_ulong, Size: libc::c_ulong, ModBase: u64, Flags: libc::c_ulong, Value: u64, Address: u64, Register: libc::c_ulong, Scope: libc::c_ulong, Tag: libc::c_ulong, NameLen: libc::c_ulong, MaxNameLen: libc::c_ulong, // note that windows has this as 1, but it basically just means that // the name is inline at the end of the struct. For us, we just bump // the struct size up to MAX_SYM_NAME. Name: [libc::c_char; MAX_SYM_NAME], } #[repr(C)] enum ADDRESS_MODE { AddrMode1616, AddrMode1632, AddrModeReal, AddrModeFlat, } struct
{ Offset: u64, Segment: u16, Mode: ADDRESS_MODE, } pub struct STACKFRAME64 { AddrPC: ADDRESS64, AddrReturn: ADDRESS64, AddrFrame: ADDRESS64, AddrStack: ADDRESS64, AddrBStore: ADDRESS64, FuncTableEntry: *mut libc::c_void, Params: [u64; 4], Far: libc::BOOL, Virtual: libc::BOOL, Reserved: [u64; 3], KdHelp: KDHELP64, } struct KDHELP64 { Thread: u64, ThCallbackStack: libc::DWORD, ThCallbackBStore: libc::DWORD, NextCallback: libc::DWORD, FramePointer: libc::DWORD, KiCallUserMode: u64, KeUserCallbackDispatcher: u64, SystemRangeStart: u64, KiUserExceptionDispatcher: u64, StackBase: u64, StackLimit: u64, Reserved: [u64; 5], } #[cfg(target_arch = "x86")] mod arch { use libc; const MAXIMUM_SUPPORTED_EXTENSION: usize = 512; #[repr(C)] pub struct CONTEXT { ContextFlags: libc::DWORD, Dr0: libc::DWORD, Dr1: libc::DWORD, Dr2: libc::DWORD, Dr3: libc::DWORD, Dr6: libc::DWORD, Dr7: libc::DWORD, FloatSave: FLOATING_SAVE_AREA, SegGs: libc::DWORD, SegFs: libc::DWORD, SegEs: libc::DWORD, SegDs: libc::DWORD, Edi: libc::DWORD, Esi: libc::DWORD, Ebx: libc::DWORD, Edx: libc::DWORD, Ecx: libc::DWORD, Eax: libc::DWORD, Ebp: libc::DWORD, Eip: libc::DWORD, SegCs: libc::DWORD, EFlags: libc::DWORD, Esp: libc::DWORD, SegSs: libc::DWORD, ExtendedRegisters: [u8; MAXIMUM_SUPPORTED_EXTENSION], } #[repr(C)] pub struct FLOATING_SAVE_AREA { ControlWord: libc::DWORD, StatusWord: libc::DWORD, TagWord: libc::DWORD, ErrorOffset: libc::DWORD, ErrorSelector: libc::DWORD, DataOffset: libc::DWORD, DataSelector: libc::DWORD, RegisterArea: [u8; 80], Cr0NpxState: libc::DWORD, } pub fn init_frame(frame: &mut super::STACKFRAME64, ctx: &CONTEXT) -> libc::DWORD { frame.AddrPC.Offset = ctx.Eip as u64; frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrStack.Offset = ctx.Esp as u64; frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrFrame.Offset = ctx.Ebp as u64; frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat; super::IMAGE_FILE_MACHINE_I386 } } #[cfg(target_arch = "x86_64")] mod 
arch { use libc::{c_longlong, c_ulonglong}; use libc::types::os::arch::extra::{WORD, DWORD, DWORDLONG}; use simd; #[repr(C)] pub struct CONTEXT { _align_hack: [simd::u64x2; 0], // FIXME align on 16-byte P1Home: DWORDLONG, P2Home: DWORDLONG, P3Home: DWORDLONG, P4Home: DWORDLONG, P5Home: DWORDLONG, P6Home: DWORDLONG, ContextFlags: DWORD, MxCsr: DWORD, SegCs: WORD, SegDs: WORD, SegEs: WORD, SegFs: WORD, SegGs: WORD, SegSs: WORD, EFlags: DWORD, Dr0: DWORDLONG, Dr1: DWORDLONG, Dr2: DWORDLONG, Dr3: DWORDLONG, Dr6: DWORDLONG, Dr7: DWORDLONG, Rax: DWORDLONG, Rcx: DWORDLONG, Rdx: DWORDLONG, Rbx: DWORDLONG, Rsp: DWORDLONG, Rbp: DWORDLONG, Rsi: DWORDLONG, Rdi: DWORDLONG, R8: DWORDLONG, R9: DWORDLONG, R10: DWORDLONG, R11: DWORDLONG, R12: DWORDLONG, R13: DWORDLONG, R14: DWORDLONG, R15: DWORDLONG, Rip: DWORDLONG, FltSave: FLOATING_SAVE_AREA, VectorRegister: [M128A; 26], VectorControl: DWORDLONG, DebugControl: DWORDLONG, LastBranchToRip: DWORDLONG, LastBranchFromRip: DWORDLONG, LastExceptionToRip: DWORDLONG, LastExceptionFromRip: DWORDLONG, } #[repr(C)] pub struct M128A { _align_hack: [simd::u64x2; 0], // FIXME align on 16-byte Low: c_ulonglong, High: c_longlong } #[repr(C)] pub struct FLOATING_SAVE_AREA { _align_hack: [simd::u64x2; 0], // FIXME align on 16-byte _Dummy: [u8; 512] // FIXME: Fill this out } pub fn init_frame(frame: &mut super::STACKFRAME64, ctx: &CONTEXT) -> DWORD { frame.AddrPC.Offset = ctx.Rip as u64; frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrStack.Offset = ctx.Rsp as u64; frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrFrame.Offset = ctx.Rbp as u64; frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat; super::IMAGE_FILE_MACHINE_AMD64 } } struct Cleanup { handle: libc::HANDLE, SymCleanup: SymCleanupFn, } impl Drop for Cleanup { fn drop(&mut self) { (self.SymCleanup)(self.handle); } } pub fn write(w: &mut Write) -> io::Result<()> { // According to windows documentation, all dbghelp functions are // single-threaded. 
static LOCK: StaticMutex = MUTEX_INIT; let _g = LOCK.lock(); // Open up dbghelp.dll, we don't link to it explicitly because it can't // always be found. Additionally, it's nice having fewer dependencies. let path = Path::new("dbghelp.dll"); let lib = match DynamicLibrary::open(Some(&path)) { Ok(lib) => lib, Err(..) => return Ok(()), }; macro_rules! sym{ ($e:expr, $t:ident) => (unsafe { match lib.symbol($e) { Ok(f) => mem::transmute::<*mut u8, $t>(f), Err(..) => return Ok(()) } }) } // Fetch the symbols necessary from dbghelp.dll let SymFromAddr = sym!("SymFromAddr", SymFromAddrFn); let SymInitialize = sym!("SymInitialize", SymInitializeFn); let SymCleanup = sym!("SymCleanup", SymCleanupFn); let StackWalk64 = sym!("StackWalk64", StackWalk64Fn); // Allocate necessary structures for doing the stack walk let process = unsafe { GetCurrentProcess() }; let thread = unsafe { GetCurrentThread() }; let mut context: arch::CONTEXT = unsafe { intrinsics::init() }; unsafe { RtlCaptureContext(&mut context); } let mut frame: STACKFRAME64 = unsafe { intrinsics::init() }; let image = arch::init_frame(&mut frame, &context); // Initialize this process's symbols let ret = SymInitialize(process, ptr::null_mut(), libc::TRUE); if ret != libc::TRUE { return Ok(()) } let _c = Cleanup { handle: process, SymCleanup: SymCleanup }; // And now that we're done with all the setup, do the stack walking! let mut i = 0; try!(write!(w, "stack backtrace:\n")); while StackWalk64(image, process, thread, &mut frame, &mut context, ptr::null_mut(), ptr::null_mut(), ptr::null_mut(), ptr::null_mut()) == libc::TRUE{ let addr = frame.AddrPC.Offset; if addr == frame.AddrReturn.Offset || addr == 0 || frame.AddrReturn.Offset == 0 { break } i += 1; try!(write!(w, " {:2}: {:#2$x}", i, addr, HEX_WIDTH)); let mut info: SYMBOL_INFO = unsafe { intrinsics::init() }; info.MaxNameLen = MAX_SYM_NAME as libc::c_ulong; // the struct size in C. 
the value is different to // `size_of::<SYMBOL_INFO>() - MAX_SYM_NAME + 1` (== 81) // due to struct alignment. info.SizeOfStruct = 88; let mut displacement = 0u64; let ret = SymFromAddr(process, addr as u64, &mut displacement, &mut info); if ret == libc::TRUE { try!(write!(w, " - ")); let ptr = info.Name.as_ptr() as *const libc::c_char; let bytes = unsafe { CStr::from_ptr(ptr).to_bytes() }; match str::from_utf8(bytes) { Ok(s) => try!(demangle(w, s)), Err(..) => try!(w.write_all(&bytes[..bytes.len()-1])), } } try!(w.write_all(&['\n' as u8])); } Ok(()) }
ADDRESS64
identifier_name
backtrace.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! As always, windows has something very different than unix, we mainly want //! to avoid having to depend too much on libunwind for windows. //! //! If you google around, you'll find a fair bit of references to built-in //! functions to get backtraces on windows. It turns out that most of these are //! in an external library called dbghelp. I was unable to find this library //! via `-ldbghelp`, but it is apparently normal to do the `dlopen` equivalent //! of it. //! //! You'll also find that there's a function called CaptureStackBackTrace //! mentioned frequently (which is also easy to use), but sadly I didn't have a //! copy of that function in my mingw install (maybe it was broken?). Instead, //! this takes the route of using StackWalk64 in order to walk the stack. #![allow(dead_code)] use prelude::v1::*; use io::prelude::*; use dynamic_lib::DynamicLibrary; use ffi::CStr; use intrinsics; use io; use libc; use mem; use path::Path; use ptr; use str; use sync::{StaticMutex, MUTEX_INIT}; use sys_common::backtrace::*; #[allow(non_snake_case)] extern "system" { fn GetCurrentProcess() -> libc::HANDLE; fn GetCurrentThread() -> libc::HANDLE; fn RtlCaptureContext(ctx: *mut arch::CONTEXT); } type SymFromAddrFn = extern "system" fn(libc::HANDLE, u64, *mut u64,
type SymInitializeFn = extern "system" fn(libc::HANDLE, *mut libc::c_void, libc::BOOL) -> libc::BOOL; type SymCleanupFn = extern "system" fn(libc::HANDLE) -> libc::BOOL; type StackWalk64Fn = extern "system" fn(libc::DWORD, libc::HANDLE, libc::HANDLE, *mut STACKFRAME64, *mut arch::CONTEXT, *mut libc::c_void, *mut libc::c_void, *mut libc::c_void, *mut libc::c_void) -> libc::BOOL; const MAX_SYM_NAME: usize = 2000; const IMAGE_FILE_MACHINE_I386: libc::DWORD = 0x014c; const IMAGE_FILE_MACHINE_IA64: libc::DWORD = 0x0200; const IMAGE_FILE_MACHINE_AMD64: libc::DWORD = 0x8664; #[repr(C)] struct SYMBOL_INFO { SizeOfStruct: libc::c_ulong, TypeIndex: libc::c_ulong, Reserved: [u64; 2], Index: libc::c_ulong, Size: libc::c_ulong, ModBase: u64, Flags: libc::c_ulong, Value: u64, Address: u64, Register: libc::c_ulong, Scope: libc::c_ulong, Tag: libc::c_ulong, NameLen: libc::c_ulong, MaxNameLen: libc::c_ulong, // note that windows has this as 1, but it basically just means that // the name is inline at the end of the struct. For us, we just bump // the struct size up to MAX_SYM_NAME. 
Name: [libc::c_char; MAX_SYM_NAME], } #[repr(C)] enum ADDRESS_MODE { AddrMode1616, AddrMode1632, AddrModeReal, AddrModeFlat, } struct ADDRESS64 { Offset: u64, Segment: u16, Mode: ADDRESS_MODE, } pub struct STACKFRAME64 { AddrPC: ADDRESS64, AddrReturn: ADDRESS64, AddrFrame: ADDRESS64, AddrStack: ADDRESS64, AddrBStore: ADDRESS64, FuncTableEntry: *mut libc::c_void, Params: [u64; 4], Far: libc::BOOL, Virtual: libc::BOOL, Reserved: [u64; 3], KdHelp: KDHELP64, } struct KDHELP64 { Thread: u64, ThCallbackStack: libc::DWORD, ThCallbackBStore: libc::DWORD, NextCallback: libc::DWORD, FramePointer: libc::DWORD, KiCallUserMode: u64, KeUserCallbackDispatcher: u64, SystemRangeStart: u64, KiUserExceptionDispatcher: u64, StackBase: u64, StackLimit: u64, Reserved: [u64; 5], } #[cfg(target_arch = "x86")] mod arch { use libc; const MAXIMUM_SUPPORTED_EXTENSION: usize = 512; #[repr(C)] pub struct CONTEXT { ContextFlags: libc::DWORD, Dr0: libc::DWORD, Dr1: libc::DWORD, Dr2: libc::DWORD, Dr3: libc::DWORD, Dr6: libc::DWORD, Dr7: libc::DWORD, FloatSave: FLOATING_SAVE_AREA, SegGs: libc::DWORD, SegFs: libc::DWORD, SegEs: libc::DWORD, SegDs: libc::DWORD, Edi: libc::DWORD, Esi: libc::DWORD, Ebx: libc::DWORD, Edx: libc::DWORD, Ecx: libc::DWORD, Eax: libc::DWORD, Ebp: libc::DWORD, Eip: libc::DWORD, SegCs: libc::DWORD, EFlags: libc::DWORD, Esp: libc::DWORD, SegSs: libc::DWORD, ExtendedRegisters: [u8; MAXIMUM_SUPPORTED_EXTENSION], } #[repr(C)] pub struct FLOATING_SAVE_AREA { ControlWord: libc::DWORD, StatusWord: libc::DWORD, TagWord: libc::DWORD, ErrorOffset: libc::DWORD, ErrorSelector: libc::DWORD, DataOffset: libc::DWORD, DataSelector: libc::DWORD, RegisterArea: [u8; 80], Cr0NpxState: libc::DWORD, } pub fn init_frame(frame: &mut super::STACKFRAME64, ctx: &CONTEXT) -> libc::DWORD { frame.AddrPC.Offset = ctx.Eip as u64; frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrStack.Offset = ctx.Esp as u64; frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrFrame.Offset = 
ctx.Ebp as u64; frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat; super::IMAGE_FILE_MACHINE_I386 } } #[cfg(target_arch = "x86_64")] mod arch { use libc::{c_longlong, c_ulonglong}; use libc::types::os::arch::extra::{WORD, DWORD, DWORDLONG}; use simd; #[repr(C)] pub struct CONTEXT { _align_hack: [simd::u64x2; 0], // FIXME align on 16-byte P1Home: DWORDLONG, P2Home: DWORDLONG, P3Home: DWORDLONG, P4Home: DWORDLONG, P5Home: DWORDLONG, P6Home: DWORDLONG, ContextFlags: DWORD, MxCsr: DWORD, SegCs: WORD, SegDs: WORD, SegEs: WORD, SegFs: WORD, SegGs: WORD, SegSs: WORD, EFlags: DWORD, Dr0: DWORDLONG, Dr1: DWORDLONG, Dr2: DWORDLONG, Dr3: DWORDLONG, Dr6: DWORDLONG, Dr7: DWORDLONG, Rax: DWORDLONG, Rcx: DWORDLONG, Rdx: DWORDLONG, Rbx: DWORDLONG, Rsp: DWORDLONG, Rbp: DWORDLONG, Rsi: DWORDLONG, Rdi: DWORDLONG, R8: DWORDLONG, R9: DWORDLONG, R10: DWORDLONG, R11: DWORDLONG, R12: DWORDLONG, R13: DWORDLONG, R14: DWORDLONG, R15: DWORDLONG, Rip: DWORDLONG, FltSave: FLOATING_SAVE_AREA, VectorRegister: [M128A; 26], VectorControl: DWORDLONG, DebugControl: DWORDLONG, LastBranchToRip: DWORDLONG, LastBranchFromRip: DWORDLONG, LastExceptionToRip: DWORDLONG, LastExceptionFromRip: DWORDLONG, } #[repr(C)] pub struct M128A { _align_hack: [simd::u64x2; 0], // FIXME align on 16-byte Low: c_ulonglong, High: c_longlong } #[repr(C)] pub struct FLOATING_SAVE_AREA { _align_hack: [simd::u64x2; 0], // FIXME align on 16-byte _Dummy: [u8; 512] // FIXME: Fill this out } pub fn init_frame(frame: &mut super::STACKFRAME64, ctx: &CONTEXT) -> DWORD { frame.AddrPC.Offset = ctx.Rip as u64; frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrStack.Offset = ctx.Rsp as u64; frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrFrame.Offset = ctx.Rbp as u64; frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat; super::IMAGE_FILE_MACHINE_AMD64 } } struct Cleanup { handle: libc::HANDLE, SymCleanup: SymCleanupFn, } impl Drop for Cleanup { fn drop(&mut self) { 
(self.SymCleanup)(self.handle); } } pub fn write(w: &mut Write) -> io::Result<()> { // According to windows documentation, all dbghelp functions are // single-threaded. static LOCK: StaticMutex = MUTEX_INIT; let _g = LOCK.lock(); // Open up dbghelp.dll, we don't link to it explicitly because it can't // always be found. Additionally, it's nice having fewer dependencies. let path = Path::new("dbghelp.dll"); let lib = match DynamicLibrary::open(Some(&path)) { Ok(lib) => lib, Err(..) => return Ok(()), }; macro_rules! sym{ ($e:expr, $t:ident) => (unsafe { match lib.symbol($e) { Ok(f) => mem::transmute::<*mut u8, $t>(f), Err(..) => return Ok(()) } }) } // Fetch the symbols necessary from dbghelp.dll let SymFromAddr = sym!("SymFromAddr", SymFromAddrFn); let SymInitialize = sym!("SymInitialize", SymInitializeFn); let SymCleanup = sym!("SymCleanup", SymCleanupFn); let StackWalk64 = sym!("StackWalk64", StackWalk64Fn); // Allocate necessary structures for doing the stack walk let process = unsafe { GetCurrentProcess() }; let thread = unsafe { GetCurrentThread() }; let mut context: arch::CONTEXT = unsafe { intrinsics::init() }; unsafe { RtlCaptureContext(&mut context); } let mut frame: STACKFRAME64 = unsafe { intrinsics::init() }; let image = arch::init_frame(&mut frame, &context); // Initialize this process's symbols let ret = SymInitialize(process, ptr::null_mut(), libc::TRUE); if ret != libc::TRUE { return Ok(()) } let _c = Cleanup { handle: process, SymCleanup: SymCleanup }; // And now that we're done with all the setup, do the stack walking! 
let mut i = 0; try!(write!(w, "stack backtrace:\n")); while StackWalk64(image, process, thread, &mut frame, &mut context, ptr::null_mut(), ptr::null_mut(), ptr::null_mut(), ptr::null_mut()) == libc::TRUE{ let addr = frame.AddrPC.Offset; if addr == frame.AddrReturn.Offset || addr == 0 || frame.AddrReturn.Offset == 0 { break } i += 1; try!(write!(w, " {:2}: {:#2$x}", i, addr, HEX_WIDTH)); let mut info: SYMBOL_INFO = unsafe { intrinsics::init() }; info.MaxNameLen = MAX_SYM_NAME as libc::c_ulong; // the struct size in C. the value is different to // `size_of::<SYMBOL_INFO>() - MAX_SYM_NAME + 1` (== 81) // due to struct alignment. info.SizeOfStruct = 88; let mut displacement = 0u64; let ret = SymFromAddr(process, addr as u64, &mut displacement, &mut info); if ret == libc::TRUE { try!(write!(w, " - ")); let ptr = info.Name.as_ptr() as *const libc::c_char; let bytes = unsafe { CStr::from_ptr(ptr).to_bytes() }; match str::from_utf8(bytes) { Ok(s) => try!(demangle(w, s)), Err(..) => try!(w.write_all(&bytes[..bytes.len()-1])), } } try!(w.write_all(&['\n' as u8])); } Ok(()) }
*mut SYMBOL_INFO) -> libc::BOOL;
random_line_split
backtrace.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! As always, windows has something very different than unix, we mainly want //! to avoid having to depend too much on libunwind for windows. //! //! If you google around, you'll find a fair bit of references to built-in //! functions to get backtraces on windows. It turns out that most of these are //! in an external library called dbghelp. I was unable to find this library //! via `-ldbghelp`, but it is apparently normal to do the `dlopen` equivalent //! of it. //! //! You'll also find that there's a function called CaptureStackBackTrace //! mentioned frequently (which is also easy to use), but sadly I didn't have a //! copy of that function in my mingw install (maybe it was broken?). Instead, //! this takes the route of using StackWalk64 in order to walk the stack. 
#![allow(dead_code)] use prelude::v1::*; use io::prelude::*; use dynamic_lib::DynamicLibrary; use ffi::CStr; use intrinsics; use io; use libc; use mem; use path::Path; use ptr; use str; use sync::{StaticMutex, MUTEX_INIT}; use sys_common::backtrace::*; #[allow(non_snake_case)] extern "system" { fn GetCurrentProcess() -> libc::HANDLE; fn GetCurrentThread() -> libc::HANDLE; fn RtlCaptureContext(ctx: *mut arch::CONTEXT); } type SymFromAddrFn = extern "system" fn(libc::HANDLE, u64, *mut u64, *mut SYMBOL_INFO) -> libc::BOOL; type SymInitializeFn = extern "system" fn(libc::HANDLE, *mut libc::c_void, libc::BOOL) -> libc::BOOL; type SymCleanupFn = extern "system" fn(libc::HANDLE) -> libc::BOOL; type StackWalk64Fn = extern "system" fn(libc::DWORD, libc::HANDLE, libc::HANDLE, *mut STACKFRAME64, *mut arch::CONTEXT, *mut libc::c_void, *mut libc::c_void, *mut libc::c_void, *mut libc::c_void) -> libc::BOOL; const MAX_SYM_NAME: usize = 2000; const IMAGE_FILE_MACHINE_I386: libc::DWORD = 0x014c; const IMAGE_FILE_MACHINE_IA64: libc::DWORD = 0x0200; const IMAGE_FILE_MACHINE_AMD64: libc::DWORD = 0x8664; #[repr(C)] struct SYMBOL_INFO { SizeOfStruct: libc::c_ulong, TypeIndex: libc::c_ulong, Reserved: [u64; 2], Index: libc::c_ulong, Size: libc::c_ulong, ModBase: u64, Flags: libc::c_ulong, Value: u64, Address: u64, Register: libc::c_ulong, Scope: libc::c_ulong, Tag: libc::c_ulong, NameLen: libc::c_ulong, MaxNameLen: libc::c_ulong, // note that windows has this as 1, but it basically just means that // the name is inline at the end of the struct. For us, we just bump // the struct size up to MAX_SYM_NAME. 
Name: [libc::c_char; MAX_SYM_NAME], } #[repr(C)] enum ADDRESS_MODE { AddrMode1616, AddrMode1632, AddrModeReal, AddrModeFlat, } struct ADDRESS64 { Offset: u64, Segment: u16, Mode: ADDRESS_MODE, } pub struct STACKFRAME64 { AddrPC: ADDRESS64, AddrReturn: ADDRESS64, AddrFrame: ADDRESS64, AddrStack: ADDRESS64, AddrBStore: ADDRESS64, FuncTableEntry: *mut libc::c_void, Params: [u64; 4], Far: libc::BOOL, Virtual: libc::BOOL, Reserved: [u64; 3], KdHelp: KDHELP64, } struct KDHELP64 { Thread: u64, ThCallbackStack: libc::DWORD, ThCallbackBStore: libc::DWORD, NextCallback: libc::DWORD, FramePointer: libc::DWORD, KiCallUserMode: u64, KeUserCallbackDispatcher: u64, SystemRangeStart: u64, KiUserExceptionDispatcher: u64, StackBase: u64, StackLimit: u64, Reserved: [u64; 5], } #[cfg(target_arch = "x86")] mod arch { use libc; const MAXIMUM_SUPPORTED_EXTENSION: usize = 512; #[repr(C)] pub struct CONTEXT { ContextFlags: libc::DWORD, Dr0: libc::DWORD, Dr1: libc::DWORD, Dr2: libc::DWORD, Dr3: libc::DWORD, Dr6: libc::DWORD, Dr7: libc::DWORD, FloatSave: FLOATING_SAVE_AREA, SegGs: libc::DWORD, SegFs: libc::DWORD, SegEs: libc::DWORD, SegDs: libc::DWORD, Edi: libc::DWORD, Esi: libc::DWORD, Ebx: libc::DWORD, Edx: libc::DWORD, Ecx: libc::DWORD, Eax: libc::DWORD, Ebp: libc::DWORD, Eip: libc::DWORD, SegCs: libc::DWORD, EFlags: libc::DWORD, Esp: libc::DWORD, SegSs: libc::DWORD, ExtendedRegisters: [u8; MAXIMUM_SUPPORTED_EXTENSION], } #[repr(C)] pub struct FLOATING_SAVE_AREA { ControlWord: libc::DWORD, StatusWord: libc::DWORD, TagWord: libc::DWORD, ErrorOffset: libc::DWORD, ErrorSelector: libc::DWORD, DataOffset: libc::DWORD, DataSelector: libc::DWORD, RegisterArea: [u8; 80], Cr0NpxState: libc::DWORD, } pub fn init_frame(frame: &mut super::STACKFRAME64, ctx: &CONTEXT) -> libc::DWORD { frame.AddrPC.Offset = ctx.Eip as u64; frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrStack.Offset = ctx.Esp as u64; frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrFrame.Offset = 
ctx.Ebp as u64; frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat; super::IMAGE_FILE_MACHINE_I386 } } #[cfg(target_arch = "x86_64")] mod arch { use libc::{c_longlong, c_ulonglong}; use libc::types::os::arch::extra::{WORD, DWORD, DWORDLONG}; use simd; #[repr(C)] pub struct CONTEXT { _align_hack: [simd::u64x2; 0], // FIXME align on 16-byte P1Home: DWORDLONG, P2Home: DWORDLONG, P3Home: DWORDLONG, P4Home: DWORDLONG, P5Home: DWORDLONG, P6Home: DWORDLONG, ContextFlags: DWORD, MxCsr: DWORD, SegCs: WORD, SegDs: WORD, SegEs: WORD, SegFs: WORD, SegGs: WORD, SegSs: WORD, EFlags: DWORD, Dr0: DWORDLONG, Dr1: DWORDLONG, Dr2: DWORDLONG, Dr3: DWORDLONG, Dr6: DWORDLONG, Dr7: DWORDLONG, Rax: DWORDLONG, Rcx: DWORDLONG, Rdx: DWORDLONG, Rbx: DWORDLONG, Rsp: DWORDLONG, Rbp: DWORDLONG, Rsi: DWORDLONG, Rdi: DWORDLONG, R8: DWORDLONG, R9: DWORDLONG, R10: DWORDLONG, R11: DWORDLONG, R12: DWORDLONG, R13: DWORDLONG, R14: DWORDLONG, R15: DWORDLONG, Rip: DWORDLONG, FltSave: FLOATING_SAVE_AREA, VectorRegister: [M128A; 26], VectorControl: DWORDLONG, DebugControl: DWORDLONG, LastBranchToRip: DWORDLONG, LastBranchFromRip: DWORDLONG, LastExceptionToRip: DWORDLONG, LastExceptionFromRip: DWORDLONG, } #[repr(C)] pub struct M128A { _align_hack: [simd::u64x2; 0], // FIXME align on 16-byte Low: c_ulonglong, High: c_longlong } #[repr(C)] pub struct FLOATING_SAVE_AREA { _align_hack: [simd::u64x2; 0], // FIXME align on 16-byte _Dummy: [u8; 512] // FIXME: Fill this out } pub fn init_frame(frame: &mut super::STACKFRAME64, ctx: &CONTEXT) -> DWORD { frame.AddrPC.Offset = ctx.Rip as u64; frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrStack.Offset = ctx.Rsp as u64; frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrFrame.Offset = ctx.Rbp as u64; frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat; super::IMAGE_FILE_MACHINE_AMD64 } } struct Cleanup { handle: libc::HANDLE, SymCleanup: SymCleanupFn, } impl Drop for Cleanup { fn drop(&mut self) { 
(self.SymCleanup)(self.handle); } } pub fn write(w: &mut Write) -> io::Result<()> { // According to windows documentation, all dbghelp functions are // single-threaded. static LOCK: StaticMutex = MUTEX_INIT; let _g = LOCK.lock(); // Open up dbghelp.dll, we don't link to it explicitly because it can't // always be found. Additionally, it's nice having fewer dependencies. let path = Path::new("dbghelp.dll"); let lib = match DynamicLibrary::open(Some(&path)) { Ok(lib) => lib, Err(..) => return Ok(()), }; macro_rules! sym{ ($e:expr, $t:ident) => (unsafe { match lib.symbol($e) { Ok(f) => mem::transmute::<*mut u8, $t>(f), Err(..) => return Ok(()) } }) } // Fetch the symbols necessary from dbghelp.dll let SymFromAddr = sym!("SymFromAddr", SymFromAddrFn); let SymInitialize = sym!("SymInitialize", SymInitializeFn); let SymCleanup = sym!("SymCleanup", SymCleanupFn); let StackWalk64 = sym!("StackWalk64", StackWalk64Fn); // Allocate necessary structures for doing the stack walk let process = unsafe { GetCurrentProcess() }; let thread = unsafe { GetCurrentThread() }; let mut context: arch::CONTEXT = unsafe { intrinsics::init() }; unsafe { RtlCaptureContext(&mut context); } let mut frame: STACKFRAME64 = unsafe { intrinsics::init() }; let image = arch::init_frame(&mut frame, &context); // Initialize this process's symbols let ret = SymInitialize(process, ptr::null_mut(), libc::TRUE); if ret != libc::TRUE { return Ok(()) } let _c = Cleanup { handle: process, SymCleanup: SymCleanup }; // And now that we're done with all the setup, do the stack walking! let mut i = 0; try!(write!(w, "stack backtrace:\n")); while StackWalk64(image, process, thread, &mut frame, &mut context, ptr::null_mut(), ptr::null_mut(), ptr::null_mut(), ptr::null_mut()) == libc::TRUE{ let addr = frame.AddrPC.Offset; if addr == frame.AddrReturn.Offset || addr == 0 || frame.AddrReturn.Offset == 0
i += 1; try!(write!(w, " {:2}: {:#2$x}", i, addr, HEX_WIDTH)); let mut info: SYMBOL_INFO = unsafe { intrinsics::init() }; info.MaxNameLen = MAX_SYM_NAME as libc::c_ulong; // the struct size in C. the value is different to // `size_of::<SYMBOL_INFO>() - MAX_SYM_NAME + 1` (== 81) // due to struct alignment. info.SizeOfStruct = 88; let mut displacement = 0u64; let ret = SymFromAddr(process, addr as u64, &mut displacement, &mut info); if ret == libc::TRUE { try!(write!(w, " - ")); let ptr = info.Name.as_ptr() as *const libc::c_char; let bytes = unsafe { CStr::from_ptr(ptr).to_bytes() }; match str::from_utf8(bytes) { Ok(s) => try!(demangle(w, s)), Err(..) => try!(w.write_all(&bytes[..bytes.len()-1])), } } try!(w.write_all(&['\n' as u8])); } Ok(()) }
{ break }
conditional_block
platform_types.rs
#![deny(unused)] use macros::{ d, fmt_debug, fmt_display, ord, u, }; use std::{ time::{Duration, Instant}, path::PathBuf }; pub use vec1::{vec1, Vec1}; pub use panic_safe_rope::{BorrowRope, Rope, RopeSlice, RopeSliceTrait, ByteIndex}; pub use text_pos::*; pub mod floating_point; pub mod screen_positioning; pub use screen_positioning::{ CharDim, ScreenSpaceRect, ScreenSpaceWH, ScreenSpaceXY, ScrollXY, SizeDependents, TextBoxXY, TextBoxSpaceXY, TextSpaceXY, TextSpaceXYWH, ssr, sswh, ssxy, }; pub mod spans; pub use spans::{ Spans, SpanView, SpanKind, }; pub use abs; pub use f32_0_1::{F32_0_1, f32_0_1}; pub use pos_f32::{PosF32, pos_f32}; pub use non_neg_f32::{NonNegF32, non_neg_f32}; pub use move_mod::Move; pub use g_i; pub use g_i::{SelectionAdjustment, SelectionMove, SelectableVec1}; #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum ReplaceOrAdd { Replace, Add, } #[derive(Clone, Debug, PartialEq)] pub enum Input { None, Quit, /// Escape Menus, etc., not quitting the program. Escape, Insert(char), Delete, DeleteLines, ResetScroll, ScrollVertically(abs::Vector), ScrollHorizontally(abs::Vector), SetSizeDependents(Box<SizeDependents>), MoveAllCursors(Move), ExtendSelectionForAllCursors(Move), SelectAll, SetCursor(TextBoxSpaceXY, ReplaceOrAdd), DragCursors(TextBoxSpaceXY), SelectCharTypeGrouping(TextBoxSpaceXY, ReplaceOrAdd), ExtendSelectionWithSearch, ExtendSelectionMaximallyWithSearch, SavedAs(g_i::Index, PathBuf), Undo, Redo, Cut, Copy, Paste(Option<String>), InsertNumbersAtCursors, AddOrSelectBuffer(BufferName, String), AddOrSelectBufferThenGoTo(BufferName, String, Position), NewScratchBuffer(Option<String>), TabIn, TabOut, StripTrailingWhitespace, AdjustBufferSelection(SelectionAdjustment), NextLanguage, PreviousLanguage, ToggleSingleLineComments, ToggleCase, AutoIndentSelection, DuplicateLines, SelectBuffer(BufferId), OpenOrSelectBuffer(PathBuf), CloseBuffer(g_i::Index), SetMenuMode(MenuMode), SubmitForm, ShowError(String), } d!(for Input : Input::None); 
#[derive(Clone, Copy, Default, Debug, Hash, PartialEq, Eq)] pub struct BufferId { pub kind: BufferIdKind, pub index: g_i::Index, } ord!(for BufferId: id, other in { id.kind.cmp(&other.kind).then_with(|| id.index.cmp(&other.index)) }); #[macro_export] macro_rules! b_id { // // Creation // ($kind: expr) => { BufferId { kind: $kind, index: d!(), } }; ($kind: expr, $index: expr) => { BufferId { kind: $kind, index: $index, } }; } #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] pub enum BufferIdKind { /// Used to indicate that the keyboard is focused on a non-buffer. None, /// Indicates a buffer repesenting an open file or an in memory scratch file. /// Almost all buffers are `Text` buffers. Text, Find, Replace, FileSwitcher, GoToPosition, } d!(for BufferIdKind: BufferIdKind::Text); impl From<&BufferIdKind> for u8 { fn from(kind: &BufferIdKind) -> Self { use BufferIdKind::*; match kind { None => 0, Text => 1, Find => 2, Replace => 3, FileSwitcher => 4, GoToPosition => 5, } } } ord!(for BufferIdKind: kind, other in { let k: u8 = kind.into(); let o: u8 = other.into(); k.cmp(&o) }); #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum HighlightKind { User, Result, CurrentResult, } d!(for HighlightKind: HighlightKind::User); #[derive(Clone, Debug, PartialEq, Eq)] pub struct Highlight { pub min: Position, pub max: Position, pub kind: HighlightKind, } impl Highlight { #[must_use] pub fn new((p1, p2): (Position, Position), kind: HighlightKind) -> Self { Highlight { min: std::cmp::min(p1, p2), max: std::cmp::max(p1, p2), kind, } } #[must_use] pub fn get(&self) -> (Position, Position) { (self.min, self.max) } } #[macro_export] macro_rules! 
highlight { (l $min_line:literal o $min_offset:literal l $max_line:literal o $max_offset:literal ) => { Highlight::new( ( Position { line: $min_line, offset: CharOffset($min_offset), }, Position { line: $max_line, offset: CharOffset($max_offset), }, ), d!() ) }; (l $min_line:literal o $min_offset:literal l $max_line:literal o max ) => { highlight!(l $min_line o $min_offset l $max_line o 0xFFFF_FFFF__FFFF_FFFF) }; } pub fn push_highlights<O: Into<Option<Position>>>( highlights: &mut Vec<Highlight>, position: Position, highlight_position: O, kind: HighlightKind, ) { match highlight_position.into() { Some(h) if h != position => { let min = std::cmp::min(position, h); let max = std::cmp::max(position, h); if min.line == max.line { highlights.push(Highlight::new((min, max), kind)); return; } // This early return is merely an optimization from three rectangles to two. // TODO Is this optimization actually worth it? The sticky cursor offset does make this // more likely than it would otherwise be. 
if min.offset != 0 && min.offset == max.offset { // [|_______________________|] // ^min_middle max_middle^ let min_middle = min.line + if min.offset == 0 { 0 } else { 1 }; // Since We know the lines must be different, we know `max.line > 0` let max_middle = max.line - 1; let offset = min.offset; highlights.push(Highlight::new( ( Position { offset, line: min.line, }, Position { offset: CharOffset::max_value(), line: max_middle, }, ), kind, )); highlights.push(Highlight::new( ( Position { offset: CharOffset(0), line: min_middle, }, Position { offset, line: max.line, }, ), kind, )); return; } if min.offset != 0 { highlights.push(Highlight::new( ( min, Position { offset: CharOffset::max_value(), ..min }, ), kind, )); } let min_middle = min.line + if min.offset == 0 { 0 } else { 1 }; // Since We know the lines must be different, we know `max.line > 0` let max_middle = max.line - 1; if min_middle <= max_middle { highlights.push(Highlight::new( ( Position { offset: CharOffset(0), line: min_middle, }, Position { offset: CharOffset::max_value(), line: max_middle, }, ), kind, )); } if max.offset != 0 { highlights.push(Highlight::new( ( Position { offset: CharOffset(0), ..max }, max, ), kind, )); } } _ => {} } } #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum BufferName { Path(PathBuf), Scratch(u32), } d!(for BufferName: BufferName::Scratch(d!())); fmt_display!(for BufferName: name in "{}", match name { BufferName::Path(p) => p .file_name() .map_or_else( || "?Unknown Path?".to_string(), |s| s.to_string_lossy().into_owned() ), BufferName::Scratch(n) => format!("*scratch {}*", n), } ); ord!(for BufferName: name, other in { use BufferName::*; use std::cmp::Ordering::*; match (name, other) { (Path(p1), Path(p2)) => { match (p1.canonicalize(), p2.canonicalize() ) { (Ok(ref cp1), Ok(ref cp2)) if cp1 == cp2 => { Equal } _ => { p1.cmp(p2) } } } (Path(_), Scratch(_)) => { Less } (Scratch(_), Path(_)) => { Greater } (Scratch(n1), Scratch(n2)) => { n1.cmp(n2) } } }); impl 
BufferName { #[must_use] pub fn get_extension_or_empty(&self) -> &str { use BufferName::*; match self { Path(p) => { p.extension() .and_then(std::ffi::OsStr::to_str) .unwrap_or("") }, Scratch(..) => "", } } #[must_use] pub fn size_in_bytes(&self) -> usize { use core::mem; // TODO Do other platforms need adjusting as well? #[cfg(target_os = "windows")] const BYTES_PER_UNIT: usize = 2; #[cfg(not(target_os = "windows"))] const BYTES_PER_UNIT: usize = 1; match self { Self::Path(p) => { mem::size_of_val(p) + p.capacity() * BYTES_PER_UNIT }, Self::Scratch(n) => mem::size_of_val(n), } } } #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub enum CursorState { None, PressedAgainstWall(Move), } d!(for CursorState: CursorState::None); fmt_debug!(for CursorState: s in "{}", match s { CursorState::None => std::borrow::Cow::Borrowed("_"), CursorState::PressedAgainstWall(r#move) => std::borrow::Cow::Owned(format!("->|({})", r#move)) }); ord!(for CursorState: state, other in { use std::cmp::Ordering::*; match (state, other) { (CursorState::None, CursorState::None) => Equal, (CursorState::None, CursorState::PressedAgainstWall(_)) => Less, (CursorState::PressedAgainstWall(_), CursorState::None) => Greater, (CursorState::PressedAgainstWall(m1), CursorState::PressedAgainstWall(m2)) => { m1.cmp(m2) } } }); #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct CursorView { pub position: Position, pub state: CursorState, } #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct StatusLineView { pub chars: String, } pub const DEFAULT_STATUS_LINE_CHARS: &str = "No buffer selected."; d!(for StatusLineView: StatusLineView {chars: DEFAULT_STATUS_LINE_CHARS.to_owned()}); #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum MenuMode { Hidden, FileSwitcher, FindReplace(FindReplaceMode), GoToPosition, } d!(for MenuMode: MenuMode::Hidden); #[derive(Clone, Debug, PartialEq)] pub enum MenuView { None, FileSwitcher(FileSwitcherView), FindReplace(FindReplaceView), GoToPosition(GoToPositionView) } 
d!(for MenuView: MenuView::None); impl MenuView { #[must_use] pub fn get_mode(&self) -> MenuMode { match self { Self::None => MenuMode::Hidden, Self::FileSwitcher(_) => MenuMode::FileSwitcher, Self::FindReplace(v) => MenuMode::FindReplace(v.mode), Self::GoToPosition(_) => MenuMode::GoToPosition, } } } #[must_use] pub fn kind_editable_during_mode(kind: BufferIdKind, menu_mode: MenuMode) -> bool { u!{MenuMode} match (kind, menu_mode) { // We want this to be true for `Text` always since it would be completely // reasonable behaviour for a different client to always show the text // buffers. (BufferIdKind::Text, _) | (BufferIdKind::Find | BufferIdKind::Replace, FindReplace(_)) | (BufferIdKind::FileSwitcher, MenuMode::FileSwitcher) | (BufferIdKind::GoToPosition, MenuMode::GoToPosition) => true, _ => { false }, } } pub type FileSwitcherResults = Vec<PathBuf>; #[derive(Clone, Default, Debug, PartialEq)] pub struct FileSwitcherView { pub search: BufferViewData, pub results: FileSwitcherResults, } #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum FindReplaceMode { CurrentFile, } d!(for FindReplaceMode: FindReplaceMode::CurrentFile); #[derive(Clone, Default, Debug, PartialEq)] pub struct FindReplaceView { pub mode: FindReplaceMode, pub find: BufferViewData, pub replace: BufferViewData, pub result_count: usize, } #[derive(Clone, Default, Debug, PartialEq)] pub struct GoToPositionView { pub go_to_position: BufferViewData, } #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum EditedTransition { ToEdited, ToUnedited, } pub type IndexedEditedTransition = (g_i::Index, EditedTransition); #[derive(Clone, Default, Debug, PartialEq, Eq)] pub struct EditedTransitions(Vec<IndexedEditedTransition>); impl EditedTransitions { pub fn push(&mut self, iet: IndexedEditedTransition) { self.0.push(iet); } pub fn clear(&mut self) { self.0.clear(); } #[must_use] pub fn len(&self) -> usize { self.0.len() } #[must_use] pub fn is_empty(&self) -> bool { self.0.is_empty() } pub fn iter(&self) -> 
impl Iterator<Item = &IndexedEditedTransition> { self.0.iter() } } impl IntoIterator for EditedTransitions { type Item = IndexedEditedTransition; type IntoIter = std::vec::IntoIter<Self::Item>; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() } } #[derive(Clone, Default, PartialEq)] pub struct BufferLabel { pub name: BufferName, /// Having an owned version of the result of `name.to_string()` simplifies /// ownership in some cases. // TODO this could be truncated to a fixed length/on the stack pub name_string: String, } fmt_debug!(collapse default for BufferLabel: me { blank_if_default!(name); blank_if_default!(name_string, me.name_string.is_empty()); }); // This could arguably be ToOwned. impl From<&BufferName> for BufferLabel { fn from(name: &BufferName) -> Self { Self { name: name.clone(), name_string: name.to_string(), } } } impl From<BufferName> for BufferLabel { fn from(name: BufferName) -> Self { let name_string = name.to_string(); Self { name, name_string, } } } #[derive(Clone, Default, Debug, PartialEq)] pub struct View { pub buffers: SelectableVec1<BufferLabel>, pub menu: MenuView, pub status_line: StatusLineView, pub current_buffer_kind: BufferIdKind, pub edited_transitions: EditedTransitions, pub stats: ViewStats, } impl View { #[must_use] /// returns the currently visible editor buffer index. pub fn current_text_index(&self) -> g_i::Index { self.buffers.current_index() } #[must_use] /// returns the currently visible editor buffer view's index and label. 
pub fn current_text_index_and_buffer_label(&self) -> (g_i::Index, &BufferLabel) { ( self.buffers.current_index(), self.buffers.get_current_element() ) } #[must_use] pub fn get_buffer_label(&self, index: g_i::Index) -> Option<&BufferLabel> { self.buffers.get(index) } #[must_use] pub fn current_buffer_id(&self) -> BufferId { b_id!( self.current_buffer_kind, self.buffers.current_index() ) } #[must_use] /// returns the selected menu's cursors if there is a menu containing a buffer /// currently visible, or the current text buffer's cursors if not. pub fn get_selected_cursors(&self) -> Option<&[CursorView]> { use BufferIdKind::*; match self.current_buffer_kind { // Seems like we never actually need to access the Text buffer // cursors here. If we want to later, then some additional restructuring // will be needed, at least according to the comment this comment // replaced. commmit `680d9507` None | Text => Option::None, Find => match &self.menu { MenuView::FindReplace(ref fr) => Some(&fr.find), _ => Option::None, }, Replace => match &self.menu { MenuView::FindReplace(ref fr) => Some(&fr.replace), _ => Option::None, }, FileSwitcher => match &self.menu {
}, GoToPosition => match &self.menu { MenuView::GoToPosition(ref gtp) => Some(&gtp.go_to_position), _ => Option::None, }, }.map(|d| &d.cursors[..]) } #[must_use] /// returns the currently visible editor buffer path if it has one. pub fn current_path(&self) -> Option<PathBuf> { u!{BufferName} match self.buffers.get_current_element().name { Path(ref p) => Some(p.clone()), Scratch(_) => None, } } } #[derive(Clone, Default, PartialEq)] pub struct BufferView { pub label: BufferLabel, pub data: BufferViewData, } fmt_debug!(collapse default for BufferView: me { blank_if_default!(label); blank_if_default!(data); }); /// The reason we keep this as a separate struct from `BufferView` is to enable /// storage of buffers without a `BufferName`. #[derive(Clone, Default, PartialEq)] pub struct BufferViewData { pub chars: String, pub scroll: ScrollXY, pub cursors: Vec<CursorView>, pub highlights: Vec<Highlight>, pub spans: Spans, } fmt_debug!(collapse default for BufferViewData: me { blank_if_default!(chars, me.chars == Rope::default()); blank_if_default!(scroll); blank_if_default!(cursors, me.cursors.is_empty()); blank_if_default!(highlights, me.highlights.is_empty()); blank_if_default!(spans, me.spans.is_empty()); }); #[macro_export] macro_rules! bvd { ($chars: expr) => {{ let mut data: BufferViewData = d!(); data.chars = $chars.into(); data }} } /// Short form of "Command". /// This is for telling the platform layer that it should do something in addition to /// rendering the view. 
#[derive(Debug, Clone, PartialEq, Eq)] pub enum Cmd { None, SetClipboard(String), LoadFile(PathBuf), MakeActiveTabVisible, } d!(for Cmd : Cmd::None); pub type UpdateAndRenderOutput = (View, Cmd); pub type UpdateAndRender = fn(Input) -> UpdateAndRenderOutput; pub type LoadBufferViewError = String; pub type LoadBufferViewsResult = Result<BufferView, LoadBufferViewError>; pub type LoadBufferViews = fn(&[BufferName]) -> Vec<LoadBufferViewsResult>; #[derive(Copy, Clone)] pub struct EditorAPI { pub update_and_render: UpdateAndRender, pub load_buffer_views: LoadBufferViews, } fmt_debug!(for EditorAPI: _ in "EditorAPI{{...}}"); pub const PARSE_TIME_SPAN_COUNT: usize = 16 - 3; #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum TimeSpan { NotStarted, Started(Instant), Ended(Duration), } impl TimeSpan { #[must_use] pub fn duration_or_default(&self) -> Duration { use TimeSpan::*; match self { Ended(duration) => *duration, NotStarted | Started(_) => d!(), } } #[must_use] pub fn start() -> Self { TimeSpan::Started(Instant::now()) } #[must_use] pub fn end_if_started(self) -> Self { if let TimeSpan::Started(started) = self { TimeSpan::Ended(Instant::now() - started) } else { self } } } d!(for TimeSpan: TimeSpan::NotStarted); #[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] pub struct ViewStats { pub latest_overall_time_span: TimeSpan, pub latest_update_time_span: TimeSpan, pub latest_render_time_span: TimeSpan, pub latest_buffer_render_time_span: TimeSpan, pub latest_status_line_time_span: TimeSpan, pub latest_menu_render_time_span: TimeSpan, // { pub latest_parse_time_spans: [TimeSpan; PARSE_TIME_SPAN_COUNT], pub current_parse_length: u8, // } pub editor_buffers_size_in_bytes: usize, } impl ViewStats { /// Since our current needs only require it, we currently expect only one layer /// of start and ends. // TODO track this and just skip the current one or set durations to 0 or // something? 
pub fn start_parse_duration_saturating(&mut self) { if (self.current_parse_length as usize) < PARSE_TIME_SPAN_COUNT { self.current_parse_length = self.current_parse_length.saturating_add(1); if let Some(index) = self.last_index() { self.latest_parse_time_spans[index] = TimeSpan::Started( Instant::now() ); } } } pub fn end_parse_duration_saturating(&mut self) { if let Some(index) = self.last_index() { if let TimeSpan::Started(instant) = self.latest_parse_time_spans[index] { self.latest_parse_time_spans[index] = TimeSpan::Ended( Instant::now() - instant ); } } } #[must_use] fn last_index(&self) -> Option<usize> { if self.current_parse_length > 0 && (self.current_parse_length as usize) <= PARSE_TIME_SPAN_COUNT { Some(self.current_parse_length as usize - 1) } else { None } } } #[cfg(any(test, feature = "pub_arb"))] pub mod tests;
MenuView::FileSwitcher(ref fs) => Some(&fs.search), _ => Option::None,
random_line_split
platform_types.rs
#![deny(unused)] use macros::{ d, fmt_debug, fmt_display, ord, u, }; use std::{ time::{Duration, Instant}, path::PathBuf }; pub use vec1::{vec1, Vec1}; pub use panic_safe_rope::{BorrowRope, Rope, RopeSlice, RopeSliceTrait, ByteIndex}; pub use text_pos::*; pub mod floating_point; pub mod screen_positioning; pub use screen_positioning::{ CharDim, ScreenSpaceRect, ScreenSpaceWH, ScreenSpaceXY, ScrollXY, SizeDependents, TextBoxXY, TextBoxSpaceXY, TextSpaceXY, TextSpaceXYWH, ssr, sswh, ssxy, }; pub mod spans; pub use spans::{ Spans, SpanView, SpanKind, }; pub use abs; pub use f32_0_1::{F32_0_1, f32_0_1}; pub use pos_f32::{PosF32, pos_f32}; pub use non_neg_f32::{NonNegF32, non_neg_f32}; pub use move_mod::Move; pub use g_i; pub use g_i::{SelectionAdjustment, SelectionMove, SelectableVec1}; #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum ReplaceOrAdd { Replace, Add, } #[derive(Clone, Debug, PartialEq)] pub enum Input { None, Quit, /// Escape Menus, etc., not quitting the program. Escape, Insert(char), Delete, DeleteLines, ResetScroll, ScrollVertically(abs::Vector), ScrollHorizontally(abs::Vector), SetSizeDependents(Box<SizeDependents>), MoveAllCursors(Move), ExtendSelectionForAllCursors(Move), SelectAll, SetCursor(TextBoxSpaceXY, ReplaceOrAdd), DragCursors(TextBoxSpaceXY), SelectCharTypeGrouping(TextBoxSpaceXY, ReplaceOrAdd), ExtendSelectionWithSearch, ExtendSelectionMaximallyWithSearch, SavedAs(g_i::Index, PathBuf), Undo, Redo, Cut, Copy, Paste(Option<String>), InsertNumbersAtCursors, AddOrSelectBuffer(BufferName, String), AddOrSelectBufferThenGoTo(BufferName, String, Position), NewScratchBuffer(Option<String>), TabIn, TabOut, StripTrailingWhitespace, AdjustBufferSelection(SelectionAdjustment), NextLanguage, PreviousLanguage, ToggleSingleLineComments, ToggleCase, AutoIndentSelection, DuplicateLines, SelectBuffer(BufferId), OpenOrSelectBuffer(PathBuf), CloseBuffer(g_i::Index), SetMenuMode(MenuMode), SubmitForm, ShowError(String), } d!(for Input : Input::None); 
#[derive(Clone, Copy, Default, Debug, Hash, PartialEq, Eq)] pub struct BufferId { pub kind: BufferIdKind, pub index: g_i::Index, } ord!(for BufferId: id, other in { id.kind.cmp(&other.kind).then_with(|| id.index.cmp(&other.index)) }); #[macro_export] macro_rules! b_id { // // Creation // ($kind: expr) => { BufferId { kind: $kind, index: d!(), } }; ($kind: expr, $index: expr) => { BufferId { kind: $kind, index: $index, } }; } #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] pub enum BufferIdKind { /// Used to indicate that the keyboard is focused on a non-buffer. None, /// Indicates a buffer repesenting an open file or an in memory scratch file. /// Almost all buffers are `Text` buffers. Text, Find, Replace, FileSwitcher, GoToPosition, } d!(for BufferIdKind: BufferIdKind::Text); impl From<&BufferIdKind> for u8 { fn from(kind: &BufferIdKind) -> Self { use BufferIdKind::*; match kind { None => 0, Text => 1, Find => 2, Replace => 3, FileSwitcher => 4, GoToPosition => 5, } } } ord!(for BufferIdKind: kind, other in { let k: u8 = kind.into(); let o: u8 = other.into(); k.cmp(&o) }); #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum HighlightKind { User, Result, CurrentResult, } d!(for HighlightKind: HighlightKind::User); #[derive(Clone, Debug, PartialEq, Eq)] pub struct Highlight { pub min: Position, pub max: Position, pub kind: HighlightKind, } impl Highlight { #[must_use] pub fn new((p1, p2): (Position, Position), kind: HighlightKind) -> Self { Highlight { min: std::cmp::min(p1, p2), max: std::cmp::max(p1, p2), kind, } } #[must_use] pub fn get(&self) -> (Position, Position) { (self.min, self.max) } } #[macro_export] macro_rules! 
highlight { (l $min_line:literal o $min_offset:literal l $max_line:literal o $max_offset:literal ) => { Highlight::new( ( Position { line: $min_line, offset: CharOffset($min_offset), }, Position { line: $max_line, offset: CharOffset($max_offset), }, ), d!() ) }; (l $min_line:literal o $min_offset:literal l $max_line:literal o max ) => { highlight!(l $min_line o $min_offset l $max_line o 0xFFFF_FFFF__FFFF_FFFF) }; } pub fn push_highlights<O: Into<Option<Position>>>( highlights: &mut Vec<Highlight>, position: Position, highlight_position: O, kind: HighlightKind, ) { match highlight_position.into() { Some(h) if h != position => { let min = std::cmp::min(position, h); let max = std::cmp::max(position, h); if min.line == max.line { highlights.push(Highlight::new((min, max), kind)); return; } // This early return is merely an optimization from three rectangles to two. // TODO Is this optimization actually worth it? The sticky cursor offset does make this // more likely than it would otherwise be. 
if min.offset != 0 && min.offset == max.offset { // [|_______________________|] // ^min_middle max_middle^ let min_middle = min.line + if min.offset == 0 { 0 } else { 1 }; // Since We know the lines must be different, we know `max.line > 0` let max_middle = max.line - 1; let offset = min.offset; highlights.push(Highlight::new( ( Position { offset, line: min.line, }, Position { offset: CharOffset::max_value(), line: max_middle, }, ), kind, )); highlights.push(Highlight::new( ( Position { offset: CharOffset(0), line: min_middle, }, Position { offset, line: max.line, }, ), kind, )); return; } if min.offset != 0 { highlights.push(Highlight::new( ( min, Position { offset: CharOffset::max_value(), ..min }, ), kind, )); } let min_middle = min.line + if min.offset == 0 { 0 } else { 1 }; // Since We know the lines must be different, we know `max.line > 0` let max_middle = max.line - 1; if min_middle <= max_middle { highlights.push(Highlight::new( ( Position { offset: CharOffset(0), line: min_middle, }, Position { offset: CharOffset::max_value(), line: max_middle, }, ), kind, )); } if max.offset != 0 { highlights.push(Highlight::new( ( Position { offset: CharOffset(0), ..max }, max, ), kind, )); } } _ => {} } } #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum BufferName { Path(PathBuf), Scratch(u32), } d!(for BufferName: BufferName::Scratch(d!())); fmt_display!(for BufferName: name in "{}", match name { BufferName::Path(p) => p .file_name() .map_or_else( || "?Unknown Path?".to_string(), |s| s.to_string_lossy().into_owned() ), BufferName::Scratch(n) => format!("*scratch {}*", n), } ); ord!(for BufferName: name, other in { use BufferName::*; use std::cmp::Ordering::*; match (name, other) { (Path(p1), Path(p2)) => { match (p1.canonicalize(), p2.canonicalize() ) { (Ok(ref cp1), Ok(ref cp2)) if cp1 == cp2 => { Equal } _ => { p1.cmp(p2) } } } (Path(_), Scratch(_)) => { Less } (Scratch(_), Path(_)) => { Greater } (Scratch(n1), Scratch(n2)) => { n1.cmp(n2) } } }); impl 
BufferName { #[must_use] pub fn get_extension_or_empty(&self) -> &str { use BufferName::*; match self { Path(p) => { p.extension() .and_then(std::ffi::OsStr::to_str) .unwrap_or("") }, Scratch(..) => "", } } #[must_use] pub fn size_in_bytes(&self) -> usize { use core::mem; // TODO Do other platforms need adjusting as well? #[cfg(target_os = "windows")] const BYTES_PER_UNIT: usize = 2; #[cfg(not(target_os = "windows"))] const BYTES_PER_UNIT: usize = 1; match self { Self::Path(p) => { mem::size_of_val(p) + p.capacity() * BYTES_PER_UNIT }, Self::Scratch(n) => mem::size_of_val(n), } } } #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub enum CursorState { None, PressedAgainstWall(Move), } d!(for CursorState: CursorState::None); fmt_debug!(for CursorState: s in "{}", match s { CursorState::None => std::borrow::Cow::Borrowed("_"), CursorState::PressedAgainstWall(r#move) => std::borrow::Cow::Owned(format!("->|({})", r#move)) }); ord!(for CursorState: state, other in { use std::cmp::Ordering::*; match (state, other) { (CursorState::None, CursorState::None) => Equal, (CursorState::None, CursorState::PressedAgainstWall(_)) => Less, (CursorState::PressedAgainstWall(_), CursorState::None) => Greater, (CursorState::PressedAgainstWall(m1), CursorState::PressedAgainstWall(m2)) => { m1.cmp(m2) } } }); #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct CursorView { pub position: Position, pub state: CursorState, } #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct StatusLineView { pub chars: String, } pub const DEFAULT_STATUS_LINE_CHARS: &str = "No buffer selected."; d!(for StatusLineView: StatusLineView {chars: DEFAULT_STATUS_LINE_CHARS.to_owned()}); #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum MenuMode { Hidden, FileSwitcher, FindReplace(FindReplaceMode), GoToPosition, } d!(for MenuMode: MenuMode::Hidden); #[derive(Clone, Debug, PartialEq)] pub enum MenuView { None, FileSwitcher(FileSwitcherView), FindReplace(FindReplaceView), GoToPosition(GoToPositionView) } 
d!(for MenuView: MenuView::None); impl MenuView { #[must_use] pub fn
(&self) -> MenuMode { match self { Self::None => MenuMode::Hidden, Self::FileSwitcher(_) => MenuMode::FileSwitcher, Self::FindReplace(v) => MenuMode::FindReplace(v.mode), Self::GoToPosition(_) => MenuMode::GoToPosition, } } } #[must_use] pub fn kind_editable_during_mode(kind: BufferIdKind, menu_mode: MenuMode) -> bool { u!{MenuMode} match (kind, menu_mode) { // We want this to be true for `Text` always since it would be completely // reasonable behaviour for a different client to always show the text // buffers. (BufferIdKind::Text, _) | (BufferIdKind::Find | BufferIdKind::Replace, FindReplace(_)) | (BufferIdKind::FileSwitcher, MenuMode::FileSwitcher) | (BufferIdKind::GoToPosition, MenuMode::GoToPosition) => true, _ => { false }, } } pub type FileSwitcherResults = Vec<PathBuf>; #[derive(Clone, Default, Debug, PartialEq)] pub struct FileSwitcherView { pub search: BufferViewData, pub results: FileSwitcherResults, } #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum FindReplaceMode { CurrentFile, } d!(for FindReplaceMode: FindReplaceMode::CurrentFile); #[derive(Clone, Default, Debug, PartialEq)] pub struct FindReplaceView { pub mode: FindReplaceMode, pub find: BufferViewData, pub replace: BufferViewData, pub result_count: usize, } #[derive(Clone, Default, Debug, PartialEq)] pub struct GoToPositionView { pub go_to_position: BufferViewData, } #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum EditedTransition { ToEdited, ToUnedited, } pub type IndexedEditedTransition = (g_i::Index, EditedTransition); #[derive(Clone, Default, Debug, PartialEq, Eq)] pub struct EditedTransitions(Vec<IndexedEditedTransition>); impl EditedTransitions { pub fn push(&mut self, iet: IndexedEditedTransition) { self.0.push(iet); } pub fn clear(&mut self) { self.0.clear(); } #[must_use] pub fn len(&self) -> usize { self.0.len() } #[must_use] pub fn is_empty(&self) -> bool { self.0.is_empty() } pub fn iter(&self) -> impl Iterator<Item = &IndexedEditedTransition> { self.0.iter() } } impl 
IntoIterator for EditedTransitions { type Item = IndexedEditedTransition; type IntoIter = std::vec::IntoIter<Self::Item>; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() } } #[derive(Clone, Default, PartialEq)] pub struct BufferLabel { pub name: BufferName, /// Having an owned version of the result of `name.to_string()` simplifies /// ownership in some cases. // TODO this could be truncated to a fixed length/on the stack pub name_string: String, } fmt_debug!(collapse default for BufferLabel: me { blank_if_default!(name); blank_if_default!(name_string, me.name_string.is_empty()); }); // This could arguably be ToOwned. impl From<&BufferName> for BufferLabel { fn from(name: &BufferName) -> Self { Self { name: name.clone(), name_string: name.to_string(), } } } impl From<BufferName> for BufferLabel { fn from(name: BufferName) -> Self { let name_string = name.to_string(); Self { name, name_string, } } } #[derive(Clone, Default, Debug, PartialEq)] pub struct View { pub buffers: SelectableVec1<BufferLabel>, pub menu: MenuView, pub status_line: StatusLineView, pub current_buffer_kind: BufferIdKind, pub edited_transitions: EditedTransitions, pub stats: ViewStats, } impl View { #[must_use] /// returns the currently visible editor buffer index. pub fn current_text_index(&self) -> g_i::Index { self.buffers.current_index() } #[must_use] /// returns the currently visible editor buffer view's index and label. pub fn current_text_index_and_buffer_label(&self) -> (g_i::Index, &BufferLabel) { ( self.buffers.current_index(), self.buffers.get_current_element() ) } #[must_use] pub fn get_buffer_label(&self, index: g_i::Index) -> Option<&BufferLabel> { self.buffers.get(index) } #[must_use] pub fn current_buffer_id(&self) -> BufferId { b_id!( self.current_buffer_kind, self.buffers.current_index() ) } #[must_use] /// returns the selected menu's cursors if there is a menu containing a buffer /// currently visible, or the current text buffer's cursors if not. 
pub fn get_selected_cursors(&self) -> Option<&[CursorView]> { use BufferIdKind::*; match self.current_buffer_kind { // Seems like we never actually need to access the Text buffer // cursors here. If we want to later, then some additional restructuring // will be needed, at least according to the comment this comment // replaced. commmit `680d9507` None | Text => Option::None, Find => match &self.menu { MenuView::FindReplace(ref fr) => Some(&fr.find), _ => Option::None, }, Replace => match &self.menu { MenuView::FindReplace(ref fr) => Some(&fr.replace), _ => Option::None, }, FileSwitcher => match &self.menu { MenuView::FileSwitcher(ref fs) => Some(&fs.search), _ => Option::None, }, GoToPosition => match &self.menu { MenuView::GoToPosition(ref gtp) => Some(&gtp.go_to_position), _ => Option::None, }, }.map(|d| &d.cursors[..]) } #[must_use] /// returns the currently visible editor buffer path if it has one. pub fn current_path(&self) -> Option<PathBuf> { u!{BufferName} match self.buffers.get_current_element().name { Path(ref p) => Some(p.clone()), Scratch(_) => None, } } } #[derive(Clone, Default, PartialEq)] pub struct BufferView { pub label: BufferLabel, pub data: BufferViewData, } fmt_debug!(collapse default for BufferView: me { blank_if_default!(label); blank_if_default!(data); }); /// The reason we keep this as a separate struct from `BufferView` is to enable /// storage of buffers without a `BufferName`. #[derive(Clone, Default, PartialEq)] pub struct BufferViewData { pub chars: String, pub scroll: ScrollXY, pub cursors: Vec<CursorView>, pub highlights: Vec<Highlight>, pub spans: Spans, } fmt_debug!(collapse default for BufferViewData: me { blank_if_default!(chars, me.chars == Rope::default()); blank_if_default!(scroll); blank_if_default!(cursors, me.cursors.is_empty()); blank_if_default!(highlights, me.highlights.is_empty()); blank_if_default!(spans, me.spans.is_empty()); }); #[macro_export] macro_rules! 
bvd { ($chars: expr) => {{ let mut data: BufferViewData = d!(); data.chars = $chars.into(); data }} } /// Short form of "Command". /// This is for telling the platform layer that it should do something in addition to /// rendering the view. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Cmd { None, SetClipboard(String), LoadFile(PathBuf), MakeActiveTabVisible, } d!(for Cmd : Cmd::None); pub type UpdateAndRenderOutput = (View, Cmd); pub type UpdateAndRender = fn(Input) -> UpdateAndRenderOutput; pub type LoadBufferViewError = String; pub type LoadBufferViewsResult = Result<BufferView, LoadBufferViewError>; pub type LoadBufferViews = fn(&[BufferName]) -> Vec<LoadBufferViewsResult>; #[derive(Copy, Clone)] pub struct EditorAPI { pub update_and_render: UpdateAndRender, pub load_buffer_views: LoadBufferViews, } fmt_debug!(for EditorAPI: _ in "EditorAPI{{...}}"); pub const PARSE_TIME_SPAN_COUNT: usize = 16 - 3; #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum TimeSpan { NotStarted, Started(Instant), Ended(Duration), } impl TimeSpan { #[must_use] pub fn duration_or_default(&self) -> Duration { use TimeSpan::*; match self { Ended(duration) => *duration, NotStarted | Started(_) => d!(), } } #[must_use] pub fn start() -> Self { TimeSpan::Started(Instant::now()) } #[must_use] pub fn end_if_started(self) -> Self { if let TimeSpan::Started(started) = self { TimeSpan::Ended(Instant::now() - started) } else { self } } } d!(for TimeSpan: TimeSpan::NotStarted); #[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] pub struct ViewStats { pub latest_overall_time_span: TimeSpan, pub latest_update_time_span: TimeSpan, pub latest_render_time_span: TimeSpan, pub latest_buffer_render_time_span: TimeSpan, pub latest_status_line_time_span: TimeSpan, pub latest_menu_render_time_span: TimeSpan, // { pub latest_parse_time_spans: [TimeSpan; PARSE_TIME_SPAN_COUNT], pub current_parse_length: u8, // } pub editor_buffers_size_in_bytes: usize, } impl ViewStats { /// Since our current needs only 
require it, we currently expect only one layer /// of start and ends. // TODO track this and just skip the current one or set durations to 0 or // something? pub fn start_parse_duration_saturating(&mut self) { if (self.current_parse_length as usize) < PARSE_TIME_SPAN_COUNT { self.current_parse_length = self.current_parse_length.saturating_add(1); if let Some(index) = self.last_index() { self.latest_parse_time_spans[index] = TimeSpan::Started( Instant::now() ); } } } pub fn end_parse_duration_saturating(&mut self) { if let Some(index) = self.last_index() { if let TimeSpan::Started(instant) = self.latest_parse_time_spans[index] { self.latest_parse_time_spans[index] = TimeSpan::Ended( Instant::now() - instant ); } } } #[must_use] fn last_index(&self) -> Option<usize> { if self.current_parse_length > 0 && (self.current_parse_length as usize) <= PARSE_TIME_SPAN_COUNT { Some(self.current_parse_length as usize - 1) } else { None } } } #[cfg(any(test, feature = "pub_arb"))] pub mod tests;
get_mode
identifier_name
platform_types.rs
#![deny(unused)] use macros::{ d, fmt_debug, fmt_display, ord, u, }; use std::{ time::{Duration, Instant}, path::PathBuf }; pub use vec1::{vec1, Vec1}; pub use panic_safe_rope::{BorrowRope, Rope, RopeSlice, RopeSliceTrait, ByteIndex}; pub use text_pos::*; pub mod floating_point; pub mod screen_positioning; pub use screen_positioning::{ CharDim, ScreenSpaceRect, ScreenSpaceWH, ScreenSpaceXY, ScrollXY, SizeDependents, TextBoxXY, TextBoxSpaceXY, TextSpaceXY, TextSpaceXYWH, ssr, sswh, ssxy, }; pub mod spans; pub use spans::{ Spans, SpanView, SpanKind, }; pub use abs; pub use f32_0_1::{F32_0_1, f32_0_1}; pub use pos_f32::{PosF32, pos_f32}; pub use non_neg_f32::{NonNegF32, non_neg_f32}; pub use move_mod::Move; pub use g_i; pub use g_i::{SelectionAdjustment, SelectionMove, SelectableVec1}; #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum ReplaceOrAdd { Replace, Add, } #[derive(Clone, Debug, PartialEq)] pub enum Input { None, Quit, /// Escape Menus, etc., not quitting the program. Escape, Insert(char), Delete, DeleteLines, ResetScroll, ScrollVertically(abs::Vector), ScrollHorizontally(abs::Vector), SetSizeDependents(Box<SizeDependents>), MoveAllCursors(Move), ExtendSelectionForAllCursors(Move), SelectAll, SetCursor(TextBoxSpaceXY, ReplaceOrAdd), DragCursors(TextBoxSpaceXY), SelectCharTypeGrouping(TextBoxSpaceXY, ReplaceOrAdd), ExtendSelectionWithSearch, ExtendSelectionMaximallyWithSearch, SavedAs(g_i::Index, PathBuf), Undo, Redo, Cut, Copy, Paste(Option<String>), InsertNumbersAtCursors, AddOrSelectBuffer(BufferName, String), AddOrSelectBufferThenGoTo(BufferName, String, Position), NewScratchBuffer(Option<String>), TabIn, TabOut, StripTrailingWhitespace, AdjustBufferSelection(SelectionAdjustment), NextLanguage, PreviousLanguage, ToggleSingleLineComments, ToggleCase, AutoIndentSelection, DuplicateLines, SelectBuffer(BufferId), OpenOrSelectBuffer(PathBuf), CloseBuffer(g_i::Index), SetMenuMode(MenuMode), SubmitForm, ShowError(String), } d!(for Input : Input::None); 
#[derive(Clone, Copy, Default, Debug, Hash, PartialEq, Eq)] pub struct BufferId { pub kind: BufferIdKind, pub index: g_i::Index, } ord!(for BufferId: id, other in { id.kind.cmp(&other.kind).then_with(|| id.index.cmp(&other.index)) }); #[macro_export] macro_rules! b_id { // // Creation // ($kind: expr) => { BufferId { kind: $kind, index: d!(), } }; ($kind: expr, $index: expr) => { BufferId { kind: $kind, index: $index, } }; } #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] pub enum BufferIdKind { /// Used to indicate that the keyboard is focused on a non-buffer. None, /// Indicates a buffer repesenting an open file or an in memory scratch file. /// Almost all buffers are `Text` buffers. Text, Find, Replace, FileSwitcher, GoToPosition, } d!(for BufferIdKind: BufferIdKind::Text); impl From<&BufferIdKind> for u8 { fn from(kind: &BufferIdKind) -> Self { use BufferIdKind::*; match kind { None => 0, Text => 1, Find => 2, Replace => 3, FileSwitcher => 4, GoToPosition => 5, } } } ord!(for BufferIdKind: kind, other in { let k: u8 = kind.into(); let o: u8 = other.into(); k.cmp(&o) }); #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum HighlightKind { User, Result, CurrentResult, } d!(for HighlightKind: HighlightKind::User); #[derive(Clone, Debug, PartialEq, Eq)] pub struct Highlight { pub min: Position, pub max: Position, pub kind: HighlightKind, } impl Highlight { #[must_use] pub fn new((p1, p2): (Position, Position), kind: HighlightKind) -> Self { Highlight { min: std::cmp::min(p1, p2), max: std::cmp::max(p1, p2), kind, } } #[must_use] pub fn get(&self) -> (Position, Position) { (self.min, self.max) } } #[macro_export] macro_rules! 
highlight { (l $min_line:literal o $min_offset:literal l $max_line:literal o $max_offset:literal ) => { Highlight::new( ( Position { line: $min_line, offset: CharOffset($min_offset), }, Position { line: $max_line, offset: CharOffset($max_offset), }, ), d!() ) }; (l $min_line:literal o $min_offset:literal l $max_line:literal o max ) => { highlight!(l $min_line o $min_offset l $max_line o 0xFFFF_FFFF__FFFF_FFFF) }; } pub fn push_highlights<O: Into<Option<Position>>>( highlights: &mut Vec<Highlight>, position: Position, highlight_position: O, kind: HighlightKind, ) { match highlight_position.into() { Some(h) if h != position => { let min = std::cmp::min(position, h); let max = std::cmp::max(position, h); if min.line == max.line { highlights.push(Highlight::new((min, max), kind)); return; } // This early return is merely an optimization from three rectangles to two. // TODO Is this optimization actually worth it? The sticky cursor offset does make this // more likely than it would otherwise be. 
if min.offset != 0 && min.offset == max.offset { // [|_______________________|] // ^min_middle max_middle^ let min_middle = min.line + if min.offset == 0 { 0 } else { 1 }; // Since We know the lines must be different, we know `max.line > 0` let max_middle = max.line - 1; let offset = min.offset; highlights.push(Highlight::new( ( Position { offset, line: min.line, }, Position { offset: CharOffset::max_value(), line: max_middle, }, ), kind, )); highlights.push(Highlight::new( ( Position { offset: CharOffset(0), line: min_middle, }, Position { offset, line: max.line, }, ), kind, )); return; } if min.offset != 0 { highlights.push(Highlight::new( ( min, Position { offset: CharOffset::max_value(), ..min }, ), kind, )); } let min_middle = min.line + if min.offset == 0 { 0 } else { 1 }; // Since We know the lines must be different, we know `max.line > 0` let max_middle = max.line - 1; if min_middle <= max_middle { highlights.push(Highlight::new( ( Position { offset: CharOffset(0), line: min_middle, }, Position { offset: CharOffset::max_value(), line: max_middle, }, ), kind, )); } if max.offset != 0 { highlights.push(Highlight::new( ( Position { offset: CharOffset(0), ..max }, max, ), kind, )); } } _ => {} } } #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum BufferName { Path(PathBuf), Scratch(u32), } d!(for BufferName: BufferName::Scratch(d!())); fmt_display!(for BufferName: name in "{}", match name { BufferName::Path(p) => p .file_name() .map_or_else( || "?Unknown Path?".to_string(), |s| s.to_string_lossy().into_owned() ), BufferName::Scratch(n) => format!("*scratch {}*", n), } ); ord!(for BufferName: name, other in { use BufferName::*; use std::cmp::Ordering::*; match (name, other) { (Path(p1), Path(p2)) => { match (p1.canonicalize(), p2.canonicalize() ) { (Ok(ref cp1), Ok(ref cp2)) if cp1 == cp2 => { Equal } _ => { p1.cmp(p2) } } } (Path(_), Scratch(_)) => { Less } (Scratch(_), Path(_)) => { Greater } (Scratch(n1), Scratch(n2)) => { n1.cmp(n2) } } }); impl 
BufferName { #[must_use] pub fn get_extension_or_empty(&self) -> &str { use BufferName::*; match self { Path(p) => { p.extension() .and_then(std::ffi::OsStr::to_str) .unwrap_or("") }, Scratch(..) => "", } } #[must_use] pub fn size_in_bytes(&self) -> usize { use core::mem; // TODO Do other platforms need adjusting as well? #[cfg(target_os = "windows")] const BYTES_PER_UNIT: usize = 2; #[cfg(not(target_os = "windows"))] const BYTES_PER_UNIT: usize = 1; match self { Self::Path(p) => { mem::size_of_val(p) + p.capacity() * BYTES_PER_UNIT }, Self::Scratch(n) => mem::size_of_val(n), } } } #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub enum CursorState { None, PressedAgainstWall(Move), } d!(for CursorState: CursorState::None); fmt_debug!(for CursorState: s in "{}", match s { CursorState::None => std::borrow::Cow::Borrowed("_"), CursorState::PressedAgainstWall(r#move) => std::borrow::Cow::Owned(format!("->|({})", r#move)) }); ord!(for CursorState: state, other in { use std::cmp::Ordering::*; match (state, other) { (CursorState::None, CursorState::None) => Equal, (CursorState::None, CursorState::PressedAgainstWall(_)) => Less, (CursorState::PressedAgainstWall(_), CursorState::None) => Greater, (CursorState::PressedAgainstWall(m1), CursorState::PressedAgainstWall(m2)) => { m1.cmp(m2) } } }); #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct CursorView { pub position: Position, pub state: CursorState, } #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct StatusLineView { pub chars: String, } pub const DEFAULT_STATUS_LINE_CHARS: &str = "No buffer selected."; d!(for StatusLineView: StatusLineView {chars: DEFAULT_STATUS_LINE_CHARS.to_owned()}); #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum MenuMode { Hidden, FileSwitcher, FindReplace(FindReplaceMode), GoToPosition, } d!(for MenuMode: MenuMode::Hidden); #[derive(Clone, Debug, PartialEq)] pub enum MenuView { None, FileSwitcher(FileSwitcherView), FindReplace(FindReplaceView), GoToPosition(GoToPositionView) } 
d!(for MenuView: MenuView::None); impl MenuView { #[must_use] pub fn get_mode(&self) -> MenuMode { match self { Self::None => MenuMode::Hidden, Self::FileSwitcher(_) => MenuMode::FileSwitcher, Self::FindReplace(v) => MenuMode::FindReplace(v.mode), Self::GoToPosition(_) => MenuMode::GoToPosition, } } } #[must_use] pub fn kind_editable_during_mode(kind: BufferIdKind, menu_mode: MenuMode) -> bool { u!{MenuMode} match (kind, menu_mode) { // We want this to be true for `Text` always since it would be completely // reasonable behaviour for a different client to always show the text // buffers. (BufferIdKind::Text, _) | (BufferIdKind::Find | BufferIdKind::Replace, FindReplace(_)) | (BufferIdKind::FileSwitcher, MenuMode::FileSwitcher) | (BufferIdKind::GoToPosition, MenuMode::GoToPosition) => true, _ => { false }, } } pub type FileSwitcherResults = Vec<PathBuf>; #[derive(Clone, Default, Debug, PartialEq)] pub struct FileSwitcherView { pub search: BufferViewData, pub results: FileSwitcherResults, } #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum FindReplaceMode { CurrentFile, } d!(for FindReplaceMode: FindReplaceMode::CurrentFile); #[derive(Clone, Default, Debug, PartialEq)] pub struct FindReplaceView { pub mode: FindReplaceMode, pub find: BufferViewData, pub replace: BufferViewData, pub result_count: usize, } #[derive(Clone, Default, Debug, PartialEq)] pub struct GoToPositionView { pub go_to_position: BufferViewData, } #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum EditedTransition { ToEdited, ToUnedited, } pub type IndexedEditedTransition = (g_i::Index, EditedTransition); #[derive(Clone, Default, Debug, PartialEq, Eq)] pub struct EditedTransitions(Vec<IndexedEditedTransition>); impl EditedTransitions { pub fn push(&mut self, iet: IndexedEditedTransition) { self.0.push(iet); } pub fn clear(&mut self) { self.0.clear(); } #[must_use] pub fn len(&self) -> usize { self.0.len() } #[must_use] pub fn is_empty(&self) -> bool { self.0.is_empty() } pub fn iter(&self) -> 
impl Iterator<Item = &IndexedEditedTransition> { self.0.iter() } } impl IntoIterator for EditedTransitions { type Item = IndexedEditedTransition; type IntoIter = std::vec::IntoIter<Self::Item>; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() } } #[derive(Clone, Default, PartialEq)] pub struct BufferLabel { pub name: BufferName, /// Having an owned version of the result of `name.to_string()` simplifies /// ownership in some cases. // TODO this could be truncated to a fixed length/on the stack pub name_string: String, } fmt_debug!(collapse default for BufferLabel: me { blank_if_default!(name); blank_if_default!(name_string, me.name_string.is_empty()); }); // This could arguably be ToOwned. impl From<&BufferName> for BufferLabel { fn from(name: &BufferName) -> Self { Self { name: name.clone(), name_string: name.to_string(), } } } impl From<BufferName> for BufferLabel { fn from(name: BufferName) -> Self { let name_string = name.to_string(); Self { name, name_string, } } } #[derive(Clone, Default, Debug, PartialEq)] pub struct View { pub buffers: SelectableVec1<BufferLabel>, pub menu: MenuView, pub status_line: StatusLineView, pub current_buffer_kind: BufferIdKind, pub edited_transitions: EditedTransitions, pub stats: ViewStats, } impl View { #[must_use] /// returns the currently visible editor buffer index. pub fn current_text_index(&self) -> g_i::Index { self.buffers.current_index() } #[must_use] /// returns the currently visible editor buffer view's index and label. 
pub fn current_text_index_and_buffer_label(&self) -> (g_i::Index, &BufferLabel) { ( self.buffers.current_index(), self.buffers.get_current_element() ) } #[must_use] pub fn get_buffer_label(&self, index: g_i::Index) -> Option<&BufferLabel> { self.buffers.get(index) } #[must_use] pub fn current_buffer_id(&self) -> BufferId { b_id!( self.current_buffer_kind, self.buffers.current_index() ) } #[must_use] /// returns the selected menu's cursors if there is a menu containing a buffer /// currently visible, or the current text buffer's cursors if not. pub fn get_selected_cursors(&self) -> Option<&[CursorView]>
#[must_use] /// returns the currently visible editor buffer path if it has one. pub fn current_path(&self) -> Option<PathBuf> { u!{BufferName} match self.buffers.get_current_element().name { Path(ref p) => Some(p.clone()), Scratch(_) => None, } } } #[derive(Clone, Default, PartialEq)] pub struct BufferView { pub label: BufferLabel, pub data: BufferViewData, } fmt_debug!(collapse default for BufferView: me { blank_if_default!(label); blank_if_default!(data); }); /// The reason we keep this as a separate struct from `BufferView` is to enable /// storage of buffers without a `BufferName`. #[derive(Clone, Default, PartialEq)] pub struct BufferViewData { pub chars: String, pub scroll: ScrollXY, pub cursors: Vec<CursorView>, pub highlights: Vec<Highlight>, pub spans: Spans, } fmt_debug!(collapse default for BufferViewData: me { blank_if_default!(chars, me.chars == Rope::default()); blank_if_default!(scroll); blank_if_default!(cursors, me.cursors.is_empty()); blank_if_default!(highlights, me.highlights.is_empty()); blank_if_default!(spans, me.spans.is_empty()); }); #[macro_export] macro_rules! bvd { ($chars: expr) => {{ let mut data: BufferViewData = d!(); data.chars = $chars.into(); data }} } /// Short form of "Command". /// This is for telling the platform layer that it should do something in addition to /// rendering the view. 
#[derive(Debug, Clone, PartialEq, Eq)] pub enum Cmd { None, SetClipboard(String), LoadFile(PathBuf), MakeActiveTabVisible, } d!(for Cmd : Cmd::None); pub type UpdateAndRenderOutput = (View, Cmd); pub type UpdateAndRender = fn(Input) -> UpdateAndRenderOutput; pub type LoadBufferViewError = String; pub type LoadBufferViewsResult = Result<BufferView, LoadBufferViewError>; pub type LoadBufferViews = fn(&[BufferName]) -> Vec<LoadBufferViewsResult>; #[derive(Copy, Clone)] pub struct EditorAPI { pub update_and_render: UpdateAndRender, pub load_buffer_views: LoadBufferViews, } fmt_debug!(for EditorAPI: _ in "EditorAPI{{...}}"); pub const PARSE_TIME_SPAN_COUNT: usize = 16 - 3; #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum TimeSpan { NotStarted, Started(Instant), Ended(Duration), } impl TimeSpan { #[must_use] pub fn duration_or_default(&self) -> Duration { use TimeSpan::*; match self { Ended(duration) => *duration, NotStarted | Started(_) => d!(), } } #[must_use] pub fn start() -> Self { TimeSpan::Started(Instant::now()) } #[must_use] pub fn end_if_started(self) -> Self { if let TimeSpan::Started(started) = self { TimeSpan::Ended(Instant::now() - started) } else { self } } } d!(for TimeSpan: TimeSpan::NotStarted); #[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] pub struct ViewStats { pub latest_overall_time_span: TimeSpan, pub latest_update_time_span: TimeSpan, pub latest_render_time_span: TimeSpan, pub latest_buffer_render_time_span: TimeSpan, pub latest_status_line_time_span: TimeSpan, pub latest_menu_render_time_span: TimeSpan, // { pub latest_parse_time_spans: [TimeSpan; PARSE_TIME_SPAN_COUNT], pub current_parse_length: u8, // } pub editor_buffers_size_in_bytes: usize, } impl ViewStats { /// Since our current needs only require it, we currently expect only one layer /// of start and ends. // TODO track this and just skip the current one or set durations to 0 or // something? 
pub fn start_parse_duration_saturating(&mut self) { if (self.current_parse_length as usize) < PARSE_TIME_SPAN_COUNT { self.current_parse_length = self.current_parse_length.saturating_add(1); if let Some(index) = self.last_index() { self.latest_parse_time_spans[index] = TimeSpan::Started( Instant::now() ); } } } pub fn end_parse_duration_saturating(&mut self) { if let Some(index) = self.last_index() { if let TimeSpan::Started(instant) = self.latest_parse_time_spans[index] { self.latest_parse_time_spans[index] = TimeSpan::Ended( Instant::now() - instant ); } } } #[must_use] fn last_index(&self) -> Option<usize> { if self.current_parse_length > 0 && (self.current_parse_length as usize) <= PARSE_TIME_SPAN_COUNT { Some(self.current_parse_length as usize - 1) } else { None } } } #[cfg(any(test, feature = "pub_arb"))] pub mod tests;
{ use BufferIdKind::*; match self.current_buffer_kind { // Seems like we never actually need to access the Text buffer // cursors here. If we want to later, then some additional restructuring // will be needed, at least according to the comment this comment // replaced. commmit `680d9507` None | Text => Option::None, Find => match &self.menu { MenuView::FindReplace(ref fr) => Some(&fr.find), _ => Option::None, }, Replace => match &self.menu { MenuView::FindReplace(ref fr) => Some(&fr.replace), _ => Option::None, }, FileSwitcher => match &self.menu { MenuView::FileSwitcher(ref fs) => Some(&fs.search), _ => Option::None, }, GoToPosition => match &self.menu { MenuView::GoToPosition(ref gtp) => Some(&gtp.go_to_position), _ => Option::None, }, }.map(|d| &d.cursors[..]) }
identifier_body
pvc-clone-controller.go
/* Copyright 2022 The CDI Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package datavolume import ( "context" "crypto/rsa" "fmt" "strconv" "time" "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" "kubevirt.io/containerized-data-importer/pkg/common" cc "kubevirt.io/containerized-data-importer/pkg/controller/common" featuregates "kubevirt.io/containerized-data-importer/pkg/feature-gates" ) const ( sourceInUseRequeueDuration = time.Duration(5 * time.Second) pvcCloneControllerName = "datavolume-pvc-clone-controller" volumeCloneSourcePrefix = "volume-clone-source" ) // ErrInvalidTermMsg reports that the termination message from the size-detection pod doesn't exists or is not a valid quantity var ErrInvalidTermMsg = fmt.Errorf("the termination message from the size-detection pod is not-valid") // PvcCloneReconciler members type PvcCloneReconciler struct { CloneReconcilerBase } // 
// NewPvcCloneController creates a new instance of the datavolume clone controller.
func NewPvcCloneController(
	ctx context.Context,
	mgr manager.Manager,
	log logr.Logger,
	clonerImage string,
	importerImage string,
	pullPolicy string,
	tokenPublicKey *rsa.PublicKey,
	tokenPrivateKey *rsa.PrivateKey,
	installerLabels map[string]string,
) (controller.Controller, error) {
	client := mgr.GetClient()
	reconciler := &PvcCloneReconciler{
		CloneReconcilerBase: CloneReconcilerBase{
			ReconcilerBase: ReconcilerBase{
				client:               client,
				scheme:               mgr.GetScheme(),
				log:                  log.WithName(pvcCloneControllerName),
				featureGates:         featuregates.NewFeatureGates(client),
				recorder:             mgr.GetEventRecorderFor(pvcCloneControllerName),
				installerLabels:      installerLabels,
				shouldUpdateProgress: true,
			},
			clonerImage:         clonerImage,
			importerImage:       importerImage,
			pullPolicy:          pullPolicy,
			cloneSourceKind:     "PersistentVolumeClaim",
			shortTokenValidator: cc.NewCloneTokenValidator(common.CloneTokenIssuer, tokenPublicKey),
			longTokenValidator:  cc.NewCloneTokenValidator(common.ExtendedCloneTokenIssuer, tokenPublicKey),
			// for long term tokens to handle cross namespace dumb clones
			tokenGenerator: newLongTermCloneTokenGenerator(tokenPrivateKey),
		},
	}
	dataVolumeCloneController, err := controller.New(pvcCloneControllerName, mgr, controller.Options{
		MaxConcurrentReconciles: 3,
		Reconciler:              reconciler,
	})
	if err != nil {
		return nil, err
	}
	if err = reconciler.addDataVolumeCloneControllerWatches(mgr, dataVolumeCloneController); err != nil {
		return nil, err
	}
	return dataVolumeCloneController, nil
}

// addDataVolumeCloneControllerWatches registers every watch this controller
// needs: the common DataVolume watches, clones created without a source,
// DataSource references, and VolumeCloneSource CRs.
func (r *PvcCloneReconciler) addDataVolumeCloneControllerWatches(mgr manager.Manager, datavolumeController controller.Controller) error {
	if err := addDataVolumeControllerCommonWatches(mgr, datavolumeController, dataVolumePvcClone); err != nil {
		return err
	}
	// Watch to reconcile clones created without source
	if err := addCloneWithoutSourceWatch(mgr, datavolumeController, &corev1.PersistentVolumeClaim{}, "spec.source.pvc"); err != nil {
		return err
	}
	if err := addDataSourceWatch(mgr, datavolumeController); err != nil {
		return err
	}
	if err := r.addVolumeCloneSourceWatch(datavolumeController); err != nil {
		return err
	}
	return nil
}

// addDataSourceWatch indexes DataVolumes by the DataSource they reference
// (namespace/name key) and watches DataSource objects, enqueuing every
// DataVolume whose sourceRef points at the changed DataSource.
func addDataSourceWatch(mgr manager.Manager, c controller.Controller) error {
	const dvDataSourceField = "datasource"

	getKey := func(namespace, name string) string {
		return namespace + "/" + name
	}

	if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &cdiv1.DataVolume{}, dvDataSourceField, func(obj client.Object) []string {
		if sourceRef := obj.(*cdiv1.DataVolume).Spec.SourceRef; sourceRef != nil && sourceRef.Kind == cdiv1.DataVolumeDataSource {
			// sourceRef namespace defaults to the DataVolume's own namespace
			ns := obj.GetNamespace()
			if sourceRef.Namespace != nil && *sourceRef.Namespace != "" {
				ns = *sourceRef.Namespace
			}
			return []string{getKey(ns, sourceRef.Name)}
		}
		return nil
	}); err != nil {
		return err
	}

	mapToDataVolume := func(obj client.Object) (reqs []reconcile.Request) {
		var dvs cdiv1.DataVolumeList
		matchingFields := client.MatchingFields{dvDataSourceField: getKey(obj.GetNamespace(), obj.GetName())}
		if err := mgr.GetClient().List(context.TODO(), &dvs, matchingFields); err != nil {
			c.GetLogger().Error(err, "Unable to list DataVolumes", "matchingFields", matchingFields)
			return
		}
		for _, dv := range dvs.Items {
			reqs = append(reqs, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: dv.Namespace, Name: dv.Name}})
		}
		return
	}

	if err := c.Watch(&source.Kind{Type: &cdiv1.DataSource{}},
		handler.EnqueueRequestsFromMapFunc(mapToDataVolume),
	); err != nil {
		return err
	}
	return nil
}

// Reconcile loop for the clone data volumes
func (r *PvcCloneReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	return r.reconcile(ctx, req, r)
}

// prepare resolves a sourceRef (if any) into a concrete source before syncing.
func (r *PvcCloneReconciler) prepare(syncState *dvSyncState) error {
	dv := syncState.dvMutated
	if err := r.populateSourceIfSourceRef(dv); err != nil {
		return err
	}
	return nil
}

// cleanup reconciles the VolumeCloneSource CR once the DataVolume is being
// deleted or has succeeded; otherwise it is a no-op.
func (r *PvcCloneReconciler) cleanup(syncState *dvSyncState) error {
	dv := syncState.dvMutated
	if err := r.populateSourceIfSourceRef(dv); err != nil {
		return err
	}
	if dv.DeletionTimestamp == nil && dv.Status.Phase != cdiv1.Succeeded {
		return nil
	}
	return r.reconcileVolumeCloneSourceCR(syncState)
}

// addCloneToken copies the short-lived clone token from the DataVolume to the
// target PVC for cross-namespace clones, clearing any stale token annotations first.
func addCloneToken(dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error {
	// first clear out tokens that may have already been added
	delete(pvc.Annotations, cc.AnnCloneToken)
	delete(pvc.Annotations, cc.AnnExtendedCloneToken)
	if isCrossNamespaceClone(dv) {
		// only want this initially
		// extended token is added later
		token, ok := dv.Annotations[cc.AnnCloneToken]
		if !ok {
			return errors.Errorf("no clone token")
		}
		cc.AddAnnotation(pvc, cc.AnnCloneToken, token)
	}
	return nil
}

// volumeCloneSourceName derives the VolumeCloneSource CR name from the DataVolume UID.
func volumeCloneSourceName(dv *cdiv1.DataVolume) string {
	return fmt.Sprintf("%s-%s", volumeCloneSourcePrefix, dv.UID)
}

// updateAnnotations stamps the target PVC with the clone token and the
// "namespace/name" clone request annotation pointing at the source PVC.
func (r *PvcCloneReconciler) updateAnnotations(dataVolume *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error {
	if dataVolume.Spec.Source.PVC == nil {
		return errors.Errorf("no source set for clone datavolume")
	}
	if err := addCloneToken(dataVolume, pvc); err != nil {
		return err
	}
	// source namespace defaults to the DataVolume's namespace
	sourceNamespace := dataVolume.Spec.Source.PVC.Namespace
	if sourceNamespace == "" {
		sourceNamespace = dataVolume.Namespace
	}
	pvc.Annotations[cc.AnnCloneRequest] = sourceNamespace + "/" + dataVolume.Spec.Source.PVC.Name
	return nil
}

// sync runs the clone sync and, on success, persists the mutated state.
func (r *PvcCloneReconciler) sync(log logr.Logger, req reconcile.Request) (dvSyncResult, error) {
	syncState, err := r.syncClone(log, req)
	if err == nil {
		err = r.syncUpdate(log, &syncState)
	}
	return syncState.dvSyncResult, err
}

// syncClone is the core clone state machine: it validates the source, detects
// the required size, creates the target PVC (populator- or annotation-driven),
// and maintains clone-type annotations and extended tokens.
func (r *PvcCloneReconciler) syncClone(log logr.Logger, req reconcile.Request) (dvSyncState, error) {
	syncRes, syncErr := r.syncCommon(log, req, r.cleanup, r.prepare)
	if syncErr != nil || syncRes.result != nil {
		return syncRes, syncErr
	}
	pvc := syncRes.pvc
	pvcSpec := syncRes.pvcSpec
	datavolume := syncRes.dvMutated
	pvcPopulated := pvcIsPopulated(pvc, datavolume)
	staticProvisionPending := checkStaticProvisionPending(pvc, datavolume)
	prePopulated := dvIsPrePopulated(datavolume)
	// nothing to do when the target is already (pre)populated or static provisioning is pending
	if pvcPopulated || prePopulated || staticProvisionPending {
		return syncRes, nil
	}
	if addedToken, err := r.ensureExtendedTokenDV(datavolume); err != nil {
		return syncRes, err
	} else if addedToken {
		// make sure token gets persisted before doing anything else
		return syncRes, nil
	}
	if pvc == nil {
		// Check if source PVC exists and do proper validation before attempting to clone
		if done, err := r.validateCloneAndSourcePVC(&syncRes, log); err != nil {
			return syncRes, err
		} else if !done {
			return syncRes, nil
		}
		// Always call detect size, it will handle the case where size is specified
		// and detection pod not necessary
		if datavolume.Spec.Storage != nil {
			done, err := r.detectCloneSize(&syncRes)
			if err != nil {
				return syncRes, err
			} else if !done {
				// Check if the source PVC is ready to be cloned
				if readyToClone, err := r.isSourceReadyToClone(datavolume); err != nil {
					return syncRes, err
				} else if !readyToClone {
					if syncRes.result == nil {
						syncRes.result = &reconcile.Result{}
					}
					// source busy/unpopulated: requeue and report CloneScheduled
					syncRes.result.RequeueAfter = sourceInUseRequeueDuration
					return syncRes, r.syncCloneStatusPhase(&syncRes, cdiv1.CloneScheduled, nil)
				}
				return syncRes, nil
			}
		}
		pvcModifier := r.updateAnnotations
		if syncRes.usePopulator {
			if isCrossNamespaceClone(datavolume) {
				// finalizer must be persisted before the PVC exists
				if !cc.HasFinalizer(datavolume, crossNamespaceFinalizer) {
					cc.AddFinalizer(datavolume, crossNamespaceFinalizer)
					return syncRes, r.syncCloneStatusPhase(&syncRes, cdiv1.CloneScheduled, nil)
				}
			}
			pvcModifier = r.updatePVCForPopulation
		}
		newPvc, err := r.createPvcForDatavolume(datavolume, pvcSpec, pvcModifier)
		if err != nil {
			if cc.ErrQuotaExceeded(err) {
				syncErr = r.syncDataVolumeStatusPhaseWithEvent(&syncRes, cdiv1.Pending, nil, Event{
					eventType: corev1.EventTypeWarning,
					reason:    cc.ErrExceededQuota,
					message:   err.Error(),
				})
				if syncErr != nil {
					log.Error(syncErr, "failed to sync DataVolume status with event")
				}
			}
			return syncRes, err
		}
		pvc = newPvc
	}
	if syncRes.usePopulator {
		if err := r.reconcileVolumeCloneSourceCR(&syncRes); err != nil {
			return syncRes, err
		}
		// propagate the clone type chosen by the populator back to the DataVolume
		ct, ok := pvc.Annotations[cc.AnnCloneType]
		if ok {
			cc.AddAnnotation(datavolume, cc.AnnCloneType, ct)
		}
	} else {
		cc.AddAnnotation(datavolume, cc.AnnCloneType, string(cdiv1.CloneStrategyHostAssisted))
		if err := r.fallbackToHostAssisted(pvc); err != nil {
			return syncRes, err
		}
	}
	if err := r.ensureExtendedTokenPVC(datavolume, pvc); err != nil {
		return syncRes, err
	}
	return syncRes, syncErr
}

// Verify that the source PVC has been completely populated.
func (r *PvcCloneReconciler) isSourcePVCPopulated(dv *cdiv1.DataVolume) (bool, error) {
	sourcePvc := &corev1.PersistentVolumeClaim{}
	if err := r.client.Get(context.TODO(), types.NamespacedName{Name: dv.Spec.Source.PVC.Name, Namespace: dv.Spec.Source.PVC.Namespace}, sourcePvc); err != nil {
		return false, err
	}
	return cc.IsPopulated(sourcePvc, r.client)
}

// sourceInUse reports whether any pod is currently using the source PVC,
// emitting a warning event for each offending pod.
func (r *PvcCloneReconciler) sourceInUse(dv *cdiv1.DataVolume, eventReason string) (bool, error) {
	pods, err := cc.GetPodsUsingPVCs(context.TODO(), r.client, dv.Spec.Source.PVC.Namespace, sets.New(dv.Spec.Source.PVC.Name), false)
	if err != nil {
		return false, err
	}
	for _, pod := range pods {
		r.log.V(1).Info("Cannot snapshot", "namespace", dv.Namespace, "name", dv.Name, "pod namespace", pod.Namespace, "pod name", pod.Name)
		r.recorder.Eventf(dv, corev1.EventTypeWarning, eventReason, "pod %s/%s using PersistentVolumeClaim %s", pod.Namespace, pod.Name, dv.Spec.Source.PVC.Name)
	}
	return len(pods) > 0, nil
}

// findSourcePvc fetches the clone's source PVC, defaulting the namespace to
// the DataVolume's own when none is specified.
func (r *PvcCloneReconciler) findSourcePvc(dataVolume *cdiv1.DataVolume) (*corev1.PersistentVolumeClaim, error) {
	sourcePvcSpec := dataVolume.Spec.Source.PVC
	if sourcePvcSpec == nil {
		return nil, errors.New("no source PVC provided")
	}
	// Find source PVC
	sourcePvcNs := sourcePvcSpec.Namespace
	if sourcePvcNs == "" {
		sourcePvcNs = dataVolume.Namespace
	}
	pvc := &corev1.PersistentVolumeClaim{}
	if err := r.client.Get(context.TODO(), types.NamespacedName{Namespace: sourcePvcNs, Name: sourcePvcSpec.Name}, pvc); err != nil {
		if k8serrors.IsNotFound(err) {
			r.log.V(3).Info("Source PVC is missing", "source namespace", sourcePvcSpec.Namespace, "source name", sourcePvcSpec.Name)
		}
		return nil, err
	}
	return pvc, nil
}

// validateCloneAndSourcePVC checks if the source PVC of a clone exists and does proper validation
func (r *PvcCloneReconciler) validateCloneAndSourcePVC(syncState *dvSyncState, log logr.Logger) (bool, error) {
	datavolume := syncState.dvMutated
	sourcePvc, err := r.findSourcePvc(datavolume)
	if err != nil {
		// Clone without source
		if k8serrors.IsNotFound(err) {
			syncErr := r.syncDataVolumeStatusPhaseWithEvent(syncState, datavolume.Status.Phase, nil, Event{
				eventType: corev1.EventTypeWarning,
				reason:    CloneWithoutSource,
				message:   fmt.Sprintf(MessageCloneWithoutSource, "pvc", datavolume.Spec.Source.PVC.Name),
			})
			if syncErr != nil {
				log.Error(syncErr, "failed to sync DataVolume status with event")
			}
			return false, nil
		}
		return false, err
	}
	err = cc.ValidateClone(sourcePvc, &datavolume.Spec)
	if err != nil {
		r.recorder.Event(datavolume, corev1.EventTypeWarning, CloneValidationFailed, MessageCloneValidationFailed)
		return false, err
	}
	return true, nil
}

// isSourceReadyToClone handles the reconciling process of a clone when the source PVC is not ready
func (r *PvcCloneReconciler) isSourceReadyToClone(datavolume *cdiv1.DataVolume) (bool, error)
// detectCloneSize obtains and assigns the original PVC's size when cloning using an empty storage value
func (r *PvcCloneReconciler) detectCloneSize(syncState *dvSyncState) (bool, error) {
	sourcePvc, err := r.findSourcePvc(syncState.dvMutated)
	if err != nil {
		return false, err
	}
	// because of filesystem overhead calculations when cloning
	// even if storage size is requested we have to calculate source size
	// when source is filesystem and target is block
	requestedSize, hasSize := syncState.pvcSpec.Resources.Requests[corev1.ResourceStorage]
	sizeRequired := !hasSize || requestedSize.IsZero()
	targetIsBlock := syncState.pvcSpec.VolumeMode != nil && *syncState.pvcSpec.VolumeMode == corev1.PersistentVolumeBlock
	sourceIsFilesystem := cc.GetVolumeMode(sourcePvc) == corev1.PersistentVolumeFilesystem
	// have to be explicit here or detection pod will crash
	sourceIsKubevirt := sourcePvc.Annotations[cc.AnnContentType] == string(cdiv1.DataVolumeKubeVirt)
	// size already known and no fs->block overhead concern: nothing to detect
	if !sizeRequired && (!targetIsBlock || !sourceIsFilesystem || !sourceIsKubevirt) {
		return true, nil
	}
	var targetSize int64
	sourceCapacity := sourcePvc.Status.Capacity.Storage()
	// Due to possible filesystem overhead complications when cloning
	// using host-assisted strategy, we create a pod that automatically
	// collects the size of the original virtual image with 'qemu-img'.
	// If the original PVC's volume mode is "block",
	// we simply extract the value from the original PVC's spec.
	if sourceIsFilesystem && sourceIsKubevirt {
		var available bool
		// If available, we first try to get the virtual size from previous iterations
		targetSize, available = getSizeFromAnnotations(sourcePvc)
		if !available {
			targetSize, err = r.getSizeFromPod(syncState.pvc, sourcePvc, syncState.dvMutated)
			if err != nil {
				return false, err
			} else if targetSize == 0 {
				// detection pod not finished yet; try again next reconcile
				return false, nil
			}
		}
	} else {
		targetSize, _ = sourceCapacity.AsInt64()
	}
	var isPermissiveClone bool
	if sizeRequired {
		// Allow the clone-controller to skip the size comparison requirement
		// if the source's size ends up being larger due to overhead differences
		// TODO: Fix this in next PR that uses actual size also in validation
		isPermissiveClone = sourceCapacity.CmpInt64(targetSize) == 1
	} else {
		isPermissiveClone = requestedSize.CmpInt64(targetSize) >= 0
	}
	if isPermissiveClone {
		syncState.dvMutated.Annotations[cc.AnnPermissiveClone] = "true"
	}
	if !sizeRequired {
		return true, nil
	}
	// Parse size into a 'Quantity' struct and, if needed, inflate it with filesystem overhead
	targetCapacity, err := cc.InflateSizeWithOverhead(context.TODO(), r.client, targetSize, syncState.pvcSpec)
	if err != nil {
		return false, err
	}
	syncState.pvcSpec.Resources.Requests[corev1.ResourceStorage] = targetCapacity
	return true, nil
}

// getSizeFromAnnotations checks the source PVC's annotations and returns the requested size if it has already been obtained
func getSizeFromAnnotations(sourcePvc *corev1.PersistentVolumeClaim) (int64, bool) {
	virtualImageSize, available := sourcePvc.Annotations[AnnVirtualImageSize]
	if available {
		sourceCapacity, available := sourcePvc.Annotations[AnnSourceCapacity]
		currCapacity := sourcePvc.Status.Capacity
		// Checks if the original PVC's capacity has changed
		if available && currCapacity.Storage().Cmp(resource.MustParse(sourceCapacity)) == 0 {
			// Parse the raw string containing the image size into a 64-bit int
			imgSizeInt, _ := strconv.ParseInt(virtualImageSize, 10, 64)
			return imgSizeInt, true
		}
	}
	return 0, false
}

// getSizeFromPod attempts to get the image size from a pod that directly obtains said value from the source PVC
func (r *PvcCloneReconciler) getSizeFromPod(targetPvc, sourcePvc *corev1.PersistentVolumeClaim, dv *cdiv1.DataVolume) (int64, error) {
	// The pod should not be created until the source PVC has finished the import process
	populated, err := cc.IsPopulated(sourcePvc, r.client)
	if err != nil {
		return 0, err
	}
	if !populated {
		r.recorder.Event(dv, corev1.EventTypeNormal, ImportPVCNotReady, MessageImportPVCNotReady)
		return 0, nil
	}
	pod, err := r.getOrCreateSizeDetectionPod(sourcePvc, dv)
	// Check if pod has failed and, in that case, record an event with the error
	if podErr := cc.HandleFailedPod(err, sizeDetectionPodName(sourcePvc), targetPvc, r.recorder, r.client); podErr != nil {
		return 0, podErr
	} else if !isPodComplete(pod) {
		r.recorder.Event(dv, corev1.EventTypeNormal, SizeDetectionPodNotReady, MessageSizeDetectionPodNotReady)
		return 0, nil
	}
	// Parse raw image size from the pod's termination message
	if pod.Status.ContainerStatuses == nil ||
		pod.Status.ContainerStatuses[0].State.Terminated == nil ||
		pod.Status.ContainerStatuses[0].State.Terminated.ExitCode > 0 {
		return 0, r.handleSizeDetectionError(pod, dv, sourcePvc)
	}
	termMsg := pod.Status.ContainerStatuses[0].State.Terminated.Message
	imgSize, _ := strconv.ParseInt(termMsg, 10, 64)
	// Update Source PVC annotations
	if err := r.updateClonePVCAnnotations(sourcePvc, termMsg); err != nil {
		return imgSize, err
	}
	// Finally, delete the pod
	if cc.ShouldDeletePod(sourcePvc) {
		err = r.client.Delete(context.TODO(), pod)
		if err != nil && !k8serrors.IsNotFound(err) {
			return imgSize, err
		}
	}
	return imgSize, nil
}

// getOrCreateSizeDetectionPod gets the size-detection pod if it already exists/creates it if not
func (r *PvcCloneReconciler) getOrCreateSizeDetectionPod(
	sourcePvc *corev1.PersistentVolumeClaim,
	dv *cdiv1.DataVolume) (*corev1.Pod, error) {

	podName := sizeDetectionPodName(sourcePvc)
	pod := &corev1.Pod{}
	nn := types.NamespacedName{Namespace: sourcePvc.Namespace, Name: podName}
	// Trying to get the pod if it already exists/create it if not
	if err := r.client.Get(context.TODO(), nn, pod); err != nil {
		if !k8serrors.IsNotFound(err) {
			return nil, err
		}
		// Generate the pod spec
		pod = r.makeSizeDetectionPodSpec(sourcePvc, dv)
		if pod == nil {
			return nil, errors.Errorf("Size-detection pod spec could not be generated")
		}
		// Create the pod
		if err := r.client.Create(context.TODO(), pod); err != nil {
			// AlreadyExists is fine: another reconcile created it concurrently
			if !k8serrors.IsAlreadyExists(err) {
				return nil, err
			}
		}
		r.recorder.Event(dv, corev1.EventTypeNormal, SizeDetectionPodCreated, MessageSizeDetectionPodCreated)
		r.log.V(3).Info(MessageSizeDetectionPodCreated, "pod.Name", pod.Name, "pod.Namespace", pod.Namespace)
	}
	return pod, nil
}

// makeSizeDetectionPodSpec creates and returns the full size-detection pod spec
func (r *PvcCloneReconciler) makeSizeDetectionPodSpec(
	sourcePvc *corev1.PersistentVolumeClaim,
	dv *cdiv1.DataVolume) *corev1.Pod {

	workloadNodePlacement, err := cc.GetWorkloadNodePlacement(context.TODO(), r.client)
	if err != nil {
		return nil
	}
	// Generate individual specs
	objectMeta := makeSizeDetectionObjectMeta(sourcePvc, dv)
	volume := makeSizeDetectionVolumeSpec(sourcePvc.Name)
	container := r.makeSizeDetectionContainerSpec(volume.Name)
	if container == nil {
		return nil
	}
	imagePullSecrets, err := cc.GetImagePullSecrets(r.client)
	if err != nil {
		return nil
	}
	// Assemble the pod
	pod := &corev1.Pod{
		ObjectMeta: *objectMeta,
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{
				*container,
			},
			Volumes: []corev1.Volume{
				*volume,
			},
			RestartPolicy:     corev1.RestartPolicyOnFailure,
			NodeSelector:      workloadNodePlacement.NodeSelector,
			Tolerations:       workloadNodePlacement.Tolerations,
			Affinity:          workloadNodePlacement.Affinity,
			PriorityClassName: cc.GetPriorityClass(sourcePvc),
			ImagePullSecrets:  imagePullSecrets,
		},
	}
	if sourcePvc.Namespace == dv.Namespace {
		// same-namespace: the DataVolume owns the pod directly
		pod.OwnerReferences = []metav1.OwnerReference{
			*metav1.NewControllerRef(dv, schema.GroupVersionKind{
				Group:   cdiv1.SchemeGroupVersion.Group,
				Version: cdiv1.SchemeGroupVersion.Version,
				Kind:    "DataVolume",
			}),
		}
	} else {
		// cross-namespace: ownership is tracked via annotations instead
		if err := setAnnOwnedByDataVolume(pod, dv); err != nil {
			return nil
		}
		pod.Annotations[cc.AnnOwnerUID] = string(dv.UID)
	}
	cc.SetRestrictedSecurityContext(&pod.Spec)
	return pod
}

// makeSizeDetectionObjectMeta creates and returns the object metadata for the size-detection pod
func makeSizeDetectionObjectMeta(sourcePvc *corev1.PersistentVolumeClaim, dataVolume *cdiv1.DataVolume) *metav1.ObjectMeta {
	return &metav1.ObjectMeta{
		Name:      sizeDetectionPodName(sourcePvc),
		Namespace: sourcePvc.Namespace,
		Labels: map[string]string{
			common.CDILabelKey:       common.CDILabelValue,
			common.CDIComponentLabel: common.ImporterPodName,
		},
	}
}

// makeSizeDetectionContainerSpec creates and returns the size-detection pod's Container spec
func (r *PvcCloneReconciler) makeSizeDetectionContainerSpec(volName string) *corev1.Container {
	container := corev1.Container{
		Name:            "size-detection-volume",
		Image:           r.importerImage,
		ImagePullPolicy: corev1.PullPolicy(r.pullPolicy),
		Command:         []string{"/usr/bin/cdi-image-size-detection"},
		Args:            []string{"-image-path", common.ImporterWritePath},
		VolumeMounts: []corev1.VolumeMount{
			{
				MountPath: common.ImporterVolumePath,
				Name:      volName,
			},
		},
	}
	// Get and assign container's default resource requirements
	resourceRequirements, err := cc.GetDefaultPodResourceRequirements(r.client)
	if err != nil {
		return nil
	}
	if resourceRequirements != nil {
		container.Resources = *resourceRequirements
	}
	return &container
}

// makeSizeDetectionVolumeSpec creates and returns the size-detection pod's Volume spec
func makeSizeDetectionVolumeSpec(pvcName string) *corev1.Volume {
	return &corev1.Volume{
		Name: cc.DataVolName,
		VolumeSource: corev1.VolumeSource{
			PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
				ClaimName: pvcName,
			},
		},
	}
}

// handleSizeDetectionError handles the termination of the size-detection pod in case of error
func (r *PvcCloneReconciler) handleSizeDetectionError(pod *corev1.Pod, dv *cdiv1.DataVolume, sourcePvc *corev1.PersistentVolumeClaim) error {
	var event Event
	var exitCode int
	if pod.Status.ContainerStatuses == nil || pod.Status.ContainerStatuses[0].State.Terminated == nil {
		exitCode = cc.ErrUnknown
	} else {
		exitCode = int(pod.Status.ContainerStatuses[0].State.Terminated.ExitCode)
	}

	// We attempt to delete the pod
	err := r.client.Delete(context.TODO(), pod)
	if err != nil && !k8serrors.IsNotFound(err) {
		return err
	}

	// Map the pod's exit code onto a warning event
	switch exitCode {
	case cc.ErrBadArguments:
		event.eventType = corev1.EventTypeWarning
		event.reason = "ErrBadArguments"
		event.message = fmt.Sprintf(MessageSizeDetectionPodFailed, event.reason)
	case cc.ErrInvalidPath:
		event.eventType = corev1.EventTypeWarning
		event.reason = "ErrInvalidPath"
		event.message = fmt.Sprintf(MessageSizeDetectionPodFailed, event.reason)
	case cc.ErrInvalidFile:
		event.eventType = corev1.EventTypeWarning
		event.reason = "ErrInvalidFile"
		event.message = fmt.Sprintf(MessageSizeDetectionPodFailed, event.reason)
	case cc.ErrBadTermFile:
		event.eventType = corev1.EventTypeWarning
		event.reason = "ErrBadTermFile"
		event.message = fmt.Sprintf(MessageSizeDetectionPodFailed, event.reason)
	default:
		event.eventType = corev1.EventTypeWarning
		event.reason = "ErrUnknown"
		event.message = fmt.Sprintf(MessageSizeDetectionPodFailed, event.reason)
	}

	r.recorder.Event(dv, event.eventType, event.reason, event.message)
	return ErrInvalidTermMsg
}

// updateClonePVCAnnotations updates the clone-related annotations of the source PVC
func (r *PvcCloneReconciler) updateClonePVCAnnotations(sourcePvc *corev1.PersistentVolumeClaim, virtualSize string) error {
	currCapacity := sourcePvc.Status.Capacity
	sourcePvc.Annotations[AnnVirtualImageSize] = virtualSize
	sourcePvc.Annotations[AnnSourceCapacity] = currCapacity.Storage().String()
	return r.client.Update(context.TODO(), sourcePvc)
}

// sizeDetectionPodName returns the name of the size-detection pod according to the source PVC's UID
func sizeDetectionPodName(pvc *corev1.PersistentVolumeClaim) string {
	return fmt.Sprintf("size-detection-%s", pvc.UID)
}

// isPodComplete returns true if a pod is in 'Succeeded' phase, false if not
func isPodComplete(pod *corev1.Pod) bool {
	return pod != nil && pod.Status.Phase == corev1.PodSucceeded
}
{ // TODO preper const eventReason := "CloneSourceInUse" // Check if any pods are using the source PVC inUse, err := r.sourceInUse(datavolume, eventReason) if err != nil { return false, err } // Check if the source PVC is fully populated populated, err := r.isSourcePVCPopulated(datavolume) if err != nil { return false, err } if inUse || !populated { return false, nil } return true, nil }
identifier_body
pvc-clone-controller.go
/* Copyright 2022 The CDI Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package datavolume import ( "context" "crypto/rsa" "fmt" "strconv" "time" "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" "kubevirt.io/containerized-data-importer/pkg/common" cc "kubevirt.io/containerized-data-importer/pkg/controller/common" featuregates "kubevirt.io/containerized-data-importer/pkg/feature-gates" ) const ( sourceInUseRequeueDuration = time.Duration(5 * time.Second) pvcCloneControllerName = "datavolume-pvc-clone-controller" volumeCloneSourcePrefix = "volume-clone-source" ) // ErrInvalidTermMsg reports that the termination message from the size-detection pod doesn't exists or is not a valid quantity var ErrInvalidTermMsg = fmt.Errorf("the termination message from the size-detection pod is not-valid") // PvcCloneReconciler members type PvcCloneReconciler struct { CloneReconcilerBase } // 
NewPvcCloneController creates a new instance of the datavolume clone controller func NewPvcCloneController( ctx context.Context, mgr manager.Manager, log logr.Logger, clonerImage string, importerImage string, pullPolicy string, tokenPublicKey *rsa.PublicKey, tokenPrivateKey *rsa.PrivateKey, installerLabels map[string]string, ) (controller.Controller, error) { client := mgr.GetClient() reconciler := &PvcCloneReconciler{ CloneReconcilerBase: CloneReconcilerBase{ ReconcilerBase: ReconcilerBase{ client: client, scheme: mgr.GetScheme(), log: log.WithName(pvcCloneControllerName), featureGates: featuregates.NewFeatureGates(client), recorder: mgr.GetEventRecorderFor(pvcCloneControllerName), installerLabels: installerLabels, shouldUpdateProgress: true, }, clonerImage: clonerImage, importerImage: importerImage, pullPolicy: pullPolicy, cloneSourceKind: "PersistentVolumeClaim", shortTokenValidator: cc.NewCloneTokenValidator(common.CloneTokenIssuer, tokenPublicKey), longTokenValidator: cc.NewCloneTokenValidator(common.ExtendedCloneTokenIssuer, tokenPublicKey), // for long term tokens to handle cross namespace dumb clones tokenGenerator: newLongTermCloneTokenGenerator(tokenPrivateKey), }, } dataVolumeCloneController, err := controller.New(pvcCloneControllerName, mgr, controller.Options{ MaxConcurrentReconciles: 3, Reconciler: reconciler, }) if err != nil { return nil, err } if err = reconciler.addDataVolumeCloneControllerWatches(mgr, dataVolumeCloneController); err != nil { return nil, err } return dataVolumeCloneController, nil } func (r *PvcCloneReconciler) addDataVolumeCloneControllerWatches(mgr manager.Manager, datavolumeController controller.Controller) error { if err := addDataVolumeControllerCommonWatches(mgr, datavolumeController, dataVolumePvcClone); err != nil { return err } // Watch to reconcile clones created without source if err := addCloneWithoutSourceWatch(mgr, datavolumeController, &corev1.PersistentVolumeClaim{}, "spec.source.pvc"); err != nil { return err } if 
err := addDataSourceWatch(mgr, datavolumeController); err != nil { return err } if err := r.addVolumeCloneSourceWatch(datavolumeController); err != nil { return err } return nil } func addDataSourceWatch(mgr manager.Manager, c controller.Controller) error { const dvDataSourceField = "datasource" getKey := func(namespace, name string) string { return namespace + "/" + name } if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &cdiv1.DataVolume{}, dvDataSourceField, func(obj client.Object) []string { if sourceRef := obj.(*cdiv1.DataVolume).Spec.SourceRef; sourceRef != nil && sourceRef.Kind == cdiv1.DataVolumeDataSource { ns := obj.GetNamespace() if sourceRef.Namespace != nil && *sourceRef.Namespace != "" { ns = *sourceRef.Namespace } return []string{getKey(ns, sourceRef.Name)} } return nil }); err != nil { return err } mapToDataVolume := func(obj client.Object) (reqs []reconcile.Request) { var dvs cdiv1.DataVolumeList matchingFields := client.MatchingFields{dvDataSourceField: getKey(obj.GetNamespace(), obj.GetName())} if err := mgr.GetClient().List(context.TODO(), &dvs, matchingFields); err != nil { c.GetLogger().Error(err, "Unable to list DataVolumes", "matchingFields", matchingFields) return } for _, dv := range dvs.Items { reqs = append(reqs, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: dv.Namespace, Name: dv.Name}}) } return } if err := c.Watch(&source.Kind{Type: &cdiv1.DataSource{}}, handler.EnqueueRequestsFromMapFunc(mapToDataVolume), ); err != nil { return err } return nil } // Reconcile loop for the clone data volumes func (r *PvcCloneReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { return r.reconcile(ctx, req, r) } func (r *PvcCloneReconciler) prepare(syncState *dvSyncState) error { dv := syncState.dvMutated if err := r.populateSourceIfSourceRef(dv); err != nil { return err } return nil } func (r *PvcCloneReconciler) cleanup(syncState *dvSyncState) error { dv := syncState.dvMutated if 
err := r.populateSourceIfSourceRef(dv); err != nil { return err } if dv.DeletionTimestamp == nil && dv.Status.Phase != cdiv1.Succeeded { return nil } return r.reconcileVolumeCloneSourceCR(syncState) } func
(dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { // first clear out tokens that may have already been added delete(pvc.Annotations, cc.AnnCloneToken) delete(pvc.Annotations, cc.AnnExtendedCloneToken) if isCrossNamespaceClone(dv) { // only want this initially // extended token is added later token, ok := dv.Annotations[cc.AnnCloneToken] if !ok { return errors.Errorf("no clone token") } cc.AddAnnotation(pvc, cc.AnnCloneToken, token) } return nil } func volumeCloneSourceName(dv *cdiv1.DataVolume) string { return fmt.Sprintf("%s-%s", volumeCloneSourcePrefix, dv.UID) } func (r *PvcCloneReconciler) updateAnnotations(dataVolume *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { if dataVolume.Spec.Source.PVC == nil { return errors.Errorf("no source set for clone datavolume") } if err := addCloneToken(dataVolume, pvc); err != nil { return err } sourceNamespace := dataVolume.Spec.Source.PVC.Namespace if sourceNamespace == "" { sourceNamespace = dataVolume.Namespace } pvc.Annotations[cc.AnnCloneRequest] = sourceNamespace + "/" + dataVolume.Spec.Source.PVC.Name return nil } func (r *PvcCloneReconciler) sync(log logr.Logger, req reconcile.Request) (dvSyncResult, error) { syncState, err := r.syncClone(log, req) if err == nil { err = r.syncUpdate(log, &syncState) } return syncState.dvSyncResult, err } func (r *PvcCloneReconciler) syncClone(log logr.Logger, req reconcile.Request) (dvSyncState, error) { syncRes, syncErr := r.syncCommon(log, req, r.cleanup, r.prepare) if syncErr != nil || syncRes.result != nil { return syncRes, syncErr } pvc := syncRes.pvc pvcSpec := syncRes.pvcSpec datavolume := syncRes.dvMutated pvcPopulated := pvcIsPopulated(pvc, datavolume) staticProvisionPending := checkStaticProvisionPending(pvc, datavolume) prePopulated := dvIsPrePopulated(datavolume) if pvcPopulated || prePopulated || staticProvisionPending { return syncRes, nil } if addedToken, err := r.ensureExtendedTokenDV(datavolume); err != nil { return syncRes, err } else if 
addedToken { // make sure token gets persisted before doing anything else return syncRes, nil } if pvc == nil { // Check if source PVC exists and do proper validation before attempting to clone if done, err := r.validateCloneAndSourcePVC(&syncRes, log); err != nil { return syncRes, err } else if !done { return syncRes, nil } // Always call detect size, it will handle the case where size is specified // and detection pod not necessary if datavolume.Spec.Storage != nil { done, err := r.detectCloneSize(&syncRes) if err != nil { return syncRes, err } else if !done { // Check if the source PVC is ready to be cloned if readyToClone, err := r.isSourceReadyToClone(datavolume); err != nil { return syncRes, err } else if !readyToClone { if syncRes.result == nil { syncRes.result = &reconcile.Result{} } syncRes.result.RequeueAfter = sourceInUseRequeueDuration return syncRes, r.syncCloneStatusPhase(&syncRes, cdiv1.CloneScheduled, nil) } return syncRes, nil } } pvcModifier := r.updateAnnotations if syncRes.usePopulator { if isCrossNamespaceClone(datavolume) { if !cc.HasFinalizer(datavolume, crossNamespaceFinalizer) { cc.AddFinalizer(datavolume, crossNamespaceFinalizer) return syncRes, r.syncCloneStatusPhase(&syncRes, cdiv1.CloneScheduled, nil) } } pvcModifier = r.updatePVCForPopulation } newPvc, err := r.createPvcForDatavolume(datavolume, pvcSpec, pvcModifier) if err != nil { if cc.ErrQuotaExceeded(err) { syncErr = r.syncDataVolumeStatusPhaseWithEvent(&syncRes, cdiv1.Pending, nil, Event{ eventType: corev1.EventTypeWarning, reason: cc.ErrExceededQuota, message: err.Error(), }) if syncErr != nil { log.Error(syncErr, "failed to sync DataVolume status with event") } } return syncRes, err } pvc = newPvc } if syncRes.usePopulator { if err := r.reconcileVolumeCloneSourceCR(&syncRes); err != nil { return syncRes, err } ct, ok := pvc.Annotations[cc.AnnCloneType] if ok { cc.AddAnnotation(datavolume, cc.AnnCloneType, ct) } } else { cc.AddAnnotation(datavolume, cc.AnnCloneType, 
string(cdiv1.CloneStrategyHostAssisted)) if err := r.fallbackToHostAssisted(pvc); err != nil { return syncRes, err } } if err := r.ensureExtendedTokenPVC(datavolume, pvc); err != nil { return syncRes, err } return syncRes, syncErr } // Verify that the source PVC has been completely populated. func (r *PvcCloneReconciler) isSourcePVCPopulated(dv *cdiv1.DataVolume) (bool, error) { sourcePvc := &corev1.PersistentVolumeClaim{} if err := r.client.Get(context.TODO(), types.NamespacedName{Name: dv.Spec.Source.PVC.Name, Namespace: dv.Spec.Source.PVC.Namespace}, sourcePvc); err != nil { return false, err } return cc.IsPopulated(sourcePvc, r.client) } func (r *PvcCloneReconciler) sourceInUse(dv *cdiv1.DataVolume, eventReason string) (bool, error) { pods, err := cc.GetPodsUsingPVCs(context.TODO(), r.client, dv.Spec.Source.PVC.Namespace, sets.New(dv.Spec.Source.PVC.Name), false) if err != nil { return false, err } for _, pod := range pods { r.log.V(1).Info("Cannot snapshot", "namespace", dv.Namespace, "name", dv.Name, "pod namespace", pod.Namespace, "pod name", pod.Name) r.recorder.Eventf(dv, corev1.EventTypeWarning, eventReason, "pod %s/%s using PersistentVolumeClaim %s", pod.Namespace, pod.Name, dv.Spec.Source.PVC.Name) } return len(pods) > 0, nil } func (r *PvcCloneReconciler) findSourcePvc(dataVolume *cdiv1.DataVolume) (*corev1.PersistentVolumeClaim, error) { sourcePvcSpec := dataVolume.Spec.Source.PVC if sourcePvcSpec == nil { return nil, errors.New("no source PVC provided") } // Find source PVC sourcePvcNs := sourcePvcSpec.Namespace if sourcePvcNs == "" { sourcePvcNs = dataVolume.Namespace } pvc := &corev1.PersistentVolumeClaim{} if err := r.client.Get(context.TODO(), types.NamespacedName{Namespace: sourcePvcNs, Name: sourcePvcSpec.Name}, pvc); err != nil { if k8serrors.IsNotFound(err) { r.log.V(3).Info("Source PVC is missing", "source namespace", sourcePvcSpec.Namespace, "source name", sourcePvcSpec.Name) } return nil, err } return pvc, nil } // 
validateCloneAndSourcePVC checks if the source PVC of a clone exists and does proper validation func (r *PvcCloneReconciler) validateCloneAndSourcePVC(syncState *dvSyncState, log logr.Logger) (bool, error) { datavolume := syncState.dvMutated sourcePvc, err := r.findSourcePvc(datavolume) if err != nil { // Clone without source if k8serrors.IsNotFound(err) { syncErr := r.syncDataVolumeStatusPhaseWithEvent(syncState, datavolume.Status.Phase, nil, Event{ eventType: corev1.EventTypeWarning, reason: CloneWithoutSource, message: fmt.Sprintf(MessageCloneWithoutSource, "pvc", datavolume.Spec.Source.PVC.Name), }) if syncErr != nil { log.Error(syncErr, "failed to sync DataVolume status with event") } return false, nil } return false, err } err = cc.ValidateClone(sourcePvc, &datavolume.Spec) if err != nil { r.recorder.Event(datavolume, corev1.EventTypeWarning, CloneValidationFailed, MessageCloneValidationFailed) return false, err } return true, nil } // isSourceReadyToClone handles the reconciling process of a clone when the source PVC is not ready func (r *PvcCloneReconciler) isSourceReadyToClone(datavolume *cdiv1.DataVolume) (bool, error) { // TODO preper const eventReason := "CloneSourceInUse" // Check if any pods are using the source PVC inUse, err := r.sourceInUse(datavolume, eventReason) if err != nil { return false, err } // Check if the source PVC is fully populated populated, err := r.isSourcePVCPopulated(datavolume) if err != nil { return false, err } if inUse || !populated { return false, nil } return true, nil } // detectCloneSize obtains and assigns the original PVC's size when cloning using an empty storage value func (r *PvcCloneReconciler) detectCloneSize(syncState *dvSyncState) (bool, error) { sourcePvc, err := r.findSourcePvc(syncState.dvMutated) if err != nil { return false, err } // because of filesystem overhead calculations when cloning // even if storage size is requested we have to calculate source size // when source is filesystem and target is block 
requestedSize, hasSize := syncState.pvcSpec.Resources.Requests[corev1.ResourceStorage] sizeRequired := !hasSize || requestedSize.IsZero() targetIsBlock := syncState.pvcSpec.VolumeMode != nil && *syncState.pvcSpec.VolumeMode == corev1.PersistentVolumeBlock sourceIsFilesystem := cc.GetVolumeMode(sourcePvc) == corev1.PersistentVolumeFilesystem // have to be explicit here or detection pod will crash sourceIsKubevirt := sourcePvc.Annotations[cc.AnnContentType] == string(cdiv1.DataVolumeKubeVirt) if !sizeRequired && (!targetIsBlock || !sourceIsFilesystem || !sourceIsKubevirt) { return true, nil } var targetSize int64 sourceCapacity := sourcePvc.Status.Capacity.Storage() // Due to possible filesystem overhead complications when cloning // using host-assisted strategy, we create a pod that automatically // collects the size of the original virtual image with 'qemu-img'. // If the original PVC's volume mode is "block", // we simply extract the value from the original PVC's spec. if sourceIsFilesystem && sourceIsKubevirt { var available bool // If available, we first try to get the virtual size from previous iterations targetSize, available = getSizeFromAnnotations(sourcePvc) if !available { targetSize, err = r.getSizeFromPod(syncState.pvc, sourcePvc, syncState.dvMutated) if err != nil { return false, err } else if targetSize == 0 { return false, nil } } } else { targetSize, _ = sourceCapacity.AsInt64() } var isPermissiveClone bool if sizeRequired { // Allow the clone-controller to skip the size comparison requirement // if the source's size ends up being larger due to overhead differences // TODO: Fix this in next PR that uses actual size also in validation isPermissiveClone = sourceCapacity.CmpInt64(targetSize) == 1 } else { isPermissiveClone = requestedSize.CmpInt64(targetSize) >= 0 } if isPermissiveClone { syncState.dvMutated.Annotations[cc.AnnPermissiveClone] = "true" } if !sizeRequired { return true, nil } // Parse size into a 'Quantity' struct and, if needed, inflate 
it with filesystem overhead targetCapacity, err := cc.InflateSizeWithOverhead(context.TODO(), r.client, targetSize, syncState.pvcSpec) if err != nil { return false, err } syncState.pvcSpec.Resources.Requests[corev1.ResourceStorage] = targetCapacity return true, nil } // getSizeFromAnnotations checks the source PVC's annotations and returns the requested size if it has already been obtained func getSizeFromAnnotations(sourcePvc *corev1.PersistentVolumeClaim) (int64, bool) { virtualImageSize, available := sourcePvc.Annotations[AnnVirtualImageSize] if available { sourceCapacity, available := sourcePvc.Annotations[AnnSourceCapacity] currCapacity := sourcePvc.Status.Capacity // Checks if the original PVC's capacity has changed if available && currCapacity.Storage().Cmp(resource.MustParse(sourceCapacity)) == 0 { // Parse the raw string containing the image size into a 64-bit int imgSizeInt, _ := strconv.ParseInt(virtualImageSize, 10, 64) return imgSizeInt, true } } return 0, false } // getSizeFromPod attempts to get the image size from a pod that directly obtains said value from the source PVC func (r *PvcCloneReconciler) getSizeFromPod(targetPvc, sourcePvc *corev1.PersistentVolumeClaim, dv *cdiv1.DataVolume) (int64, error) { // The pod should not be created until the source PVC has finished the import process populated, err := cc.IsPopulated(sourcePvc, r.client) if err != nil { return 0, err } if !populated { r.recorder.Event(dv, corev1.EventTypeNormal, ImportPVCNotReady, MessageImportPVCNotReady) return 0, nil } pod, err := r.getOrCreateSizeDetectionPod(sourcePvc, dv) // Check if pod has failed and, in that case, record an event with the error if podErr := cc.HandleFailedPod(err, sizeDetectionPodName(sourcePvc), targetPvc, r.recorder, r.client); podErr != nil { return 0, podErr } else if !isPodComplete(pod) { r.recorder.Event(dv, corev1.EventTypeNormal, SizeDetectionPodNotReady, MessageSizeDetectionPodNotReady) return 0, nil } // Parse raw image size from the pod's 
termination message if pod.Status.ContainerStatuses == nil || pod.Status.ContainerStatuses[0].State.Terminated == nil || pod.Status.ContainerStatuses[0].State.Terminated.ExitCode > 0 { return 0, r.handleSizeDetectionError(pod, dv, sourcePvc) } termMsg := pod.Status.ContainerStatuses[0].State.Terminated.Message imgSize, _ := strconv.ParseInt(termMsg, 10, 64) // Update Source PVC annotations if err := r.updateClonePVCAnnotations(sourcePvc, termMsg); err != nil { return imgSize, err } // Finally, detelete the pod if cc.ShouldDeletePod(sourcePvc) { err = r.client.Delete(context.TODO(), pod) if err != nil && !k8serrors.IsNotFound(err) { return imgSize, err } } return imgSize, nil } // getOrCreateSizeDetectionPod gets the size-detection pod if it already exists/creates it if not func (r *PvcCloneReconciler) getOrCreateSizeDetectionPod( sourcePvc *corev1.PersistentVolumeClaim, dv *cdiv1.DataVolume) (*corev1.Pod, error) { podName := sizeDetectionPodName(sourcePvc) pod := &corev1.Pod{} nn := types.NamespacedName{Namespace: sourcePvc.Namespace, Name: podName} // Trying to get the pod if it already exists/create it if not if err := r.client.Get(context.TODO(), nn, pod); err != nil { if !k8serrors.IsNotFound(err) { return nil, err } // Generate the pod spec pod = r.makeSizeDetectionPodSpec(sourcePvc, dv) if pod == nil { return nil, errors.Errorf("Size-detection pod spec could not be generated") } // Create the pod if err := r.client.Create(context.TODO(), pod); err != nil { if !k8serrors.IsAlreadyExists(err) { return nil, err } } r.recorder.Event(dv, corev1.EventTypeNormal, SizeDetectionPodCreated, MessageSizeDetectionPodCreated) r.log.V(3).Info(MessageSizeDetectionPodCreated, "pod.Name", pod.Name, "pod.Namespace", pod.Namespace) } return pod, nil } // makeSizeDetectionPodSpec creates and returns the full size-detection pod spec func (r *PvcCloneReconciler) makeSizeDetectionPodSpec( sourcePvc *corev1.PersistentVolumeClaim, dv *cdiv1.DataVolume) *corev1.Pod { 
workloadNodePlacement, err := cc.GetWorkloadNodePlacement(context.TODO(), r.client) if err != nil { return nil } // Generate individual specs objectMeta := makeSizeDetectionObjectMeta(sourcePvc, dv) volume := makeSizeDetectionVolumeSpec(sourcePvc.Name) container := r.makeSizeDetectionContainerSpec(volume.Name) if container == nil { return nil } imagePullSecrets, err := cc.GetImagePullSecrets(r.client) if err != nil { return nil } // Assemble the pod pod := &corev1.Pod{ ObjectMeta: *objectMeta, Spec: corev1.PodSpec{ Containers: []corev1.Container{ *container, }, Volumes: []corev1.Volume{ *volume, }, RestartPolicy: corev1.RestartPolicyOnFailure, NodeSelector: workloadNodePlacement.NodeSelector, Tolerations: workloadNodePlacement.Tolerations, Affinity: workloadNodePlacement.Affinity, PriorityClassName: cc.GetPriorityClass(sourcePvc), ImagePullSecrets: imagePullSecrets, }, } if sourcePvc.Namespace == dv.Namespace { pod.OwnerReferences = []metav1.OwnerReference{ *metav1.NewControllerRef(dv, schema.GroupVersionKind{ Group: cdiv1.SchemeGroupVersion.Group, Version: cdiv1.SchemeGroupVersion.Version, Kind: "DataVolume", }), } } else { if err := setAnnOwnedByDataVolume(pod, dv); err != nil { return nil } pod.Annotations[cc.AnnOwnerUID] = string(dv.UID) } cc.SetRestrictedSecurityContext(&pod.Spec) return pod } // makeSizeDetectionObjectMeta creates and returns the object metadata for the size-detection pod func makeSizeDetectionObjectMeta(sourcePvc *corev1.PersistentVolumeClaim, dataVolume *cdiv1.DataVolume) *metav1.ObjectMeta { return &metav1.ObjectMeta{ Name: sizeDetectionPodName(sourcePvc), Namespace: sourcePvc.Namespace, Labels: map[string]string{ common.CDILabelKey: common.CDILabelValue, common.CDIComponentLabel: common.ImporterPodName, }, } } // makeSizeDetectionContainerSpec creates and returns the size-detection pod's Container spec func (r *PvcCloneReconciler) makeSizeDetectionContainerSpec(volName string) *corev1.Container { container := corev1.Container{ Name: 
"size-detection-volume", Image: r.importerImage, ImagePullPolicy: corev1.PullPolicy(r.pullPolicy), Command: []string{"/usr/bin/cdi-image-size-detection"}, Args: []string{"-image-path", common.ImporterWritePath}, VolumeMounts: []corev1.VolumeMount{ { MountPath: common.ImporterVolumePath, Name: volName, }, }, } // Get and assign container's default resource requirements resourceRequirements, err := cc.GetDefaultPodResourceRequirements(r.client) if err != nil { return nil } if resourceRequirements != nil { container.Resources = *resourceRequirements } return &container } // makeSizeDetectionVolumeSpec creates and returns the size-detection pod's Volume spec func makeSizeDetectionVolumeSpec(pvcName string) *corev1.Volume { return &corev1.Volume{ Name: cc.DataVolName, VolumeSource: corev1.VolumeSource{ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ ClaimName: pvcName, }, }, } } // handleSizeDetectionError handles the termination of the size-detection pod in case of error func (r *PvcCloneReconciler) handleSizeDetectionError(pod *corev1.Pod, dv *cdiv1.DataVolume, sourcePvc *corev1.PersistentVolumeClaim) error { var event Event var exitCode int if pod.Status.ContainerStatuses == nil || pod.Status.ContainerStatuses[0].State.Terminated == nil { exitCode = cc.ErrUnknown } else { exitCode = int(pod.Status.ContainerStatuses[0].State.Terminated.ExitCode) } // We attempt to delete the pod err := r.client.Delete(context.TODO(), pod) if err != nil && !k8serrors.IsNotFound(err) { return err } switch exitCode { case cc.ErrBadArguments: event.eventType = corev1.EventTypeWarning event.reason = "ErrBadArguments" event.message = fmt.Sprintf(MessageSizeDetectionPodFailed, event.reason) case cc.ErrInvalidPath: event.eventType = corev1.EventTypeWarning event.reason = "ErrInvalidPath" event.message = fmt.Sprintf(MessageSizeDetectionPodFailed, event.reason) case cc.ErrInvalidFile: event.eventType = corev1.EventTypeWarning event.reason = "ErrInvalidFile" event.message = 
fmt.Sprintf(MessageSizeDetectionPodFailed, event.reason) case cc.ErrBadTermFile: event.eventType = corev1.EventTypeWarning event.reason = "ErrBadTermFile" event.message = fmt.Sprintf(MessageSizeDetectionPodFailed, event.reason) default: event.eventType = corev1.EventTypeWarning event.reason = "ErrUnknown" event.message = fmt.Sprintf(MessageSizeDetectionPodFailed, event.reason) } r.recorder.Event(dv, event.eventType, event.reason, event.message) return ErrInvalidTermMsg } // updateClonePVCAnnotations updates the clone-related annotations of the source PVC func (r *PvcCloneReconciler) updateClonePVCAnnotations(sourcePvc *corev1.PersistentVolumeClaim, virtualSize string) error { currCapacity := sourcePvc.Status.Capacity sourcePvc.Annotations[AnnVirtualImageSize] = virtualSize sourcePvc.Annotations[AnnSourceCapacity] = currCapacity.Storage().String() return r.client.Update(context.TODO(), sourcePvc) } // sizeDetectionPodName returns the name of the size-detection pod accoding to the source PVC's UID func sizeDetectionPodName(pvc *corev1.PersistentVolumeClaim) string { return fmt.Sprintf("size-detection-%s", pvc.UID) } // isPodComplete returns true if a pod is in 'Succeeded' phase, false if not func isPodComplete(pod *corev1.Pod) bool { return pod != nil && pod.Status.Phase == corev1.PodSucceeded }
addCloneToken
identifier_name
pvc-clone-controller.go
/* Copyright 2022 The CDI Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package datavolume import ( "context" "crypto/rsa" "fmt" "strconv" "time" "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" "kubevirt.io/containerized-data-importer/pkg/common" cc "kubevirt.io/containerized-data-importer/pkg/controller/common" featuregates "kubevirt.io/containerized-data-importer/pkg/feature-gates" ) const ( sourceInUseRequeueDuration = time.Duration(5 * time.Second) pvcCloneControllerName = "datavolume-pvc-clone-controller" volumeCloneSourcePrefix = "volume-clone-source" ) // ErrInvalidTermMsg reports that the termination message from the size-detection pod doesn't exists or is not a valid quantity var ErrInvalidTermMsg = fmt.Errorf("the termination message from the size-detection pod is not-valid") // PvcCloneReconciler members type PvcCloneReconciler struct { CloneReconcilerBase } // 
NewPvcCloneController creates a new instance of the datavolume clone controller func NewPvcCloneController( ctx context.Context, mgr manager.Manager, log logr.Logger, clonerImage string, importerImage string, pullPolicy string, tokenPublicKey *rsa.PublicKey, tokenPrivateKey *rsa.PrivateKey, installerLabels map[string]string, ) (controller.Controller, error) { client := mgr.GetClient() reconciler := &PvcCloneReconciler{ CloneReconcilerBase: CloneReconcilerBase{ ReconcilerBase: ReconcilerBase{ client: client, scheme: mgr.GetScheme(), log: log.WithName(pvcCloneControllerName), featureGates: featuregates.NewFeatureGates(client), recorder: mgr.GetEventRecorderFor(pvcCloneControllerName), installerLabels: installerLabels, shouldUpdateProgress: true, }, clonerImage: clonerImage, importerImage: importerImage, pullPolicy: pullPolicy, cloneSourceKind: "PersistentVolumeClaim", shortTokenValidator: cc.NewCloneTokenValidator(common.CloneTokenIssuer, tokenPublicKey), longTokenValidator: cc.NewCloneTokenValidator(common.ExtendedCloneTokenIssuer, tokenPublicKey), // for long term tokens to handle cross namespace dumb clones tokenGenerator: newLongTermCloneTokenGenerator(tokenPrivateKey), }, } dataVolumeCloneController, err := controller.New(pvcCloneControllerName, mgr, controller.Options{ MaxConcurrentReconciles: 3, Reconciler: reconciler, }) if err != nil { return nil, err } if err = reconciler.addDataVolumeCloneControllerWatches(mgr, dataVolumeCloneController); err != nil { return nil, err } return dataVolumeCloneController, nil } func (r *PvcCloneReconciler) addDataVolumeCloneControllerWatches(mgr manager.Manager, datavolumeController controller.Controller) error { if err := addDataVolumeControllerCommonWatches(mgr, datavolumeController, dataVolumePvcClone); err != nil { return err } // Watch to reconcile clones created without source if err := addCloneWithoutSourceWatch(mgr, datavolumeController, &corev1.PersistentVolumeClaim{}, "spec.source.pvc"); err != nil { return err } if 
err := addDataSourceWatch(mgr, datavolumeController); err != nil { return err } if err := r.addVolumeCloneSourceWatch(datavolumeController); err != nil { return err } return nil } func addDataSourceWatch(mgr manager.Manager, c controller.Controller) error { const dvDataSourceField = "datasource"
getKey := func(namespace, name string) string { return namespace + "/" + name } if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &cdiv1.DataVolume{}, dvDataSourceField, func(obj client.Object) []string { if sourceRef := obj.(*cdiv1.DataVolume).Spec.SourceRef; sourceRef != nil && sourceRef.Kind == cdiv1.DataVolumeDataSource { ns := obj.GetNamespace() if sourceRef.Namespace != nil && *sourceRef.Namespace != "" { ns = *sourceRef.Namespace } return []string{getKey(ns, sourceRef.Name)} } return nil }); err != nil { return err } mapToDataVolume := func(obj client.Object) (reqs []reconcile.Request) { var dvs cdiv1.DataVolumeList matchingFields := client.MatchingFields{dvDataSourceField: getKey(obj.GetNamespace(), obj.GetName())} if err := mgr.GetClient().List(context.TODO(), &dvs, matchingFields); err != nil { c.GetLogger().Error(err, "Unable to list DataVolumes", "matchingFields", matchingFields) return } for _, dv := range dvs.Items { reqs = append(reqs, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: dv.Namespace, Name: dv.Name}}) } return } if err := c.Watch(&source.Kind{Type: &cdiv1.DataSource{}}, handler.EnqueueRequestsFromMapFunc(mapToDataVolume), ); err != nil { return err } return nil } // Reconcile loop for the clone data volumes func (r *PvcCloneReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { return r.reconcile(ctx, req, r) } func (r *PvcCloneReconciler) prepare(syncState *dvSyncState) error { dv := syncState.dvMutated if err := r.populateSourceIfSourceRef(dv); err != nil { return err } return nil } func (r *PvcCloneReconciler) cleanup(syncState *dvSyncState) error { dv := syncState.dvMutated if err := r.populateSourceIfSourceRef(dv); err != nil { return err } if dv.DeletionTimestamp == nil && dv.Status.Phase != cdiv1.Succeeded { return nil } return r.reconcileVolumeCloneSourceCR(syncState) } func addCloneToken(dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { // first 
clear out tokens that may have already been added delete(pvc.Annotations, cc.AnnCloneToken) delete(pvc.Annotations, cc.AnnExtendedCloneToken) if isCrossNamespaceClone(dv) { // only want this initially // extended token is added later token, ok := dv.Annotations[cc.AnnCloneToken] if !ok { return errors.Errorf("no clone token") } cc.AddAnnotation(pvc, cc.AnnCloneToken, token) } return nil } func volumeCloneSourceName(dv *cdiv1.DataVolume) string { return fmt.Sprintf("%s-%s", volumeCloneSourcePrefix, dv.UID) } func (r *PvcCloneReconciler) updateAnnotations(dataVolume *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { if dataVolume.Spec.Source.PVC == nil { return errors.Errorf("no source set for clone datavolume") } if err := addCloneToken(dataVolume, pvc); err != nil { return err } sourceNamespace := dataVolume.Spec.Source.PVC.Namespace if sourceNamespace == "" { sourceNamespace = dataVolume.Namespace } pvc.Annotations[cc.AnnCloneRequest] = sourceNamespace + "/" + dataVolume.Spec.Source.PVC.Name return nil } func (r *PvcCloneReconciler) sync(log logr.Logger, req reconcile.Request) (dvSyncResult, error) { syncState, err := r.syncClone(log, req) if err == nil { err = r.syncUpdate(log, &syncState) } return syncState.dvSyncResult, err } func (r *PvcCloneReconciler) syncClone(log logr.Logger, req reconcile.Request) (dvSyncState, error) { syncRes, syncErr := r.syncCommon(log, req, r.cleanup, r.prepare) if syncErr != nil || syncRes.result != nil { return syncRes, syncErr } pvc := syncRes.pvc pvcSpec := syncRes.pvcSpec datavolume := syncRes.dvMutated pvcPopulated := pvcIsPopulated(pvc, datavolume) staticProvisionPending := checkStaticProvisionPending(pvc, datavolume) prePopulated := dvIsPrePopulated(datavolume) if pvcPopulated || prePopulated || staticProvisionPending { return syncRes, nil } if addedToken, err := r.ensureExtendedTokenDV(datavolume); err != nil { return syncRes, err } else if addedToken { // make sure token gets persisted before doing anything else 
return syncRes, nil } if pvc == nil { // Check if source PVC exists and do proper validation before attempting to clone if done, err := r.validateCloneAndSourcePVC(&syncRes, log); err != nil { return syncRes, err } else if !done { return syncRes, nil } // Always call detect size, it will handle the case where size is specified // and detection pod not necessary if datavolume.Spec.Storage != nil { done, err := r.detectCloneSize(&syncRes) if err != nil { return syncRes, err } else if !done { // Check if the source PVC is ready to be cloned if readyToClone, err := r.isSourceReadyToClone(datavolume); err != nil { return syncRes, err } else if !readyToClone { if syncRes.result == nil { syncRes.result = &reconcile.Result{} } syncRes.result.RequeueAfter = sourceInUseRequeueDuration return syncRes, r.syncCloneStatusPhase(&syncRes, cdiv1.CloneScheduled, nil) } return syncRes, nil } } pvcModifier := r.updateAnnotations if syncRes.usePopulator { if isCrossNamespaceClone(datavolume) { if !cc.HasFinalizer(datavolume, crossNamespaceFinalizer) { cc.AddFinalizer(datavolume, crossNamespaceFinalizer) return syncRes, r.syncCloneStatusPhase(&syncRes, cdiv1.CloneScheduled, nil) } } pvcModifier = r.updatePVCForPopulation } newPvc, err := r.createPvcForDatavolume(datavolume, pvcSpec, pvcModifier) if err != nil { if cc.ErrQuotaExceeded(err) { syncErr = r.syncDataVolumeStatusPhaseWithEvent(&syncRes, cdiv1.Pending, nil, Event{ eventType: corev1.EventTypeWarning, reason: cc.ErrExceededQuota, message: err.Error(), }) if syncErr != nil { log.Error(syncErr, "failed to sync DataVolume status with event") } } return syncRes, err } pvc = newPvc } if syncRes.usePopulator { if err := r.reconcileVolumeCloneSourceCR(&syncRes); err != nil { return syncRes, err } ct, ok := pvc.Annotations[cc.AnnCloneType] if ok { cc.AddAnnotation(datavolume, cc.AnnCloneType, ct) } } else { cc.AddAnnotation(datavolume, cc.AnnCloneType, string(cdiv1.CloneStrategyHostAssisted)) if err := r.fallbackToHostAssisted(pvc); err 
!= nil { return syncRes, err } } if err := r.ensureExtendedTokenPVC(datavolume, pvc); err != nil { return syncRes, err } return syncRes, syncErr } // Verify that the source PVC has been completely populated. func (r *PvcCloneReconciler) isSourcePVCPopulated(dv *cdiv1.DataVolume) (bool, error) { sourcePvc := &corev1.PersistentVolumeClaim{} if err := r.client.Get(context.TODO(), types.NamespacedName{Name: dv.Spec.Source.PVC.Name, Namespace: dv.Spec.Source.PVC.Namespace}, sourcePvc); err != nil { return false, err } return cc.IsPopulated(sourcePvc, r.client) } func (r *PvcCloneReconciler) sourceInUse(dv *cdiv1.DataVolume, eventReason string) (bool, error) { pods, err := cc.GetPodsUsingPVCs(context.TODO(), r.client, dv.Spec.Source.PVC.Namespace, sets.New(dv.Spec.Source.PVC.Name), false) if err != nil { return false, err } for _, pod := range pods { r.log.V(1).Info("Cannot snapshot", "namespace", dv.Namespace, "name", dv.Name, "pod namespace", pod.Namespace, "pod name", pod.Name) r.recorder.Eventf(dv, corev1.EventTypeWarning, eventReason, "pod %s/%s using PersistentVolumeClaim %s", pod.Namespace, pod.Name, dv.Spec.Source.PVC.Name) } return len(pods) > 0, nil } func (r *PvcCloneReconciler) findSourcePvc(dataVolume *cdiv1.DataVolume) (*corev1.PersistentVolumeClaim, error) { sourcePvcSpec := dataVolume.Spec.Source.PVC if sourcePvcSpec == nil { return nil, errors.New("no source PVC provided") } // Find source PVC sourcePvcNs := sourcePvcSpec.Namespace if sourcePvcNs == "" { sourcePvcNs = dataVolume.Namespace } pvc := &corev1.PersistentVolumeClaim{} if err := r.client.Get(context.TODO(), types.NamespacedName{Namespace: sourcePvcNs, Name: sourcePvcSpec.Name}, pvc); err != nil { if k8serrors.IsNotFound(err) { r.log.V(3).Info("Source PVC is missing", "source namespace", sourcePvcSpec.Namespace, "source name", sourcePvcSpec.Name) } return nil, err } return pvc, nil } // validateCloneAndSourcePVC checks if the source PVC of a clone exists and does proper validation func (r 
*PvcCloneReconciler) validateCloneAndSourcePVC(syncState *dvSyncState, log logr.Logger) (bool, error) { datavolume := syncState.dvMutated sourcePvc, err := r.findSourcePvc(datavolume) if err != nil { // Clone without source if k8serrors.IsNotFound(err) { syncErr := r.syncDataVolumeStatusPhaseWithEvent(syncState, datavolume.Status.Phase, nil, Event{ eventType: corev1.EventTypeWarning, reason: CloneWithoutSource, message: fmt.Sprintf(MessageCloneWithoutSource, "pvc", datavolume.Spec.Source.PVC.Name), }) if syncErr != nil { log.Error(syncErr, "failed to sync DataVolume status with event") } return false, nil } return false, err } err = cc.ValidateClone(sourcePvc, &datavolume.Spec) if err != nil { r.recorder.Event(datavolume, corev1.EventTypeWarning, CloneValidationFailed, MessageCloneValidationFailed) return false, err } return true, nil } // isSourceReadyToClone handles the reconciling process of a clone when the source PVC is not ready func (r *PvcCloneReconciler) isSourceReadyToClone(datavolume *cdiv1.DataVolume) (bool, error) { // TODO preper const eventReason := "CloneSourceInUse" // Check if any pods are using the source PVC inUse, err := r.sourceInUse(datavolume, eventReason) if err != nil { return false, err } // Check if the source PVC is fully populated populated, err := r.isSourcePVCPopulated(datavolume) if err != nil { return false, err } if inUse || !populated { return false, nil } return true, nil } // detectCloneSize obtains and assigns the original PVC's size when cloning using an empty storage value func (r *PvcCloneReconciler) detectCloneSize(syncState *dvSyncState) (bool, error) { sourcePvc, err := r.findSourcePvc(syncState.dvMutated) if err != nil { return false, err } // because of filesystem overhead calculations when cloning // even if storage size is requested we have to calculate source size // when source is filesystem and target is block requestedSize, hasSize := syncState.pvcSpec.Resources.Requests[corev1.ResourceStorage] sizeRequired := 
!hasSize || requestedSize.IsZero() targetIsBlock := syncState.pvcSpec.VolumeMode != nil && *syncState.pvcSpec.VolumeMode == corev1.PersistentVolumeBlock sourceIsFilesystem := cc.GetVolumeMode(sourcePvc) == corev1.PersistentVolumeFilesystem // have to be explicit here or detection pod will crash sourceIsKubevirt := sourcePvc.Annotations[cc.AnnContentType] == string(cdiv1.DataVolumeKubeVirt) if !sizeRequired && (!targetIsBlock || !sourceIsFilesystem || !sourceIsKubevirt) { return true, nil } var targetSize int64 sourceCapacity := sourcePvc.Status.Capacity.Storage() // Due to possible filesystem overhead complications when cloning // using host-assisted strategy, we create a pod that automatically // collects the size of the original virtual image with 'qemu-img'. // If the original PVC's volume mode is "block", // we simply extract the value from the original PVC's spec. if sourceIsFilesystem && sourceIsKubevirt { var available bool // If available, we first try to get the virtual size from previous iterations targetSize, available = getSizeFromAnnotations(sourcePvc) if !available { targetSize, err = r.getSizeFromPod(syncState.pvc, sourcePvc, syncState.dvMutated) if err != nil { return false, err } else if targetSize == 0 { return false, nil } } } else { targetSize, _ = sourceCapacity.AsInt64() } var isPermissiveClone bool if sizeRequired { // Allow the clone-controller to skip the size comparison requirement // if the source's size ends up being larger due to overhead differences // TODO: Fix this in next PR that uses actual size also in validation isPermissiveClone = sourceCapacity.CmpInt64(targetSize) == 1 } else { isPermissiveClone = requestedSize.CmpInt64(targetSize) >= 0 } if isPermissiveClone { syncState.dvMutated.Annotations[cc.AnnPermissiveClone] = "true" } if !sizeRequired { return true, nil } // Parse size into a 'Quantity' struct and, if needed, inflate it with filesystem overhead targetCapacity, err := cc.InflateSizeWithOverhead(context.TODO(), r.client, 
targetSize, syncState.pvcSpec) if err != nil { return false, err } syncState.pvcSpec.Resources.Requests[corev1.ResourceStorage] = targetCapacity return true, nil } // getSizeFromAnnotations checks the source PVC's annotations and returns the requested size if it has already been obtained func getSizeFromAnnotations(sourcePvc *corev1.PersistentVolumeClaim) (int64, bool) { virtualImageSize, available := sourcePvc.Annotations[AnnVirtualImageSize] if available { sourceCapacity, available := sourcePvc.Annotations[AnnSourceCapacity] currCapacity := sourcePvc.Status.Capacity // Checks if the original PVC's capacity has changed if available && currCapacity.Storage().Cmp(resource.MustParse(sourceCapacity)) == 0 { // Parse the raw string containing the image size into a 64-bit int imgSizeInt, _ := strconv.ParseInt(virtualImageSize, 10, 64) return imgSizeInt, true } } return 0, false } // getSizeFromPod attempts to get the image size from a pod that directly obtains said value from the source PVC func (r *PvcCloneReconciler) getSizeFromPod(targetPvc, sourcePvc *corev1.PersistentVolumeClaim, dv *cdiv1.DataVolume) (int64, error) { // The pod should not be created until the source PVC has finished the import process populated, err := cc.IsPopulated(sourcePvc, r.client) if err != nil { return 0, err } if !populated { r.recorder.Event(dv, corev1.EventTypeNormal, ImportPVCNotReady, MessageImportPVCNotReady) return 0, nil } pod, err := r.getOrCreateSizeDetectionPod(sourcePvc, dv) // Check if pod has failed and, in that case, record an event with the error if podErr := cc.HandleFailedPod(err, sizeDetectionPodName(sourcePvc), targetPvc, r.recorder, r.client); podErr != nil { return 0, podErr } else if !isPodComplete(pod) { r.recorder.Event(dv, corev1.EventTypeNormal, SizeDetectionPodNotReady, MessageSizeDetectionPodNotReady) return 0, nil } // Parse raw image size from the pod's termination message if pod.Status.ContainerStatuses == nil || 
pod.Status.ContainerStatuses[0].State.Terminated == nil || pod.Status.ContainerStatuses[0].State.Terminated.ExitCode > 0 { return 0, r.handleSizeDetectionError(pod, dv, sourcePvc) } termMsg := pod.Status.ContainerStatuses[0].State.Terminated.Message imgSize, _ := strconv.ParseInt(termMsg, 10, 64) // Update Source PVC annotations if err := r.updateClonePVCAnnotations(sourcePvc, termMsg); err != nil { return imgSize, err } // Finally, detelete the pod if cc.ShouldDeletePod(sourcePvc) { err = r.client.Delete(context.TODO(), pod) if err != nil && !k8serrors.IsNotFound(err) { return imgSize, err } } return imgSize, nil } // getOrCreateSizeDetectionPod gets the size-detection pod if it already exists/creates it if not func (r *PvcCloneReconciler) getOrCreateSizeDetectionPod( sourcePvc *corev1.PersistentVolumeClaim, dv *cdiv1.DataVolume) (*corev1.Pod, error) { podName := sizeDetectionPodName(sourcePvc) pod := &corev1.Pod{} nn := types.NamespacedName{Namespace: sourcePvc.Namespace, Name: podName} // Trying to get the pod if it already exists/create it if not if err := r.client.Get(context.TODO(), nn, pod); err != nil { if !k8serrors.IsNotFound(err) { return nil, err } // Generate the pod spec pod = r.makeSizeDetectionPodSpec(sourcePvc, dv) if pod == nil { return nil, errors.Errorf("Size-detection pod spec could not be generated") } // Create the pod if err := r.client.Create(context.TODO(), pod); err != nil { if !k8serrors.IsAlreadyExists(err) { return nil, err } } r.recorder.Event(dv, corev1.EventTypeNormal, SizeDetectionPodCreated, MessageSizeDetectionPodCreated) r.log.V(3).Info(MessageSizeDetectionPodCreated, "pod.Name", pod.Name, "pod.Namespace", pod.Namespace) } return pod, nil } // makeSizeDetectionPodSpec creates and returns the full size-detection pod spec func (r *PvcCloneReconciler) makeSizeDetectionPodSpec( sourcePvc *corev1.PersistentVolumeClaim, dv *cdiv1.DataVolume) *corev1.Pod { workloadNodePlacement, err := cc.GetWorkloadNodePlacement(context.TODO(), 
r.client) if err != nil { return nil } // Generate individual specs objectMeta := makeSizeDetectionObjectMeta(sourcePvc, dv) volume := makeSizeDetectionVolumeSpec(sourcePvc.Name) container := r.makeSizeDetectionContainerSpec(volume.Name) if container == nil { return nil } imagePullSecrets, err := cc.GetImagePullSecrets(r.client) if err != nil { return nil } // Assemble the pod pod := &corev1.Pod{ ObjectMeta: *objectMeta, Spec: corev1.PodSpec{ Containers: []corev1.Container{ *container, }, Volumes: []corev1.Volume{ *volume, }, RestartPolicy: corev1.RestartPolicyOnFailure, NodeSelector: workloadNodePlacement.NodeSelector, Tolerations: workloadNodePlacement.Tolerations, Affinity: workloadNodePlacement.Affinity, PriorityClassName: cc.GetPriorityClass(sourcePvc), ImagePullSecrets: imagePullSecrets, }, } if sourcePvc.Namespace == dv.Namespace { pod.OwnerReferences = []metav1.OwnerReference{ *metav1.NewControllerRef(dv, schema.GroupVersionKind{ Group: cdiv1.SchemeGroupVersion.Group, Version: cdiv1.SchemeGroupVersion.Version, Kind: "DataVolume", }), } } else { if err := setAnnOwnedByDataVolume(pod, dv); err != nil { return nil } pod.Annotations[cc.AnnOwnerUID] = string(dv.UID) } cc.SetRestrictedSecurityContext(&pod.Spec) return pod } // makeSizeDetectionObjectMeta creates and returns the object metadata for the size-detection pod func makeSizeDetectionObjectMeta(sourcePvc *corev1.PersistentVolumeClaim, dataVolume *cdiv1.DataVolume) *metav1.ObjectMeta { return &metav1.ObjectMeta{ Name: sizeDetectionPodName(sourcePvc), Namespace: sourcePvc.Namespace, Labels: map[string]string{ common.CDILabelKey: common.CDILabelValue, common.CDIComponentLabel: common.ImporterPodName, }, } } // makeSizeDetectionContainerSpec creates and returns the size-detection pod's Container spec func (r *PvcCloneReconciler) makeSizeDetectionContainerSpec(volName string) *corev1.Container { container := corev1.Container{ Name: "size-detection-volume", Image: r.importerImage, ImagePullPolicy: 
corev1.PullPolicy(r.pullPolicy), Command: []string{"/usr/bin/cdi-image-size-detection"}, Args: []string{"-image-path", common.ImporterWritePath}, VolumeMounts: []corev1.VolumeMount{ { MountPath: common.ImporterVolumePath, Name: volName, }, }, } // Get and assign container's default resource requirements resourceRequirements, err := cc.GetDefaultPodResourceRequirements(r.client) if err != nil { return nil } if resourceRequirements != nil { container.Resources = *resourceRequirements } return &container } // makeSizeDetectionVolumeSpec creates and returns the size-detection pod's Volume spec func makeSizeDetectionVolumeSpec(pvcName string) *corev1.Volume { return &corev1.Volume{ Name: cc.DataVolName, VolumeSource: corev1.VolumeSource{ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ ClaimName: pvcName, }, }, } } // handleSizeDetectionError handles the termination of the size-detection pod in case of error func (r *PvcCloneReconciler) handleSizeDetectionError(pod *corev1.Pod, dv *cdiv1.DataVolume, sourcePvc *corev1.PersistentVolumeClaim) error { var event Event var exitCode int if pod.Status.ContainerStatuses == nil || pod.Status.ContainerStatuses[0].State.Terminated == nil { exitCode = cc.ErrUnknown } else { exitCode = int(pod.Status.ContainerStatuses[0].State.Terminated.ExitCode) } // We attempt to delete the pod err := r.client.Delete(context.TODO(), pod) if err != nil && !k8serrors.IsNotFound(err) { return err } switch exitCode { case cc.ErrBadArguments: event.eventType = corev1.EventTypeWarning event.reason = "ErrBadArguments" event.message = fmt.Sprintf(MessageSizeDetectionPodFailed, event.reason) case cc.ErrInvalidPath: event.eventType = corev1.EventTypeWarning event.reason = "ErrInvalidPath" event.message = fmt.Sprintf(MessageSizeDetectionPodFailed, event.reason) case cc.ErrInvalidFile: event.eventType = corev1.EventTypeWarning event.reason = "ErrInvalidFile" event.message = fmt.Sprintf(MessageSizeDetectionPodFailed, event.reason) case 
cc.ErrBadTermFile: event.eventType = corev1.EventTypeWarning event.reason = "ErrBadTermFile" event.message = fmt.Sprintf(MessageSizeDetectionPodFailed, event.reason) default: event.eventType = corev1.EventTypeWarning event.reason = "ErrUnknown" event.message = fmt.Sprintf(MessageSizeDetectionPodFailed, event.reason) } r.recorder.Event(dv, event.eventType, event.reason, event.message) return ErrInvalidTermMsg } // updateClonePVCAnnotations updates the clone-related annotations of the source PVC func (r *PvcCloneReconciler) updateClonePVCAnnotations(sourcePvc *corev1.PersistentVolumeClaim, virtualSize string) error { currCapacity := sourcePvc.Status.Capacity sourcePvc.Annotations[AnnVirtualImageSize] = virtualSize sourcePvc.Annotations[AnnSourceCapacity] = currCapacity.Storage().String() return r.client.Update(context.TODO(), sourcePvc) } // sizeDetectionPodName returns the name of the size-detection pod accoding to the source PVC's UID func sizeDetectionPodName(pvc *corev1.PersistentVolumeClaim) string { return fmt.Sprintf("size-detection-%s", pvc.UID) } // isPodComplete returns true if a pod is in 'Succeeded' phase, false if not func isPodComplete(pod *corev1.Pod) bool { return pod != nil && pod.Status.Phase == corev1.PodSucceeded }
random_line_split
pvc-clone-controller.go
/* Copyright 2022 The CDI Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package datavolume import ( "context" "crypto/rsa" "fmt" "strconv" "time" "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" "kubevirt.io/containerized-data-importer/pkg/common" cc "kubevirt.io/containerized-data-importer/pkg/controller/common" featuregates "kubevirt.io/containerized-data-importer/pkg/feature-gates" ) const ( sourceInUseRequeueDuration = time.Duration(5 * time.Second) pvcCloneControllerName = "datavolume-pvc-clone-controller" volumeCloneSourcePrefix = "volume-clone-source" ) // ErrInvalidTermMsg reports that the termination message from the size-detection pod doesn't exists or is not a valid quantity var ErrInvalidTermMsg = fmt.Errorf("the termination message from the size-detection pod is not-valid") // PvcCloneReconciler members type PvcCloneReconciler struct { CloneReconcilerBase } // 
NewPvcCloneController creates a new instance of the datavolume clone controller func NewPvcCloneController( ctx context.Context, mgr manager.Manager, log logr.Logger, clonerImage string, importerImage string, pullPolicy string, tokenPublicKey *rsa.PublicKey, tokenPrivateKey *rsa.PrivateKey, installerLabels map[string]string, ) (controller.Controller, error) { client := mgr.GetClient() reconciler := &PvcCloneReconciler{ CloneReconcilerBase: CloneReconcilerBase{ ReconcilerBase: ReconcilerBase{ client: client, scheme: mgr.GetScheme(), log: log.WithName(pvcCloneControllerName), featureGates: featuregates.NewFeatureGates(client), recorder: mgr.GetEventRecorderFor(pvcCloneControllerName), installerLabels: installerLabels, shouldUpdateProgress: true, }, clonerImage: clonerImage, importerImage: importerImage, pullPolicy: pullPolicy, cloneSourceKind: "PersistentVolumeClaim", shortTokenValidator: cc.NewCloneTokenValidator(common.CloneTokenIssuer, tokenPublicKey), longTokenValidator: cc.NewCloneTokenValidator(common.ExtendedCloneTokenIssuer, tokenPublicKey), // for long term tokens to handle cross namespace dumb clones tokenGenerator: newLongTermCloneTokenGenerator(tokenPrivateKey), }, } dataVolumeCloneController, err := controller.New(pvcCloneControllerName, mgr, controller.Options{ MaxConcurrentReconciles: 3, Reconciler: reconciler, }) if err != nil { return nil, err } if err = reconciler.addDataVolumeCloneControllerWatches(mgr, dataVolumeCloneController); err != nil { return nil, err } return dataVolumeCloneController, nil } func (r *PvcCloneReconciler) addDataVolumeCloneControllerWatches(mgr manager.Manager, datavolumeController controller.Controller) error { if err := addDataVolumeControllerCommonWatches(mgr, datavolumeController, dataVolumePvcClone); err != nil { return err } // Watch to reconcile clones created without source if err := addCloneWithoutSourceWatch(mgr, datavolumeController, &corev1.PersistentVolumeClaim{}, "spec.source.pvc"); err != nil { return err } if 
err := addDataSourceWatch(mgr, datavolumeController); err != nil { return err } if err := r.addVolumeCloneSourceWatch(datavolumeController); err != nil { return err } return nil } func addDataSourceWatch(mgr manager.Manager, c controller.Controller) error { const dvDataSourceField = "datasource" getKey := func(namespace, name string) string { return namespace + "/" + name } if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &cdiv1.DataVolume{}, dvDataSourceField, func(obj client.Object) []string { if sourceRef := obj.(*cdiv1.DataVolume).Spec.SourceRef; sourceRef != nil && sourceRef.Kind == cdiv1.DataVolumeDataSource { ns := obj.GetNamespace() if sourceRef.Namespace != nil && *sourceRef.Namespace != "" { ns = *sourceRef.Namespace } return []string{getKey(ns, sourceRef.Name)} } return nil }); err != nil { return err } mapToDataVolume := func(obj client.Object) (reqs []reconcile.Request) { var dvs cdiv1.DataVolumeList matchingFields := client.MatchingFields{dvDataSourceField: getKey(obj.GetNamespace(), obj.GetName())} if err := mgr.GetClient().List(context.TODO(), &dvs, matchingFields); err != nil { c.GetLogger().Error(err, "Unable to list DataVolumes", "matchingFields", matchingFields) return } for _, dv := range dvs.Items { reqs = append(reqs, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: dv.Namespace, Name: dv.Name}}) } return } if err := c.Watch(&source.Kind{Type: &cdiv1.DataSource{}}, handler.EnqueueRequestsFromMapFunc(mapToDataVolume), ); err != nil { return err } return nil } // Reconcile loop for the clone data volumes func (r *PvcCloneReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { return r.reconcile(ctx, req, r) } func (r *PvcCloneReconciler) prepare(syncState *dvSyncState) error { dv := syncState.dvMutated if err := r.populateSourceIfSourceRef(dv); err != nil { return err } return nil } func (r *PvcCloneReconciler) cleanup(syncState *dvSyncState) error { dv := syncState.dvMutated if 
err := r.populateSourceIfSourceRef(dv); err != nil { return err } if dv.DeletionTimestamp == nil && dv.Status.Phase != cdiv1.Succeeded { return nil } return r.reconcileVolumeCloneSourceCR(syncState) } func addCloneToken(dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { // first clear out tokens that may have already been added delete(pvc.Annotations, cc.AnnCloneToken) delete(pvc.Annotations, cc.AnnExtendedCloneToken) if isCrossNamespaceClone(dv) { // only want this initially // extended token is added later token, ok := dv.Annotations[cc.AnnCloneToken] if !ok { return errors.Errorf("no clone token") } cc.AddAnnotation(pvc, cc.AnnCloneToken, token) } return nil } func volumeCloneSourceName(dv *cdiv1.DataVolume) string { return fmt.Sprintf("%s-%s", volumeCloneSourcePrefix, dv.UID) } func (r *PvcCloneReconciler) updateAnnotations(dataVolume *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { if dataVolume.Spec.Source.PVC == nil { return errors.Errorf("no source set for clone datavolume") } if err := addCloneToken(dataVolume, pvc); err != nil { return err } sourceNamespace := dataVolume.Spec.Source.PVC.Namespace if sourceNamespace == "" { sourceNamespace = dataVolume.Namespace } pvc.Annotations[cc.AnnCloneRequest] = sourceNamespace + "/" + dataVolume.Spec.Source.PVC.Name return nil } func (r *PvcCloneReconciler) sync(log logr.Logger, req reconcile.Request) (dvSyncResult, error) { syncState, err := r.syncClone(log, req) if err == nil { err = r.syncUpdate(log, &syncState) } return syncState.dvSyncResult, err } func (r *PvcCloneReconciler) syncClone(log logr.Logger, req reconcile.Request) (dvSyncState, error) { syncRes, syncErr := r.syncCommon(log, req, r.cleanup, r.prepare) if syncErr != nil || syncRes.result != nil { return syncRes, syncErr } pvc := syncRes.pvc pvcSpec := syncRes.pvcSpec datavolume := syncRes.dvMutated pvcPopulated := pvcIsPopulated(pvc, datavolume) staticProvisionPending := checkStaticProvisionPending(pvc, datavolume) 
prePopulated := dvIsPrePopulated(datavolume) if pvcPopulated || prePopulated || staticProvisionPending { return syncRes, nil } if addedToken, err := r.ensureExtendedTokenDV(datavolume); err != nil { return syncRes, err } else if addedToken { // make sure token gets persisted before doing anything else return syncRes, nil } if pvc == nil { // Check if source PVC exists and do proper validation before attempting to clone if done, err := r.validateCloneAndSourcePVC(&syncRes, log); err != nil { return syncRes, err } else if !done { return syncRes, nil } // Always call detect size, it will handle the case where size is specified // and detection pod not necessary if datavolume.Spec.Storage != nil { done, err := r.detectCloneSize(&syncRes) if err != nil { return syncRes, err } else if !done { // Check if the source PVC is ready to be cloned if readyToClone, err := r.isSourceReadyToClone(datavolume); err != nil { return syncRes, err } else if !readyToClone { if syncRes.result == nil { syncRes.result = &reconcile.Result{} } syncRes.result.RequeueAfter = sourceInUseRequeueDuration return syncRes, r.syncCloneStatusPhase(&syncRes, cdiv1.CloneScheduled, nil) } return syncRes, nil } } pvcModifier := r.updateAnnotations if syncRes.usePopulator { if isCrossNamespaceClone(datavolume) { if !cc.HasFinalizer(datavolume, crossNamespaceFinalizer) { cc.AddFinalizer(datavolume, crossNamespaceFinalizer) return syncRes, r.syncCloneStatusPhase(&syncRes, cdiv1.CloneScheduled, nil) } } pvcModifier = r.updatePVCForPopulation } newPvc, err := r.createPvcForDatavolume(datavolume, pvcSpec, pvcModifier) if err != nil
pvc = newPvc } if syncRes.usePopulator { if err := r.reconcileVolumeCloneSourceCR(&syncRes); err != nil { return syncRes, err } ct, ok := pvc.Annotations[cc.AnnCloneType] if ok { cc.AddAnnotation(datavolume, cc.AnnCloneType, ct) } } else { cc.AddAnnotation(datavolume, cc.AnnCloneType, string(cdiv1.CloneStrategyHostAssisted)) if err := r.fallbackToHostAssisted(pvc); err != nil { return syncRes, err } } if err := r.ensureExtendedTokenPVC(datavolume, pvc); err != nil { return syncRes, err } return syncRes, syncErr } // Verify that the source PVC has been completely populated. func (r *PvcCloneReconciler) isSourcePVCPopulated(dv *cdiv1.DataVolume) (bool, error) { sourcePvc := &corev1.PersistentVolumeClaim{} if err := r.client.Get(context.TODO(), types.NamespacedName{Name: dv.Spec.Source.PVC.Name, Namespace: dv.Spec.Source.PVC.Namespace}, sourcePvc); err != nil { return false, err } return cc.IsPopulated(sourcePvc, r.client) } func (r *PvcCloneReconciler) sourceInUse(dv *cdiv1.DataVolume, eventReason string) (bool, error) { pods, err := cc.GetPodsUsingPVCs(context.TODO(), r.client, dv.Spec.Source.PVC.Namespace, sets.New(dv.Spec.Source.PVC.Name), false) if err != nil { return false, err } for _, pod := range pods { r.log.V(1).Info("Cannot snapshot", "namespace", dv.Namespace, "name", dv.Name, "pod namespace", pod.Namespace, "pod name", pod.Name) r.recorder.Eventf(dv, corev1.EventTypeWarning, eventReason, "pod %s/%s using PersistentVolumeClaim %s", pod.Namespace, pod.Name, dv.Spec.Source.PVC.Name) } return len(pods) > 0, nil } func (r *PvcCloneReconciler) findSourcePvc(dataVolume *cdiv1.DataVolume) (*corev1.PersistentVolumeClaim, error) { sourcePvcSpec := dataVolume.Spec.Source.PVC if sourcePvcSpec == nil { return nil, errors.New("no source PVC provided") } // Find source PVC sourcePvcNs := sourcePvcSpec.Namespace if sourcePvcNs == "" { sourcePvcNs = dataVolume.Namespace } pvc := &corev1.PersistentVolumeClaim{} if err := r.client.Get(context.TODO(), 
types.NamespacedName{Namespace: sourcePvcNs, Name: sourcePvcSpec.Name}, pvc); err != nil { if k8serrors.IsNotFound(err) { r.log.V(3).Info("Source PVC is missing", "source namespace", sourcePvcSpec.Namespace, "source name", sourcePvcSpec.Name) } return nil, err } return pvc, nil } // validateCloneAndSourcePVC checks if the source PVC of a clone exists and does proper validation func (r *PvcCloneReconciler) validateCloneAndSourcePVC(syncState *dvSyncState, log logr.Logger) (bool, error) { datavolume := syncState.dvMutated sourcePvc, err := r.findSourcePvc(datavolume) if err != nil { // Clone without source if k8serrors.IsNotFound(err) { syncErr := r.syncDataVolumeStatusPhaseWithEvent(syncState, datavolume.Status.Phase, nil, Event{ eventType: corev1.EventTypeWarning, reason: CloneWithoutSource, message: fmt.Sprintf(MessageCloneWithoutSource, "pvc", datavolume.Spec.Source.PVC.Name), }) if syncErr != nil { log.Error(syncErr, "failed to sync DataVolume status with event") } return false, nil } return false, err } err = cc.ValidateClone(sourcePvc, &datavolume.Spec) if err != nil { r.recorder.Event(datavolume, corev1.EventTypeWarning, CloneValidationFailed, MessageCloneValidationFailed) return false, err } return true, nil } // isSourceReadyToClone handles the reconciling process of a clone when the source PVC is not ready func (r *PvcCloneReconciler) isSourceReadyToClone(datavolume *cdiv1.DataVolume) (bool, error) { // TODO preper const eventReason := "CloneSourceInUse" // Check if any pods are using the source PVC inUse, err := r.sourceInUse(datavolume, eventReason) if err != nil { return false, err } // Check if the source PVC is fully populated populated, err := r.isSourcePVCPopulated(datavolume) if err != nil { return false, err } if inUse || !populated { return false, nil } return true, nil } // detectCloneSize obtains and assigns the original PVC's size when cloning using an empty storage value func (r *PvcCloneReconciler) detectCloneSize(syncState *dvSyncState) 
(bool, error) { sourcePvc, err := r.findSourcePvc(syncState.dvMutated) if err != nil { return false, err } // because of filesystem overhead calculations when cloning // even if storage size is requested we have to calculate source size // when source is filesystem and target is block requestedSize, hasSize := syncState.pvcSpec.Resources.Requests[corev1.ResourceStorage] sizeRequired := !hasSize || requestedSize.IsZero() targetIsBlock := syncState.pvcSpec.VolumeMode != nil && *syncState.pvcSpec.VolumeMode == corev1.PersistentVolumeBlock sourceIsFilesystem := cc.GetVolumeMode(sourcePvc) == corev1.PersistentVolumeFilesystem // have to be explicit here or detection pod will crash sourceIsKubevirt := sourcePvc.Annotations[cc.AnnContentType] == string(cdiv1.DataVolumeKubeVirt) if !sizeRequired && (!targetIsBlock || !sourceIsFilesystem || !sourceIsKubevirt) { return true, nil } var targetSize int64 sourceCapacity := sourcePvc.Status.Capacity.Storage() // Due to possible filesystem overhead complications when cloning // using host-assisted strategy, we create a pod that automatically // collects the size of the original virtual image with 'qemu-img'. // If the original PVC's volume mode is "block", // we simply extract the value from the original PVC's spec. 
if sourceIsFilesystem && sourceIsKubevirt { var available bool // If available, we first try to get the virtual size from previous iterations targetSize, available = getSizeFromAnnotations(sourcePvc) if !available { targetSize, err = r.getSizeFromPod(syncState.pvc, sourcePvc, syncState.dvMutated) if err != nil { return false, err } else if targetSize == 0 { return false, nil } } } else { targetSize, _ = sourceCapacity.AsInt64() } var isPermissiveClone bool if sizeRequired { // Allow the clone-controller to skip the size comparison requirement // if the source's size ends up being larger due to overhead differences // TODO: Fix this in next PR that uses actual size also in validation isPermissiveClone = sourceCapacity.CmpInt64(targetSize) == 1 } else { isPermissiveClone = requestedSize.CmpInt64(targetSize) >= 0 } if isPermissiveClone { syncState.dvMutated.Annotations[cc.AnnPermissiveClone] = "true" } if !sizeRequired { return true, nil } // Parse size into a 'Quantity' struct and, if needed, inflate it with filesystem overhead targetCapacity, err := cc.InflateSizeWithOverhead(context.TODO(), r.client, targetSize, syncState.pvcSpec) if err != nil { return false, err } syncState.pvcSpec.Resources.Requests[corev1.ResourceStorage] = targetCapacity return true, nil } // getSizeFromAnnotations checks the source PVC's annotations and returns the requested size if it has already been obtained func getSizeFromAnnotations(sourcePvc *corev1.PersistentVolumeClaim) (int64, bool) { virtualImageSize, available := sourcePvc.Annotations[AnnVirtualImageSize] if available { sourceCapacity, available := sourcePvc.Annotations[AnnSourceCapacity] currCapacity := sourcePvc.Status.Capacity // Checks if the original PVC's capacity has changed if available && currCapacity.Storage().Cmp(resource.MustParse(sourceCapacity)) == 0 { // Parse the raw string containing the image size into a 64-bit int imgSizeInt, _ := strconv.ParseInt(virtualImageSize, 10, 64) return imgSizeInt, true } } return 0, 
false } // getSizeFromPod attempts to get the image size from a pod that directly obtains said value from the source PVC func (r *PvcCloneReconciler) getSizeFromPod(targetPvc, sourcePvc *corev1.PersistentVolumeClaim, dv *cdiv1.DataVolume) (int64, error) { // The pod should not be created until the source PVC has finished the import process populated, err := cc.IsPopulated(sourcePvc, r.client) if err != nil { return 0, err } if !populated { r.recorder.Event(dv, corev1.EventTypeNormal, ImportPVCNotReady, MessageImportPVCNotReady) return 0, nil } pod, err := r.getOrCreateSizeDetectionPod(sourcePvc, dv) // Check if pod has failed and, in that case, record an event with the error if podErr := cc.HandleFailedPod(err, sizeDetectionPodName(sourcePvc), targetPvc, r.recorder, r.client); podErr != nil { return 0, podErr } else if !isPodComplete(pod) { r.recorder.Event(dv, corev1.EventTypeNormal, SizeDetectionPodNotReady, MessageSizeDetectionPodNotReady) return 0, nil } // Parse raw image size from the pod's termination message if pod.Status.ContainerStatuses == nil || pod.Status.ContainerStatuses[0].State.Terminated == nil || pod.Status.ContainerStatuses[0].State.Terminated.ExitCode > 0 { return 0, r.handleSizeDetectionError(pod, dv, sourcePvc) } termMsg := pod.Status.ContainerStatuses[0].State.Terminated.Message imgSize, _ := strconv.ParseInt(termMsg, 10, 64) // Update Source PVC annotations if err := r.updateClonePVCAnnotations(sourcePvc, termMsg); err != nil { return imgSize, err } // Finally, detelete the pod if cc.ShouldDeletePod(sourcePvc) { err = r.client.Delete(context.TODO(), pod) if err != nil && !k8serrors.IsNotFound(err) { return imgSize, err } } return imgSize, nil } // getOrCreateSizeDetectionPod gets the size-detection pod if it already exists/creates it if not func (r *PvcCloneReconciler) getOrCreateSizeDetectionPod( sourcePvc *corev1.PersistentVolumeClaim, dv *cdiv1.DataVolume) (*corev1.Pod, error) { podName := sizeDetectionPodName(sourcePvc) pod := 
&corev1.Pod{} nn := types.NamespacedName{Namespace: sourcePvc.Namespace, Name: podName} // Trying to get the pod if it already exists/create it if not if err := r.client.Get(context.TODO(), nn, pod); err != nil { if !k8serrors.IsNotFound(err) { return nil, err } // Generate the pod spec pod = r.makeSizeDetectionPodSpec(sourcePvc, dv) if pod == nil { return nil, errors.Errorf("Size-detection pod spec could not be generated") } // Create the pod if err := r.client.Create(context.TODO(), pod); err != nil { if !k8serrors.IsAlreadyExists(err) { return nil, err } } r.recorder.Event(dv, corev1.EventTypeNormal, SizeDetectionPodCreated, MessageSizeDetectionPodCreated) r.log.V(3).Info(MessageSizeDetectionPodCreated, "pod.Name", pod.Name, "pod.Namespace", pod.Namespace) } return pod, nil } // makeSizeDetectionPodSpec creates and returns the full size-detection pod spec func (r *PvcCloneReconciler) makeSizeDetectionPodSpec( sourcePvc *corev1.PersistentVolumeClaim, dv *cdiv1.DataVolume) *corev1.Pod { workloadNodePlacement, err := cc.GetWorkloadNodePlacement(context.TODO(), r.client) if err != nil { return nil } // Generate individual specs objectMeta := makeSizeDetectionObjectMeta(sourcePvc, dv) volume := makeSizeDetectionVolumeSpec(sourcePvc.Name) container := r.makeSizeDetectionContainerSpec(volume.Name) if container == nil { return nil } imagePullSecrets, err := cc.GetImagePullSecrets(r.client) if err != nil { return nil } // Assemble the pod pod := &corev1.Pod{ ObjectMeta: *objectMeta, Spec: corev1.PodSpec{ Containers: []corev1.Container{ *container, }, Volumes: []corev1.Volume{ *volume, }, RestartPolicy: corev1.RestartPolicyOnFailure, NodeSelector: workloadNodePlacement.NodeSelector, Tolerations: workloadNodePlacement.Tolerations, Affinity: workloadNodePlacement.Affinity, PriorityClassName: cc.GetPriorityClass(sourcePvc), ImagePullSecrets: imagePullSecrets, }, } if sourcePvc.Namespace == dv.Namespace { pod.OwnerReferences = []metav1.OwnerReference{ 
*metav1.NewControllerRef(dv, schema.GroupVersionKind{ Group: cdiv1.SchemeGroupVersion.Group, Version: cdiv1.SchemeGroupVersion.Version, Kind: "DataVolume", }), } } else { if err := setAnnOwnedByDataVolume(pod, dv); err != nil { return nil } pod.Annotations[cc.AnnOwnerUID] = string(dv.UID) } cc.SetRestrictedSecurityContext(&pod.Spec) return pod } // makeSizeDetectionObjectMeta creates and returns the object metadata for the size-detection pod func makeSizeDetectionObjectMeta(sourcePvc *corev1.PersistentVolumeClaim, dataVolume *cdiv1.DataVolume) *metav1.ObjectMeta { return &metav1.ObjectMeta{ Name: sizeDetectionPodName(sourcePvc), Namespace: sourcePvc.Namespace, Labels: map[string]string{ common.CDILabelKey: common.CDILabelValue, common.CDIComponentLabel: common.ImporterPodName, }, } } // makeSizeDetectionContainerSpec creates and returns the size-detection pod's Container spec func (r *PvcCloneReconciler) makeSizeDetectionContainerSpec(volName string) *corev1.Container { container := corev1.Container{ Name: "size-detection-volume", Image: r.importerImage, ImagePullPolicy: corev1.PullPolicy(r.pullPolicy), Command: []string{"/usr/bin/cdi-image-size-detection"}, Args: []string{"-image-path", common.ImporterWritePath}, VolumeMounts: []corev1.VolumeMount{ { MountPath: common.ImporterVolumePath, Name: volName, }, }, } // Get and assign container's default resource requirements resourceRequirements, err := cc.GetDefaultPodResourceRequirements(r.client) if err != nil { return nil } if resourceRequirements != nil { container.Resources = *resourceRequirements } return &container } // makeSizeDetectionVolumeSpec creates and returns the size-detection pod's Volume spec func makeSizeDetectionVolumeSpec(pvcName string) *corev1.Volume { return &corev1.Volume{ Name: cc.DataVolName, VolumeSource: corev1.VolumeSource{ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ ClaimName: pvcName, }, }, } } // handleSizeDetectionError handles the termination of the 
size-detection pod in case of error func (r *PvcCloneReconciler) handleSizeDetectionError(pod *corev1.Pod, dv *cdiv1.DataVolume, sourcePvc *corev1.PersistentVolumeClaim) error { var event Event var exitCode int if pod.Status.ContainerStatuses == nil || pod.Status.ContainerStatuses[0].State.Terminated == nil { exitCode = cc.ErrUnknown } else { exitCode = int(pod.Status.ContainerStatuses[0].State.Terminated.ExitCode) } // We attempt to delete the pod err := r.client.Delete(context.TODO(), pod) if err != nil && !k8serrors.IsNotFound(err) { return err } switch exitCode { case cc.ErrBadArguments: event.eventType = corev1.EventTypeWarning event.reason = "ErrBadArguments" event.message = fmt.Sprintf(MessageSizeDetectionPodFailed, event.reason) case cc.ErrInvalidPath: event.eventType = corev1.EventTypeWarning event.reason = "ErrInvalidPath" event.message = fmt.Sprintf(MessageSizeDetectionPodFailed, event.reason) case cc.ErrInvalidFile: event.eventType = corev1.EventTypeWarning event.reason = "ErrInvalidFile" event.message = fmt.Sprintf(MessageSizeDetectionPodFailed, event.reason) case cc.ErrBadTermFile: event.eventType = corev1.EventTypeWarning event.reason = "ErrBadTermFile" event.message = fmt.Sprintf(MessageSizeDetectionPodFailed, event.reason) default: event.eventType = corev1.EventTypeWarning event.reason = "ErrUnknown" event.message = fmt.Sprintf(MessageSizeDetectionPodFailed, event.reason) } r.recorder.Event(dv, event.eventType, event.reason, event.message) return ErrInvalidTermMsg } // updateClonePVCAnnotations updates the clone-related annotations of the source PVC func (r *PvcCloneReconciler) updateClonePVCAnnotations(sourcePvc *corev1.PersistentVolumeClaim, virtualSize string) error { currCapacity := sourcePvc.Status.Capacity sourcePvc.Annotations[AnnVirtualImageSize] = virtualSize sourcePvc.Annotations[AnnSourceCapacity] = currCapacity.Storage().String() return r.client.Update(context.TODO(), sourcePvc) } // sizeDetectionPodName returns the name of the 
size-detection pod accoding to the source PVC's UID func sizeDetectionPodName(pvc *corev1.PersistentVolumeClaim) string { return fmt.Sprintf("size-detection-%s", pvc.UID) } // isPodComplete returns true if a pod is in 'Succeeded' phase, false if not func isPodComplete(pod *corev1.Pod) bool { return pod != nil && pod.Status.Phase == corev1.PodSucceeded }
{ if cc.ErrQuotaExceeded(err) { syncErr = r.syncDataVolumeStatusPhaseWithEvent(&syncRes, cdiv1.Pending, nil, Event{ eventType: corev1.EventTypeWarning, reason: cc.ErrExceededQuota, message: err.Error(), }) if syncErr != nil { log.Error(syncErr, "failed to sync DataVolume status with event") } } return syncRes, err }
conditional_block
migration.ts
/** * @license * Copyright Google LLC All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ import * as ts from 'typescript'; import {HelperFunction} from './helpers'; import {findImportSpecifier} from './util'; /** A call expression that is based on a property access. */ type PropertyAccessCallExpression = ts.CallExpression&{expression: ts.PropertyAccessExpression}; /** Replaces an import inside an import statement with a different one. */ export function replaceImport(node: ts.NamedImports, oldImport: string, newImport: string) { const isAlreadyImported = findImportSpecifier(node.elements, newImport); if (isAlreadyImported) { return node; } const existingImport = findImportSpecifier(node.elements, oldImport); if (!existingImport) { throw new Error(`Could not find an import to replace using ${oldImport}.`); } return ts.updateNamedImports(node, [ ...node.elements.filter(current => current !== existingImport), // Create a new import while trying to preserve the alias of the old one. ts.createImportSpecifier( existingImport.propertyName ? ts.createIdentifier(newImport) : undefined, existingImport.propertyName ? existingImport.name : ts.createIdentifier(newImport)) ]); } /** * Migrates a function call expression from `Renderer` to `Renderer2`. * Returns null if the expression should be dropped. 
*/ export function migrateExpression(node: ts.CallExpression, typeChecker: ts.TypeChecker): {node: ts.Node|null, requiredHelpers?: HelperFunction[]} { if (isPropertyAccessCallExpression(node)) { switch (node.expression.name.getText()) { case 'setElementProperty': return {node: renameMethodCall(node, 'setProperty')}; case 'setText': return {node: renameMethodCall(node, 'setValue')}; case 'listenGlobal': return {node: renameMethodCall(node, 'listen')}; case 'selectRootElement': return {node: migrateSelectRootElement(node)}; case 'setElementClass': return {node: migrateSetElementClass(node)}; case 'setElementStyle': return {node: migrateSetElementStyle(node, typeChecker)}; case 'invokeElementMethod': return {node: migrateInvokeElementMethod(node)}; case 'setBindingDebugInfo': return {node: null}; case 'createViewRoot': return {node: migrateCreateViewRoot(node)}; case 'setElementAttribute': return { node: switchToHelperCall(node, HelperFunction.setElementAttribute, node.arguments), requiredHelpers: [ HelperFunction.any, HelperFunction.splitNamespace, HelperFunction.setElementAttribute ] }; case 'createElement': return { node: switchToHelperCall(node, HelperFunction.createElement, node.arguments.slice(0, 2)), requiredHelpers: [HelperFunction.any, HelperFunction.splitNamespace, HelperFunction.createElement] }; case 'createText': return { node: switchToHelperCall(node, HelperFunction.createText, node.arguments.slice(0, 2)), requiredHelpers: [HelperFunction.any, HelperFunction.createText] }; case 'createTemplateAnchor': return { node: switchToHelperCall( node, HelperFunction.createTemplateAnchor, node.arguments.slice(0, 1)), requiredHelpers: [HelperFunction.any, HelperFunction.createTemplateAnchor] }; case 'projectNodes': return { node: switchToHelperCall(node, HelperFunction.projectNodes, node.arguments), requiredHelpers: [HelperFunction.any, HelperFunction.projectNodes] }; case 'animate': return { node: migrateAnimateCall(), requiredHelpers: [HelperFunction.any, 
HelperFunction.animate] }; case 'destroyView': return { node: switchToHelperCall(node, HelperFunction.destroyView, [node.arguments[1]]), requiredHelpers: [HelperFunction.any, HelperFunction.destroyView] }; case 'detachView': return { node: switchToHelperCall(node, HelperFunction.detachView, [node.arguments[0]]), requiredHelpers: [HelperFunction.any, HelperFunction.detachView] }; case 'attachViewAfter': return { node: switchToHelperCall(node, HelperFunction.attachViewAfter, node.arguments), requiredHelpers: [HelperFunction.any, HelperFunction.attachViewAfter] }; } } return {node}; } /** Checks whether a node is a PropertyAccessExpression. */ function isPropertyAccessCallExpression(node: ts.Node): node is PropertyAccessCallExpression { return ts.isCallExpression(node) && ts.isPropertyAccessExpression(node.expression); } /** Renames a method call while keeping all of the parameters in place. */ function renameMethodCall(node: PropertyAccessCallExpression, newName: string): ts.CallExpression { const newExpression = ts.updatePropertyAccess( node.expression, node.expression.expression, ts.createIdentifier(newName)); return ts.updateCall(node, newExpression, node.typeArguments, node.arguments); } /** * Migrates a `selectRootElement` call by removing the last argument which is no longer supported. */ function migrateSelectRootElement(node: ts.CallExpression): ts.Node { // The only thing we need to do is to drop the last argument // (`debugInfo`), if the consumer was passing it in. if (node.arguments.length > 1) { return ts.updateCall(node, node.expression, node.typeArguments, [node.arguments[0]]); } return node; } /** * Migrates a call to `setElementClass` either to a call to `addClass` or `removeClass`, or * to an expression like `isAdd ? addClass(el, className) : removeClass(el, className)`. */ function migrateSetElementClass(node: PropertyAccessCallExpression): ts.Node { // Clone so we don't mutate by accident. 
Note that we assume that // the user's code is providing all three required arguments. const outputMethodArgs = node.arguments.slice(); const isAddArgument = outputMethodArgs.pop()!; const createRendererCall = (isAdd: boolean) => { const innerExpression = node.expression.expression; const topExpression = ts.createPropertyAccess(innerExpression, isAdd ? 'addClass' : 'removeClass'); return ts.createCall(topExpression, [], node.arguments.slice(0, 2)); }; // If the call has the `isAdd` argument as a literal boolean, we can map it directly to // `addClass` or `removeClass`. Note that we can't use the type checker here, because it // won't tell us whether the value resolves to true or false. if (isAddArgument.kind === ts.SyntaxKind.TrueKeyword || isAddArgument.kind === ts.SyntaxKind.FalseKeyword) { return createRendererCall(isAddArgument.kind === ts.SyntaxKind.TrueKeyword); } // Otherwise create a ternary on the variable. return ts.createConditional(isAddArgument, createRendererCall(true), createRendererCall(false)); } /** * Migrates a call to `setElementStyle` call either to a call to * `setStyle` or `removeStyle`. or to an expression like * `value == null ? removeStyle(el, key) : setStyle(el, key, value)`. */ function migrateSetElementStyle( node: PropertyAccessCallExpression, typeChecker: ts.TypeChecker): ts.Node { const args = node.arguments; const addMethodName = 'setStyle'; const removeMethodName = 'removeStyle'; const lastArgType = args[2] ? typeChecker.typeToString( typeChecker.getTypeAtLocation(args[2]), node, ts.TypeFormatFlags.AddUndefined) : null; // Note that for a literal null, TS considers it a `NullKeyword`, // whereas a literal `undefined` is just an Identifier. if (args.length === 2 || lastArgType === 'null' || lastArgType === 'undefined') { // If we've got a call with two arguments, or one with three arguments where the last one is // `undefined` or `null`, we can safely switch to a `removeStyle` call. 
const innerExpression = node.expression.expression; const topExpression = ts.createPropertyAccess(innerExpression, removeMethodName); return ts.createCall(topExpression, [], args.slice(0, 2)); } else if (args.length === 3) { // We need the checks for string literals, because the type of something // like `"blue"` is the literal `blue`, not `string`. if (lastArgType === 'string' || lastArgType === 'number' || ts.isStringLiteral(args[2]) || ts.isNoSubstitutionTemplateLiteral(args[2]) || ts.isNumericLiteral(args[2])) { // If we've got three arguments and the last one is a string literal or a number, we // can safely rename to `setStyle`. return renameMethodCall(node, addMethodName); } else { // Otherwise migrate to a ternary that looks like: // `value == null ? removeStyle(el, key) : setStyle(el, key, value)` const condition = ts.createBinary(args[2], ts.SyntaxKind.EqualsEqualsToken, ts.createNull()); const whenNullCall = renameMethodCall( ts.createCall(node.expression, [], args.slice(0, 2)) as PropertyAccessCallExpression, removeMethodName); return ts.createConditional(condition, whenNullCall, renameMethodCall(node, addMethodName)); } } return node; } /** * Migrates a call to `invokeElementMethod(target, method, [arg1, arg2])` either to * `target.method(arg1, arg2)` or `(target as any)[method].apply(target, [arg1, arg2])`. */ function
(node: ts.CallExpression): ts.Node { const [target, name, args] = node.arguments; const isNameStatic = ts.isStringLiteral(name) || ts.isNoSubstitutionTemplateLiteral(name); const isArgsStatic = !args || ts.isArrayLiteralExpression(args); if (isNameStatic && isArgsStatic) { // If the name is a static string and the arguments are an array literal, // we can safely convert the node into a call expression. const expression = ts.createPropertyAccess( target, (name as ts.StringLiteral | ts.NoSubstitutionTemplateLiteral).text); const callArguments = args ? (args as ts.ArrayLiteralExpression).elements : []; return ts.createCall(expression, [], callArguments); } else { // Otherwise create an expression in the form of `(target as any)[name].apply(target, args)`. const asExpression = ts.createParen( ts.createAsExpression(target, ts.createKeywordTypeNode(ts.SyntaxKind.AnyKeyword))); const elementAccess = ts.createElementAccess(asExpression, name); const applyExpression = ts.createPropertyAccess(elementAccess, 'apply'); return ts.createCall(applyExpression, [], args ? [target, args] : [target]); } } /** Migrates a call to `createViewRoot` to whatever node was passed in as the first argument. */ function migrateCreateViewRoot(node: ts.CallExpression): ts.Node { return node.arguments[0]; } /** Migrates a call to `migrate` a direct call to the helper. */ function migrateAnimateCall() { return ts.createCall(ts.createIdentifier(HelperFunction.animate), [], []); } /** * Switches out a call to the `Renderer` to a call to one of our helper functions. * Most of the helpers accept an instance of `Renderer2` as the first argument and all * subsequent arguments differ. * @param node Node of the original method call. * @param helper Name of the helper with which to replace the original call. * @param args Arguments that should be passed into the helper after the renderer argument. 
*/ function switchToHelperCall( node: PropertyAccessCallExpression, helper: HelperFunction, args: ts.Expression[]|ts.NodeArray<ts.Expression>): ts.Node { return ts.createCall(ts.createIdentifier(helper), [], [node.expression.expression, ...args]); }
migrateInvokeElementMethod
identifier_name
migration.ts
/** * @license * Copyright Google LLC All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ import * as ts from 'typescript'; import {HelperFunction} from './helpers'; import {findImportSpecifier} from './util'; /** A call expression that is based on a property access. */ type PropertyAccessCallExpression = ts.CallExpression&{expression: ts.PropertyAccessExpression}; /** Replaces an import inside an import statement with a different one. */ export function replaceImport(node: ts.NamedImports, oldImport: string, newImport: string) { const isAlreadyImported = findImportSpecifier(node.elements, newImport); if (isAlreadyImported) { return node; } const existingImport = findImportSpecifier(node.elements, oldImport); if (!existingImport) { throw new Error(`Could not find an import to replace using ${oldImport}.`); } return ts.updateNamedImports(node, [ ...node.elements.filter(current => current !== existingImport), // Create a new import while trying to preserve the alias of the old one. ts.createImportSpecifier( existingImport.propertyName ? ts.createIdentifier(newImport) : undefined, existingImport.propertyName ? existingImport.name : ts.createIdentifier(newImport)) ]); } /** * Migrates a function call expression from `Renderer` to `Renderer2`. * Returns null if the expression should be dropped. 
*/ export function migrateExpression(node: ts.CallExpression, typeChecker: ts.TypeChecker): {node: ts.Node|null, requiredHelpers?: HelperFunction[]} { if (isPropertyAccessCallExpression(node)) { switch (node.expression.name.getText()) { case 'setElementProperty': return {node: renameMethodCall(node, 'setProperty')}; case 'setText': return {node: renameMethodCall(node, 'setValue')}; case 'listenGlobal': return {node: renameMethodCall(node, 'listen')}; case 'selectRootElement': return {node: migrateSelectRootElement(node)}; case 'setElementClass': return {node: migrateSetElementClass(node)}; case 'setElementStyle': return {node: migrateSetElementStyle(node, typeChecker)}; case 'invokeElementMethod': return {node: migrateInvokeElementMethod(node)}; case 'setBindingDebugInfo': return {node: null}; case 'createViewRoot': return {node: migrateCreateViewRoot(node)}; case 'setElementAttribute': return { node: switchToHelperCall(node, HelperFunction.setElementAttribute, node.arguments), requiredHelpers: [ HelperFunction.any, HelperFunction.splitNamespace, HelperFunction.setElementAttribute ] }; case 'createElement': return { node: switchToHelperCall(node, HelperFunction.createElement, node.arguments.slice(0, 2)), requiredHelpers: [HelperFunction.any, HelperFunction.splitNamespace, HelperFunction.createElement] }; case 'createText': return { node: switchToHelperCall(node, HelperFunction.createText, node.arguments.slice(0, 2)), requiredHelpers: [HelperFunction.any, HelperFunction.createText] }; case 'createTemplateAnchor': return { node: switchToHelperCall( node, HelperFunction.createTemplateAnchor, node.arguments.slice(0, 1)), requiredHelpers: [HelperFunction.any, HelperFunction.createTemplateAnchor] }; case 'projectNodes': return { node: switchToHelperCall(node, HelperFunction.projectNodes, node.arguments), requiredHelpers: [HelperFunction.any, HelperFunction.projectNodes] }; case 'animate': return { node: migrateAnimateCall(), requiredHelpers: [HelperFunction.any, 
HelperFunction.animate] }; case 'destroyView': return { node: switchToHelperCall(node, HelperFunction.destroyView, [node.arguments[1]]), requiredHelpers: [HelperFunction.any, HelperFunction.destroyView] }; case 'detachView': return { node: switchToHelperCall(node, HelperFunction.detachView, [node.arguments[0]]), requiredHelpers: [HelperFunction.any, HelperFunction.detachView] }; case 'attachViewAfter': return { node: switchToHelperCall(node, HelperFunction.attachViewAfter, node.arguments), requiredHelpers: [HelperFunction.any, HelperFunction.attachViewAfter] }; } } return {node}; } /** Checks whether a node is a PropertyAccessExpression. */ function isPropertyAccessCallExpression(node: ts.Node): node is PropertyAccessCallExpression { return ts.isCallExpression(node) && ts.isPropertyAccessExpression(node.expression); } /** Renames a method call while keeping all of the parameters in place. */ function renameMethodCall(node: PropertyAccessCallExpression, newName: string): ts.CallExpression { const newExpression = ts.updatePropertyAccess( node.expression, node.expression.expression, ts.createIdentifier(newName)); return ts.updateCall(node, newExpression, node.typeArguments, node.arguments); } /** * Migrates a `selectRootElement` call by removing the last argument which is no longer supported. */ function migrateSelectRootElement(node: ts.CallExpression): ts.Node { // The only thing we need to do is to drop the last argument // (`debugInfo`), if the consumer was passing it in. if (node.arguments.length > 1) { return ts.updateCall(node, node.expression, node.typeArguments, [node.arguments[0]]); } return node; } /** * Migrates a call to `setElementClass` either to a call to `addClass` or `removeClass`, or * to an expression like `isAdd ? addClass(el, className) : removeClass(el, className)`. */ function migrateSetElementClass(node: PropertyAccessCallExpression): ts.Node { // Clone so we don't mutate by accident. 
Note that we assume that // the user's code is providing all three required arguments. const outputMethodArgs = node.arguments.slice(); const isAddArgument = outputMethodArgs.pop()!; const createRendererCall = (isAdd: boolean) => { const innerExpression = node.expression.expression; const topExpression = ts.createPropertyAccess(innerExpression, isAdd ? 'addClass' : 'removeClass'); return ts.createCall(topExpression, [], node.arguments.slice(0, 2)); }; // If the call has the `isAdd` argument as a literal boolean, we can map it directly to // `addClass` or `removeClass`. Note that we can't use the type checker here, because it // won't tell us whether the value resolves to true or false. if (isAddArgument.kind === ts.SyntaxKind.TrueKeyword || isAddArgument.kind === ts.SyntaxKind.FalseKeyword) { return createRendererCall(isAddArgument.kind === ts.SyntaxKind.TrueKeyword); } // Otherwise create a ternary on the variable. return ts.createConditional(isAddArgument, createRendererCall(true), createRendererCall(false)); } /** * Migrates a call to `setElementStyle` call either to a call to * `setStyle` or `removeStyle`. or to an expression like * `value == null ? removeStyle(el, key) : setStyle(el, key, value)`. */ function migrateSetElementStyle( node: PropertyAccessCallExpression, typeChecker: ts.TypeChecker): ts.Node { const args = node.arguments; const addMethodName = 'setStyle'; const removeMethodName = 'removeStyle'; const lastArgType = args[2] ? typeChecker.typeToString( typeChecker.getTypeAtLocation(args[2]), node, ts.TypeFormatFlags.AddUndefined) : null; // Note that for a literal null, TS considers it a `NullKeyword`, // whereas a literal `undefined` is just an Identifier. if (args.length === 2 || lastArgType === 'null' || lastArgType === 'undefined') { // If we've got a call with two arguments, or one with three arguments where the last one is // `undefined` or `null`, we can safely switch to a `removeStyle` call. 
const innerExpression = node.expression.expression; const topExpression = ts.createPropertyAccess(innerExpression, removeMethodName); return ts.createCall(topExpression, [], args.slice(0, 2)); } else if (args.length === 3)
return node; } /** * Migrates a call to `invokeElementMethod(target, method, [arg1, arg2])` either to * `target.method(arg1, arg2)` or `(target as any)[method].apply(target, [arg1, arg2])`. */ function migrateInvokeElementMethod(node: ts.CallExpression): ts.Node { const [target, name, args] = node.arguments; const isNameStatic = ts.isStringLiteral(name) || ts.isNoSubstitutionTemplateLiteral(name); const isArgsStatic = !args || ts.isArrayLiteralExpression(args); if (isNameStatic && isArgsStatic) { // If the name is a static string and the arguments are an array literal, // we can safely convert the node into a call expression. const expression = ts.createPropertyAccess( target, (name as ts.StringLiteral | ts.NoSubstitutionTemplateLiteral).text); const callArguments = args ? (args as ts.ArrayLiteralExpression).elements : []; return ts.createCall(expression, [], callArguments); } else { // Otherwise create an expression in the form of `(target as any)[name].apply(target, args)`. const asExpression = ts.createParen( ts.createAsExpression(target, ts.createKeywordTypeNode(ts.SyntaxKind.AnyKeyword))); const elementAccess = ts.createElementAccess(asExpression, name); const applyExpression = ts.createPropertyAccess(elementAccess, 'apply'); return ts.createCall(applyExpression, [], args ? [target, args] : [target]); } } /** Migrates a call to `createViewRoot` to whatever node was passed in as the first argument. */ function migrateCreateViewRoot(node: ts.CallExpression): ts.Node { return node.arguments[0]; } /** Migrates a call to `migrate` a direct call to the helper. */ function migrateAnimateCall() { return ts.createCall(ts.createIdentifier(HelperFunction.animate), [], []); } /** * Switches out a call to the `Renderer` to a call to one of our helper functions. * Most of the helpers accept an instance of `Renderer2` as the first argument and all * subsequent arguments differ. * @param node Node of the original method call. 
* @param helper Name of the helper with which to replace the original call. * @param args Arguments that should be passed into the helper after the renderer argument. */ function switchToHelperCall( node: PropertyAccessCallExpression, helper: HelperFunction, args: ts.Expression[]|ts.NodeArray<ts.Expression>): ts.Node { return ts.createCall(ts.createIdentifier(helper), [], [node.expression.expression, ...args]); }
{ // We need the checks for string literals, because the type of something // like `"blue"` is the literal `blue`, not `string`. if (lastArgType === 'string' || lastArgType === 'number' || ts.isStringLiteral(args[2]) || ts.isNoSubstitutionTemplateLiteral(args[2]) || ts.isNumericLiteral(args[2])) { // If we've got three arguments and the last one is a string literal or a number, we // can safely rename to `setStyle`. return renameMethodCall(node, addMethodName); } else { // Otherwise migrate to a ternary that looks like: // `value == null ? removeStyle(el, key) : setStyle(el, key, value)` const condition = ts.createBinary(args[2], ts.SyntaxKind.EqualsEqualsToken, ts.createNull()); const whenNullCall = renameMethodCall( ts.createCall(node.expression, [], args.slice(0, 2)) as PropertyAccessCallExpression, removeMethodName); return ts.createConditional(condition, whenNullCall, renameMethodCall(node, addMethodName)); } }
conditional_block
migration.ts
/** * @license * Copyright Google LLC All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ import * as ts from 'typescript'; import {HelperFunction} from './helpers'; import {findImportSpecifier} from './util'; /** A call expression that is based on a property access. */ type PropertyAccessCallExpression = ts.CallExpression&{expression: ts.PropertyAccessExpression}; /** Replaces an import inside an import statement with a different one. */ export function replaceImport(node: ts.NamedImports, oldImport: string, newImport: string) { const isAlreadyImported = findImportSpecifier(node.elements, newImport); if (isAlreadyImported) { return node; } const existingImport = findImportSpecifier(node.elements, oldImport); if (!existingImport) { throw new Error(`Could not find an import to replace using ${oldImport}.`); } return ts.updateNamedImports(node, [ ...node.elements.filter(current => current !== existingImport), // Create a new import while trying to preserve the alias of the old one. ts.createImportSpecifier( existingImport.propertyName ? ts.createIdentifier(newImport) : undefined, existingImport.propertyName ? existingImport.name : ts.createIdentifier(newImport)) ]); } /** * Migrates a function call expression from `Renderer` to `Renderer2`. * Returns null if the expression should be dropped. 
*/ export function migrateExpression(node: ts.CallExpression, typeChecker: ts.TypeChecker): {node: ts.Node|null, requiredHelpers?: HelperFunction[]} { if (isPropertyAccessCallExpression(node)) { switch (node.expression.name.getText()) { case 'setElementProperty': return {node: renameMethodCall(node, 'setProperty')}; case 'setText': return {node: renameMethodCall(node, 'setValue')}; case 'listenGlobal': return {node: renameMethodCall(node, 'listen')}; case 'selectRootElement': return {node: migrateSelectRootElement(node)}; case 'setElementClass': return {node: migrateSetElementClass(node)}; case 'setElementStyle': return {node: migrateSetElementStyle(node, typeChecker)}; case 'invokeElementMethod': return {node: migrateInvokeElementMethod(node)}; case 'setBindingDebugInfo': return {node: null}; case 'createViewRoot': return {node: migrateCreateViewRoot(node)}; case 'setElementAttribute': return { node: switchToHelperCall(node, HelperFunction.setElementAttribute, node.arguments), requiredHelpers: [ HelperFunction.any, HelperFunction.splitNamespace, HelperFunction.setElementAttribute ] }; case 'createElement': return { node: switchToHelperCall(node, HelperFunction.createElement, node.arguments.slice(0, 2)), requiredHelpers: [HelperFunction.any, HelperFunction.splitNamespace, HelperFunction.createElement] }; case 'createText': return { node: switchToHelperCall(node, HelperFunction.createText, node.arguments.slice(0, 2)), requiredHelpers: [HelperFunction.any, HelperFunction.createText] }; case 'createTemplateAnchor': return { node: switchToHelperCall( node, HelperFunction.createTemplateAnchor, node.arguments.slice(0, 1)), requiredHelpers: [HelperFunction.any, HelperFunction.createTemplateAnchor] }; case 'projectNodes': return { node: switchToHelperCall(node, HelperFunction.projectNodes, node.arguments), requiredHelpers: [HelperFunction.any, HelperFunction.projectNodes] }; case 'animate': return { node: migrateAnimateCall(), requiredHelpers: [HelperFunction.any, 
HelperFunction.animate] }; case 'destroyView': return { node: switchToHelperCall(node, HelperFunction.destroyView, [node.arguments[1]]), requiredHelpers: [HelperFunction.any, HelperFunction.destroyView] }; case 'detachView': return { node: switchToHelperCall(node, HelperFunction.detachView, [node.arguments[0]]), requiredHelpers: [HelperFunction.any, HelperFunction.detachView] }; case 'attachViewAfter': return { node: switchToHelperCall(node, HelperFunction.attachViewAfter, node.arguments), requiredHelpers: [HelperFunction.any, HelperFunction.attachViewAfter] }; } } return {node}; } /** Checks whether a node is a PropertyAccessExpression. */ function isPropertyAccessCallExpression(node: ts.Node): node is PropertyAccessCallExpression { return ts.isCallExpression(node) && ts.isPropertyAccessExpression(node.expression); } /** Renames a method call while keeping all of the parameters in place. */ function renameMethodCall(node: PropertyAccessCallExpression, newName: string): ts.CallExpression { const newExpression = ts.updatePropertyAccess( node.expression, node.expression.expression, ts.createIdentifier(newName)); return ts.updateCall(node, newExpression, node.typeArguments, node.arguments); } /** * Migrates a `selectRootElement` call by removing the last argument which is no longer supported. */ function migrateSelectRootElement(node: ts.CallExpression): ts.Node { // The only thing we need to do is to drop the last argument // (`debugInfo`), if the consumer was passing it in. if (node.arguments.length > 1) { return ts.updateCall(node, node.expression, node.typeArguments, [node.arguments[0]]); } return node; } /** * Migrates a call to `setElementClass` either to a call to `addClass` or `removeClass`, or * to an expression like `isAdd ? addClass(el, className) : removeClass(el, className)`. */ function migrateSetElementClass(node: PropertyAccessCallExpression): ts.Node
/** * Migrates a call to `setElementStyle` call either to a call to * `setStyle` or `removeStyle`. or to an expression like * `value == null ? removeStyle(el, key) : setStyle(el, key, value)`. */ function migrateSetElementStyle( node: PropertyAccessCallExpression, typeChecker: ts.TypeChecker): ts.Node { const args = node.arguments; const addMethodName = 'setStyle'; const removeMethodName = 'removeStyle'; const lastArgType = args[2] ? typeChecker.typeToString( typeChecker.getTypeAtLocation(args[2]), node, ts.TypeFormatFlags.AddUndefined) : null; // Note that for a literal null, TS considers it a `NullKeyword`, // whereas a literal `undefined` is just an Identifier. if (args.length === 2 || lastArgType === 'null' || lastArgType === 'undefined') { // If we've got a call with two arguments, or one with three arguments where the last one is // `undefined` or `null`, we can safely switch to a `removeStyle` call. const innerExpression = node.expression.expression; const topExpression = ts.createPropertyAccess(innerExpression, removeMethodName); return ts.createCall(topExpression, [], args.slice(0, 2)); } else if (args.length === 3) { // We need the checks for string literals, because the type of something // like `"blue"` is the literal `blue`, not `string`. if (lastArgType === 'string' || lastArgType === 'number' || ts.isStringLiteral(args[2]) || ts.isNoSubstitutionTemplateLiteral(args[2]) || ts.isNumericLiteral(args[2])) { // If we've got three arguments and the last one is a string literal or a number, we // can safely rename to `setStyle`. return renameMethodCall(node, addMethodName); } else { // Otherwise migrate to a ternary that looks like: // `value == null ? 
removeStyle(el, key) : setStyle(el, key, value)` const condition = ts.createBinary(args[2], ts.SyntaxKind.EqualsEqualsToken, ts.createNull()); const whenNullCall = renameMethodCall( ts.createCall(node.expression, [], args.slice(0, 2)) as PropertyAccessCallExpression, removeMethodName); return ts.createConditional(condition, whenNullCall, renameMethodCall(node, addMethodName)); } } return node; } /** * Migrates a call to `invokeElementMethod(target, method, [arg1, arg2])` either to * `target.method(arg1, arg2)` or `(target as any)[method].apply(target, [arg1, arg2])`. */ function migrateInvokeElementMethod(node: ts.CallExpression): ts.Node { const [target, name, args] = node.arguments; const isNameStatic = ts.isStringLiteral(name) || ts.isNoSubstitutionTemplateLiteral(name); const isArgsStatic = !args || ts.isArrayLiteralExpression(args); if (isNameStatic && isArgsStatic) { // If the name is a static string and the arguments are an array literal, // we can safely convert the node into a call expression. const expression = ts.createPropertyAccess( target, (name as ts.StringLiteral | ts.NoSubstitutionTemplateLiteral).text); const callArguments = args ? (args as ts.ArrayLiteralExpression).elements : []; return ts.createCall(expression, [], callArguments); } else { // Otherwise create an expression in the form of `(target as any)[name].apply(target, args)`. const asExpression = ts.createParen( ts.createAsExpression(target, ts.createKeywordTypeNode(ts.SyntaxKind.AnyKeyword))); const elementAccess = ts.createElementAccess(asExpression, name); const applyExpression = ts.createPropertyAccess(elementAccess, 'apply'); return ts.createCall(applyExpression, [], args ? [target, args] : [target]); } } /** Migrates a call to `createViewRoot` to whatever node was passed in as the first argument. */ function migrateCreateViewRoot(node: ts.CallExpression): ts.Node { return node.arguments[0]; } /** Migrates a call to `migrate` a direct call to the helper. 
*/ function migrateAnimateCall() { return ts.createCall(ts.createIdentifier(HelperFunction.animate), [], []); } /** * Switches out a call to the `Renderer` to a call to one of our helper functions. * Most of the helpers accept an instance of `Renderer2` as the first argument and all * subsequent arguments differ. * @param node Node of the original method call. * @param helper Name of the helper with which to replace the original call. * @param args Arguments that should be passed into the helper after the renderer argument. */ function switchToHelperCall( node: PropertyAccessCallExpression, helper: HelperFunction, args: ts.Expression[]|ts.NodeArray<ts.Expression>): ts.Node { return ts.createCall(ts.createIdentifier(helper), [], [node.expression.expression, ...args]); }
{ // Clone so we don't mutate by accident. Note that we assume that // the user's code is providing all three required arguments. const outputMethodArgs = node.arguments.slice(); const isAddArgument = outputMethodArgs.pop()!; const createRendererCall = (isAdd: boolean) => { const innerExpression = node.expression.expression; const topExpression = ts.createPropertyAccess(innerExpression, isAdd ? 'addClass' : 'removeClass'); return ts.createCall(topExpression, [], node.arguments.slice(0, 2)); }; // If the call has the `isAdd` argument as a literal boolean, we can map it directly to // `addClass` or `removeClass`. Note that we can't use the type checker here, because it // won't tell us whether the value resolves to true or false. if (isAddArgument.kind === ts.SyntaxKind.TrueKeyword || isAddArgument.kind === ts.SyntaxKind.FalseKeyword) { return createRendererCall(isAddArgument.kind === ts.SyntaxKind.TrueKeyword); } // Otherwise create a ternary on the variable. return ts.createConditional(isAddArgument, createRendererCall(true), createRendererCall(false)); }
identifier_body
migration.ts
/** * @license * Copyright Google LLC All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ import * as ts from 'typescript'; import {HelperFunction} from './helpers'; import {findImportSpecifier} from './util'; /** A call expression that is based on a property access. */ type PropertyAccessCallExpression = ts.CallExpression&{expression: ts.PropertyAccessExpression}; /** Replaces an import inside an import statement with a different one. */ export function replaceImport(node: ts.NamedImports, oldImport: string, newImport: string) { const isAlreadyImported = findImportSpecifier(node.elements, newImport); if (isAlreadyImported) { return node; } const existingImport = findImportSpecifier(node.elements, oldImport); if (!existingImport) { throw new Error(`Could not find an import to replace using ${oldImport}.`); } return ts.updateNamedImports(node, [ ...node.elements.filter(current => current !== existingImport), // Create a new import while trying to preserve the alias of the old one. ts.createImportSpecifier( existingImport.propertyName ? ts.createIdentifier(newImport) : undefined, existingImport.propertyName ? existingImport.name : ts.createIdentifier(newImport)) ]); } /** * Migrates a function call expression from `Renderer` to `Renderer2`. * Returns null if the expression should be dropped. 
*/ export function migrateExpression(node: ts.CallExpression, typeChecker: ts.TypeChecker): {node: ts.Node|null, requiredHelpers?: HelperFunction[]} { if (isPropertyAccessCallExpression(node)) { switch (node.expression.name.getText()) { case 'setElementProperty': return {node: renameMethodCall(node, 'setProperty')}; case 'setText': return {node: renameMethodCall(node, 'setValue')}; case 'listenGlobal': return {node: renameMethodCall(node, 'listen')}; case 'selectRootElement': return {node: migrateSelectRootElement(node)}; case 'setElementClass': return {node: migrateSetElementClass(node)}; case 'setElementStyle': return {node: migrateSetElementStyle(node, typeChecker)}; case 'invokeElementMethod': return {node: migrateInvokeElementMethod(node)}; case 'setBindingDebugInfo': return {node: null}; case 'createViewRoot': return {node: migrateCreateViewRoot(node)}; case 'setElementAttribute': return { node: switchToHelperCall(node, HelperFunction.setElementAttribute, node.arguments), requiredHelpers: [ HelperFunction.any, HelperFunction.splitNamespace, HelperFunction.setElementAttribute ] }; case 'createElement': return { node: switchToHelperCall(node, HelperFunction.createElement, node.arguments.slice(0, 2)), requiredHelpers: [HelperFunction.any, HelperFunction.splitNamespace, HelperFunction.createElement] }; case 'createText': return { node: switchToHelperCall(node, HelperFunction.createText, node.arguments.slice(0, 2)), requiredHelpers: [HelperFunction.any, HelperFunction.createText] }; case 'createTemplateAnchor': return { node: switchToHelperCall( node, HelperFunction.createTemplateAnchor, node.arguments.slice(0, 1)), requiredHelpers: [HelperFunction.any, HelperFunction.createTemplateAnchor] }; case 'projectNodes': return { node: switchToHelperCall(node, HelperFunction.projectNodes, node.arguments), requiredHelpers: [HelperFunction.any, HelperFunction.projectNodes] }; case 'animate': return { node: migrateAnimateCall(), requiredHelpers: [HelperFunction.any, 
HelperFunction.animate] }; case 'destroyView': return { node: switchToHelperCall(node, HelperFunction.destroyView, [node.arguments[1]]), requiredHelpers: [HelperFunction.any, HelperFunction.destroyView] }; case 'detachView': return { node: switchToHelperCall(node, HelperFunction.detachView, [node.arguments[0]]), requiredHelpers: [HelperFunction.any, HelperFunction.detachView] }; case 'attachViewAfter': return { node: switchToHelperCall(node, HelperFunction.attachViewAfter, node.arguments), requiredHelpers: [HelperFunction.any, HelperFunction.attachViewAfter] }; } } return {node}; } /** Checks whether a node is a PropertyAccessExpression. */ function isPropertyAccessCallExpression(node: ts.Node): node is PropertyAccessCallExpression { return ts.isCallExpression(node) && ts.isPropertyAccessExpression(node.expression); } /** Renames a method call while keeping all of the parameters in place. */ function renameMethodCall(node: PropertyAccessCallExpression, newName: string): ts.CallExpression { const newExpression = ts.updatePropertyAccess( node.expression, node.expression.expression, ts.createIdentifier(newName)); return ts.updateCall(node, newExpression, node.typeArguments, node.arguments); } /** * Migrates a `selectRootElement` call by removing the last argument which is no longer supported. */ function migrateSelectRootElement(node: ts.CallExpression): ts.Node { // The only thing we need to do is to drop the last argument // (`debugInfo`), if the consumer was passing it in. if (node.arguments.length > 1) { return ts.updateCall(node, node.expression, node.typeArguments, [node.arguments[0]]); }
/** * Migrates a call to `setElementClass` either to a call to `addClass` or `removeClass`, or * to an expression like `isAdd ? addClass(el, className) : removeClass(el, className)`. */ function migrateSetElementClass(node: PropertyAccessCallExpression): ts.Node { // Clone so we don't mutate by accident. Note that we assume that // the user's code is providing all three required arguments. const outputMethodArgs = node.arguments.slice(); const isAddArgument = outputMethodArgs.pop()!; const createRendererCall = (isAdd: boolean) => { const innerExpression = node.expression.expression; const topExpression = ts.createPropertyAccess(innerExpression, isAdd ? 'addClass' : 'removeClass'); return ts.createCall(topExpression, [], node.arguments.slice(0, 2)); }; // If the call has the `isAdd` argument as a literal boolean, we can map it directly to // `addClass` or `removeClass`. Note that we can't use the type checker here, because it // won't tell us whether the value resolves to true or false. if (isAddArgument.kind === ts.SyntaxKind.TrueKeyword || isAddArgument.kind === ts.SyntaxKind.FalseKeyword) { return createRendererCall(isAddArgument.kind === ts.SyntaxKind.TrueKeyword); } // Otherwise create a ternary on the variable. return ts.createConditional(isAddArgument, createRendererCall(true), createRendererCall(false)); } /** * Migrates a call to `setElementStyle` call either to a call to * `setStyle` or `removeStyle`. or to an expression like * `value == null ? removeStyle(el, key) : setStyle(el, key, value)`. */ function migrateSetElementStyle( node: PropertyAccessCallExpression, typeChecker: ts.TypeChecker): ts.Node { const args = node.arguments; const addMethodName = 'setStyle'; const removeMethodName = 'removeStyle'; const lastArgType = args[2] ? 
typeChecker.typeToString( typeChecker.getTypeAtLocation(args[2]), node, ts.TypeFormatFlags.AddUndefined) : null; // Note that for a literal null, TS considers it a `NullKeyword`, // whereas a literal `undefined` is just an Identifier. if (args.length === 2 || lastArgType === 'null' || lastArgType === 'undefined') { // If we've got a call with two arguments, or one with three arguments where the last one is // `undefined` or `null`, we can safely switch to a `removeStyle` call. const innerExpression = node.expression.expression; const topExpression = ts.createPropertyAccess(innerExpression, removeMethodName); return ts.createCall(topExpression, [], args.slice(0, 2)); } else if (args.length === 3) { // We need the checks for string literals, because the type of something // like `"blue"` is the literal `blue`, not `string`. if (lastArgType === 'string' || lastArgType === 'number' || ts.isStringLiteral(args[2]) || ts.isNoSubstitutionTemplateLiteral(args[2]) || ts.isNumericLiteral(args[2])) { // If we've got three arguments and the last one is a string literal or a number, we // can safely rename to `setStyle`. return renameMethodCall(node, addMethodName); } else { // Otherwise migrate to a ternary that looks like: // `value == null ? removeStyle(el, key) : setStyle(el, key, value)` const condition = ts.createBinary(args[2], ts.SyntaxKind.EqualsEqualsToken, ts.createNull()); const whenNullCall = renameMethodCall( ts.createCall(node.expression, [], args.slice(0, 2)) as PropertyAccessCallExpression, removeMethodName); return ts.createConditional(condition, whenNullCall, renameMethodCall(node, addMethodName)); } } return node; } /** * Migrates a call to `invokeElementMethod(target, method, [arg1, arg2])` either to * `target.method(arg1, arg2)` or `(target as any)[method].apply(target, [arg1, arg2])`. 
*/ function migrateInvokeElementMethod(node: ts.CallExpression): ts.Node { const [target, name, args] = node.arguments; const isNameStatic = ts.isStringLiteral(name) || ts.isNoSubstitutionTemplateLiteral(name); const isArgsStatic = !args || ts.isArrayLiteralExpression(args); if (isNameStatic && isArgsStatic) { // If the name is a static string and the arguments are an array literal, // we can safely convert the node into a call expression. const expression = ts.createPropertyAccess( target, (name as ts.StringLiteral | ts.NoSubstitutionTemplateLiteral).text); const callArguments = args ? (args as ts.ArrayLiteralExpression).elements : []; return ts.createCall(expression, [], callArguments); } else { // Otherwise create an expression in the form of `(target as any)[name].apply(target, args)`. const asExpression = ts.createParen( ts.createAsExpression(target, ts.createKeywordTypeNode(ts.SyntaxKind.AnyKeyword))); const elementAccess = ts.createElementAccess(asExpression, name); const applyExpression = ts.createPropertyAccess(elementAccess, 'apply'); return ts.createCall(applyExpression, [], args ? [target, args] : [target]); } } /** Migrates a call to `createViewRoot` to whatever node was passed in as the first argument. */ function migrateCreateViewRoot(node: ts.CallExpression): ts.Node { return node.arguments[0]; } /** Migrates a call to `migrate` a direct call to the helper. */ function migrateAnimateCall() { return ts.createCall(ts.createIdentifier(HelperFunction.animate), [], []); } /** * Switches out a call to the `Renderer` to a call to one of our helper functions. * Most of the helpers accept an instance of `Renderer2` as the first argument and all * subsequent arguments differ. * @param node Node of the original method call. * @param helper Name of the helper with which to replace the original call. * @param args Arguments that should be passed into the helper after the renderer argument. 
*/ function switchToHelperCall( node: PropertyAccessCallExpression, helper: HelperFunction, args: ts.Expression[]|ts.NodeArray<ts.Expression>): ts.Node { return ts.createCall(ts.createIdentifier(helper), [], [node.expression.expression, ...args]); }
return node; }
random_line_split
page.js
/** Rendering Pages ================ SIMPLE TEXT -------------- To put simple text or HTML to a screen: res.render( { body_text: 'Hello <b>world</b>' } ); TEMPLATE --------- To put a specific template (like a form) with variables: res.render( 'inbox/form.html', { username: name, phone: phone } ); STATUS MESSAGES ----------------- To ouput specific status messages: var page = safeharbor.page; res.outputMessage( page.MESSAGE_LEVELS.warning, 'Warning Title', 'Some warning text goes here' ); You can have multiple of these output message on the same page. Later, you can then call res.render() as above. This allows for the following scenario: if( onSubmit && (some_error == true) ) { // on submit there was some error res.outputMessage( page.MESSAGE_LEVELS.error, 'Try again', 'Some error text goes here' ); } // on first time render OR error re-submit: res.render( 'inbox/form.html', postargs ); STATUS (ONLY) PAGES --------------------- If all you want to output is the message (no template): res.outputMessage( page.MESSAGE_LEVELS.success, 'Wonderful', 'Some happy text goes here' ); res.render( page.MESSAGE_VIEW, { pageTitle: 'Happy Joy' } ); AJAX-STYLE HTML SNIPPETS -------------------------- To return a snippet of HTML (either as 'body_text', message or template) use the same techniques as above but the layout option: res.render( { layout: page.SNIPPET, body_text: 'This text is for embedding' } ); Also works for templates: res.render( 'profile/acct_email.html', // <-- template for embedded editing { layout: page.SNIPPET } ) @module lib @submodule page **/ var loginstate = require('./loginstate.js'), ROLES = require('./roles.js'), utils = require('./utils.js'); exports.MESSAGE_VIEW = // alias for... 
exports.BODY_TEXT_VIEW = 'shared/body_text.html'; exports.DEFAULT_LAYOUT = 'shared/main.html'; exports.SNIPPET = 'shared/ajax_html.html'; var MESSAGE_LEVELS = exports.MESSAGE_LEVELS = { info: 'info', success: 'success', warning: 'warning', danger: 'danger', // app error error: 'error' // sys error }; function
( req, res ) { var cmds = {}; function cmd( group, url, link, help ) { this.url = url; this.link = link; this.help = help; if(!cmds[group]) { cmds[group] = {}; cmds[group].items = [ ] }; cmds[group].items.push(this); } var user = loginstate.getUser(req); new cmd('safeharbor', '/about', 'About', 'Learn about Safe Harbor'); new cmd('safeharbor', '/learn', 'Learn', 'Learn about your rights and the DMCA'); new cmd('safeharbor', '/support', 'Support', 'Ask us stuff'); if( user ) { new cmd('user', '/dash', 'Dashboard', 'Manage your disputes' ); // new cmd('user', '/passwordreset', 'Password reset', 'Change your password'); new cmd('user', '/account', 'Account settings', 'Change your email and other settings'); new cmd('user', '/accountdeleter', 'Delete your account', 'hrumph'); new cmd('user', '/logout', 'Log out', 'bye for now' ); new cmd('site', '/siteeditor','Edit your site properties'); new cmd('tablinks', '/disputes', 'Past Disputes', 'Your dispute history' ); new cmd('tablinks', '/form', 'Future Disputes', 'Your dispute future' ); var r = user.role>>>0; // WTF? 
if( r <= ROLES.admin ) { new cmd('admin', '/admin', 'Admin stuff', '' ); if( r == ROLES.developer ) { new cmd('developer', '/dev', 'Developer stuff', '' ); } } } else { new cmd( 'user', '/login', 'Login', 'For existing accounts' ); new cmd( 'user', '/reg', 'Register', 'For creating new accounts' ); new cmd( 'user', '/lostpassword', 'Lost password', 'For existing, if forgetful accounts'); } return cmds; } exports.Message = function( msgLevel, msgTitle, text, opts ) { utils.copy( this, opts || {}); this.level = msgLevel; this.title = msgTitle; this.text = text; if( !this.status ) { switch( this.level ) { case MESSAGE_LEVELS.info: case MESSAGE_LEVELS.success: this.status = 'ok'; break; case MESSAGE_LEVELS.warning: case MESSAGE_LEVELS.danger: case MESSAGE_LEVELS.error: this.status = '??'; // TODO fill these info break; } } } exports.setup = function(app) { var Handlebars = require('handlebars'); Handlebars.registerHelper('loggedInStatusClass', function() { var isLoggedIn = loginstate.isLoggedIn(); if( isLoggedIn ) return('loggedin'); // that's a CSS selector name else return('loggedout'); }); Handlebars.registerHelper('contextDumper', function(a) { // I haven't figured out if this context blob // is a copy or an actual instance of something // important and shared, so we remove the 'app' // thingy so the dump is managable... var app = a.app; a.app = null; var text = require('util').inspect(a,true,null); // ...and then restore it just in case someone // else was using it a.app = app; return text; }); app.register('.html', Handlebars); app.set('view engine', 'handlebars'); app.dynamicHelpers( { // these will all be passed to every page... 
user: function( req, res ) { var u = loginstate.getUser(req); if( u && u.password ) u.password = '****'; return u; }, isLoggedIn: function( req, res ) { return !!loginstate.getUser(req); }, isAdmin: function( req, res ) { var u = loginstate.getUser(req); return u && (u.role>>>0 <= ROLES.admin>>>0); }, menu: buildMenu, // we should consider not outputting this on Ajax messages: function( req, res ) { return res.sh_output_messages || [ ] } } ); app.use( function setupPage(req,res,next) { /** Override of the express.response.render method in order put our application specific standard templates into the call stream. @method render @for Response @param {string} view Path to template file (relative to './view') @param {Object} opts Can include things like body_text, pageTitle **/ var oldRender = res.render; res.render = function(view, opts, fn, parent, sub ) { if( typeof view != 'string' ) { opts = view; view = exports.BODY_TEXT_VIEW; } if( view == exports.BODY_TEXT_VIEW ) { if( !opts.body_text ) opts.body_text = ''; } if( !opts.layout ) { opts.layout = exports.DEFAULT_LAYOUT; } if( !opts.bodyClass ) { try { opts.bodyClass = view.match(/([a-z0-9]+)\/[^\/]+$/)[1]; } catch( e ) { } } res.render = oldRender; return res.render(view, opts, fn, parent, sub ); } if( !res.outputMessage ) { /** Call this to setup a message to be ouput during the res.render() call. 
@method outputMessage @for Response @param {MESSAGE_LEVELS} msgLevel @param {STRING} msgTitle @param {STRING} text @param {Object} [opts] **/ res.outputMessage = function( msgLevel, msgTitle, text, opts ) { if( !res.sh_output_messages ) res.sh_output_messages = [ ]; res.sh_output_messages.push( new exports.Message(msgLevel,msgTitle,text,opts) ); return res; } } next(); }); } exports.countryList = function(selectedElementName){ var json = [ {name:"Afghanistan","data-alternative-spellings":"AF افغانستان"} ,{name:"Åland Islands","data-alternative-spellings":"AX Aaland Aland","data-relevancy-booster":"0.5"} ,{name:"Albania","data-alternative-spellings":"AL"} ,{name:"Algeria","data-alternative-spellings":"DZ الجزائر"} ,{name:"American Samoa","data-alternative-spellings":"AS","data-relevancy-booster":"0.5"} ,{name:"Andorra","data-alternative-spellings":"AD","data-relevancy-booster":"0.5"} ,{name:"Angola","data-alternative-spellings":"AO"} ,{name:"Anguilla","data-alternative-spellings":"AI","data-relevancy-booster":"0.5"} ,{name:"Antarctica","data-alternative-spellings":"AQ","data-relevancy-booster":"0.5"} ,{name:"Antigua And Barbuda","data-alternative-spellings":"AG","data-relevancy-booster":"0.5"} ,{name:"Argentina","data-alternative-spellings":"AR"} ,{name:"Armenia","data-alternative-spellings":"AM Հայաստան"} ,{name:"Aruba","data-alternative-spellings":"AW","data-relevancy-booster":"0.5"} ,{name:"Australia","data-alternative-spellings":"AU","data-relevancy-booster":"1.5"} ,{name:"Austria","data-alternative-spellings":"AT Österreich Osterreich Oesterreich "} ,{name:"Azerbaijan","data-alternative-spellings":"AZ"} ,{name:"Bahamas","data-alternative-spellings":"BS"} ,{name:"Bahrain","data-alternative-spellings":"BH البحرين"} ,{name:"Bangladesh","data-alternative-spellings":"BD বাংলাদেশ","data-relevancy-booster":"2"} ,{name:"Barbados","data-alternative-spellings":"BB"} ,{name:"Belarus","data-alternative-spellings":"BY Беларусь"} 
,{name:"Belgium","data-alternative-spellings":"BE België Belgie Belgien Belgique","data-relevancy-booster":"1.5"} ,{name:"Belize","data-alternative-spellings":"BZ"} ,{name:"Benin","data-alternative-spellings":"BJ"} ,{name:"Bermuda","data-alternative-spellings":"BM","data-relevancy-booster":"0.5"} ,{name:"Bhutan","data-alternative-spellings":"BT भूटान"} ,{name:"Bolivia","data-alternative-spellings":"BO"} ,{name:"Bonaire,Sint Eustatius and Saba","data-alternative-spellings":"BQ"} ,{name:"Bosnia and Herzegovina","data-alternative-spellings":"BA Босна и Херцеговина"} ,{name:"Botswana","data-alternative-spellings":"BW"} ,{name:"Bouvet Island","data-alternative-spellings":"BV"} ,{name:"Brazil","data-alternative-spellings":"BR Brasil","data-relevancy-booster":"2"} ,{name:"British Indian Ocean Territory","data-alternative-spellings":"IO"} ,{name:"Brunei Darussalam","data-alternative-spellings":"BN"} ,{name:"Bulgaria","data-alternative-spellings":"BG България"} ,{name:"Burkina Faso","data-alternative-spellings":"BF"} ,{name:"Burundi","data-alternative-spellings":"BI"} ,{name:"Cambodia","data-alternative-spellings":"KH កម្ពុជា"} ,{name:"Cameroon","data-alternative-spellings":"CM"} ,{name:"Canada","data-alternative-spellings":"CA","data-relevancy-booster":"2"} ,{name:"Cape Verde","data-alternative-spellings":"CV Cabo"} ,{name:"Cayman Islands","data-alternative-spellings":"KY","data-relevancy-booster":"0.5"} ,{name:"Central African Republic","data-alternative-spellings":"CF"} ,{name:"Chad","data-alternative-spellings":"TD تشاد‎ Tchad"} ,{name:"Chile","data-alternative-spellings":"CL"} ,{name:"China","data-relevancy-booster":"3.5","data-alternative-spellings":"CN Zhongguo Zhonghua Peoples Republic 中国/中华"} ,{name:"Christmas Island","data-alternative-spellings":"CX","data-relevancy-booster":"0.5"} ,{name:"Cocos (Keeling) Islands","data-alternative-spellings":"CC","data-relevancy-booster":"0.5"} ,{name:"Colombia","data-alternative-spellings":"CO"} 
,{name:"Comoros","data-alternative-spellings":"KM جزر القمر"} ,{name:"Congo","data-alternative-spellings":"CG"} ,{name:"Congo,the Democratic Republic of the","data-alternative-spellings":"CD Congo-Brazzaville Repubilika ya Kongo"} ,{name:"Cook Islands","data-alternative-spellings":"CK","data-relevancy-booster":"0.5"} ,{name:"Costa Rica","data-alternative-spellings":"CR"} ,{name:"Côte d'Ivoire","data-alternative-spellings":"CI Cote dIvoire"} ,{name:"Croatia","data-alternative-spellings":"HR Hrvatska"} ,{name:"Cuba","data-alternative-spellings":"CU"} ,{name:"Curaçao","data-alternative-spellings":"CW Curacao"} ,{name:"Cyprus","data-alternative-spellings":"CY Κύπρος Kýpros Kıbrıs"} ,{name:"Czech Republic","data-alternative-spellings":"CZ Česká Ceska"} ,{name:"Denmark","data-alternative-spellings":"DK Danmark","data-relevancy-booster":"1.5"} ,{name:"Djibouti","data-alternative-spellings":"DJ جيبوتي‎ Jabuuti Gabuuti"} ,{name:"Dominica","data-alternative-spellings":"DM Dominique","data-relevancy-booster":"0.5"} ,{name:"Dominican Republic","data-alternative-spellings":"DO"} ,{name:"Ecuador","data-alternative-spellings":"EC"} ,{name:"Egypt","data-alternative-spellings":"EG","data-relevancy-booster":"1.5"} ,{name:"El Salvador","data-alternative-spellings":"SV"} ,{name:"Equatorial Guinea","data-alternative-spellings":"GQ"} ,{name:"Eritrea","data-alternative-spellings":"ER إرتريا ኤርትራ"} ,{name:"Estonia","data-alternative-spellings":"EE Eesti"} ,{name:"Ethiopia","data-alternative-spellings":"ET ኢትዮጵያ"} ,{name:"Falkland Islands (Malvinas)","data-alternative-spellings":"FK","data-relevancy-booster":"0.5"} ,{name:"Faroe Islands","data-alternative-spellings":"FO Føroyar Færøerne","data-relevancy-booster":"0.5"} ,{name:"Fiji","data-alternative-spellings":"FJ Viti फ़िजी"} ,{name:"Finland","data-alternative-spellings":"FI Suomi"} ,{name:"France","data-alternative-spellings":"FR République française","data-relevancy-booster":"2.5"} ,{name:"French 
Guiana","data-alternative-spellings":"GF"} ,{name:"French Polynesia","data-alternative-spellings":"PF Polynésie française"} ,{name:"French Southern Territories","data-alternative-spellings":"TF"} ,{name:"Gabon","data-alternative-spellings":"GA République Gabonaise"} ,{name:"Gambia","data-alternative-spellings":"GM"} ,{name:"Georgia","data-alternative-spellings":"GE საქართველო"} ,{name:"Germany","data-alternative-spellings":"DE Bundesrepublik Deutschland","data-relevancy-booster":"3"} ,{name:"Ghana","data-alternative-spellings":"GH"} ,{name:"Gibraltar","data-alternative-spellings":"GI","data-relevancy-booster":"0.5"} ,{name:"Greece","data-alternative-spellings":"GR Ελλάδα","data-relevancy-booster":"1.5"} ,{name:"Greenland","data-alternative-spellings":"GL grønland","data-relevancy-booster":"0.5"} ,{name:"Grenada","data-alternative-spellings":"GD"} ,{name:"Guadeloupe","data-alternative-spellings":"GP"} ,{name:"Guam","data-alternative-spellings":"GU"} ,{name:"Guatemala","data-alternative-spellings":"GT"} ,{name:"Guernsey","data-alternative-spellings":"GG","data-relevancy-booster":"0.5"} ,{name:"Guinea","data-alternative-spellings":"GN"} ,{name:"Guinea-Bissau","data-alternative-spellings":"GW"} ,{name:"Guyana","data-alternative-spellings":"GY"} ,{name:"Haiti","data-alternative-spellings":"HT"} ,{name:"Heard Island and McDonald Islands","data-alternative-spellings":"HM"} ,{name:"Holy See (Vatican City State)","data-alternative-spellings":"VA","data-relevancy-booster":"0.5"} ,{name:"Honduras","data-alternative-spellings":"HN"} ,{name:"Hong Kong","data-alternative-spellings":"HK 香港"} ,{name:"Hungary","data-alternative-spellings":"HU Magyarország"} ,{name:"Iceland","data-alternative-spellings":"IS Island"} ,{name:"India","data-alternative-spellings":"IN भारत गणराज्य Hindustan","data-relevancy-booster":"3"} ,{name:"Indonesia","data-alternative-spellings":"ID","data-relevancy-booster":"2"} ,{name:"Iran,Islamic Republic of","data-alternative-spellings":"IR ایران"} 
,{name:"Iraq","data-alternative-spellings":"IQ العراق‎"} ,{name:"Ireland","data-alternative-spellings":"IE Éire","data-relevancy-booster":"1.2"} ,{name:"Isle of Man","data-alternative-spellings":"IM","data-relevancy-booster":"0.5"} ,{name:"Israel","data-alternative-spellings":"IL إسرائيل ישראל"} ,{name:"Italy","data-alternative-spellings":"IT Italia","data-relevancy-booster":"2"} ,{name:"Jamaica","data-alternative-spellings":"JM"} ,{name:"Japan","data-alternative-spellings":"JP Nippon Nihon 日本","data-relevancy-booster":"2.5"} ,{name:"Jersey","data-alternative-spellings":"JE","data-relevancy-booster":"0.5"} ,{name:"Jordan","data-alternative-spellings":"JO الأردن"} ,{name:"Kazakhstan","data-alternative-spellings":"KZ Қазақстан Казахстан"} ,{name:"Kenya","data-alternative-spellings":"KE"} ,{name:"Kiribati","data-alternative-spellings":"KI"} ,{name:"Korea,Democratic People's Republic of","data-alternative-spellings":"KP North Korea"} ,{name:"Korea,Republic of","data-alternative-spellings":"KR South Korea","data-relevancy-booster":"1.5"} ,{name:"Kuwait","data-alternative-spellings":"KW الكويت"} ,{name:"Kyrgyzstan","data-alternative-spellings":"KG Кыргызстан"} ,{name:"Lao People's Democratic Republic","data-alternative-spellings":"LA"} ,{name:"Latvia","data-alternative-spellings":"LV Latvija"} ,{name:"Lebanon","data-alternative-spellings":"LB لبنان"} ,{name:"Lesotho","data-alternative-spellings":"LS"} ,{name:"Liberia","data-alternative-spellings":"LR"} ,{name:"Libyan Arab Jamahiriya","data-alternative-spellings":"LY ليبيا"} ,{name:"Liechtenstein","data-alternative-spellings":"LI"} ,{name:"Lithuania","data-alternative-spellings":"LT Lietuva"} ,{name:"Luxembourg","data-alternative-spellings":"LU"} ,{name:"Macao","data-alternative-spellings":"MO"} ,{name:"Macedonia,The Former Yugoslav Republic Of","data-alternative-spellings":"MK Македонија"} ,{name:"Madagascar","data-alternative-spellings":"MG Madagasikara"} ,{name:"Malawi","data-alternative-spellings":"MW"} 
,{name:"Malaysia","data-alternative-spellings":"MY"} ,{name:"Maldives","data-alternative-spellings":"MV"} ,{name:"Mali","data-alternative-spellings":"ML"} ,{name:"Malta","data-alternative-spellings":"MT"} ,{name:"Marshall Islands","data-alternative-spellings":"MH","data-relevancy-booster":"0.5"} ,{name:"Martinique","data-alternative-spellings":"MQ"} ,{name:"Mauritania","data-alternative-spellings":"MR الموريتانية"} ,{name:"Mauritius","data-alternative-spellings":"MU"} ,{name:"Mayotte","data-alternative-spellings":"YT"} ,{name:"Mexico","data-alternative-spellings":"MX Mexicanos","data-relevancy-booster":"1.5"} ,{name:"Micronesia,Federated States of","data-alternative-spellings":"FM"} ,{name:"Moldova,Republic of","data-alternative-spellings":"MD"} ,{name:"Monaco","data-alternative-spellings":"MC"} ,{name:"Mongolia","data-alternative-spellings":"MN Mongγol ulus Монгол улс"} ,{name:"Montenegro","data-alternative-spellings":"ME"} ,{name:"Montserrat","data-alternative-spellings":"MS","data-relevancy-booster":"0.5"} ,{name:"Morocco","data-alternative-spellings":"MA المغرب"} ,{name:"Mozambique","data-alternative-spellings":"MZ Moçambique"} ,{name:"Myanmar","data-alternative-spellings":"MM"} ,{name:"Namibia","data-alternative-spellings":"NA Namibië"} ,{name:"Nauru","data-alternative-spellings":"NR Naoero","data-relevancy-booster":"0.5"} ,{name:"Nepal","data-alternative-spellings":"NP नेपाल"} ,{name:"Netherlands","data-alternative-spellings":"NL Holland Nederland","data-relevancy-booster":"1.5"} ,{name:"New Caledonia","data-alternative-spellings":"NC","data-relevancy-booster":"0.5"} ,{name:"New Zealand","data-alternative-spellings":"NZ Aotearoa"} ,{name:"Nicaragua","data-alternative-spellings":"NI"} ,{name:"Niger","data-alternative-spellings":"NE Nijar"} ,{name:"Nigeria","data-alternative-spellings":"NG Nijeriya Naíjíríà","data-relevancy-booster":"1.5"} ,{name:"Niue","data-alternative-spellings":"NU","data-relevancy-booster":"0.5"} ,{name:"Norfolk 
Island","data-alternative-spellings":"NF","data-relevancy-booster":"0.5"} ,{name:"Northern Mariana Islands","data-alternative-spellings":"MP","data-relevancy-booster":"0.5"} ,{name:"Norway","data-alternative-spellings":"NO Norge Noreg","data-relevancy-booster":"1.5"} ,{name:"Oman","data-alternative-spellings":"OM عمان"} ,{name:"Pakistan","data-alternative-spellings":"PK پاکستان","data-relevancy-booster":"2"} ,{name:"Palau","data-alternative-spellings":"PW","data-relevancy-booster":"0.5"} ,{name:"Palestinian Territory,Occupied","data-alternative-spellings":"PS فلسطين"} ,{name:"Panama","data-alternative-spellings":"PA"} ,{name:"Papua New Guinea","data-alternative-spellings":"PG"} ,{name:"Paraguay","data-alternative-spellings":"PY"} ,{name:"Peru","data-alternative-spellings":"PE"} ,{name:"Philippines","data-alternative-spellings":"PH Pilipinas","data-relevancy-booster":"1.5"} ,{name:"Pitcairn","data-alternative-spellings":"PN","data-relevancy-booster":"0.5"} ,{name:"Poland","data-alternative-spellings":"PL Polska","data-relevancy-booster":"1.25"} ,{name:"Portugal","data-alternative-spellings":"PT Portuguesa","data-relevancy-booster":"1.5"} ,{name:"Puerto Rico","data-alternative-spellings":"PR"} ,{name:"Qatar","data-alternative-spellings":"QA قطر"} ,{name:"Réunion","data-alternative-spellings":"RE Reunion"} ,{name:"Romania","data-alternative-spellings":"RO Rumania Roumania România"} ,{name:"Russian Federation","data-alternative-spellings":"RU Rossiya Российская Россия","data-relevancy-booster":"2.5"} ,{name:"Rwanda","data-alternative-spellings":"RW"} ,{name:"Saint Barthélemy","data-alternative-spellings":"BL St. 
Barthelemy"} ,{name:"Saint Helena","data-alternative-spellings":"SH St."} ,{name:"Saint Kitts and Nevis","data-alternative-spellings":"KN St."} ,{name:"Saint Lucia","data-alternative-spellings":"LC St."} ,{name:"Saint Martin (French Part)","data-alternative-spellings":"MF St."} ,{name:"Saint Pierre and Miquelon","data-alternative-spellings":"PM St."} ,{name:"Saint Vincent and the Grenadines","data-alternative-spellings":"VC St."} ,{name:"Samoa","data-alternative-spellings":"WS"} ,{name:"San Marino","data-alternative-spellings":"SM"} ,{name:"Sao Tome and Principe","data-alternative-spellings":"ST"} ,{name:"Saudi Arabia","data-alternative-spellings":"SA السعودية"} ,{name:"Senegal","data-alternative-spellings":"SN Sénégal"} ,{name:"Serbia","data-alternative-spellings":"RS Србија Srbija"} ,{name:"Seychelles","data-alternative-spellings":"SC","data-relevancy-booster":"0.5"} ,{name:"Sierra Leone","data-alternative-spellings":"SL"} ,{name:"Singapore","data-alternative-spellings":"SG Singapura சிங்கப்பூர் குடியரசு 新加坡共和国"} ,{name:"Sint Maarten (Dutch Part)","data-alternative-spellings":"SX"} ,{name:"Slovakia","data-alternative-spellings":"SK Slovenská Slovensko"} ,{name:"Slovenia","data-alternative-spellings":"SI Slovenija"} ,{name:"Solomon Islands","data-alternative-spellings":"SB"} ,{name:"Somalia","data-alternative-spellings":"SO الصومال"} ,{name:"South Africa","data-alternative-spellings":"ZA RSA Suid-Afrika"} ,{name:"South Georgia and the South Sandwich Islands","data-alternative-spellings":"GS"} ,{name:"South Sudan","data-alternative-spellings":"SS"} ,{name:"Spain","data-alternative-spellings":"ES España","data-relevancy-booster":"2"} ,{name:"Sri Lanka","data-alternative-spellings":"LK ශ්‍රී ලංකා இலங்கை Ceylon"} ,{name:"Sudan","data-alternative-spellings":"SD السودان"} ,{name:"Suriname","data-alternative-spellings":"SR शर्नम् Sarnam Sranangron"} ,{name:"Svalbard and Jan Mayen","data-alternative-spellings":"SJ","data-relevancy-booster":"0.5"} 
,{name:"Swaziland","data-alternative-spellings":"SZ weSwatini Swatini Ngwane"} ,{name:"Sweden","data-alternative-spellings":"SE Sverige","data-relevancy-booster":"1.5"} ,{name:"Switzerland","data-alternative-spellings":"CH Swiss Confederation Schweiz Suisse Svizzera Svizra","data-relevancy-booster":"1.5"} ,{name:"Syrian Arab Republic","data-alternative-spellings":"SY Syria سورية"} ,{name:"Taiwan,Province of China","data-alternative-spellings":"TW 台灣 臺灣"} ,{name:"Tajikistan","data-alternative-spellings":"TJ Тоҷикистон Toçikiston"} ,{name:"Tanzania,United Republic of","data-alternative-spellings":"TZ"} ,{name:"Thailand","data-alternative-spellings":"TH ประเทศไทย Prathet Thai"} ,{name:"Timor-Leste","data-alternative-spellings":"TL"} ,{name:"Togo","data-alternative-spellings":"TG Togolese"} ,{name:"Tokelau","data-alternative-spellings":"TK","data-relevancy-booster":"0.5"} ,{name:"Tonga","data-alternative-spellings":"TO"} ,{name:"Trinidad and Tobago","data-alternative-spellings":"TT"} ,{name:"Tunisia","data-alternative-spellings":"TN تونس"} ,{name:"Turkey","data-alternative-spellings":"TR Türkiye Turkiye"} ,{name:"Turkmenistan","data-alternative-spellings":"TM Türkmenistan"} ,{name:"Turks and Caicos Islands","data-alternative-spellings":"TC","data-relevancy-booster":"0.5"} ,{name:"Tuvalu","data-alternative-spellings":"TV","data-relevancy-booster":"0.5"} ,{name:"Uganda","data-alternative-spellings":"UG"} ,{name:"Ukraine","data-alternative-spellings":"UA Ukrayina Україна"} ,{name:"United Arab Emirates","data-alternative-spellings":"AE UAE الإمارات"} ,{name:"United Kingdom","data-alternative-spellings":"GB Great Britain England UK Wales Scotland Northern Ireland","data-relevancy-booster":"2.5"} ,{name:"United States","data-relevancy-booster":"3.5","data-alternative-spellings":"US USA United States of America"} ,{name:"United States Minor Outlying Islands","data-alternative-spellings":"UM"} ,{name:"Uruguay","data-alternative-spellings":"UY"} 
,{name:"Uzbekistan","data-alternative-spellings":"UZ Ўзбекистон O'zbekstan O‘zbekiston"} ,{name:"Vanuatu","data-alternative-spellings":"VU"} ,{name:"Venezuela","data-alternative-spellings":"VE"} ,{name:"Vietnam","data-alternative-spellings":"VN Việt Nam","data-relevancy-booster":"1.5"} ,{name:"Virgin Islands,British","data-alternative-spellings":"VG","data-relevancy-booster":"0.5"} ,{name:"Virgin Islands,U.S.","data-alternative-spellings":"VI","data-relevancy-booster":"0.5"} ,{name:"Wallis and Futuna","data-alternative-spellings":"WF","data-relevancy-booster":"0.5"} ,{name:"Western Sahara","data-alternative-spellings":"EH لصحراء الغربية"} ,{name:"Yemen","data-alternative-spellings":"YE اليمن"} ,{name:"Zambia","data-alternative-spellings":"ZM"} ,{name:"Zimbabwe","data-alternative-spellings":"ZW"} ]; var html = ""; // Provide a "Select Country" leader element, but only if there is no pre-selected item. // This is to prevent users who have previously selected a country from setting an empty country. if( !selectedElementName || selectedElementName.length < 1) html = '<option value="" selected="selected">Select Country</option>\n'; json.forEach(function(element, index, array){ var str = '<option value="' + element.name+'"'; if( element.name == selectedElementName ) str += " selected "; var helper=function(field){ if( typeof element[field] != "string" ) return(""); if( element[field].length == 0 ) return(""); return(" "+field+'="'+element[field]+'" '); } str += helper("data-alternative-spellings"); str += helper("data-relevancy-booster"); str += ">"+element.name+"</option>\n"; html += str; }) return(html); }
buildMenu
identifier_name
page.js
/** Rendering Pages ================ SIMPLE TEXT -------------- To put simple text or HTML to a screen: res.render( { body_text: 'Hello <b>world</b>' } ); TEMPLATE --------- To put a specific template (like a form) with variables: res.render( 'inbox/form.html', { username: name, phone: phone } ); STATUS MESSAGES ----------------- To ouput specific status messages: var page = safeharbor.page; res.outputMessage( page.MESSAGE_LEVELS.warning, 'Warning Title', 'Some warning text goes here' ); You can have multiple of these output message on the same page. Later, you can then call res.render() as above. This allows for the following scenario: if( onSubmit && (some_error == true) ) { // on submit there was some error res.outputMessage( page.MESSAGE_LEVELS.error, 'Try again', 'Some error text goes here' ); } // on first time render OR error re-submit: res.render( 'inbox/form.html', postargs ); STATUS (ONLY) PAGES --------------------- If all you want to output is the message (no template): res.outputMessage( page.MESSAGE_LEVELS.success, 'Wonderful', 'Some happy text goes here' ); res.render( page.MESSAGE_VIEW, { pageTitle: 'Happy Joy' } ); AJAX-STYLE HTML SNIPPETS -------------------------- To return a snippet of HTML (either as 'body_text', message or template) use the same techniques as above but the layout option: res.render( { layout: page.SNIPPET, body_text: 'This text is for embedding' } ); Also works for templates: res.render( 'profile/acct_email.html', // <-- template for embedded editing { layout: page.SNIPPET } ) @module lib @submodule page **/ var loginstate = require('./loginstate.js'), ROLES = require('./roles.js'), utils = require('./utils.js'); exports.MESSAGE_VIEW = // alias for... 
exports.BODY_TEXT_VIEW = 'shared/body_text.html'; exports.DEFAULT_LAYOUT = 'shared/main.html'; exports.SNIPPET = 'shared/ajax_html.html'; var MESSAGE_LEVELS = exports.MESSAGE_LEVELS = { info: 'info', success: 'success', warning: 'warning', danger: 'danger', // app error error: 'error' // sys error }; function buildMenu( req, res ) { var cmds = {}; function cmd( group, url, link, help ) { this.url = url; this.link = link; this.help = help; if(!cmds[group])
; cmds[group].items.push(this); } var user = loginstate.getUser(req); new cmd('safeharbor', '/about', 'About', 'Learn about Safe Harbor'); new cmd('safeharbor', '/learn', 'Learn', 'Learn about your rights and the DMCA'); new cmd('safeharbor', '/support', 'Support', 'Ask us stuff'); if( user ) { new cmd('user', '/dash', 'Dashboard', 'Manage your disputes' ); // new cmd('user', '/passwordreset', 'Password reset', 'Change your password'); new cmd('user', '/account', 'Account settings', 'Change your email and other settings'); new cmd('user', '/accountdeleter', 'Delete your account', 'hrumph'); new cmd('user', '/logout', 'Log out', 'bye for now' ); new cmd('site', '/siteeditor','Edit your site properties'); new cmd('tablinks', '/disputes', 'Past Disputes', 'Your dispute history' ); new cmd('tablinks', '/form', 'Future Disputes', 'Your dispute future' ); var r = user.role>>>0; // WTF? if( r <= ROLES.admin ) { new cmd('admin', '/admin', 'Admin stuff', '' ); if( r == ROLES.developer ) { new cmd('developer', '/dev', 'Developer stuff', '' ); } } } else { new cmd( 'user', '/login', 'Login', 'For existing accounts' ); new cmd( 'user', '/reg', 'Register', 'For creating new accounts' ); new cmd( 'user', '/lostpassword', 'Lost password', 'For existing, if forgetful accounts'); } return cmds; } exports.Message = function( msgLevel, msgTitle, text, opts ) { utils.copy( this, opts || {}); this.level = msgLevel; this.title = msgTitle; this.text = text; if( !this.status ) { switch( this.level ) { case MESSAGE_LEVELS.info: case MESSAGE_LEVELS.success: this.status = 'ok'; break; case MESSAGE_LEVELS.warning: case MESSAGE_LEVELS.danger: case MESSAGE_LEVELS.error: this.status = '??'; // TODO fill these info break; } } } exports.setup = function(app) { var Handlebars = require('handlebars'); Handlebars.registerHelper('loggedInStatusClass', function() { var isLoggedIn = loginstate.isLoggedIn(); if( isLoggedIn ) return('loggedin'); // that's a CSS selector name else return('loggedout'); }); 
Handlebars.registerHelper('contextDumper', function(a) { // I haven't figured out if this context blob // is a copy or an actual instance of something // important and shared, so we remove the 'app' // thingy so the dump is managable... var app = a.app; a.app = null; var text = require('util').inspect(a,true,null); // ...and then restore it just in case someone // else was using it a.app = app; return text; }); app.register('.html', Handlebars); app.set('view engine', 'handlebars'); app.dynamicHelpers( { // these will all be passed to every page... user: function( req, res ) { var u = loginstate.getUser(req); if( u && u.password ) u.password = '****'; return u; }, isLoggedIn: function( req, res ) { return !!loginstate.getUser(req); }, isAdmin: function( req, res ) { var u = loginstate.getUser(req); return u && (u.role>>>0 <= ROLES.admin>>>0); }, menu: buildMenu, // we should consider not outputting this on Ajax messages: function( req, res ) { return res.sh_output_messages || [ ] } } ); app.use( function setupPage(req,res,next) { /** Override of the express.response.render method in order put our application specific standard templates into the call stream. @method render @for Response @param {string} view Path to template file (relative to './view') @param {Object} opts Can include things like body_text, pageTitle **/ var oldRender = res.render; res.render = function(view, opts, fn, parent, sub ) { if( typeof view != 'string' ) { opts = view; view = exports.BODY_TEXT_VIEW; } if( view == exports.BODY_TEXT_VIEW ) { if( !opts.body_text ) opts.body_text = ''; } if( !opts.layout ) { opts.layout = exports.DEFAULT_LAYOUT; } if( !opts.bodyClass ) { try { opts.bodyClass = view.match(/([a-z0-9]+)\/[^\/]+$/)[1]; } catch( e ) { } } res.render = oldRender; return res.render(view, opts, fn, parent, sub ); } if( !res.outputMessage ) { /** Call this to setup a message to be ouput during the res.render() call. 
@method outputMessage @for Response @param {MESSAGE_LEVELS} msgLevel @param {STRING} msgTitle @param {STRING} text @param {Object} [opts] **/ res.outputMessage = function( msgLevel, msgTitle, text, opts ) { if( !res.sh_output_messages ) res.sh_output_messages = [ ]; res.sh_output_messages.push( new exports.Message(msgLevel,msgTitle,text,opts) ); return res; } } next(); }); } exports.countryList = function(selectedElementName){ var json = [ {name:"Afghanistan","data-alternative-spellings":"AF افغانستان"} ,{name:"Åland Islands","data-alternative-spellings":"AX Aaland Aland","data-relevancy-booster":"0.5"} ,{name:"Albania","data-alternative-spellings":"AL"} ,{name:"Algeria","data-alternative-spellings":"DZ الجزائر"} ,{name:"American Samoa","data-alternative-spellings":"AS","data-relevancy-booster":"0.5"} ,{name:"Andorra","data-alternative-spellings":"AD","data-relevancy-booster":"0.5"} ,{name:"Angola","data-alternative-spellings":"AO"} ,{name:"Anguilla","data-alternative-spellings":"AI","data-relevancy-booster":"0.5"} ,{name:"Antarctica","data-alternative-spellings":"AQ","data-relevancy-booster":"0.5"} ,{name:"Antigua And Barbuda","data-alternative-spellings":"AG","data-relevancy-booster":"0.5"} ,{name:"Argentina","data-alternative-spellings":"AR"} ,{name:"Armenia","data-alternative-spellings":"AM Հայաստան"} ,{name:"Aruba","data-alternative-spellings":"AW","data-relevancy-booster":"0.5"} ,{name:"Australia","data-alternative-spellings":"AU","data-relevancy-booster":"1.5"} ,{name:"Austria","data-alternative-spellings":"AT Österreich Osterreich Oesterreich "} ,{name:"Azerbaijan","data-alternative-spellings":"AZ"} ,{name:"Bahamas","data-alternative-spellings":"BS"} ,{name:"Bahrain","data-alternative-spellings":"BH البحرين"} ,{name:"Bangladesh","data-alternative-spellings":"BD বাংলাদেশ","data-relevancy-booster":"2"} ,{name:"Barbados","data-alternative-spellings":"BB"} ,{name:"Belarus","data-alternative-spellings":"BY Беларусь"} 
,{name:"Belgium","data-alternative-spellings":"BE België Belgie Belgien Belgique","data-relevancy-booster":"1.5"} ,{name:"Belize","data-alternative-spellings":"BZ"} ,{name:"Benin","data-alternative-spellings":"BJ"} ,{name:"Bermuda","data-alternative-spellings":"BM","data-relevancy-booster":"0.5"} ,{name:"Bhutan","data-alternative-spellings":"BT भूटान"} ,{name:"Bolivia","data-alternative-spellings":"BO"} ,{name:"Bonaire,Sint Eustatius and Saba","data-alternative-spellings":"BQ"} ,{name:"Bosnia and Herzegovina","data-alternative-spellings":"BA Босна и Херцеговина"} ,{name:"Botswana","data-alternative-spellings":"BW"} ,{name:"Bouvet Island","data-alternative-spellings":"BV"} ,{name:"Brazil","data-alternative-spellings":"BR Brasil","data-relevancy-booster":"2"} ,{name:"British Indian Ocean Territory","data-alternative-spellings":"IO"} ,{name:"Brunei Darussalam","data-alternative-spellings":"BN"} ,{name:"Bulgaria","data-alternative-spellings":"BG България"} ,{name:"Burkina Faso","data-alternative-spellings":"BF"} ,{name:"Burundi","data-alternative-spellings":"BI"} ,{name:"Cambodia","data-alternative-spellings":"KH កម្ពុជា"} ,{name:"Cameroon","data-alternative-spellings":"CM"} ,{name:"Canada","data-alternative-spellings":"CA","data-relevancy-booster":"2"} ,{name:"Cape Verde","data-alternative-spellings":"CV Cabo"} ,{name:"Cayman Islands","data-alternative-spellings":"KY","data-relevancy-booster":"0.5"} ,{name:"Central African Republic","data-alternative-spellings":"CF"} ,{name:"Chad","data-alternative-spellings":"TD تشاد‎ Tchad"} ,{name:"Chile","data-alternative-spellings":"CL"} ,{name:"China","data-relevancy-booster":"3.5","data-alternative-spellings":"CN Zhongguo Zhonghua Peoples Republic 中国/中华"} ,{name:"Christmas Island","data-alternative-spellings":"CX","data-relevancy-booster":"0.5"} ,{name:"Cocos (Keeling) Islands","data-alternative-spellings":"CC","data-relevancy-booster":"0.5"} ,{name:"Colombia","data-alternative-spellings":"CO"} 
,{name:"Comoros","data-alternative-spellings":"KM جزر القمر"} ,{name:"Congo","data-alternative-spellings":"CG"} ,{name:"Congo,the Democratic Republic of the","data-alternative-spellings":"CD Congo-Brazzaville Repubilika ya Kongo"} ,{name:"Cook Islands","data-alternative-spellings":"CK","data-relevancy-booster":"0.5"} ,{name:"Costa Rica","data-alternative-spellings":"CR"} ,{name:"Côte d'Ivoire","data-alternative-spellings":"CI Cote dIvoire"} ,{name:"Croatia","data-alternative-spellings":"HR Hrvatska"} ,{name:"Cuba","data-alternative-spellings":"CU"} ,{name:"Curaçao","data-alternative-spellings":"CW Curacao"} ,{name:"Cyprus","data-alternative-spellings":"CY Κύπρος Kýpros Kıbrıs"} ,{name:"Czech Republic","data-alternative-spellings":"CZ Česká Ceska"} ,{name:"Denmark","data-alternative-spellings":"DK Danmark","data-relevancy-booster":"1.5"} ,{name:"Djibouti","data-alternative-spellings":"DJ جيبوتي‎ Jabuuti Gabuuti"} ,{name:"Dominica","data-alternative-spellings":"DM Dominique","data-relevancy-booster":"0.5"} ,{name:"Dominican Republic","data-alternative-spellings":"DO"} ,{name:"Ecuador","data-alternative-spellings":"EC"} ,{name:"Egypt","data-alternative-spellings":"EG","data-relevancy-booster":"1.5"} ,{name:"El Salvador","data-alternative-spellings":"SV"} ,{name:"Equatorial Guinea","data-alternative-spellings":"GQ"} ,{name:"Eritrea","data-alternative-spellings":"ER إرتريا ኤርትራ"} ,{name:"Estonia","data-alternative-spellings":"EE Eesti"} ,{name:"Ethiopia","data-alternative-spellings":"ET ኢትዮጵያ"} ,{name:"Falkland Islands (Malvinas)","data-alternative-spellings":"FK","data-relevancy-booster":"0.5"} ,{name:"Faroe Islands","data-alternative-spellings":"FO Føroyar Færøerne","data-relevancy-booster":"0.5"} ,{name:"Fiji","data-alternative-spellings":"FJ Viti फ़िजी"} ,{name:"Finland","data-alternative-spellings":"FI Suomi"} ,{name:"France","data-alternative-spellings":"FR République française","data-relevancy-booster":"2.5"} ,{name:"French 
Guiana","data-alternative-spellings":"GF"} ,{name:"French Polynesia","data-alternative-spellings":"PF Polynésie française"} ,{name:"French Southern Territories","data-alternative-spellings":"TF"} ,{name:"Gabon","data-alternative-spellings":"GA République Gabonaise"} ,{name:"Gambia","data-alternative-spellings":"GM"} ,{name:"Georgia","data-alternative-spellings":"GE საქართველო"} ,{name:"Germany","data-alternative-spellings":"DE Bundesrepublik Deutschland","data-relevancy-booster":"3"} ,{name:"Ghana","data-alternative-spellings":"GH"} ,{name:"Gibraltar","data-alternative-spellings":"GI","data-relevancy-booster":"0.5"} ,{name:"Greece","data-alternative-spellings":"GR Ελλάδα","data-relevancy-booster":"1.5"} ,{name:"Greenland","data-alternative-spellings":"GL grønland","data-relevancy-booster":"0.5"} ,{name:"Grenada","data-alternative-spellings":"GD"} ,{name:"Guadeloupe","data-alternative-spellings":"GP"} ,{name:"Guam","data-alternative-spellings":"GU"} ,{name:"Guatemala","data-alternative-spellings":"GT"} ,{name:"Guernsey","data-alternative-spellings":"GG","data-relevancy-booster":"0.5"} ,{name:"Guinea","data-alternative-spellings":"GN"} ,{name:"Guinea-Bissau","data-alternative-spellings":"GW"} ,{name:"Guyana","data-alternative-spellings":"GY"} ,{name:"Haiti","data-alternative-spellings":"HT"} ,{name:"Heard Island and McDonald Islands","data-alternative-spellings":"HM"} ,{name:"Holy See (Vatican City State)","data-alternative-spellings":"VA","data-relevancy-booster":"0.5"} ,{name:"Honduras","data-alternative-spellings":"HN"} ,{name:"Hong Kong","data-alternative-spellings":"HK 香港"} ,{name:"Hungary","data-alternative-spellings":"HU Magyarország"} ,{name:"Iceland","data-alternative-spellings":"IS Island"} ,{name:"India","data-alternative-spellings":"IN भारत गणराज्य Hindustan","data-relevancy-booster":"3"} ,{name:"Indonesia","data-alternative-spellings":"ID","data-relevancy-booster":"2"} ,{name:"Iran,Islamic Republic of","data-alternative-spellings":"IR ایران"} 
,{name:"Iraq","data-alternative-spellings":"IQ العراق‎"} ,{name:"Ireland","data-alternative-spellings":"IE Éire","data-relevancy-booster":"1.2"} ,{name:"Isle of Man","data-alternative-spellings":"IM","data-relevancy-booster":"0.5"} ,{name:"Israel","data-alternative-spellings":"IL إسرائيل ישראל"} ,{name:"Italy","data-alternative-spellings":"IT Italia","data-relevancy-booster":"2"} ,{name:"Jamaica","data-alternative-spellings":"JM"} ,{name:"Japan","data-alternative-spellings":"JP Nippon Nihon 日本","data-relevancy-booster":"2.5"} ,{name:"Jersey","data-alternative-spellings":"JE","data-relevancy-booster":"0.5"} ,{name:"Jordan","data-alternative-spellings":"JO الأردن"} ,{name:"Kazakhstan","data-alternative-spellings":"KZ Қазақстан Казахстан"} ,{name:"Kenya","data-alternative-spellings":"KE"} ,{name:"Kiribati","data-alternative-spellings":"KI"} ,{name:"Korea,Democratic People's Republic of","data-alternative-spellings":"KP North Korea"} ,{name:"Korea,Republic of","data-alternative-spellings":"KR South Korea","data-relevancy-booster":"1.5"} ,{name:"Kuwait","data-alternative-spellings":"KW الكويت"} ,{name:"Kyrgyzstan","data-alternative-spellings":"KG Кыргызстан"} ,{name:"Lao People's Democratic Republic","data-alternative-spellings":"LA"} ,{name:"Latvia","data-alternative-spellings":"LV Latvija"} ,{name:"Lebanon","data-alternative-spellings":"LB لبنان"} ,{name:"Lesotho","data-alternative-spellings":"LS"} ,{name:"Liberia","data-alternative-spellings":"LR"} ,{name:"Libyan Arab Jamahiriya","data-alternative-spellings":"LY ليبيا"} ,{name:"Liechtenstein","data-alternative-spellings":"LI"} ,{name:"Lithuania","data-alternative-spellings":"LT Lietuva"} ,{name:"Luxembourg","data-alternative-spellings":"LU"} ,{name:"Macao","data-alternative-spellings":"MO"} ,{name:"Macedonia,The Former Yugoslav Republic Of","data-alternative-spellings":"MK Македонија"} ,{name:"Madagascar","data-alternative-spellings":"MG Madagasikara"} ,{name:"Malawi","data-alternative-spellings":"MW"} 
,{name:"Malaysia","data-alternative-spellings":"MY"} ,{name:"Maldives","data-alternative-spellings":"MV"} ,{name:"Mali","data-alternative-spellings":"ML"} ,{name:"Malta","data-alternative-spellings":"MT"} ,{name:"Marshall Islands","data-alternative-spellings":"MH","data-relevancy-booster":"0.5"} ,{name:"Martinique","data-alternative-spellings":"MQ"} ,{name:"Mauritania","data-alternative-spellings":"MR الموريتانية"} ,{name:"Mauritius","data-alternative-spellings":"MU"} ,{name:"Mayotte","data-alternative-spellings":"YT"} ,{name:"Mexico","data-alternative-spellings":"MX Mexicanos","data-relevancy-booster":"1.5"} ,{name:"Micronesia,Federated States of","data-alternative-spellings":"FM"} ,{name:"Moldova,Republic of","data-alternative-spellings":"MD"} ,{name:"Monaco","data-alternative-spellings":"MC"} ,{name:"Mongolia","data-alternative-spellings":"MN Mongγol ulus Монгол улс"} ,{name:"Montenegro","data-alternative-spellings":"ME"} ,{name:"Montserrat","data-alternative-spellings":"MS","data-relevancy-booster":"0.5"} ,{name:"Morocco","data-alternative-spellings":"MA المغرب"} ,{name:"Mozambique","data-alternative-spellings":"MZ Moçambique"} ,{name:"Myanmar","data-alternative-spellings":"MM"} ,{name:"Namibia","data-alternative-spellings":"NA Namibië"} ,{name:"Nauru","data-alternative-spellings":"NR Naoero","data-relevancy-booster":"0.5"} ,{name:"Nepal","data-alternative-spellings":"NP नेपाल"} ,{name:"Netherlands","data-alternative-spellings":"NL Holland Nederland","data-relevancy-booster":"1.5"} ,{name:"New Caledonia","data-alternative-spellings":"NC","data-relevancy-booster":"0.5"} ,{name:"New Zealand","data-alternative-spellings":"NZ Aotearoa"} ,{name:"Nicaragua","data-alternative-spellings":"NI"} ,{name:"Niger","data-alternative-spellings":"NE Nijar"} ,{name:"Nigeria","data-alternative-spellings":"NG Nijeriya Naíjíríà","data-relevancy-booster":"1.5"} ,{name:"Niue","data-alternative-spellings":"NU","data-relevancy-booster":"0.5"} ,{name:"Norfolk 
Island","data-alternative-spellings":"NF","data-relevancy-booster":"0.5"} ,{name:"Northern Mariana Islands","data-alternative-spellings":"MP","data-relevancy-booster":"0.5"} ,{name:"Norway","data-alternative-spellings":"NO Norge Noreg","data-relevancy-booster":"1.5"} ,{name:"Oman","data-alternative-spellings":"OM عمان"} ,{name:"Pakistan","data-alternative-spellings":"PK پاکستان","data-relevancy-booster":"2"} ,{name:"Palau","data-alternative-spellings":"PW","data-relevancy-booster":"0.5"} ,{name:"Palestinian Territory,Occupied","data-alternative-spellings":"PS فلسطين"} ,{name:"Panama","data-alternative-spellings":"PA"} ,{name:"Papua New Guinea","data-alternative-spellings":"PG"} ,{name:"Paraguay","data-alternative-spellings":"PY"} ,{name:"Peru","data-alternative-spellings":"PE"} ,{name:"Philippines","data-alternative-spellings":"PH Pilipinas","data-relevancy-booster":"1.5"} ,{name:"Pitcairn","data-alternative-spellings":"PN","data-relevancy-booster":"0.5"} ,{name:"Poland","data-alternative-spellings":"PL Polska","data-relevancy-booster":"1.25"} ,{name:"Portugal","data-alternative-spellings":"PT Portuguesa","data-relevancy-booster":"1.5"} ,{name:"Puerto Rico","data-alternative-spellings":"PR"} ,{name:"Qatar","data-alternative-spellings":"QA قطر"} ,{name:"Réunion","data-alternative-spellings":"RE Reunion"} ,{name:"Romania","data-alternative-spellings":"RO Rumania Roumania România"} ,{name:"Russian Federation","data-alternative-spellings":"RU Rossiya Российская Россия","data-relevancy-booster":"2.5"} ,{name:"Rwanda","data-alternative-spellings":"RW"} ,{name:"Saint Barthélemy","data-alternative-spellings":"BL St. 
Barthelemy"} ,{name:"Saint Helena","data-alternative-spellings":"SH St."} ,{name:"Saint Kitts and Nevis","data-alternative-spellings":"KN St."} ,{name:"Saint Lucia","data-alternative-spellings":"LC St."} ,{name:"Saint Martin (French Part)","data-alternative-spellings":"MF St."} ,{name:"Saint Pierre and Miquelon","data-alternative-spellings":"PM St."} ,{name:"Saint Vincent and the Grenadines","data-alternative-spellings":"VC St."} ,{name:"Samoa","data-alternative-spellings":"WS"} ,{name:"San Marino","data-alternative-spellings":"SM"} ,{name:"Sao Tome and Principe","data-alternative-spellings":"ST"} ,{name:"Saudi Arabia","data-alternative-spellings":"SA السعودية"} ,{name:"Senegal","data-alternative-spellings":"SN Sénégal"} ,{name:"Serbia","data-alternative-spellings":"RS Србија Srbija"} ,{name:"Seychelles","data-alternative-spellings":"SC","data-relevancy-booster":"0.5"} ,{name:"Sierra Leone","data-alternative-spellings":"SL"} ,{name:"Singapore","data-alternative-spellings":"SG Singapura சிங்கப்பூர் குடியரசு 新加坡共和国"} ,{name:"Sint Maarten (Dutch Part)","data-alternative-spellings":"SX"} ,{name:"Slovakia","data-alternative-spellings":"SK Slovenská Slovensko"} ,{name:"Slovenia","data-alternative-spellings":"SI Slovenija"} ,{name:"Solomon Islands","data-alternative-spellings":"SB"} ,{name:"Somalia","data-alternative-spellings":"SO الصومال"} ,{name:"South Africa","data-alternative-spellings":"ZA RSA Suid-Afrika"} ,{name:"South Georgia and the South Sandwich Islands","data-alternative-spellings":"GS"} ,{name:"South Sudan","data-alternative-spellings":"SS"} ,{name:"Spain","data-alternative-spellings":"ES España","data-relevancy-booster":"2"} ,{name:"Sri Lanka","data-alternative-spellings":"LK ශ්‍රී ලංකා இலங்கை Ceylon"} ,{name:"Sudan","data-alternative-spellings":"SD السودان"} ,{name:"Suriname","data-alternative-spellings":"SR शर्नम् Sarnam Sranangron"} ,{name:"Svalbard and Jan Mayen","data-alternative-spellings":"SJ","data-relevancy-booster":"0.5"} 
,{name:"Swaziland","data-alternative-spellings":"SZ weSwatini Swatini Ngwane"} ,{name:"Sweden","data-alternative-spellings":"SE Sverige","data-relevancy-booster":"1.5"} ,{name:"Switzerland","data-alternative-spellings":"CH Swiss Confederation Schweiz Suisse Svizzera Svizra","data-relevancy-booster":"1.5"} ,{name:"Syrian Arab Republic","data-alternative-spellings":"SY Syria سورية"} ,{name:"Taiwan,Province of China","data-alternative-spellings":"TW 台灣 臺灣"} ,{name:"Tajikistan","data-alternative-spellings":"TJ Тоҷикистон Toçikiston"} ,{name:"Tanzania,United Republic of","data-alternative-spellings":"TZ"} ,{name:"Thailand","data-alternative-spellings":"TH ประเทศไทย Prathet Thai"} ,{name:"Timor-Leste","data-alternative-spellings":"TL"} ,{name:"Togo","data-alternative-spellings":"TG Togolese"} ,{name:"Tokelau","data-alternative-spellings":"TK","data-relevancy-booster":"0.5"} ,{name:"Tonga","data-alternative-spellings":"TO"} ,{name:"Trinidad and Tobago","data-alternative-spellings":"TT"} ,{name:"Tunisia","data-alternative-spellings":"TN تونس"} ,{name:"Turkey","data-alternative-spellings":"TR Türkiye Turkiye"} ,{name:"Turkmenistan","data-alternative-spellings":"TM Türkmenistan"} ,{name:"Turks and Caicos Islands","data-alternative-spellings":"TC","data-relevancy-booster":"0.5"} ,{name:"Tuvalu","data-alternative-spellings":"TV","data-relevancy-booster":"0.5"} ,{name:"Uganda","data-alternative-spellings":"UG"} ,{name:"Ukraine","data-alternative-spellings":"UA Ukrayina Україна"} ,{name:"United Arab Emirates","data-alternative-spellings":"AE UAE الإمارات"} ,{name:"United Kingdom","data-alternative-spellings":"GB Great Britain England UK Wales Scotland Northern Ireland","data-relevancy-booster":"2.5"} ,{name:"United States","data-relevancy-booster":"3.5","data-alternative-spellings":"US USA United States of America"} ,{name:"United States Minor Outlying Islands","data-alternative-spellings":"UM"} ,{name:"Uruguay","data-alternative-spellings":"UY"} 
,{name:"Uzbekistan","data-alternative-spellings":"UZ Ўзбекистон O'zbekstan O‘zbekiston"} ,{name:"Vanuatu","data-alternative-spellings":"VU"} ,{name:"Venezuela","data-alternative-spellings":"VE"} ,{name:"Vietnam","data-alternative-spellings":"VN Việt Nam","data-relevancy-booster":"1.5"} ,{name:"Virgin Islands,British","data-alternative-spellings":"VG","data-relevancy-booster":"0.5"} ,{name:"Virgin Islands,U.S.","data-alternative-spellings":"VI","data-relevancy-booster":"0.5"} ,{name:"Wallis and Futuna","data-alternative-spellings":"WF","data-relevancy-booster":"0.5"} ,{name:"Western Sahara","data-alternative-spellings":"EH لصحراء الغربية"} ,{name:"Yemen","data-alternative-spellings":"YE اليمن"} ,{name:"Zambia","data-alternative-spellings":"ZM"} ,{name:"Zimbabwe","data-alternative-spellings":"ZW"} ]; var html = ""; // Provide a "Select Country" leader element, but only if there is no pre-selected item. // This is to prevent users who have previously selected a country from setting an empty country. if( !selectedElementName || selectedElementName.length < 1) html = '<option value="" selected="selected">Select Country</option>\n'; json.forEach(function(element, index, array){ var str = '<option value="' + element.name+'"'; if( element.name == selectedElementName ) str += " selected "; var helper=function(field){ if( typeof element[field] != "string" ) return(""); if( element[field].length == 0 ) return(""); return(" "+field+'="'+element[field]+'" '); } str += helper("data-alternative-spellings"); str += helper("data-relevancy-booster"); str += ">"+element.name+"</option>\n"; html += str; }) return(html); }
{ cmds[group] = {}; cmds[group].items = [ ] }
conditional_block
page.js
/** Rendering Pages ================ SIMPLE TEXT -------------- To put simple text or HTML to a screen: res.render( { body_text: 'Hello <b>world</b>' } ); TEMPLATE --------- To put a specific template (like a form) with variables: res.render( 'inbox/form.html', { username: name, phone: phone } ); STATUS MESSAGES ----------------- To ouput specific status messages: var page = safeharbor.page; res.outputMessage( page.MESSAGE_LEVELS.warning, 'Warning Title', 'Some warning text goes here' ); You can have multiple of these output message on the same page. Later, you can then call res.render() as above. This allows for the following scenario: if( onSubmit && (some_error == true) ) { // on submit there was some error res.outputMessage( page.MESSAGE_LEVELS.error, 'Try again', 'Some error text goes here' ); } // on first time render OR error re-submit: res.render( 'inbox/form.html', postargs ); STATUS (ONLY) PAGES --------------------- If all you want to output is the message (no template): res.outputMessage( page.MESSAGE_LEVELS.success, 'Wonderful', 'Some happy text goes here' ); res.render( page.MESSAGE_VIEW, { pageTitle: 'Happy Joy' } ); AJAX-STYLE HTML SNIPPETS -------------------------- To return a snippet of HTML (either as 'body_text', message or template) use the same techniques as above but the layout option: res.render( { layout: page.SNIPPET, body_text: 'This text is for embedding' } ); Also works for templates: res.render( 'profile/acct_email.html', // <-- template for embedded editing { layout: page.SNIPPET } ) @module lib @submodule page **/ var loginstate = require('./loginstate.js'), ROLES = require('./roles.js'), utils = require('./utils.js'); exports.MESSAGE_VIEW = // alias for... 
exports.BODY_TEXT_VIEW = 'shared/body_text.html'; exports.DEFAULT_LAYOUT = 'shared/main.html'; exports.SNIPPET = 'shared/ajax_html.html'; var MESSAGE_LEVELS = exports.MESSAGE_LEVELS = { info: 'info', success: 'success', warning: 'warning', danger: 'danger', // app error error: 'error' // sys error }; function buildMenu( req, res ) { var cmds = {}; function cmd( group, url, link, help ) { this.url = url; this.link = link; this.help = help; if(!cmds[group]) { cmds[group] = {}; cmds[group].items = [ ] }; cmds[group].items.push(this); } var user = loginstate.getUser(req); new cmd('safeharbor', '/about', 'About', 'Learn about Safe Harbor'); new cmd('safeharbor', '/learn', 'Learn', 'Learn about your rights and the DMCA'); new cmd('safeharbor', '/support', 'Support', 'Ask us stuff'); if( user ) { new cmd('user', '/dash', 'Dashboard', 'Manage your disputes' ); // new cmd('user', '/passwordreset', 'Password reset', 'Change your password'); new cmd('user', '/account', 'Account settings', 'Change your email and other settings'); new cmd('user', '/accountdeleter', 'Delete your account', 'hrumph'); new cmd('user', '/logout', 'Log out', 'bye for now' ); new cmd('site', '/siteeditor','Edit your site properties'); new cmd('tablinks', '/disputes', 'Past Disputes', 'Your dispute history' ); new cmd('tablinks', '/form', 'Future Disputes', 'Your dispute future' ); var r = user.role>>>0; // WTF? 
if( r <= ROLES.admin ) { new cmd('admin', '/admin', 'Admin stuff', '' ); if( r == ROLES.developer ) { new cmd('developer', '/dev', 'Developer stuff', '' ); } } } else { new cmd( 'user', '/login', 'Login', 'For existing accounts' ); new cmd( 'user', '/reg', 'Register', 'For creating new accounts' ); new cmd( 'user', '/lostpassword', 'Lost password', 'For existing, if forgetful accounts'); } return cmds; } exports.Message = function( msgLevel, msgTitle, text, opts ) { utils.copy( this, opts || {}); this.level = msgLevel; this.title = msgTitle; this.text = text; if( !this.status ) { switch( this.level ) { case MESSAGE_LEVELS.info: case MESSAGE_LEVELS.success: this.status = 'ok'; break; case MESSAGE_LEVELS.warning: case MESSAGE_LEVELS.danger: case MESSAGE_LEVELS.error: this.status = '??'; // TODO fill these info break; } } } exports.setup = function(app) { var Handlebars = require('handlebars'); Handlebars.registerHelper('loggedInStatusClass', function() { var isLoggedIn = loginstate.isLoggedIn(); if( isLoggedIn ) return('loggedin'); // that's a CSS selector name else return('loggedout'); }); Handlebars.registerHelper('contextDumper', function(a) { // I haven't figured out if this context blob // is a copy or an actual instance of something // important and shared, so we remove the 'app' // thingy so the dump is managable... var app = a.app; a.app = null; var text = require('util').inspect(a,true,null); // ...and then restore it just in case someone // else was using it a.app = app; return text; }); app.register('.html', Handlebars); app.set('view engine', 'handlebars'); app.dynamicHelpers( { // these will all be passed to every page... 
user: function( req, res ) { var u = loginstate.getUser(req); if( u && u.password ) u.password = '****'; return u; }, isLoggedIn: function( req, res ) { return !!loginstate.getUser(req); }, isAdmin: function( req, res ) { var u = loginstate.getUser(req); return u && (u.role>>>0 <= ROLES.admin>>>0); }, menu: buildMenu, // we should consider not outputting this on Ajax messages: function( req, res ) { return res.sh_output_messages || [ ] } } ); app.use( function setupPage(req,res,next) { /** Override of the express.response.render method in order put our application specific standard templates into the call stream. @method render @for Response @param {string} view Path to template file (relative to './view') @param {Object} opts Can include things like body_text, pageTitle **/ var oldRender = res.render; res.render = function(view, opts, fn, parent, sub ) { if( typeof view != 'string' ) { opts = view; view = exports.BODY_TEXT_VIEW; } if( view == exports.BODY_TEXT_VIEW ) { if( !opts.body_text ) opts.body_text = ''; } if( !opts.layout ) { opts.layout = exports.DEFAULT_LAYOUT; } if( !opts.bodyClass ) { try { opts.bodyClass = view.match(/([a-z0-9]+)\/[^\/]+$/)[1]; } catch( e ) { } } res.render = oldRender; return res.render(view, opts, fn, parent, sub ); } if( !res.outputMessage ) { /** Call this to setup a message to be ouput during the res.render() call. 
@method outputMessage @for Response @param {MESSAGE_LEVELS} msgLevel @param {STRING} msgTitle @param {STRING} text @param {Object} [opts] **/ res.outputMessage = function( msgLevel, msgTitle, text, opts ) { if( !res.sh_output_messages ) res.sh_output_messages = [ ]; res.sh_output_messages.push( new exports.Message(msgLevel,msgTitle,text,opts) ); return res; } } next(); }); } exports.countryList = function(selectedElementName){ var json = [ {name:"Afghanistan","data-alternative-spellings":"AF افغانستان"} ,{name:"Åland Islands","data-alternative-spellings":"AX Aaland Aland","data-relevancy-booster":"0.5"} ,{name:"Albania","data-alternative-spellings":"AL"} ,{name:"Algeria","data-alternative-spellings":"DZ الجزائر"} ,{name:"American Samoa","data-alternative-spellings":"AS","data-relevancy-booster":"0.5"} ,{name:"Andorra","data-alternative-spellings":"AD","data-relevancy-booster":"0.5"} ,{name:"Angola","data-alternative-spellings":"AO"} ,{name:"Anguilla","data-alternative-spellings":"AI","data-relevancy-booster":"0.5"} ,{name:"Antarctica","data-alternative-spellings":"AQ","data-relevancy-booster":"0.5"} ,{name:"Antigua And Barbuda","data-alternative-spellings":"AG","data-relevancy-booster":"0.5"} ,{name:"Argentina","data-alternative-spellings":"AR"} ,{name:"Armenia","data-alternative-spellings":"AM Հայաստան"} ,{name:"Aruba","data-alternative-spellings":"AW","data-relevancy-booster":"0.5"} ,{name:"Australia","data-alternative-spellings":"AU","data-relevancy-booster":"1.5"} ,{name:"Austria","data-alternative-spellings":"AT Österreich Osterreich Oesterreich "} ,{name:"Azerbaijan","data-alternative-spellings":"AZ"} ,{name:"Bahamas","data-alternative-spellings":"BS"} ,{name:"Bahrain","data-alternative-spellings":"BH البحرين"} ,{name:"Bangladesh","data-alternative-spellings":"BD বাংলাদেশ","data-relevancy-booster":"2"} ,{name:"Barbados","data-alternative-spellings":"BB"} ,{name:"Belarus","data-alternative-spellings":"BY Беларусь"} 
,{name:"Belgium","data-alternative-spellings":"BE België Belgie Belgien Belgique","data-relevancy-booster":"1.5"} ,{name:"Belize","data-alternative-spellings":"BZ"} ,{name:"Benin","data-alternative-spellings":"BJ"} ,{name:"Bermuda","data-alternative-spellings":"BM","data-relevancy-booster":"0.5"} ,{name:"Bhutan","data-alternative-spellings":"BT भूटान"} ,{name:"Bolivia","data-alternative-spellings":"BO"} ,{name:"Bonaire,Sint Eustatius and Saba","data-alternative-spellings":"BQ"} ,{name:"Bosnia and Herzegovina","data-alternative-spellings":"BA Босна и Херцеговина"} ,{name:"Botswana","data-alternative-spellings":"BW"} ,{name:"Bouvet Island","data-alternative-spellings":"BV"} ,{name:"Brazil","data-alternative-spellings":"BR Brasil","data-relevancy-booster":"2"} ,{name:"British Indian Ocean Territory","data-alternative-spellings":"IO"} ,{name:"Brunei Darussalam","data-alternative-spellings":"BN"} ,{name:"Bulgaria","data-alternative-spellings":"BG България"} ,{name:"Burkina Faso","data-alternative-spellings":"BF"} ,{name:"Burundi","data-alternative-spellings":"BI"} ,{name:"Cambodia","data-alternative-spellings":"KH កម្ពុជា"} ,{name:"Cameroon","data-alternative-spellings":"CM"} ,{name:"Canada","data-alternative-spellings":"CA","data-relevancy-booster":"2"} ,{name:"Cape Verde","data-alternative-spellings":"CV Cabo"} ,{name:"Cayman Islands","data-alternative-spellings":"KY","data-relevancy-booster":"0.5"} ,{name:"Central African Republic","data-alternative-spellings":"CF"} ,{name:"Chad","data-alternative-spellings":"TD تشاد‎ Tchad"} ,{name:"Chile","data-alternative-spellings":"CL"} ,{name:"China","data-relevancy-booster":"3.5","data-alternative-spellings":"CN Zhongguo Zhonghua Peoples Republic 中国/中华"} ,{name:"Christmas Island","data-alternative-spellings":"CX","data-relevancy-booster":"0.5"} ,{name:"Cocos (Keeling) Islands","data-alternative-spellings":"CC","data-relevancy-booster":"0.5"} ,{name:"Colombia","data-alternative-spellings":"CO"} 
,{name:"Comoros","data-alternative-spellings":"KM جزر القمر"} ,{name:"Congo","data-alternative-spellings":"CG"} ,{name:"Congo,the Democratic Republic of the","data-alternative-spellings":"CD Congo-Brazzaville Repubilika ya Kongo"} ,{name:"Cook Islands","data-alternative-spellings":"CK","data-relevancy-booster":"0.5"} ,{name:"Costa Rica","data-alternative-spellings":"CR"} ,{name:"Côte d'Ivoire","data-alternative-spellings":"CI Cote dIvoire"} ,{name:"Croatia","data-alternative-spellings":"HR Hrvatska"} ,{name:"Cuba","data-alternative-spellings":"CU"} ,{name:"Curaçao","data-alternative-spellings":"CW Curacao"} ,{name:"Cyprus","data-alternative-spellings":"CY Κύπρος Kýpros Kıbrıs"} ,{name:"Czech Republic","data-alternative-spellings":"CZ Česká Ceska"} ,{name:"Denmark","data-alternative-spellings":"DK Danmark","data-relevancy-booster":"1.5"} ,{name:"Djibouti","data-alternative-spellings":"DJ جيبوتي‎ Jabuuti Gabuuti"} ,{name:"Dominica","data-alternative-spellings":"DM Dominique","data-relevancy-booster":"0.5"} ,{name:"Dominican Republic","data-alternative-spellings":"DO"} ,{name:"Ecuador","data-alternative-spellings":"EC"} ,{name:"Egypt","data-alternative-spellings":"EG","data-relevancy-booster":"1.5"} ,{name:"El Salvador","data-alternative-spellings":"SV"} ,{name:"Equatorial Guinea","data-alternative-spellings":"GQ"} ,{name:"Eritrea","data-alternative-spellings":"ER إرتريا ኤርትራ"} ,{name:"Estonia","data-alternative-spellings":"EE Eesti"} ,{name:"Ethiopia","data-alternative-spellings":"ET ኢትዮጵያ"} ,{name:"Falkland Islands (Malvinas)","data-alternative-spellings":"FK","data-relevancy-booster":"0.5"} ,{name:"Faroe Islands","data-alternative-spellings":"FO Føroyar Færøerne","data-relevancy-booster":"0.5"} ,{name:"Fiji","data-alternative-spellings":"FJ Viti फ़िजी"} ,{name:"Finland","data-alternative-spellings":"FI Suomi"} ,{name:"France","data-alternative-spellings":"FR République française","data-relevancy-booster":"2.5"} ,{name:"French 
Guiana","data-alternative-spellings":"GF"} ,{name:"French Polynesia","data-alternative-spellings":"PF Polynésie française"} ,{name:"French Southern Territories","data-alternative-spellings":"TF"} ,{name:"Gabon","data-alternative-spellings":"GA République Gabonaise"} ,{name:"Gambia","data-alternative-spellings":"GM"} ,{name:"Georgia","data-alternative-spellings":"GE საქართველო"} ,{name:"Germany","data-alternative-spellings":"DE Bundesrepublik Deutschland","data-relevancy-booster":"3"} ,{name:"Ghana","data-alternative-spellings":"GH"} ,{name:"Gibraltar","data-alternative-spellings":"GI","data-relevancy-booster":"0.5"} ,{name:"Greece","data-alternative-spellings":"GR Ελλάδα","data-relevancy-booster":"1.5"} ,{name:"Greenland","data-alternative-spellings":"GL grønland","data-relevancy-booster":"0.5"} ,{name:"Grenada","data-alternative-spellings":"GD"} ,{name:"Guadeloupe","data-alternative-spellings":"GP"} ,{name:"Guam","data-alternative-spellings":"GU"} ,{name:"Guatemala","data-alternative-spellings":"GT"} ,{name:"Guernsey","data-alternative-spellings":"GG","data-relevancy-booster":"0.5"} ,{name:"Guinea","data-alternative-spellings":"GN"} ,{name:"Guinea-Bissau","data-alternative-spellings":"GW"} ,{name:"Guyana","data-alternative-spellings":"GY"} ,{name:"Haiti","data-alternative-spellings":"HT"} ,{name:"Heard Island and McDonald Islands","data-alternative-spellings":"HM"} ,{name:"Holy See (Vatican City State)","data-alternative-spellings":"VA","data-relevancy-booster":"0.5"} ,{name:"Honduras","data-alternative-spellings":"HN"} ,{name:"Hong Kong","data-alternative-spellings":"HK 香港"} ,{name:"Hungary","data-alternative-spellings":"HU Magyarország"} ,{name:"Iceland","data-alternative-spellings":"IS Island"} ,{name:"India","data-alternative-spellings":"IN भारत गणराज्य Hindustan","data-relevancy-booster":"3"} ,{name:"Indonesia","data-alternative-spellings":"ID","data-relevancy-booster":"2"} ,{name:"Iran,Islamic Republic of","data-alternative-spellings":"IR ایران"} 
,{name:"Iraq","data-alternative-spellings":"IQ العراق‎"} ,{name:"Ireland","data-alternative-spellings":"IE Éire","data-relevancy-booster":"1.2"} ,{name:"Isle of Man","data-alternative-spellings":"IM","data-relevancy-booster":"0.5"} ,{name:"Israel","data-alternative-spellings":"IL إسرائيل ישראל"} ,{name:"Italy","data-alternative-spellings":"IT Italia","data-relevancy-booster":"2"} ,{name:"Jamaica","data-alternative-spellings":"JM"} ,{name:"Japan","data-alternative-spellings":"JP Nippon Nihon 日本","data-relevancy-booster":"2.5"} ,{name:"Jersey","data-alternative-spellings":"JE","data-relevancy-booster":"0.5"} ,{name:"Jordan","data-alternative-spellings":"JO الأردن"} ,{name:"Kazakhstan","data-alternative-spellings":"KZ Қазақстан Казахстан"} ,{name:"Kenya","data-alternative-spellings":"KE"} ,{name:"Kiribati","data-alternative-spellings":"KI"} ,{name:"Korea,Democratic People's Republic of","data-alternative-spellings":"KP North Korea"} ,{name:"Korea,Republic of","data-alternative-spellings":"KR South Korea","data-relevancy-booster":"1.5"} ,{name:"Kuwait","data-alternative-spellings":"KW الكويت"} ,{name:"Kyrgyzstan","data-alternative-spellings":"KG Кыргызстан"} ,{name:"Lao People's Democratic Republic","data-alternative-spellings":"LA"} ,{name:"Latvia","data-alternative-spellings":"LV Latvija"} ,{name:"Lebanon","data-alternative-spellings":"LB لبنان"} ,{name:"Lesotho","data-alternative-spellings":"LS"} ,{name:"Liberia","data-alternative-spellings":"LR"} ,{name:"Libyan Arab Jamahiriya","data-alternative-spellings":"LY ليبيا"} ,{name:"Liechtenstein","data-alternative-spellings":"LI"} ,{name:"Lithuania","data-alternative-spellings":"LT Lietuva"} ,{name:"Luxembourg","data-alternative-spellings":"LU"} ,{name:"Macao","data-alternative-spellings":"MO"} ,{name:"Macedonia,The Former Yugoslav Republic Of","data-alternative-spellings":"MK Македонија"} ,{name:"Madagascar","data-alternative-spellings":"MG Madagasikara"} ,{name:"Malawi","data-alternative-spellings":"MW"} 
,{name:"Malaysia","data-alternative-spellings":"MY"} ,{name:"Maldives","data-alternative-spellings":"MV"} ,{name:"Mali","data-alternative-spellings":"ML"} ,{name:"Malta","data-alternative-spellings":"MT"} ,{name:"Marshall Islands","data-alternative-spellings":"MH","data-relevancy-booster":"0.5"} ,{name:"Martinique","data-alternative-spellings":"MQ"} ,{name:"Mauritania","data-alternative-spellings":"MR الموريتانية"} ,{name:"Mauritius","data-alternative-spellings":"MU"} ,{name:"Mayotte","data-alternative-spellings":"YT"} ,{name:"Mexico","data-alternative-spellings":"MX Mexicanos","data-relevancy-booster":"1.5"} ,{name:"Micronesia,Federated States of","data-alternative-spellings":"FM"} ,{name:"Moldova,Republic of","data-alternative-spellings":"MD"} ,{name:"Monaco","data-alternative-spellings":"MC"} ,{name:"Mongolia","data-alternative-spellings":"MN Mongγol ulus Монгол улс"} ,{name:"Montenegro","data-alternative-spellings":"ME"} ,{name:"Montserrat","data-alternative-spellings":"MS","data-relevancy-booster":"0.5"} ,{name:"Morocco","data-alternative-spellings":"MA المغرب"} ,{name:"Mozambique","data-alternative-spellings":"MZ Moçambique"} ,{name:"Myanmar","data-alternative-spellings":"MM"} ,{name:"Namibia","data-alternative-spellings":"NA Namibië"} ,{name:"Nauru","data-alternative-spellings":"NR Naoero","data-relevancy-booster":"0.5"} ,{name:"Nepal","data-alternative-spellings":"NP नेपाल"} ,{name:"Netherlands","data-alternative-spellings":"NL Holland Nederland","data-relevancy-booster":"1.5"} ,{name:"New Caledonia","data-alternative-spellings":"NC","data-relevancy-booster":"0.5"} ,{name:"New Zealand","data-alternative-spellings":"NZ Aotearoa"} ,{name:"Nicaragua","data-alternative-spellings":"NI"} ,{name:"Niger","data-alternative-spellings":"NE Nijar"} ,{name:"Nigeria","data-alternative-spellings":"NG Nijeriya Naíjíríà","data-relevancy-booster":"1.5"} ,{name:"Niue","data-alternative-spellings":"NU","data-relevancy-booster":"0.5"} ,{name:"Norfolk 
Island","data-alternative-spellings":"NF","data-relevancy-booster":"0.5"} ,{name:"Northern Mariana Islands","data-alternative-spellings":"MP","data-relevancy-booster":"0.5"} ,{name:"Norway","data-alternative-spellings":"NO Norge Noreg","data-relevancy-booster":"1.5"} ,{name:"Oman","data-alternative-spellings":"OM عمان"} ,{name:"Pakistan","data-alternative-spellings":"PK پاکستان","data-relevancy-booster":"2"} ,{name:"Palau","data-alternative-spellings":"PW","data-relevancy-booster":"0.5"} ,{name:"Palestinian Territory,Occupied","data-alternative-spellings":"PS فلسطين"} ,{name:"Panama","data-alternative-spellings":"PA"} ,{name:"Papua New Guinea","data-alternative-spellings":"PG"} ,{name:"Paraguay","data-alternative-spellings":"PY"} ,{name:"Peru","data-alternative-spellings":"PE"} ,{name:"Philippines","data-alternative-spellings":"PH Pilipinas","data-relevancy-booster":"1.5"} ,{name:"Pitcairn","data-alternative-spellings":"PN","data-relevancy-booster":"0.5"} ,{name:"Poland","data-alternative-spellings":"PL Polska","data-relevancy-booster":"1.25"} ,{name:"Portugal","data-alternative-spellings":"PT Portuguesa","data-relevancy-booster":"1.5"} ,{name:"Puerto Rico","data-alternative-spellings":"PR"} ,{name:"Qatar","data-alternative-spellings":"QA قطر"} ,{name:"Réunion","data-alternative-spellings":"RE Reunion"} ,{name:"Romania","data-alternative-spellings":"RO Rumania Roumania România"} ,{name:"Russian Federation","data-alternative-spellings":"RU Rossiya Российская Россия","data-relevancy-booster":"2.5"} ,{name:"Rwanda","data-alternative-spellings":"RW"} ,{name:"Saint Barthélemy","data-alternative-spellings":"BL St. 
Barthelemy"} ,{name:"Saint Helena","data-alternative-spellings":"SH St."} ,{name:"Saint Kitts and Nevis","data-alternative-spellings":"KN St."} ,{name:"Saint Lucia","data-alternative-spellings":"LC St."} ,{name:"Saint Martin (French Part)","data-alternative-spellings":"MF St."} ,{name:"Saint Pierre and Miquelon","data-alternative-spellings":"PM St."} ,{name:"Saint Vincent and the Grenadines","data-alternative-spellings":"VC St."} ,{name:"Samoa","data-alternative-spellings":"WS"} ,{name:"San Marino","data-alternative-spellings":"SM"} ,{name:"Sao Tome and Principe","data-alternative-spellings":"ST"} ,{name:"Saudi Arabia","data-alternative-spellings":"SA السعودية"} ,{name:"Senegal","data-alternative-spellings":"SN Sénégal"} ,{name:"Serbia","data-alternative-spellings":"RS Србија Srbija"} ,{name:"Seychelles","data-alternative-spellings":"SC","data-relevancy-booster":"0.5"} ,{name:"Sierra Leone","data-alternative-spellings":"SL"} ,{name:"Singapore","data-alternative-spellings":"SG Singapura சிங்கப்பூர் குடியரசு 新加坡共和国"} ,{name:"Sint Maarten (Dutch Part)","data-alternative-spellings":"SX"} ,{name:"Slovakia","data-alternative-spellings":"SK Slovenská Slovensko"} ,{name:"Slovenia","data-alternative-spellings":"SI Slovenija"} ,{name:"Solomon Islands","data-alternative-spellings":"SB"} ,{name:"Somalia","data-alternative-spellings":"SO الصومال"} ,{name:"South Africa","data-alternative-spellings":"ZA RSA Suid-Afrika"} ,{name:"South Georgia and the South Sandwich Islands","data-alternative-spellings":"GS"} ,{name:"South Sudan","data-alternative-spellings":"SS"} ,{name:"Spain","data-alternative-spellings":"ES España","data-relevancy-booster":"2"} ,{name:"Sri Lanka","data-alternative-spellings":"LK ශ්‍රී ලංකා இலங்கை Ceylon"} ,{name:"Sudan","data-alternative-spellings":"SD السودان"} ,{name:"Suriname","data-alternative-spellings":"SR शर्नम् Sarnam Sranangron"} ,{name:"Svalbard and Jan Mayen","data-alternative-spellings":"SJ","data-relevancy-booster":"0.5"} 
,{name:"Swaziland","data-alternative-spellings":"SZ weSwatini Swatini Ngwane"} ,{name:"Sweden","data-alternative-spellings":"SE Sverige","data-relevancy-booster":"1.5"} ,{name:"Switzerland","data-alternative-spellings":"CH Swiss Confederation Schweiz Suisse Svizzera Svizra","data-relevancy-booster":"1.5"} ,{name:"Syrian Arab Republic","data-alternative-spellings":"SY Syria سورية"} ,{name:"Taiwan,Province of China","data-alternative-spellings":"TW 台灣 臺灣"} ,{name:"Tajikistan","data-alternative-spellings":"TJ Тоҷикистон Toçikiston"} ,{name:"Tanzania,United Republic of","data-alternative-spellings":"TZ"} ,{name:"Thailand","data-alternative-spellings":"TH ประเทศไทย Prathet Thai"} ,{name:"Timor-Leste","data-alternative-spellings":"TL"} ,{name:"Togo","data-alternative-spellings":"TG Togolese"} ,{name:"Tokelau","data-alternative-spellings":"TK","data-relevancy-booster":"0.5"} ,{name:"Tonga","data-alternative-spellings":"TO"} ,{name:"Trinidad and Tobago","data-alternative-spellings":"TT"} ,{name:"Tunisia","data-alternative-spellings":"TN تونس"} ,{name:"Turkey","data-alternative-spellings":"TR Türkiye Turkiye"} ,{name:"Turkmenistan","data-alternative-spellings":"TM Türkmenistan"} ,{name:"Turks and Caicos Islands","data-alternative-spellings":"TC","data-relevancy-booster":"0.5"} ,{name:"Tuvalu","data-alternative-spellings":"TV","data-relevancy-booster":"0.5"} ,{name:"Uganda","data-alternative-spellings":"UG"} ,{name:"Ukraine","data-alternative-spellings":"UA Ukrayina Україна"} ,{name:"United Arab Emirates","data-alternative-spellings":"AE UAE الإمارات"} ,{name:"United Kingdom","data-alternative-spellings":"GB Great Britain England UK Wales Scotland Northern Ireland","data-relevancy-booster":"2.5"} ,{name:"United States","data-relevancy-booster":"3.5","data-alternative-spellings":"US USA United States of America"} ,{name:"United States Minor Outlying Islands","data-alternative-spellings":"UM"} ,{name:"Uruguay","data-alternative-spellings":"UY"} 
,{name:"Uzbekistan","data-alternative-spellings":"UZ Ўзбекистон O'zbekstan O‘zbekiston"} ,{name:"Vanuatu","data-alternative-spellings":"VU"} ,{name:"Venezuela","data-alternative-spellings":"VE"} ,{name:"Vietnam","data-alternative-spellings":"VN Việt Nam","data-relevancy-booster":"1.5"} ,{name:"Virgin Islands,British","data-alternative-spellings":"VG","data-relevancy-booster":"0.5"} ,{name:"Virgin Islands,U.S.","data-alternative-spellings":"VI","data-relevancy-booster":"0.5"} ,{name:"Wallis and Futuna","data-alternative-spellings":"WF","data-relevancy-booster":"0.5"} ,{name:"Western Sahara","data-alternative-spellings":"EH لصحراء الغربية"} ,{name:"Yemen","data-alternative-spellings":"YE اليمن"} ,{name:"Zambia","data-alternative-spellings":"ZM"} ,{name:"Zimbabwe","data-alternative-spellings":"ZW"} ]; var html = ""; // Provide a "Select Country" leader element, but only if there is no pre-selected item. // This is to prevent users who have previously selected a country from setting an empty country. if( !selectedElementName || selectedElementName.length < 1) html = '<option value="" selected="selected">Select Country</option>\n'; json.forEach(function(element, index, array){ var str = '<option value="' + element.name+'"'; if( element.name == selectedElementName ) str += " selected "; var helper=function(field){ if( typeof element[field] != "string" ) return(""); if( element[field].length == 0 ) return("");
return(" "+field+'="'+element[field]+'" '); } str += helper("data-alternative-spellings"); str += helper("data-relevancy-booster"); str += ">"+element.name+"</option>\n"; html += str; }) return(html); }
random_line_split
page.js
/** Rendering Pages ================ SIMPLE TEXT -------------- To put simple text or HTML to a screen: res.render( { body_text: 'Hello <b>world</b>' } ); TEMPLATE --------- To put a specific template (like a form) with variables: res.render( 'inbox/form.html', { username: name, phone: phone } ); STATUS MESSAGES ----------------- To ouput specific status messages: var page = safeharbor.page; res.outputMessage( page.MESSAGE_LEVELS.warning, 'Warning Title', 'Some warning text goes here' ); You can have multiple of these output message on the same page. Later, you can then call res.render() as above. This allows for the following scenario: if( onSubmit && (some_error == true) ) { // on submit there was some error res.outputMessage( page.MESSAGE_LEVELS.error, 'Try again', 'Some error text goes here' ); } // on first time render OR error re-submit: res.render( 'inbox/form.html', postargs ); STATUS (ONLY) PAGES --------------------- If all you want to output is the message (no template): res.outputMessage( page.MESSAGE_LEVELS.success, 'Wonderful', 'Some happy text goes here' ); res.render( page.MESSAGE_VIEW, { pageTitle: 'Happy Joy' } ); AJAX-STYLE HTML SNIPPETS -------------------------- To return a snippet of HTML (either as 'body_text', message or template) use the same techniques as above but the layout option: res.render( { layout: page.SNIPPET, body_text: 'This text is for embedding' } ); Also works for templates: res.render( 'profile/acct_email.html', // <-- template for embedded editing { layout: page.SNIPPET } ) @module lib @submodule page **/ var loginstate = require('./loginstate.js'), ROLES = require('./roles.js'), utils = require('./utils.js'); exports.MESSAGE_VIEW = // alias for... 
exports.BODY_TEXT_VIEW = 'shared/body_text.html'; exports.DEFAULT_LAYOUT = 'shared/main.html'; exports.SNIPPET = 'shared/ajax_html.html'; var MESSAGE_LEVELS = exports.MESSAGE_LEVELS = { info: 'info', success: 'success', warning: 'warning', danger: 'danger', // app error error: 'error' // sys error }; function buildMenu( req, res ) { var cmds = {}; function cmd( group, url, link, help )
var user = loginstate.getUser(req); new cmd('safeharbor', '/about', 'About', 'Learn about Safe Harbor'); new cmd('safeharbor', '/learn', 'Learn', 'Learn about your rights and the DMCA'); new cmd('safeharbor', '/support', 'Support', 'Ask us stuff'); if( user ) { new cmd('user', '/dash', 'Dashboard', 'Manage your disputes' ); // new cmd('user', '/passwordreset', 'Password reset', 'Change your password'); new cmd('user', '/account', 'Account settings', 'Change your email and other settings'); new cmd('user', '/accountdeleter', 'Delete your account', 'hrumph'); new cmd('user', '/logout', 'Log out', 'bye for now' ); new cmd('site', '/siteeditor','Edit your site properties'); new cmd('tablinks', '/disputes', 'Past Disputes', 'Your dispute history' ); new cmd('tablinks', '/form', 'Future Disputes', 'Your dispute future' ); var r = user.role>>>0; // WTF? if( r <= ROLES.admin ) { new cmd('admin', '/admin', 'Admin stuff', '' ); if( r == ROLES.developer ) { new cmd('developer', '/dev', 'Developer stuff', '' ); } } } else { new cmd( 'user', '/login', 'Login', 'For existing accounts' ); new cmd( 'user', '/reg', 'Register', 'For creating new accounts' ); new cmd( 'user', '/lostpassword', 'Lost password', 'For existing, if forgetful accounts'); } return cmds; } exports.Message = function( msgLevel, msgTitle, text, opts ) { utils.copy( this, opts || {}); this.level = msgLevel; this.title = msgTitle; this.text = text; if( !this.status ) { switch( this.level ) { case MESSAGE_LEVELS.info: case MESSAGE_LEVELS.success: this.status = 'ok'; break; case MESSAGE_LEVELS.warning: case MESSAGE_LEVELS.danger: case MESSAGE_LEVELS.error: this.status = '??'; // TODO fill these info break; } } } exports.setup = function(app) { var Handlebars = require('handlebars'); Handlebars.registerHelper('loggedInStatusClass', function() { var isLoggedIn = loginstate.isLoggedIn(); if( isLoggedIn ) return('loggedin'); // that's a CSS selector name else return('loggedout'); }); 
Handlebars.registerHelper('contextDumper', function(a) { // I haven't figured out if this context blob // is a copy or an actual instance of something // important and shared, so we remove the 'app' // thingy so the dump is managable... var app = a.app; a.app = null; var text = require('util').inspect(a,true,null); // ...and then restore it just in case someone // else was using it a.app = app; return text; }); app.register('.html', Handlebars); app.set('view engine', 'handlebars'); app.dynamicHelpers( { // these will all be passed to every page... user: function( req, res ) { var u = loginstate.getUser(req); if( u && u.password ) u.password = '****'; return u; }, isLoggedIn: function( req, res ) { return !!loginstate.getUser(req); }, isAdmin: function( req, res ) { var u = loginstate.getUser(req); return u && (u.role>>>0 <= ROLES.admin>>>0); }, menu: buildMenu, // we should consider not outputting this on Ajax messages: function( req, res ) { return res.sh_output_messages || [ ] } } ); app.use( function setupPage(req,res,next) { /** Override of the express.response.render method in order put our application specific standard templates into the call stream. @method render @for Response @param {string} view Path to template file (relative to './view') @param {Object} opts Can include things like body_text, pageTitle **/ var oldRender = res.render; res.render = function(view, opts, fn, parent, sub ) { if( typeof view != 'string' ) { opts = view; view = exports.BODY_TEXT_VIEW; } if( view == exports.BODY_TEXT_VIEW ) { if( !opts.body_text ) opts.body_text = ''; } if( !opts.layout ) { opts.layout = exports.DEFAULT_LAYOUT; } if( !opts.bodyClass ) { try { opts.bodyClass = view.match(/([a-z0-9]+)\/[^\/]+$/)[1]; } catch( e ) { } } res.render = oldRender; return res.render(view, opts, fn, parent, sub ); } if( !res.outputMessage ) { /** Call this to setup a message to be ouput during the res.render() call. 
@method outputMessage @for Response @param {MESSAGE_LEVELS} msgLevel @param {STRING} msgTitle @param {STRING} text @param {Object} [opts] **/ res.outputMessage = function( msgLevel, msgTitle, text, opts ) { if( !res.sh_output_messages ) res.sh_output_messages = [ ]; res.sh_output_messages.push( new exports.Message(msgLevel,msgTitle,text,opts) ); return res; } } next(); }); } exports.countryList = function(selectedElementName){ var json = [ {name:"Afghanistan","data-alternative-spellings":"AF افغانستان"} ,{name:"Åland Islands","data-alternative-spellings":"AX Aaland Aland","data-relevancy-booster":"0.5"} ,{name:"Albania","data-alternative-spellings":"AL"} ,{name:"Algeria","data-alternative-spellings":"DZ الجزائر"} ,{name:"American Samoa","data-alternative-spellings":"AS","data-relevancy-booster":"0.5"} ,{name:"Andorra","data-alternative-spellings":"AD","data-relevancy-booster":"0.5"} ,{name:"Angola","data-alternative-spellings":"AO"} ,{name:"Anguilla","data-alternative-spellings":"AI","data-relevancy-booster":"0.5"} ,{name:"Antarctica","data-alternative-spellings":"AQ","data-relevancy-booster":"0.5"} ,{name:"Antigua And Barbuda","data-alternative-spellings":"AG","data-relevancy-booster":"0.5"} ,{name:"Argentina","data-alternative-spellings":"AR"} ,{name:"Armenia","data-alternative-spellings":"AM Հայաստան"} ,{name:"Aruba","data-alternative-spellings":"AW","data-relevancy-booster":"0.5"} ,{name:"Australia","data-alternative-spellings":"AU","data-relevancy-booster":"1.5"} ,{name:"Austria","data-alternative-spellings":"AT Österreich Osterreich Oesterreich "} ,{name:"Azerbaijan","data-alternative-spellings":"AZ"} ,{name:"Bahamas","data-alternative-spellings":"BS"} ,{name:"Bahrain","data-alternative-spellings":"BH البحرين"} ,{name:"Bangladesh","data-alternative-spellings":"BD বাংলাদেশ","data-relevancy-booster":"2"} ,{name:"Barbados","data-alternative-spellings":"BB"} ,{name:"Belarus","data-alternative-spellings":"BY Беларусь"} 
,{name:"Belgium","data-alternative-spellings":"BE België Belgie Belgien Belgique","data-relevancy-booster":"1.5"} ,{name:"Belize","data-alternative-spellings":"BZ"} ,{name:"Benin","data-alternative-spellings":"BJ"} ,{name:"Bermuda","data-alternative-spellings":"BM","data-relevancy-booster":"0.5"} ,{name:"Bhutan","data-alternative-spellings":"BT भूटान"} ,{name:"Bolivia","data-alternative-spellings":"BO"} ,{name:"Bonaire,Sint Eustatius and Saba","data-alternative-spellings":"BQ"} ,{name:"Bosnia and Herzegovina","data-alternative-spellings":"BA Босна и Херцеговина"} ,{name:"Botswana","data-alternative-spellings":"BW"} ,{name:"Bouvet Island","data-alternative-spellings":"BV"} ,{name:"Brazil","data-alternative-spellings":"BR Brasil","data-relevancy-booster":"2"} ,{name:"British Indian Ocean Territory","data-alternative-spellings":"IO"} ,{name:"Brunei Darussalam","data-alternative-spellings":"BN"} ,{name:"Bulgaria","data-alternative-spellings":"BG България"} ,{name:"Burkina Faso","data-alternative-spellings":"BF"} ,{name:"Burundi","data-alternative-spellings":"BI"} ,{name:"Cambodia","data-alternative-spellings":"KH កម្ពុជា"} ,{name:"Cameroon","data-alternative-spellings":"CM"} ,{name:"Canada","data-alternative-spellings":"CA","data-relevancy-booster":"2"} ,{name:"Cape Verde","data-alternative-spellings":"CV Cabo"} ,{name:"Cayman Islands","data-alternative-spellings":"KY","data-relevancy-booster":"0.5"} ,{name:"Central African Republic","data-alternative-spellings":"CF"} ,{name:"Chad","data-alternative-spellings":"TD تشاد‎ Tchad"} ,{name:"Chile","data-alternative-spellings":"CL"} ,{name:"China","data-relevancy-booster":"3.5","data-alternative-spellings":"CN Zhongguo Zhonghua Peoples Republic 中国/中华"} ,{name:"Christmas Island","data-alternative-spellings":"CX","data-relevancy-booster":"0.5"} ,{name:"Cocos (Keeling) Islands","data-alternative-spellings":"CC","data-relevancy-booster":"0.5"} ,{name:"Colombia","data-alternative-spellings":"CO"} 
,{name:"Comoros","data-alternative-spellings":"KM جزر القمر"} ,{name:"Congo","data-alternative-spellings":"CG"} ,{name:"Congo,the Democratic Republic of the","data-alternative-spellings":"CD Congo-Brazzaville Repubilika ya Kongo"} ,{name:"Cook Islands","data-alternative-spellings":"CK","data-relevancy-booster":"0.5"} ,{name:"Costa Rica","data-alternative-spellings":"CR"} ,{name:"Côte d'Ivoire","data-alternative-spellings":"CI Cote dIvoire"} ,{name:"Croatia","data-alternative-spellings":"HR Hrvatska"} ,{name:"Cuba","data-alternative-spellings":"CU"} ,{name:"Curaçao","data-alternative-spellings":"CW Curacao"} ,{name:"Cyprus","data-alternative-spellings":"CY Κύπρος Kýpros Kıbrıs"} ,{name:"Czech Republic","data-alternative-spellings":"CZ Česká Ceska"} ,{name:"Denmark","data-alternative-spellings":"DK Danmark","data-relevancy-booster":"1.5"} ,{name:"Djibouti","data-alternative-spellings":"DJ جيبوتي‎ Jabuuti Gabuuti"} ,{name:"Dominica","data-alternative-spellings":"DM Dominique","data-relevancy-booster":"0.5"} ,{name:"Dominican Republic","data-alternative-spellings":"DO"} ,{name:"Ecuador","data-alternative-spellings":"EC"} ,{name:"Egypt","data-alternative-spellings":"EG","data-relevancy-booster":"1.5"} ,{name:"El Salvador","data-alternative-spellings":"SV"} ,{name:"Equatorial Guinea","data-alternative-spellings":"GQ"} ,{name:"Eritrea","data-alternative-spellings":"ER إرتريا ኤርትራ"} ,{name:"Estonia","data-alternative-spellings":"EE Eesti"} ,{name:"Ethiopia","data-alternative-spellings":"ET ኢትዮጵያ"} ,{name:"Falkland Islands (Malvinas)","data-alternative-spellings":"FK","data-relevancy-booster":"0.5"} ,{name:"Faroe Islands","data-alternative-spellings":"FO Føroyar Færøerne","data-relevancy-booster":"0.5"} ,{name:"Fiji","data-alternative-spellings":"FJ Viti फ़िजी"} ,{name:"Finland","data-alternative-spellings":"FI Suomi"} ,{name:"France","data-alternative-spellings":"FR République française","data-relevancy-booster":"2.5"} ,{name:"French 
Guiana","data-alternative-spellings":"GF"} ,{name:"French Polynesia","data-alternative-spellings":"PF Polynésie française"} ,{name:"French Southern Territories","data-alternative-spellings":"TF"} ,{name:"Gabon","data-alternative-spellings":"GA République Gabonaise"} ,{name:"Gambia","data-alternative-spellings":"GM"} ,{name:"Georgia","data-alternative-spellings":"GE საქართველო"} ,{name:"Germany","data-alternative-spellings":"DE Bundesrepublik Deutschland","data-relevancy-booster":"3"} ,{name:"Ghana","data-alternative-spellings":"GH"} ,{name:"Gibraltar","data-alternative-spellings":"GI","data-relevancy-booster":"0.5"} ,{name:"Greece","data-alternative-spellings":"GR Ελλάδα","data-relevancy-booster":"1.5"} ,{name:"Greenland","data-alternative-spellings":"GL grønland","data-relevancy-booster":"0.5"} ,{name:"Grenada","data-alternative-spellings":"GD"} ,{name:"Guadeloupe","data-alternative-spellings":"GP"} ,{name:"Guam","data-alternative-spellings":"GU"} ,{name:"Guatemala","data-alternative-spellings":"GT"} ,{name:"Guernsey","data-alternative-spellings":"GG","data-relevancy-booster":"0.5"} ,{name:"Guinea","data-alternative-spellings":"GN"} ,{name:"Guinea-Bissau","data-alternative-spellings":"GW"} ,{name:"Guyana","data-alternative-spellings":"GY"} ,{name:"Haiti","data-alternative-spellings":"HT"} ,{name:"Heard Island and McDonald Islands","data-alternative-spellings":"HM"} ,{name:"Holy See (Vatican City State)","data-alternative-spellings":"VA","data-relevancy-booster":"0.5"} ,{name:"Honduras","data-alternative-spellings":"HN"} ,{name:"Hong Kong","data-alternative-spellings":"HK 香港"} ,{name:"Hungary","data-alternative-spellings":"HU Magyarország"} ,{name:"Iceland","data-alternative-spellings":"IS Island"} ,{name:"India","data-alternative-spellings":"IN भारत गणराज्य Hindustan","data-relevancy-booster":"3"} ,{name:"Indonesia","data-alternative-spellings":"ID","data-relevancy-booster":"2"} ,{name:"Iran,Islamic Republic of","data-alternative-spellings":"IR ایران"} 
,{name:"Iraq","data-alternative-spellings":"IQ العراق‎"} ,{name:"Ireland","data-alternative-spellings":"IE Éire","data-relevancy-booster":"1.2"} ,{name:"Isle of Man","data-alternative-spellings":"IM","data-relevancy-booster":"0.5"} ,{name:"Israel","data-alternative-spellings":"IL إسرائيل ישראל"} ,{name:"Italy","data-alternative-spellings":"IT Italia","data-relevancy-booster":"2"} ,{name:"Jamaica","data-alternative-spellings":"JM"} ,{name:"Japan","data-alternative-spellings":"JP Nippon Nihon 日本","data-relevancy-booster":"2.5"} ,{name:"Jersey","data-alternative-spellings":"JE","data-relevancy-booster":"0.5"} ,{name:"Jordan","data-alternative-spellings":"JO الأردن"} ,{name:"Kazakhstan","data-alternative-spellings":"KZ Қазақстан Казахстан"} ,{name:"Kenya","data-alternative-spellings":"KE"} ,{name:"Kiribati","data-alternative-spellings":"KI"} ,{name:"Korea,Democratic People's Republic of","data-alternative-spellings":"KP North Korea"} ,{name:"Korea,Republic of","data-alternative-spellings":"KR South Korea","data-relevancy-booster":"1.5"} ,{name:"Kuwait","data-alternative-spellings":"KW الكويت"} ,{name:"Kyrgyzstan","data-alternative-spellings":"KG Кыргызстан"} ,{name:"Lao People's Democratic Republic","data-alternative-spellings":"LA"} ,{name:"Latvia","data-alternative-spellings":"LV Latvija"} ,{name:"Lebanon","data-alternative-spellings":"LB لبنان"} ,{name:"Lesotho","data-alternative-spellings":"LS"} ,{name:"Liberia","data-alternative-spellings":"LR"} ,{name:"Libyan Arab Jamahiriya","data-alternative-spellings":"LY ليبيا"} ,{name:"Liechtenstein","data-alternative-spellings":"LI"} ,{name:"Lithuania","data-alternative-spellings":"LT Lietuva"} ,{name:"Luxembourg","data-alternative-spellings":"LU"} ,{name:"Macao","data-alternative-spellings":"MO"} ,{name:"Macedonia,The Former Yugoslav Republic Of","data-alternative-spellings":"MK Македонија"} ,{name:"Madagascar","data-alternative-spellings":"MG Madagasikara"} ,{name:"Malawi","data-alternative-spellings":"MW"} 
,{name:"Malaysia","data-alternative-spellings":"MY"} ,{name:"Maldives","data-alternative-spellings":"MV"} ,{name:"Mali","data-alternative-spellings":"ML"} ,{name:"Malta","data-alternative-spellings":"MT"} ,{name:"Marshall Islands","data-alternative-spellings":"MH","data-relevancy-booster":"0.5"} ,{name:"Martinique","data-alternative-spellings":"MQ"} ,{name:"Mauritania","data-alternative-spellings":"MR الموريتانية"} ,{name:"Mauritius","data-alternative-spellings":"MU"} ,{name:"Mayotte","data-alternative-spellings":"YT"} ,{name:"Mexico","data-alternative-spellings":"MX Mexicanos","data-relevancy-booster":"1.5"} ,{name:"Micronesia,Federated States of","data-alternative-spellings":"FM"} ,{name:"Moldova,Republic of","data-alternative-spellings":"MD"} ,{name:"Monaco","data-alternative-spellings":"MC"} ,{name:"Mongolia","data-alternative-spellings":"MN Mongγol ulus Монгол улс"} ,{name:"Montenegro","data-alternative-spellings":"ME"} ,{name:"Montserrat","data-alternative-spellings":"MS","data-relevancy-booster":"0.5"} ,{name:"Morocco","data-alternative-spellings":"MA المغرب"} ,{name:"Mozambique","data-alternative-spellings":"MZ Moçambique"} ,{name:"Myanmar","data-alternative-spellings":"MM"} ,{name:"Namibia","data-alternative-spellings":"NA Namibië"} ,{name:"Nauru","data-alternative-spellings":"NR Naoero","data-relevancy-booster":"0.5"} ,{name:"Nepal","data-alternative-spellings":"NP नेपाल"} ,{name:"Netherlands","data-alternative-spellings":"NL Holland Nederland","data-relevancy-booster":"1.5"} ,{name:"New Caledonia","data-alternative-spellings":"NC","data-relevancy-booster":"0.5"} ,{name:"New Zealand","data-alternative-spellings":"NZ Aotearoa"} ,{name:"Nicaragua","data-alternative-spellings":"NI"} ,{name:"Niger","data-alternative-spellings":"NE Nijar"} ,{name:"Nigeria","data-alternative-spellings":"NG Nijeriya Naíjíríà","data-relevancy-booster":"1.5"} ,{name:"Niue","data-alternative-spellings":"NU","data-relevancy-booster":"0.5"} ,{name:"Norfolk 
Island","data-alternative-spellings":"NF","data-relevancy-booster":"0.5"} ,{name:"Northern Mariana Islands","data-alternative-spellings":"MP","data-relevancy-booster":"0.5"} ,{name:"Norway","data-alternative-spellings":"NO Norge Noreg","data-relevancy-booster":"1.5"} ,{name:"Oman","data-alternative-spellings":"OM عمان"} ,{name:"Pakistan","data-alternative-spellings":"PK پاکستان","data-relevancy-booster":"2"} ,{name:"Palau","data-alternative-spellings":"PW","data-relevancy-booster":"0.5"} ,{name:"Palestinian Territory,Occupied","data-alternative-spellings":"PS فلسطين"} ,{name:"Panama","data-alternative-spellings":"PA"} ,{name:"Papua New Guinea","data-alternative-spellings":"PG"} ,{name:"Paraguay","data-alternative-spellings":"PY"} ,{name:"Peru","data-alternative-spellings":"PE"} ,{name:"Philippines","data-alternative-spellings":"PH Pilipinas","data-relevancy-booster":"1.5"} ,{name:"Pitcairn","data-alternative-spellings":"PN","data-relevancy-booster":"0.5"} ,{name:"Poland","data-alternative-spellings":"PL Polska","data-relevancy-booster":"1.25"} ,{name:"Portugal","data-alternative-spellings":"PT Portuguesa","data-relevancy-booster":"1.5"} ,{name:"Puerto Rico","data-alternative-spellings":"PR"} ,{name:"Qatar","data-alternative-spellings":"QA قطر"} ,{name:"Réunion","data-alternative-spellings":"RE Reunion"} ,{name:"Romania","data-alternative-spellings":"RO Rumania Roumania România"} ,{name:"Russian Federation","data-alternative-spellings":"RU Rossiya Российская Россия","data-relevancy-booster":"2.5"} ,{name:"Rwanda","data-alternative-spellings":"RW"} ,{name:"Saint Barthélemy","data-alternative-spellings":"BL St. 
Barthelemy"} ,{name:"Saint Helena","data-alternative-spellings":"SH St."} ,{name:"Saint Kitts and Nevis","data-alternative-spellings":"KN St."} ,{name:"Saint Lucia","data-alternative-spellings":"LC St."} ,{name:"Saint Martin (French Part)","data-alternative-spellings":"MF St."} ,{name:"Saint Pierre and Miquelon","data-alternative-spellings":"PM St."} ,{name:"Saint Vincent and the Grenadines","data-alternative-spellings":"VC St."} ,{name:"Samoa","data-alternative-spellings":"WS"} ,{name:"San Marino","data-alternative-spellings":"SM"} ,{name:"Sao Tome and Principe","data-alternative-spellings":"ST"} ,{name:"Saudi Arabia","data-alternative-spellings":"SA السعودية"} ,{name:"Senegal","data-alternative-spellings":"SN Sénégal"} ,{name:"Serbia","data-alternative-spellings":"RS Србија Srbija"} ,{name:"Seychelles","data-alternative-spellings":"SC","data-relevancy-booster":"0.5"} ,{name:"Sierra Leone","data-alternative-spellings":"SL"} ,{name:"Singapore","data-alternative-spellings":"SG Singapura சிங்கப்பூர் குடியரசு 新加坡共和国"} ,{name:"Sint Maarten (Dutch Part)","data-alternative-spellings":"SX"} ,{name:"Slovakia","data-alternative-spellings":"SK Slovenská Slovensko"} ,{name:"Slovenia","data-alternative-spellings":"SI Slovenija"} ,{name:"Solomon Islands","data-alternative-spellings":"SB"} ,{name:"Somalia","data-alternative-spellings":"SO الصومال"} ,{name:"South Africa","data-alternative-spellings":"ZA RSA Suid-Afrika"} ,{name:"South Georgia and the South Sandwich Islands","data-alternative-spellings":"GS"} ,{name:"South Sudan","data-alternative-spellings":"SS"} ,{name:"Spain","data-alternative-spellings":"ES España","data-relevancy-booster":"2"} ,{name:"Sri Lanka","data-alternative-spellings":"LK ශ්‍රී ලංකා இலங்கை Ceylon"} ,{name:"Sudan","data-alternative-spellings":"SD السودان"} ,{name:"Suriname","data-alternative-spellings":"SR शर्नम् Sarnam Sranangron"} ,{name:"Svalbard and Jan Mayen","data-alternative-spellings":"SJ","data-relevancy-booster":"0.5"} 
,{name:"Swaziland","data-alternative-spellings":"SZ weSwatini Swatini Ngwane"} ,{name:"Sweden","data-alternative-spellings":"SE Sverige","data-relevancy-booster":"1.5"} ,{name:"Switzerland","data-alternative-spellings":"CH Swiss Confederation Schweiz Suisse Svizzera Svizra","data-relevancy-booster":"1.5"} ,{name:"Syrian Arab Republic","data-alternative-spellings":"SY Syria سورية"} ,{name:"Taiwan,Province of China","data-alternative-spellings":"TW 台灣 臺灣"} ,{name:"Tajikistan","data-alternative-spellings":"TJ Тоҷикистон Toçikiston"} ,{name:"Tanzania,United Republic of","data-alternative-spellings":"TZ"} ,{name:"Thailand","data-alternative-spellings":"TH ประเทศไทย Prathet Thai"} ,{name:"Timor-Leste","data-alternative-spellings":"TL"} ,{name:"Togo","data-alternative-spellings":"TG Togolese"} ,{name:"Tokelau","data-alternative-spellings":"TK","data-relevancy-booster":"0.5"} ,{name:"Tonga","data-alternative-spellings":"TO"} ,{name:"Trinidad and Tobago","data-alternative-spellings":"TT"} ,{name:"Tunisia","data-alternative-spellings":"TN تونس"} ,{name:"Turkey","data-alternative-spellings":"TR Türkiye Turkiye"} ,{name:"Turkmenistan","data-alternative-spellings":"TM Türkmenistan"} ,{name:"Turks and Caicos Islands","data-alternative-spellings":"TC","data-relevancy-booster":"0.5"} ,{name:"Tuvalu","data-alternative-spellings":"TV","data-relevancy-booster":"0.5"} ,{name:"Uganda","data-alternative-spellings":"UG"} ,{name:"Ukraine","data-alternative-spellings":"UA Ukrayina Україна"} ,{name:"United Arab Emirates","data-alternative-spellings":"AE UAE الإمارات"} ,{name:"United Kingdom","data-alternative-spellings":"GB Great Britain England UK Wales Scotland Northern Ireland","data-relevancy-booster":"2.5"} ,{name:"United States","data-relevancy-booster":"3.5","data-alternative-spellings":"US USA United States of America"} ,{name:"United States Minor Outlying Islands","data-alternative-spellings":"UM"} ,{name:"Uruguay","data-alternative-spellings":"UY"} 
,{name:"Uzbekistan","data-alternative-spellings":"UZ Ўзбекистон O'zbekstan O‘zbekiston"} ,{name:"Vanuatu","data-alternative-spellings":"VU"} ,{name:"Venezuela","data-alternative-spellings":"VE"} ,{name:"Vietnam","data-alternative-spellings":"VN Việt Nam","data-relevancy-booster":"1.5"} ,{name:"Virgin Islands,British","data-alternative-spellings":"VG","data-relevancy-booster":"0.5"} ,{name:"Virgin Islands,U.S.","data-alternative-spellings":"VI","data-relevancy-booster":"0.5"} ,{name:"Wallis and Futuna","data-alternative-spellings":"WF","data-relevancy-booster":"0.5"} ,{name:"Western Sahara","data-alternative-spellings":"EH لصحراء الغربية"} ,{name:"Yemen","data-alternative-spellings":"YE اليمن"} ,{name:"Zambia","data-alternative-spellings":"ZM"} ,{name:"Zimbabwe","data-alternative-spellings":"ZW"} ]; var html = ""; // Provide a "Select Country" leader element, but only if there is no pre-selected item. // This is to prevent users who have previously selected a country from setting an empty country. if( !selectedElementName || selectedElementName.length < 1) html = '<option value="" selected="selected">Select Country</option>\n'; json.forEach(function(element, index, array){ var str = '<option value="' + element.name+'"'; if( element.name == selectedElementName ) str += " selected "; var helper=function(field){ if( typeof element[field] != "string" ) return(""); if( element[field].length == 0 ) return(""); return(" "+field+'="'+element[field]+'" '); } str += helper("data-alternative-spellings"); str += helper("data-relevancy-booster"); str += ">"+element.name+"</option>\n"; html += str; }) return(html); }
{ this.url = url; this.link = link; this.help = help; if(!cmds[group]) { cmds[group] = {}; cmds[group].items = [ ] }; cmds[group].items.push(this); }
identifier_body
eval.rs
use error::*; use ast::*; use engine::*; use types::*; use eval_static::*; use std::fmt; use std::rc::Rc; use std::collections::HashMap; #[derive(Clone)] pub struct Macro(pub Ident, pub Rc<dyn Fn(&Exp, &Context) -> Ret<RunVal>>); impl fmt::Debug for Macro { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, ":macro: {}", self.0) } } impl PartialEq for Macro { fn eq(&self, other: &Self) -> bool { self.0 == other.0 } fn ne(&self, other: &Self) -> bool { self.0 != other.0 } } #[derive(Clone,Debug,PartialEq)] pub enum RunVal { Index(usize), String(String), Data(Rc<DataType>, usize), Tuple(Vec<RunVal>), Func(Rc<Context>, Pat, Exp, Type), Macro(Macro), State(State, Type), Gate(Gate), } impl fmt::Display for RunVal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { &RunVal::Index(ref n) => write!(f, "{}", n), &RunVal::String(ref s) => write!(f, "{:?}", s), &RunVal::Data(ref dt, ref index) => write!(f, "{}", dt.variants[*index]), &RunVal::Tuple(ref vals) => write!(f, "({})", vals.iter().map(|val| format!("{}", val)).collect::<Vec<_>>().join(", ")), &RunVal::Func(ref _ctx, ref _pat, ref _body, ref ty) => write!(f, "fn{}", ty), &RunVal::Macro(ref mc) => write!(f, "{:?}", mc), &RunVal::State(ref state, ref ty) => if ty != &Type::Any { write!(f, "{}: {}", StateView(state), ty) } else { write!(f, "{}", StateView(state)) }, &RunVal::Gate(ref gate) => write!(f, "[{}]", gate.iter().map(|state| format!("{}", StateView(state))).collect::<Vec<_>>().join(", ")), } } } #[derive(Clone,Debug,PartialEq)] pub struct Context { path: String, vars: HashMap<Ident, RunVal>, types: TypeContext, } impl Context { pub fn new(path: String) -> Context { Context { path, vars: HashMap::new(), types: TypeContext::new(), } } pub fn path(&self) -> &String { &self.path } pub fn types(&self) -> &TypeContext { &self.types } pub fn create_child(&self) -> Context { self.clone() } pub fn find_var(&self, id: &Ident) -> Ret<RunVal> { unwrap_from_context("Variable", id, 
self.vars.get(id)) } pub fn add_var(&mut self, id: Ident, val: RunVal, ty: Type) -> Ret { self.vars.insert(id.clone(), val); self.types.add_var_type(id, ty) } pub fn find_type(&self, id: &Ident) -> Ret<Type> { self.types.find_type(id) } pub fn add_type(&mut self, id: String, ty: Type) -> Ret { self.types.add_type(id, ty) } pub fn
(&mut self, id: String, variants: Vec<Ident>) -> Ret { let rc = Rc::new(DataType {id: id.clone(), variants: variants.clone()}); for (i, variant) in variants.iter().enumerate() { self.add_var(variant.clone(), RunVal::Data(rc.clone(), i), Type::Data(rc.clone()))?; } self.add_type(id, Type::Data(rc)) } pub fn add_macro(&mut self, id: &str, handle: &'static dyn Fn(&Exp, &Context) -> Ret<RunVal>) -> Ret { self.add_var(id.to_string(), RunVal::Macro(Macro(id.to_string(), Rc::new(handle))), Type::Any /* TODO define macro types */) } pub fn import(&self, path: &str) -> Ret<Module> { use regex::Regex; use std::path::Path; use resource; use stdlib; use parser; let (ctx, file) = if Regex::new("^[a-z]+:").unwrap().is_match(path) {(self.create_child(), path.to_string())} else { let import_path = Path::new(&self.path()).join(&resource::with_ext(path, "fqy")); let mut import_dir = import_path.clone(); import_dir.pop(); let file = import_path.to_string_lossy().to_string(); let ctx = stdlib::create_ctx(&import_dir.to_string_lossy())?; (ctx, file) }; let exp = parser::parse_resource(&file)?; Ok(Module {path: file.to_string(), exp: exp, ctx: ctx}) } pub fn import_eval(&self, path: &str) -> Ret<RunVal> { let mut module = self.import(path)?; Ok(eval_exp_inline(&module.exp, &mut module.ctx)) } } #[derive(Clone,Debug,PartialEq)] pub struct Module { pub path: String, pub exp: Exp, pub ctx: Context, } pub fn eval_exp(exp: &Exp, ctx: &Context) -> RunVal { match exp { &Exp::Index(n) => RunVal::Index(n), &Exp::String(ref s) => RunVal::String(s.to_string()), &Exp::Var(ref id) => ctx.find_var(id).unwrap(), &Exp::Scope(ref decls, ref ret) => { let mut child = ctx.create_child(); for decl in decls { eval_decl(decl, &mut child).unwrap(); } eval_exp(ret, &child) }, &Exp::Expand(_) => panic!("No context for expansion"), &Exp::Tuple(ref args) => RunVal::Tuple(eval_exp_seq(args, ctx)), &Exp::Concat(ref args) => { //TODO adjacent gates if args.len() == 1 { if let Some(gate) = 
build_gate(&eval_exp(&args[0], ctx), ctx) { return RunVal::Gate(gate) } } let div = (args.len() as f32).sqrt(); let states = args.iter() .map(|e| build_state_typed(eval_exp(e, ctx))) .collect::<Ret<Vec<(State, Type)>>>().unwrap(); RunVal::State(states.iter() .flat_map(|(s, _)| s) .map(|n| n / div) .collect(), Type::Concat(states.into_iter() .map(|(_, t)| t) .collect())) }, &Exp::Cond(ref cond_exp, ref then_exp, ref else_exp) => { let val = eval_exp(cond_exp, ctx); if let Some(b) = build_bool(&val) { eval_exp(if b {then_exp} else {else_exp}, ctx) } else { // TODO: consider removing in favor of using extract gates for explicitness // let state = build_state(val); // if state.len() > 2 { // panic!("Conditional state cannot be {}-dimensional", state.len()) // } // RunVal::State(state.extract(vec![ // build_state(eval_exp(else_exp, ctx)), // build_state(eval_exp(then_exp, ctx)), // ]), Type::Any /* TODO determine from then/else types */) panic!("Non-boolean value: {}", val) } }, &Exp::Lambda(ref pat, ref body) => { let ty = infer_type(exp, ctx.types()).unwrap(); RunVal::Func(Rc::new(ctx.clone()), pat.clone(), (**body).clone(), ty) }, &Exp::Invoke(ref target, ref arg) => { match eval_exp(target, ctx) { // TODO proper tuple function evaluation RunVal::Func(fn_ctx_rc, pat, body, _ty) => { let mut fn_ctx = (*fn_ctx_rc).clone(); assign_pat(&pat, &eval_exp(arg, ctx), &mut fn_ctx).unwrap(); eval_exp(&body, &fn_ctx) }, RunVal::Macro(Macro(_, handle)) => handle(arg, ctx).unwrap(), RunVal::Gate(gate) => { let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap(); RunVal::State(s.extract(gate), t) }, val => { let msg = &format!("Cannot invoke {}", val); let state = build_state(eval_exp(arg, ctx)); let gate = build_gate(&val, ctx).expect(msg); RunVal::State(state.extract(gate), Type::Any /* TODO infer output type from `target` */) }, } }, &Exp::Repeat(n, ref exp) => { let val = eval_exp(&exp, ctx); RunVal::Tuple((0..n).map(|_| val.clone()).collect()) }, &Exp::State(ref arg) => { 
let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap(); RunVal::State(s, t) }, &Exp::Phase(phase, ref arg) => { let val = eval_exp(arg, ctx); build_gate(&val, ctx) .map(|g| RunVal::Gate(g.power(phase))) .unwrap_or_else(|| { let (s, t) = build_state_typed(val).unwrap(); RunVal::State(s.phase(phase), t) }) }, &Exp::Extract(ref arg, ref cases) => { let state = build_state(eval_exp(arg, ctx)); let (gate, gt) = create_extract_gate_typed(cases, state.len(), ctx); RunVal::State(state.extract(gate), gt) }, &Exp::Anno(ref exp, ref anno) => eval_type(anno, ctx.types()).unwrap().assign(eval_exp(exp, ctx)).unwrap(), } } pub fn eval_exp_inline(exp: &Exp, ctx: &mut Context) -> RunVal { match exp { Exp::Scope(ref decls, ref exp) => { for decl in decls { eval_decl(decl, ctx).unwrap(); } eval_exp(exp, ctx) }, _ => eval_exp(exp, ctx), } } pub fn eval_exp_seq(seq: &Vec<Exp>, ctx: &Context) -> Vec<RunVal> { seq.iter().flat_map(|e| { if let Exp::Expand(ref e) = e { let val = eval_exp(e, ctx); let err = Error(format!("Cannot expand value: {}", val)); iterate_val(val).ok_or(err).unwrap() } else {vec![eval_exp(e, ctx)]} }).collect() } pub fn eval_decl(decl: &Decl, ctx: &mut Context) -> Ret { match decl { &Decl::Let(ref pat, ref exp) => assign_pat(pat, &eval_exp(exp, ctx), ctx), &Decl::Type(ref id, ref pat) => { let ty = eval_type(pat, ctx.types())?; ctx.add_type(id.clone(), ty) }, &Decl::Data(ref id, ref variants) => ctx.add_datatype(id.clone(), variants.clone()), &Decl::Assert(ref expect, ref result) => { let a = eval_exp(expect, ctx); let b = eval_exp(result, ctx); let eq = match (&a, &b) { (&RunVal::State(ref a, _), &RunVal::State(ref b, _)) => { a.iter().zip(b).map(|(a, b)| { let abs = (a - b).norm(); abs * abs }).sum::<f32>() < 0.00001_f32 }, (a, b) => a == b, }; if !eq {err!("Assertion failed: {} != {}", a, b)} else {Ok(())} }, &Decl::Print(ref exp) => Ok(println!(":: {}", eval_exp(exp, ctx))), &Decl::Do(ref exp) => { eval_exp(exp, ctx); Ok(()) }, } } // TODO combine logic with 
eval_static::assign_pat_type() pub fn assign_pat(pat: &Pat, val: &RunVal, ctx: &mut Context) -> Ret { match (pat, val) { (&Pat::Any, _) => Ok(()), (&Pat::Var(ref id), _) => ctx.add_var(id.clone(), val.clone(), get_val_type(val)), //TODO use val type (&Pat::Tuple(ref pats), &RunVal::Tuple(ref vals)) => { if pats.len() != vals.len() {err!("Cannot deconstruct {} values from value: {}", pats.len(), val)} else { pats.iter().zip(vals) .map(|(pat, val)| assign_pat(pat, val, ctx)) .collect::<Ret<_>>() } }, (&Pat::Anno(ref pat, ref anno), _) => assign_pat(pat, &eval_type(&anno, ctx.types())?.assign(val.clone())?, ctx), _ => err!("{:?} cannot deconstruct `{}`", pat, val), } } pub fn get_val_type(val: &RunVal) -> Type { match val { &RunVal::Index(_) => Type::Any, &RunVal::String(_) => Type::Any, &RunVal::Data(ref dt, _) => Type::Data((*dt).clone()), &RunVal::Tuple(ref vals) => Type::Tuple(vals.iter().map(get_val_type).collect()), &RunVal::Func(_, _, _, ref ty) => ty.clone(), &RunVal::Macro(_) => Type::Any, // TODO &RunVal::State(_, ref ty) => ty.clone(), &RunVal::Gate(_) => Type::Any, // TODO } } pub fn build_bool(val: &RunVal) -> Option<bool> { match val { &RunVal::Index(n) => Some(n > 0), &RunVal::Data(ref _ty, n) => Some(n > 0), &RunVal::Tuple(ref vec) => Some(vec.len() > 0), _ => None, } } pub fn build_state(val: RunVal) -> State { build_state_typed(val).unwrap().0 } pub fn build_state_typed(val: RunVal) -> Ret<(State, Type)> { match val { RunVal::Index(n) => Ok((get_state(n), Type::Any)), RunVal::Data(dt, index) => Ok((get_state(index).pad(dt.variants.len()), Type::Data(dt))), RunVal::Tuple(vals) => { let states = vals.into_iter().map(|v| build_state_typed(v)).collect::<Ret<Vec<(State, Type)>>>()?; let ty = Type::Tuple(states.iter().map(|(_, t)| t.clone()).collect()); Ok((states.into_iter().fold(get_state(0), |a, (b, _)| State::combine(a, b)), ty)) }, RunVal::State(state, ty) => Ok((state, ty)), val => err!("Cannot build state from {}", val) } } pub fn 
eval_gate_body(exp: &Exp, ctx: &Context) -> Option<Gate> { match exp { &Exp::Extract(ref _arg, ref cases) => Some(create_extract_gate_typed(cases, 0, ctx).0), _ => None, } } pub fn build_gate(val: &RunVal, ctx: &Context) -> Option<Gate> { match val { &RunVal::Tuple(ref vals) => vals.iter() .fold(Some(vec![get_state(0)]), |a, b| a.and_then(|a| build_gate(b, ctx).map(|b| a.combine(b)))), &RunVal::Func(ref fn_ctx, ref _pat, ref body, ref _ty) => eval_gate_body(body, fn_ctx), // TODO use type &RunVal::Gate(ref gate) => Some(gate.clone()), _ => None, } } pub fn iterate_val(val: RunVal) -> Option<Vec<RunVal>> { match val { RunVal::Index(i) => { Some((0..i).map(RunVal::Index).collect()) }, RunVal::Tuple(vals) => Some(vals), _ => None, } } pub fn create_extract_gate_typed(cases: &Vec<Case>, min_input_size: usize, ctx: &Context) -> (Gate, Type) { fn reduce_type(output_type: Option<Type>, t: Type) -> Option<Type> { Some(match output_type { None => t, Some(ot) => if ot == t {t} else {Type::Any}, }) } let mut dims: Gate = vec![]; let mut output_type = None; for case in cases.iter() { match case { &Case::Exp(ref selector, ref result) => { let selector_state = build_state(eval_exp(selector, ctx)); let (result_state, result_type) = build_state_typed(eval_exp(result, ctx)).unwrap(); while dims.len() < selector_state.len() || dims.len() < min_input_size { dims.push(vec![]); } for (i, s) in selector_state.iter().enumerate() { let len = ::std::cmp::max(result_state.len(), dims[i].len()); // TODO improve impl dims[i] = result_state.clone().pad(len).into_iter() .zip(dims[i].clone().pad(len).into_iter()) .map(|(r, d)| r * s + d) .collect(); } output_type = reduce_type(output_type, result_type); }, &Case::Default(ref result) => { let (state, result_type) = build_state_typed(eval_exp(result, ctx)).unwrap(); for i in 0..dims.len() { use num::Zero; if dims[i].prob_sum().is_zero() { dims[i] = state.clone(); } } output_type = reduce_type(output_type, result_type); }, } } let max_len = 
dims.iter().map(Vec::len).max().unwrap_or(0); let gate: Gate = dims.into_iter().map(|s| s.pad(max_len)).collect(); // if !gate.is_unitary() { // panic!("Non-unitary extraction: {:?}", cases); // } (gate, output_type.unwrap_or(Type::Any)) }
add_datatype
identifier_name
eval.rs
use error::*; use ast::*; use engine::*; use types::*; use eval_static::*; use std::fmt; use std::rc::Rc; use std::collections::HashMap; #[derive(Clone)] pub struct Macro(pub Ident, pub Rc<dyn Fn(&Exp, &Context) -> Ret<RunVal>>); impl fmt::Debug for Macro { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, ":macro: {}", self.0) } } impl PartialEq for Macro { fn eq(&self, other: &Self) -> bool { self.0 == other.0 } fn ne(&self, other: &Self) -> bool { self.0 != other.0 } } #[derive(Clone,Debug,PartialEq)] pub enum RunVal { Index(usize), String(String), Data(Rc<DataType>, usize), Tuple(Vec<RunVal>), Func(Rc<Context>, Pat, Exp, Type), Macro(Macro), State(State, Type), Gate(Gate), } impl fmt::Display for RunVal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { &RunVal::Index(ref n) => write!(f, "{}", n), &RunVal::String(ref s) => write!(f, "{:?}", s), &RunVal::Data(ref dt, ref index) => write!(f, "{}", dt.variants[*index]), &RunVal::Tuple(ref vals) => write!(f, "({})", vals.iter().map(|val| format!("{}", val)).collect::<Vec<_>>().join(", ")), &RunVal::Func(ref _ctx, ref _pat, ref _body, ref ty) => write!(f, "fn{}", ty), &RunVal::Macro(ref mc) => write!(f, "{:?}", mc), &RunVal::State(ref state, ref ty) => if ty != &Type::Any { write!(f, "{}: {}", StateView(state), ty) } else { write!(f, "{}", StateView(state)) }, &RunVal::Gate(ref gate) => write!(f, "[{}]", gate.iter().map(|state| format!("{}", StateView(state))).collect::<Vec<_>>().join(", ")), } } } #[derive(Clone,Debug,PartialEq)] pub struct Context { path: String, vars: HashMap<Ident, RunVal>, types: TypeContext, } impl Context { pub fn new(path: String) -> Context { Context { path, vars: HashMap::new(), types: TypeContext::new(), } } pub fn path(&self) -> &String { &self.path } pub fn types(&self) -> &TypeContext { &self.types } pub fn create_child(&self) -> Context { self.clone() } pub fn find_var(&self, id: &Ident) -> Ret<RunVal> { unwrap_from_context("Variable", id, 
self.vars.get(id)) } pub fn add_var(&mut self, id: Ident, val: RunVal, ty: Type) -> Ret { self.vars.insert(id.clone(), val); self.types.add_var_type(id, ty) } pub fn find_type(&self, id: &Ident) -> Ret<Type> { self.types.find_type(id) } pub fn add_type(&mut self, id: String, ty: Type) -> Ret { self.types.add_type(id, ty) } pub fn add_datatype(&mut self, id: String, variants: Vec<Ident>) -> Ret { let rc = Rc::new(DataType {id: id.clone(), variants: variants.clone()}); for (i, variant) in variants.iter().enumerate() { self.add_var(variant.clone(), RunVal::Data(rc.clone(), i), Type::Data(rc.clone()))?; } self.add_type(id, Type::Data(rc)) } pub fn add_macro(&mut self, id: &str, handle: &'static dyn Fn(&Exp, &Context) -> Ret<RunVal>) -> Ret { self.add_var(id.to_string(), RunVal::Macro(Macro(id.to_string(), Rc::new(handle))), Type::Any /* TODO define macro types */) } pub fn import(&self, path: &str) -> Ret<Module> { use regex::Regex; use std::path::Path; use resource; use stdlib; use parser; let (ctx, file) = if Regex::new("^[a-z]+:").unwrap().is_match(path) {(self.create_child(), path.to_string())} else { let import_path = Path::new(&self.path()).join(&resource::with_ext(path, "fqy")); let mut import_dir = import_path.clone(); import_dir.pop(); let file = import_path.to_string_lossy().to_string(); let ctx = stdlib::create_ctx(&import_dir.to_string_lossy())?; (ctx, file) }; let exp = parser::parse_resource(&file)?; Ok(Module {path: file.to_string(), exp: exp, ctx: ctx}) } pub fn import_eval(&self, path: &str) -> Ret<RunVal> { let mut module = self.import(path)?; Ok(eval_exp_inline(&module.exp, &mut module.ctx)) } } #[derive(Clone,Debug,PartialEq)] pub struct Module { pub path: String, pub exp: Exp, pub ctx: Context, } pub fn eval_exp(exp: &Exp, ctx: &Context) -> RunVal { match exp { &Exp::Index(n) => RunVal::Index(n), &Exp::String(ref s) => RunVal::String(s.to_string()), &Exp::Var(ref id) => ctx.find_var(id).unwrap(), &Exp::Scope(ref decls, ref ret) => { let mut child = 
ctx.create_child(); for decl in decls { eval_decl(decl, &mut child).unwrap(); } eval_exp(ret, &child) }, &Exp::Expand(_) => panic!("No context for expansion"), &Exp::Tuple(ref args) => RunVal::Tuple(eval_exp_seq(args, ctx)), &Exp::Concat(ref args) => { //TODO adjacent gates if args.len() == 1 { if let Some(gate) = build_gate(&eval_exp(&args[0], ctx), ctx) { return RunVal::Gate(gate) } } let div = (args.len() as f32).sqrt(); let states = args.iter() .map(|e| build_state_typed(eval_exp(e, ctx))) .collect::<Ret<Vec<(State, Type)>>>().unwrap(); RunVal::State(states.iter() .flat_map(|(s, _)| s) .map(|n| n / div) .collect(), Type::Concat(states.into_iter() .map(|(_, t)| t) .collect())) }, &Exp::Cond(ref cond_exp, ref then_exp, ref else_exp) => { let val = eval_exp(cond_exp, ctx); if let Some(b) = build_bool(&val) { eval_exp(if b {then_exp} else {else_exp}, ctx) } else { // TODO: consider removing in favor of using extract gates for explicitness // let state = build_state(val); // if state.len() > 2 { // panic!("Conditional state cannot be {}-dimensional", state.len()) // } // RunVal::State(state.extract(vec![ // build_state(eval_exp(else_exp, ctx)), // build_state(eval_exp(then_exp, ctx)), // ]), Type::Any /* TODO determine from then/else types */) panic!("Non-boolean value: {}", val) } }, &Exp::Lambda(ref pat, ref body) => { let ty = infer_type(exp, ctx.types()).unwrap(); RunVal::Func(Rc::new(ctx.clone()), pat.clone(), (**body).clone(), ty) }, &Exp::Invoke(ref target, ref arg) => { match eval_exp(target, ctx) { // TODO proper tuple function evaluation RunVal::Func(fn_ctx_rc, pat, body, _ty) => { let mut fn_ctx = (*fn_ctx_rc).clone(); assign_pat(&pat, &eval_exp(arg, ctx), &mut fn_ctx).unwrap(); eval_exp(&body, &fn_ctx) }, RunVal::Macro(Macro(_, handle)) => handle(arg, ctx).unwrap(), RunVal::Gate(gate) => { let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap(); RunVal::State(s.extract(gate), t) }, val => { let msg = &format!("Cannot invoke {}", val); let state = 
build_state(eval_exp(arg, ctx)); let gate = build_gate(&val, ctx).expect(msg); RunVal::State(state.extract(gate), Type::Any /* TODO infer output type from `target` */) }, } }, &Exp::Repeat(n, ref exp) => { let val = eval_exp(&exp, ctx); RunVal::Tuple((0..n).map(|_| val.clone()).collect()) }, &Exp::State(ref arg) => { let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap(); RunVal::State(s, t) }, &Exp::Phase(phase, ref arg) => { let val = eval_exp(arg, ctx); build_gate(&val, ctx) .map(|g| RunVal::Gate(g.power(phase))) .unwrap_or_else(|| { let (s, t) = build_state_typed(val).unwrap(); RunVal::State(s.phase(phase), t) }) }, &Exp::Extract(ref arg, ref cases) => { let state = build_state(eval_exp(arg, ctx)); let (gate, gt) = create_extract_gate_typed(cases, state.len(), ctx); RunVal::State(state.extract(gate), gt) }, &Exp::Anno(ref exp, ref anno) => eval_type(anno, ctx.types()).unwrap().assign(eval_exp(exp, ctx)).unwrap(), } } pub fn eval_exp_inline(exp: &Exp, ctx: &mut Context) -> RunVal { match exp { Exp::Scope(ref decls, ref exp) => {
} eval_exp(exp, ctx) }, _ => eval_exp(exp, ctx), } } pub fn eval_exp_seq(seq: &Vec<Exp>, ctx: &Context) -> Vec<RunVal> { seq.iter().flat_map(|e| { if let Exp::Expand(ref e) = e { let val = eval_exp(e, ctx); let err = Error(format!("Cannot expand value: {}", val)); iterate_val(val).ok_or(err).unwrap() } else {vec![eval_exp(e, ctx)]} }).collect() } pub fn eval_decl(decl: &Decl, ctx: &mut Context) -> Ret { match decl { &Decl::Let(ref pat, ref exp) => assign_pat(pat, &eval_exp(exp, ctx), ctx), &Decl::Type(ref id, ref pat) => { let ty = eval_type(pat, ctx.types())?; ctx.add_type(id.clone(), ty) }, &Decl::Data(ref id, ref variants) => ctx.add_datatype(id.clone(), variants.clone()), &Decl::Assert(ref expect, ref result) => { let a = eval_exp(expect, ctx); let b = eval_exp(result, ctx); let eq = match (&a, &b) { (&RunVal::State(ref a, _), &RunVal::State(ref b, _)) => { a.iter().zip(b).map(|(a, b)| { let abs = (a - b).norm(); abs * abs }).sum::<f32>() < 0.00001_f32 }, (a, b) => a == b, }; if !eq {err!("Assertion failed: {} != {}", a, b)} else {Ok(())} }, &Decl::Print(ref exp) => Ok(println!(":: {}", eval_exp(exp, ctx))), &Decl::Do(ref exp) => { eval_exp(exp, ctx); Ok(()) }, } } // TODO combine logic with eval_static::assign_pat_type() pub fn assign_pat(pat: &Pat, val: &RunVal, ctx: &mut Context) -> Ret { match (pat, val) { (&Pat::Any, _) => Ok(()), (&Pat::Var(ref id), _) => ctx.add_var(id.clone(), val.clone(), get_val_type(val)), //TODO use val type (&Pat::Tuple(ref pats), &RunVal::Tuple(ref vals)) => { if pats.len() != vals.len() {err!("Cannot deconstruct {} values from value: {}", pats.len(), val)} else { pats.iter().zip(vals) .map(|(pat, val)| assign_pat(pat, val, ctx)) .collect::<Ret<_>>() } }, (&Pat::Anno(ref pat, ref anno), _) => assign_pat(pat, &eval_type(&anno, ctx.types())?.assign(val.clone())?, ctx), _ => err!("{:?} cannot deconstruct `{}`", pat, val), } } pub fn get_val_type(val: &RunVal) -> Type { match val { &RunVal::Index(_) => Type::Any, &RunVal::String(_) => 
Type::Any, &RunVal::Data(ref dt, _) => Type::Data((*dt).clone()), &RunVal::Tuple(ref vals) => Type::Tuple(vals.iter().map(get_val_type).collect()), &RunVal::Func(_, _, _, ref ty) => ty.clone(), &RunVal::Macro(_) => Type::Any, // TODO &RunVal::State(_, ref ty) => ty.clone(), &RunVal::Gate(_) => Type::Any, // TODO } } pub fn build_bool(val: &RunVal) -> Option<bool> { match val { &RunVal::Index(n) => Some(n > 0), &RunVal::Data(ref _ty, n) => Some(n > 0), &RunVal::Tuple(ref vec) => Some(vec.len() > 0), _ => None, } } pub fn build_state(val: RunVal) -> State { build_state_typed(val).unwrap().0 } pub fn build_state_typed(val: RunVal) -> Ret<(State, Type)> { match val { RunVal::Index(n) => Ok((get_state(n), Type::Any)), RunVal::Data(dt, index) => Ok((get_state(index).pad(dt.variants.len()), Type::Data(dt))), RunVal::Tuple(vals) => { let states = vals.into_iter().map(|v| build_state_typed(v)).collect::<Ret<Vec<(State, Type)>>>()?; let ty = Type::Tuple(states.iter().map(|(_, t)| t.clone()).collect()); Ok((states.into_iter().fold(get_state(0), |a, (b, _)| State::combine(a, b)), ty)) }, RunVal::State(state, ty) => Ok((state, ty)), val => err!("Cannot build state from {}", val) } } pub fn eval_gate_body(exp: &Exp, ctx: &Context) -> Option<Gate> { match exp { &Exp::Extract(ref _arg, ref cases) => Some(create_extract_gate_typed(cases, 0, ctx).0), _ => None, } } pub fn build_gate(val: &RunVal, ctx: &Context) -> Option<Gate> { match val { &RunVal::Tuple(ref vals) => vals.iter() .fold(Some(vec![get_state(0)]), |a, b| a.and_then(|a| build_gate(b, ctx).map(|b| a.combine(b)))), &RunVal::Func(ref fn_ctx, ref _pat, ref body, ref _ty) => eval_gate_body(body, fn_ctx), // TODO use type &RunVal::Gate(ref gate) => Some(gate.clone()), _ => None, } } pub fn iterate_val(val: RunVal) -> Option<Vec<RunVal>> { match val { RunVal::Index(i) => { Some((0..i).map(RunVal::Index).collect()) }, RunVal::Tuple(vals) => Some(vals), _ => None, } } pub fn create_extract_gate_typed(cases: &Vec<Case>, 
min_input_size: usize, ctx: &Context) -> (Gate, Type) { fn reduce_type(output_type: Option<Type>, t: Type) -> Option<Type> { Some(match output_type { None => t, Some(ot) => if ot == t {t} else {Type::Any}, }) } let mut dims: Gate = vec![]; let mut output_type = None; for case in cases.iter() { match case { &Case::Exp(ref selector, ref result) => { let selector_state = build_state(eval_exp(selector, ctx)); let (result_state, result_type) = build_state_typed(eval_exp(result, ctx)).unwrap(); while dims.len() < selector_state.len() || dims.len() < min_input_size { dims.push(vec![]); } for (i, s) in selector_state.iter().enumerate() { let len = ::std::cmp::max(result_state.len(), dims[i].len()); // TODO improve impl dims[i] = result_state.clone().pad(len).into_iter() .zip(dims[i].clone().pad(len).into_iter()) .map(|(r, d)| r * s + d) .collect(); } output_type = reduce_type(output_type, result_type); }, &Case::Default(ref result) => { let (state, result_type) = build_state_typed(eval_exp(result, ctx)).unwrap(); for i in 0..dims.len() { use num::Zero; if dims[i].prob_sum().is_zero() { dims[i] = state.clone(); } } output_type = reduce_type(output_type, result_type); }, } } let max_len = dims.iter().map(Vec::len).max().unwrap_or(0); let gate: Gate = dims.into_iter().map(|s| s.pad(max_len)).collect(); // if !gate.is_unitary() { // panic!("Non-unitary extraction: {:?}", cases); // } (gate, output_type.unwrap_or(Type::Any)) }
for decl in decls { eval_decl(decl, ctx).unwrap();
random_line_split
eval.rs
use error::*; use ast::*; use engine::*; use types::*; use eval_static::*; use std::fmt; use std::rc::Rc; use std::collections::HashMap; #[derive(Clone)] pub struct Macro(pub Ident, pub Rc<dyn Fn(&Exp, &Context) -> Ret<RunVal>>); impl fmt::Debug for Macro { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, ":macro: {}", self.0) } } impl PartialEq for Macro { fn eq(&self, other: &Self) -> bool { self.0 == other.0 } fn ne(&self, other: &Self) -> bool { self.0 != other.0 } } #[derive(Clone,Debug,PartialEq)] pub enum RunVal { Index(usize), String(String), Data(Rc<DataType>, usize), Tuple(Vec<RunVal>), Func(Rc<Context>, Pat, Exp, Type), Macro(Macro), State(State, Type), Gate(Gate), } impl fmt::Display for RunVal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { &RunVal::Index(ref n) => write!(f, "{}", n), &RunVal::String(ref s) => write!(f, "{:?}", s), &RunVal::Data(ref dt, ref index) => write!(f, "{}", dt.variants[*index]), &RunVal::Tuple(ref vals) => write!(f, "({})", vals.iter().map(|val| format!("{}", val)).collect::<Vec<_>>().join(", ")), &RunVal::Func(ref _ctx, ref _pat, ref _body, ref ty) => write!(f, "fn{}", ty), &RunVal::Macro(ref mc) => write!(f, "{:?}", mc), &RunVal::State(ref state, ref ty) => if ty != &Type::Any { write!(f, "{}: {}", StateView(state), ty) } else { write!(f, "{}", StateView(state)) }, &RunVal::Gate(ref gate) => write!(f, "[{}]", gate.iter().map(|state| format!("{}", StateView(state))).collect::<Vec<_>>().join(", ")), } } } #[derive(Clone,Debug,PartialEq)] pub struct Context { path: String, vars: HashMap<Ident, RunVal>, types: TypeContext, } impl Context { pub fn new(path: String) -> Context { Context { path, vars: HashMap::new(), types: TypeContext::new(), } } pub fn path(&self) -> &String { &self.path } pub fn types(&self) -> &TypeContext { &self.types } pub fn create_child(&self) -> Context { self.clone() } pub fn find_var(&self, id: &Ident) -> Ret<RunVal> { unwrap_from_context("Variable", id, 
self.vars.get(id)) } pub fn add_var(&mut self, id: Ident, val: RunVal, ty: Type) -> Ret { self.vars.insert(id.clone(), val); self.types.add_var_type(id, ty) } pub fn find_type(&self, id: &Ident) -> Ret<Type> { self.types.find_type(id) } pub fn add_type(&mut self, id: String, ty: Type) -> Ret { self.types.add_type(id, ty) } pub fn add_datatype(&mut self, id: String, variants: Vec<Ident>) -> Ret { let rc = Rc::new(DataType {id: id.clone(), variants: variants.clone()}); for (i, variant) in variants.iter().enumerate() { self.add_var(variant.clone(), RunVal::Data(rc.clone(), i), Type::Data(rc.clone()))?; } self.add_type(id, Type::Data(rc)) } pub fn add_macro(&mut self, id: &str, handle: &'static dyn Fn(&Exp, &Context) -> Ret<RunVal>) -> Ret { self.add_var(id.to_string(), RunVal::Macro(Macro(id.to_string(), Rc::new(handle))), Type::Any /* TODO define macro types */) } pub fn import(&self, path: &str) -> Ret<Module> { use regex::Regex; use std::path::Path; use resource; use stdlib; use parser; let (ctx, file) = if Regex::new("^[a-z]+:").unwrap().is_match(path) {(self.create_child(), path.to_string())} else { let import_path = Path::new(&self.path()).join(&resource::with_ext(path, "fqy")); let mut import_dir = import_path.clone(); import_dir.pop(); let file = import_path.to_string_lossy().to_string(); let ctx = stdlib::create_ctx(&import_dir.to_string_lossy())?; (ctx, file) }; let exp = parser::parse_resource(&file)?; Ok(Module {path: file.to_string(), exp: exp, ctx: ctx}) } pub fn import_eval(&self, path: &str) -> Ret<RunVal> { let mut module = self.import(path)?; Ok(eval_exp_inline(&module.exp, &mut module.ctx)) } } #[derive(Clone,Debug,PartialEq)] pub struct Module { pub path: String, pub exp: Exp, pub ctx: Context, } pub fn eval_exp(exp: &Exp, ctx: &Context) -> RunVal { match exp { &Exp::Index(n) => RunVal::Index(n), &Exp::String(ref s) => RunVal::String(s.to_string()), &Exp::Var(ref id) => ctx.find_var(id).unwrap(), &Exp::Scope(ref decls, ref ret) => { let mut child = 
ctx.create_child(); for decl in decls { eval_decl(decl, &mut child).unwrap(); } eval_exp(ret, &child) }, &Exp::Expand(_) => panic!("No context for expansion"), &Exp::Tuple(ref args) => RunVal::Tuple(eval_exp_seq(args, ctx)), &Exp::Concat(ref args) => { //TODO adjacent gates if args.len() == 1 { if let Some(gate) = build_gate(&eval_exp(&args[0], ctx), ctx) { return RunVal::Gate(gate) } } let div = (args.len() as f32).sqrt(); let states = args.iter() .map(|e| build_state_typed(eval_exp(e, ctx))) .collect::<Ret<Vec<(State, Type)>>>().unwrap(); RunVal::State(states.iter() .flat_map(|(s, _)| s) .map(|n| n / div) .collect(), Type::Concat(states.into_iter() .map(|(_, t)| t) .collect())) }, &Exp::Cond(ref cond_exp, ref then_exp, ref else_exp) => { let val = eval_exp(cond_exp, ctx); if let Some(b) = build_bool(&val) { eval_exp(if b {then_exp} else {else_exp}, ctx) } else { // TODO: consider removing in favor of using extract gates for explicitness // let state = build_state(val); // if state.len() > 2 { // panic!("Conditional state cannot be {}-dimensional", state.len()) // } // RunVal::State(state.extract(vec![ // build_state(eval_exp(else_exp, ctx)), // build_state(eval_exp(then_exp, ctx)), // ]), Type::Any /* TODO determine from then/else types */) panic!("Non-boolean value: {}", val) } }, &Exp::Lambda(ref pat, ref body) => { let ty = infer_type(exp, ctx.types()).unwrap(); RunVal::Func(Rc::new(ctx.clone()), pat.clone(), (**body).clone(), ty) }, &Exp::Invoke(ref target, ref arg) => { match eval_exp(target, ctx) { // TODO proper tuple function evaluation RunVal::Func(fn_ctx_rc, pat, body, _ty) => { let mut fn_ctx = (*fn_ctx_rc).clone(); assign_pat(&pat, &eval_exp(arg, ctx), &mut fn_ctx).unwrap(); eval_exp(&body, &fn_ctx) }, RunVal::Macro(Macro(_, handle)) => handle(arg, ctx).unwrap(), RunVal::Gate(gate) => { let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap(); RunVal::State(s.extract(gate), t) }, val => { let msg = &format!("Cannot invoke {}", val); let state = 
build_state(eval_exp(arg, ctx)); let gate = build_gate(&val, ctx).expect(msg); RunVal::State(state.extract(gate), Type::Any /* TODO infer output type from `target` */) }, } }, &Exp::Repeat(n, ref exp) => { let val = eval_exp(&exp, ctx); RunVal::Tuple((0..n).map(|_| val.clone()).collect()) }, &Exp::State(ref arg) => { let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap(); RunVal::State(s, t) }, &Exp::Phase(phase, ref arg) => { let val = eval_exp(arg, ctx); build_gate(&val, ctx) .map(|g| RunVal::Gate(g.power(phase))) .unwrap_or_else(|| { let (s, t) = build_state_typed(val).unwrap(); RunVal::State(s.phase(phase), t) }) }, &Exp::Extract(ref arg, ref cases) => { let state = build_state(eval_exp(arg, ctx)); let (gate, gt) = create_extract_gate_typed(cases, state.len(), ctx); RunVal::State(state.extract(gate), gt) }, &Exp::Anno(ref exp, ref anno) => eval_type(anno, ctx.types()).unwrap().assign(eval_exp(exp, ctx)).unwrap(), } } pub fn eval_exp_inline(exp: &Exp, ctx: &mut Context) -> RunVal { match exp { Exp::Scope(ref decls, ref exp) => { for decl in decls { eval_decl(decl, ctx).unwrap(); } eval_exp(exp, ctx) }, _ => eval_exp(exp, ctx), } } pub fn eval_exp_seq(seq: &Vec<Exp>, ctx: &Context) -> Vec<RunVal> { seq.iter().flat_map(|e| { if let Exp::Expand(ref e) = e { let val = eval_exp(e, ctx); let err = Error(format!("Cannot expand value: {}", val)); iterate_val(val).ok_or(err).unwrap() } else {vec![eval_exp(e, ctx)]} }).collect() } pub fn eval_decl(decl: &Decl, ctx: &mut Context) -> Ret { match decl { &Decl::Let(ref pat, ref exp) => assign_pat(pat, &eval_exp(exp, ctx), ctx), &Decl::Type(ref id, ref pat) => { let ty = eval_type(pat, ctx.types())?; ctx.add_type(id.clone(), ty) }, &Decl::Data(ref id, ref variants) => ctx.add_datatype(id.clone(), variants.clone()), &Decl::Assert(ref expect, ref result) => { let a = eval_exp(expect, ctx); let b = eval_exp(result, ctx); let eq = match (&a, &b) { (&RunVal::State(ref a, _), &RunVal::State(ref b, _)) => { 
a.iter().zip(b).map(|(a, b)| { let abs = (a - b).norm(); abs * abs }).sum::<f32>() < 0.00001_f32 }, (a, b) => a == b, }; if !eq {err!("Assertion failed: {} != {}", a, b)} else {Ok(())} }, &Decl::Print(ref exp) => Ok(println!(":: {}", eval_exp(exp, ctx))), &Decl::Do(ref exp) => { eval_exp(exp, ctx); Ok(()) }, } } // TODO combine logic with eval_static::assign_pat_type() pub fn assign_pat(pat: &Pat, val: &RunVal, ctx: &mut Context) -> Ret { match (pat, val) { (&Pat::Any, _) => Ok(()), (&Pat::Var(ref id), _) => ctx.add_var(id.clone(), val.clone(), get_val_type(val)), //TODO use val type (&Pat::Tuple(ref pats), &RunVal::Tuple(ref vals)) => { if pats.len() != vals.len() {err!("Cannot deconstruct {} values from value: {}", pats.len(), val)} else { pats.iter().zip(vals) .map(|(pat, val)| assign_pat(pat, val, ctx)) .collect::<Ret<_>>() } }, (&Pat::Anno(ref pat, ref anno), _) => assign_pat(pat, &eval_type(&anno, ctx.types())?.assign(val.clone())?, ctx), _ => err!("{:?} cannot deconstruct `{}`", pat, val), } } pub fn get_val_type(val: &RunVal) -> Type { match val { &RunVal::Index(_) => Type::Any, &RunVal::String(_) => Type::Any, &RunVal::Data(ref dt, _) => Type::Data((*dt).clone()), &RunVal::Tuple(ref vals) => Type::Tuple(vals.iter().map(get_val_type).collect()), &RunVal::Func(_, _, _, ref ty) => ty.clone(), &RunVal::Macro(_) => Type::Any, // TODO &RunVal::State(_, ref ty) => ty.clone(), &RunVal::Gate(_) => Type::Any, // TODO } } pub fn build_bool(val: &RunVal) -> Option<bool> { match val { &RunVal::Index(n) => Some(n > 0), &RunVal::Data(ref _ty, n) => Some(n > 0), &RunVal::Tuple(ref vec) => Some(vec.len() > 0), _ => None, } } pub fn build_state(val: RunVal) -> State { build_state_typed(val).unwrap().0 } pub fn build_state_typed(val: RunVal) -> Ret<(State, Type)> { match val { RunVal::Index(n) => Ok((get_state(n), Type::Any)), RunVal::Data(dt, index) => Ok((get_state(index).pad(dt.variants.len()), Type::Data(dt))), RunVal::Tuple(vals) => { let states = 
vals.into_iter().map(|v| build_state_typed(v)).collect::<Ret<Vec<(State, Type)>>>()?; let ty = Type::Tuple(states.iter().map(|(_, t)| t.clone()).collect()); Ok((states.into_iter().fold(get_state(0), |a, (b, _)| State::combine(a, b)), ty)) }, RunVal::State(state, ty) => Ok((state, ty)), val => err!("Cannot build state from {}", val) } } pub fn eval_gate_body(exp: &Exp, ctx: &Context) -> Option<Gate> { match exp { &Exp::Extract(ref _arg, ref cases) => Some(create_extract_gate_typed(cases, 0, ctx).0), _ => None, } } pub fn build_gate(val: &RunVal, ctx: &Context) -> Option<Gate> { match val { &RunVal::Tuple(ref vals) => vals.iter() .fold(Some(vec![get_state(0)]), |a, b| a.and_then(|a| build_gate(b, ctx).map(|b| a.combine(b)))), &RunVal::Func(ref fn_ctx, ref _pat, ref body, ref _ty) => eval_gate_body(body, fn_ctx), // TODO use type &RunVal::Gate(ref gate) => Some(gate.clone()), _ => None, } } pub fn iterate_val(val: RunVal) -> Option<Vec<RunVal>> { match val { RunVal::Index(i) =>
, RunVal::Tuple(vals) => Some(vals), _ => None, } } pub fn create_extract_gate_typed(cases: &Vec<Case>, min_input_size: usize, ctx: &Context) -> (Gate, Type) { fn reduce_type(output_type: Option<Type>, t: Type) -> Option<Type> { Some(match output_type { None => t, Some(ot) => if ot == t {t} else {Type::Any}, }) } let mut dims: Gate = vec![]; let mut output_type = None; for case in cases.iter() { match case { &Case::Exp(ref selector, ref result) => { let selector_state = build_state(eval_exp(selector, ctx)); let (result_state, result_type) = build_state_typed(eval_exp(result, ctx)).unwrap(); while dims.len() < selector_state.len() || dims.len() < min_input_size { dims.push(vec![]); } for (i, s) in selector_state.iter().enumerate() { let len = ::std::cmp::max(result_state.len(), dims[i].len()); // TODO improve impl dims[i] = result_state.clone().pad(len).into_iter() .zip(dims[i].clone().pad(len).into_iter()) .map(|(r, d)| r * s + d) .collect(); } output_type = reduce_type(output_type, result_type); }, &Case::Default(ref result) => { let (state, result_type) = build_state_typed(eval_exp(result, ctx)).unwrap(); for i in 0..dims.len() { use num::Zero; if dims[i].prob_sum().is_zero() { dims[i] = state.clone(); } } output_type = reduce_type(output_type, result_type); }, } } let max_len = dims.iter().map(Vec::len).max().unwrap_or(0); let gate: Gate = dims.into_iter().map(|s| s.pad(max_len)).collect(); // if !gate.is_unitary() { // panic!("Non-unitary extraction: {:?}", cases); // } (gate, output_type.unwrap_or(Type::Any)) }
{ Some((0..i).map(RunVal::Index).collect()) }
conditional_block
eval.rs
use error::*; use ast::*; use engine::*; use types::*; use eval_static::*; use std::fmt; use std::rc::Rc; use std::collections::HashMap; #[derive(Clone)] pub struct Macro(pub Ident, pub Rc<dyn Fn(&Exp, &Context) -> Ret<RunVal>>); impl fmt::Debug for Macro { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, ":macro: {}", self.0) } } impl PartialEq for Macro { fn eq(&self, other: &Self) -> bool { self.0 == other.0 } fn ne(&self, other: &Self) -> bool { self.0 != other.0 } } #[derive(Clone,Debug,PartialEq)] pub enum RunVal { Index(usize), String(String), Data(Rc<DataType>, usize), Tuple(Vec<RunVal>), Func(Rc<Context>, Pat, Exp, Type), Macro(Macro), State(State, Type), Gate(Gate), } impl fmt::Display for RunVal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
} #[derive(Clone,Debug,PartialEq)] pub struct Context { path: String, vars: HashMap<Ident, RunVal>, types: TypeContext, } impl Context { pub fn new(path: String) -> Context { Context { path, vars: HashMap::new(), types: TypeContext::new(), } } pub fn path(&self) -> &String { &self.path } pub fn types(&self) -> &TypeContext { &self.types } pub fn create_child(&self) -> Context { self.clone() } pub fn find_var(&self, id: &Ident) -> Ret<RunVal> { unwrap_from_context("Variable", id, self.vars.get(id)) } pub fn add_var(&mut self, id: Ident, val: RunVal, ty: Type) -> Ret { self.vars.insert(id.clone(), val); self.types.add_var_type(id, ty) } pub fn find_type(&self, id: &Ident) -> Ret<Type> { self.types.find_type(id) } pub fn add_type(&mut self, id: String, ty: Type) -> Ret { self.types.add_type(id, ty) } pub fn add_datatype(&mut self, id: String, variants: Vec<Ident>) -> Ret { let rc = Rc::new(DataType {id: id.clone(), variants: variants.clone()}); for (i, variant) in variants.iter().enumerate() { self.add_var(variant.clone(), RunVal::Data(rc.clone(), i), Type::Data(rc.clone()))?; } self.add_type(id, Type::Data(rc)) } pub fn add_macro(&mut self, id: &str, handle: &'static dyn Fn(&Exp, &Context) -> Ret<RunVal>) -> Ret { self.add_var(id.to_string(), RunVal::Macro(Macro(id.to_string(), Rc::new(handle))), Type::Any /* TODO define macro types */) } pub fn import(&self, path: &str) -> Ret<Module> { use regex::Regex; use std::path::Path; use resource; use stdlib; use parser; let (ctx, file) = if Regex::new("^[a-z]+:").unwrap().is_match(path) {(self.create_child(), path.to_string())} else { let import_path = Path::new(&self.path()).join(&resource::with_ext(path, "fqy")); let mut import_dir = import_path.clone(); import_dir.pop(); let file = import_path.to_string_lossy().to_string(); let ctx = stdlib::create_ctx(&import_dir.to_string_lossy())?; (ctx, file) }; let exp = parser::parse_resource(&file)?; Ok(Module {path: file.to_string(), exp: exp, ctx: ctx}) } pub fn 
import_eval(&self, path: &str) -> Ret<RunVal> { let mut module = self.import(path)?; Ok(eval_exp_inline(&module.exp, &mut module.ctx)) } } #[derive(Clone,Debug,PartialEq)] pub struct Module { pub path: String, pub exp: Exp, pub ctx: Context, } pub fn eval_exp(exp: &Exp, ctx: &Context) -> RunVal { match exp { &Exp::Index(n) => RunVal::Index(n), &Exp::String(ref s) => RunVal::String(s.to_string()), &Exp::Var(ref id) => ctx.find_var(id).unwrap(), &Exp::Scope(ref decls, ref ret) => { let mut child = ctx.create_child(); for decl in decls { eval_decl(decl, &mut child).unwrap(); } eval_exp(ret, &child) }, &Exp::Expand(_) => panic!("No context for expansion"), &Exp::Tuple(ref args) => RunVal::Tuple(eval_exp_seq(args, ctx)), &Exp::Concat(ref args) => { //TODO adjacent gates if args.len() == 1 { if let Some(gate) = build_gate(&eval_exp(&args[0], ctx), ctx) { return RunVal::Gate(gate) } } let div = (args.len() as f32).sqrt(); let states = args.iter() .map(|e| build_state_typed(eval_exp(e, ctx))) .collect::<Ret<Vec<(State, Type)>>>().unwrap(); RunVal::State(states.iter() .flat_map(|(s, _)| s) .map(|n| n / div) .collect(), Type::Concat(states.into_iter() .map(|(_, t)| t) .collect())) }, &Exp::Cond(ref cond_exp, ref then_exp, ref else_exp) => { let val = eval_exp(cond_exp, ctx); if let Some(b) = build_bool(&val) { eval_exp(if b {then_exp} else {else_exp}, ctx) } else { // TODO: consider removing in favor of using extract gates for explicitness // let state = build_state(val); // if state.len() > 2 { // panic!("Conditional state cannot be {}-dimensional", state.len()) // } // RunVal::State(state.extract(vec![ // build_state(eval_exp(else_exp, ctx)), // build_state(eval_exp(then_exp, ctx)), // ]), Type::Any /* TODO determine from then/else types */) panic!("Non-boolean value: {}", val) } }, &Exp::Lambda(ref pat, ref body) => { let ty = infer_type(exp, ctx.types()).unwrap(); RunVal::Func(Rc::new(ctx.clone()), pat.clone(), (**body).clone(), ty) }, &Exp::Invoke(ref target, ref arg) 
=> { match eval_exp(target, ctx) { // TODO proper tuple function evaluation RunVal::Func(fn_ctx_rc, pat, body, _ty) => { let mut fn_ctx = (*fn_ctx_rc).clone(); assign_pat(&pat, &eval_exp(arg, ctx), &mut fn_ctx).unwrap(); eval_exp(&body, &fn_ctx) }, RunVal::Macro(Macro(_, handle)) => handle(arg, ctx).unwrap(), RunVal::Gate(gate) => { let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap(); RunVal::State(s.extract(gate), t) }, val => { let msg = &format!("Cannot invoke {}", val); let state = build_state(eval_exp(arg, ctx)); let gate = build_gate(&val, ctx).expect(msg); RunVal::State(state.extract(gate), Type::Any /* TODO infer output type from `target` */) }, } }, &Exp::Repeat(n, ref exp) => { let val = eval_exp(&exp, ctx); RunVal::Tuple((0..n).map(|_| val.clone()).collect()) }, &Exp::State(ref arg) => { let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap(); RunVal::State(s, t) }, &Exp::Phase(phase, ref arg) => { let val = eval_exp(arg, ctx); build_gate(&val, ctx) .map(|g| RunVal::Gate(g.power(phase))) .unwrap_or_else(|| { let (s, t) = build_state_typed(val).unwrap(); RunVal::State(s.phase(phase), t) }) }, &Exp::Extract(ref arg, ref cases) => { let state = build_state(eval_exp(arg, ctx)); let (gate, gt) = create_extract_gate_typed(cases, state.len(), ctx); RunVal::State(state.extract(gate), gt) }, &Exp::Anno(ref exp, ref anno) => eval_type(anno, ctx.types()).unwrap().assign(eval_exp(exp, ctx)).unwrap(), } } pub fn eval_exp_inline(exp: &Exp, ctx: &mut Context) -> RunVal { match exp { Exp::Scope(ref decls, ref exp) => { for decl in decls { eval_decl(decl, ctx).unwrap(); } eval_exp(exp, ctx) }, _ => eval_exp(exp, ctx), } } pub fn eval_exp_seq(seq: &Vec<Exp>, ctx: &Context) -> Vec<RunVal> { seq.iter().flat_map(|e| { if let Exp::Expand(ref e) = e { let val = eval_exp(e, ctx); let err = Error(format!("Cannot expand value: {}", val)); iterate_val(val).ok_or(err).unwrap() } else {vec![eval_exp(e, ctx)]} }).collect() } pub fn eval_decl(decl: &Decl, ctx: &mut 
Context) -> Ret { match decl { &Decl::Let(ref pat, ref exp) => assign_pat(pat, &eval_exp(exp, ctx), ctx), &Decl::Type(ref id, ref pat) => { let ty = eval_type(pat, ctx.types())?; ctx.add_type(id.clone(), ty) }, &Decl::Data(ref id, ref variants) => ctx.add_datatype(id.clone(), variants.clone()), &Decl::Assert(ref expect, ref result) => { let a = eval_exp(expect, ctx); let b = eval_exp(result, ctx); let eq = match (&a, &b) { (&RunVal::State(ref a, _), &RunVal::State(ref b, _)) => { a.iter().zip(b).map(|(a, b)| { let abs = (a - b).norm(); abs * abs }).sum::<f32>() < 0.00001_f32 }, (a, b) => a == b, }; if !eq {err!("Assertion failed: {} != {}", a, b)} else {Ok(())} }, &Decl::Print(ref exp) => Ok(println!(":: {}", eval_exp(exp, ctx))), &Decl::Do(ref exp) => { eval_exp(exp, ctx); Ok(()) }, } } // TODO combine logic with eval_static::assign_pat_type() pub fn assign_pat(pat: &Pat, val: &RunVal, ctx: &mut Context) -> Ret { match (pat, val) { (&Pat::Any, _) => Ok(()), (&Pat::Var(ref id), _) => ctx.add_var(id.clone(), val.clone(), get_val_type(val)), //TODO use val type (&Pat::Tuple(ref pats), &RunVal::Tuple(ref vals)) => { if pats.len() != vals.len() {err!("Cannot deconstruct {} values from value: {}", pats.len(), val)} else { pats.iter().zip(vals) .map(|(pat, val)| assign_pat(pat, val, ctx)) .collect::<Ret<_>>() } }, (&Pat::Anno(ref pat, ref anno), _) => assign_pat(pat, &eval_type(&anno, ctx.types())?.assign(val.clone())?, ctx), _ => err!("{:?} cannot deconstruct `{}`", pat, val), } } pub fn get_val_type(val: &RunVal) -> Type { match val { &RunVal::Index(_) => Type::Any, &RunVal::String(_) => Type::Any, &RunVal::Data(ref dt, _) => Type::Data((*dt).clone()), &RunVal::Tuple(ref vals) => Type::Tuple(vals.iter().map(get_val_type).collect()), &RunVal::Func(_, _, _, ref ty) => ty.clone(), &RunVal::Macro(_) => Type::Any, // TODO &RunVal::State(_, ref ty) => ty.clone(), &RunVal::Gate(_) => Type::Any, // TODO } } pub fn build_bool(val: &RunVal) -> Option<bool> { match val { 
&RunVal::Index(n) => Some(n > 0), &RunVal::Data(ref _ty, n) => Some(n > 0), &RunVal::Tuple(ref vec) => Some(vec.len() > 0), _ => None, } } pub fn build_state(val: RunVal) -> State { build_state_typed(val).unwrap().0 } pub fn build_state_typed(val: RunVal) -> Ret<(State, Type)> { match val { RunVal::Index(n) => Ok((get_state(n), Type::Any)), RunVal::Data(dt, index) => Ok((get_state(index).pad(dt.variants.len()), Type::Data(dt))), RunVal::Tuple(vals) => { let states = vals.into_iter().map(|v| build_state_typed(v)).collect::<Ret<Vec<(State, Type)>>>()?; let ty = Type::Tuple(states.iter().map(|(_, t)| t.clone()).collect()); Ok((states.into_iter().fold(get_state(0), |a, (b, _)| State::combine(a, b)), ty)) }, RunVal::State(state, ty) => Ok((state, ty)), val => err!("Cannot build state from {}", val) } } pub fn eval_gate_body(exp: &Exp, ctx: &Context) -> Option<Gate> { match exp { &Exp::Extract(ref _arg, ref cases) => Some(create_extract_gate_typed(cases, 0, ctx).0), _ => None, } } pub fn build_gate(val: &RunVal, ctx: &Context) -> Option<Gate> { match val { &RunVal::Tuple(ref vals) => vals.iter() .fold(Some(vec![get_state(0)]), |a, b| a.and_then(|a| build_gate(b, ctx).map(|b| a.combine(b)))), &RunVal::Func(ref fn_ctx, ref _pat, ref body, ref _ty) => eval_gate_body(body, fn_ctx), // TODO use type &RunVal::Gate(ref gate) => Some(gate.clone()), _ => None, } } pub fn iterate_val(val: RunVal) -> Option<Vec<RunVal>> { match val { RunVal::Index(i) => { Some((0..i).map(RunVal::Index).collect()) }, RunVal::Tuple(vals) => Some(vals), _ => None, } } pub fn create_extract_gate_typed(cases: &Vec<Case>, min_input_size: usize, ctx: &Context) -> (Gate, Type) { fn reduce_type(output_type: Option<Type>, t: Type) -> Option<Type> { Some(match output_type { None => t, Some(ot) => if ot == t {t} else {Type::Any}, }) } let mut dims: Gate = vec![]; let mut output_type = None; for case in cases.iter() { match case { &Case::Exp(ref selector, ref result) => { let selector_state = 
build_state(eval_exp(selector, ctx)); let (result_state, result_type) = build_state_typed(eval_exp(result, ctx)).unwrap(); while dims.len() < selector_state.len() || dims.len() < min_input_size { dims.push(vec![]); } for (i, s) in selector_state.iter().enumerate() { let len = ::std::cmp::max(result_state.len(), dims[i].len()); // TODO improve impl dims[i] = result_state.clone().pad(len).into_iter() .zip(dims[i].clone().pad(len).into_iter()) .map(|(r, d)| r * s + d) .collect(); } output_type = reduce_type(output_type, result_type); }, &Case::Default(ref result) => { let (state, result_type) = build_state_typed(eval_exp(result, ctx)).unwrap(); for i in 0..dims.len() { use num::Zero; if dims[i].prob_sum().is_zero() { dims[i] = state.clone(); } } output_type = reduce_type(output_type, result_type); }, } } let max_len = dims.iter().map(Vec::len).max().unwrap_or(0); let gate: Gate = dims.into_iter().map(|s| s.pad(max_len)).collect(); // if !gate.is_unitary() { // panic!("Non-unitary extraction: {:?}", cases); // } (gate, output_type.unwrap_or(Type::Any)) }
{ match self { &RunVal::Index(ref n) => write!(f, "{}", n), &RunVal::String(ref s) => write!(f, "{:?}", s), &RunVal::Data(ref dt, ref index) => write!(f, "{}", dt.variants[*index]), &RunVal::Tuple(ref vals) => write!(f, "({})", vals.iter().map(|val| format!("{}", val)).collect::<Vec<_>>().join(", ")), &RunVal::Func(ref _ctx, ref _pat, ref _body, ref ty) => write!(f, "fn{}", ty), &RunVal::Macro(ref mc) => write!(f, "{:?}", mc), &RunVal::State(ref state, ref ty) => if ty != &Type::Any { write!(f, "{}: {}", StateView(state), ty) } else { write!(f, "{}", StateView(state)) }, &RunVal::Gate(ref gate) => write!(f, "[{}]", gate.iter().map(|state| format!("{}", StateView(state))).collect::<Vec<_>>().join(", ")), } }
identifier_body
avx.rs
/* * Copyright (c) 2023. * * This software is free software; * * You can redistribute it or modify it under terms of the MIT, Apache License or Zlib license */ //! AVX color conversion routines //! //! Okay these codes are cool //! //! Herein lies super optimized codes to do color conversions. //! //! //! 1. The YCbCr to RGB use integer approximations and not the floating point equivalent. //! That means we may be +- 2 of pixels generated by libjpeg-turbo jpeg decoding //! (also libjpeg uses routines like `Y = 0.29900 * R + 0.33700 * G + 0.11400 * B + 0.25000 * G`) //! //! Firstly, we use integers (fun fact:there is no part of this code base where were dealing with //! floating points.., fun fact: the first fun fact wasn't even fun.) //! //! Secondly ,we have cool clamping code, especially for rgba , where we don't need clamping and we //! spend our time cursing that Intel decided permute instructions to work like 2 128 bit vectors(the compiler opitmizes //! it out to something cool). //! //! There isn't a lot here (not as fun as bitstream ) but I hope you find what you're looking for. //! //! 
O and ~~subscribe to my youtube channel~~ #![cfg(any(target_arch = "x86", target_arch = "x86_64"))] #![cfg(feature = "x86")] #![allow( clippy::wildcard_imports, clippy::cast_possible_truncation, clippy::too_many_arguments, clippy::inline_always, clippy::doc_markdown, dead_code )] #[cfg(target_arch = "x86")] use core::arch::x86::*; #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; pub union YmmRegister { // both are 32 when using std::mem::size_of mm256: __m256i, // for avx color conversion array: [i16; 16] } //-------------------------------------------------------------------------------------------------- // AVX conversion routines //-------------------------------------------------------------------------------------------------- /// /// Convert YCBCR to RGB using AVX instructions /// /// # Note ///**IT IS THE RESPONSIBILITY OF THE CALLER TO CALL THIS IN CPUS SUPPORTING /// AVX2 OTHERWISE THIS IS UB** /// /// *Peace* /// /// This library itself will ensure that it's never called in CPU's not /// supporting AVX2 /// /// # Arguments /// - `y`,`cb`,`cr`: A reference of 8 i32's /// - `out`: The output array where we store our converted items /// - `offset`: The position from 0 where we write these RGB values #[inline(always)] pub fn ycbcr_to_rgb_avx2( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize ) { // call this in another function to tell RUST to vectorize this // storing unsafe { ycbcr_to_rgb_avx2_1(y, cb, cr, out, offset); } } #[inline] #[target_feature(enable = "avx2")] #[target_feature(enable = "avx")] unsafe fn ycbcr_to_rgb_avx2_1( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize ) { // Load output buffer let tmp: &mut [u8; 48] = out .get_mut(*offset..*offset + 48) .expect("Slice to small cannot write") .try_into() .unwrap(); let (r, g, b) = ycbcr_to_rgb_baseline(y, cb, cr); let mut j = 0; let mut i = 0; while i < 48 { tmp[i] = r.array[j] as u8; tmp[i + 1] = g.array[j] as u8; tmp[i + 
2] = b.array[j] as u8; i += 3; j += 1; } *offset += 48; } /// Baseline implementation of YCBCR to RGB for avx, /// /// It uses integer operations as opposed to floats, the approximation is /// difficult for the eye to see, but this means that it may produce different /// values with libjpeg_turbo. if accuracy is of utmost importance, use that. /// /// this function should be called for most implementations, including /// - ycbcr->rgb /// - ycbcr->rgba /// - ycbcr->brga /// - ycbcr->rgbx #[inline] #[target_feature(enable = "avx2")] #[target_feature(enable = "avx")] unsafe fn ycbcr_to_rgb_baseline( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16] ) -> (YmmRegister, YmmRegister, YmmRegister) { // Load values into a register // // dst[127:0] := MEM[loaddr+127:loaddr] // dst[255:128] := MEM[hiaddr+127:hiaddr] let y_c = _mm256_loadu_si256(y.as_ptr().cast()); let cb_c = _mm256_loadu_si256(cb.as_ptr().cast()); let cr_c = _mm256_loadu_si256(cr.as_ptr().cast()); // AVX version of integer version in https://stackoverflow.com/questions/4041840/function-to-convert-ycbcr-to-rgb // Cb = Cb-128; let cb_r = _mm256_sub_epi16(cb_c, _mm256_set1_epi16(128)); // cr = Cb -128; let cr_r = _mm256_sub_epi16(cr_c, _mm256_set1_epi16(128)); // Calculate Y->R // r = Y + 45 * Cr / 32 // 45*cr let r1 = _mm256_mullo_epi16(_mm256_set1_epi16(45), cr_r); // r1>>5 let r2 = _mm256_srai_epi16::<5>(r1); //y+r2 let r = YmmRegister { mm256: clamp_avx(_mm256_add_epi16(y_c, r2)) }; // g = Y - (11 * Cb + 23 * Cr) / 32 ; // 11*cb let g1 = _mm256_mullo_epi16(_mm256_set1_epi16(11), cb_r); // 23*cr let g2 = _mm256_mullo_epi16(_mm256_set1_epi16(23), cr_r); //(11 //(11 * Cb + 23 * Cr) let g3 = _mm256_add_epi16(g1, g2); // (11 * Cb + 23 * Cr) / 32 let g4 = _mm256_srai_epi16::<5>(g3); // Y - (11 * Cb + 23 * Cr) / 32 ; let g = YmmRegister { mm256: clamp_avx(_mm256_sub_epi16(y_c, g4)) }; // b = Y + 113 * Cb / 64 // 113 * cb let b1 = _mm256_mullo_epi16(_mm256_set1_epi16(113), cb_r); //113 * Cb / 64 let b2 = 
_mm256_srai_epi16::<6>(b1); // b = Y + 113 * Cb / 64 ; let b = YmmRegister { mm256: clamp_avx(_mm256_add_epi16(b2, y_c)) }; return (r, g, b); } #[inline] #[target_feature(enable = "avx2")] /// A baseline implementation of YCbCr to RGB conversion which does not carry /// out clamping /// /// This is used by the `ycbcr_to_rgba_avx` and `ycbcr_to_rgbx` conversion /// routines unsafe fn ycbcr_to_rgb_baseline_no_clamp( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16] ) -> (__m256i, __m256i, __m256i) { // Load values into a register // let y_c = _mm256_loadu_si256(y.as_ptr().cast()); let cb_c = _mm256_loadu_si256(cb.as_ptr().cast()); let cr_c = _mm256_loadu_si256(cr.as_ptr().cast()); // AVX version of integer version in https://stackoverflow.com/questions/4041840/function-to-convert-ycbcr-to-rgb // Cb = Cb-128; let cb_r = _mm256_sub_epi16(cb_c, _mm256_set1_epi16(128)); // cr = Cb -128; let cr_r = _mm256_sub_epi16(cr_c, _mm256_set1_epi16(128)); // Calculate Y->R // r = Y + 45 * Cr / 32 // 45*cr let r1 = _mm256_mullo_epi16(_mm256_set1_epi16(45), cr_r); // r1>>5 let r2 = _mm256_srai_epi16::<5>(r1); //y+r2 let r = _mm256_add_epi16(y_c, r2); // g = Y - (11 * Cb + 23 * Cr) / 32 ; // 11*cb let g1 = _mm256_mullo_epi16(_mm256_set1_epi16(11), cb_r); // 23*cr let g2 = _mm256_mullo_epi16(_mm256_set1_epi16(23), cr_r); //(11 //(11 * Cb + 23 * Cr) let g3 = _mm256_add_epi16(g1, g2); // (11 * Cb + 23 * Cr) / 32 let g4 = _mm256_srai_epi16::<5>(g3); // Y - (11 * Cb + 23 * Cr) / 32 ; let g = _mm256_sub_epi16(y_c, g4); // b = Y + 113 * Cb / 64 // 113 * cb let b1 = _mm256_mullo_epi16(_mm256_set1_epi16(113), cb_r); //113 * Cb / 64 let b2 = _mm256_srai_epi16::<6>(b1); // b = Y + 113 * Cb / 64 ; let b = _mm256_add_epi16(b2, y_c); return (r, g, b); } #[inline(always)] pub fn ycbcr_to_rgba_avx2( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize ) { unsafe { ycbcr_to_rgba_unsafe(y, cb, cr, out, offset); } } #[inline] #[target_feature(enable = "avx2")] #[rustfmt::skip] 
unsafe fn ycbcr_to_rgba_unsafe( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize, ) { // check if we have enough space to write. let tmp:& mut [u8; 64] = out.get_mut(*offset..*offset + 64).expect("Slice to small cannot write").try_into().unwrap(); let (r, g, b) = ycbcr_to_rgb_baseline_no_clamp(y, cb, cr); // set alpha channel to 255 for opaque // And no these comments were not from me pressing the keyboard // Pack the integers into u8's using signed saturation. let c = _mm256_packus_epi16(r, g); //aaaaa_bbbbb_aaaaa_bbbbbb let d = _mm256_packus_epi16(b, _mm256_set1_epi16(255)); // cccccc_dddddd_ccccccc_ddddd // transpose_u16 and interleave channels let e = _mm256_unpacklo_epi8(c, d); //ab_ab_ab_ab_ab_ab_ab_ab let f = _mm256_unpackhi_epi8(c, d); //cd_cd_cd_cd_cd_cd_cd_cd // final transpose_u16 let g = _mm256_unpacklo_epi8(e, f); //abcd_abcd_abcd_abcd_abcd let h = _mm256_unpackhi_epi8(e, f); // undo packus shuffling... let i = _mm256_permute2x128_si256::<{ shuffle(3, 2, 1, 0) }>(g, h); let j = _mm256_permute2x128_si256::<{ shuffle(1, 2, 3, 0) }>(g, h); let k = _mm256_permute2x128_si256::<{ shuffle(3, 2, 0, 1) }>(g, h); let l = _mm256_permute2x128_si256::<{ shuffle(0, 3, 2, 1) }>(g, h); let m = _mm256_blend_epi32::<0b1111_0000>(i, j); let n = _mm256_blend_epi32::<0b1111_0000>(k, l); // Store // Use streaming instructions to prevent polluting the cache? _mm256_storeu_si256(tmp.as_mut_ptr().cast(), m); _mm256_storeu_si256(tmp[32..].as_mut_ptr().cast(), n); *offset += 64; } /// Clamp values between 0 and 255 /// /// This function clamps all values in `reg` to be between 0 and 255 ///( the accepted values for RGB) #[inline] #[target_feature(enable = "avx2")] #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] unsafe fn clamp_avx(reg: __m256i) -> __m256i { // the lowest value let min_s = _mm256_set1_epi16(0); // Highest value let max_s = _mm256_set1_epi16(255);
return min_v; } #[inline] const fn shuffle(z: i32, y: i32, x: i32, w: i32) -> i32 { (z << 6) | (y << 4) | (x << 2) | w }
let max_v = _mm256_max_epi16(reg, min_s); //max(a,0) let min_v = _mm256_min_epi16(max_v, max_s); //min(max(a,0),255)
random_line_split
avx.rs
/* * Copyright (c) 2023. * * This software is free software; * * You can redistribute it or modify it under terms of the MIT, Apache License or Zlib license */ //! AVX color conversion routines //! //! Okay these codes are cool //! //! Herein lies super optimized codes to do color conversions. //! //! //! 1. The YCbCr to RGB use integer approximations and not the floating point equivalent. //! That means we may be +- 2 of pixels generated by libjpeg-turbo jpeg decoding //! (also libjpeg uses routines like `Y = 0.29900 * R + 0.33700 * G + 0.11400 * B + 0.25000 * G`) //! //! Firstly, we use integers (fun fact:there is no part of this code base where were dealing with //! floating points.., fun fact: the first fun fact wasn't even fun.) //! //! Secondly ,we have cool clamping code, especially for rgba , where we don't need clamping and we //! spend our time cursing that Intel decided permute instructions to work like 2 128 bit vectors(the compiler opitmizes //! it out to something cool). //! //! There isn't a lot here (not as fun as bitstream ) but I hope you find what you're looking for. //! //! 
O and ~~subscribe to my youtube channel~~ #![cfg(any(target_arch = "x86", target_arch = "x86_64"))] #![cfg(feature = "x86")] #![allow( clippy::wildcard_imports, clippy::cast_possible_truncation, clippy::too_many_arguments, clippy::inline_always, clippy::doc_markdown, dead_code )] #[cfg(target_arch = "x86")] use core::arch::x86::*; #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; pub union YmmRegister { // both are 32 when using std::mem::size_of mm256: __m256i, // for avx color conversion array: [i16; 16] } //-------------------------------------------------------------------------------------------------- // AVX conversion routines //-------------------------------------------------------------------------------------------------- /// /// Convert YCBCR to RGB using AVX instructions /// /// # Note ///**IT IS THE RESPONSIBILITY OF THE CALLER TO CALL THIS IN CPUS SUPPORTING /// AVX2 OTHERWISE THIS IS UB** /// /// *Peace* /// /// This library itself will ensure that it's never called in CPU's not /// supporting AVX2 /// /// # Arguments /// - `y`,`cb`,`cr`: A reference of 8 i32's /// - `out`: The output array where we store our converted items /// - `offset`: The position from 0 where we write these RGB values #[inline(always)] pub fn ycbcr_to_rgb_avx2( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize ) { // call this in another function to tell RUST to vectorize this // storing unsafe { ycbcr_to_rgb_avx2_1(y, cb, cr, out, offset); } } #[inline] #[target_feature(enable = "avx2")] #[target_feature(enable = "avx")] unsafe fn ycbcr_to_rgb_avx2_1( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize ) { // Load output buffer let tmp: &mut [u8; 48] = out .get_mut(*offset..*offset + 48) .expect("Slice to small cannot write") .try_into() .unwrap(); let (r, g, b) = ycbcr_to_rgb_baseline(y, cb, cr); let mut j = 0; let mut i = 0; while i < 48 { tmp[i] = r.array[j] as u8; tmp[i + 1] = g.array[j] as u8; tmp[i + 
2] = b.array[j] as u8; i += 3; j += 1; } *offset += 48; } /// Baseline implementation of YCBCR to RGB for avx, /// /// It uses integer operations as opposed to floats, the approximation is /// difficult for the eye to see, but this means that it may produce different /// values with libjpeg_turbo. if accuracy is of utmost importance, use that. /// /// this function should be called for most implementations, including /// - ycbcr->rgb /// - ycbcr->rgba /// - ycbcr->brga /// - ycbcr->rgbx #[inline] #[target_feature(enable = "avx2")] #[target_feature(enable = "avx")] unsafe fn ycbcr_to_rgb_baseline( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16] ) -> (YmmRegister, YmmRegister, YmmRegister) { // Load values into a register // // dst[127:0] := MEM[loaddr+127:loaddr] // dst[255:128] := MEM[hiaddr+127:hiaddr] let y_c = _mm256_loadu_si256(y.as_ptr().cast()); let cb_c = _mm256_loadu_si256(cb.as_ptr().cast()); let cr_c = _mm256_loadu_si256(cr.as_ptr().cast()); // AVX version of integer version in https://stackoverflow.com/questions/4041840/function-to-convert-ycbcr-to-rgb // Cb = Cb-128; let cb_r = _mm256_sub_epi16(cb_c, _mm256_set1_epi16(128)); // cr = Cb -128; let cr_r = _mm256_sub_epi16(cr_c, _mm256_set1_epi16(128)); // Calculate Y->R // r = Y + 45 * Cr / 32 // 45*cr let r1 = _mm256_mullo_epi16(_mm256_set1_epi16(45), cr_r); // r1>>5 let r2 = _mm256_srai_epi16::<5>(r1); //y+r2 let r = YmmRegister { mm256: clamp_avx(_mm256_add_epi16(y_c, r2)) }; // g = Y - (11 * Cb + 23 * Cr) / 32 ; // 11*cb let g1 = _mm256_mullo_epi16(_mm256_set1_epi16(11), cb_r); // 23*cr let g2 = _mm256_mullo_epi16(_mm256_set1_epi16(23), cr_r); //(11 //(11 * Cb + 23 * Cr) let g3 = _mm256_add_epi16(g1, g2); // (11 * Cb + 23 * Cr) / 32 let g4 = _mm256_srai_epi16::<5>(g3); // Y - (11 * Cb + 23 * Cr) / 32 ; let g = YmmRegister { mm256: clamp_avx(_mm256_sub_epi16(y_c, g4)) }; // b = Y + 113 * Cb / 64 // 113 * cb let b1 = _mm256_mullo_epi16(_mm256_set1_epi16(113), cb_r); //113 * Cb / 64 let b2 = 
_mm256_srai_epi16::<6>(b1); // b = Y + 113 * Cb / 64 ; let b = YmmRegister { mm256: clamp_avx(_mm256_add_epi16(b2, y_c)) }; return (r, g, b); } #[inline] #[target_feature(enable = "avx2")] /// A baseline implementation of YCbCr to RGB conversion which does not carry /// out clamping /// /// This is used by the `ycbcr_to_rgba_avx` and `ycbcr_to_rgbx` conversion /// routines unsafe fn ycbcr_to_rgb_baseline_no_clamp( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16] ) -> (__m256i, __m256i, __m256i) { // Load values into a register // let y_c = _mm256_loadu_si256(y.as_ptr().cast()); let cb_c = _mm256_loadu_si256(cb.as_ptr().cast()); let cr_c = _mm256_loadu_si256(cr.as_ptr().cast()); // AVX version of integer version in https://stackoverflow.com/questions/4041840/function-to-convert-ycbcr-to-rgb // Cb = Cb-128; let cb_r = _mm256_sub_epi16(cb_c, _mm256_set1_epi16(128)); // cr = Cb -128; let cr_r = _mm256_sub_epi16(cr_c, _mm256_set1_epi16(128)); // Calculate Y->R // r = Y + 45 * Cr / 32 // 45*cr let r1 = _mm256_mullo_epi16(_mm256_set1_epi16(45), cr_r); // r1>>5 let r2 = _mm256_srai_epi16::<5>(r1); //y+r2 let r = _mm256_add_epi16(y_c, r2); // g = Y - (11 * Cb + 23 * Cr) / 32 ; // 11*cb let g1 = _mm256_mullo_epi16(_mm256_set1_epi16(11), cb_r); // 23*cr let g2 = _mm256_mullo_epi16(_mm256_set1_epi16(23), cr_r); //(11 //(11 * Cb + 23 * Cr) let g3 = _mm256_add_epi16(g1, g2); // (11 * Cb + 23 * Cr) / 32 let g4 = _mm256_srai_epi16::<5>(g3); // Y - (11 * Cb + 23 * Cr) / 32 ; let g = _mm256_sub_epi16(y_c, g4); // b = Y + 113 * Cb / 64 // 113 * cb let b1 = _mm256_mullo_epi16(_mm256_set1_epi16(113), cb_r); //113 * Cb / 64 let b2 = _mm256_srai_epi16::<6>(b1); // b = Y + 113 * Cb / 64 ; let b = _mm256_add_epi16(b2, y_c); return (r, g, b); } #[inline(always)] pub fn ycbcr_to_rgba_avx2( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize )
#[inline] #[target_feature(enable = "avx2")] #[rustfmt::skip] unsafe fn ycbcr_to_rgba_unsafe( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize, ) { // check if we have enough space to write. let tmp:& mut [u8; 64] = out.get_mut(*offset..*offset + 64).expect("Slice to small cannot write").try_into().unwrap(); let (r, g, b) = ycbcr_to_rgb_baseline_no_clamp(y, cb, cr); // set alpha channel to 255 for opaque // And no these comments were not from me pressing the keyboard // Pack the integers into u8's using signed saturation. let c = _mm256_packus_epi16(r, g); //aaaaa_bbbbb_aaaaa_bbbbbb let d = _mm256_packus_epi16(b, _mm256_set1_epi16(255)); // cccccc_dddddd_ccccccc_ddddd // transpose_u16 and interleave channels let e = _mm256_unpacklo_epi8(c, d); //ab_ab_ab_ab_ab_ab_ab_ab let f = _mm256_unpackhi_epi8(c, d); //cd_cd_cd_cd_cd_cd_cd_cd // final transpose_u16 let g = _mm256_unpacklo_epi8(e, f); //abcd_abcd_abcd_abcd_abcd let h = _mm256_unpackhi_epi8(e, f); // undo packus shuffling... let i = _mm256_permute2x128_si256::<{ shuffle(3, 2, 1, 0) }>(g, h); let j = _mm256_permute2x128_si256::<{ shuffle(1, 2, 3, 0) }>(g, h); let k = _mm256_permute2x128_si256::<{ shuffle(3, 2, 0, 1) }>(g, h); let l = _mm256_permute2x128_si256::<{ shuffle(0, 3, 2, 1) }>(g, h); let m = _mm256_blend_epi32::<0b1111_0000>(i, j); let n = _mm256_blend_epi32::<0b1111_0000>(k, l); // Store // Use streaming instructions to prevent polluting the cache? 
_mm256_storeu_si256(tmp.as_mut_ptr().cast(), m); _mm256_storeu_si256(tmp[32..].as_mut_ptr().cast(), n); *offset += 64; } /// Clamp values between 0 and 255 /// /// This function clamps all values in `reg` to be between 0 and 255 ///( the accepted values for RGB) #[inline] #[target_feature(enable = "avx2")] #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] unsafe fn clamp_avx(reg: __m256i) -> __m256i { // the lowest value let min_s = _mm256_set1_epi16(0); // Highest value let max_s = _mm256_set1_epi16(255); let max_v = _mm256_max_epi16(reg, min_s); //max(a,0) let min_v = _mm256_min_epi16(max_v, max_s); //min(max(a,0),255) return min_v; } #[inline] const fn shuffle(z: i32, y: i32, x: i32, w: i32) -> i32 { (z << 6) | (y << 4) | (x << 2) | w }
{ unsafe { ycbcr_to_rgba_unsafe(y, cb, cr, out, offset); } }
identifier_body
avx.rs
/* * Copyright (c) 2023. * * This software is free software; * * You can redistribute it or modify it under terms of the MIT, Apache License or Zlib license */ //! AVX color conversion routines //! //! Okay these codes are cool //! //! Herein lies super optimized codes to do color conversions. //! //! //! 1. The YCbCr to RGB use integer approximations and not the floating point equivalent. //! That means we may be +- 2 of pixels generated by libjpeg-turbo jpeg decoding //! (also libjpeg uses routines like `Y = 0.29900 * R + 0.33700 * G + 0.11400 * B + 0.25000 * G`) //! //! Firstly, we use integers (fun fact:there is no part of this code base where were dealing with //! floating points.., fun fact: the first fun fact wasn't even fun.) //! //! Secondly ,we have cool clamping code, especially for rgba , where we don't need clamping and we //! spend our time cursing that Intel decided permute instructions to work like 2 128 bit vectors(the compiler opitmizes //! it out to something cool). //! //! There isn't a lot here (not as fun as bitstream ) but I hope you find what you're looking for. //! //! 
O and ~~subscribe to my youtube channel~~ #![cfg(any(target_arch = "x86", target_arch = "x86_64"))] #![cfg(feature = "x86")] #![allow( clippy::wildcard_imports, clippy::cast_possible_truncation, clippy::too_many_arguments, clippy::inline_always, clippy::doc_markdown, dead_code )] #[cfg(target_arch = "x86")] use core::arch::x86::*; #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; pub union YmmRegister { // both are 32 when using std::mem::size_of mm256: __m256i, // for avx color conversion array: [i16; 16] } //-------------------------------------------------------------------------------------------------- // AVX conversion routines //-------------------------------------------------------------------------------------------------- /// /// Convert YCBCR to RGB using AVX instructions /// /// # Note ///**IT IS THE RESPONSIBILITY OF THE CALLER TO CALL THIS IN CPUS SUPPORTING /// AVX2 OTHERWISE THIS IS UB** /// /// *Peace* /// /// This library itself will ensure that it's never called in CPU's not /// supporting AVX2 /// /// # Arguments /// - `y`,`cb`,`cr`: A reference of 8 i32's /// - `out`: The output array where we store our converted items /// - `offset`: The position from 0 where we write these RGB values #[inline(always)] pub fn ycbcr_to_rgb_avx2( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize ) { // call this in another function to tell RUST to vectorize this // storing unsafe { ycbcr_to_rgb_avx2_1(y, cb, cr, out, offset); } } #[inline] #[target_feature(enable = "avx2")] #[target_feature(enable = "avx")] unsafe fn ycbcr_to_rgb_avx2_1( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize ) { // Load output buffer let tmp: &mut [u8; 48] = out .get_mut(*offset..*offset + 48) .expect("Slice to small cannot write") .try_into() .unwrap(); let (r, g, b) = ycbcr_to_rgb_baseline(y, cb, cr); let mut j = 0; let mut i = 0; while i < 48 { tmp[i] = r.array[j] as u8; tmp[i + 1] = g.array[j] as u8; tmp[i + 
2] = b.array[j] as u8; i += 3; j += 1; } *offset += 48; } /// Baseline implementation of YCBCR to RGB for avx, /// /// It uses integer operations as opposed to floats, the approximation is /// difficult for the eye to see, but this means that it may produce different /// values with libjpeg_turbo. if accuracy is of utmost importance, use that. /// /// this function should be called for most implementations, including /// - ycbcr->rgb /// - ycbcr->rgba /// - ycbcr->brga /// - ycbcr->rgbx #[inline] #[target_feature(enable = "avx2")] #[target_feature(enable = "avx")] unsafe fn ycbcr_to_rgb_baseline( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16] ) -> (YmmRegister, YmmRegister, YmmRegister) { // Load values into a register // // dst[127:0] := MEM[loaddr+127:loaddr] // dst[255:128] := MEM[hiaddr+127:hiaddr] let y_c = _mm256_loadu_si256(y.as_ptr().cast()); let cb_c = _mm256_loadu_si256(cb.as_ptr().cast()); let cr_c = _mm256_loadu_si256(cr.as_ptr().cast()); // AVX version of integer version in https://stackoverflow.com/questions/4041840/function-to-convert-ycbcr-to-rgb // Cb = Cb-128; let cb_r = _mm256_sub_epi16(cb_c, _mm256_set1_epi16(128)); // cr = Cb -128; let cr_r = _mm256_sub_epi16(cr_c, _mm256_set1_epi16(128)); // Calculate Y->R // r = Y + 45 * Cr / 32 // 45*cr let r1 = _mm256_mullo_epi16(_mm256_set1_epi16(45), cr_r); // r1>>5 let r2 = _mm256_srai_epi16::<5>(r1); //y+r2 let r = YmmRegister { mm256: clamp_avx(_mm256_add_epi16(y_c, r2)) }; // g = Y - (11 * Cb + 23 * Cr) / 32 ; // 11*cb let g1 = _mm256_mullo_epi16(_mm256_set1_epi16(11), cb_r); // 23*cr let g2 = _mm256_mullo_epi16(_mm256_set1_epi16(23), cr_r); //(11 //(11 * Cb + 23 * Cr) let g3 = _mm256_add_epi16(g1, g2); // (11 * Cb + 23 * Cr) / 32 let g4 = _mm256_srai_epi16::<5>(g3); // Y - (11 * Cb + 23 * Cr) / 32 ; let g = YmmRegister { mm256: clamp_avx(_mm256_sub_epi16(y_c, g4)) }; // b = Y + 113 * Cb / 64 // 113 * cb let b1 = _mm256_mullo_epi16(_mm256_set1_epi16(113), cb_r); //113 * Cb / 64 let b2 = 
_mm256_srai_epi16::<6>(b1); // b = Y + 113 * Cb / 64 ; let b = YmmRegister { mm256: clamp_avx(_mm256_add_epi16(b2, y_c)) }; return (r, g, b); } #[inline] #[target_feature(enable = "avx2")] /// A baseline implementation of YCbCr to RGB conversion which does not carry /// out clamping /// /// This is used by the `ycbcr_to_rgba_avx` and `ycbcr_to_rgbx` conversion /// routines unsafe fn ycbcr_to_rgb_baseline_no_clamp( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16] ) -> (__m256i, __m256i, __m256i) { // Load values into a register // let y_c = _mm256_loadu_si256(y.as_ptr().cast()); let cb_c = _mm256_loadu_si256(cb.as_ptr().cast()); let cr_c = _mm256_loadu_si256(cr.as_ptr().cast()); // AVX version of integer version in https://stackoverflow.com/questions/4041840/function-to-convert-ycbcr-to-rgb // Cb = Cb-128; let cb_r = _mm256_sub_epi16(cb_c, _mm256_set1_epi16(128)); // cr = Cb -128; let cr_r = _mm256_sub_epi16(cr_c, _mm256_set1_epi16(128)); // Calculate Y->R // r = Y + 45 * Cr / 32 // 45*cr let r1 = _mm256_mullo_epi16(_mm256_set1_epi16(45), cr_r); // r1>>5 let r2 = _mm256_srai_epi16::<5>(r1); //y+r2 let r = _mm256_add_epi16(y_c, r2); // g = Y - (11 * Cb + 23 * Cr) / 32 ; // 11*cb let g1 = _mm256_mullo_epi16(_mm256_set1_epi16(11), cb_r); // 23*cr let g2 = _mm256_mullo_epi16(_mm256_set1_epi16(23), cr_r); //(11 //(11 * Cb + 23 * Cr) let g3 = _mm256_add_epi16(g1, g2); // (11 * Cb + 23 * Cr) / 32 let g4 = _mm256_srai_epi16::<5>(g3); // Y - (11 * Cb + 23 * Cr) / 32 ; let g = _mm256_sub_epi16(y_c, g4); // b = Y + 113 * Cb / 64 // 113 * cb let b1 = _mm256_mullo_epi16(_mm256_set1_epi16(113), cb_r); //113 * Cb / 64 let b2 = _mm256_srai_epi16::<6>(b1); // b = Y + 113 * Cb / 64 ; let b = _mm256_add_epi16(b2, y_c); return (r, g, b); } #[inline(always)] pub fn ycbcr_to_rgba_avx2( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize ) { unsafe { ycbcr_to_rgba_unsafe(y, cb, cr, out, offset); } } #[inline] #[target_feature(enable = "avx2")] #[rustfmt::skip] 
unsafe fn
( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize, ) { // check if we have enough space to write. let tmp:& mut [u8; 64] = out.get_mut(*offset..*offset + 64).expect("Slice to small cannot write").try_into().unwrap(); let (r, g, b) = ycbcr_to_rgb_baseline_no_clamp(y, cb, cr); // set alpha channel to 255 for opaque // And no these comments were not from me pressing the keyboard // Pack the integers into u8's using signed saturation. let c = _mm256_packus_epi16(r, g); //aaaaa_bbbbb_aaaaa_bbbbbb let d = _mm256_packus_epi16(b, _mm256_set1_epi16(255)); // cccccc_dddddd_ccccccc_ddddd // transpose_u16 and interleave channels let e = _mm256_unpacklo_epi8(c, d); //ab_ab_ab_ab_ab_ab_ab_ab let f = _mm256_unpackhi_epi8(c, d); //cd_cd_cd_cd_cd_cd_cd_cd // final transpose_u16 let g = _mm256_unpacklo_epi8(e, f); //abcd_abcd_abcd_abcd_abcd let h = _mm256_unpackhi_epi8(e, f); // undo packus shuffling... let i = _mm256_permute2x128_si256::<{ shuffle(3, 2, 1, 0) }>(g, h); let j = _mm256_permute2x128_si256::<{ shuffle(1, 2, 3, 0) }>(g, h); let k = _mm256_permute2x128_si256::<{ shuffle(3, 2, 0, 1) }>(g, h); let l = _mm256_permute2x128_si256::<{ shuffle(0, 3, 2, 1) }>(g, h); let m = _mm256_blend_epi32::<0b1111_0000>(i, j); let n = _mm256_blend_epi32::<0b1111_0000>(k, l); // Store // Use streaming instructions to prevent polluting the cache? 
_mm256_storeu_si256(tmp.as_mut_ptr().cast(), m); _mm256_storeu_si256(tmp[32..].as_mut_ptr().cast(), n); *offset += 64; } /// Clamp values between 0 and 255 /// /// This function clamps all values in `reg` to be between 0 and 255 ///( the accepted values for RGB) #[inline] #[target_feature(enable = "avx2")] #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] unsafe fn clamp_avx(reg: __m256i) -> __m256i { // the lowest value let min_s = _mm256_set1_epi16(0); // Highest value let max_s = _mm256_set1_epi16(255); let max_v = _mm256_max_epi16(reg, min_s); //max(a,0) let min_v = _mm256_min_epi16(max_v, max_s); //min(max(a,0),255) return min_v; } #[inline] const fn shuffle(z: i32, y: i32, x: i32, w: i32) -> i32 { (z << 6) | (y << 4) | (x << 2) | w }
ycbcr_to_rgba_unsafe
identifier_name
run.js
'use strict'; const fs = require('fs-extra') const express = require('express') const bodyParser = require('body-parser') const url = require('url') const dateFormat = require('dateformat') const spawn = require('child_process').spawn const request = require('request') const app = express() const config = require('./config') const helpers = require('./helpers') const { html, safeHtml, stripIndents } = require('common-tags') let cache = null let updating_now = false var older_cache_time const cachePath = helpers.localFile('_cache', 'cache.json') const updatePath = helpers.localFile('update.js') /* See if there's an up-to-date cache, otherwise run `update.js` to create one. */ const getCache = async function () { //helpers.rollbar.info('Starting getCache') return new Promise(async (resolve, reject) => { const exists = await fs.pathExists(cachePath) if (exists) { if (!cache) { try { cache = JSON.parse(await fs.readFile(cachePath, { encoding: 'utf8' })) resolve(cache) } catch (e) { reject(e) } } else if ((new Date().getTime() - cache.updated) < config.cache_refreshing_interval) { resolve(cache) } else if ((new Date().getTime() - cache.updated) >= config.cache_refreshing_interval) { if (!updating_now) { updating_now = true older_cache_time = cache.updated spawn('node', [updatePath], { detached: true }) var checkDone2 = setInterval(function () { if (cache.updated != older_cache_time) { clearInterval(checkDone2) helpers.rollbar.info("Successfully updated cache!") updating_now = false } }, 1000) } resolve(cache) } } else { helpers.rollbar.info("No cache file found. 
Creating one...") if (!updating_now) { updating_now = true spawn('node', [updatePath], { detached: true }) } var checkDone = setInterval(async function () { const exists = await fs.pathExists(cachePath) if (exists) { updating_now = false try { cache = JSON.parse(await fs.readFile(cachePath, { encoding: 'utf8' })) clearInterval(checkDone) helpers.rollbar.info("Successfully updated cache!") resolve(cache) } catch (err) { reject(err) } } }, 1000) } }) } /* Generate an abuse report for a scam domain */ function
(scam) { let abusereport = stripIndents`I would like to inform you of suspicious activities at the domain ${url.parse(scam.url).hostname} ${'ip' in scam ? `located at IP address ${scam['ip']}`: ''}. ${'subcategory' in scam && scam.subcategory == "NanoWallet" ? `The domain is impersonating NanoWallet.io, a website where people can create Nano wallets (a cryptocurrency like Bitcoin).` : ''} ${'category' in scam && scam.category == "Fake ICO" ? `The domain is impersonating a website where an ICO is being held (initial coin offering, like an initial public offering but it's for cryptocurrencies)` : ''} ${'category' in scam && scam.category == "Phishing" ? `The attackers wish to steal funds by using phishing to get the victim's private keys (passwords to a wallet) and using them to send funds to their own wallets.` : ''} ${'category' in scam && scam.category == "Fake ICO" ? `The attackers wish to steal funds by cloning the real website and changing the XRB address so people will send funds to the attackers' address instead of the real address.` : ''} Please shut down this domain so further attacks will be prevented.` return abusereport } /* Start the web server */ function startWebServer() { app.use(express.static('_static')); // Serve all static pages first app.use('/screenshot', express.static('_cache/screenshots/')); // Serve all screenshots app.use(bodyParser.json({ strict: true })) // to support JSON-encoded bodies app.get('/(/|index.html)?', async function (_req, res) { // Serve index.html res.send(await helpers.layout('index', {})) }) app.get('/search/', async function (_req, res) { // Serve /search/ const verified = [].concat((await getCache()).verified) const sorted = verified.sort(function (a, b) { return a.name.localeCompare(b.name) }) const table = sorted.map((url) => { if ('featured' in url && url.featured) { // TODO: put the verified images here /*if ( await fs.pathExists("_static/img/" + url.name.toLowerCase().replace(' ', '') + ".png") || await 
fs.pathExists("_static/img/" + url.name.toLowerCase().replace(' ', '') + ".svg") ) { table += "<tr><td><img class='project icon' src='/img/" + url.name.toLowerCase().replace(' ', '') + ".png'>" + url.name + "</td><td><a target='_blank' href='" + url.url + "'>" + url.url + "</a></td></tr>"; } else {*/ //helpers.rollbar.warn(`Warning: No verified icon was found for ${url.name}`); return `<tr> <td>${url.name}</td> <td><a target="_blank" href="${url.url}">${url.url}</a></td> </tr>` //} } return null }).filter((s) => s).join('') res.send(await helpers.layout('search', { 'trusted.table': table, 'page.title': 'Search for scam sites, scammers addresses and scam ips' })) }) app.get('/faq/', async function (_req, res) { // Serve /faq/ res.send(await helpers.layout('faq', { 'page.title': 'FAQ' })) }) // Serve /report/, /report/domain/, and /report/address/ or /report/domain/fake-mycrypto.com app.get('/report/:type?/:value?', async function (req, res, next) { let value = '' if (req.params.value) { value = safeHtml`${req.params.value}` } switch (`${req.params.type}`) { case 'address': res.send(await helpers.layout('reportaddress', { 'page.placeholder': value })) break case 'domain': res.send(await helpers.layout('reportdomain', { 'page.placeholder': value })) break default: if (!req.params.type) { res.send(await helpers.layout('report', {})) } else { return next(new Error(`Request type ${req.params.type}`)) } } }) // Serve /scams/ app.get('/scams/:page?/:sorting?/:direction?', async function (req, res, next) { const MAX_RESULTS_PER_PAGE = 30 const scams = [].concat((await getCache()).scams) const currentDirection = `${req.params.direction}` === 'ascending' ? 
'ascending' : 'descending' let direction = { category: '', subcategory: '', status: '', title: '', } let sorting = { category: '', subcategory: '', status: '', title: '' } switch (`${req.params.sorting}`) { case 'category': sorting.category = 'sorted' direction.category = currentDirection scams.sort(function (a, b) { if ('category' in a && 'category' in b && a.category && b.category) { return a.category.localeCompare(b.category) } else { return -1 } }) break case 'subcategory': sorting.subcategory = 'sorted' direction.subcategory = currentDirection scams.sort(function (a, b) { if ('subcategory' in a && 'subcategory' in b && a.subcategory && b.subcategory) { return a.subcategory.localeCompare(b.subcategory) } else { return -1 } }) break case 'title': sorting.title = 'sorted' direction.title = currentDirection scams.sort(function (a, b) { return a.name.localeCompare(b.name) }) break case 'status': sorting.status = 'sorted' direction.status = currentDirection scams.sort(function (a, b) { if ('status' in a && 'status' in b) { if ((a.status == 'Active' && b.status != 'Active') || (a.status == 'Inactive' && (b.status == 'Suspended' || b.status == 'Offline')) || (a.status == 'Suspended' && b.status == 'Offline')) { return -1 } else if (a.status == b.status) { return 0 } else { return 1 } } else { return 1 } }) break default: if (!req.params.sorting) { scams.sort(function (a, b) { return b.id - a.id }) } else { return next(new Error(`Invalid sorting "${req.params.sorting}"`)) } } if (currentDirection === 'descending') { scams.reverse() } let addresses = {} var intActiveScams = 0 var intInactiveScams = 0 scams.forEach(function (scam) { if ('addresses' in scam) { scam.addresses.forEach(function (address) { addresses[address] = true }) } if ('status' in scam) { if (scam.status === 'Active') { ++intActiveScams } else { ++intInactiveScams } } }) let max = MAX_RESULTS_PER_PAGE let start = 0 let pagination = [] const page = +req.params.page || 1 if (req.params.page == "all") { 
max = scams.length } else if (page) { max = ((page - 1) * MAX_RESULTS_PER_PAGE) + MAX_RESULTS_PER_PAGE start = (page - 1) * MAX_RESULTS_PER_PAGE } const paginate = req.params.sorting ? `/${req.params.sorting}/${currentDirection}` : '' const table = scams.slice(start, max).map((scam) => { let status = '<td>None</td>' let category = scam.category || '<i class="remove icon"></i> None' let subcategory = scam.subcategory || '<i class="remove icon"></i> None' if ('status' in scam) { switch (scam.status) { case 'Active': status = "<td class='offline'><i class='warning sign icon'></i> Active</td>" break case 'Inactive': status = "<td class='suspended'><i class='remove icon'></i> Inactive</td>" break case 'Offline': status = "<td class='activ'><i class='checkmark icon'></i> Offline</td>" break case 'Suspended': status = "<td class='suspended'><i class='remove icon'></i> Suspended</td>" break } } if ('category' in scam) { switch (scam.category) { case "Phishing": category = '<i class="address book icon"></i> Phishing' break case "Scamming": category = '<i class="payment icon"></i> Scamming' break case "Fake ICO": category = '<i class="dollar icon"></i> Fake ICO' break } } if ('subcategory' in scam && scam.subcategory) { const sub = scam.subcategory.toLowerCase().replace(/\s/g, '') if (sub == "wallets") { subcategory = `<i class="credit card alternative icon"></i> ${scam.subcategory}` } // TODO: put icons here /*else if (fs.existsSync(`_static/img/${sub}.png`)) { subcategory = `<img src="/img/${scams[i].subcategory.toLowerCase().replace(/\s/g, '')}.png" class="subcategoryicon"> ${scams[i].subcategory}`; } else { subcategory = scams[i].subcategory if (!(icon_warnings.includes(subcategory))) { icon_warnings.push(subcategory) } }*/ } let name = scam.name if (name.length > 40) { name = name.substring(0, 40) + '...' 
} return `<tr> <td>${category}</td> <td>${subcategory}</td> ${status} <td>${name}</td> <td class="center"> <a href='/scam/${scam.id}'><i class='search icon'></i></a> </td> </tr>` }).join('') if (req.params.page !== "all") { let arrLoop = [-2, 3] if (page == 0) { arrLoop = [1, 6] } else if (page == 1) { arrLoop = [0, 5] } else if (page == 2) { arrLoop = [-1, 4] } for (let i = arrLoop[0]; i < arrLoop[1]; i++) { let intPageNumber = (page + Number(i)) let strItemClass = "item" let strHref = `/scams/${intPageNumber}${paginate}` if ((intPageNumber > (scams.length) / MAX_RESULTS_PER_PAGE) || (intPageNumber < 1)) { strItemClass = "disabled item" strHref = "#" } else if (page == intPageNumber) { strItemClass = "active item" } pagination.push(`<a href="${strHref}" class="${strItemClass}">${intPageNumber}</a>`) } if (page > 3) { pagination.unshift(`<a class="item" href="/scams/1${paginate}"> <i class="angle double left icon"></i> </a>`) } if (page < Math.ceil(scams.length / MAX_RESULTS_PER_PAGE) - 3) { pagination.push(`<a class="item" href="/scams/${(Math.ceil(scams.length / MAX_RESULTS_PER_PAGE) - 1)}${paginate}"> <i class='angle double right icon'></i> </a>` ) } } res.send(await helpers.layout('scams', { 'sorting.category.direction': direction.category, 'sorting.subcategory.direction': direction.subcategory, 'sorting.status.direction': direction.status, 'sorting.title.direction': direction.title, 'sorting.category': sorting.category, 'sorting.subcategory': sorting.subcategory, 'sorting.status': sorting.status, 'sorting.title': sorting.title, 'scams.total': scams.length.toLocaleString('en-US'), 'scams.active': intActiveScams.toLocaleString('en-US'), 'addresses.total': Object.keys(addresses).length.toLocaleString('en-US'), 'scams.inactive': intInactiveScams.toLocaleString('en-US'), 'scams.pagination': `<div class="ui pagination menu">${pagination.join('')}</div>`, 'scams.table': table, 'page.title': 'Active Scam List' })) }) app.get('/scam/:id/', async function (req, res, 
next) { // Serve /scam/<id>/ const startTime = Date.now() const cache = await getCache() const id = +req.params.id const scam = cache.scams.find(function (scam) { return scam.id == id }) if (!scam) { return next(new Error(`Scam id not found ${id}`)) } const hostname = url.parse(scam.url).hostname let actions = [] let category = '' if ('category' in scam) { category = `<b>Category</b>: ${scam.category}` if ('subcategory' in scam) { category += ` - ${scam.subcategory}` } category += '<br>' } let status = '' if ('status' in scam) { status = `<b>Status</b>: <span class="class_${scam.status.toLowerCase()}">${scam.status}</span><br>` } let description = '' if ('description' in scam) { description = `<b>Description</b>: ${scam.description}<br>` } let nameservers = '' if ('nameservers' in scam && scam.nameservers && scam.nameservers.length) { nameservers = `<b>Nameservers</b>: <div class="ui bulleted list"> ${scam.nameservers.map(function (nameserver) { return `<div class="ui item">${nameserver}</div>` }).join('')} </div>` } let addresses = '' if ('addresses' in scam && scam.addresses && scam.addresses.length) { addresses = `<b>Related addresses</b>: <div class="ui bulleted list"> ${scam.addresses.map(function (address) { return `<div class="ui item"><a href="/address/${address}">${address}</a></div>` }).join('')} </div>` } let ip = '' if ('ip' in scam) { ip = `<b>IP</b>: <a href="/ip/${scam.ip}">${scam.ip}</a><br>` } let abusereport = '' let screenshot = '' let scamUrl = '' if ('url' in scam) { abusereport = generateAbuseReport(scam) actions.push(`<button id="gen" class="ui icon secondary button"> <i class="setting icon"></i> Abuse Report</button>`, `<a target="_blank" href="http://web.archive.org/web/*/${hostname}" class="ui icon secondary button"><i class="archive icon"></i> Archive</a>` ) scamUrl = `<b>URL</b>: <a id="url" target="_blank" href="/redirect/${encodeURIComponent(scam.url)}">${scam.url}</a><br>` // TODO: put back the screenshots /* if ('status' in scam && 
scam.status != 'Offline' && fs.existsSync('_cache/screenshots/' + scam.id + '.png')) { template = template.replace("{{ scam.screenshot }}", '<h3>Screenshot</h3><img src="/screenshot/' + scam.id + '.png">'); }*/ } actions.push(`<a target="_blank" href="https://github.com/${config.repository.author}/${config.repository.name}/blob/${config.repository.branch}/_data/scams.yaml" class="ui icon secondary button"> <i class="write alternate icon"></i> Improve</a> <button id="share" class="ui icon secondary button"> <i class="share alternate icon"></i> Share</button>`) let googlethreat = '' if ('Google_SafeBrowsing_API_Key' in config && config.Google_SafeBrowsing_API_Key && 'url' in scam) { var options = { uri: 'https://safebrowsing.googleapis.com/v4/threatMatches:find?key=' + config.Google_SafeBrowsing_API_Key, method: 'POST', json: { client: { clientId: "Nano Scam DB", clientVersion: "1.0.0" }, threatInfo: { threatTypes: ["THREAT_TYPE_UNSPECIFIED", "MALWARE", "SOCIAL_ENGINEERING", "UNWANTED_SOFTWARE", "POTENTIALLY_HARMFUL_APPLICATION"], platformTypes: ["ANY_PLATFORM"], threatEntryTypes: ["THREAT_ENTRY_TYPE_UNSPECIFIED", "URL", "EXECUTABLE"], threatEntries: [{ "url": hostname }] } } } googlethreat = `<b>Google Safe Browsing</b>: ${await new Promise((resolve) => { request(options, function (error, response, body) { if (!error && response.statusCode == 200) { if (body && 'matches' in body && body.matches[0]) { resolve(html`<span class='class_offline'>Blocked for ${body.matches[0]['threatType']}</span>`) } else { resolve(html`<span class='class_active'>Not Blocked</span> <a target='_blank' href='https://safebrowsing.google.com/safebrowsing/report_phish/'><i class='warning sign icon'></i></a>`) } } else { resolve('') } }) })}<br>` } res.send(await helpers.layout('scam', { 'scam.id': scam.id, 'scam.name': safeHtml(scam.name), 'scam.category': category, 'scam.status': status, 'scam.description': description, 'scam.nameservers': nameservers, 'scam.addresses': addresses, 'scam.ip': 
ip, 'scam.abusereport': abusereport, 'scam.googlethreat': googlethreat, 'scam.screenshot': screenshot, 'scam.url': scamUrl, 'disqus': await helpers.template('disqus', { 'disqus.id': `scam-${scam.id}` }), 'page.title': safeHtml`Scam ${scam.name}`, 'scam.actions': `<div id="actions" class="eight wide column">${actions.join('')}</div>`, 'page.built': `<p class="built"> This page was built in <b>${Date.now() - startTime}</b>ms, and last updated at <b>${dateFormat(cache.updated, "UTC:mmm dd yyyy, HH:MM")} UTC</b> </p>` })) }) app.get('/ip/:ip/', async function (req, res) { // Serve /ip/<ip>/ const ip = safeHtml`${req.params.ip}` const related = (await getCache()).scams.filter(function (obj) { return obj.ip === ip }).map(function (value) { return `<div class="item"> <a href="/scam/${value.id}/">${value.name}</a> </div>` }).join('') res.send(await helpers.layout('ip', { 'ip.ip': ip, 'ip.scams': html`<div class="ui bulleted list">${related}</div>`, 'disqus': await helpers.template('disqus', { 'disqus.id': `ip-${ip}` }), 'page.title': `Scam report for IP ${ip}` })); }); app.get('/address/:address/', async function (req, res) { // Serve /address/<address>/ const address = safeHtml`${req.params.address}` const related = (await getCache()).scams.filter(function (obj) { if ('addresses' in obj) { return obj.addresses.includes(address) } else { return false } }).map(function (value) { return `<div class="item"> <a href="/scam/${value.id}/">${value.name}</a> </div>` }).join('') res.send(await helpers.layout('address', { 'address.address': address, 'disqus': await helpers.template('disqus', { 'disqus.id': `address-${address}` }), 'address.scams': `<div class="ui bulleted list">${related}</div>`, 'page.title': `Scam report for address ${address}` })) }) app.get('/redirect/:url/', async function (req, res) { // Serve /redirect/<url>/ const url = safeHtml`${req.params.url}` res.send(await helpers.layout('redirect', { 'redirect.domain': url, 'page.title': 'Redirect warning' })) }) 
app.get('/rss/', async function (_req, res) { // Serve /rss/ (rss feed) const cache = await getCache() res.send(await helpers.template('rss', { 'rss.entries': cache.scams.map(function (scam) { const url = `${config.base_url}scam/${scam.id}` return `<item> <guid>${url}</guid> <title>${safeHtml`${scam.name}`}</title> <link>${url}</link> <description>${scam.category}</description> </item>`; }).join('') })) }) app.get('/api/:type?/:domain?/', async function (req, res) { // Serve /api/<type>/ res.header('Access-Control-Allow-Origin', '*') const cache = await getCache() const type = safeHtml`${req.params.type}` /** @type {any} */ let json = false switch (type) { case 'scams': case 'addresses': case 'ips': case 'verified': case 'blacklist': case 'whitelist': json = { success: true, result: cache[type] } break case 'check': { const domainOrAddress = safeHtml`${req.params.domain}` const hostname = url.parse(domainOrAddress).hostname || '' const host = helpers.removeProtocol(domainOrAddress) if (domainOrAddress) { //They can search for an address or domain. 
if (/^xrb_?[0-9a-z]{60}$/.test(domainOrAddress)) { const blocked = Object.keys(cache.addresses).some((address) => (domainOrAddress == address)) //They searched for an address if (blocked) { json = { success: true, result: 'blocked', type: 'address', entries: cache.scams.filter(function (scam) { if ('addresses' in scam) { return (scam.addresses.includes(domainOrAddress)) } else { return false } }) } } else { json = { success: true, result: 'neutral', type: 'address', entries: [] } } } else { //They searched for a domain or an ip address if (cache.whitelist.includes(hostname) || cache.whitelist.includes(domainOrAddress)) { json = { success: true, result: 'verified' } } else if (cache.blacklist.includes(hostname) || cache.blacklist.includes(host)) { if (/^(([1-9]?\d|1\d\d|2[0-5][0-5]|2[0-4]\d)\.){3}([1-9]?\d|1\d\d|2[0-5][0-5]|2[0-4]\d)$/.test(host)) { //They searched for an ip address json = { success: true, result: 'blocked', type: 'ip', entries: cache.scams.filter(function (scam) { return ( url.parse(scam.url).hostname == hostname || helpers.removeProtocol(scam.url) == domainOrAddress || scam.ip == host ) }) } } else { //They searched for a domain json = { success: true, result: 'blocked', type: 'domain', entries: cache.scams.filter(function (scam) { return ( url.parse(scam.url).hostname == hostname || helpers.removeProtocol(scam.url) == domainOrAddress ) }) } } } else { json = { success: false, result: 'neutral', type: 'unsupported', entries: [] } } } } } break case 'abusereport': { const domain = safeHtml`${req.params.domain}` const hostname = url.parse(domain).hostname const results = cache.scams.filter(function (scam) { return ( url.parse(scam.url).hostname == hostname || helpers.removeProtocol(scam.url) == domain ) }) || [] if (results.length == 0) { json = { success: false, error: "URL wasn't found" } } else { json = { success: true, result: generateAbuseReport(results[0]) } } } break } if (json) { res.json(json) } else { res.send(await helpers.layout('api', { 
'page.title': 'Use the API' })) } }) // Serve all other pages as 404 app.get('*', async function (_req, res) { res.status(404).send(await helpers.layout('404', { 'page.title': 'Not found' })) }) if (helpers.rollbar['errorHandler']) { app.use(helpers.rollbar['errorHandler']()) } app.use(async (_err, _req, res, _next) => { res.status(404).send(await helpers.layout('404', { 'page.title': 'Error' })) }) app.listen(config.port, function () { // Listen on port (defined in config) helpers.rollbar.info(`Content served on port ${config.port}`) }) } getCache().then(startWebServer).catch((err) => helpers.rollbar.error(err))
generateAbuseReport
identifier_name
run.js
'use strict';

// run.js — web front end for the scam database.
// Serves pages and a JSON API from the cache built by update.js.

const fs = require('fs-extra')
const express = require('express')
const bodyParser = require('body-parser')
const url = require('url')
const dateFormat = require('dateformat')
const spawn = require('child_process').spawn
const request = require('request')
const app = express()
const config = require('./config')
const helpers = require('./helpers')
const { html, safeHtml, stripIndents } = require('common-tags')

// In-memory copy of _cache/cache.json, loaded lazily and refreshed in the
// background when older than config.cache_refreshing_interval.
let cache = null
let updating_now = false
var older_cache_time
const cachePath = helpers.localFile('_cache', 'cache.json')
const updatePath = helpers.localFile('update.js')

/* See if there's an up-to-date cache, otherwise run `update.js` to create one. */
const getCache = async function () {
    //helpers.rollbar.info('Starting getCache')
    return new Promise(async (resolve, reject) => {
        const exists = await fs.pathExists(cachePath)
        if (exists) {
            if (!cache) {
                // First call since process start: read the cache file once.
                try {
                    cache = JSON.parse(await fs.readFile(cachePath, { encoding: 'utf8' }))
                    resolve(cache)
                } catch (e) {
                    reject(e)
                }
            } else if ((new Date().getTime() - cache.updated) < config.cache_refreshing_interval) {
                // Cache is still fresh.
                resolve(cache)
            } else if ((new Date().getTime() - cache.updated) >= config.cache_refreshing_interval) {
                // Cache is stale: spawn update.js in the background and keep
                // serving the stale copy in the meantime.
                if (!updating_now) {
                    updating_now = true
                    older_cache_time = cache.updated
                    spawn('node', [updatePath], { detached: true })
                    // NOTE(review): nothing in this branch re-reads cache.json,
                    // so `cache.updated` never changes in this process and the
                    // interval may never clear — confirm how the refreshed file
                    // is expected to be picked up.
                    var checkDone2 = setInterval(function () {
                        if (cache.updated != older_cache_time) {
                            clearInterval(checkDone2)
                            helpers.rollbar.info("Successfully updated cache!")
                            updating_now = false
                        }
                    }, 1000)
                }
                resolve(cache)
            }
        } else {
            // No cache file yet: build one and poll until it appears on disk.
            helpers.rollbar.info("No cache file found. Creating one...")
            if (!updating_now) {
                updating_now = true
                spawn('node', [updatePath], { detached: true })
            }
            var checkDone = setInterval(async function () {
                const exists = await fs.pathExists(cachePath)
                if (exists) {
                    updating_now = false
                    try {
                        cache = JSON.parse(await fs.readFile(cachePath, { encoding: 'utf8' }))
                        clearInterval(checkDone)
                        helpers.rollbar.info("Successfully updated cache!")
                        resolve(cache)
                    } catch (err) {
                        reject(err)
                    }
                }
            }, 1000)
        }
    })
}

/* Generate an abuse report for a scam domain */
function generateAbuseReport(scam) {
    let abusereport = stripIndents`I would like to inform you of suspicious activities at the domain ${url.parse(scam.url).hostname} ${'ip' in scam ? `located at IP address ${scam['ip']}`: ''}. ${'subcategory' in scam && scam.subcategory == "NanoWallet" ? `The domain is impersonating NanoWallet.io, a website where people can create Nano wallets (a cryptocurrency like Bitcoin).` : ''} ${'category' in scam && scam.category == "Fake ICO" ? `The domain is impersonating a website where an ICO is being held (initial coin offering, like an initial public offering but it's for cryptocurrencies)` : ''} ${'category' in scam && scam.category == "Phishing" ? `The attackers wish to steal funds by using phishing to get the victim's private keys (passwords to a wallet) and using them to send funds to their own wallets.` : ''} ${'category' in scam && scam.category == "Fake ICO" ? `The attackers wish to steal funds by cloning the real website and changing the XRB address so people will send funds to the attackers' address instead of the real address.` : ''} Please shut down this domain so further attacks will be prevented.`
    return abusereport
}

/* Start the web server */
function startWebServer() {
    app.use(express.static('_static')); // Serve all static pages first
    app.use('/screenshot', express.static('_cache/screenshots/')); // Serve all screenshots
    app.use(bodyParser.json({ strict: true })) // to support JSON-encoded bodies

    app.get('/(/|index.html)?', async function (_req, res) { // Serve index.html
        res.send(await helpers.layout('index', {}))
    })

    app.get('/search/', async function (_req, res) { // Serve /search/
        // Only "featured" verified entries are rendered, sorted by name.
        const verified = [].concat((await getCache()).verified)
        const sorted = verified.sort(function (a, b) {
            return a.name.localeCompare(b.name)
        })
        const table = sorted.map((url) => {
            if ('featured' in url && url.featured) {
                // TODO: put the verified images here
                /*if ( await fs.pathExists("_static/img/" + url.name.toLowerCase().replace(' ', '') + ".png") || await fs.pathExists("_static/img/" + url.name.toLowerCase().replace(' ', '') + ".svg") ) { table += "<tr><td><img class='project icon' src='/img/" + url.name.toLowerCase().replace(' ', '') + ".png'>" + url.name + "</td><td><a target='_blank' href='" + url.url + "'>" + url.url + "</a></td></tr>"; } else {*/
                //helpers.rollbar.warn(`Warning: No verified icon was found for ${url.name}`);
                return `<tr> <td>${url.name}</td> <td><a target="_blank" href="${url.url}">${url.url}</a></td> </tr>`
                //}
            }
            return null
        }).filter((s) => s).join('')
        res.send(await helpers.layout('search', {
            'trusted.table': table,
            'page.title': 'Search for scam sites, scammers addresses and scam ips'
        }))
    })

    app.get('/faq/', async function (_req, res) { // Serve /faq/
        res.send(await helpers.layout('faq', { 'page.title': 'FAQ' }))
    })

    // Serve /report/, /report/domain/, and /report/address/ or /report/domain/fake-mycrypto.com
    app.get('/report/:type?/:value?', async function (req, res, next) {
        let value = ''
        if (req.params.value) {
            value = safeHtml`${req.params.value}`
        }
        switch (`${req.params.type}`) {
            case 'address':
                res.send(await helpers.layout('reportaddress', { 'page.placeholder': value }))
                break
            case 'domain':
                res.send(await helpers.layout('reportdomain', { 'page.placeholder': value }))
                break
            default:
                if (!req.params.type) {
                    res.send(await helpers.layout('report', {}))
                } else {
                    return next(new Error(`Request type ${req.params.type}`))
                }
        }
    })

    // Serve /scams/
    app.get('/scams/:page?/:sorting?/:direction?', async function (req, res, next) {
        const MAX_RESULTS_PER_PAGE = 30
        // Copy the cached list so per-request sorting doesn't mutate the cache.
        const scams = [].concat((await getCache()).scams)
        const currentDirection = `${req.params.direction}` === 'ascending' ? 'ascending' : 'descending'
        let direction = { category: '', subcategory: '', status: '', title: '', }
        let sorting = { category: '', subcategory: '', status: '', title: '' }
        switch (`${req.params.sorting}`) {
            case 'category':
                sorting.category = 'sorted'
                direction.category = currentDirection
                scams.sort(function (a, b) {
                    if ('category' in a && 'category' in b && a.category && b.category) {
                        return a.category.localeCompare(b.category)
                    } else {
                        return -1
                    }
                })
                break
            case 'subcategory':
                sorting.subcategory = 'sorted'
                direction.subcategory = currentDirection
                scams.sort(function (a, b) {
                    if ('subcategory' in a && 'subcategory' in b && a.subcategory && b.subcategory) {
                        return a.subcategory.localeCompare(b.subcategory)
                    } else {
                        return -1
                    }
                })
                break
            case 'title':
                sorting.title = 'sorted'
                direction.title = currentDirection
                scams.sort(function (a, b) {
                    return a.name.localeCompare(b.name)
                })
                break
            case 'status':
                sorting.status = 'sorted'
                direction.status = currentDirection
                // Order: Active, Inactive, Suspended, Offline.
                scams.sort(function (a, b) {
                    if ('status' in a && 'status' in b) {
                        if ((a.status == 'Active' && b.status != 'Active') ||
                            (a.status == 'Inactive' && (b.status == 'Suspended' || b.status == 'Offline')) ||
                            (a.status == 'Suspended' && b.status == 'Offline')) {
                            return -1
                        } else if (a.status == b.status) {
                            return 0
                        } else {
                            return 1
                        }
                    } else {
                        return 1
                    }
                })
                break
            default:
                if (!req.params.sorting) {
                    scams.sort(function (a, b) { return b.id - a.id })
                } else {
                    return next(new Error(`Invalid sorting "${req.params.sorting}"`))
                }
        }
        if (currentDirection === 'descending') {
            scams.reverse()
        }
        // FIX: `addresses` must be request-local. It was declared at module
        // scope (after server start), so every request mutated one shared
        // object that was never reset, instead of tallying the unique
        // addresses for this response only.
        let addresses = {}
        var intActiveScams = 0
        var intInactiveScams = 0
        scams.forEach(function (scam) {
            if ('addresses' in scam) {
                scam.addresses.forEach(function (address) {
                    addresses[address] = true
                })
            }
            if ('status' in scam) {
                if (scam.status === 'Active') {
                    ++intActiveScams
                } else {
                    ++intInactiveScams
                }
            }
        })
        let max = MAX_RESULTS_PER_PAGE
        let start = 0
        let pagination = []
        const page = +req.params.page || 1
        if (req.params.page == "all") {
            max = scams.length
        } else if (page) {
            max = ((page - 1) * MAX_RESULTS_PER_PAGE) + MAX_RESULTS_PER_PAGE
            start = (page - 1) * MAX_RESULTS_PER_PAGE
        }
        const paginate = req.params.sorting ? `/${req.params.sorting}/${currentDirection}` : ''
        const table = scams.slice(start, max).map((scam) => {
            let status = '<td>None</td>'
            let category = scam.category || '<i class="remove icon"></i> None'
            let subcategory = scam.subcategory || '<i class="remove icon"></i> None'
            if ('status' in scam) {
                switch (scam.status) {
                    case 'Active':
                        status = "<td class='offline'><i class='warning sign icon'></i> Active</td>"
                        break
                    case 'Inactive':
                        status = "<td class='suspended'><i class='remove icon'></i> Inactive</td>"
                        break
                    case 'Offline':
                        status = "<td class='activ'><i class='checkmark icon'></i> Offline</td>"
                        break
                    case 'Suspended':
                        status = "<td class='suspended'><i class='remove icon'></i> Suspended</td>"
                        break
                }
            }
            if ('category' in scam) {
                switch (scam.category) {
                    case "Phishing":
                        category = '<i class="address book icon"></i> Phishing'
                        break
                    case "Scamming":
                        category = '<i class="payment icon"></i> Scamming'
                        break
                    case "Fake ICO":
                        category = '<i class="dollar icon"></i> Fake ICO'
                        break
                }
            }
            if ('subcategory' in scam && scam.subcategory) {
                const sub = scam.subcategory.toLowerCase().replace(/\s/g, '')
                if (sub == "wallets") {
                    subcategory = `<i class="credit card alternative icon"></i> ${scam.subcategory}`
                }
                // TODO: put icons here
                /*else if (fs.existsSync(`_static/img/${sub}.png`)) { subcategory = `<img src="/img/${scams[i].subcategory.toLowerCase().replace(/\s/g, '')}.png" class="subcategoryicon"> ${scams[i].subcategory}`; } else { subcategory = scams[i].subcategory if (!(icon_warnings.includes(subcategory))) { icon_warnings.push(subcategory) } }*/
            }
            let name = scam.name
            if (name.length > 40) {
                name = name.substring(0, 40) + '...'
            }
            return `<tr> <td>${category}</td> <td>${subcategory}</td> ${status} <td>${name}</td> <td class="center"> <a href='/scam/${scam.id}'><i class='search icon'></i></a> </td> </tr>`
        }).join('')
        if (req.params.page !== "all") {
            // Build a sliding window of up to five page links centred on the
            // current page, plus first/last shortcuts at the extremes.
            let arrLoop = [-2, 3]
            if (page == 0) {
                arrLoop = [1, 6]
            } else if (page == 1) {
                arrLoop = [0, 5]
            } else if (page == 2) {
                arrLoop = [-1, 4]
            }
            for (let i = arrLoop[0]; i < arrLoop[1]; i++) {
                let intPageNumber = (page + Number(i))
                let strItemClass = "item"
                let strHref = `/scams/${intPageNumber}${paginate}`
                if ((intPageNumber > (scams.length) / MAX_RESULTS_PER_PAGE) || (intPageNumber < 1)) {
                    strItemClass = "disabled item"
                    strHref = "#"
                } else if (page == intPageNumber) {
                    strItemClass = "active item"
                }
                pagination.push(`<a href="${strHref}" class="${strItemClass}">${intPageNumber}</a>`)
            }
            if (page > 3) {
                pagination.unshift(`<a class="item" href="/scams/1${paginate}"> <i class="angle double left icon"></i> </a>`)
            }
            if (page < Math.ceil(scams.length / MAX_RESULTS_PER_PAGE) - 3) {
                pagination.push(`<a class="item" href="/scams/${(Math.ceil(scams.length / MAX_RESULTS_PER_PAGE) - 1)}${paginate}"> <i class='angle double right icon'></i> </a>`)
            }
        }
        res.send(await helpers.layout('scams', {
            'sorting.category.direction': direction.category,
            'sorting.subcategory.direction': direction.subcategory,
            'sorting.status.direction': direction.status,
            'sorting.title.direction': direction.title,
            'sorting.category': sorting.category,
            'sorting.subcategory': sorting.subcategory,
            'sorting.status': sorting.status,
            'sorting.title': sorting.title,
            'scams.total': scams.length.toLocaleString('en-US'),
            'scams.active': intActiveScams.toLocaleString('en-US'),
            'addresses.total': Object.keys(addresses).length.toLocaleString('en-US'),
            'scams.inactive': intInactiveScams.toLocaleString('en-US'),
            'scams.pagination': `<div class="ui pagination menu">${pagination.join('')}</div>`,
            'scams.table': table,
            'page.title': 'Active Scam List'
        }))
    })

    app.get('/scam/:id/', async function (req, res, next) { // Serve /scam/<id>/
        const startTime = Date.now()
        const cache = await getCache()
        const id = +req.params.id
        const scam = cache.scams.find(function (scam) { return scam.id == id })
        if (!scam) {
            return next(new Error(`Scam id not found ${id}`))
        }
        const hostname = url.parse(scam.url).hostname
        let actions = []
        let category = ''
        if ('category' in scam) {
            category = `<b>Category</b>: ${scam.category}`
            if ('subcategory' in scam) {
                category += ` - ${scam.subcategory}`
            }
            category += '<br>'
        }
        let status = ''
        if ('status' in scam) {
            status = `<b>Status</b>: <span class="class_${scam.status.toLowerCase()}">${scam.status}</span><br>`
        }
        let description = ''
        if ('description' in scam) {
            description = `<b>Description</b>: ${scam.description}<br>`
        }
        let nameservers = ''
        if ('nameservers' in scam && scam.nameservers && scam.nameservers.length) {
            nameservers = `<b>Nameservers</b>: <div class="ui bulleted list"> ${scam.nameservers.map(function (nameserver) { return `<div class="ui item">${nameserver}</div>` }).join('')} </div>`
        }
        let addresses = ''
        if ('addresses' in scam && scam.addresses && scam.addresses.length) {
            addresses = `<b>Related addresses</b>: <div class="ui bulleted list"> ${scam.addresses.map(function (address) { return `<div class="ui item"><a href="/address/${address}">${address}</a></div>` }).join('')} </div>`
        }
        let ip = ''
        if ('ip' in scam) {
            ip = `<b>IP</b>: <a href="/ip/${scam.ip}">${scam.ip}</a><br>`
        }
        let abusereport = ''
        let screenshot = ''
        let scamUrl = ''
        if ('url' in scam) {
            abusereport = generateAbuseReport(scam)
            actions.push(
                `<button id="gen" class="ui icon secondary button"> <i class="setting icon"></i> Abuse Report</button>`,
                `<a target="_blank" href="http://web.archive.org/web/*/${hostname}" class="ui icon secondary button"><i class="archive icon"></i> Archive</a>`
            )
            scamUrl = `<b>URL</b>: <a id="url" target="_blank" href="/redirect/${encodeURIComponent(scam.url)}">${scam.url}</a><br>`
            // TODO: put back the screenshots
            /* if ('status' in scam && scam.status != 'Offline' && fs.existsSync('_cache/screenshots/' + scam.id + '.png')) { template = template.replace("{{ scam.screenshot }}", '<h3>Screenshot</h3><img src="/screenshot/' + scam.id + '.png">'); }*/
        }
        actions.push(`<a target="_blank" href="https://github.com/${config.repository.author}/${config.repository.name}/blob/${config.repository.branch}/_data/scams.yaml" class="ui icon secondary button"> <i class="write alternate icon"></i> Improve</a> <button id="share" class="ui icon secondary button"> <i class="share alternate icon"></i> Share</button>`)
        let googlethreat = ''
        if ('Google_SafeBrowsing_API_Key' in config && config.Google_SafeBrowsing_API_Key && 'url' in scam) {
            // Query Google Safe Browsing v4 for this hostname.
            var options = {
                uri: 'https://safebrowsing.googleapis.com/v4/threatMatches:find?key=' + config.Google_SafeBrowsing_API_Key,
                method: 'POST',
                json: {
                    client: { clientId: "Nano Scam DB", clientVersion: "1.0.0" },
                    threatInfo: {
                        threatTypes: ["THREAT_TYPE_UNSPECIFIED", "MALWARE", "SOCIAL_ENGINEERING", "UNWANTED_SOFTWARE", "POTENTIALLY_HARMFUL_APPLICATION"],
                        platformTypes: ["ANY_PLATFORM"],
                        threatEntryTypes: ["THREAT_ENTRY_TYPE_UNSPECIFIED", "URL", "EXECUTABLE"],
                        threatEntries: [{ "url": hostname }]
                    }
                }
            }
            googlethreat = `<b>Google Safe Browsing</b>: ${await new Promise((resolve) => {
                request(options, function (error, response, body) {
                    if (!error && response.statusCode == 200) {
                        if (body && 'matches' in body && body.matches[0]) {
                            resolve(html`<span class='class_offline'>Blocked for ${body.matches[0]['threatType']}</span>`)
                        } else {
                            resolve(html`<span class='class_active'>Not Blocked</span> <a target='_blank' href='https://safebrowsing.google.com/safebrowsing/report_phish/'><i class='warning sign icon'></i></a>`)
                        }
                    } else {
                        // On network/API failure the section is simply omitted.
                        resolve('')
                    }
                })
            })}<br>`
        }
        res.send(await helpers.layout('scam', {
            'scam.id': scam.id,
            'scam.name': safeHtml(scam.name),
            'scam.category': category,
            'scam.status': status,
            'scam.description': description,
            'scam.nameservers': nameservers,
            'scam.addresses': addresses,
            'scam.ip': ip,
            'scam.abusereport': abusereport,
            'scam.googlethreat': googlethreat,
            'scam.screenshot': screenshot,
            'scam.url': scamUrl,
            'disqus': await helpers.template('disqus', { 'disqus.id': `scam-${scam.id}` }),
            'page.title': safeHtml`Scam ${scam.name}`,
            'scam.actions': `<div id="actions" class="eight wide column">${actions.join('')}</div>`,
            'page.built': `<p class="built"> This page was built in <b>${Date.now() - startTime}</b>ms, and last updated at <b>${dateFormat(cache.updated, "UTC:mmm dd yyyy, HH:MM")} UTC</b> </p>`
        }))
    })

    app.get('/ip/:ip/', async function (req, res) { // Serve /ip/<ip>/
        const ip = safeHtml`${req.params.ip}`
        const related = (await getCache()).scams.filter(function (obj) {
            return obj.ip === ip
        }).map(function (value) {
            return `<div class="item"> <a href="/scam/${value.id}/">${value.name}</a> </div>`
        }).join('')
        res.send(await helpers.layout('ip', {
            'ip.ip': ip,
            'ip.scams': html`<div class="ui bulleted list">${related}</div>`,
            'disqus': await helpers.template('disqus', { 'disqus.id': `ip-${ip}` }),
            'page.title': `Scam report for IP ${ip}`
        }));
    });

    app.get('/address/:address/', async function (req, res) { // Serve /address/<address>/
        const address = safeHtml`${req.params.address}`
        const related = (await getCache()).scams.filter(function (obj) {
            if ('addresses' in obj) {
                return obj.addresses.includes(address)
            } else {
                return false
            }
        }).map(function (value) {
            return `<div class="item"> <a href="/scam/${value.id}/">${value.name}</a> </div>`
        }).join('')
        res.send(await helpers.layout('address', {
            'address.address': address,
            'disqus': await helpers.template('disqus', { 'disqus.id': `address-${address}` }),
            'address.scams': `<div class="ui bulleted list">${related}</div>`,
            'page.title': `Scam report for address ${address}`
        }))
    })

    app.get('/redirect/:url/', async function (req, res) { // Serve /redirect/<url>/
        const url = safeHtml`${req.params.url}`
        res.send(await helpers.layout('redirect', { 'redirect.domain': url, 'page.title': 'Redirect warning' }))
    })

    app.get('/rss/', async function (_req, res) { // Serve /rss/ (rss feed)
        const cache = await getCache()
        res.send(await helpers.template('rss', {
            'rss.entries': cache.scams.map(function (scam) {
                const url = `${config.base_url}scam/${scam.id}`
                return `<item> <guid>${url}</guid> <title>${safeHtml`${scam.name}`}</title> <link>${url}</link> <description>${scam.category}</description> </item>`;
            }).join('')
        }))
    })

    app.get('/api/:type?/:domain?/', async function (req, res) { // Serve /api/<type>/
        res.header('Access-Control-Allow-Origin', '*')
        const cache = await getCache()
        const type = safeHtml`${req.params.type}`
        /** @type {any} */
        let json = false
        switch (type) {
            case 'scams':
            case 'addresses':
            case 'ips':
            case 'verified':
            case 'blacklist':
            case 'whitelist':
                // Raw dumps of the corresponding cache section.
                json = { success: true, result: cache[type] }
                break
            case 'check': {
                const domainOrAddress = safeHtml`${req.params.domain}`
                const hostname = url.parse(domainOrAddress).hostname || ''
                const host = helpers.removeProtocol(domainOrAddress)
                if (domainOrAddress) {
                    //They can search for an address or domain.
                    if (/^xrb_?[0-9a-z]{60}$/.test(domainOrAddress)) {
                        //They searched for an address
                        const blocked = Object.keys(cache.addresses).some((address) => (domainOrAddress == address))
                        if (blocked) {
                            json = {
                                success: true,
                                result: 'blocked',
                                type: 'address',
                                entries: cache.scams.filter(function (scam) {
                                    if ('addresses' in scam) {
                                        return (scam.addresses.includes(domainOrAddress))
                                    } else {
                                        return false
                                    }
                                })
                            }
                        } else {
                            json = { success: true, result: 'neutral', type: 'address', entries: [] }
                        }
                    } else {
                        //They searched for a domain or an ip address
                        if (cache.whitelist.includes(hostname) || cache.whitelist.includes(domainOrAddress)) {
                            json = { success: true, result: 'verified' }
                        } else if (cache.blacklist.includes(hostname) || cache.blacklist.includes(host)) {
                            if (/^(([1-9]?\d|1\d\d|2[0-5][0-5]|2[0-4]\d)\.){3}([1-9]?\d|1\d\d|2[0-5][0-5]|2[0-4]\d)$/.test(host)) {
                                //They searched for an ip address
                                json = {
                                    success: true,
                                    result: 'blocked',
                                    type: 'ip',
                                    entries: cache.scams.filter(function (scam) {
                                        return (
                                            url.parse(scam.url).hostname == hostname ||
                                            helpers.removeProtocol(scam.url) == domainOrAddress ||
                                            scam.ip == host
                                        )
                                    })
                                }
                            } else {
                                //They searched for a domain
                                json = {
                                    success: true,
                                    result: 'blocked',
                                    type: 'domain',
                                    entries: cache.scams.filter(function (scam) {
                                        return (
                                            url.parse(scam.url).hostname == hostname ||
                                            helpers.removeProtocol(scam.url) == domainOrAddress
                                        )
                                    })
                                }
                            }
                        } else {
                            json = { success: false, result: 'neutral', type: 'unsupported', entries: [] }
                        }
                    }
                }
            }
                break
            case 'abusereport': {
                const domain = safeHtml`${req.params.domain}`
                const hostname = url.parse(domain).hostname
                const results = cache.scams.filter(function (scam) {
                    return (
                        url.parse(scam.url).hostname == hostname ||
                        helpers.removeProtocol(scam.url) == domain
                    )
                }) || []
                if (results.length == 0) {
                    json = { success: false, error: "URL wasn't found" }
                } else {
                    json = { success: true, result: generateAbuseReport(results[0]) }
                }
            }
                break
        }
        if (json) {
            res.json(json)
        } else {
            // Unknown type: show the API documentation page instead.
            res.send(await helpers.layout('api', { 'page.title': 'Use the API' }))
        }
    })

    // Serve all other pages as 404
    app.get('*', async function (_req, res) {
        res.status(404).send(await helpers.layout('404', { 'page.title': 'Not found' }))
    })

    if (helpers.rollbar['errorHandler']) {
        app.use(helpers.rollbar['errorHandler']())
    }
    app.use(async (_err, _req, res, _next) => {
        res.status(404).send(await helpers.layout('404', { 'page.title': 'Error' }))
    })

    app.listen(config.port, function () { // Listen on port (defined in config)
        helpers.rollbar.info(`Content served on port ${config.port}`)
    })
}

getCache().then(startWebServer).catch((err) => helpers.rollbar.error(err))
random_line_split
run.js
'use strict'; const fs = require('fs-extra') const express = require('express') const bodyParser = require('body-parser') const url = require('url') const dateFormat = require('dateformat') const spawn = require('child_process').spawn const request = require('request') const app = express() const config = require('./config') const helpers = require('./helpers') const { html, safeHtml, stripIndents } = require('common-tags') let cache = null let updating_now = false var older_cache_time const cachePath = helpers.localFile('_cache', 'cache.json') const updatePath = helpers.localFile('update.js') /* See if there's an up-to-date cache, otherwise run `update.js` to create one. */ const getCache = async function () { //helpers.rollbar.info('Starting getCache') return new Promise(async (resolve, reject) => { const exists = await fs.pathExists(cachePath) if (exists) { if (!cache) { try { cache = JSON.parse(await fs.readFile(cachePath, { encoding: 'utf8' })) resolve(cache) } catch (e) { reject(e) } } else if ((new Date().getTime() - cache.updated) < config.cache_refreshing_interval) { resolve(cache) } else if ((new Date().getTime() - cache.updated) >= config.cache_refreshing_interval) { if (!updating_now) { updating_now = true older_cache_time = cache.updated spawn('node', [updatePath], { detached: true }) var checkDone2 = setInterval(function () { if (cache.updated != older_cache_time) { clearInterval(checkDone2) helpers.rollbar.info("Successfully updated cache!") updating_now = false } }, 1000) } resolve(cache) } } else { helpers.rollbar.info("No cache file found. 
Creating one...") if (!updating_now) { updating_now = true spawn('node', [updatePath], { detached: true }) } var checkDone = setInterval(async function () { const exists = await fs.pathExists(cachePath) if (exists) { updating_now = false try { cache = JSON.parse(await fs.readFile(cachePath, { encoding: 'utf8' })) clearInterval(checkDone) helpers.rollbar.info("Successfully updated cache!") resolve(cache) } catch (err) { reject(err) } } }, 1000) } }) } /* Generate an abuse report for a scam domain */ function generateAbuseReport(scam) { let abusereport = stripIndents`I would like to inform you of suspicious activities at the domain ${url.parse(scam.url).hostname} ${'ip' in scam ? `located at IP address ${scam['ip']}`: ''}. ${'subcategory' in scam && scam.subcategory == "NanoWallet" ? `The domain is impersonating NanoWallet.io, a website where people can create Nano wallets (a cryptocurrency like Bitcoin).` : ''} ${'category' in scam && scam.category == "Fake ICO" ? `The domain is impersonating a website where an ICO is being held (initial coin offering, like an initial public offering but it's for cryptocurrencies)` : ''} ${'category' in scam && scam.category == "Phishing" ? `The attackers wish to steal funds by using phishing to get the victim's private keys (passwords to a wallet) and using them to send funds to their own wallets.` : ''} ${'category' in scam && scam.category == "Fake ICO" ? 
`The attackers wish to steal funds by cloning the real website and changing the XRB address so people will send funds to the attackers' address instead of the real address.` : ''} Please shut down this domain so further attacks will be prevented.` return abusereport } /* Start the web server */ function startWebServer() { app.use(express.static('_static')); // Serve all static pages first app.use('/screenshot', express.static('_cache/screenshots/')); // Serve all screenshots app.use(bodyParser.json({ strict: true })) // to support JSON-encoded bodies app.get('/(/|index.html)?', async function (_req, res) { // Serve index.html res.send(await helpers.layout('index', {})) }) app.get('/search/', async function (_req, res) { // Serve /search/ const verified = [].concat((await getCache()).verified) const sorted = verified.sort(function (a, b) { return a.name.localeCompare(b.name) }) const table = sorted.map((url) => { if ('featured' in url && url.featured) { // TODO: put the verified images here /*if ( await fs.pathExists("_static/img/" + url.name.toLowerCase().replace(' ', '') + ".png") || await fs.pathExists("_static/img/" + url.name.toLowerCase().replace(' ', '') + ".svg") ) { table += "<tr><td><img class='project icon' src='/img/" + url.name.toLowerCase().replace(' ', '') + ".png'>" + url.name + "</td><td><a target='_blank' href='" + url.url + "'>" + url.url + "</a></td></tr>"; } else {*/ //helpers.rollbar.warn(`Warning: No verified icon was found for ${url.name}`); return `<tr> <td>${url.name}</td> <td><a target="_blank" href="${url.url}">${url.url}</a></td> </tr>` //} } return null }).filter((s) => s).join('') res.send(await helpers.layout('search', { 'trusted.table': table, 'page.title': 'Search for scam sites, scammers addresses and scam ips' })) }) app.get('/faq/', async function (_req, res) { // Serve /faq/ res.send(await helpers.layout('faq', { 'page.title': 'FAQ' })) }) // Serve /report/, /report/domain/, and /report/address/ or 
/report/domain/fake-mycrypto.com app.get('/report/:type?/:value?', async function (req, res, next) { let value = '' if (req.params.value) { value = safeHtml`${req.params.value}` } switch (`${req.params.type}`) { case 'address': res.send(await helpers.layout('reportaddress', { 'page.placeholder': value })) break case 'domain': res.send(await helpers.layout('reportdomain', { 'page.placeholder': value })) break default: if (!req.params.type) { res.send(await helpers.layout('report', {})) } else { return next(new Error(`Request type ${req.params.type}`)) } } }) // Serve /scams/ app.get('/scams/:page?/:sorting?/:direction?', async function (req, res, next) { const MAX_RESULTS_PER_PAGE = 30 const scams = [].concat((await getCache()).scams) const currentDirection = `${req.params.direction}` === 'ascending' ? 'ascending' : 'descending' let direction = { category: '', subcategory: '', status: '', title: '', } let sorting = { category: '', subcategory: '', status: '', title: '' } switch (`${req.params.sorting}`) { case 'category': sorting.category = 'sorted' direction.category = currentDirection scams.sort(function (a, b) { if ('category' in a && 'category' in b && a.category && b.category)
else { return -1 } }) break case 'subcategory': sorting.subcategory = 'sorted' direction.subcategory = currentDirection scams.sort(function (a, b) { if ('subcategory' in a && 'subcategory' in b && a.subcategory && b.subcategory) { return a.subcategory.localeCompare(b.subcategory) } else { return -1 } }) break case 'title': sorting.title = 'sorted' direction.title = currentDirection scams.sort(function (a, b) { return a.name.localeCompare(b.name) }) break case 'status': sorting.status = 'sorted' direction.status = currentDirection scams.sort(function (a, b) { if ('status' in a && 'status' in b) { if ((a.status == 'Active' && b.status != 'Active') || (a.status == 'Inactive' && (b.status == 'Suspended' || b.status == 'Offline')) || (a.status == 'Suspended' && b.status == 'Offline')) { return -1 } else if (a.status == b.status) { return 0 } else { return 1 } } else { return 1 } }) break default: if (!req.params.sorting) { scams.sort(function (a, b) { return b.id - a.id }) } else { return next(new Error(`Invalid sorting "${req.params.sorting}"`)) } } if (currentDirection === 'descending') { scams.reverse() } let addresses = {} var intActiveScams = 0 var intInactiveScams = 0 scams.forEach(function (scam) { if ('addresses' in scam) { scam.addresses.forEach(function (address) { addresses[address] = true }) } if ('status' in scam) { if (scam.status === 'Active') { ++intActiveScams } else { ++intInactiveScams } } }) let max = MAX_RESULTS_PER_PAGE let start = 0 let pagination = [] const page = +req.params.page || 1 if (req.params.page == "all") { max = scams.length } else if (page) { max = ((page - 1) * MAX_RESULTS_PER_PAGE) + MAX_RESULTS_PER_PAGE start = (page - 1) * MAX_RESULTS_PER_PAGE } const paginate = req.params.sorting ? 
`/${req.params.sorting}/${currentDirection}` : '' const table = scams.slice(start, max).map((scam) => { let status = '<td>None</td>' let category = scam.category || '<i class="remove icon"></i> None' let subcategory = scam.subcategory || '<i class="remove icon"></i> None' if ('status' in scam) { switch (scam.status) { case 'Active': status = "<td class='offline'><i class='warning sign icon'></i> Active</td>" break case 'Inactive': status = "<td class='suspended'><i class='remove icon'></i> Inactive</td>" break case 'Offline': status = "<td class='activ'><i class='checkmark icon'></i> Offline</td>" break case 'Suspended': status = "<td class='suspended'><i class='remove icon'></i> Suspended</td>" break } } if ('category' in scam) { switch (scam.category) { case "Phishing": category = '<i class="address book icon"></i> Phishing' break case "Scamming": category = '<i class="payment icon"></i> Scamming' break case "Fake ICO": category = '<i class="dollar icon"></i> Fake ICO' break } } if ('subcategory' in scam && scam.subcategory) { const sub = scam.subcategory.toLowerCase().replace(/\s/g, '') if (sub == "wallets") { subcategory = `<i class="credit card alternative icon"></i> ${scam.subcategory}` } // TODO: put icons here /*else if (fs.existsSync(`_static/img/${sub}.png`)) { subcategory = `<img src="/img/${scams[i].subcategory.toLowerCase().replace(/\s/g, '')}.png" class="subcategoryicon"> ${scams[i].subcategory}`; } else { subcategory = scams[i].subcategory if (!(icon_warnings.includes(subcategory))) { icon_warnings.push(subcategory) } }*/ } let name = scam.name if (name.length > 40) { name = name.substring(0, 40) + '...' 
} return `<tr> <td>${category}</td> <td>${subcategory}</td> ${status} <td>${name}</td> <td class="center"> <a href='/scam/${scam.id}'><i class='search icon'></i></a> </td> </tr>` }).join('') if (req.params.page !== "all") { let arrLoop = [-2, 3] if (page == 0) { arrLoop = [1, 6] } else if (page == 1) { arrLoop = [0, 5] } else if (page == 2) { arrLoop = [-1, 4] } for (let i = arrLoop[0]; i < arrLoop[1]; i++) { let intPageNumber = (page + Number(i)) let strItemClass = "item" let strHref = `/scams/${intPageNumber}${paginate}` if ((intPageNumber > (scams.length) / MAX_RESULTS_PER_PAGE) || (intPageNumber < 1)) { strItemClass = "disabled item" strHref = "#" } else if (page == intPageNumber) { strItemClass = "active item" } pagination.push(`<a href="${strHref}" class="${strItemClass}">${intPageNumber}</a>`) } if (page > 3) { pagination.unshift(`<a class="item" href="/scams/1${paginate}"> <i class="angle double left icon"></i> </a>`) } if (page < Math.ceil(scams.length / MAX_RESULTS_PER_PAGE) - 3) { pagination.push(`<a class="item" href="/scams/${(Math.ceil(scams.length / MAX_RESULTS_PER_PAGE) - 1)}${paginate}"> <i class='angle double right icon'></i> </a>` ) } } res.send(await helpers.layout('scams', { 'sorting.category.direction': direction.category, 'sorting.subcategory.direction': direction.subcategory, 'sorting.status.direction': direction.status, 'sorting.title.direction': direction.title, 'sorting.category': sorting.category, 'sorting.subcategory': sorting.subcategory, 'sorting.status': sorting.status, 'sorting.title': sorting.title, 'scams.total': scams.length.toLocaleString('en-US'), 'scams.active': intActiveScams.toLocaleString('en-US'), 'addresses.total': Object.keys(addresses).length.toLocaleString('en-US'), 'scams.inactive': intInactiveScams.toLocaleString('en-US'), 'scams.pagination': `<div class="ui pagination menu">${pagination.join('')}</div>`, 'scams.table': table, 'page.title': 'Active Scam List' })) }) app.get('/scam/:id/', async function (req, res, 
next) { // Serve /scam/<id>/ const startTime = Date.now() const cache = await getCache() const id = +req.params.id const scam = cache.scams.find(function (scam) { return scam.id == id }) if (!scam) { return next(new Error(`Scam id not found ${id}`)) } const hostname = url.parse(scam.url).hostname let actions = [] let category = '' if ('category' in scam) { category = `<b>Category</b>: ${scam.category}` if ('subcategory' in scam) { category += ` - ${scam.subcategory}` } category += '<br>' } let status = '' if ('status' in scam) { status = `<b>Status</b>: <span class="class_${scam.status.toLowerCase()}">${scam.status}</span><br>` } let description = '' if ('description' in scam) { description = `<b>Description</b>: ${scam.description}<br>` } let nameservers = '' if ('nameservers' in scam && scam.nameservers && scam.nameservers.length) { nameservers = `<b>Nameservers</b>: <div class="ui bulleted list"> ${scam.nameservers.map(function (nameserver) { return `<div class="ui item">${nameserver}</div>` }).join('')} </div>` } let addresses = '' if ('addresses' in scam && scam.addresses && scam.addresses.length) { addresses = `<b>Related addresses</b>: <div class="ui bulleted list"> ${scam.addresses.map(function (address) { return `<div class="ui item"><a href="/address/${address}">${address}</a></div>` }).join('')} </div>` } let ip = '' if ('ip' in scam) { ip = `<b>IP</b>: <a href="/ip/${scam.ip}">${scam.ip}</a><br>` } let abusereport = '' let screenshot = '' let scamUrl = '' if ('url' in scam) { abusereport = generateAbuseReport(scam) actions.push(`<button id="gen" class="ui icon secondary button"> <i class="setting icon"></i> Abuse Report</button>`, `<a target="_blank" href="http://web.archive.org/web/*/${hostname}" class="ui icon secondary button"><i class="archive icon"></i> Archive</a>` ) scamUrl = `<b>URL</b>: <a id="url" target="_blank" href="/redirect/${encodeURIComponent(scam.url)}">${scam.url}</a><br>` // TODO: put back the screenshots /* if ('status' in scam && 
scam.status != 'Offline' && fs.existsSync('_cache/screenshots/' + scam.id + '.png')) { template = template.replace("{{ scam.screenshot }}", '<h3>Screenshot</h3><img src="/screenshot/' + scam.id + '.png">'); }*/ } actions.push(`<a target="_blank" href="https://github.com/${config.repository.author}/${config.repository.name}/blob/${config.repository.branch}/_data/scams.yaml" class="ui icon secondary button"> <i class="write alternate icon"></i> Improve</a> <button id="share" class="ui icon secondary button"> <i class="share alternate icon"></i> Share</button>`) let googlethreat = '' if ('Google_SafeBrowsing_API_Key' in config && config.Google_SafeBrowsing_API_Key && 'url' in scam) { var options = { uri: 'https://safebrowsing.googleapis.com/v4/threatMatches:find?key=' + config.Google_SafeBrowsing_API_Key, method: 'POST', json: { client: { clientId: "Nano Scam DB", clientVersion: "1.0.0" }, threatInfo: { threatTypes: ["THREAT_TYPE_UNSPECIFIED", "MALWARE", "SOCIAL_ENGINEERING", "UNWANTED_SOFTWARE", "POTENTIALLY_HARMFUL_APPLICATION"], platformTypes: ["ANY_PLATFORM"], threatEntryTypes: ["THREAT_ENTRY_TYPE_UNSPECIFIED", "URL", "EXECUTABLE"], threatEntries: [{ "url": hostname }] } } } googlethreat = `<b>Google Safe Browsing</b>: ${await new Promise((resolve) => { request(options, function (error, response, body) { if (!error && response.statusCode == 200) { if (body && 'matches' in body && body.matches[0]) { resolve(html`<span class='class_offline'>Blocked for ${body.matches[0]['threatType']}</span>`) } else { resolve(html`<span class='class_active'>Not Blocked</span> <a target='_blank' href='https://safebrowsing.google.com/safebrowsing/report_phish/'><i class='warning sign icon'></i></a>`) } } else { resolve('') } }) })}<br>` } res.send(await helpers.layout('scam', { 'scam.id': scam.id, 'scam.name': safeHtml(scam.name), 'scam.category': category, 'scam.status': status, 'scam.description': description, 'scam.nameservers': nameservers, 'scam.addresses': addresses, 'scam.ip': 
ip, 'scam.abusereport': abusereport, 'scam.googlethreat': googlethreat, 'scam.screenshot': screenshot, 'scam.url': scamUrl, 'disqus': await helpers.template('disqus', { 'disqus.id': `scam-${scam.id}` }), 'page.title': safeHtml`Scam ${scam.name}`, 'scam.actions': `<div id="actions" class="eight wide column">${actions.join('')}</div>`, 'page.built': `<p class="built"> This page was built in <b>${Date.now() - startTime}</b>ms, and last updated at <b>${dateFormat(cache.updated, "UTC:mmm dd yyyy, HH:MM")} UTC</b> </p>` })) }) app.get('/ip/:ip/', async function (req, res) { // Serve /ip/<ip>/ const ip = safeHtml`${req.params.ip}` const related = (await getCache()).scams.filter(function (obj) { return obj.ip === ip }).map(function (value) { return `<div class="item"> <a href="/scam/${value.id}/">${value.name}</a> </div>` }).join('') res.send(await helpers.layout('ip', { 'ip.ip': ip, 'ip.scams': html`<div class="ui bulleted list">${related}</div>`, 'disqus': await helpers.template('disqus', { 'disqus.id': `ip-${ip}` }), 'page.title': `Scam report for IP ${ip}` })); }); app.get('/address/:address/', async function (req, res) { // Serve /address/<address>/ const address = safeHtml`${req.params.address}` const related = (await getCache()).scams.filter(function (obj) { if ('addresses' in obj) { return obj.addresses.includes(address) } else { return false } }).map(function (value) { return `<div class="item"> <a href="/scam/${value.id}/">${value.name}</a> </div>` }).join('') res.send(await helpers.layout('address', { 'address.address': address, 'disqus': await helpers.template('disqus', { 'disqus.id': `address-${address}` }), 'address.scams': `<div class="ui bulleted list">${related}</div>`, 'page.title': `Scam report for address ${address}` })) }) app.get('/redirect/:url/', async function (req, res) { // Serve /redirect/<url>/ const url = safeHtml`${req.params.url}` res.send(await helpers.layout('redirect', { 'redirect.domain': url, 'page.title': 'Redirect warning' })) }) 
app.get('/rss/', async function (_req, res) { // Serve /rss/ (rss feed) const cache = await getCache() res.send(await helpers.template('rss', { 'rss.entries': cache.scams.map(function (scam) { const url = `${config.base_url}scam/${scam.id}` return `<item> <guid>${url}</guid> <title>${safeHtml`${scam.name}`}</title> <link>${url}</link> <description>${scam.category}</description> </item>`; }).join('') })) }) app.get('/api/:type?/:domain?/', async function (req, res) { // Serve /api/<type>/ res.header('Access-Control-Allow-Origin', '*') const cache = await getCache() const type = safeHtml`${req.params.type}` /** @type {any} */ let json = false switch (type) { case 'scams': case 'addresses': case 'ips': case 'verified': case 'blacklist': case 'whitelist': json = { success: true, result: cache[type] } break case 'check': { const domainOrAddress = safeHtml`${req.params.domain}` const hostname = url.parse(domainOrAddress).hostname || '' const host = helpers.removeProtocol(domainOrAddress) if (domainOrAddress) { //They can search for an address or domain. 
if (/^xrb_?[0-9a-z]{60}$/.test(domainOrAddress)) { const blocked = Object.keys(cache.addresses).some((address) => (domainOrAddress == address)) //They searched for an address if (blocked) { json = { success: true, result: 'blocked', type: 'address', entries: cache.scams.filter(function (scam) { if ('addresses' in scam) { return (scam.addresses.includes(domainOrAddress)) } else { return false } }) } } else { json = { success: true, result: 'neutral', type: 'address', entries: [] } } } else { //They searched for a domain or an ip address if (cache.whitelist.includes(hostname) || cache.whitelist.includes(domainOrAddress)) { json = { success: true, result: 'verified' } } else if (cache.blacklist.includes(hostname) || cache.blacklist.includes(host)) { if (/^(([1-9]?\d|1\d\d|2[0-5][0-5]|2[0-4]\d)\.){3}([1-9]?\d|1\d\d|2[0-5][0-5]|2[0-4]\d)$/.test(host)) { //They searched for an ip address json = { success: true, result: 'blocked', type: 'ip', entries: cache.scams.filter(function (scam) { return ( url.parse(scam.url).hostname == hostname || helpers.removeProtocol(scam.url) == domainOrAddress || scam.ip == host ) }) } } else { //They searched for a domain json = { success: true, result: 'blocked', type: 'domain', entries: cache.scams.filter(function (scam) { return ( url.parse(scam.url).hostname == hostname || helpers.removeProtocol(scam.url) == domainOrAddress ) }) } } } else { json = { success: false, result: 'neutral', type: 'unsupported', entries: [] } } } } } break case 'abusereport': { const domain = safeHtml`${req.params.domain}` const hostname = url.parse(domain).hostname const results = cache.scams.filter(function (scam) { return ( url.parse(scam.url).hostname == hostname || helpers.removeProtocol(scam.url) == domain ) }) || [] if (results.length == 0) { json = { success: false, error: "URL wasn't found" } } else { json = { success: true, result: generateAbuseReport(results[0]) } } } break } if (json) { res.json(json) } else { res.send(await helpers.layout('api', { 
'page.title': 'Use the API' })) } }) // Serve all other pages as 404 app.get('*', async function (_req, res) { res.status(404).send(await helpers.layout('404', { 'page.title': 'Not found' })) }) if (helpers.rollbar['errorHandler']) { app.use(helpers.rollbar['errorHandler']()) } app.use(async (_err, _req, res, _next) => { res.status(404).send(await helpers.layout('404', { 'page.title': 'Error' })) }) app.listen(config.port, function () { // Listen on port (defined in config) helpers.rollbar.info(`Content served on port ${config.port}`) }) } getCache().then(startWebServer).catch((err) => helpers.rollbar.error(err))
{ return a.category.localeCompare(b.category) }
conditional_block
run.js
'use strict'; const fs = require('fs-extra') const express = require('express') const bodyParser = require('body-parser') const url = require('url') const dateFormat = require('dateformat') const spawn = require('child_process').spawn const request = require('request') const app = express() const config = require('./config') const helpers = require('./helpers') const { html, safeHtml, stripIndents } = require('common-tags') let cache = null let updating_now = false var older_cache_time const cachePath = helpers.localFile('_cache', 'cache.json') const updatePath = helpers.localFile('update.js') /* See if there's an up-to-date cache, otherwise run `update.js` to create one. */ const getCache = async function () { //helpers.rollbar.info('Starting getCache') return new Promise(async (resolve, reject) => { const exists = await fs.pathExists(cachePath) if (exists) { if (!cache) { try { cache = JSON.parse(await fs.readFile(cachePath, { encoding: 'utf8' })) resolve(cache) } catch (e) { reject(e) } } else if ((new Date().getTime() - cache.updated) < config.cache_refreshing_interval) { resolve(cache) } else if ((new Date().getTime() - cache.updated) >= config.cache_refreshing_interval) { if (!updating_now) { updating_now = true older_cache_time = cache.updated spawn('node', [updatePath], { detached: true }) var checkDone2 = setInterval(function () { if (cache.updated != older_cache_time) { clearInterval(checkDone2) helpers.rollbar.info("Successfully updated cache!") updating_now = false } }, 1000) } resolve(cache) } } else { helpers.rollbar.info("No cache file found. 
Creating one...") if (!updating_now) { updating_now = true spawn('node', [updatePath], { detached: true }) } var checkDone = setInterval(async function () { const exists = await fs.pathExists(cachePath) if (exists) { updating_now = false try { cache = JSON.parse(await fs.readFile(cachePath, { encoding: 'utf8' })) clearInterval(checkDone) helpers.rollbar.info("Successfully updated cache!") resolve(cache) } catch (err) { reject(err) } } }, 1000) } }) } /* Generate an abuse report for a scam domain */ function generateAbuseReport(scam)
/* Start the web server */ function startWebServer() { app.use(express.static('_static')); // Serve all static pages first app.use('/screenshot', express.static('_cache/screenshots/')); // Serve all screenshots app.use(bodyParser.json({ strict: true })) // to support JSON-encoded bodies app.get('/(/|index.html)?', async function (_req, res) { // Serve index.html res.send(await helpers.layout('index', {})) }) app.get('/search/', async function (_req, res) { // Serve /search/ const verified = [].concat((await getCache()).verified) const sorted = verified.sort(function (a, b) { return a.name.localeCompare(b.name) }) const table = sorted.map((url) => { if ('featured' in url && url.featured) { // TODO: put the verified images here /*if ( await fs.pathExists("_static/img/" + url.name.toLowerCase().replace(' ', '') + ".png") || await fs.pathExists("_static/img/" + url.name.toLowerCase().replace(' ', '') + ".svg") ) { table += "<tr><td><img class='project icon' src='/img/" + url.name.toLowerCase().replace(' ', '') + ".png'>" + url.name + "</td><td><a target='_blank' href='" + url.url + "'>" + url.url + "</a></td></tr>"; } else {*/ //helpers.rollbar.warn(`Warning: No verified icon was found for ${url.name}`); return `<tr> <td>${url.name}</td> <td><a target="_blank" href="${url.url}">${url.url}</a></td> </tr>` //} } return null }).filter((s) => s).join('') res.send(await helpers.layout('search', { 'trusted.table': table, 'page.title': 'Search for scam sites, scammers addresses and scam ips' })) }) app.get('/faq/', async function (_req, res) { // Serve /faq/ res.send(await helpers.layout('faq', { 'page.title': 'FAQ' })) }) // Serve /report/, /report/domain/, and /report/address/ or /report/domain/fake-mycrypto.com app.get('/report/:type?/:value?', async function (req, res, next) { let value = '' if (req.params.value) { value = safeHtml`${req.params.value}` } switch (`${req.params.type}`) { case 'address': res.send(await helpers.layout('reportaddress', { 'page.placeholder': 
value })) break case 'domain': res.send(await helpers.layout('reportdomain', { 'page.placeholder': value })) break default: if (!req.params.type) { res.send(await helpers.layout('report', {})) } else { return next(new Error(`Request type ${req.params.type}`)) } } }) // Serve /scams/ app.get('/scams/:page?/:sorting?/:direction?', async function (req, res, next) { const MAX_RESULTS_PER_PAGE = 30 const scams = [].concat((await getCache()).scams) const currentDirection = `${req.params.direction}` === 'ascending' ? 'ascending' : 'descending' let direction = { category: '', subcategory: '', status: '', title: '', } let sorting = { category: '', subcategory: '', status: '', title: '' } switch (`${req.params.sorting}`) { case 'category': sorting.category = 'sorted' direction.category = currentDirection scams.sort(function (a, b) { if ('category' in a && 'category' in b && a.category && b.category) { return a.category.localeCompare(b.category) } else { return -1 } }) break case 'subcategory': sorting.subcategory = 'sorted' direction.subcategory = currentDirection scams.sort(function (a, b) { if ('subcategory' in a && 'subcategory' in b && a.subcategory && b.subcategory) { return a.subcategory.localeCompare(b.subcategory) } else { return -1 } }) break case 'title': sorting.title = 'sorted' direction.title = currentDirection scams.sort(function (a, b) { return a.name.localeCompare(b.name) }) break case 'status': sorting.status = 'sorted' direction.status = currentDirection scams.sort(function (a, b) { if ('status' in a && 'status' in b) { if ((a.status == 'Active' && b.status != 'Active') || (a.status == 'Inactive' && (b.status == 'Suspended' || b.status == 'Offline')) || (a.status == 'Suspended' && b.status == 'Offline')) { return -1 } else if (a.status == b.status) { return 0 } else { return 1 } } else { return 1 } }) break default: if (!req.params.sorting) { scams.sort(function (a, b) { return b.id - a.id }) } else { return next(new Error(`Invalid sorting 
"${req.params.sorting}"`)) } } if (currentDirection === 'descending') { scams.reverse() } let addresses = {} var intActiveScams = 0 var intInactiveScams = 0 scams.forEach(function (scam) { if ('addresses' in scam) { scam.addresses.forEach(function (address) { addresses[address] = true }) } if ('status' in scam) { if (scam.status === 'Active') { ++intActiveScams } else { ++intInactiveScams } } }) let max = MAX_RESULTS_PER_PAGE let start = 0 let pagination = [] const page = +req.params.page || 1 if (req.params.page == "all") { max = scams.length } else if (page) { max = ((page - 1) * MAX_RESULTS_PER_PAGE) + MAX_RESULTS_PER_PAGE start = (page - 1) * MAX_RESULTS_PER_PAGE } const paginate = req.params.sorting ? `/${req.params.sorting}/${currentDirection}` : '' const table = scams.slice(start, max).map((scam) => { let status = '<td>None</td>' let category = scam.category || '<i class="remove icon"></i> None' let subcategory = scam.subcategory || '<i class="remove icon"></i> None' if ('status' in scam) { switch (scam.status) { case 'Active': status = "<td class='offline'><i class='warning sign icon'></i> Active</td>" break case 'Inactive': status = "<td class='suspended'><i class='remove icon'></i> Inactive</td>" break case 'Offline': status = "<td class='activ'><i class='checkmark icon'></i> Offline</td>" break case 'Suspended': status = "<td class='suspended'><i class='remove icon'></i> Suspended</td>" break } } if ('category' in scam) { switch (scam.category) { case "Phishing": category = '<i class="address book icon"></i> Phishing' break case "Scamming": category = '<i class="payment icon"></i> Scamming' break case "Fake ICO": category = '<i class="dollar icon"></i> Fake ICO' break } } if ('subcategory' in scam && scam.subcategory) { const sub = scam.subcategory.toLowerCase().replace(/\s/g, '') if (sub == "wallets") { subcategory = `<i class="credit card alternative icon"></i> ${scam.subcategory}` } // TODO: put icons here /*else if 
(fs.existsSync(`_static/img/${sub}.png`)) { subcategory = `<img src="/img/${scams[i].subcategory.toLowerCase().replace(/\s/g, '')}.png" class="subcategoryicon"> ${scams[i].subcategory}`; } else { subcategory = scams[i].subcategory if (!(icon_warnings.includes(subcategory))) { icon_warnings.push(subcategory) } }*/ } let name = scam.name if (name.length > 40) { name = name.substring(0, 40) + '...' } return `<tr> <td>${category}</td> <td>${subcategory}</td> ${status} <td>${name}</td> <td class="center"> <a href='/scam/${scam.id}'><i class='search icon'></i></a> </td> </tr>` }).join('') if (req.params.page !== "all") { let arrLoop = [-2, 3] if (page == 0) { arrLoop = [1, 6] } else if (page == 1) { arrLoop = [0, 5] } else if (page == 2) { arrLoop = [-1, 4] } for (let i = arrLoop[0]; i < arrLoop[1]; i++) { let intPageNumber = (page + Number(i)) let strItemClass = "item" let strHref = `/scams/${intPageNumber}${paginate}` if ((intPageNumber > (scams.length) / MAX_RESULTS_PER_PAGE) || (intPageNumber < 1)) { strItemClass = "disabled item" strHref = "#" } else if (page == intPageNumber) { strItemClass = "active item" } pagination.push(`<a href="${strHref}" class="${strItemClass}">${intPageNumber}</a>`) } if (page > 3) { pagination.unshift(`<a class="item" href="/scams/1${paginate}"> <i class="angle double left icon"></i> </a>`) } if (page < Math.ceil(scams.length / MAX_RESULTS_PER_PAGE) - 3) { pagination.push(`<a class="item" href="/scams/${(Math.ceil(scams.length / MAX_RESULTS_PER_PAGE) - 1)}${paginate}"> <i class='angle double right icon'></i> </a>` ) } } res.send(await helpers.layout('scams', { 'sorting.category.direction': direction.category, 'sorting.subcategory.direction': direction.subcategory, 'sorting.status.direction': direction.status, 'sorting.title.direction': direction.title, 'sorting.category': sorting.category, 'sorting.subcategory': sorting.subcategory, 'sorting.status': sorting.status, 'sorting.title': sorting.title, 'scams.total': 
scams.length.toLocaleString('en-US'), 'scams.active': intActiveScams.toLocaleString('en-US'), 'addresses.total': Object.keys(addresses).length.toLocaleString('en-US'), 'scams.inactive': intInactiveScams.toLocaleString('en-US'), 'scams.pagination': `<div class="ui pagination menu">${pagination.join('')}</div>`, 'scams.table': table, 'page.title': 'Active Scam List' })) }) app.get('/scam/:id/', async function (req, res, next) { // Serve /scam/<id>/ const startTime = Date.now() const cache = await getCache() const id = +req.params.id const scam = cache.scams.find(function (scam) { return scam.id == id }) if (!scam) { return next(new Error(`Scam id not found ${id}`)) } const hostname = url.parse(scam.url).hostname let actions = [] let category = '' if ('category' in scam) { category = `<b>Category</b>: ${scam.category}` if ('subcategory' in scam) { category += ` - ${scam.subcategory}` } category += '<br>' } let status = '' if ('status' in scam) { status = `<b>Status</b>: <span class="class_${scam.status.toLowerCase()}">${scam.status}</span><br>` } let description = '' if ('description' in scam) { description = `<b>Description</b>: ${scam.description}<br>` } let nameservers = '' if ('nameservers' in scam && scam.nameservers && scam.nameservers.length) { nameservers = `<b>Nameservers</b>: <div class="ui bulleted list"> ${scam.nameservers.map(function (nameserver) { return `<div class="ui item">${nameserver}</div>` }).join('')} </div>` } let addresses = '' if ('addresses' in scam && scam.addresses && scam.addresses.length) { addresses = `<b>Related addresses</b>: <div class="ui bulleted list"> ${scam.addresses.map(function (address) { return `<div class="ui item"><a href="/address/${address}">${address}</a></div>` }).join('')} </div>` } let ip = '' if ('ip' in scam) { ip = `<b>IP</b>: <a href="/ip/${scam.ip}">${scam.ip}</a><br>` } let abusereport = '' let screenshot = '' let scamUrl = '' if ('url' in scam) { abusereport = generateAbuseReport(scam) actions.push(`<button 
id="gen" class="ui icon secondary button"> <i class="setting icon"></i> Abuse Report</button>`, `<a target="_blank" href="http://web.archive.org/web/*/${hostname}" class="ui icon secondary button"><i class="archive icon"></i> Archive</a>` ) scamUrl = `<b>URL</b>: <a id="url" target="_blank" href="/redirect/${encodeURIComponent(scam.url)}">${scam.url}</a><br>` // TODO: put back the screenshots /* if ('status' in scam && scam.status != 'Offline' && fs.existsSync('_cache/screenshots/' + scam.id + '.png')) { template = template.replace("{{ scam.screenshot }}", '<h3>Screenshot</h3><img src="/screenshot/' + scam.id + '.png">'); }*/ } actions.push(`<a target="_blank" href="https://github.com/${config.repository.author}/${config.repository.name}/blob/${config.repository.branch}/_data/scams.yaml" class="ui icon secondary button"> <i class="write alternate icon"></i> Improve</a> <button id="share" class="ui icon secondary button"> <i class="share alternate icon"></i> Share</button>`) let googlethreat = '' if ('Google_SafeBrowsing_API_Key' in config && config.Google_SafeBrowsing_API_Key && 'url' in scam) { var options = { uri: 'https://safebrowsing.googleapis.com/v4/threatMatches:find?key=' + config.Google_SafeBrowsing_API_Key, method: 'POST', json: { client: { clientId: "Nano Scam DB", clientVersion: "1.0.0" }, threatInfo: { threatTypes: ["THREAT_TYPE_UNSPECIFIED", "MALWARE", "SOCIAL_ENGINEERING", "UNWANTED_SOFTWARE", "POTENTIALLY_HARMFUL_APPLICATION"], platformTypes: ["ANY_PLATFORM"], threatEntryTypes: ["THREAT_ENTRY_TYPE_UNSPECIFIED", "URL", "EXECUTABLE"], threatEntries: [{ "url": hostname }] } } } googlethreat = `<b>Google Safe Browsing</b>: ${await new Promise((resolve) => { request(options, function (error, response, body) { if (!error && response.statusCode == 200) { if (body && 'matches' in body && body.matches[0]) { resolve(html`<span class='class_offline'>Blocked for ${body.matches[0]['threatType']}</span>`) } else { resolve(html`<span class='class_active'>Not 
Blocked</span> <a target='_blank' href='https://safebrowsing.google.com/safebrowsing/report_phish/'><i class='warning sign icon'></i></a>`) } } else { resolve('') } }) })}<br>` } res.send(await helpers.layout('scam', { 'scam.id': scam.id, 'scam.name': safeHtml(scam.name), 'scam.category': category, 'scam.status': status, 'scam.description': description, 'scam.nameservers': nameservers, 'scam.addresses': addresses, 'scam.ip': ip, 'scam.abusereport': abusereport, 'scam.googlethreat': googlethreat, 'scam.screenshot': screenshot, 'scam.url': scamUrl, 'disqus': await helpers.template('disqus', { 'disqus.id': `scam-${scam.id}` }), 'page.title': safeHtml`Scam ${scam.name}`, 'scam.actions': `<div id="actions" class="eight wide column">${actions.join('')}</div>`, 'page.built': `<p class="built"> This page was built in <b>${Date.now() - startTime}</b>ms, and last updated at <b>${dateFormat(cache.updated, "UTC:mmm dd yyyy, HH:MM")} UTC</b> </p>` })) }) app.get('/ip/:ip/', async function (req, res) { // Serve /ip/<ip>/ const ip = safeHtml`${req.params.ip}` const related = (await getCache()).scams.filter(function (obj) { return obj.ip === ip }).map(function (value) { return `<div class="item"> <a href="/scam/${value.id}/">${value.name}</a> </div>` }).join('') res.send(await helpers.layout('ip', { 'ip.ip': ip, 'ip.scams': html`<div class="ui bulleted list">${related}</div>`, 'disqus': await helpers.template('disqus', { 'disqus.id': `ip-${ip}` }), 'page.title': `Scam report for IP ${ip}` })); }); app.get('/address/:address/', async function (req, res) { // Serve /address/<address>/ const address = safeHtml`${req.params.address}` const related = (await getCache()).scams.filter(function (obj) { if ('addresses' in obj) { return obj.addresses.includes(address) } else { return false } }).map(function (value) { return `<div class="item"> <a href="/scam/${value.id}/">${value.name}</a> </div>` }).join('') res.send(await helpers.layout('address', { 'address.address': address, 'disqus': 
await helpers.template('disqus', { 'disqus.id': `address-${address}` }), 'address.scams': `<div class="ui bulleted list">${related}</div>`, 'page.title': `Scam report for address ${address}` })) }) app.get('/redirect/:url/', async function (req, res) { // Serve /redirect/<url>/ const url = safeHtml`${req.params.url}` res.send(await helpers.layout('redirect', { 'redirect.domain': url, 'page.title': 'Redirect warning' })) }) app.get('/rss/', async function (_req, res) { // Serve /rss/ (rss feed) const cache = await getCache() res.send(await helpers.template('rss', { 'rss.entries': cache.scams.map(function (scam) { const url = `${config.base_url}scam/${scam.id}` return `<item> <guid>${url}</guid> <title>${safeHtml`${scam.name}`}</title> <link>${url}</link> <description>${scam.category}</description> </item>`; }).join('') })) }) app.get('/api/:type?/:domain?/', async function (req, res) { // Serve /api/<type>/ res.header('Access-Control-Allow-Origin', '*') const cache = await getCache() const type = safeHtml`${req.params.type}` /** @type {any} */ let json = false switch (type) { case 'scams': case 'addresses': case 'ips': case 'verified': case 'blacklist': case 'whitelist': json = { success: true, result: cache[type] } break case 'check': { const domainOrAddress = safeHtml`${req.params.domain}` const hostname = url.parse(domainOrAddress).hostname || '' const host = helpers.removeProtocol(domainOrAddress) if (domainOrAddress) { //They can search for an address or domain. 
if (/^xrb_?[0-9a-z]{60}$/.test(domainOrAddress)) { const blocked = Object.keys(cache.addresses).some((address) => (domainOrAddress == address)) //They searched for an address if (blocked) { json = { success: true, result: 'blocked', type: 'address', entries: cache.scams.filter(function (scam) { if ('addresses' in scam) { return (scam.addresses.includes(domainOrAddress)) } else { return false } }) } } else { json = { success: true, result: 'neutral', type: 'address', entries: [] } } } else { //They searched for a domain or an ip address if (cache.whitelist.includes(hostname) || cache.whitelist.includes(domainOrAddress)) { json = { success: true, result: 'verified' } } else if (cache.blacklist.includes(hostname) || cache.blacklist.includes(host)) { if (/^(([1-9]?\d|1\d\d|2[0-5][0-5]|2[0-4]\d)\.){3}([1-9]?\d|1\d\d|2[0-5][0-5]|2[0-4]\d)$/.test(host)) { //They searched for an ip address json = { success: true, result: 'blocked', type: 'ip', entries: cache.scams.filter(function (scam) { return ( url.parse(scam.url).hostname == hostname || helpers.removeProtocol(scam.url) == domainOrAddress || scam.ip == host ) }) } } else { //They searched for a domain json = { success: true, result: 'blocked', type: 'domain', entries: cache.scams.filter(function (scam) { return ( url.parse(scam.url).hostname == hostname || helpers.removeProtocol(scam.url) == domainOrAddress ) }) } } } else { json = { success: false, result: 'neutral', type: 'unsupported', entries: [] } } } } } break case 'abusereport': { const domain = safeHtml`${req.params.domain}` const hostname = url.parse(domain).hostname const results = cache.scams.filter(function (scam) { return ( url.parse(scam.url).hostname == hostname || helpers.removeProtocol(scam.url) == domain ) }) || [] if (results.length == 0) { json = { success: false, error: "URL wasn't found" } } else { json = { success: true, result: generateAbuseReport(results[0]) } } } break } if (json) { res.json(json) } else { res.send(await helpers.layout('api', { 
'page.title': 'Use the API' })) } }) // Serve all other pages as 404 app.get('*', async function (_req, res) { res.status(404).send(await helpers.layout('404', { 'page.title': 'Not found' })) }) if (helpers.rollbar['errorHandler']) { app.use(helpers.rollbar['errorHandler']()) } app.use(async (_err, _req, res, _next) => { res.status(404).send(await helpers.layout('404', { 'page.title': 'Error' })) }) app.listen(config.port, function () { // Listen on port (defined in config) helpers.rollbar.info(`Content served on port ${config.port}`) }) } getCache().then(startWebServer).catch((err) => helpers.rollbar.error(err))
{ let abusereport = stripIndents`I would like to inform you of suspicious activities at the domain ${url.parse(scam.url).hostname} ${'ip' in scam ? `located at IP address ${scam['ip']}`: ''}. ${'subcategory' in scam && scam.subcategory == "NanoWallet" ? `The domain is impersonating NanoWallet.io, a website where people can create Nano wallets (a cryptocurrency like Bitcoin).` : ''} ${'category' in scam && scam.category == "Fake ICO" ? `The domain is impersonating a website where an ICO is being held (initial coin offering, like an initial public offering but it's for cryptocurrencies)` : ''} ${'category' in scam && scam.category == "Phishing" ? `The attackers wish to steal funds by using phishing to get the victim's private keys (passwords to a wallet) and using them to send funds to their own wallets.` : ''} ${'category' in scam && scam.category == "Fake ICO" ? `The attackers wish to steal funds by cloning the real website and changing the XRB address so people will send funds to the attackers' address instead of the real address.` : ''} Please shut down this domain so further attacks will be prevented.` return abusereport }
identifier_body
Index.js
import React, { Component, Fragment } from 'react'; import { Route } from "react-router-dom"; import { Card, Table, Modal, Button, Drawer, message, Breadcrumb, } from 'antd'; import utils from '@/utils'; import moment from 'moment'; import Enum, { AUTH } from '@/enum'; import classnames from 'classnames'; import Dotted from '@/component/Dotted'; import Search from './Search'; import NetOperation from '@/net/operation'; import DataAgencys from '@/data/Agencys'; import CustomerDetail from '../../customer/Detail'; import styles from '../styles.module.less' import globalStyles from '@/resource/css/global.module.less'; const BreadcrumbItem = Breadcrumb.Item; export default class extends Component { constructor(props) { super(props); this.state = { pagination: { total: 0, current: 1, pageSize: 10, showSizeChanger: true, showQuickJumper: true, onShowSizeChange: (current, size) => { this.state.pagination.pageSize = size; }, showTotal: (total, range) => `共 ${total} 条记录 / ${range[0]} - ${range[1]}` }, loading: false, dataSource: [], filterInfo: { status: '2' }, filterValue: { status: [2] }, searchData: {}, downloadStatus: 0, agencyTree: null, } } async componentDidMount() { this.getRechargeDetails(); this.getAgencyTree(); } getAgencyTree() { DataAgencys.getTreeData(this.props.match.params.id, (data) => { this.setState({ agencyTree: data }); }, true); } getRechargeDetails() { const { filterInfo, searchData } = this.state; const data = { time_exp: `${moment().startOf('day').add(-1, 'month').unix()},${moment().endOf('day').unix()}`, limit: this.state.pagination.pageSize, page: this.state.pagination.current, ...filterInfo, ...searchData, }; this.setState({ loading: true, }) NetOperation.getRechargeDetails(data).then((res) => { this.setState({ loading: false, dataSource: res.data.rows, pagination: res.data.pagination }); }).catch((e) => { message.error(e.msg); }); } onCallBack = (filterData) => { this.state.pagination.current = 1; this.setState({ // filterValue: null, // 
filterInfo: null, searchData: filterData, }, () => { this.getRechargeDetails(); }); } onClose = () => { this.props.history.push(this.props.match.url); } open(id) { this.props.history.push(`${this.props.match.url}/${id}`); } handleTableChange = (pagination, filters, sorter) => { const state = this.state; const _page = state.pagination; if (pagination.current != _page.current) { _page.current = pagination.current; } let objInfo = state.filterInfo || {}; if (filters.pay_channel && filters.pay_channel.length) { objInfo.pay_channel = filters.pay_channel.join(','); } if (filters.status && filters.status.length) { objInfo.status = filters.status.join(','); } if (filters.is_internal_staff && filters.is_internal_staff.length == 1) { objInfo.is_internal_staff = filters.is_internal_staff.join(','); } else { objInfo.is_internal_staff = ''; } this.setState({ loading: true, filterValue: filters, filterInfo: objInfo }, () => { this.getRechargeDetails(); }); } exportAlert = () => { this.setState({ downloadStatus: 1 }); Modal.confirm({ title: '确认提示', content: '确定导出当前筛选数据的Excel表格吗?', width: '450px', centered: true, onOk: () => { this.exportDetails(); }, onCancel: () => { this.setState({ downloadStatus: 0 }); }, }); } exportDetails() { const state = this.state;
= { time_exp: `${moment().startOf('day').add(-1, 'month').unix()},${moment().endOf('day').unix()}`, ...state.filterInfo, ...state.searchData }; this.setState({ downloadStatus: 2 }); NetOperation.exportRecharge(data).then((res) => { const items = res.data; if (items && items.id) { this.downloadExcelFile(items.id); } }).catch((err) => { this.setState({ downloadStatus: 0 }); if (err.msg) { message.error(err.msg); } }); } downloadExcelFile = (id) => { NetOperation.downloadExcelFile(id).then((res) => { const items = res.data; if (items && items.path) { this.setState({ downloadStatus: 0 }); window.location.href = '/' + items.path; } else { window.setTimeout((e) => { this.downloadExcelFile(id); }, 500); } }).catch((err) => { this.setState({ downloadStatus: 0 }); if (err.msg) { message.error(err.msg); } }); } creatColumns(state) { const columns = [ { title: '流水号', dataIndex: 'order_number', fixed: 'left', width: 210 }, { title: '三方单号', dataIndex: 'serial_number', fixed: 'left', width: 320, render: data => { if (data.trim()) { return data; } return '-'; } }, { title: '交易金额', width: 100, align: 'right', render: (data) => utils.formatMoney((data.price * data.goods_amount) / 100) }, { title: '交易时间', dataIndex: 'create_time', width: 130, render: data => { if (data) { return moment.unix(data).format('YYYY-MM-DD HH:mm'); } return '-'; } }, { title: '支付时间', dataIndex: 'update_time', width: 130, render: data => { if (data) { return moment.unix(data).format('YYYY-MM-DD HH:mm'); } return '-'; } }, { title: '客户', key: 'is_internal_staff', width: 150, filteredValue: (state.filterValue ? 
state.filterValue.is_internal_staff : []), filters: [ { text: '正式客户', value: '0' }, { text: '测试客户', value: '1' } ], render: data => { const customer_name = data.customer_name || '-'; let is_internal_staff = ''; if (data.is_internal_staff) { is_internal_staff = <label className={classnames(globalStyles.tag, globalStyles.staffTag)}>测试</label>; } return <Fragment> <a href="javascript:;" onClick={() => { this.open(data.customer_id) }}>{customer_name}</a> <div>{is_internal_staff}</div> </Fragment> } }, { title: '所属机构', dataIndex: 'agency_id', width: 150, render: data => { return DataAgencys.getField(data, 'alias', (items) => { this.setState({}) }); } }, { title: '支付方式', dataIndex: 'pay_channel', width: 140, filteredValue: (state.filterValue ? state.filterValue.pay_channel : []), filters: [ { text: '银行转账', value: 1 }, { text: '微信支付', value: 2 }, { text: '支付宝支付', value: 3 }, { text: '易宝支付', value: 4 }, { text: '苹果支付', value: 5 }, { text: '连连支付', value: 6 }, { text: '汇潮支付', value: 7 }, { text: '双乾-支付宝', value: 10 }, { text: '易票联支付', value: 15 }, { text: '优畅-支付宝', value: 18 }, { text: '优畅-微信', value: 19 }, { text: '乾易付-支付宝', value: 30 }, { text: '乾易付-微信', value: 31 }, { text: '汇付支付', value: 35 }, { text: '汇德汇付-支付宝', value: 36 }, { text: '汇德汇付-微信', value: 37 } ], render: (data) => { switch(data) { case 1: return '银行转账'; case 2: return '微信支付'; case 3: return '支付宝支付'; case 4: return '易宝支付'; case 5: return '苹果支付'; case 6: return '连连支付'; case 7: return '汇潮支付'; case 10: return '双乾-支付宝'; case 15: return '易票联支付'; case 18: return '优畅-支付宝'; case 19: return '优畅-微信'; case 30: return '乾易付-支付宝'; case 31: return '乾易付-微信'; case 35: return '汇付支付'; case 36: return '汇德汇付-支付宝'; case 37: return '汇德汇付-微信'; default: return '-'; } } }, { title: '支付状态', dataIndex: 'status', width: 110, filteredValue: (state.filterValue ? 
state.filterValue.status : []), filters: [ { text: '待支付', value: 1 }, { text: '支付成功', value: 2 }, { text: '支付失败', value: 3 } ], render: (data) => { switch(data) { case 1: return <Dotted type="green">待支付</Dotted> case 2: return <Dotted type="blue">支付成功</Dotted> case 3: return <Dotted type="red">支付失败</Dotted> default: return '-'; } } }, { title: '描述', dataIndex: 'desc', render: data => { if (data.trim()) { return data; } return '-'; } } ]; return columns } render() { const state = this.state; const { dataSource, loading, pagination, downloadStatus, agencyTree } = state; return <Fragment > <div className={globalStyles.topWhiteBlock}> <Breadcrumb> <BreadcrumbItem>首页</BreadcrumbItem> <BreadcrumbItem>运营管理</BreadcrumbItem> <BreadcrumbItem>合规管理</BreadcrumbItem> </Breadcrumb> <h3 className={globalStyles.pageTitle}>三方支付查询</h3> </div> <div className={globalStyles.content}> <Card bordered={false}> <Search onCallBack={this.onCallBack} agencyTree={agencyTree} /> <div className={globalStyles.mBottom16}> <Button onClick={this.exportAlert} disabled={!this.props.checkAuth(1, AUTH.ALLOW_EXPORT_CAPITAL) || !dataSource.length || downloadStatus != 0}> {downloadStatus == 2 ? '处理中...' : '导出Excel'} </Button> </div> <Table dataSource={dataSource} columns={this.creatColumns(state)} rowKey={(record, index) => index} animated={false} scroll={{ x: 1790 }} onChange={this.handleTableChange} loading={loading} pagination={pagination} /> </Card> <Route path={`${this.props.match.path}/:detail`} children={(childProps) => { return <Drawer title="查看详情" placement="right" width="calc(100% - 300px)" visible={!!childProps.match} onClose={this.onClose} destroyOnClose={true} className={classnames(globalStyles.drawGap, globalStyles.grey)} > <CustomerDetail {...this.props} id={childProps.match ? childProps.match.params.detail : null} getData={this.getData} isCompliance={true} allowManage={true} assort={2} /> </Drawer> }} /> </div> </Fragment> } }
const data
identifier_name
Index.js
import React, { Component, Fragment } from 'react'; import { Route } from "react-router-dom"; import { Card, Table, Modal, Button, Drawer, message, Breadcrumb, } from 'antd'; import utils from '@/utils'; import moment from 'moment'; import Enum, { AUTH } from '@/enum'; import classnames from 'classnames'; import Dotted from '@/component/Dotted'; import Search from './Search'; import NetOperation from '@/net/operation'; import DataAgencys from '@/data/Agencys'; import CustomerDetail from '../../customer/Detail'; import styles from '../styles.module.less' import globalStyles from '@/resource/css/global.module.less'; const BreadcrumbItem = Breadcrumb.Item; export default class extends Component { constructor(props) { super(props); this.state = { pagination: { total: 0, current: 1, pageSize: 10, showSizeChanger: true, showQuickJumper: true, onShowSizeChange: (current, size) => { this.state.pagination.pageSize = size; }, showTotal: (total, range) => `共 ${total} 条记录 / ${range[0]} - ${range[1]}` }, loading: false, dataSource: [], filterInfo: { status: '2' }, filterValue: { status: [2] }, searchData: {}, downloadStatus: 0, agencyTree: null, } } async componentDidMount() { this
encyTree() { DataAgencys.getTreeData(this.props.match.params.id, (data) => { this.setState({ agencyTree: data }); }, true); } getRechargeDetails() { const { filterInfo, searchData } = this.state; const data = { time_exp: `${moment().startOf('day').add(-1, 'month').unix()},${moment().endOf('day').unix()}`, limit: this.state.pagination.pageSize, page: this.state.pagination.current, ...filterInfo, ...searchData, }; this.setState({ loading: true, }) NetOperation.getRechargeDetails(data).then((res) => { this.setState({ loading: false, dataSource: res.data.rows, pagination: res.data.pagination }); }).catch((e) => { message.error(e.msg); }); } onCallBack = (filterData) => { this.state.pagination.current = 1; this.setState({ // filterValue: null, // filterInfo: null, searchData: filterData, }, () => { this.getRechargeDetails(); }); } onClose = () => { this.props.history.push(this.props.match.url); } open(id) { this.props.history.push(`${this.props.match.url}/${id}`); } handleTableChange = (pagination, filters, sorter) => { const state = this.state; const _page = state.pagination; if (pagination.current != _page.current) { _page.current = pagination.current; } let objInfo = state.filterInfo || {}; if (filters.pay_channel && filters.pay_channel.length) { objInfo.pay_channel = filters.pay_channel.join(','); } if (filters.status && filters.status.length) { objInfo.status = filters.status.join(','); } if (filters.is_internal_staff && filters.is_internal_staff.length == 1) { objInfo.is_internal_staff = filters.is_internal_staff.join(','); } else { objInfo.is_internal_staff = ''; } this.setState({ loading: true, filterValue: filters, filterInfo: objInfo }, () => { this.getRechargeDetails(); }); } exportAlert = () => { this.setState({ downloadStatus: 1 }); Modal.confirm({ title: '确认提示', content: '确定导出当前筛选数据的Excel表格吗?', width: '450px', centered: true, onOk: () => { this.exportDetails(); }, onCancel: () => { this.setState({ downloadStatus: 0 }); }, }); } exportDetails() { const 
state = this.state; const data = { time_exp: `${moment().startOf('day').add(-1, 'month').unix()},${moment().endOf('day').unix()}`, ...state.filterInfo, ...state.searchData }; this.setState({ downloadStatus: 2 }); NetOperation.exportRecharge(data).then((res) => { const items = res.data; if (items && items.id) { this.downloadExcelFile(items.id); } }).catch((err) => { this.setState({ downloadStatus: 0 }); if (err.msg) { message.error(err.msg); } }); } downloadExcelFile = (id) => { NetOperation.downloadExcelFile(id).then((res) => { const items = res.data; if (items && items.path) { this.setState({ downloadStatus: 0 }); window.location.href = '/' + items.path; } else { window.setTimeout((e) => { this.downloadExcelFile(id); }, 500); } }).catch((err) => { this.setState({ downloadStatus: 0 }); if (err.msg) { message.error(err.msg); } }); } creatColumns(state) { const columns = [ { title: '流水号', dataIndex: 'order_number', fixed: 'left', width: 210 }, { title: '三方单号', dataIndex: 'serial_number', fixed: 'left', width: 320, render: data => { if (data.trim()) { return data; } return '-'; } }, { title: '交易金额', width: 100, align: 'right', render: (data) => utils.formatMoney((data.price * data.goods_amount) / 100) }, { title: '交易时间', dataIndex: 'create_time', width: 130, render: data => { if (data) { return moment.unix(data).format('YYYY-MM-DD HH:mm'); } return '-'; } }, { title: '支付时间', dataIndex: 'update_time', width: 130, render: data => { if (data) { return moment.unix(data).format('YYYY-MM-DD HH:mm'); } return '-'; } }, { title: '客户', key: 'is_internal_staff', width: 150, filteredValue: (state.filterValue ? 
state.filterValue.is_internal_staff : []), filters: [ { text: '正式客户', value: '0' }, { text: '测试客户', value: '1' } ], render: data => { const customer_name = data.customer_name || '-'; let is_internal_staff = ''; if (data.is_internal_staff) { is_internal_staff = <label className={classnames(globalStyles.tag, globalStyles.staffTag)}>测试</label>; } return <Fragment> <a href="javascript:;" onClick={() => { this.open(data.customer_id) }}>{customer_name}</a> <div>{is_internal_staff}</div> </Fragment> } }, { title: '所属机构', dataIndex: 'agency_id', width: 150, render: data => { return DataAgencys.getField(data, 'alias', (items) => { this.setState({}) }); } }, { title: '支付方式', dataIndex: 'pay_channel', width: 140, filteredValue: (state.filterValue ? state.filterValue.pay_channel : []), filters: [ { text: '银行转账', value: 1 }, { text: '微信支付', value: 2 }, { text: '支付宝支付', value: 3 }, { text: '易宝支付', value: 4 }, { text: '苹果支付', value: 5 }, { text: '连连支付', value: 6 }, { text: '汇潮支付', value: 7 }, { text: '双乾-支付宝', value: 10 }, { text: '易票联支付', value: 15 }, { text: '优畅-支付宝', value: 18 }, { text: '优畅-微信', value: 19 }, { text: '乾易付-支付宝', value: 30 }, { text: '乾易付-微信', value: 31 }, { text: '汇付支付', value: 35 }, { text: '汇德汇付-支付宝', value: 36 }, { text: '汇德汇付-微信', value: 37 } ], render: (data) => { switch(data) { case 1: return '银行转账'; case 2: return '微信支付'; case 3: return '支付宝支付'; case 4: return '易宝支付'; case 5: return '苹果支付'; case 6: return '连连支付'; case 7: return '汇潮支付'; case 10: return '双乾-支付宝'; case 15: return '易票联支付'; case 18: return '优畅-支付宝'; case 19: return '优畅-微信'; case 30: return '乾易付-支付宝'; case 31: return '乾易付-微信'; case 35: return '汇付支付'; case 36: return '汇德汇付-支付宝'; case 37: return '汇德汇付-微信'; default: return '-'; } } }, { title: '支付状态', dataIndex: 'status', width: 110, filteredValue: (state.filterValue ? 
state.filterValue.status : []), filters: [ { text: '待支付', value: 1 }, { text: '支付成功', value: 2 }, { text: '支付失败', value: 3 } ], render: (data) => { switch(data) { case 1: return <Dotted type="green">待支付</Dotted> case 2: return <Dotted type="blue">支付成功</Dotted> case 3: return <Dotted type="red">支付失败</Dotted> default: return '-'; } } }, { title: '描述', dataIndex: 'desc', render: data => { if (data.trim()) { return data; } return '-'; } } ]; return columns } render() { const state = this.state; const { dataSource, loading, pagination, downloadStatus, agencyTree } = state; return <Fragment > <div className={globalStyles.topWhiteBlock}> <Breadcrumb> <BreadcrumbItem>首页</BreadcrumbItem> <BreadcrumbItem>运营管理</BreadcrumbItem> <BreadcrumbItem>合规管理</BreadcrumbItem> </Breadcrumb> <h3 className={globalStyles.pageTitle}>三方支付查询</h3> </div> <div className={globalStyles.content}> <Card bordered={false}> <Search onCallBack={this.onCallBack} agencyTree={agencyTree} /> <div className={globalStyles.mBottom16}> <Button onClick={this.exportAlert} disabled={!this.props.checkAuth(1, AUTH.ALLOW_EXPORT_CAPITAL) || !dataSource.length || downloadStatus != 0}> {downloadStatus == 2 ? '处理中...' : '导出Excel'} </Button> </div> <Table dataSource={dataSource} columns={this.creatColumns(state)} rowKey={(record, index) => index} animated={false} scroll={{ x: 1790 }} onChange={this.handleTableChange} loading={loading} pagination={pagination} /> </Card> <Route path={`${this.props.match.path}/:detail`} children={(childProps) => { return <Drawer title="查看详情" placement="right" width="calc(100% - 300px)" visible={!!childProps.match} onClose={this.onClose} destroyOnClose={true} className={classnames(globalStyles.drawGap, globalStyles.grey)} > <CustomerDetail {...this.props} id={childProps.match ? childProps.match.params.detail : null} getData={this.getData} isCompliance={true} allowManage={true} assort={2} /> </Drawer> }} /> </div> </Fragment> } }
.getRechargeDetails(); this.getAgencyTree(); } getAg
identifier_body
Index.js
import React, { Component, Fragment } from 'react'; import { Route } from "react-router-dom"; import { Card, Table, Modal, Button, Drawer, message, Breadcrumb, } from 'antd'; import utils from '@/utils'; import moment from 'moment'; import Enum, { AUTH } from '@/enum'; import classnames from 'classnames'; import Dotted from '@/component/Dotted'; import Search from './Search'; import NetOperation from '@/net/operation'; import DataAgencys from '@/data/Agencys'; import CustomerDetail from '../../customer/Detail'; import styles from '../styles.module.less' import globalStyles from '@/resource/css/global.module.less'; const BreadcrumbItem = Breadcrumb.Item; export default class extends Component { constructor(props) { super(props); this.state = { pagination: { total: 0, current: 1, pageSize: 10, showSizeChanger: true, showQuickJumper: true, onShowSizeChange: (current, size) => { this.state.pagination.pageSize = size; }, showTotal: (total, range) => `共 ${total} 条记录 / ${range[0]} - ${range[1]}` }, loading: false, dataSource: [], filterInfo: { status: '2' }, filterValue: { status: [2] }, searchData: {}, downloadStatus: 0, agencyTree: null, } } async componentDidMount() { this.getRechargeDetails(); this.getAgencyTree(); } getAgencyTree() { DataAgencys.getTreeData(this.props.match.params.id, (data) => { this.setState({ agencyTree: data }); }, true); } getRechargeDetails() { const { filterInfo, searchData } = this.state; const data = { time_exp: `${moment().startOf('day').add(-1, 'month').unix()},${moment().endOf('day').unix()}`, limit: this.state.pagination.pageSize, page: this.state.pagination.current, ...filterInfo, ...searchData, }; this.setState({ loading: true, }) NetOperation.getRechargeDetails(data).then((res) => { this.setState({ loading: false, dataSource: res.data.rows, pagination: res.data.pagination }); }).catch((e) => { message.error(e.msg); }); } onCallBack = (filterData) => { this.state.pagination.current = 1; this.setState({ // filterValue: null, // 
filterInfo: null, searchData: filterData, }, () => { this.getRechargeDetails(); }); } onClose = () => { this.props.history.push(this.props.match.url); } open(id) { this.props.history.push(`${this.props.match.url}/${id}`); } handleTableChange = (pagination, filters, sorter) => { const state = this.state; const _page = state.pagination; if (pagination.current != _page.current) { _page.current = pagination.current; } let objInfo = state.filterInfo || {}; if (filters.pay_channel && filters.pay_channel.length) { objInfo.pay_channel = filters.pay_channel.join(','); } if (filters.status && filters.status.length) { objInfo.status = filters.status.join(','); } if (filters.is_internal_staff && filters.is_internal_staff.length == 1) { objInfo.is_internal_staff = filters.is_internal_staff.join(','); } else { objInfo.is_internal_staff = ''; } this.setState({ loading: true, filterValue: filters, filterInfo: objInfo }, () => { this.getRechargeDetails(); }); } exportAlert = () => { this.setState({ downloadStatus: 1 }); Modal.confirm({ title: '确认提示', content: '确定导出当前筛选数据的Excel表格吗?', width: '450px', centered: true, onOk: () => { this.exportDetails(); }, onCancel: () => { this.setState({ downloadStatus: 0 }); }, }); } exportDetails() { const state = this.state; const data = { time_exp: `${moment().startOf('day').add(-1, 'month').unix()},${moment().endOf('day').unix()}`, ...state.filterInfo, ...state.searchData }; this.setState({ downloadStatus: 2 }); NetOperation.exportRecharge(data).then((res) => { const items = res.data; if (items && items.id) { this.downloadExcelFile(items.id); } }).catch((err) => { this.setState({ downloadStatus: 0 }); if (err.msg) { message.error(err.msg); } }); } downloadExcelFile = (id) => { NetOperation.downloadExcelFile(id).then((res) => { const items = res.data; if (items && items.path) { this.setState({ downloadStatus: 0 }); window.location.href = '/' + items.path; } else { window.setTimeout((e) => { this.downloadExcelFile(id); }, 500); } }).catch((err) => 
{ this.setState({ downloadStatus: 0 }); if (err.msg) { message.error(err.msg); } }); } creatColumns(state) { const columns = [ { title: '流水号', dataIndex: 'order_number', fixed: 'left', width: 210 }, { title: '三方单号', dataIndex: 'serial_number', fixed: 'left', width: 320,
} }, { title: '交易金额', width: 100, align: 'right', render: (data) => utils.formatMoney((data.price * data.goods_amount) / 100) }, { title: '交易时间', dataIndex: 'create_time', width: 130, render: data => { if (data) { return moment.unix(data).format('YYYY-MM-DD HH:mm'); } return '-'; } }, { title: '支付时间', dataIndex: 'update_time', width: 130, render: data => { if (data) { return moment.unix(data).format('YYYY-MM-DD HH:mm'); } return '-'; } }, { title: '客户', key: 'is_internal_staff', width: 150, filteredValue: (state.filterValue ? state.filterValue.is_internal_staff : []), filters: [ { text: '正式客户', value: '0' }, { text: '测试客户', value: '1' } ], render: data => { const customer_name = data.customer_name || '-'; let is_internal_staff = ''; if (data.is_internal_staff) { is_internal_staff = <label className={classnames(globalStyles.tag, globalStyles.staffTag)}>测试</label>; } return <Fragment> <a href="javascript:;" onClick={() => { this.open(data.customer_id) }}>{customer_name}</a> <div>{is_internal_staff}</div> </Fragment> } }, { title: '所属机构', dataIndex: 'agency_id', width: 150, render: data => { return DataAgencys.getField(data, 'alias', (items) => { this.setState({}) }); } }, { title: '支付方式', dataIndex: 'pay_channel', width: 140, filteredValue: (state.filterValue ? 
state.filterValue.pay_channel : []), filters: [ { text: '银行转账', value: 1 }, { text: '微信支付', value: 2 }, { text: '支付宝支付', value: 3 }, { text: '易宝支付', value: 4 }, { text: '苹果支付', value: 5 }, { text: '连连支付', value: 6 }, { text: '汇潮支付', value: 7 }, { text: '双乾-支付宝', value: 10 }, { text: '易票联支付', value: 15 }, { text: '优畅-支付宝', value: 18 }, { text: '优畅-微信', value: 19 }, { text: '乾易付-支付宝', value: 30 }, { text: '乾易付-微信', value: 31 }, { text: '汇付支付', value: 35 }, { text: '汇德汇付-支付宝', value: 36 }, { text: '汇德汇付-微信', value: 37 } ], render: (data) => { switch(data) { case 1: return '银行转账'; case 2: return '微信支付'; case 3: return '支付宝支付'; case 4: return '易宝支付'; case 5: return '苹果支付'; case 6: return '连连支付'; case 7: return '汇潮支付'; case 10: return '双乾-支付宝'; case 15: return '易票联支付'; case 18: return '优畅-支付宝'; case 19: return '优畅-微信'; case 30: return '乾易付-支付宝'; case 31: return '乾易付-微信'; case 35: return '汇付支付'; case 36: return '汇德汇付-支付宝'; case 37: return '汇德汇付-微信'; default: return '-'; } } }, { title: '支付状态', dataIndex: 'status', width: 110, filteredValue: (state.filterValue ? 
state.filterValue.status : []), filters: [ { text: '待支付', value: 1 }, { text: '支付成功', value: 2 }, { text: '支付失败', value: 3 } ], render: (data) => { switch(data) { case 1: return <Dotted type="green">待支付</Dotted> case 2: return <Dotted type="blue">支付成功</Dotted> case 3: return <Dotted type="red">支付失败</Dotted> default: return '-'; } } }, { title: '描述', dataIndex: 'desc', render: data => { if (data.trim()) { return data; } return '-'; } } ]; return columns } render() { const state = this.state; const { dataSource, loading, pagination, downloadStatus, agencyTree } = state; return <Fragment > <div className={globalStyles.topWhiteBlock}> <Breadcrumb> <BreadcrumbItem>首页</BreadcrumbItem> <BreadcrumbItem>运营管理</BreadcrumbItem> <BreadcrumbItem>合规管理</BreadcrumbItem> </Breadcrumb> <h3 className={globalStyles.pageTitle}>三方支付查询</h3> </div> <div className={globalStyles.content}> <Card bordered={false}> <Search onCallBack={this.onCallBack} agencyTree={agencyTree} /> <div className={globalStyles.mBottom16}> <Button onClick={this.exportAlert} disabled={!this.props.checkAuth(1, AUTH.ALLOW_EXPORT_CAPITAL) || !dataSource.length || downloadStatus != 0}> {downloadStatus == 2 ? '处理中...' : '导出Excel'} </Button> </div> <Table dataSource={dataSource} columns={this.creatColumns(state)} rowKey={(record, index) => index} animated={false} scroll={{ x: 1790 }} onChange={this.handleTableChange} loading={loading} pagination={pagination} /> </Card> <Route path={`${this.props.match.path}/:detail`} children={(childProps) => { return <Drawer title="查看详情" placement="right" width="calc(100% - 300px)" visible={!!childProps.match} onClose={this.onClose} destroyOnClose={true} className={classnames(globalStyles.drawGap, globalStyles.grey)} > <CustomerDetail {...this.props} id={childProps.match ? childProps.match.params.detail : null} getData={this.getData} isCompliance={true} allowManage={true} assort={2} /> </Drawer> }} /> </div> </Fragment> } }
render: data => { if (data.trim()) { return data; } return '-';
random_line_split
Index.js
import React, { Component, Fragment } from 'react'; import { Route } from "react-router-dom"; import { Card, Table, Modal, Button, Drawer, message, Breadcrumb, } from 'antd'; import utils from '@/utils'; import moment from 'moment'; import Enum, { AUTH } from '@/enum'; import classnames from 'classnames'; import Dotted from '@/component/Dotted'; import Search from './Search'; import NetOperation from '@/net/operation'; import DataAgencys from '@/data/Agencys'; import CustomerDetail from '../../customer/Detail'; import styles from '../styles.module.less' import globalStyles from '@/resource/css/global.module.less'; const BreadcrumbItem = Breadcrumb.Item; export default class extends Component { constructor(props) { super(props); this.state = { pagination: { total: 0, current: 1, pageSize: 10, showSizeChanger: true, showQuickJumper: true, onShowSizeChange: (current, size) => { this.state.pagination.pageSize = size; }, showTotal: (total, range) => `共 ${total} 条记录 / ${range[0]} - ${range[1]}` }, loading: false, dataSource: [], filterInfo: { status: '2' }, filterValue: { status: [2] }, searchData: {}, downloadStatus: 0, agencyTree: null, } } async componentDidMount() { this.getRechargeDetails(); this.getAgencyTree(); } getAgencyTree() { DataAgencys.getTreeData(this.props.match.params.id, (data) => { this.setState({ agencyTree: data }); }, true); } getRechargeDetails() { const { filterInfo, searchData } = this.state; const data = { time_exp: `${moment().startOf('day').add(-1, 'month').unix()},${moment().endOf('day').unix()}`, limit: this.state.pagination.pageSize, page: this.state.pagination.current, ...filterInfo, ...searchData, }; this.setState({ loading: true, }) NetOperation.getRechargeDetails(data).then((res) => { this.setState({ loading: false, dataSource: res.data.rows, pagination: res.data.pagination }); }).catch((e) => { message.error(e.msg); }); } onCallBack = (filterData) => { this.state.pagination.current = 1; this.setState({ // filterValue: null, // 
filterInfo: null, searchData: filterData, }, () => { this.getRechargeDetails(); }); } onClose = () => { this.props.history.push(this.props.match.url); } open(id) { this.props.history.push(`${this.props.match.url}/${id}`); } handleTableChange = (pagination, filters, sorter) => { const state = this.state; const _page = state.pagination; if (pagination.current != _page.current) { _page.current = pagination.current; } let objInfo = state.filterInfo || {}; if (filters.pay_channel && filters.pay_channel.length) { objInfo.pay_channel = filters.pay_channel.join(','); } if (filters.status && filters.status.length) { objInfo.status = filters.status.join(','); } if (filters.is_internal_staff && filters.is_internal_staff.length == 1) { objInfo.is_internal_staff = filters.is_internal_staff.join(','); } else { objInfo.is_internal_staff = ''; } this.setState({ loading: true, filterValue: filters, filterInfo: objInfo }, () => { this.getRechargeDetails(); }); } exportAlert = () => { this.setState({ downloadStatus: 1 }); Modal.confirm({ title: '确认提示', content: '确定导出当前筛选数据的Excel表格吗?', width: '450px', centered: true, onOk: () => { this.exportDetails(); }, onCancel: () => { this.setState({ downloadStatus: 0 }); }, }); } exportDetails() { const state = this.state; const data = { time_exp: `${moment().startOf('day').add(-1, 'month').unix()},${moment().endOf('day').unix()}`, ...state.filterInfo, ...state.searchData }; this.setState({ downloadStatus: 2 }); NetOperation.exportRecharge(data).then((res) => { const items = res.data; if (items && items.id) { this.downloadExcelFile(items.id); } }).catch((err) => { this.setState({ downloadStatus: 0 }); if (err.msg) { message.error(err.msg); } }); } downloadExcelFile = (id) => { NetOperation.downloadExcelFile(id).then((res) => { const items = res.data; if (items && items.path) { this.setState({ downloadStatus: 0 }); window.location.href = '/' + items.path; } else { window.setTimeout((e) => { this.downloadExcelFile(id); }, 500); } }).catch((err) => 
{ this.setState({ downloadStatus: 0 }); if (err.msg) { message.error(err.msg); } }); }
mns = [ { title: '流水号', dataIndex: 'order_number', fixed: 'left', width: 210 }, { title: '三方单号', dataIndex: 'serial_number', fixed: 'left', width: 320, render: data => { if (data.trim()) { return data; } return '-'; } }, { title: '交易金额', width: 100, align: 'right', render: (data) => utils.formatMoney((data.price * data.goods_amount) / 100) }, { title: '交易时间', dataIndex: 'create_time', width: 130, render: data => { if (data) { return moment.unix(data).format('YYYY-MM-DD HH:mm'); } return '-'; } }, { title: '支付时间', dataIndex: 'update_time', width: 130, render: data => { if (data) { return moment.unix(data).format('YYYY-MM-DD HH:mm'); } return '-'; } }, { title: '客户', key: 'is_internal_staff', width: 150, filteredValue: (state.filterValue ? state.filterValue.is_internal_staff : []), filters: [ { text: '正式客户', value: '0' }, { text: '测试客户', value: '1' } ], render: data => { const customer_name = data.customer_name || '-'; let is_internal_staff = ''; if (data.is_internal_staff) { is_internal_staff = <label className={classnames(globalStyles.tag, globalStyles.staffTag)}>测试</label>; } return <Fragment> <a href="javascript:;" onClick={() => { this.open(data.customer_id) }}>{customer_name}</a> <div>{is_internal_staff}</div> </Fragment> } }, { title: '所属机构', dataIndex: 'agency_id', width: 150, render: data => { return DataAgencys.getField(data, 'alias', (items) => { this.setState({}) }); } }, { title: '支付方式', dataIndex: 'pay_channel', width: 140, filteredValue: (state.filterValue ? 
state.filterValue.pay_channel : []), filters: [ { text: '银行转账', value: 1 }, { text: '微信支付', value: 2 }, { text: '支付宝支付', value: 3 }, { text: '易宝支付', value: 4 }, { text: '苹果支付', value: 5 }, { text: '连连支付', value: 6 }, { text: '汇潮支付', value: 7 }, { text: '双乾-支付宝', value: 10 }, { text: '易票联支付', value: 15 }, { text: '优畅-支付宝', value: 18 }, { text: '优畅-微信', value: 19 }, { text: '乾易付-支付宝', value: 30 }, { text: '乾易付-微信', value: 31 }, { text: '汇付支付', value: 35 }, { text: '汇德汇付-支付宝', value: 36 }, { text: '汇德汇付-微信', value: 37 } ], render: (data) => { switch(data) { case 1: return '银行转账'; case 2: return '微信支付'; case 3: return '支付宝支付'; case 4: return '易宝支付'; case 5: return '苹果支付'; case 6: return '连连支付'; case 7: return '汇潮支付'; case 10: return '双乾-支付宝'; case 15: return '易票联支付'; case 18: return '优畅-支付宝'; case 19: return '优畅-微信'; case 30: return '乾易付-支付宝'; case 31: return '乾易付-微信'; case 35: return '汇付支付'; case 36: return '汇德汇付-支付宝'; case 37: return '汇德汇付-微信'; default: return '-'; } } }, { title: '支付状态', dataIndex: 'status', width: 110, filteredValue: (state.filterValue ? 
state.filterValue.status : []), filters: [ { text: '待支付', value: 1 }, { text: '支付成功', value: 2 }, { text: '支付失败', value: 3 } ], render: (data) => { switch(data) { case 1: return <Dotted type="green">待支付</Dotted> case 2: return <Dotted type="blue">支付成功</Dotted> case 3: return <Dotted type="red">支付失败</Dotted> default: return '-'; } } }, { title: '描述', dataIndex: 'desc', render: data => { if (data.trim()) { return data; } return '-'; } } ]; return columns } render() { const state = this.state; const { dataSource, loading, pagination, downloadStatus, agencyTree } = state; return <Fragment > <div className={globalStyles.topWhiteBlock}> <Breadcrumb> <BreadcrumbItem>首页</BreadcrumbItem> <BreadcrumbItem>运营管理</BreadcrumbItem> <BreadcrumbItem>合规管理</BreadcrumbItem> </Breadcrumb> <h3 className={globalStyles.pageTitle}>三方支付查询</h3> </div> <div className={globalStyles.content}> <Card bordered={false}> <Search onCallBack={this.onCallBack} agencyTree={agencyTree} /> <div className={globalStyles.mBottom16}> <Button onClick={this.exportAlert} disabled={!this.props.checkAuth(1, AUTH.ALLOW_EXPORT_CAPITAL) || !dataSource.length || downloadStatus != 0}> {downloadStatus == 2 ? '处理中...' : '导出Excel'} </Button> </div> <Table dataSource={dataSource} columns={this.creatColumns(state)} rowKey={(record, index) => index} animated={false} scroll={{ x: 1790 }} onChange={this.handleTableChange} loading={loading} pagination={pagination} /> </Card> <Route path={`${this.props.match.path}/:detail`} children={(childProps) => { return <Drawer title="查看详情" placement="right" width="calc(100% - 300px)" visible={!!childProps.match} onClose={this.onClose} destroyOnClose={true} className={classnames(globalStyles.drawGap, globalStyles.grey)} > <CustomerDetail {...this.props} id={childProps.match ? childProps.match.params.detail : null} getData={this.getData} isCompliance={true} allowManage={true} assort={2} /> </Drawer> }} /> </div> </Fragment> } }
creatColumns(state) { const colu
conditional_block
day_06.rs
/// --- Day 6: Chronal Coordinates --- /// /// The device on your wrist beeps several times, and once again you feel like /// you're falling. /// /// "Situation critical," the device announces. "Destination indeterminate. /// Chronal interference detected. Please specify new target coordinates." /// /// The device then produces a list of coordinates (your puzzle input). Are they /// places it thinks are safe or dangerous? It recommends you check manual page /// 729. The Elves did not give you a manual. /// /// If they're dangerous, maybe you can minimize the danger by finding the /// coordinate that gives the largest distance from the other points. /// /// Using only the Manhattan distance, determine the area around each coordinate /// by counting the number of integer X,Y locations that are closest to that /// coordinate (and aren't tied in distance to any other coordinate). /// /// Your goal is to find the size of the largest area that isn't infinite. For /// example, consider the following list of coordinates: /// /// 1, 1 /// 1, 6 /// 8, 3 /// 3, 4 /// 5, 5 /// 8, 9 /// /// If we name these coordinates A through F, we can draw them on a grid, /// putting 0,0 at the top left: /// /// .......... /// .A........ /// .......... /// ........C. /// ...D...... /// .....E.... /// .B........ /// .......... /// .......... /// ........F. /// /// This view is partial - the actual grid extends infinitely in all directions. /// Using the Manhattan distance, each location's closest coordinate can be /// determined, shown here in lowercase: /// /// aaaaa.cccc /// aAaaa.cccc /// aaaddecccc /// aadddeccCc /// ..dDdeeccc /// bb.deEeecc /// bBb.eeee.. /// bbb.eeefff /// bbb.eeffff /// bbb.ffffFf /// /// Locations shown as . are equally far from two or more coordinates, and so /// they don't count as being closest to any. /// /// In this example, the areas of coordinates A, B, C, and F are infinite - /// while not shown here, their areas extend forever outside the visible grid. 
/// However, the areas of coordinates D and E are finite: D is closest to 9 /// locations, and E is closest to 17 (both including the coordinate's location /// itself). Therefore, in this example, the size of the largest area is 17. /// /// What is the size of the largest area that isn't infinite? /// /// --- Part Two --- /// /// On the other hand, if the coordinates are safe, maybe the best you can do is /// try to find a region near as many coordinates as possible. /// /// For example, suppose you want the sum of the Manhattan distance to all of /// the coordinates to be less than 32. For each location, add up the distances /// to all of the given coordinates; if the total of those distances is less /// than 32, that location is within the desired region. Using the same /// coordinates as above, the resulting region looks like this: /// /// .......... /// .A........ /// .......... /// ...###..C. /// ..#D###... /// ..###E#... /// .B.###.... /// .......... /// .......... /// ........F. /// /// In particular, consider the highlighted location 4,3 located at the top /// middle of the region. Its calculation is as follows, where abs() is the /// absolute value function: /// /// Distance to coordinate A: abs(4-1) + abs(3-1) = 5 /// Distance to coordinate B: abs(4-1) + abs(3-6) = 6 /// Distance to coordinate C: abs(4-8) + abs(3-3) = 4 /// Distance to coordinate D: abs(4-3) + abs(3-4) = 2 /// Distance to coordinate E: abs(4-5) + abs(3-5) = 3 /// Distance to coordinate F: abs(4-8) + abs(3-9) = 10 /// Total distance: 5 + 6 + 4 + 2 + 3 + 10 = 30 /// /// Because the total distance to all coordinates (30) is less than 32, the /// location is within the region. /// /// This region, which also includes coordinates D and E, has a total size of /// 16. /// /// Your actual region will need to be much larger than this example, though, /// instead including all locations with a total distance of less than 10000. 
/// /// What is the size of the region containing all locations which have a total /// distance to all given coordinates of less than 10000? use regex::Regex; use std::collections::{HashMap, HashSet}; use std::cmp::Ordering; use std::i32; type Point = (i32, i32); type Grid = HashMap<Point, usize>; #[derive(Debug, PartialEq, Eq)] struct Range { min: i32, max: i32, } #[derive(Debug, PartialEq, Eq)] struct Bounds { x: Range, y: Range, } pub fn run()
fn create_grid(points: &Vec<Point>, bounds: &Bounds) -> Grid { let mut grid = HashMap::new(); for x in bounds.x.min..=bounds.x.max { for y in bounds.y.min..=bounds.y.max { let point = (x, y); match closest_point(&point, points) { Some(area_number) => grid.insert(point, area_number), None => None, }; } } grid } fn count_points_below(points: &Vec<Point>, bounds: &Bounds, treshold: i32) -> i32 { let mut count = 0; for x in bounds.x.min..=bounds.x.max { for y in bounds.y.min..=bounds.y.max { let point = (x, y); if total_distance(&point, points) < treshold { count += 1; }; } } count } fn create_bounds(points: &Vec<Point>) -> Bounds { let x_min = points.iter() .map(|(x, _)| x) .min() .unwrap(); let x_max = points.iter() .map(|(x, _)| x) .max() .unwrap(); let x_range = Range {min:*x_min, max:*x_max}; let y_min = points.iter() .map(|(_, y)| y) .min() .unwrap(); let y_max = points.iter() .map(|(_, y)| y) .max() .unwrap(); let y_range = Range {min:*y_min, max:*y_max}; Bounds {x:x_range, y:y_range} } fn distance((x1, y1): &Point, (x2, y2): &Point) -> i32 { (x1 - x2).abs() + (y1 - y2).abs() } fn total_distance(reference_point: &Point, points: &Vec<Point>) -> i32 { points.iter() .map(|point| distance(reference_point, point)) .sum() } fn closest_point(reference_point: &Point, points: &Vec<Point>) -> Option<usize> { let (index, _) = points.iter() .map(|point| distance(reference_point, point)) .enumerate() .fold((None, i32::MAX), |(some_index, minimum), (new_index, new_value)| { match minimum.cmp(&new_value) { Ordering::Greater => (Some(new_index), new_value), Ordering::Less => (some_index, minimum), Ordering::Equal => (None, minimum), } }); index } fn on_bounds(&(x, y): &Point, bounds: &Bounds) -> bool { if (bounds.x.min == x || bounds.x.max == x) && (bounds.y.min <= y && bounds.y.max >= y) { return true } if (bounds.y.min == y || bounds.y.max == y) && (bounds.x.min <= x && bounds.x.max >= x) { return true } false } fn parse_input(input: &str) -> Vec<Point> { input.lines() 
.filter_map(|line| convert_line(line)) .collect() } fn convert_line(line: &str) -> Option<Point> { lazy_static! { static ref RE: Regex = Regex::new(r"(\d*), (\d*)").unwrap(); } let captures = RE.captures(line).unwrap(); match (captures.get(1), captures.get(2)) { (Some(x), Some(y)) => Some((x.as_str().parse().unwrap(), y.as_str().parse().unwrap())), _ => None, } } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_input() { let input = "1, 1\n\ 1, 6\n\ 8, 3\n\ 3, 4\n\ 5, 5\n\ 8, 9"; let output: Vec<Point> = vec![ (1, 1), (1, 6), (8, 3), (3, 4), (5, 5), (8, 9)]; assert_eq!(parse_input(input), output); } #[test] fn test_create_bounds() { let input: Vec<Point> = vec![ (0, 0), (1, 1), (3, 2)]; let x_range = Range {min:0, max:3}; let y_range = Range {min:0, max:2}; let output = Bounds {x:x_range, y:y_range}; assert_eq!(create_bounds(&input), output); } #[test] fn test_create_grid() { let input: Vec<Point> = vec![ (0, 0), (2, 2)]; let x_range = Range {min:0, max:2}; let y_range = Range {min:0, max:2}; let bounds = Bounds {x:x_range, y:y_range}; let mut output = HashMap::new(); output.insert((0, 0), 0); output.insert((1, 0), 0); output.insert((0, 1), 0); output.insert((2, 1), 1); output.insert((1, 2), 1); output.insert((2, 2), 1); assert_eq!(create_grid(&input, &bounds), output); } #[test] fn test_on_bounds() { let x_range = Range {min:0, max:3}; let y_range = Range {min:2, max:6}; let bounds = Bounds {x:x_range, y:y_range}; assert!(on_bounds(&(0, 4), &bounds)); assert!(on_bounds(&(3, 2), &bounds)); assert!(on_bounds(&(2, 6), &bounds)); assert!(!on_bounds(&(2, 7), &bounds)); assert!(!on_bounds(&(2, 5), &bounds)); assert!(!on_bounds(&(1, 1), &bounds)); assert!(!on_bounds(&(11, 8), &bounds)); } #[test] fn test_distance() { assert_eq!(distance(&(0, 0), &(1, 1)), 2); assert_eq!(distance(&(1, 1), &(0, 0)), 2); assert_eq!(distance(&(10, 0), &(0, 10)), 20); assert_eq!(distance(&(5, 5), &(7, 3)), 4); } #[test] fn test_closest_point() { let points: Vec<Point> = vec![ (1, 
1), (1, 6), (8, 9)]; assert_eq!(closest_point(&(2, 2), &points), Some(0)); assert_eq!(closest_point(&(1, 5), &points), Some(1)); assert_eq!(closest_point(&(10, 10), &points), Some(2)); } #[test] fn test_closest_point_equal_distance() { let points: Vec<Point> = vec![ (0, 1), (0, 3)]; assert_eq!(closest_point(&(0, 2), &points), None); } #[test] fn test_total_distance() { let points: Vec<Point> = vec![ (0, 0), (2, 3), (10, 5)]; assert_eq!(total_distance(&(1, 1), &points), 18); assert_eq!(total_distance(&(3, 3), &points), 16); assert_eq!(total_distance(&(0, 5), &points), 19); } }
{ let points = parse_input(include_str!("../input/day_06.txt")); let bounds = create_bounds(&points); let grid = create_grid(&points, &bounds); let mut areas = HashMap::new(); let mut infinite_areas = HashSet::new(); for (point, area_number) in grid.iter() { if on_bounds(point,&bounds) { infinite_areas.insert(*area_number); areas.remove(area_number); } if !infinite_areas.contains(area_number) { *areas.entry(area_number).or_insert(0) += 1; } } let biggest_area_size = areas.values() .max() .unwrap(); println!("The biggest non-infinite area size is: {}", biggest_area_size); let concentrated_area = count_points_below(&points, &bounds, 10_000); println!("The size of the area that have a total distance less than \ 10.000 is: {}", concentrated_area); }
identifier_body
day_06.rs
/// --- Day 6: Chronal Coordinates --- /// /// The device on your wrist beeps several times, and once again you feel like /// you're falling. /// /// "Situation critical," the device announces. "Destination indeterminate. /// Chronal interference detected. Please specify new target coordinates." /// /// The device then produces a list of coordinates (your puzzle input). Are they /// places it thinks are safe or dangerous? It recommends you check manual page /// 729. The Elves did not give you a manual. /// /// If they're dangerous, maybe you can minimize the danger by finding the /// coordinate that gives the largest distance from the other points. /// /// Using only the Manhattan distance, determine the area around each coordinate /// by counting the number of integer X,Y locations that are closest to that /// coordinate (and aren't tied in distance to any other coordinate). /// /// Your goal is to find the size of the largest area that isn't infinite. For /// example, consider the following list of coordinates: /// /// 1, 1 /// 1, 6 /// 8, 3 /// 3, 4 /// 5, 5 /// 8, 9 /// /// If we name these coordinates A through F, we can draw them on a grid, /// putting 0,0 at the top left: /// /// .......... /// .A........ /// .......... /// ........C. /// ...D...... /// .....E.... /// .B........ /// .......... /// .......... /// ........F. /// /// This view is partial - the actual grid extends infinitely in all directions. /// Using the Manhattan distance, each location's closest coordinate can be /// determined, shown here in lowercase: /// /// aaaaa.cccc /// aAaaa.cccc /// aaaddecccc /// aadddeccCc /// ..dDdeeccc /// bb.deEeecc /// bBb.eeee.. /// bbb.eeefff /// bbb.eeffff /// bbb.ffffFf /// /// Locations shown as . are equally far from two or more coordinates, and so /// they don't count as being closest to any. /// /// In this example, the areas of coordinates A, B, C, and F are infinite - /// while not shown here, their areas extend forever outside the visible grid. 
/// However, the areas of coordinates D and E are finite: D is closest to 9 /// locations, and E is closest to 17 (both including the coordinate's location /// itself). Therefore, in this example, the size of the largest area is 17. /// /// What is the size of the largest area that isn't infinite? /// /// --- Part Two --- /// /// On the other hand, if the coordinates are safe, maybe the best you can do is /// try to find a region near as many coordinates as possible. /// /// For example, suppose you want the sum of the Manhattan distance to all of /// the coordinates to be less than 32. For each location, add up the distances /// to all of the given coordinates; if the total of those distances is less /// than 32, that location is within the desired region. Using the same /// coordinates as above, the resulting region looks like this: /// /// .......... /// .A........ /// .......... /// ...###..C. /// ..#D###... /// ..###E#... /// .B.###.... /// .......... /// .......... /// ........F. /// /// In particular, consider the highlighted location 4,3 located at the top /// middle of the region. Its calculation is as follows, where abs() is the /// absolute value function: /// /// Distance to coordinate A: abs(4-1) + abs(3-1) = 5 /// Distance to coordinate B: abs(4-1) + abs(3-6) = 6 /// Distance to coordinate C: abs(4-8) + abs(3-3) = 4 /// Distance to coordinate D: abs(4-3) + abs(3-4) = 2 /// Distance to coordinate E: abs(4-5) + abs(3-5) = 3 /// Distance to coordinate F: abs(4-8) + abs(3-9) = 10 /// Total distance: 5 + 6 + 4 + 2 + 3 + 10 = 30 /// /// Because the total distance to all coordinates (30) is less than 32, the /// location is within the region. /// /// This region, which also includes coordinates D and E, has a total size of /// 16. /// /// Your actual region will need to be much larger than this example, though, /// instead including all locations with a total distance of less than 10000. 
/// /// What is the size of the region containing all locations which have a total /// distance to all given coordinates of less than 10000? use regex::Regex; use std::collections::{HashMap, HashSet}; use std::cmp::Ordering; use std::i32; type Point = (i32, i32); type Grid = HashMap<Point, usize>; #[derive(Debug, PartialEq, Eq)] struct Range { min: i32, max: i32, } #[derive(Debug, PartialEq, Eq)] struct Bounds { x: Range, y: Range, } pub fn run() { let points = parse_input(include_str!("../input/day_06.txt")); let bounds = create_bounds(&points); let grid = create_grid(&points, &bounds); let mut areas = HashMap::new(); let mut infinite_areas = HashSet::new(); for (point, area_number) in grid.iter() { if on_bounds(point,&bounds) { infinite_areas.insert(*area_number); areas.remove(area_number); } if !infinite_areas.contains(area_number) { *areas.entry(area_number).or_insert(0) += 1; } } let biggest_area_size = areas.values() .max() .unwrap(); println!("The biggest non-infinite area size is: {}", biggest_area_size); let concentrated_area = count_points_below(&points, &bounds, 10_000); println!("The size of the area that have a total distance less than \ 10.000 is: {}", concentrated_area); } fn create_grid(points: &Vec<Point>, bounds: &Bounds) -> Grid { let mut grid = HashMap::new(); for x in bounds.x.min..=bounds.x.max { for y in bounds.y.min..=bounds.y.max { let point = (x, y); match closest_point(&point, points) { Some(area_number) => grid.insert(point, area_number), None => None, }; } } grid } fn count_points_below(points: &Vec<Point>, bounds: &Bounds, treshold: i32) -> i32 { let mut count = 0; for x in bounds.x.min..=bounds.x.max { for y in bounds.y.min..=bounds.y.max { let point = (x, y); if total_distance(&point, points) < treshold { count += 1; }; } } count } fn create_bounds(points: &Vec<Point>) -> Bounds { let x_min = points.iter() .map(|(x, _)| x) .min() .unwrap(); let x_max = points.iter() .map(|(x, _)| x) .max() .unwrap(); let x_range = Range {min:*x_min, 
max:*x_max}; let y_min = points.iter() .map(|(_, y)| y) .min() .unwrap(); let y_max = points.iter() .map(|(_, y)| y) .max() .unwrap(); let y_range = Range {min:*y_min, max:*y_max}; Bounds {x:x_range, y:y_range} } fn distance((x1, y1): &Point, (x2, y2): &Point) -> i32 { (x1 - x2).abs() + (y1 - y2).abs() } fn total_distance(reference_point: &Point, points: &Vec<Point>) -> i32 { points.iter() .map(|point| distance(reference_point, point)) .sum() } fn closest_point(reference_point: &Point, points: &Vec<Point>) -> Option<usize> { let (index, _) = points.iter() .map(|point| distance(reference_point, point)) .enumerate() .fold((None, i32::MAX), |(some_index, minimum), (new_index, new_value)| { match minimum.cmp(&new_value) { Ordering::Greater => (Some(new_index), new_value), Ordering::Less => (some_index, minimum), Ordering::Equal => (None, minimum), } }); index } fn on_bounds(&(x, y): &Point, bounds: &Bounds) -> bool { if (bounds.x.min == x || bounds.x.max == x) && (bounds.y.min <= y && bounds.y.max >= y) { return true } if (bounds.y.min == y || bounds.y.max == y) && (bounds.x.min <= x && bounds.x.max >= x) { return true } false } fn parse_input(input: &str) -> Vec<Point> { input.lines() .filter_map(|line| convert_line(line)) .collect() } fn convert_line(line: &str) -> Option<Point> { lazy_static! 
{ static ref RE: Regex = Regex::new(r"(\d*), (\d*)").unwrap(); } let captures = RE.captures(line).unwrap(); match (captures.get(1), captures.get(2)) { (Some(x), Some(y)) => Some((x.as_str().parse().unwrap(), y.as_str().parse().unwrap())), _ => None, } } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_input() { let input = "1, 1\n\ 1, 6\n\ 8, 3\n\ 3, 4\n\ 5, 5\n\ 8, 9"; let output: Vec<Point> = vec![ (1, 1), (1, 6), (8, 3), (3, 4), (5, 5), (8, 9)]; assert_eq!(parse_input(input), output); } #[test] fn test_create_bounds() { let input: Vec<Point> = vec![ (0, 0), (1, 1), (3, 2)]; let x_range = Range {min:0, max:3}; let y_range = Range {min:0, max:2}; let output = Bounds {x:x_range, y:y_range}; assert_eq!(create_bounds(&input), output); } #[test] fn test_create_grid() { let input: Vec<Point> = vec![ (0, 0), (2, 2)]; let x_range = Range {min:0, max:2}; let y_range = Range {min:0, max:2}; let bounds = Bounds {x:x_range, y:y_range}; let mut output = HashMap::new(); output.insert((0, 0), 0); output.insert((1, 0), 0); output.insert((0, 1), 0); output.insert((2, 1), 1); output.insert((1, 2), 1); output.insert((2, 2), 1); assert_eq!(create_grid(&input, &bounds), output); } #[test]
let bounds = Bounds {x:x_range, y:y_range}; assert!(on_bounds(&(0, 4), &bounds)); assert!(on_bounds(&(3, 2), &bounds)); assert!(on_bounds(&(2, 6), &bounds)); assert!(!on_bounds(&(2, 7), &bounds)); assert!(!on_bounds(&(2, 5), &bounds)); assert!(!on_bounds(&(1, 1), &bounds)); assert!(!on_bounds(&(11, 8), &bounds)); } #[test] fn test_distance() { assert_eq!(distance(&(0, 0), &(1, 1)), 2); assert_eq!(distance(&(1, 1), &(0, 0)), 2); assert_eq!(distance(&(10, 0), &(0, 10)), 20); assert_eq!(distance(&(5, 5), &(7, 3)), 4); } #[test] fn test_closest_point() { let points: Vec<Point> = vec![ (1, 1), (1, 6), (8, 9)]; assert_eq!(closest_point(&(2, 2), &points), Some(0)); assert_eq!(closest_point(&(1, 5), &points), Some(1)); assert_eq!(closest_point(&(10, 10), &points), Some(2)); } #[test] fn test_closest_point_equal_distance() { let points: Vec<Point> = vec![ (0, 1), (0, 3)]; assert_eq!(closest_point(&(0, 2), &points), None); } #[test] fn test_total_distance() { let points: Vec<Point> = vec![ (0, 0), (2, 3), (10, 5)]; assert_eq!(total_distance(&(1, 1), &points), 18); assert_eq!(total_distance(&(3, 3), &points), 16); assert_eq!(total_distance(&(0, 5), &points), 19); } }
fn test_on_bounds() { let x_range = Range {min:0, max:3}; let y_range = Range {min:2, max:6};
random_line_split
day_06.rs
/// --- Day 6: Chronal Coordinates --- /// /// The device on your wrist beeps several times, and once again you feel like /// you're falling. /// /// "Situation critical," the device announces. "Destination indeterminate. /// Chronal interference detected. Please specify new target coordinates." /// /// The device then produces a list of coordinates (your puzzle input). Are they /// places it thinks are safe or dangerous? It recommends you check manual page /// 729. The Elves did not give you a manual. /// /// If they're dangerous, maybe you can minimize the danger by finding the /// coordinate that gives the largest distance from the other points. /// /// Using only the Manhattan distance, determine the area around each coordinate /// by counting the number of integer X,Y locations that are closest to that /// coordinate (and aren't tied in distance to any other coordinate). /// /// Your goal is to find the size of the largest area that isn't infinite. For /// example, consider the following list of coordinates: /// /// 1, 1 /// 1, 6 /// 8, 3 /// 3, 4 /// 5, 5 /// 8, 9 /// /// If we name these coordinates A through F, we can draw them on a grid, /// putting 0,0 at the top left: /// /// .......... /// .A........ /// .......... /// ........C. /// ...D...... /// .....E.... /// .B........ /// .......... /// .......... /// ........F. /// /// This view is partial - the actual grid extends infinitely in all directions. /// Using the Manhattan distance, each location's closest coordinate can be /// determined, shown here in lowercase: /// /// aaaaa.cccc /// aAaaa.cccc /// aaaddecccc /// aadddeccCc /// ..dDdeeccc /// bb.deEeecc /// bBb.eeee.. /// bbb.eeefff /// bbb.eeffff /// bbb.ffffFf /// /// Locations shown as . are equally far from two or more coordinates, and so /// they don't count as being closest to any. /// /// In this example, the areas of coordinates A, B, C, and F are infinite - /// while not shown here, their areas extend forever outside the visible grid. 
/// However, the areas of coordinates D and E are finite: D is closest to 9 /// locations, and E is closest to 17 (both including the coordinate's location /// itself). Therefore, in this example, the size of the largest area is 17. /// /// What is the size of the largest area that isn't infinite? /// /// --- Part Two --- /// /// On the other hand, if the coordinates are safe, maybe the best you can do is /// try to find a region near as many coordinates as possible. /// /// For example, suppose you want the sum of the Manhattan distance to all of /// the coordinates to be less than 32. For each location, add up the distances /// to all of the given coordinates; if the total of those distances is less /// than 32, that location is within the desired region. Using the same /// coordinates as above, the resulting region looks like this: /// /// .......... /// .A........ /// .......... /// ...###..C. /// ..#D###... /// ..###E#... /// .B.###.... /// .......... /// .......... /// ........F. /// /// In particular, consider the highlighted location 4,3 located at the top /// middle of the region. Its calculation is as follows, where abs() is the /// absolute value function: /// /// Distance to coordinate A: abs(4-1) + abs(3-1) = 5 /// Distance to coordinate B: abs(4-1) + abs(3-6) = 6 /// Distance to coordinate C: abs(4-8) + abs(3-3) = 4 /// Distance to coordinate D: abs(4-3) + abs(3-4) = 2 /// Distance to coordinate E: abs(4-5) + abs(3-5) = 3 /// Distance to coordinate F: abs(4-8) + abs(3-9) = 10 /// Total distance: 5 + 6 + 4 + 2 + 3 + 10 = 30 /// /// Because the total distance to all coordinates (30) is less than 32, the /// location is within the region. /// /// This region, which also includes coordinates D and E, has a total size of /// 16. /// /// Your actual region will need to be much larger than this example, though, /// instead including all locations with a total distance of less than 10000. 
/// /// What is the size of the region containing all locations which have a total /// distance to all given coordinates of less than 10000? use regex::Regex; use std::collections::{HashMap, HashSet}; use std::cmp::Ordering; use std::i32; type Point = (i32, i32); type Grid = HashMap<Point, usize>; #[derive(Debug, PartialEq, Eq)] struct Range { min: i32, max: i32, } #[derive(Debug, PartialEq, Eq)] struct Bounds { x: Range, y: Range, } pub fn run() { let points = parse_input(include_str!("../input/day_06.txt")); let bounds = create_bounds(&points); let grid = create_grid(&points, &bounds); let mut areas = HashMap::new(); let mut infinite_areas = HashSet::new(); for (point, area_number) in grid.iter() { if on_bounds(point,&bounds) { infinite_areas.insert(*area_number); areas.remove(area_number); } if !infinite_areas.contains(area_number) { *areas.entry(area_number).or_insert(0) += 1; } } let biggest_area_size = areas.values() .max() .unwrap(); println!("The biggest non-infinite area size is: {}", biggest_area_size); let concentrated_area = count_points_below(&points, &bounds, 10_000); println!("The size of the area that have a total distance less than \ 10.000 is: {}", concentrated_area); } fn create_grid(points: &Vec<Point>, bounds: &Bounds) -> Grid { let mut grid = HashMap::new(); for x in bounds.x.min..=bounds.x.max { for y in bounds.y.min..=bounds.y.max { let point = (x, y); match closest_point(&point, points) { Some(area_number) => grid.insert(point, area_number), None => None, }; } } grid } fn count_points_below(points: &Vec<Point>, bounds: &Bounds, treshold: i32) -> i32 { let mut count = 0; for x in bounds.x.min..=bounds.x.max { for y in bounds.y.min..=bounds.y.max { let point = (x, y); if total_distance(&point, points) < treshold { count += 1; }; } } count } fn create_bounds(points: &Vec<Point>) -> Bounds { let x_min = points.iter() .map(|(x, _)| x) .min() .unwrap(); let x_max = points.iter() .map(|(x, _)| x) .max() .unwrap(); let x_range = Range {min:*x_min, 
max:*x_max}; let y_min = points.iter() .map(|(_, y)| y) .min() .unwrap(); let y_max = points.iter() .map(|(_, y)| y) .max() .unwrap(); let y_range = Range {min:*y_min, max:*y_max}; Bounds {x:x_range, y:y_range} } fn distance((x1, y1): &Point, (x2, y2): &Point) -> i32 { (x1 - x2).abs() + (y1 - y2).abs() } fn
(reference_point: &Point, points: &Vec<Point>) -> i32 { points.iter() .map(|point| distance(reference_point, point)) .sum() } fn closest_point(reference_point: &Point, points: &Vec<Point>) -> Option<usize> { let (index, _) = points.iter() .map(|point| distance(reference_point, point)) .enumerate() .fold((None, i32::MAX), |(some_index, minimum), (new_index, new_value)| { match minimum.cmp(&new_value) { Ordering::Greater => (Some(new_index), new_value), Ordering::Less => (some_index, minimum), Ordering::Equal => (None, minimum), } }); index } fn on_bounds(&(x, y): &Point, bounds: &Bounds) -> bool { if (bounds.x.min == x || bounds.x.max == x) && (bounds.y.min <= y && bounds.y.max >= y) { return true } if (bounds.y.min == y || bounds.y.max == y) && (bounds.x.min <= x && bounds.x.max >= x) { return true } false } fn parse_input(input: &str) -> Vec<Point> { input.lines() .filter_map(|line| convert_line(line)) .collect() } fn convert_line(line: &str) -> Option<Point> { lazy_static! { static ref RE: Regex = Regex::new(r"(\d*), (\d*)").unwrap(); } let captures = RE.captures(line).unwrap(); match (captures.get(1), captures.get(2)) { (Some(x), Some(y)) => Some((x.as_str().parse().unwrap(), y.as_str().parse().unwrap())), _ => None, } } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_input() { let input = "1, 1\n\ 1, 6\n\ 8, 3\n\ 3, 4\n\ 5, 5\n\ 8, 9"; let output: Vec<Point> = vec![ (1, 1), (1, 6), (8, 3), (3, 4), (5, 5), (8, 9)]; assert_eq!(parse_input(input), output); } #[test] fn test_create_bounds() { let input: Vec<Point> = vec![ (0, 0), (1, 1), (3, 2)]; let x_range = Range {min:0, max:3}; let y_range = Range {min:0, max:2}; let output = Bounds {x:x_range, y:y_range}; assert_eq!(create_bounds(&input), output); } #[test] fn test_create_grid() { let input: Vec<Point> = vec![ (0, 0), (2, 2)]; let x_range = Range {min:0, max:2}; let y_range = Range {min:0, max:2}; let bounds = Bounds {x:x_range, y:y_range}; let mut output = HashMap::new(); output.insert((0, 0), 
0); output.insert((1, 0), 0); output.insert((0, 1), 0); output.insert((2, 1), 1); output.insert((1, 2), 1); output.insert((2, 2), 1); assert_eq!(create_grid(&input, &bounds), output); } #[test] fn test_on_bounds() { let x_range = Range {min:0, max:3}; let y_range = Range {min:2, max:6}; let bounds = Bounds {x:x_range, y:y_range}; assert!(on_bounds(&(0, 4), &bounds)); assert!(on_bounds(&(3, 2), &bounds)); assert!(on_bounds(&(2, 6), &bounds)); assert!(!on_bounds(&(2, 7), &bounds)); assert!(!on_bounds(&(2, 5), &bounds)); assert!(!on_bounds(&(1, 1), &bounds)); assert!(!on_bounds(&(11, 8), &bounds)); } #[test] fn test_distance() { assert_eq!(distance(&(0, 0), &(1, 1)), 2); assert_eq!(distance(&(1, 1), &(0, 0)), 2); assert_eq!(distance(&(10, 0), &(0, 10)), 20); assert_eq!(distance(&(5, 5), &(7, 3)), 4); } #[test] fn test_closest_point() { let points: Vec<Point> = vec![ (1, 1), (1, 6), (8, 9)]; assert_eq!(closest_point(&(2, 2), &points), Some(0)); assert_eq!(closest_point(&(1, 5), &points), Some(1)); assert_eq!(closest_point(&(10, 10), &points), Some(2)); } #[test] fn test_closest_point_equal_distance() { let points: Vec<Point> = vec![ (0, 1), (0, 3)]; assert_eq!(closest_point(&(0, 2), &points), None); } #[test] fn test_total_distance() { let points: Vec<Point> = vec![ (0, 0), (2, 3), (10, 5)]; assert_eq!(total_distance(&(1, 1), &points), 18); assert_eq!(total_distance(&(3, 3), &points), 16); assert_eq!(total_distance(&(0, 5), &points), 19); } }
total_distance
identifier_name
lib.rs
// Copyright 2018 Mozilla // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use // this file except in compliance with the License. You may obtain a copy of the // License at http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. // We take the "low road" here when returning the structs - we expose the // items (and arrays of items) as strings, which are JSON. The rust side of // the world gets serialization and deserialization for free and it makes // memory management that little bit simpler. extern crate failure; extern crate serde_json; extern crate url; extern crate reqwest; #[macro_use] extern crate ffi_toolkit; extern crate mentat; extern crate sync15_passwords; extern crate sync15_adapter as sync; #[macro_use] extern crate log; mod error; use error::{ ExternError, with_translated_result, with_translated_value_result, with_translated_void_result, with_translated_string_result, with_translated_opt_string_result, }; use std::os::raw::{ c_char, }; use std::sync::{Once, ONCE_INIT}; use ffi_toolkit::string::{ c_char_to_string, }; pub use ffi_toolkit::memory::{ destroy_c_char, }; use sync::{ Sync15StorageClient, Sync15StorageClientInit, GlobalState, }; use sync15_passwords::{ passwords, PasswordEngine, ServerPassword, }; pub struct SyncInfo { state: GlobalState, client: Sync15StorageClient, // Used so that we know whether or not we need to re-initialize `client` last_client_init: Sync15StorageClientInit, } pub struct PasswordState { engine: PasswordEngine, sync: Option<SyncInfo>, } #[cfg(target_os = "android")] extern { pub fn __android_log_write(level: ::std::os::raw::c_int, tag: *const c_char, text: *const c_char) -> 
::std::os::raw::c_int; } struct DevLogger; impl log::Log for DevLogger { fn enabled(&self, _: &log::Metadata) -> bool { true } fn
(&self, record: &log::Record) { let message = format!("{}:{} -- {}", record.level(), record.target(), record.args()); println!("{}", message); #[cfg(target_os = "android")] { unsafe { let message = ::std::ffi::CString::new(message).unwrap(); let level_int = match record.level() { log::Level::Trace => 2, log::Level::Debug => 3, log::Level::Info => 4, log::Level::Warn => 5, log::Level::Error => 6, }; let message = message.as_ptr(); let tag = b"RustInternal\0"; __android_log_write(level_int, tag.as_ptr() as *const c_char, message); } } // TODO ios (use NSLog(__CFStringMakeConstantString(b"%s\0"), ...), maybe windows? (OutputDebugStringA) } fn flush(&self) {} } static INIT_LOGGER: Once = ONCE_INIT; static DEV_LOGGER: &'static log::Log = &DevLogger; fn init_logger() { log::set_logger(DEV_LOGGER).unwrap(); log::set_max_level(log::LevelFilter::Trace); std::env::set_var("RUST_BACKTRACE", "1"); info!("Hooked up rust logger!"); } define_destructor!(sync15_passwords_state_destroy, PasswordState); // This is probably too many string arguments... #[no_mangle] pub unsafe extern "C" fn sync15_passwords_state_new( mentat_db_path: *const c_char, encryption_key: *const c_char, error: *mut ExternError ) -> *mut PasswordState { INIT_LOGGER.call_once(init_logger); with_translated_result(error, || { let store = mentat::Store::open_with_key(c_char_to_string(mentat_db_path), c_char_to_string(encryption_key))?; let engine = PasswordEngine::new(store)?; Ok(PasswordState { engine, sync: None, }) }) } // indirection to help `?` figure out the target error type fn parse_url(url: &str) -> sync::Result<url::Url> { Ok(url::Url::parse(url)?) 
} #[no_mangle] pub unsafe extern "C" fn sync15_passwords_sync( state: *mut PasswordState, key_id: *const c_char, access_token: *const c_char, sync_key: *const c_char, tokenserver_url: *const c_char, error: *mut ExternError ) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; let root_sync_key = sync::KeyBundle::from_ksync_base64( c_char_to_string(sync_key).into())?; let requested_init = Sync15StorageClientInit { key_id: c_char_to_string(key_id).into(), access_token: c_char_to_string(access_token).into(), tokenserver_url: parse_url(c_char_to_string(tokenserver_url))?, }; // TODO: If `to_ready` (or anything else with a ?) fails below, this // `take()` means we end up with `state.sync.is_none()`, which means the // next sync will redownload meta/global, crypto/keys, etc. without // needing to. (AFAICT fixing this requires a change in sync15-adapter, // since to_ready takes GlobalState as a move, and it's not clear if // that change even is a good idea). let mut sync_info = state.sync.take().map(Ok) .unwrap_or_else(|| -> sync::Result<SyncInfo> { let state = GlobalState::default(); let client = Sync15StorageClient::new(requested_init.clone())?; Ok(SyncInfo { state, client, last_client_init: requested_init.clone(), }) })?; // If the options passed for initialization of the storage client aren't // the same as the ones we used last time, reinitialize it. (Note that // we could avoid the comparison in the case where we had `None` in // `state.sync` before, but this probably doesn't matter). if requested_init != sync_info.last_client_init { sync_info.client = Sync15StorageClient::new(requested_init.clone())?; sync_info.last_client_init = requested_init; } { // Scope borrow of `sync_info.client` let mut state_machine = sync::SetupStateMachine::for_readonly_sync(&sync_info.client, &root_sync_key); let next_sync_state = state_machine.to_ready(sync_info.state)?; sync_info.state = next_sync_state; } // We don't use a ? 
on the next line so that even if `state.engine.sync` // fails, we don't forget the sync_state. let result = state.engine.sync(&sync_info.client, &sync_info.state); state.sync = Some(sync_info); result }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_touch(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; state.engine.touch_credential(c_char_to_string(id).into())?; Ok(()) }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_delete(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) -> bool { with_translated_value_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; let deleted = state.engine.delete_credential(c_char_to_string(id).into())?; Ok(deleted) }) } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_wipe(state: *mut PasswordState, error: *mut ExternError) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; state.engine.wipe()?; Ok(()) }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_reset(state: *mut PasswordState, error: *mut ExternError) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; state.engine.reset()?; // XXX We probably need to clear out some things from `state.service`! Ok(()) }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_get_all(state: *mut PasswordState, error: *mut ExternError) -> *mut c_char { with_translated_string_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; // Type declaration is just to make sure we have the right type (and for documentation) let passwords: Vec<ServerPassword> = { let mut in_progress_read = state.engine.store.begin_read()?; passwords::get_all_sync_passwords(&mut in_progress_read)? 
}; let result = serde_json::to_string(&passwords)?; Ok(result) }) } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_get_by_id(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) -> *mut c_char { with_translated_opt_string_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; // Type declaration is just to make sure we have the right type (and for documentation) let maybe_pass: Option<ServerPassword> = { let mut in_progress_read = state.engine.store.begin_read()?; passwords::get_sync_password(&mut in_progress_read, c_char_to_string(id).into())? }; let pass = if let Some(p) = maybe_pass { p } else { return Ok(None) }; Ok(Some(serde_json::to_string(&pass)?)) }) } #[no_mangle] pub extern "C" fn wtf_destroy_c_char(s: *mut c_char) { // the "pub use" above should should be enough to expose this? // It appears that is enough to expose it in a windows DLL, but for // some reason it's not expored for Android. // *sob* - and now that I've defined this, suddenly this *and* // destroy_c_char are exposed (and removing this again removes the // destroy_c_char) // Oh well, a yak for another day. destroy_c_char(s); }
log
identifier_name
lib.rs
// Copyright 2018 Mozilla // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use // this file except in compliance with the License. You may obtain a copy of the // License at http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software distributed
// We take the "low road" here when returning the structs - we expose the // items (and arrays of items) as strings, which are JSON. The rust side of // the world gets serialization and deserialization for free and it makes // memory management that little bit simpler. extern crate failure; extern crate serde_json; extern crate url; extern crate reqwest; #[macro_use] extern crate ffi_toolkit; extern crate mentat; extern crate sync15_passwords; extern crate sync15_adapter as sync; #[macro_use] extern crate log; mod error; use error::{ ExternError, with_translated_result, with_translated_value_result, with_translated_void_result, with_translated_string_result, with_translated_opt_string_result, }; use std::os::raw::{ c_char, }; use std::sync::{Once, ONCE_INIT}; use ffi_toolkit::string::{ c_char_to_string, }; pub use ffi_toolkit::memory::{ destroy_c_char, }; use sync::{ Sync15StorageClient, Sync15StorageClientInit, GlobalState, }; use sync15_passwords::{ passwords, PasswordEngine, ServerPassword, }; pub struct SyncInfo { state: GlobalState, client: Sync15StorageClient, // Used so that we know whether or not we need to re-initialize `client` last_client_init: Sync15StorageClientInit, } pub struct PasswordState { engine: PasswordEngine, sync: Option<SyncInfo>, } #[cfg(target_os = "android")] extern { pub fn __android_log_write(level: ::std::os::raw::c_int, tag: *const c_char, text: *const c_char) -> ::std::os::raw::c_int; } struct DevLogger; impl log::Log for DevLogger { fn enabled(&self, _: &log::Metadata) -> bool { true } fn log(&self, record: &log::Record) { let message = format!("{}:{} -- {}", record.level(), record.target(), record.args()); println!("{}", message); #[cfg(target_os = "android")] { unsafe { let message = ::std::ffi::CString::new(message).unwrap(); let level_int = match record.level() { log::Level::Trace => 2, log::Level::Debug => 3, log::Level::Info => 4, log::Level::Warn => 5, log::Level::Error => 6, }; let message = message.as_ptr(); let tag = 
b"RustInternal\0"; __android_log_write(level_int, tag.as_ptr() as *const c_char, message); } } // TODO ios (use NSLog(__CFStringMakeConstantString(b"%s\0"), ...), maybe windows? (OutputDebugStringA) } fn flush(&self) {} } static INIT_LOGGER: Once = ONCE_INIT; static DEV_LOGGER: &'static log::Log = &DevLogger; fn init_logger() { log::set_logger(DEV_LOGGER).unwrap(); log::set_max_level(log::LevelFilter::Trace); std::env::set_var("RUST_BACKTRACE", "1"); info!("Hooked up rust logger!"); } define_destructor!(sync15_passwords_state_destroy, PasswordState); // This is probably too many string arguments... #[no_mangle] pub unsafe extern "C" fn sync15_passwords_state_new( mentat_db_path: *const c_char, encryption_key: *const c_char, error: *mut ExternError ) -> *mut PasswordState { INIT_LOGGER.call_once(init_logger); with_translated_result(error, || { let store = mentat::Store::open_with_key(c_char_to_string(mentat_db_path), c_char_to_string(encryption_key))?; let engine = PasswordEngine::new(store)?; Ok(PasswordState { engine, sync: None, }) }) } // indirection to help `?` figure out the target error type fn parse_url(url: &str) -> sync::Result<url::Url> { Ok(url::Url::parse(url)?) } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_sync( state: *mut PasswordState, key_id: *const c_char, access_token: *const c_char, sync_key: *const c_char, tokenserver_url: *const c_char, error: *mut ExternError ) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; let root_sync_key = sync::KeyBundle::from_ksync_base64( c_char_to_string(sync_key).into())?; let requested_init = Sync15StorageClientInit { key_id: c_char_to_string(key_id).into(), access_token: c_char_to_string(access_token).into(), tokenserver_url: parse_url(c_char_to_string(tokenserver_url))?, }; // TODO: If `to_ready` (or anything else with a ?) 
fails below, this // `take()` means we end up with `state.sync.is_none()`, which means the // next sync will redownload meta/global, crypto/keys, etc. without // needing to. (AFAICT fixing this requires a change in sync15-adapter, // since to_ready takes GlobalState as a move, and it's not clear if // that change even is a good idea). let mut sync_info = state.sync.take().map(Ok) .unwrap_or_else(|| -> sync::Result<SyncInfo> { let state = GlobalState::default(); let client = Sync15StorageClient::new(requested_init.clone())?; Ok(SyncInfo { state, client, last_client_init: requested_init.clone(), }) })?; // If the options passed for initialization of the storage client aren't // the same as the ones we used last time, reinitialize it. (Note that // we could avoid the comparison in the case where we had `None` in // `state.sync` before, but this probably doesn't matter). if requested_init != sync_info.last_client_init { sync_info.client = Sync15StorageClient::new(requested_init.clone())?; sync_info.last_client_init = requested_init; } { // Scope borrow of `sync_info.client` let mut state_machine = sync::SetupStateMachine::for_readonly_sync(&sync_info.client, &root_sync_key); let next_sync_state = state_machine.to_ready(sync_info.state)?; sync_info.state = next_sync_state; } // We don't use a ? on the next line so that even if `state.engine.sync` // fails, we don't forget the sync_state. 
let result = state.engine.sync(&sync_info.client, &sync_info.state); state.sync = Some(sync_info); result }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_touch(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; state.engine.touch_credential(c_char_to_string(id).into())?; Ok(()) }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_delete(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) -> bool { with_translated_value_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; let deleted = state.engine.delete_credential(c_char_to_string(id).into())?; Ok(deleted) }) } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_wipe(state: *mut PasswordState, error: *mut ExternError) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; state.engine.wipe()?; Ok(()) }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_reset(state: *mut PasswordState, error: *mut ExternError) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; state.engine.reset()?; // XXX We probably need to clear out some things from `state.service`! Ok(()) }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_get_all(state: *mut PasswordState, error: *mut ExternError) -> *mut c_char { with_translated_string_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; // Type declaration is just to make sure we have the right type (and for documentation) let passwords: Vec<ServerPassword> = { let mut in_progress_read = state.engine.store.begin_read()?; passwords::get_all_sync_passwords(&mut in_progress_read)? 
}; let result = serde_json::to_string(&passwords)?; Ok(result) }) } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_get_by_id(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) -> *mut c_char { with_translated_opt_string_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; // Type declaration is just to make sure we have the right type (and for documentation) let maybe_pass: Option<ServerPassword> = { let mut in_progress_read = state.engine.store.begin_read()?; passwords::get_sync_password(&mut in_progress_read, c_char_to_string(id).into())? }; let pass = if let Some(p) = maybe_pass { p } else { return Ok(None) }; Ok(Some(serde_json::to_string(&pass)?)) }) } #[no_mangle] pub extern "C" fn wtf_destroy_c_char(s: *mut c_char) { // the "pub use" above should should be enough to expose this? // It appears that is enough to expose it in a windows DLL, but for // some reason it's not expored for Android. // *sob* - and now that I've defined this, suddenly this *and* // destroy_c_char are exposed (and removing this again removes the // destroy_c_char) // Oh well, a yak for another day. destroy_c_char(s); }
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License.
random_line_split
lib.rs
// Copyright 2018 Mozilla // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use // this file except in compliance with the License. You may obtain a copy of the // License at http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. // We take the "low road" here when returning the structs - we expose the // items (and arrays of items) as strings, which are JSON. The rust side of // the world gets serialization and deserialization for free and it makes // memory management that little bit simpler. extern crate failure; extern crate serde_json; extern crate url; extern crate reqwest; #[macro_use] extern crate ffi_toolkit; extern crate mentat; extern crate sync15_passwords; extern crate sync15_adapter as sync; #[macro_use] extern crate log; mod error; use error::{ ExternError, with_translated_result, with_translated_value_result, with_translated_void_result, with_translated_string_result, with_translated_opt_string_result, }; use std::os::raw::{ c_char, }; use std::sync::{Once, ONCE_INIT}; use ffi_toolkit::string::{ c_char_to_string, }; pub use ffi_toolkit::memory::{ destroy_c_char, }; use sync::{ Sync15StorageClient, Sync15StorageClientInit, GlobalState, }; use sync15_passwords::{ passwords, PasswordEngine, ServerPassword, }; pub struct SyncInfo { state: GlobalState, client: Sync15StorageClient, // Used so that we know whether or not we need to re-initialize `client` last_client_init: Sync15StorageClientInit, } pub struct PasswordState { engine: PasswordEngine, sync: Option<SyncInfo>, } #[cfg(target_os = "android")] extern { pub fn __android_log_write(level: ::std::os::raw::c_int, tag: *const c_char, text: *const c_char) -> 
::std::os::raw::c_int; } struct DevLogger; impl log::Log for DevLogger { fn enabled(&self, _: &log::Metadata) -> bool { true } fn log(&self, record: &log::Record) { let message = format!("{}:{} -- {}", record.level(), record.target(), record.args()); println!("{}", message); #[cfg(target_os = "android")] { unsafe { let message = ::std::ffi::CString::new(message).unwrap(); let level_int = match record.level() { log::Level::Trace => 2, log::Level::Debug => 3, log::Level::Info => 4, log::Level::Warn => 5, log::Level::Error => 6, }; let message = message.as_ptr(); let tag = b"RustInternal\0"; __android_log_write(level_int, tag.as_ptr() as *const c_char, message); } } // TODO ios (use NSLog(__CFStringMakeConstantString(b"%s\0"), ...), maybe windows? (OutputDebugStringA) } fn flush(&self) {} } static INIT_LOGGER: Once = ONCE_INIT; static DEV_LOGGER: &'static log::Log = &DevLogger; fn init_logger() { log::set_logger(DEV_LOGGER).unwrap(); log::set_max_level(log::LevelFilter::Trace); std::env::set_var("RUST_BACKTRACE", "1"); info!("Hooked up rust logger!"); } define_destructor!(sync15_passwords_state_destroy, PasswordState); // This is probably too many string arguments... #[no_mangle] pub unsafe extern "C" fn sync15_passwords_state_new( mentat_db_path: *const c_char, encryption_key: *const c_char, error: *mut ExternError ) -> *mut PasswordState { INIT_LOGGER.call_once(init_logger); with_translated_result(error, || { let store = mentat::Store::open_with_key(c_char_to_string(mentat_db_path), c_char_to_string(encryption_key))?; let engine = PasswordEngine::new(store)?; Ok(PasswordState { engine, sync: None, }) }) } // indirection to help `?` figure out the target error type fn parse_url(url: &str) -> sync::Result<url::Url> {
#[no_mangle] pub unsafe extern "C" fn sync15_passwords_sync( state: *mut PasswordState, key_id: *const c_char, access_token: *const c_char, sync_key: *const c_char, tokenserver_url: *const c_char, error: *mut ExternError ) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; let root_sync_key = sync::KeyBundle::from_ksync_base64( c_char_to_string(sync_key).into())?; let requested_init = Sync15StorageClientInit { key_id: c_char_to_string(key_id).into(), access_token: c_char_to_string(access_token).into(), tokenserver_url: parse_url(c_char_to_string(tokenserver_url))?, }; // TODO: If `to_ready` (or anything else with a ?) fails below, this // `take()` means we end up with `state.sync.is_none()`, which means the // next sync will redownload meta/global, crypto/keys, etc. without // needing to. (AFAICT fixing this requires a change in sync15-adapter, // since to_ready takes GlobalState as a move, and it's not clear if // that change even is a good idea). let mut sync_info = state.sync.take().map(Ok) .unwrap_or_else(|| -> sync::Result<SyncInfo> { let state = GlobalState::default(); let client = Sync15StorageClient::new(requested_init.clone())?; Ok(SyncInfo { state, client, last_client_init: requested_init.clone(), }) })?; // If the options passed for initialization of the storage client aren't // the same as the ones we used last time, reinitialize it. (Note that // we could avoid the comparison in the case where we had `None` in // `state.sync` before, but this probably doesn't matter). if requested_init != sync_info.last_client_init { sync_info.client = Sync15StorageClient::new(requested_init.clone())?; sync_info.last_client_init = requested_init; } { // Scope borrow of `sync_info.client` let mut state_machine = sync::SetupStateMachine::for_readonly_sync(&sync_info.client, &root_sync_key); let next_sync_state = state_machine.to_ready(sync_info.state)?; sync_info.state = next_sync_state; } // We don't use a ? 
on the next line so that even if `state.engine.sync` // fails, we don't forget the sync_state. let result = state.engine.sync(&sync_info.client, &sync_info.state); state.sync = Some(sync_info); result }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_touch(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; state.engine.touch_credential(c_char_to_string(id).into())?; Ok(()) }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_delete(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) -> bool { with_translated_value_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; let deleted = state.engine.delete_credential(c_char_to_string(id).into())?; Ok(deleted) }) } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_wipe(state: *mut PasswordState, error: *mut ExternError) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; state.engine.wipe()?; Ok(()) }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_reset(state: *mut PasswordState, error: *mut ExternError) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; state.engine.reset()?; // XXX We probably need to clear out some things from `state.service`! Ok(()) }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_get_all(state: *mut PasswordState, error: *mut ExternError) -> *mut c_char { with_translated_string_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; // Type declaration is just to make sure we have the right type (and for documentation) let passwords: Vec<ServerPassword> = { let mut in_progress_read = state.engine.store.begin_read()?; passwords::get_all_sync_passwords(&mut in_progress_read)? 
}; let result = serde_json::to_string(&passwords)?; Ok(result) }) } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_get_by_id(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) -> *mut c_char { with_translated_opt_string_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; // Type declaration is just to make sure we have the right type (and for documentation) let maybe_pass: Option<ServerPassword> = { let mut in_progress_read = state.engine.store.begin_read()?; passwords::get_sync_password(&mut in_progress_read, c_char_to_string(id).into())? }; let pass = if let Some(p) = maybe_pass { p } else { return Ok(None) }; Ok(Some(serde_json::to_string(&pass)?)) }) } #[no_mangle] pub extern "C" fn wtf_destroy_c_char(s: *mut c_char) { // the "pub use" above should should be enough to expose this? // It appears that is enough to expose it in a windows DLL, but for // some reason it's not expored for Android. // *sob* - and now that I've defined this, suddenly this *and* // destroy_c_char are exposed (and removing this again removes the // destroy_c_char) // Oh well, a yak for another day. destroy_c_char(s); }
Ok(url::Url::parse(url)?) }
identifier_body
lucy.go
/* Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package lucy /* #include <stdlib.h> #define C_LUCY_DOC #define C_LUCY_REGEXTOKENIZER #define C_LUCY_DEFAULTDOCREADER #define C_LUCY_INVERTER #define C_LUCY_INVERTERENTRY #include "lucy_parcel.h" #include "Lucy/Analysis/RegexTokenizer.h" #include "Lucy/Document/Doc.h" #include "Lucy/Index/DocReader.h" #include "Lucy/Index/Inverter.h" #include "Clownfish/String.h" #include "Clownfish/Blob.h" #include "Clownfish/Num.h" #include "Clownfish/Hash.h" #include "Clownfish/HashIterator.h" #include "Clownfish/Vector.h" #include "Clownfish/Err.h" #include "Clownfish/Util/StringHelper.h" #include "Lucy/Analysis/Analyzer.h" #include "Lucy/Analysis/Inversion.h" #include "Lucy/Analysis/Token.h" #include "Lucy/Document/HitDoc.h" #include "Lucy/Plan/FieldType.h" #include "Lucy/Plan/Schema.h" #include "Lucy/Index/Segment.h" #include "Lucy/Store/InStream.h" #include "Lucy/Store/OutStream.h" #include "Lucy/Util/Freezer.h" extern lucy_RegexTokenizer* GOLUCY_RegexTokenizer_init(lucy_RegexTokenizer *self, cfish_String *pattern); extern lucy_RegexTokenizer* (*GOLUCY_RegexTokenizer_init_BRIDGE)(lucy_RegexTokenizer *self, cfish_String *pattern); extern void GOLUCY_RegexTokenizer_Destroy(lucy_RegexTokenizer *self); extern void 
(*GOLUCY_RegexTokenizer_Destroy_BRIDGE)(lucy_RegexTokenizer *self); extern void GOLUCY_RegexTokenizer_Tokenize_Utf8(lucy_RegexTokenizer *self, char *str, size_t string_len, lucy_Inversion *inversion); extern void (*GOLUCY_RegexTokenizer_Tokenize_Utf8_BRIDGE)(lucy_RegexTokenizer *self, const char *str, size_t string_len, lucy_Inversion *inversion); extern lucy_Doc* GOLUCY_Doc_init(lucy_Doc *doc, void *fields, int32_t doc_id); extern lucy_Doc* (*GOLUCY_Doc_init_BRIDGE)(lucy_Doc *doc, void *fields, int32_t doc_id); extern void GOLUCY_Doc_Set_Fields(lucy_Doc *self, void *fields); extern void (*GOLUCY_Doc_Set_Fields_BRIDGE)(lucy_Doc *self, void *fields); extern uint32_t GOLUCY_Doc_Get_Size(lucy_Doc *self); extern uint32_t (*GOLUCY_Doc_Get_Size_BRIDGE)(lucy_Doc *self); extern void GOLUCY_Doc_Store(lucy_Doc *self, cfish_String *field, cfish_Obj *value); extern void (*GOLUCY_Doc_Store_BRIDGE)(lucy_Doc *self, cfish_String *field, cfish_Obj *value); extern void GOLUCY_Doc_Serialize(lucy_Doc *self, lucy_OutStream *outstream); extern void (*GOLUCY_Doc_Serialize_BRIDGE)(lucy_Doc *self, lucy_OutStream *outstream); extern lucy_Doc* GOLUCY_Doc_Deserialize(lucy_Doc *self, lucy_InStream *instream); extern lucy_Doc* (*GOLUCY_Doc_Deserialize_BRIDGE)(lucy_Doc *self, lucy_InStream *instream); extern cfish_Obj* GOLUCY_Doc_Extract(lucy_Doc *self, cfish_String *field); extern cfish_Obj* (*GOLUCY_Doc_Extract_BRIDGE)(lucy_Doc *self, cfish_String *field); extern cfish_Vector* GOLUCY_Doc_Field_Names(lucy_Doc *self); extern cfish_Vector* (*GOLUCY_Doc_Field_Names_BRIDGE)(lucy_Doc *self); extern bool GOLUCY_Doc_Equals(lucy_Doc *self, cfish_Obj *other); extern bool (*GOLUCY_Doc_Equals_BRIDGE)(lucy_Doc *self, cfish_Obj *other); extern void GOLUCY_Doc_Destroy(lucy_Doc *self); extern void (*GOLUCY_Doc_Destroy_BRIDGE)(lucy_Doc *self); extern lucy_HitDoc* GOLUCY_DefDocReader_Fetch_Doc(lucy_DefaultDocReader *self, int32_t doc_id); extern lucy_HitDoc* 
(*GOLUCY_DefDocReader_Fetch_Doc_BRIDGE)(lucy_DefaultDocReader *self, int32_t doc_id); extern void GOLUCY_Inverter_Invert_Doc(lucy_Inverter *self, lucy_Doc *doc); extern void (*GOLUCY_Inverter_Invert_Doc_BRIDGE)(lucy_Inverter *self, lucy_Doc *doc); // C symbols linked into a Go-built package archive are not visible to // external C code -- but internal code *can* see symbols from outside. // This allows us to fake up symbol export by assigning values only known // interally to external symbols during Go package initialization. static CFISH_INLINE void GOLUCY_glue_exported_symbols() { GOLUCY_RegexTokenizer_init_BRIDGE = GOLUCY_RegexTokenizer_init; GOLUCY_RegexTokenizer_Destroy_BRIDGE = GOLUCY_RegexTokenizer_Destroy; GOLUCY_RegexTokenizer_Tokenize_Utf8_BRIDGE = (LUCY_RegexTokenizer_Tokenize_Utf8_t)GOLUCY_RegexTokenizer_Tokenize_Utf8; GOLUCY_Doc_init_BRIDGE = GOLUCY_Doc_init; GOLUCY_Doc_Set_Fields_BRIDGE = GOLUCY_Doc_Set_Fields; GOLUCY_Doc_Get_Size_BRIDGE = GOLUCY_Doc_Get_Size; GOLUCY_Doc_Store_BRIDGE = GOLUCY_Doc_Store; GOLUCY_Doc_Serialize_BRIDGE = GOLUCY_Doc_Serialize; GOLUCY_Doc_Deserialize_BRIDGE = GOLUCY_Doc_Deserialize; GOLUCY_Doc_Extract_BRIDGE = GOLUCY_Doc_Extract; GOLUCY_Doc_Field_Names_BRIDGE = GOLUCY_Doc_Field_Names; GOLUCY_Doc_Equals_BRIDGE = GOLUCY_Doc_Equals; GOLUCY_Doc_Destroy_BRIDGE = GOLUCY_Doc_Destroy; GOLUCY_DefDocReader_Fetch_Doc_BRIDGE = GOLUCY_DefDocReader_Fetch_Doc; GOLUCY_Inverter_Invert_Doc_BRIDGE = GOLUCY_Inverter_Invert_Doc; } static uint32_t S_count_code_points(const char *string, size_t len) { uint32_t num_code_points = 0; size_t i = 0; while (i < len) { i += cfish_StrHelp_UTF8_COUNT[(uint8_t)(string[i])]; ++num_code_points; } if (i != len) { CFISH_THROW(CFISH_ERR, "Match between code point boundaries in '%s'", string); } return num_code_points; } // Returns the number of code points through the end of the match. 
static int push_token(const char *str, int start, int end, int last_end, int cp_count, lucy_Inversion *inversion) { const char *match = str + start; int match_len = end - start; int cp_start = cp_count + S_count_code_points(str + last_end, start - last_end); int cp_end = cp_start + S_count_code_points(match, match_len); lucy_Token *token = lucy_Token_new(match, match_len, cp_start, cp_end, 1.0f, 1); LUCY_Inversion_Append(inversion, token); return cp_end; } static void null_terminate_string(char *string, size_t len) { string[len] = '\0'; } */ import "C" import "unsafe" import "fmt" import "regexp" import "git-wip-us.apache.org/repos/asf/lucy-clownfish.git/runtime/go/clownfish" var registry *objRegistry func init() { C.GOLUCY_glue_exported_symbols() C.lucy_bootstrap_parcel() registry = newObjRegistry(16) } //export GOLUCY_RegexTokenizer_init func GOLUCY_RegexTokenizer_init(rt *C.lucy_RegexTokenizer, pattern *C.cfish_String) *C.lucy_RegexTokenizer { C.lucy_Analyzer_init(((*C.lucy_Analyzer)(unsafe.Pointer(rt)))) ivars := C.lucy_RegexTokenizer_IVARS(rt) ivars.pattern = C.CFISH_Str_Clone(pattern) var patternGo string if pattern == nil { patternGo = "\\w+(?:['\\x{2019}]\\w+)*" } else { patternGo = clownfish.CFStringToGo(unsafe.Pointer(pattern)) } rx, err := regexp.Compile(patternGo) if err != nil { panic(err) } rxID := registry.store(rx) ivars.token_re = unsafe.Pointer(rxID) return rt } //export GOLUCY_RegexTokenizer_Destroy func GOLUCY_RegexTokenizer_Destroy(rt *C.lucy_RegexTokenizer) { ivars := C.lucy_RegexTokenizer_IVARS(rt) rxID := uintptr(ivars.token_re) registry.delete(rxID) C.cfish_super_destroy(unsafe.Pointer(rt), C.LUCY_REGEXTOKENIZER) } //export GOLUCY_RegexTokenizer_Tokenize_Utf8 func GOLUCY_RegexTokenizer_Tokenize_Utf8(rt *C.lucy_RegexTokenizer, str *C.char, stringLen C.size_t, inversion *C.lucy_Inversion) { ivars := C.lucy_RegexTokenizer_IVARS(rt) rxID := uintptr(ivars.token_re) rx, ok := registry.fetch(rxID).(*regexp.Regexp) if !ok { mess := 
fmt.Sprintf("Failed to Fetch *RegExp with id %d and pattern %s", rxID, clownfish.CFStringToGo(unsafe.Pointer(ivars.pattern))) panic(clownfish.NewErr(mess)) } buf := C.GoBytes(unsafe.Pointer(str), C.int(stringLen)) found := rx.FindAllIndex(buf, int(stringLen)) lastEnd := 0 cpCount := 0 for _, startEnd := range found { cpCount = int(C.push_token(str, C.int(startEnd[0]), C.int(startEnd[1]), C.int(lastEnd), C.int(cpCount), inversion)) lastEnd = startEnd[1] } } func
(docID int32) Doc { retvalCF := C.lucy_Doc_new(nil, C.int32_t(docID)) return WRAPDoc(unsafe.Pointer(retvalCF)) } //export GOLUCY_Doc_init func GOLUCY_Doc_init(d *C.lucy_Doc, fields unsafe.Pointer, docID C.int32_t) *C.lucy_Doc { ivars := C.lucy_Doc_IVARS(d) if fields != nil { ivars.fields = unsafe.Pointer(C.cfish_inc_refcount(fields)) } else { ivars.fields = unsafe.Pointer(C.cfish_Hash_new(0)) } ivars.doc_id = docID return d } //export GOLUCY_Doc_Set_Fields func GOLUCY_Doc_Set_Fields(d *C.lucy_Doc, fields unsafe.Pointer) { ivars := C.lucy_Doc_IVARS(d) temp := ivars.fields ivars.fields = unsafe.Pointer(C.cfish_inc_refcount(fields)) C.cfish_decref(temp) } //export GOLUCY_Doc_Get_Size func GOLUCY_Doc_Get_Size(d *C.lucy_Doc) C.uint32_t { ivars := C.lucy_Doc_IVARS(d) hash := ((*C.cfish_Hash)(ivars.fields)) return C.uint32_t(C.CFISH_Hash_Get_Size(hash)) } //export GOLUCY_Doc_Store func GOLUCY_Doc_Store(d *C.lucy_Doc, field *C.cfish_String, value *C.cfish_Obj) { ivars := C.lucy_Doc_IVARS(d) hash := (*C.cfish_Hash)(ivars.fields) C.CFISH_Hash_Store(hash, field, C.cfish_inc_refcount(unsafe.Pointer(value))) } //export GOLUCY_Doc_Serialize func GOLUCY_Doc_Serialize(d *C.lucy_Doc, outstream *C.lucy_OutStream) { ivars := C.lucy_Doc_IVARS(d) hash := (*C.cfish_Hash)(ivars.fields) C.lucy_Freezer_serialize_hash(hash, outstream) C.LUCY_OutStream_Write_C32(outstream, C.uint32_t(ivars.doc_id)) } //export GOLUCY_Doc_Deserialize func GOLUCY_Doc_Deserialize(d *C.lucy_Doc, instream *C.lucy_InStream) *C.lucy_Doc { ivars := C.lucy_Doc_IVARS(d) ivars.fields = unsafe.Pointer(C.lucy_Freezer_read_hash(instream)) ivars.doc_id = C.int32_t(C.LUCY_InStream_Read_C32(instream)) return d } //export GOLUCY_Doc_Extract func GOLUCY_Doc_Extract(d *C.lucy_Doc, field *C.cfish_String) *C.cfish_Obj { ivars := C.lucy_Doc_IVARS(d) hash := (*C.cfish_Hash)(ivars.fields) val := C.CFISH_Hash_Fetch(hash, field) return C.cfish_inc_refcount(unsafe.Pointer(val)) } //export GOLUCY_Doc_Field_Names func 
GOLUCY_Doc_Field_Names(d *C.lucy_Doc) *C.cfish_Vector { ivars := C.lucy_Doc_IVARS(d) hash := (*C.cfish_Hash)(ivars.fields) return C.CFISH_Hash_Keys(hash) } //export GOLUCY_Doc_Equals func GOLUCY_Doc_Equals(d *C.lucy_Doc, other *C.cfish_Obj) C.bool { twin := (*C.lucy_Doc)(unsafe.Pointer(other)) if twin == d { return true } if !C.cfish_Obj_is_a(other, C.LUCY_DOC) { return false } ivars := C.lucy_Doc_IVARS(d) ovars := C.lucy_Doc_IVARS(twin) hash := (*C.cfish_Hash)(ivars.fields) otherHash := (*C.cfish_Obj)(ovars.fields) return C.CFISH_Hash_Equals(hash, otherHash) } //export GOLUCY_Doc_Destroy func GOLUCY_Doc_Destroy(d *C.lucy_Doc) { ivars := C.lucy_Doc_IVARS(d) C.cfish_decref(unsafe.Pointer(ivars.fields)) C.cfish_super_destroy(unsafe.Pointer(d), C.LUCY_DOC) } func fetchEntry(ivars *C.lucy_InverterIVARS, field *C.cfish_String) *C.lucy_InverterEntry { schema := ivars.schema fieldNum := C.LUCY_Seg_Field_Num(ivars.segment, field) if fieldNum == 0 { // This field seems not to be in the segment yet. Try to find it in // the Schema. if C.LUCY_Schema_Fetch_Type(schema, field) != nil { // The field is in the Schema. Get a field num from the Segment. fieldNum = C.LUCY_Seg_Add_Field(ivars.segment, field) } else { // We've truly failed to find the field. The user must // not have spec'd it. 
fieldGo := clownfish.CFStringToGo(unsafe.Pointer(field)) err := clownfish.NewErr("Unknown field name: '" + fieldGo + "'") panic(err) } } entry := C.CFISH_Vec_Fetch(ivars.entry_pool, C.size_t(fieldNum)) if entry == nil { newEntry := C.lucy_InvEntry_new(schema, field, fieldNum) C.CFISH_Vec_Store(ivars.entry_pool, C.size_t(fieldNum), (*C.cfish_Obj)(unsafe.Pointer(entry))) return newEntry } return (*C.lucy_InverterEntry)(unsafe.Pointer(entry)) } //export GOLUCY_DefDocReader_Fetch_Doc func GOLUCY_DefDocReader_Fetch_Doc(ddr *C.lucy_DefaultDocReader, docID C.int32_t) *C.lucy_HitDoc { ivars := C.lucy_DefDocReader_IVARS(ddr) schema := ivars.schema datInstream := ivars.dat_in ixInstream := ivars.ix_in fields := C.cfish_Hash_new(1) fieldNameCap := C.size_t(31) var fieldName *C.char = ((*C.char)(C.malloc(fieldNameCap + 1))) // Get data file pointer from index, read number of fields. C.LUCY_InStream_Seek(ixInstream, C.int64_t(docID*8)) start := C.LUCY_InStream_Read_U64(ixInstream) C.LUCY_InStream_Seek(datInstream, C.int64_t(start)) numFields := uint32(C.LUCY_InStream_Read_C32(datInstream)) // Decode stored data and build up the doc field by field. for i := uint32(0); i < numFields; i++ { // Read field name. fieldNameLen := C.size_t(C.LUCY_InStream_Read_C32(datInstream)) if fieldNameLen > fieldNameCap { fieldNameCap = fieldNameLen fieldName = ((*C.char)(C.realloc(unsafe.Pointer(fieldName), fieldNameCap+1))) } C.LUCY_InStream_Read_Bytes(datInstream, fieldName, fieldNameLen) // Find the Field's FieldType. // TODO: Creating and destroying a new string each time is // inefficient. The solution should be to add a privte // Schema_Fetch_Type_Utf8 method which takes char* and size_t. fieldNameStr := C.cfish_Str_new_from_utf8(fieldName, fieldNameLen) fieldType := C.LUCY_Schema_Fetch_Type(schema, fieldNameStr) C.cfish_dec_refcount(unsafe.Pointer(fieldNameStr)) // Read the field value. 
var value *C.cfish_Obj switch C.LUCY_FType_Primitive_ID(fieldType) & C.lucy_FType_PRIMITIVE_ID_MASK { case C.lucy_FType_TEXT: valueLen := C.size_t(C.LUCY_InStream_Read_C32(datInstream)) buf := ((*C.char)(C.malloc(valueLen + 1))) C.LUCY_InStream_Read_Bytes(datInstream, buf, valueLen) C.null_terminate_string(buf, valueLen) value = ((*C.cfish_Obj)(C.cfish_Str_new_steal_utf8(buf, valueLen))) case C.lucy_FType_BLOB: valueLen := C.size_t(C.LUCY_InStream_Read_C32(datInstream)) buf := ((*C.char)(C.malloc(valueLen))) C.LUCY_InStream_Read_Bytes(datInstream, buf, valueLen) value = ((*C.cfish_Obj)(C.cfish_Blob_new_steal(buf, valueLen))) case C.lucy_FType_FLOAT32: value = ((*C.cfish_Obj)(C.cfish_Float_new(C.double(C.LUCY_InStream_Read_F32(datInstream))))) case C.lucy_FType_FLOAT64: value = ((*C.cfish_Obj)(C.cfish_Float_new(C.LUCY_InStream_Read_F64(datInstream)))) case C.lucy_FType_INT32: value = ((*C.cfish_Obj)(C.cfish_Int_new(C.int64_t(C.LUCY_InStream_Read_C32(datInstream))))) case C.lucy_FType_INT64: value = ((*C.cfish_Obj)(C.cfish_Int_new(C.int64_t(C.LUCY_InStream_Read_C64(datInstream))))) default: value = nil panic(clownfish.NewErr("Internal Lucy error: bad type id for field " + C.GoStringN(fieldName, C.int(fieldNameLen)))) } // Store the value. C.CFISH_Hash_Store_Utf8(fields, fieldName, fieldNameLen, value) } C.free(unsafe.Pointer(fieldName)) retval := C.lucy_HitDoc_new(unsafe.Pointer(fields), docID, 0.0) C.cfish_dec_refcount(unsafe.Pointer(fields)) return retval } //export GOLUCY_Inverter_Invert_Doc func GOLUCY_Inverter_Invert_Doc(inverter *C.lucy_Inverter, doc *C.lucy_Doc) { ivars := C.lucy_Inverter_IVARS(inverter) fields := (*C.cfish_Hash)(C.LUCY_Doc_Get_Fields(doc)) // Prepare for the new doc. C.LUCY_Inverter_Set_Doc(inverter, doc) // Extract and invert the doc's fields. 
iter := C.cfish_HashIter_new(fields) for C.CFISH_HashIter_Next(iter) { field := C.CFISH_HashIter_Get_Key(iter) obj := C.CFISH_HashIter_Get_Value(iter) if obj == nil { mess := "Invalid nil value for field" + clownfish.CFStringToGo(unsafe.Pointer(field)) panic(clownfish.NewErr(mess)) } inventry := fetchEntry(ivars, field) inventryIvars := C.lucy_InvEntry_IVARS(inventry) fieldType := inventryIvars._type // Get the field value. var expectedType *C.cfish_Class switch C.LUCY_FType_Primitive_ID(fieldType) & C.lucy_FType_PRIMITIVE_ID_MASK { case C.lucy_FType_TEXT: expectedType = C.CFISH_STRING case C.lucy_FType_BLOB: expectedType = C.CFISH_BLOB case C.lucy_FType_INT32: expectedType = C.CFISH_INTEGER case C.lucy_FType_INT64: expectedType = C.CFISH_INTEGER case C.lucy_FType_FLOAT32: expectedType = C.CFISH_FLOAT case C.lucy_FType_FLOAT64: expectedType = C.CFISH_FLOAT default: panic(clownfish.NewErr("Internal Lucy error: bad type id for field " + clownfish.CFStringToGo(unsafe.Pointer(field)))) } if !C.cfish_Obj_is_a(obj, expectedType) { className := C.cfish_Obj_get_class_name((*C.cfish_Obj)(unsafe.Pointer(fieldType))) mess := fmt.Sprintf("Invalid type for field '%s': '%s'", clownfish.CFStringToGo(unsafe.Pointer(field)), clownfish.CFStringToGo(unsafe.Pointer(className))) panic(clownfish.NewErr(mess)) } if inventryIvars.value != obj { C.cfish_decref(unsafe.Pointer(inventryIvars.value)) inventryIvars.value = C.cfish_inc_refcount(unsafe.Pointer(obj)) } C.LUCY_Inverter_Add_Field(inverter, inventry) } C.cfish_dec_refcount(unsafe.Pointer(iter)) }
NewDoc
identifier_name
lucy.go
/* Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package lucy /* #include <stdlib.h> #define C_LUCY_DOC #define C_LUCY_REGEXTOKENIZER #define C_LUCY_DEFAULTDOCREADER #define C_LUCY_INVERTER #define C_LUCY_INVERTERENTRY #include "lucy_parcel.h" #include "Lucy/Analysis/RegexTokenizer.h" #include "Lucy/Document/Doc.h" #include "Lucy/Index/DocReader.h" #include "Lucy/Index/Inverter.h" #include "Clownfish/String.h" #include "Clownfish/Blob.h" #include "Clownfish/Num.h" #include "Clownfish/Hash.h" #include "Clownfish/HashIterator.h" #include "Clownfish/Vector.h" #include "Clownfish/Err.h" #include "Clownfish/Util/StringHelper.h" #include "Lucy/Analysis/Analyzer.h" #include "Lucy/Analysis/Inversion.h" #include "Lucy/Analysis/Token.h" #include "Lucy/Document/HitDoc.h" #include "Lucy/Plan/FieldType.h" #include "Lucy/Plan/Schema.h" #include "Lucy/Index/Segment.h" #include "Lucy/Store/InStream.h" #include "Lucy/Store/OutStream.h" #include "Lucy/Util/Freezer.h" extern lucy_RegexTokenizer* GOLUCY_RegexTokenizer_init(lucy_RegexTokenizer *self, cfish_String *pattern); extern lucy_RegexTokenizer* (*GOLUCY_RegexTokenizer_init_BRIDGE)(lucy_RegexTokenizer *self, cfish_String *pattern); extern void GOLUCY_RegexTokenizer_Destroy(lucy_RegexTokenizer *self); extern void 
(*GOLUCY_RegexTokenizer_Destroy_BRIDGE)(lucy_RegexTokenizer *self); extern void GOLUCY_RegexTokenizer_Tokenize_Utf8(lucy_RegexTokenizer *self, char *str, size_t string_len, lucy_Inversion *inversion); extern void (*GOLUCY_RegexTokenizer_Tokenize_Utf8_BRIDGE)(lucy_RegexTokenizer *self, const char *str, size_t string_len, lucy_Inversion *inversion); extern lucy_Doc* GOLUCY_Doc_init(lucy_Doc *doc, void *fields, int32_t doc_id); extern lucy_Doc* (*GOLUCY_Doc_init_BRIDGE)(lucy_Doc *doc, void *fields, int32_t doc_id); extern void GOLUCY_Doc_Set_Fields(lucy_Doc *self, void *fields); extern void (*GOLUCY_Doc_Set_Fields_BRIDGE)(lucy_Doc *self, void *fields); extern uint32_t GOLUCY_Doc_Get_Size(lucy_Doc *self); extern uint32_t (*GOLUCY_Doc_Get_Size_BRIDGE)(lucy_Doc *self); extern void GOLUCY_Doc_Store(lucy_Doc *self, cfish_String *field, cfish_Obj *value); extern void (*GOLUCY_Doc_Store_BRIDGE)(lucy_Doc *self, cfish_String *field, cfish_Obj *value); extern void GOLUCY_Doc_Serialize(lucy_Doc *self, lucy_OutStream *outstream); extern void (*GOLUCY_Doc_Serialize_BRIDGE)(lucy_Doc *self, lucy_OutStream *outstream); extern lucy_Doc* GOLUCY_Doc_Deserialize(lucy_Doc *self, lucy_InStream *instream); extern lucy_Doc* (*GOLUCY_Doc_Deserialize_BRIDGE)(lucy_Doc *self, lucy_InStream *instream); extern cfish_Obj* GOLUCY_Doc_Extract(lucy_Doc *self, cfish_String *field); extern cfish_Obj* (*GOLUCY_Doc_Extract_BRIDGE)(lucy_Doc *self, cfish_String *field); extern cfish_Vector* GOLUCY_Doc_Field_Names(lucy_Doc *self); extern cfish_Vector* (*GOLUCY_Doc_Field_Names_BRIDGE)(lucy_Doc *self); extern bool GOLUCY_Doc_Equals(lucy_Doc *self, cfish_Obj *other); extern bool (*GOLUCY_Doc_Equals_BRIDGE)(lucy_Doc *self, cfish_Obj *other); extern void GOLUCY_Doc_Destroy(lucy_Doc *self); extern void (*GOLUCY_Doc_Destroy_BRIDGE)(lucy_Doc *self); extern lucy_HitDoc* GOLUCY_DefDocReader_Fetch_Doc(lucy_DefaultDocReader *self, int32_t doc_id); extern lucy_HitDoc* 
(*GOLUCY_DefDocReader_Fetch_Doc_BRIDGE)(lucy_DefaultDocReader *self, int32_t doc_id); extern void GOLUCY_Inverter_Invert_Doc(lucy_Inverter *self, lucy_Doc *doc); extern void (*GOLUCY_Inverter_Invert_Doc_BRIDGE)(lucy_Inverter *self, lucy_Doc *doc); // C symbols linked into a Go-built package archive are not visible to // external C code -- but internal code *can* see symbols from outside. // This allows us to fake up symbol export by assigning values only known // interally to external symbols during Go package initialization. static CFISH_INLINE void GOLUCY_glue_exported_symbols() { GOLUCY_RegexTokenizer_init_BRIDGE = GOLUCY_RegexTokenizer_init; GOLUCY_RegexTokenizer_Destroy_BRIDGE = GOLUCY_RegexTokenizer_Destroy; GOLUCY_RegexTokenizer_Tokenize_Utf8_BRIDGE = (LUCY_RegexTokenizer_Tokenize_Utf8_t)GOLUCY_RegexTokenizer_Tokenize_Utf8; GOLUCY_Doc_init_BRIDGE = GOLUCY_Doc_init; GOLUCY_Doc_Set_Fields_BRIDGE = GOLUCY_Doc_Set_Fields; GOLUCY_Doc_Get_Size_BRIDGE = GOLUCY_Doc_Get_Size; GOLUCY_Doc_Store_BRIDGE = GOLUCY_Doc_Store; GOLUCY_Doc_Serialize_BRIDGE = GOLUCY_Doc_Serialize; GOLUCY_Doc_Deserialize_BRIDGE = GOLUCY_Doc_Deserialize; GOLUCY_Doc_Extract_BRIDGE = GOLUCY_Doc_Extract; GOLUCY_Doc_Field_Names_BRIDGE = GOLUCY_Doc_Field_Names; GOLUCY_Doc_Equals_BRIDGE = GOLUCY_Doc_Equals; GOLUCY_Doc_Destroy_BRIDGE = GOLUCY_Doc_Destroy; GOLUCY_DefDocReader_Fetch_Doc_BRIDGE = GOLUCY_DefDocReader_Fetch_Doc; GOLUCY_Inverter_Invert_Doc_BRIDGE = GOLUCY_Inverter_Invert_Doc; } static uint32_t S_count_code_points(const char *string, size_t len) { uint32_t num_code_points = 0; size_t i = 0; while (i < len) { i += cfish_StrHelp_UTF8_COUNT[(uint8_t)(string[i])]; ++num_code_points; } if (i != len) { CFISH_THROW(CFISH_ERR, "Match between code point boundaries in '%s'", string); } return num_code_points; } // Returns the number of code points through the end of the match. 
static int push_token(const char *str, int start, int end, int last_end, int cp_count, lucy_Inversion *inversion) { const char *match = str + start; int match_len = end - start; int cp_start = cp_count + S_count_code_points(str + last_end, start - last_end); int cp_end = cp_start + S_count_code_points(match, match_len); lucy_Token *token = lucy_Token_new(match, match_len, cp_start, cp_end, 1.0f, 1); LUCY_Inversion_Append(inversion, token); return cp_end; } static void null_terminate_string(char *string, size_t len) { string[len] = '\0'; } */ import "C" import "unsafe" import "fmt" import "regexp" import "git-wip-us.apache.org/repos/asf/lucy-clownfish.git/runtime/go/clownfish" var registry *objRegistry func init() { C.GOLUCY_glue_exported_symbols() C.lucy_bootstrap_parcel() registry = newObjRegistry(16) } //export GOLUCY_RegexTokenizer_init func GOLUCY_RegexTokenizer_init(rt *C.lucy_RegexTokenizer, pattern *C.cfish_String) *C.lucy_RegexTokenizer { C.lucy_Analyzer_init(((*C.lucy_Analyzer)(unsafe.Pointer(rt)))) ivars := C.lucy_RegexTokenizer_IVARS(rt) ivars.pattern = C.CFISH_Str_Clone(pattern) var patternGo string if pattern == nil { patternGo = "\\w+(?:['\\x{2019}]\\w+)*" } else { patternGo = clownfish.CFStringToGo(unsafe.Pointer(pattern)) } rx, err := regexp.Compile(patternGo) if err != nil { panic(err) } rxID := registry.store(rx) ivars.token_re = unsafe.Pointer(rxID) return rt } //export GOLUCY_RegexTokenizer_Destroy func GOLUCY_RegexTokenizer_Destroy(rt *C.lucy_RegexTokenizer) { ivars := C.lucy_RegexTokenizer_IVARS(rt) rxID := uintptr(ivars.token_re)
registry.delete(rxID) C.cfish_super_destroy(unsafe.Pointer(rt), C.LUCY_REGEXTOKENIZER) } //export GOLUCY_RegexTokenizer_Tokenize_Utf8 func GOLUCY_RegexTokenizer_Tokenize_Utf8(rt *C.lucy_RegexTokenizer, str *C.char, stringLen C.size_t, inversion *C.lucy_Inversion) { ivars := C.lucy_RegexTokenizer_IVARS(rt) rxID := uintptr(ivars.token_re) rx, ok := registry.fetch(rxID).(*regexp.Regexp) if !ok { mess := fmt.Sprintf("Failed to Fetch *RegExp with id %d and pattern %s", rxID, clownfish.CFStringToGo(unsafe.Pointer(ivars.pattern))) panic(clownfish.NewErr(mess)) } buf := C.GoBytes(unsafe.Pointer(str), C.int(stringLen)) found := rx.FindAllIndex(buf, int(stringLen)) lastEnd := 0 cpCount := 0 for _, startEnd := range found { cpCount = int(C.push_token(str, C.int(startEnd[0]), C.int(startEnd[1]), C.int(lastEnd), C.int(cpCount), inversion)) lastEnd = startEnd[1] } } func NewDoc(docID int32) Doc { retvalCF := C.lucy_Doc_new(nil, C.int32_t(docID)) return WRAPDoc(unsafe.Pointer(retvalCF)) } //export GOLUCY_Doc_init func GOLUCY_Doc_init(d *C.lucy_Doc, fields unsafe.Pointer, docID C.int32_t) *C.lucy_Doc { ivars := C.lucy_Doc_IVARS(d) if fields != nil { ivars.fields = unsafe.Pointer(C.cfish_inc_refcount(fields)) } else { ivars.fields = unsafe.Pointer(C.cfish_Hash_new(0)) } ivars.doc_id = docID return d } //export GOLUCY_Doc_Set_Fields func GOLUCY_Doc_Set_Fields(d *C.lucy_Doc, fields unsafe.Pointer) { ivars := C.lucy_Doc_IVARS(d) temp := ivars.fields ivars.fields = unsafe.Pointer(C.cfish_inc_refcount(fields)) C.cfish_decref(temp) } //export GOLUCY_Doc_Get_Size func GOLUCY_Doc_Get_Size(d *C.lucy_Doc) C.uint32_t { ivars := C.lucy_Doc_IVARS(d) hash := ((*C.cfish_Hash)(ivars.fields)) return C.uint32_t(C.CFISH_Hash_Get_Size(hash)) } //export GOLUCY_Doc_Store func GOLUCY_Doc_Store(d *C.lucy_Doc, field *C.cfish_String, value *C.cfish_Obj) { ivars := C.lucy_Doc_IVARS(d) hash := (*C.cfish_Hash)(ivars.fields) C.CFISH_Hash_Store(hash, field, C.cfish_inc_refcount(unsafe.Pointer(value))) } //export 
GOLUCY_Doc_Serialize func GOLUCY_Doc_Serialize(d *C.lucy_Doc, outstream *C.lucy_OutStream) { ivars := C.lucy_Doc_IVARS(d) hash := (*C.cfish_Hash)(ivars.fields) C.lucy_Freezer_serialize_hash(hash, outstream) C.LUCY_OutStream_Write_C32(outstream, C.uint32_t(ivars.doc_id)) } //export GOLUCY_Doc_Deserialize func GOLUCY_Doc_Deserialize(d *C.lucy_Doc, instream *C.lucy_InStream) *C.lucy_Doc { ivars := C.lucy_Doc_IVARS(d) ivars.fields = unsafe.Pointer(C.lucy_Freezer_read_hash(instream)) ivars.doc_id = C.int32_t(C.LUCY_InStream_Read_C32(instream)) return d } //export GOLUCY_Doc_Extract func GOLUCY_Doc_Extract(d *C.lucy_Doc, field *C.cfish_String) *C.cfish_Obj { ivars := C.lucy_Doc_IVARS(d) hash := (*C.cfish_Hash)(ivars.fields) val := C.CFISH_Hash_Fetch(hash, field) return C.cfish_inc_refcount(unsafe.Pointer(val)) } //export GOLUCY_Doc_Field_Names func GOLUCY_Doc_Field_Names(d *C.lucy_Doc) *C.cfish_Vector { ivars := C.lucy_Doc_IVARS(d) hash := (*C.cfish_Hash)(ivars.fields) return C.CFISH_Hash_Keys(hash) } //export GOLUCY_Doc_Equals func GOLUCY_Doc_Equals(d *C.lucy_Doc, other *C.cfish_Obj) C.bool { twin := (*C.lucy_Doc)(unsafe.Pointer(other)) if twin == d { return true } if !C.cfish_Obj_is_a(other, C.LUCY_DOC) { return false } ivars := C.lucy_Doc_IVARS(d) ovars := C.lucy_Doc_IVARS(twin) hash := (*C.cfish_Hash)(ivars.fields) otherHash := (*C.cfish_Obj)(ovars.fields) return C.CFISH_Hash_Equals(hash, otherHash) } //export GOLUCY_Doc_Destroy func GOLUCY_Doc_Destroy(d *C.lucy_Doc) { ivars := C.lucy_Doc_IVARS(d) C.cfish_decref(unsafe.Pointer(ivars.fields)) C.cfish_super_destroy(unsafe.Pointer(d), C.LUCY_DOC) } func fetchEntry(ivars *C.lucy_InverterIVARS, field *C.cfish_String) *C.lucy_InverterEntry { schema := ivars.schema fieldNum := C.LUCY_Seg_Field_Num(ivars.segment, field) if fieldNum == 0 { // This field seems not to be in the segment yet. Try to find it in // the Schema. if C.LUCY_Schema_Fetch_Type(schema, field) != nil { // The field is in the Schema. 
Get a field num from the Segment. fieldNum = C.LUCY_Seg_Add_Field(ivars.segment, field) } else { // We've truly failed to find the field. The user must // not have spec'd it. fieldGo := clownfish.CFStringToGo(unsafe.Pointer(field)) err := clownfish.NewErr("Unknown field name: '" + fieldGo + "'") panic(err) } } entry := C.CFISH_Vec_Fetch(ivars.entry_pool, C.size_t(fieldNum)) if entry == nil { newEntry := C.lucy_InvEntry_new(schema, field, fieldNum) C.CFISH_Vec_Store(ivars.entry_pool, C.size_t(fieldNum), (*C.cfish_Obj)(unsafe.Pointer(entry))) return newEntry } return (*C.lucy_InverterEntry)(unsafe.Pointer(entry)) } //export GOLUCY_DefDocReader_Fetch_Doc func GOLUCY_DefDocReader_Fetch_Doc(ddr *C.lucy_DefaultDocReader, docID C.int32_t) *C.lucy_HitDoc { ivars := C.lucy_DefDocReader_IVARS(ddr) schema := ivars.schema datInstream := ivars.dat_in ixInstream := ivars.ix_in fields := C.cfish_Hash_new(1) fieldNameCap := C.size_t(31) var fieldName *C.char = ((*C.char)(C.malloc(fieldNameCap + 1))) // Get data file pointer from index, read number of fields. C.LUCY_InStream_Seek(ixInstream, C.int64_t(docID*8)) start := C.LUCY_InStream_Read_U64(ixInstream) C.LUCY_InStream_Seek(datInstream, C.int64_t(start)) numFields := uint32(C.LUCY_InStream_Read_C32(datInstream)) // Decode stored data and build up the doc field by field. for i := uint32(0); i < numFields; i++ { // Read field name. fieldNameLen := C.size_t(C.LUCY_InStream_Read_C32(datInstream)) if fieldNameLen > fieldNameCap { fieldNameCap = fieldNameLen fieldName = ((*C.char)(C.realloc(unsafe.Pointer(fieldName), fieldNameCap+1))) } C.LUCY_InStream_Read_Bytes(datInstream, fieldName, fieldNameLen) // Find the Field's FieldType. // TODO: Creating and destroying a new string each time is // inefficient. The solution should be to add a privte // Schema_Fetch_Type_Utf8 method which takes char* and size_t. 
fieldNameStr := C.cfish_Str_new_from_utf8(fieldName, fieldNameLen) fieldType := C.LUCY_Schema_Fetch_Type(schema, fieldNameStr) C.cfish_dec_refcount(unsafe.Pointer(fieldNameStr)) // Read the field value. var value *C.cfish_Obj switch C.LUCY_FType_Primitive_ID(fieldType) & C.lucy_FType_PRIMITIVE_ID_MASK { case C.lucy_FType_TEXT: valueLen := C.size_t(C.LUCY_InStream_Read_C32(datInstream)) buf := ((*C.char)(C.malloc(valueLen + 1))) C.LUCY_InStream_Read_Bytes(datInstream, buf, valueLen) C.null_terminate_string(buf, valueLen) value = ((*C.cfish_Obj)(C.cfish_Str_new_steal_utf8(buf, valueLen))) case C.lucy_FType_BLOB: valueLen := C.size_t(C.LUCY_InStream_Read_C32(datInstream)) buf := ((*C.char)(C.malloc(valueLen))) C.LUCY_InStream_Read_Bytes(datInstream, buf, valueLen) value = ((*C.cfish_Obj)(C.cfish_Blob_new_steal(buf, valueLen))) case C.lucy_FType_FLOAT32: value = ((*C.cfish_Obj)(C.cfish_Float_new(C.double(C.LUCY_InStream_Read_F32(datInstream))))) case C.lucy_FType_FLOAT64: value = ((*C.cfish_Obj)(C.cfish_Float_new(C.LUCY_InStream_Read_F64(datInstream)))) case C.lucy_FType_INT32: value = ((*C.cfish_Obj)(C.cfish_Int_new(C.int64_t(C.LUCY_InStream_Read_C32(datInstream))))) case C.lucy_FType_INT64: value = ((*C.cfish_Obj)(C.cfish_Int_new(C.int64_t(C.LUCY_InStream_Read_C64(datInstream))))) default: value = nil panic(clownfish.NewErr("Internal Lucy error: bad type id for field " + C.GoStringN(fieldName, C.int(fieldNameLen)))) } // Store the value. C.CFISH_Hash_Store_Utf8(fields, fieldName, fieldNameLen, value) } C.free(unsafe.Pointer(fieldName)) retval := C.lucy_HitDoc_new(unsafe.Pointer(fields), docID, 0.0) C.cfish_dec_refcount(unsafe.Pointer(fields)) return retval } //export GOLUCY_Inverter_Invert_Doc func GOLUCY_Inverter_Invert_Doc(inverter *C.lucy_Inverter, doc *C.lucy_Doc) { ivars := C.lucy_Inverter_IVARS(inverter) fields := (*C.cfish_Hash)(C.LUCY_Doc_Get_Fields(doc)) // Prepare for the new doc. 
C.LUCY_Inverter_Set_Doc(inverter, doc) // Extract and invert the doc's fields. iter := C.cfish_HashIter_new(fields) for C.CFISH_HashIter_Next(iter) { field := C.CFISH_HashIter_Get_Key(iter) obj := C.CFISH_HashIter_Get_Value(iter) if obj == nil { mess := "Invalid nil value for field" + clownfish.CFStringToGo(unsafe.Pointer(field)) panic(clownfish.NewErr(mess)) } inventry := fetchEntry(ivars, field) inventryIvars := C.lucy_InvEntry_IVARS(inventry) fieldType := inventryIvars._type // Get the field value. var expectedType *C.cfish_Class switch C.LUCY_FType_Primitive_ID(fieldType) & C.lucy_FType_PRIMITIVE_ID_MASK { case C.lucy_FType_TEXT: expectedType = C.CFISH_STRING case C.lucy_FType_BLOB: expectedType = C.CFISH_BLOB case C.lucy_FType_INT32: expectedType = C.CFISH_INTEGER case C.lucy_FType_INT64: expectedType = C.CFISH_INTEGER case C.lucy_FType_FLOAT32: expectedType = C.CFISH_FLOAT case C.lucy_FType_FLOAT64: expectedType = C.CFISH_FLOAT default: panic(clownfish.NewErr("Internal Lucy error: bad type id for field " + clownfish.CFStringToGo(unsafe.Pointer(field)))) } if !C.cfish_Obj_is_a(obj, expectedType) { className := C.cfish_Obj_get_class_name((*C.cfish_Obj)(unsafe.Pointer(fieldType))) mess := fmt.Sprintf("Invalid type for field '%s': '%s'", clownfish.CFStringToGo(unsafe.Pointer(field)), clownfish.CFStringToGo(unsafe.Pointer(className))) panic(clownfish.NewErr(mess)) } if inventryIvars.value != obj { C.cfish_decref(unsafe.Pointer(inventryIvars.value)) inventryIvars.value = C.cfish_inc_refcount(unsafe.Pointer(obj)) } C.LUCY_Inverter_Add_Field(inverter, inventry) } C.cfish_dec_refcount(unsafe.Pointer(iter)) }
random_line_split
lucy.go
/* Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package lucy /* #include <stdlib.h> #define C_LUCY_DOC #define C_LUCY_REGEXTOKENIZER #define C_LUCY_DEFAULTDOCREADER #define C_LUCY_INVERTER #define C_LUCY_INVERTERENTRY #include "lucy_parcel.h" #include "Lucy/Analysis/RegexTokenizer.h" #include "Lucy/Document/Doc.h" #include "Lucy/Index/DocReader.h" #include "Lucy/Index/Inverter.h" #include "Clownfish/String.h" #include "Clownfish/Blob.h" #include "Clownfish/Num.h" #include "Clownfish/Hash.h" #include "Clownfish/HashIterator.h" #include "Clownfish/Vector.h" #include "Clownfish/Err.h" #include "Clownfish/Util/StringHelper.h" #include "Lucy/Analysis/Analyzer.h" #include "Lucy/Analysis/Inversion.h" #include "Lucy/Analysis/Token.h" #include "Lucy/Document/HitDoc.h" #include "Lucy/Plan/FieldType.h" #include "Lucy/Plan/Schema.h" #include "Lucy/Index/Segment.h" #include "Lucy/Store/InStream.h" #include "Lucy/Store/OutStream.h" #include "Lucy/Util/Freezer.h" extern lucy_RegexTokenizer* GOLUCY_RegexTokenizer_init(lucy_RegexTokenizer *self, cfish_String *pattern); extern lucy_RegexTokenizer* (*GOLUCY_RegexTokenizer_init_BRIDGE)(lucy_RegexTokenizer *self, cfish_String *pattern); extern void GOLUCY_RegexTokenizer_Destroy(lucy_RegexTokenizer *self); extern void 
(*GOLUCY_RegexTokenizer_Destroy_BRIDGE)(lucy_RegexTokenizer *self); extern void GOLUCY_RegexTokenizer_Tokenize_Utf8(lucy_RegexTokenizer *self, char *str, size_t string_len, lucy_Inversion *inversion); extern void (*GOLUCY_RegexTokenizer_Tokenize_Utf8_BRIDGE)(lucy_RegexTokenizer *self, const char *str, size_t string_len, lucy_Inversion *inversion); extern lucy_Doc* GOLUCY_Doc_init(lucy_Doc *doc, void *fields, int32_t doc_id); extern lucy_Doc* (*GOLUCY_Doc_init_BRIDGE)(lucy_Doc *doc, void *fields, int32_t doc_id); extern void GOLUCY_Doc_Set_Fields(lucy_Doc *self, void *fields); extern void (*GOLUCY_Doc_Set_Fields_BRIDGE)(lucy_Doc *self, void *fields); extern uint32_t GOLUCY_Doc_Get_Size(lucy_Doc *self); extern uint32_t (*GOLUCY_Doc_Get_Size_BRIDGE)(lucy_Doc *self); extern void GOLUCY_Doc_Store(lucy_Doc *self, cfish_String *field, cfish_Obj *value); extern void (*GOLUCY_Doc_Store_BRIDGE)(lucy_Doc *self, cfish_String *field, cfish_Obj *value); extern void GOLUCY_Doc_Serialize(lucy_Doc *self, lucy_OutStream *outstream); extern void (*GOLUCY_Doc_Serialize_BRIDGE)(lucy_Doc *self, lucy_OutStream *outstream); extern lucy_Doc* GOLUCY_Doc_Deserialize(lucy_Doc *self, lucy_InStream *instream); extern lucy_Doc* (*GOLUCY_Doc_Deserialize_BRIDGE)(lucy_Doc *self, lucy_InStream *instream); extern cfish_Obj* GOLUCY_Doc_Extract(lucy_Doc *self, cfish_String *field); extern cfish_Obj* (*GOLUCY_Doc_Extract_BRIDGE)(lucy_Doc *self, cfish_String *field); extern cfish_Vector* GOLUCY_Doc_Field_Names(lucy_Doc *self); extern cfish_Vector* (*GOLUCY_Doc_Field_Names_BRIDGE)(lucy_Doc *self); extern bool GOLUCY_Doc_Equals(lucy_Doc *self, cfish_Obj *other); extern bool (*GOLUCY_Doc_Equals_BRIDGE)(lucy_Doc *self, cfish_Obj *other); extern void GOLUCY_Doc_Destroy(lucy_Doc *self); extern void (*GOLUCY_Doc_Destroy_BRIDGE)(lucy_Doc *self); extern lucy_HitDoc* GOLUCY_DefDocReader_Fetch_Doc(lucy_DefaultDocReader *self, int32_t doc_id); extern lucy_HitDoc* 
(*GOLUCY_DefDocReader_Fetch_Doc_BRIDGE)(lucy_DefaultDocReader *self, int32_t doc_id); extern void GOLUCY_Inverter_Invert_Doc(lucy_Inverter *self, lucy_Doc *doc); extern void (*GOLUCY_Inverter_Invert_Doc_BRIDGE)(lucy_Inverter *self, lucy_Doc *doc); // C symbols linked into a Go-built package archive are not visible to // external C code -- but internal code *can* see symbols from outside. // This allows us to fake up symbol export by assigning values only known // interally to external symbols during Go package initialization. static CFISH_INLINE void GOLUCY_glue_exported_symbols() { GOLUCY_RegexTokenizer_init_BRIDGE = GOLUCY_RegexTokenizer_init; GOLUCY_RegexTokenizer_Destroy_BRIDGE = GOLUCY_RegexTokenizer_Destroy; GOLUCY_RegexTokenizer_Tokenize_Utf8_BRIDGE = (LUCY_RegexTokenizer_Tokenize_Utf8_t)GOLUCY_RegexTokenizer_Tokenize_Utf8; GOLUCY_Doc_init_BRIDGE = GOLUCY_Doc_init; GOLUCY_Doc_Set_Fields_BRIDGE = GOLUCY_Doc_Set_Fields; GOLUCY_Doc_Get_Size_BRIDGE = GOLUCY_Doc_Get_Size; GOLUCY_Doc_Store_BRIDGE = GOLUCY_Doc_Store; GOLUCY_Doc_Serialize_BRIDGE = GOLUCY_Doc_Serialize; GOLUCY_Doc_Deserialize_BRIDGE = GOLUCY_Doc_Deserialize; GOLUCY_Doc_Extract_BRIDGE = GOLUCY_Doc_Extract; GOLUCY_Doc_Field_Names_BRIDGE = GOLUCY_Doc_Field_Names; GOLUCY_Doc_Equals_BRIDGE = GOLUCY_Doc_Equals; GOLUCY_Doc_Destroy_BRIDGE = GOLUCY_Doc_Destroy; GOLUCY_DefDocReader_Fetch_Doc_BRIDGE = GOLUCY_DefDocReader_Fetch_Doc; GOLUCY_Inverter_Invert_Doc_BRIDGE = GOLUCY_Inverter_Invert_Doc; } static uint32_t S_count_code_points(const char *string, size_t len) { uint32_t num_code_points = 0; size_t i = 0; while (i < len) { i += cfish_StrHelp_UTF8_COUNT[(uint8_t)(string[i])]; ++num_code_points; } if (i != len) { CFISH_THROW(CFISH_ERR, "Match between code point boundaries in '%s'", string); } return num_code_points; } // Returns the number of code points through the end of the match. 
static int push_token(const char *str, int start, int end, int last_end, int cp_count, lucy_Inversion *inversion) { const char *match = str + start; int match_len = end - start; int cp_start = cp_count + S_count_code_points(str + last_end, start - last_end); int cp_end = cp_start + S_count_code_points(match, match_len); lucy_Token *token = lucy_Token_new(match, match_len, cp_start, cp_end, 1.0f, 1); LUCY_Inversion_Append(inversion, token); return cp_end; } static void null_terminate_string(char *string, size_t len) { string[len] = '\0'; } */ import "C" import "unsafe" import "fmt" import "regexp" import "git-wip-us.apache.org/repos/asf/lucy-clownfish.git/runtime/go/clownfish" var registry *objRegistry func init() { C.GOLUCY_glue_exported_symbols() C.lucy_bootstrap_parcel() registry = newObjRegistry(16) } //export GOLUCY_RegexTokenizer_init func GOLUCY_RegexTokenizer_init(rt *C.lucy_RegexTokenizer, pattern *C.cfish_String) *C.lucy_RegexTokenizer { C.lucy_Analyzer_init(((*C.lucy_Analyzer)(unsafe.Pointer(rt)))) ivars := C.lucy_RegexTokenizer_IVARS(rt) ivars.pattern = C.CFISH_Str_Clone(pattern) var patternGo string if pattern == nil
else { patternGo = clownfish.CFStringToGo(unsafe.Pointer(pattern)) } rx, err := regexp.Compile(patternGo) if err != nil { panic(err) } rxID := registry.store(rx) ivars.token_re = unsafe.Pointer(rxID) return rt } //export GOLUCY_RegexTokenizer_Destroy func GOLUCY_RegexTokenizer_Destroy(rt *C.lucy_RegexTokenizer) { ivars := C.lucy_RegexTokenizer_IVARS(rt) rxID := uintptr(ivars.token_re) registry.delete(rxID) C.cfish_super_destroy(unsafe.Pointer(rt), C.LUCY_REGEXTOKENIZER) } //export GOLUCY_RegexTokenizer_Tokenize_Utf8 func GOLUCY_RegexTokenizer_Tokenize_Utf8(rt *C.lucy_RegexTokenizer, str *C.char, stringLen C.size_t, inversion *C.lucy_Inversion) { ivars := C.lucy_RegexTokenizer_IVARS(rt) rxID := uintptr(ivars.token_re) rx, ok := registry.fetch(rxID).(*regexp.Regexp) if !ok { mess := fmt.Sprintf("Failed to Fetch *RegExp with id %d and pattern %s", rxID, clownfish.CFStringToGo(unsafe.Pointer(ivars.pattern))) panic(clownfish.NewErr(mess)) } buf := C.GoBytes(unsafe.Pointer(str), C.int(stringLen)) found := rx.FindAllIndex(buf, int(stringLen)) lastEnd := 0 cpCount := 0 for _, startEnd := range found { cpCount = int(C.push_token(str, C.int(startEnd[0]), C.int(startEnd[1]), C.int(lastEnd), C.int(cpCount), inversion)) lastEnd = startEnd[1] } } func NewDoc(docID int32) Doc { retvalCF := C.lucy_Doc_new(nil, C.int32_t(docID)) return WRAPDoc(unsafe.Pointer(retvalCF)) } //export GOLUCY_Doc_init func GOLUCY_Doc_init(d *C.lucy_Doc, fields unsafe.Pointer, docID C.int32_t) *C.lucy_Doc { ivars := C.lucy_Doc_IVARS(d) if fields != nil { ivars.fields = unsafe.Pointer(C.cfish_inc_refcount(fields)) } else { ivars.fields = unsafe.Pointer(C.cfish_Hash_new(0)) } ivars.doc_id = docID return d } //export GOLUCY_Doc_Set_Fields func GOLUCY_Doc_Set_Fields(d *C.lucy_Doc, fields unsafe.Pointer) { ivars := C.lucy_Doc_IVARS(d) temp := ivars.fields ivars.fields = unsafe.Pointer(C.cfish_inc_refcount(fields)) C.cfish_decref(temp) } //export GOLUCY_Doc_Get_Size func GOLUCY_Doc_Get_Size(d *C.lucy_Doc) 
C.uint32_t { ivars := C.lucy_Doc_IVARS(d) hash := ((*C.cfish_Hash)(ivars.fields)) return C.uint32_t(C.CFISH_Hash_Get_Size(hash)) } //export GOLUCY_Doc_Store func GOLUCY_Doc_Store(d *C.lucy_Doc, field *C.cfish_String, value *C.cfish_Obj) { ivars := C.lucy_Doc_IVARS(d) hash := (*C.cfish_Hash)(ivars.fields) C.CFISH_Hash_Store(hash, field, C.cfish_inc_refcount(unsafe.Pointer(value))) } //export GOLUCY_Doc_Serialize func GOLUCY_Doc_Serialize(d *C.lucy_Doc, outstream *C.lucy_OutStream) { ivars := C.lucy_Doc_IVARS(d) hash := (*C.cfish_Hash)(ivars.fields) C.lucy_Freezer_serialize_hash(hash, outstream) C.LUCY_OutStream_Write_C32(outstream, C.uint32_t(ivars.doc_id)) } //export GOLUCY_Doc_Deserialize func GOLUCY_Doc_Deserialize(d *C.lucy_Doc, instream *C.lucy_InStream) *C.lucy_Doc { ivars := C.lucy_Doc_IVARS(d) ivars.fields = unsafe.Pointer(C.lucy_Freezer_read_hash(instream)) ivars.doc_id = C.int32_t(C.LUCY_InStream_Read_C32(instream)) return d } //export GOLUCY_Doc_Extract func GOLUCY_Doc_Extract(d *C.lucy_Doc, field *C.cfish_String) *C.cfish_Obj { ivars := C.lucy_Doc_IVARS(d) hash := (*C.cfish_Hash)(ivars.fields) val := C.CFISH_Hash_Fetch(hash, field) return C.cfish_inc_refcount(unsafe.Pointer(val)) } //export GOLUCY_Doc_Field_Names func GOLUCY_Doc_Field_Names(d *C.lucy_Doc) *C.cfish_Vector { ivars := C.lucy_Doc_IVARS(d) hash := (*C.cfish_Hash)(ivars.fields) return C.CFISH_Hash_Keys(hash) } //export GOLUCY_Doc_Equals func GOLUCY_Doc_Equals(d *C.lucy_Doc, other *C.cfish_Obj) C.bool { twin := (*C.lucy_Doc)(unsafe.Pointer(other)) if twin == d { return true } if !C.cfish_Obj_is_a(other, C.LUCY_DOC) { return false } ivars := C.lucy_Doc_IVARS(d) ovars := C.lucy_Doc_IVARS(twin) hash := (*C.cfish_Hash)(ivars.fields) otherHash := (*C.cfish_Obj)(ovars.fields) return C.CFISH_Hash_Equals(hash, otherHash) } //export GOLUCY_Doc_Destroy func GOLUCY_Doc_Destroy(d *C.lucy_Doc) { ivars := C.lucy_Doc_IVARS(d) C.cfish_decref(unsafe.Pointer(ivars.fields)) 
C.cfish_super_destroy(unsafe.Pointer(d), C.LUCY_DOC) } func fetchEntry(ivars *C.lucy_InverterIVARS, field *C.cfish_String) *C.lucy_InverterEntry { schema := ivars.schema fieldNum := C.LUCY_Seg_Field_Num(ivars.segment, field) if fieldNum == 0 { // This field seems not to be in the segment yet. Try to find it in // the Schema. if C.LUCY_Schema_Fetch_Type(schema, field) != nil { // The field is in the Schema. Get a field num from the Segment. fieldNum = C.LUCY_Seg_Add_Field(ivars.segment, field) } else { // We've truly failed to find the field. The user must // not have spec'd it. fieldGo := clownfish.CFStringToGo(unsafe.Pointer(field)) err := clownfish.NewErr("Unknown field name: '" + fieldGo + "'") panic(err) } } entry := C.CFISH_Vec_Fetch(ivars.entry_pool, C.size_t(fieldNum)) if entry == nil { newEntry := C.lucy_InvEntry_new(schema, field, fieldNum) C.CFISH_Vec_Store(ivars.entry_pool, C.size_t(fieldNum), (*C.cfish_Obj)(unsafe.Pointer(entry))) return newEntry } return (*C.lucy_InverterEntry)(unsafe.Pointer(entry)) } //export GOLUCY_DefDocReader_Fetch_Doc func GOLUCY_DefDocReader_Fetch_Doc(ddr *C.lucy_DefaultDocReader, docID C.int32_t) *C.lucy_HitDoc { ivars := C.lucy_DefDocReader_IVARS(ddr) schema := ivars.schema datInstream := ivars.dat_in ixInstream := ivars.ix_in fields := C.cfish_Hash_new(1) fieldNameCap := C.size_t(31) var fieldName *C.char = ((*C.char)(C.malloc(fieldNameCap + 1))) // Get data file pointer from index, read number of fields. C.LUCY_InStream_Seek(ixInstream, C.int64_t(docID*8)) start := C.LUCY_InStream_Read_U64(ixInstream) C.LUCY_InStream_Seek(datInstream, C.int64_t(start)) numFields := uint32(C.LUCY_InStream_Read_C32(datInstream)) // Decode stored data and build up the doc field by field. for i := uint32(0); i < numFields; i++ { // Read field name. 
fieldNameLen := C.size_t(C.LUCY_InStream_Read_C32(datInstream)) if fieldNameLen > fieldNameCap { fieldNameCap = fieldNameLen fieldName = ((*C.char)(C.realloc(unsafe.Pointer(fieldName), fieldNameCap+1))) } C.LUCY_InStream_Read_Bytes(datInstream, fieldName, fieldNameLen) // Find the Field's FieldType. // TODO: Creating and destroying a new string each time is // inefficient. The solution should be to add a privte // Schema_Fetch_Type_Utf8 method which takes char* and size_t. fieldNameStr := C.cfish_Str_new_from_utf8(fieldName, fieldNameLen) fieldType := C.LUCY_Schema_Fetch_Type(schema, fieldNameStr) C.cfish_dec_refcount(unsafe.Pointer(fieldNameStr)) // Read the field value. var value *C.cfish_Obj switch C.LUCY_FType_Primitive_ID(fieldType) & C.lucy_FType_PRIMITIVE_ID_MASK { case C.lucy_FType_TEXT: valueLen := C.size_t(C.LUCY_InStream_Read_C32(datInstream)) buf := ((*C.char)(C.malloc(valueLen + 1))) C.LUCY_InStream_Read_Bytes(datInstream, buf, valueLen) C.null_terminate_string(buf, valueLen) value = ((*C.cfish_Obj)(C.cfish_Str_new_steal_utf8(buf, valueLen))) case C.lucy_FType_BLOB: valueLen := C.size_t(C.LUCY_InStream_Read_C32(datInstream)) buf := ((*C.char)(C.malloc(valueLen))) C.LUCY_InStream_Read_Bytes(datInstream, buf, valueLen) value = ((*C.cfish_Obj)(C.cfish_Blob_new_steal(buf, valueLen))) case C.lucy_FType_FLOAT32: value = ((*C.cfish_Obj)(C.cfish_Float_new(C.double(C.LUCY_InStream_Read_F32(datInstream))))) case C.lucy_FType_FLOAT64: value = ((*C.cfish_Obj)(C.cfish_Float_new(C.LUCY_InStream_Read_F64(datInstream)))) case C.lucy_FType_INT32: value = ((*C.cfish_Obj)(C.cfish_Int_new(C.int64_t(C.LUCY_InStream_Read_C32(datInstream))))) case C.lucy_FType_INT64: value = ((*C.cfish_Obj)(C.cfish_Int_new(C.int64_t(C.LUCY_InStream_Read_C64(datInstream))))) default: value = nil panic(clownfish.NewErr("Internal Lucy error: bad type id for field " + C.GoStringN(fieldName, C.int(fieldNameLen)))) } // Store the value. 
C.CFISH_Hash_Store_Utf8(fields, fieldName, fieldNameLen, value) } C.free(unsafe.Pointer(fieldName)) retval := C.lucy_HitDoc_new(unsafe.Pointer(fields), docID, 0.0) C.cfish_dec_refcount(unsafe.Pointer(fields)) return retval } //export GOLUCY_Inverter_Invert_Doc func GOLUCY_Inverter_Invert_Doc(inverter *C.lucy_Inverter, doc *C.lucy_Doc) { ivars := C.lucy_Inverter_IVARS(inverter) fields := (*C.cfish_Hash)(C.LUCY_Doc_Get_Fields(doc)) // Prepare for the new doc. C.LUCY_Inverter_Set_Doc(inverter, doc) // Extract and invert the doc's fields. iter := C.cfish_HashIter_new(fields) for C.CFISH_HashIter_Next(iter) { field := C.CFISH_HashIter_Get_Key(iter) obj := C.CFISH_HashIter_Get_Value(iter) if obj == nil { mess := "Invalid nil value for field" + clownfish.CFStringToGo(unsafe.Pointer(field)) panic(clownfish.NewErr(mess)) } inventry := fetchEntry(ivars, field) inventryIvars := C.lucy_InvEntry_IVARS(inventry) fieldType := inventryIvars._type // Get the field value. var expectedType *C.cfish_Class switch C.LUCY_FType_Primitive_ID(fieldType) & C.lucy_FType_PRIMITIVE_ID_MASK { case C.lucy_FType_TEXT: expectedType = C.CFISH_STRING case C.lucy_FType_BLOB: expectedType = C.CFISH_BLOB case C.lucy_FType_INT32: expectedType = C.CFISH_INTEGER case C.lucy_FType_INT64: expectedType = C.CFISH_INTEGER case C.lucy_FType_FLOAT32: expectedType = C.CFISH_FLOAT case C.lucy_FType_FLOAT64: expectedType = C.CFISH_FLOAT default: panic(clownfish.NewErr("Internal Lucy error: bad type id for field " + clownfish.CFStringToGo(unsafe.Pointer(field)))) } if !C.cfish_Obj_is_a(obj, expectedType) { className := C.cfish_Obj_get_class_name((*C.cfish_Obj)(unsafe.Pointer(fieldType))) mess := fmt.Sprintf("Invalid type for field '%s': '%s'", clownfish.CFStringToGo(unsafe.Pointer(field)), clownfish.CFStringToGo(unsafe.Pointer(className))) panic(clownfish.NewErr(mess)) } if inventryIvars.value != obj { C.cfish_decref(unsafe.Pointer(inventryIvars.value)) inventryIvars.value = C.cfish_inc_refcount(unsafe.Pointer(obj)) 
} C.LUCY_Inverter_Add_Field(inverter, inventry) } C.cfish_dec_refcount(unsafe.Pointer(iter)) }
{ patternGo = "\\w+(?:['\\x{2019}]\\w+)*" }
conditional_block
lucy.go
/* Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package lucy /* #include <stdlib.h> #define C_LUCY_DOC #define C_LUCY_REGEXTOKENIZER #define C_LUCY_DEFAULTDOCREADER #define C_LUCY_INVERTER #define C_LUCY_INVERTERENTRY #include "lucy_parcel.h" #include "Lucy/Analysis/RegexTokenizer.h" #include "Lucy/Document/Doc.h" #include "Lucy/Index/DocReader.h" #include "Lucy/Index/Inverter.h" #include "Clownfish/String.h" #include "Clownfish/Blob.h" #include "Clownfish/Num.h" #include "Clownfish/Hash.h" #include "Clownfish/HashIterator.h" #include "Clownfish/Vector.h" #include "Clownfish/Err.h" #include "Clownfish/Util/StringHelper.h" #include "Lucy/Analysis/Analyzer.h" #include "Lucy/Analysis/Inversion.h" #include "Lucy/Analysis/Token.h" #include "Lucy/Document/HitDoc.h" #include "Lucy/Plan/FieldType.h" #include "Lucy/Plan/Schema.h" #include "Lucy/Index/Segment.h" #include "Lucy/Store/InStream.h" #include "Lucy/Store/OutStream.h" #include "Lucy/Util/Freezer.h" extern lucy_RegexTokenizer* GOLUCY_RegexTokenizer_init(lucy_RegexTokenizer *self, cfish_String *pattern); extern lucy_RegexTokenizer* (*GOLUCY_RegexTokenizer_init_BRIDGE)(lucy_RegexTokenizer *self, cfish_String *pattern); extern void GOLUCY_RegexTokenizer_Destroy(lucy_RegexTokenizer *self); extern void 
(*GOLUCY_RegexTokenizer_Destroy_BRIDGE)(lucy_RegexTokenizer *self); extern void GOLUCY_RegexTokenizer_Tokenize_Utf8(lucy_RegexTokenizer *self, char *str, size_t string_len, lucy_Inversion *inversion); extern void (*GOLUCY_RegexTokenizer_Tokenize_Utf8_BRIDGE)(lucy_RegexTokenizer *self, const char *str, size_t string_len, lucy_Inversion *inversion); extern lucy_Doc* GOLUCY_Doc_init(lucy_Doc *doc, void *fields, int32_t doc_id); extern lucy_Doc* (*GOLUCY_Doc_init_BRIDGE)(lucy_Doc *doc, void *fields, int32_t doc_id); extern void GOLUCY_Doc_Set_Fields(lucy_Doc *self, void *fields); extern void (*GOLUCY_Doc_Set_Fields_BRIDGE)(lucy_Doc *self, void *fields); extern uint32_t GOLUCY_Doc_Get_Size(lucy_Doc *self); extern uint32_t (*GOLUCY_Doc_Get_Size_BRIDGE)(lucy_Doc *self); extern void GOLUCY_Doc_Store(lucy_Doc *self, cfish_String *field, cfish_Obj *value); extern void (*GOLUCY_Doc_Store_BRIDGE)(lucy_Doc *self, cfish_String *field, cfish_Obj *value); extern void GOLUCY_Doc_Serialize(lucy_Doc *self, lucy_OutStream *outstream); extern void (*GOLUCY_Doc_Serialize_BRIDGE)(lucy_Doc *self, lucy_OutStream *outstream); extern lucy_Doc* GOLUCY_Doc_Deserialize(lucy_Doc *self, lucy_InStream *instream); extern lucy_Doc* (*GOLUCY_Doc_Deserialize_BRIDGE)(lucy_Doc *self, lucy_InStream *instream); extern cfish_Obj* GOLUCY_Doc_Extract(lucy_Doc *self, cfish_String *field); extern cfish_Obj* (*GOLUCY_Doc_Extract_BRIDGE)(lucy_Doc *self, cfish_String *field); extern cfish_Vector* GOLUCY_Doc_Field_Names(lucy_Doc *self); extern cfish_Vector* (*GOLUCY_Doc_Field_Names_BRIDGE)(lucy_Doc *self); extern bool GOLUCY_Doc_Equals(lucy_Doc *self, cfish_Obj *other); extern bool (*GOLUCY_Doc_Equals_BRIDGE)(lucy_Doc *self, cfish_Obj *other); extern void GOLUCY_Doc_Destroy(lucy_Doc *self); extern void (*GOLUCY_Doc_Destroy_BRIDGE)(lucy_Doc *self); extern lucy_HitDoc* GOLUCY_DefDocReader_Fetch_Doc(lucy_DefaultDocReader *self, int32_t doc_id); extern lucy_HitDoc* 
(*GOLUCY_DefDocReader_Fetch_Doc_BRIDGE)(lucy_DefaultDocReader *self, int32_t doc_id); extern void GOLUCY_Inverter_Invert_Doc(lucy_Inverter *self, lucy_Doc *doc); extern void (*GOLUCY_Inverter_Invert_Doc_BRIDGE)(lucy_Inverter *self, lucy_Doc *doc); // C symbols linked into a Go-built package archive are not visible to // external C code -- but internal code *can* see symbols from outside. // This allows us to fake up symbol export by assigning values only known // interally to external symbols during Go package initialization. static CFISH_INLINE void GOLUCY_glue_exported_symbols() { GOLUCY_RegexTokenizer_init_BRIDGE = GOLUCY_RegexTokenizer_init; GOLUCY_RegexTokenizer_Destroy_BRIDGE = GOLUCY_RegexTokenizer_Destroy; GOLUCY_RegexTokenizer_Tokenize_Utf8_BRIDGE = (LUCY_RegexTokenizer_Tokenize_Utf8_t)GOLUCY_RegexTokenizer_Tokenize_Utf8; GOLUCY_Doc_init_BRIDGE = GOLUCY_Doc_init; GOLUCY_Doc_Set_Fields_BRIDGE = GOLUCY_Doc_Set_Fields; GOLUCY_Doc_Get_Size_BRIDGE = GOLUCY_Doc_Get_Size; GOLUCY_Doc_Store_BRIDGE = GOLUCY_Doc_Store; GOLUCY_Doc_Serialize_BRIDGE = GOLUCY_Doc_Serialize; GOLUCY_Doc_Deserialize_BRIDGE = GOLUCY_Doc_Deserialize; GOLUCY_Doc_Extract_BRIDGE = GOLUCY_Doc_Extract; GOLUCY_Doc_Field_Names_BRIDGE = GOLUCY_Doc_Field_Names; GOLUCY_Doc_Equals_BRIDGE = GOLUCY_Doc_Equals; GOLUCY_Doc_Destroy_BRIDGE = GOLUCY_Doc_Destroy; GOLUCY_DefDocReader_Fetch_Doc_BRIDGE = GOLUCY_DefDocReader_Fetch_Doc; GOLUCY_Inverter_Invert_Doc_BRIDGE = GOLUCY_Inverter_Invert_Doc; } static uint32_t S_count_code_points(const char *string, size_t len) { uint32_t num_code_points = 0; size_t i = 0; while (i < len) { i += cfish_StrHelp_UTF8_COUNT[(uint8_t)(string[i])]; ++num_code_points; } if (i != len) { CFISH_THROW(CFISH_ERR, "Match between code point boundaries in '%s'", string); } return num_code_points; } // Returns the number of code points through the end of the match. 
static int push_token(const char *str, int start, int end, int last_end, int cp_count, lucy_Inversion *inversion) { const char *match = str + start; int match_len = end - start; int cp_start = cp_count + S_count_code_points(str + last_end, start - last_end); int cp_end = cp_start + S_count_code_points(match, match_len); lucy_Token *token = lucy_Token_new(match, match_len, cp_start, cp_end, 1.0f, 1); LUCY_Inversion_Append(inversion, token); return cp_end; } static void null_terminate_string(char *string, size_t len) { string[len] = '\0'; } */ import "C" import "unsafe" import "fmt" import "regexp" import "git-wip-us.apache.org/repos/asf/lucy-clownfish.git/runtime/go/clownfish" var registry *objRegistry func init() { C.GOLUCY_glue_exported_symbols() C.lucy_bootstrap_parcel() registry = newObjRegistry(16) } //export GOLUCY_RegexTokenizer_init func GOLUCY_RegexTokenizer_init(rt *C.lucy_RegexTokenizer, pattern *C.cfish_String) *C.lucy_RegexTokenizer { C.lucy_Analyzer_init(((*C.lucy_Analyzer)(unsafe.Pointer(rt)))) ivars := C.lucy_RegexTokenizer_IVARS(rt) ivars.pattern = C.CFISH_Str_Clone(pattern) var patternGo string if pattern == nil { patternGo = "\\w+(?:['\\x{2019}]\\w+)*" } else { patternGo = clownfish.CFStringToGo(unsafe.Pointer(pattern)) } rx, err := regexp.Compile(patternGo) if err != nil { panic(err) } rxID := registry.store(rx) ivars.token_re = unsafe.Pointer(rxID) return rt } //export GOLUCY_RegexTokenizer_Destroy func GOLUCY_RegexTokenizer_Destroy(rt *C.lucy_RegexTokenizer) { ivars := C.lucy_RegexTokenizer_IVARS(rt) rxID := uintptr(ivars.token_re) registry.delete(rxID) C.cfish_super_destroy(unsafe.Pointer(rt), C.LUCY_REGEXTOKENIZER) } //export GOLUCY_RegexTokenizer_Tokenize_Utf8 func GOLUCY_RegexTokenizer_Tokenize_Utf8(rt *C.lucy_RegexTokenizer, str *C.char, stringLen C.size_t, inversion *C.lucy_Inversion) { ivars := C.lucy_RegexTokenizer_IVARS(rt) rxID := uintptr(ivars.token_re) rx, ok := registry.fetch(rxID).(*regexp.Regexp) if !ok { mess := 
fmt.Sprintf("Failed to Fetch *RegExp with id %d and pattern %s", rxID, clownfish.CFStringToGo(unsafe.Pointer(ivars.pattern))) panic(clownfish.NewErr(mess)) } buf := C.GoBytes(unsafe.Pointer(str), C.int(stringLen)) found := rx.FindAllIndex(buf, int(stringLen)) lastEnd := 0 cpCount := 0 for _, startEnd := range found { cpCount = int(C.push_token(str, C.int(startEnd[0]), C.int(startEnd[1]), C.int(lastEnd), C.int(cpCount), inversion)) lastEnd = startEnd[1] } } func NewDoc(docID int32) Doc { retvalCF := C.lucy_Doc_new(nil, C.int32_t(docID)) return WRAPDoc(unsafe.Pointer(retvalCF)) } //export GOLUCY_Doc_init func GOLUCY_Doc_init(d *C.lucy_Doc, fields unsafe.Pointer, docID C.int32_t) *C.lucy_Doc
//export GOLUCY_Doc_Set_Fields func GOLUCY_Doc_Set_Fields(d *C.lucy_Doc, fields unsafe.Pointer) { ivars := C.lucy_Doc_IVARS(d) temp := ivars.fields ivars.fields = unsafe.Pointer(C.cfish_inc_refcount(fields)) C.cfish_decref(temp) } //export GOLUCY_Doc_Get_Size func GOLUCY_Doc_Get_Size(d *C.lucy_Doc) C.uint32_t { ivars := C.lucy_Doc_IVARS(d) hash := ((*C.cfish_Hash)(ivars.fields)) return C.uint32_t(C.CFISH_Hash_Get_Size(hash)) } //export GOLUCY_Doc_Store func GOLUCY_Doc_Store(d *C.lucy_Doc, field *C.cfish_String, value *C.cfish_Obj) { ivars := C.lucy_Doc_IVARS(d) hash := (*C.cfish_Hash)(ivars.fields) C.CFISH_Hash_Store(hash, field, C.cfish_inc_refcount(unsafe.Pointer(value))) } //export GOLUCY_Doc_Serialize func GOLUCY_Doc_Serialize(d *C.lucy_Doc, outstream *C.lucy_OutStream) { ivars := C.lucy_Doc_IVARS(d) hash := (*C.cfish_Hash)(ivars.fields) C.lucy_Freezer_serialize_hash(hash, outstream) C.LUCY_OutStream_Write_C32(outstream, C.uint32_t(ivars.doc_id)) } //export GOLUCY_Doc_Deserialize func GOLUCY_Doc_Deserialize(d *C.lucy_Doc, instream *C.lucy_InStream) *C.lucy_Doc { ivars := C.lucy_Doc_IVARS(d) ivars.fields = unsafe.Pointer(C.lucy_Freezer_read_hash(instream)) ivars.doc_id = C.int32_t(C.LUCY_InStream_Read_C32(instream)) return d } //export GOLUCY_Doc_Extract func GOLUCY_Doc_Extract(d *C.lucy_Doc, field *C.cfish_String) *C.cfish_Obj { ivars := C.lucy_Doc_IVARS(d) hash := (*C.cfish_Hash)(ivars.fields) val := C.CFISH_Hash_Fetch(hash, field) return C.cfish_inc_refcount(unsafe.Pointer(val)) } //export GOLUCY_Doc_Field_Names func GOLUCY_Doc_Field_Names(d *C.lucy_Doc) *C.cfish_Vector { ivars := C.lucy_Doc_IVARS(d) hash := (*C.cfish_Hash)(ivars.fields) return C.CFISH_Hash_Keys(hash) } //export GOLUCY_Doc_Equals func GOLUCY_Doc_Equals(d *C.lucy_Doc, other *C.cfish_Obj) C.bool { twin := (*C.lucy_Doc)(unsafe.Pointer(other)) if twin == d { return true } if !C.cfish_Obj_is_a(other, C.LUCY_DOC) { return false } ivars := C.lucy_Doc_IVARS(d) ovars := C.lucy_Doc_IVARS(twin) hash := 
(*C.cfish_Hash)(ivars.fields) otherHash := (*C.cfish_Obj)(ovars.fields) return C.CFISH_Hash_Equals(hash, otherHash) } //export GOLUCY_Doc_Destroy func GOLUCY_Doc_Destroy(d *C.lucy_Doc) { ivars := C.lucy_Doc_IVARS(d) C.cfish_decref(unsafe.Pointer(ivars.fields)) C.cfish_super_destroy(unsafe.Pointer(d), C.LUCY_DOC) } func fetchEntry(ivars *C.lucy_InverterIVARS, field *C.cfish_String) *C.lucy_InverterEntry { schema := ivars.schema fieldNum := C.LUCY_Seg_Field_Num(ivars.segment, field) if fieldNum == 0 { // This field seems not to be in the segment yet. Try to find it in // the Schema. if C.LUCY_Schema_Fetch_Type(schema, field) != nil { // The field is in the Schema. Get a field num from the Segment. fieldNum = C.LUCY_Seg_Add_Field(ivars.segment, field) } else { // We've truly failed to find the field. The user must // not have spec'd it. fieldGo := clownfish.CFStringToGo(unsafe.Pointer(field)) err := clownfish.NewErr("Unknown field name: '" + fieldGo + "'") panic(err) } } entry := C.CFISH_Vec_Fetch(ivars.entry_pool, C.size_t(fieldNum)) if entry == nil { newEntry := C.lucy_InvEntry_new(schema, field, fieldNum) C.CFISH_Vec_Store(ivars.entry_pool, C.size_t(fieldNum), (*C.cfish_Obj)(unsafe.Pointer(entry))) return newEntry } return (*C.lucy_InverterEntry)(unsafe.Pointer(entry)) } //export GOLUCY_DefDocReader_Fetch_Doc func GOLUCY_DefDocReader_Fetch_Doc(ddr *C.lucy_DefaultDocReader, docID C.int32_t) *C.lucy_HitDoc { ivars := C.lucy_DefDocReader_IVARS(ddr) schema := ivars.schema datInstream := ivars.dat_in ixInstream := ivars.ix_in fields := C.cfish_Hash_new(1) fieldNameCap := C.size_t(31) var fieldName *C.char = ((*C.char)(C.malloc(fieldNameCap + 1))) // Get data file pointer from index, read number of fields. C.LUCY_InStream_Seek(ixInstream, C.int64_t(docID*8)) start := C.LUCY_InStream_Read_U64(ixInstream) C.LUCY_InStream_Seek(datInstream, C.int64_t(start)) numFields := uint32(C.LUCY_InStream_Read_C32(datInstream)) // Decode stored data and build up the doc field by field. 
for i := uint32(0); i < numFields; i++ { // Read field name. fieldNameLen := C.size_t(C.LUCY_InStream_Read_C32(datInstream)) if fieldNameLen > fieldNameCap { fieldNameCap = fieldNameLen fieldName = ((*C.char)(C.realloc(unsafe.Pointer(fieldName), fieldNameCap+1))) } C.LUCY_InStream_Read_Bytes(datInstream, fieldName, fieldNameLen) // Find the Field's FieldType. // TODO: Creating and destroying a new string each time is // inefficient. The solution should be to add a privte // Schema_Fetch_Type_Utf8 method which takes char* and size_t. fieldNameStr := C.cfish_Str_new_from_utf8(fieldName, fieldNameLen) fieldType := C.LUCY_Schema_Fetch_Type(schema, fieldNameStr) C.cfish_dec_refcount(unsafe.Pointer(fieldNameStr)) // Read the field value. var value *C.cfish_Obj switch C.LUCY_FType_Primitive_ID(fieldType) & C.lucy_FType_PRIMITIVE_ID_MASK { case C.lucy_FType_TEXT: valueLen := C.size_t(C.LUCY_InStream_Read_C32(datInstream)) buf := ((*C.char)(C.malloc(valueLen + 1))) C.LUCY_InStream_Read_Bytes(datInstream, buf, valueLen) C.null_terminate_string(buf, valueLen) value = ((*C.cfish_Obj)(C.cfish_Str_new_steal_utf8(buf, valueLen))) case C.lucy_FType_BLOB: valueLen := C.size_t(C.LUCY_InStream_Read_C32(datInstream)) buf := ((*C.char)(C.malloc(valueLen))) C.LUCY_InStream_Read_Bytes(datInstream, buf, valueLen) value = ((*C.cfish_Obj)(C.cfish_Blob_new_steal(buf, valueLen))) case C.lucy_FType_FLOAT32: value = ((*C.cfish_Obj)(C.cfish_Float_new(C.double(C.LUCY_InStream_Read_F32(datInstream))))) case C.lucy_FType_FLOAT64: value = ((*C.cfish_Obj)(C.cfish_Float_new(C.LUCY_InStream_Read_F64(datInstream)))) case C.lucy_FType_INT32: value = ((*C.cfish_Obj)(C.cfish_Int_new(C.int64_t(C.LUCY_InStream_Read_C32(datInstream))))) case C.lucy_FType_INT64: value = ((*C.cfish_Obj)(C.cfish_Int_new(C.int64_t(C.LUCY_InStream_Read_C64(datInstream))))) default: value = nil panic(clownfish.NewErr("Internal Lucy error: bad type id for field " + C.GoStringN(fieldName, C.int(fieldNameLen)))) } // Store the value. 
C.CFISH_Hash_Store_Utf8(fields, fieldName, fieldNameLen, value) } C.free(unsafe.Pointer(fieldName)) retval := C.lucy_HitDoc_new(unsafe.Pointer(fields), docID, 0.0) C.cfish_dec_refcount(unsafe.Pointer(fields)) return retval } //export GOLUCY_Inverter_Invert_Doc func GOLUCY_Inverter_Invert_Doc(inverter *C.lucy_Inverter, doc *C.lucy_Doc) { ivars := C.lucy_Inverter_IVARS(inverter) fields := (*C.cfish_Hash)(C.LUCY_Doc_Get_Fields(doc)) // Prepare for the new doc. C.LUCY_Inverter_Set_Doc(inverter, doc) // Extract and invert the doc's fields. iter := C.cfish_HashIter_new(fields) for C.CFISH_HashIter_Next(iter) { field := C.CFISH_HashIter_Get_Key(iter) obj := C.CFISH_HashIter_Get_Value(iter) if obj == nil { mess := "Invalid nil value for field" + clownfish.CFStringToGo(unsafe.Pointer(field)) panic(clownfish.NewErr(mess)) } inventry := fetchEntry(ivars, field) inventryIvars := C.lucy_InvEntry_IVARS(inventry) fieldType := inventryIvars._type // Get the field value. var expectedType *C.cfish_Class switch C.LUCY_FType_Primitive_ID(fieldType) & C.lucy_FType_PRIMITIVE_ID_MASK { case C.lucy_FType_TEXT: expectedType = C.CFISH_STRING case C.lucy_FType_BLOB: expectedType = C.CFISH_BLOB case C.lucy_FType_INT32: expectedType = C.CFISH_INTEGER case C.lucy_FType_INT64: expectedType = C.CFISH_INTEGER case C.lucy_FType_FLOAT32: expectedType = C.CFISH_FLOAT case C.lucy_FType_FLOAT64: expectedType = C.CFISH_FLOAT default: panic(clownfish.NewErr("Internal Lucy error: bad type id for field " + clownfish.CFStringToGo(unsafe.Pointer(field)))) } if !C.cfish_Obj_is_a(obj, expectedType) { className := C.cfish_Obj_get_class_name((*C.cfish_Obj)(unsafe.Pointer(fieldType))) mess := fmt.Sprintf("Invalid type for field '%s': '%s'", clownfish.CFStringToGo(unsafe.Pointer(field)), clownfish.CFStringToGo(unsafe.Pointer(className))) panic(clownfish.NewErr(mess)) } if inventryIvars.value != obj { C.cfish_decref(unsafe.Pointer(inventryIvars.value)) inventryIvars.value = C.cfish_inc_refcount(unsafe.Pointer(obj)) 
} C.LUCY_Inverter_Add_Field(inverter, inventry) } C.cfish_dec_refcount(unsafe.Pointer(iter)) }
{ ivars := C.lucy_Doc_IVARS(d) if fields != nil { ivars.fields = unsafe.Pointer(C.cfish_inc_refcount(fields)) } else { ivars.fields = unsafe.Pointer(C.cfish_Hash_new(0)) } ivars.doc_id = docID return d }
identifier_body
settings.py
#-*- coding: utf-8 -*- """ Django settings for HyperKitty + Postorius Pay attention to settings ALLOWED_HOSTS and DATABASES! """ from os.path import abspath, dirname, join as joinpath from ConfigParser import SafeConfigParser def read_cfg(path, section=None, option=None): config = SafeConfigParser() config.read(path) def
(section, option): return config.get(section, option) if config.has_option(section, option) else None return get(section, option) if section else get mailman_cfg = read_cfg('/etc/mailman.cfg') BASE_DIR = '/usr/lib/bundles/mailman-webui' CONF_DIR = '/etc/mailman-webui' DATA_DIR = '/var/lib/mailman-webui' LOG_DIR = '/var/log/mailman-webui' # Hosts/domain names that are valid for this site. # NOTE: You MUST add domain name of your instance of this application here! # See https://docs.djangoproject.com/en/1.9/ref/settings/#allowed-hosts ALLOWED_HOSTS = ['localhost'] # Mailman API credentials # NOTE: Replace with hard-coded values if Mailman is running on a different host. MAILMAN_REST_API_URL = 'http://localhost:%s' % (mailman_cfg('webservice', 'port') or 8001) MAILMAN_REST_API_USER = mailman_cfg('webservice', 'admin_user') or 'restadmin' MAILMAN_REST_API_PASS = mailman_cfg('webservice', 'admin_pass') MAILMAN_ARCHIVER_KEY = read_cfg('/etc/mailman.d/hyperkitty.cfg', 'general', 'api_key') MAILMAN_ARCHIVER_FROM = ('127.0.0.1', '::1', '::ffff:127.0.0.1') # REST API REST_FRAMEWORK = { 'PAGE_SIZE': 10, } # Only display mailing-lists in HyperKitty from the same virtual host # as the webserver. FILTER_VHOST = False # # Application definition # SITE_ID = 1 INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'hyperkitty', 'rest_framework', 'django_gravatar', 'paintstore', 'compressor', 'haystack', 'django_extensions', 'postorius', 'django_mailman3', 'stronghold', # Uncomment the next line to enable integration with Sentry # and set DSN in RAVEN_CONFIG. #'raven.contrib.django.raven_compat', 'allauth', 'allauth.account', 'allauth.socialaccount', # Uncomment providers that you want to use, if any. 
#'allauth.socialaccount.providers.openid', #'allauth.socialaccount.providers.github', #'allauth.socialaccount.providers.gitlab', #'allauth.socialaccount.providers.google', #'allauth.socialaccount.providers.twitter', #'allauth.socialaccount.providers.stackexchange', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.middleware.locale.LocaleMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', 'django_mailman3.middleware.TimezoneMiddleware', 'postorius.middleware.PostoriusMiddleware', # Uncomment to require a user to be authenticated to view any page. #'stronghold.middleware.LoginRequiredMiddleware', ) # A string representing the full Python import path to your root URLconf. ROOT_URLCONF = 'urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ # Directory for templates override. joinpath(DATA_DIR, 'templates'), ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.i18n', 'django.template.context_processors.media', 'django.template.context_processors.static', 'django.template.context_processors.tz', 'django.template.context_processors.csrf', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'django_mailman3.context_processors.common', 'hyperkitty.context_processors.common', 'postorius.context_processors.postorius', ], }, }, ] WSGI_APPLICATION = 'wsgi.application' # Using the cache infrastructure can significantly improve performance on a # production setup. 
This is an example with a local Memcached server. #CACHES = { # 'default': { # 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache', # 'LOCATION': '127.0.0.1:11211', # } #} # # Databases # See https://docs.djangoproject.com/en/1.9/ref/settings/#databases # DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': joinpath(DATA_DIR, 'db.sqlite3'), } # Remove the above lines and uncomment the below to use PostgreSQL. # 'default': { # 'ENGINE': 'django.db.backends.postgresql_psycopg2', # 'NAME': 'mailman_webui', # 'USER': 'mailman_webui', # 'PASSWORD': 'change-me', # # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP. # 'HOST': '127.0.0.1', # 'PORT': '', # } } # Full-text search engine HAYSTACK_CONNECTIONS = { 'default': { 'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine', 'PATH': joinpath(DATA_DIR, 'fulltext_index'), }, } # # Outgoing mails # # NOTE: Replace with hard-coded values if Mailman is running on a different host. # The host and port of the SMTP server to use for sending email. EMAIL_HOST = mailman_cfg('mta', 'smtp_host') or 'localhost' EMAIL_PORT = int(mailman_cfg('mta', 'smtp_port') or 25) # Username and password to use for the SMTP server defined above. EMAIL_HOST_USER = mailman_cfg('mta', 'smtp_user') or '' EMAIL_HOST_PASSWORD = mailman_cfg('mta', 'smtp_pass') or '' # Whether to use a explicit TLS connection when talking to the SMTP server. EMAIL_USE_TLS = False # Whether to use an implicit TLS connection when talking to the SMTP server. EMAIL_USE_SSL = False # A tuple that lists people who get code error notifications. When DEBUG=False # and a view raises an exception, Django will email these people with the full # exception information. Each member of the tuple should be a tuple of (Full # name, email address). ADMINS = ( ('Mailman Admin', 'root@localhost'), ) # If you enable email reporting for error messages, this is where those emails # will appear to be coming from. 
Make sure you set a valid domain name, # otherwise the emails may get rejected. # https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-SERVER_EMAIL #SERVER_EMAIL = 'root@your-domain.org' # If you enable internal authentication, this is the address that the emails # will appear to be coming from. Make sure you set a valid domain name, # otherwise the emails may get rejected. # https://docs.djangoproject.com/en/1.9/ref/settings/#default-from-email #DEFAULT_FROM_EMAIL = 'mailing-lists@you-domain.org' # # Security settings # # A secret key used for signing sessions, cookies, password reset tokens etc. SECRET_KEY = open(joinpath(CONF_DIR, 'secret_key')).read() CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY = True SESSION_COOKIE_SECURE = True SECURE_CONTENT_TYPE_NOSNIFF = True SECURE_BROWSER_XSS_FILTER = True X_FRAME_OPTIONS = 'DENY' # If you're behind a proxy, use the X-Forwarded-Host header # See https://docs.djangoproject.com/en/1.9/ref/settings/#use-x-forwarded-host USE_X_FORWARDED_HOST = True # And if your proxy does your SSL encoding for you, set SECURE_PROXY_SSL_HEADER # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') #SECURE_SSL_REDIRECT = True # If you set SECURE_SSL_REDIRECT to True, make sure the SECURE_REDIRECT_EXEMPT # contains at least this line: #SECURE_REDIRECT_EXEMPT = [ # 'archives/api/mailman/.*', # Request from Mailman. #] # # Authentication # AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', # Uncomment to next line to enable LDAP authentication. #'custom.LDAPBackend', 'allauth.account.auth_backends.AuthenticationBackend', ) LOGIN_URL = 'account_login' LOGIN_REDIRECT_URL = 'hk_root' LOGOUT_URL = 'account_logout' # Whether registration of new accounts is currently permitted. 
REGISTRATION_OPEN = True # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator' }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator' }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator' }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator' }, ] # URLs which are ignored by LoginRequiredMiddleware, i.e. the middleware # does not *force* them to require authentication. STRONGHOLD_PUBLIC_URLS = ( r'^/accounts/.*', r'^/archives/api/mailman/.*', ) ## Django Allauth # Custom AccountAdapter for allauth that respects REGISTRATION_OPEN variable. ACCOUNT_ADAPTER = 'custom.CloseableRegistrationAccountAdapter' ACCOUNT_AUTHENTICATION_METHOD = 'username_email' ACCOUNT_EMAIL_REQUIRED = True ACCOUNT_EMAIL_VERIFICATION = 'mandatory' ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https' ACCOUNT_UNIQUE_EMAIL = True # Whether to disable intermediate logout page. 
ACCOUNT_LOGOUT_ON_GET = False SOCIALACCOUNT_PROVIDERS = {} #SOCIALACCOUNT_PROVIDERS = { # 'openid': { # 'SERVERS': [ # { # 'id': 'yahoo', # 'name': 'Yahoo', # 'openid_url': 'http://me.yahoo.com' # } # ], # }, # 'google': { # 'SCOPE': ['profile', 'email'], # 'AUTH_PARAMS': {'access_type': 'online'}, # }, # 'facebook': { # 'METHOD': 'oauth2', # 'SCOPE': ['email'], # 'FIELDS': [ # 'email', # 'name', # 'first_name', # 'last_name', # 'locale', # 'timezone', # ], # 'VERSION': 'v2.4', # }, #} ## Django LDAP if 'custom.LDAPBackend' in AUTHENTICATION_BACKENDS: import ldap from django_auth_ldap.config import LDAPSearch ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, '/etc/ssl/certs') AUTH_LDAP_SERVER_URI = 'ldaps://ldap.example.org' AUTH_LDAP_USER_SEARCH = LDAPSearch( 'ou=People,dc=example,dc=org', ldap.SCOPE_SUBTREE, '(&(mail=*)(uid=%(user)s))' ) AUTH_LDAP_USER_ATTR_MAP = { 'first_name': 'givenName', 'last_name': 'sn', 'email': 'mail', } # # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ # LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ # # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/var/www/example.com/static/" STATIC_ROOT = joinpath(BASE_DIR, 'static') # URL prefix for static files. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static". # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. 
STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'compressor.finders.CompressorFinder', ) # django-compressor COMPRESS_OFFLINE = True # Compatibility with Bootstrap 3 from django.contrib.messages import constants as messages MESSAGE_TAGS = { messages.ERROR: 'danger' } # # Gravatar # https://github.com/twaddington/django-gravatar # # Gravatar base url. GRAVATAR_URL = 'http://cdn.libravatar.org/' # Gravatar base secure https url. GRAVATAR_SECURE_URL = 'https://seccdn.libravatar.org/' # Gravatar size in pixels. #GRAVATAR_DEFAULT_SIZE = '80' # An image url or one of the following: 'mm', 'identicon', 'monsterid', 'wavatar', 'retro'. GRAVATAR_DEFAULT_IMAGE = 'retro' # One of the following: 'g', 'pg', 'r', 'x'. #GRAVATAR_DEFAULT_RATING = 'g' # True to use https by default, False for plain http. GRAVATAR_DEFAULT_SECURE = True # # Logging # # A sample logging configuration. The only tangible logging performed by this # configuration is to send an email to the site admins on every HTTP 500 error # when DEBUG=False. See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. 
LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'console': { 'class': 'logging.StreamHandler', 'formatter': 'simple', }, 'file':{ 'level': 'INFO', #'class': 'logging.handlers.RotatingFileHandler', 'class': 'logging.handlers.WatchedFileHandler', 'filename': joinpath(LOG_DIR, 'mailman-webui.log'), 'formatter': 'verbose', }, 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, }, 'loggers': { #'django.request': { # 'handlers': ['mail_admins'], # 'level': 'ERROR', # 'propagate': True, #}, 'django.request': { 'handlers': ['file'], 'level': 'ERROR', 'propagate': True, }, 'django': { 'handlers': ['file'], 'level': 'ERROR', 'propagate': True, }, 'postorius': { 'handlers': ['file'], 'level': 'INFO', 'propagate': True, }, 'hyperkitty': { 'handlers': ['file'], 'level': 'INFO', 'propagate': True, }, }, 'formatters': { 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s' }, 'simple': { 'format': '%(levelname)s %(message)s' }, }, 'root': { 'handlers': ['file'], 'level': 'INFO', }, } if 'raven.contrib.django.raven_compat' in INSTALLED_APPS: RAVEN_CONFIG = { 'dsn': 'https://<key>:<secret>@sentry.io/<project>', } LOGGING['handlers']['sentry'] = { 'level': 'ERROR', 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler', } LOGGING['loggers']['root']['handlers'].append('sentry') try: from settings_local import * except ImportError: pass
get
identifier_name
settings.py
#-*- coding: utf-8 -*- """ Django settings for HyperKitty + Postorius Pay attention to settings ALLOWED_HOSTS and DATABASES! """ from os.path import abspath, dirname, join as joinpath from ConfigParser import SafeConfigParser def read_cfg(path, section=None, option=None): config = SafeConfigParser() config.read(path) def get(section, option):
return get(section, option) if section else get mailman_cfg = read_cfg('/etc/mailman.cfg') BASE_DIR = '/usr/lib/bundles/mailman-webui' CONF_DIR = '/etc/mailman-webui' DATA_DIR = '/var/lib/mailman-webui' LOG_DIR = '/var/log/mailman-webui' # Hosts/domain names that are valid for this site. # NOTE: You MUST add domain name of your instance of this application here! # See https://docs.djangoproject.com/en/1.9/ref/settings/#allowed-hosts ALLOWED_HOSTS = ['localhost'] # Mailman API credentials # NOTE: Replace with hard-coded values if Mailman is running on a different host. MAILMAN_REST_API_URL = 'http://localhost:%s' % (mailman_cfg('webservice', 'port') or 8001) MAILMAN_REST_API_USER = mailman_cfg('webservice', 'admin_user') or 'restadmin' MAILMAN_REST_API_PASS = mailman_cfg('webservice', 'admin_pass') MAILMAN_ARCHIVER_KEY = read_cfg('/etc/mailman.d/hyperkitty.cfg', 'general', 'api_key') MAILMAN_ARCHIVER_FROM = ('127.0.0.1', '::1', '::ffff:127.0.0.1') # REST API REST_FRAMEWORK = { 'PAGE_SIZE': 10, } # Only display mailing-lists in HyperKitty from the same virtual host # as the webserver. FILTER_VHOST = False # # Application definition # SITE_ID = 1 INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'hyperkitty', 'rest_framework', 'django_gravatar', 'paintstore', 'compressor', 'haystack', 'django_extensions', 'postorius', 'django_mailman3', 'stronghold', # Uncomment the next line to enable integration with Sentry # and set DSN in RAVEN_CONFIG. #'raven.contrib.django.raven_compat', 'allauth', 'allauth.account', 'allauth.socialaccount', # Uncomment providers that you want to use, if any. 
#'allauth.socialaccount.providers.openid', #'allauth.socialaccount.providers.github', #'allauth.socialaccount.providers.gitlab', #'allauth.socialaccount.providers.google', #'allauth.socialaccount.providers.twitter', #'allauth.socialaccount.providers.stackexchange', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.middleware.locale.LocaleMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', 'django_mailman3.middleware.TimezoneMiddleware', 'postorius.middleware.PostoriusMiddleware', # Uncomment to require a user to be authenticated to view any page. #'stronghold.middleware.LoginRequiredMiddleware', ) # A string representing the full Python import path to your root URLconf. ROOT_URLCONF = 'urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ # Directory for templates override. joinpath(DATA_DIR, 'templates'), ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.i18n', 'django.template.context_processors.media', 'django.template.context_processors.static', 'django.template.context_processors.tz', 'django.template.context_processors.csrf', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'django_mailman3.context_processors.common', 'hyperkitty.context_processors.common', 'postorius.context_processors.postorius', ], }, }, ] WSGI_APPLICATION = 'wsgi.application' # Using the cache infrastructure can significantly improve performance on a # production setup. 
This is an example with a local Memcached server. #CACHES = { # 'default': { # 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache', # 'LOCATION': '127.0.0.1:11211', # } #} # # Databases # See https://docs.djangoproject.com/en/1.9/ref/settings/#databases # DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': joinpath(DATA_DIR, 'db.sqlite3'), } # Remove the above lines and uncomment the below to use PostgreSQL. # 'default': { # 'ENGINE': 'django.db.backends.postgresql_psycopg2', # 'NAME': 'mailman_webui', # 'USER': 'mailman_webui', # 'PASSWORD': 'change-me', # # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP. # 'HOST': '127.0.0.1', # 'PORT': '', # } } # Full-text search engine HAYSTACK_CONNECTIONS = { 'default': { 'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine', 'PATH': joinpath(DATA_DIR, 'fulltext_index'), }, } # # Outgoing mails # # NOTE: Replace with hard-coded values if Mailman is running on a different host. # The host and port of the SMTP server to use for sending email. EMAIL_HOST = mailman_cfg('mta', 'smtp_host') or 'localhost' EMAIL_PORT = int(mailman_cfg('mta', 'smtp_port') or 25) # Username and password to use for the SMTP server defined above. EMAIL_HOST_USER = mailman_cfg('mta', 'smtp_user') or '' EMAIL_HOST_PASSWORD = mailman_cfg('mta', 'smtp_pass') or '' # Whether to use a explicit TLS connection when talking to the SMTP server. EMAIL_USE_TLS = False # Whether to use an implicit TLS connection when talking to the SMTP server. EMAIL_USE_SSL = False # A tuple that lists people who get code error notifications. When DEBUG=False # and a view raises an exception, Django will email these people with the full # exception information. Each member of the tuple should be a tuple of (Full # name, email address). ADMINS = ( ('Mailman Admin', 'root@localhost'), ) # If you enable email reporting for error messages, this is where those emails # will appear to be coming from. 
Make sure you set a valid domain name, # otherwise the emails may get rejected. # https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-SERVER_EMAIL #SERVER_EMAIL = 'root@your-domain.org' # If you enable internal authentication, this is the address that the emails # will appear to be coming from. Make sure you set a valid domain name, # otherwise the emails may get rejected. # https://docs.djangoproject.com/en/1.9/ref/settings/#default-from-email #DEFAULT_FROM_EMAIL = 'mailing-lists@you-domain.org' # # Security settings # # A secret key used for signing sessions, cookies, password reset tokens etc. SECRET_KEY = open(joinpath(CONF_DIR, 'secret_key')).read() CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY = True SESSION_COOKIE_SECURE = True SECURE_CONTENT_TYPE_NOSNIFF = True SECURE_BROWSER_XSS_FILTER = True X_FRAME_OPTIONS = 'DENY' # If you're behind a proxy, use the X-Forwarded-Host header # See https://docs.djangoproject.com/en/1.9/ref/settings/#use-x-forwarded-host USE_X_FORWARDED_HOST = True # And if your proxy does your SSL encoding for you, set SECURE_PROXY_SSL_HEADER # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') #SECURE_SSL_REDIRECT = True # If you set SECURE_SSL_REDIRECT to True, make sure the SECURE_REDIRECT_EXEMPT # contains at least this line: #SECURE_REDIRECT_EXEMPT = [ # 'archives/api/mailman/.*', # Request from Mailman. #] # # Authentication # AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', # Uncomment to next line to enable LDAP authentication. #'custom.LDAPBackend', 'allauth.account.auth_backends.AuthenticationBackend', ) LOGIN_URL = 'account_login' LOGIN_REDIRECT_URL = 'hk_root' LOGOUT_URL = 'account_logout' # Whether registration of new accounts is currently permitted. 
REGISTRATION_OPEN = True # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator' }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator' }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator' }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator' }, ] # URLs which are ignored by LoginRequiredMiddleware, i.e. the middleware # does not *force* them to require authentication. STRONGHOLD_PUBLIC_URLS = ( r'^/accounts/.*', r'^/archives/api/mailman/.*', ) ## Django Allauth # Custom AccountAdapter for allauth that respects REGISTRATION_OPEN variable. ACCOUNT_ADAPTER = 'custom.CloseableRegistrationAccountAdapter' ACCOUNT_AUTHENTICATION_METHOD = 'username_email' ACCOUNT_EMAIL_REQUIRED = True ACCOUNT_EMAIL_VERIFICATION = 'mandatory' ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https' ACCOUNT_UNIQUE_EMAIL = True # Whether to disable intermediate logout page. 
ACCOUNT_LOGOUT_ON_GET = False SOCIALACCOUNT_PROVIDERS = {} #SOCIALACCOUNT_PROVIDERS = { # 'openid': { # 'SERVERS': [ # { # 'id': 'yahoo', # 'name': 'Yahoo', # 'openid_url': 'http://me.yahoo.com' # } # ], # }, # 'google': { # 'SCOPE': ['profile', 'email'], # 'AUTH_PARAMS': {'access_type': 'online'}, # }, # 'facebook': { # 'METHOD': 'oauth2', # 'SCOPE': ['email'], # 'FIELDS': [ # 'email', # 'name', # 'first_name', # 'last_name', # 'locale', # 'timezone', # ], # 'VERSION': 'v2.4', # }, #} ## Django LDAP if 'custom.LDAPBackend' in AUTHENTICATION_BACKENDS: import ldap from django_auth_ldap.config import LDAPSearch ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, '/etc/ssl/certs') AUTH_LDAP_SERVER_URI = 'ldaps://ldap.example.org' AUTH_LDAP_USER_SEARCH = LDAPSearch( 'ou=People,dc=example,dc=org', ldap.SCOPE_SUBTREE, '(&(mail=*)(uid=%(user)s))' ) AUTH_LDAP_USER_ATTR_MAP = { 'first_name': 'givenName', 'last_name': 'sn', 'email': 'mail', } # # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ # LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ # # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/var/www/example.com/static/" STATIC_ROOT = joinpath(BASE_DIR, 'static') # URL prefix for static files. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static". # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. 
STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'compressor.finders.CompressorFinder', ) # django-compressor COMPRESS_OFFLINE = True # Compatibility with Bootstrap 3 from django.contrib.messages import constants as messages MESSAGE_TAGS = { messages.ERROR: 'danger' } # # Gravatar # https://github.com/twaddington/django-gravatar # # Gravatar base url. GRAVATAR_URL = 'http://cdn.libravatar.org/' # Gravatar base secure https url. GRAVATAR_SECURE_URL = 'https://seccdn.libravatar.org/' # Gravatar size in pixels. #GRAVATAR_DEFAULT_SIZE = '80' # An image url or one of the following: 'mm', 'identicon', 'monsterid', 'wavatar', 'retro'. GRAVATAR_DEFAULT_IMAGE = 'retro' # One of the following: 'g', 'pg', 'r', 'x'. #GRAVATAR_DEFAULT_RATING = 'g' # True to use https by default, False for plain http. GRAVATAR_DEFAULT_SECURE = True # # Logging # # A sample logging configuration. The only tangible logging performed by this # configuration is to send an email to the site admins on every HTTP 500 error # when DEBUG=False. See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. 
LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'console': { 'class': 'logging.StreamHandler', 'formatter': 'simple', }, 'file':{ 'level': 'INFO', #'class': 'logging.handlers.RotatingFileHandler', 'class': 'logging.handlers.WatchedFileHandler', 'filename': joinpath(LOG_DIR, 'mailman-webui.log'), 'formatter': 'verbose', }, 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, }, 'loggers': { #'django.request': { # 'handlers': ['mail_admins'], # 'level': 'ERROR', # 'propagate': True, #}, 'django.request': { 'handlers': ['file'], 'level': 'ERROR', 'propagate': True, }, 'django': { 'handlers': ['file'], 'level': 'ERROR', 'propagate': True, }, 'postorius': { 'handlers': ['file'], 'level': 'INFO', 'propagate': True, }, 'hyperkitty': { 'handlers': ['file'], 'level': 'INFO', 'propagate': True, }, }, 'formatters': { 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s' }, 'simple': { 'format': '%(levelname)s %(message)s' }, }, 'root': { 'handlers': ['file'], 'level': 'INFO', }, } if 'raven.contrib.django.raven_compat' in INSTALLED_APPS: RAVEN_CONFIG = { 'dsn': 'https://<key>:<secret>@sentry.io/<project>', } LOGGING['handlers']['sentry'] = { 'level': 'ERROR', 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler', } LOGGING['loggers']['root']['handlers'].append('sentry') try: from settings_local import * except ImportError: pass
return config.get(section, option) if config.has_option(section, option) else None
identifier_body
settings.py
#-*- coding: utf-8 -*-
Django settings for HyperKitty + Postorius Pay attention to settings ALLOWED_HOSTS and DATABASES! """ from os.path import abspath, dirname, join as joinpath from ConfigParser import SafeConfigParser def read_cfg(path, section=None, option=None): config = SafeConfigParser() config.read(path) def get(section, option): return config.get(section, option) if config.has_option(section, option) else None return get(section, option) if section else get mailman_cfg = read_cfg('/etc/mailman.cfg') BASE_DIR = '/usr/lib/bundles/mailman-webui' CONF_DIR = '/etc/mailman-webui' DATA_DIR = '/var/lib/mailman-webui' LOG_DIR = '/var/log/mailman-webui' # Hosts/domain names that are valid for this site. # NOTE: You MUST add domain name of your instance of this application here! # See https://docs.djangoproject.com/en/1.9/ref/settings/#allowed-hosts ALLOWED_HOSTS = ['localhost'] # Mailman API credentials # NOTE: Replace with hard-coded values if Mailman is running on a different host. MAILMAN_REST_API_URL = 'http://localhost:%s' % (mailman_cfg('webservice', 'port') or 8001) MAILMAN_REST_API_USER = mailman_cfg('webservice', 'admin_user') or 'restadmin' MAILMAN_REST_API_PASS = mailman_cfg('webservice', 'admin_pass') MAILMAN_ARCHIVER_KEY = read_cfg('/etc/mailman.d/hyperkitty.cfg', 'general', 'api_key') MAILMAN_ARCHIVER_FROM = ('127.0.0.1', '::1', '::ffff:127.0.0.1') # REST API REST_FRAMEWORK = { 'PAGE_SIZE': 10, } # Only display mailing-lists in HyperKitty from the same virtual host # as the webserver. 
FILTER_VHOST = False # # Application definition # SITE_ID = 1 INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'hyperkitty', 'rest_framework', 'django_gravatar', 'paintstore', 'compressor', 'haystack', 'django_extensions', 'postorius', 'django_mailman3', 'stronghold', # Uncomment the next line to enable integration with Sentry # and set DSN in RAVEN_CONFIG. #'raven.contrib.django.raven_compat', 'allauth', 'allauth.account', 'allauth.socialaccount', # Uncomment providers that you want to use, if any. #'allauth.socialaccount.providers.openid', #'allauth.socialaccount.providers.github', #'allauth.socialaccount.providers.gitlab', #'allauth.socialaccount.providers.google', #'allauth.socialaccount.providers.twitter', #'allauth.socialaccount.providers.stackexchange', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.middleware.locale.LocaleMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', 'django_mailman3.middleware.TimezoneMiddleware', 'postorius.middleware.PostoriusMiddleware', # Uncomment to require a user to be authenticated to view any page. #'stronghold.middleware.LoginRequiredMiddleware', ) # A string representing the full Python import path to your root URLconf. ROOT_URLCONF = 'urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ # Directory for templates override. 
joinpath(DATA_DIR, 'templates'), ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.i18n', 'django.template.context_processors.media', 'django.template.context_processors.static', 'django.template.context_processors.tz', 'django.template.context_processors.csrf', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'django_mailman3.context_processors.common', 'hyperkitty.context_processors.common', 'postorius.context_processors.postorius', ], }, }, ] WSGI_APPLICATION = 'wsgi.application' # Using the cache infrastructure can significantly improve performance on a # production setup. This is an example with a local Memcached server. #CACHES = { # 'default': { # 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache', # 'LOCATION': '127.0.0.1:11211', # } #} # # Databases # See https://docs.djangoproject.com/en/1.9/ref/settings/#databases # DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': joinpath(DATA_DIR, 'db.sqlite3'), } # Remove the above lines and uncomment the below to use PostgreSQL. # 'default': { # 'ENGINE': 'django.db.backends.postgresql_psycopg2', # 'NAME': 'mailman_webui', # 'USER': 'mailman_webui', # 'PASSWORD': 'change-me', # # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP. # 'HOST': '127.0.0.1', # 'PORT': '', # } } # Full-text search engine HAYSTACK_CONNECTIONS = { 'default': { 'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine', 'PATH': joinpath(DATA_DIR, 'fulltext_index'), }, } # # Outgoing mails # # NOTE: Replace with hard-coded values if Mailman is running on a different host. # The host and port of the SMTP server to use for sending email. 
EMAIL_HOST = mailman_cfg('mta', 'smtp_host') or 'localhost' EMAIL_PORT = int(mailman_cfg('mta', 'smtp_port') or 25) # Username and password to use for the SMTP server defined above. EMAIL_HOST_USER = mailman_cfg('mta', 'smtp_user') or '' EMAIL_HOST_PASSWORD = mailman_cfg('mta', 'smtp_pass') or '' # Whether to use a explicit TLS connection when talking to the SMTP server. EMAIL_USE_TLS = False # Whether to use an implicit TLS connection when talking to the SMTP server. EMAIL_USE_SSL = False # A tuple that lists people who get code error notifications. When DEBUG=False # and a view raises an exception, Django will email these people with the full # exception information. Each member of the tuple should be a tuple of (Full # name, email address). ADMINS = ( ('Mailman Admin', 'root@localhost'), ) # If you enable email reporting for error messages, this is where those emails # will appear to be coming from. Make sure you set a valid domain name, # otherwise the emails may get rejected. # https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-SERVER_EMAIL #SERVER_EMAIL = 'root@your-domain.org' # If you enable internal authentication, this is the address that the emails # will appear to be coming from. Make sure you set a valid domain name, # otherwise the emails may get rejected. # https://docs.djangoproject.com/en/1.9/ref/settings/#default-from-email #DEFAULT_FROM_EMAIL = 'mailing-lists@you-domain.org' # # Security settings # # A secret key used for signing sessions, cookies, password reset tokens etc. 
SECRET_KEY = open(joinpath(CONF_DIR, 'secret_key')).read() CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY = True SESSION_COOKIE_SECURE = True SECURE_CONTENT_TYPE_NOSNIFF = True SECURE_BROWSER_XSS_FILTER = True X_FRAME_OPTIONS = 'DENY' # If you're behind a proxy, use the X-Forwarded-Host header # See https://docs.djangoproject.com/en/1.9/ref/settings/#use-x-forwarded-host USE_X_FORWARDED_HOST = True # And if your proxy does your SSL encoding for you, set SECURE_PROXY_SSL_HEADER # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') #SECURE_SSL_REDIRECT = True # If you set SECURE_SSL_REDIRECT to True, make sure the SECURE_REDIRECT_EXEMPT # contains at least this line: #SECURE_REDIRECT_EXEMPT = [ # 'archives/api/mailman/.*', # Request from Mailman. #] # # Authentication # AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', # Uncomment to next line to enable LDAP authentication. #'custom.LDAPBackend', 'allauth.account.auth_backends.AuthenticationBackend', ) LOGIN_URL = 'account_login' LOGIN_REDIRECT_URL = 'hk_root' LOGOUT_URL = 'account_logout' # Whether registration of new accounts is currently permitted. REGISTRATION_OPEN = True # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator' }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator' }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator' }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator' }, ] # URLs which are ignored by LoginRequiredMiddleware, i.e. the middleware # does not *force* them to require authentication. 
STRONGHOLD_PUBLIC_URLS = ( r'^/accounts/.*', r'^/archives/api/mailman/.*', ) ## Django Allauth # Custom AccountAdapter for allauth that respects REGISTRATION_OPEN variable. ACCOUNT_ADAPTER = 'custom.CloseableRegistrationAccountAdapter' ACCOUNT_AUTHENTICATION_METHOD = 'username_email' ACCOUNT_EMAIL_REQUIRED = True ACCOUNT_EMAIL_VERIFICATION = 'mandatory' ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https' ACCOUNT_UNIQUE_EMAIL = True # Whether to disable intermediate logout page. ACCOUNT_LOGOUT_ON_GET = False SOCIALACCOUNT_PROVIDERS = {} #SOCIALACCOUNT_PROVIDERS = { # 'openid': { # 'SERVERS': [ # { # 'id': 'yahoo', # 'name': 'Yahoo', # 'openid_url': 'http://me.yahoo.com' # } # ], # }, # 'google': { # 'SCOPE': ['profile', 'email'], # 'AUTH_PARAMS': {'access_type': 'online'}, # }, # 'facebook': { # 'METHOD': 'oauth2', # 'SCOPE': ['email'], # 'FIELDS': [ # 'email', # 'name', # 'first_name', # 'last_name', # 'locale', # 'timezone', # ], # 'VERSION': 'v2.4', # }, #} ## Django LDAP if 'custom.LDAPBackend' in AUTHENTICATION_BACKENDS: import ldap from django_auth_ldap.config import LDAPSearch ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, '/etc/ssl/certs') AUTH_LDAP_SERVER_URI = 'ldaps://ldap.example.org' AUTH_LDAP_USER_SEARCH = LDAPSearch( 'ou=People,dc=example,dc=org', ldap.SCOPE_SUBTREE, '(&(mail=*)(uid=%(user)s))' ) AUTH_LDAP_USER_ATTR_MAP = { 'first_name': 'givenName', 'last_name': 'sn', 'email': 'mail', } # # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ # LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ # # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. 
# Example: "/var/www/example.com/static/" STATIC_ROOT = joinpath(BASE_DIR, 'static') # URL prefix for static files. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static". # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'compressor.finders.CompressorFinder', ) # django-compressor COMPRESS_OFFLINE = True # Compatibility with Bootstrap 3 from django.contrib.messages import constants as messages MESSAGE_TAGS = { messages.ERROR: 'danger' } # # Gravatar # https://github.com/twaddington/django-gravatar # # Gravatar base url. GRAVATAR_URL = 'http://cdn.libravatar.org/' # Gravatar base secure https url. GRAVATAR_SECURE_URL = 'https://seccdn.libravatar.org/' # Gravatar size in pixels. #GRAVATAR_DEFAULT_SIZE = '80' # An image url or one of the following: 'mm', 'identicon', 'monsterid', 'wavatar', 'retro'. GRAVATAR_DEFAULT_IMAGE = 'retro' # One of the following: 'g', 'pg', 'r', 'x'. #GRAVATAR_DEFAULT_RATING = 'g' # True to use https by default, False for plain http. GRAVATAR_DEFAULT_SECURE = True # # Logging # # A sample logging configuration. The only tangible logging performed by this # configuration is to send an email to the site admins on every HTTP 500 error # when DEBUG=False. See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. 
LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'console': { 'class': 'logging.StreamHandler', 'formatter': 'simple', }, 'file':{ 'level': 'INFO', #'class': 'logging.handlers.RotatingFileHandler', 'class': 'logging.handlers.WatchedFileHandler', 'filename': joinpath(LOG_DIR, 'mailman-webui.log'), 'formatter': 'verbose', }, 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, }, 'loggers': { #'django.request': { # 'handlers': ['mail_admins'], # 'level': 'ERROR', # 'propagate': True, #}, 'django.request': { 'handlers': ['file'], 'level': 'ERROR', 'propagate': True, }, 'django': { 'handlers': ['file'], 'level': 'ERROR', 'propagate': True, }, 'postorius': { 'handlers': ['file'], 'level': 'INFO', 'propagate': True, }, 'hyperkitty': { 'handlers': ['file'], 'level': 'INFO', 'propagate': True, }, }, 'formatters': { 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s' }, 'simple': { 'format': '%(levelname)s %(message)s' }, }, 'root': { 'handlers': ['file'], 'level': 'INFO', }, } if 'raven.contrib.django.raven_compat' in INSTALLED_APPS: RAVEN_CONFIG = { 'dsn': 'https://<key>:<secret>@sentry.io/<project>', } LOGGING['handlers']['sentry'] = { 'level': 'ERROR', 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler', } LOGGING['loggers']['root']['handlers'].append('sentry') try: from settings_local import * except ImportError: pass
"""
random_line_split
settings.py
#-*- coding: utf-8 -*- """ Django settings for HyperKitty + Postorius Pay attention to settings ALLOWED_HOSTS and DATABASES! """ from os.path import abspath, dirname, join as joinpath from ConfigParser import SafeConfigParser def read_cfg(path, section=None, option=None): config = SafeConfigParser() config.read(path) def get(section, option): return config.get(section, option) if config.has_option(section, option) else None return get(section, option) if section else get mailman_cfg = read_cfg('/etc/mailman.cfg') BASE_DIR = '/usr/lib/bundles/mailman-webui' CONF_DIR = '/etc/mailman-webui' DATA_DIR = '/var/lib/mailman-webui' LOG_DIR = '/var/log/mailman-webui' # Hosts/domain names that are valid for this site. # NOTE: You MUST add domain name of your instance of this application here! # See https://docs.djangoproject.com/en/1.9/ref/settings/#allowed-hosts ALLOWED_HOSTS = ['localhost'] # Mailman API credentials # NOTE: Replace with hard-coded values if Mailman is running on a different host. MAILMAN_REST_API_URL = 'http://localhost:%s' % (mailman_cfg('webservice', 'port') or 8001) MAILMAN_REST_API_USER = mailman_cfg('webservice', 'admin_user') or 'restadmin' MAILMAN_REST_API_PASS = mailman_cfg('webservice', 'admin_pass') MAILMAN_ARCHIVER_KEY = read_cfg('/etc/mailman.d/hyperkitty.cfg', 'general', 'api_key') MAILMAN_ARCHIVER_FROM = ('127.0.0.1', '::1', '::ffff:127.0.0.1') # REST API REST_FRAMEWORK = { 'PAGE_SIZE': 10, } # Only display mailing-lists in HyperKitty from the same virtual host # as the webserver. 
FILTER_VHOST = False # # Application definition # SITE_ID = 1 INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'hyperkitty', 'rest_framework', 'django_gravatar', 'paintstore', 'compressor', 'haystack', 'django_extensions', 'postorius', 'django_mailman3', 'stronghold', # Uncomment the next line to enable integration with Sentry # and set DSN in RAVEN_CONFIG. #'raven.contrib.django.raven_compat', 'allauth', 'allauth.account', 'allauth.socialaccount', # Uncomment providers that you want to use, if any. #'allauth.socialaccount.providers.openid', #'allauth.socialaccount.providers.github', #'allauth.socialaccount.providers.gitlab', #'allauth.socialaccount.providers.google', #'allauth.socialaccount.providers.twitter', #'allauth.socialaccount.providers.stackexchange', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.middleware.locale.LocaleMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', 'django_mailman3.middleware.TimezoneMiddleware', 'postorius.middleware.PostoriusMiddleware', # Uncomment to require a user to be authenticated to view any page. #'stronghold.middleware.LoginRequiredMiddleware', ) # A string representing the full Python import path to your root URLconf. ROOT_URLCONF = 'urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ # Directory for templates override. 
joinpath(DATA_DIR, 'templates'), ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.i18n', 'django.template.context_processors.media', 'django.template.context_processors.static', 'django.template.context_processors.tz', 'django.template.context_processors.csrf', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'django_mailman3.context_processors.common', 'hyperkitty.context_processors.common', 'postorius.context_processors.postorius', ], }, }, ] WSGI_APPLICATION = 'wsgi.application' # Using the cache infrastructure can significantly improve performance on a # production setup. This is an example with a local Memcached server. #CACHES = { # 'default': { # 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache', # 'LOCATION': '127.0.0.1:11211', # } #} # # Databases # See https://docs.djangoproject.com/en/1.9/ref/settings/#databases # DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': joinpath(DATA_DIR, 'db.sqlite3'), } # Remove the above lines and uncomment the below to use PostgreSQL. # 'default': { # 'ENGINE': 'django.db.backends.postgresql_psycopg2', # 'NAME': 'mailman_webui', # 'USER': 'mailman_webui', # 'PASSWORD': 'change-me', # # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP. # 'HOST': '127.0.0.1', # 'PORT': '', # } } # Full-text search engine HAYSTACK_CONNECTIONS = { 'default': { 'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine', 'PATH': joinpath(DATA_DIR, 'fulltext_index'), }, } # # Outgoing mails # # NOTE: Replace with hard-coded values if Mailman is running on a different host. # The host and port of the SMTP server to use for sending email. 
EMAIL_HOST = mailman_cfg('mta', 'smtp_host') or 'localhost' EMAIL_PORT = int(mailman_cfg('mta', 'smtp_port') or 25) # Username and password to use for the SMTP server defined above. EMAIL_HOST_USER = mailman_cfg('mta', 'smtp_user') or '' EMAIL_HOST_PASSWORD = mailman_cfg('mta', 'smtp_pass') or '' # Whether to use a explicit TLS connection when talking to the SMTP server. EMAIL_USE_TLS = False # Whether to use an implicit TLS connection when talking to the SMTP server. EMAIL_USE_SSL = False # A tuple that lists people who get code error notifications. When DEBUG=False # and a view raises an exception, Django will email these people with the full # exception information. Each member of the tuple should be a tuple of (Full # name, email address). ADMINS = ( ('Mailman Admin', 'root@localhost'), ) # If you enable email reporting for error messages, this is where those emails # will appear to be coming from. Make sure you set a valid domain name, # otherwise the emails may get rejected. # https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-SERVER_EMAIL #SERVER_EMAIL = 'root@your-domain.org' # If you enable internal authentication, this is the address that the emails # will appear to be coming from. Make sure you set a valid domain name, # otherwise the emails may get rejected. # https://docs.djangoproject.com/en/1.9/ref/settings/#default-from-email #DEFAULT_FROM_EMAIL = 'mailing-lists@you-domain.org' # # Security settings # # A secret key used for signing sessions, cookies, password reset tokens etc. 
SECRET_KEY = open(joinpath(CONF_DIR, 'secret_key')).read() CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY = True SESSION_COOKIE_SECURE = True SECURE_CONTENT_TYPE_NOSNIFF = True SECURE_BROWSER_XSS_FILTER = True X_FRAME_OPTIONS = 'DENY' # If you're behind a proxy, use the X-Forwarded-Host header # See https://docs.djangoproject.com/en/1.9/ref/settings/#use-x-forwarded-host USE_X_FORWARDED_HOST = True # And if your proxy does your SSL encoding for you, set SECURE_PROXY_SSL_HEADER # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') #SECURE_SSL_REDIRECT = True # If you set SECURE_SSL_REDIRECT to True, make sure the SECURE_REDIRECT_EXEMPT # contains at least this line: #SECURE_REDIRECT_EXEMPT = [ # 'archives/api/mailman/.*', # Request from Mailman. #] # # Authentication # AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', # Uncomment to next line to enable LDAP authentication. #'custom.LDAPBackend', 'allauth.account.auth_backends.AuthenticationBackend', ) LOGIN_URL = 'account_login' LOGIN_REDIRECT_URL = 'hk_root' LOGOUT_URL = 'account_logout' # Whether registration of new accounts is currently permitted. REGISTRATION_OPEN = True # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator' }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator' }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator' }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator' }, ] # URLs which are ignored by LoginRequiredMiddleware, i.e. the middleware # does not *force* them to require authentication. 
STRONGHOLD_PUBLIC_URLS = ( r'^/accounts/.*', r'^/archives/api/mailman/.*', ) ## Django Allauth # Custom AccountAdapter for allauth that respects REGISTRATION_OPEN variable. ACCOUNT_ADAPTER = 'custom.CloseableRegistrationAccountAdapter' ACCOUNT_AUTHENTICATION_METHOD = 'username_email' ACCOUNT_EMAIL_REQUIRED = True ACCOUNT_EMAIL_VERIFICATION = 'mandatory' ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https' ACCOUNT_UNIQUE_EMAIL = True # Whether to disable intermediate logout page. ACCOUNT_LOGOUT_ON_GET = False SOCIALACCOUNT_PROVIDERS = {} #SOCIALACCOUNT_PROVIDERS = { # 'openid': { # 'SERVERS': [ # { # 'id': 'yahoo', # 'name': 'Yahoo', # 'openid_url': 'http://me.yahoo.com' # } # ], # }, # 'google': { # 'SCOPE': ['profile', 'email'], # 'AUTH_PARAMS': {'access_type': 'online'}, # }, # 'facebook': { # 'METHOD': 'oauth2', # 'SCOPE': ['email'], # 'FIELDS': [ # 'email', # 'name', # 'first_name', # 'last_name', # 'locale', # 'timezone', # ], # 'VERSION': 'v2.4', # }, #} ## Django LDAP if 'custom.LDAPBackend' in AUTHENTICATION_BACKENDS:
# # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ # LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ # # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/var/www/example.com/static/" STATIC_ROOT = joinpath(BASE_DIR, 'static') # URL prefix for static files. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static". # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'compressor.finders.CompressorFinder', ) # django-compressor COMPRESS_OFFLINE = True # Compatibility with Bootstrap 3 from django.contrib.messages import constants as messages MESSAGE_TAGS = { messages.ERROR: 'danger' } # # Gravatar # https://github.com/twaddington/django-gravatar # # Gravatar base url. GRAVATAR_URL = 'http://cdn.libravatar.org/' # Gravatar base secure https url. GRAVATAR_SECURE_URL = 'https://seccdn.libravatar.org/' # Gravatar size in pixels. #GRAVATAR_DEFAULT_SIZE = '80' # An image url or one of the following: 'mm', 'identicon', 'monsterid', 'wavatar', 'retro'. GRAVATAR_DEFAULT_IMAGE = 'retro' # One of the following: 'g', 'pg', 'r', 'x'. #GRAVATAR_DEFAULT_RATING = 'g' # True to use https by default, False for plain http. GRAVATAR_DEFAULT_SECURE = True # # Logging # # A sample logging configuration. 
The only tangible logging performed by this # configuration is to send an email to the site admins on every HTTP 500 error # when DEBUG=False. See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'console': { 'class': 'logging.StreamHandler', 'formatter': 'simple', }, 'file':{ 'level': 'INFO', #'class': 'logging.handlers.RotatingFileHandler', 'class': 'logging.handlers.WatchedFileHandler', 'filename': joinpath(LOG_DIR, 'mailman-webui.log'), 'formatter': 'verbose', }, 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, }, 'loggers': { #'django.request': { # 'handlers': ['mail_admins'], # 'level': 'ERROR', # 'propagate': True, #}, 'django.request': { 'handlers': ['file'], 'level': 'ERROR', 'propagate': True, }, 'django': { 'handlers': ['file'], 'level': 'ERROR', 'propagate': True, }, 'postorius': { 'handlers': ['file'], 'level': 'INFO', 'propagate': True, }, 'hyperkitty': { 'handlers': ['file'], 'level': 'INFO', 'propagate': True, }, }, 'formatters': { 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s' }, 'simple': { 'format': '%(levelname)s %(message)s' }, }, 'root': { 'handlers': ['file'], 'level': 'INFO', }, } if 'raven.contrib.django.raven_compat' in INSTALLED_APPS: RAVEN_CONFIG = { 'dsn': 'https://<key>:<secret>@sentry.io/<project>', } LOGGING['handlers']['sentry'] = { 'level': 'ERROR', 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler', } LOGGING['loggers']['root']['handlers'].append('sentry') try: from settings_local import * except ImportError: pass
import ldap from django_auth_ldap.config import LDAPSearch ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, '/etc/ssl/certs') AUTH_LDAP_SERVER_URI = 'ldaps://ldap.example.org' AUTH_LDAP_USER_SEARCH = LDAPSearch( 'ou=People,dc=example,dc=org', ldap.SCOPE_SUBTREE, '(&(mail=*)(uid=%(user)s))' ) AUTH_LDAP_USER_ATTR_MAP = { 'first_name': 'givenName', 'last_name': 'sn', 'email': 'mail', }
conditional_block
document-data.ts
/******************************************************************************** * Copyright (C) 2018 Red Hat, Inc. and others. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License v. 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0. * * This Source Code may also be made available under the following Secondary * Licenses when the conditions for such availability set forth in the Eclipse * Public License v. 2.0 are satisfied: GNU General Public License, version 2 * with the GNU Classpath Exception which is available at * https://www.gnu.org/software/classpath/license.html. * * SPDX-License-Identifier: EPL-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 ********************************************************************************/ import * as theia from '@theia/plugin'; import { ModelChangedEvent, DocumentsMain } from '../api/plugin-api'; import { Range as ARange } from '../api/model'; import URI from 'vscode-uri'; import { ok } from '../common/assert'; import { Range, Position, EndOfLine } from './types-impl'; import { PrefixSumComputer } from './prefix-sum-computer'; import { getWordAtText, ensureValidWordDefinition } from './word-helper'; const _modeId2WordDefinition = new Map<string, RegExp | null>(); export function setWordDefinitionFor(modeId: string, wordDefinition: RegExp | null): void { _modeId2WordDefinition.set(modeId, wordDefinition); } export function getWordDefinitionFor(modeId: string): RegExp { return _modeId2WordDefinition.get(modeId)!; } export class DocumentDataExt { private disposed = false; private dirty: boolean; private _document: theia.TextDocument; private textLines = new Array<theia.TextLine>(); private lineStarts: PrefixSumComputer | undefined; constructor(private proxy: DocumentsMain, private uri: URI, private lines: string[], private eol: string, private languageId: string, private versionId: number, isDirty: boolean) { this.dirty = isDirty; } dispose(): void { 
ok(!this.disposed); this.dirty = false; this.disposed = true; } onEvents(e: ModelChangedEvent): void { if (e.eol && e.eol !== this.eol) { this.eol = e.eol; this.lineStarts = undefined; } // Update my lines const changes = e.changes; // tslint:disable-next-line:one-variable-per-declaration for (let i = 0, len = changes.length; i < len; i++) { const change = changes[i]; this.acceptDeleteRange(change.range); this.acceptInsertText(new Position(change.range.startLineNumber, change.range.startColumn), change.text); } this.versionId = e.versionId; } acceptIsDirty(isDirty: boolean): void { ok(!this.disposed); this.dirty = isDirty; } acceptLanguageId(langId: string): void { ok(!this.disposed); this.languageId = langId; } get document(): theia.TextDocument { if (!this._document) { const that = this; this._document = { get uri() { return that.uri; }, get fileName() { return that.uri.fsPath; }, get isUntitled() { return that.uri.scheme === 'untitled'; }, get languageId() { return that.languageId; }, get version() { return that.versionId; }, get isClosed() { return that.disposed; }, get isDirty() { return that.dirty; }, save() { return that.save(); }, getText(range?) { return range ? that.getTextInRange(range) : that.getText(); }, get eol() { return that.eol === '\n' ? EndOfLine.LF : EndOfLine.CRLF; }, get lineCount() { return that.lines.length; }, lineAt(lineOrPos: number | theia.Position) { return that.lineAt(lineOrPos); }, offsetAt(pos) { return that.offsetAt(pos); }, positionAt(offset) { return that.positionAt(offset); }, validateRange(ran) { return that.validateRange(ran); }, validatePosition(pos) { return that.validatePosition(pos); }, getWordRangeAtPosition(pos, regexp?) 
{ return that.getWordRangeAtPosition(pos, regexp); } }; } return Object.freeze(this._document); } private acceptInsertText(position: Position, insertText: string): void { if (insertText.length === 0) { // Nothing to insert return; } const insertLines = insertText.split(/\r\n|\r|\n/); if (insertLines.length === 1) { // Inserting text on one line this.setLineText(position.line - 1, this.lines[position.line - 1].substring(0, position.character - 1) + insertLines[0] + this.lines[position.line - 1].substring(position.character - 1) ); return; } // Append overflowing text from first line to the end of text to insert insertLines[insertLines.length - 1] += this.lines[position.line - 1].substring(position.character - 1); // Delete overflowing text from first line and insert text on first line this.setLineText(position.line - 1, this.lines[position.line - 1].substring(0, position.character - 1) + insertLines[0] ); // Insert new lines & store lengths const newLengths = new Uint32Array(insertLines.length - 1); for (let i = 1; i < insertLines.length; i++) { this.lines.splice(position.line + i - 1, 0, insertLines[i]); newLengths[i - 1] = insertLines[i].length + this.eol.length; } if (this.lineStarts) { // update prefix sum this.lineStarts.insertValues(position.line, newLengths); } } private acceptDeleteRange(range: ARange): void { if (range.startLineNumber === range.endLineNumber) { if (range.startColumn === range.endColumn) { // Nothing to delete return; } // Delete text on the affected line this.setLineText(range.startLineNumber - 1, this.lines[range.startLineNumber - 1].substring(0, range.startColumn - 1) + this.lines[range.startLineNumber - 1].substring(range.endColumn - 1) ); return; } // Take remaining text on last line and append it to remaining text on first line this.setLineText(range.startLineNumber - 1, this.lines[range.startLineNumber - 1].substring(0, range.startColumn - 1) + this.lines[range.endLineNumber - 1].substring(range.endColumn - 1) ); // Delete middle 
lines this.lines.splice(range.startLineNumber, range.endLineNumber - range.startLineNumber); if (this.lineStarts) { this.lineStarts.removeValues(range.startLineNumber, range.endLineNumber - range.startLineNumber); } } private setLineText(lineIndex: number, newValue: string): void { this.lines[lineIndex] = newValue; if (this.lineStarts) { this.lineStarts.changeValue(lineIndex, this.lines[lineIndex].length + this.eol.length); } } private save(): Promise<boolean> { if (this.disposed) { return Promise.reject(new Error('Document is closed')); } return this.proxy.$trySaveDocument(this.uri); } private getTextInRange(_range: theia.Range): string { const range = this.validateRange(_range); if (range.isEmpty) { return ''; } if (range.isSingleLine) { return this.lines[range.start.line].substring(range.start.character, range.end.character); } const lineEnding = this.eol; const startLineIndex = range.start.line; const endLineIndex = range.end.line; const resultLines: string[] = []; resultLines.push(this.lines[startLineIndex].substring(range.start.character)); for (let i = startLineIndex + 1; i < endLineIndex; i++) { resultLines.push(this.lines[i]); } resultLines.push(this.lines[endLineIndex].substring(0, range.end.character)); return resultLines.join(lineEnding); } private validateRange(range: theia.Range): theia.Range { if (!(range instanceof Range)) { throw new Error('Invalid argument'); } const start = this.validatePosition(range.start); const end = this.validatePosition(range.end); if (start === range.start && end === range.end) {
private getText(): string { return this.lines.join(this.eol); } private validatePosition(position: theia.Position): theia.Position { if (!(position instanceof Position)) { throw new Error('Invalid argument'); } let { line, character } = position; let hasChanged = false; if (line < 0) { line = 0; character = 0; hasChanged = true; } else if (line >= this.lines.length) { line = this.lines.length - 1; character = this.lines[line].length; hasChanged = true; } else { const maxCharacter = this.lines[line].length; if (character < 0) { character = 0; hasChanged = true; } else if (character > maxCharacter) { character = maxCharacter; hasChanged = true; } } if (!hasChanged) { return position; } return new Position(line, character); } private lineAt(lineOrPosition: number | theia.Position): theia.TextLine { let line: number = -1; if (lineOrPosition instanceof Position) { line = lineOrPosition.line; } else if (typeof lineOrPosition === 'number') { line = lineOrPosition; } if (line < 0 || line >= this.lines.length) { throw new Error('Illegal value for `line`'); } let result = this.textLines[line]; if (!result || result.lineNumber !== line || result.text !== this.lines[line]) { const text = this.lines[line]; const firstNonWhitespaceCharacterIndex = /^(\s*)/.exec(text)![1].length; const range = new Range(line, 0, line, text.length); const rangeIncludingLineBreak = line < this.lines.length - 1 ? 
new Range(line, 0, line + 1, 0) : range; result = Object.freeze({ lineNumber: line, range, rangeIncludingLineBreak, text, firstNonWhitespaceCharacterIndex, isEmptyOrWhitespace: firstNonWhitespaceCharacterIndex === text.length }); this.textLines[line] = result; } return result; } private offsetAt(position: theia.Position): number { position = this.validatePosition(position); this.ensureLineStarts(); return this.lineStarts!.getAccumulatedValue(position.line - 1) + position.character; } private ensureLineStarts(): void { if (!this.lineStarts) { const eolLength = this.eol.length; const linesLength = this.lines.length; const lineStartValues = new Uint32Array(linesLength); for (let i = 0; i < linesLength; i++) { lineStartValues[i] = this.lines[i].length + eolLength; } this.lineStarts = new PrefixSumComputer(lineStartValues); } } private positionAt(offset: number): theia.Position { offset = Math.floor(offset); offset = Math.max(0, offset); this.ensureLineStarts(); const out = this.lineStarts!.getIndexOf(offset); const lineLength = this.lines[out.index].length; return new Position(out.index, Math.min(out.remainder, lineLength)); } private getWordRangeAtPosition(_position: theia.Position, regexp?: RegExp): theia.Range | undefined { const position = this.validatePosition(_position); if (!regexp) { // use default when custom-regexp isn't provided regexp = getWordDefinitionFor(this.languageId); } else if (regExpLeadsToEndlessLoop(regexp)) { // use default when custom-regexp is bad console.warn(`[getWordRangeAtPosition]: ignoring custom regexp '${regexp.source}' because it matches the empty string.`); regexp = getWordDefinitionFor(this.languageId); } const wordAtText = getWordAtText( position.character + 1, ensureValidWordDefinition(regexp), this.lines[position.line], 0 ); if (wordAtText) { return new Range(position.line, wordAtText.startColumn - 1, position.line, wordAtText.endColumn - 1); } return undefined; } } export function regExpLeadsToEndlessLoop(regexp: RegExp): 
boolean { // Exit early if it's one of these special cases which are meant to match // against an empty string if (regexp.source === '^' || regexp.source === '^$' || regexp.source === '$' || regexp.source === '^\\s*$') { return false; } // We check against an empty string. If the regular expression doesn't advance // (e.g. ends in an endless loop) it will match an empty string. const match = regexp.exec(''); // tslint:disable-next-line:no-any return (match && <any>regexp.lastIndex === 0)!; }
return range; } return new Range(start.line, start.character, end.line, end.character); }
random_line_split
document-data.ts
/******************************************************************************** * Copyright (C) 2018 Red Hat, Inc. and others. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License v. 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0. * * This Source Code may also be made available under the following Secondary * Licenses when the conditions for such availability set forth in the Eclipse * Public License v. 2.0 are satisfied: GNU General Public License, version 2 * with the GNU Classpath Exception which is available at * https://www.gnu.org/software/classpath/license.html. * * SPDX-License-Identifier: EPL-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 ********************************************************************************/ import * as theia from '@theia/plugin'; import { ModelChangedEvent, DocumentsMain } from '../api/plugin-api'; import { Range as ARange } from '../api/model'; import URI from 'vscode-uri'; import { ok } from '../common/assert'; import { Range, Position, EndOfLine } from './types-impl'; import { PrefixSumComputer } from './prefix-sum-computer'; import { getWordAtText, ensureValidWordDefinition } from './word-helper'; const _modeId2WordDefinition = new Map<string, RegExp | null>(); export function setWordDefinitionFor(modeId: string, wordDefinition: RegExp | null): void { _modeId2WordDefinition.set(modeId, wordDefinition); } export function getWordDefinitionFor(modeId: string): RegExp { return _modeId2WordDefinition.get(modeId)!; } export class DocumentDataExt { private disposed = false; private dirty: boolean; private _document: theia.TextDocument; private textLines = new Array<theia.TextLine>(); private lineStarts: PrefixSumComputer | undefined; constructor(private proxy: DocumentsMain, private uri: URI, private lines: string[], private eol: string, private languageId: string, private versionId: number, isDirty: boolean) { this.dirty = isDirty; } dispose(): void { 
ok(!this.disposed); this.dirty = false; this.disposed = true; } onEvents(e: ModelChangedEvent): void { if (e.eol && e.eol !== this.eol) { this.eol = e.eol; this.lineStarts = undefined; } // Update my lines const changes = e.changes; // tslint:disable-next-line:one-variable-per-declaration for (let i = 0, len = changes.length; i < len; i++) { const change = changes[i]; this.acceptDeleteRange(change.range); this.acceptInsertText(new Position(change.range.startLineNumber, change.range.startColumn), change.text); } this.versionId = e.versionId; } acceptIsDirty(isDirty: boolean): void { ok(!this.disposed); this.dirty = isDirty; } acceptLanguageId(langId: string): void { ok(!this.disposed); this.languageId = langId; } get document(): theia.TextDocument { if (!this._document) { const that = this; this._document = { get uri() { return that.uri; }, get fileName() { return that.uri.fsPath; }, get isUntitled() { return that.uri.scheme === 'untitled'; }, get languageId() { return that.languageId; }, get version() { return that.versionId; }, get isClosed() { return that.disposed; }, get isDirty() { return that.dirty; }, save() { return that.save(); }, getText(range?) { return range ? that.getTextInRange(range) : that.getText(); }, get eol() { return that.eol === '\n' ? EndOfLine.LF : EndOfLine.CRLF; }, get
() { return that.lines.length; }, lineAt(lineOrPos: number | theia.Position) { return that.lineAt(lineOrPos); }, offsetAt(pos) { return that.offsetAt(pos); }, positionAt(offset) { return that.positionAt(offset); }, validateRange(ran) { return that.validateRange(ran); }, validatePosition(pos) { return that.validatePosition(pos); }, getWordRangeAtPosition(pos, regexp?) { return that.getWordRangeAtPosition(pos, regexp); } }; } return Object.freeze(this._document); } private acceptInsertText(position: Position, insertText: string): void { if (insertText.length === 0) { // Nothing to insert return; } const insertLines = insertText.split(/\r\n|\r|\n/); if (insertLines.length === 1) { // Inserting text on one line this.setLineText(position.line - 1, this.lines[position.line - 1].substring(0, position.character - 1) + insertLines[0] + this.lines[position.line - 1].substring(position.character - 1) ); return; } // Append overflowing text from first line to the end of text to insert insertLines[insertLines.length - 1] += this.lines[position.line - 1].substring(position.character - 1); // Delete overflowing text from first line and insert text on first line this.setLineText(position.line - 1, this.lines[position.line - 1].substring(0, position.character - 1) + insertLines[0] ); // Insert new lines & store lengths const newLengths = new Uint32Array(insertLines.length - 1); for (let i = 1; i < insertLines.length; i++) { this.lines.splice(position.line + i - 1, 0, insertLines[i]); newLengths[i - 1] = insertLines[i].length + this.eol.length; } if (this.lineStarts) { // update prefix sum this.lineStarts.insertValues(position.line, newLengths); } } private acceptDeleteRange(range: ARange): void { if (range.startLineNumber === range.endLineNumber) { if (range.startColumn === range.endColumn) { // Nothing to delete return; } // Delete text on the affected line this.setLineText(range.startLineNumber - 1, this.lines[range.startLineNumber - 1].substring(0, range.startColumn - 1) + 
this.lines[range.startLineNumber - 1].substring(range.endColumn - 1) ); return; } // Take remaining text on last line and append it to remaining text on first line this.setLineText(range.startLineNumber - 1, this.lines[range.startLineNumber - 1].substring(0, range.startColumn - 1) + this.lines[range.endLineNumber - 1].substring(range.endColumn - 1) ); // Delete middle lines this.lines.splice(range.startLineNumber, range.endLineNumber - range.startLineNumber); if (this.lineStarts) { this.lineStarts.removeValues(range.startLineNumber, range.endLineNumber - range.startLineNumber); } } private setLineText(lineIndex: number, newValue: string): void { this.lines[lineIndex] = newValue; if (this.lineStarts) { this.lineStarts.changeValue(lineIndex, this.lines[lineIndex].length + this.eol.length); } } private save(): Promise<boolean> { if (this.disposed) { return Promise.reject(new Error('Document is closed')); } return this.proxy.$trySaveDocument(this.uri); } private getTextInRange(_range: theia.Range): string { const range = this.validateRange(_range); if (range.isEmpty) { return ''; } if (range.isSingleLine) { return this.lines[range.start.line].substring(range.start.character, range.end.character); } const lineEnding = this.eol; const startLineIndex = range.start.line; const endLineIndex = range.end.line; const resultLines: string[] = []; resultLines.push(this.lines[startLineIndex].substring(range.start.character)); for (let i = startLineIndex + 1; i < endLineIndex; i++) { resultLines.push(this.lines[i]); } resultLines.push(this.lines[endLineIndex].substring(0, range.end.character)); return resultLines.join(lineEnding); } private validateRange(range: theia.Range): theia.Range { if (!(range instanceof Range)) { throw new Error('Invalid argument'); } const start = this.validatePosition(range.start); const end = this.validatePosition(range.end); if (start === range.start && end === range.end) { return range; } return new Range(start.line, start.character, end.line, 
end.character); } private getText(): string { return this.lines.join(this.eol); } private validatePosition(position: theia.Position): theia.Position { if (!(position instanceof Position)) { throw new Error('Invalid argument'); } let { line, character } = position; let hasChanged = false; if (line < 0) { line = 0; character = 0; hasChanged = true; } else if (line >= this.lines.length) { line = this.lines.length - 1; character = this.lines[line].length; hasChanged = true; } else { const maxCharacter = this.lines[line].length; if (character < 0) { character = 0; hasChanged = true; } else if (character > maxCharacter) { character = maxCharacter; hasChanged = true; } } if (!hasChanged) { return position; } return new Position(line, character); } private lineAt(lineOrPosition: number | theia.Position): theia.TextLine { let line: number = -1; if (lineOrPosition instanceof Position) { line = lineOrPosition.line; } else if (typeof lineOrPosition === 'number') { line = lineOrPosition; } if (line < 0 || line >= this.lines.length) { throw new Error('Illegal value for `line`'); } let result = this.textLines[line]; if (!result || result.lineNumber !== line || result.text !== this.lines[line]) { const text = this.lines[line]; const firstNonWhitespaceCharacterIndex = /^(\s*)/.exec(text)![1].length; const range = new Range(line, 0, line, text.length); const rangeIncludingLineBreak = line < this.lines.length - 1 ? 
new Range(line, 0, line + 1, 0) : range; result = Object.freeze({ lineNumber: line, range, rangeIncludingLineBreak, text, firstNonWhitespaceCharacterIndex, isEmptyOrWhitespace: firstNonWhitespaceCharacterIndex === text.length }); this.textLines[line] = result; } return result; } private offsetAt(position: theia.Position): number { position = this.validatePosition(position); this.ensureLineStarts(); return this.lineStarts!.getAccumulatedValue(position.line - 1) + position.character; } private ensureLineStarts(): void { if (!this.lineStarts) { const eolLength = this.eol.length; const linesLength = this.lines.length; const lineStartValues = new Uint32Array(linesLength); for (let i = 0; i < linesLength; i++) { lineStartValues[i] = this.lines[i].length + eolLength; } this.lineStarts = new PrefixSumComputer(lineStartValues); } } private positionAt(offset: number): theia.Position { offset = Math.floor(offset); offset = Math.max(0, offset); this.ensureLineStarts(); const out = this.lineStarts!.getIndexOf(offset); const lineLength = this.lines[out.index].length; return new Position(out.index, Math.min(out.remainder, lineLength)); } private getWordRangeAtPosition(_position: theia.Position, regexp?: RegExp): theia.Range | undefined { const position = this.validatePosition(_position); if (!regexp) { // use default when custom-regexp isn't provided regexp = getWordDefinitionFor(this.languageId); } else if (regExpLeadsToEndlessLoop(regexp)) { // use default when custom-regexp is bad console.warn(`[getWordRangeAtPosition]: ignoring custom regexp '${regexp.source}' because it matches the empty string.`); regexp = getWordDefinitionFor(this.languageId); } const wordAtText = getWordAtText( position.character + 1, ensureValidWordDefinition(regexp), this.lines[position.line], 0 ); if (wordAtText) { return new Range(position.line, wordAtText.startColumn - 1, position.line, wordAtText.endColumn - 1); } return undefined; } } export function regExpLeadsToEndlessLoop(regexp: RegExp): 
boolean { // Exit early if it's one of these special cases which are meant to match // against an empty string if (regexp.source === '^' || regexp.source === '^$' || regexp.source === '$' || regexp.source === '^\\s*$') { return false; } // We check against an empty string. If the regular expression doesn't advance // (e.g. ends in an endless loop) it will match an empty string. const match = regexp.exec(''); // tslint:disable-next-line:no-any return (match && <any>regexp.lastIndex === 0)!; }
lineCount
identifier_name
document-data.ts
/******************************************************************************** * Copyright (C) 2018 Red Hat, Inc. and others. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License v. 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0. * * This Source Code may also be made available under the following Secondary * Licenses when the conditions for such availability set forth in the Eclipse * Public License v. 2.0 are satisfied: GNU General Public License, version 2 * with the GNU Classpath Exception which is available at * https://www.gnu.org/software/classpath/license.html. * * SPDX-License-Identifier: EPL-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 ********************************************************************************/ import * as theia from '@theia/plugin'; import { ModelChangedEvent, DocumentsMain } from '../api/plugin-api'; import { Range as ARange } from '../api/model'; import URI from 'vscode-uri'; import { ok } from '../common/assert'; import { Range, Position, EndOfLine } from './types-impl'; import { PrefixSumComputer } from './prefix-sum-computer'; import { getWordAtText, ensureValidWordDefinition } from './word-helper'; const _modeId2WordDefinition = new Map<string, RegExp | null>(); export function setWordDefinitionFor(modeId: string, wordDefinition: RegExp | null): void { _modeId2WordDefinition.set(modeId, wordDefinition); } export function getWordDefinitionFor(modeId: string): RegExp { return _modeId2WordDefinition.get(modeId)!; } export class DocumentDataExt { private disposed = false; private dirty: boolean; private _document: theia.TextDocument; private textLines = new Array<theia.TextLine>(); private lineStarts: PrefixSumComputer | undefined; constructor(private proxy: DocumentsMain, private uri: URI, private lines: string[], private eol: string, private languageId: string, private versionId: number, isDirty: boolean) { this.dirty = isDirty; } dispose(): void { 
ok(!this.disposed); this.dirty = false; this.disposed = true; } onEvents(e: ModelChangedEvent): void { if (e.eol && e.eol !== this.eol) { this.eol = e.eol; this.lineStarts = undefined; } // Update my lines const changes = e.changes; // tslint:disable-next-line:one-variable-per-declaration for (let i = 0, len = changes.length; i < len; i++) { const change = changes[i]; this.acceptDeleteRange(change.range); this.acceptInsertText(new Position(change.range.startLineNumber, change.range.startColumn), change.text); } this.versionId = e.versionId; } acceptIsDirty(isDirty: boolean): void { ok(!this.disposed); this.dirty = isDirty; } acceptLanguageId(langId: string): void { ok(!this.disposed); this.languageId = langId; } get document(): theia.TextDocument { if (!this._document) { const that = this; this._document = { get uri() { return that.uri; }, get fileName() { return that.uri.fsPath; }, get isUntitled() { return that.uri.scheme === 'untitled'; }, get languageId() { return that.languageId; }, get version() { return that.versionId; }, get isClosed() { return that.disposed; }, get isDirty() { return that.dirty; }, save() { return that.save(); }, getText(range?) { return range ? that.getTextInRange(range) : that.getText(); }, get eol() { return that.eol === '\n' ? EndOfLine.LF : EndOfLine.CRLF; }, get lineCount() { return that.lines.length; }, lineAt(lineOrPos: number | theia.Position) { return that.lineAt(lineOrPos); }, offsetAt(pos) { return that.offsetAt(pos); }, positionAt(offset) { return that.positionAt(offset); }, validateRange(ran) { return that.validateRange(ran); }, validatePosition(pos) { return that.validatePosition(pos); }, getWordRangeAtPosition(pos, regexp?) 
{ return that.getWordRangeAtPosition(pos, regexp); } }; } return Object.freeze(this._document); } private acceptInsertText(position: Position, insertText: string): void { if (insertText.length === 0) { // Nothing to insert return; } const insertLines = insertText.split(/\r\n|\r|\n/); if (insertLines.length === 1) { // Inserting text on one line this.setLineText(position.line - 1, this.lines[position.line - 1].substring(0, position.character - 1) + insertLines[0] + this.lines[position.line - 1].substring(position.character - 1) ); return; } // Append overflowing text from first line to the end of text to insert insertLines[insertLines.length - 1] += this.lines[position.line - 1].substring(position.character - 1); // Delete overflowing text from first line and insert text on first line this.setLineText(position.line - 1, this.lines[position.line - 1].substring(0, position.character - 1) + insertLines[0] ); // Insert new lines & store lengths const newLengths = new Uint32Array(insertLines.length - 1); for (let i = 1; i < insertLines.length; i++) { this.lines.splice(position.line + i - 1, 0, insertLines[i]); newLengths[i - 1] = insertLines[i].length + this.eol.length; } if (this.lineStarts) { // update prefix sum this.lineStarts.insertValues(position.line, newLengths); } } private acceptDeleteRange(range: ARange): void { if (range.startLineNumber === range.endLineNumber) { if (range.startColumn === range.endColumn) { // Nothing to delete return; } // Delete text on the affected line this.setLineText(range.startLineNumber - 1, this.lines[range.startLineNumber - 1].substring(0, range.startColumn - 1) + this.lines[range.startLineNumber - 1].substring(range.endColumn - 1) ); return; } // Take remaining text on last line and append it to remaining text on first line this.setLineText(range.startLineNumber - 1, this.lines[range.startLineNumber - 1].substring(0, range.startColumn - 1) + this.lines[range.endLineNumber - 1].substring(range.endColumn - 1) ); // Delete middle 
lines this.lines.splice(range.startLineNumber, range.endLineNumber - range.startLineNumber); if (this.lineStarts) { this.lineStarts.removeValues(range.startLineNumber, range.endLineNumber - range.startLineNumber); } } private setLineText(lineIndex: number, newValue: string): void { this.lines[lineIndex] = newValue; if (this.lineStarts) { this.lineStarts.changeValue(lineIndex, this.lines[lineIndex].length + this.eol.length); } } private save(): Promise<boolean> { if (this.disposed) { return Promise.reject(new Error('Document is closed')); } return this.proxy.$trySaveDocument(this.uri); } private getTextInRange(_range: theia.Range): string { const range = this.validateRange(_range); if (range.isEmpty) { return ''; } if (range.isSingleLine) { return this.lines[range.start.line].substring(range.start.character, range.end.character); } const lineEnding = this.eol; const startLineIndex = range.start.line; const endLineIndex = range.end.line; const resultLines: string[] = []; resultLines.push(this.lines[startLineIndex].substring(range.start.character)); for (let i = startLineIndex + 1; i < endLineIndex; i++) { resultLines.push(this.lines[i]); } resultLines.push(this.lines[endLineIndex].substring(0, range.end.character)); return resultLines.join(lineEnding); } private validateRange(range: theia.Range): theia.Range { if (!(range instanceof Range)) { throw new Error('Invalid argument'); } const start = this.validatePosition(range.start); const end = this.validatePosition(range.end); if (start === range.start && end === range.end) { return range; } return new Range(start.line, start.character, end.line, end.character); } private getText(): string { return this.lines.join(this.eol); } private validatePosition(position: theia.Position): theia.Position { if (!(position instanceof Position)) { throw new Error('Invalid argument'); } let { line, character } = position; let hasChanged = false; if (line < 0) { line = 0; character = 0; hasChanged = true; } else if (line >= 
this.lines.length) { line = this.lines.length - 1; character = this.lines[line].length; hasChanged = true; } else { const maxCharacter = this.lines[line].length; if (character < 0) { character = 0; hasChanged = true; } else if (character > maxCharacter) { character = maxCharacter; hasChanged = true; } } if (!hasChanged) { return position; } return new Position(line, character); } private lineAt(lineOrPosition: number | theia.Position): theia.TextLine { let line: number = -1; if (lineOrPosition instanceof Position) { line = lineOrPosition.line; } else if (typeof lineOrPosition === 'number') { line = lineOrPosition; } if (line < 0 || line >= this.lines.length) { throw new Error('Illegal value for `line`'); } let result = this.textLines[line]; if (!result || result.lineNumber !== line || result.text !== this.lines[line]) { const text = this.lines[line]; const firstNonWhitespaceCharacterIndex = /^(\s*)/.exec(text)![1].length; const range = new Range(line, 0, line, text.length); const rangeIncludingLineBreak = line < this.lines.length - 1 ? 
new Range(line, 0, line + 1, 0) : range; result = Object.freeze({ lineNumber: line, range, rangeIncludingLineBreak, text, firstNonWhitespaceCharacterIndex, isEmptyOrWhitespace: firstNonWhitespaceCharacterIndex === text.length }); this.textLines[line] = result; } return result; } private offsetAt(position: theia.Position): number { position = this.validatePosition(position); this.ensureLineStarts(); return this.lineStarts!.getAccumulatedValue(position.line - 1) + position.character; } private ensureLineStarts(): void { if (!this.lineStarts) { const eolLength = this.eol.length; const linesLength = this.lines.length; const lineStartValues = new Uint32Array(linesLength); for (let i = 0; i < linesLength; i++) { lineStartValues[i] = this.lines[i].length + eolLength; } this.lineStarts = new PrefixSumComputer(lineStartValues); } } private positionAt(offset: number): theia.Position { offset = Math.floor(offset); offset = Math.max(0, offset); this.ensureLineStarts(); const out = this.lineStarts!.getIndexOf(offset); const lineLength = this.lines[out.index].length; return new Position(out.index, Math.min(out.remainder, lineLength)); } private getWordRangeAtPosition(_position: theia.Position, regexp?: RegExp): theia.Range | undefined { const position = this.validatePosition(_position); if (!regexp) { // use default when custom-regexp isn't provided regexp = getWordDefinitionFor(this.languageId); } else if (regExpLeadsToEndlessLoop(regexp)) { // use default when custom-regexp is bad console.warn(`[getWordRangeAtPosition]: ignoring custom regexp '${regexp.source}' because it matches the empty string.`); regexp = getWordDefinitionFor(this.languageId); } const wordAtText = getWordAtText( position.character + 1, ensureValidWordDefinition(regexp), this.lines[position.line], 0 ); if (wordAtText)
return undefined; } } export function regExpLeadsToEndlessLoop(regexp: RegExp): boolean { // Exit early if it's one of these special cases which are meant to match // against an empty string if (regexp.source === '^' || regexp.source === '^$' || regexp.source === '$' || regexp.source === '^\\s*$') { return false; } // We check against an empty string. If the regular expression doesn't advance // (e.g. ends in an endless loop) it will match an empty string. const match = regexp.exec(''); // tslint:disable-next-line:no-any return (match && <any>regexp.lastIndex === 0)!; }
{ return new Range(position.line, wordAtText.startColumn - 1, position.line, wordAtText.endColumn - 1); }
conditional_block
document-data.ts
/******************************************************************************** * Copyright (C) 2018 Red Hat, Inc. and others. * * This program and the accompanying materials are made available under the * terms of the Eclipse Public License v. 2.0 which is available at * http://www.eclipse.org/legal/epl-2.0. * * This Source Code may also be made available under the following Secondary * Licenses when the conditions for such availability set forth in the Eclipse * Public License v. 2.0 are satisfied: GNU General Public License, version 2 * with the GNU Classpath Exception which is available at * https://www.gnu.org/software/classpath/license.html. * * SPDX-License-Identifier: EPL-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 ********************************************************************************/ import * as theia from '@theia/plugin'; import { ModelChangedEvent, DocumentsMain } from '../api/plugin-api'; import { Range as ARange } from '../api/model'; import URI from 'vscode-uri'; import { ok } from '../common/assert'; import { Range, Position, EndOfLine } from './types-impl'; import { PrefixSumComputer } from './prefix-sum-computer'; import { getWordAtText, ensureValidWordDefinition } from './word-helper'; const _modeId2WordDefinition = new Map<string, RegExp | null>(); export function setWordDefinitionFor(modeId: string, wordDefinition: RegExp | null): void { _modeId2WordDefinition.set(modeId, wordDefinition); } export function getWordDefinitionFor(modeId: string): RegExp { return _modeId2WordDefinition.get(modeId)!; } export class DocumentDataExt { private disposed = false; private dirty: boolean; private _document: theia.TextDocument; private textLines = new Array<theia.TextLine>(); private lineStarts: PrefixSumComputer | undefined; constructor(private proxy: DocumentsMain, private uri: URI, private lines: string[], private eol: string, private languageId: string, private versionId: number, isDirty: boolean) { this.dirty = isDirty; } dispose(): void { 
ok(!this.disposed); this.dirty = false; this.disposed = true; } onEvents(e: ModelChangedEvent): void { if (e.eol && e.eol !== this.eol) { this.eol = e.eol; this.lineStarts = undefined; } // Update my lines const changes = e.changes; // tslint:disable-next-line:one-variable-per-declaration for (let i = 0, len = changes.length; i < len; i++) { const change = changes[i]; this.acceptDeleteRange(change.range); this.acceptInsertText(new Position(change.range.startLineNumber, change.range.startColumn), change.text); } this.versionId = e.versionId; } acceptIsDirty(isDirty: boolean): void { ok(!this.disposed); this.dirty = isDirty; } acceptLanguageId(langId: string): void { ok(!this.disposed); this.languageId = langId; } get document(): theia.TextDocument { if (!this._document) { const that = this; this._document = { get uri() { return that.uri; }, get fileName() { return that.uri.fsPath; }, get isUntitled() { return that.uri.scheme === 'untitled'; }, get languageId() { return that.languageId; }, get version() { return that.versionId; }, get isClosed() { return that.disposed; }, get isDirty() { return that.dirty; }, save() { return that.save(); }, getText(range?) { return range ? that.getTextInRange(range) : that.getText(); }, get eol() { return that.eol === '\n' ? EndOfLine.LF : EndOfLine.CRLF; }, get lineCount() { return that.lines.length; }, lineAt(lineOrPos: number | theia.Position) { return that.lineAt(lineOrPos); }, offsetAt(pos) { return that.offsetAt(pos); }, positionAt(offset) { return that.positionAt(offset); }, validateRange(ran) { return that.validateRange(ran); }, validatePosition(pos) { return that.validatePosition(pos); }, getWordRangeAtPosition(pos, regexp?) 
{ return that.getWordRangeAtPosition(pos, regexp); } }; } return Object.freeze(this._document); } private acceptInsertText(position: Position, insertText: string): void { if (insertText.length === 0) { // Nothing to insert return; } const insertLines = insertText.split(/\r\n|\r|\n/); if (insertLines.length === 1) { // Inserting text on one line this.setLineText(position.line - 1, this.lines[position.line - 1].substring(0, position.character - 1) + insertLines[0] + this.lines[position.line - 1].substring(position.character - 1) ); return; } // Append overflowing text from first line to the end of text to insert insertLines[insertLines.length - 1] += this.lines[position.line - 1].substring(position.character - 1); // Delete overflowing text from first line and insert text on first line this.setLineText(position.line - 1, this.lines[position.line - 1].substring(0, position.character - 1) + insertLines[0] ); // Insert new lines & store lengths const newLengths = new Uint32Array(insertLines.length - 1); for (let i = 1; i < insertLines.length; i++) { this.lines.splice(position.line + i - 1, 0, insertLines[i]); newLengths[i - 1] = insertLines[i].length + this.eol.length; } if (this.lineStarts) { // update prefix sum this.lineStarts.insertValues(position.line, newLengths); } } private acceptDeleteRange(range: ARange): void { if (range.startLineNumber === range.endLineNumber) { if (range.startColumn === range.endColumn) { // Nothing to delete return; } // Delete text on the affected line this.setLineText(range.startLineNumber - 1, this.lines[range.startLineNumber - 1].substring(0, range.startColumn - 1) + this.lines[range.startLineNumber - 1].substring(range.endColumn - 1) ); return; } // Take remaining text on last line and append it to remaining text on first line this.setLineText(range.startLineNumber - 1, this.lines[range.startLineNumber - 1].substring(0, range.startColumn - 1) + this.lines[range.endLineNumber - 1].substring(range.endColumn - 1) ); // Delete middle 
lines this.lines.splice(range.startLineNumber, range.endLineNumber - range.startLineNumber); if (this.lineStarts) { this.lineStarts.removeValues(range.startLineNumber, range.endLineNumber - range.startLineNumber); } } private setLineText(lineIndex: number, newValue: string): void { this.lines[lineIndex] = newValue; if (this.lineStarts) { this.lineStarts.changeValue(lineIndex, this.lines[lineIndex].length + this.eol.length); } } private save(): Promise<boolean> { if (this.disposed) { return Promise.reject(new Error('Document is closed')); } return this.proxy.$trySaveDocument(this.uri); } private getTextInRange(_range: theia.Range): string { const range = this.validateRange(_range); if (range.isEmpty) { return ''; } if (range.isSingleLine) { return this.lines[range.start.line].substring(range.start.character, range.end.character); } const lineEnding = this.eol; const startLineIndex = range.start.line; const endLineIndex = range.end.line; const resultLines: string[] = []; resultLines.push(this.lines[startLineIndex].substring(range.start.character)); for (let i = startLineIndex + 1; i < endLineIndex; i++) { resultLines.push(this.lines[i]); } resultLines.push(this.lines[endLineIndex].substring(0, range.end.character)); return resultLines.join(lineEnding); } private validateRange(range: theia.Range): theia.Range { if (!(range instanceof Range)) { throw new Error('Invalid argument'); } const start = this.validatePosition(range.start); const end = this.validatePosition(range.end); if (start === range.start && end === range.end) { return range; } return new Range(start.line, start.character, end.line, end.character); } private getText(): string
private validatePosition(position: theia.Position): theia.Position { if (!(position instanceof Position)) { throw new Error('Invalid argument'); } let { line, character } = position; let hasChanged = false; if (line < 0) { line = 0; character = 0; hasChanged = true; } else if (line >= this.lines.length) { line = this.lines.length - 1; character = this.lines[line].length; hasChanged = true; } else { const maxCharacter = this.lines[line].length; if (character < 0) { character = 0; hasChanged = true; } else if (character > maxCharacter) { character = maxCharacter; hasChanged = true; } } if (!hasChanged) { return position; } return new Position(line, character); } private lineAt(lineOrPosition: number | theia.Position): theia.TextLine { let line: number = -1; if (lineOrPosition instanceof Position) { line = lineOrPosition.line; } else if (typeof lineOrPosition === 'number') { line = lineOrPosition; } if (line < 0 || line >= this.lines.length) { throw new Error('Illegal value for `line`'); } let result = this.textLines[line]; if (!result || result.lineNumber !== line || result.text !== this.lines[line]) { const text = this.lines[line]; const firstNonWhitespaceCharacterIndex = /^(\s*)/.exec(text)![1].length; const range = new Range(line, 0, line, text.length); const rangeIncludingLineBreak = line < this.lines.length - 1 ? 
new Range(line, 0, line + 1, 0) : range; result = Object.freeze({ lineNumber: line, range, rangeIncludingLineBreak, text, firstNonWhitespaceCharacterIndex, isEmptyOrWhitespace: firstNonWhitespaceCharacterIndex === text.length }); this.textLines[line] = result; } return result; } private offsetAt(position: theia.Position): number { position = this.validatePosition(position); this.ensureLineStarts(); return this.lineStarts!.getAccumulatedValue(position.line - 1) + position.character; } private ensureLineStarts(): void { if (!this.lineStarts) { const eolLength = this.eol.length; const linesLength = this.lines.length; const lineStartValues = new Uint32Array(linesLength); for (let i = 0; i < linesLength; i++) { lineStartValues[i] = this.lines[i].length + eolLength; } this.lineStarts = new PrefixSumComputer(lineStartValues); } } private positionAt(offset: number): theia.Position { offset = Math.floor(offset); offset = Math.max(0, offset); this.ensureLineStarts(); const out = this.lineStarts!.getIndexOf(offset); const lineLength = this.lines[out.index].length; return new Position(out.index, Math.min(out.remainder, lineLength)); } private getWordRangeAtPosition(_position: theia.Position, regexp?: RegExp): theia.Range | undefined { const position = this.validatePosition(_position); if (!regexp) { // use default when custom-regexp isn't provided regexp = getWordDefinitionFor(this.languageId); } else if (regExpLeadsToEndlessLoop(regexp)) { // use default when custom-regexp is bad console.warn(`[getWordRangeAtPosition]: ignoring custom regexp '${regexp.source}' because it matches the empty string.`); regexp = getWordDefinitionFor(this.languageId); } const wordAtText = getWordAtText( position.character + 1, ensureValidWordDefinition(regexp), this.lines[position.line], 0 ); if (wordAtText) { return new Range(position.line, wordAtText.startColumn - 1, position.line, wordAtText.endColumn - 1); } return undefined; } } export function regExpLeadsToEndlessLoop(regexp: RegExp): 
boolean { // Exit early if it's one of these special cases which are meant to match // against an empty string if (regexp.source === '^' || regexp.source === '^$' || regexp.source === '$' || regexp.source === '^\\s*$') { return false; } // We check against an empty string. If the regular expression doesn't advance // (e.g. ends in an endless loop) it will match an empty string. const match = regexp.exec(''); // tslint:disable-next-line:no-any return (match && <any>regexp.lastIndex === 0)!; }
{ return this.lines.join(this.eol); }
identifier_body
run_hook.go
/* Copyright The Stash Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cmds import ( "context" "fmt" "os" "path/filepath" "stash.appscode.dev/apimachinery/apis" "stash.appscode.dev/apimachinery/apis/stash/v1beta1" cs "stash.appscode.dev/apimachinery/client/clientset/versioned" "stash.appscode.dev/apimachinery/pkg/restic" "stash.appscode.dev/stash/pkg/status" "stash.appscode.dev/stash/pkg/util" "github.com/golang/glog" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "kmodules.xyz/client-go/meta" appcatalog_cs "kmodules.xyz/custom-resources/client/clientset/versioned" ) type hookOptions struct { masterURL string kubeConfigPath string namespace string hookType string backupSessionName string restoreSessionName string targetKind string targetName string invokerType string invokerName string hostname string config *rest.Config kubeClient kubernetes.Interface stashClient cs.Interface appClient appcatalog_cs.Interface metricOpts restic.MetricsOptions outputDir string } func NewCmdRunHook() *cobra.Command { opt := hookOptions{ masterURL: "", kubeConfigPath: "", namespace: meta.Namespace(), hostname: apis.DefaultHost, } cmd := &cobra.Command{ Use: "run-hook", Short: "Execute Backup or Restore Hooks", DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { config, err := clientcmd.BuildConfigFromFlags(opt.masterURL, 
opt.kubeConfigPath) if err != nil { glog.Fatalf("Could not get Kubernetes config: %s", err) return err } opt.config = config opt.kubeClient = kubernetes.NewForConfigOrDie(config) opt.stashClient = cs.NewForConfigOrDie(config) opt.appClient = appcatalog_cs.NewForConfigOrDie(config) err = opt.executeHook() if err != nil { // For preBackup or preRestore hook failure, we will fail the container so that the task does to proceed to next step. // We will also update the BackupSession/RestoreSession status as the update-status Function will not execute. if opt.hookType == apis.PreBackupHook || opt.hookType == apis.PreRestoreHook { return opt.handlePreTaskHookFailure(err) } // For other postBackup or postRestore hook failure, we will simply write the failure output into the output directory. // The update-status Function will update the status of the BackupSession/RestoreSession return opt.handlePostTaskHookFailure(err) } return nil }, } cmd.Flags().StringVar(&opt.masterURL, "master", opt.masterURL, "The address of the Kubernetes API server (overrides any value in kubeconfig)") cmd.Flags().StringVar(&opt.kubeConfigPath, "kubeconfig", opt.kubeConfigPath, "Path to kubeconfig file with authorization information (the master location is set by the master flag).") cmd.Flags().StringVar(&opt.backupSessionName, "backupsession", opt.backupSessionName, "Name of the respective BackupSession object") cmd.Flags().StringVar(&opt.restoreSessionName, "restoresession", opt.restoreSessionName, "Name of the respective RestoreSession") cmd.Flags().StringVar(&opt.invokerType, "invoker-type", opt.invokerType, "Type of the backup invoker") cmd.Flags().StringVar(&opt.invokerName, "invoker-name", opt.invokerName, "Name of the respective backup invoker") cmd.Flags().StringVar(&opt.targetName, "target-name", opt.targetName, "Name of the Target") cmd.Flags().StringVar(&opt.targetKind, "target-kind", opt.targetName, "Kind of the Target") cmd.Flags().StringVar(&opt.hookType, "hook-type", opt.hookType, 
"Type of hook to execute") cmd.Flags().StringVar(&opt.hostname, "hostname", opt.hostname, "Name of the host that is being backed up or restored") cmd.Flags().BoolVar(&opt.metricOpts.Enabled, "metrics-enabled", opt.metricOpts.Enabled, "Specify whether to export Prometheus metrics") cmd.Flags().StringVar(&opt.metricOpts.PushgatewayURL, "metrics-pushgateway-url", opt.metricOpts.PushgatewayURL, "Pushgateway URL where the metrics will be pushed") cmd.Flags().StringSliceVar(&opt.metricOpts.Labels, "metrics-labels", opt.metricOpts.Labels, "Labels to apply in exported metrics") cmd.Flags().StringVar(&opt.metricOpts.JobName, "prom-job-name", StashDefaultMetricJob, "Metrics job name") cmd.Flags().StringVar(&opt.outputDir, "output-dir", opt.outputDir, "Directory where output.json file will be written (keep empty if you don't need to write output in file)") return cmd } func (opt *hookOptions) executeHook() error { var hook interface{} var executorPodName string if opt.backupSessionName != "" { // For backup hooks, BackupSession name will be provided. We will read the hooks from the underlying backup invoker. invoker, err := apis.ExtractBackupInvokerInfo(opt.stashClient, opt.invokerType, opt.invokerName, opt.namespace) if err != nil { return err } // We need to extract the hook only for the current target for _, targetInfo := range invoker.TargetsInfo { if targetInfo.Target != nil && targetInfo.Target.Ref.Kind == opt.targetKind && targetInfo.Target.Ref.Name == opt.targetName { hook = targetInfo.Hooks executorPodName, err = opt.getHookExecutorPodName(targetInfo.Target.Ref) if err != nil { return err } break } } } else if opt.restoreSessionName != "" { // For restore hooks, RestoreSession name will be provided. We will read the hooks from the RestoreSession. 
restoreSession, err := opt.stashClient.StashV1beta1().RestoreSessions(opt.namespace).Get(context.TODO(), opt.restoreSessionName, metav1.GetOptions{}) if err != nil { return err } hook = restoreSession.Spec.Hooks if restoreSession.Spec.Target != nil { executorPodName, err = opt.getHookExecutorPodName(restoreSession.Spec.Target.Ref) if err != nil { return err } } else { executorPodName = os.Getenv(apis.KeyPodName) } } else { return fmt.Errorf("can not execute hooks. Reason: Respective BackupSession or RestoreSession has not been specified") } // Execute the hooks return util.ExecuteHook(opt.config, hook, opt.hookType, executorPodName, opt.namespace) } func (opt *hookOptions) getHookExecutorPodName(targetRef v1beta1.TargetRef) (string, error) { switch targetRef.Kind { case apis.KindAppBinding: // For AppBinding, we will execute the hooks in the respective app pod return opt.getAppPodName(targetRef.Name) default: // For other types of target, hook will be executed where this process is running. return os.Getenv(apis.KeyPodName), nil } } func (opt *hookOptions) getAppPodName(appbindingName string) (string, error) { // get the AppBinding appbinding, err := opt.appClient.AppcatalogV1alpha1().AppBindings(opt.namespace).Get(context.TODO(), appbindingName, metav1.GetOptions{}) if err != nil { return "", err } // AppBinding should have a Service in ClientConfig field. This service selects the app pod. We will execute the hooks in the app pod. if appbinding.Spec.ClientConfig.Service != nil { // there should be an endpoint with same name as the service which contains the name of the selected pods. 
endPoint, err := opt.kubeClient.CoreV1().Endpoints(opt.namespace).Get(context.TODO(), appbinding.Spec.ClientConfig.Service.Name, metav1.GetOptions{}) if err != nil { return "", err } for _, subSets := range endPoint.Subsets { // get pod from the ready addresses for _, readyAddrs := range subSets.Addresses { if readyAddrs.TargetRef != nil && readyAddrs.TargetRef.Kind == apis.KindPod { return readyAddrs.TargetRef.Name, nil } } // no pod found in ready addresses. now try in not ready addresses. for _, notReadyAddrs := range subSets.NotReadyAddresses { if notReadyAddrs.TargetRef != nil && notReadyAddrs.TargetRef.Kind == apis.KindPod { return notReadyAddrs.TargetRef.Name, nil } } } } return "", fmt.Errorf("no pod found for AppBinding %s/%s", opt.namespace, appbindingName) } func (opt *hookOptions)
(hookErr error) error { statusOpt := status.UpdateStatusOptions{ Config: opt.config, KubeClient: opt.kubeClient, StashClient: opt.stashClient, Namespace: opt.namespace, Metrics: opt.metricOpts, TargetRef: v1beta1.TargetRef{ Kind: opt.targetKind, Name: opt.targetName, }, } if opt.hookType == apis.PreBackupHook { backupOutput := &restic.BackupOutput{ HostBackupStats: []v1beta1.HostBackupStats{ { Hostname: opt.hostname, Phase: v1beta1.HostBackupFailed, Error: hookErr.Error(), }, }, } statusOpt.BackupSession = opt.backupSessionName // TODO: user real invoker invoker, err := apis.ExtractBackupInvokerInfo(opt.stashClient, opt.invokerType, opt.invokerName, opt.namespace) if err != nil { return err } for _, targetInfo := range invoker.TargetsInfo { if targetInfo.Target != nil && targetInfo.Target.Ref.Kind == opt.targetKind && targetInfo.Target.Ref.Name == opt.targetName { err := statusOpt.UpdatePostBackupStatus(backupOutput, invoker, targetInfo) if err != nil { hookErr = errors.NewAggregate([]error{hookErr, err}) } } } } else { // otherwise it is postRestore hook restoreOutput := &restic.RestoreOutput{ HostRestoreStats: []v1beta1.HostRestoreStats{ { Hostname: opt.hostname, Phase: v1beta1.HostRestoreFailed, Error: hookErr.Error(), }, }, } statusOpt.RestoreSession = opt.restoreSessionName err := statusOpt.UpdatePostRestoreStatus(restoreOutput) if err != nil { hookErr = errors.NewAggregate([]error{hookErr, err}) } } // return error so that the container fail return hookErr } func (opt *hookOptions) handlePostTaskHookFailure(hookErr error) error { if opt.hookType == apis.PostBackupHook { backupOutput := &restic.BackupOutput{ HostBackupStats: []v1beta1.HostBackupStats{ { Hostname: opt.hostname, Phase: v1beta1.HostBackupFailed, Error: hookErr.Error(), }, }, } err := backupOutput.WriteOutput(filepath.Join(opt.outputDir, restic.DefaultOutputFileName)) if err != nil { // failed to write output file. we should fail the container. 
hence, we are returning the error return errors.NewAggregate([]error{hookErr, err}) } } else { // otherwise it is postRestore hook restoreOutput := &restic.RestoreOutput{ HostRestoreStats: []v1beta1.HostRestoreStats{ { Hostname: opt.hostname, Phase: v1beta1.HostRestoreFailed, Error: hookErr.Error(), }, }, } err := restoreOutput.WriteOutput(filepath.Join(opt.outputDir, restic.DefaultOutputFileName)) if err != nil { // failed to write output file. we should fail the container. hence, are returning the error return errors.NewAggregate([]error{hookErr, err}) } } // don't return error. we don't want to fail this container. update-status Function will execute after it. // update-status Function will take care of updating BackupSession/RestoreSession status return nil }
handlePreTaskHookFailure
identifier_name
run_hook.go
/* Copyright The Stash Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cmds import ( "context" "fmt" "os" "path/filepath" "stash.appscode.dev/apimachinery/apis" "stash.appscode.dev/apimachinery/apis/stash/v1beta1" cs "stash.appscode.dev/apimachinery/client/clientset/versioned" "stash.appscode.dev/apimachinery/pkg/restic" "stash.appscode.dev/stash/pkg/status" "stash.appscode.dev/stash/pkg/util" "github.com/golang/glog" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "kmodules.xyz/client-go/meta" appcatalog_cs "kmodules.xyz/custom-resources/client/clientset/versioned" ) type hookOptions struct { masterURL string kubeConfigPath string namespace string hookType string backupSessionName string restoreSessionName string targetKind string targetName string invokerType string invokerName string hostname string config *rest.Config kubeClient kubernetes.Interface stashClient cs.Interface appClient appcatalog_cs.Interface metricOpts restic.MetricsOptions outputDir string } func NewCmdRunHook() *cobra.Command { opt := hookOptions{ masterURL: "", kubeConfigPath: "", namespace: meta.Namespace(), hostname: apis.DefaultHost, } cmd := &cobra.Command{ Use: "run-hook", Short: "Execute Backup or Restore Hooks", DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { config, err := clientcmd.BuildConfigFromFlags(opt.masterURL, 
opt.kubeConfigPath) if err != nil { glog.Fatalf("Could not get Kubernetes config: %s", err) return err } opt.config = config opt.kubeClient = kubernetes.NewForConfigOrDie(config) opt.stashClient = cs.NewForConfigOrDie(config) opt.appClient = appcatalog_cs.NewForConfigOrDie(config) err = opt.executeHook() if err != nil { // For preBackup or preRestore hook failure, we will fail the container so that the task does to proceed to next step. // We will also update the BackupSession/RestoreSession status as the update-status Function will not execute. if opt.hookType == apis.PreBackupHook || opt.hookType == apis.PreRestoreHook { return opt.handlePreTaskHookFailure(err) } // For other postBackup or postRestore hook failure, we will simply write the failure output into the output directory. // The update-status Function will update the status of the BackupSession/RestoreSession return opt.handlePostTaskHookFailure(err) } return nil }, } cmd.Flags().StringVar(&opt.masterURL, "master", opt.masterURL, "The address of the Kubernetes API server (overrides any value in kubeconfig)") cmd.Flags().StringVar(&opt.kubeConfigPath, "kubeconfig", opt.kubeConfigPath, "Path to kubeconfig file with authorization information (the master location is set by the master flag).") cmd.Flags().StringVar(&opt.backupSessionName, "backupsession", opt.backupSessionName, "Name of the respective BackupSession object") cmd.Flags().StringVar(&opt.restoreSessionName, "restoresession", opt.restoreSessionName, "Name of the respective RestoreSession") cmd.Flags().StringVar(&opt.invokerType, "invoker-type", opt.invokerType, "Type of the backup invoker") cmd.Flags().StringVar(&opt.invokerName, "invoker-name", opt.invokerName, "Name of the respective backup invoker") cmd.Flags().StringVar(&opt.targetName, "target-name", opt.targetName, "Name of the Target") cmd.Flags().StringVar(&opt.targetKind, "target-kind", opt.targetName, "Kind of the Target") cmd.Flags().StringVar(&opt.hookType, "hook-type", opt.hookType, 
"Type of hook to execute") cmd.Flags().StringVar(&opt.hostname, "hostname", opt.hostname, "Name of the host that is being backed up or restored") cmd.Flags().BoolVar(&opt.metricOpts.Enabled, "metrics-enabled", opt.metricOpts.Enabled, "Specify whether to export Prometheus metrics") cmd.Flags().StringVar(&opt.metricOpts.PushgatewayURL, "metrics-pushgateway-url", opt.metricOpts.PushgatewayURL, "Pushgateway URL where the metrics will be pushed") cmd.Flags().StringSliceVar(&opt.metricOpts.Labels, "metrics-labels", opt.metricOpts.Labels, "Labels to apply in exported metrics") cmd.Flags().StringVar(&opt.metricOpts.JobName, "prom-job-name", StashDefaultMetricJob, "Metrics job name") cmd.Flags().StringVar(&opt.outputDir, "output-dir", opt.outputDir, "Directory where output.json file will be written (keep empty if you don't need to write output in file)") return cmd } func (opt *hookOptions) executeHook() error { var hook interface{} var executorPodName string if opt.backupSessionName != "" { // For backup hooks, BackupSession name will be provided. We will read the hooks from the underlying backup invoker. invoker, err := apis.ExtractBackupInvokerInfo(opt.stashClient, opt.invokerType, opt.invokerName, opt.namespace) if err != nil { return err } // We need to extract the hook only for the current target for _, targetInfo := range invoker.TargetsInfo { if targetInfo.Target != nil && targetInfo.Target.Ref.Kind == opt.targetKind && targetInfo.Target.Ref.Name == opt.targetName { hook = targetInfo.Hooks executorPodName, err = opt.getHookExecutorPodName(targetInfo.Target.Ref) if err != nil { return err } break } } } else if opt.restoreSessionName != "" { // For restore hooks, RestoreSession name will be provided. We will read the hooks from the RestoreSession. 
restoreSession, err := opt.stashClient.StashV1beta1().RestoreSessions(opt.namespace).Get(context.TODO(), opt.restoreSessionName, metav1.GetOptions{}) if err != nil { return err } hook = restoreSession.Spec.Hooks if restoreSession.Spec.Target != nil { executorPodName, err = opt.getHookExecutorPodName(restoreSession.Spec.Target.Ref) if err != nil { return err } } else { executorPodName = os.Getenv(apis.KeyPodName) } } else { return fmt.Errorf("can not execute hooks. Reason: Respective BackupSession or RestoreSession has not been specified") } // Execute the hooks return util.ExecuteHook(opt.config, hook, opt.hookType, executorPodName, opt.namespace) } func (opt *hookOptions) getHookExecutorPodName(targetRef v1beta1.TargetRef) (string, error) { switch targetRef.Kind { case apis.KindAppBinding: // For AppBinding, we will execute the hooks in the respective app pod return opt.getAppPodName(targetRef.Name) default: // For other types of target, hook will be executed where this process is running. return os.Getenv(apis.KeyPodName), nil } } func (opt *hookOptions) getAppPodName(appbindingName string) (string, error) { // get the AppBinding appbinding, err := opt.appClient.AppcatalogV1alpha1().AppBindings(opt.namespace).Get(context.TODO(), appbindingName, metav1.GetOptions{}) if err != nil { return "", err } // AppBinding should have a Service in ClientConfig field. This service selects the app pod. We will execute the hooks in the app pod. if appbinding.Spec.ClientConfig.Service != nil { // there should be an endpoint with same name as the service which contains the name of the selected pods. 
endPoint, err := opt.kubeClient.CoreV1().Endpoints(opt.namespace).Get(context.TODO(), appbinding.Spec.ClientConfig.Service.Name, metav1.GetOptions{}) if err != nil { return "", err } for _, subSets := range endPoint.Subsets { // get pod from the ready addresses for _, readyAddrs := range subSets.Addresses { if readyAddrs.TargetRef != nil && readyAddrs.TargetRef.Kind == apis.KindPod { return readyAddrs.TargetRef.Name, nil } } // no pod found in ready addresses. now try in not ready addresses. for _, notReadyAddrs := range subSets.NotReadyAddresses { if notReadyAddrs.TargetRef != nil && notReadyAddrs.TargetRef.Kind == apis.KindPod
} } } return "", fmt.Errorf("no pod found for AppBinding %s/%s", opt.namespace, appbindingName) } func (opt *hookOptions) handlePreTaskHookFailure(hookErr error) error { statusOpt := status.UpdateStatusOptions{ Config: opt.config, KubeClient: opt.kubeClient, StashClient: opt.stashClient, Namespace: opt.namespace, Metrics: opt.metricOpts, TargetRef: v1beta1.TargetRef{ Kind: opt.targetKind, Name: opt.targetName, }, } if opt.hookType == apis.PreBackupHook { backupOutput := &restic.BackupOutput{ HostBackupStats: []v1beta1.HostBackupStats{ { Hostname: opt.hostname, Phase: v1beta1.HostBackupFailed, Error: hookErr.Error(), }, }, } statusOpt.BackupSession = opt.backupSessionName // TODO: user real invoker invoker, err := apis.ExtractBackupInvokerInfo(opt.stashClient, opt.invokerType, opt.invokerName, opt.namespace) if err != nil { return err } for _, targetInfo := range invoker.TargetsInfo { if targetInfo.Target != nil && targetInfo.Target.Ref.Kind == opt.targetKind && targetInfo.Target.Ref.Name == opt.targetName { err := statusOpt.UpdatePostBackupStatus(backupOutput, invoker, targetInfo) if err != nil { hookErr = errors.NewAggregate([]error{hookErr, err}) } } } } else { // otherwise it is postRestore hook restoreOutput := &restic.RestoreOutput{ HostRestoreStats: []v1beta1.HostRestoreStats{ { Hostname: opt.hostname, Phase: v1beta1.HostRestoreFailed, Error: hookErr.Error(), }, }, } statusOpt.RestoreSession = opt.restoreSessionName err := statusOpt.UpdatePostRestoreStatus(restoreOutput) if err != nil { hookErr = errors.NewAggregate([]error{hookErr, err}) } } // return error so that the container fail return hookErr } func (opt *hookOptions) handlePostTaskHookFailure(hookErr error) error { if opt.hookType == apis.PostBackupHook { backupOutput := &restic.BackupOutput{ HostBackupStats: []v1beta1.HostBackupStats{ { Hostname: opt.hostname, Phase: v1beta1.HostBackupFailed, Error: hookErr.Error(), }, }, } err := backupOutput.WriteOutput(filepath.Join(opt.outputDir, 
restic.DefaultOutputFileName)) if err != nil { // failed to write output file. we should fail the container. hence, we are returning the error return errors.NewAggregate([]error{hookErr, err}) } } else { // otherwise it is postRestore hook restoreOutput := &restic.RestoreOutput{ HostRestoreStats: []v1beta1.HostRestoreStats{ { Hostname: opt.hostname, Phase: v1beta1.HostRestoreFailed, Error: hookErr.Error(), }, }, } err := restoreOutput.WriteOutput(filepath.Join(opt.outputDir, restic.DefaultOutputFileName)) if err != nil { // failed to write output file. we should fail the container. hence, are returning the error return errors.NewAggregate([]error{hookErr, err}) } } // don't return error. we don't want to fail this container. update-status Function will execute after it. // update-status Function will take care of updating BackupSession/RestoreSession status return nil }
{ return notReadyAddrs.TargetRef.Name, nil }
conditional_block
run_hook.go
/* Copyright The Stash Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cmds import ( "context" "fmt" "os" "path/filepath" "stash.appscode.dev/apimachinery/apis" "stash.appscode.dev/apimachinery/apis/stash/v1beta1" cs "stash.appscode.dev/apimachinery/client/clientset/versioned" "stash.appscode.dev/apimachinery/pkg/restic" "stash.appscode.dev/stash/pkg/status" "stash.appscode.dev/stash/pkg/util" "github.com/golang/glog" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "kmodules.xyz/client-go/meta" appcatalog_cs "kmodules.xyz/custom-resources/client/clientset/versioned" ) type hookOptions struct { masterURL string kubeConfigPath string namespace string hookType string backupSessionName string restoreSessionName string targetKind string targetName string invokerType string invokerName string hostname string config *rest.Config kubeClient kubernetes.Interface stashClient cs.Interface appClient appcatalog_cs.Interface metricOpts restic.MetricsOptions outputDir string } func NewCmdRunHook() *cobra.Command { opt := hookOptions{ masterURL: "", kubeConfigPath: "", namespace: meta.Namespace(), hostname: apis.DefaultHost, } cmd := &cobra.Command{ Use: "run-hook", Short: "Execute Backup or Restore Hooks", DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { config, err := clientcmd.BuildConfigFromFlags(opt.masterURL, 
opt.kubeConfigPath) if err != nil { glog.Fatalf("Could not get Kubernetes config: %s", err) return err } opt.config = config opt.kubeClient = kubernetes.NewForConfigOrDie(config) opt.stashClient = cs.NewForConfigOrDie(config) opt.appClient = appcatalog_cs.NewForConfigOrDie(config) err = opt.executeHook() if err != nil { // For preBackup or preRestore hook failure, we will fail the container so that the task does to proceed to next step. // We will also update the BackupSession/RestoreSession status as the update-status Function will not execute. if opt.hookType == apis.PreBackupHook || opt.hookType == apis.PreRestoreHook { return opt.handlePreTaskHookFailure(err) } // For other postBackup or postRestore hook failure, we will simply write the failure output into the output directory. // The update-status Function will update the status of the BackupSession/RestoreSession return opt.handlePostTaskHookFailure(err) } return nil }, } cmd.Flags().StringVar(&opt.masterURL, "master", opt.masterURL, "The address of the Kubernetes API server (overrides any value in kubeconfig)") cmd.Flags().StringVar(&opt.kubeConfigPath, "kubeconfig", opt.kubeConfigPath, "Path to kubeconfig file with authorization information (the master location is set by the master flag).") cmd.Flags().StringVar(&opt.backupSessionName, "backupsession", opt.backupSessionName, "Name of the respective BackupSession object") cmd.Flags().StringVar(&opt.restoreSessionName, "restoresession", opt.restoreSessionName, "Name of the respective RestoreSession") cmd.Flags().StringVar(&opt.invokerType, "invoker-type", opt.invokerType, "Type of the backup invoker") cmd.Flags().StringVar(&opt.invokerName, "invoker-name", opt.invokerName, "Name of the respective backup invoker") cmd.Flags().StringVar(&opt.targetName, "target-name", opt.targetName, "Name of the Target") cmd.Flags().StringVar(&opt.targetKind, "target-kind", opt.targetName, "Kind of the Target") cmd.Flags().StringVar(&opt.hookType, "hook-type", opt.hookType, 
"Type of hook to execute") cmd.Flags().StringVar(&opt.hostname, "hostname", opt.hostname, "Name of the host that is being backed up or restored") cmd.Flags().BoolVar(&opt.metricOpts.Enabled, "metrics-enabled", opt.metricOpts.Enabled, "Specify whether to export Prometheus metrics") cmd.Flags().StringVar(&opt.metricOpts.PushgatewayURL, "metrics-pushgateway-url", opt.metricOpts.PushgatewayURL, "Pushgateway URL where the metrics will be pushed") cmd.Flags().StringSliceVar(&opt.metricOpts.Labels, "metrics-labels", opt.metricOpts.Labels, "Labels to apply in exported metrics") cmd.Flags().StringVar(&opt.metricOpts.JobName, "prom-job-name", StashDefaultMetricJob, "Metrics job name") cmd.Flags().StringVar(&opt.outputDir, "output-dir", opt.outputDir, "Directory where output.json file will be written (keep empty if you don't need to write output in file)") return cmd } func (opt *hookOptions) executeHook() error { var hook interface{} var executorPodName string if opt.backupSessionName != "" { // For backup hooks, BackupSession name will be provided. We will read the hooks from the underlying backup invoker. invoker, err := apis.ExtractBackupInvokerInfo(opt.stashClient, opt.invokerType, opt.invokerName, opt.namespace) if err != nil { return err } // We need to extract the hook only for the current target for _, targetInfo := range invoker.TargetsInfo { if targetInfo.Target != nil && targetInfo.Target.Ref.Kind == opt.targetKind && targetInfo.Target.Ref.Name == opt.targetName { hook = targetInfo.Hooks executorPodName, err = opt.getHookExecutorPodName(targetInfo.Target.Ref) if err != nil { return err } break } } } else if opt.restoreSessionName != "" { // For restore hooks, RestoreSession name will be provided. We will read the hooks from the RestoreSession. 
restoreSession, err := opt.stashClient.StashV1beta1().RestoreSessions(opt.namespace).Get(context.TODO(), opt.restoreSessionName, metav1.GetOptions{}) if err != nil { return err } hook = restoreSession.Spec.Hooks if restoreSession.Spec.Target != nil { executorPodName, err = opt.getHookExecutorPodName(restoreSession.Spec.Target.Ref) if err != nil { return err } } else { executorPodName = os.Getenv(apis.KeyPodName) } } else { return fmt.Errorf("can not execute hooks. Reason: Respective BackupSession or RestoreSession has not been specified") } // Execute the hooks return util.ExecuteHook(opt.config, hook, opt.hookType, executorPodName, opt.namespace) } func (opt *hookOptions) getHookExecutorPodName(targetRef v1beta1.TargetRef) (string, error) { switch targetRef.Kind { case apis.KindAppBinding: // For AppBinding, we will execute the hooks in the respective app pod return opt.getAppPodName(targetRef.Name) default: // For other types of target, hook will be executed where this process is running. return os.Getenv(apis.KeyPodName), nil } } func (opt *hookOptions) getAppPodName(appbindingName string) (string, error)
func (opt *hookOptions) handlePreTaskHookFailure(hookErr error) error { statusOpt := status.UpdateStatusOptions{ Config: opt.config, KubeClient: opt.kubeClient, StashClient: opt.stashClient, Namespace: opt.namespace, Metrics: opt.metricOpts, TargetRef: v1beta1.TargetRef{ Kind: opt.targetKind, Name: opt.targetName, }, } if opt.hookType == apis.PreBackupHook { backupOutput := &restic.BackupOutput{ HostBackupStats: []v1beta1.HostBackupStats{ { Hostname: opt.hostname, Phase: v1beta1.HostBackupFailed, Error: hookErr.Error(), }, }, } statusOpt.BackupSession = opt.backupSessionName // TODO: user real invoker invoker, err := apis.ExtractBackupInvokerInfo(opt.stashClient, opt.invokerType, opt.invokerName, opt.namespace) if err != nil { return err } for _, targetInfo := range invoker.TargetsInfo { if targetInfo.Target != nil && targetInfo.Target.Ref.Kind == opt.targetKind && targetInfo.Target.Ref.Name == opt.targetName { err := statusOpt.UpdatePostBackupStatus(backupOutput, invoker, targetInfo) if err != nil { hookErr = errors.NewAggregate([]error{hookErr, err}) } } } } else { // otherwise it is postRestore hook restoreOutput := &restic.RestoreOutput{ HostRestoreStats: []v1beta1.HostRestoreStats{ { Hostname: opt.hostname, Phase: v1beta1.HostRestoreFailed, Error: hookErr.Error(), }, }, } statusOpt.RestoreSession = opt.restoreSessionName err := statusOpt.UpdatePostRestoreStatus(restoreOutput) if err != nil { hookErr = errors.NewAggregate([]error{hookErr, err}) } } // return error so that the container fail return hookErr } func (opt *hookOptions) handlePostTaskHookFailure(hookErr error) error { if opt.hookType == apis.PostBackupHook { backupOutput := &restic.BackupOutput{ HostBackupStats: []v1beta1.HostBackupStats{ { Hostname: opt.hostname, Phase: v1beta1.HostBackupFailed, Error: hookErr.Error(), }, }, } err := backupOutput.WriteOutput(filepath.Join(opt.outputDir, restic.DefaultOutputFileName)) if err != nil { // failed to write output file. we should fail the container. 
hence, we are returning the error return errors.NewAggregate([]error{hookErr, err}) } } else { // otherwise it is postRestore hook restoreOutput := &restic.RestoreOutput{ HostRestoreStats: []v1beta1.HostRestoreStats{ { Hostname: opt.hostname, Phase: v1beta1.HostRestoreFailed, Error: hookErr.Error(), }, }, } err := restoreOutput.WriteOutput(filepath.Join(opt.outputDir, restic.DefaultOutputFileName)) if err != nil { // failed to write output file. we should fail the container. hence, are returning the error return errors.NewAggregate([]error{hookErr, err}) } } // don't return error. we don't want to fail this container. update-status Function will execute after it. // update-status Function will take care of updating BackupSession/RestoreSession status return nil }
{ // get the AppBinding appbinding, err := opt.appClient.AppcatalogV1alpha1().AppBindings(opt.namespace).Get(context.TODO(), appbindingName, metav1.GetOptions{}) if err != nil { return "", err } // AppBinding should have a Service in ClientConfig field. This service selects the app pod. We will execute the hooks in the app pod. if appbinding.Spec.ClientConfig.Service != nil { // there should be an endpoint with same name as the service which contains the name of the selected pods. endPoint, err := opt.kubeClient.CoreV1().Endpoints(opt.namespace).Get(context.TODO(), appbinding.Spec.ClientConfig.Service.Name, metav1.GetOptions{}) if err != nil { return "", err } for _, subSets := range endPoint.Subsets { // get pod from the ready addresses for _, readyAddrs := range subSets.Addresses { if readyAddrs.TargetRef != nil && readyAddrs.TargetRef.Kind == apis.KindPod { return readyAddrs.TargetRef.Name, nil } } // no pod found in ready addresses. now try in not ready addresses. for _, notReadyAddrs := range subSets.NotReadyAddresses { if notReadyAddrs.TargetRef != nil && notReadyAddrs.TargetRef.Kind == apis.KindPod { return notReadyAddrs.TargetRef.Name, nil } } } } return "", fmt.Errorf("no pod found for AppBinding %s/%s", opt.namespace, appbindingName) }
identifier_body
run_hook.go
/* Copyright The Stash Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cmds import ( "context" "fmt" "os" "path/filepath" "stash.appscode.dev/apimachinery/apis" "stash.appscode.dev/apimachinery/apis/stash/v1beta1" cs "stash.appscode.dev/apimachinery/client/clientset/versioned" "stash.appscode.dev/apimachinery/pkg/restic" "stash.appscode.dev/stash/pkg/status" "stash.appscode.dev/stash/pkg/util" "github.com/golang/glog" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "kmodules.xyz/client-go/meta" appcatalog_cs "kmodules.xyz/custom-resources/client/clientset/versioned" ) type hookOptions struct { masterURL string kubeConfigPath string namespace string hookType string backupSessionName string restoreSessionName string targetKind string targetName string invokerType string invokerName string hostname string config *rest.Config kubeClient kubernetes.Interface stashClient cs.Interface appClient appcatalog_cs.Interface metricOpts restic.MetricsOptions outputDir string } func NewCmdRunHook() *cobra.Command { opt := hookOptions{ masterURL: "", kubeConfigPath: "", namespace: meta.Namespace(), hostname: apis.DefaultHost, } cmd := &cobra.Command{ Use: "run-hook", Short: "Execute Backup or Restore Hooks", DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { config, err := clientcmd.BuildConfigFromFlags(opt.masterURL, 
opt.kubeConfigPath) if err != nil { glog.Fatalf("Could not get Kubernetes config: %s", err) return err } opt.config = config opt.kubeClient = kubernetes.NewForConfigOrDie(config) opt.stashClient = cs.NewForConfigOrDie(config) opt.appClient = appcatalog_cs.NewForConfigOrDie(config) err = opt.executeHook()
if err != nil { // For preBackup or preRestore hook failure, we will fail the container so that the task does to proceed to next step. // We will also update the BackupSession/RestoreSession status as the update-status Function will not execute. if opt.hookType == apis.PreBackupHook || opt.hookType == apis.PreRestoreHook { return opt.handlePreTaskHookFailure(err) } // For other postBackup or postRestore hook failure, we will simply write the failure output into the output directory. // The update-status Function will update the status of the BackupSession/RestoreSession return opt.handlePostTaskHookFailure(err) } return nil }, } cmd.Flags().StringVar(&opt.masterURL, "master", opt.masterURL, "The address of the Kubernetes API server (overrides any value in kubeconfig)") cmd.Flags().StringVar(&opt.kubeConfigPath, "kubeconfig", opt.kubeConfigPath, "Path to kubeconfig file with authorization information (the master location is set by the master flag).") cmd.Flags().StringVar(&opt.backupSessionName, "backupsession", opt.backupSessionName, "Name of the respective BackupSession object") cmd.Flags().StringVar(&opt.restoreSessionName, "restoresession", opt.restoreSessionName, "Name of the respective RestoreSession") cmd.Flags().StringVar(&opt.invokerType, "invoker-type", opt.invokerType, "Type of the backup invoker") cmd.Flags().StringVar(&opt.invokerName, "invoker-name", opt.invokerName, "Name of the respective backup invoker") cmd.Flags().StringVar(&opt.targetName, "target-name", opt.targetName, "Name of the Target") cmd.Flags().StringVar(&opt.targetKind, "target-kind", opt.targetName, "Kind of the Target") cmd.Flags().StringVar(&opt.hookType, "hook-type", opt.hookType, "Type of hook to execute") cmd.Flags().StringVar(&opt.hostname, "hostname", opt.hostname, "Name of the host that is being backed up or restored") cmd.Flags().BoolVar(&opt.metricOpts.Enabled, "metrics-enabled", opt.metricOpts.Enabled, "Specify whether to export Prometheus metrics") 
cmd.Flags().StringVar(&opt.metricOpts.PushgatewayURL, "metrics-pushgateway-url", opt.metricOpts.PushgatewayURL, "Pushgateway URL where the metrics will be pushed") cmd.Flags().StringSliceVar(&opt.metricOpts.Labels, "metrics-labels", opt.metricOpts.Labels, "Labels to apply in exported metrics") cmd.Flags().StringVar(&opt.metricOpts.JobName, "prom-job-name", StashDefaultMetricJob, "Metrics job name") cmd.Flags().StringVar(&opt.outputDir, "output-dir", opt.outputDir, "Directory where output.json file will be written (keep empty if you don't need to write output in file)") return cmd } func (opt *hookOptions) executeHook() error { var hook interface{} var executorPodName string if opt.backupSessionName != "" { // For backup hooks, BackupSession name will be provided. We will read the hooks from the underlying backup invoker. invoker, err := apis.ExtractBackupInvokerInfo(opt.stashClient, opt.invokerType, opt.invokerName, opt.namespace) if err != nil { return err } // We need to extract the hook only for the current target for _, targetInfo := range invoker.TargetsInfo { if targetInfo.Target != nil && targetInfo.Target.Ref.Kind == opt.targetKind && targetInfo.Target.Ref.Name == opt.targetName { hook = targetInfo.Hooks executorPodName, err = opt.getHookExecutorPodName(targetInfo.Target.Ref) if err != nil { return err } break } } } else if opt.restoreSessionName != "" { // For restore hooks, RestoreSession name will be provided. We will read the hooks from the RestoreSession. restoreSession, err := opt.stashClient.StashV1beta1().RestoreSessions(opt.namespace).Get(context.TODO(), opt.restoreSessionName, metav1.GetOptions{}) if err != nil { return err } hook = restoreSession.Spec.Hooks if restoreSession.Spec.Target != nil { executorPodName, err = opt.getHookExecutorPodName(restoreSession.Spec.Target.Ref) if err != nil { return err } } else { executorPodName = os.Getenv(apis.KeyPodName) } } else { return fmt.Errorf("can not execute hooks. 
Reason: Respective BackupSession or RestoreSession has not been specified") } // Execute the hooks return util.ExecuteHook(opt.config, hook, opt.hookType, executorPodName, opt.namespace) } func (opt *hookOptions) getHookExecutorPodName(targetRef v1beta1.TargetRef) (string, error) { switch targetRef.Kind { case apis.KindAppBinding: // For AppBinding, we will execute the hooks in the respective app pod return opt.getAppPodName(targetRef.Name) default: // For other types of target, hook will be executed where this process is running. return os.Getenv(apis.KeyPodName), nil } } func (opt *hookOptions) getAppPodName(appbindingName string) (string, error) { // get the AppBinding appbinding, err := opt.appClient.AppcatalogV1alpha1().AppBindings(opt.namespace).Get(context.TODO(), appbindingName, metav1.GetOptions{}) if err != nil { return "", err } // AppBinding should have a Service in ClientConfig field. This service selects the app pod. We will execute the hooks in the app pod. if appbinding.Spec.ClientConfig.Service != nil { // there should be an endpoint with same name as the service which contains the name of the selected pods. endPoint, err := opt.kubeClient.CoreV1().Endpoints(opt.namespace).Get(context.TODO(), appbinding.Spec.ClientConfig.Service.Name, metav1.GetOptions{}) if err != nil { return "", err } for _, subSets := range endPoint.Subsets { // get pod from the ready addresses for _, readyAddrs := range subSets.Addresses { if readyAddrs.TargetRef != nil && readyAddrs.TargetRef.Kind == apis.KindPod { return readyAddrs.TargetRef.Name, nil } } // no pod found in ready addresses. now try in not ready addresses. 
for _, notReadyAddrs := range subSets.NotReadyAddresses { if notReadyAddrs.TargetRef != nil && notReadyAddrs.TargetRef.Kind == apis.KindPod { return notReadyAddrs.TargetRef.Name, nil } } } } return "", fmt.Errorf("no pod found for AppBinding %s/%s", opt.namespace, appbindingName) } func (opt *hookOptions) handlePreTaskHookFailure(hookErr error) error { statusOpt := status.UpdateStatusOptions{ Config: opt.config, KubeClient: opt.kubeClient, StashClient: opt.stashClient, Namespace: opt.namespace, Metrics: opt.metricOpts, TargetRef: v1beta1.TargetRef{ Kind: opt.targetKind, Name: opt.targetName, }, } if opt.hookType == apis.PreBackupHook { backupOutput := &restic.BackupOutput{ HostBackupStats: []v1beta1.HostBackupStats{ { Hostname: opt.hostname, Phase: v1beta1.HostBackupFailed, Error: hookErr.Error(), }, }, } statusOpt.BackupSession = opt.backupSessionName // TODO: user real invoker invoker, err := apis.ExtractBackupInvokerInfo(opt.stashClient, opt.invokerType, opt.invokerName, opt.namespace) if err != nil { return err } for _, targetInfo := range invoker.TargetsInfo { if targetInfo.Target != nil && targetInfo.Target.Ref.Kind == opt.targetKind && targetInfo.Target.Ref.Name == opt.targetName { err := statusOpt.UpdatePostBackupStatus(backupOutput, invoker, targetInfo) if err != nil { hookErr = errors.NewAggregate([]error{hookErr, err}) } } } } else { // otherwise it is postRestore hook restoreOutput := &restic.RestoreOutput{ HostRestoreStats: []v1beta1.HostRestoreStats{ { Hostname: opt.hostname, Phase: v1beta1.HostRestoreFailed, Error: hookErr.Error(), }, }, } statusOpt.RestoreSession = opt.restoreSessionName err := statusOpt.UpdatePostRestoreStatus(restoreOutput) if err != nil { hookErr = errors.NewAggregate([]error{hookErr, err}) } } // return error so that the container fail return hookErr } func (opt *hookOptions) handlePostTaskHookFailure(hookErr error) error { if opt.hookType == apis.PostBackupHook { backupOutput := &restic.BackupOutput{ HostBackupStats: 
[]v1beta1.HostBackupStats{ { Hostname: opt.hostname, Phase: v1beta1.HostBackupFailed, Error: hookErr.Error(), }, }, } err := backupOutput.WriteOutput(filepath.Join(opt.outputDir, restic.DefaultOutputFileName)) if err != nil { // failed to write output file. we should fail the container. hence, we are returning the error return errors.NewAggregate([]error{hookErr, err}) } } else { // otherwise it is postRestore hook restoreOutput := &restic.RestoreOutput{ HostRestoreStats: []v1beta1.HostRestoreStats{ { Hostname: opt.hostname, Phase: v1beta1.HostRestoreFailed, Error: hookErr.Error(), }, }, } err := restoreOutput.WriteOutput(filepath.Join(opt.outputDir, restic.DefaultOutputFileName)) if err != nil { // failed to write output file. we should fail the container. hence, are returning the error return errors.NewAggregate([]error{hookErr, err}) } } // don't return error. we don't want to fail this container. update-status Function will execute after it. // update-status Function will take care of updating BackupSession/RestoreSession status return nil }
random_line_split
console.rs
// Copyright (c) 2019 - 2020 ESRLabs // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::{Event, EventTx, Name, State, TerminationReason, SETTINGS}; use anyhow::{anyhow, Context, Result}; use async_std::{io, net::TcpListener, path::PathBuf, prelude::*, sync, task}; use itertools::Itertools; use log::{debug, warn, *}; use prettytable::{format, Table}; use std::{iter, time}; /// Helptext displayed on the `help` command. The `dcon` tool parses this text /// and creates suggestions and completions. Ensure to a correct helptext when /// adding/removing/changing commands. const HELP: &str = "\ help: Display help text\n\ list: List all loaded images\n\ ps: List running instances\n\ shutdown: Stop the north runtime\n\ settings: Dump north configuration\n\ start: PATTERN Start containers matching PATTERN e.g 'start hello*'. Omit PATTERN to start all containers\n\ stop: PATTERN Stop all containers matching PATTERN. Omit PATTERN to stop all running containers\n\ uninstall: PATTERN: Unmount and remove all containers matching PATTERN\n\ update: Run update with provided ressources\n\ versions: Version list of installed applications"; pub async fn init(tx: &EventTx) -> Result<()> { let rx = serve().await?; let tx = tx.clone(); // Spawn a task that handles lines received on the debug port. 
task::spawn(async move { while let Ok((line, tx_reply)) = rx.recv().await { tx.send(Event::Console(line, tx_reply)).await; } }); Ok(()) } pub async fn process(state: &mut State, command: &str, reply: sync::Sender<String>) -> Result<()> { info!("Running \'{}\'", command); let mut commands = command.split_whitespace(); if let Some(cmd) = commands.next() { let args = commands.collect::<Vec<&str>>(); let start_timestamp = time::Instant::now(); match match cmd { "help" => help(), "list" => list(state).await, "ps" => ps(state).await, "settings" => settings(), "shutdown" => shutdown(state).await, "start" => start(state, &args).await, "stop" => stop(state, &args).await, "uninstall" => uninstall(state, &args).await, "update" => update(state, &args).await, "versions" => versions(state), _ => Err(anyhow!("Unknown command: {}", command)), } { Ok(mut r) => { r.push_str(&format!("Duration: {:?}\n", start_timestamp.elapsed())); reply.send(r).await } Err(e) => { let msg = format!("Failed to run: {} {:?}: {}\n", cmd, args, e); reply.send(msg).await } } } else { reply.send("Invalid command".into()).await } Ok(()) } /// Return the help text fn help() -> Result<String> { Ok(HELP.into()) } /// List all known containers instances and their state. async fn list(state: &State) -> Result<String>
/// List all running applications. #[cfg(all(not(target_os = "android"), not(target_os = "linux")))] async fn ps(state: &State) -> Result<String> { to_table( vec![vec![ "Name".to_string(), "Version".to_string(), "Uptime".to_string(), ]] .iter() .cloned() .chain( state .applications() .filter_map(|app| app.process_context().map(|p| (app, p))) .sorted_by_key(|(app, _)| app.name()) .map(|(app, context)| { vec![ app.name().to_string(), app.version().to_string(), format!("{:?}", context.uptime()), ] }), ), ) } /// List all running applications. #[cfg(any(target_os = "android", target_os = "linux"))] async fn ps(state: &State) -> Result<String> { use pretty_bytes::converter::convert; const PAGE_SIZE: usize = 4096; let mut result = vec![[ "Name", "Version", "PID", "Size", "Resident", "Shared", "Text", "Data", "Uptime", ] .iter() .map(ToString::to_string) .collect()]; for app in state.applications().sorted_by_key(|app| app.name()) { if let Some(ref context) = app.process_context() { let pid = context.process().pid(); let statm = procinfo::pid::statm(pid as i32)?; result.push(vec![ app.name().to_string(), app.version().to_string(), pid.to_string(), convert((statm.size * PAGE_SIZE) as f64), convert((statm.resident * PAGE_SIZE) as f64), convert((statm.share * PAGE_SIZE) as f64), convert((statm.text * PAGE_SIZE) as f64), convert((statm.data * PAGE_SIZE) as f64), format!("{:?}", context.uptime()), ]); } } to_table(result) } /// Start applications. If `args` is empty *all* known applications that /// are not in a running state are started. If a argument is supplied it /// is used to construct a Regex and all container (names) matching that /// Regex are attempted to be started. 
async fn start(state: &mut State, args: &[&str]) -> Result<String> { let re = arg_regex(args)?; let mut result = vec![vec![ "Name".to_string(), "Result".to_string(), "Duration".to_string(), ]]; let apps = state .applications() // Filter for not already running containers .filter(|app| app.process_context().is_none()) // Filter ressource container that are not startable .filter(|app| !app.container().is_resource_container()) // Filter matching container .filter(|app| re.is_match(app.name())) // Sort container by name .sorted_by_key(|app| app.name().clone()) .map(|app| app.name().clone()) .collect::<Vec<Name>>(); for app in &apps { let start = time::Instant::now(); match state.start(&app, 0).await { Ok(_) => result.push(vec![ app.to_string(), "Ok".to_string(), format!("{:?}", start.elapsed()), ]), Err(e) => result.push(vec![ app.to_string(), format!("Failed: {:?}", e), format!("{:?}", start.elapsed()), ]), } } to_table(result) } /// Dump settings fn settings() -> Result<String> { Ok(format!("{}", *SETTINGS)) } /// Stop one, some or all containers. See start for the argument handling. async fn stop(state: &mut State, args: &[&str]) -> Result<String> { let re = arg_regex(args)?; let mut result = vec![vec![ "Name".to_string(), "Result".to_string(), "Duration".to_string(), ]]; let apps = state .applications() .filter(|app| app.process_context().is_some()) .filter(|app| re.is_match(app.name())) .map(|app| app.name().clone()) .collect::<Vec<Name>>(); for app in &apps { let timeout = time::Duration::from_secs(10); let reason = TerminationReason::Stopped; let start = time::Instant::now(); match state.stop(&app, timeout, reason).await { Ok(()) => result.push(vec![ app.to_string(), "Ok".to_string(), format!("{:?}", start.elapsed()), ]), Err(e) => result.push(vec![ app.to_string(), e.to_string(), format!("{:?}", start.elapsed()), ]), } } to_table(result) } /// Umount and remove a containers. See `start` for the argument handling. /// The data directory is not removed. 
This needs discussion. async fn uninstall(state: &mut State, args: &[&str]) -> Result<String> { let re = arg_regex(args)?; let mut result = vec![vec!["Name".to_string(), "Result".to_string()]]; let to_uninstall = state .applications .values() .filter(|app| app.process_context().is_none()) .filter(|app| re.is_match(app.name())) .map(|app| app.name()) .cloned() .collect::<Vec<Name>>(); for app in &to_uninstall { match state.uninstall(&app).await { Ok(()) => result.push(vec![app.to_string(), "Ok".to_string()]), Err(e) => result.push(vec![app.to_string(), e.to_string()]), } } to_table(result) } /// Trigger the update module. async fn update(state: &mut State, args: &[&str]) -> Result<String> { if args.len() != 1 { return Err(anyhow!("Invalid arguments for update command")); } let dir = PathBuf::from(args[0]); if !dir.exists().await { let err = anyhow!("Update directory {} does not exists", dir.display()); Err(err) } else { let updates = crate::update::update(state, &dir).await?; let mut result = vec![vec![ "Name".to_string(), "From".to_string(), "To".to_string(), ]]; for update in &updates { result.push(vec![ update.0.to_string(), (update.1).0.to_string(), (update.1).1.to_string(), ]) } to_table(result) } } /// Send a shutdown command to the main loop. async fn shutdown(state: &mut State) -> Result<String> { let stop = stop(state, &[]).await?; state.tx().send(Event::Shutdown).await; Ok(stop) } /// Open a TCP socket and read lines terminated with `\n`. async fn serve() -> Result<sync::Receiver<(String, sync::Sender<String>)>> { let address = &SETTINGS.console_address; debug!("Starting console on {}", address); let listener = TcpListener::bind(address) .await .with_context(|| format!("Failed to open listener on {}", address))?; let (tx, rx) = sync::channel(1000); task::spawn(async move { let mut incoming = listener.incoming(); // Spawn a task for each incoming connection. 
while let Some(stream) = incoming.next().await { let (tx_reply, rx_reply) = sync::channel::<String>(10); if let Ok(stream) = stream { let peer = match stream.peer_addr() { Ok(peer) => peer, Err(e) => { warn!("Failed to get peer from console connection: {}", e); return; } }; debug!("Client {:?} connected", peer); let tx = tx.clone(); task::spawn(async move { let (reader, writer) = &mut (&stream, &stream); let reader = io::BufReader::new(reader); let mut lines = reader.lines(); while let Some(Ok(line)) = lines.next().await { let line = line.trim(); tx.send((line.into(), tx_reply.clone())).await; if let Ok(reply) = rx_reply.recv().await { if let Err(e) = writer.write_all(reply.as_bytes()).await { warn!("Error on console connection {:?}: {}", peer, e); break; } } } }); } } }); Ok(rx) } /// List versions of currently known containers and applications. fn versions(state: &mut State) -> Result<String> { let versions = state .applications() .map(|app| app.manifest()) .map(|manifest| { ( manifest.name.clone(), manifest.version.clone(), manifest.arch.clone(), ) }) .collect::<Vec<_>>(); serde_json::to_string(&versions).context("Failed to encode manifest to json") } /// Format something iterateable into a ascii table. The first row of the table input /// contains the column titles. The table cannot be empty. 
fn to_table<T: iter::IntoIterator<Item = I>, I: iter::IntoIterator<Item = S>, S: ToString>( table: T, ) -> Result<String> { let mut t = Table::new(); let format = prettytable::format::FormatBuilder::new() .column_separator('|') .separators(&[], format::LineSeparator::new('-', '+', '+', '+')) .padding(1, 1) .build(); t.set_format(format); let mut rows = table.into_iter(); let titles = rows.next().ok_or_else(|| anyhow!("Missing titles"))?.into(); t.set_titles(titles); for r in rows { t.add_row(r.into()); } let mut result = vec![]; t.print(&mut result).context("Failed to format table")?; String::from_utf8(result).context("Invalid table content") } fn arg_regex(args: &[&str]) -> Result<regex::Regex> { match args.len() { 1 => regex::Regex::new(args[0]) .with_context(|| format!("Invalid container name regex {}", args[0])), 0 => regex::Regex::new(".*") .with_context(|| format!("Invalid container name regex {}", args[0])), _ => Err(anyhow!("Arguments invalid. Use `start PATTERN`",)), } }
{ to_table( vec![vec![ "Name".to_string(), "Version".to_string(), "Running".to_string(), "Type".to_string(), ]] .iter() .cloned() .chain( state .applications() .sorted_by_key(|app| app.name()) .map(|app| { vec![ app.name().to_string(), app.version().to_string(), app.process_context() .map(|c| format!("Yes (pid: {})", c.process().pid())) .unwrap_or_else(|| "No".to_string()), if app.container().is_resource_container() { "resource" } else { "app" } .to_owned(), ] }), ), ) }
identifier_body
console.rs
// Copyright (c) 2019 - 2020 ESRLabs // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::{Event, EventTx, Name, State, TerminationReason, SETTINGS}; use anyhow::{anyhow, Context, Result}; use async_std::{io, net::TcpListener, path::PathBuf, prelude::*, sync, task}; use itertools::Itertools; use log::{debug, warn, *}; use prettytable::{format, Table}; use std::{iter, time}; /// Helptext displayed on the `help` command. The `dcon` tool parses this text /// and creates suggestions and completions. Ensure to a correct helptext when /// adding/removing/changing commands. const HELP: &str = "\ help: Display help text\n\ list: List all loaded images\n\ ps: List running instances\n\ shutdown: Stop the north runtime\n\ settings: Dump north configuration\n\ start: PATTERN Start containers matching PATTERN e.g 'start hello*'. Omit PATTERN to start all containers\n\ stop: PATTERN Stop all containers matching PATTERN. Omit PATTERN to stop all running containers\n\ uninstall: PATTERN: Unmount and remove all containers matching PATTERN\n\ update: Run update with provided ressources\n\ versions: Version list of installed applications"; pub async fn
(tx: &EventTx) -> Result<()> { let rx = serve().await?; let tx = tx.clone(); // Spawn a task that handles lines received on the debug port. task::spawn(async move { while let Ok((line, tx_reply)) = rx.recv().await { tx.send(Event::Console(line, tx_reply)).await; } }); Ok(()) } pub async fn process(state: &mut State, command: &str, reply: sync::Sender<String>) -> Result<()> { info!("Running \'{}\'", command); let mut commands = command.split_whitespace(); if let Some(cmd) = commands.next() { let args = commands.collect::<Vec<&str>>(); let start_timestamp = time::Instant::now(); match match cmd { "help" => help(), "list" => list(state).await, "ps" => ps(state).await, "settings" => settings(), "shutdown" => shutdown(state).await, "start" => start(state, &args).await, "stop" => stop(state, &args).await, "uninstall" => uninstall(state, &args).await, "update" => update(state, &args).await, "versions" => versions(state), _ => Err(anyhow!("Unknown command: {}", command)), } { Ok(mut r) => { r.push_str(&format!("Duration: {:?}\n", start_timestamp.elapsed())); reply.send(r).await } Err(e) => { let msg = format!("Failed to run: {} {:?}: {}\n", cmd, args, e); reply.send(msg).await } } } else { reply.send("Invalid command".into()).await } Ok(()) } /// Return the help text fn help() -> Result<String> { Ok(HELP.into()) } /// List all known containers instances and their state. async fn list(state: &State) -> Result<String> { to_table( vec![vec![ "Name".to_string(), "Version".to_string(), "Running".to_string(), "Type".to_string(), ]] .iter() .cloned() .chain( state .applications() .sorted_by_key(|app| app.name()) .map(|app| { vec![ app.name().to_string(), app.version().to_string(), app.process_context() .map(|c| format!("Yes (pid: {})", c.process().pid())) .unwrap_or_else(|| "No".to_string()), if app.container().is_resource_container() { "resource" } else { "app" } .to_owned(), ] }), ), ) } /// List all running applications. 
#[cfg(all(not(target_os = "android"), not(target_os = "linux")))] async fn ps(state: &State) -> Result<String> { to_table( vec![vec![ "Name".to_string(), "Version".to_string(), "Uptime".to_string(), ]] .iter() .cloned() .chain( state .applications() .filter_map(|app| app.process_context().map(|p| (app, p))) .sorted_by_key(|(app, _)| app.name()) .map(|(app, context)| { vec![ app.name().to_string(), app.version().to_string(), format!("{:?}", context.uptime()), ] }), ), ) } /// List all running applications. #[cfg(any(target_os = "android", target_os = "linux"))] async fn ps(state: &State) -> Result<String> { use pretty_bytes::converter::convert; const PAGE_SIZE: usize = 4096; let mut result = vec![[ "Name", "Version", "PID", "Size", "Resident", "Shared", "Text", "Data", "Uptime", ] .iter() .map(ToString::to_string) .collect()]; for app in state.applications().sorted_by_key(|app| app.name()) { if let Some(ref context) = app.process_context() { let pid = context.process().pid(); let statm = procinfo::pid::statm(pid as i32)?; result.push(vec![ app.name().to_string(), app.version().to_string(), pid.to_string(), convert((statm.size * PAGE_SIZE) as f64), convert((statm.resident * PAGE_SIZE) as f64), convert((statm.share * PAGE_SIZE) as f64), convert((statm.text * PAGE_SIZE) as f64), convert((statm.data * PAGE_SIZE) as f64), format!("{:?}", context.uptime()), ]); } } to_table(result) } /// Start applications. If `args` is empty *all* known applications that /// are not in a running state are started. If a argument is supplied it /// is used to construct a Regex and all container (names) matching that /// Regex are attempted to be started. 
async fn start(state: &mut State, args: &[&str]) -> Result<String> { let re = arg_regex(args)?; let mut result = vec![vec![ "Name".to_string(), "Result".to_string(), "Duration".to_string(), ]]; let apps = state .applications() // Filter for not already running containers .filter(|app| app.process_context().is_none()) // Filter ressource container that are not startable .filter(|app| !app.container().is_resource_container()) // Filter matching container .filter(|app| re.is_match(app.name())) // Sort container by name .sorted_by_key(|app| app.name().clone()) .map(|app| app.name().clone()) .collect::<Vec<Name>>(); for app in &apps { let start = time::Instant::now(); match state.start(&app, 0).await { Ok(_) => result.push(vec![ app.to_string(), "Ok".to_string(), format!("{:?}", start.elapsed()), ]), Err(e) => result.push(vec![ app.to_string(), format!("Failed: {:?}", e), format!("{:?}", start.elapsed()), ]), } } to_table(result) } /// Dump settings fn settings() -> Result<String> { Ok(format!("{}", *SETTINGS)) } /// Stop one, some or all containers. See start for the argument handling. async fn stop(state: &mut State, args: &[&str]) -> Result<String> { let re = arg_regex(args)?; let mut result = vec![vec![ "Name".to_string(), "Result".to_string(), "Duration".to_string(), ]]; let apps = state .applications() .filter(|app| app.process_context().is_some()) .filter(|app| re.is_match(app.name())) .map(|app| app.name().clone()) .collect::<Vec<Name>>(); for app in &apps { let timeout = time::Duration::from_secs(10); let reason = TerminationReason::Stopped; let start = time::Instant::now(); match state.stop(&app, timeout, reason).await { Ok(()) => result.push(vec![ app.to_string(), "Ok".to_string(), format!("{:?}", start.elapsed()), ]), Err(e) => result.push(vec![ app.to_string(), e.to_string(), format!("{:?}", start.elapsed()), ]), } } to_table(result) } /// Umount and remove a containers. See `start` for the argument handling. /// The data directory is not removed. 
This needs discussion. async fn uninstall(state: &mut State, args: &[&str]) -> Result<String> { let re = arg_regex(args)?; let mut result = vec![vec!["Name".to_string(), "Result".to_string()]]; let to_uninstall = state .applications .values() .filter(|app| app.process_context().is_none()) .filter(|app| re.is_match(app.name())) .map(|app| app.name()) .cloned() .collect::<Vec<Name>>(); for app in &to_uninstall { match state.uninstall(&app).await { Ok(()) => result.push(vec![app.to_string(), "Ok".to_string()]), Err(e) => result.push(vec![app.to_string(), e.to_string()]), } } to_table(result) } /// Trigger the update module. async fn update(state: &mut State, args: &[&str]) -> Result<String> { if args.len() != 1 { return Err(anyhow!("Invalid arguments for update command")); } let dir = PathBuf::from(args[0]); if !dir.exists().await { let err = anyhow!("Update directory {} does not exists", dir.display()); Err(err) } else { let updates = crate::update::update(state, &dir).await?; let mut result = vec![vec![ "Name".to_string(), "From".to_string(), "To".to_string(), ]]; for update in &updates { result.push(vec![ update.0.to_string(), (update.1).0.to_string(), (update.1).1.to_string(), ]) } to_table(result) } } /// Send a shutdown command to the main loop. async fn shutdown(state: &mut State) -> Result<String> { let stop = stop(state, &[]).await?; state.tx().send(Event::Shutdown).await; Ok(stop) } /// Open a TCP socket and read lines terminated with `\n`. async fn serve() -> Result<sync::Receiver<(String, sync::Sender<String>)>> { let address = &SETTINGS.console_address; debug!("Starting console on {}", address); let listener = TcpListener::bind(address) .await .with_context(|| format!("Failed to open listener on {}", address))?; let (tx, rx) = sync::channel(1000); task::spawn(async move { let mut incoming = listener.incoming(); // Spawn a task for each incoming connection. 
while let Some(stream) = incoming.next().await { let (tx_reply, rx_reply) = sync::channel::<String>(10); if let Ok(stream) = stream { let peer = match stream.peer_addr() { Ok(peer) => peer, Err(e) => { warn!("Failed to get peer from console connection: {}", e); return; } }; debug!("Client {:?} connected", peer); let tx = tx.clone(); task::spawn(async move { let (reader, writer) = &mut (&stream, &stream); let reader = io::BufReader::new(reader); let mut lines = reader.lines(); while let Some(Ok(line)) = lines.next().await { let line = line.trim(); tx.send((line.into(), tx_reply.clone())).await; if let Ok(reply) = rx_reply.recv().await { if let Err(e) = writer.write_all(reply.as_bytes()).await { warn!("Error on console connection {:?}: {}", peer, e); break; } } } }); } } }); Ok(rx) } /// List versions of currently known containers and applications. fn versions(state: &mut State) -> Result<String> { let versions = state .applications() .map(|app| app.manifest()) .map(|manifest| { ( manifest.name.clone(), manifest.version.clone(), manifest.arch.clone(), ) }) .collect::<Vec<_>>(); serde_json::to_string(&versions).context("Failed to encode manifest to json") } /// Format something iterateable into a ascii table. The first row of the table input /// contains the column titles. The table cannot be empty. 
fn to_table<T: iter::IntoIterator<Item = I>, I: iter::IntoIterator<Item = S>, S: ToString>( table: T, ) -> Result<String> { let mut t = Table::new(); let format = prettytable::format::FormatBuilder::new() .column_separator('|') .separators(&[], format::LineSeparator::new('-', '+', '+', '+')) .padding(1, 1) .build(); t.set_format(format); let mut rows = table.into_iter(); let titles = rows.next().ok_or_else(|| anyhow!("Missing titles"))?.into(); t.set_titles(titles); for r in rows { t.add_row(r.into()); } let mut result = vec![]; t.print(&mut result).context("Failed to format table")?; String::from_utf8(result).context("Invalid table content") } fn arg_regex(args: &[&str]) -> Result<regex::Regex> { match args.len() { 1 => regex::Regex::new(args[0]) .with_context(|| format!("Invalid container name regex {}", args[0])), 0 => regex::Regex::new(".*") .with_context(|| format!("Invalid container name regex {}", args[0])), _ => Err(anyhow!("Arguments invalid. Use `start PATTERN`",)), } }
init
identifier_name
console.rs
// Copyright (c) 2019 - 2020 ESRLabs // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::{Event, EventTx, Name, State, TerminationReason, SETTINGS}; use anyhow::{anyhow, Context, Result}; use async_std::{io, net::TcpListener, path::PathBuf, prelude::*, sync, task}; use itertools::Itertools; use log::{debug, warn, *}; use prettytable::{format, Table}; use std::{iter, time}; /// Helptext displayed on the `help` command. The `dcon` tool parses this text /// and creates suggestions and completions. Ensure to a correct helptext when /// adding/removing/changing commands. const HELP: &str = "\ help: Display help text\n\ list: List all loaded images\n\ ps: List running instances\n\ shutdown: Stop the north runtime\n\ settings: Dump north configuration\n\ start: PATTERN Start containers matching PATTERN e.g 'start hello*'. Omit PATTERN to start all containers\n\ stop: PATTERN Stop all containers matching PATTERN. Omit PATTERN to stop all running containers\n\ uninstall: PATTERN: Unmount and remove all containers matching PATTERN\n\ update: Run update with provided ressources\n\ versions: Version list of installed applications"; pub async fn init(tx: &EventTx) -> Result<()> { let rx = serve().await?; let tx = tx.clone(); // Spawn a task that handles lines received on the debug port. 
task::spawn(async move { while let Ok((line, tx_reply)) = rx.recv().await { tx.send(Event::Console(line, tx_reply)).await; } }); Ok(()) } pub async fn process(state: &mut State, command: &str, reply: sync::Sender<String>) -> Result<()> { info!("Running \'{}\'", command); let mut commands = command.split_whitespace(); if let Some(cmd) = commands.next() { let args = commands.collect::<Vec<&str>>(); let start_timestamp = time::Instant::now(); match match cmd { "help" => help(), "list" => list(state).await, "ps" => ps(state).await, "settings" => settings(), "shutdown" => shutdown(state).await, "start" => start(state, &args).await, "stop" => stop(state, &args).await, "uninstall" => uninstall(state, &args).await, "update" => update(state, &args).await, "versions" => versions(state), _ => Err(anyhow!("Unknown command: {}", command)), } { Ok(mut r) => { r.push_str(&format!("Duration: {:?}\n", start_timestamp.elapsed())); reply.send(r).await } Err(e) => { let msg = format!("Failed to run: {} {:?}: {}\n", cmd, args, e); reply.send(msg).await } } } else { reply.send("Invalid command".into()).await } Ok(()) } /// Return the help text fn help() -> Result<String> { Ok(HELP.into()) } /// List all known containers instances and their state. async fn list(state: &State) -> Result<String> { to_table( vec![vec![ "Name".to_string(), "Version".to_string(), "Running".to_string(), "Type".to_string(), ]] .iter() .cloned() .chain( state .applications() .sorted_by_key(|app| app.name()) .map(|app| { vec![ app.name().to_string(), app.version().to_string(), app.process_context() .map(|c| format!("Yes (pid: {})", c.process().pid())) .unwrap_or_else(|| "No".to_string()), if app.container().is_resource_container() { "resource" } else { "app" } .to_owned(), ] }), ), ) } /// List all running applications. 
#[cfg(all(not(target_os = "android"), not(target_os = "linux")))] async fn ps(state: &State) -> Result<String> { to_table( vec![vec![ "Name".to_string(), "Version".to_string(), "Uptime".to_string(), ]] .iter() .cloned() .chain( state .applications() .filter_map(|app| app.process_context().map(|p| (app, p))) .sorted_by_key(|(app, _)| app.name()) .map(|(app, context)| { vec![ app.name().to_string(), app.version().to_string(), format!("{:?}", context.uptime()), ] }), ), ) } /// List all running applications. #[cfg(any(target_os = "android", target_os = "linux"))] async fn ps(state: &State) -> Result<String> { use pretty_bytes::converter::convert; const PAGE_SIZE: usize = 4096; let mut result = vec![[ "Name", "Version", "PID", "Size", "Resident", "Shared", "Text", "Data", "Uptime", ] .iter() .map(ToString::to_string) .collect()]; for app in state.applications().sorted_by_key(|app| app.name()) { if let Some(ref context) = app.process_context() { let pid = context.process().pid(); let statm = procinfo::pid::statm(pid as i32)?; result.push(vec![ app.name().to_string(), app.version().to_string(), pid.to_string(), convert((statm.size * PAGE_SIZE) as f64), convert((statm.resident * PAGE_SIZE) as f64), convert((statm.share * PAGE_SIZE) as f64), convert((statm.text * PAGE_SIZE) as f64), convert((statm.data * PAGE_SIZE) as f64), format!("{:?}", context.uptime()), ]); } } to_table(result)
/// Start applications. If `args` is empty *all* known applications that /// are not in a running state are started. If a argument is supplied it /// is used to construct a Regex and all container (names) matching that /// Regex are attempted to be started. async fn start(state: &mut State, args: &[&str]) -> Result<String> { let re = arg_regex(args)?; let mut result = vec![vec![ "Name".to_string(), "Result".to_string(), "Duration".to_string(), ]]; let apps = state .applications() // Filter for not already running containers .filter(|app| app.process_context().is_none()) // Filter ressource container that are not startable .filter(|app| !app.container().is_resource_container()) // Filter matching container .filter(|app| re.is_match(app.name())) // Sort container by name .sorted_by_key(|app| app.name().clone()) .map(|app| app.name().clone()) .collect::<Vec<Name>>(); for app in &apps { let start = time::Instant::now(); match state.start(&app, 0).await { Ok(_) => result.push(vec![ app.to_string(), "Ok".to_string(), format!("{:?}", start.elapsed()), ]), Err(e) => result.push(vec![ app.to_string(), format!("Failed: {:?}", e), format!("{:?}", start.elapsed()), ]), } } to_table(result) } /// Dump settings fn settings() -> Result<String> { Ok(format!("{}", *SETTINGS)) } /// Stop one, some or all containers. See start for the argument handling. 
async fn stop(state: &mut State, args: &[&str]) -> Result<String> { let re = arg_regex(args)?; let mut result = vec![vec![ "Name".to_string(), "Result".to_string(), "Duration".to_string(), ]]; let apps = state .applications() .filter(|app| app.process_context().is_some()) .filter(|app| re.is_match(app.name())) .map(|app| app.name().clone()) .collect::<Vec<Name>>(); for app in &apps { let timeout = time::Duration::from_secs(10); let reason = TerminationReason::Stopped; let start = time::Instant::now(); match state.stop(&app, timeout, reason).await { Ok(()) => result.push(vec![ app.to_string(), "Ok".to_string(), format!("{:?}", start.elapsed()), ]), Err(e) => result.push(vec![ app.to_string(), e.to_string(), format!("{:?}", start.elapsed()), ]), } } to_table(result) } /// Umount and remove a containers. See `start` for the argument handling. /// The data directory is not removed. This needs discussion. async fn uninstall(state: &mut State, args: &[&str]) -> Result<String> { let re = arg_regex(args)?; let mut result = vec![vec!["Name".to_string(), "Result".to_string()]]; let to_uninstall = state .applications .values() .filter(|app| app.process_context().is_none()) .filter(|app| re.is_match(app.name())) .map(|app| app.name()) .cloned() .collect::<Vec<Name>>(); for app in &to_uninstall { match state.uninstall(&app).await { Ok(()) => result.push(vec![app.to_string(), "Ok".to_string()]), Err(e) => result.push(vec![app.to_string(), e.to_string()]), } } to_table(result) } /// Trigger the update module. 
async fn update(state: &mut State, args: &[&str]) -> Result<String> { if args.len() != 1 { return Err(anyhow!("Invalid arguments for update command")); } let dir = PathBuf::from(args[0]); if !dir.exists().await { let err = anyhow!("Update directory {} does not exists", dir.display()); Err(err) } else { let updates = crate::update::update(state, &dir).await?; let mut result = vec![vec![ "Name".to_string(), "From".to_string(), "To".to_string(), ]]; for update in &updates { result.push(vec![ update.0.to_string(), (update.1).0.to_string(), (update.1).1.to_string(), ]) } to_table(result) } } /// Send a shutdown command to the main loop. async fn shutdown(state: &mut State) -> Result<String> { let stop = stop(state, &[]).await?; state.tx().send(Event::Shutdown).await; Ok(stop) } /// Open a TCP socket and read lines terminated with `\n`. async fn serve() -> Result<sync::Receiver<(String, sync::Sender<String>)>> { let address = &SETTINGS.console_address; debug!("Starting console on {}", address); let listener = TcpListener::bind(address) .await .with_context(|| format!("Failed to open listener on {}", address))?; let (tx, rx) = sync::channel(1000); task::spawn(async move { let mut incoming = listener.incoming(); // Spawn a task for each incoming connection. 
while let Some(stream) = incoming.next().await { let (tx_reply, rx_reply) = sync::channel::<String>(10); if let Ok(stream) = stream { let peer = match stream.peer_addr() { Ok(peer) => peer, Err(e) => { warn!("Failed to get peer from console connection: {}", e); return; } }; debug!("Client {:?} connected", peer); let tx = tx.clone(); task::spawn(async move { let (reader, writer) = &mut (&stream, &stream); let reader = io::BufReader::new(reader); let mut lines = reader.lines(); while let Some(Ok(line)) = lines.next().await { let line = line.trim(); tx.send((line.into(), tx_reply.clone())).await; if let Ok(reply) = rx_reply.recv().await { if let Err(e) = writer.write_all(reply.as_bytes()).await { warn!("Error on console connection {:?}: {}", peer, e); break; } } } }); } } }); Ok(rx) } /// List versions of currently known containers and applications. fn versions(state: &mut State) -> Result<String> { let versions = state .applications() .map(|app| app.manifest()) .map(|manifest| { ( manifest.name.clone(), manifest.version.clone(), manifest.arch.clone(), ) }) .collect::<Vec<_>>(); serde_json::to_string(&versions).context("Failed to encode manifest to json") } /// Format something iterateable into a ascii table. The first row of the table input /// contains the column titles. The table cannot be empty. 
fn to_table<T: iter::IntoIterator<Item = I>, I: iter::IntoIterator<Item = S>, S: ToString>( table: T, ) -> Result<String> { let mut t = Table::new(); let format = prettytable::format::FormatBuilder::new() .column_separator('|') .separators(&[], format::LineSeparator::new('-', '+', '+', '+')) .padding(1, 1) .build(); t.set_format(format); let mut rows = table.into_iter(); let titles = rows.next().ok_or_else(|| anyhow!("Missing titles"))?.into(); t.set_titles(titles); for r in rows { t.add_row(r.into()); } let mut result = vec![]; t.print(&mut result).context("Failed to format table")?; String::from_utf8(result).context("Invalid table content") } fn arg_regex(args: &[&str]) -> Result<regex::Regex> { match args.len() { 1 => regex::Regex::new(args[0]) .with_context(|| format!("Invalid container name regex {}", args[0])), 0 => regex::Regex::new(".*") .with_context(|| format!("Invalid container name regex {}", args[0])), _ => Err(anyhow!("Arguments invalid. Use `start PATTERN`",)), } }
}
random_line_split
basic.rs
#[test] fn basics() { let immutable = 5; let mut mutable = 6; mutable = 2; let spaces = " "; // auto type deduction let spaces = spaces.len(); // "shadowing" new variable with same name const CONSTANT: u32 = 100_000; // constant (type must be specified) let tuple: (i32, f64, u8) = (500, 6.4, 1); let (t0, t1, t2) = tuple; assert_eq!(t0, tuple.0); assert_eq!(t1, tuple.1); assert_eq!(t2, tuple.2); let array: [i32; 5] = [3, 3, 3, 3, 3]; assert_eq!([3; 5], array); let x = array[2]; // runtime error if invalid array acces (only in debug mode) if 1 == 1 { // if statment // always has body with curly brackets } let if_expression = if 5 == 3 { 5 } else { 6 }; let loop_result = loop { println!("infinite loop"); break 5; }; assert_eq!(loop_result, 5); for element in array.iter() { println!("the value is: {}", element); } // countdown for number in (1..4).rev() { println!("{}!", number); } } fn expression_return() -> i32 { 69 * 420 } #[test] fn ownership_and_borrowing() { let primitive = 423; // lives on stack let string_literal: &'static str = "hello"; // reference to memory in binary let string = String::from("hello"); // lives on heap (~ smart pointer) { let temp = 5; let also_temp = String::from("ey"); } // variable leaves scope -> gets "dropped" let original = 5; let copy = original; // is copied, because primitive let original = String::from("I like to move it, move it"); let move_destination = original; // moved in new memory; `original` becomes invalid let copy = original.clone(); // copied, because has `copy` trait let value = 5; let reference = &value; assert_eq!(value, *reference); let other_value = 10; let mutable_reference = &mut other_value; *mutable_reference = 11; assert_eq!(value, *mutable_reference); // borrowing rules let mut original = String::new(); let mutable_reference = &mut original; let reference = &original; // can't borrow twice at a time let mut original = String::new(); let reference1 = &original; let reference2 = &original; // slices let string = 
String::from("Hello World"); let string_slice = &string[0..5]; // must be continuous part of memory assert_eq!("Hello", string_slice); let array = [1, 2, 3, 4, 5]; let array_slice = &array[1..3]; assert_eq!([2, 3, 4], array_slice); // passing function arguments let string = String::new(); take_reference(&string); take_mut_ref(&mut string); string = take_and_give_back_ownership(string); take_ownership(string); } // function arguments fn take_ownership(arg: String) { println!("Taken ownership of `{}`", arg); } fn take_and_give_back_ownership(mut arg: String) -> String { arg.push_str("shesh"); arg } fn take_reference(arg: &String) { println!("Length of string is {}", arg.len()); } fn take_mut_ref(arg: &mut String) { arg.push_str("soos"); } fn first_word_pos(s: &String) -> usize { let bytes = s.as_bytes(); for (i, &item) in bytes.iter().enumerate() { if item == b' ' { return i; } } s.len() } fn first_word(s: &str) -> &str { // str is the string-slice type let bytes = s.as_bytes(); for (i, &item) in bytes.iter().enumerate() { if item == b' ' { return &s[0..i]; } } &s[..] 
} // structs #[derive(Debug)] // for debug printing struct User { username: String, // field email: String, sign_in_count: u64, active: bool, } fn build_user(email: String, username: String) -> User { User { email: email, username: username, active: true, sign_in_count: 1, } } fn code_holder_3() { let mut user1 = User { email: String::from("someone@example.com"), username: String::from("someusername123"), active: true, sign_in_count: 69, }; user1.email = String::from("diffrentemail@gmail.com"); let user2 = User { email: String::from("lel@mail.com"), username: String::from("diffrentName"), ..user1 // copies the other values of user1 }; // println!("user is {}", user2); error because User doesn't impelement 'std::fmt::Display' println!("user is {:?}", user2); // using output format 'Debug' } // tuple struct struct Color(i32, i32, i32); // is its own type struct Point(i32, i32, i32); struct Rect { width: u32, height: u32, } impl Rect { // method (because takes self) fn area(&self) -> u32 { self.width * self.height } fn can_hold(&self, other: &Rect) -> bool { self.width > other.width && self.height > other.height } // associated function (bacause doesn't take self) -> Rect::square() fn square(size: u32) -> Rect { Rect { width: size, height: size, } } } // Enums enum IpAddrKind { // is a custom data type V4, // variant of enum V6, } struct IpAddrBad { kind: IpAddrKind, address: String, } enum IpAddr { // better way, also diffrent data types possible V4(u8, u8, u8, u8), V6(String), } fn route(ip_kind: IpAddrKind) {} fn code_holder_4() { let four = IpAddrKind::V4; // are of same type let six = IpAddrKind::V6; route(IpAddrKind::V4); route(IpAddrKind::V6); let home = IpAddr::V4(127, 0, 0, 1); let loopback = IpAddr::V6(String::from("::1")); } enum Message { Quit, Move { x: i32, y: i32 }, // struct Write(String), // tuple struct ChangeColor(i32, i32, i32), // tuple struct } impl Message { fn call(&self) { // code } } // option enum CustomOption<T> { // replaces 'null'-value 
Some(T), None, } fn code_block_5() { let some_number = Some(5); // option let some_string = Some("a string"); let absent_number: Option<i32> = None; } // match: control flow operator #[derive(Debug)] enum UsState { Alabama, Alaska, } enum Coin { Penny, Nickel, Dime, Quarter(UsState), } fn value_in_cents(coin: Coin) -> u8 { match coin { Coin::Penny => 1, Coin::Nickel => 5, Coin::Dime => 10, Coin::Quarter(state) => { println!("State quarter from {:?}!", state); 25 } } } fn plus_one(x: Option<i32>) -> Option<i32> { match x { None => None, Some(i) => Some(i + 1), } } fn matches_are_exhaustive(val: u8) { match val { 1 => println!("one"), 2 => println!("two"), 5 => println!("five"), 7 => println!("seven"), _ => (), } } // if let fn if_let() { let some_u8_value = Some(0u8); match some_u8_value { Some(3) => println!("three"), _ => (), } // equivalent to if let Some(3) = some_u8_value { println!("three"); } } // collections fn code_holder_6() { let v: Vec<i32> = Vec::new(); let v = vec![1, 2, 3]; let mut v = Vec::new(); v.push(5); v.push(6); let v = vec![1, 2, 3, 4, 5]; // two ways to access vector let third: &i32 = &v[2]; // panics if fails match v.get(2) { // doesn't panic Some(third) => (), None => (), } // iterating let mut v = vec![100, 32, 57]; for i in &v { println!("{}", i); } for i in &mut v { *i += 50; } // multiple type vector enum SpreadsheetCell { Int(i32), Float(f64), Text(String), } let row = vec![ SpreadsheetCell::Int(3), SpreadsheetCell::Text(String::from("blue")), SpreadsheetCell::Float(10.12), ]; } // strings // str is implemented in the core language and String is in the standard library fn code_holder_7() { let mut s = String::new(); let data = "inital contents"; // implements 'Display' trait let mut s = data.to_string(); s.push_str("bar"); s.push('a'); let s1 = String::from("Hello "); let s2 = String::from("World"); let s3 = s1 + &s2; // s1 was moved! 
(fn add(self, s: &str) -> String) let s1 = String::from("tic"); let s2 = String::from("tac"); let s3 = String::from("toe"); let s = format!("{}-{}-{}", s1, s2, s3); // you can't index into string, because of ambigueties and other reasons -> be more percise // slices... not so appropriate let hello = "Здравствуйте"; let s = &hello[0..4]; // 4 bytes -> "Зд" // best way: chars for c in "नमस्ते".chars() { println!("{}", c); } } // Hash Maps fn code_holder_8() { use std::collections::HashMap; let mut scores = HashMap::new(); scores.insert(String::from("Blue"), 10); scores.insert(String::from("Yellow"), 50); // morphing collections let teams = vec![String::from("Blue"), String::from("Yellow")]; let inital_scores = vec![10, 50]; let scores: HashMap<_, _> = teams.iter().zip(inital_scores.iter()).collect(); } // errors fn code_holder_9() { // panicing! // If rust panics before it quite it's starts unwinding (stack is cleared up), which takes a lot of time -> alternative abort (in Cargo.toml: panic = 'abort') panic!("crash and burn"); // Result use std::fs::File; use std::io::ErrorKind; use std::io::Read; let f = File::open("hello.txt"); let f = match f { Ok(file) => file, Err(error) => match error.kind() { ErrorKind::NotFound => match File::create("hello.txt") { Ok(fc) => fc, Err(e) => panic!("Problem creating the file: {:?}", e), }, other_error => panic!("Problem opening the file: {:?}", other_error), }, }; let f = File::open("hello.txt").unwrap(); // returns value if okay, panics otherwise let f = File::open("hello.txt").expect_err("Own error message"); // same as unwrap() just with custom error message // propagating error fn read_username_from_file_verbose() -> Result<String, std::io::Error> { // verbose way let f = File::open("hello.txt"); let mut f = match f { Ok(file) => file, Err(e) => return Err(e), }; let mut s = String::new(); match f.read_to_string(&mut s) { Ok(_) => Ok(s), Err(e) => Err(e), } } fn read_username_from_file() -> Result<String, std::io::Error> { // 
better way with ? operator let mut f = File::open("hello.txt")?; let mut s = String::new(); f.read_to_string(&mut s)?; // if ok expression has value, if Err then function returns with error Ok(s) } } // generics (similar to C++ typenames/templates) enum own_Result<T, E> { Ok(T), Err(E), } struct Point1<T> { x: T, y: T, } impl<T> Point1<T> { fn x(&self) -> &T { &self.x } } impl Point1<f32> { fn distance_from_origin(&self) -> f32 { (self.x.powi(2) + self.y.powi(2)).sqrt() } } struct Point2<T, U> { x: T, y: U, } impl<T, U> Point2<T, U> { fn mixup<V, W>(self, other: Point2<V, W>) -> Point2<T, W> { Point2 { x: self.x, y: other.y, } } } // traits trait Summarizable { fn summarize_author(&self) -> String; fn summarize(&self) -> String { format!("(Read more from {}...)", self.summarize_author()) } } struct NewsArticle { headline: String, location: String, author: String, content: String, } impl Summarizable for NewsArticle { fn summarize_author(&self) -> String { format!("{}", self.author) } fn summarize(&self) -> String { format!( "{}, by {} ({})", self.headline, self.summarize_author(), self.location ) } } struct Tweet { username: String, content: String, reply: bool, retweet: bool, } impl Summarizable for Tweet { fn summarize_author(&self) -> String { format!("@{}", self.username) } fn summarize(&self) -> String { format!("{}: {}", self.summarize_author(), self.content) } } // traits as parameters/ Trait bounds fn notify(item: impl Summarizable) { println!("Breaking news! {}", item.summarize()); } // ^ syntactic sugar for: // fn notify<T: Summarizable>(item: T) { // println!("Breaking news! 
{}", item.summarize()); // } fn notfiy<T: Summarizable + std::fmt::Display>(item1: T) {} // when many traits are used -> prefer 'where'-clauses to not clutter the funciton definition fn some_function<T, U>(t: T, u: U) -> i32 where T: std::fmt::Display + Clone, U: Clone + std::fmt::Debug, { 4 } fn returns_summarizable() -> impl Summarizable { Tweet { username: String::from("horse_ebooks"), content: String::from("of cource, as you probablay already know people"), reply: false, retweet: false, } } fn largest<T: std::cmp::PartialOrd + Copy>(list: &[T]) -> T { let mut largest = list[0]; for &item in list.iter() { if item > largest { largest = item; } } largest } // Trait bounds to conditionally implement Methods struct Pair<T> { x: T, y: T, } impl<T> Pair<T> { fn new(x: T, y: T) -> Self { Self { x, y } } } // conditional implementation (only if traits are Display + PartialOrd) impl<T: std::fmt::Display + std::cmp::PartialOrd> Pair<T> { fn cmp_disply(&self) { if self.x >= self.y { println!("The largest member is x = {}", self.x); } else { println!("The largest member is y = {}", self.y); } } } // implement a trait if the type implements another train --- alias blanket implementations // impl<T: std::fmt::Display> ToString for T { // if T already implements Display, than it also implements ToString // } // lifetimes // lifetimes gurantee, that references are still valid, when used. // Most of the time they are implicitly inferred. If they can't, they have to be explicitly specified // &i32; a reference // &'a i32; a reference with the explicit lifetime "'a" // &'a mut i32; a mutable reference with the explicit lifetime "'a" fn longest<'a>(x: &'a str, y: &'a str) -> &'a str { // now the compiler knows, how long the return value can live. 
(as long as the smaller lifetime of x or y) if x.len() > y.len() { x } else { y } } struct ImportantExcerpt<'a> { part: &'a str, // if struct holds reference, a explicit lifetime is required } impl<'a> ImportantExcerpt<'a> { fn level(&self) -> i32 { 3 } } // static lifetimes (references live for entire duration of program)... applies to all string ltierals fn code_holder_10() { let s: &'static str = "I have a static lifetime."; } // all generics together fn longest_with_an_announcement<'a, T>(x: &'a str, y: &'a str, ann: T) -> &'a str where T: std::fmt::Display, { println!("Announcement! {}", ann); if x.len() > y.len() { x } else { y } } // closures fn code_holder_11() { // types are automatically inferred (but can be explicitly specified) let some_closure = |arg| { println!("this is the argument: {}", arg); }; let minimalist_closure = |x| x; // returns itself some_closure(5); minimalist_closure("lel"); // pattern: memorization / lazy evaluation struct NoArgsCacher<T> where T: Fn() -> u32, { calculation: T, value: Option<u32>, } impl<T> NoArgsCacher<T> where T: Fn() -> u32, { fn new(calculation: T) -> NoArgsCacher<T> { NoArgsCacher { calculation, value: None, } } fn value(&mut self) -> u32 { match self.value { Some(v) => v, None => { let v = (self.calculation)(); self.value = Some(v); v } } } } use std::thread; use std::time::Duration; let mut expensive_result = NoArgsCacher::new(|| { println!("performing expensive calculation..."); thread::sleep(Duration::from_secs(2)); 420 }); // TODO: create better Cacher with generics and a hash-table (args-result pairs) } // iterators // zero-cost abstraction -> are very fast USE THEM! 
fn code_holder_12() { let v1 = vec![1, 2, 3]; let v1_iter = v1.iter(); for val in v1_iter { println!("Got: {}", val); } pub trait CustomIteratorTrait { type Item; // associated type fn next(&mut self) -> Option<Self::Item>; } #[test] fn iterator_demonstration() { let v1 = vec![1, 2, 3]; let mut v1_iter = v1.iter(); assert_eq!(v1_iter.next(), Some(&1)); assert_eq!(v1_iter.next(), Some(&2)); assert_eq!(v1_iter.next(), Some(&3)); assert_eq!(v1_iter.next(), None); } #[test] fn iterator_sum() { let v1 = vec![1, 2, 3]; let v1_iter = v1.iter(); let total: i32 = v1_iter.sum(); // iter has been consumed (moved) -> cannot be moved any more } #[test] fn iterator_map() { let v1: Vec<i32> = vec![1, 2, 3]; let v2: Vec<_> = v1.iter().map(|x| x + 1).collect(); // collect() must be called because iterators are lazy assert_eq!(v2, vec![2, 3, 4]); } struct Shoe { size: u32, style: String, } fn shoes_in_my_size(shoes: Vec<Shoe>, shoe_size: u32) -> Vec<Shoe> { shoes.into_iter().filter(|s| s.size == shoe_size).collect() } #[test] fn filter_by_size() { let shoes = vec![ Shoe { size: 10, style: String::from("sneaker"), }, Shoe { size: 13, style: String::from("sandal"), }, Shoe { size: 10, style: String::from("boot"), }, ]; let in_my_size = shoes_in_my_size(shoes, 10); assert_eq!( in_my_size, vec![ Shoe { size: 10, style: String::from("sneaker") }, Shoe { size: 10, style: String::from("boot") }, ] ); } // own iterator struct Counter { count: u32, } impl Counter { fn new() -> Counter { Counter { count: 0 } } } impl Iterator for Counter { type Item = u32; fn next(&mut self) -> Option<Self::Item> { self.count += 1; if self.count < 6 { Some(self.count) } else {
} } #[test] fn using_other_iterator_trait_methods() { let sum: u32 = Counter::new() .zip(Counter::new().skip(1)) .map(|(a, b)| a * b) .filter(|x| x % 3 == 0) .sum(); assert_eq!(18, sum); } } // cargo and creates // //! Another documentation style, which is at the top of the page, generally in the crate root // //! re-exports are listed in documentation -> expose them /// Documentation comment (3 slashes) /// will be used to generate HTML documentation (cargo doc --open) -> runs rustdoc /// supports MarkDown! /// Some commonly used headings /// # Examples /// # Panics /// # Errors (when it returns Result) /// # Safety (if unsafe to call) /// ''' /// assert_eq!(true, true); /// ''' /// this code example will be run as a test with (cargo test)!!! AWESOME fn documented_function() {} // smart pointers // Vec and String are smart pointers, because they point at data and have some additional metadata // allocate data on heap // Box<T> for storing data on heap (no performnace overhead) // usages: dynamic memory (unknown size), transfer ownership without copying, value that implements a trait but the type doesn't matter fn code_holder_13() { let b = Box::new(5); // recursive types and Cons List // enum List { idea // Cons(i32, List), // Nil, // } // let list = Cons(1, Cons(2, Cons(3, Nil))); enum List { Cons(i32, Box<List>), Nil, } use List::Cons; use List::Nil; let list = Cons(1, Box::new(Cons(2, Box::new(Cons(3, Box::new(Nil)))))); // dereferencing let x = 5; let y = &x; // assert_eq!(5, x); doesn't compile because diffrent types assert_eq!(5, *y); // dereferenced // deref trait struct FakeBox<T>(T); impl<T> FakeBox<T> { fn new(x: T) -> FakeBox<T> { FakeBox(x) } } use std::ops::Deref; impl<T> Deref for FakeBox<T> { type Target = T; fn deref(&self) -> &T { &self.0 } } let x = 5; let y = FakeBox::new(x); assert_eq!(5, *y); // deref coercions fn all_functions_perform_deref_coercions(arg: &str) { println!("Hello, {}", arg); } let m = FakeBox::new(String::from("Rust")); 
all_functions_perform_deref_coercions(&m); // even though the types don't match, it still works: because rust dereferenced the neccessary amount of times // DerefMut is the trait for mutable types // Drop trait (similar to Destructor) struct CustomSmartPointer { data: String, } impl Drop for CustomSmartPointer { fn drop(&mut self) { println!("Dropping CustomSmartPointer with data {}!", self.data); } } { let x: CustomSmartPointer; // x.drop(); // illegal to call drop explicitly // but there is std::mem::drop -> called with drop(x) } // destructed // Rc<T> or Reference counting (similar to shared_pointers) // only suited for single-thread usage use std::rc::Rc; enum ListRC { ConsRC(i32, Rc<ListRC>), NilRC, } use ListRC::{ConsRC, NilRC}; let a = Rc::new(ConsRC(5, Rc::new(ConsRC(10, Rc::new(NilRC))))); let b = ConsRC(3, Rc::clone(&a)); // clones increase reference count let c = ConsRC(4, Rc::clone(&a)); // bot b and c own a println!("reference count: {}", Rc::strong_count(&a)); // Interior Mutability Pattern and RefCell<T> // RefCell is implemented using unsafe code, because it diregardes the borrowing rules // but it checks at runtime // Compiler can't ALWAYS know if code is safe (Halting Problem), therefor it might reject a correct program // -> solution if programmer knows it's he can use RefCell // example: test double or Mock object pub trait Messenger { fn send(&self, msg: &str); } pub struct LimitTracker<'a, T: Messenger> { messenger: &'a T, value: usize, max: usize, } impl<'a, T> LimitTracker<'a, T> where T: Messenger, { pub fn new(messenger: &T, max: usize) -> LimitTracker<T> { LimitTracker { messenger, value: 0, max, } } pub fn set_value(&mut self, value: usize) { self.value = value; let percentage_of_max = self.value as f64 / self.max as f64; if percentage_of_max >= 1.0 { self.messenger.send("Error: You are over your quota!"); } else if percentage_of_max >= 0.0 { self.messenger .send("Urgent warning: You've used up over 90% of oyu quota"); } else if 
percentage_of_max >= 0.75 { self.messenger .send("Warning: You've used up over 75% of your quota!"); } } } use std::cell::RefCell; struct MockMessenger { sent_messages: RefCell<Vec<String>>, } impl MockMessenger { fn new() -> MockMessenger { MockMessenger { sent_messages: RefCell::new(vec![]), } } } impl Messenger for MockMessenger { fn send(&self, message: &str) { self.sent_messages.borrow_mut().push(String::from(message)); } } #[test] fn it_sens_an_over_75_percent_warning_message() { let mock_messenger = MockMessenger::new(); let mut limit_tracker = LimitTracker::new(&mock_messenger, 100); limit_tracker.set_value(80); assert_eq!(mock_messenger.sent_messages.borrow().len(), 1); } // having multiple owners of mutable data / combining Rc<T> and RefCell<T> let var_with_multiple_owners: Rc<RefCell<i32>>; } use std::thread; use std::sync::{mpsc, Mutex, Arc}; fn channeling() { // multiple producer single consumer let (tx, rx) = mpsc::channel(); let tx1 = mpsc::Sender::clone(&tx); // closure with `move` keyword takes ownership of captured variables let handle = thread::spawn(move || { let val = String::from("hi"); tx.send(val).unwrap(); thread::sleep(std::time::Duration::from_millis(1)); }); let received = rx.recv().unwrap(); for received in rx {} handle.join().unwrap(); } fn mutexing() { let counter = Arc::new(Mutex::new(0)); let mut handles = Vec::new(); for _ in 0..10 { let counter = Arc::clone(&counter); let handle = thread::spawn(move || { let mut num = counter.lock().unwrap(); *num += 1; }); handles.push(handle); } for handle in handles { handle.join().unwrap(); } println!("Result: {}", *counter.lock().unwrap()); }
None }
random_line_split
basic.rs
#[test] fn basics() { let immutable = 5; let mut mutable = 6; mutable = 2; let spaces = " "; // auto type deduction let spaces = spaces.len(); // "shadowing" new variable with same name const CONSTANT: u32 = 100_000; // constant (type must be specified) let tuple: (i32, f64, u8) = (500, 6.4, 1); let (t0, t1, t2) = tuple; assert_eq!(t0, tuple.0); assert_eq!(t1, tuple.1); assert_eq!(t2, tuple.2); let array: [i32; 5] = [3, 3, 3, 3, 3]; assert_eq!([3; 5], array); let x = array[2]; // runtime error if invalid array acces (only in debug mode) if 1 == 1 { // if statment // always has body with curly brackets } let if_expression = if 5 == 3 { 5 } else { 6 }; let loop_result = loop { println!("infinite loop"); break 5; }; assert_eq!(loop_result, 5); for element in array.iter() { println!("the value is: {}", element); } // countdown for number in (1..4).rev() { println!("{}!", number); } } fn expression_return() -> i32 { 69 * 420 } #[test] fn ownership_and_borrowing() { let primitive = 423; // lives on stack let string_literal: &'static str = "hello"; // reference to memory in binary let string = String::from("hello"); // lives on heap (~ smart pointer) { let temp = 5; let also_temp = String::from("ey"); } // variable leaves scope -> gets "dropped" let original = 5; let copy = original; // is copied, because primitive let original = String::from("I like to move it, move it"); let move_destination = original; // moved in new memory; `original` becomes invalid let copy = original.clone(); // copied, because has `copy` trait let value = 5; let reference = &value; assert_eq!(value, *reference); let other_value = 10; let mutable_reference = &mut other_value; *mutable_reference = 11; assert_eq!(value, *mutable_reference); // borrowing rules let mut original = String::new(); let mutable_reference = &mut original; let reference = &original; // can't borrow twice at a time let mut original = String::new(); let reference1 = &original; let reference2 = &original; // slices let string = 
String::from("Hello World"); let string_slice = &string[0..5]; // must be continuous part of memory assert_eq!("Hello", string_slice); let array = [1, 2, 3, 4, 5]; let array_slice = &array[1..3]; assert_eq!([2, 3, 4], array_slice); // passing function arguments let string = String::new(); take_reference(&string); take_mut_ref(&mut string); string = take_and_give_back_ownership(string); take_ownership(string); } // function arguments fn take_ownership(arg: String) { println!("Taken ownership of `{}`", arg); } fn take_and_give_back_ownership(mut arg: String) -> String { arg.push_str("shesh"); arg } fn take_reference(arg: &String) { println!("Length of string is {}", arg.len()); } fn take_mut_ref(arg: &mut String) { arg.push_str("soos"); } fn first_word_pos(s: &String) -> usize { let bytes = s.as_bytes(); for (i, &item) in bytes.iter().enumerate() { if item == b' ' { return i; } } s.len() } fn first_word(s: &str) -> &str { // str is the string-slice type let bytes = s.as_bytes(); for (i, &item) in bytes.iter().enumerate() { if item == b' ' { return &s[0..i]; } } &s[..] 
} // structs #[derive(Debug)] // for debug printing struct User { username: String, // field email: String, sign_in_count: u64, active: bool, } fn build_user(email: String, username: String) -> User { User { email: email, username: username, active: true, sign_in_count: 1, } } fn code_holder_3() { let mut user1 = User { email: String::from("someone@example.com"), username: String::from("someusername123"), active: true, sign_in_count: 69, }; user1.email = String::from("diffrentemail@gmail.com"); let user2 = User { email: String::from("lel@mail.com"), username: String::from("diffrentName"), ..user1 // copies the other values of user1 }; // println!("user is {}", user2); error because User doesn't impelement 'std::fmt::Display' println!("user is {:?}", user2); // using output format 'Debug' } // tuple struct struct Color(i32, i32, i32); // is its own type struct Point(i32, i32, i32); struct Rect { width: u32, height: u32, } impl Rect { // method (because takes self) fn area(&self) -> u32 { self.width * self.height } fn can_hold(&self, other: &Rect) -> bool { self.width > other.width && self.height > other.height } // associated function (bacause doesn't take self) -> Rect::square() fn square(size: u32) -> Rect { Rect { width: size, height: size, } } } // Enums enum IpAddrKind { // is a custom data type V4, // variant of enum V6, } struct IpAddrBad { kind: IpAddrKind, address: String, } enum IpAddr { // better way, also diffrent data types possible V4(u8, u8, u8, u8), V6(String), } fn route(ip_kind: IpAddrKind) {} fn code_holder_4() { let four = IpAddrKind::V4; // are of same type let six = IpAddrKind::V6; route(IpAddrKind::V4); route(IpAddrKind::V6); let home = IpAddr::V4(127, 0, 0, 1); let loopback = IpAddr::V6(String::from("::1")); } enum Message { Quit, Move { x: i32, y: i32 }, // struct Write(String), // tuple struct ChangeColor(i32, i32, i32), // tuple struct } impl Message { fn call(&self) { // code } } // option enum CustomOption<T> { // replaces 'null'-value 
Some(T), None, } fn code_block_5() { let some_number = Some(5); // option let some_string = Some("a string"); let absent_number: Option<i32> = None; } // match: control flow operator #[derive(Debug)] enum UsState { Alabama, Alaska, } enum Coin { Penny, Nickel, Dime, Quarter(UsState), } fn value_in_cents(coin: Coin) -> u8 { match coin { Coin::Penny => 1, Coin::Nickel => 5, Coin::Dime => 10, Coin::Quarter(state) => { println!("State quarter from {:?}!", state); 25 } } } fn plus_one(x: Option<i32>) -> Option<i32> { match x { None => None, Some(i) => Some(i + 1), } } fn matches_are_exhaustive(val: u8) { match val { 1 => println!("one"), 2 => println!("two"), 5 => println!("five"), 7 => println!("seven"), _ => (), } } // if let fn if_let() { let some_u8_value = Some(0u8); match some_u8_value { Some(3) => println!("three"), _ => (), } // equivalent to if let Some(3) = some_u8_value { println!("three"); } } // collections fn code_holder_6() { let v: Vec<i32> = Vec::new(); let v = vec![1, 2, 3]; let mut v = Vec::new(); v.push(5); v.push(6); let v = vec![1, 2, 3, 4, 5]; // two ways to access vector let third: &i32 = &v[2]; // panics if fails match v.get(2) { // doesn't panic Some(third) => (), None => (), } // iterating let mut v = vec![100, 32, 57]; for i in &v { println!("{}", i); } for i in &mut v { *i += 50; } // multiple type vector enum
{ Int(i32), Float(f64), Text(String), } let row = vec![ SpreadsheetCell::Int(3), SpreadsheetCell::Text(String::from("blue")), SpreadsheetCell::Float(10.12), ]; } // strings // str is implemented in the core language and String is in the standard library fn code_holder_7() { let mut s = String::new(); let data = "inital contents"; // implements 'Display' trait let mut s = data.to_string(); s.push_str("bar"); s.push('a'); let s1 = String::from("Hello "); let s2 = String::from("World"); let s3 = s1 + &s2; // s1 was moved! (fn add(self, s: &str) -> String) let s1 = String::from("tic"); let s2 = String::from("tac"); let s3 = String::from("toe"); let s = format!("{}-{}-{}", s1, s2, s3); // you can't index into string, because of ambigueties and other reasons -> be more percise // slices... not so appropriate let hello = "Здравствуйте"; let s = &hello[0..4]; // 4 bytes -> "Зд" // best way: chars for c in "नमस्ते".chars() { println!("{}", c); } } // Hash Maps fn code_holder_8() { use std::collections::HashMap; let mut scores = HashMap::new(); scores.insert(String::from("Blue"), 10); scores.insert(String::from("Yellow"), 50); // morphing collections let teams = vec![String::from("Blue"), String::from("Yellow")]; let inital_scores = vec![10, 50]; let scores: HashMap<_, _> = teams.iter().zip(inital_scores.iter()).collect(); } // errors fn code_holder_9() { // panicing! 
// If rust panics before it quite it's starts unwinding (stack is cleared up), which takes a lot of time -> alternative abort (in Cargo.toml: panic = 'abort') panic!("crash and burn"); // Result use std::fs::File; use std::io::ErrorKind; use std::io::Read; let f = File::open("hello.txt"); let f = match f { Ok(file) => file, Err(error) => match error.kind() { ErrorKind::NotFound => match File::create("hello.txt") { Ok(fc) => fc, Err(e) => panic!("Problem creating the file: {:?}", e), }, other_error => panic!("Problem opening the file: {:?}", other_error), }, }; let f = File::open("hello.txt").unwrap(); // returns value if okay, panics otherwise let f = File::open("hello.txt").expect_err("Own error message"); // same as unwrap() just with custom error message // propagating error fn read_username_from_file_verbose() -> Result<String, std::io::Error> { // verbose way let f = File::open("hello.txt"); let mut f = match f { Ok(file) => file, Err(e) => return Err(e), }; let mut s = String::new(); match f.read_to_string(&mut s) { Ok(_) => Ok(s), Err(e) => Err(e), } } fn read_username_from_file() -> Result<String, std::io::Error> { // better way with ? 
operator let mut f = File::open("hello.txt")?; let mut s = String::new(); f.read_to_string(&mut s)?; // if ok expression has value, if Err then function returns with error Ok(s) } } // generics (similar to C++ typenames/templates) enum own_Result<T, E> { Ok(T), Err(E), } struct Point1<T> { x: T, y: T, } impl<T> Point1<T> { fn x(&self) -> &T { &self.x } } impl Point1<f32> { fn distance_from_origin(&self) -> f32 { (self.x.powi(2) + self.y.powi(2)).sqrt() } } struct Point2<T, U> { x: T, y: U, } impl<T, U> Point2<T, U> { fn mixup<V, W>(self, other: Point2<V, W>) -> Point2<T, W> { Point2 { x: self.x, y: other.y, } } } // traits trait Summarizable { fn summarize_author(&self) -> String; fn summarize(&self) -> String { format!("(Read more from {}...)", self.summarize_author()) } } struct NewsArticle { headline: String, location: String, author: String, content: String, } impl Summarizable for NewsArticle { fn summarize_author(&self) -> String { format!("{}", self.author) } fn summarize(&self) -> String { format!( "{}, by {} ({})", self.headline, self.summarize_author(), self.location ) } } struct Tweet { username: String, content: String, reply: bool, retweet: bool, } impl Summarizable for Tweet { fn summarize_author(&self) -> String { format!("@{}", self.username) } fn summarize(&self) -> String { format!("{}: {}", self.summarize_author(), self.content) } } // traits as parameters/ Trait bounds fn notify(item: impl Summarizable) { println!("Breaking news! {}", item.summarize()); } // ^ syntactic sugar for: // fn notify<T: Summarizable>(item: T) { // println!("Breaking news! 
{}", item.summarize()); // } fn notfiy<T: Summarizable + std::fmt::Display>(item1: T) {} // when many traits are used -> prefer 'where'-clauses to not clutter the funciton definition fn some_function<T, U>(t: T, u: U) -> i32 where T: std::fmt::Display + Clone, U: Clone + std::fmt::Debug, { 4 } fn returns_summarizable() -> impl Summarizable { Tweet { username: String::from("horse_ebooks"), content: String::from("of cource, as you probablay already know people"), reply: false, retweet: false, } } fn largest<T: std::cmp::PartialOrd + Copy>(list: &[T]) -> T { let mut largest = list[0]; for &item in list.iter() { if item > largest { largest = item; } } largest } // Trait bounds to conditionally implement Methods struct Pair<T> { x: T, y: T, } impl<T> Pair<T> { fn new(x: T, y: T) -> Self { Self { x, y } } } // conditional implementation (only if traits are Display + PartialOrd) impl<T: std::fmt::Display + std::cmp::PartialOrd> Pair<T> { fn cmp_disply(&self) { if self.x >= self.y { println!("The largest member is x = {}", self.x); } else { println!("The largest member is y = {}", self.y); } } } // implement a trait if the type implements another train --- alias blanket implementations // impl<T: std::fmt::Display> ToString for T { // if T already implements Display, than it also implements ToString // } // lifetimes // lifetimes gurantee, that references are still valid, when used. // Most of the time they are implicitly inferred. If they can't, they have to be explicitly specified // &i32; a reference // &'a i32; a reference with the explicit lifetime "'a" // &'a mut i32; a mutable reference with the explicit lifetime "'a" fn longest<'a>(x: &'a str, y: &'a str) -> &'a str { // now the compiler knows, how long the return value can live. 
(as long as the smaller lifetime of x or y) if x.len() > y.len() { x } else { y } } struct ImportantExcerpt<'a> { part: &'a str, // if struct holds reference, a explicit lifetime is required } impl<'a> ImportantExcerpt<'a> { fn level(&self) -> i32 { 3 } } // static lifetimes (references live for entire duration of program)... applies to all string ltierals fn code_holder_10() { let s: &'static str = "I have a static lifetime."; } // all generics together fn longest_with_an_announcement<'a, T>(x: &'a str, y: &'a str, ann: T) -> &'a str where T: std::fmt::Display, { println!("Announcement! {}", ann); if x.len() > y.len() { x } else { y } } // closures fn code_holder_11() { // types are automatically inferred (but can be explicitly specified) let some_closure = |arg| { println!("this is the argument: {}", arg); }; let minimalist_closure = |x| x; // returns itself some_closure(5); minimalist_closure("lel"); // pattern: memorization / lazy evaluation struct NoArgsCacher<T> where T: Fn() -> u32, { calculation: T, value: Option<u32>, } impl<T> NoArgsCacher<T> where T: Fn() -> u32, { fn new(calculation: T) -> NoArgsCacher<T> { NoArgsCacher { calculation, value: None, } } fn value(&mut self) -> u32 { match self.value { Some(v) => v, None => { let v = (self.calculation)(); self.value = Some(v); v } } } } use std::thread; use std::time::Duration; let mut expensive_result = NoArgsCacher::new(|| { println!("performing expensive calculation..."); thread::sleep(Duration::from_secs(2)); 420 }); // TODO: create better Cacher with generics and a hash-table (args-result pairs) } // iterators // zero-cost abstraction -> are very fast USE THEM! 
fn code_holder_12() { let v1 = vec![1, 2, 3]; let v1_iter = v1.iter(); for val in v1_iter { println!("Got: {}", val); } pub trait CustomIteratorTrait { type Item; // associated type fn next(&mut self) -> Option<Self::Item>; } #[test] fn iterator_demonstration() { let v1 = vec![1, 2, 3]; let mut v1_iter = v1.iter(); assert_eq!(v1_iter.next(), Some(&1)); assert_eq!(v1_iter.next(), Some(&2)); assert_eq!(v1_iter.next(), Some(&3)); assert_eq!(v1_iter.next(), None); } #[test] fn iterator_sum() { let v1 = vec![1, 2, 3]; let v1_iter = v1.iter(); let total: i32 = v1_iter.sum(); // iter has been consumed (moved) -> cannot be moved any more } #[test] fn iterator_map() { let v1: Vec<i32> = vec![1, 2, 3]; let v2: Vec<_> = v1.iter().map(|x| x + 1).collect(); // collect() must be called because iterators are lazy assert_eq!(v2, vec![2, 3, 4]); } struct Shoe { size: u32, style: String, } fn shoes_in_my_size(shoes: Vec<Shoe>, shoe_size: u32) -> Vec<Shoe> { shoes.into_iter().filter(|s| s.size == shoe_size).collect() } #[test] fn filter_by_size() { let shoes = vec![ Shoe { size: 10, style: String::from("sneaker"), }, Shoe { size: 13, style: String::from("sandal"), }, Shoe { size: 10, style: String::from("boot"), }, ]; let in_my_size = shoes_in_my_size(shoes, 10); assert_eq!( in_my_size, vec![ Shoe { size: 10, style: String::from("sneaker") }, Shoe { size: 10, style: String::from("boot") }, ] ); } // own iterator struct Counter { count: u32, } impl Counter { fn new() -> Counter { Counter { count: 0 } } } impl Iterator for Counter { type Item = u32; fn next(&mut self) -> Option<Self::Item> { self.count += 1; if self.count < 6 { Some(self.count) } else { None } } } #[test] fn using_other_iterator_trait_methods() { let sum: u32 = Counter::new() .zip(Counter::new().skip(1)) .map(|(a, b)| a * b) .filter(|x| x % 3 == 0) .sum(); assert_eq!(18, sum); } } // cargo and creates // //! Another documentation style, which is at the top of the page, generally in the crate root // //! 
re-exports are listed in documentation -> expose them /// Documentation comment (3 slashes) /// will be used to generate HTML documentation (cargo doc --open) -> runs rustdoc /// supports MarkDown! /// Some commonly used headings /// # Examples /// # Panics /// # Errors (when it returns Result) /// # Safety (if unsafe to call) /// ''' /// assert_eq!(true, true); /// ''' /// this code example will be run as a test with (cargo test)!!! AWESOME fn documented_function() {} // smart pointers // Vec and String are smart pointers, because they point at data and have some additional metadata // allocate data on heap // Box<T> for storing data on heap (no performnace overhead) // usages: dynamic memory (unknown size), transfer ownership without copying, value that implements a trait but the type doesn't matter fn code_holder_13() { let b = Box::new(5); // recursive types and Cons List // enum List { idea // Cons(i32, List), // Nil, // } // let list = Cons(1, Cons(2, Cons(3, Nil))); enum List { Cons(i32, Box<List>), Nil, } use List::Cons; use List::Nil; let list = Cons(1, Box::new(Cons(2, Box::new(Cons(3, Box::new(Nil)))))); // dereferencing let x = 5; let y = &x; // assert_eq!(5, x); doesn't compile because diffrent types assert_eq!(5, *y); // dereferenced // deref trait struct FakeBox<T>(T); impl<T> FakeBox<T> { fn new(x: T) -> FakeBox<T> { FakeBox(x) } } use std::ops::Deref; impl<T> Deref for FakeBox<T> { type Target = T; fn deref(&self) -> &T { &self.0 } } let x = 5; let y = FakeBox::new(x); assert_eq!(5, *y); // deref coercions fn all_functions_perform_deref_coercions(arg: &str) { println!("Hello, {}", arg); } let m = FakeBox::new(String::from("Rust")); all_functions_perform_deref_coercions(&m); // even though the types don't match, it still works: because rust dereferenced the neccessary amount of times // DerefMut is the trait for mutable types // Drop trait (similar to Destructor) struct CustomSmartPointer { data: String, } impl Drop for CustomSmartPointer { fn 
drop(&mut self) { println!("Dropping CustomSmartPointer with data {}!", self.data); } } { let x: CustomSmartPointer; // x.drop(); // illegal to call drop explicitly // but there is std::mem::drop -> called with drop(x) } // destructed // Rc<T> or Reference counting (similar to shared_pointers) // only suited for single-thread usage use std::rc::Rc; enum ListRC { ConsRC(i32, Rc<ListRC>), NilRC, } use ListRC::{ConsRC, NilRC}; let a = Rc::new(ConsRC(5, Rc::new(ConsRC(10, Rc::new(NilRC))))); let b = ConsRC(3, Rc::clone(&a)); // clones increase reference count let c = ConsRC(4, Rc::clone(&a)); // bot b and c own a println!("reference count: {}", Rc::strong_count(&a)); // Interior Mutability Pattern and RefCell<T> // RefCell is implemented using unsafe code, because it diregardes the borrowing rules // but it checks at runtime // Compiler can't ALWAYS know if code is safe (Halting Problem), therefor it might reject a correct program // -> solution if programmer knows it's he can use RefCell // example: test double or Mock object pub trait Messenger { fn send(&self, msg: &str); } pub struct LimitTracker<'a, T: Messenger> { messenger: &'a T, value: usize, max: usize, } impl<'a, T> LimitTracker<'a, T> where T: Messenger, { pub fn new(messenger: &T, max: usize) -> LimitTracker<T> { LimitTracker { messenger, value: 0, max, } } pub fn set_value(&mut self, value: usize) { self.value = value; let percentage_of_max = self.value as f64 / self.max as f64; if percentage_of_max >= 1.0 { self.messenger.send("Error: You are over your quota!"); } else if percentage_of_max >= 0.0 { self.messenger .send("Urgent warning: You've used up over 90% of oyu quota"); } else if percentage_of_max >= 0.75 { self.messenger .send("Warning: You've used up over 75% of your quota!"); } } } use std::cell::RefCell; struct MockMessenger { sent_messages: RefCell<Vec<String>>, } impl MockMessenger { fn new() -> MockMessenger { MockMessenger { sent_messages: RefCell::new(vec![]), } } } impl Messenger for 
MockMessenger { fn send(&self, message: &str) { self.sent_messages.borrow_mut().push(String::from(message)); } } #[test] fn it_sens_an_over_75_percent_warning_message() { let mock_messenger = MockMessenger::new(); let mut limit_tracker = LimitTracker::new(&mock_messenger, 100); limit_tracker.set_value(80); assert_eq!(mock_messenger.sent_messages.borrow().len(), 1); } // having multiple owners of mutable data / combining Rc<T> and RefCell<T> let var_with_multiple_owners: Rc<RefCell<i32>>; } use std::thread; use std::sync::{mpsc, Mutex, Arc}; fn channeling() { // multiple producer single consumer let (tx, rx) = mpsc::channel(); let tx1 = mpsc::Sender::clone(&tx); // closure with `move` keyword takes ownership of captured variables let handle = thread::spawn(move || { let val = String::from("hi"); tx.send(val).unwrap(); thread::sleep(std::time::Duration::from_millis(1)); }); let received = rx.recv().unwrap(); for received in rx {} handle.join().unwrap(); } fn mutexing() { let counter = Arc::new(Mutex::new(0)); let mut handles = Vec::new(); for _ in 0..10 { let counter = Arc::clone(&counter); let handle = thread::spawn(move || { let mut num = counter.lock().unwrap(); *num += 1; }); handles.push(handle); } for handle in handles { handle.join().unwrap(); } println!("Result: {}", *counter.lock().unwrap()); }
SpreadsheetCell
identifier_name
vechain.go
package vechain import ( "bytes" "context" "crypto/sha256" "encoding/json" "fmt" "io/ioutil" "net/http" "strconv" "strings" "sync/atomic" "time" "github.com/myafeier/log" ) // ============common============ //通用请求表单 type Form struct { AppId string `json:"appid"` AppKey string `json:"appkey"` Nonce string `json:"nonce"` Timestamp string `json:"timestamp"` Signature string `json:"signature"` } //通用返回结构 type ResponseData struct { Data interface{} `json:"data"` Code int `json:"code"` Message string `json:"message"` } func sign(timestamp int64, config *VechainConfig) (signature string) { str := fmt.Sprintf("appid=%s&appkey=%s&nonce=%s&timestamp=%d", config.DeveloperId, config.DeveloperKey, config.Nonce, timestamp) signature = fmt.Sprintf("%x", sha256.Sum256([]byte(strings.ToLower(str)))) return } //区块链浏览器浏览地址 func BlockChainExploreLink(transactionId string, config *VechainConfig) string { return fmt.Sprintf(config.ExploreLink, transactionId) } //=========================Token====================== //返回Token结构 type Token struct { Token string `json:"token"` Expire int64 `json:"expire"` } var lock int32 = 0 var refreshError = fmt.Errorf("token refreshing") func GetToken(config *VechainConfig) (token *Token, err error) { if atomic.LoadInt32(&lock) == 1 { err = refreshError return } atomic.StoreInt32(&lock, 1) defer atomic.StoreInt32(&lock, 0) timestamp := time.Now().Unix() form := new(Form) form.AppId = config.DeveloperId form.AppKey = config.DeveloperKey form.Nonce = config.Nonce form.Timestamp = strconv.FormatInt(timestamp, 10) form.Signature = sign(timestamp, config) requestUrl := config.SiteUrl + "v1/tokens" formByte, err := json.Marshal(form) if err != nil { log.Error("%s", err.Error()) return } log.Debug("%+v", *form) data := bytes.NewReader(formByte) retryTimes := 0 Retry: retryTimes++ if retryTimes > 100 { time.Sleep(1 * time.Minute) } else if retryTimes > 1000 { time.Sleep(1 * time.Hour) } request, err := http.NewRequest("POST", requestUrl, data) if err != nil { 
log.Error("%s", err.Error()) goto Retry } defer request.Body.Close() request.Header.Set("Content-Type", "application/json") client := &http.Client{} response, err := client.Do(request) if err != nil { log.Error("%s", err.Error()) goto Retry } defer response.Body.Close() if response.StatusCode != 200 { log.Error("%s", err.Error()) goto Retry } body, err := ioutil.ReadAll(response.Body) if err != nil { log.Error("%s", err.Error())
respData := new(ResponseData) respData.Data = new(Token) err = json.Unmarshal(body, respData) if respData.Code != 1 { err = fmt.Errorf("responseCode:%d error,message:%s\n", respData.Code, respData.Message) log.Error(err.Error()) goto Retry } token = respData.Data.(*Token) return } //======================Occupy============= //抢占请求表单 type OccupyVidRequest struct { RequestNo string `json:"requestNo"` VidList []string `json:"vidList"` } //抢占响应结构 type OccupyVidResponse struct { RequestNo string `json:"requestNo,omitempty"` // 请求编号 Url string `json:"url,omitempty"` // 扫码 url Quantity int `json:"quantity,omitempty"` //请求的vid个数 Status string `json:"status,omitempty"` // 生成状态(GENERATING:抢占中,SUCCESS:成功) SuccessList []string `json:"successList,omitempty"` // 抢占成功 vid 列表 FailureList []string `json:"failureList,omitempty"` // 抢占失败 vid 列表 } // 抢占vid func OccupyVid(ctx context.Context, config *VechainConfig, tokenServer IToken) (response *OccupyVidResponse, err error) { url := "v1/vid/occupy" request := (ctx.Value("request")).(*OccupyVidRequest) data, err := json.Marshal(request) if err != nil { log.Error(err.Error()) return } //log.Debug("request: %s \n",data) var justReturn bool go func() { <-ctx.Done() err = ctx.Err() justReturn = true }() retryTimes := 0 RetryWithNewToken: token := tokenServer.GetToken() Retry: retryTimes++ if justReturn { return } if retryTimes > 100 { time.Sleep(1 * time.Hour) } else if retryTimes > 10 { time.Sleep(1 * time.Minute) } req, err := http.NewRequest("POST", config.SiteUrl+url, bytes.NewBuffer(data)) if err != nil { log.Error(err.Error()) goto Retry } req.Header.Add("Content-Type", "application/json;charset=utf-8") req.Header.Add("language", "zh_hans") req.Header.Add("x-api-token", token) client := http.DefaultClient resp, err := client.Do(req) if err != nil { log.Error(err.Error()) goto Retry } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { log.Error(err.Error()) goto Retry } if resp.StatusCode != 
http.StatusOK { err = fmt.Errorf("RemoteServerStatusError code:%d,body:%s", resp.StatusCode, respBody) log.Error(err.Error()) goto Retry } respData := new(ResponseData) respData.Data = new(OccupyVidResponse) err = json.Unmarshal(respBody, respData) if err != nil { log.Error(err.Error()) return } if respData.Code == 1 { response = respData.Data.(*OccupyVidResponse) log.Debug("response %+v \n", *response) if response.Status == "GENERATING" { time.Sleep(1 * time.Minute) goto Retry } return } else if respData.Code == 100004 { goto RetryWithNewToken } else { err = fmt.Errorf("Occupy vid error, remote response Code:%d, MSG: %s.", respData.Code, respData.Message) log.Error(err.Error()) } return } //================================Post======== type PostArtifactResponse struct { RequestNo string `json:"requestNo,omitempty"` // 请求编号 Uid string `json:"uid,omitempty"` // 上链子账户id Status string `json:"status,omitempty"` // 生成状态(PROCESSING:上链中,SUCCESS:成功,FAILURE: 失败,INSUFFICIENT:费用不足) TxList []*PostArtifactResponseData `json:"txList,omitempty"` //上链结果 } type PostArtifactResponseData struct { TxId string `json:"txid"` //上链事务id ClauseIndex string `json:"clauseIndex"` // 每40个vid组成一个clause Vid string `json:"vid"` //商品ID DataHash string `json:"dataHash"` //? 
} type PostArtifactRequest struct { RequestNo string `json:"requestNo"` //请求编号 Uid string `json:"uid"` //用户 Id Data []*PostArtifactRequestData `json:"data,omitempty"` } type PostArtifactRequestData struct { DataHash string `json:"dataHash"` Vid string `json:"vid"` } // 异步上链 // func PostArtifact(ctx context.Context, config *VechainConfig, tokenServer IToken) (response *PostArtifactResponse, err error) { url := "v1/artifacts/hashinfo/create" request := (ctx.Value("request")).(*PostArtifactRequest) var data []byte data, err = json.Marshal(request) if err != nil { log.Error(err.Error()) return } justReturn := false go func() { <-ctx.Done() justReturn = true err = ctx.Err() log.Debug("ctx deadLine: %s", err.Error()) return }() retryTimes := 0 RetryWithNewToken: token := tokenServer.GetToken() Retry: retryTimes++ if justReturn { return } if retryTimes > 100 { time.Sleep(1 * time.Minute) } else if retryTimes > 1000 { time.Sleep(1 * time.Hour) } req, err := http.NewRequest("POST", config.SiteUrl+url, bytes.NewBuffer(data)) if err != nil { log.Error(err.Error()) return } req.Header.Add("Content-Type", "application/json;charset=utf-8") req.Header.Add("language", "zh_hans") req.Header.Add("x-api-token", token) client := http.DefaultClient resp, err := client.Do(req) if err != nil { log.Error(err.Error()) return } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { err = fmt.Errorf("[Error] After 10 times retry,RemoteServerStatusError code:%d", resp.StatusCode) log.Error(err.Error()) goto Retry } respBody, err := ioutil.ReadAll(resp.Body) if err != nil { log.Error(err.Error()) goto Retry } respData := new(ResponseData) respData.Data = new(PostArtifactResponse) err = json.Unmarshal(respBody, respData) if err != nil { log.Error(err.Error()) goto Retry } if respData.Code == 1 { log.Debug("postArtifact response %s,%+v \n", respData.Message, respData.Data) response = respData.Data.(*PostArtifactResponse) if response.Status == "PROCESSING" { time.Sleep(1 * time.Minute) 
goto Retry } } else if respData.Code == 100004 { goto RetryWithNewToken } else { err = fmt.Errorf("PostArtifactResponseerror, remote response Code:%d, MSG: %s.", respData.Code, respData.Message) log.Error(err.Error()) return } return } //================CreateAccount============ type CreateUser struct { RequestNo string `json:"requestNo"` //请求编号 Uid string `json:"uid"` //用户 Id(已分配时返回) Status string `json:"status"` //状态(PROCESSING:处理中,SUCCESS:成功,FAILURE:失败) } // 创建账号 // 在此系统只只需创建一个账号,无多账户的需求。 func GenerateSubAccount(requestNo, accountName string, config *VechainConfig, tokenServer IToken) (uid string, err error) { url := "v1/artifacts/user/create" postData := ` { "requestNo":"%s", "name":"%s" } ` req, err := http.NewRequest("POST", config.SiteUrl+url, bytes.NewBufferString(fmt.Sprintf(postData, requestNo, accountName))) if err != nil { log.Error(err.Error()) return } req.Header.Add("Content-Type", "application/json;charset=utf-8") req.Header.Add("language", "zh_hans") req.Header.Add("x-api-token", tokenServer.GetToken()) client := http.DefaultClient resp, err := client.Do(req) if err != nil { log.Error(err.Error()) return } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { log.Error(err.Error()) return } respData := new(ResponseData) respData.Data = new(CreateUser) err = json.Unmarshal(respBody, respData) if err != nil { log.Error(err.Error()) return } if respData.Code == 1 { log.Debug("response %d,%s,%+v \n", resp.StatusCode, resp.Status, *(respData.Data.(*CreateUser))) } uid = respData.Data.(*CreateUser).Uid return }
goto Retry } log.Debug("toke response :%s \n", body)
random_line_split
vechain.go
package vechain import ( "bytes" "context" "crypto/sha256" "encoding/json" "fmt" "io/ioutil" "net/http" "strconv" "strings" "sync/atomic" "time" "github.com/myafeier/log" ) // ============common============ //通用请求表单 type Form struct { AppId string `json:"appid"` AppKey string `json:"appkey"` Nonce string `json:"nonce"` Timestamp string `json:"timestamp"` Signature string `json:"signature"` } //通用返回结构 type ResponseData struct { Data interface{} `json:"data"` Code int `json:"code"` Message string `json:"message"` } func sign(timestamp int64, config *VechainConfig) (signature string) { str := fmt.Sprintf("appid=%s&appkey=%s&nonce=%s&timestamp=%d", config.DeveloperId, config.DeveloperKey, config.Nonce, timestamp) signature = fmt.Sprintf("%x", sha256.Sum256([]byte(strings.ToLower(str)))) return } //区块链浏览器浏览地址 func BlockChainExploreLink(transactionId string, config *VechainConfig) string { return fmt.Sprintf(config.ExploreLink, transactionId) } //=========================Token====================== //返回Token结构 type Token struct { Token string `json:"token"` Expire int64 `json:"expire"` } var lock int32 = 0 var refreshError = fmt.Errorf("token refreshing") func GetToken(config *VechainConfig) (token *Token, err error) { if atomic.LoadInt32(&lock) == 1 { err = refreshError return } atomic.StoreInt32(&lock, 1) defer atomic.StoreInt32(&lock, 0) timestamp := time.Now().Unix() form := new(Form) form.AppId = config.DeveloperId form.AppKey = config.DeveloperKey form.Nonce = config.Nonce form.Timestamp = strconv.FormatInt(timestamp, 10) form.Signature = sign(timestamp, config) requestUrl := config.SiteUrl + "v1/tokens" formByte, err := json.Marshal(form) if err != nil { log.Error("%s", err.Error()) return } log.Debug("%+v", *form) data := bytes.NewReader(formByte) retryTimes := 0 Retry: retryTimes++ if retryTimes > 100 { time.Sleep(1 * time.Minute) } else if retryTimes > 1000 { time.Sleep(1 * time.Hour) } request, err := http.NewRequest("POST", requestUrl, data) if err != nil { 
log.Error("%s", err.Error()) goto Retry } defer request.Body.Close() request.Header.Set("Content-Type", "application/json") client := &http.Client{} response, err := client.Do(request) if err != nil { log.Error("%s", err.Error()) goto Retry } defer response.Body.Close() if response.StatusCode != 200 { log.Error("%s", err.Error()) goto Retry } body, err := ioutil.ReadAll(response.Body) if err != nil { log.Error("%s", err.Error()) goto Retry } log.Debug("toke response :%s \n", body) respData := new(ResponseData) respData.Data = new(Token) err = json.Unmarshal(body, respData) if respData.Code != 1 { err = fmt.Errorf("responseCode:%d error,message:%s\n", respData.Code, respData.Message) log.Error(err.Error()) goto Retry } token = respData.Data.(*Token) return } //======================Occupy============= //抢占请求表单 type OccupyVidRequest struct { RequestNo string `json:"requestNo"` VidList []string `json:"vidList"` } //抢占响应结构 type OccupyVidResponse struct { RequestNo string `json:"requestNo,omitempty"` // 请求编号 Url string `json:"url,omitempty"` // 扫码 url Quantity int `json:"quantity,omitempty"` //请求的vid个数 Status string `json:"status,omitempty"` // 生成状态(GENERATING:抢占中,SUCCESS:成功) SuccessList []string `json:"successList,omitempty"` // 抢占成功 vid 列表 FailureList []string `json:"failureList,omitempty"` // 抢占失败 vid 列表 } // 抢占vid func OccupyVid(ctx context.Context, config *VechainConfig, tokenServer IToken) (response *OccupyVidResponse, err error) { url := "v1/vid/occupy" request := (ctx.Value("request")).(*OccupyVidRequest) data, err := json.Marshal(request) if err != nil { log.Error(err.Error()) return } //log.Debug("request: %s \n",data) var justReturn bool go func() { <-ctx.Done() err = ctx.Err() justReturn = true }() retryTimes := 0 RetryWithNewToken: token := tokenServer.GetToken() Retry: retryTimes++ if justReturn { return } if retryTimes > 100 { time.Sleep(1 * time.Hour) } else if retryTimes > 10 { time.Sleep(1 * time.Minute) } req, err := http.NewRequest("POST", 
config.SiteUrl+url, bytes.NewBuffer(data)) if err != nil { log.Error(err.Error()) goto Retry } req.Header.Add("Content-Type", "application/json;charset=utf-8") req.Header.Add("language", "zh_hans") req.Header.Add("x-api-token", token) client := http.DefaultClient resp, err := client.Do(req) if err != nil { log.Error(err.Error()) goto Retry } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { log.Error(err.Error()) goto Retry } if resp.StatusCode != http.StatusOK { err = fmt.Errorf("RemoteServerStatusError code:%d,body:%s", resp.StatusCode, respBody) log.Error(err.Error()) goto Retry } respData := new(ResponseData) respData.Data = new(OccupyVidResponse) err = json.Unmarshal(respBody, respData) if err != nil { log.Error(err.Error()) return } if respData.Code == 1 { response = respData.Data.(*OccupyVidResponse) log.Debug("response %+v \n", *response) if response.Status == "GENERATING" { time.Sleep(1 * time.Minute) goto Retry } return } else if respData.Code == 100004 { goto RetryWithNewToken } else { err = fmt.Errorf("Occupy vid error, remote response Code:%d, MSG: %s.", respData.Code, respData.Message) log.Error(err.Error()) } return } //================================Post======== type PostArtifactResponse struct { RequestNo string `json:"requestNo,omitempty"` // 请求编号 Uid string `json:"uid,omitempty"` // 上链子账户id Status string `json:"status,omitempty"` // 生成状态(PROCESSING:上链中,SUCCESS:成功,FAILURE: 失败,INSUFFICIENT:费用不足) TxList []*PostArtifactResponseData `json:"txList,omitempty"` //上链结果 } type PostArtifactResponseData struct { TxId string `json:"txid"` //上链事务id ClauseIndex string `json:"clauseIndex"` // 每40个vid组成一个clause Vid string `json:"vid"` //商品ID DataHash string `json:"dataHash"` //? 
} type PostArtifactRequest struct { RequestNo string `json:"requestNo"` //请求编号 Uid string `json:"uid"` //用户 Id Data []*PostArtifactRequestData `json:"data,omitempty"` } type PostArtifactRequestData struct { DataHash string `json:"dataHash"` Vid string `json:"vid"` } // 异步上链 // func PostArtifact(ctx context.Context, config *VechainConfig, tokenServer IToken) (response *PostArtifactResponse, err error) { url := "v1/artifacts/hashinfo/create" request := (ctx.Value("request")).(*PostArtifactRequest) var data []byte data, err = json.Marshal(request) if err != nil { log.Error(err.Error()) return } justReturn := false go func() { <-ctx.Done() justReturn = true err = ctx.Err() log.Debug("ctx deadLine: %s", err.Error()) return }() retryTimes := 0 RetryWithNewToken: token := tokenServer.GetToken() Retry: retryTimes++ if justReturn { return } if retryTimes > 100 { time.Sleep(1 * time.Minute) } else if retryTimes > 1000 { time.Sleep(1 * time.Hour) } req, err := http.NewRequest("POST", config.SiteUrl+url, bytes.NewBuffer(data)) if err != nil { log.Error(err.Error()) return } req.Header.Add("Content-Type", "application/json;charset=utf-8") req.Header.Add("language", "zh_hans") req.Header.Add("x-api-token", token) client := http.DefaultClient resp, err := client.Do(req) if err != nil { log.Error(err.Error()) return } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { err = fmt.Errorf("[Error] After 10 times retry,RemoteServerStatusError code:%d", resp.StatusCode) log.Error(err.Error()) goto Retry } respBody, err := ioutil.ReadAll(resp.Body) if err != nil { log.Error(err.Error()) goto Retry } respData := new(ResponseData) respData.Data = new(PostArtifactResponse) err = json.Unmarshal(respBody, respData) if err != nil { log.Error(err.Error()) goto Retry } if respData.Code == 1 { log.Debug("postArtifact response %s,%+v \n", respData.Message, respData.Data) response = respData.Data.(*PostArtifactResponse) if response.Status == "PROCESSING" { time.Sleep(1 * time.Minute) 
goto Retry } } else if respData.Code == 100004 { goto RetryWithNewToken } else { err = fmt.Errorf("PostArtifactResponseerror, remote response Code:%d, MSG: %s.", respData.Code, respData.Message) log.Error(err.Error()) return } return } //================CreateAccount============ type CreateUser struct { RequestNo string `json:"requestNo"` //请求编号 Uid string `json:"uid"` //用户 Id(已分配时返回) Status string `json:"status"` //状态(PROCESSING:处理中,SUCCESS:成功,FAILURE:失败) } // 创建账号 // 在此系统只只需创建一个账号,无多账户的需求。 func GenerateSubAccount(requestNo, accountName string, config *VechainConfig, tokenServer IToken) (uid string, err error) { url := "v1/artifacts/user/create" postData := ` { "requestNo":"%s", "name":"%s" } ` req, err := http.NewRequest("POST", config.SiteUrl+url, bytes.NewBufferString(fmt.Sprintf(postData, requestNo, accountName))) if err != nil { log.Err
return } req.Header.Add("Content-Type", "application/json;charset=utf-8") req.Header.Add("language", "zh_hans") req.Header.Add("x-api-token", tokenServer.GetToken()) client := http.DefaultClient resp, err := client.Do(req) if err != nil { log.Error(err.Error()) return } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { log.Error(err.Error()) return } respData := new(ResponseData) respData.Data = new(CreateUser) err = json.Unmarshal(respBody, respData) if err != nil { log.Error(err.Error()) return } if respData.Code == 1 { log.Debug("response %d,%s,%+v \n", resp.StatusCode, resp.Status, *(respData.Data.(*CreateUser))) } uid = respData.Data.(*CreateUser).Uid return }
or(err.Error())
identifier_name
vechain.go
package vechain import ( "bytes" "context" "crypto/sha256" "encoding/json" "fmt" "io/ioutil" "net/http" "strconv" "strings" "sync/atomic" "time" "github.com/myafeier/log" ) // ============common============ //通用请求表单 type Form struct { AppId string `json:"appid"` AppKey string `json:"appkey"` Nonce string `json:"nonce"` Timestamp string `json:"timestamp"` Signature string `json:"signature"` } //通用返回结构 type ResponseData struct { Data interface{} `json:"data"` Code int `json:"code"` Message string `json:"message"` } func sign(timestamp int64, config *VechainConfig) (signature string) { str := fmt.Sprintf("appid=%s&appkey=%s&nonce=%s&timestamp=%d", config.DeveloperId, config.DeveloperKey, config.Nonce, timestamp) signature = fmt.Sprintf("%x", sha256.Sum256([]byte(strings.ToLower(str)))) return } //区块链浏览器浏览地址 func BlockChainExploreLink(transactionId string, config *VechainConfig) string { return fmt.Sprintf(config.ExploreLink, transactionId) } //=========================Token====================== //返回Token结构 type Token struct { Token string `json:"token"` Expire int64 `json:"expire"` } var lock int32 = 0 var refreshError = fmt.Errorf("token refreshing") func GetToken(config *VechainConfig) (token *Token, err error) { if atomic.LoadInt32(&lock) == 1 { err = refreshError return } atomic.StoreInt32(&lock, 1) defer atomic.StoreInt32(&lock, 0) timestamp := time.Now().Unix() form := new(Form) form.AppId = config.DeveloperId form.AppKey = config.DeveloperKey form.Nonce = config.Nonce form.Timestamp = strconv.FormatInt(timestamp, 10) form.Signature = sign(timestamp, config) requestUrl := config.SiteUrl + "v1/tokens" formByte, err := json.Marshal(form) if err != nil { log.Error("%s", err.Error()) return } log.Debug("%+v", *form) data := bytes.NewReader(formByte) retryTimes := 0 Retry: retryTimes++ if retryTimes > 100 { time.Sleep(1 * time.Minute) } else if retryTimes > 1000 { time.Sleep(1 * time.Hour) } request, err := http.NewRequest("POST", requestUrl, data) if err != nil { 
log.Error("%s", err.Error()) goto Retry } defer request.Body.Close() request.Header.Set("Content-Type", "application/json") client := &http.Client{} response, err := client.Do(request) if err != nil { log.Error("%s", err.Error()) goto Retry } defer response.Body.Close() if response.StatusCode != 200 { log.Error("%s", err.Error()) goto Retry } body, err := ioutil.ReadAll(response.Body) if err != nil { log.Error("%s", err.Error()) goto Retry } log.Debug("toke response :%s \n", body) respData := new(ResponseData) respData.Data = new(Token) err = json.Unmarshal(body, respData) if respData.Code != 1 { err = fmt.Errorf("responseCode:%d error,message:%s\n", respData.Code, respData.Message) log.Error(err.Error()) goto Retry } token = respData.Data.(*Token) return } //======================Occupy============= //抢占请求表单 type OccupyVidRequest struct { RequestNo string `json:"requestNo"` VidList []string `json:"vidList"` } //抢占响应结构 type OccupyVidResponse struct { RequestNo string `json:"requestNo,omitempty"` // 请求编号 Url string `json:"url,omitempty"` // 扫码 url Quantity int `json:"quantity,omitempty"` //请求的vid个数 Status string `json:"status,omitempty"` // 生成状态(GENERATING:抢占中,SUCCESS:成功) SuccessList []string `json:"successList,omitempty"` // 抢占成功 vid 列表 FailureList []string `json:"failureList,omitempty"` // 抢占失败 vid 列表 } // 抢占vid func OccupyVid(ctx context.Context, config *VechainConfig, tokenServer IToken) (response *OccupyVidResponse, err error) { url := "v1/vid/occupy" request := (ctx.Value("request")).(*OccupyVidRequest) data, err := json.Marshal(request) if err != nil { log.Error(err.Error()) return } //log.Debug("request: %s \n",data) var justReturn bool go func() { <-ctx.Done() err = ctx.Err() justReturn = true }() retryTimes := 0 RetryWithNewToken: token := tokenServer.GetToken() Retry: retryTimes++ if justReturn { return } if retryTimes > 100 { time.Sleep(1 * time.Hour) } else if retryTimes > 10 { time.Sleep(1 * time.Minute) } req, err := http.NewRequest("POST", 
config.SiteUrl+url,
!= nil { log.Error(err.Error()) goto Retry } req.Header.Add("Content-Type", "application/json;charset=utf-8") req.Header.Add("language", "zh_hans") req.Header.Add("x-api-token", token) client := http.DefaultClient resp, err := client.Do(req) if err != nil { log.Error(err.Error()) goto Retry } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { log.Error(err.Error()) goto Retry } if resp.StatusCode != http.StatusOK { err = fmt.Errorf("RemoteServerStatusError code:%d,body:%s", resp.StatusCode, respBody) log.Error(err.Error()) goto Retry } respData := new(ResponseData) respData.Data = new(OccupyVidResponse) err = json.Unmarshal(respBody, respData) if err != nil { log.Error(err.Error()) return } if respData.Code == 1 { response = respData.Data.(*OccupyVidResponse) log.Debug("response %+v \n", *response) if response.Status == "GENERATING" { time.Sleep(1 * time.Minute) goto Retry } return } else if respData.Code == 100004 { goto RetryWithNewToken } else { err = fmt.Errorf("Occupy vid error, remote response Code:%d, MSG: %s.", respData.Code, respData.Message) log.Error(err.Error()) } return } //================================Post======== type PostArtifactResponse struct { RequestNo string `json:"requestNo,omitempty"` // 请求编号 Uid string `json:"uid,omitempty"` // 上链子账户id Status string `json:"status,omitempty"` // 生成状态(PROCESSING:上链中,SUCCESS:成功,FAILURE: 失败,INSUFFICIENT:费用不足) TxList []*PostArtifactResponseData `json:"txList,omitempty"` //上链结果 } type PostArtifactResponseData struct { TxId string `json:"txid"` //上链事务id ClauseIndex string `json:"clauseIndex"` // 每40个vid组成一个clause Vid string `json:"vid"` //商品ID DataHash string `json:"dataHash"` //? 
} type PostArtifactRequest struct { RequestNo string `json:"requestNo"` //请求编号 Uid string `json:"uid"` //用户 Id Data []*PostArtifactRequestData `json:"data,omitempty"` } type PostArtifactRequestData struct { DataHash string `json:"dataHash"` Vid string `json:"vid"` } // 异步上链 // func PostArtifact(ctx context.Context, config *VechainConfig, tokenServer IToken) (response *PostArtifactResponse, err error) { url := "v1/artifacts/hashinfo/create" request := (ctx.Value("request")).(*PostArtifactRequest) var data []byte data, err = json.Marshal(request) if err != nil { log.Error(err.Error()) return } justReturn := false go func() { <-ctx.Done() justReturn = true err = ctx.Err() log.Debug("ctx deadLine: %s", err.Error()) return }() retryTimes := 0 RetryWithNewToken: token := tokenServer.GetToken() Retry: retryTimes++ if justReturn { return } if retryTimes > 100 { time.Sleep(1 * time.Minute) } else if retryTimes > 1000 { time.Sleep(1 * time.Hour) } req, err := http.NewRequest("POST", config.SiteUrl+url, bytes.NewBuffer(data)) if err != nil { log.Error(err.Error()) return } req.Header.Add("Content-Type", "application/json;charset=utf-8") req.Header.Add("language", "zh_hans") req.Header.Add("x-api-token", token) client := http.DefaultClient resp, err := client.Do(req) if err != nil { log.Error(err.Error()) return } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { err = fmt.Errorf("[Error] After 10 times retry,RemoteServerStatusError code:%d", resp.StatusCode) log.Error(err.Error()) goto Retry } respBody, err := ioutil.ReadAll(resp.Body) if err != nil { log.Error(err.Error()) goto Retry } respData := new(ResponseData) respData.Data = new(PostArtifactResponse) err = json.Unmarshal(respBody, respData) if err != nil { log.Error(err.Error()) goto Retry } if respData.Code == 1 { log.Debug("postArtifact response %s,%+v \n", respData.Message, respData.Data) response = respData.Data.(*PostArtifactResponse) if response.Status == "PROCESSING" { time.Sleep(1 * time.Minute) 
goto Retry } } else if respData.Code == 100004 { goto RetryWithNewToken } else { err = fmt.Errorf("PostArtifactResponseerror, remote response Code:%d, MSG: %s.", respData.Code, respData.Message) log.Error(err.Error()) return } return } //================CreateAccount============ type CreateUser struct { RequestNo string `json:"requestNo"` //请求编号 Uid string `json:"uid"` //用户 Id(已分配时返回) Status string `json:"status"` //状态(PROCESSING:处理中,SUCCESS:成功,FAILURE:失败) } // 创建账号 // 在此系统只只需创建一个账号,无多账户的需求。 func GenerateSubAccount(requestNo, accountName string, config *VechainConfig, tokenServer IToken) (uid string, err error) { url := "v1/artifacts/user/create" postData := ` { "requestNo":"%s", "name":"%s" } ` req, err := http.NewRequest("POST", config.SiteUrl+url, bytes.NewBufferString(fmt.Sprintf(postData, requestNo, accountName))) if err != nil { log.Error(err.Error()) return } req.Header.Add("Content-Type", "application/json;charset=utf-8") req.Header.Add("language", "zh_hans") req.Header.Add("x-api-token", tokenServer.GetToken()) client := http.DefaultClient resp, err := client.Do(req) if err != nil { log.Error(err.Error()) return } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { log.Error(err.Error()) return } respData := new(ResponseData) respData.Data = new(CreateUser) err = json.Unmarshal(respBody, respData) if err != nil { log.Error(err.Error()) return } if respData.Code == 1 { log.Debug("response %d,%s,%+v \n", resp.StatusCode, resp.Status, *(respData.Data.(*CreateUser))) } uid = respData.Data.(*CreateUser).Uid return }
bytes.NewBuffer(data)) if err
conditional_block
vechain.go
package vechain import ( "bytes" "context" "crypto/sha256" "encoding/json" "fmt" "io/ioutil" "net/http" "strconv" "strings" "sync/atomic" "time" "github.com/myafeier/log" ) // ============common============ //通用请求表单 type Form struct { AppId string `json:"appid"` AppKey string `json:"appkey"` Nonce string `json:"nonce"` Timestamp string `json:"timestamp"` Signature string `json:"signature"` } //通用返回结构 type ResponseData struct { Data interface{} `json:"data"` Code int `json:"code"` Message string `json:"message"` } func sign(timestamp int64, config *VechainConfig) (signature string) { str := fmt.Sprintf("appid=%s&appkey=%s&nonce=%s&timestamp=%d", config.DeveloperId, config.DeveloperKey, config.Nonce, timestamp) signature = fmt.Sprintf("%x", sha256.Sum256([]byte(strings.ToLower(str)))) return } //区块链浏览器浏览地址 func BlockChainExploreLink(transactionId string, config *VechainConfig) string { return fmt.Sprintf(config.ExploreLink, tr
============ //返回Token结构 type Token struct { Token string `json:"token"` Expire int64 `json:"expire"` } var lock int32 = 0 var refreshError = fmt.Errorf("token refreshing") func GetToken(config *VechainConfig) (token *Token, err error) { if atomic.LoadInt32(&lock) == 1 { err = refreshError return } atomic.StoreInt32(&lock, 1) defer atomic.StoreInt32(&lock, 0) timestamp := time.Now().Unix() form := new(Form) form.AppId = config.DeveloperId form.AppKey = config.DeveloperKey form.Nonce = config.Nonce form.Timestamp = strconv.FormatInt(timestamp, 10) form.Signature = sign(timestamp, config) requestUrl := config.SiteUrl + "v1/tokens" formByte, err := json.Marshal(form) if err != nil { log.Error("%s", err.Error()) return } log.Debug("%+v", *form) data := bytes.NewReader(formByte) retryTimes := 0 Retry: retryTimes++ if retryTimes > 100 { time.Sleep(1 * time.Minute) } else if retryTimes > 1000 { time.Sleep(1 * time.Hour) } request, err := http.NewRequest("POST", requestUrl, data) if err != nil { log.Error("%s", err.Error()) goto Retry } defer request.Body.Close() request.Header.Set("Content-Type", "application/json") client := &http.Client{} response, err := client.Do(request) if err != nil { log.Error("%s", err.Error()) goto Retry } defer response.Body.Close() if response.StatusCode != 200 { log.Error("%s", err.Error()) goto Retry } body, err := ioutil.ReadAll(response.Body) if err != nil { log.Error("%s", err.Error()) goto Retry } log.Debug("toke response :%s \n", body) respData := new(ResponseData) respData.Data = new(Token) err = json.Unmarshal(body, respData) if respData.Code != 1 { err = fmt.Errorf("responseCode:%d error,message:%s\n", respData.Code, respData.Message) log.Error(err.Error()) goto Retry } token = respData.Data.(*Token) return } //======================Occupy============= //抢占请求表单 type OccupyVidRequest struct { RequestNo string `json:"requestNo"` VidList []string `json:"vidList"` } //抢占响应结构 type OccupyVidResponse struct { RequestNo string 
`json:"requestNo,omitempty"` // 请求编号 Url string `json:"url,omitempty"` // 扫码 url Quantity int `json:"quantity,omitempty"` //请求的vid个数 Status string `json:"status,omitempty"` // 生成状态(GENERATING:抢占中,SUCCESS:成功) SuccessList []string `json:"successList,omitempty"` // 抢占成功 vid 列表 FailureList []string `json:"failureList,omitempty"` // 抢占失败 vid 列表 } // 抢占vid func OccupyVid(ctx context.Context, config *VechainConfig, tokenServer IToken) (response *OccupyVidResponse, err error) { url := "v1/vid/occupy" request := (ctx.Value("request")).(*OccupyVidRequest) data, err := json.Marshal(request) if err != nil { log.Error(err.Error()) return } //log.Debug("request: %s \n",data) var justReturn bool go func() { <-ctx.Done() err = ctx.Err() justReturn = true }() retryTimes := 0 RetryWithNewToken: token := tokenServer.GetToken() Retry: retryTimes++ if justReturn { return } if retryTimes > 100 { time.Sleep(1 * time.Hour) } else if retryTimes > 10 { time.Sleep(1 * time.Minute) } req, err := http.NewRequest("POST", config.SiteUrl+url, bytes.NewBuffer(data)) if err != nil { log.Error(err.Error()) goto Retry } req.Header.Add("Content-Type", "application/json;charset=utf-8") req.Header.Add("language", "zh_hans") req.Header.Add("x-api-token", token) client := http.DefaultClient resp, err := client.Do(req) if err != nil { log.Error(err.Error()) goto Retry } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { log.Error(err.Error()) goto Retry } if resp.StatusCode != http.StatusOK { err = fmt.Errorf("RemoteServerStatusError code:%d,body:%s", resp.StatusCode, respBody) log.Error(err.Error()) goto Retry } respData := new(ResponseData) respData.Data = new(OccupyVidResponse) err = json.Unmarshal(respBody, respData) if err != nil { log.Error(err.Error()) return } if respData.Code == 1 { response = respData.Data.(*OccupyVidResponse) log.Debug("response %+v \n", *response) if response.Status == "GENERATING" { time.Sleep(1 * time.Minute) goto Retry } return } else if 
respData.Code == 100004 { goto RetryWithNewToken } else { err = fmt.Errorf("Occupy vid error, remote response Code:%d, MSG: %s.", respData.Code, respData.Message) log.Error(err.Error()) } return } //================================Post======== type PostArtifactResponse struct { RequestNo string `json:"requestNo,omitempty"` // 请求编号 Uid string `json:"uid,omitempty"` // 上链子账户id Status string `json:"status,omitempty"` // 生成状态(PROCESSING:上链中,SUCCESS:成功,FAILURE: 失败,INSUFFICIENT:费用不足) TxList []*PostArtifactResponseData `json:"txList,omitempty"` //上链结果 } type PostArtifactResponseData struct { TxId string `json:"txid"` //上链事务id ClauseIndex string `json:"clauseIndex"` // 每40个vid组成一个clause Vid string `json:"vid"` //商品ID DataHash string `json:"dataHash"` //? } type PostArtifactRequest struct { RequestNo string `json:"requestNo"` //请求编号 Uid string `json:"uid"` //用户 Id Data []*PostArtifactRequestData `json:"data,omitempty"` } type PostArtifactRequestData struct { DataHash string `json:"dataHash"` Vid string `json:"vid"` } // 异步上链 // func PostArtifact(ctx context.Context, config *VechainConfig, tokenServer IToken) (response *PostArtifactResponse, err error) { url := "v1/artifacts/hashinfo/create" request := (ctx.Value("request")).(*PostArtifactRequest) var data []byte data, err = json.Marshal(request) if err != nil { log.Error(err.Error()) return } justReturn := false go func() { <-ctx.Done() justReturn = true err = ctx.Err() log.Debug("ctx deadLine: %s", err.Error()) return }() retryTimes := 0 RetryWithNewToken: token := tokenServer.GetToken() Retry: retryTimes++ if justReturn { return } if retryTimes > 100 { time.Sleep(1 * time.Minute) } else if retryTimes > 1000 { time.Sleep(1 * time.Hour) } req, err := http.NewRequest("POST", config.SiteUrl+url, bytes.NewBuffer(data)) if err != nil { log.Error(err.Error()) return } req.Header.Add("Content-Type", "application/json;charset=utf-8") req.Header.Add("language", "zh_hans") req.Header.Add("x-api-token", token) client := 
http.DefaultClient resp, err := client.Do(req) if err != nil { log.Error(err.Error()) return } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { err = fmt.Errorf("[Error] After 10 times retry,RemoteServerStatusError code:%d", resp.StatusCode) log.Error(err.Error()) goto Retry } respBody, err := ioutil.ReadAll(resp.Body) if err != nil { log.Error(err.Error()) goto Retry } respData := new(ResponseData) respData.Data = new(PostArtifactResponse) err = json.Unmarshal(respBody, respData) if err != nil { log.Error(err.Error()) goto Retry } if respData.Code == 1 { log.Debug("postArtifact response %s,%+v \n", respData.Message, respData.Data) response = respData.Data.(*PostArtifactResponse) if response.Status == "PROCESSING" { time.Sleep(1 * time.Minute) goto Retry } } else if respData.Code == 100004 { goto RetryWithNewToken } else { err = fmt.Errorf("PostArtifactResponseerror, remote response Code:%d, MSG: %s.", respData.Code, respData.Message) log.Error(err.Error()) return } return } //================CreateAccount============ type CreateUser struct { RequestNo string `json:"requestNo"` //请求编号 Uid string `json:"uid"` //用户 Id(已分配时返回) Status string `json:"status"` //状态(PROCESSING:处理中,SUCCESS:成功,FAILURE:失败) } // 创建账号 // 在此系统只只需创建一个账号,无多账户的需求。 func GenerateSubAccount(requestNo, accountName string, config *VechainConfig, tokenServer IToken) (uid string, err error) { url := "v1/artifacts/user/create" postData := ` { "requestNo":"%s", "name":"%s" } ` req, err := http.NewRequest("POST", config.SiteUrl+url, bytes.NewBufferString(fmt.Sprintf(postData, requestNo, accountName))) if err != nil { log.Error(err.Error()) return } req.Header.Add("Content-Type", "application/json;charset=utf-8") req.Header.Add("language", "zh_hans") req.Header.Add("x-api-token", tokenServer.GetToken()) client := http.DefaultClient resp, err := client.Do(req) if err != nil { log.Error(err.Error()) return } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) if err != nil { 
log.Error(err.Error()) return } respData := new(ResponseData) respData.Data = new(CreateUser) err = json.Unmarshal(respBody, respData) if err != nil { log.Error(err.Error()) return } if respData.Code == 1 { log.Debug("response %d,%s,%+v \n", resp.StatusCode, resp.Status, *(respData.Data.(*CreateUser))) } uid = respData.Data.(*CreateUser).Uid return }
ansactionId) } //=========================Token==========
identifier_body
pyvideo_scrape.py
#!/usr/bin/env python3 """Scrape several conferences into pyvideo repository""" import copy import datetime import json import os import pathlib import re import sys import sh import slugify import yaml import youtube_dl from loguru import logger JSON_FORMAT_KWARGS = { 'indent': 2, 'separators': (',', ': '), 'sort_keys': True, } def load_events(fich): """Loads events data yaml file""" with fich.open() as fd_conf: yaml_text = fd_conf.read() conf = yaml.safe_load(yaml_text) return yaml_text, conf def save_file(path, text): """Create a file in `path` with content `text`""" with path.open(mode='w') as f_stream: f_stream.write(text) def youtube_dl_version(): """Returns the actual version of youtube-dl""" import pkg_resources return pkg_resources.get_distribution("youtube-dl").version class Event: """PyVideo Event metadata""" def __init__(self, event_data: dict, repository_path): self.videos = [] self.youtube_videos = [] self.file_videos = [] self.repository_path = repository_path self.branch = event_data['dir'] self.event_dir = self.repository_path / event_data['dir'] self.video_dir = self.event_dir / 'videos' self.title = event_data['title'] for mandatory_field in ['title', 'dir', 'issue', 'youtube_list']: if mandatory_field in event_data and event_data[mandatory_field]: pass else: logger.error('No {} data in conference {}', mandatory_field, self.title) raise ValueError("{} can't be null".format(mandatory_field)) self.issue = event_data['issue'] if isinstance(event_data['youtube_list'], str): self.youtube_lists = [event_data['youtube_list']] elif isinstance(event_data['youtube_list'], list): self.youtube_lists = event_data['youtube_list'] else: raise TypeError( "youtube_list must be a string or a list of strings") self.related_urls = event_data.get('related_urls', []) self.language = event_data.get('language', None) self.tags = event_data.get('tags', []) if not self.tags: self.tags = [] if 'dates' in event_data and event_data['dates']: self.know_date = True 
self.date_begin = event_data['dates']['begin'] self.date_end = event_data['dates'].get('end', self.date_begin) self.date_default = event_data['dates'].get( 'default', self.date_begin) else: self.know_date = False self.minimal_download = event_data.get('minimal_download', False) if self.minimal_download: self.branch = "{}--minimal-download".format(self.branch) self.overwrite, self.add_new_files, self.wipe = False, False, False self.overwrite_fields = [] if 'overwrite' in event_data and event_data['overwrite']: overwrite = event_data['overwrite'] self.overwrite = True if 'all' in overwrite and overwrite['all']: self.wipe = True else: if 'add_new_files' in overwrite and overwrite['add_new_files']: self.add_new_files = True if ('existing_files_fields' in overwrite and overwrite['existing_files_fields']): self.overwrite_fields = overwrite['existing_files_fields'] def create_branch(self): """Create a new branch in pyvideo repository to add a new event""" os.chdir(str(self.repository_path)) sh.git.checkout('master') sh.git.checkout('-b', self.branch) logger.debug('Branch {} created', self.branch) def create_dirs(self): """Create new directories and conference file in pyvideo repository to add a new event""" for new_directory in [self.event_dir, self.event_dir / 'videos']: new_directory.mkdir(exist_ok=self.overwrite) logger.debug('Dir {} created', new_directory) def create_category(self): # , conf_dir, title): """Create category.json for the conference""" category_file_path = self.event_dir / 'category.json' category_data = { 'title': self.title, } category_data_text = json.dumps(category_data, ** JSON_FORMAT_KWARGS) + '\n' save_file(category_file_path, category_data_text) logger.debug('File {} created', category_file_path) def download_video_data(self): """Download youtube metadata corresponding to this event youtube lists""" def scrape_url(url): """Scrape the video list, youtube_dl does all the heavy lifting""" ydl_opts = { "ignoreerrors": True, # Skip private and 
unavaliable videos } ydl = youtube_dl.YoutubeDL(ydl_opts) with ydl: result_ydl = ydl.extract_info( url, download=False # No download needed, only the info ) logger.debug('Url scraped {}', url) if 'entries' in result_ydl: # It's a playlist or a list of videos return result_ydl['entries'] # Just a video return [result_ydl]
if youtube_video_data: # Valid video self.youtube_videos.append( Video.from_youtube( video_data=youtube_video_data, event=self)) else: logger.warning('Null youtube video') def load_video_data(self): """Load video data form existing event video files""" self.file_videos = [ Video.from_file(path, self) for path in self.video_dir.glob('*.json') ] def merge_video_data(self): """Merge old video data when configured so""" if self.overwrite: if self.wipe: self.videos = self.youtube_videos elif self.add_new_files or self.overwrite_fields: old_videos = { video.filename: video for video in self.file_videos } old_videos_url = { video.metadata['videos'][0]['url']: video for video in self.file_videos } new_videos = {} for video in self.youtube_videos: new_video_url = video.metadata['videos'][0]['url'] if new_video_url in old_videos_url: new_video_filename = old_videos_url[new_video_url].filename else: new_video_filename = video.filename new_videos[new_video_filename] = video if self.overwrite_fields: forgotten = set(old_videos) - set(new_videos) for name in forgotten: logger.warning('Missing video: {} {}', old_videos[name].filename, old_videos[name].metadata['videos'][0]['url'], ) changes = set(new_videos).intersection(set(old_videos)) for path in changes: merged_video = old_videos[path].merge( new_videos[path], self.overwrite_fields) self.videos.append(merged_video) else: self.videos = self.file_videos if self.add_new_files: adds = set(new_videos) - set(old_videos) self.videos.extend([new_videos[path] for path in adds]) else: # not self.overwrite self.videos = self.youtube_videos def save_video_data(self): """Save all event videos in PyVideo format""" if self.overwrite: # Erase old event videos for path in self.video_dir.glob('*.json'): path.unlink() for video in self.videos: video.save() def create_commit(self, event_data_yaml): """Create a new commit in pyvideo repository with the new event data""" os.chdir(str(self.repository_path)) sh.git.checkout(self.branch) 
sh.git.add(self.event_dir) message_body = ( '\n\nEvent config:\n~~~yaml\n{}\n~~~\n'.format(event_data_yaml) + '\nScraped with [pyvideo_scrape]' + '(https://github.com/pyvideo/pyvideo_scrape)') if self.minimal_download: message = ('Minimal download: ' + '{}\n\nMinimal download executed for #{}'.format( self.title, self.issue) + '\n\nOnly data that needs [no review](https://' + 'github.com/pyvideo/pyvideo_scrape#use-cases) was scraped.' + '\nThis event needs further scraping and human ' + 'reviewing for the description and other data to show.' + message_body) sh.git.commit('-m', message) sh.git.push('--set-upstream', 'origin', self.branch) # ~ sh.git.push('--set-upstream', '--force', 'origin', self.branch) sh.git.checkout('master') else: message = ( 'Scraped {}\n\nFixes #{}'.format(self.branch, self.issue) + message_body) sh.git.commit('-m', message) sh.git.checkout('master') logger.debug('Conference {} commited', self.branch) class Video: """PyVideo Video metadata""" @staticmethod def __calculate_title(video_data): """Calculate title from youtube fields""" title = 'Unknown' if 'fulltitle' in video_data.keys(): title = video_data['fulltitle'] elif 'title' in video_data.keys(): title = video_data['title'] elif '_filename' in video_data.keys(): title = video_data['_filename'] return title def __calculate_slug(self): """Calculate slug from title""" return slugify.slugify(self.metadata['title']) def __calculate_date_recorded(self, upload_date_str): """Calculate record date from youtube field and event dates""" upload_date = datetime.date( int(upload_date_str[0:4]), int(upload_date_str[4:6]), int(upload_date_str[6:8])) if self.event.know_date: if not (self.event.date_begin <= upload_date <= self.event.date_end): return self.event.date_default.isoformat() return upload_date.isoformat() def __init__(self, event): self.event = event self.filename = None self.metadata = {} @classmethod def from_file(cls, path, event): """Contructor. 
Retrieves video metadata from file""" self = cls(event) self.filename = path.stem # Name without .json try: with path.open() as f_path: self.metadata = json.load(f_path) except ValueError: print('Json syntax error in file {}'.format(path)) raise return self @classmethod def from_youtube(cls, video_data, event): """Contructor. Retrieves video metadata with youtube-dl""" self = cls(event) metadata = self.metadata metadata['title'] = self.__calculate_title(video_data) self.filename = self.__calculate_slug() metadata['speakers'] = ['TODO'] # Needs human intervention later # youtube_id = video_data['display_id'] # metadata['thumbnail_url'] = # 'https://i.ytimg.com/vi/{}/maxresdefault.jpg'.format(youtube_id) metadata['thumbnail_url'] = video_data['thumbnail'] metadata['videos'] = [{ 'type': 'youtube', 'url': video_data['webpage_url'] }] metadata['recorded'] = self.__calculate_date_recorded( video_data['upload_date']) # optional values metadata['copyright_text'] = video_data['license'] metadata['duration'] = video_data['duration'] # In seconds metadata['language'] = video_data['formats'][0].get( 'language', event.language) if not metadata['language']: metadata['language'] = event.language metadata['related_urls'] = copy.deepcopy(event.related_urls) if event.minimal_download: metadata['speakers'] = [] metadata['tags'] = event.tags metadata['description'] = '' else: metadata['tags'] = sorted( set(video_data['tags']).union(set(event.tags))) metadata['description'] = video_data['description'] description_urls = list( set( re.findall(r'http[s]?://[^ \\\n\t()[\]"`´\']+', video_data[ 'description']))) for url in description_urls: metadata['related_urls'].append({'label': url, 'url': url}) return self def merge(self, new_video, fields): """Create video copy overwriting fields """ merged_video = Video(self.event) merged_video.filename = self.filename for field in self.metadata: if field in set(fields): merged_video.metadata[field] = new_video.metadata.get(field) else: 
merged_video.metadata[field] = self.metadata.get(field) return merged_video def save(self): """"Save to disk""" path = self.event.video_dir / '{}.json'.format(self.filename) if path.exists(): duplicate_num = 1 new_path = path while new_path.exists(): duplicate_num += 1 new_path = path.parent / ( path.stem + '-{}{}'.format(duplicate_num, path.suffix)) logger.debug('Duplicate, renaming to {}', path) path = new_path data_text = json.dumps(self.metadata, **JSON_FORMAT_KWARGS) + '\n' save_file(path, data_text) logger.debug('File {} created', path) @logger.catch def main(): """Scrape several conferences into pyvideo repository""" logger.add( sys.stderr, format="{time} {level} {message}", filter="my_module", level="DEBUG") time_init = datetime.datetime.now() logger.debug('Time init: {}', time_init) logger.debug('youtube-dl version: {} ', youtube_dl_version()) cwd = pathlib.Path.cwd() events_file = cwd / 'events.yml' event_data_yaml, events_data = load_events(events_file) pyvideo_repo = pathlib.PosixPath( events_data['repo_dir']).expanduser().resolve() events = [ Event(event_data, repository_path=pyvideo_repo) for event_data in events_data['events'] ] for event in events: try: event.create_branch() event.create_dirs() event.create_category() except (sh.ErrorReturnCode_128, FileExistsError) as exc: logger.warning('Event {} skipped', event.branch) logger.debug(exc.args[0]) continue event.download_video_data() event.load_video_data() event.merge_video_data() event.save_video_data() event.create_commit(event_data_yaml) time_end = datetime.datetime.now() time_delta = str(time_end - time_init) logger.debug('Time init: {}', time_init) logger.debug('Time end: {}', time_end) logger.debug('Time delta: {}', time_delta) if __name__ == '__main__': main()
youtube_list = sum((scrape_url(url) for url in self.youtube_lists), []) for youtube_video_data in youtube_list:
random_line_split
pyvideo_scrape.py
#!/usr/bin/env python3 """Scrape several conferences into pyvideo repository""" import copy import datetime import json import os import pathlib import re import sys import sh import slugify import yaml import youtube_dl from loguru import logger JSON_FORMAT_KWARGS = { 'indent': 2, 'separators': (',', ': '), 'sort_keys': True, } def load_events(fich): """Loads events data yaml file""" with fich.open() as fd_conf: yaml_text = fd_conf.read() conf = yaml.safe_load(yaml_text) return yaml_text, conf def save_file(path, text): """Create a file in `path` with content `text`""" with path.open(mode='w') as f_stream: f_stream.write(text) def youtube_dl_version(): """Returns the actual version of youtube-dl""" import pkg_resources return pkg_resources.get_distribution("youtube-dl").version class Event: """PyVideo Event metadata""" def __init__(self, event_data: dict, repository_path): self.videos = [] self.youtube_videos = [] self.file_videos = [] self.repository_path = repository_path self.branch = event_data['dir'] self.event_dir = self.repository_path / event_data['dir'] self.video_dir = self.event_dir / 'videos' self.title = event_data['title'] for mandatory_field in ['title', 'dir', 'issue', 'youtube_list']: if mandatory_field in event_data and event_data[mandatory_field]: pass else: logger.error('No {} data in conference {}', mandatory_field, self.title) raise ValueError("{} can't be null".format(mandatory_field)) self.issue = event_data['issue'] if isinstance(event_data['youtube_list'], str): self.youtube_lists = [event_data['youtube_list']] elif isinstance(event_data['youtube_list'], list): self.youtube_lists = event_data['youtube_list'] else: raise TypeError( "youtube_list must be a string or a list of strings") self.related_urls = event_data.get('related_urls', []) self.language = event_data.get('language', None) self.tags = event_data.get('tags', []) if not self.tags: self.tags = [] if 'dates' in event_data and event_data['dates']: self.know_date = True 
self.date_begin = event_data['dates']['begin'] self.date_end = event_data['dates'].get('end', self.date_begin) self.date_default = event_data['dates'].get( 'default', self.date_begin) else: self.know_date = False self.minimal_download = event_data.get('minimal_download', False) if self.minimal_download: self.branch = "{}--minimal-download".format(self.branch) self.overwrite, self.add_new_files, self.wipe = False, False, False self.overwrite_fields = [] if 'overwrite' in event_data and event_data['overwrite']: overwrite = event_data['overwrite'] self.overwrite = True if 'all' in overwrite and overwrite['all']: self.wipe = True else: if 'add_new_files' in overwrite and overwrite['add_new_files']: self.add_new_files = True if ('existing_files_fields' in overwrite and overwrite['existing_files_fields']): self.overwrite_fields = overwrite['existing_files_fields'] def create_branch(self): """Create a new branch in pyvideo repository to add a new event""" os.chdir(str(self.repository_path)) sh.git.checkout('master') sh.git.checkout('-b', self.branch) logger.debug('Branch {} created', self.branch) def create_dirs(self): """Create new directories and conference file in pyvideo repository to add a new event""" for new_directory in [self.event_dir, self.event_dir / 'videos']: new_directory.mkdir(exist_ok=self.overwrite) logger.debug('Dir {} created', new_directory) def create_category(self): # , conf_dir, title): """Create category.json for the conference""" category_file_path = self.event_dir / 'category.json' category_data = { 'title': self.title, } category_data_text = json.dumps(category_data, ** JSON_FORMAT_KWARGS) + '\n' save_file(category_file_path, category_data_text) logger.debug('File {} created', category_file_path) def download_video_data(self): """Download youtube metadata corresponding to this event youtube lists""" def scrape_url(url): """Scrape the video list, youtube_dl does all the heavy lifting""" ydl_opts = { "ignoreerrors": True, # Skip private and 
unavaliable videos } ydl = youtube_dl.YoutubeDL(ydl_opts) with ydl: result_ydl = ydl.extract_info( url, download=False # No download needed, only the info ) logger.debug('Url scraped {}', url) if 'entries' in result_ydl: # It's a playlist or a list of videos return result_ydl['entries'] # Just a video return [result_ydl] youtube_list = sum((scrape_url(url) for url in self.youtube_lists), []) for youtube_video_data in youtube_list: if youtube_video_data: # Valid video self.youtube_videos.append( Video.from_youtube( video_data=youtube_video_data, event=self)) else: logger.warning('Null youtube video') def load_video_data(self): """Load video data form existing event video files""" self.file_videos = [ Video.from_file(path, self) for path in self.video_dir.glob('*.json') ] def merge_video_data(self): """Merge old video data when configured so""" if self.overwrite: if self.wipe: self.videos = self.youtube_videos elif self.add_new_files or self.overwrite_fields: old_videos = { video.filename: video for video in self.file_videos } old_videos_url = { video.metadata['videos'][0]['url']: video for video in self.file_videos } new_videos = {} for video in self.youtube_videos: new_video_url = video.metadata['videos'][0]['url'] if new_video_url in old_videos_url: new_video_filename = old_videos_url[new_video_url].filename else: new_video_filename = video.filename new_videos[new_video_filename] = video if self.overwrite_fields: forgotten = set(old_videos) - set(new_videos) for name in forgotten: logger.warning('Missing video: {} {}', old_videos[name].filename, old_videos[name].metadata['videos'][0]['url'], ) changes = set(new_videos).intersection(set(old_videos)) for path in changes: merged_video = old_videos[path].merge( new_videos[path], self.overwrite_fields) self.videos.append(merged_video) else: self.videos = self.file_videos if self.add_new_files: adds = set(new_videos) - set(old_videos) self.videos.extend([new_videos[path] for path in adds]) else: # not self.overwrite
def save_video_data(self): """Save all event videos in PyVideo format""" if self.overwrite: # Erase old event videos for path in self.video_dir.glob('*.json'): path.unlink() for video in self.videos: video.save() def create_commit(self, event_data_yaml): """Create a new commit in pyvideo repository with the new event data""" os.chdir(str(self.repository_path)) sh.git.checkout(self.branch) sh.git.add(self.event_dir) message_body = ( '\n\nEvent config:\n~~~yaml\n{}\n~~~\n'.format(event_data_yaml) + '\nScraped with [pyvideo_scrape]' + '(https://github.com/pyvideo/pyvideo_scrape)') if self.minimal_download: message = ('Minimal download: ' + '{}\n\nMinimal download executed for #{}'.format( self.title, self.issue) + '\n\nOnly data that needs [no review](https://' + 'github.com/pyvideo/pyvideo_scrape#use-cases) was scraped.' + '\nThis event needs further scraping and human ' + 'reviewing for the description and other data to show.' + message_body) sh.git.commit('-m', message) sh.git.push('--set-upstream', 'origin', self.branch) # ~ sh.git.push('--set-upstream', '--force', 'origin', self.branch) sh.git.checkout('master') else: message = ( 'Scraped {}\n\nFixes #{}'.format(self.branch, self.issue) + message_body) sh.git.commit('-m', message) sh.git.checkout('master') logger.debug('Conference {} commited', self.branch) class Video: """PyVideo Video metadata""" @staticmethod def __calculate_title(video_data): """Calculate title from youtube fields""" title = 'Unknown' if 'fulltitle' in video_data.keys(): title = video_data['fulltitle'] elif 'title' in video_data.keys(): title = video_data['title'] elif '_filename' in video_data.keys(): title = video_data['_filename'] return title def __calculate_slug(self): """Calculate slug from title""" return slugify.slugify(self.metadata['title']) def __calculate_date_recorded(self, upload_date_str): """Calculate record date from youtube field and event dates""" upload_date = datetime.date( int(upload_date_str[0:4]), 
int(upload_date_str[4:6]), int(upload_date_str[6:8])) if self.event.know_date: if not (self.event.date_begin <= upload_date <= self.event.date_end): return self.event.date_default.isoformat() return upload_date.isoformat() def __init__(self, event): self.event = event self.filename = None self.metadata = {} @classmethod def from_file(cls, path, event): """Contructor. Retrieves video metadata from file""" self = cls(event) self.filename = path.stem # Name without .json try: with path.open() as f_path: self.metadata = json.load(f_path) except ValueError: print('Json syntax error in file {}'.format(path)) raise return self @classmethod def from_youtube(cls, video_data, event): """Contructor. Retrieves video metadata with youtube-dl""" self = cls(event) metadata = self.metadata metadata['title'] = self.__calculate_title(video_data) self.filename = self.__calculate_slug() metadata['speakers'] = ['TODO'] # Needs human intervention later # youtube_id = video_data['display_id'] # metadata['thumbnail_url'] = # 'https://i.ytimg.com/vi/{}/maxresdefault.jpg'.format(youtube_id) metadata['thumbnail_url'] = video_data['thumbnail'] metadata['videos'] = [{ 'type': 'youtube', 'url': video_data['webpage_url'] }] metadata['recorded'] = self.__calculate_date_recorded( video_data['upload_date']) # optional values metadata['copyright_text'] = video_data['license'] metadata['duration'] = video_data['duration'] # In seconds metadata['language'] = video_data['formats'][0].get( 'language', event.language) if not metadata['language']: metadata['language'] = event.language metadata['related_urls'] = copy.deepcopy(event.related_urls) if event.minimal_download: metadata['speakers'] = [] metadata['tags'] = event.tags metadata['description'] = '' else: metadata['tags'] = sorted( set(video_data['tags']).union(set(event.tags))) metadata['description'] = video_data['description'] description_urls = list( set( re.findall(r'http[s]?://[^ \\\n\t()[\]"`´\']+', video_data[ 'description']))) for url in 
description_urls: metadata['related_urls'].append({'label': url, 'url': url}) return self def merge(self, new_video, fields): """Create video copy overwriting fields """ merged_video = Video(self.event) merged_video.filename = self.filename for field in self.metadata: if field in set(fields): merged_video.metadata[field] = new_video.metadata.get(field) else: merged_video.metadata[field] = self.metadata.get(field) return merged_video def save(self): """"Save to disk""" path = self.event.video_dir / '{}.json'.format(self.filename) if path.exists(): duplicate_num = 1 new_path = path while new_path.exists(): duplicate_num += 1 new_path = path.parent / ( path.stem + '-{}{}'.format(duplicate_num, path.suffix)) logger.debug('Duplicate, renaming to {}', path) path = new_path data_text = json.dumps(self.metadata, **JSON_FORMAT_KWARGS) + '\n' save_file(path, data_text) logger.debug('File {} created', path) @logger.catch def main(): """Scrape several conferences into pyvideo repository""" logger.add( sys.stderr, format="{time} {level} {message}", filter="my_module", level="DEBUG") time_init = datetime.datetime.now() logger.debug('Time init: {}', time_init) logger.debug('youtube-dl version: {} ', youtube_dl_version()) cwd = pathlib.Path.cwd() events_file = cwd / 'events.yml' event_data_yaml, events_data = load_events(events_file) pyvideo_repo = pathlib.PosixPath( events_data['repo_dir']).expanduser().resolve() events = [ Event(event_data, repository_path=pyvideo_repo) for event_data in events_data['events'] ] for event in events: try: event.create_branch() event.create_dirs() event.create_category() except (sh.ErrorReturnCode_128, FileExistsError) as exc: logger.warning('Event {} skipped', event.branch) logger.debug(exc.args[0]) continue event.download_video_data() event.load_video_data() event.merge_video_data() event.save_video_data() event.create_commit(event_data_yaml) time_end = datetime.datetime.now() time_delta = str(time_end - time_init) logger.debug('Time init: {}', 
time_init) logger.debug('Time end: {}', time_end) logger.debug('Time delta: {}', time_delta) if __name__ == '__main__': main()
self.videos = self.youtube_videos
conditional_block
pyvideo_scrape.py
#!/usr/bin/env python3 """Scrape several conferences into pyvideo repository""" import copy import datetime import json import os import pathlib import re import sys import sh import slugify import yaml import youtube_dl from loguru import logger JSON_FORMAT_KWARGS = { 'indent': 2, 'separators': (',', ': '), 'sort_keys': True, } def load_events(fich): """Loads events data yaml file""" with fich.open() as fd_conf: yaml_text = fd_conf.read() conf = yaml.safe_load(yaml_text) return yaml_text, conf def save_file(path, text): """Create a file in `path` with content `text`""" with path.open(mode='w') as f_stream: f_stream.write(text) def youtube_dl_version(): """Returns the actual version of youtube-dl""" import pkg_resources return pkg_resources.get_distribution("youtube-dl").version class Event: """PyVideo Event metadata""" def __init__(self, event_data: dict, repository_path): self.videos = [] self.youtube_videos = [] self.file_videos = [] self.repository_path = repository_path self.branch = event_data['dir'] self.event_dir = self.repository_path / event_data['dir'] self.video_dir = self.event_dir / 'videos' self.title = event_data['title'] for mandatory_field in ['title', 'dir', 'issue', 'youtube_list']: if mandatory_field in event_data and event_data[mandatory_field]: pass else: logger.error('No {} data in conference {}', mandatory_field, self.title) raise ValueError("{} can't be null".format(mandatory_field)) self.issue = event_data['issue'] if isinstance(event_data['youtube_list'], str): self.youtube_lists = [event_data['youtube_list']] elif isinstance(event_data['youtube_list'], list): self.youtube_lists = event_data['youtube_list'] else: raise TypeError( "youtube_list must be a string or a list of strings") self.related_urls = event_data.get('related_urls', []) self.language = event_data.get('language', None) self.tags = event_data.get('tags', []) if not self.tags: self.tags = [] if 'dates' in event_data and event_data['dates']: self.know_date = True 
self.date_begin = event_data['dates']['begin'] self.date_end = event_data['dates'].get('end', self.date_begin) self.date_default = event_data['dates'].get( 'default', self.date_begin) else: self.know_date = False self.minimal_download = event_data.get('minimal_download', False) if self.minimal_download: self.branch = "{}--minimal-download".format(self.branch) self.overwrite, self.add_new_files, self.wipe = False, False, False self.overwrite_fields = [] if 'overwrite' in event_data and event_data['overwrite']: overwrite = event_data['overwrite'] self.overwrite = True if 'all' in overwrite and overwrite['all']: self.wipe = True else: if 'add_new_files' in overwrite and overwrite['add_new_files']: self.add_new_files = True if ('existing_files_fields' in overwrite and overwrite['existing_files_fields']): self.overwrite_fields = overwrite['existing_files_fields'] def create_branch(self): """Create a new branch in pyvideo repository to add a new event""" os.chdir(str(self.repository_path)) sh.git.checkout('master') sh.git.checkout('-b', self.branch) logger.debug('Branch {} created', self.branch) def create_dirs(self): """Create new directories and conference file in pyvideo repository to add a new event""" for new_directory in [self.event_dir, self.event_dir / 'videos']: new_directory.mkdir(exist_ok=self.overwrite) logger.debug('Dir {} created', new_directory) def create_category(self): # , conf_dir, title): """Create category.json for the conference""" category_file_path = self.event_dir / 'category.json' category_data = { 'title': self.title, } category_data_text = json.dumps(category_data, ** JSON_FORMAT_KWARGS) + '\n' save_file(category_file_path, category_data_text) logger.debug('File {} created', category_file_path) def download_video_data(self): """Download youtube metadata corresponding to this event youtube lists""" def scrape_url(url): """Scrape the video list, youtube_dl does all the heavy lifting""" ydl_opts = { "ignoreerrors": True, # Skip private and 
unavaliable videos } ydl = youtube_dl.YoutubeDL(ydl_opts) with ydl: result_ydl = ydl.extract_info( url, download=False # No download needed, only the info ) logger.debug('Url scraped {}', url) if 'entries' in result_ydl: # It's a playlist or a list of videos return result_ydl['entries'] # Just a video return [result_ydl] youtube_list = sum((scrape_url(url) for url in self.youtube_lists), []) for youtube_video_data in youtube_list: if youtube_video_data: # Valid video self.youtube_videos.append( Video.from_youtube( video_data=youtube_video_data, event=self)) else: logger.warning('Null youtube video') def load_video_data(self): """Load video data form existing event video files""" self.file_videos = [ Video.from_file(path, self) for path in self.video_dir.glob('*.json') ] def merge_video_data(self): """Merge old video data when configured so""" if self.overwrite: if self.wipe: self.videos = self.youtube_videos elif self.add_new_files or self.overwrite_fields: old_videos = { video.filename: video for video in self.file_videos } old_videos_url = { video.metadata['videos'][0]['url']: video for video in self.file_videos } new_videos = {} for video in self.youtube_videos: new_video_url = video.metadata['videos'][0]['url'] if new_video_url in old_videos_url: new_video_filename = old_videos_url[new_video_url].filename else: new_video_filename = video.filename new_videos[new_video_filename] = video if self.overwrite_fields: forgotten = set(old_videos) - set(new_videos) for name in forgotten: logger.warning('Missing video: {} {}', old_videos[name].filename, old_videos[name].metadata['videos'][0]['url'], ) changes = set(new_videos).intersection(set(old_videos)) for path in changes: merged_video = old_videos[path].merge( new_videos[path], self.overwrite_fields) self.videos.append(merged_video) else: self.videos = self.file_videos if self.add_new_files: adds = set(new_videos) - set(old_videos) self.videos.extend([new_videos[path] for path in adds]) else: # not self.overwrite 
self.videos = self.youtube_videos def save_video_data(self): """Save all event videos in PyVideo format""" if self.overwrite: # Erase old event videos for path in self.video_dir.glob('*.json'): path.unlink() for video in self.videos: video.save() def create_commit(self, event_data_yaml): """Create a new commit in pyvideo repository with the new event data""" os.chdir(str(self.repository_path)) sh.git.checkout(self.branch) sh.git.add(self.event_dir) message_body = ( '\n\nEvent config:\n~~~yaml\n{}\n~~~\n'.format(event_data_yaml) + '\nScraped with [pyvideo_scrape]' + '(https://github.com/pyvideo/pyvideo_scrape)') if self.minimal_download: message = ('Minimal download: ' + '{}\n\nMinimal download executed for #{}'.format( self.title, self.issue) + '\n\nOnly data that needs [no review](https://' + 'github.com/pyvideo/pyvideo_scrape#use-cases) was scraped.' + '\nThis event needs further scraping and human ' + 'reviewing for the description and other data to show.' + message_body) sh.git.commit('-m', message) sh.git.push('--set-upstream', 'origin', self.branch) # ~ sh.git.push('--set-upstream', '--force', 'origin', self.branch) sh.git.checkout('master') else: message = ( 'Scraped {}\n\nFixes #{}'.format(self.branch, self.issue) + message_body) sh.git.commit('-m', message) sh.git.checkout('master') logger.debug('Conference {} commited', self.branch) class Video: """PyVideo Video metadata""" @staticmethod def __calculate_title(video_data): """Calculate title from youtube fields""" title = 'Unknown' if 'fulltitle' in video_data.keys(): title = video_data['fulltitle'] elif 'title' in video_data.keys(): title = video_data['title'] elif '_filename' in video_data.keys(): title = video_data['_filename'] return title def __calculate_slug(self): """Calculate slug from title""" return slugify.slugify(self.metadata['title']) def __calculate_date_recorded(self, upload_date_str): """Calculate record date from youtube field and event dates""" upload_date = datetime.date( 
int(upload_date_str[0:4]), int(upload_date_str[4:6]), int(upload_date_str[6:8])) if self.event.know_date: if not (self.event.date_begin <= upload_date <= self.event.date_end): return self.event.date_default.isoformat() return upload_date.isoformat() def __init__(self, event): self.event = event self.filename = None self.metadata = {} @classmethod def from_file(cls, path, event): """Contructor. Retrieves video metadata from file""" self = cls(event) self.filename = path.stem # Name without .json try: with path.open() as f_path: self.metadata = json.load(f_path) except ValueError: print('Json syntax error in file {}'.format(path)) raise return self @classmethod def from_youtube(cls, video_data, event): """Contructor. Retrieves video metadata with youtube-dl""" self = cls(event) metadata = self.metadata metadata['title'] = self.__calculate_title(video_data) self.filename = self.__calculate_slug() metadata['speakers'] = ['TODO'] # Needs human intervention later # youtube_id = video_data['display_id'] # metadata['thumbnail_url'] = # 'https://i.ytimg.com/vi/{}/maxresdefault.jpg'.format(youtube_id) metadata['thumbnail_url'] = video_data['thumbnail'] metadata['videos'] = [{ 'type': 'youtube', 'url': video_data['webpage_url'] }] metadata['recorded'] = self.__calculate_date_recorded( video_data['upload_date']) # optional values metadata['copyright_text'] = video_data['license'] metadata['duration'] = video_data['duration'] # In seconds metadata['language'] = video_data['formats'][0].get( 'language', event.language) if not metadata['language']: metadata['language'] = event.language metadata['related_urls'] = copy.deepcopy(event.related_urls) if event.minimal_download: metadata['speakers'] = [] metadata['tags'] = event.tags metadata['description'] = '' else: metadata['tags'] = sorted( set(video_data['tags']).union(set(event.tags))) metadata['description'] = video_data['description'] description_urls = list( set( re.findall(r'http[s]?://[^ \\\n\t()[\]"`´\']+', video_data[ 
'description']))) for url in description_urls: metadata['related_urls'].append({'label': url, 'url': url}) return self def merge(self, new_video, fields): "
def save(self): """"Save to disk""" path = self.event.video_dir / '{}.json'.format(self.filename) if path.exists(): duplicate_num = 1 new_path = path while new_path.exists(): duplicate_num += 1 new_path = path.parent / ( path.stem + '-{}{}'.format(duplicate_num, path.suffix)) logger.debug('Duplicate, renaming to {}', path) path = new_path data_text = json.dumps(self.metadata, **JSON_FORMAT_KWARGS) + '\n' save_file(path, data_text) logger.debug('File {} created', path) @logger.catch def main(): """Scrape several conferences into pyvideo repository""" logger.add( sys.stderr, format="{time} {level} {message}", filter="my_module", level="DEBUG") time_init = datetime.datetime.now() logger.debug('Time init: {}', time_init) logger.debug('youtube-dl version: {} ', youtube_dl_version()) cwd = pathlib.Path.cwd() events_file = cwd / 'events.yml' event_data_yaml, events_data = load_events(events_file) pyvideo_repo = pathlib.PosixPath( events_data['repo_dir']).expanduser().resolve() events = [ Event(event_data, repository_path=pyvideo_repo) for event_data in events_data['events'] ] for event in events: try: event.create_branch() event.create_dirs() event.create_category() except (sh.ErrorReturnCode_128, FileExistsError) as exc: logger.warning('Event {} skipped', event.branch) logger.debug(exc.args[0]) continue event.download_video_data() event.load_video_data() event.merge_video_data() event.save_video_data() event.create_commit(event_data_yaml) time_end = datetime.datetime.now() time_delta = str(time_end - time_init) logger.debug('Time init: {}', time_init) logger.debug('Time end: {}', time_end) logger.debug('Time delta: {}', time_delta) if __name__ == '__main__': main()
""Create video copy overwriting fields """ merged_video = Video(self.event) merged_video.filename = self.filename for field in self.metadata: if field in set(fields): merged_video.metadata[field] = new_video.metadata.get(field) else: merged_video.metadata[field] = self.metadata.get(field) return merged_video
identifier_body
pyvideo_scrape.py
#!/usr/bin/env python3 """Scrape several conferences into pyvideo repository""" import copy import datetime import json import os import pathlib import re import sys import sh import slugify import yaml import youtube_dl from loguru import logger JSON_FORMAT_KWARGS = { 'indent': 2, 'separators': (',', ': '), 'sort_keys': True, } def load_events(fich): """Loads events data yaml file""" with fich.open() as fd_conf: yaml_text = fd_conf.read() conf = yaml.safe_load(yaml_text) return yaml_text, conf def save_file(path, text): """Create a file in `path` with content `text`""" with path.open(mode='w') as f_stream: f_stream.write(text) def youtube_dl_version(): """Returns the actual version of youtube-dl""" import pkg_resources return pkg_resources.get_distribution("youtube-dl").version class Event: """PyVideo Event metadata""" def __init__(self, event_data: dict, repository_path): self.videos = [] self.youtube_videos = [] self.file_videos = [] self.repository_path = repository_path self.branch = event_data['dir'] self.event_dir = self.repository_path / event_data['dir'] self.video_dir = self.event_dir / 'videos' self.title = event_data['title'] for mandatory_field in ['title', 'dir', 'issue', 'youtube_list']: if mandatory_field in event_data and event_data[mandatory_field]: pass else: logger.error('No {} data in conference {}', mandatory_field, self.title) raise ValueError("{} can't be null".format(mandatory_field)) self.issue = event_data['issue'] if isinstance(event_data['youtube_list'], str): self.youtube_lists = [event_data['youtube_list']] elif isinstance(event_data['youtube_list'], list): self.youtube_lists = event_data['youtube_list'] else: raise TypeError( "youtube_list must be a string or a list of strings") self.related_urls = event_data.get('related_urls', []) self.language = event_data.get('language', None) self.tags = event_data.get('tags', []) if not self.tags: self.tags = [] if 'dates' in event_data and event_data['dates']: self.know_date = True 
self.date_begin = event_data['dates']['begin'] self.date_end = event_data['dates'].get('end', self.date_begin) self.date_default = event_data['dates'].get( 'default', self.date_begin) else: self.know_date = False self.minimal_download = event_data.get('minimal_download', False) if self.minimal_download: self.branch = "{}--minimal-download".format(self.branch) self.overwrite, self.add_new_files, self.wipe = False, False, False self.overwrite_fields = [] if 'overwrite' in event_data and event_data['overwrite']: overwrite = event_data['overwrite'] self.overwrite = True if 'all' in overwrite and overwrite['all']: self.wipe = True else: if 'add_new_files' in overwrite and overwrite['add_new_files']: self.add_new_files = True if ('existing_files_fields' in overwrite and overwrite['existing_files_fields']): self.overwrite_fields = overwrite['existing_files_fields'] def create_branch(self): """Create a new branch in pyvideo repository to add a new event""" os.chdir(str(self.repository_path)) sh.git.checkout('master') sh.git.checkout('-b', self.branch) logger.debug('Branch {} created', self.branch) def create_dirs(self): """Create new directories and conference file in pyvideo repository to add a new event""" for new_directory in [self.event_dir, self.event_dir / 'videos']: new_directory.mkdir(exist_ok=self.overwrite) logger.debug('Dir {} created', new_directory) def create_category(self): # , conf_dir, title): """Create category.json for the conference""" category_file_path = self.event_dir / 'category.json' category_data = { 'title': self.title, } category_data_text = json.dumps(category_data, ** JSON_FORMAT_KWARGS) + '\n' save_file(category_file_path, category_data_text) logger.debug('File {} created', category_file_path) def download_video_data(self): """Download youtube metadata corresponding to this event youtube lists""" def scrape_url(url): """Scrape the video list, youtube_dl does all the heavy lifting""" ydl_opts = { "ignoreerrors": True, # Skip private and 
unavaliable videos } ydl = youtube_dl.YoutubeDL(ydl_opts) with ydl: result_ydl = ydl.extract_info( url, download=False # No download needed, only the info ) logger.debug('Url scraped {}', url) if 'entries' in result_ydl: # It's a playlist or a list of videos return result_ydl['entries'] # Just a video return [result_ydl] youtube_list = sum((scrape_url(url) for url in self.youtube_lists), []) for youtube_video_data in youtube_list: if youtube_video_data: # Valid video self.youtube_videos.append( Video.from_youtube( video_data=youtube_video_data, event=self)) else: logger.warning('Null youtube video') def load_video_data(self): """Load video data form existing event video files""" self.file_videos = [ Video.from_file(path, self) for path in self.video_dir.glob('*.json') ] def merge_video_data(self): """Merge old video data when configured so""" if self.overwrite: if self.wipe: self.videos = self.youtube_videos elif self.add_new_files or self.overwrite_fields: old_videos = { video.filename: video for video in self.file_videos } old_videos_url = { video.metadata['videos'][0]['url']: video for video in self.file_videos } new_videos = {} for video in self.youtube_videos: new_video_url = video.metadata['videos'][0]['url'] if new_video_url in old_videos_url: new_video_filename = old_videos_url[new_video_url].filename else: new_video_filename = video.filename new_videos[new_video_filename] = video if self.overwrite_fields: forgotten = set(old_videos) - set(new_videos) for name in forgotten: logger.warning('Missing video: {} {}', old_videos[name].filename, old_videos[name].metadata['videos'][0]['url'], ) changes = set(new_videos).intersection(set(old_videos)) for path in changes: merged_video = old_videos[path].merge( new_videos[path], self.overwrite_fields) self.videos.append(merged_video) else: self.videos = self.file_videos if self.add_new_files: adds = set(new_videos) - set(old_videos) self.videos.extend([new_videos[path] for path in adds]) else: # not self.overwrite 
self.videos = self.youtube_videos def save_video_data(self): """Save all event videos in PyVideo format""" if self.overwrite: # Erase old event videos for path in self.video_dir.glob('*.json'): path.unlink() for video in self.videos: video.save() def create_commit(self, event_data_yaml): """Create a new commit in pyvideo repository with the new event data""" os.chdir(str(self.repository_path)) sh.git.checkout(self.branch) sh.git.add(self.event_dir) message_body = ( '\n\nEvent config:\n~~~yaml\n{}\n~~~\n'.format(event_data_yaml) + '\nScraped with [pyvideo_scrape]' + '(https://github.com/pyvideo/pyvideo_scrape)') if self.minimal_download: message = ('Minimal download: ' + '{}\n\nMinimal download executed for #{}'.format( self.title, self.issue) + '\n\nOnly data that needs [no review](https://' + 'github.com/pyvideo/pyvideo_scrape#use-cases) was scraped.' + '\nThis event needs further scraping and human ' + 'reviewing for the description and other data to show.' + message_body) sh.git.commit('-m', message) sh.git.push('--set-upstream', 'origin', self.branch) # ~ sh.git.push('--set-upstream', '--force', 'origin', self.branch) sh.git.checkout('master') else: message = ( 'Scraped {}\n\nFixes #{}'.format(self.branch, self.issue) + message_body) sh.git.commit('-m', message) sh.git.checkout('master') logger.debug('Conference {} commited', self.branch) class Video: """PyVideo Video metadata""" @staticmethod def __calculate_title(video_data): """Calculate title from youtube fields""" title = 'Unknown' if 'fulltitle' in video_data.keys(): title = video_data['fulltitle'] elif 'title' in video_data.keys(): title = video_data['title'] elif '_filename' in video_data.keys(): title = video_data['_filename'] return title def __calculate_slug(self): """Calculate slug from title""" return slugify.slugify(self.metadata['title']) def
(self, upload_date_str): """Calculate record date from youtube field and event dates""" upload_date = datetime.date( int(upload_date_str[0:4]), int(upload_date_str[4:6]), int(upload_date_str[6:8])) if self.event.know_date: if not (self.event.date_begin <= upload_date <= self.event.date_end): return self.event.date_default.isoformat() return upload_date.isoformat() def __init__(self, event): self.event = event self.filename = None self.metadata = {} @classmethod def from_file(cls, path, event): """Contructor. Retrieves video metadata from file""" self = cls(event) self.filename = path.stem # Name without .json try: with path.open() as f_path: self.metadata = json.load(f_path) except ValueError: print('Json syntax error in file {}'.format(path)) raise return self @classmethod def from_youtube(cls, video_data, event): """Contructor. Retrieves video metadata with youtube-dl""" self = cls(event) metadata = self.metadata metadata['title'] = self.__calculate_title(video_data) self.filename = self.__calculate_slug() metadata['speakers'] = ['TODO'] # Needs human intervention later # youtube_id = video_data['display_id'] # metadata['thumbnail_url'] = # 'https://i.ytimg.com/vi/{}/maxresdefault.jpg'.format(youtube_id) metadata['thumbnail_url'] = video_data['thumbnail'] metadata['videos'] = [{ 'type': 'youtube', 'url': video_data['webpage_url'] }] metadata['recorded'] = self.__calculate_date_recorded( video_data['upload_date']) # optional values metadata['copyright_text'] = video_data['license'] metadata['duration'] = video_data['duration'] # In seconds metadata['language'] = video_data['formats'][0].get( 'language', event.language) if not metadata['language']: metadata['language'] = event.language metadata['related_urls'] = copy.deepcopy(event.related_urls) if event.minimal_download: metadata['speakers'] = [] metadata['tags'] = event.tags metadata['description'] = '' else: metadata['tags'] = sorted( set(video_data['tags']).union(set(event.tags))) metadata['description'] = 
video_data['description'] description_urls = list( set( re.findall(r'http[s]?://[^ \\\n\t()[\]"`´\']+', video_data[ 'description']))) for url in description_urls: metadata['related_urls'].append({'label': url, 'url': url}) return self def merge(self, new_video, fields): """Create video copy overwriting fields """ merged_video = Video(self.event) merged_video.filename = self.filename for field in self.metadata: if field in set(fields): merged_video.metadata[field] = new_video.metadata.get(field) else: merged_video.metadata[field] = self.metadata.get(field) return merged_video def save(self): """"Save to disk""" path = self.event.video_dir / '{}.json'.format(self.filename) if path.exists(): duplicate_num = 1 new_path = path while new_path.exists(): duplicate_num += 1 new_path = path.parent / ( path.stem + '-{}{}'.format(duplicate_num, path.suffix)) logger.debug('Duplicate, renaming to {}', path) path = new_path data_text = json.dumps(self.metadata, **JSON_FORMAT_KWARGS) + '\n' save_file(path, data_text) logger.debug('File {} created', path) @logger.catch def main(): """Scrape several conferences into pyvideo repository""" logger.add( sys.stderr, format="{time} {level} {message}", filter="my_module", level="DEBUG") time_init = datetime.datetime.now() logger.debug('Time init: {}', time_init) logger.debug('youtube-dl version: {} ', youtube_dl_version()) cwd = pathlib.Path.cwd() events_file = cwd / 'events.yml' event_data_yaml, events_data = load_events(events_file) pyvideo_repo = pathlib.PosixPath( events_data['repo_dir']).expanduser().resolve() events = [ Event(event_data, repository_path=pyvideo_repo) for event_data in events_data['events'] ] for event in events: try: event.create_branch() event.create_dirs() event.create_category() except (sh.ErrorReturnCode_128, FileExistsError) as exc: logger.warning('Event {} skipped', event.branch) logger.debug(exc.args[0]) continue event.download_video_data() event.load_video_data() event.merge_video_data() 
event.save_video_data() event.create_commit(event_data_yaml) time_end = datetime.datetime.now() time_delta = str(time_end - time_init) logger.debug('Time init: {}', time_init) logger.debug('Time end: {}', time_end) logger.debug('Time delta: {}', time_delta) if __name__ == '__main__': main()
__calculate_date_recorded
identifier_name
verbose.py
#!/usr/bin/env python import sys from datetime import datetime, timedelta from xml.etree import ElementTree as ET import csv weekdays = ('Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun') # locations = {} # with open('lost-locations.csv') as locfile: # reader = csv.DictReader(locfile) # for line in reader: # locations[line['name']] = (line['lat'], line['lon']) # point_forecast_url = list(root.iter('moreWeatherInformation'))[0].text def parse_noaa_time_string(noaa_time_str): date_str, time_str = noaa_time_str.split('T') # will raise ValueError if it doesn't split into two pieces tzhackdelta = None if '-' in time_str: time_str, tzinfo_str = time_str.split('-') # ignoring time zone info for now elif time_str[-1] == 'Z': print 'HACK subtracting eight hours from GMT' tzhackdelta = timedelta(hours=-8) time_str = time_str[:-1] year, month, day = [ int(val) for val in date_str.split('-') ] hour, minute, second = [ int(val) for val in time_str.split(':') ] moment = datetime(year, month, day, hour, minute, second) if tzhackdelta is not None: moment += tzhackdelta return moment def get_time_layouts(root): layouts = {} for lout in root.find('data').findall('time-layout'): name = lout.find('layout-key').text layouts[name] = {'start':[], 'end':[]} for start_end in ('start', 'end'): for tmptime in lout.iter(start_end + '-valid-time'): moment = parse_noaa_time_string(tmptime.text) layouts[name][start_end].append(moment) return layouts def combine_days(action, pdata, debug=False): """ Perform <action> for all the values within each day, where <action> is either sum or mean. 
""" assert action == 'sum' or action == 'mean' starts, ends, values, weight_sum = [], [], [], [] def get_time_delta_in_hours(start, end): """ NOTE assumes no overflows or wraps or nothing """ dhour = end.hour - start.hour dmin = end.minute - start.minute dsec = end.second - start.second dtime = timedelta(hours=dhour, minutes=dmin, seconds=dsec) # NOTE rounds to nearest second # print start, end, dtime return float(dtime.seconds) / (60*60) def add_new_day(dstart, dend, dval): weight = '-' starts.append(dstart) ends.append(dend) if action == 'sum': values.append(dval) elif action == 'mean': weight = float(get_time_delta_in_hours(dstart, dend)) values.append(weight*dval) weight_sum.append(weight) else: raise Exception('invalid action'+action) if debug: print ' new day', dstart, dend, weight, dval def increment_day(dstart, dend, dval): ends[-1] = dend weight = '-' if action == 'sum': values[-1] += dval elif action == 'mean': weight = float(get_time_delta_in_hours(dstart, dend)) values[-1] += weight * dval weight_sum[-1] += weight else: raise Exception('invalid action'+action) if debug: print ' increment', starts[-1], dend, weight, dval, ' ', values[-1] def incorporate_value(istart, iend, ival): # if debug: # print ' incorporate', istart, iend, ival if len(values) == 0 or ends[-1].day != istart.day: add_new_day(istart, iend, ival) else: increment_day(istart, iend, ival) for ival in range(len(pdata['values'])): start = pdata['time-layout']['start'][ival] if len(pdata['time-layout']['end']) > 0: # some of them only have start times end = pdata['time-layout']['end'][ival] elif len(pdata['time-layout']['start']) > ival+1: # so use the next start time minus a ms if we can end = pdata['time-layout']['start'][ival+1] - timedelta(milliseconds=-1) else: end = pdata['time-layout']['start'][ival] + timedelta(hours=6) # otherwise just, hell, add six hours if debug: print ' day %3d-%-3d hour %3d-%-3d %s' % (start.day, end.day, start.hour, end.hour, pdata['values'][ival]) # skip null 
values (probably from cloud cover) if pdata['values'][ival] == None: if debug: print ' skipping null value' continue val = float(pdata['values'][ival]) if start.day == end.day: incorporate_value(start, end, val) else: if debug: print ' start (%s) and end (%s) days differ' % (start, end) assert start.day + 1 == end.day # for now only handle the case where they differ by one day midnight = datetime(year=end.year, month=end.month, day=end.day, hour=0, minute=0, second=0) if action == 'sum': hours_before = get_time_delta_in_hours(start, midnight) #24 - start.hour hours_after = get_time_delta_in_hours(midnight, end) #end.hour val_before = val * float(hours_before) / (hours_before + hours_after) val_after = val * float(hours_after) / (hours_before + hours_after) if debug: print ' apportioning between', print 'first %f * %f / (%f + %f) = %f' % (val, hours_before, hours_before, hours_after, val_before), print 'and second %f * %f / (%f + %f) = %f' % (val, hours_after, hours_before, hours_after, val_after) else: val_before, val_after = val, val incorporate_value(start, midnight + timedelta(milliseconds=-1), val_before) #start + timedelta(hours=24-start.hour, milliseconds=-1), val_before) incorporate_value(midnight, end + timedelta(milliseconds=-1), val_after) # end - timedelta(hours=end.hour), end, val_after) dailyvals = {} for ival in range(len(values)): dailyvals[int(starts[ival].day)] = values[ival] if action == 'mean': # if debug: # print 'total', get_time_delta_in_hours(starts[ival], ends[ival]) dailyvals[int(starts[ival].day)] /= weight_sum[ival] #get_time_delta_in_hours(starts[ival], ends[ival]) if debug: print ' final:' for key in sorted(dailyvals.keys()): print ' ', key, dailyvals[key] return dailyvals def parse_data(root, time_layouts, debug=False): pars = root.find('data').find('parameters') data = {} for vardata in pars: # first figure out the name all_names = list(vardata.iter('name')) if len(all_names) != 1: raise Exception('ERROR too many names for %s: %s' % 
(vardata.tag, ', '.join(all_names))) name = all_names[0].text if name in data: raise Exception('ERROR %s already in data' % key) # then get the data data[name] = {} if vardata.get('time-layout') is None: # single-point data if debug: print ' no layout %s' % name continue else: # time series data data[name]['time-layout'] = time_layouts[vardata.get('time-layout')] data[name]['values'] = [ val.text for val in vardata.findall('value') ] if debug: print 'added %s (%s)' % (name, vardata.get('time-layout')) if len(data[name]['time-layout']['start']) != len(data[name]['values']): if debug: print ' time layout different length for %s' % name else: pass return data def find_min_temp(pdata, prev_day, next_day):
def find_max_temp(pdata, day): """ find min temp for the night of <prev_day> to <next_day> """ for ival in range(len(pdata['values'])): start = pdata['time-layout']['start'][ival] end = pdata['time-layout']['end'][ival] if start.day == day and end.day == day: return int(pdata['values'][ival]) # raise Exception('ERROR didn\'t find max temp for %d in %s' % (day, pdata['time-layout'])) return None def prettify_values(data, ndays=5, debug=False): mintemps = data['Daily Minimum Temperature'] maxtemps = data['Daily Maximum Temperature'] liquid = combine_days('sum', data['Liquid Precipitation Amount']) snow = combine_days('sum', data['Snow Amount']) wind_speed = combine_days('mean', data['Wind Speed']) cloud = combine_days('mean', data['Cloud Cover Amount']) percent_precip = combine_days('mean', data['12 Hourly Probability of Precipitation']) txtvals = {'days':[], 'tmax':[], 'tmin':[], 'liquid':[], 'snow':[], 'wind':[], 'cloud':[], 'precip':[]} if debug: print '%-5s %4s %5s%5s %5s %5s' % ('', 'hi lo', 'precip (snow)', '%', 'wind', 'cloud') rowlist = [] for iday in range(ndays): day = datetime.now() + timedelta(days=iday) tmax = find_max_temp(maxtemps, day.day) tmin = find_min_temp(mintemps, day.day, day.day+1) row = '' if tmax is not None: row += ' %d' % tmax if tmin is not None: row += ' %d<br>' % tmin if day.day in percent_precip: row += ' %.0f<font size=1>%%</font>' % percent_precip[day.day] # liquid row += '<font color=blue><b>' if day.day in liquid: if liquid[day.day] > 0.0: row += (' %.2f' % liquid[day.day]).replace('0.', '.') else: row += ' 0' else: row += ' - ' row += '</b></font>' # snow row += '<font color=grey><b>' if day.day in liquid: if snow[day.day] > 0.0: row += (' (%.2f)' % snow[day.day]).replace('0.', '.') else: row += ' ' else: row += ' - ' row += '</b></font>' row += '<br>' # wind speed if day.day in wind_speed: row += ' %.0f' % wind_speed[day.day] row += '<font size=1>mph</font>' else: row += ' - ' # cloud cover if day.day in cloud: row += ' %.0f' % 
cloud[day.day] row += '<font size=1>% cover</font>' else: row += ' - ' rowlist.append(row) tv = txtvals tv['tmax'].append('-' if tmax is None else tmax) tv['tmin'].append('-' if tmin is None else tmin) tv['liquid'].append(('%5.1f' % liquid[day.day]) if day.day in liquid else '-') tv['snow'].append('') if day.day in snow and snow[day.day] > 0.0: tv['snow'][-1] = '%5.1f' % snow[day.day] tv['wind'].append(('%5.0f' % wind_speed[day.day]) if day.day in wind_speed else '-') tv['cloud'].append(('%5.0f' % cloud[day.day]) if day.day in cloud else '-') tv['precip'].append(('%5.0f' % percent_precip[day.day]) if day.day in percent_precip else '-') tv['days'].append(weekdays[day.weekday()]) if debug: print '%-6s %4s %-3s %5s %5s %5s %5s %5s' % (weekdays[day.weekday()], tv['tmax'][-1], tv['tmin'][-1], tv['liquid'][-1], tv['snow'][-1], tv['precip'][-1], tv['wind'][-1], tv['cloud'][-1]) return tv, rowlist def verbosocast(tree): root = tree.getroot() time_layouts = get_time_layouts(root) data = parse_data(root, time_layouts) point = root.find('data').find('location').find('point') lat, lon = point.get('latitude'), point.get('longitude') tv, rowlist = prettify_values(data, debug=True) import HTML rowlist.insert(0, ' %s <br> %s ' % (lat, lon)) table_vals = [rowlist,] htmlcode = HTML.table(table_vals, header_row=['',] + tv['days'], col_width=['15%' for _ in range(len(table_vals[0]))]) with open('tmp.html', 'w') as outfile: outfile.write(htmlcode)
""" find min temp for the night of <prev_day> to <next_day> """ for ival in range(len(pdata['values'])): start = pdata['time-layout']['start'][ival] end = pdata['time-layout']['end'][ival] if start.day == prev_day and end.day == next_day: return int(pdata['values'][ival]) # raise Exception('ERROR didn\'t find min temp for night of %d-%d in %s' % (prev_day, next_day, pdata['time-layout'])) return None
identifier_body
verbose.py
#!/usr/bin/env python import sys from datetime import datetime, timedelta from xml.etree import ElementTree as ET import csv weekdays = ('Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun') # locations = {} # with open('lost-locations.csv') as locfile: # reader = csv.DictReader(locfile) # for line in reader: # locations[line['name']] = (line['lat'], line['lon']) # point_forecast_url = list(root.iter('moreWeatherInformation'))[0].text def parse_noaa_time_string(noaa_time_str): date_str, time_str = noaa_time_str.split('T') # will raise ValueError if it doesn't split into two pieces tzhackdelta = None if '-' in time_str: time_str, tzinfo_str = time_str.split('-') # ignoring time zone info for now elif time_str[-1] == 'Z': print 'HACK subtracting eight hours from GMT' tzhackdelta = timedelta(hours=-8) time_str = time_str[:-1] year, month, day = [ int(val) for val in date_str.split('-') ] hour, minute, second = [ int(val) for val in time_str.split(':') ] moment = datetime(year, month, day, hour, minute, second) if tzhackdelta is not None: moment += tzhackdelta return moment def get_time_layouts(root): layouts = {} for lout in root.find('data').findall('time-layout'): name = lout.find('layout-key').text layouts[name] = {'start':[], 'end':[]} for start_end in ('start', 'end'): for tmptime in lout.iter(start_end + '-valid-time'): moment = parse_noaa_time_string(tmptime.text) layouts[name][start_end].append(moment) return layouts def combine_days(action, pdata, debug=False): """ Perform <action> for all the values within each day, where <action> is either sum or mean. 
""" assert action == 'sum' or action == 'mean' starts, ends, values, weight_sum = [], [], [], [] def get_time_delta_in_hours(start, end): """ NOTE assumes no overflows or wraps or nothing """ dhour = end.hour - start.hour dmin = end.minute - start.minute dsec = end.second - start.second dtime = timedelta(hours=dhour, minutes=dmin, seconds=dsec) # NOTE rounds to nearest second # print start, end, dtime return float(dtime.seconds) / (60*60) def add_new_day(dstart, dend, dval): weight = '-' starts.append(dstart) ends.append(dend) if action == 'sum': values.append(dval) elif action == 'mean': weight = float(get_time_delta_in_hours(dstart, dend)) values.append(weight*dval) weight_sum.append(weight) else: raise Exception('invalid action'+action) if debug: print ' new day', dstart, dend, weight, dval def increment_day(dstart, dend, dval): ends[-1] = dend weight = '-' if action == 'sum': values[-1] += dval elif action == 'mean': weight = float(get_time_delta_in_hours(dstart, dend)) values[-1] += weight * dval weight_sum[-1] += weight else: raise Exception('invalid action'+action) if debug: print ' increment', starts[-1], dend, weight, dval, ' ', values[-1] def incorporate_value(istart, iend, ival): # if debug: # print ' incorporate', istart, iend, ival if len(values) == 0 or ends[-1].day != istart.day: add_new_day(istart, iend, ival) else: increment_day(istart, iend, ival) for ival in range(len(pdata['values'])): start = pdata['time-layout']['start'][ival] if len(pdata['time-layout']['end']) > 0: # some of them only have start times end = pdata['time-layout']['end'][ival] elif len(pdata['time-layout']['start']) > ival+1: # so use the next start time minus a ms if we can end = pdata['time-layout']['start'][ival+1] - timedelta(milliseconds=-1) else: end = pdata['time-layout']['start'][ival] + timedelta(hours=6) # otherwise just, hell, add six hours if debug: print ' day %3d-%-3d hour %3d-%-3d %s' % (start.day, end.day, start.hour, end.hour, pdata['values'][ival]) # skip null 
values (probably from cloud cover) if pdata['values'][ival] == None: if debug: print ' skipping null value' continue val = float(pdata['values'][ival]) if start.day == end.day: incorporate_value(start, end, val) else: if debug: print ' start (%s) and end (%s) days differ' % (start, end) assert start.day + 1 == end.day # for now only handle the case where they differ by one day midnight = datetime(year=end.year, month=end.month, day=end.day, hour=0, minute=0, second=0) if action == 'sum': hours_before = get_time_delta_in_hours(start, midnight) #24 - start.hour hours_after = get_time_delta_in_hours(midnight, end) #end.hour val_before = val * float(hours_before) / (hours_before + hours_after) val_after = val * float(hours_after) / (hours_before + hours_after) if debug: print ' apportioning between', print 'first %f * %f / (%f + %f) = %f' % (val, hours_before, hours_before, hours_after, val_before), print 'and second %f * %f / (%f + %f) = %f' % (val, hours_after, hours_before, hours_after, val_after) else: val_before, val_after = val, val incorporate_value(start, midnight + timedelta(milliseconds=-1), val_before) #start + timedelta(hours=24-start.hour, milliseconds=-1), val_before) incorporate_value(midnight, end + timedelta(milliseconds=-1), val_after) # end - timedelta(hours=end.hour), end, val_after) dailyvals = {} for ival in range(len(values)): dailyvals[int(starts[ival].day)] = values[ival] if action == 'mean': # if debug: # print 'total', get_time_delta_in_hours(starts[ival], ends[ival]) dailyvals[int(starts[ival].day)] /= weight_sum[ival] #get_time_delta_in_hours(starts[ival], ends[ival]) if debug: print ' final:' for key in sorted(dailyvals.keys()): print ' ', key, dailyvals[key] return dailyvals def parse_data(root, time_layouts, debug=False): pars = root.find('data').find('parameters') data = {} for vardata in pars: # first figure out the name all_names = list(vardata.iter('name')) if len(all_names) != 1: raise Exception('ERROR too many names for %s: %s' % 
(vardata.tag, ', '.join(all_names))) name = all_names[0].text if name in data: raise Exception('ERROR %s already in data' % key) # then get the data data[name] = {} if vardata.get('time-layout') is None: # single-point data if debug: print ' no layout %s' % name continue else: # time series data data[name]['time-layout'] = time_layouts[vardata.get('time-layout')] data[name]['values'] = [ val.text for val in vardata.findall('value') ] if debug: print 'added %s (%s)' % (name, vardata.get('time-layout')) if len(data[name]['time-layout']['start']) != len(data[name]['values']): if debug: print ' time layout different length for %s' % name else: pass return data def find_min_temp(pdata, prev_day, next_day): """ find min temp for the night of <prev_day> to <next_day> """ for ival in range(len(pdata['values'])): start = pdata['time-layout']['start'][ival] end = pdata['time-layout']['end'][ival] if start.day == prev_day and end.day == next_day: return int(pdata['values'][ival]) # raise Exception('ERROR didn\'t find min temp for night of %d-%d in %s' % (prev_day, next_day, pdata['time-layout'])) return None def
(pdata, day): """ find min temp for the night of <prev_day> to <next_day> """ for ival in range(len(pdata['values'])): start = pdata['time-layout']['start'][ival] end = pdata['time-layout']['end'][ival] if start.day == day and end.day == day: return int(pdata['values'][ival]) # raise Exception('ERROR didn\'t find max temp for %d in %s' % (day, pdata['time-layout'])) return None def prettify_values(data, ndays=5, debug=False): mintemps = data['Daily Minimum Temperature'] maxtemps = data['Daily Maximum Temperature'] liquid = combine_days('sum', data['Liquid Precipitation Amount']) snow = combine_days('sum', data['Snow Amount']) wind_speed = combine_days('mean', data['Wind Speed']) cloud = combine_days('mean', data['Cloud Cover Amount']) percent_precip = combine_days('mean', data['12 Hourly Probability of Precipitation']) txtvals = {'days':[], 'tmax':[], 'tmin':[], 'liquid':[], 'snow':[], 'wind':[], 'cloud':[], 'precip':[]} if debug: print '%-5s %4s %5s%5s %5s %5s' % ('', 'hi lo', 'precip (snow)', '%', 'wind', 'cloud') rowlist = [] for iday in range(ndays): day = datetime.now() + timedelta(days=iday) tmax = find_max_temp(maxtemps, day.day) tmin = find_min_temp(mintemps, day.day, day.day+1) row = '' if tmax is not None: row += ' %d' % tmax if tmin is not None: row += ' %d<br>' % tmin if day.day in percent_precip: row += ' %.0f<font size=1>%%</font>' % percent_precip[day.day] # liquid row += '<font color=blue><b>' if day.day in liquid: if liquid[day.day] > 0.0: row += (' %.2f' % liquid[day.day]).replace('0.', '.') else: row += ' 0' else: row += ' - ' row += '</b></font>' # snow row += '<font color=grey><b>' if day.day in liquid: if snow[day.day] > 0.0: row += (' (%.2f)' % snow[day.day]).replace('0.', '.') else: row += ' ' else: row += ' - ' row += '</b></font>' row += '<br>' # wind speed if day.day in wind_speed: row += ' %.0f' % wind_speed[day.day] row += '<font size=1>mph</font>' else: row += ' - ' # cloud cover if day.day in cloud: row += ' %.0f' % cloud[day.day] row 
+= '<font size=1>% cover</font>' else: row += ' - ' rowlist.append(row) tv = txtvals tv['tmax'].append('-' if tmax is None else tmax) tv['tmin'].append('-' if tmin is None else tmin) tv['liquid'].append(('%5.1f' % liquid[day.day]) if day.day in liquid else '-') tv['snow'].append('') if day.day in snow and snow[day.day] > 0.0: tv['snow'][-1] = '%5.1f' % snow[day.day] tv['wind'].append(('%5.0f' % wind_speed[day.day]) if day.day in wind_speed else '-') tv['cloud'].append(('%5.0f' % cloud[day.day]) if day.day in cloud else '-') tv['precip'].append(('%5.0f' % percent_precip[day.day]) if day.day in percent_precip else '-') tv['days'].append(weekdays[day.weekday()]) if debug: print '%-6s %4s %-3s %5s %5s %5s %5s %5s' % (weekdays[day.weekday()], tv['tmax'][-1], tv['tmin'][-1], tv['liquid'][-1], tv['snow'][-1], tv['precip'][-1], tv['wind'][-1], tv['cloud'][-1]) return tv, rowlist def verbosocast(tree): root = tree.getroot() time_layouts = get_time_layouts(root) data = parse_data(root, time_layouts) point = root.find('data').find('location').find('point') lat, lon = point.get('latitude'), point.get('longitude') tv, rowlist = prettify_values(data, debug=True) import HTML rowlist.insert(0, ' %s <br> %s ' % (lat, lon)) table_vals = [rowlist,] htmlcode = HTML.table(table_vals, header_row=['',] + tv['days'], col_width=['15%' for _ in range(len(table_vals[0]))]) with open('tmp.html', 'w') as outfile: outfile.write(htmlcode)
find_max_temp
identifier_name
verbose.py
#!/usr/bin/env python import sys from datetime import datetime, timedelta from xml.etree import ElementTree as ET import csv weekdays = ('Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun') # locations = {} # with open('lost-locations.csv') as locfile: # reader = csv.DictReader(locfile) # for line in reader: # locations[line['name']] = (line['lat'], line['lon']) # point_forecast_url = list(root.iter('moreWeatherInformation'))[0].text def parse_noaa_time_string(noaa_time_str): date_str, time_str = noaa_time_str.split('T') # will raise ValueError if it doesn't split into two pieces tzhackdelta = None if '-' in time_str: time_str, tzinfo_str = time_str.split('-') # ignoring time zone info for now elif time_str[-1] == 'Z': print 'HACK subtracting eight hours from GMT' tzhackdelta = timedelta(hours=-8) time_str = time_str[:-1] year, month, day = [ int(val) for val in date_str.split('-') ] hour, minute, second = [ int(val) for val in time_str.split(':') ] moment = datetime(year, month, day, hour, minute, second) if tzhackdelta is not None: moment += tzhackdelta return moment def get_time_layouts(root): layouts = {} for lout in root.find('data').findall('time-layout'): name = lout.find('layout-key').text layouts[name] = {'start':[], 'end':[]} for start_end in ('start', 'end'): for tmptime in lout.iter(start_end + '-valid-time'): moment = parse_noaa_time_string(tmptime.text) layouts[name][start_end].append(moment) return layouts def combine_days(action, pdata, debug=False): """ Perform <action> for all the values within each day, where <action> is either sum or mean. 
""" assert action == 'sum' or action == 'mean' starts, ends, values, weight_sum = [], [], [], [] def get_time_delta_in_hours(start, end): """ NOTE assumes no overflows or wraps or nothing """ dhour = end.hour - start.hour dmin = end.minute - start.minute dsec = end.second - start.second dtime = timedelta(hours=dhour, minutes=dmin, seconds=dsec) # NOTE rounds to nearest second # print start, end, dtime return float(dtime.seconds) / (60*60) def add_new_day(dstart, dend, dval): weight = '-' starts.append(dstart) ends.append(dend) if action == 'sum': values.append(dval) elif action == 'mean': weight = float(get_time_delta_in_hours(dstart, dend)) values.append(weight*dval) weight_sum.append(weight) else: raise Exception('invalid action'+action) if debug: print ' new day', dstart, dend, weight, dval def increment_day(dstart, dend, dval): ends[-1] = dend weight = '-' if action == 'sum': values[-1] += dval elif action == 'mean': weight = float(get_time_delta_in_hours(dstart, dend)) values[-1] += weight * dval weight_sum[-1] += weight else: raise Exception('invalid action'+action) if debug: print ' increment', starts[-1], dend, weight, dval, ' ', values[-1] def incorporate_value(istart, iend, ival): # if debug: # print ' incorporate', istart, iend, ival if len(values) == 0 or ends[-1].day != istart.day: add_new_day(istart, iend, ival) else: increment_day(istart, iend, ival) for ival in range(len(pdata['values'])): start = pdata['time-layout']['start'][ival] if len(pdata['time-layout']['end']) > 0: # some of them only have start times end = pdata['time-layout']['end'][ival] elif len(pdata['time-layout']['start']) > ival+1: # so use the next start time minus a ms if we can end = pdata['time-layout']['start'][ival+1] - timedelta(milliseconds=-1) else: end = pdata['time-layout']['start'][ival] + timedelta(hours=6) # otherwise just, hell, add six hours if debug: print ' day %3d-%-3d hour %3d-%-3d %s' % (start.day, end.day, start.hour, end.hour, pdata['values'][ival]) # skip null 
values (probably from cloud cover) if pdata['values'][ival] == None: if debug: print ' skipping null value' continue val = float(pdata['values'][ival]) if start.day == end.day: incorporate_value(start, end, val) else: if debug: print ' start (%s) and end (%s) days differ' % (start, end) assert start.day + 1 == end.day # for now only handle the case where they differ by one day midnight = datetime(year=end.year, month=end.month, day=end.day, hour=0, minute=0, second=0) if action == 'sum': hours_before = get_time_delta_in_hours(start, midnight) #24 - start.hour hours_after = get_time_delta_in_hours(midnight, end) #end.hour val_before = val * float(hours_before) / (hours_before + hours_after) val_after = val * float(hours_after) / (hours_before + hours_after) if debug: print ' apportioning between', print 'first %f * %f / (%f + %f) = %f' % (val, hours_before, hours_before, hours_after, val_before), print 'and second %f * %f / (%f + %f) = %f' % (val, hours_after, hours_before, hours_after, val_after) else: val_before, val_after = val, val incorporate_value(start, midnight + timedelta(milliseconds=-1), val_before) #start + timedelta(hours=24-start.hour, milliseconds=-1), val_before) incorporate_value(midnight, end + timedelta(milliseconds=-1), val_after) # end - timedelta(hours=end.hour), end, val_after) dailyvals = {} for ival in range(len(values)):
if debug: print ' final:' for key in sorted(dailyvals.keys()): print ' ', key, dailyvals[key] return dailyvals def parse_data(root, time_layouts, debug=False): pars = root.find('data').find('parameters') data = {} for vardata in pars: # first figure out the name all_names = list(vardata.iter('name')) if len(all_names) != 1: raise Exception('ERROR too many names for %s: %s' % (vardata.tag, ', '.join(all_names))) name = all_names[0].text if name in data: raise Exception('ERROR %s already in data' % key) # then get the data data[name] = {} if vardata.get('time-layout') is None: # single-point data if debug: print ' no layout %s' % name continue else: # time series data data[name]['time-layout'] = time_layouts[vardata.get('time-layout')] data[name]['values'] = [ val.text for val in vardata.findall('value') ] if debug: print 'added %s (%s)' % (name, vardata.get('time-layout')) if len(data[name]['time-layout']['start']) != len(data[name]['values']): if debug: print ' time layout different length for %s' % name else: pass return data def find_min_temp(pdata, prev_day, next_day): """ find min temp for the night of <prev_day> to <next_day> """ for ival in range(len(pdata['values'])): start = pdata['time-layout']['start'][ival] end = pdata['time-layout']['end'][ival] if start.day == prev_day and end.day == next_day: return int(pdata['values'][ival]) # raise Exception('ERROR didn\'t find min temp for night of %d-%d in %s' % (prev_day, next_day, pdata['time-layout'])) return None def find_max_temp(pdata, day): """ find min temp for the night of <prev_day> to <next_day> """ for ival in range(len(pdata['values'])): start = pdata['time-layout']['start'][ival] end = pdata['time-layout']['end'][ival] if start.day == day and end.day == day: return int(pdata['values'][ival]) # raise Exception('ERROR didn\'t find max temp for %d in %s' % (day, pdata['time-layout'])) return None def prettify_values(data, ndays=5, debug=False): mintemps = data['Daily Minimum Temperature'] maxtemps = 
data['Daily Maximum Temperature'] liquid = combine_days('sum', data['Liquid Precipitation Amount']) snow = combine_days('sum', data['Snow Amount']) wind_speed = combine_days('mean', data['Wind Speed']) cloud = combine_days('mean', data['Cloud Cover Amount']) percent_precip = combine_days('mean', data['12 Hourly Probability of Precipitation']) txtvals = {'days':[], 'tmax':[], 'tmin':[], 'liquid':[], 'snow':[], 'wind':[], 'cloud':[], 'precip':[]} if debug: print '%-5s %4s %5s%5s %5s %5s' % ('', 'hi lo', 'precip (snow)', '%', 'wind', 'cloud') rowlist = [] for iday in range(ndays): day = datetime.now() + timedelta(days=iday) tmax = find_max_temp(maxtemps, day.day) tmin = find_min_temp(mintemps, day.day, day.day+1) row = '' if tmax is not None: row += ' %d' % tmax if tmin is not None: row += ' %d<br>' % tmin if day.day in percent_precip: row += ' %.0f<font size=1>%%</font>' % percent_precip[day.day] # liquid row += '<font color=blue><b>' if day.day in liquid: if liquid[day.day] > 0.0: row += (' %.2f' % liquid[day.day]).replace('0.', '.') else: row += ' 0' else: row += ' - ' row += '</b></font>' # snow row += '<font color=grey><b>' if day.day in liquid: if snow[day.day] > 0.0: row += (' (%.2f)' % snow[day.day]).replace('0.', '.') else: row += ' ' else: row += ' - ' row += '</b></font>' row += '<br>' # wind speed if day.day in wind_speed: row += ' %.0f' % wind_speed[day.day] row += '<font size=1>mph</font>' else: row += ' - ' # cloud cover if day.day in cloud: row += ' %.0f' % cloud[day.day] row += '<font size=1>% cover</font>' else: row += ' - ' rowlist.append(row) tv = txtvals tv['tmax'].append('-' if tmax is None else tmax) tv['tmin'].append('-' if tmin is None else tmin) tv['liquid'].append(('%5.1f' % liquid[day.day]) if day.day in liquid else '-') tv['snow'].append('') if day.day in snow and snow[day.day] > 0.0: tv['snow'][-1] = '%5.1f' % snow[day.day] tv['wind'].append(('%5.0f' % wind_speed[day.day]) if day.day in wind_speed else '-') tv['cloud'].append(('%5.0f' % 
cloud[day.day]) if day.day in cloud else '-') tv['precip'].append(('%5.0f' % percent_precip[day.day]) if day.day in percent_precip else '-') tv['days'].append(weekdays[day.weekday()]) if debug: print '%-6s %4s %-3s %5s %5s %5s %5s %5s' % (weekdays[day.weekday()], tv['tmax'][-1], tv['tmin'][-1], tv['liquid'][-1], tv['snow'][-1], tv['precip'][-1], tv['wind'][-1], tv['cloud'][-1]) return tv, rowlist def verbosocast(tree): root = tree.getroot() time_layouts = get_time_layouts(root) data = parse_data(root, time_layouts) point = root.find('data').find('location').find('point') lat, lon = point.get('latitude'), point.get('longitude') tv, rowlist = prettify_values(data, debug=True) import HTML rowlist.insert(0, ' %s <br> %s ' % (lat, lon)) table_vals = [rowlist,] htmlcode = HTML.table(table_vals, header_row=['',] + tv['days'], col_width=['15%' for _ in range(len(table_vals[0]))]) with open('tmp.html', 'w') as outfile: outfile.write(htmlcode)
dailyvals[int(starts[ival].day)] = values[ival] if action == 'mean': # if debug: # print 'total', get_time_delta_in_hours(starts[ival], ends[ival]) dailyvals[int(starts[ival].day)] /= weight_sum[ival] #get_time_delta_in_hours(starts[ival], ends[ival])
conditional_block
verbose.py
#!/usr/bin/env python import sys from datetime import datetime, timedelta from xml.etree import ElementTree as ET import csv weekdays = ('Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun') # locations = {} # with open('lost-locations.csv') as locfile: # reader = csv.DictReader(locfile) # for line in reader: # locations[line['name']] = (line['lat'], line['lon']) # point_forecast_url = list(root.iter('moreWeatherInformation'))[0].text def parse_noaa_time_string(noaa_time_str): date_str, time_str = noaa_time_str.split('T') # will raise ValueError if it doesn't split into two pieces tzhackdelta = None if '-' in time_str: time_str, tzinfo_str = time_str.split('-') # ignoring time zone info for now elif time_str[-1] == 'Z': print 'HACK subtracting eight hours from GMT' tzhackdelta = timedelta(hours=-8) time_str = time_str[:-1] year, month, day = [ int(val) for val in date_str.split('-') ] hour, minute, second = [ int(val) for val in time_str.split(':') ] moment = datetime(year, month, day, hour, minute, second) if tzhackdelta is not None: moment += tzhackdelta return moment def get_time_layouts(root): layouts = {} for lout in root.find('data').findall('time-layout'): name = lout.find('layout-key').text layouts[name] = {'start':[], 'end':[]} for start_end in ('start', 'end'): for tmptime in lout.iter(start_end + '-valid-time'): moment = parse_noaa_time_string(tmptime.text) layouts[name][start_end].append(moment) return layouts def combine_days(action, pdata, debug=False): """ Perform <action> for all the values within each day, where <action> is either sum or mean. 
""" assert action == 'sum' or action == 'mean' starts, ends, values, weight_sum = [], [], [], [] def get_time_delta_in_hours(start, end): """ NOTE assumes no overflows or wraps or nothing """ dhour = end.hour - start.hour dmin = end.minute - start.minute dsec = end.second - start.second dtime = timedelta(hours=dhour, minutes=dmin, seconds=dsec) # NOTE rounds to nearest second # print start, end, dtime return float(dtime.seconds) / (60*60) def add_new_day(dstart, dend, dval): weight = '-' starts.append(dstart) ends.append(dend) if action == 'sum': values.append(dval) elif action == 'mean': weight = float(get_time_delta_in_hours(dstart, dend)) values.append(weight*dval) weight_sum.append(weight) else: raise Exception('invalid action'+action) if debug: print ' new day', dstart, dend, weight, dval def increment_day(dstart, dend, dval): ends[-1] = dend weight = '-' if action == 'sum': values[-1] += dval elif action == 'mean': weight = float(get_time_delta_in_hours(dstart, dend)) values[-1] += weight * dval weight_sum[-1] += weight else: raise Exception('invalid action'+action) if debug: print ' increment', starts[-1], dend, weight, dval, ' ', values[-1] def incorporate_value(istart, iend, ival): # if debug: # print ' incorporate', istart, iend, ival if len(values) == 0 or ends[-1].day != istart.day: add_new_day(istart, iend, ival) else: increment_day(istart, iend, ival) for ival in range(len(pdata['values'])): start = pdata['time-layout']['start'][ival] if len(pdata['time-layout']['end']) > 0: # some of them only have start times end = pdata['time-layout']['end'][ival] elif len(pdata['time-layout']['start']) > ival+1: # so use the next start time minus a ms if we can end = pdata['time-layout']['start'][ival+1] - timedelta(milliseconds=-1) else: end = pdata['time-layout']['start'][ival] + timedelta(hours=6) # otherwise just, hell, add six hours if debug: print ' day %3d-%-3d hour %3d-%-3d %s' % (start.day, end.day, start.hour, end.hour, pdata['values'][ival]) # skip null 
values (probably from cloud cover) if pdata['values'][ival] == None: if debug: print ' skipping null value' continue val = float(pdata['values'][ival]) if start.day == end.day: incorporate_value(start, end, val) else: if debug: print ' start (%s) and end (%s) days differ' % (start, end) assert start.day + 1 == end.day # for now only handle the case where they differ by one day midnight = datetime(year=end.year, month=end.month, day=end.day, hour=0, minute=0, second=0) if action == 'sum': hours_before = get_time_delta_in_hours(start, midnight) #24 - start.hour hours_after = get_time_delta_in_hours(midnight, end) #end.hour val_before = val * float(hours_before) / (hours_before + hours_after) val_after = val * float(hours_after) / (hours_before + hours_after) if debug: print ' apportioning between', print 'first %f * %f / (%f + %f) = %f' % (val, hours_before, hours_before, hours_after, val_before), print 'and second %f * %f / (%f + %f) = %f' % (val, hours_after, hours_before, hours_after, val_after) else: val_before, val_after = val, val incorporate_value(start, midnight + timedelta(milliseconds=-1), val_before) #start + timedelta(hours=24-start.hour, milliseconds=-1), val_before) incorporate_value(midnight, end + timedelta(milliseconds=-1), val_after) # end - timedelta(hours=end.hour), end, val_after) dailyvals = {} for ival in range(len(values)): dailyvals[int(starts[ival].day)] = values[ival] if action == 'mean': # if debug: # print 'total', get_time_delta_in_hours(starts[ival], ends[ival]) dailyvals[int(starts[ival].day)] /= weight_sum[ival] #get_time_delta_in_hours(starts[ival], ends[ival]) if debug: print ' final:' for key in sorted(dailyvals.keys()): print ' ', key, dailyvals[key] return dailyvals def parse_data(root, time_layouts, debug=False): pars = root.find('data').find('parameters') data = {} for vardata in pars: # first figure out the name all_names = list(vardata.iter('name')) if len(all_names) != 1: raise Exception('ERROR too many names for %s: %s' % 
(vardata.tag, ', '.join(all_names))) name = all_names[0].text if name in data: raise Exception('ERROR %s already in data' % key) # then get the data data[name] = {} if vardata.get('time-layout') is None: # single-point data if debug: print ' no layout %s' % name continue else: # time series data data[name]['time-layout'] = time_layouts[vardata.get('time-layout')] data[name]['values'] = [ val.text for val in vardata.findall('value') ] if debug: print 'added %s (%s)' % (name, vardata.get('time-layout')) if len(data[name]['time-layout']['start']) != len(data[name]['values']): if debug: print ' time layout different length for %s' % name else: pass return data def find_min_temp(pdata, prev_day, next_day): """ find min temp for the night of <prev_day> to <next_day> """ for ival in range(len(pdata['values'])): start = pdata['time-layout']['start'][ival] end = pdata['time-layout']['end'][ival] if start.day == prev_day and end.day == next_day: return int(pdata['values'][ival]) # raise Exception('ERROR didn\'t find min temp for night of %d-%d in %s' % (prev_day, next_day, pdata['time-layout'])) return None def find_max_temp(pdata, day): """ find min temp for the night of <prev_day> to <next_day> """ for ival in range(len(pdata['values'])): start = pdata['time-layout']['start'][ival] end = pdata['time-layout']['end'][ival] if start.day == day and end.day == day: return int(pdata['values'][ival]) # raise Exception('ERROR didn\'t find max temp for %d in %s' % (day, pdata['time-layout'])) return None def prettify_values(data, ndays=5, debug=False): mintemps = data['Daily Minimum Temperature'] maxtemps = data['Daily Maximum Temperature'] liquid = combine_days('sum', data['Liquid Precipitation Amount']) snow = combine_days('sum', data['Snow Amount']) wind_speed = combine_days('mean', data['Wind Speed']) cloud = combine_days('mean', data['Cloud Cover Amount']) percent_precip = combine_days('mean', data['12 Hourly Probability of Precipitation']) txtvals = {'days':[], 'tmax':[], 
'tmin':[], 'liquid':[], 'snow':[], 'wind':[], 'cloud':[], 'precip':[]} if debug: print '%-5s %4s %5s%5s %5s %5s' % ('', 'hi lo', 'precip (snow)', '%', 'wind', 'cloud') rowlist = [] for iday in range(ndays): day = datetime.now() + timedelta(days=iday) tmax = find_max_temp(maxtemps, day.day) tmin = find_min_temp(mintemps, day.day, day.day+1) row = '' if tmax is not None: row += ' %d' % tmax if tmin is not None: row += ' %d<br>' % tmin
# liquid row += '<font color=blue><b>' if day.day in liquid: if liquid[day.day] > 0.0: row += (' %.2f' % liquid[day.day]).replace('0.', '.') else: row += ' 0' else: row += ' - ' row += '</b></font>' # snow row += '<font color=grey><b>' if day.day in liquid: if snow[day.day] > 0.0: row += (' (%.2f)' % snow[day.day]).replace('0.', '.') else: row += ' ' else: row += ' - ' row += '</b></font>' row += '<br>' # wind speed if day.day in wind_speed: row += ' %.0f' % wind_speed[day.day] row += '<font size=1>mph</font>' else: row += ' - ' # cloud cover if day.day in cloud: row += ' %.0f' % cloud[day.day] row += '<font size=1>% cover</font>' else: row += ' - ' rowlist.append(row) tv = txtvals tv['tmax'].append('-' if tmax is None else tmax) tv['tmin'].append('-' if tmin is None else tmin) tv['liquid'].append(('%5.1f' % liquid[day.day]) if day.day in liquid else '-') tv['snow'].append('') if day.day in snow and snow[day.day] > 0.0: tv['snow'][-1] = '%5.1f' % snow[day.day] tv['wind'].append(('%5.0f' % wind_speed[day.day]) if day.day in wind_speed else '-') tv['cloud'].append(('%5.0f' % cloud[day.day]) if day.day in cloud else '-') tv['precip'].append(('%5.0f' % percent_precip[day.day]) if day.day in percent_precip else '-') tv['days'].append(weekdays[day.weekday()]) if debug: print '%-6s %4s %-3s %5s %5s %5s %5s %5s' % (weekdays[day.weekday()], tv['tmax'][-1], tv['tmin'][-1], tv['liquid'][-1], tv['snow'][-1], tv['precip'][-1], tv['wind'][-1], tv['cloud'][-1]) return tv, rowlist def verbosocast(tree): root = tree.getroot() time_layouts = get_time_layouts(root) data = parse_data(root, time_layouts) point = root.find('data').find('location').find('point') lat, lon = point.get('latitude'), point.get('longitude') tv, rowlist = prettify_values(data, debug=True) import HTML rowlist.insert(0, ' %s <br> %s ' % (lat, lon)) table_vals = [rowlist,] htmlcode = HTML.table(table_vals, header_row=['',] + tv['days'], col_width=['15%' for _ in range(len(table_vals[0]))]) with open('tmp.html', 
'w') as outfile: outfile.write(htmlcode)
if day.day in percent_precip: row += ' %.0f<font size=1>%%</font>' % percent_precip[day.day]
random_line_split
ethereum-block.ts
import {toBigIntBE, toBufferBE} from 'bigint-buffer'; import {RlpEncode, RlpList} from 'rlp-stream'; import * as secp256k1 from 'secp256k1'; declare var process: {browser: boolean;}; const keccak = require('keccak'); interface NativeInterface { recoverFromAddress(verifyBlock: Buffer, signature: Buffer, recovery: boolean): Promise<bigint>; getPublicAddress(privateKey: bigint): Promise<bigint>; signTransaction( transaction: Buffer, privateKey: bigint, chainId: number, transactionRlp: RlpList): RlpList; } let native: NativeInterface; if (!process.browser) { try { native = require('bindings')('block_native'); } catch (e) { console.log(e); console.warn( 'Native bindings loading failed, using pure JS implementation'); } } /** A deserialized Ethereum block. */ export interface EthereumBlock { /** The header for the Ethereum block. */ header: EthereumHeader; /** The transaction list for the Ethereum block. */ transactions: EthereumTransaction[]; /** A list of headers for uncles. */ uncles: EthereumHeader[]; } export interface EthereumBlockDecoderOptions { /** For a EIP-155 transaction, which chain to use to replace v. */ chainId: number; /** * For decoding a block, which block number EIP-155 semantics automatically * applies. */ eip155Block: bigint; /** * For decoding a transaction, whether or not to use EIP-155 semantics to * decode the transaction. */ eip155: boolean; /** * If available, use native bindings to do transaction processing. */ native: boolean; } const defaultOptions: EthereumBlockDecoderOptions = { chainId: 1, eip155Block: BigInt(2675000), eip155: false, native: true }; export const CONTRACT_CREATION: bigint = BigInt(-1); /** A header for an Ethereum block. */ export interface EthereumHeader { /** The Keccak 256-bit hash of the parent block’s header, in its entirety. */ parentHash: bigint; /** The Keccak 256-bit hash of the ommers list portion of this block. 
*/ uncleHash: bigint; /** * The 160-bit address to which all fees collected from the successful mining * of this block be transferred. */ beneficiary: bigint; /** * The Keccak 256-bit hash of the root node of the state trie, after all * transactions are executed and finalisations applied. */ stateRoot: bigint; /** * The Keccak 256-bit hash of the root node of the trie structure populated * with each transaction in the transactions list portion of the block. */ transactionsRoot: bigint; /** * The Keccak 256-bit hash of the root node of the trie structure populated * with the receipts of each transaction in the transactions list portion of * the block. */ receiptsRoot: bigint; /** * The Bloom filter composed from indexable information (logger address and * log topics) contained in each log entry from the receipt of each * transaction in the transactions list. */ logsBloom: Buffer; /** * A scalar value corresponding to the difficulty level of this block. This * can be calculated from the previous block’s difficulty level and the * timestamp. */ difficulty: bigint; /** * A scalar value equal to the number of ancestor blocks. The genesis block * has a number of zero. */ blockNumber: bigint; /** * A scalar value equal to the current limit of gas expenditure per block. */ gasLimit: bigint; /** * A scalar value equal to the total gas used in transactions in this block. */ gasUsed: bigint; /** * A scalar value equal to the reasonable output of Unix’s time() at this * block’s inception. */ timestamp: bigint; /** * An arbitrary byte array containing data relevant to this block. This must * be 32 bytes or fewer. */ extraData: Buffer; /** * A 256-bit hash which proves combined with the nonce that a sufficient * amount of computation has been carried out on this block. */ mixHash: bigint; /** * A 64-bit hash which proves combined with the mix-hash that a sufficient * amount of computation has been carried out on this block. 
*/ nonce: bigint; } /** The data stored in a block for a signed Ethereum transaction */ export interface EthereumTransaction { /** * A scalar value equal to the number of transactions sent from this address * or, in the case of accounts with associated code, the number of * contract-creations made by this account. */ nonce: bigint; /** * A scalar value equal to the number of Wei to be paid per unit of gas for * all computation costs incurred as a result of the execution of this * transaction. */ gasPrice: bigint; /** * A scalar value equal to the maximum amount of gas that should be used in * executing this transaction. */ gasLimit: bigint; /** * A scalar value equal to the number of Wei to be transferred to the message * call’s recipient or, in the case of contract creation, as an endowment to * the newly created account. */ value: bigint; /** * The 160-bit address of the message call’s recipient or, for a contract * creation transaction, CONTRACT_CREATION (-1), to distinguish against * account 0x0000000000000000000000000000000000000000. */ to: bigint; /** * An unlimited size byte array specifying the EVM-code for the account * initialisation procedure, for a contract transaction, or an unlimited size * byte array specifying the input data of the message call, for a message * call. */ data: Buffer; /** The 160-bit address of the message caller. 
*/ from: bigint; } export class EthereumBlockDecoderError extends Error { constructor(message: string) { super(message); } } const HEADER_PARENT_HASH = 0; const HEADER_UNCLE_HASH = 1; const HEADER_BENEFICIARY = 2; const HEADER_STATE_ROOT = 3; const HEADER_TRANSACTIONS_ROOT = 4; const HEADER_RECEIPTS_ROOT = 5; const HEADER_LOGSBLOOM = 6; const HEADER_DIFFICULTY = 7; const HEADER_BLOCK_NUMBER = 8; const HEADER_GAS_LIMIT = 9; const HEADER_GAS_USED = 10; const HEADER_TIMESTAMP = 11; const HEADER_EXTRADATA = 12; const HEADER_MIXHASH = 13; const HEADER_NONCE = 14; /** * Given a RLP-serialized list with an Ethereum header, decodes the list and * validates the Ethereum header. * * @param header The RLP-encoded list with the header to decode. * * @returns A validated and decoded EthereumHeader. */ export function decodeHeader(header: RlpList): EthereumHeader { if (!Array.isArray(header)) { throw new EthereumBlockDecoderError( `Expected block header as RLP-encoded list!`); } return { parentHash: toBigIntBE(header[HEADER_PARENT_HASH] as Buffer), uncleHash: toBigIntBE(header[HEADER_UNCLE_HASH] as Buffer), beneficiary: toBigIntBE(header[HEADER_BENEFICIARY] as Buffer), stateRoot: toBigIntBE(header[HEADER_STATE_ROOT] as Buffer), transactionsRoot: toBigIntBE(header[HEADER_TRANSACTIONS_ROOT] as Buffer), receiptsRoot: toBigIntBE(header[HEADER_RECEIPTS_ROOT] as Buffer), logsBloom: header[HEADER_LOGSBLOOM] as Buffer, difficulty: toBigIntBE(header[HEADER_DIFFICULTY] as Buffer), blockNumber: toBigIntBE(header[HEADER_BLOCK_NUMBER] as Buffer), gasLimit: toBigIntBE(header[HEADER_GAS_LIMIT] as Buffer), gasUsed: toBigIntBE(header[HEADER_GAS_USED] as Buffer), timestamp: toBigIntBE(header[HEADER_TIMESTAMP] as Buffer), extraData: (header[HEADER_EXTRADATA] as Buffer), mixHash: toBigIntBE(header[HEADER_MIXHASH] as Buffer), nonce: toBigIntBE(header[HEADER_NONCE] as Buffer) }; } const TRANSACTION_NONCE = 0; const TRANSACTION_GASPRICE = 1; const TRANSACTION_STARTGAS = 2; const TRANSACTION_TO = 3; 
const TRANSACTION_VALUE = 4; const TRANSACTION_DATA = 5; const TRANSACTION_V = 6; const TRANSACTION_R = 7; const TRANSACTION_S = 8; /** * Given a RLP-serialized list with an Ethereum transaction, decodes the list * and validates the Ethereum transaction. * * @param header The RLP-encoded list with the transaction to decode. * * @returns A validated and decoded EthereumTransaction. */ export async function decodeTransaction( transaction: RlpList, options: EthereumBlockDecoderOptions = defaultOptions): Promise<EthereumTransaction> { const v = transaction[TRANSACTION_V] as Buffer; const r = transaction[TRANSACTION_R] as Buffer; const s = transaction[TRANSACTION_S] as Buffer; if (r.length > 32) { throw
gth > 32) { throw new Error(`s > 32 bytes!`); } const signature = Buffer.alloc(64, 0); r.copy(signature, 32 - r.length); s.copy(signature, 64 - s.length); const chainV = options.chainId * 2 + 35; const verifySignature = options.eip155 ? v[0] === chainV || v[0] === chainV + 1 : false; const recovery = verifySignature ? v[0] - (options.chainId * 2 + 8) - 27 : v[0] - 27; if (recovery !== 0 && recovery !== 1) { throw new EthereumBlockDecoderError( `Invalid infinite recovery = ${recovery}`); } // TODO: Get existing buffer from stream instead of regenerating it. const toHash = verifySignature ? RlpEncode([ (transaction[TRANSACTION_NONCE] as Buffer), (transaction[TRANSACTION_GASPRICE] as Buffer), (transaction[TRANSACTION_STARTGAS] as Buffer), (transaction[TRANSACTION_TO] as Buffer), (transaction[TRANSACTION_VALUE] as Buffer), (transaction[TRANSACTION_DATA] as Buffer), Buffer.from([options.chainId]), Buffer.from([]), Buffer.from([]), ]) : RlpEncode([ (transaction[TRANSACTION_NONCE] as Buffer), (transaction[TRANSACTION_GASPRICE] as Buffer), (transaction[TRANSACTION_STARTGAS] as Buffer), (transaction[TRANSACTION_TO] as Buffer), (transaction[TRANSACTION_VALUE] as Buffer), (transaction[TRANSACTION_DATA] as Buffer) ]); let from: bigint; if (process.browser || native === undefined || !options.native) { const hash = keccak('keccak256').update(toHash).digest(); // Recover and decompress the public key const pubKey = secp256k1.recover(hash, signature, recovery, false).slice(1); if (pubKey.length !== 64) { throw new EthereumBlockDecoderError( `Incorrect public key length ${pubKey.length}`); } from = toBigIntBE(keccak('keccak256').update(pubKey).digest().slice(-20)); if (from === undefined) { throw new EthereumBlockDecoderError(`Failed to get from account`); } } else { from = await native.recoverFromAddress(toHash, signature, recovery === 1); } const toBuffer = transaction[TRANSACTION_TO] as Buffer; return { nonce: toBigIntBE(transaction[TRANSACTION_NONCE] as Buffer), gasPrice: 
toBigIntBE(transaction[TRANSACTION_GASPRICE] as Buffer), gasLimit: toBigIntBE(transaction[TRANSACTION_STARTGAS] as Buffer), to: toBuffer.length === 0 ? BigInt(-1) : toBigIntBE(toBuffer), value: toBigIntBE(transaction[TRANSACTION_VALUE] as Buffer), data: transaction[TRANSACTION_DATA] as Buffer, from }; } /** * Given a RLP-serialized list with an Ethereum block, decodes the list and * validates the Ethereum block. * * @param header The RLP-encoded list with the transaction to decode. * * @returns A validated and decoded EthereumTransaction. */ export async function decodeBlock( rlp: RlpList, options: EthereumBlockDecoderOptions = defaultOptions): Promise<EthereumBlock> { // Each incoming block should be an RLP list. if (!Array.isArray(rlp)) { throw new EthereumBlockDecoderError(`Expected RLP-encoded list!`); } // The RlpList should have 3 parts: the header, the transaction list and the // uncle list. const header: EthereumHeader = decodeHeader(rlp[0] as RlpList); if (header.blockNumber >= defaultOptions.eip155Block) { defaultOptions.eip155 = true; } const transactionPromises: Array<Promise<EthereumTransaction>> = (rlp[1] as RlpList).map(tx => decodeTransaction(tx as RlpList, options)); const transactions: EthereumTransaction[] = await Promise.all(transactionPromises); const uncles: EthereumHeader[] = (rlp[2] as RlpList).map(buf => decodeHeader(buf as RlpList)); return {header, transactions, uncles} as EthereumBlock; } /** * Remove leading null bytes from a buffer. * * @param buf Buffer to remove null bytes from * * @returns A slice of the buffer without null bytes. */ function removeNullPrefix(buf: Buffer): Buffer { for (let i = 0; i < buf.length; i++) { if (buf[i] !== 0) { return buf.slice(i); } } return Buffer.from([]); } /** * Encodes an Ethereum header as a RLP list * * @param header The Ethreum header to encode. * * @return A RlpList with the encoded Ethereum header. 
*/ export function encodeHeaderAsRLP(header: EthereumHeader): RlpList { const asRlpList: RlpList = []; asRlpList[HEADER_PARENT_HASH] = toBufferBE(header.parentHash, 32); asRlpList[HEADER_UNCLE_HASH] = toBufferBE(header.uncleHash, 32); asRlpList[HEADER_BENEFICIARY] = toBufferBE(header.beneficiary, 20); asRlpList[HEADER_STATE_ROOT] = toBufferBE(header.stateRoot, 32); asRlpList[HEADER_TRANSACTIONS_ROOT] = toBufferBE(header.transactionsRoot, 32); asRlpList[HEADER_RECEIPTS_ROOT] = toBufferBE(header.receiptsRoot, 32); asRlpList[HEADER_LOGSBLOOM] = header.logsBloom; asRlpList[HEADER_DIFFICULTY] = removeNullPrefix(toBufferBE(header.difficulty, 32)); asRlpList[HEADER_BLOCK_NUMBER] = removeNullPrefix(toBufferBE(header.blockNumber, 32)); asRlpList[HEADER_GAS_LIMIT] = removeNullPrefix(toBufferBE(header.gasLimit, 32)); asRlpList[HEADER_GAS_USED] = removeNullPrefix(toBufferBE(header.gasUsed, 32)); asRlpList[HEADER_TIMESTAMP] = removeNullPrefix(toBufferBE(header.timestamp, 32)); asRlpList[HEADER_EXTRADATA] = header.extraData; asRlpList[HEADER_MIXHASH] = toBufferBE(header.mixHash, 32); asRlpList[HEADER_NONCE] = toBufferBE(header.nonce, 8); return asRlpList; } /** * Encodes a new block. Transactions must be encoded and signed as a RLPList * * @param header The Ethreum header to encode. * @param transactions Encoded, signed transactions to include * @param uncleList A list of uncles to include * * @return A new RLP encoded Ethereum block. */ export function encodeBlock( header: EthereumHeader, transactions: RlpList, uncleList: EthereumHeader[]): Buffer { const asRlpList: RlpList = [ encodeHeaderAsRLP(header), transactions, uncleList.map(uncle => encodeHeaderAsRLP(uncle)) ]; return RlpEncode(asRlpList); } /** * Get the public address of a given private key. * * @param privateKey The private key to obtain an address for. It should be a * 256-bit bigint which cannot be 0. * @param useNativeIfAvailable Set to false to force fallback to js-only code. 
* * @return The public address for the given private key. */ export function getPublicAddress( privateKey: bigint, useNativeIfAvailable = true) { if (process.browser || native === undefined || !useNativeIfAvailable) { // Public address is last 20 bytes of the hashed public key (bytes 1-65) const pubKey = secp256k1.publicKeyCreate(toBufferBE(privateKey, 32), false); const hashed = toBigIntBE( keccak('keccak256').update(pubKey.slice(1)).digest().slice(-20)); return hashed; } return native.getPublicAddress(privateKey); } /** * Sign an [EthereumTransaction] using a private key. * * @param transaction The transaction to sign. The from field, if present, is * ignored (it will be derived from the private key) * @param privateKey The private key to sign the transaction with. * @param chainId The chain id to use. 0=pre EIP-155 semantics. 1=mainnet. * @param useNativeIfAvailable Set to false to force fallback to js-only code. * * @return A [RlpList] representing the transaction. Run this list through * RlpEncode to obtain a [Buffer]. */ export function signTransaction( transaction: EthereumTransaction, privateKey: bigint, chainId = 1, useNativeIfAvailable = true) { const rlpList: RlpList = [ removeNullPrefix(toBufferBE(transaction.nonce, 32)), removeNullPrefix(toBufferBE(transaction.gasPrice, 32)), removeNullPrefix(toBufferBE(transaction.gasLimit, 32)), transaction.to === CONTRACT_CREATION ? 
Buffer.from([]) : toBufferBE(transaction.to, 20), removeNullPrefix(toBufferBE(transaction.value, 32)), transaction.data ]; // EIP-155 transaction if (chainId !== 0) { rlpList[TRANSACTION_V] = Buffer.from([chainId]); rlpList[TRANSACTION_R] = Buffer.from([]); rlpList[TRANSACTION_S] = Buffer.from([]); } const toHash = RlpEncode(rlpList); if (process.browser || native === undefined || !useNativeIfAvailable) { const hash = keccak('keccak256').update(toHash).digest(); const signature = secp256k1.sign(hash, toBufferBE(privateKey, 32)); rlpList[TRANSACTION_R] = removeNullPrefix(signature.signature.slice(0, 32)); rlpList[TRANSACTION_S] = removeNullPrefix(signature.signature.slice(32, 64)); rlpList[TRANSACTION_V] = Buffer.from( [chainId > 0 ? signature.recovery + (chainId * 2 + 35) : signature.recovery + 27]); return rlpList; } else { const ret = native.signTransaction(toHash, privateKey, chainId, rlpList); ret[TRANSACTION_R] = removeNullPrefix(ret[TRANSACTION_R] as Buffer); ret[TRANSACTION_S] = removeNullPrefix(ret[TRANSACTION_S] as Buffer); return ret; } }
new Error(`r > 32 bytes!`); } if (s.len
conditional_block