text stringlengths 11 4.05M |
|---|
package google
import (
"strings"
"github.com/davecgh/go-spew/spew"
log "github.com/sirupsen/logrus"
)
// mergeOutput re-aligns translation responses with their originating input
// objects. The upstream service sometimes joins two consecutive inputs into
// a single response; when input[i] no longer matches output[i], the next
// output entry is merged into the current one until the slices line up.
// Irrecoverable mismatches terminate the process via log.Fatal.
func mergeOutput(input []inputObject, output []responsePair) []responsePair {
	inputCount := len(input)
	outputCount := len(output)
	if inputCount == 0 || outputCount == 0 {
		return output
	}
	// More inputs than outputs can never be repaired by merging outputs.
	if inputCount > outputCount {
		log.Fatal("Truncated output", spew.Sdump(input), spew.Sdump(output))
	}
	for i := 0; i < outputCount-1; i++ {
		in := input[i].req.Text
		out := output[i].input
		// Aligned entry: nothing to merge here.
		if strings.TrimSpace(in) == strings.TrimSpace(out) {
			continue
		}
		if len(in) < len(out) {
			log.Warnf("original text is smaller than output! %q %q", in, out)
		}
		// TODO: Loop and handle more than one item join
		next := i + 1
		if next > outputCount-1 {
			log.Fatal("output exhausted, unable to get proper results")
		}
		nextOut := output[next].input
		// Only check next input if it exists.
		// BUG FIX: the previous bound (next < inputCount-1) skipped this
		// truncation check for the final input element; index `next` is
		// valid whenever next < inputCount.
		if next < inputCount {
			nextIn := input[next].req.Text
			if nextIn == nextOut {
				log.Fatalf("output has truncated input string\n%q == %q\n%s\n%s", nextIn, nextOut, spew.Sdump(input), spew.Sdump(output))
			}
		}
		out += nextOut
		// Update current record
		output[i].input = out
		output[i].output += output[next].output
		// Delete next item in output
		if next == outputCount-1 {
			// Truncate
			output = output[:i+1]
		} else {
			// Cut
			output = append(output[:next], output[next+1:]...)
		}
		outputCount--
		// Exit if we have balanced items
		if inputCount == outputCount {
			break
		}
	}
	return output
}
|
package day5
import (
"fmt"
"io/ioutil"
"os"
"strings"
)
//DayFiveOne Day five task one: find the highest seat ID among the boarding
//passes listed in ./5/input.txt and print it.
func DayFiveOne() {
	input, err := ioutil.ReadFile("./5/input.txt")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	highestSeatID := 0
	for _, card := range strings.Split(string(input), "\n") {
		// BUG FIX: skip blank/truncated lines (e.g. the empty string after a
		// trailing newline) which previously caused an out-of-range panic on
		// boardingCards[i][0:7].
		if len(card) < 10 {
			continue
		}
		if id := seatID(card); id > highestSeatID {
			highestSeatID = id
		}
	}
	fmt.Println(highestSeatID)
}

// seatID decodes a 10-character boarding pass. The binary-space-partition
// letters are simply binary digits (B/R = 1, F/L = 0), and because
// seatID = row*8 + column, the full pass is one 10-bit number.
func seatID(card string) int {
	id := 0
	for _, c := range card[:10] {
		id <<= 1
		if c == 'B' || c == 'R' {
			id |= 1
		}
	}
	return id
}
// createNumberArray returns the slice of consecutive integers in the
// inclusive range [min, max].
func createNumberArray(min, max int) []int {
	nums := make([]int, 0, max-min+1)
	for n := min; n <= max; n++ {
		nums = append(nums, n)
	}
	return nums
}
|
package controllers
import (
"crud/models"
"strconv"
"strings"
"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
)
// InsereController serves the product-insert page (template "insere.tpl").
type InsereController struct {
	beego.Controller
}

// Get renders the insert form.
func (c *InsereController) Get() {
	c.TplName = "insere.tpl"
}
// Post reads the product form fields, inserts a new Produto inside a
// transaction, and reports success or failure via c.Data["msg"].
func (c *InsereController) Post() {
	c.TplName = "insere.tpl"
	codigo := 0
	nome := c.GetString("nome")
	// Accept Brazilian decimal notation ("1,50") by normalizing the comma.
	preco, err := strconv.ParseFloat(strings.Replace(c.GetString("preco"), ",", ".", -1), 64)
	if err != nil {
		// BUG FIX: the parse error was previously ignored, silently
		// overwritten by the Insert call's error below.
		c.Data["msg"] = "Falha na inserção\nErro :" + err.Error()
		return
	}
	p := models.Produto{codigo, nome, preco}
	o := orm.NewOrm()
	o.Begin()
	id, err := o.Insert(&p)
	if err != nil {
		o.Rollback()
		// BUG FIX: strconv.FormatInt panics for base > 36; 64 was passed as
		// the base where decimal (10) was intended.
		msg := "Falha na inserção\nErro :" + err.Error() + "\nCódigo : " + strconv.FormatInt(id, 10)
		c.Data["msg"] = msg
	} else {
		o.Commit()
		msg := "Cadastro realizado com sucesso !"
		c.Data["msg"] = msg
	}
}
|
package cmd
import (
"context"
"github.com/kumahq/kuma/pkg/core"
)
// RunCmdOpts groups the hooks used when running a command.
type RunCmdOpts struct {
	// SetupSignalHandler returns a Context that is canceled when the
	// process receives a termination signal.
	SetupSignalHandler func() context.Context
}

// DefaultRunCmdOpts wires the core package's signal handler.
var DefaultRunCmdOpts = RunCmdOpts{
	SetupSignalHandler: core.SetupSignalHandler,
}
|
package meta
// LicenseType enumerates known software license categories.
// The zero value means no license was declared.
type LicenseType uint8

// License types. The iota ordering is part of the stored representation,
// so new values must only be appended.
const (
	// Unlicensed means no license was declared.
	Unlicensed LicenseType = iota
	// Proprietary is a closed, vendor-specific license.
	Proprietary
	// Custom is a bespoke license not matching a well-known one.
	Custom
	// GPLv3 is the GNU General Public License v3.
	GPLv3
	// GPLv2 is the GNU General Public License v2.
	GPLv2
	// LGPLv3 is the GNU Lesser General Public License v3.
	LGPLv3
	// LGPLv2_1 is the GNU Lesser General Public License v2.1.
	LGPLv2_1
	// AGPLv3_0 is the GNU Affero General Public License v3.0.
	AGPLv3_0
	// Apache2_0 is the Apache License 2.0.
	Apache2_0
	// MPL_2_0 is the Mozilla Public License 2.0.
	MPL_2_0
	// PublicDomain marks work dedicated to the public domain.
	PublicDomain
)
|
package ipfix
import (
"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"ipfix-gen/util"
"net"
"reflect"
"testing"
"time"
)
// TestCheckE is a scratch/exploratory test: it prints hex vs shifted
// literals, compares an untyped constant against uint8(0), and shows the
// reflected size/name and byte-slice form of each. It asserts nothing.
func TestCheckE(t *testing.T) {
	fmt.Println(0x80)
	fmt.Println(1 << 7)
	fmt.Println(0x00 == uint8(0))
	fmt.Println(reflect.TypeOf(0x00).Size(), reflect.TypeOf(uint8(0)).Size())
	fmt.Println(reflect.TypeOf(0x00).Name(), reflect.TypeOf(uint8(0)).Name())
	fmt.Println([]byte{0x00}, []byte{uint8(0)})
	fmt.Println(len([]byte{0x00}), len([]byte{uint8(0)}))
}
// TestParseData is a scratch/exploratory test showing how net.IP values and
// fixed-width integers serialize via encoding/binary in big-endian order.
// NOTE(review): the parameter t shadows the package-level time variable t;
// binary.Write errors are ignored throughout — acceptable for a scratch test.
func TestParseData(t *testing.T) {
	ip := net.ParseIP("127.0.3.8").To4()
	fmt.Println(len(ip), ip, reflect.TypeOf(ip).Name(), reflect.TypeOf(ip).Size())
	ip2 := net.IP{127, 0, 3, 8}
	fmt.Println(len(ip2), ip2, reflect.TypeOf(ip2).Name(), reflect.TypeOf(ip2).Size())
	buf := new(bytes.Buffer)
	binary.Write(buf, binary.BigEndian, ip2)
	fmt.Println(buf.Bytes())
	//uint16
	var a uint16 = 23
	buf = new(bytes.Buffer)
	binary.Write(buf, binary.BigEndian, a)
	fmt.Println("uint16 a:", buf.Bytes())
	//uint32
	var b uint32 = 23
	buf = new(bytes.Buffer)
	binary.Write(buf, binary.BigEndian, b)
	fmt.Println("b uint32:", buf.Bytes())
	ip4 := net.ParseIP("2400:dd01:12:1028:0:3316:12b2:979d").To16()
	fmt.Println(len(ip4), ip4)
	//ip4 := net.IP{2400,0xdd01,0x1001,0x1028,0x9999,0x3316,0x12b2,0x979d}
	//fmt.Println(len(ip4),ip4)
	buf = new(bytes.Buffer)
	binary.Write(buf, binary.BigEndian, ip4)
	fmt.Println(buf.Bytes())
	fmt.Println(len(mac))
}
// TestGetLength prints the minimum encoded length of information element 5
// (enterprise number 0) looked up in the IPFIX information model.
// It asserts nothing.
func TestGetLength(t *testing.T) {
	fmt.Println(InfoModel[ElementKey{
		EnterpriseNo: 0,
		ElementID:    5,
	}].Type.minLen())
}
var mac, _ = net.ParseMAC("00-FF-1D-3C-84-D4")
var t, _ = time.Parse("yyyy-MM-dd HH:mm:ss", "2018-5-6 13:30:00")
// testMap maps IPFIX information element IDs to sample values in their
// natural Go types.
// NOTE: the encoder cannot serialize every one of these types directly
// (original note: "Error, can not parse every type") — see testBytesMap for
// the pre-encoded variant.
var testMap = map[int]interface{}{
	4:  uint16(34),
	7:  uint32(32768),
	8:  net.ParseIP("10.10.29.8").To4(),
	62: net.ParseIP("2400:dd01:1001:1028:9999:3316:12b2:979d").To16(),
	21: t.UnixNano(),
	56: mac,
}
// testBytesMap is the pre-encoded variant of testMap: every value is already
// a byte sequence ready for message building ("need bytes arr before build").
// NOTE(review): element 4 is a single byte {34} while testMap holds a
// uint16 for it — confirm the expected on-wire width.
var testBytesMap = map[int]interface{}{
	4:  []byte{34},
	7:  util.HostTo2Net(32768),
	8:  net.ParseIP("10.10.29.8").To4(),
	62: net.ParseIP("2400:dd01:1001:1028:9999:3316:12b2:979d").To16(),
	21: util.HostTo4Net(uint32(t.UnixNano())),
	56: mac,
}
// IDs lists the information element IDs used by TestBuildIPFIX; Vals holds
// the matching pre-encoded values, index-aligned with IDs.
var IDs = []uint16{4, 7, 8, 62, 21, 56}
var Vals = []interface{}{[]byte{34}, util.HostTo2Net(32768), net.ParseIP("10.10.29.8").To4(), net.ParseIP("2400:dd01:1001:1028:9999:3316:12b2:979d").To16(),
	util.HostTo4Net(uint32(t.UnixNano())), mac}
// TestBuildIPFIX builds an IPFIX message from the IDs/Vals fixtures, prints
// its JSON and encoded form, then transmits it over UDP.
// NOTE(review): this test performs real network I/O to hard-coded addresses
// and asserts nothing — it is a manual integration probe, not a unit test.
func TestBuildIPFIX(t *testing.T) {
	tID := getTemplateID()
	//msg := buildMap(testBytesMap, tID)
	msg := BuildArr(IDs, Vals, tID)
	Filling(msg)
	js, _ := json.Marshal(msg)
	fmt.Println(bytes.NewBuffer(js).String())
	bs := Encode(*msg, 234234234)
	fmt.Println(bs)
	//send(bs,"127.0.0.1:2055","127.0.0.1:2055")
	send(bs, "10.10.28.139:8088", "159.226.26.107:4739")
}
// send transmits message 100 times over UDP from srcAddr to dstAddr,
// panicking on any setup or write failure (test-helper semantics).
func send(message []byte, srcAddr, dstAddr string) {
	laddr, err := net.ResolveUDPAddr("udp", srcAddr)
	if err != nil {
		panic(err)
	}
	udpDialer := net.Dialer{
		Timeout:   10 * time.Second, // idiomatic duration literal
		LocalAddr: laddr,
	}
	conn, err := udpDialer.Dial("udp", dstAddr)
	if err != nil {
		panic(err)
	}
	// BUG FIX: defer guarantees the connection is closed even if a write
	// below panics (previously Close was skipped on early exit).
	defer conn.Close()
	for count := 100; count > 0; count-- {
		// BUG FIX: write errors were silently dropped.
		if _, err := conn.Write(message); err != nil {
			panic(err)
		}
	}
}
// templateID holds the last template ID handed out; see getTemplateID.
var templateID uint16 = 257

// getTemplateID returns the next template ID, wrapping back to 257 once the
// counter reaches 1<<15 - 1. Not safe for concurrent use.
func getTemplateID() uint16 {
	next := templateID + 1
	if next >= 1<<15-1 {
		next = 257
	}
	templateID = next
	return next
}
|
package response
//BusRoute holds one bus-route record from the REST API response.
type BusRoute struct {
	Description string `json:"Description"` // human-readable route description
	ProviderID  string `json:"ProviderID"`  // identifier of the operating provider
	Route       string `json:"Route"`       // route number/name
}
|
package main
import (
"encoding/json"
"fmt"
"time"
"github.com/evanxg852000/eserveless/internal/core"
"github.com/evanxg852000/eserveless/internal/database"
"github.com/evanxg852000/eserveless/internal/helpers"
"github.com/gofiber/fiber"
"github.com/sirupsen/logrus"
)
// ProjectController provides all project & function HTTP handlers.
type ProjectController struct {
	store database.Datastore // persistence backend for projects and functions
}
// ListProjects ...
// TODO: not implemented yet; currently only prints a placeholder.
func (pc *ProjectController) ListProjects(c *fiber.Ctx) {
	//pc.store.Close()
	fmt.Println("test")
}
// CreateProject registers (or re-deploys) a project from a github repository
// URL posted as JSON ({"repository": "..."}). It validates the URL, sets up
// the project and its functions, and reports what happened.
func (pc *ProjectController) CreateProject(c *fiber.Ctx) {
	var data map[string]string
	err := json.Unmarshal([]byte(c.Body()), &data)
	if err != nil {
		c.Status(400).JSON(fiber.Map{
			"error":   "Bad Request",
			"message": "unable to parse request data",
		})
		return
	}
	repoURL, projectName, err := helpers.ValidateGithubRepoURL(data["repository"])
	if err != nil {
		c.Status(400).JSON(fiber.Map{
			"error":   "Bad Request",
			"message": "repository field is not a valid github repository url",
		})
		return
	}
	//attempt to create project and functions
	httpFns := make([]string, 0)
	hasChanged, isCreated, err := core.SetupProject(pc.store, projectName, repoURL, &httpFns)
	if err != nil {
		c.Status(500).JSON(fiber.Map{
			"error":   "Server Error",
			"message": err.Error(),
		})
		return
	}
	// Idiom fix: compare booleans directly instead of "== false".
	if !hasChanged {
		c.Status(200).JSON(fiber.Map{
			"message": "repository has not changed since last deployment",
		})
		return
	}
	status := "created"
	if !isCreated {
		status = "updated"
	}
	c.JSON(fiber.Map{
		"message":    fmt.Sprintf("Yeah! project %s.", status),
		"repository": repoURL,
		"project":    projectName,
		"functions":  httpFns,
	})
}
// GetProject ...
// TODO: not implemented yet.
func (pc *ProjectController) GetProject(c *fiber.Ctx) {
}

// DeleteProject ...
// TODO: not implemented yet.
func (pc *ProjectController) DeleteProject(c *fiber.Ctx) {
}
// InvokeFunction looks up the project and function named in the URL
// parameters, runs the function's docker image, and redirects the incoming
// request to the started container.
func (pc *ProjectController) InvokeFunction(c *fiber.Ctx) {
	projectName := c.Params("project")
	functionName := c.Params("function")
	project := pc.store.GetProject(projectName)
	if project == nil {
		c.Status(404).JSON(fiber.Map{
			"message": "project not found!",
		})
		return
	}
	// Only HTTP-handler functions are invokable through this endpoint.
	function := pc.store.GetFunction(functionName, project.ID)
	if function == nil || function.Handler != database.HttpHandler {
		c.Status(404).JSON(fiber.Map{
			"message": "function not found!",
		})
		return
	}
	//run container and redirect current request to it
	err := helpers.RunDockerImage(function, func(url string) {
		fmt.Println("\n request", url)
		// presumably gives the container time to start listening — confirm
		time.Sleep(2 * time.Second)
		// NOTE(review): 301 is a *permanent* redirect that clients may cache,
		// yet the container URL is ephemeral — consider 302/307 instead.
		c.Redirect(url, 301)
	})
	if err != nil {
		logrus.Error(err.Error())
		return
	}
	logrus.Info("function invoked succesfully")
}
|
package gormv2
import (
"fmt"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"gorm.io/gorm/logger"
"log"
"os"
"time"
)
var (
	// db is the shared gorm connection handle, set once by InitMysql.
	db *gorm.DB
)
// InitMysql initializes the package-level DB handle from a connection base
// of the form [user]:[password]@tcp([host]:[port]); charset/collation/time
// options are appended here. (Original note: db.table([库名].[表名]).)
func InitMysql(dbUrl string) {
	dsn := fmt.Sprintf("%v/?charset=utf8mb4&collation=utf8mb4_unicode_ci&parseTime=True&loc=Local", dbUrl)
	db = NewMysql(dsn, 10)
}
// GetMainDB returns the package-level *gorm.DB set by InitMysql.
// NOTE(review): it is nil until InitMysql has been called.
func GetMainDB() *gorm.DB {
	return db
}
// NewMysql opens a gorm v2 MySQL connection for the given DSN, attaches a
// stdout logger (info level, 1s slow-query threshold), and configures the
// connection pool. It panics if the database cannot be reached.
func NewMysql(args string, maxCon int) *gorm.DB {
	newLogger := logger.New(
		log.New(os.Stdout, "\r\n", log.LstdFlags), // io writer
		logger.Config{
			SlowThreshold: time.Second, // slow SQL threshold
			LogLevel:      logger.Info, // Log level
			Colorful:      false,       // disable colorized output
		},
	)
	db, err := gorm.Open(mysql.Open(args), &gorm.Config{Logger: newLogger})
	if err != nil {
		panic(fmt.Sprintf("Got errors when connect database, the errors is '%v'", err))
	}
	sqlDB, err := db.DB()
	if err != nil {
		panic(err)
	}
	idle := maxCon
	// NOTE(review): this leaves idle == maxCon unless maxCon/3 > 10, in
	// which case idle becomes maxCon/3. If the intent was "a third of
	// maxCon but at least 10", the condition looks inverted — confirm
	// before changing.
	if maxCon/3 > 10 {
		idle = maxCon / 3
	}
	// Maximum number of idle connections kept in the pool.
	sqlDB.SetMaxIdleConns(idle)
	// Maximum number of open connections to the database.
	sqlDB.SetMaxOpenConns(maxCon)
	return db
}
|
// Copyright 2017 Canonical Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package raftmembership
import (
"time"
"github.com/hashicorp/raft"
)
// Changer is an API that can be used by a raft server to change its
// membership in a cluster (i.e. either join it or leave it).
//
// It works by using some transport layer (e.g. HTTP, TCP, etc) to
// send a membership change request to a target server that is part of
// the cluster and that can handle such requests, possibly redirecting
// the requesting server to another server (e.g. the cluster leader).
//
// It is effectively an extensions of the raft.Transport interface,
// with additional semantics for joining/leaving a raft cluster.
type Changer interface {
	// Join makes the server with the given ID and address join the
	// cluster, giving up after the given timeout.
	Join(raft.ServerID, raft.ServerAddress, time.Duration) error
	// Leave makes the server with the given ID and address leave the
	// cluster, giving up after the given timeout.
	Leave(raft.ServerID, raft.ServerAddress, time.Duration) error
}
|
package runner
import (
"errors"
"github.com/gofrs/uuid"
"github.com/hitman99/peppercd/internal/redis"
log "github.com/sirupsen/logrus"
v1batch "k8s.io/api/batch/v1"
v1meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"time"
)
// Interface is the public API of the pipeline runner.
type Interface interface {
	// RunPipeline starts a new pipeline and streams BuildState updates.
	RunPipeline(req *Request) (<-chan *BuildState, error)
	// GetPipeline fetches a stored pipeline by its UID.
	GetPipeline(uid string) (*Pipeline, error)
	// ListArtifacts lists artifact names produced by a pipeline.
	ListArtifacts(pipelineId string) ([]string, error)
	// GetArtifact returns the contents of a named pipeline artifact.
	GetArtifact(pipelineId, name string) ([]byte, error)
	// RetryFailedJobs re-runs the non-succeeded jobs of a stored pipeline.
	RetryFailedJobs(pipelineId string, ctx *BuildContext) (<-chan *BuildState, error)
}
// runner executes pipelines as kubernetes Jobs, persisting state in REDIS
// under a lease owned by this runner instance.
type runner struct {
	cfg    *Config               // runner configuration
	kube   *kubernetes.Clientset // kubernetes client used to create/watch Jobs
	id     string                // identity of this runner (lease owner)
	rc     redis.Client          // REDIS-backed pipeline/lease store
	logger *log.Logger
}
// GetPipeline loads the pipeline identified by pipelineId from REDIS.
func (r *runner) GetPipeline(pipelineId string) (*Pipeline, error) {
	pipeline := new(Pipeline)
	if err := r.rc.GetPipeline(pipelineId, pipeline); err != nil {
		return nil, err
	}
	return pipeline, nil
}
// RunPipeline creates a new pipeline from the request, persists it to REDIS
// under a lease, and executes its jobs sequentially in the background.
// It returns a channel on which BuildState updates are published; the
// channel is closed when the pipeline finishes (successfully or not).
func (r *runner) RunPipeline(req *Request) (<-chan *BuildState, error) {
	if req == nil {
		return nil, errors.New("invalid build request")
	}
	// Materialize the job records from the definitions with unknown status.
	jobs := make([]*Job, 0, len(req.PipelineDef.Jobs))
	for _, j := range req.PipelineDef.Jobs {
		jobs = append(jobs, &Job{
			JobDef: j,
			UID:    "",
			Status: JOB_UNKNOWN,
			Done:   false,
		})
	}
	p := &Pipeline{
		PipelineDef: *req.PipelineDef,
		UID:         uuid.Must(uuid.NewV4()).String(),
		Status:      0,
		Jobs:        jobs,
	}
	r.logger.WithField("pipelineUid", p.UID).Debug("creating pipeline")
	lease, err := r.rc.CreatePipeline(p.UID, r.id, p)
	if err != nil {
		return nil, err
	}
	lp := &livePipeline{
		lease:        lease,
		buildContext: req.Context,
		updates:      make(chan *BuildState),
		p:            p,
	}
	stop := make(chan struct{})
	// Lease keeper: periodically extends the REDIS lease until `stop`
	// closes (which happens when the executor goroutine below finishes).
	go func() {
		for {
			select {
			case <-time.After(redis.LEASE_DURATION / 2):
				if lp.lease.Expires.Before(time.Now().Add(redis.LEASE_DURATION)) {
					extendedLease, err := r.rc.ExtendLease(r.id, lp.p.UID)
					if err != nil {
						r.logger.WithError(err).WithField("pipelineUid", lp.p.UID).Error("failed to extend lease for pipeline")
						// cannot extend lease, this will break saving pipeline state to REDIS, exiting
						return
					} else {
						lp.lease = extendedLease
						r.logger.WithFields(log.Fields{"pipelineUid": lp.p.UID, "expiration": lp.lease.Expires.String()}).Debug("lease for pipeline extended")
					}
				}
			case <-stop:
				return
			}
		}
	}()
	// Executor: runs the jobs one by one, streaming state updates.
	// Defers run LIFO: lease release first, then panic recovery, then
	// close(stop), then close(updates).
	go func() {
		defer close(lp.updates)
		defer close(stop)
		// Report a panicking pipeline as failed.
		defer func() {
			if rec := recover(); rec != nil {
				r.logger.WithError(rec.(error)).WithField("pipelineUid", lp.p.UID).Error("Pipeline failed")
				r.updateLivePipeline(lp)
				return
			}
		}()
		// Release the lease if it is still held when the executor exits.
		defer func() {
			if lp.lease.Expires.After(time.Now()) {
				if err := r.rc.TerminateLease(lp.p.UID); err != nil {
					r.logger.WithError(err).WithField("pipelineUid", lp.p.UID).Error("failed to terminate lease for pipeline")
				} else {
					r.logger.WithField("pipelineUid", lp.p.UID).Debug("terminating lease for pipeline")
				}
			}
		}()
		for jobId, job := range lp.p.Jobs {
			watcher, err := r.runAndWatchJob(&job.JobDef, lp.p.UID)
			if err != nil {
				lp.p.Status = PIPELINE_FAILED
				lp.p.StatusContext = err.Error()
				r.updateLivePipeline(lp)
				return
			}
			// Watch until the job reaches a terminal state, re-creating the
			// watcher if it dies (watchJob returns true in that case).
			for {
				shouldReWatch := r.watchJob(watcher, jobId, job, lp)
				if !shouldReWatch {
					if lp.p.Status != PIPELINE_FAILED {
						break
					} else {
						r.logger.WithField("pipelineUid", lp.p.UID).WithField("jobName", job.Name).Debug("pipeline execution stopped due to failed job")
						return
					}
				}
				r.logger.WithField("jobName", job.Name).Info("job watcher failed, will re-initialize job watcher")
				// check the status first in case it has changed and the update was missed for some reason
				kubeJob, err := r.getJob(job.Name)
				if err != nil {
					lp.p.Status = PIPELINE_FAILED
					lp.p.StatusContext = err.Error()
					r.updateLivePipeline(lp)
					r.logger.WithField("pipelineUid", lp.p.UID).WithField("jobName", job.Name).Debug("cannot get job status, pipeline execution stopped")
					return
				}
				lp.p.Jobs[jobId] = r.updateJob(kubeJob, job)
				r.updateLivePipeline(lp)
				if kubeJob.Status.Succeeded == 1 {
					r.logger.WithField("pipelineUid", lp.p.UID).WithField("jobName", job.Name).Debug("job succeeded")
					break
				}
				if kubeJob.Status.Failed == 1 {
					r.logger.WithField("pipelineUid", lp.p.UID).WithField("jobName", job.Name).Debug("job failed")
					return
				}
				watcher, err = r.getJobWatcher(job.Name)
				if err != nil {
					lp.p.Status = PIPELINE_FAILED
					lp.p.StatusContext = err.Error()
					r.updateLivePipeline(lp)
					r.logger.WithField("pipelineUid", lp.p.UID).WithField("jobName", job.Name).Debug("job failed, pipeline execution stopped")
					return
				}
			}
		}
		if lp.p.Status != PIPELINE_FAILED {
			// all jobs finished
			lp.p.Status = PIPELINE_COMPLETED
			r.updateLivePipeline(lp)
			r.logger.WithField("pipelineUid", lp.p.UID).Debug("pipeline finished successfully")
		}
	}()
	return lp.updates, nil
}
// RetryFailedJobs reloads a stored pipeline, re-acquires a lease for it, and
// re-runs every job that has not yet succeeded, streaming BuildState updates
// on the returned channel (closed when execution finishes).
func (r *runner) RetryFailedJobs(pipelineId string, ctx *BuildContext) (<-chan *BuildState, error) {
	r.logger.WithField("pipelineUid", pipelineId).Debug("looking up pipeline")
	p := &Pipeline{}
	err := r.rc.GetPipeline(pipelineId, p)
	if err != nil {
		return nil, err
	}
	lease, err := r.rc.AcquireLease(r.id, pipelineId)
	if err != nil {
		return nil, err
	}
	p.Status = PIPELINE_RUNNING
	lp := &livePipeline{
		lease:        lease,
		buildContext: ctx,
		updates:      make(chan *BuildState),
		p:            p,
	}
	stop := make(chan struct{})
	// Lease keeper: same periodic lease-extension loop as in RunPipeline.
	go func() {
		for {
			select {
			case <-time.After(redis.LEASE_DURATION / 2):
				if lp.lease.Expires.Before(time.Now().Add(redis.LEASE_DURATION)) {
					extendedLease, err := r.rc.ExtendLease(r.id, lp.p.UID)
					if err != nil {
						r.logger.WithError(err).WithField("pipelineUid", lp.p.UID).Error("failed to extend lease for pipeline")
						// cannot extend lease, this will break saving pipeline state to REDIS, exiting
						return
					} else {
						lp.lease = extendedLease
						r.logger.WithFields(log.Fields{"pipelineUid": lp.p.UID, "expiration": lp.lease.Expires.String()}).Debug("lease for pipeline extended")
					}
				}
			case <-stop:
				return
			}
		}
	}()
	// Executor goroutine.
	// NOTE(review): the lease-release and recover defers are registered in
	// the opposite order compared to RunPipeline — confirm the ordering
	// difference is intentional.
	go func() {
		defer close(lp.updates)
		defer close(stop)
		defer func() {
			if lp.lease.Expires.After(time.Now()) {
				if err := r.rc.TerminateLease(lp.p.UID); err != nil {
					r.logger.WithError(err).WithField("pipelineUid", lp.p.UID).Error("failed to terminate lease for pipeline")
				} else {
					r.logger.WithField("pipelineUid", lp.p.UID).Debug("terminating lease for pipeline")
				}
			}
		}()
		defer func() {
			if rec := recover(); rec != nil {
				r.logger.WithError(rec.(error)).WithField("pipelineUid", lp.p.UID).Error("Pipeline failed")
				r.updateLivePipeline(lp)
				return
			}
		}()
		for jobId, job := range lp.p.Jobs {
			// Jobs that already succeeded in the previous run are skipped.
			if job.Status == JOB_SUCCEEDED {
				r.logger.WithField("jobName", job.Name).Info("skipping job because it already succeeded")
				r.updateLivePipeline(lp)
				continue
			}
			watcher, err := r.runAndWatchJob(&job.JobDef, lp.p.UID)
			if err != nil {
				lp.p.Status = PIPELINE_FAILED
				lp.p.StatusContext = err.Error()
				r.updateLivePipeline(lp)
				return
			}
			for {
				shouldReWatch := r.watchJob(watcher, jobId, job, lp)
				if !shouldReWatch {
					if lp.p.Status != PIPELINE_FAILED {
						break
					} else {
						r.logger.WithField("pipelineUid", lp.p.UID).WithField("jobName", job.Name).Debug("pipeline execution stopped due to failed job")
						return
					}
				}
				r.logger.WithField("jobName", job.Name).Info("job watcher failed, will re-initialize job watcher")
				// NOTE(review): unlike RunPipeline, the re-watch path here
				// does not re-check the job status via getJob first — a
				// terminal state reached while the watcher was down could be
				// missed. Confirm whether that check should be mirrored.
				watcher, err = r.getJobWatcher(job.Name)
				if err != nil {
					lp.p.Status = PIPELINE_FAILED
					lp.p.StatusContext = err.Error()
					r.updateLivePipeline(lp)
					r.logger.WithField("pipelineUid", lp.p.UID).WithField("jobName", job.Name).Debug("job failed, pipeline execution stopped")
					return
				}
			}
		}
		if lp.p.Status != PIPELINE_FAILED {
			// all jobs finished
			lp.p.Status = PIPELINE_COMPLETED
			r.updateLivePipeline(lp)
			r.logger.WithField("pipelineUid", lp.p.UID).Debug("pipeline finished successfully")
		}
	}()
	return lp.updates, nil
}
// updateLivePipeline persists the pipeline state to REDIS and publishes a
// BuildState update on the pipeline's channel. If the save fails because the
// lease expired or was lost, it attempts to re-acquire the lease and saves
// once more.
func (r *runner) updateLivePipeline(lp *livePipeline) {
	newLease, err := r.rc.SavePipeline(lp.p.UID, lp.p)
	if err != nil {
		if err == redis.LeaseExpired {
			if _, err := r.rc.ExtendLease(r.id, lp.p.UID); err != nil {
				r.logger.WithError(err).WithField("pipelineUid", lp.p.UID).Error("failed to re-acquire expired lease for pipeline")
			}
		}
		if err == redis.LeaseNotFound {
			if _, err := r.rc.AcquireLease(r.id, lp.p.UID); err != nil {
				r.logger.WithError(err).WithField("pipelineUid", lp.p.UID).Error("failed to re-acquire expired lease for pipeline")
			}
		}
		// BUG FIX: the retry previously used ":=", declaring a shadowed
		// newLease inside this if-block; after the block, the OUTER
		// (zero-valued) newLease was assigned to lp.lease, wiping out the
		// lease obtained by the successful retry. Assign with "=" instead.
		newLease, err = r.rc.SavePipeline(lp.p.UID, lp.p)
		if err != nil {
			r.logger.WithError(err).WithField("pipelineUid", lp.p.UID).Error("failed to save pipeline after re-acquiring lease")
			// Keep the previous lease rather than overwriting it with nil,
			// but still publish the state update to consumers.
			lp.updates <- &BuildState{
				Pipeline:     lp.p,
				BuildContext: lp.buildContext,
			}
			return
		}
	}
	lp.lease = newLease
	lp.updates <- &BuildState{
		Pipeline:     lp.p,
		BuildContext: lp.buildContext,
	}
}
// updateJob syncs the internal Job record with the state reported by the
// kubernetes Job object and returns the updated record. Succeeded takes
// precedence over Failed, which takes precedence over Active; a job with no
// start time yet (and none recorded) is considered scheduled.
func (r *runner) updateJob(kubeJob *v1batch.Job, job *Job) *Job {
	status := JOB_UNKNOWN
	switch {
	case kubeJob.Status.Succeeded == 1:
		status = JOB_SUCCEEDED
		job.Done = true
	case kubeJob.Status.Failed == 1:
		status = JOB_FAILED
	case kubeJob.Status.Active == 1:
		status = JOB_ACTIVE
	}
	job.Status = status
	if kubeJob.Status.StartTime == nil {
		// not sure about this, but it will do for now
		if job.StartTime.IsZero() {
			job.Status = JOB_SCHEDULED
		}
	} else {
		job.StartTime = kubeJob.Status.StartTime.UTC()
	}
	if kubeJob.Status.CompletionTime != nil {
		job.CompletionTime = kubeJob.Status.CompletionTime.UTC()
	}
	job.Name = kubeJob.Name
	return job
}
// New builds a runner with a kubernetes client and a REDIS client derived
// from cfg. The Must* constructors panic if either client cannot be set up.
func New(cfg *Config, runnerId string, logger *log.Logger) Interface {
	return &runner{
		cfg:    cfg,
		kube:   mustNewKubeClient(&cfg.Kubernetes),
		id:     runnerId,
		rc:     redis.MustNewClient(cfg.Redis.IsCluster, cfg.Redis.Address),
		logger: logger,
	}
}
// watchJob consumes kubernetes watch events for one Job and mirrors them
// into the pipeline state, publishing an update after every change. It
// returns true when the caller should re-establish the watcher (the API
// server reported an internal error), false when watching is finished —
// either the job reached a terminal state or the event channel closed.
func (r *runner) watchJob(watcher watch.Interface, jobId int, job *Job, lp *livePipeline) bool {
	for e := range watcher.ResultChan() {
		switch e.Type {
		case watch.Added:
			j := e.Object.DeepCopyObject().(*v1batch.Job)
			lp.p.Jobs[jobId] = r.updateJob(j, job)
			r.updateLivePipeline(lp)
		case watch.Modified:
			j := e.Object.DeepCopyObject().(*v1batch.Job)
			job.UID = string(j.UID)
			lp.p.Jobs[jobId] = r.updateJob(j, job)
			r.updateLivePipeline(lp)
			// if job is not allowed to fail, stop pipeline
			if j.Status.Failed == 1 && !job.AllowFailure {
				lp.p.Status = PIPELINE_FAILED
				lp.p.StatusContext = j.Status.String()
				r.updateLivePipeline(lp)
				watcher.Stop()
				r.logger.WithField("pipelineUid", lp.p.UID).WithField("jobName", job.Name).Debug("job failed")
				return false
			}
			if j.Status.Succeeded == 1 {
				r.logger.WithField("pipelineUid", lp.p.UID).WithField("jobName", job.Name).Debug("job succeeded")
				r.updateLivePipeline(lp)
				// Stopping the watcher closes ResultChan, ending this loop.
				watcher.Stop()
			}
		case watch.Error:
			s := e.Object.DeepCopyObject().(*v1meta.Status)
			// in case of internal error, try to restart watching
			if s.Code == 500 {
				// terminate old watcher
				watcher.Stop()
				return true
			}
			job.Status = JOB_FAILED
			lp.p.Jobs[jobId] = job
			if !job.AllowFailure {
				lp.p.Status = PIPELINE_FAILED
				lp.p.StatusContext = s.Status
				r.updateLivePipeline(lp)
				watcher.Stop()
				r.logger.WithField("pipelineUid", lp.p.UID).WithField("jobName", job.Name).Debug("job failed")
				return false
			}
			// Failure is tolerated for this job: record it and stop watching.
			r.updateLivePipeline(lp)
			watcher.Stop()
		case watch.Deleted:
			// An externally deleted job always fails the pipeline.
			r.logger.WithField("jobName", job.Name).Info("job was deleted externally")
			job.Status = JOB_FAILED
			lp.p.Jobs[jobId] = job
			lp.p.Status = PIPELINE_FAILED
			lp.p.StatusContext = "job was deleted"
			r.updateLivePipeline(lp)
			watcher.Stop()
			return false
		default:
			r.logger.WithField("jobName", job.Name).Info("unexpected job state")
		}
	}
	return false
}
|
// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
package logutil
import (
"encoding/hex"
"encoding/json"
"fmt"
"strings"
"github.com/google/uuid"
"github.com/pingcap/errors"
backuppb "github.com/pingcap/kvproto/pkg/brpb"
"github.com/pingcap/kvproto/pkg/import_sstpb"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/log"
"github.com/pingcap/tidb/br/pkg/redact"
"github.com/pingcap/tidb/kv"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// AbbreviatedArrayMarshaler abbreviates an array of elements.
type AbbreviatedArrayMarshaler []string

// MarshalLogArray implements zapcore.ArrayMarshaler. Arrays longer than
// four elements are rendered as: first, "(skip N)", last.
func (abb AbbreviatedArrayMarshaler) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
	if total := len(abb); total > 4 {
		encoder.AppendString(abb[0])
		encoder.AppendString(fmt.Sprintf("(skip %d)", total-2))
		encoder.AppendString(abb[total-1])
		return nil
	}
	for _, element := range abb {
		encoder.AppendString(element)
	}
	return nil
}
// AbbreviatedArray constructs a field that abbreviates an array of elements.
// marshalFunc converts the caller's collection into the string slice that is
// then abbreviated by AbbreviatedArrayMarshaler.
func AbbreviatedArray(
	key string, elements interface{}, marshalFunc func(interface{}) []string,
) zap.Field {
	return zap.Array(key, AbbreviatedArrayMarshaler(marshalFunc(elements)))
}
// zapFileMarshaler adapts a backup File proto for structured zap logging.
type zapFileMarshaler struct{ *backuppb.File }

// MarshalLogObject implements zapcore.ObjectMarshaler. Keys go through
// redact.Key and the sha256 is hex-encoded.
func (file zapFileMarshaler) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", file.GetName())
	enc.AddString("CF", file.GetCf())
	enc.AddString("sha256", hex.EncodeToString(file.GetSha256()))
	enc.AddString("startKey", redact.Key(file.GetStartKey()))
	enc.AddString("endKey", redact.Key(file.GetEndKey()))
	enc.AddUint64("startVersion", file.GetStartVersion())
	enc.AddUint64("endVersion", file.GetEndVersion())
	enc.AddUint64("totalKvs", file.GetTotalKvs())
	enc.AddUint64("totalBytes", file.GetTotalBytes())
	enc.AddUint64("CRC64Xor", file.GetCrc64Xor())
	return nil
}
// zapFilesMarshaler adapts a slice of backup File protos for zap logging.
type zapFilesMarshaler []*backuppb.File

// MarshalLogObjectForFiles is an internal util function to zap something having `Files` field.
func MarshalLogObjectForFiles(files []*backuppb.File, encoder zapcore.ObjectEncoder) error {
	return zapFilesMarshaler(files).MarshalLogObject(encoder)
}
// MarshalLogObject implements zapcore.ObjectMarshaler: it logs the file
// count, an abbreviated list of file names, and the aggregated KV/byte/size
// totals across all files.
func (fs zapFilesMarshaler) MarshalLogObject(encoder zapcore.ObjectEncoder) error {
	total := len(fs)
	encoder.AddInt("total", total)
	// Single pass: collect names and accumulate totals together.
	names := make([]string, 0, total)
	var totalKVs, totalBytes, totalSize uint64
	for _, file := range fs {
		names = append(names, file.GetName())
		totalKVs += file.GetTotalKvs()
		totalBytes += file.GetTotalBytes()
		totalSize += file.GetSize_()
	}
	_ = encoder.AddArray("files", AbbreviatedArrayMarshaler(names))
	encoder.AddUint64("totalKVs", totalKVs)
	encoder.AddUint64("totalBytes", totalBytes)
	encoder.AddUint64("totalSize", totalSize)
	return nil
}
// File make the zap fields for a file.
func File(file *backuppb.File) zap.Field {
	return zap.Object("file", zapFileMarshaler{file})
}

// Files make the zap field for a set of file.
// The list is abbreviated and totals are aggregated; see zapFilesMarshaler.
func Files(fs []*backuppb.File) zap.Field {
	return zap.Object("files", zapFilesMarshaler(fs))
}
// zapStreamBackupTaskInfo adapts a stream backup task proto for zap logging.
type zapStreamBackupTaskInfo struct{ *backuppb.StreamBackupTaskInfo }

// MarshalLogObject implements zapcore.ObjectMarshaler.
func (t zapStreamBackupTaskInfo) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("taskName", t.Name)
	enc.AddUint64("startTs", t.StartTs)
	enc.AddUint64("endTS", t.EndTs)
	enc.AddString("tableFilter", strings.Join(t.TableFilter, ","))
	return nil
}

// StreamBackupTaskInfo makes the zap fields for a stream backup task info.
func StreamBackupTaskInfo(t *backuppb.StreamBackupTaskInfo) zap.Field {
	return zap.Object("streamTaskInfo", zapStreamBackupTaskInfo{t})
}
// zapRewriteRuleMarshaler adapts an SST rewrite rule for zap logging;
// key prefixes are hex-encoded.
type zapRewriteRuleMarshaler struct{ *import_sstpb.RewriteRule }

// MarshalLogObject implements zapcore.ObjectMarshaler.
func (rewriteRule zapRewriteRuleMarshaler) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("oldKeyPrefix", hex.EncodeToString(rewriteRule.GetOldKeyPrefix()))
	enc.AddString("newKeyPrefix", hex.EncodeToString(rewriteRule.GetNewKeyPrefix()))
	enc.AddUint64("newTimestamp", rewriteRule.GetNewTimestamp())
	return nil
}

// RewriteRule make the zap fields for a rewrite rule.
func RewriteRule(rewriteRule *import_sstpb.RewriteRule) zap.Field {
	return zap.Object("rewriteRule", zapRewriteRuleMarshaler{rewriteRule})
}

// RewriteRuleObject make zap object marshaler for a rewrite rule.
func RewriteRuleObject(rewriteRule *import_sstpb.RewriteRule) zapcore.ObjectMarshaler {
	return zapRewriteRuleMarshaler{rewriteRule}
}
// zapMarshalRegionMarshaler adapts a metapb.Region for zap logging; start
// and end keys go through redact.Key.
type zapMarshalRegionMarshaler struct{ *metapb.Region }

// MarshalLogObject implements zapcore.ObjectMarshaler.
func (region zapMarshalRegionMarshaler) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	peers := make([]string, 0, len(region.GetPeers()))
	for _, peer := range region.GetPeers() {
		peers = append(peers, peer.String())
	}
	enc.AddUint64("ID", region.GetId())
	enc.AddString("startKey", redact.Key(region.GetStartKey()))
	enc.AddString("endKey", redact.Key(region.GetEndKey()))
	enc.AddString("epoch", region.GetRegionEpoch().String())
	enc.AddString("peers", strings.Join(peers, ","))
	return nil
}

// Region make the zap fields for a region.
func Region(region *metapb.Region) zap.Field {
	return zap.Object("region", zapMarshalRegionMarshaler{region})
}

// RegionBy make the zap fields for a region with name.
func RegionBy(key string, region *metapb.Region) zap.Field {
	return zap.Object(key, zapMarshalRegionMarshaler{region})
}
// Leader make the zap fields for a peer as leader.
// nolint:interfacer
func Leader(peer *metapb.Peer) zap.Field {
	return zap.String("leader", peer.String())
}

// Peer make the zap fields for a peer.
func Peer(peer *metapb.Peer) zap.Field {
	return zap.String("peer", peer.String())
}
// zapSSTMetaMarshaler adapts an SST meta proto for zap logging.
type zapSSTMetaMarshaler struct{ *import_sstpb.SSTMeta }

// MarshalLogObject implements zapcore.ObjectMarshaler. Range keys are
// redacted; an unparsable UUID is logged in hex with an "invalid" marker
// instead of failing the log call.
func (sstMeta zapSSTMetaMarshaler) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("CF", sstMeta.GetCfName())
	enc.AddBool("endKeyExclusive", sstMeta.EndKeyExclusive)
	enc.AddUint32("CRC32", sstMeta.Crc32)
	enc.AddUint64("length", sstMeta.Length)
	enc.AddUint64("regionID", sstMeta.RegionId)
	enc.AddString("regionEpoch", sstMeta.RegionEpoch.String())
	enc.AddString("startKey", redact.Key(sstMeta.GetRange().GetStart()))
	enc.AddString("endKey", redact.Key(sstMeta.GetRange().GetEnd()))
	sstUUID, err := uuid.FromBytes(sstMeta.GetUuid())
	if err != nil {
		enc.AddString("UUID", fmt.Sprintf("invalid UUID %s", hex.EncodeToString(sstMeta.GetUuid())))
	} else {
		enc.AddString("UUID", sstUUID.String())
	}
	return nil
}

// SSTMeta make the zap fields for a SST meta.
func SSTMeta(sstMeta *import_sstpb.SSTMeta) zap.Field {
	return zap.Object("sstMeta", zapSSTMetaMarshaler{sstMeta})
}
// zapSSTMetasMarshaler adapts a slice of SST meta protos for zap logging.
type zapSSTMetasMarshaler []*import_sstpb.SSTMeta

// MarshalLogArray implements zapcore.ArrayMarshaler, appending each meta as
// an object; the first encoding error aborts and is returned (traced).
func (m zapSSTMetasMarshaler) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
	for _, meta := range m {
		if err := encoder.AppendObject(zapSSTMetaMarshaler{meta}); err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}

// SSTMetas make the zap fields for SST metas.
func SSTMetas(sstMetas []*import_sstpb.SSTMeta) zap.Field {
	return zap.Array("sstMetas", zapSSTMetasMarshaler(sstMetas))
}
// zapKeysMarshaler adapts a set of raw keys for zap logging: total count
// plus an abbreviated, redacted key list.
type zapKeysMarshaler [][]byte

// MarshalLogObject implements zapcore.ObjectMarshaler.
func (keys zapKeysMarshaler) MarshalLogObject(encoder zapcore.ObjectEncoder) error {
	total := len(keys)
	encoder.AddInt("total", total)
	elements := make([]string, 0, total)
	for _, k := range keys {
		elements = append(elements, redact.Key(k))
	}
	_ = encoder.AddArray("keys", AbbreviatedArrayMarshaler(elements))
	return nil
}

// Key constructs a field that carries upper hex format key.
func Key(fieldKey string, key []byte) zap.Field {
	return zap.String(fieldKey, redact.Key(key))
}

// Keys constructs a field that carries upper hex format keys.
func Keys(keys [][]byte) zap.Field {
	return zap.Object("keys", zapKeysMarshaler(keys))
}
// AShortError make the zap field with key to display error without verbose representation (e.g. the stack trace).
// A nil error becomes a no-op field (zap.Skip).
func AShortError(key string, err error) zap.Field {
	if err != nil {
		return zap.String(key, err.Error())
	}
	return zap.Skip()
}

// ShortError make the zap field to display error without verbose representation (e.g. the stack trace).
// It is AShortError with the conventional "error" key.
func ShortError(err error) zap.Field {
	return AShortError("error", err)
}
// loggerToTerm is a secondary logger targeting the terminal; initialization
// errors are deliberately discarded, and the nil check in WarnTerm covers
// the failure case.
var loggerToTerm, _, _ = log.InitLogger(new(log.Config), zap.AddCallerSkip(1))

// WarnTerm put a log both to terminal and to the log file.
func WarnTerm(message string, fields ...zap.Field) {
	log.Warn(message, fields...)
	if loggerToTerm != nil {
		loggerToTerm.Warn(message, fields...)
	}
}
// RedactAny constructs a redacted field that carries an interface{}.
// When redaction is enabled the value is replaced by "?".
func RedactAny(fieldKey string, key interface{}) zap.Field {
	if redact.NeedRedact() {
		return zap.String(fieldKey, "?")
	}
	return zap.Any(fieldKey, key)
}

// Redact replaces the zap field by a '?' if redaction is turned on.
// The field's key is preserved; only the value is hidden.
func Redact(field zap.Field) zap.Field {
	if redact.NeedRedact() {
		return zap.String(field.Key, "?")
	}
	return field
}
// StringifyKeys wraps the key range into a stringer.
type StringifyKeys []kv.KeyRange

// String renders the ranges in the form "{[a, b), [c, inf)}", delegating
// each range to StringifyRange.
func (kr StringifyKeys) String() string {
	sb := new(strings.Builder)
	sb.WriteString("{")
	for i, rng := range kr {
		if i > 0 {
			sb.WriteString(", ")
		}
		sb.WriteString(StringifyRange(rng).String())
	}
	sb.WriteString("}")
	return sb.String()
}
// StringifyRange is the wrapper for displaying a key range.
type StringifyRange kv.KeyRange

// String renders the range as "[start, end)", with an empty end key shown as
// "inf". Keys are redacted (via redact.Key) when redaction is enabled.
func (rng StringifyRange) String() string {
	sb := new(strings.Builder)
	sb.WriteString("[")
	sb.WriteString(redact.Key(rng.StartKey))
	sb.WriteString(", ")
	var endKey string
	if len(rng.EndKey) == 0 {
		endKey = "inf"
	} else {
		endKey = redact.Key(rng.EndKey)
	}
	// endKey is already redacted by redact.Key above. The previous extra
	// redact.String call double-redacted it and, worse, replaced the
	// non-sensitive "inf" placeholder with "?" when redaction is on.
	sb.WriteString(endKey)
	sb.WriteString(")")
	return sb.String()
}
// StringifyMany returns an array marshaler for a slice of stringers.
func StringifyMany[T fmt.Stringer](items []T) zapcore.ArrayMarshaler {
	marshal := func(enc zapcore.ArrayEncoder) error {
		for _, it := range items {
			enc.AppendString(it.String())
		}
		return nil
	}
	return zapcore.ArrayMarshalerFunc(marshal)
}
// HexBytes is a wrapper which makes a byte sequence print in hex format.
type HexBytes []byte

var (
	_ fmt.Stringer   = HexBytes{}
	_ json.Marshaler = HexBytes{}
)

// String implements fmt.Stringer: lowercase hex, no separators.
func (b HexBytes) String() string {
	return hex.EncodeToString(b)
}

// MarshalJSON implements json.Marshaler by emitting the hex form as a JSON
// string.
func (b HexBytes) MarshalJSON() ([]byte, error) {
	return json.Marshal(b.String())
}
|
// Command symlink demonstrates how ioutil.ReadDir reports a symlinked
// directory (name and mode of the symlink entry).
package main
import (
"fmt"
"io/ioutil"
"os"
)
// main creates a temp directory, symlinks it, then prints the index, name and
// mode of every entry ReadDir reports for the parent directory.
func main() {
	testroot, err := ioutil.TempDir("", "test_symlink")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(testroot)

	dir, err := ioutil.TempDir(testroot, "test_symlink")
	if err != nil {
		panic(err)
	}
	if err := os.Symlink(dir, dir+".link"); err != nil {
		panic(err)
	}

	entries, err := ioutil.ReadDir(testroot)
	if err != nil {
		panic(err)
	}
	for i, entry := range entries {
		fmt.Printf("%d\n", i)
		fmt.Printf("\t%s\n", entry.Name())
		fmt.Printf("\t%+v\n", entry.Mode())
	}
}
|
package models
import (
"testing"
)
// Test_common verifies that a MongoDB connection can be established against
// the default local instance (127.0.0.1:27017).
func Test_common(t *testing.T) {
	mi := MongoInfo{"127.0.0.1:27017", 5, 1000}
	if err := newMongodb(mi); err != nil {
		// Report through the test framework instead of the previous
		// println + t.Fail combination, so the message shows up in
		// `go test` output alongside the failure.
		t.Fatalf("newMongodb(%v): %v", mi, err)
	}
}
|
package main
import (
"fmt"
cd "go.jlucktay.dev/golang-workbench/custom-domain/one"
)
// main prints the greeting exposed by the custom-domain package.
func main() {
	greeting := cd.HelloCustomDomain()
	fmt.Println(greeting)
}
|
package dao
import (
"errors"
"github.com/golang/glog"
"qipai/model"
)
// Room is the package-level singleton through which room persistence helpers
// are accessed.
var Room roomDao

// roomDao groups the database operations for rooms; it carries no state.
type roomDao struct {
}
// Get loads the room with the given id, returning an error when it does not
// exist or the query fails.
func (roomDao) Get(roomId uint) (room model.Room, err error) {
	ret := Db().First(&room, roomId)
	if ret.Error != nil || ret.RecordNotFound() {
		err = errors.New("该房间不存在")
	}
	return
}
// IsRoomPlayer reports whether user uid has joined room rid.
func (roomDao) IsRoomPlayer(rid, uid uint) bool {
	var count int
	Db().Model(&model.Player{}).Where(&model.Player{Uid: uid, RoomId: rid}).Count(&count)
	return count > 0
}
// PlayersSitDown returns every player in the room that has taken a seat
// (desk_id > 0).
func (roomDao) PlayersSitDown(roomId uint) (players []model.Player) {
	query := Db().Where(&model.Player{RoomId: roomId}).Where("desk_id>0")
	query.Find(&players)
	return
}
// Exists reports whether a room with the given id is present.
func (roomDao) Exists(roomId uint) bool {
	var count int
	Db().Model(&model.Room{}).Where("id=?", roomId).Count(&count)
	return count > 0
}
// Delete removes the room record with the given id, logging the underlying
// database error and returning a user-facing one on failure.
func (roomDao) Delete(roomId uint) (err error) {
	if res := Db().Where("id=?", roomId).Delete(&model.Room{}); res.Error != nil {
		glog.Errorln(res.Error)
		return errors.New("解散房间出错")
	}
	return nil
}
// MyRooms returns the non-deleted, non-club rooms that user uid participates
// in, joined through the players table.
func (roomDao) MyRooms(uid uint) (rooms []model.Room) {
	// Example of the generated SQL:
	// select r.* from rooms r join players p on p.room_id=r.id where p.uid=100000;
	Db().Raw("select r.* from rooms r join players p on p.room_id=r.id where r.`deleted_at` IS NULL and r.club_id=0 and p.uid=?", uid).Scan(&rooms)
	return
}
|
package problem0057
import "testing"
// TestSolve logs insert's output for a handful of representative interval
// cases (overlap, multi-merge, containment, extension, empty input).
func TestSolve(t *testing.T) {
	t.Log(insert([][]int{{1, 3}, {6, 9}}, []int{2, 5}))
	t.Log(insert([][]int{{1, 2}, {3, 5}, {6, 7}, {8, 10}, {12, 16}}, []int{4, 8}))
	t.Log(insert([][]int{{1, 5}}, []int{2, 3}))
	t.Log(insert([][]int{{1, 5}}, []int{2, 7}))
	t.Log(insert([][]int{}, []int{2, 7}))
}
|
package adminController
import (
"github.com/krix38/gophotogallery/web/controller/adminController/handlers"
"github.com/krix38/gophotogallery/external/github.com/gorilla/context"
"github.com/krix38/gophotogallery/properties"
"net/http"
"log"
)
// StartAdminController registers the admin HTTP handlers (login plus the
// SSL-session-protected admin routes) and serves them over TLS. It blocks;
// a listen failure terminates the process.
func StartAdminController() {
	http.HandleFunc(properties.UrlAdminLoginPath,
		handlers.CreateSSLOnlyHandler(handlers.Login))
	sslRouting := map[string]func(http.ResponseWriter, *http.Request){
		properties.UrlAdminPath:            handlers.MainView,
		properties.UrlAdminAddGalleryPath:  handlers.AddGallery,
		properties.UrlAdminAddPhotoPath:    handlers.AddPhoto,
		properties.UrlAdminShowPhotoPath:   handlers.ShowPhoto,
		properties.UrlAdminShowGalleryPath: handlers.ShowGallery,
		properties.UrlAdminLogoutPath:      handlers.Logout,
		properties.UrlAdminDeleteGallery:   handlers.DeleteGallery,
		properties.UrlAdminDeletePhoto:     handlers.DeletePhoto,
	}
	handlers.CreateSSLSessionHandlers(sslRouting)
	if err := http.ListenAndServeTLS(
		properties.HttpsPort,
		properties.CertFullPath,
		properties.KeyFullPath,
		context.ClearHandler(http.DefaultServeMux)); err != nil {
		log.Fatal(err)
	}
}
|
/*
命題
「パトカー」+「タクシー」の文字を先頭から交互に連結して文字列「パタトクカシーー」を得よ。
*/
package main
import (
"strings"
"fmt"
)
// main interleaves the characters of "パトカー" and "タクシー" and prints
// the result.
func main() {
	runes1 := strings.Split("パトカー", "")
	runes2 := strings.Split("タクシー", "")
	var sb strings.Builder
	for i := range runes1 {
		sb.WriteString(runes1[i])
		sb.WriteString(runes2[i])
	}
	fmt.Println(sb.String())
	// => パタトクカシーー
}
|
--- vendor/github.com/modern-go/reflect2/unsafe_link.go.orig 2022-04-16 22:01:31 UTC
+++ vendor/github.com/modern-go/reflect2/unsafe_link.go
@@ -19,19 +19,13 @@ func typedslicecopy(elemType unsafe.Pointer, dst, src
//go:linkname mapassign reflect.mapassign
//go:noescape
-func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key, val unsafe.Pointer)
+func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer, val unsafe.Pointer)
//go:linkname mapaccess reflect.mapaccess
//go:noescape
func mapaccess(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
-// m escapes into the return value, but the caller of mapiterinit
-// doesn't let the return value escape.
//go:noescape
-//go:linkname mapiterinit reflect.mapiterinit
-func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) *hiter
-
-//go:noescape
//go:linkname mapiternext reflect.mapiternext
func mapiternext(it *hiter)
@@ -42,9 +36,21 @@ func ifaceE2I(rtype unsafe.Pointer, src interface{}, d
// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
// the layout of this structure.
type hiter struct {
- key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go).
- value unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
- // rest fields are ignored
+ key unsafe.Pointer
+ value unsafe.Pointer
+ t unsafe.Pointer
+ h unsafe.Pointer
+ buckets unsafe.Pointer
+ bptr unsafe.Pointer
+ overflow *[]unsafe.Pointer
+ oldoverflow *[]unsafe.Pointer
+ startBucket uintptr
+ offset uint8
+ wrapped bool
+ B uint8
+ i uint8
+ bucket uintptr
+ checkBucket uintptr
}
// add returns p+x.
|
package rtrserver
import (
"bytes"
"encoding/binary"
"errors"
"github.com/cpusoft/goutil/belogs"
"github.com/cpusoft/goutil/jsonutil"
)
// ParseToRouterKey decodes the body of a ROUTER KEY PDU from buf and returns
// the resulting model. Any short read is logged and wrapped as a
// CORRUPT_DATA RTR error.
func ParseToRouterKey(buf *bytes.Reader, protocolVersion uint8) (rtrPduModel RtrPduModel, err error) {
	/*
		ProtocolVersion uint8 `json:"protocolVersion"`
		PduType uint8 `json:"pduType"`
		Flags uint8 `json:"flags"`
		Zero uint8 `json:"zero"`
		Length uint32 `json:"length"`
		SubjectKeyIdentifier [20]byte `json:"subjectKeyIdentifier"`
		Asn uint32 `json:"asn"`
		SubjectPublicKeyInfo uint32 `json:"subjectPublicKeyInfo"`
	*/
	var flags uint8
	var zero uint8
	var length uint32
	var subjectKeyIdentifier [20]byte
	var asn uint32
	var subjectPublicKeyInfo uint32

	if err = readRouterKeyField(buf, protocolVersion, &flags, "flags"); err != nil {
		return rtrPduModel, err
	}
	if flags != 0 && flags != 1 {
		belogs.Error("ParseToRouterKey():PDU_TYPE_ROUTER_KEY, flags must be 0 or 1, buf:", buf, " flags:", flags)
		rtrError := NewRtrError(
			errors.New("pduType is ROUTER KEY, flags must be 0 or 1"),
			true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
			buf, "Fail to get flags")
		return rtrPduModel, rtrError
	}
	// NOTE(review): zero and length are read to advance the buffer but are
	// not validated — this mirrors the original behavior.
	if err = readRouterKeyField(buf, protocolVersion, &zero, "zero"); err != nil {
		return rtrPduModel, err
	}
	if err = readRouterKeyField(buf, protocolVersion, &length, "length"); err != nil {
		return rtrPduModel, err
	}
	if err = readRouterKeyField(buf, protocolVersion, &subjectKeyIdentifier, "subjectKeyIdentifier"); err != nil {
		return rtrPduModel, err
	}
	if err = readRouterKeyField(buf, protocolVersion, &asn, "asn"); err != nil {
		return rtrPduModel, err
	}
	if err = readRouterKeyField(buf, protocolVersion, &subjectPublicKeyInfo, "subjectPublicKeyInfo"); err != nil {
		return rtrPduModel, err
	}

	sq := NewRtrRouterKeyModel(protocolVersion, flags, subjectKeyIdentifier,
		asn, subjectPublicKeyInfo)
	belogs.Debug("ParseToRouterKey():get PDU_TYPE_ROUTER_KEY, buf:", buf, " sq:", jsonutil.MarshalJson(sq))
	return sq, nil
}

// readRouterKeyField reads one big-endian ROUTER KEY field into dest.
// On failure it logs with the same message the inline code used and wraps the
// error as a CORRUPT_DATA RTR error whose text embeds the field name.
func readRouterKeyField(buf *bytes.Reader, protocolVersion uint8, dest interface{}, name string) error {
	err := binary.Read(buf, binary.BigEndian, dest)
	if err == nil {
		return nil
	}
	belogs.Error("ParseToRouterKey(): PDU_TYPE_ROUTER_KEY get "+name+" fail, buf:", buf, err)
	return NewRtrError(
		err,
		true, protocolVersion, PDU_TYPE_ERROR_CODE_CORRUPT_DATA,
		buf, "Fail to get "+name)
}
|
package mysqldb
import (
"context"
"time"
)
// AnalysisStatus is the analysis state of a record.
type AnalysisStatus int32
const (
	// AnalysisStatusPending means the analysis has not started yet.
	AnalysisStatusPending AnalysisStatus = 0
	// AnalysisStatusInProgress means the analysis is running.
	AnalysisStatusInProgress AnalysisStatus = 1
	// AnalysisStatusCompeleted means the analysis finished successfully.
	// NOTE(review): "Compeleted" is a typo for "Completed", but the name is
	// exported and cannot be renamed without breaking callers.
	AnalysisStatusCompeleted AnalysisStatus = 2
	// AnalysisStatusError means the analysis failed.
	AnalysisStatusError AnalysisStatus = 3
)
// AEStatus describes the validity of the AE result.
type AEStatus int32
const (
	// HasAeNoError means there is no AE error.
	HasAeNoError AEStatus = 0
	// HasAeError means there is an AE error.
	HasAeError AEStatus = 1
)
// Gender is the user's gender.
type Gender string
const (
	// GenderMale is male.
	GenderMale Gender = "M"
	// GenderFemale is female.
	GenderFemale Gender = "F"
	// GenderInvalid is an invalid/unset gender.
	GenderInvalid Gender = ""
)
// Finger identifies one of the ten fingers used for measurement.
type Finger int32
const (
	// FingerLeft1 is the left little finger.
	FingerLeft1 Finger = 1
	// FingerLeft2 is the left ring finger.
	FingerLeft2 Finger = 2
	// FingerLeft3 is the left middle finger.
	FingerLeft3 Finger = 3
	// FingerLeft4 is the left index finger.
	FingerLeft4 Finger = 4
	// FingerLeft5 is the left thumb.
	FingerLeft5 Finger = 5
	// FingerRight5 is the right thumb.
	FingerRight5 Finger = 6
	// FingerRight4 is the right index finger.
	FingerRight4 Finger = 7
	// FingerRight3 is the right middle finger.
	FingerRight3 Finger = 8
	// FingerRight2 is the right ring finger.
	FingerRight2 Finger = 9
	// FingerRight1 is the right little finger.
	FingerRight1 Finger = 10
	// FingerInvalid is an invalid finger value.
	FingerInvalid Finger = -1
)
// MeasurementPosture is the body posture during measurement.
type MeasurementPosture int32
const (
	// MeasurementPostureSetting is the sitting posture.
	// NOTE(review): "Setting" looks like a typo for "Sitting"; the exported
	// name cannot be renamed without breaking callers.
	MeasurementPostureSetting MeasurementPosture = 0
	// MeasurementPostureStanging is the standing posture.
	// NOTE(review): "Stanging" looks like a typo for "Standing"; kept for
	// the same compatibility reason.
	MeasurementPostureStanging MeasurementPosture = 1
	// MeasurementPostureLying is the lying posture.
	MeasurementPostureLying MeasurementPosture = 2
	// MeasurementPostureInvalid is an invalid posture value.
	MeasurementPostureInvalid MeasurementPosture = -1
)
// Record is one measurement record row.
type Record struct {
	RecordID int32 `gorm:"primary_key"` // measurement result record ID
	ClientID string `gorm:"client_id"` // client ID; NOTE(review): tag lacks the "column:" prefix — gorm falls back to default snake_case mapping, confirm this is intended
	UserID int32 `gorm:"column:user_id"` // user profile ID
	C0 float64 `gorm:"column:c0"` // pericardium meridian measurement index
	C1 float64 `gorm:"column:c1"` // liver meridian measurement index
	C2 float64 `gorm:"column:c2"` // kidney meridian measurement index
	C3 float64 `gorm:"column:c3"` // spleen meridian measurement index
	C4 float64 `gorm:"column:c4"` // lung meridian measurement index
	C5 float64 `gorm:"column:c5"` // stomach meridian measurement index
	C6 float64 `gorm:"column:c6"` // gallbladder meridian measurement index
	C7 float64 `gorm:"column:c7"` // bladder meridian measurement index
	HeartRate float64 `gorm:"column:heart_rate"` // measured heart rate
	AlgorithmHighestHeartRate int32 `gorm:"column:algorithm_highest_heart_rate"` // highest heart rate computed by the algorithm service
	AlgorithmLowestHeartRate int32 `gorm:"column:algorithm_lowest_heart_rate"` // lowest heart rate computed by the algorithm service
	Finger Finger `gorm:"column:finger"` // which hand/finger was measured
	Remark string `gorm:"column:remark"` // free-form remark
	HasAEError int32 `gorm:"column:has_ae_error"` // whether the AE result is abnormal
	S3Key string `gorm:"column:s3_key"` // S3 object key
	CustomizedCode string `gorm:"-"` // user customized code (not persisted)
	HasStressState bool `gorm:"column:has_stress_state"` // whether a stress state was detected
	StressState string `gorm:"column:stress_state"` // stress-state JSON, map[string]bool
	AnalyzeBody string `gorm:"column:analyze_body"` // body of the new analysis API
	AnalyzeStatus AnalysisStatus `gorm:"column:analyze_status"` // analysis status: 0 pending, 1 in_progress, 2 completed, 3 error
	MeasurementPosture MeasurementPosture `gorm:"column:measurement_posture"` // posture during measurement
	TransactionNumber string `gorm:"column:transaction_number"` // transaction (serial) number
	CreatedAt time.Time // creation time
	UpdatedAt time.Time // update time
	DeletedAt *time.Time // soft-delete time
}
// TableName returns the database table backing Record.
func (r Record) TableName() string {
	return "record"
}
// UpdateAnalysisRecord persists the analysis-related columns of record,
// keyed by record.RecordID, and bumps updated_at.
func (db *DbClient) UpdateAnalysisRecord(record *Record) error {
	updates := map[string]interface{}{
		"has_stress_state": record.HasStressState,
		"stress_state":     record.StressState,
		"analyze_body":     record.AnalyzeBody,
		"analyze_status":   record.AnalyzeStatus,
		"updated_at":       time.Now().UTC(),
	}
	return db.Model(&Record{}).Where("record_id = ?", record.RecordID).Update(updates).Error
}
// FindAnalysisParams loads the measurement values plus the joined
// user-profile and user fields needed to run an analysis for recordID.
// NOTE(review): on query failure this still returns a non-nil *Record
// (possibly partially populated); callers must check err before use.
func (db *DbClient) FindAnalysisParams(recordID int32) (*Record, error) {
	var record Record
	err := db.Raw(`SELECT
    R.c0,
    R.c1,
    R.c2,
    R.c3,
    R.c4,
    R.c5,
    R.c6,
    R.c7,
    R.user_id,
    R.client_id,
    R.finger,
    R.transaction_number,
    UP.nickname,
    UP.nickname_initial,
    UP.gender,
    UP.birthday,
    UP.height,
    UP.weight,
    R.remark,
    R.heart_rate,
    R.s3_key,
    U.customized_code,
    TIMESTAMPDIFF(YEAR,UP.birthday,CURDATE()) age,
    R.created_at
FROM
    record AS R
        INNER JOIN
    user_profile AS UP ON UP.user_id = R.user_id
        INNER JOIN
    user AS U ON U.user_id = R.user_id
WHERE
    R.record_id = ? AND R.deleted_at IS NULL AND UP.deleted_at IS NULL`, recordID).Scan(&record).Error
	return &record, err
}
// FindRecordByRecordID fetches the non-deleted record with the given id,
// returning (nil, err) when it is missing or the query fails.
func (db *DbClient) FindRecordByRecordID(recordID int32) (*Record, error) {
	record := Record{}
	err := db.First(&record, "( record_id = ? AND deleted_at IS NULL ) ", recordID).Error
	if err != nil {
		return nil, err
	}
	return &record, nil
}
// FindAnalysisBodyByToken finds the analysis body of the completed
// (analyze_status = 2), non-deleted record matching record_token.
// NOTE(review): a non-nil *Record is returned even on error; check err first.
func (db *DbClient) FindAnalysisBodyByToken(token string) (*Record, error) {
	var record Record
	err := db.Raw(`SELECT
    R.record_id,
    R.user_id,
    R.analyze_body,
    R.analyze_status,
    R.created_at
FROM
    record AS R
WHERE
    R.record_token = ? AND R.deleted_at IS NULL AND R.analyze_status = 2`, token).Scan(&record).Error
	return &record, err
}
// UpdateAnalysisStatusError marks the record's analysis as failed and bumps
// updated_at.
func (db *DbClient) UpdateAnalysisStatusError(recordID int32) error {
	updates := map[string]interface{}{
		"analyze_status": AnalysisStatusError,
		"updated_at":     time.Now().UTC(),
	}
	return db.Model(&Record{}).Where("record_id = ?", recordID).Update(updates).Error
}
// UpdateAnalysisStatusInProgress marks the record's analysis as running and
// bumps updated_at.
func (db *DbClient) UpdateAnalysisStatusInProgress(recordID int32) error {
	updates := map[string]interface{}{
		"analyze_status": AnalysisStatusInProgress,
		"updated_at":     time.Now().UTC(),
	}
	return db.Model(&Record{}).Where("record_id = ?", recordID).Update(updates).Error
}
// UpdateRecordHasAEError stores the AE validity flag on the record and bumps
// updated_at.
func (db *DbClient) UpdateRecordHasAEError(recordID int32, hasAEError AEStatus) error {
	updates := map[string]interface{}{
		"has_ae_error": hasAEError,
		"updated_at":   time.Now().UTC(),
	}
	return db.Model(&Record{}).Where("record_id = ?", recordID).Update(updates).Error
}
// UpdateRecordTransactionNumber stores the record's transaction number and
// bumps updated_at.
// NOTE(review): ctx is accepted but never used (this gorm version has no
// context support); it is kept only for caller compatibility.
func (db *DbClient) UpdateRecordTransactionNumber(ctx context.Context, recordID int32, transactionNumber string) error {
	return db.Model(&Record{}).Where("record_id = ?", recordID).Update(map[string]interface{}{
		"transaction_number": transactionNumber,
		"updated_at": time.Now().UTC(),
	}).Error
}
|
// Copyright 2021 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
betapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/compute/beta/compute_beta_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta"
)
// UrlMapServer implements the gRPC interface for UrlMap.
type UrlMapServer struct{}
// ProtoToComputeBetaUrlMapDefaultUrlRedirectRedirectResponseCodeEnum converts a UrlMapDefaultUrlRedirectRedirectResponseCodeEnum enum from its proto representation.
func ProtoToComputeBetaUrlMapDefaultUrlRedirectRedirectResponseCodeEnum(e betapb.ComputeBetaUrlMapDefaultUrlRedirectRedirectResponseCodeEnum) *beta.UrlMapDefaultUrlRedirectRedirectResponseCodeEnum {
	if e == 0 {
		return nil
	}
	n, ok := betapb.ComputeBetaUrlMapDefaultUrlRedirectRedirectResponseCodeEnum_name[int32(e)]
	if !ok {
		return nil
	}
	v := beta.UrlMapDefaultUrlRedirectRedirectResponseCodeEnum(n[len("ComputeBetaUrlMapDefaultUrlRedirectRedirectResponseCodeEnum"):])
	return &v
}

// ProtoToComputeBetaUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnum converts a UrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnum enum from its proto representation.
func ProtoToComputeBetaUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnum(e betapb.ComputeBetaUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnum) *beta.UrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnum {
	if e == 0 {
		return nil
	}
	n, ok := betapb.ComputeBetaUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnum_name[int32(e)]
	if !ok {
		return nil
	}
	v := beta.UrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnum(n[len("ComputeBetaUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnum"):])
	return &v
}

// ProtoToComputeBetaUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCodeEnum converts a UrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCodeEnum enum from its proto representation.
func ProtoToComputeBetaUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCodeEnum(e betapb.ComputeBetaUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCodeEnum) *beta.UrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCodeEnum {
	if e == 0 {
		return nil
	}
	n, ok := betapb.ComputeBetaUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCodeEnum_name[int32(e)]
	if !ok {
		return nil
	}
	v := beta.UrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCodeEnum(n[len("ComputeBetaUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCodeEnum"):])
	return &v
}

// ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterMatchCriteriaEnum converts a UrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterMatchCriteriaEnum enum from its proto representation.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterMatchCriteriaEnum(e betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterMatchCriteriaEnum) *beta.UrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterMatchCriteriaEnum {
	if e == 0 {
		return nil
	}
	n, ok := betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterMatchCriteriaEnum_name[int32(e)]
	if !ok {
		return nil
	}
	v := beta.UrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterMatchCriteriaEnum(n[len("ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterMatchCriteriaEnum"):])
	return &v
}

// ProtoToComputeBetaUrlMapPathMatcherRouteRuleUrlRedirectRedirectResponseCodeEnum converts a UrlMapPathMatcherRouteRuleUrlRedirectRedirectResponseCodeEnum enum from its proto representation.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleUrlRedirectRedirectResponseCodeEnum(e betapb.ComputeBetaUrlMapPathMatcherRouteRuleUrlRedirectRedirectResponseCodeEnum) *beta.UrlMapPathMatcherRouteRuleUrlRedirectRedirectResponseCodeEnum {
	if e == 0 {
		return nil
	}
	n, ok := betapb.ComputeBetaUrlMapPathMatcherRouteRuleUrlRedirectRedirectResponseCodeEnum_name[int32(e)]
	if !ok {
		return nil
	}
	v := beta.UrlMapPathMatcherRouteRuleUrlRedirectRedirectResponseCodeEnum(n[len("ComputeBetaUrlMapPathMatcherRouteRuleUrlRedirectRedirectResponseCodeEnum"):])
	return &v
}
// ProtoToComputeBetaUrlMapDefaultRouteAction converts a UrlMapDefaultRouteAction resource from its proto representation.
func ProtoToComputeBetaUrlMapDefaultRouteAction(p *betapb.ComputeBetaUrlMapDefaultRouteAction) *beta.UrlMapDefaultRouteAction {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapDefaultRouteAction{
		UrlRewrite:           ProtoToComputeBetaUrlMapDefaultRouteActionUrlRewrite(p.GetUrlRewrite()),
		Timeout:              ProtoToComputeBetaUrlMapDefaultRouteActionTimeout(p.GetTimeout()),
		RetryPolicy:          ProtoToComputeBetaUrlMapDefaultRouteActionRetryPolicy(p.GetRetryPolicy()),
		RequestMirrorPolicy:  ProtoToComputeBetaUrlMapDefaultRouteActionRequestMirrorPolicy(p.GetRequestMirrorPolicy()),
		CorsPolicy:           ProtoToComputeBetaUrlMapDefaultRouteActionCorsPolicy(p.GetCorsPolicy()),
		FaultInjectionPolicy: ProtoToComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicy(p.GetFaultInjectionPolicy()),
	}
	for _, svc := range p.GetWeightedBackendService() {
		obj.WeightedBackendService = append(obj.WeightedBackendService, *ProtoToComputeBetaUrlMapDefaultRouteActionWeightedBackendService(svc))
	}
	return obj
}

// ProtoToComputeBetaUrlMapDefaultRouteActionWeightedBackendService converts a UrlMapDefaultRouteActionWeightedBackendService resource from its proto representation.
func ProtoToComputeBetaUrlMapDefaultRouteActionWeightedBackendService(p *betapb.ComputeBetaUrlMapDefaultRouteActionWeightedBackendService) *beta.UrlMapDefaultRouteActionWeightedBackendService {
	if p == nil {
		return nil
	}
	return &beta.UrlMapDefaultRouteActionWeightedBackendService{
		BackendService: dcl.StringOrNil(p.BackendService),
		Weight:         dcl.Int64OrNil(p.Weight),
		HeaderAction:   ProtoToComputeBetaUrlMapHeaderAction(p.GetHeaderAction()),
	}
}
// ProtoToComputeBetaUrlMapHeaderAction converts a UrlMapHeaderAction resource from its proto representation.
func ProtoToComputeBetaUrlMapHeaderAction(p *betapb.ComputeBetaUrlMapHeaderAction) *beta.UrlMapHeaderAction {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapHeaderAction{}
	// Plain string slices can be copied in one append each.
	obj.RequestHeadersToRemove = append(obj.RequestHeadersToRemove, p.GetRequestHeadersToRemove()...)
	obj.ResponseHeadersToRemove = append(obj.ResponseHeadersToRemove, p.GetResponseHeadersToRemove()...)
	for _, hdr := range p.GetRequestHeadersToAdd() {
		obj.RequestHeadersToAdd = append(obj.RequestHeadersToAdd, *ProtoToComputeBetaUrlMapHeaderActionRequestHeadersToAdd(hdr))
	}
	for _, hdr := range p.GetResponseHeadersToAdd() {
		obj.ResponseHeadersToAdd = append(obj.ResponseHeadersToAdd, *ProtoToComputeBetaUrlMapHeaderActionResponseHeadersToAdd(hdr))
	}
	return obj
}

// ProtoToComputeBetaUrlMapHeaderActionRequestHeadersToAdd converts a UrlMapHeaderActionRequestHeadersToAdd resource from its proto representation.
func ProtoToComputeBetaUrlMapHeaderActionRequestHeadersToAdd(p *betapb.ComputeBetaUrlMapHeaderActionRequestHeadersToAdd) *beta.UrlMapHeaderActionRequestHeadersToAdd {
	if p == nil {
		return nil
	}
	return &beta.UrlMapHeaderActionRequestHeadersToAdd{
		HeaderName:  dcl.StringOrNil(p.HeaderName),
		HeaderValue: dcl.StringOrNil(p.HeaderValue),
		Replace:     dcl.Bool(p.Replace),
	}
}

// ProtoToComputeBetaUrlMapHeaderActionResponseHeadersToAdd converts a UrlMapHeaderActionResponseHeadersToAdd resource from its proto representation.
func ProtoToComputeBetaUrlMapHeaderActionResponseHeadersToAdd(p *betapb.ComputeBetaUrlMapHeaderActionResponseHeadersToAdd) *beta.UrlMapHeaderActionResponseHeadersToAdd {
	if p == nil {
		return nil
	}
	return &beta.UrlMapHeaderActionResponseHeadersToAdd{
		HeaderName:  dcl.StringOrNil(p.HeaderName),
		HeaderValue: dcl.StringOrNil(p.HeaderValue),
		Replace:     dcl.Bool(p.Replace),
	}
}
// ProtoToComputeBetaUrlMapDefaultRouteActionUrlRewrite converts a UrlMapDefaultRouteActionUrlRewrite resource from its proto representation.
func ProtoToComputeBetaUrlMapDefaultRouteActionUrlRewrite(p *betapb.ComputeBetaUrlMapDefaultRouteActionUrlRewrite) *beta.UrlMapDefaultRouteActionUrlRewrite {
	if p == nil {
		return nil
	}
	return &beta.UrlMapDefaultRouteActionUrlRewrite{
		PathPrefixRewrite: dcl.StringOrNil(p.PathPrefixRewrite),
		HostRewrite:       dcl.StringOrNil(p.HostRewrite),
	}
}

// ProtoToComputeBetaUrlMapDefaultRouteActionTimeout converts a UrlMapDefaultRouteActionTimeout resource from its proto representation.
func ProtoToComputeBetaUrlMapDefaultRouteActionTimeout(p *betapb.ComputeBetaUrlMapDefaultRouteActionTimeout) *beta.UrlMapDefaultRouteActionTimeout {
	if p == nil {
		return nil
	}
	return &beta.UrlMapDefaultRouteActionTimeout{
		Seconds: dcl.Int64OrNil(p.Seconds),
		Nanos:   dcl.Int64OrNil(p.Nanos),
	}
}

// ProtoToComputeBetaUrlMapDefaultRouteActionRetryPolicy converts a UrlMapDefaultRouteActionRetryPolicy resource from its proto representation.
func ProtoToComputeBetaUrlMapDefaultRouteActionRetryPolicy(p *betapb.ComputeBetaUrlMapDefaultRouteActionRetryPolicy) *beta.UrlMapDefaultRouteActionRetryPolicy {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapDefaultRouteActionRetryPolicy{
		NumRetries:    dcl.Int64OrNil(p.NumRetries),
		PerTryTimeout: ProtoToComputeBetaUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(p.GetPerTryTimeout()),
	}
	obj.RetryCondition = append(obj.RetryCondition, p.GetRetryCondition()...)
	return obj
}

// ProtoToComputeBetaUrlMapDefaultRouteActionRetryPolicyPerTryTimeout converts a UrlMapDefaultRouteActionRetryPolicyPerTryTimeout resource from its proto representation.
func ProtoToComputeBetaUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(p *betapb.ComputeBetaUrlMapDefaultRouteActionRetryPolicyPerTryTimeout) *beta.UrlMapDefaultRouteActionRetryPolicyPerTryTimeout {
	if p == nil {
		return nil
	}
	return &beta.UrlMapDefaultRouteActionRetryPolicyPerTryTimeout{
		Seconds: dcl.Int64OrNil(p.Seconds),
		Nanos:   dcl.Int64OrNil(p.Nanos),
	}
}
// ProtoToComputeBetaUrlMapDefaultRouteActionRequestMirrorPolicy converts a UrlMapDefaultRouteActionRequestMirrorPolicy resource from its proto representation.
func ProtoToComputeBetaUrlMapDefaultRouteActionRequestMirrorPolicy(p *betapb.ComputeBetaUrlMapDefaultRouteActionRequestMirrorPolicy) *beta.UrlMapDefaultRouteActionRequestMirrorPolicy {
	if p == nil {
		return nil
	}
	return &beta.UrlMapDefaultRouteActionRequestMirrorPolicy{
		BackendService: dcl.StringOrNil(p.BackendService),
	}
}

// ProtoToComputeBetaUrlMapDefaultRouteActionCorsPolicy converts a UrlMapDefaultRouteActionCorsPolicy resource from its proto representation.
func ProtoToComputeBetaUrlMapDefaultRouteActionCorsPolicy(p *betapb.ComputeBetaUrlMapDefaultRouteActionCorsPolicy) *beta.UrlMapDefaultRouteActionCorsPolicy {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapDefaultRouteActionCorsPolicy{
		MaxAge:           dcl.Int64OrNil(p.MaxAge),
		AllowCredentials: dcl.Bool(p.AllowCredentials),
		Disabled:         dcl.Bool(p.Disabled),
	}
	// All repeated fields here are plain strings; copy each in one append.
	obj.AllowOrigin = append(obj.AllowOrigin, p.GetAllowOrigin()...)
	obj.AllowOriginRegex = append(obj.AllowOriginRegex, p.GetAllowOriginRegex()...)
	obj.AllowMethod = append(obj.AllowMethod, p.GetAllowMethod()...)
	obj.AllowHeader = append(obj.AllowHeader, p.GetAllowHeader()...)
	obj.ExposeHeader = append(obj.ExposeHeader, p.GetExposeHeader()...)
	return obj
}
// ProtoToComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicy converts a UrlMapDefaultRouteActionFaultInjectionPolicy resource from its proto representation.
func ProtoToComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicy(p *betapb.ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicy) *beta.UrlMapDefaultRouteActionFaultInjectionPolicy {
	if p == nil {
		return nil
	}
	return &beta.UrlMapDefaultRouteActionFaultInjectionPolicy{
		Delay: ProtoToComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyDelay(p.GetDelay()),
		Abort: ProtoToComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyAbort(p.GetAbort()),
	}
}

// ProtoToComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyDelay converts a UrlMapDefaultRouteActionFaultInjectionPolicyDelay resource from its proto representation.
func ProtoToComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyDelay(p *betapb.ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyDelay) *beta.UrlMapDefaultRouteActionFaultInjectionPolicyDelay {
	if p == nil {
		return nil
	}
	return &beta.UrlMapDefaultRouteActionFaultInjectionPolicyDelay{
		FixedDelay: ProtoToComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(p.GetFixedDelay()),
		Percentage: dcl.Float64OrNil(p.Percentage),
	}
}

// ProtoToComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay converts a UrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay resource from its proto representation.
func ProtoToComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(p *betapb.ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay) *beta.UrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay {
	if p == nil {
		return nil
	}
	return &beta.UrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay{
		Seconds: dcl.Int64OrNil(p.Seconds),
		Nanos:   dcl.Int64OrNil(p.Nanos),
	}
}

// ProtoToComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyAbort converts a UrlMapDefaultRouteActionFaultInjectionPolicyAbort resource from its proto representation.
func ProtoToComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyAbort(p *betapb.ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyAbort) *beta.UrlMapDefaultRouteActionFaultInjectionPolicyAbort {
	if p == nil {
		return nil
	}
	return &beta.UrlMapDefaultRouteActionFaultInjectionPolicyAbort{
		HttpStatus: dcl.Int64OrNil(p.HttpStatus),
		Percentage: dcl.Float64OrNil(p.Percentage),
	}
}
// ProtoToComputeBetaUrlMapDefaultUrlRedirect converts a UrlMapDefaultUrlRedirect resource from its proto representation.
func ProtoToComputeBetaUrlMapDefaultUrlRedirect(p *betapb.ComputeBetaUrlMapDefaultUrlRedirect) *beta.UrlMapDefaultUrlRedirect {
	if p == nil {
		return nil
	}
	return &beta.UrlMapDefaultUrlRedirect{
		HostRedirect:         dcl.StringOrNil(p.HostRedirect),
		PathRedirect:         dcl.StringOrNil(p.PathRedirect),
		PrefixRedirect:       dcl.StringOrNil(p.PrefixRedirect),
		RedirectResponseCode: ProtoToComputeBetaUrlMapDefaultUrlRedirectRedirectResponseCodeEnum(p.GetRedirectResponseCode()),
		HttpsRedirect:        dcl.Bool(p.HttpsRedirect),
		StripQuery:           dcl.Bool(p.StripQuery),
	}
}

// ProtoToComputeBetaUrlMapHostRule converts a UrlMapHostRule resource from its proto representation.
func ProtoToComputeBetaUrlMapHostRule(p *betapb.ComputeBetaUrlMapHostRule) *beta.UrlMapHostRule {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapHostRule{
		Description: dcl.StringOrNil(p.Description),
		PathMatcher: dcl.StringOrNil(p.PathMatcher),
	}
	obj.Host = append(obj.Host, p.GetHost()...)
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcher converts a UrlMapPathMatcher resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcher(p *betapb.ComputeBetaUrlMapPathMatcher) *beta.UrlMapPathMatcher {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcher{
		Name:               dcl.StringOrNil(p.Name),
		Description:        dcl.StringOrNil(p.Description),
		DefaultService:     dcl.StringOrNil(p.DefaultService),
		DefaultRouteAction: ProtoToComputeBetaUrlMapDefaultRouteAction(p.GetDefaultRouteAction()),
		DefaultUrlRedirect: ProtoToComputeBetaUrlMapPathMatcherDefaultUrlRedirect(p.GetDefaultUrlRedirect()),
		HeaderAction:       ProtoToComputeBetaUrlMapHeaderAction(p.GetHeaderAction()),
	}
	// Nested message converters return nil only for nil elements; repeated proto
	// fields do not contain nil messages, so the dereference below is safe.
	for _, r := range p.GetPathRule() {
		obj.PathRule = append(obj.PathRule, *ProtoToComputeBetaUrlMapPathMatcherPathRule(r))
	}
	for _, r := range p.GetRouteRule() {
		obj.RouteRule = append(obj.RouteRule, *ProtoToComputeBetaUrlMapPathMatcherRouteRule(r))
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherDefaultUrlRedirect converts a UrlMapPathMatcherDefaultUrlRedirect resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherDefaultUrlRedirect(p *betapb.ComputeBetaUrlMapPathMatcherDefaultUrlRedirect) *beta.UrlMapPathMatcherDefaultUrlRedirect {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherDefaultUrlRedirect{
		HostRedirect:         dcl.StringOrNil(p.HostRedirect),
		PathRedirect:         dcl.StringOrNil(p.PathRedirect),
		PrefixRedirect:       dcl.StringOrNil(p.PrefixRedirect),
		RedirectResponseCode: ProtoToComputeBetaUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnum(p.GetRedirectResponseCode()),
		HttpsRedirect:        dcl.Bool(p.HttpsRedirect),
		StripQuery:           dcl.Bool(p.StripQuery),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherPathRule converts a UrlMapPathMatcherPathRule resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherPathRule(p *betapb.ComputeBetaUrlMapPathMatcherPathRule) *beta.UrlMapPathMatcherPathRule {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherPathRule{
		BackendService: dcl.StringOrNil(p.BackendService),
		RouteAction:    ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteAction(p.GetRouteAction()),
		UrlRedirect:    ProtoToComputeBetaUrlMapPathMatcherPathRuleUrlRedirect(p.GetUrlRedirect()),
	}
	// Copy the repeated path field with a single variadic append instead of
	// an element-by-element loop; an empty source leaves obj.Path nil, same as before.
	obj.Path = append(obj.Path, p.GetPath()...)
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteAction converts a UrlMapPathMatcherPathRuleRouteAction resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteAction(p *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteAction) *beta.UrlMapPathMatcherPathRuleRouteAction {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherPathRuleRouteAction{
		UrlRewrite:           ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionUrlRewrite(p.GetUrlRewrite()),
		Timeout:              ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionTimeout(p.GetTimeout()),
		RetryPolicy:          ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionRetryPolicy(p.GetRetryPolicy()),
		RequestMirrorPolicy:  ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(p.GetRequestMirrorPolicy()),
		CorsPolicy:           ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionCorsPolicy(p.GetCorsPolicy()),
		FaultInjectionPolicy: ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(p.GetFaultInjectionPolicy()),
	}
	// Repeated proto fields do not contain nil messages, so the dereference is safe.
	for _, r := range p.GetWeightedBackendService() {
		obj.WeightedBackendService = append(obj.WeightedBackendService, *ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionWeightedBackendService(r))
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionWeightedBackendService converts a UrlMapPathMatcherPathRuleRouteActionWeightedBackendService resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionWeightedBackendService(p *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionWeightedBackendService) *beta.UrlMapPathMatcherPathRuleRouteActionWeightedBackendService {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherPathRuleRouteActionWeightedBackendService{
		BackendService: dcl.StringOrNil(p.BackendService),
		Weight:         dcl.Int64OrNil(p.Weight),
		HeaderAction:   ProtoToComputeBetaUrlMapHeaderAction(p.GetHeaderAction()),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionUrlRewrite converts a UrlMapPathMatcherPathRuleRouteActionUrlRewrite resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionUrlRewrite(p *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionUrlRewrite) *beta.UrlMapPathMatcherPathRuleRouteActionUrlRewrite {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherPathRuleRouteActionUrlRewrite{
		PathPrefixRewrite: dcl.StringOrNil(p.PathPrefixRewrite),
		HostRewrite:       dcl.StringOrNil(p.HostRewrite),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionTimeout converts a UrlMapPathMatcherPathRuleRouteActionTimeout resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionTimeout(p *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionTimeout) *beta.UrlMapPathMatcherPathRuleRouteActionTimeout {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherPathRuleRouteActionTimeout{
		Seconds: dcl.Int64OrNil(p.Seconds),
		Nanos:   dcl.Int64OrNil(p.Nanos),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionRetryPolicy converts a UrlMapPathMatcherPathRuleRouteActionRetryPolicy resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionRetryPolicy(p *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionRetryPolicy) *beta.UrlMapPathMatcherPathRuleRouteActionRetryPolicy {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherPathRuleRouteActionRetryPolicy{
		NumRetries:    dcl.Int64OrNil(p.NumRetries),
		PerTryTimeout: ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(p.GetPerTryTimeout()),
	}
	// Copy the repeated retry conditions with a single variadic append instead of
	// an element-by-element loop; an empty source leaves the field nil, same as before.
	obj.RetryCondition = append(obj.RetryCondition, p.GetRetryCondition()...)
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout converts a UrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(p *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout) *beta.UrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout{
		Seconds: dcl.Int64OrNil(p.Seconds),
		Nanos:   dcl.Int64OrNil(p.Nanos),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy converts a UrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(p *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy) *beta.UrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy{
		BackendService: dcl.StringOrNil(p.BackendService),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionCorsPolicy converts a UrlMapPathMatcherPathRuleRouteActionCorsPolicy resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionCorsPolicy(p *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionCorsPolicy) *beta.UrlMapPathMatcherPathRuleRouteActionCorsPolicy {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherPathRuleRouteActionCorsPolicy{
		MaxAge:           dcl.Int64OrNil(p.MaxAge),
		AllowCredentials: dcl.Bool(p.AllowCredentials),
		Disabled:         dcl.Bool(p.Disabled),
	}
	// Copy each repeated string field with one variadic append instead of an
	// element-by-element loop; empty sources leave the fields nil, same as before.
	obj.AllowOrigin = append(obj.AllowOrigin, p.GetAllowOrigin()...)
	obj.AllowOriginRegex = append(obj.AllowOriginRegex, p.GetAllowOriginRegex()...)
	obj.AllowMethod = append(obj.AllowMethod, p.GetAllowMethod()...)
	obj.AllowHeader = append(obj.AllowHeader, p.GetAllowHeader()...)
	obj.ExposeHeader = append(obj.ExposeHeader, p.GetExposeHeader()...)
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy converts a UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(p *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy) *beta.UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy{
		Delay: ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(p.GetDelay()),
		Abort: ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort(p.GetAbort()),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay converts a UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(p *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay) *beta.UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay{
		FixedDelay: ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay(p.GetFixedDelay()),
		Percentage: dcl.Float64OrNil(p.Percentage),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay converts a UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay(p *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay) *beta.UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay{
		Seconds: dcl.Int64OrNil(p.Seconds),
		Nanos:   dcl.Int64OrNil(p.Nanos),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort converts a UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort(p *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort) *beta.UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort{
		HttpStatus: dcl.Int64OrNil(p.HttpStatus),
		Percentage: dcl.Float64OrNil(p.Percentage),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherPathRuleUrlRedirect converts a UrlMapPathMatcherPathRuleUrlRedirect resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherPathRuleUrlRedirect(p *betapb.ComputeBetaUrlMapPathMatcherPathRuleUrlRedirect) *beta.UrlMapPathMatcherPathRuleUrlRedirect {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherPathRuleUrlRedirect{
		HostRedirect:         dcl.StringOrNil(p.HostRedirect),
		PathRedirect:         dcl.StringOrNil(p.PathRedirect),
		PrefixRedirect:       dcl.StringOrNil(p.PrefixRedirect),
		RedirectResponseCode: ProtoToComputeBetaUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCodeEnum(p.GetRedirectResponseCode()),
		HttpsRedirect:        dcl.Bool(p.HttpsRedirect),
		StripQuery:           dcl.Bool(p.StripQuery),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherRouteRule converts a UrlMapPathMatcherRouteRule resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherRouteRule(p *betapb.ComputeBetaUrlMapPathMatcherRouteRule) *beta.UrlMapPathMatcherRouteRule {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherRouteRule{
		Priority:       dcl.Int64OrNil(p.Priority),
		Description:    dcl.StringOrNil(p.Description),
		BackendService: dcl.StringOrNil(p.BackendService),
		RouteAction:    ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteAction(p.GetRouteAction()),
		UrlRedirect:    ProtoToComputeBetaUrlMapPathMatcherRouteRuleUrlRedirect(p.GetUrlRedirect()),
		HeaderAction:   ProtoToComputeBetaUrlMapHeaderAction(p.GetHeaderAction()),
	}
	// Repeated proto fields do not contain nil messages, so the dereference is safe.
	for _, r := range p.GetMatchRule() {
		obj.MatchRule = append(obj.MatchRule, *ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRule(r))
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRule converts a UrlMapPathMatcherRouteRuleMatchRule resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRule(p *betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRule) *beta.UrlMapPathMatcherRouteRuleMatchRule {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherRouteRuleMatchRule{
		PrefixMatch:   dcl.StringOrNil(p.PrefixMatch),
		FullPathMatch: dcl.StringOrNil(p.FullPathMatch),
		RegexMatch:    dcl.StringOrNil(p.RegexMatch),
		IgnoreCase:    dcl.Bool(p.IgnoreCase),
	}
	// Repeated proto fields do not contain nil messages, so the dereferences are safe.
	for _, r := range p.GetHeaderMatch() {
		obj.HeaderMatch = append(obj.HeaderMatch, *ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRuleHeaderMatch(r))
	}
	for _, r := range p.GetQueryParameterMatch() {
		obj.QueryParameterMatch = append(obj.QueryParameterMatch, *ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRuleQueryParameterMatch(r))
	}
	for _, r := range p.GetMetadataFilter() {
		obj.MetadataFilter = append(obj.MetadataFilter, *ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilter(r))
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRuleHeaderMatch converts a UrlMapPathMatcherRouteRuleMatchRuleHeaderMatch resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRuleHeaderMatch(p *betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleHeaderMatch) *beta.UrlMapPathMatcherRouteRuleMatchRuleHeaderMatch {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherRouteRuleMatchRuleHeaderMatch{
		HeaderName:   dcl.StringOrNil(p.HeaderName),
		ExactMatch:   dcl.StringOrNil(p.ExactMatch),
		RegexMatch:   dcl.StringOrNil(p.RegexMatch),
		RangeMatch:   ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRuleHeaderMatchRangeMatch(p.GetRangeMatch()),
		PresentMatch: dcl.Bool(p.PresentMatch),
		PrefixMatch:  dcl.StringOrNil(p.PrefixMatch),
		SuffixMatch:  dcl.StringOrNil(p.SuffixMatch),
		InvertMatch:  dcl.Bool(p.InvertMatch),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRuleHeaderMatchRangeMatch converts a UrlMapPathMatcherRouteRuleMatchRuleHeaderMatchRangeMatch resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRuleHeaderMatchRangeMatch(p *betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleHeaderMatchRangeMatch) *beta.UrlMapPathMatcherRouteRuleMatchRuleHeaderMatchRangeMatch {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherRouteRuleMatchRuleHeaderMatchRangeMatch{
		RangeStart: dcl.Int64OrNil(p.RangeStart),
		RangeEnd:   dcl.Int64OrNil(p.RangeEnd),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRuleQueryParameterMatch converts a UrlMapPathMatcherRouteRuleMatchRuleQueryParameterMatch resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRuleQueryParameterMatch(p *betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleQueryParameterMatch) *beta.UrlMapPathMatcherRouteRuleMatchRuleQueryParameterMatch {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherRouteRuleMatchRuleQueryParameterMatch{
		Name:         dcl.StringOrNil(p.Name),
		PresentMatch: dcl.Bool(p.PresentMatch),
		ExactMatch:   dcl.StringOrNil(p.ExactMatch),
		RegexMatch:   dcl.StringOrNil(p.RegexMatch),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilter converts a UrlMapPathMatcherRouteRuleMatchRuleMetadataFilter resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilter(p *betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilter) *beta.UrlMapPathMatcherRouteRuleMatchRuleMetadataFilter {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherRouteRuleMatchRuleMetadataFilter{
		FilterMatchCriteria: ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterMatchCriteriaEnum(p.GetFilterMatchCriteria()),
	}
	// Repeated proto fields do not contain nil messages, so the dereference is safe.
	for _, r := range p.GetFilterLabel() {
		obj.FilterLabel = append(obj.FilterLabel, *ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterLabel(r))
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterLabel converts a UrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterLabel resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterLabel(p *betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterLabel) *beta.UrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterLabel {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterLabel{
		Name:  dcl.StringOrNil(p.Name),
		Value: dcl.StringOrNil(p.Value),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteAction converts a UrlMapPathMatcherRouteRuleRouteAction resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteAction(p *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteAction) *beta.UrlMapPathMatcherRouteRuleRouteAction {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherRouteRuleRouteAction{
		UrlRewrite:           ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionUrlRewrite(p.GetUrlRewrite()),
		Timeout:              ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionTimeout(p.GetTimeout()),
		RetryPolicy:          ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionRetryPolicy(p.GetRetryPolicy()),
		RequestMirrorPolicy:  ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionRequestMirrorPolicy(p.GetRequestMirrorPolicy()),
		CorsPolicy:           ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionCorsPolicy(p.GetCorsPolicy()),
		FaultInjectionPolicy: ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicy(p.GetFaultInjectionPolicy()),
	}
	// Repeated proto fields do not contain nil messages, so the dereference is safe.
	for _, r := range p.GetWeightedBackendService() {
		obj.WeightedBackendService = append(obj.WeightedBackendService, *ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionWeightedBackendService(r))
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionWeightedBackendService converts a UrlMapPathMatcherRouteRuleRouteActionWeightedBackendService resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionWeightedBackendService(p *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionWeightedBackendService) *beta.UrlMapPathMatcherRouteRuleRouteActionWeightedBackendService {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherRouteRuleRouteActionWeightedBackendService{
		BackendService: dcl.StringOrNil(p.BackendService),
		Weight:         dcl.Int64OrNil(p.Weight),
		HeaderAction:   ProtoToComputeBetaUrlMapHeaderAction(p.GetHeaderAction()),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionUrlRewrite converts a UrlMapPathMatcherRouteRuleRouteActionUrlRewrite resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionUrlRewrite(p *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionUrlRewrite) *beta.UrlMapPathMatcherRouteRuleRouteActionUrlRewrite {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherRouteRuleRouteActionUrlRewrite{
		PathPrefixRewrite: dcl.StringOrNil(p.PathPrefixRewrite),
		HostRewrite:       dcl.StringOrNil(p.HostRewrite),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionTimeout converts a UrlMapPathMatcherRouteRuleRouteActionTimeout resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionTimeout(p *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionTimeout) *beta.UrlMapPathMatcherRouteRuleRouteActionTimeout {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherRouteRuleRouteActionTimeout{
		Seconds: dcl.Int64OrNil(p.Seconds),
		Nanos:   dcl.Int64OrNil(p.Nanos),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionRetryPolicy converts a UrlMapPathMatcherRouteRuleRouteActionRetryPolicy resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionRetryPolicy(p *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionRetryPolicy) *beta.UrlMapPathMatcherRouteRuleRouteActionRetryPolicy {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherRouteRuleRouteActionRetryPolicy{
		NumRetries:    dcl.Int64OrNil(p.NumRetries),
		PerTryTimeout: ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionRetryPolicyPerTryTimeout(p.GetPerTryTimeout()),
	}
	// Copy the repeated retry conditions with a single variadic append instead of
	// an element-by-element loop; an empty source leaves the field nil, same as before.
	obj.RetryCondition = append(obj.RetryCondition, p.GetRetryCondition()...)
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionRetryPolicyPerTryTimeout converts a UrlMapPathMatcherRouteRuleRouteActionRetryPolicyPerTryTimeout resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionRetryPolicyPerTryTimeout(p *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionRetryPolicyPerTryTimeout) *beta.UrlMapPathMatcherRouteRuleRouteActionRetryPolicyPerTryTimeout {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherRouteRuleRouteActionRetryPolicyPerTryTimeout{
		Seconds: dcl.Int64OrNil(p.Seconds),
		Nanos:   dcl.Int64OrNil(p.Nanos),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionRequestMirrorPolicy converts a UrlMapPathMatcherRouteRuleRouteActionRequestMirrorPolicy resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionRequestMirrorPolicy(p *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionRequestMirrorPolicy) *beta.UrlMapPathMatcherRouteRuleRouteActionRequestMirrorPolicy {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherRouteRuleRouteActionRequestMirrorPolicy{
		BackendService: dcl.StringOrNil(p.BackendService),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionCorsPolicy converts a UrlMapPathMatcherRouteRuleRouteActionCorsPolicy resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionCorsPolicy(p *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionCorsPolicy) *beta.UrlMapPathMatcherRouteRuleRouteActionCorsPolicy {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherRouteRuleRouteActionCorsPolicy{
		MaxAge:           dcl.Int64OrNil(p.MaxAge),
		AllowCredentials: dcl.Bool(p.AllowCredentials),
		Disabled:         dcl.Bool(p.Disabled),
	}
	// Copy each repeated string field with one variadic append instead of an
	// element-by-element loop; empty sources leave the fields nil, same as before.
	obj.AllowOrigin = append(obj.AllowOrigin, p.GetAllowOrigin()...)
	obj.AllowOriginRegex = append(obj.AllowOriginRegex, p.GetAllowOriginRegex()...)
	obj.AllowMethod = append(obj.AllowMethod, p.GetAllowMethod()...)
	obj.AllowHeader = append(obj.AllowHeader, p.GetAllowHeader()...)
	obj.ExposeHeader = append(obj.ExposeHeader, p.GetExposeHeader()...)
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicy converts a UrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicy resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicy(p *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicy) *beta.UrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicy {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicy{
		Delay: ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelay(p.GetDelay()),
		Abort: ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyAbort(p.GetAbort()),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelay converts a UrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelay resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelay(p *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelay) *beta.UrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelay {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelay{
		FixedDelay: ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayFixedDelay(p.GetFixedDelay()),
		Percentage: dcl.Float64OrNil(p.Percentage),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayFixedDelay converts a UrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayFixedDelay resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayFixedDelay(p *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayFixedDelay) *beta.UrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayFixedDelay {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayFixedDelay{
		Seconds: dcl.Int64OrNil(p.Seconds),
		Nanos:   dcl.Int64OrNil(p.Nanos),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyAbort converts a UrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyAbort resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyAbort(p *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyAbort) *beta.UrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyAbort {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyAbort{
		HttpStatus: dcl.Int64OrNil(p.HttpStatus),
		Percentage: dcl.Float64OrNil(p.Percentage),
	}
	return obj
}
// ProtoToComputeBetaUrlMapPathMatcherRouteRuleUrlRedirect converts a UrlMapPathMatcherRouteRuleUrlRedirect resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapPathMatcherRouteRuleUrlRedirect(p *betapb.ComputeBetaUrlMapPathMatcherRouteRuleUrlRedirect) *beta.UrlMapPathMatcherRouteRuleUrlRedirect {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapPathMatcherRouteRuleUrlRedirect{
		HostRedirect:         dcl.StringOrNil(p.HostRedirect),
		PathRedirect:         dcl.StringOrNil(p.PathRedirect),
		PrefixRedirect:       dcl.StringOrNil(p.PrefixRedirect),
		RedirectResponseCode: ProtoToComputeBetaUrlMapPathMatcherRouteRuleUrlRedirectRedirectResponseCodeEnum(p.GetRedirectResponseCode()),
		HttpsRedirect:        dcl.Bool(p.HttpsRedirect),
		StripQuery:           dcl.Bool(p.StripQuery),
	}
	return obj
}
// ProtoToComputeBetaUrlMapTest converts a UrlMapTest resource from its proto representation. It returns nil for nil input.
func ProtoToComputeBetaUrlMapTest(p *betapb.ComputeBetaUrlMapTest) *beta.UrlMapTest {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMapTest{
		Description:            dcl.StringOrNil(p.Description),
		Host:                   dcl.StringOrNil(p.Host),
		Path:                   dcl.StringOrNil(p.Path),
		ExpectedBackendService: dcl.StringOrNil(p.ExpectedBackendService),
	}
	return obj
}
// ProtoToUrlMap converts a UrlMap resource from its proto representation.
// It returns nil for nil input, matching the behavior of every nested converter
// in this file (previously a nil proto panicked on the first field access).
func ProtoToUrlMap(p *betapb.ComputeBetaUrlMap) *beta.UrlMap {
	if p == nil {
		return nil
	}
	obj := &beta.UrlMap{
		DefaultRouteAction: ProtoToComputeBetaUrlMapDefaultRouteAction(p.GetDefaultRouteAction()),
		DefaultService:     dcl.StringOrNil(p.DefaultService),
		DefaultUrlRedirect: ProtoToComputeBetaUrlMapDefaultUrlRedirect(p.GetDefaultUrlRedirect()),
		Description:        dcl.StringOrNil(p.Description),
		SelfLink:           dcl.StringOrNil(p.SelfLink),
		HeaderAction:       ProtoToComputeBetaUrlMapHeaderAction(p.GetHeaderAction()),
		Name:               dcl.StringOrNil(p.Name),
		Region:             dcl.StringOrNil(p.Region),
		Project:            dcl.StringOrNil(p.Project),
	}
	// Repeated proto fields do not contain nil messages, so the dereferences are safe.
	for _, r := range p.GetHostRule() {
		obj.HostRule = append(obj.HostRule, *ProtoToComputeBetaUrlMapHostRule(r))
	}
	for _, r := range p.GetPathMatcher() {
		obj.PathMatcher = append(obj.PathMatcher, *ProtoToComputeBetaUrlMapPathMatcher(r))
	}
	for _, r := range p.GetTest() {
		obj.Test = append(obj.Test, *ProtoToComputeBetaUrlMapTest(r))
	}
	return obj
}
// ComputeBetaUrlMapDefaultUrlRedirectRedirectResponseCodeEnumToProto converts a UrlMapDefaultUrlRedirectRedirectResponseCodeEnum enum to its proto representation. Unknown or nil values map to the zero (unspecified) enum.
func ComputeBetaUrlMapDefaultUrlRedirectRedirectResponseCodeEnumToProto(e *beta.UrlMapDefaultUrlRedirectRedirectResponseCodeEnum) betapb.ComputeBetaUrlMapDefaultUrlRedirectRedirectResponseCodeEnum {
	if e == nil {
		return betapb.ComputeBetaUrlMapDefaultUrlRedirectRedirectResponseCodeEnum(0)
	}
	// Proto enum value names carry the message-type prefix; prepend it before lookup.
	if v, ok := betapb.ComputeBetaUrlMapDefaultUrlRedirectRedirectResponseCodeEnum_value["UrlMapDefaultUrlRedirectRedirectResponseCodeEnum"+string(*e)]; ok {
		return betapb.ComputeBetaUrlMapDefaultUrlRedirectRedirectResponseCodeEnum(v)
	}
	return betapb.ComputeBetaUrlMapDefaultUrlRedirectRedirectResponseCodeEnum(0)
}
// ComputeBetaUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnumToProto converts a UrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnum enum to its proto representation. Unknown or nil values map to the zero (unspecified) enum.
func ComputeBetaUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnumToProto(e *beta.UrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnum) betapb.ComputeBetaUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnum {
	if e == nil {
		return betapb.ComputeBetaUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnum(0)
	}
	// Proto enum value names carry the message-type prefix; prepend it before lookup.
	if v, ok := betapb.ComputeBetaUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnum_value["UrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnum"+string(*e)]; ok {
		return betapb.ComputeBetaUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnum(v)
	}
	return betapb.ComputeBetaUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnum(0)
}
// ComputeBetaUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCodeEnumToProto converts a UrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCodeEnum enum to its proto representation. Unknown or nil values map to the zero (unspecified) enum.
func ComputeBetaUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCodeEnumToProto(e *beta.UrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCodeEnum) betapb.ComputeBetaUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCodeEnum {
	if e == nil {
		return betapb.ComputeBetaUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCodeEnum(0)
	}
	// Proto enum value names carry the message-type prefix; prepend it before lookup.
	if v, ok := betapb.ComputeBetaUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCodeEnum_value["UrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCodeEnum"+string(*e)]; ok {
		return betapb.ComputeBetaUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCodeEnum(v)
	}
	return betapb.ComputeBetaUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCodeEnum(0)
}
// ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterMatchCriteriaEnumToProto converts a UrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterMatchCriteriaEnum enum to its proto representation. Unknown or nil values map to the zero (unspecified) enum.
func ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterMatchCriteriaEnumToProto(e *beta.UrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterMatchCriteriaEnum) betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterMatchCriteriaEnum {
	if e == nil {
		return betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterMatchCriteriaEnum(0)
	}
	// Proto enum value names carry the message-type prefix; prepend it before lookup.
	if v, ok := betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterMatchCriteriaEnum_value["UrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterMatchCriteriaEnum"+string(*e)]; ok {
		return betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterMatchCriteriaEnum(v)
	}
	return betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterMatchCriteriaEnum(0)
}
// ComputeBetaUrlMapPathMatcherRouteRuleUrlRedirectRedirectResponseCodeEnumToProto converts a UrlMapPathMatcherRouteRuleUrlRedirectRedirectResponseCodeEnum enum to its proto representation. Unknown or nil values map to the zero (unspecified) enum.
func ComputeBetaUrlMapPathMatcherRouteRuleUrlRedirectRedirectResponseCodeEnumToProto(e *beta.UrlMapPathMatcherRouteRuleUrlRedirectRedirectResponseCodeEnum) betapb.ComputeBetaUrlMapPathMatcherRouteRuleUrlRedirectRedirectResponseCodeEnum {
	if e == nil {
		return betapb.ComputeBetaUrlMapPathMatcherRouteRuleUrlRedirectRedirectResponseCodeEnum(0)
	}
	// Proto enum value names carry the message-type prefix; prepend it before lookup.
	if v, ok := betapb.ComputeBetaUrlMapPathMatcherRouteRuleUrlRedirectRedirectResponseCodeEnum_value["UrlMapPathMatcherRouteRuleUrlRedirectRedirectResponseCodeEnum"+string(*e)]; ok {
		return betapb.ComputeBetaUrlMapPathMatcherRouteRuleUrlRedirectRedirectResponseCodeEnum(v)
	}
	return betapb.ComputeBetaUrlMapPathMatcherRouteRuleUrlRedirectRedirectResponseCodeEnum(0)
}
// UrlMapDefaultRouteActionToProto converts a UrlMapDefaultRouteAction resource to its proto representation.
func ComputeBetaUrlMapDefaultRouteActionToProto(o *beta.UrlMapDefaultRouteAction) *betapb.ComputeBetaUrlMapDefaultRouteAction {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapDefaultRouteAction{}
	out.UrlRewrite = ComputeBetaUrlMapDefaultRouteActionUrlRewriteToProto(o.UrlRewrite)
	out.Timeout = ComputeBetaUrlMapDefaultRouteActionTimeoutToProto(o.Timeout)
	out.RetryPolicy = ComputeBetaUrlMapDefaultRouteActionRetryPolicyToProto(o.RetryPolicy)
	out.RequestMirrorPolicy = ComputeBetaUrlMapDefaultRouteActionRequestMirrorPolicyToProto(o.RequestMirrorPolicy)
	out.CorsPolicy = ComputeBetaUrlMapDefaultRouteActionCorsPolicyToProto(o.CorsPolicy)
	out.FaultInjectionPolicy = ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyToProto(o.FaultInjectionPolicy)
	// Convert every weighted backend service entry, preserving order.
	for i := range o.WeightedBackendService {
		out.WeightedBackendService = append(out.WeightedBackendService, ComputeBetaUrlMapDefaultRouteActionWeightedBackendServiceToProto(&o.WeightedBackendService[i]))
	}
	return out
}

// UrlMapDefaultRouteActionWeightedBackendServiceToProto converts a UrlMapDefaultRouteActionWeightedBackendService resource to its proto representation.
func ComputeBetaUrlMapDefaultRouteActionWeightedBackendServiceToProto(o *beta.UrlMapDefaultRouteActionWeightedBackendService) *betapb.ComputeBetaUrlMapDefaultRouteActionWeightedBackendService {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapDefaultRouteActionWeightedBackendService{}
	out.BackendService = dcl.ValueOrEmptyString(o.BackendService)
	out.Weight = dcl.ValueOrEmptyInt64(o.Weight)
	out.HeaderAction = ComputeBetaUrlMapHeaderActionToProto(o.HeaderAction)
	return out
}
// UrlMapHeaderActionToProto converts a UrlMapHeaderAction resource to its proto representation.
// A nil input yields a nil proto.
func ComputeBetaUrlMapHeaderActionToProto(o *beta.UrlMapHeaderAction) *betapb.ComputeBetaUrlMapHeaderAction {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapHeaderAction{}
	// Plain string slices are copied with a single variadic append rather
	// than an element-by-element loop; the result is identical (nil stays nil).
	p.RequestHeadersToRemove = append(p.RequestHeadersToRemove, o.RequestHeadersToRemove...)
	for _, r := range o.RequestHeadersToAdd {
		p.RequestHeadersToAdd = append(p.RequestHeadersToAdd, ComputeBetaUrlMapHeaderActionRequestHeadersToAddToProto(&r))
	}
	p.ResponseHeadersToRemove = append(p.ResponseHeadersToRemove, o.ResponseHeadersToRemove...)
	for _, r := range o.ResponseHeadersToAdd {
		p.ResponseHeadersToAdd = append(p.ResponseHeadersToAdd, ComputeBetaUrlMapHeaderActionResponseHeadersToAddToProto(&r))
	}
	return p
}

// UrlMapHeaderActionRequestHeadersToAddToProto converts a UrlMapHeaderActionRequestHeadersToAdd resource to its proto representation.
// A nil input yields a nil proto; nil scalar fields become their zero values.
func ComputeBetaUrlMapHeaderActionRequestHeadersToAddToProto(o *beta.UrlMapHeaderActionRequestHeadersToAdd) *betapb.ComputeBetaUrlMapHeaderActionRequestHeadersToAdd {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapHeaderActionRequestHeadersToAdd{
		HeaderName:  dcl.ValueOrEmptyString(o.HeaderName),
		HeaderValue: dcl.ValueOrEmptyString(o.HeaderValue),
		Replace:     dcl.ValueOrEmptyBool(o.Replace),
	}
	return p
}

// UrlMapHeaderActionResponseHeadersToAddToProto converts a UrlMapHeaderActionResponseHeadersToAdd resource to its proto representation.
// A nil input yields a nil proto; nil scalar fields become their zero values.
func ComputeBetaUrlMapHeaderActionResponseHeadersToAddToProto(o *beta.UrlMapHeaderActionResponseHeadersToAdd) *betapb.ComputeBetaUrlMapHeaderActionResponseHeadersToAdd {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapHeaderActionResponseHeadersToAdd{
		HeaderName:  dcl.ValueOrEmptyString(o.HeaderName),
		HeaderValue: dcl.ValueOrEmptyString(o.HeaderValue),
		Replace:     dcl.ValueOrEmptyBool(o.Replace),
	}
	return p
}
// UrlMapDefaultRouteActionUrlRewriteToProto converts a UrlMapDefaultRouteActionUrlRewrite resource to its proto representation.
// A nil input yields a nil proto; nil scalar fields become their zero values.
func ComputeBetaUrlMapDefaultRouteActionUrlRewriteToProto(o *beta.UrlMapDefaultRouteActionUrlRewrite) *betapb.ComputeBetaUrlMapDefaultRouteActionUrlRewrite {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapDefaultRouteActionUrlRewrite{
		PathPrefixRewrite: dcl.ValueOrEmptyString(o.PathPrefixRewrite),
		HostRewrite:       dcl.ValueOrEmptyString(o.HostRewrite),
	}
	return p
}

// UrlMapDefaultRouteActionTimeoutToProto converts a UrlMapDefaultRouteActionTimeout resource to its proto representation.
// A nil input yields a nil proto; nil scalar fields become their zero values.
func ComputeBetaUrlMapDefaultRouteActionTimeoutToProto(o *beta.UrlMapDefaultRouteActionTimeout) *betapb.ComputeBetaUrlMapDefaultRouteActionTimeout {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapDefaultRouteActionTimeout{
		Seconds: dcl.ValueOrEmptyInt64(o.Seconds),
		Nanos:   dcl.ValueOrEmptyInt64(o.Nanos),
	}
	return p
}

// UrlMapDefaultRouteActionRetryPolicyToProto converts a UrlMapDefaultRouteActionRetryPolicy resource to its proto representation.
// A nil input yields a nil proto.
func ComputeBetaUrlMapDefaultRouteActionRetryPolicyToProto(o *beta.UrlMapDefaultRouteActionRetryPolicy) *betapb.ComputeBetaUrlMapDefaultRouteActionRetryPolicy {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapDefaultRouteActionRetryPolicy{
		NumRetries:    dcl.ValueOrEmptyInt64(o.NumRetries),
		PerTryTimeout: ComputeBetaUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutToProto(o.PerTryTimeout),
	}
	// Copy the string slice with one variadic append instead of a loop.
	p.RetryCondition = append(p.RetryCondition, o.RetryCondition...)
	return p
}

// UrlMapDefaultRouteActionRetryPolicyPerTryTimeoutToProto converts a UrlMapDefaultRouteActionRetryPolicyPerTryTimeout resource to its proto representation.
// A nil input yields a nil proto; nil scalar fields become their zero values.
func ComputeBetaUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutToProto(o *beta.UrlMapDefaultRouteActionRetryPolicyPerTryTimeout) *betapb.ComputeBetaUrlMapDefaultRouteActionRetryPolicyPerTryTimeout {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapDefaultRouteActionRetryPolicyPerTryTimeout{
		Seconds: dcl.ValueOrEmptyInt64(o.Seconds),
		Nanos:   dcl.ValueOrEmptyInt64(o.Nanos),
	}
	return p
}

// UrlMapDefaultRouteActionRequestMirrorPolicyToProto converts a UrlMapDefaultRouteActionRequestMirrorPolicy resource to its proto representation.
// A nil input yields a nil proto; a nil backend service becomes the empty string.
func ComputeBetaUrlMapDefaultRouteActionRequestMirrorPolicyToProto(o *beta.UrlMapDefaultRouteActionRequestMirrorPolicy) *betapb.ComputeBetaUrlMapDefaultRouteActionRequestMirrorPolicy {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapDefaultRouteActionRequestMirrorPolicy{
		BackendService: dcl.ValueOrEmptyString(o.BackendService),
	}
	return p
}
// UrlMapDefaultRouteActionCorsPolicyToProto converts a UrlMapDefaultRouteActionCorsPolicy resource to its proto representation.
// A nil input yields a nil proto.
func ComputeBetaUrlMapDefaultRouteActionCorsPolicyToProto(o *beta.UrlMapDefaultRouteActionCorsPolicy) *betapb.ComputeBetaUrlMapDefaultRouteActionCorsPolicy {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapDefaultRouteActionCorsPolicy{
		MaxAge:           dcl.ValueOrEmptyInt64(o.MaxAge),
		AllowCredentials: dcl.ValueOrEmptyBool(o.AllowCredentials),
		Disabled:         dcl.ValueOrEmptyBool(o.Disabled),
	}
	// Each string slice is copied with a single variadic append rather than
	// an element-by-element loop; the result is identical (nil stays nil).
	p.AllowOrigin = append(p.AllowOrigin, o.AllowOrigin...)
	p.AllowOriginRegex = append(p.AllowOriginRegex, o.AllowOriginRegex...)
	p.AllowMethod = append(p.AllowMethod, o.AllowMethod...)
	p.AllowHeader = append(p.AllowHeader, o.AllowHeader...)
	p.ExposeHeader = append(p.ExposeHeader, o.ExposeHeader...)
	return p
}
// UrlMapDefaultRouteActionFaultInjectionPolicyToProto converts a UrlMapDefaultRouteActionFaultInjectionPolicy resource to its proto representation.
func ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyToProto(o *beta.UrlMapDefaultRouteActionFaultInjectionPolicy) *betapb.ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicy {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicy{}
	out.Delay = ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyDelayToProto(o.Delay)
	out.Abort = ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyAbortToProto(o.Abort)
	return out
}

// UrlMapDefaultRouteActionFaultInjectionPolicyDelayToProto converts a UrlMapDefaultRouteActionFaultInjectionPolicyDelay resource to its proto representation.
func ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyDelayToProto(o *beta.UrlMapDefaultRouteActionFaultInjectionPolicyDelay) *betapb.ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyDelay {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyDelay{}
	out.FixedDelay = ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayToProto(o.FixedDelay)
	out.Percentage = dcl.ValueOrEmptyDouble(o.Percentage)
	return out
}

// UrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayToProto converts a UrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay resource to its proto representation.
func ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayToProto(o *beta.UrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay) *betapb.ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay{}
	out.Seconds = dcl.ValueOrEmptyInt64(o.Seconds)
	out.Nanos = dcl.ValueOrEmptyInt64(o.Nanos)
	return out
}

// UrlMapDefaultRouteActionFaultInjectionPolicyAbortToProto converts a UrlMapDefaultRouteActionFaultInjectionPolicyAbort resource to its proto representation.
func ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyAbortToProto(o *beta.UrlMapDefaultRouteActionFaultInjectionPolicyAbort) *betapb.ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyAbort {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyAbort{}
	out.HttpStatus = dcl.ValueOrEmptyInt64(o.HttpStatus)
	out.Percentage = dcl.ValueOrEmptyDouble(o.Percentage)
	return out
}
// UrlMapDefaultUrlRedirectToProto converts a UrlMapDefaultUrlRedirect resource to its proto representation.
// A nil input yields a nil proto; nil scalar fields become their zero values.
func ComputeBetaUrlMapDefaultUrlRedirectToProto(o *beta.UrlMapDefaultUrlRedirect) *betapb.ComputeBetaUrlMapDefaultUrlRedirect {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapDefaultUrlRedirect{
		HostRedirect:         dcl.ValueOrEmptyString(o.HostRedirect),
		PathRedirect:         dcl.ValueOrEmptyString(o.PathRedirect),
		PrefixRedirect:       dcl.ValueOrEmptyString(o.PrefixRedirect),
		RedirectResponseCode: ComputeBetaUrlMapDefaultUrlRedirectRedirectResponseCodeEnumToProto(o.RedirectResponseCode),
		HttpsRedirect:        dcl.ValueOrEmptyBool(o.HttpsRedirect),
		StripQuery:           dcl.ValueOrEmptyBool(o.StripQuery),
	}
	return p
}

// UrlMapHostRuleToProto converts a UrlMapHostRule resource to its proto representation.
// A nil input yields a nil proto.
func ComputeBetaUrlMapHostRuleToProto(o *beta.UrlMapHostRule) *betapb.ComputeBetaUrlMapHostRule {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapHostRule{
		Description: dcl.ValueOrEmptyString(o.Description),
		PathMatcher: dcl.ValueOrEmptyString(o.PathMatcher),
	}
	// Copy the host list with one variadic append instead of a loop.
	p.Host = append(p.Host, o.Host...)
	return p
}
// UrlMapPathMatcherToProto converts a UrlMapPathMatcher resource to its proto representation.
func ComputeBetaUrlMapPathMatcherToProto(o *beta.UrlMapPathMatcher) *betapb.ComputeBetaUrlMapPathMatcher {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcher{}
	out.Name = dcl.ValueOrEmptyString(o.Name)
	out.Description = dcl.ValueOrEmptyString(o.Description)
	out.DefaultService = dcl.ValueOrEmptyString(o.DefaultService)
	out.DefaultRouteAction = ComputeBetaUrlMapDefaultRouteActionToProto(o.DefaultRouteAction)
	out.DefaultUrlRedirect = ComputeBetaUrlMapPathMatcherDefaultUrlRedirectToProto(o.DefaultUrlRedirect)
	out.HeaderAction = ComputeBetaUrlMapHeaderActionToProto(o.HeaderAction)
	// Convert nested rule lists, preserving order.
	for i := range o.PathRule {
		out.PathRule = append(out.PathRule, ComputeBetaUrlMapPathMatcherPathRuleToProto(&o.PathRule[i]))
	}
	for i := range o.RouteRule {
		out.RouteRule = append(out.RouteRule, ComputeBetaUrlMapPathMatcherRouteRuleToProto(&o.RouteRule[i]))
	}
	return out
}

// UrlMapPathMatcherDefaultUrlRedirectToProto converts a UrlMapPathMatcherDefaultUrlRedirect resource to its proto representation.
func ComputeBetaUrlMapPathMatcherDefaultUrlRedirectToProto(o *beta.UrlMapPathMatcherDefaultUrlRedirect) *betapb.ComputeBetaUrlMapPathMatcherDefaultUrlRedirect {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherDefaultUrlRedirect{}
	out.HostRedirect = dcl.ValueOrEmptyString(o.HostRedirect)
	out.PathRedirect = dcl.ValueOrEmptyString(o.PathRedirect)
	out.PrefixRedirect = dcl.ValueOrEmptyString(o.PrefixRedirect)
	out.RedirectResponseCode = ComputeBetaUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnumToProto(o.RedirectResponseCode)
	out.HttpsRedirect = dcl.ValueOrEmptyBool(o.HttpsRedirect)
	out.StripQuery = dcl.ValueOrEmptyBool(o.StripQuery)
	return out
}
// UrlMapPathMatcherPathRuleToProto converts a UrlMapPathMatcherPathRule resource to its proto representation.
// A nil input yields a nil proto.
func ComputeBetaUrlMapPathMatcherPathRuleToProto(o *beta.UrlMapPathMatcherPathRule) *betapb.ComputeBetaUrlMapPathMatcherPathRule {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapPathMatcherPathRule{
		BackendService: dcl.ValueOrEmptyString(o.BackendService),
		RouteAction:    ComputeBetaUrlMapPathMatcherPathRuleRouteActionToProto(o.RouteAction),
		UrlRedirect:    ComputeBetaUrlMapPathMatcherPathRuleUrlRedirectToProto(o.UrlRedirect),
	}
	// Copy the path list with one variadic append instead of a loop.
	p.Path = append(p.Path, o.Path...)
	return p
}
// UrlMapPathMatcherPathRuleRouteActionToProto converts a UrlMapPathMatcherPathRuleRouteAction resource to its proto representation.
func ComputeBetaUrlMapPathMatcherPathRuleRouteActionToProto(o *beta.UrlMapPathMatcherPathRuleRouteAction) *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteAction {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteAction{}
	out.UrlRewrite = ComputeBetaUrlMapPathMatcherPathRuleRouteActionUrlRewriteToProto(o.UrlRewrite)
	out.Timeout = ComputeBetaUrlMapPathMatcherPathRuleRouteActionTimeoutToProto(o.Timeout)
	out.RetryPolicy = ComputeBetaUrlMapPathMatcherPathRuleRouteActionRetryPolicyToProto(o.RetryPolicy)
	out.RequestMirrorPolicy = ComputeBetaUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyToProto(o.RequestMirrorPolicy)
	out.CorsPolicy = ComputeBetaUrlMapPathMatcherPathRuleRouteActionCorsPolicyToProto(o.CorsPolicy)
	out.FaultInjectionPolicy = ComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyToProto(o.FaultInjectionPolicy)
	// Convert every weighted backend service entry, preserving order.
	for i := range o.WeightedBackendService {
		out.WeightedBackendService = append(out.WeightedBackendService, ComputeBetaUrlMapPathMatcherPathRuleRouteActionWeightedBackendServiceToProto(&o.WeightedBackendService[i]))
	}
	return out
}

// UrlMapPathMatcherPathRuleRouteActionWeightedBackendServiceToProto converts a UrlMapPathMatcherPathRuleRouteActionWeightedBackendService resource to its proto representation.
func ComputeBetaUrlMapPathMatcherPathRuleRouteActionWeightedBackendServiceToProto(o *beta.UrlMapPathMatcherPathRuleRouteActionWeightedBackendService) *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionWeightedBackendService {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionWeightedBackendService{}
	out.BackendService = dcl.ValueOrEmptyString(o.BackendService)
	out.Weight = dcl.ValueOrEmptyInt64(o.Weight)
	out.HeaderAction = ComputeBetaUrlMapHeaderActionToProto(o.HeaderAction)
	return out
}

// UrlMapPathMatcherPathRuleRouteActionUrlRewriteToProto converts a UrlMapPathMatcherPathRuleRouteActionUrlRewrite resource to its proto representation.
func ComputeBetaUrlMapPathMatcherPathRuleRouteActionUrlRewriteToProto(o *beta.UrlMapPathMatcherPathRuleRouteActionUrlRewrite) *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionUrlRewrite {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionUrlRewrite{}
	out.PathPrefixRewrite = dcl.ValueOrEmptyString(o.PathPrefixRewrite)
	out.HostRewrite = dcl.ValueOrEmptyString(o.HostRewrite)
	return out
}

// UrlMapPathMatcherPathRuleRouteActionTimeoutToProto converts a UrlMapPathMatcherPathRuleRouteActionTimeout resource to its proto representation.
func ComputeBetaUrlMapPathMatcherPathRuleRouteActionTimeoutToProto(o *beta.UrlMapPathMatcherPathRuleRouteActionTimeout) *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionTimeout {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionTimeout{}
	out.Seconds = dcl.ValueOrEmptyInt64(o.Seconds)
	out.Nanos = dcl.ValueOrEmptyInt64(o.Nanos)
	return out
}
// UrlMapPathMatcherPathRuleRouteActionRetryPolicyToProto converts a UrlMapPathMatcherPathRuleRouteActionRetryPolicy resource to its proto representation.
// A nil input yields a nil proto.
func ComputeBetaUrlMapPathMatcherPathRuleRouteActionRetryPolicyToProto(o *beta.UrlMapPathMatcherPathRuleRouteActionRetryPolicy) *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionRetryPolicy {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionRetryPolicy{
		NumRetries:    dcl.ValueOrEmptyInt64(o.NumRetries),
		PerTryTimeout: ComputeBetaUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutToProto(o.PerTryTimeout),
	}
	// Copy the string slice with one variadic append instead of a loop.
	p.RetryCondition = append(p.RetryCondition, o.RetryCondition...)
	return p
}

// UrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutToProto converts a UrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout resource to its proto representation.
// A nil input yields a nil proto; nil scalar fields become their zero values.
func ComputeBetaUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutToProto(o *beta.UrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout) *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout{
		Seconds: dcl.ValueOrEmptyInt64(o.Seconds),
		Nanos:   dcl.ValueOrEmptyInt64(o.Nanos),
	}
	return p
}
// UrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyToProto converts a UrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy resource to its proto representation.
// A nil input yields a nil proto; a nil backend service becomes the empty string.
func ComputeBetaUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyToProto(o *beta.UrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy) *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy{
		BackendService: dcl.ValueOrEmptyString(o.BackendService),
	}
	return p
}

// UrlMapPathMatcherPathRuleRouteActionCorsPolicyToProto converts a UrlMapPathMatcherPathRuleRouteActionCorsPolicy resource to its proto representation.
// A nil input yields a nil proto.
func ComputeBetaUrlMapPathMatcherPathRuleRouteActionCorsPolicyToProto(o *beta.UrlMapPathMatcherPathRuleRouteActionCorsPolicy) *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionCorsPolicy {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionCorsPolicy{
		MaxAge:           dcl.ValueOrEmptyInt64(o.MaxAge),
		AllowCredentials: dcl.ValueOrEmptyBool(o.AllowCredentials),
		Disabled:         dcl.ValueOrEmptyBool(o.Disabled),
	}
	// Each string slice is copied with a single variadic append rather than
	// an element-by-element loop; the result is identical (nil stays nil).
	p.AllowOrigin = append(p.AllowOrigin, o.AllowOrigin...)
	p.AllowOriginRegex = append(p.AllowOriginRegex, o.AllowOriginRegex...)
	p.AllowMethod = append(p.AllowMethod, o.AllowMethod...)
	p.AllowHeader = append(p.AllowHeader, o.AllowHeader...)
	p.ExposeHeader = append(p.ExposeHeader, o.ExposeHeader...)
	return p
}
// UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyToProto converts a UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy resource to its proto representation.
func ComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyToProto(o *beta.UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy) *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy{}
	out.Delay = ComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayToProto(o.Delay)
	out.Abort = ComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortToProto(o.Abort)
	return out
}

// UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayToProto converts a UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay resource to its proto representation.
func ComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayToProto(o *beta.UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay) *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay{}
	out.FixedDelay = ComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayToProto(o.FixedDelay)
	out.Percentage = dcl.ValueOrEmptyDouble(o.Percentage)
	return out
}

// UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayToProto converts a UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay resource to its proto representation.
func ComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayToProto(o *beta.UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay) *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay{}
	out.Seconds = dcl.ValueOrEmptyInt64(o.Seconds)
	out.Nanos = dcl.ValueOrEmptyInt64(o.Nanos)
	return out
}

// UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortToProto converts a UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort resource to its proto representation.
func ComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortToProto(o *beta.UrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort) *betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort{}
	out.HttpStatus = dcl.ValueOrEmptyInt64(o.HttpStatus)
	out.Percentage = dcl.ValueOrEmptyDouble(o.Percentage)
	return out
}

// UrlMapPathMatcherPathRuleUrlRedirectToProto converts a UrlMapPathMatcherPathRuleUrlRedirect resource to its proto representation.
func ComputeBetaUrlMapPathMatcherPathRuleUrlRedirectToProto(o *beta.UrlMapPathMatcherPathRuleUrlRedirect) *betapb.ComputeBetaUrlMapPathMatcherPathRuleUrlRedirect {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherPathRuleUrlRedirect{}
	out.HostRedirect = dcl.ValueOrEmptyString(o.HostRedirect)
	out.PathRedirect = dcl.ValueOrEmptyString(o.PathRedirect)
	out.PrefixRedirect = dcl.ValueOrEmptyString(o.PrefixRedirect)
	out.RedirectResponseCode = ComputeBetaUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCodeEnumToProto(o.RedirectResponseCode)
	out.HttpsRedirect = dcl.ValueOrEmptyBool(o.HttpsRedirect)
	out.StripQuery = dcl.ValueOrEmptyBool(o.StripQuery)
	return out
}
// UrlMapPathMatcherRouteRuleToProto converts a UrlMapPathMatcherRouteRule resource to its proto representation.
func ComputeBetaUrlMapPathMatcherRouteRuleToProto(o *beta.UrlMapPathMatcherRouteRule) *betapb.ComputeBetaUrlMapPathMatcherRouteRule {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherRouteRule{}
	out.Priority = dcl.ValueOrEmptyInt64(o.Priority)
	out.Description = dcl.ValueOrEmptyString(o.Description)
	out.BackendService = dcl.ValueOrEmptyString(o.BackendService)
	out.RouteAction = ComputeBetaUrlMapPathMatcherRouteRuleRouteActionToProto(o.RouteAction)
	out.UrlRedirect = ComputeBetaUrlMapPathMatcherRouteRuleUrlRedirectToProto(o.UrlRedirect)
	out.HeaderAction = ComputeBetaUrlMapHeaderActionToProto(o.HeaderAction)
	// Convert every match rule, preserving order.
	for i := range o.MatchRule {
		out.MatchRule = append(out.MatchRule, ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleToProto(&o.MatchRule[i]))
	}
	return out
}

// UrlMapPathMatcherRouteRuleMatchRuleToProto converts a UrlMapPathMatcherRouteRuleMatchRule resource to its proto representation.
func ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleToProto(o *beta.UrlMapPathMatcherRouteRuleMatchRule) *betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRule {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRule{}
	out.PrefixMatch = dcl.ValueOrEmptyString(o.PrefixMatch)
	out.FullPathMatch = dcl.ValueOrEmptyString(o.FullPathMatch)
	out.RegexMatch = dcl.ValueOrEmptyString(o.RegexMatch)
	out.IgnoreCase = dcl.ValueOrEmptyBool(o.IgnoreCase)
	// Convert nested matcher lists, preserving order.
	for i := range o.HeaderMatch {
		out.HeaderMatch = append(out.HeaderMatch, ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleHeaderMatchToProto(&o.HeaderMatch[i]))
	}
	for i := range o.QueryParameterMatch {
		out.QueryParameterMatch = append(out.QueryParameterMatch, ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleQueryParameterMatchToProto(&o.QueryParameterMatch[i]))
	}
	for i := range o.MetadataFilter {
		out.MetadataFilter = append(out.MetadataFilter, ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterToProto(&o.MetadataFilter[i]))
	}
	return out
}
// UrlMapPathMatcherRouteRuleMatchRuleHeaderMatchToProto converts a UrlMapPathMatcherRouteRuleMatchRuleHeaderMatch resource to its proto representation.
func ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleHeaderMatchToProto(o *beta.UrlMapPathMatcherRouteRuleMatchRuleHeaderMatch) *betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleHeaderMatch {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleHeaderMatch{}
	out.HeaderName = dcl.ValueOrEmptyString(o.HeaderName)
	out.ExactMatch = dcl.ValueOrEmptyString(o.ExactMatch)
	out.RegexMatch = dcl.ValueOrEmptyString(o.RegexMatch)
	out.RangeMatch = ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleHeaderMatchRangeMatchToProto(o.RangeMatch)
	out.PresentMatch = dcl.ValueOrEmptyBool(o.PresentMatch)
	out.PrefixMatch = dcl.ValueOrEmptyString(o.PrefixMatch)
	out.SuffixMatch = dcl.ValueOrEmptyString(o.SuffixMatch)
	out.InvertMatch = dcl.ValueOrEmptyBool(o.InvertMatch)
	return out
}

// UrlMapPathMatcherRouteRuleMatchRuleHeaderMatchRangeMatchToProto converts a UrlMapPathMatcherRouteRuleMatchRuleHeaderMatchRangeMatch resource to its proto representation.
func ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleHeaderMatchRangeMatchToProto(o *beta.UrlMapPathMatcherRouteRuleMatchRuleHeaderMatchRangeMatch) *betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleHeaderMatchRangeMatch {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleHeaderMatchRangeMatch{}
	out.RangeStart = dcl.ValueOrEmptyInt64(o.RangeStart)
	out.RangeEnd = dcl.ValueOrEmptyInt64(o.RangeEnd)
	return out
}

// UrlMapPathMatcherRouteRuleMatchRuleQueryParameterMatchToProto converts a UrlMapPathMatcherRouteRuleMatchRuleQueryParameterMatch resource to its proto representation.
func ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleQueryParameterMatchToProto(o *beta.UrlMapPathMatcherRouteRuleMatchRuleQueryParameterMatch) *betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleQueryParameterMatch {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleQueryParameterMatch{}
	out.Name = dcl.ValueOrEmptyString(o.Name)
	out.PresentMatch = dcl.ValueOrEmptyBool(o.PresentMatch)
	out.ExactMatch = dcl.ValueOrEmptyString(o.ExactMatch)
	out.RegexMatch = dcl.ValueOrEmptyString(o.RegexMatch)
	return out
}
// UrlMapPathMatcherRouteRuleMatchRuleMetadataFilterToProto converts a UrlMapPathMatcherRouteRuleMatchRuleMetadataFilter resource to its proto representation.
func ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterToProto(o *beta.UrlMapPathMatcherRouteRuleMatchRuleMetadataFilter) *betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilter {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilter{}
	out.FilterMatchCriteria = ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterMatchCriteriaEnumToProto(o.FilterMatchCriteria)
	// Convert every filter label, preserving order.
	for i := range o.FilterLabel {
		out.FilterLabel = append(out.FilterLabel, ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterLabelToProto(&o.FilterLabel[i]))
	}
	return out
}

// UrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterLabelToProto converts a UrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterLabel resource to its proto representation.
func ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterLabelToProto(o *beta.UrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterLabel) *betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterLabel {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherRouteRuleMatchRuleMetadataFilterFilterLabel{}
	out.Name = dcl.ValueOrEmptyString(o.Name)
	out.Value = dcl.ValueOrEmptyString(o.Value)
	return out
}
// UrlMapPathMatcherRouteRuleRouteActionToProto converts a UrlMapPathMatcherRouteRuleRouteAction resource to its proto representation.
func ComputeBetaUrlMapPathMatcherRouteRuleRouteActionToProto(o *beta.UrlMapPathMatcherRouteRuleRouteAction) *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteAction {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteAction{}
	out.UrlRewrite = ComputeBetaUrlMapPathMatcherRouteRuleRouteActionUrlRewriteToProto(o.UrlRewrite)
	out.Timeout = ComputeBetaUrlMapPathMatcherRouteRuleRouteActionTimeoutToProto(o.Timeout)
	out.RetryPolicy = ComputeBetaUrlMapPathMatcherRouteRuleRouteActionRetryPolicyToProto(o.RetryPolicy)
	out.RequestMirrorPolicy = ComputeBetaUrlMapPathMatcherRouteRuleRouteActionRequestMirrorPolicyToProto(o.RequestMirrorPolicy)
	out.CorsPolicy = ComputeBetaUrlMapPathMatcherRouteRuleRouteActionCorsPolicyToProto(o.CorsPolicy)
	out.FaultInjectionPolicy = ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyToProto(o.FaultInjectionPolicy)
	// Convert every weighted backend service entry, preserving order.
	for i := range o.WeightedBackendService {
		out.WeightedBackendService = append(out.WeightedBackendService, ComputeBetaUrlMapPathMatcherRouteRuleRouteActionWeightedBackendServiceToProto(&o.WeightedBackendService[i]))
	}
	return out
}

// UrlMapPathMatcherRouteRuleRouteActionWeightedBackendServiceToProto converts a UrlMapPathMatcherRouteRuleRouteActionWeightedBackendService resource to its proto representation.
func ComputeBetaUrlMapPathMatcherRouteRuleRouteActionWeightedBackendServiceToProto(o *beta.UrlMapPathMatcherRouteRuleRouteActionWeightedBackendService) *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionWeightedBackendService {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionWeightedBackendService{}
	out.BackendService = dcl.ValueOrEmptyString(o.BackendService)
	out.Weight = dcl.ValueOrEmptyInt64(o.Weight)
	out.HeaderAction = ComputeBetaUrlMapHeaderActionToProto(o.HeaderAction)
	return out
}

// UrlMapPathMatcherRouteRuleRouteActionUrlRewriteToProto converts a UrlMapPathMatcherRouteRuleRouteActionUrlRewrite resource to its proto representation.
func ComputeBetaUrlMapPathMatcherRouteRuleRouteActionUrlRewriteToProto(o *beta.UrlMapPathMatcherRouteRuleRouteActionUrlRewrite) *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionUrlRewrite {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionUrlRewrite{}
	out.PathPrefixRewrite = dcl.ValueOrEmptyString(o.PathPrefixRewrite)
	out.HostRewrite = dcl.ValueOrEmptyString(o.HostRewrite)
	return out
}

// UrlMapPathMatcherRouteRuleRouteActionTimeoutToProto converts a UrlMapPathMatcherRouteRuleRouteActionTimeout resource to its proto representation.
func ComputeBetaUrlMapPathMatcherRouteRuleRouteActionTimeoutToProto(o *beta.UrlMapPathMatcherRouteRuleRouteActionTimeout) *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionTimeout {
	if o == nil {
		return nil
	}
	out := &betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionTimeout{}
	out.Seconds = dcl.ValueOrEmptyInt64(o.Seconds)
	out.Nanos = dcl.ValueOrEmptyInt64(o.Nanos)
	return out
}
// UrlMapPathMatcherRouteRuleRouteActionRetryPolicyToProto converts a UrlMapPathMatcherRouteRuleRouteActionRetryPolicy resource to its proto representation.
// A nil input yields a nil proto.
func ComputeBetaUrlMapPathMatcherRouteRuleRouteActionRetryPolicyToProto(o *beta.UrlMapPathMatcherRouteRuleRouteActionRetryPolicy) *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionRetryPolicy {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionRetryPolicy{
		NumRetries:    dcl.ValueOrEmptyInt64(o.NumRetries),
		PerTryTimeout: ComputeBetaUrlMapPathMatcherRouteRuleRouteActionRetryPolicyPerTryTimeoutToProto(o.PerTryTimeout),
	}
	// Copy the string slice with one variadic append instead of a loop.
	p.RetryCondition = append(p.RetryCondition, o.RetryCondition...)
	return p
}

// UrlMapPathMatcherRouteRuleRouteActionRetryPolicyPerTryTimeoutToProto converts a UrlMapPathMatcherRouteRuleRouteActionRetryPolicyPerTryTimeout resource to its proto representation.
// A nil input yields a nil proto; nil scalar fields become their zero values.
func ComputeBetaUrlMapPathMatcherRouteRuleRouteActionRetryPolicyPerTryTimeoutToProto(o *beta.UrlMapPathMatcherRouteRuleRouteActionRetryPolicyPerTryTimeout) *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionRetryPolicyPerTryTimeout {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionRetryPolicyPerTryTimeout{
		Seconds: dcl.ValueOrEmptyInt64(o.Seconds),
		Nanos:   dcl.ValueOrEmptyInt64(o.Nanos),
	}
	return p
}
// ComputeBetaUrlMapPathMatcherRouteRuleRouteActionRequestMirrorPolicyToProto
// converts a UrlMapPathMatcherRouteRuleRouteActionRequestMirrorPolicy resource
// to its proto representation. Returns nil for a nil input.
func ComputeBetaUrlMapPathMatcherRouteRuleRouteActionRequestMirrorPolicyToProto(o *beta.UrlMapPathMatcherRouteRuleRouteActionRequestMirrorPolicy) *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionRequestMirrorPolicy {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionRequestMirrorPolicy{
		BackendService: dcl.ValueOrEmptyString(o.BackendService),
	}
	return p
}
// ComputeBetaUrlMapPathMatcherRouteRuleRouteActionCorsPolicyToProto converts a
// UrlMapPathMatcherRouteRuleRouteActionCorsPolicy resource to its proto
// representation. Returns nil for a nil input.
func ComputeBetaUrlMapPathMatcherRouteRuleRouteActionCorsPolicyToProto(o *beta.UrlMapPathMatcherRouteRuleRouteActionCorsPolicy) *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionCorsPolicy {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionCorsPolicy{
		MaxAge:           dcl.ValueOrEmptyInt64(o.MaxAge),
		AllowCredentials: dcl.ValueOrEmptyBool(o.AllowCredentials),
		Disabled:         dcl.ValueOrEmptyBool(o.Disabled),
	}
	// Copy each repeated string field with a single variadic append instead of
	// element-by-element loops.
	p.AllowOrigin = append(p.AllowOrigin, o.AllowOrigin...)
	p.AllowOriginRegex = append(p.AllowOriginRegex, o.AllowOriginRegex...)
	p.AllowMethod = append(p.AllowMethod, o.AllowMethod...)
	p.AllowHeader = append(p.AllowHeader, o.AllowHeader...)
	p.ExposeHeader = append(p.ExposeHeader, o.ExposeHeader...)
	return p
}
// ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyToProto
// converts a UrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicy resource
// to its proto representation. Returns nil for a nil input.
func ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyToProto(o *beta.UrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicy) *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicy {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicy{
		Delay: ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayToProto(o.Delay),
		Abort: ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyAbortToProto(o.Abort),
	}
	return p
}
// ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayToProto
// converts a UrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelay
// resource to its proto representation. Returns nil for a nil input.
func ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayToProto(o *beta.UrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelay) *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelay {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelay{
		FixedDelay: ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayFixedDelayToProto(o.FixedDelay),
		Percentage: dcl.ValueOrEmptyDouble(o.Percentage),
	}
	return p
}
// ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayFixedDelayToProto
// converts a UrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayFixedDelay
// resource to its proto representation. Returns nil for a nil input.
func ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayFixedDelayToProto(o *beta.UrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayFixedDelay) *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayFixedDelay {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyDelayFixedDelay{
		Seconds: dcl.ValueOrEmptyInt64(o.Seconds),
		Nanos:   dcl.ValueOrEmptyInt64(o.Nanos),
	}
	return p
}
// ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyAbortToProto
// converts a UrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyAbort
// resource to its proto representation. Returns nil for a nil input.
func ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyAbortToProto(o *beta.UrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyAbort) *betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyAbort {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapPathMatcherRouteRuleRouteActionFaultInjectionPolicyAbort{
		HttpStatus: dcl.ValueOrEmptyInt64(o.HttpStatus),
		Percentage: dcl.ValueOrEmptyDouble(o.Percentage),
	}
	return p
}
// ComputeBetaUrlMapPathMatcherRouteRuleUrlRedirectToProto converts a
// UrlMapPathMatcherRouteRuleUrlRedirect resource to its proto representation.
// Returns nil for a nil input.
func ComputeBetaUrlMapPathMatcherRouteRuleUrlRedirectToProto(o *beta.UrlMapPathMatcherRouteRuleUrlRedirect) *betapb.ComputeBetaUrlMapPathMatcherRouteRuleUrlRedirect {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapPathMatcherRouteRuleUrlRedirect{
		HostRedirect:         dcl.ValueOrEmptyString(o.HostRedirect),
		PathRedirect:         dcl.ValueOrEmptyString(o.PathRedirect),
		PrefixRedirect:       dcl.ValueOrEmptyString(o.PrefixRedirect),
		RedirectResponseCode: ComputeBetaUrlMapPathMatcherRouteRuleUrlRedirectRedirectResponseCodeEnumToProto(o.RedirectResponseCode),
		HttpsRedirect:        dcl.ValueOrEmptyBool(o.HttpsRedirect),
		StripQuery:           dcl.ValueOrEmptyBool(o.StripQuery),
	}
	return p
}
// ComputeBetaUrlMapTestToProto converts a UrlMapTest resource to its proto
// representation. Returns nil for a nil input.
func ComputeBetaUrlMapTestToProto(o *beta.UrlMapTest) *betapb.ComputeBetaUrlMapTest {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaUrlMapTest{
		Description:            dcl.ValueOrEmptyString(o.Description),
		Host:                   dcl.ValueOrEmptyString(o.Host),
		Path:                   dcl.ValueOrEmptyString(o.Path),
		ExpectedBackendService: dcl.ValueOrEmptyString(o.ExpectedBackendService),
	}
	return p
}
// UrlMapToProto converts a UrlMap resource to its proto representation.
func UrlMapToProto(resource *beta.UrlMap) *betapb.ComputeBetaUrlMap {
	p := &betapb.ComputeBetaUrlMap{
		DefaultRouteAction: ComputeBetaUrlMapDefaultRouteActionToProto(resource.DefaultRouteAction),
		DefaultService:     dcl.ValueOrEmptyString(resource.DefaultService),
		DefaultUrlRedirect: ComputeBetaUrlMapDefaultUrlRedirectToProto(resource.DefaultUrlRedirect),
		Description:        dcl.ValueOrEmptyString(resource.Description),
		SelfLink:           dcl.ValueOrEmptyString(resource.SelfLink),
		HeaderAction:       ComputeBetaUrlMapHeaderActionToProto(resource.HeaderAction),
		Name:               dcl.ValueOrEmptyString(resource.Name),
		Region:             dcl.ValueOrEmptyString(resource.Region),
		Project:            dcl.ValueOrEmptyString(resource.Project),
	}
	// Index-based iteration passes a pointer to the slice element itself,
	// avoiding a per-iteration struct copy and the loop-variable aliasing
	// hazard of taking &r before Go 1.22.
	for i := range resource.HostRule {
		p.HostRule = append(p.HostRule, ComputeBetaUrlMapHostRuleToProto(&resource.HostRule[i]))
	}
	for i := range resource.PathMatcher {
		p.PathMatcher = append(p.PathMatcher, ComputeBetaUrlMapPathMatcherToProto(&resource.PathMatcher[i]))
	}
	for i := range resource.Test {
		p.Test = append(p.Test, ComputeBetaUrlMapTestToProto(&resource.Test[i]))
	}
	return p
}
// applyUrlMap converts the request's resource to the DCL representation,
// applies it through the client, and converts the result back to a proto.
func (s *UrlMapServer) applyUrlMap(ctx context.Context, c *beta.Client, request *betapb.ApplyComputeBetaUrlMapRequest) (*betapb.ComputeBetaUrlMap, error) {
	p := ProtoToUrlMap(request.GetResource())
	res, err := c.ApplyUrlMap(ctx, p)
	if err != nil {
		return nil, err
	}
	r := UrlMapToProto(res)
	return r, nil
}
// ApplyComputeBetaUrlMap handles the gRPC request by passing it to the
// underlying UrlMap Apply() method.
func (s *UrlMapServer) ApplyComputeBetaUrlMap(ctx context.Context, request *betapb.ApplyComputeBetaUrlMapRequest) (*betapb.ComputeBetaUrlMap, error) {
	cl, err := createConfigUrlMap(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return s.applyUrlMap(ctx, cl, request)
}
// DeleteComputeBetaUrlMap handles the gRPC request by passing it to the
// underlying UrlMap Delete() method.
func (s *UrlMapServer) DeleteComputeBetaUrlMap(ctx context.Context, request *betapb.DeleteComputeBetaUrlMapRequest) (*emptypb.Empty, error) {
	cl, err := createConfigUrlMap(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteUrlMap(ctx, ProtoToUrlMap(request.GetResource()))
}
// ListComputeBetaUrlMap handles the gRPC request by passing it to the
// underlying UrlMapList() method and converting each result to a proto.
func (s *UrlMapServer) ListComputeBetaUrlMap(ctx context.Context, request *betapb.ListComputeBetaUrlMapRequest) (*betapb.ListComputeBetaUrlMapResponse, error) {
	cl, err := createConfigUrlMap(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListUrlMap(ctx, request.Project)
	if err != nil {
		return nil, err
	}
	var protos []*betapb.ComputeBetaUrlMap
	for _, r := range resources.Items {
		rp := UrlMapToProto(r)
		protos = append(protos, rp)
	}
	return &betapb.ListComputeBetaUrlMapResponse{Items: protos}, nil
}
// createConfigUrlMap builds a DCL client authorized with the credentials in
// the given service account file.
func createConfigUrlMap(ctx context.Context, serviceAccountFile string) (*beta.Client, error) {
	// Go naming: camelCase parameter, not snake_case.
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return beta.NewClient(conf), nil
}
|
// Copyright © 2018 Sunface <CTO@188.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package service
import (
"bytes"
"sync"
"time"
"github.com/sunface/talent"
"github.com/weaveworks/mesh"
"github.com/mafanr/meq/proto"
)
// MemStore is an in-memory message store with best-effort synchronization
// across the cluster via mesh gossip broadcasts.
type MemStore struct {
	In        chan []*proto.PubMsg                // incoming batches, drained into cache by Init's worker
	DB        map[string]map[string]*proto.PubMsg // topic -> message id -> message
	DBIndex   map[string][]string                 // topic -> message ids in flush order
	DBIDIndex map[string]string                   // message id -> topic (reverse lookup / dedupe)

	chatroom   map[string]map[string]int // chatroom topic -> user -> unread count
	topicCount map[string]int            // normal topic -> unread count

	timerDB []*proto.TimerMsg // delayed messages waiting for their trigger time

	bk *Broker // owning broker; provides running flag and WaitGroup

	cache     []*proto.PubMsg // messages pending flush() into DB
	readCache []proto.Ack     // acks pending flushRead()

	//cluster
	send          mesh.Gossip     // gossip sender used for broadcasts
	pn            mesh.PeerName   // local peer name
	msgSyncCache  []*proto.PubMsg // messages pending broadcast to peers
	readSyncCache []proto.Ack     // acks pending broadcast to peers

	sync.RWMutex // guards the maps and slices above
}
// Capacity and batching limits for the in-memory store.
const (
	MaxCacheLength    = 10000 // capacity of the In channel and the flush cache
	MaxMemFlushLength = 100   // NOTE(review): not referenced in this file — presumably a flush batch size; confirm
	MaxSyncMsgLen     = 1000  // initial capacity of the message sync cache
	MaxSyncAckLen     = 1000  // initial capacity of the ack sync cache
)
// Gossip command bytes; OnGossipBroadcast reads them at payload offset 4.
const (
	MEM_MSG_ADD = 'a' // payload carries a packed batch of published messages
	MEM_MSG_ACK = 'b' // payload carries a packed batch of acks
)
/*------------------------------Storage interface implemented------------------------*/

// Init allocates the store's state and launches its background workers:
// one goroutine draining In into cache, two periodic flushers, and one
// ticker loop that broadcasts pending messages/acks to cluster peers.
func (ms *MemStore) Init() {
	ms.In = make(chan []*proto.PubMsg, MaxCacheLength)
	ms.DB = make(map[string]map[string]*proto.PubMsg)
	ms.DBIndex = make(map[string][]string)
	ms.DBIDIndex = make(map[string]string)
	ms.cache = make([]*proto.PubMsg, 0, MaxCacheLength)
	ms.msgSyncCache = make([]*proto.PubMsg, 0, MaxSyncMsgLen)
	ms.readSyncCache = make([]proto.Ack, 0, MaxSyncAckLen)
	ms.chatroom = make(map[string]map[string]int)
	ms.topicCount = make(map[string]int)

	// Register with the WaitGroup BEFORE starting each goroutine: calling
	// wg.Add inside the goroutine races with a concurrent wg.Wait, which may
	// return before the worker has registered itself.
	ms.bk.wg.Add(1)
	go func() {
		defer ms.bk.wg.Done()
		for ms.bk.running {
			msgs := <-ms.In
			ms.Lock()
			ms.cache = append(ms.cache, msgs...)
			ms.Unlock()
		}
	}()

	ms.bk.wg.Add(1)
	go func() {
		defer ms.bk.wg.Done()
		for ms.bk.running {
			time.Sleep(100 * time.Millisecond)
			ms.flush()
		}
	}()

	ms.bk.wg.Add(1)
	go func() {
		defer ms.bk.wg.Done()
		for ms.bk.running {
			time.Sleep(2 * time.Second)
			ms.flushRead()
		}
	}()

	go func() {
		readTimer := time.NewTicker(200 * time.Millisecond).C
		msgTimer := time.NewTicker(200 * time.Millisecond).C
		for {
			select {
			case <-readTimer: // sync acks
				// Inspect and swap the cache under the lock (the old code read
				// len() unsynchronized); broadcast outside the lock.
				var m []byte
				ms.Lock()
				if len(ms.readSyncCache) > 0 {
					m = proto.PackAck(ms.readSyncCache, MEM_MSG_ACK)
					// Reuse the same capacity constant as the initial allocation
					// (was the unrelated MAX_CHANNEL_LEN).
					ms.readSyncCache = make([]proto.Ack, 0, MaxSyncAckLen)
				}
				ms.Unlock()
				if m != nil {
					ms.send.GossipBroadcast(MemMsg(m))
				}
			case <-msgTimer: // sync msgs
				var m []byte
				ms.Lock()
				if len(ms.msgSyncCache) > 0 {
					m = proto.PackPubBatch(ms.msgSyncCache, MEM_MSG_ADD)
					ms.msgSyncCache = make([]*proto.PubMsg, 0, MaxSyncMsgLen)
				}
				ms.Unlock()
				if m != nil {
					ms.send.GossipBroadcast(MemMsg(m))
				}
			}
		}
	}()
}
// Close shuts down the intake channel. No Store calls may follow Close;
// the workers started by Init exit via the broker's running flag.
func (ms *MemStore) Close() {
	close(ms.In)
}
// Store queues QoS1 messages for in-memory persistence and records the whole
// batch for cluster synchronization. Batches without any QoS1 message are
// dropped entirely, mirroring the persistence rule.
func (ms *MemStore) Store(msgs []*proto.PubMsg) {
	if len(msgs) == 0 {
		return
	}
	// Only QoS1 messages need to be persisted.
	var persistent []*proto.PubMsg
	for _, m := range msgs {
		if m.QoS == proto.QOS1 {
			persistent = append(persistent, m)
		}
	}
	if len(persistent) == 0 {
		return
	}
	ms.In <- persistent
	ms.Lock()
	ms.msgSyncCache = append(ms.msgSyncCache, msgs...)
	ms.Unlock()
}
// MarkRead records one ack per message id on the given topic, both for the
// local flushRead pass and for cluster synchronization.
func (ms *MemStore) MarkRead(topic []byte, msgids [][]byte) {
	if len(msgids) == 0 {
		return
	}
	acks := make([]proto.Ack, 0, len(msgids))
	for _, id := range msgids {
		acks = append(acks, proto.Ack{topic, id})
	}
	ms.Lock()
	ms.readCache = append(ms.readCache, acks...)
	ms.readSyncCache = append(ms.readSyncCache, acks...)
	ms.Unlock()
}
// UpdateUnreadCount adjusts unread counters for a topic. For normal topics a
// single per-topic counter is kept; for chatrooms every member's counter is
// bumped on add, and only the given user's counter is cleared on reduce.
// count == proto.REDUCE_ALL_COUNT zeroes a normal topic's counter outright.
func (ms *MemStore) UpdateUnreadCount(topic []byte, user []byte, isAdd bool, count int) {
	ms.Lock()
	defer ms.Unlock()

	// Hoist the byte->string conversion; the old code repeated it on every access.
	tkey := talent.Bytes2String(topic)
	if proto.GetTopicType(topic) == proto.TopicTypeNormal {
		c, ok := ms.topicCount[tkey]
		if isAdd {
			if ok {
				ms.topicCount[tkey] = c + count
			} else {
				ms.topicCount[tkey] = count
			}
			return
		}
		if !ok {
			return
		}
		switch {
		case count == proto.REDUCE_ALL_COUNT:
			ms.topicCount[tkey] = 0
		case c-count > 0:
			ms.topicCount[tkey] = c - count
		default:
			// Never let the counter go negative.
			ms.topicCount[tkey] = 0
		}
		return
	}

	room, ok := ms.chatroom[tkey]
	if !ok {
		return
	}
	if isAdd {
		// A new message is unread by every member of the chatroom.
		for u, c := range room {
			room[u] = c + count
		}
		return
	}
	ukey := talent.Bytes2String(user)
	if _, ok := room[ukey]; ok {
		room[ukey] = 0
	}
}
// Query returns messages for topic t, newest-first. With the special
// proto.MSG_NEWEST_OFFSET it returns the newest `count` messages (all of
// them when count is 0); otherwise it returns up to `count` messages older
// than the message id in `offset` (all older ones when count is 0). An
// unknown offset, or an offset at the oldest position, yields nil.
func (ms *MemStore) Query(t []byte, count int, offset []byte, acked bool) []*proto.PubMsg {
	topic := string(t)
	// Query only reads store state, so a read lock suffices (consistent with
	// UnreadCount); the old code took the exclusive write lock.
	ms.RLock()
	defer ms.RUnlock()

	msgs := ms.DB[topic]
	index := ms.DBIndex[topic]

	var newMsgs []*proto.PubMsg
	if bytes.Equal(offset, proto.MSG_NEWEST_OFFSET) {
		if count == 0 { // get all messages
			for i := len(index) - 1; i >= 0; i-- {
				msg := msgs[index[i]]
				newMsgs = append(newMsgs, msg)
			}
			return newMsgs
		}
		c := 0
		for i := len(index) - 1; i >= 0; i-- {
			if c >= count {
				break
			}
			msg := msgs[index[i]]
			newMsgs = append(newMsgs, msg)
			c++
		}
		return newMsgs
	}

	// Find the position of the offset message id. Ids are unique (flush
	// dedupes via DBIDIndex), so we can stop at the first hit.
	pos := -1
	ot := string(offset)
	for i, id := range index {
		if id == ot {
			pos = i
			break
		}
	}
	// Can't find the message, or the message is the oldest one: return empty.
	if pos == -1 || pos == len(index)-1 {
		return newMsgs
	}
	if count == 0 {
		// msg push / im pull: all messages before the offset.
		for i := pos - 1; i >= 0; i-- {
			msg := msgs[index[i]]
			newMsgs = append(newMsgs, msg)
		}
		return newMsgs
	}
	c := 0
	// msg push / im pull: up to count messages before the offset.
	for i := pos - 1; i >= 0; i-- {
		if c >= count {
			break
		}
		msg := msgs[index[i]]
		newMsgs = append(newMsgs, msg)
		c++
	}
	return newMsgs
}
// UnreadCount reports the unread counter for a normal topic, or for one user
// of a chatroom topic. Unknown chatrooms report zero.
func (ms *MemStore) UnreadCount(topic []byte, user []byte) int {
	ms.RLock()
	defer ms.RUnlock()

	if proto.GetTopicType(topic) == proto.TopicTypeNormal {
		return ms.topicCount[talent.Bytes2String(topic)]
	}
	room, ok := ms.chatroom[talent.Bytes2String(topic)]
	if !ok {
		return 0
	}
	return room[talent.Bytes2String(user)]
}
// StoreTM appends a timer message to the delayed-delivery queue.
func (ms *MemStore) StoreTM(m *proto.TimerMsg) {
	ms.Lock()
	defer ms.Unlock()
	ms.timerDB = append(ms.timerDB, m)
}
// QueryTM pops every timer message whose trigger time has passed, converts
// each into a publishable message, stores the resulting batch, and returns it.
func (ms *MemStore) QueryTM() []*proto.PubMsg {
	now := time.Now().Unix()
	var due []*proto.PubMsg
	var pending []*proto.TimerMsg

	ms.Lock()
	for _, tm := range ms.timerDB {
		if tm.Trigger <= now {
			due = append(due, &proto.PubMsg{tm.ID, tm.ID, tm.Topic, tm.Payload, false, proto.TIMER_MSG, 1, 0, nil, nil})
		} else {
			pending = append(pending, tm)
		}
	}
	// Keep only the not-yet-due messages.
	ms.timerDB = pending
	ms.Unlock()

	ms.Store(due)
	return due
}
// JoinChat registers user in the chatroom for topic, creating the room on
// first join. An existing member's unread counter is left untouched.
// Always returns nil (error is kept for interface compatibility).
func (ms *MemStore) JoinChat(topic []byte, user []byte) error {
	ms.Lock()
	defer ms.Unlock()

	// Hoist the byte->string conversions; the old code repeated them.
	tkey := talent.Bytes2String(topic)
	ukey := talent.Bytes2String(user)

	room, ok := ms.chatroom[tkey]
	if !ok {
		ms.chatroom[tkey] = map[string]int{ukey: 0}
		return nil
	}
	if _, ok := room[ukey]; !ok {
		room[ukey] = 0
	}
	return nil
}
// LeaveChat removes user from the chatroom for topic; a missing room is a
// no-op. Always returns nil (error kept for interface compatibility).
func (ms *MemStore) LeaveChat(topic []byte, user []byte) error {
	ms.Lock()
	defer ms.Unlock()
	if room, ok := ms.chatroom[talent.Bytes2String(topic)]; ok {
		delete(room, talent.Bytes2String(user))
	}
	return nil
}
// GetChatUsers returns the members of the chatroom for topic; the result is
// never nil (an unknown room yields an empty slice).
func (ms *MemStore) GetChatUsers(topic []byte) [][]byte {
	// Read-only access: a read lock is sufficient and lets concurrent readers
	// proceed (the old code took the exclusive write lock).
	ms.RLock()
	defer ms.RUnlock()

	users := make([][]byte, 0)
	if room, ok := ms.chatroom[talent.Bytes2String(topic)]; ok {
		for u := range room {
			users = append(users, talent.String2Bytes(u))
		}
	}
	return users
}
// Del is a no-op stub for the memory store; it always reports success.
func (ms *MemStore) Del(topic []byte, msgid []byte) error {
	return nil
}
// Admin part

// SaveAdminInfo is a no-op stub; the memory store keeps no admin state.
func (ms *MemStore) SaveAdminInfo(tp int, data interface{}) {
}
// QueryAdminInfo is a no-op stub; it always returns nil.
func (ms *MemStore) QueryAdminInfo(tp int) interface{} {
	return nil
}
/*--------------------------------Internal functions--------------------------------*/

// flush moves messages accumulated in ms.cache into the indexed DB maps,
// skipping ids that were already flushed, then advances the cache past the
// processed prefix. Called periodically by the worker started in Init.
func (ms *MemStore) flush() {
	// NOTE(review): ms.cache is read here without the lock while the intake
	// goroutine appends to it concurrently — confirm this race is acceptable.
	temp := ms.cache
	if len(temp) > 0 {
		for _, msg := range temp {
			// Dedupe: skip ids already present in the store.
			ms.RLock()
			if _, ok := ms.DBIDIndex[talent.Bytes2String(msg.ID)]; ok {
				ms.RUnlock()
				continue
			}
			ms.RUnlock()
			t := talent.Bytes2String(msg.Topic)
			ms.Lock()
			_, ok := ms.DB[t]
			if !ok {
				ms.DB[t] = make(map[string]*proto.PubMsg)
			}
			//@performance
			ms.DB[t][talent.Bytes2String(msg.ID)] = msg
			ms.DBIndex[t] = append(ms.DBIndex[t], talent.Bytes2String(msg.ID))
			ms.DBIDIndex[talent.Bytes2String(msg.ID)] = t
			ms.topicCount[t]++
			ms.Unlock()
		}
		// Drop the processed prefix; anything appended meanwhile is kept.
		ms.Lock()
		ms.cache = ms.cache[len(temp):]
		ms.Unlock()
	} else {
		// rejust the cache cap length: re-slicing above keeps growing the
		// backing array, so restore the original capacity when idle.
		ms.Lock()
		if len(ms.cache) == 0 && cap(ms.cache) != MaxCacheLength {
			ms.cache = make([]*proto.PubMsg, 0, MaxCacheLength)
		}
		ms.Unlock()
	}
}
// flushRead applies queued acks: each ack's message (looked up through the
// id->topic index) is marked Acked. Unknown ids are silently skipped.
// Called periodically by the worker started in Init.
func (ms *MemStore) flushRead() {
	// NOTE(review): len/read of ms.readCache happens outside the lock while
	// MarkRead appends concurrently — confirm this race is acceptable.
	if len(ms.readCache) == 0 {
		return
	}
	temp := ms.readCache
	for _, ack := range temp {
		// lookup topic
		ms.RLock()
		t, ok := ms.DBIDIndex[string(ack.Msgid)]
		if !ok {
			ms.RUnlock()
			// newCache = append(newCache, msgid)
			continue
		}
		// set message status to acked
		msg := ms.DB[t][string(ack.Msgid)]
		ms.RUnlock()
		// NOTE(review): msg.Acked is written while only a read lock protected
		// the lookup; Query readers may observe this concurrently.
		msg.Acked = true
	}
	// Drop the processed prefix; acks appended meanwhile are kept.
	ms.Lock()
	ms.readCache = ms.readCache[len(temp):]
	ms.Unlock()
}
/* -------------------------- cluster part -----------------------------------------------*/

// Gossip implements mesh.Gossiper. The memory store has no periodic
// complete-state exchange, so it always returns nil.
func (ms *MemStore) Gossip() (complete mesh.GossipData) {
	return
}
// OnGossip implements mesh.Gossiper. Periodic gossip payloads are ignored;
// all synchronization happens through broadcasts (see OnGossipBroadcast).
func (ms *MemStore) OnGossip(buf []byte) (delta mesh.GossipData, err error) {
	return
}
// OnGossipBroadcast merges a broadcast payload into local state. The command
// byte at offset 4 selects between a batch of published messages (queued on
// In) and a batch of acks (queued on readCache). It always reports no
// modified state, so broadcasts are not re-propagated from here.
func (ms *MemStore) OnGossipBroadcast(src mesh.PeerName, buf []byte) (received mesh.GossipData, err error) {
	// Guard the fixed header: the old code indexed buf[4] unconditionally and
	// panicked on a short/malformed payload.
	if len(buf) < 5 {
		return
	}
	switch buf[4] {
	case MEM_MSG_ADD:
		msgs, _ := proto.UnpackPubBatch(buf[5:])
		ms.In <- msgs
	case MEM_MSG_ACK:
		acks := proto.UnpackAck(buf[5:])
		ms.Lock()
		ms.readCache = append(ms.readCache, acks...)
		ms.Unlock()
	}
	return
}
// OnGossipUnicast implements mesh.Gossiper. Unicast payloads are ignored.
func (ms *MemStore) OnGossipUnicast(src mesh.PeerName, buf []byte) error {
	return nil
}
// register stores the gossip sender used for cluster broadcasts.
func (ms *MemStore) register(send mesh.Gossip) {
	ms.send = send
}
// MemMsg is a raw gossip payload implementing mesh.GossipData.
type MemMsg []byte

// Encode returns the payload as a single-element batch.
func (mm MemMsg) Encode() [][]byte {
	return [][]byte{mm}
}

// Merge is a no-op: payloads are fire-and-forget, nothing is accumulated.
func (mm MemMsg) Merge(new mesh.GossipData) (complete mesh.GossipData) {
	return
}
|
package rehearse
import (
"fmt"
"path/filepath"
"sort"
"strconv"
"testing"
"github.com/sirupsen/logrus"
"k8s.io/api/core/v1"
pjapi "k8s.io/test-infra/prow/apis/prowjobs/v1"
"k8s.io/test-infra/prow/client/clientset/versioned/fake"
prowconfig "k8s.io/test-infra/prow/config"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
clientgo_testing "k8s.io/client-go/testing"
)
// makeTestingPresubmitForEnv builds a minimal Presubmit fixture whose single
// container carries the given environment variables.
func makeTestingPresubmitForEnv(env []v1.EnvVar) *prowconfig.Presubmit {
	return &prowconfig.Presubmit{
		JobBase: prowconfig.JobBase{
			Name: "test-job-name",
			Spec: &v1.PodSpec{
				Containers: []v1.Container{
					{Env: env},
				},
			},
		},
	}
}
// fakeCiopConfig is an in-memory ci-operator config loader for tests,
// keyed by the joined repo/configFile path.
type fakeCiopConfig struct {
	fakeFiles map[string]string
}
// Load returns the fake file content stored under repo/configFile, or an
// error when no such fake entry exists.
func (c *fakeCiopConfig) Load(repo, configFile string) (string, error) {
	content, ok := c.fakeFiles[filepath.Join(repo, configFile)]
	if !ok {
		return "", fmt.Errorf("no such fake file")
	}
	return content, nil
}
// makeCMReference builds an EnvVarSource pointing at key `key` of the
// ConfigMap `cmName`.
func makeCMReference(cmName, key string) *v1.EnvVarSource {
	return &v1.EnvVarSource{
		ConfigMapKeyRef: &v1.ConfigMapKeySelector{
			LocalObjectReference: v1.LocalObjectReference{
				Name: cmName,
			},
			Key: key,
		},
	}
}
// TestInlineCiopConfig covers inlineCiOpConfig: env vars referencing the
// ci-operator-configs ConfigMap are replaced with inline content from the
// config loader; all other env shapes pass through unchanged, and a missing
// key is surfaced as an error.
func TestInlineCiopConfig(t *testing.T) {
	testTargetRepo := "org/repo"
	testLogger := logrus.New()
	testCases := []struct {
		description   string
		sourceEnv     []v1.EnvVar
		configs       *fakeCiopConfig
		expectedEnv   []v1.EnvVar
		expectedError bool
	}{{
		description: "empty env -> no changes",
		configs:     &fakeCiopConfig{},
	}, {
		description: "no Env.ValueFrom -> no changes",
		sourceEnv:   []v1.EnvVar{{Name: "T", Value: "V"}},
		configs:     &fakeCiopConfig{},
		expectedEnv: []v1.EnvVar{{Name: "T", Value: "V"}},
	}, {
		description: "no Env.ValueFrom.ConfigMapKeyRef -> no changes",
		sourceEnv:   []v1.EnvVar{{Name: "T", ValueFrom: &v1.EnvVarSource{ResourceFieldRef: &v1.ResourceFieldSelector{}}}},
		configs:     &fakeCiopConfig{},
		expectedEnv: []v1.EnvVar{{Name: "T", ValueFrom: &v1.EnvVarSource{ResourceFieldRef: &v1.ResourceFieldSelector{}}}},
	}, {
		description: "CM reference but not ci-operator-configs -> no changes",
		sourceEnv:   []v1.EnvVar{{Name: "T", ValueFrom: makeCMReference("test-cm", "key")}},
		configs:     &fakeCiopConfig{},
		expectedEnv: []v1.EnvVar{{Name: "T", ValueFrom: makeCMReference("test-cm", "key")}},
	}, {
		description: "CM reference to ci-operator-configs -> cm content inlined",
		sourceEnv:   []v1.EnvVar{{Name: "T", ValueFrom: makeCMReference(ciOperatorConfigsCMName, "filename")}},
		configs:     &fakeCiopConfig{fakeFiles: map[string]string{"org/repo/filename": "ciopConfigContent"}},
		expectedEnv: []v1.EnvVar{{Name: "T", Value: "ciopConfigContent"}},
	}, {
		description:   "bad CM key is handled",
		sourceEnv:     []v1.EnvVar{{Name: "T", ValueFrom: makeCMReference(ciOperatorConfigsCMName, "filename")}},
		configs:       &fakeCiopConfig{fakeFiles: map[string]string{}},
		expectedError: true,
	},
	}
	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			job := makeTestingPresubmitForEnv(tc.sourceEnv)
			expectedJob := makeTestingPresubmitForEnv(tc.expectedEnv)
			newJob, err := inlineCiOpConfig(job, testTargetRepo, tc.configs, testLogger)
			if tc.expectedError && err == nil {
				t.Errorf("Expected inlineCiopConfig() to return an error, none returned")
				return
			}
			if !tc.expectedError {
				if err != nil {
					t.Errorf("Unexpected error returned by inlineCiOpConfig(): %v", err)
					return
				}
				// Semantic equality: the rewritten job must match the fixture exactly.
				if !equality.Semantic.DeepEqual(expectedJob, newJob) {
					t.Errorf("Returned job differs from expected:\n%s", diff.ObjectDiff(expectedJob, newJob))
				}
			}
		})
	}
}
// makeTestingPresubmit builds a single-container ci-operator Presubmit
// fixture with the given name, context and ci-operator arguments, pinned to
// the master branch and carrying the rehearse label.
func makeTestingPresubmit(name, context string, ciopArgs []string) *prowconfig.Presubmit {
	return &prowconfig.Presubmit{
		JobBase: prowconfig.JobBase{
			Name:   name,
			Labels: map[string]string{rehearseLabel: "123"},
			Spec: &v1.PodSpec{
				Containers: []v1.Container{{
					Command: []string{"ci-operator"},
					Args:    ciopArgs,
				}},
			},
		},
		Context:  context,
		Brancher: prowconfig.Brancher{Branches: []string{"^master$"}},
	}
}
// TestMakeRehearsalPresubmit checks the happy path: the rehearsal job gets a
// prefixed name, a rehearse context, and an appended --git-ref argument.
func TestMakeRehearsalPresubmit(t *testing.T) {
	testCases := []struct {
		source   *prowconfig.Presubmit
		pr       int
		expected *prowconfig.Presubmit
	}{{
		source: makeTestingPresubmit("pull-ci-openshift-ci-operator-master-build", "ci/prow/build", []string{"arg", "arg"}),
		pr:     123,
		expected: makeTestingPresubmit(
			"rehearse-123-pull-ci-openshift-ci-operator-master-build",
			"ci/rehearse/openshift/ci-operator/build",
			[]string{"arg", "arg", "--git-ref=openshift/ci-operator@master"}),
	},
	}
	for _, tc := range testCases {
		rehearsal, err := makeRehearsalPresubmit(tc.source, "openshift/ci-operator", tc.pr)
		if err != nil {
			t.Errorf("Unexpected error in makeRehearsalPresubmit: %v", err)
		}
		if !equality.Semantic.DeepEqual(tc.expected, rehearsal) {
			t.Errorf("Expected rehearsal Presubmit differs:\n%s", diff.ObjectDiff(tc.expected, rehearsal))
		}
	}
}
// TestMakeRehearsalPresubmitNegative verifies that makeRehearsalPresubmit
// rejects job shapes it cannot rehearse: each case "cripples" a valid
// fixture in one way and expects an error.
func TestMakeRehearsalPresubmitNegative(t *testing.T) {
	testName := "pull-ci-organization-repo-master-test"
	testContext := "ci/prow/test"
	testArgs := []string{"arg"}
	testRepo := "organization/repo"
	testPrNumber := 321
	testCases := []struct {
		description string
		crippleFunc func(*prowconfig.Presubmit)
	}{{
		description: "job with multiple containers",
		crippleFunc: func(j *prowconfig.Presubmit) {
			j.Spec.Containers = append(j.Spec.Containers, v1.Container{})
		},
	}, {
		description: "job where command is not `ci-operator`",
		crippleFunc: func(j *prowconfig.Presubmit) {
			j.Spec.Containers[0].Command[0] = "not-ci-operator"
		},
	}, {
		description: "ci-operator job already using --git-ref",
		crippleFunc: func(j *prowconfig.Presubmit) {
			j.Spec.Containers[0].Args = append(j.Spec.Containers[0].Args, "--git-ref=organization/repo@master")
		},
	}, {
		description: "jobs running over multiple branches",
		crippleFunc: func(j *prowconfig.Presubmit) {
			j.Brancher.Branches = append(j.Brancher.Branches, "^feature-branch$")
		},
	}, {
		description: "jobs that need additional volumes mounted",
		crippleFunc: func(j *prowconfig.Presubmit) {
			j.Spec.Volumes = []v1.Volume{{Name: "volume"}}
		},
	},
	}
	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			job := makeTestingPresubmit(testName, testContext, testArgs)
			tc.crippleFunc(job)
			_, err := makeRehearsalPresubmit(job, testRepo, testPrNumber)
			if err == nil {
				t.Errorf("Expected makeRehearsalPresubmit to return error")
			}
		})
	}
}
// makeTestingProwJob builds the ProwJob fixture that ExecuteJobs is expected
// to create for a rehearsal presubmit: prow bookkeeping labels, presubmit
// spec with the given refs/args, and Triggered status.
func makeTestingProwJob(name, namespace, jobName, context string, refs *pjapi.Refs, ciopArgs []string) *pjapi.ProwJob {
	return &pjapi.ProwJob{
		TypeMeta: metav1.TypeMeta{Kind: "ProwJob", APIVersion: "prow.k8s.io/v1"},
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels: map[string]string{
				"created-by-prow":       "true",
				"prow.k8s.io/job":       jobName,
				"prow.k8s.io/refs.org":  refs.Org,
				"prow.k8s.io/refs.repo": refs.Repo,
				"prow.k8s.io/type":      "presubmit",
				"prow.k8s.io/refs.pull": strconv.Itoa(refs.Pulls[0].Number),
				rehearseLabel:           strconv.Itoa(refs.Pulls[0].Number),
			},
			Annotations: map[string]string{"prow.k8s.io/job": jobName},
		},
		Spec: pjapi.ProwJobSpec{
			Type:    pjapi.PresubmitJob,
			Job:     jobName,
			Refs:    refs,
			Report:  true,
			Context: context,
			PodSpec: &v1.PodSpec{
				Containers: []v1.Container{{
					Command: []string{"ci-operator"},
					Args:    ciopArgs,
				}},
			},
		},
		Status: pjapi.ProwJobStatus{
			State: pjapi.TriggeredState,
		},
	}
}
// TestExecuteJobs runs ExecuteJobs against a fake prow clientset and checks
// the created ProwJobs. A watch reactor replays every created job back as
// Succeeded so ExecuteJobs' wait loop terminates.
func TestExecuteJobs(t *testing.T) {
	testLogger := logrus.New()

	testPrNumber := 123
	testNamespace := "test-namespace"
	testRepo := "testRepo"
	testOrg := "testOrg"
	testRefs := &pjapi.Refs{
		Org:     testOrg,
		Repo:    testRepo,
		BaseRef: "testBaseRef",
		BaseSHA: "testBaseSHA",
		Pulls:   []pjapi.Pull{{Number: testPrNumber, Author: "testAuthor", SHA: "testPrSHA"}},
	}
	// Fake clientset generates names dynamically; created jobs are normalized
	// to this constant before comparison.
	generatedName := "generatedName"
	rehearseJobContextTemplate := "ci/rehearse/%s/%s"
	targetRepo := "targetOrg/targetRepo"
	anotherTargetRepo := "anotherOrg/anotherRepo"
	testCases := []struct {
		description  string
		jobs         map[string][]prowconfig.Presubmit
		expectedError bool
		expectedJobs []pjapi.ProwJob
	}{{
		description: "two jobs in a single repo",
		jobs: map[string][]prowconfig.Presubmit{targetRepo: {
			*makeTestingPresubmit("job1", "ci/prow/job1", []string{"arg1"}),
			*makeTestingPresubmit("job2", "ci/prow/job2", []string{"arg1"}),
		}},
		expectedJobs: []pjapi.ProwJob{
			*makeTestingProwJob(generatedName,
				testNamespace,
				"rehearse-123-job1",
				fmt.Sprintf(rehearseJobContextTemplate, targetRepo, "job1"),
				testRefs,
				[]string{"arg1", fmt.Sprintf("--git-ref=%s@master", targetRepo)},
			),
			*makeTestingProwJob(generatedName,
				testNamespace,
				"rehearse-123-job2",
				fmt.Sprintf(rehearseJobContextTemplate, targetRepo, "job2"),
				testRefs,
				[]string{"arg1", fmt.Sprintf("--git-ref=%s@master", targetRepo)},
			),
		}},
		{
			description: "two jobs in a separate repos",
			jobs: map[string][]prowconfig.Presubmit{
				targetRepo:        {*makeTestingPresubmit("job1", "ci/prow/job1", []string{"arg1"})},
				anotherTargetRepo: {*makeTestingPresubmit("job2", "ci/prow/job2", []string{"arg1"})},
			},
			expectedJobs: []pjapi.ProwJob{
				*makeTestingProwJob(generatedName,
					testNamespace,
					"rehearse-123-job1",
					fmt.Sprintf(rehearseJobContextTemplate, targetRepo, "job1"),
					testRefs,
					[]string{"arg1", fmt.Sprintf("--git-ref=%s@master", targetRepo)},
				),
				*makeTestingProwJob(generatedName,
					testNamespace,
					"rehearse-123-job2",
					fmt.Sprintf(rehearseJobContextTemplate, anotherTargetRepo, "job2"),
					testRefs,
					[]string{"arg1", fmt.Sprintf("--git-ref=%s@master", anotherTargetRepo)},
				),
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			fakecs := fake.NewSimpleClientset()
			fakeclient := fakecs.ProwV1().ProwJobs(testNamespace)
			watcher, err := fakeclient.Watch(metav1.ListOptions{})
			if err != nil {
				t.Fatalf("Failed to setup watch: %v", err)
			}
			// Replace the default watch with one that replays every created
			// job in a Succeeded state so ExecuteJobs does not block.
			fakecs.Fake.PrependWatchReactor("prowjobs", func(clientgo_testing.Action) (bool, watch.Interface, error) {
				watcher.Stop()
				n := 0
				for _, jobs := range tc.jobs {
					n += len(jobs)
				}
				ret := watch.NewFakeWithChanSize(n, true)
				for event := range watcher.ResultChan() {
					pj := event.Object.(*pjapi.ProwJob).DeepCopy()
					pj.Status.State = pjapi.SuccessState
					ret.Modify(pj)
				}
				return true, ret, nil
			})
			err = ExecuteJobs(tc.jobs, testPrNumber, testRepo, testRefs, true, testLogger, fakeclient)
			if tc.expectedError && err == nil {
				t.Errorf("Expected ExecuteJobs() to return error")
				return
			}
			if !tc.expectedError {
				if err != nil {
					t.Errorf("Expected ExecuteJobs() to not return error, returned %v", err)
					return
				}
				createdJobs, err := fakeclient.List(metav1.ListOptions{})
				if err != nil {
					t.Errorf("Failed to get expected ProwJobs from fake client")
					return
				}
				// Overwrite dynamic struct members to allow comparison
				for i := range createdJobs.Items {
					createdJobs.Items[i].Name = generatedName
					createdJobs.Items[i].Status.StartTime.Reset()
				}
				// Sort to allow comparison
				sort.Slice(tc.expectedJobs, func(a, b int) bool { return tc.expectedJobs[a].Spec.Job < tc.expectedJobs[b].Spec.Job })
				sort.Slice(createdJobs.Items, func(a, b int) bool { return createdJobs.Items[a].Spec.Job < createdJobs.Items[b].Spec.Job })
				if !equality.Semantic.DeepEqual(tc.expectedJobs, createdJobs.Items) {
					t.Errorf("Created ProwJobs differ from expected:\n%s", diff.ObjectDiff(tc.expectedJobs, createdJobs.Items))
				}
			}
		})
	}
}
// TestWaitForJobs feeds canned watch events into waitForJobs and checks the
// aggregate success flag: only the jobs named in pjs are considered, pending
// and triggered states are ignored, and any failure/abort/error among the
// watched set makes the overall result unsuccessful.
func TestWaitForJobs(t *testing.T) {
	pjSuccess0 := pjapi.ProwJob{
		ObjectMeta: metav1.ObjectMeta{Name: "success0"},
		Status:     pjapi.ProwJobStatus{State: pjapi.SuccessState},
	}
	pjSuccess1 := pjapi.ProwJob{
		ObjectMeta: metav1.ObjectMeta{Name: "success1"},
		Status:     pjapi.ProwJobStatus{State: pjapi.SuccessState},
	}
	pjFailure := pjapi.ProwJob{
		ObjectMeta: metav1.ObjectMeta{Name: "failure"},
		Status:     pjapi.ProwJobStatus{State: pjapi.FailureState},
	}
	pjPending := pjapi.ProwJob{
		ObjectMeta: metav1.ObjectMeta{Name: "pending"},
		Status:     pjapi.ProwJobStatus{State: pjapi.PendingState},
	}
	pjAborted := pjapi.ProwJob{
		ObjectMeta: metav1.ObjectMeta{Name: "aborted"},
		Status:     pjapi.ProwJobStatus{State: pjapi.AbortedState},
	}
	pjTriggered := pjapi.ProwJob{
		ObjectMeta: metav1.ObjectMeta{Name: "triggered"},
		Status:     pjapi.ProwJobStatus{State: pjapi.TriggeredState},
	}
	pjError := pjapi.ProwJob{
		ObjectMeta: metav1.ObjectMeta{Name: "error"},
		Status:     pjapi.ProwJobStatus{State: pjapi.ErrorState},
	}
	testCases := []struct {
		id      string
		pjs     sets.String
		events  []*pjapi.ProwJob
		success bool
		err     error
	}{{
		id:      "empty",
		success: true,
	}, {
		id:      "one successful job",
		success: true,
		pjs:     sets.NewString("success0"),
		events:  []*pjapi.ProwJob{&pjSuccess0},
	}, {
		id:  "mixed states",
		pjs: sets.NewString("failure", "success0", "aborted", "error"),
		events: []*pjapi.ProwJob{
			&pjFailure, &pjPending, &pjSuccess0,
			&pjTriggered, &pjAborted, &pjError,
		},
	}, {
		id:      "ignored states",
		success: true,
		pjs:     sets.NewString("success0"),
		events:  []*pjapi.ProwJob{&pjPending, &pjSuccess0, &pjTriggered},
	}, {
		id:      "repeated events",
		success: true,
		pjs:     sets.NewString("success0", "success1"),
		events:  []*pjapi.ProwJob{&pjSuccess0, &pjSuccess0, &pjSuccess1},
	}, {
		id:  "repeated events with failure",
		pjs: sets.NewString("success0", "success1", "failure"),
		events: []*pjapi.ProwJob{
			&pjSuccess0, &pjSuccess0,
			&pjSuccess1, &pjFailure,
		},
	}, {
		id:      "not watched",
		success: true,
		pjs:     sets.NewString("success1"),
		events:  []*pjapi.ProwJob{&pjSuccess0, &pjFailure, &pjSuccess1},
	}, {
		id:     "not watched failure",
		pjs:    sets.NewString("failure"),
		events: []*pjapi.ProwJob{&pjSuccess0, &pjFailure},
	}}
	for _, tc := range testCases {
		t.Run(tc.id, func(t *testing.T) {
			// Preload all events into a fake watch so waitForJobs sees them
			// immediately.
			w := watch.NewFakeWithChanSize(len(tc.events), true)
			for _, j := range tc.events {
				w.Modify(j)
			}
			cs := fake.NewSimpleClientset()
			cs.Fake.PrependWatchReactor("prowjobs", func(clientgo_testing.Action) (bool, watch.Interface, error) {
				return true, w, nil
			})
			success, err := waitForJobs(tc.pjs, "", cs.ProwV1().ProwJobs("test"), logrus.New())
			if err != tc.err {
				t.Fatalf("want `err` == %v, got %v", tc.err, err)
			}
			if success != tc.success {
				t.Fatalf("want `success` == %v, got %v", tc.success, success)
			}
		})
	}
}
// TestWaitForJobsRetries checks that waitForJobs re-establishes its watch:
// the first watch closes immediately without events, and only the second one
// delivers the successful job.
func TestWaitForJobsRetries(t *testing.T) {
	emptyWatch := watch.NewEmptyWatch()
	successWatch := watch.NewFakeWithChanSize(1, true)
	successWatch.Modify(&pjapi.ProwJob{
		ObjectMeta: metav1.ObjectMeta{Name: "j"},
		Status:     pjapi.ProwJobStatus{State: pjapi.SuccessState},
	})
	watchers := []watch.Interface{emptyWatch, successWatch}
	cs := fake.NewSimpleClientset()
	cs.Fake.PrependWatchReactor("prowjobs", func(clientgo_testing.Action) (bool, watch.Interface, error) {
		// Hand out the watches in order, one per (re)connect.
		next := watchers[0]
		watchers = watchers[1:]
		return true, next, nil
	})
	success, err := waitForJobs(sets.NewString("j"), "", cs.ProwV1().ProwJobs("test"), logrus.New())
	if err != nil {
		t.Fatal(err)
	}
	if !success {
		t.Fail()
	}
}
|
package external
import (
"bytes"
"encoding/json"
"errors"
wrap "github.com/pkg/errors"
"net/http"
"os"
)
// Client wraps the standard *http.Client so request helpers (see Request)
// can be attached; all http.Client methods are promoted via embedding.
type Client struct {
	*http.Client
}
// Request POSTs the JSON-encoded PlnRequest to the URL in the PLN_URL
// environment variable and returns the raw response.
// The caller is responsible for closing the response body.
func (c *Client) Request(r *PlnRequest) (*http.Response, error) {
	reqBytes := new(bytes.Buffer)
	err := json.NewEncoder(reqBytes).Encode(r)
	if err != nil {
		// Bug fix: the message previously said "response" for a
		// request-encoding failure and misspelled "encode".
		errInvalidEncode := errors.New("was not possible to encode request body")
		return nil, wrap.Wrap(err, errInvalidEncode.Error())
	}
	request, err := http.NewRequest(
		http.MethodPost,
		os.Getenv("PLN_URL"),
		reqBytes,
	)
	if err != nil {
		// Bug fix: the original set a header on `request` BEFORE checking
		// this error, dereferencing a nil *http.Request when NewRequest fails.
		return nil, err
	}
	request.Header.Set("Accept", "application/json; charset=utf-8")
	resp, err := c.Client.Do(request)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// NewClient returns a Client backed by a default http.Client.
func NewClient() *Client {
	c := &Client{Client: &http.Client{}}
	return c
}
|
// findMinDifference returns the smallest minute distance between any two of
// the given "HH:MM" time points on a circular 24-hour clock.
// A duplicated time point means the answer is 0.
func findMinDifference(timePoints []string) int {
	// seen[m] counts how many time points land on minute m of the day.
	seen := make([]int, 1440)
	for _, tp := range timePoints {
		parts := strings.Split(tp, ":")
		hh, _ := strconv.Atoi(parts[0])
		mm, _ := strconv.Atoi(parts[1])
		minute := hh*60 + mm
		seen[minute]++
		if seen[minute] > 1 {
			return 0
		}
	}
	// Distance of the earliest point from midnight plus the latest point's
	// distance to midnight gives the wrap-around gap.
	first, tail := 0, 0
	for _, c := range seen {
		if c != 0 {
			break
		}
		first++
	}
	for j := 1439; j >= 0; j-- {
		tail++
		if seen[j] != 0 {
			break
		}
	}
	best := first + tail
	// Compare each adjacent pair of occupied minutes against the wrap gap.
	prev := -1
	for i, c := range seen {
		if c == 1 {
			if prev != -1 && i-prev < best {
				best = i - prev
			}
			prev = i
		}
	}
	return best
}
|
/*
* Tencent is pleased to support the open source community by making Blueking Container Service available.,
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under,
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"flag"
"fmt"
"os"
"strconv"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"github.com/Tencent/bk-bcs/bcs-common/common/blog"
"github.com/Tencent/bk-bcs/bcs-common/common/http/httpserver"
netservicev1 "github.com/Tencent/bk-bcs/bcs-runtime/bcs-k8s/bcs-component/bcs-netservice-controller/api/v1"
"github.com/Tencent/bk-bcs/bcs-runtime/bcs-k8s/bcs-component/bcs-netservice-controller/controllers"
"github.com/Tencent/bk-bcs/bcs-runtime/bcs-k8s/bcs-component/bcs-netservice-controller/internal/httpsvr"
"github.com/Tencent/bk-bcs/bcs-runtime/bcs-k8s/bcs-component/bcs-netservice-controller/internal/option"
//+kubebuilder:scaffold:imports
)
var (
	// scheme collects every API type the manager's clients can decode.
	scheme = runtime.NewScheme()
	// setupLog is the logger used during process bootstrap.
	setupLog = ctrl.Log.WithName("setup")
)
// init registers the client-go builtin types and the netservice CRD types
// with the shared scheme before main runs.
func init() {
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(netservicev1.AddToScheme(scheme))
	//+kubebuilder:scaffold:scheme
}
// main wires up the bcs-netservice controller: it parses flags, initializes
// logging, starts a controller-runtime manager with the pool and IP-claim
// reconcilers plus health probes, launches the auxiliary HTTP server, and
// then blocks in mgr.Start until a termination signal.
func main() {
	opts := &option.ControllerOption{}
	var verbosity int
	// Serving/network flags.
	flag.StringVar(&opts.Address, "address", "127.0.0.1", "address for controller")
	flag.IntVar(&opts.ProbePort, "probe_port", 8082, "probe port for controller")
	flag.IntVar(&opts.MetricPort, "metric_port", 8081, "metric port for controller")
	flag.IntVar(&opts.Port, "port", 8080, "port for controller")
	flag.BoolVar(&opts.EnableLeaderElect, "leader-elect", true, "enable leader elect for controller")
	// Logging flags (blog-style, glog-compatible names).
	flag.StringVar(&opts.LogDir, "log_dir", "./logs", "If non-empty, write log files in this directory")
	flag.Uint64Var(&opts.LogMaxSize, "log_max_size", 500, "Max size (MB) per log file.")
	flag.IntVar(&opts.LogMaxNum, "log_max_num", 10, "Max num of log file.")
	flag.BoolVar(&opts.ToStdErr, "logtostderr", false, "log to standard error instead of files")
	flag.BoolVar(&opts.AlsoToStdErr, "alsologtostderr", false, "log to standard error as well as files")
	flag.IntVar(&verbosity, "v", 0, "log level for V logs")
	flag.StringVar(&opts.StdErrThreshold, "stderrthreshold", "2", "logs at or above this threshold go to stderr")
	flag.StringVar(&opts.VModule, "vmodule", "", "comma-separated list of pattern=N settings for file-filtered logging")
	flag.StringVar(&opts.TraceLocation, "log_backtrace_at", "", "when logging hits line file:N, emit a stack trace")
	flag.UintVar(&opts.HttpServerPort, "http_svr_port", 8088, "port for controller http server")
	flag.Parse()
	opts.Verbosity = int32(verbosity)
	// Logging must be configured before the manager so startup errors land
	// in the right sink.
	ctrl.SetLogger(zap.New(zap.UseDevMode(false)))
	blog.InitLogs(opts.LogConfig)
	defer blog.CloseLogs()
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Scheme:                  scheme,
		MetricsBindAddress:      opts.Address + ":" + strconv.Itoa(opts.MetricPort),
		Port:                    opts.Port,
		HealthProbeBindAddress:  opts.Address + ":" + strconv.Itoa(opts.ProbePort),
		LeaderElection:          opts.EnableLeaderElect,
		LeaderElectionID:        "ca387ddc.netservice.bkbcs.tencent.com",
		LeaderElectionNamespace: "bcs-system",
	})
	if err != nil {
		setupLog.Error(err, "unable to start manager")
		os.Exit(1)
	}
	// Reconciler for BCSNetPool resources.
	if err = (&controllers.BCSNetPoolReconciler{
		Client:   mgr.GetClient(),
		Scheme:   mgr.GetScheme(),
		IPFilter: controllers.NewIPFilter(),
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "BCSNetPool")
		os.Exit(1)
	}
	if err = (&netservicev1.BCSNetPool{}).SetupWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "BCSNetPool")
		os.Exit(1)
	}
	// Reconciler for BCSNetIPClaim resources.
	if err = (&controllers.BCSNetIPClaimReconciler{
		Client:   mgr.GetClient(),
		Scheme:   mgr.GetScheme(),
		Recorder: mgr.GetEventRecorderFor("bcs-netservice-controller"),
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "BCSNetPool")
		os.Exit(1)
	}
	//+kubebuilder:scaffold:builder
	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up health check")
		os.Exit(1)
	}
	if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up ready check")
		os.Exit(1)
	}
	// NOTE(review): initHttpServer blocks in ListenAndServeMux on success, so
	// it appears to be expected to serve for the process lifetime — confirm.
	if err := initHttpServer(opts, mgr.GetClient()); err != nil {
		blog.Errorf("init http server failed, err %v", err)
		os.Exit(1)
	}
	setupLog.Info("starting manager")
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		setupLog.Error(err, "problem running manager")
		os.Exit(1)
	}
}
// initHttpServer starts the netservice-controller HTTP server, which exposes
// the IP-address allocation and release endpoints under /netservicecontroller.
// It returns an error if the listener fails.
func initHttpServer(op *option.ControllerOption, client client.Client) error {
	srv := httpserver.NewHttpServer(op.HttpServerPort, op.Address, "")
	if op.Conf.ServCert.IsSSL {
		srv.SetSsl(op.Conf.ServCert.CAFile, op.Conf.ServCert.CertFile,
			op.Conf.ServCert.KeyFile, op.Conf.ServCert.CertPasswd)
	}
	srv.SetInsecureServer(op.Address, op.HttpServerPort)
	webSvc := srv.NewWebService("/netservicecontroller", nil)
	httpsvr.InitRouters(webSvc, &httpsvr.HttpServerClient{K8SClient: client})
	srv.GetRouter().Handle("/netservicecontroller/{sub_path:.*}", srv.GetWebContainer())
	blog.Infof("Starting http server on %s:%d", op.Address, op.HttpServerPort)
	if err := srv.ListenAndServeMux(op.Conf.VerifyClientTLS); err != nil {
		return fmt.Errorf("http ListenAndServe error %s", err.Error())
	}
	return nil
}
|
package vault
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/cinus-ue/securekit/common/bytesutil"
"github.com/cinus-ue/securekit/common/fileutil"
"github.com/cinus-ue/securekit/common/pathutil"
"github.com/cinus-ue/securekit/common/strutil"
"github.com/cinus-ue/securekit/internal/locker"
"github.com/cinus-ue/securekit/internal/obfuscator"
)
// MaxReadSize caps how much file data ReadFileData will load into memory (50 MiB).
const MaxReadSize = bytesutil.MEBI * 50
// Vault decrypts obfuscated file names and locked files using one password.
type Vault struct {
	password []byte
}
// DirItem describes one directory entry in a vault listing. MappingName is
// the decrypted original name, or empty when the entry is not obfuscated.
type DirItem struct {
	Name string `json:"name"`
	MappingName string `json:"mappingName"`
	Size string `json:"size"`
	IsDir bool `json:"isDir"`
}
// MediaItem pairs a media file with relative view/thumbnail URLs.
type MediaItem struct {
	Name string
	MappingName string
	Url string
	ThumbnailUrl string
}
// FilaData is a file's display name plus its (possibly decrypted) contents.
// NOTE(review): the name looks like a typo for "FileData"; renaming would
// break callers, so it is kept as-is.
type FilaData struct {
	Name string
	Data []byte
}
// ListDirItems returns one DirItem per entry in path. When a name-mapping
// file exists in the directory, each mapped entry's original name is
// decrypted with the vault password; mapped entries whose version marker is
// not recognized are omitted from the result.
func (v *Vault) ListDirItems(path string) ([]DirItem, error) {
	entries, err := os.ReadDir(path)
	if err != nil {
		return nil, err
	}
	var items []DirItem
	mappingFile := filepath.Join(path, obfuscator.NameMapping)
	if !pathutil.Exists(mappingFile) {
		// No mapping file: expose every entry under its on-disk name.
		for _, entry := range entries {
			items = append(items, DirItem{entry.Name(), strutil.Empty, itemSize(entry), entry.IsDir()})
		}
		return items, nil
	}
	mapping, err := obfuscator.UnmarshalNameMapping(mappingFile)
	if err != nil {
		return nil, err
	}
	for _, entry := range entries {
		value := mapping[entry.Name()]
		if value == strutil.Empty {
			// Entry not present in the mapping: keep its on-disk name.
			items = append(items, DirItem{entry.Name(), strutil.Empty, itemSize(entry), entry.IsDir()})
			continue
		}
		version, ok := obfuscator.LookupVersionByName(entry.Name())
		if !ok {
			// Mapped entry with an unrecognized version is skipped, matching
			// the lookup-gated branch of the original implementation.
			continue
		}
		name, err := version.DecryptMappingData(value, v.password)
		if err != nil {
			return nil, err
		}
		items = append(items, DirItem{entry.Name(), name, itemSize(entry), entry.IsDir()})
	}
	return items, nil
}
// ReadFileData loads the file at path into memory (rejecting files larger
// than MaxReadSize), transparently decrypting locked files and restoring
// their original display name.
func (v *Vault) ReadFileData(path string) (*FilaData, error) {
	size, err := fileutil.FileSize(path)
	if err != nil {
		return nil, err
	}
	if size > MaxReadSize {
		return nil, errors.New("failed, file too large")
	}
	filename, locked, err := v.isLocked(path)
	if err != nil {
		return nil, err
	}
	if !locked {
		// Plain file: return the raw bytes under the resolved name.
		data, err := ioutil.ReadFile(path)
		if err != nil {
			return nil, err
		}
		return &FilaData{filename, data}, nil
	}
	data, err := v.readLockedFile(filename, path)
	if err != nil {
		return nil, err
	}
	// Strip the locker extension so callers see the original file name.
	return &FilaData{locker.RemoveExtension(filename), data}, nil
}
// ListMediaItems lists the files in path as media items with relative view
// and thumbnail URLs. When extensions is non-empty, a file is included only
// if its name or its mapped name (both with any locker extension removed)
// matches one of the given extensions.
func (v *Vault) ListMediaItems(path string, extensions []string) ([]MediaItem, error) {
	items, err := v.ListDirItems(path)
	if err != nil {
		return nil, err
	}
	var mediaItems []MediaItem
	for _, item := range items {
		if item.IsDir {
			continue
		}
		if len(extensions) > 0 &&
			!isValidFormat(locker.RemoveExtension(item.Name), extensions) &&
			!isValidFormat(locker.RemoveExtension(item.MappingName), extensions) {
			continue
		}
		mediaItems = append(mediaItems, MediaItem{
			item.Name, item.MappingName,
			fmt.Sprintf("view?path=%s/%s", path, item.Name),
			fmt.Sprintf("thumbnail?path=%s/%s", path, item.Name),
		})
	}
	return mediaItems, nil
}
// readLockedFile decrypts a locked file using the locker version encoded in
// the filename's extension.
func (v *Vault) readLockedFile(filename, filepath string) ([]byte, error) {
	version, ok := locker.LookupVersionByExtension(pathutil.FileExt(filename))
	if !ok {
		return nil, errors.New("unrecognized file version")
	}
	return version.ReadLockedFile(filepath, v.password)
}
// isLocked resolves the display name of the file at path and reports whether
// it is a locked (encrypted) file. If a name-mapping file exists in the same
// directory and contains the file, the mapped name is decrypted with the
// vault password and used in place of the on-disk name.
func (v *Vault) isLocked(path string) (string, bool, error) {
	filename := pathutil.BaseName(path)
	mappingFile := filepath.Join(strings.TrimSuffix(path, filename), obfuscator.NameMapping)
	if pathutil.Exists(mappingFile) {
		mapping, err := obfuscator.UnmarshalNameMapping(mappingFile)
		if err != nil {
			return "", false, err
		}
		if value := mapping[filename]; value != strutil.Empty {
			if version, ok := obfuscator.LookupVersionByName(filename); ok {
				// Reassigns filename to the decrypted original name; the
				// lock check below then runs on the decrypted name.
				if filename, err = version.DecryptMappingData(value, v.password); err != nil {
					return "", false, err
				}
			}
		}
	}
	return filename, locker.IsLocked(filename), nil
}
// itemSize renders a directory entry's size in binary units. Directories and
// entries whose metadata cannot be read yield the empty string.
func itemSize(entry os.DirEntry) string {
	if entry.IsDir() {
		// Consistency fix: use strutil.Empty like the error branch below;
		// this branch previously returned a literal "" for the same meaning.
		return strutil.Empty
	}
	info, err := entry.Info()
	if err != nil {
		return strutil.Empty
	}
	return bytesutil.BinaryFormat(info.Size())
}
// isValidFormat reports whether filename (compared case-insensitively) ends
// with any of the given extensions. Extensions are expected in uppercase,
// e.g. "JPG".
func isValidFormat(filename string, extensions []string) bool {
	// Hoisted out of the loop: the original re-uppercased the filename once
	// per extension.
	upper := strings.ToUpper(filename)
	for _, ext := range extensions {
		if strings.HasSuffix(upper, ext) {
			return true
		}
	}
	return false
}
|
package Router
import (
	"database/sql"
	"fmt"
	"html/template"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"path/filepath"
	"strconv"
	"strings"

	_ "github.com/go-sql-driver/mysql"
)
var (
	// cur_username / cur_password hold the credentials of the most recent
	// successful login. NOTE(review): this is a single global "session"
	// shared by all clients, stored in plain text — confirm this is intended.
	cur_username string
	cur_password string
)
// html is the page template used to embed an article's content file.
var html = `
<!DOCTYPE html>
<html lang="zh-ch">
<head>
<meta charset="utf-8">
<title>主页</title>
</head>
<body>
<embed src={{.Addr}} width="800" height="600" ></embed>
</body>
</html>
`
// Data carries the embed source address into the html template.
type Data struct{
	Addr string
}
// Login renders the login form on GET. On POST it validates the submitted
// credentials against the user table: success stores them in the package
// globals and redirects to /upload, failure redirects to /register.
func Login(w http.ResponseWriter, r *http.Request) {
	if r.Method == "GET" {
		t, err := template.ParseFiles("Resource/login.gtpl")
		if err != nil {
			// Bug fix: the error was discarded and Execute was then called
			// on a nil template, which panics. Fail with a 500 instead.
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		t.Execute(w, nil)
		return
	}
	if r.Method == "POST" {
		r.ParseForm()
		in_username := r.Form.Get("username")
		in_password := r.Form.Get("password")
		if check_user(in_username, in_password) {
			cur_username = in_username
			cur_password = in_password
			http.Redirect(w, r, "/upload", http.StatusFound)
			return
		}
		http.Redirect(w, r, "/register", http.StatusFound)
	}
}
// Upload serves the upload form on GET (requiring a valid login) and stores
// the posted file under ./test/ for any other method.
func Upload(w http.ResponseWriter, r *http.Request) {
	if r.Method == "GET" {
		if !check_status() {
			http.Redirect(w, r, "/login", http.StatusFound)
			// Bug fix: the original fell through after the redirect and
			// rendered the upload page to unauthenticated users.
			return
		}
		t, err := template.ParseFiles("Resource/upload.gtpl")
		if err != nil {
			// Bug fix: a discarded parse error led to Execute on nil.
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		t.Execute(w, nil)
		return
	}
	r.ParseMultipartForm(32 << 20)
	file, handler, err := r.FormFile("Resource/uploadfile")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer file.Close()
	fmt.Fprintf(w, "%v", handler.Header)
	// Security fix: keep only the base name of the client-supplied filename
	// so a crafted name like "../../x" cannot escape the ./test/ directory.
	name := filepath.Base(handler.Filename)
	f, err := os.OpenFile("./test/"+name, os.O_WRONLY|os.O_CREATE, 0666)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	io.Copy(f, file)
}
// Register renders the registration form on GET. On POST it inserts the
// submitted credentials into the user table (redirecting straight to /login
// if the pair already exists) and then redirects to /login.
func Register(w http.ResponseWriter, r *http.Request) {
	if r.Method == "GET" {
		t, err := template.ParseFiles("Resource/register.gtpl")
		if err != nil {
			// Bug fix: a discarded parse error led to Execute on nil.
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		t.Execute(w, nil)
		return
	}
	r.ParseForm()
	in_username := r.Form.Get("username")
	in_password := r.Form.Get("password")
	fmt.Println("Insert: ", in_username, in_password)
	if check_user(in_username, in_password) {
		http.Redirect(w, r, "/login", http.StatusFound)
		return
	}
	db, err := sql.Open("mysql", "root:baby942.@tcp(localhost)/test?charset=utf8")
	// Bug fix: the Open error was silently overwritten by the Prepare error,
	// and the connection leaked whenever checkErr panicked below.
	checkErr(err)
	defer db.Close()
	stmt, err := db.Prepare("INSERT user SET username=?,password=?")
	checkErr(err)
	res, err := stmt.Exec(in_username, in_password)
	checkErr(err)
	id, err := res.LastInsertId()
	checkErr(err)
	fmt.Println("Sign in ", id)
	http.Redirect(w, r, "/login", http.StatusFound)
}
// ArticlesByID serves the article whose numeric id appears in the URL path
// ("/article/{id}/"): it renders the html template into a temporary
// test_2.gtpl file pointing at the article's content file and serves that.
// NOTE(review): authentication was commented out here in the original;
// confirm whether this route is meant to be public.
func ArticlesByID(w http.ResponseWriter, r *http.Request) {
	articleId := strings.Split(r.URL.Path, "/")[2] // "/article/{id}/"
	id, err := strconv.Atoi(articleId)
	checkErr(err)
	db, err := sql.Open("mysql", "root:baby942.@tcp(localhost)/test?charset=utf8")
	checkErr(err)
	// Bug fix: the original only closed db when a match was found (then kept
	// iterating over the closed connection) and never closed rows.
	defer db.Close()
	rows, err := db.Query("SELECT * FROM article")
	checkErr(err)
	defer rows.Close()
	for rows.Next() {
		var article_id int
		var article_name, article_content string
		err = rows.Scan(&article_id, &article_name, &article_content)
		checkErr(err)
		if id != article_id {
			continue
		}
		data := Data{Addr: "./" + article_content}
		fmt.Println(data.Addr)
		t, err := template.New("Products").Parse(html)
		checkErr(err)
		f, err := os.OpenFile("test_2.gtpl", os.O_WRONLY|os.O_CREATE, 0666)
		// Bug fix: the OpenFile error was unchecked, so a failure made the
		// deferred f.Close() dereference a nil file.
		checkErr(err)
		err = t.Execute(f, data)
		f.Close()
		checkErr(err)
		t, _ = template.ParseFiles("test_2.gtpl")
		t.Execute(w, nil)
		// Bug fix: stop after serving the matching article instead of
		// continuing to scan the remaining rows.
		return
	}
}
// Articles renders the article-selection form on GET (login required) and,
// on POST, redirects to /article/{id} if the submitted id exists in the
// article table.
func Articles(w http.ResponseWriter, r *http.Request) {
	if r.Method == "GET" {
		if !check_status() {
			http.Redirect(w, r, "/login", http.StatusFound)
			return
		}
		t, _ := template.ParseFiles("Resource/articles.gtpl")
		t.Execute(w, nil)
		return
	}
	r.ParseForm()
	articles_id := r.Form.Get("article_id")
	id, err := strconv.Atoi(articles_id)
	checkErr(err)
	db, err := sql.Open("mysql", "root:baby942.@tcp(localhost)/test?charset=utf8")
	checkErr(err)
	// Bug fix: the original closed db only when a match was found, leaking
	// the connection (and the rows cursor) whenever the id did not exist.
	defer db.Close()
	rows, err := db.Query("SELECT * FROM article")
	checkErr(err)
	defer rows.Close()
	for rows.Next() {
		var article_id int
		var article_name, article_content string
		err = rows.Scan(&article_id, &article_name, &article_content)
		checkErr(err)
		if id == article_id {
			http.Redirect(w, r, "/article/"+articles_id, http.StatusFound)
			return
		}
	}
}
// Skip serves the static API description file resource/api.json as JSON.
func Skip(w http.ResponseWriter, r *http.Request) {
	str, err := ioutil.ReadFile("resource/api.json")
	if err != nil {
		// Bug fix: the read error was ignored and an empty 200 was served.
		http.Error(w, "api description unavailable", http.StatusInternalServerError)
		return
	}
	// Bug fix: headers must be set BEFORE WriteHeader (the original set the
	// header afterwards, which is a no-op) and the header name was the
	// invalid "Content_Type".
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write(str)
}
// check_user reports whether the (username, password) pair exists in the
// user table by scanning every row, mirroring the original schema usage.
// NOTE(review): passwords are stored and compared in plain text and the DSN
// embeds a root password — both should be addressed; kept to preserve the
// existing behavior and schema.
func check_user(in_username, in_password string) bool {
	db, err := sql.Open("mysql", "root:baby942.@tcp(localhost)/test?charset=utf8")
	checkErr(err)
	// Bug fix: close the connection and cursor on every path; the original
	// leaked both whenever checkErr panicked mid-scan, and never closed rows.
	defer db.Close()
	rows, err := db.Query("SELECT * FROM user")
	checkErr(err)
	defer rows.Close()
	for rows.Next() {
		var username string
		var password string
		err = rows.Scan(&username, &password)
		checkErr(err)
		if len(username) != 0 && len(password) != 0 && in_username == username && in_password == password {
			return true
		}
	}
	return false
}
// check_status reports whether the globally stored login credentials are
// still valid by re-checking them against the database.
func check_status() bool {
	return check_user(cur_username, cur_password)
}
// checkErr panics on any non-nil error.
// NOTE(review): panicking inside HTTP handlers takes down the request with a
// stack trace; consider returning 500s instead.
func checkErr(err error) {
	if err != nil {
		panic(err)
	}
}
package server
import (
"encoding/json"
"fmt"
"chatserver/pkg/domain"
"chatserver/pkg/usecase"
"github.com/tokopedia/tdk/go/app/http"
"github.com/tokopedia/tdk/go/log"
)
// HttpService groups the chat server's HTTP route handlers; it carries no
// state of its own.
type HttpService struct {
}
// NewHttpServer returns a ready-to-use HttpService.
func NewHttpServer() HttpService {
	return HttpService{}
}
// RegisterHandler attaches every chat endpoint to the given router.
func (s HttpService) RegisterHandler(r *http.Router) {
	r.HandleFunc("/", index, "GET")
	r.HandleFunc("/new_order", handleNewOrder, "POST")
	r.HandleFunc("/get_messages/{username}", handleGetMessage, "GET")
	r.HandleFunc("/post_messages", handlePostMessage, "POST")
	r.HandleFunc("/rooms", handleGetRooms, "GET")
	r.HandleFunc("/current_rooms", handleGetCurrentRooms, "GET")
	r.HandleFunc("/rooms/join", handleJoinRooms, "POST")
}
// handleGetMessage returns the messages for the user named in the URL.
// Usecase errors are logged and serialized into the response rather than
// propagated, so the handler itself always returns nil.
func handleGetMessage(ctx http.TdkContext) error {
	username := ctx.Vars()["username"]
	messages, err := chatUsecase.GetMessages(username)
	if err != nil {
		log.Error(err.Error())
		ctx.JSON(err.Error())
		return nil
	}
	ctx.JSON(messages)
	return nil
}
// handlePostMessage decodes a Message from the request body, forwards it to
// the chat usecase, and echoes the message back as JSON.
func handlePostMessage(ctx http.TdkContext) error {
	var message domain.Message
	if err := json.Unmarshal(ctx.Body(), &message); err != nil {
		return err
	}
	if err := chatUsecase.SendMessage(&message); err != nil {
		return err
	}
	ctx.JSON(&message)
	return nil
}
// handleGetRooms writes the full room list as JSON, propagating any usecase
// error to the framework.
func handleGetRooms(ctx http.TdkContext) error {
	rooms, err := chatUsecase.GetAllRooms()
	if err != nil {
		return err
	}
	ctx.JSON(rooms)
	return nil
}
// handleGetCurrentRooms is an unimplemented stub for GET /current_rooms.
func handleGetCurrentRooms(ctx http.TdkContext) error {
	return nil
}
// handleJoinRooms is an unimplemented stub for POST /rooms/join.
func handleJoinRooms(ctx http.TdkContext) error {
	return nil
}
// dummyFunc is an unused placeholder handler.
func dummyFunc(ctx http.TdkContext) error {
	return nil
}
// index writes a plain greeting for the root route.
func index(ctx http.TdkContext) error {
	ctx.Writer().Write([]byte("Hello world"))
	return nil
}
// handleNewOrder decodes an Order from the request body, submits it through
// the order usecase, and writes the created invoice id to the response.
func handleNewOrder(ctx http.TdkContext) error {
	var order usecase.Order
	if err := json.Unmarshal(ctx.Body(), &order); err != nil {
		return err
	}
	invoice, err := orderUsecase.PutNewOrder(order)
	if err != nil {
		log.Error(err)
		return err
	}
	ctx.Write([]byte(fmt.Sprintf("invoice created: %s", invoice)))
	return nil
}
|
package main
import (
"fmt"
"reflect"
)
// Person is a small demo type with value-receiver methods used to exercise
// the reflect package in main.
type Person struct {
	name string
	age  int
}
// SayBye returns the person's name.
func (p Person) SayBye() string {
	return p.name
}
// SayHello returns a fixed two-word greeting.
func (p Person) SayHello() (string, string) {
	return "Hello", "world"
}
// Say echoes word and returns "ok" as the second value.
func (p Person) Say(word string) (string, string) {
	return word, "ok"
}
// SayInterface is satisfied by any type with Person's Say signature.
type SayInterface interface {
	Say(string2 string) (string, string)
}
// main demonstrates reflection: checking interface satisfaction with
// Type.Implements, enumerating methods, and invoking them via Value.Call.
func main() {
	p := &Person{"aaaa", 27}
	t := reflect.TypeOf(p)
	//dst := (*SayInterface)(nil)
	// Obtain the interface's reflect.Type via a nil pointer to it.
	var dst *SayInterface
	dstType := reflect.TypeOf(dst).Elem()
	if t.Implements(dstType) {
		fmt.Println("p implements SayInterface")
	}
	v := reflect.ValueOf(p)
	// Methods are indexed in alphabetical order.
	// NOTE(review): the loop starts at 1, so the method at index 0 is never
	// invoked here — confirm whether skipping it is intentional.
	for i := 1; i < v.NumMethod(); i++ {
		fmt.Printf("调用第 %d 个方法:%v ,调用结果:%v\n",
			i+1,
			t.Method(i).Name,
			v.Method(i).Call(nil))
	}
	fmt.Println(v.MethodByName("SayHello").Call(nil))
	// Calling a method that takes arguments: wrap each in a reflect.Value.
	name := reflect.ValueOf("word test")
	input := [1]reflect.Value{name}
	//var input []reflect.Value
	//input = append(input, name)
	fmt.Println(v.MethodByName("Say").Call(input[:]))
}
|
package main
import "fmt"
// main demonstrates Go constant declarations: typed, untyped (inferred), and
// multi-constant forms, plus the cases that fail to compile.
func main() {
	// With explicit types.
	const age int = 10
	const name string = "sky"
	fmt.Println("타입有 : ", age, name)
	/* compile errors:
	const score int // a constant cannot be declared without a value
	age = 20 // without a type, assignment still requires a const declaration
	name = "Hippo" // same: constants cannot be reassigned
	*/
	// Without explicit types: the type is inferred from the assigned value.
	const height = 190 // inferred as int
	const nickname = "Mans" // inferred as string
	/* compile error:
	const address // neither a type nor a value: not allowed
	*/
	fmt.Println("타입無 : ", height, nickname)
	/* multiple constants in one declaration:
	const name1, name2 type = value1, value2
	const name1, name2 = value1, value2
	*/
	const x, y int = 30, 50
	// NOTE(review): 010 is an octal literal and prints as 8 — confirm the
	// leading zero is intentional for this phone-number example.
	const email, phone = "hello@naver", 010
	fmt.Println("(1) double const : ", x, y)
	fmt.Println("(2) double const : ", email, phone)
}
|
package main
import (
"fmt"
"strconv"
"strings"
)
// Find the two values that sum to 2020, multiply them and return the result
const targetSum = 2020
// main solves both puzzle parts against the package-level `input` blob
// (defined elsewhere in the package), one number per line.
func main() {
	data := strings.Split(input, "\n")
	fmt.Println("Ans 1:", sum(data))
	fmt.Println("Ans 2:", sum3(data))
}
// sum returns the product of two distinct entries that add up to targetSum,
// or -1 if no such pair exists. A non-numeric line is reported and yields -1.
//
// Fixes over the original: entries are parsed once instead of O(n^2) times,
// the scan is O(n) using a seen-set, and an entry can no longer be paired
// with itself (the original returned n*n for a lone entry with 2n == targetSum).
func sum(data []string) int {
	nums := make([]int, 0, len(data))
	for _, s := range data {
		n, err := strconv.Atoi(s)
		if err != nil {
			fmt.Printf("Encountered err: %v", err)
			return -1
		}
		nums = append(nums, n)
	}
	// Each value is checked against previously seen values only, so the two
	// factors are always distinct entries.
	seen := make(map[int]bool, len(nums))
	for _, n := range nums {
		if seen[targetSum-n] {
			return n * (targetSum - n)
		}
		seen[n] = true
	}
	return -1
}
// sum3 returns the product of three entries at distinct positions that add
// up to targetSum, or -1 if there is no such triple.
//
// Fixes over the original: each line is parsed once instead of O(n^3) times,
// and indices are forced distinct (i < j < k), so an entry can no longer be
// counted two or three times.
func sum3(data []string) int {
	nums := make([]int, len(data))
	for i, s := range data {
		// The original silently ignored parse errors (a bad line becomes 0);
		// that behavior is preserved.
		nums[i], _ = strconv.Atoi(s)
	}
	for i := 0; i < len(nums); i++ {
		for j := i + 1; j < len(nums); j++ {
			for k := j + 1; k < len(nums); k++ {
				if nums[i]+nums[j]+nums[k] == targetSum {
					return nums[i] * nums[j] * nums[k]
				}
			}
		}
	}
	return -1
}
|
package main
import (
"fmt"
"github.com/gorilla/mux"
"net/http"
"os"
. "reunion/announcement"
"reunion/announcement/rss"
"reunion/announcement/specy"
"reunion/authentication"
. "reunion/compression"
"reunion/configuration"
"reunion/home"
"reunion/minify"
"reunion/websocket"
)
// main boots the reunion web app: it minifies the front-end assets, mounts
// static/photo file servers and misc. top-level files, registers all page,
// admin, and web-service routes on a gorilla/mux router, and serves on the
// port given by the PORT environment variable.
func main() {
	configuration.NewInstance()
	// Minify front-end assets once at startup; the -min handlers below serve
	// the results.
	minify.MinifyJs("static/js/reunionctrl.js")
	minify.MinifyJs("static/js/notify.js")
	minify.MinifyJs("static/js/angular-locale_fr-fr.js")
	minify.MinifyCss("static/css/reunion.css")
	// Routes registered directly on http.DefaultServeMux (bypass the router).
	http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir(configuration.Conf.GetFilePath("static/")))))
	http.Handle("/photos/", http.StripPrefix("/photos/", http.FileServer(http.Dir("/opt/animalove/photos/"))))
	http.HandleFunc("/static/js/javascript-min.js", GziperHandler(minify.AppJsHandler, "application/javascript"))
	http.HandleFunc("/static/css/styles-min.css", GziperHandler(minify.AppCssHandler, "text/css"))
	http.HandleFunc("/rss.xml", rss.GetFile)
	http.HandleFunc("/robots.txt", configuration.GetRobotsHandler)
	http.HandleFunc("/sitemap.xml", configuration.GetSitemapHandler)
	// Application routes on the mux router.
	router := mux.NewRouter()
	router.HandleFunc("/", home.GetPage)
	router.HandleFunc("/animaux", GziperHandler(GetAnnouncementsPage, "text/html")).Methods("GET")
	router.HandleFunc("/animaux/perdu/nouveau", GziperHandler(GetLostFormPage, "text/html")).Methods("GET")
	router.HandleFunc("/animaux/errant/nouveau", GziperHandler(GetSeenFormPage, "text/html")).Methods("GET")
	router.HandleFunc("/animaux/adopter/nouveau", GziperHandler(GetAdoptFormPage, "text/html")).Methods("GET")
	router.HandleFunc("/animaux/id/{announcementId}", GziperHandler(GetAnnouncementPage, "text/html")).Methods("GET")
	router.HandleFunc("/login", Login)
	// Admin routes; SecureHandler enforces authentication.
	router.HandleFunc("/admin", GetAdminHandler)
	router.HandleFunc("/admin/login", authentication.Login)
	router.Handle("/admin/announcements", SecureHandler(GetAdminAnnouncementsHandler))
	router.Handle("/ws/admin/announcements/id/", SecureHandler(GetAdminLostActionHandler))
	router.Handle("/ws/admin/announcements/lost/all", SecureHandler(GetAllHandler))
	// JSON/web-service and websocket routes.
	router.HandleFunc("/socket", websocket.WsHandler).Methods("GET")
	router.HandleFunc("/ws/mail", GetMailHandler).Methods("POST")
	router.HandleFunc("/ws/species", specy.GetSpeciesHandler).Methods("GET")
	router.HandleFunc("/ws/contact/message", GetContactMessageHandler).Methods("POST")
	router.HandleFunc("/ws/animaux", GetAnnouncementsHandler).Methods("GET")
	router.HandleFunc("/ws/animaux/{announcementType}", GetAnnouncementsHandler).Methods("GET")
	router.HandleFunc("/ws/animaux/{announcementType}", CreateAnnouncementsHandler).Methods("POST")
	router.Handle("/ws/animaux/id/{announcementId}", SecureHandler(GetLostActionHandler)).Methods("PUT")
	router.HandleFunc("/ws/animaux/perdu/locations", GetLocationsHandler).Methods("PUT")
	http.Handle("/", router)
	fmt.Printf("Running on port %s...\n", os.Getenv("PORT"))
	err := http.ListenAndServe(":"+os.Getenv("PORT"), nil)
	if err != nil {
		fmt.Printf("Erreur au démarrage du serveur : %s\n", err.Error())
		panic("Erreur au démarrage du serveur")
	}
}
|
// SPDX-FileCopyrightText: (c) 2018 Daniel Czerwonk
//
// SPDX-License-Identifier: MIT
package config
import (
"fmt"
"io"
"gopkg.in/yaml.v2"
)
// Config represents the server configuration.
type Config struct {
	// Our ASN
	LocalAS uint32 `yaml:"local_as"`
	// RouterID is the BGP router identifier of our server
	RouterID string `yaml:"router_id"`
	// Filters to match incoming (via API) routes against
	Filters []*RouteFilter `yaml:"route_filters"`
	// Sessions to BGP peers
	Sessions []*Session `yaml:"sessions"`
	// Debug enables verbose logging.
	Debug bool `yaml:"debug"`
}
// Session defines all parameters needed to establish a BGP session with a peer.
type Session struct {
	// Name of session
	Name string `yaml:"name"`
	// ASN of the peer
	RemoteAS uint32 `yaml:"remote_as"`
	// Local IP address
	LocalIP string `yaml:"local_ip"`
	// IP of the peer
	PeerIP string `yaml:"peer_ip"`
	// Passive defines if bioject should initiate a connection or wait to be connected
	Passive bool `yaml:"passive,omitempty"`
	// MultiProtocol defines if IPv4 routes should be advertised using MP NLRIs
	AdvertiseIPv4MultiProtocol bool `yaml:"multiprotocol_ipv4"`
}
// RouteFilter defines all parameters needed to decide whether to accept or
// to drop a route for a prefix.
type RouteFilter struct {
	// Net is the network address to match for
	Net string
	// Length is the prefix length
	Length uint8
	// Prefix length has to be larger or equal `Min`
	Min uint8
	// Prefix length has to be less or equal `Max`
	Max uint8
}
// Load reads YAML from r and decodes it into a Config.
func Load(r io.Reader) (*Config, error) {
	b, err := io.ReadAll(r)
	if err != nil {
		// %w (instead of the original %s) keeps the underlying error
		// available to errors.Is/errors.As; the message text is unchanged.
		return nil, fmt.Errorf("could not read config: %w", err)
	}
	c := &Config{}
	if err := yaml.Unmarshal(b, c); err != nil {
		return nil, fmt.Errorf("could not parse config: %w", err)
	}
	return c, nil
}
|
package main
import "github.com/study-golang/resmgr/deferusage"
// main runs the defer-usage demo by writing to abc.txt.
func main() {
	//df.TryDefer()
	deferusage.WriteFile("abc.txt")
}
|
package main
import (
"fmt"
"container/heap"
)
// Median-from-a-stream via two heaps: a max-heap holds the sorted lower half
// and a min-heap the upper half, with their sizes differing by at most 1.
// AddNum decides which heap receives a new value based on the current sizes;
// FindMedian averages the two roots when the sizes are equal and otherwise
// returns the root of the larger heap.
type intHeap []int
func (h intHeap) Len() int { return len(h) } // number of stored elements
func (h intHeap) Less(i, j int) bool { // comparison used by container/heap
	return h[i] < h[j] // h[i] < h[j] yields a min-heap; reversed yields a max-heap
}
func (h intHeap) Swap(i, j int) { // exchange two elements
	h[i], h[j] = h[j], h[i]
}
func (h *intHeap) Pop() interface{} { // remove and return the last element
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[0 : n-1]
	return x
}
func (h *intHeap) Push(x interface{}) { // append a new element
	*h = append(*h, x.(int))
}
type smallHeap struct {
	// min-heap: inherits intHeap's ascending Less
	intHeap
}
type bigHeap struct {
	// max-heap: overrides Less below to reverse the order
	intHeap
}
func (h bigHeap) Less(i, j int) bool { // reversed comparison
	return h.intHeap[i] > h.intHeap[j] // h[i] > h[j] turns intHeap into a max-heap
}
// MedianFinder keeps the lower half of the seen values in left (max-heap)
// and the upper half in right (min-heap).
type MedianFinder struct {
	left *bigHeap
	right *smallHeap
}
// Constructor initializes a MedianFinder with an empty max-heap for the
// lower half and an empty min-heap for the upper half.
func Constructor() MedianFinder {
	lower := new(bigHeap)
	upper := new(smallHeap)
	heap.Init(lower)
	heap.Init(upper)
	return MedianFinder{left: lower, right: upper}
}
// AddNum inserts num, keeping left (lower half) at most one element larger
// than right (upper half).
func (this *MedianFinder) AddNum(num int) {
	if this.left.Len() == this.right.Len() { // equal sizes: grow the lower half first
		heap.Push(this.left, num)
	} else {
		heap.Push(this.right, num)
	}
	// Restore the cross-heap invariant (every element of left <= every
	// element of right): only the two roots can violate it after a push,
	// so swapping them and re-sifting each heap is sufficient.
	if this.right.Len() > 0 && this.left.intHeap[0] > this.right.intHeap[0] {
		this.left.intHeap[0], this.right.intHeap[0] = this.right.intHeap[0], this.left.intHeap[0]
		heap.Fix(this.left, 0)
		heap.Fix(this.right, 0)
	}
}
// FindMedian returns the median of all values added so far: the average of
// the two roots when the heaps are balanced, otherwise the root of left
// (which AddNum always keeps as the larger heap).
func (this *MedianFinder) FindMedian() float64 {
	if this.left.Len() == this.right.Len() {
		return float64(this.left.intHeap[0]+this.right.intHeap[0]) / 2
	}
	return float64(this.left.intHeap[0])
}
// main feeds a descending sequence into a MedianFinder and prints both
// internal heaps followed by the resulting median.
func main() {
	mf := Constructor()
	for _, n := range []int{-1, -2, -3, -4, -5} {
		mf.AddNum(n)
	}
	fmt.Println(mf.left)
	fmt.Println(mf.right)
	fmt.Println(mf.FindMedian())
}
|
package filter
import (
"fmt"
"testing"
)
// PrintList prints msg followed by the elements of r and then s, each list
// comma-separated on its own line.
func PrintList(msg string, r, s []uint64) {
	fmt.Print(msg)
	for _, list := range [][]uint64{r, s} {
		for _, ele := range list {
			fmt.Print(ele, ",")
		}
		fmt.Println()
	}
}
// TestBasic exercises the basic Filter/Update round trip: an id should move
// from the "unseen" to the "seen" result after Update, and an unrelated id
// stays unseen. NOTE(review): requires a redis instance on 127.0.0.1:6379.
func TestBasic(t *testing.T) {
	tool := Init("127.0.0.1:6379", 100000, 0.1)
	key := "uid1"
	var l = []uint64{uint64(201805261420)}
	r, s, err := tool.Filter(key, l)
	if err == nil {
		PrintList("Before Update():", r, s)
	}
	n, err := tool.Update(key, l)
	if err == nil {
		fmt.Printf("Update %d success\n", n)
	}
	r, s, err = tool.Filter(key, l)
	if err == nil {
		PrintList("After Update():", r, s)
	}
	// A different id that was never updated should still be filtered out.
	var ll = []uint64{uint64(2019052)}
	r, s, err = tool.Filter(key, ll)
	if err == nil {
		PrintList("Before Update():", r, s)
	}
}
|
package destiny
// ItemData mirrors one inventory-item definition record.
// NOTE(review): field semantics are presumably those of the Destiny item
// manifest (hashes identify related definition records); confirm against the
// API documentation. Numeric fields are float64 because the source JSON
// decodes numbers as float64.
type ItemData struct {
	ItemHash float64
	ItemName string
	ItemDescription string
	Icon string
	SecondaryIcon string
	DisplaySource string
	ActionName string
	HasAction bool
	DeleteOnAction bool
	TierTypeName string
	TierType float64
	ItemTypeName string
	BucketTypehash float64
	PrimaryBaseStatHash float64
	Stats interface{}
	PerkHashes interface{}
	SpecialItemType float64
	TalentGridHash float64
	HasGeometry bool
	StatGroupHash float64
	ItemLevels []interface{}
	QualityLevel float64
	Equippable bool
	Instanced bool
	RewardItemhash float64
	Values interface{}
	ItemType float64
	ItemSubType float64
	ClassType float64
	// Sources describes where/at what level the item can be obtained.
	Sources []struct {
		ExpansionIndex float64
		Level float64
		MinQuality float64
		MaxQuality float64
		MinLevelRequired float64
		MaxLevelRequired float64
		Exclusivity float64
		ComputedStats interface{}
		SourceHashes []float64
		SpawnIndexes []float64
	}
	ItemCategoryHashes []interface{}
	SourceHashes []interface{}
	NonTransferrable bool
	Exclusive float64
	MaxStackSize float64
	ItemIndex float64
	SetItemHashes []interface{}
	QuestlineItemhash float64
	NeedsFullCompletion bool
	ObjectiveHashes []interface{}
}
|
/*
A Tour of Go Exercise: Web Crawler
Go语言之旅 - 网络爬虫
https://tour.golang.org/concurrency/10
*/
package main
import (
"fmt"
"sync"
)
type Fetcher interface {
	// Fetch returns the body of URL and
	// a slice of URLs found on that page.
	Fetch(url string) (body string, urls []string, err error)
}
// UrlCounter tracks visited URLs behind a sync.Mutex and uses a
// sync.WaitGroup to wait for all crawler goroutines to finish.
type UrlCounter struct {
	v map[string]int
	mux sync.Mutex
	wg sync.WaitGroup
}
// isVisited records one visit of url and reports whether it had already been
// visited before this call. The mutex makes it safe for concurrent crawlers.
func (u *UrlCounter) isVisited(url string) bool {
	u.mux.Lock()
	defer u.mux.Unlock()
	u.v[url]++
	// A count above 1 means some earlier call already claimed this URL.
	return u.v[url] > 1
}
// A package-level counter would need locking when shared across goroutines;
// real projects should avoid such globals.
// var uc = &UrlCounter{v: make(map[string]int)}
// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher, uc *UrlCounter) {
	// Signal the WaitGroup when this crawl (and only this crawl) is done.
	defer uc.wg.Done()
	if uc.isVisited(url) {
		return
	}
	if depth <= 0 {
		return
	}
	body, urls, err := fetcher.Fetch(url)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("found: %s %q\n", url, body)
	for _, u := range urls {
		// Add to the WaitGroup before launching each child goroutine.
		uc.wg.Add(1)
		go Crawl(u, depth-1, fetcher, uc)
	}
	return
}
// main kicks off the crawl and blocks until every goroutine has finished.
func main() {
	uc := &UrlCounter{v: make(map[string]int)}
	// Every Crawl call (including this first synchronous one) calls
	// wg.Done(), so account for it here.
	uc.wg.Add(1)
	Crawl("https://golang.org/", 4, fetcher, uc)
	// Wait for all spawned crawler goroutines.
	uc.wg.Wait()
}
// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult
// fakeResult is one canned page: its body text and outgoing links.
type fakeResult struct {
	body string
	urls []string
}
func (f fakeFetcher) Fetch(url string) (string, []string, error) {
if res, ok := f[url]; ok {
return res.body, res.urls, nil
}
return "", nil, fmt.Errorf("not found: %s", url)
}
// fetcher is a populated fakeFetcher.
// It models a tiny four-page site graph rooted at https://golang.org/,
// with cycles back to the root to exercise deduplication.
var fetcher = fakeFetcher{
	"https://golang.org/": &fakeResult{
		"The Go Programming Language",
		[]string{
			"https://golang.org/pkg/",
			"https://golang.org/cmd/",
		},
	},
	"https://golang.org/pkg/": &fakeResult{
		"Packages",
		[]string{
			"https://golang.org/",
			"https://golang.org/cmd/",
			"https://golang.org/pkg/fmt/",
			"https://golang.org/pkg/os/",
		},
	},
	"https://golang.org/pkg/fmt/": &fakeResult{
		"Package fmt",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
	"https://golang.org/pkg/os/": &fakeResult{
		"Package os",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
}
|
package osbuild2
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestNewTimezoneStage checks that the constructor sets the osbuild stage
// type string and passes the (empty) options through unchanged.
func TestNewTimezoneStage(t *testing.T) {
	expectedStage := &Stage{
		Type:    "org.osbuild.timezone",
		Options: &TimezoneStageOptions{},
	}
	actualStage := NewTimezoneStage(&TimezoneStageOptions{})
	assert.Equal(t, expectedStage, actualStage)
}
|
package cmd
import "testing"
// TestCheckURL verifies that checkURL normalizes several input URL shapes
// to the full API endpoint and rejects an empty URL.
func TestCheckURL(t *testing.T) {
	cases := []struct {
		name           string
		originalURL    string
		expectedURL    string
		expectedToFail bool
	}{
		{name: "base URL", originalURL: "google.com", expectedURL: "https://google.com/api/commands"},
		{name: "no URL", expectedToFail: true},
		{name: "http URL", originalURL: "https://google.com", expectedURL: "https://google.com/api/commands"},
	}
	for _, c := range cases {
		c := c
		t.Run(c.name, func(t *testing.T) {
			got, err := checkURL(c.originalURL, true, true)
			switch {
			case err != nil && c.expectedToFail:
				t.Logf("while checking URL failed as expected: %v", err)
			case err != nil:
				t.Fatalf("while checking URL not expected to fail failed: %v", err)
			case got != c.expectedURL:
				t.Fatalf("expected checked URL to be %q. got=%q", c.expectedURL, got)
			}
		})
	}
}
|
package auth
import (
"github.com/spf13/cobra"
)
// Cmd is the parent "auth" command group; authentication subcommands are
// expected to be registered on it elsewhere in the package.
var Cmd = &cobra.Command{
	Use: "auth",
	Short: "Authenticate with the Pathbird API",
	Long: "Authenticate with the Pathbird API.",
}
|
package main
import "fmt"
// main maps a string flag onto a bool: recognized truthy/falsy words set
// result, anything else prints "error" and leaves result at false.
func main() {
	char := "false11111"
	result := false
	truthy := map[string]bool{"true": true, "yes": true, "1": true}
	falsy := map[string]bool{"false": true, "no": true, "0": true}
	switch {
	case truthy[char]:
		result = true
	case falsy[char]:
		result = false
	default:
		fmt.Println("error")
	}
	fmt.Println(result)
}
|
/*
* @lc app=leetcode.cn id=64 lang=golang
*
* [64] 最小路径和
*/
package main
import (
"fmt"
)
/*
DFS
var dx []int = []int{0, 1}
var dy []int = []int{1, 0}
func dfs(grid [][]int, x, y, sum int, minSum *int) {
rows, cols := len(grid), len(grid[0])
if x == rows-1 && y == cols-1 {
if sum < *minSum {
*minSum = sum
}
return
}
for i := 0; i < 2; i++ {
row, col := x+dx[i], y+dy[i]
if row >= rows || col >= cols {
continue
}
dfs(grid, row, col, sum+grid[row][col], minSum)
}
}
func minPathSum(grid [][]int) int {
min := math.MaxInt64
dfs(grid, 0, 0, grid[0][0], &min)
return min
} */
// @lc code=start
// min returns the smaller of two ints.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// minPathSum returns the minimal sum of a path from the top-left to the
// bottom-right cell of grid, moving only right or down. It fills a DP
// table where dp[r][c] is the best cost of reaching (r, c): the first row
// and column are prefix sums, every other cell takes the cheaper of its
// top and left neighbors.
func minPathSum(grid [][]int) int {
	rows, cols := len(grid), len(grid[0])
	dp := make([][]int, rows)
	for r := range dp {
		dp[r] = make([]int, cols)
	}
	dp[0][0] = grid[0][0]
	for c := 1; c < cols; c++ {
		dp[0][c] = dp[0][c-1] + grid[0][c]
	}
	for r := 1; r < rows; r++ {
		dp[r][0] = dp[r-1][0] + grid[r][0]
		for c := 1; c < cols; c++ {
			dp[r][c] = grid[r][c] + min(dp[r-1][c], dp[r][c-1])
		}
	}
	return dp[rows-1][cols-1]
}
// @lc code=end
// main is a small driver exercising minPathSum on a 2x3 grid.
func main() {
	grid := [][]int{
		{1, 2, 3},
		{4, 5, 6},
	}
	fmt.Println(minPathSum(grid))
}
|
package routes
import (
"net/http"
"grhamm.com/todo/handler"
)
func RegisterRoute() http.Handler {
mux := http.NewServeMux()
mux.HandleFunc("/", handler.Health)
mux.HandleFunc("/todo/get", handler.GetTodo)
mux.HandleFunc("/todo/post", handler.InsertTodo)
mux.HandleFunc("/todo/set-finished", handler.SetTodoFinished)
return mux
}
|
package network
import (
"crypto/tls"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
"time"
)
// IRequest abstracts HTTP execution so callers can substitute a mock.
type IRequest interface {
	Execute(Method string, URL string, Headers map[string][]string, Payload string) (int, string, error)
}
// Request is the default IRequest implementation backed by net/http.
type Request struct{}
// Execute performs an HTTP request with the given method, URL, headers and
// string payload. It returns the response status code, the body, and any
// error (with status 500 as a placeholder code on failure). TLS certificate
// verification is disabled, so use only against trusted endpoints.
func (r Request) Execute(Method string, URL string, Headers map[string][]string, Payload string) (int, string, error) {
	client := http.Client{
		Timeout: time.Second * 60,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				// NOTE: verification intentionally disabled (self-signed targets).
				InsecureSkipVerify: true,
			},
		},
	}
	validurl, err := url.Parse(URL)
	if err != nil {
		// Bug fix: this used to log.Fatal, killing the whole process from
		// library code; log and return the error to the caller instead.
		log.Println(err.Error())
		return 500, "", err
	}
	request := &http.Request{
		URL:    validurl,
		Body:   ioutil.NopCloser(strings.NewReader(Payload)),
		Header: Headers,
		Method: Method,
	}
	resp, err := client.Do(request)
	if err != nil {
		return 500, "", err
	}
	// Bug fix: the response body was never closed, leaking connections.
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return 500, "", err
	}
	return resp.StatusCode, string(body), nil
}
|
package xxhash
/*
#include "c-trunk/xxhash.h"
*/
import "C"
import (
"hash"
"unsafe"
)
// xxHash32 is a streaming 32-bit xxHash whose state lives on the C side.
type xxHash32 struct {
	seed  uint32         // seed the state was initialised with
	sum   uint32         // cached digest once the state has been finalized
	state unsafe.Pointer // C XXH32 state; nil after finalization (Sum32)
}
// Size returns the number of bytes Sum will return.
func (xx *xxHash32) Size() int {
	return 4
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (xx *xxHash32) BlockSize() int {
	return 8
}
// Sum appends the current hash to in (big-endian) and returns the result.
// NOTE: contrary to the usual hash.Hash contract, this FINALIZES the hash —
// it calls Sum32, which releases the C state and makes further Writes fail
// with ErrAlreadyComputed.
func (xx *xxHash32) Sum(in []byte) []byte {
	s := xx.Sum32()
	return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}
// Write folds p into the running hash state. It returns ErrAlreadyComputed
// once Sum32 has finalized the state, and ErrMemoryLimit for writes larger
// than one gigabyte (a limit of this C binding).
func (xx *xxHash32) Write(p []byte) (n int, err error) {
	switch {
	case xx.state == nil:
		return 0, ErrAlreadyComputed
	case len(p) > oneGb:
		return 0, ErrMemoryLimit
	case len(p) == 0:
		// Bug fix: &p[0] below panics on an empty slice; an empty write
		// is a no-op per the io.Writer contract.
		return 0, nil
	}
	C.XXH32_update(xx.state, unsafe.Pointer(&p[0]), C.uint(len(p)))
	return len(p), nil
}
// Sum32 finalizes and returns the 32-bit digest. The first call releases
// the C state (subsequent Writes fail); later calls return the cached sum.
func (xx *xxHash32) Sum32() uint32 {
	if xx.state == nil {
		return xx.sum
	}
	xx.sum = uint32(C.XXH32_digest(xx.state))
	xx.state = nil
	return xx.sum
}
// Reset resets the Hash to its initial state.
func (xx *xxHash32) Reset() {
	// Digesting any live state releases it on the C side before re-init.
	if xx.state != nil {
		C.XXH32_digest(xx.state)
	}
	xx.state = C.XXH32_init(C.uint(xx.seed))
}
// NewS32 creates a new hash.Hash32 computing the 32bit xxHash checksum starting with the specific seed.
func NewS32(seed uint32) hash.Hash32 {
	h := &xxHash32{
		seed: seed,
	}
	h.Reset()
	return h
}
// New32 creates a new hash.Hash32 computing the 32bit xxHash checksum starting with the seed set to 0x0.
func New32() hash.Hash32 {
	return NewS32(0x0)
}
// Checksum32S returns the one-shot 32-bit xxHash checksum of in with the
// specific seed.
func Checksum32S(in []byte, seed uint32) uint32 {
	// Bug fix: &in[0] panics for an empty slice; XXH32 accepts a NULL
	// pointer with length 0, which hashes zero bytes.
	if len(in) == 0 {
		return uint32(C.XXH32(unsafe.Pointer(nil), 0, C.uint(seed)))
	}
	return uint32(C.XXH32(unsafe.Pointer(&in[0]), C.uint(len(in)), C.uint(seed)))
}
// Checksum32 returns the checksum of the input data with the seed set to 0,
// by delegating to Checksum32S.
func Checksum32(in []byte) uint32 {
	return Checksum32S(in, 0x0)
}
|
package bot
import (
"fmt"
log "github.com/sirupsen/logrus"
"github.com/wneessen/sotbot/database"
"github.com/wneessen/sotbot/random"
"github.com/wneessen/sotbot/response"
"github.com/wneessen/sotbot/user"
"time"
)
// CheckSotAuth validates the stored SoT RAT cookie of every known user.
// Each user with a cookie is checked in its own goroutine after a random
// 0-600s delay (to spread API traffic); users whose cookie looks expired
// are notified via DM and their cached cookie is cleared.
func (b *Bot) CheckSotAuth() {
	l := log.WithFields(log.Fields{
		"action": "bot.CheckSotAuth",
	})
	userList, err := database.GetUsers(b.Db)
	if err != nil {
		l.Errorf("Failed to fetch user list from DB: %v", err)
		return
	}
	for _, curUser := range userList {
		userObj, err := user.NewUser(b.Db, b.Config, curUser.UserId)
		if err != nil {
			l.Errorf("Failed to create user object: %v", err)
			// Bug fix: this was "break", which silently skipped every
			// remaining user as soon as one user object failed to load.
			continue
		}
		if userObj.HasRatCookie() {
			go func() {
				randNum, err := random.Number(600)
				if err != nil {
					l.Errorf("Failed to generate random number: %v", err)
					return
				}
				sleepTime, err := time.ParseDuration(fmt.Sprintf("%ds", randNum))
				if err != nil {
					l.Errorf("Failed to parse random number as duration: %v", err)
					return
				}
				time.Sleep(sleepTime)
				needsNotify, err := userObj.CheckAuth(b.Db, b.HttpClient)
				if err != nil {
					l.Errorf("CheckAuth failed: %v", err)
					return
				}
				if needsNotify {
					userObj.RatCookie = ""
					// Fix: fmt.Sprintf carried no format verbs (vet S1039);
					// a plain string is sufficient.
					dmMsg := "The last 3 attempts to communicate with the SoT API failed. " +
						"This likely means, that your RAT cookie has expired. Please use the !setrat function to " +
						"update your cookie."
					response.DmUser(b.Session, userObj, dmMsg, true, false)
				}
			}()
		}
	}
}
|
package main
import (
"crypto/sha256"
"fmt"
"strings"
)
// main generates a local API key, prints the plain key once, and emits a
// ready-to-edit SQL INSERT statement that stores only the key's SHA-256
// hash plus its prefix.
func main() {
	var api API
	api.LocalKey = true
	api.GenerateAPIKey()
	plainKey := api.GetAPIKey()
	keyPrefix := strings.Split(plainKey, ".")[0]
	hashedKey := fmt.Sprintf("%x", sha256.Sum256([]byte(plainKey)))
	query := "INSERT INTO api (active, name, create_date, last_update, api_key, api_prefix, local_key, access_rights) " +
		"VALUES ('t', 'My API Key', NOW(), NOW(), '" + hashedKey + "', '" + keyPrefix + "', 'f', 'ACCESS_RIGHTS');"
	fmt.Println("This is your plain API Key. Make sure you don't lose it and it's stored in a safe location:")
	fmt.Println(plainKey + "\n")
	fmt.Println("You can use the following database query to insert this API key into the database:")
	fmt.Println(query + "\n")
	fmt.Println("You'll need to replace the ACCESS_RIGHTS string with the actual access rights you want to use.")
	fmt.Println("You can choose from the following list:")
	for _, right := range GetAllAccessRights() {
		fmt.Printf("\t- %s\n", right)
	}
	fmt.Println("If you just want to choose 1 access right you can replace the ACCESS_RIGHTS.")
	fmt.Println("If you want to choose multiple access rights, just make a ; seperated list out of it. E.g.:")
	fmt.Println("transaction.read;account.read;statistic.read")
}
|
package htmlp
import (
"bytes"
"io"
"log"
"regexp"
"strings"
"golang.org/x/net/html"
"golang.org/x/net/html/atom"
)
// bannedMap lists element types that are stripped from parsed documents.
var bannedMap = map[atom.Atom]bool{
	atom.Svg: true,
	atom.Img: true,
	atom.Style: true,
	atom.Script: true,
}
// whiteSpaces matches any run of whitespace, for collapsing into one space.
var whiteSpaces = regexp.MustCompile(`\s+`)
// Parse parses htm, strips banned elements, collapses whitespace, and
// returns the re-rendered HTML.
// NOTE(review): html.Parse errors abort the whole process via log.Fatal —
// acceptable for a CLI, but should return an error if this becomes library
// code.
func Parse(htm string) string {
	n, err := html.Parse(strings.NewReader(htm))
	if err != nil {
		log.Fatal(err)
	}
	node := shakeTree(n)
	return renderNode(node)
}
// shakeTree recursively removes banned elements from n's subtree and
// collapses whitespace in each surviving node's Data, in place.
// Banned children are collected first and removed after the loop: calling
// RemoveChild while iterating would break the sibling links the loop uses.
// NOTE(review): the whitespace rewrite runs on every node kind, including
// element nodes where Data is the tag name; harmless today since tag names
// contain no whitespace, but a n.Type == html.TextNode guard would be
// safer — confirm.
func shakeTree(n *html.Node) *html.Node {
	var banned []*html.Node
	for c := n.FirstChild; c != nil; c = c.NextSibling {
		if isBanned(c) {
			banned = append(banned, c)
		} else {
			c.Data = whiteSpaces.ReplaceAllString(strings.TrimSpace(c.Data), " ")
			shakeTree(c)
		}
	}
	for _, b := range banned {
		n.RemoveChild(b)
	}
	return n
}
// isBanned reports whether the node's element type is listed in bannedMap
// (svg/img/style/script) and should be dropped from the tree.
func isBanned(n *html.Node) bool {
	// Idiom: a missing key yields false, so the two-value comma-ok lookup
	// and the explicit true/false branches were redundant.
	return bannedMap[n.DataAtom]
}
// renderNode serializes the node tree back into an HTML string. A render
// error is deliberately ignored; the buffer is returned as written so far.
func renderNode(n *html.Node) string {
	var sb bytes.Buffer
	out := io.Writer(&sb)
	_ = html.Render(out, n)
	return sb.String()
}
|
package rabbit_streams
import (
"context"
"fmt"
"github.com/pkg/errors"
"github.com/rabbitmq/rabbitmq-stream-go-client/pkg/amqp"
"github.com/rabbitmq/rabbitmq-stream-go-client/pkg/stream"
"github.com/batchcorp/plumber-schemas/build/go/protos/opts"
"github.com/batchcorp/plumber-schemas/build/go/protos/records"
"github.com/batchcorp/plumber/tunnel"
"github.com/batchcorp/plumber/validate"
)
// Tunnel replays messages arriving from the tunnel service onto a RabbitMQ
// stream until the context is cancelled. Send failures are reported on the
// returned error and abort the loop.
func (r *RabbitStreams) Tunnel(ctx context.Context, tunnelOpts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error {
	if err := validateTunnelOptions(tunnelOpts); err != nil {
		return errors.Wrap(err, "invalid tunnel options")
	}
	llog := r.log.WithField("pkg", "rabbit-streams/tunnel")
	// Make available to handleErr
	r.streamName = tunnelOpts.RabbitStreams.Args.Stream
	// NOTE(review): the producer name below reads tunnelOpts.RedisStreams
	// inside the RabbitMQ Streams backend — almost certainly a copy-paste
	// slip that will nil-pointer panic whenever Redis Streams options are
	// not also populated. Confirm the intended field on RabbitStreams.Args
	// and fix the reference.
	producer, err := r.client.NewProducer(tunnelOpts.RabbitStreams.Args.Stream,
		stream.NewProducerOptions().
			SetProducerName(tunnelOpts.RedisStreams.Args.WriteId).
			SetBatchSize(1))
	if err != nil {
		return errors.Wrap(err, "unable to create rabbitmq streams producer")
	}
	defer producer.Close()
	if err := tunnelSvc.Start(ctx, "RabbitMQ Streams", errorCh); err != nil {
		return errors.Wrap(err, "unable to create tunnel")
	}
	outboundCh := tunnelSvc.Read()
	// Continually loop looking for messages on the channel.
	for {
		select {
		case outbound := <-outboundCh:
			if err := producer.Send(amqp.NewMessage(outbound.Blob)); err != nil {
				err = fmt.Errorf("unable to replay message: %s", err)
				llog.Error(err)
				return err
			}
			llog.Debugf("Replayed message to Rabbit stream '%s' for replay '%s'",
				tunnelOpts.RabbitStreams.Args.Stream, outbound.ReplayId)
		case <-ctx.Done():
			llog.Debug("context cancelled")
			return nil
		}
	}
}
// validateTunnelOptions checks every nesting level of the RabbitMQ Streams
// tunnel options and requires a non-empty stream name.
func validateTunnelOptions(tunnelOpts *opts.TunnelOptions) error {
	switch {
	case tunnelOpts == nil:
		return validate.ErrEmptyTunnelOpts
	case tunnelOpts.RabbitStreams == nil:
		return validate.ErrEmptyBackendGroup
	case tunnelOpts.RabbitStreams.Args == nil:
		return validate.ErrEmptyBackendArgs
	case tunnelOpts.RabbitStreams.Args.Stream == "":
		return ErrEmptyStream
	}
	return nil
}
|
// Copyright 2020 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package android
import (
"fmt"
"reflect"
"strings"
"testing"
)
// The Example functions below pin the traversal semantics of each
// DepSetOrder on the same diamond graph (d -> {b, c} -> a); their
// "Output:" comments are asserted by `go test`.
func ExampleDepSet_ToList_postordered() {
	a := NewDepSetBuilder(POSTORDER).Direct(PathForTesting("a")).Build()
	b := NewDepSetBuilder(POSTORDER).Direct(PathForTesting("b")).Transitive(a).Build()
	c := NewDepSetBuilder(POSTORDER).Direct(PathForTesting("c")).Transitive(a).Build()
	d := NewDepSetBuilder(POSTORDER).Direct(PathForTesting("d")).Transitive(b, c).Build()
	fmt.Println(d.ToList().Strings())
	// Output: [a b c d]
}
func ExampleDepSet_ToList_preordered() {
	a := NewDepSetBuilder(PREORDER).Direct(PathForTesting("a")).Build()
	b := NewDepSetBuilder(PREORDER).Direct(PathForTesting("b")).Transitive(a).Build()
	c := NewDepSetBuilder(PREORDER).Direct(PathForTesting("c")).Transitive(a).Build()
	d := NewDepSetBuilder(PREORDER).Direct(PathForTesting("d")).Transitive(b, c).Build()
	fmt.Println(d.ToList().Strings())
	// Output: [d b a c]
}
func ExampleDepSet_ToList_topological() {
	a := NewDepSetBuilder(TOPOLOGICAL).Direct(PathForTesting("a")).Build()
	b := NewDepSetBuilder(TOPOLOGICAL).Direct(PathForTesting("b")).Transitive(a).Build()
	c := NewDepSetBuilder(TOPOLOGICAL).Direct(PathForTesting("c")).Transitive(a).Build()
	d := NewDepSetBuilder(TOPOLOGICAL).Direct(PathForTesting("d")).Transitive(b, c).Build()
	fmt.Println(d.ToList().Strings())
	// Output: [d b c a]
}
// ToSortedList ignores the depset order entirely and sorts alphabetically.
func ExampleDepSet_ToSortedList() {
	a := NewDepSetBuilder(POSTORDER).Direct(PathForTesting("a")).Build()
	b := NewDepSetBuilder(POSTORDER).Direct(PathForTesting("b")).Transitive(a).Build()
	c := NewDepSetBuilder(POSTORDER).Direct(PathForTesting("c")).Transitive(a).Build()
	d := NewDepSetBuilder(POSTORDER).Direct(PathForTesting("d")).Transitive(b, c).Build()
	fmt.Println(d.ToSortedList().Strings())
	// Output: [a b c d]
}
// Tests based on Bazel's ExpanderTestBase.java to ensure compatibility
// https://github.com/bazelbuild/bazel/blob/master/src/test/java/com/google/devtools/build/lib/collect/nestedset/ExpanderTestBase.java
// TestDepSet runs a table of graph shapes (duplicates, nesting, diamonds,
// order conflicts) and checks ToList() under all three DepSetOrders.
func TestDepSet(t *testing.T) {
	a := PathForTesting("a")
	b := PathForTesting("b")
	c := PathForTesting("c")
	c2 := PathForTesting("c2")
	d := PathForTesting("d")
	e := PathForTesting("e")
	// Each case builds the depset fresh per order; the three expectation
	// slices give the wanted traversal for each order.
	tests := []struct {
		name                             string
		depSet                           func(t *testing.T, order DepSetOrder) *DepSet
		postorder, preorder, topological []string
	}{
		{
			name: "simple",
			depSet: func(t *testing.T, order DepSetOrder) *DepSet {
				return NewDepSet(order, Paths{c, a, b}, nil)
			},
			postorder:   []string{"c", "a", "b"},
			preorder:    []string{"c", "a", "b"},
			topological: []string{"c", "a", "b"},
		},
		{
			name: "simpleNoDuplicates",
			depSet: func(t *testing.T, order DepSetOrder) *DepSet {
				return NewDepSet(order, Paths{c, a, a, a, b}, nil)
			},
			postorder:   []string{"c", "a", "b"},
			preorder:    []string{"c", "a", "b"},
			topological: []string{"c", "a", "b"},
		},
		{
			name: "nesting",
			depSet: func(t *testing.T, order DepSetOrder) *DepSet {
				subset := NewDepSet(order, Paths{c, a, e}, nil)
				return NewDepSet(order, Paths{b, d}, []*DepSet{subset})
			},
			postorder:   []string{"c", "a", "e", "b", "d"},
			preorder:    []string{"b", "d", "c", "a", "e"},
			topological: []string{"b", "d", "c", "a", "e"},
		},
		{
			name: "builderReuse",
			depSet: func(t *testing.T, order DepSetOrder) *DepSet {
				assertEquals := func(t *testing.T, w, g Paths) {
					if !reflect.DeepEqual(w, g) {
						t.Errorf("want %q, got %q", w, g)
					}
				}
				// A builder must be reusable: each Build() snapshots the
				// contents added so far.
				builder := NewDepSetBuilder(order)
				assertEquals(t, nil, builder.Build().ToList())
				builder.Direct(b)
				assertEquals(t, Paths{b}, builder.Build().ToList())
				builder.Direct(d)
				assertEquals(t, Paths{b, d}, builder.Build().ToList())
				child := NewDepSetBuilder(order).Direct(c, a, e).Build()
				builder.Transitive(child)
				return builder.Build()
			},
			postorder:   []string{"c", "a", "e", "b", "d"},
			preorder:    []string{"b", "d", "c", "a", "e"},
			topological: []string{"b", "d", "c", "a", "e"},
		},
		{
			name: "builderChaining",
			depSet: func(t *testing.T, order DepSetOrder) *DepSet {
				return NewDepSetBuilder(order).Direct(b).Direct(d).
					Transitive(NewDepSetBuilder(order).Direct(c, a, e).Build()).Build()
			},
			postorder:   []string{"c", "a", "e", "b", "d"},
			preorder:    []string{"b", "d", "c", "a", "e"},
			topological: []string{"b", "d", "c", "a", "e"},
		},
		{
			name: "transitiveDepsHandledSeparately",
			depSet: func(t *testing.T, order DepSetOrder) *DepSet {
				subset := NewDepSetBuilder(order).Direct(c, a, e).Build()
				builder := NewDepSetBuilder(order)
				// The fact that we add the transitive subset between the Direct(b) and Direct(d)
				// calls should not change the result.
				builder.Direct(b)
				builder.Transitive(subset)
				builder.Direct(d)
				return builder.Build()
			},
			postorder:   []string{"c", "a", "e", "b", "d"},
			preorder:    []string{"b", "d", "c", "a", "e"},
			topological: []string{"b", "d", "c", "a", "e"},
		},
		{
			name: "nestingNoDuplicates",
			depSet: func(t *testing.T, order DepSetOrder) *DepSet {
				subset := NewDepSetBuilder(order).Direct(c, a, e).Build()
				return NewDepSetBuilder(order).Direct(b, d, e).Transitive(subset).Build()
			},
			postorder:   []string{"c", "a", "e", "b", "d"},
			preorder:    []string{"b", "d", "e", "c", "a"},
			topological: []string{"b", "d", "c", "a", "e"},
		},
		{
			name: "chain",
			depSet: func(t *testing.T, order DepSetOrder) *DepSet {
				c := NewDepSetBuilder(order).Direct(c).Build()
				b := NewDepSetBuilder(order).Direct(b).Transitive(c).Build()
				a := NewDepSetBuilder(order).Direct(a).Transitive(b).Build()
				return a
			},
			postorder:   []string{"c", "b", "a"},
			preorder:    []string{"a", "b", "c"},
			topological: []string{"a", "b", "c"},
		},
		{
			name: "diamond",
			depSet: func(t *testing.T, order DepSetOrder) *DepSet {
				d := NewDepSetBuilder(order).Direct(d).Build()
				c := NewDepSetBuilder(order).Direct(c).Transitive(d).Build()
				b := NewDepSetBuilder(order).Direct(b).Transitive(d).Build()
				a := NewDepSetBuilder(order).Direct(a).Transitive(b).Transitive(c).Build()
				return a
			},
			postorder:   []string{"d", "b", "c", "a"},
			preorder:    []string{"a", "b", "d", "c"},
			topological: []string{"a", "b", "c", "d"},
		},
		{
			name: "extendedDiamond",
			depSet: func(t *testing.T, order DepSetOrder) *DepSet {
				d := NewDepSetBuilder(order).Direct(d).Build()
				e := NewDepSetBuilder(order).Direct(e).Build()
				b := NewDepSetBuilder(order).Direct(b).Transitive(d).Transitive(e).Build()
				c := NewDepSetBuilder(order).Direct(c).Transitive(e).Transitive(d).Build()
				a := NewDepSetBuilder(order).Direct(a).Transitive(b).Transitive(c).Build()
				return a
			},
			postorder:   []string{"d", "e", "b", "c", "a"},
			preorder:    []string{"a", "b", "d", "e", "c"},
			topological: []string{"a", "b", "c", "e", "d"},
		},
		{
			name: "extendedDiamondRightArm",
			depSet: func(t *testing.T, order DepSetOrder) *DepSet {
				d := NewDepSetBuilder(order).Direct(d).Build()
				e := NewDepSetBuilder(order).Direct(e).Build()
				b := NewDepSetBuilder(order).Direct(b).Transitive(d).Transitive(e).Build()
				c2 := NewDepSetBuilder(order).Direct(c2).Transitive(e).Transitive(d).Build()
				c := NewDepSetBuilder(order).Direct(c).Transitive(c2).Build()
				a := NewDepSetBuilder(order).Direct(a).Transitive(b).Transitive(c).Build()
				return a
			},
			postorder:   []string{"d", "e", "b", "c2", "c", "a"},
			preorder:    []string{"a", "b", "d", "e", "c", "c2"},
			topological: []string{"a", "b", "c", "c2", "e", "d"},
		},
		{
			name: "orderConflict",
			depSet: func(t *testing.T, order DepSetOrder) *DepSet {
				child1 := NewDepSetBuilder(order).Direct(a, b).Build()
				child2 := NewDepSetBuilder(order).Direct(b, a).Build()
				parent := NewDepSetBuilder(order).Transitive(child1).Transitive(child2).Build()
				return parent
			},
			postorder:   []string{"a", "b"},
			preorder:    []string{"a", "b"},
			topological: []string{"b", "a"},
		},
		{
			name: "orderConflictNested",
			depSet: func(t *testing.T, order DepSetOrder) *DepSet {
				a := NewDepSetBuilder(order).Direct(a).Build()
				b := NewDepSetBuilder(order).Direct(b).Build()
				child1 := NewDepSetBuilder(order).Transitive(a).Transitive(b).Build()
				child2 := NewDepSetBuilder(order).Transitive(b).Transitive(a).Build()
				parent := NewDepSetBuilder(order).Transitive(child1).Transitive(child2).Build()
				return parent
			},
			postorder:   []string{"a", "b"},
			preorder:    []string{"a", "b"},
			topological: []string{"b", "a"},
		},
	}
	// Run every case once per traversal order.
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Run("postorder", func(t *testing.T) {
				depSet := tt.depSet(t, POSTORDER)
				if g, w := depSet.ToList().Strings(), tt.postorder; !reflect.DeepEqual(g, w) {
					t.Errorf("expected ToList() = %q, got %q", w, g)
				}
			})
			t.Run("preorder", func(t *testing.T) {
				depSet := tt.depSet(t, PREORDER)
				if g, w := depSet.ToList().Strings(), tt.preorder; !reflect.DeepEqual(g, w) {
					t.Errorf("expected ToList() = %q, got %q", w, g)
				}
			})
			t.Run("topological", func(t *testing.T) {
				depSet := tt.depSet(t, TOPOLOGICAL)
				if g, w := depSet.ToList().Strings(), tt.topological; !reflect.DeepEqual(g, w) {
					t.Errorf("expected ToList() = %q, got %q", w, g)
				}
			})
		})
	}
}
// TestDepSetInvalidOrder verifies that nesting DepSets of two different
// orders panics with an error mentioning "incompatible order".
func TestDepSetInvalidOrder(t *testing.T) {
	orders := []DepSetOrder{POSTORDER, PREORDER, TOPOLOGICAL}
	run := func(t *testing.T, order1, order2 DepSetOrder) {
		defer func() {
			if r := recover(); r != nil {
				if err, ok := r.(error); !ok {
					// Bug fix: this message used to print err, which is the
					// zero value (nil) when the type assertion fails; print
					// the recovered value itself.
					t.Fatalf("expected panic error, got %v", r)
				} else if !strings.Contains(err.Error(), "incompatible order") {
					t.Fatalf("expected incompatible order error, got %v", err)
				}
			}
		}()
		NewDepSet(order1, nil, []*DepSet{NewDepSet(order2, nil, nil)})
		t.Fatal("expected panic")
	}
	for _, order1 := range orders {
		t.Run(order1.String(), func(t *testing.T) {
			for _, order2 := range orders {
				t.Run(order2.String(), func(t *testing.T) {
					if order1 != order2 {
						run(t, order1, order2)
					}
				})
			}
		})
	}
}
|
package responses
// PasswordChangeResponse is the API payload acknowledging a password
// change. Note that Changed is a string flag, not a bool, mirroring the
// upstream wire format.
type PasswordChangeResponse struct {
	Changed string `json:"changed" mapstructure:"changed"`
}
|
package doc
import (
"fmt"
_ "github.com/russross/blackfriday"
"io/ioutil"
"os"
"strings"
"testing"
)
// readDir prints the name of every entry in path. Read errors are
// deliberately ignored: an unreadable directory simply prints nothing.
func readDir(path string) {
	entries, _ := ioutil.ReadDir(path)
	for _, entry := range entries {
		fmt.Println(entry.Name())
	}
}
// getAllFileDic prints the names of the non-hidden (not dot-prefixed)
// entries of path.
// NOTE(review): the declared map[string]os.File result is never populated —
// the function always returns nil. The signature is kept for compatibility;
// callers must not rely on the return value.
func getAllFileDic(path string) (result map[string]os.File) {
	// The ReadDir error is deliberately ignored: an unreadable directory
	// simply yields no entries.
	dir, _ := ioutil.ReadDir(path)
	// Idiom fix: strings.HasPrefix is clearer than Index(...) != 0, and
	// pre-sizing the slice avoids repeated growth.
	visible := make([]os.FileInfo, 0, len(dir))
	for _, info := range dir {
		if !strings.HasPrefix(info.Name(), ".") {
			visible = append(visible, info)
		}
	}
	for _, info := range visible {
		fmt.Println(info.Name())
	}
	return nil
}
// TestIdUtils is a placeholder; the commented-out directory listing below
// is kept only for manual experimentation on the author's machine.
func TestIdUtils(t *testing.T) {
	//getAllFileDic("/Users/winily/Projects/Open-Source/go-utils/")
}
|
package main
import (
"sync/atomic"
"fmt"
)
// main demonstrates the sync/atomic primitives: Add, CompareAndSwap,
// Load/Store, Swap, and atomic.Value. The prints show each operation's
// effect; execution order matters, so the code is left untouched.
func main() {
	//AddInt32 atomically adds delta to *addr and returns the new value.
	var i int32 = 1
	atomic.AddInt32(&i,1)
	fmt.Println("i=i+1=",i)
	atomic.AddInt32(&i,-1)
	fmt.Println("i=i-1=",i)
	//CompareAndSwapInt32 executes the compare-and-swap operation for an int32 value.
	//func CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)
	var a int32 = 1
	var b int32 = 2
	var c int32 = 2
	// a == old (a), so the swap succeeds: a becomes 2, d is true.
	d := atomic.CompareAndSwapInt32(&a, a, b)
	fmt.Println("a, b, c, d:", a, b, c, d)
	// b == old (b), so this swap succeeds too: b stays 2, e is true.
	e := atomic.CompareAndSwapInt32(&b, b, c)
	fmt.Println("a, b, c, d, e:", a, b, c, d, e)
	//LoadInt32 atomically loads *addr.
	//StoreInt32 atomically stores val into *addr.
	var x int32 = 100
	var y int32
	atomic.StoreInt32(&y, atomic.LoadInt32(&x))
	fmt.Println("x,y:",x,y)
	// SwapInt32 stores k into j and returns j's previous value (0).
	var j int32 = 0
	var k int32 = 1
	fmt.Printf("(%d , %d)\n",j,k)
	l := atomic.SwapInt32(&j, k)
	fmt.Printf("(%d , %d)\n",j,k)
	fmt.Println(l)
	// atomic.Value stores/loads an arbitrary value atomically.
	var z atomic.Value
	z.Store(100)
	fmt.Println(z.Load())
}
package wooter
import (
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"code.cloudfoundry.org/windows2016fs/layer"
"code.cloudfoundry.org/windows2016fs/writer"
"github.com/Microsoft/hcsshim"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
// VolumesDir is the subdirectory of BaseDir holding extracted layer volumes.
const VolumesDir string = "volumes"
// DiffsDir is the subdirectory of BaseDir holding writable sandbox layers.
const DiffsDir string = "diffs"
// HCSWoot implements layer unpack/bundle operations on top of the Windows
// Host Compute Service shim (hcsshim), rooted at BaseDir.
type HCSWoot struct {
	BaseDir string
}
// Unpack extracts a layer blob into <BaseDir>/volumes/<id>, using
// allParents as the parent-layer chain. It returns the number of bytes
// read from blob.
func (c HCSWoot) Unpack(id, parentID string, allParents []string, blob io.Reader) (size int, err error) {
	dest := filepath.Join(c.BaseDir, VolumesDir, id)
	if err := os.MkdirAll(dest, 0700); err != nil {
		return 0, err
	}
	// The blob is spooled to a temp file because the layer manager needs a
	// file path rather than a stream.
	blobFile, err := ioutil.TempFile("", "blob")
	if err != nil {
		return 0, err
	}
	// Bug fix: the temp file was never closed nor deleted, leaking a file
	// handle and disk space on every unpack.
	defer os.Remove(blobFile.Name())
	blobSize, err := io.Copy(blobFile, blob)
	if err != nil {
		blobFile.Close()
		return 0, err
	}
	// Close before Extract re-opens the file (Windows file sharing).
	if err := blobFile.Close(); err != nil {
		return 0, err
	}
	lm := layer.NewManager(hcsshim.DriverInfo{HomeDir: dest, Flavour: 1}, &writer.Writer{})
	if err := lm.Extract(blobFile.Name(), id, allParents); err != nil {
		return 0, err
	}
	return int(blobSize), nil
}
// Bundle creates and activates a writable sandbox layer for id on top of
// parentIds (newest parent last) and returns an OCI runtime spec whose
// root points at the mounted layer.
func (c HCSWoot) Bundle(id string, parentIds []string) (specs.Spec, error) {
	// Bug fix: parentIds[len(parentIds)-1] below panicked on an empty
	// parent chain; fail with a clear error instead.
	if len(parentIds) == 0 {
		return specs.Spec{}, errors.New("bundle requires at least one parent layer")
	}
	dest := filepath.Join(c.BaseDir, DiffsDir, id)
	if err := os.MkdirAll(dest, 0700); err != nil {
		return specs.Spec{}, err
	}
	parentPaths := []string{}
	for _, parent := range parentIds {
		parentPaths = append(parentPaths, path.Join(c.BaseDir, VolumesDir, parent))
	}
	info := hcsshim.DriverInfo{
		HomeDir: dest,
		Flavour: 1,
	}
	// The sandbox is layered directly on the last (topmost) parent.
	parent := parentIds[len(parentIds)-1]
	if err := hcsshim.CreateSandboxLayer(info, id, parent, parentPaths); err != nil {
		return specs.Spec{}, err
	}
	if err := hcsshim.ActivateLayer(info, id); err != nil {
		return specs.Spec{}, err
	}
	if err := hcsshim.PrepareLayer(info, id, parentPaths); err != nil {
		return specs.Spec{}, err
	}
	volumePath, err := hcsshim.GetLayerMountPath(info, id)
	if err != nil {
		return specs.Spec{}, err
	}
	return specs.Spec{
		Root: &specs.Root{
			Path: volumePath,
		},
		Windows: &specs.Windows{
			LayerFolders: parentPaths,
		},
	}, nil
}
// Exists reports whether a layer with the given id is present under the
// volumes directory. Driver errors are treated as "not present".
func (c HCSWoot) Exists(id string) bool {
	info := hcsshim.DriverInfo{
		HomeDir: filepath.Join(c.BaseDir, VolumesDir, id),
		Flavour: 1,
	}
	present, err := hcsshim.LayerExists(info, id)
	return err == nil && present
}
|
package main
import (
"fmt"
)
// main demonstrates that Go arrays are passed by value: modifyArr mutates
// only its local copy, so the original array prints unchanged.
func main() {
	arr := [5]int{1, 2, 3, 4, 5}
	modifyArr(arr)
	fmt.Println(arr)
}
// modifyArr writes to its by-value copy of the array, so the caller's
// array is never affected — this is the point of the demo.
func modifyArr(a [5]int) {
	a[1] = 20 // visible only inside this function
}
|
package data
import (
pb "github.com/bgokden/veri/veriservice"
)
// Delete removes datum from the internal kv store by delegating to the
// BD-map-backed implementation.
func (dt *Data) Delete(datum *pb.Datum) error {
	return dt.DeleteBDMap(datum)
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/url"
"os"
"regexp"
"strings"
"time"
"github.com/nlopes/slack"
)
// custom holds the loaded trigger/response pairs; nil until loaded.
var custom CustomResponses = nil
// config is the process-wide bot configuration loaded from config.json.
var config Config
// Config mirrors the on-disk config.json.
type Config struct {
	MtgApiEndpoint string `json:"mtg_api_endpoint"` // printf template; card name is substituted in
	CustomResponseFile string `json:"custom_path"`
	SlackApiKey string `json:"slack_key"`
}
// CustomResponses maps a regex trigger to one or more candidate responses.
type CustomResponses []struct {
	Trigger string `json:"trigger"`
	Response []string `json:"response"`
}
// Card is a single Magic: The Gathering card as returned by the MTG API.
type Card struct {
	Name         string `json:"name"`
	MultiverseId int    `json:"multiverseid"`
	// Bug fix: the tag read `json"set"` (missing colon), which is not a
	// valid struct tag, so the field marshaled as "Set" instead of "set".
	Set      string `json:"set"`
	SetName  string `json:"setName"`
	ImageUrl string `json:"imageUrl,omitempty"`
	Rarity   string `json:"rarity"`
	Type     string `json:"type,omitempty"`
}
// Cards is the API response wrapper holding the list of matching cards.
type Cards struct {
	Card []Card `json:"cards"`
}
// allowedCardRarity reports whether a card of the given rarity may be
// returned to chat. This filters out printings such as promo cards and
// masterpieces, which the MTG API labels with other rarity strings.
func allowedCardRarity(rarity string) bool {
	// Idiom fix: one map literal instead of seven separate assignments.
	allowed := map[string]bool{
		"Common":      true,
		"Uncommon":    true,
		"Rare":        true,
		"Mythic Rare": true,
		"Mythic":      true,
		"Basic Land":  true,
	}
	return allowed[rarity]
}
// loadCustomResponses loads custom trigger/response pairs from the file
// named by config.CustomResponseFile into the package-level custom slice.
// A missing file or malformed JSON is fatal.
func loadCustomResponses() {
	raw, err := ioutil.ReadFile(config.CustomResponseFile)
	if err != nil {
		log.Println("loadCustomResponses: ", err)
		os.Exit(1)
	}
	// Bug fix: the Unmarshal error was silently dropped, leaving custom
	// nil on bad JSON with no indication why.
	if err := json.Unmarshal(raw, &custom); err != nil {
		log.Println("loadCustomResponses: ", err)
		os.Exit(1)
	}
}
// loadConfig reads configPath into the package-level config struct.
// A missing file or malformed JSON is fatal.
func loadConfig(configPath string) {
	raw, err := ioutil.ReadFile(configPath)
	if err != nil {
		log.Println("loadConfig: ", err)
		os.Exit(1)
	}
	// Bug fix: the Unmarshal error was silently dropped, leaving config
	// zero-valued on bad JSON with no indication why.
	if err := json.Unmarshal(raw, &config); err != nil {
		log.Println("loadConfig: ", err)
		os.Exit(1)
	}
}
// leverage the api defined in the config to fetch links to the gatherer image of a card
// NOTE: currently only works with api.magicthegathering.io/v1/
// Returns "" when the card cannot be found or any request step fails.
func fetchCard(cardName string) string {
	card_name := url.QueryEscape(cardName)
	uri := fmt.Sprintf(config.MtgApiEndpoint, card_name)
	mtgClient := &http.Client{}
	req, err := http.NewRequest("GET", uri, nil)
	if err != nil {
		log.Println("NewRequest: ", err)
		return ""
	}
	resp, err := mtgClient.Do(req)
	if err != nil {
		log.Println("Do: ", err)
		return ""
	}
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Println(err)
	}
	var cc Cards
	// NOTE(review): deferred only after the body has been fully read; works,
	// but deferring immediately after the Do error check is the usual idiom.
	defer resp.Body.Close()
	if err := json.Unmarshal(data, &cc); err != nil {
		fmt.Println("Decode: ", err)
		// NOTE(review): data is []byte, so this prints raw byte values;
		// string(data) was probably intended.
		fmt.Println(data)
		return ""
	}
	var cardToReturn Card
	regString := fmt.Sprintf("(?i)^%s", cardName)
	reg := regexp.MustCompile(regString)
	/*
		check for matches as follows:
		exact name match (case insensitive):
			return immediately
		regex for `(?i)^cardname`, and current card set to return doesnt match:
			set current card to be returned
		no card is currently set to be returned:
			set the current card to be returned
	*/
	for i := range cc.Card {
		// we need to go through in reverse because the api returns cards sorted ascending
		// and we want the most recent printing
		c := cc.Card[len(cc.Card)-1-i]
		if c.ImageUrl != "" && allowedCardRarity(c.Rarity) && c.Type != "Vanguard"{
			if strings.EqualFold(c.Name, cardName) {
				return c.ImageUrl
			} else if reg.MatchString(c.Name) && !reg.MatchString(cardToReturn.Name) {
				cardToReturn = c
			} else if cardToReturn.Name == "" {
				cardToReturn = c
			}
		}
	}
	return cardToReturn.ImageUrl
}
// getStringsFromMessage returns every substring that was wrapped in
// [[...]] with the brackets stripped, or nil when the message contains
// no such references.
func getStringsFromMessage(message string) []string {
	re := regexp.MustCompile(`\[\[[\w ,.!?:\-\(\)\/'"]+\]\]`)
	found := re.FindAllStringSubmatch(message, -1)
	if len(found) == 0 {
		return nil
	}
	names := make([]string, 0, len(found))
	for _, m := range found {
		names = append(names, strings.Trim(m[0], "[]"))
	}
	return names
}
// checkCustomResponseMatches scans the custom trigger table and returns the
// concatenated responses — one per matching trigger, chosen at random when
// a trigger has several — or "" when nothing matches.
func checkCustomResponseMatches(message string) string {
	ret := ""
	if custom == nil {
		return ret
	}
	// Bug fix: rand.Seed(time.Now().UTC().UnixNano()) used to run inside
	// the match loop, reseeding the global RNG per match; triggers matched
	// within the same nanosecond would always pick identical responses.
	// Seed once per call instead.
	rand.Seed(time.Now().UTC().UnixNano())
	for _, c := range custom {
		reg := regexp.MustCompile(c.Trigger)
		if reg.MatchString(message) {
			ret = ret + c.Response[rand.Intn(len(c.Response))] + "\n"
		}
	}
	return ret
}
// processMessage decides how to reply to a Slack message. Custom responses
// take priority; otherwise every [[card]] reference is resolved to an
// image link. Returns "" when there is nothing to say.
func processMessage(message string) string {
	if resp := checkCustomResponseMatches(message); resp != "" {
		return resp
	}
	reply := ""
	for _, name := range getStringsFromMessage(message) {
		reply = reply + fetchCard(name) + "\n"
	}
	return reply
}
// slackStuff connects to Slack RTM and replies to message events whose
// text triggers either a custom response or a card lookup. Runs forever.
func slackStuff() {
	logger := log.New(os.Stdout, "slack-bot: ", log.Lshortfile|log.LstdFlags)
	api := slack.New(config.SlackApiKey, slack.OptionLog(logger), slack.OptionDebug(false))
	rtm := api.NewRTM()
	go rtm.ManageConnection()
	for msg := range rtm.IncomingEvents {
		switch ev := msg.Data.(type) {
		case *slack.MessageEvent:
			response := processMessage(ev.Text)
			// Only reply when there is actual content.
			if strings.Trim(response, "\n") != "" {
				params := slack.PostMessageParameters{
					AsUser:      true,
					UnfurlLinks: true,
					UnfurlMedia: true,
				}
				api.SendMessage(ev.Channel, slack.MsgOptionText(response, false), slack.MsgOptionPostMessageParameters(params))
			}
			// Idiom fix: the trailing "break" was removed — Go switch cases
			// never fall through, so it was a no-op (staticcheck S1023).
		default:
			// Ignore all other event types.
		}
	}
}
// main loads the bot configuration (from the path given as the single CLI
// argument, or a built-in default), loads optional custom responses, and
// then starts the slack event loop.
func main() {
	// Default used when no config path is supplied on the command line.
	// NOTE(review): this is a developer-specific absolute path; consider
	// defaulting to "./config.json" instead.
	configPath := "/home/ezimmer/go/src/mtgbot-golang/config.json"
	if len(os.Args) == 2 {
		configPath = os.Args[1]
	}
	// BUG FIX: the original printed "Loading config from './config.json'"
	// while actually loading the absolute default path above; always report
	// the path that is really used.
	fmt.Printf("Loading config from '%s'\n", configPath)
	loadConfig(configPath)

	if config.CustomResponseFile != "" {
		fmt.Printf("Loading custom responses from '%s'\n", config.CustomResponseFile)
		loadCustomResponses()
	}
	slackStuff()
}
|
package main
import (
"crypto/md5"
"database/sql"
"fmt"
"log"
"net/http"
"os"
"os/signal"
"os/user"
"path/filepath"
"runtime"
"syscall"
// "time"
"github.com/cznic/ql"
"github.com/dchest/uniuri"
"github.com/fsnotify/fsnotify"
"github.com/op/go-logging" // more complete package to log to different outputs; we start with file, syslog, and stderr;
"github.com/spf13/viper" // to read config files
"gopkg.in/natefinch/lumberjack.v2" // rolling file logs
)
var (
	// Default configurations, hopefully exported to other files and packages
	// we probably should have a struct for this (or even several)
	Host string = "localhost" // host name/address the service identifies as
	GoSLRentalDSN string = "goslrental.db" // data source name for the QL database file
	URLPathPrefix string // optional prefix prepended to every registered route
	PDO_Prefix string = "ql" // database driver name passed to sql.Open
	// PathToStaticFiles string = "~/go/src/goslrental"
	PathToStaticFiles string = "." // root directory for templates and static assets
	ServerPort string = ":3333" // listen address in ":port" form
	FrontEnd string // front-end identifier, read from configuration
	// If tlsCRT && tlsKEY are set, this means we should set up HTTPS (gwyneth 20211021)
	tlsCRT, tlsKEY string = "", ""
	LSLSignaturePIN string = "6925" // PIN used to sign in-world LSL script calls
	logFileName string = "goslrental.log" // where the rotating file log is written
	logMaxSize, logMaxBackups, logMaxAge int = 500, 3, 28 // configurations for the go-logging logger
	logSeverityStderr, logSeverityFile, logSeveritySyslog logging.Level = logging.DEBUG, logging.DEBUG, logging.CRITICAL
	Log = logging.MustGetLogger("goslrental") // configuration for the go-logging logger, must be available everywhere
	logFormat logging.Formatter // must be initialised or all hell breaks loose
)

// NullUUID is the all-zeroes UUID — always useful when we deal with SL/OpenSimulator...
const NullUUID = "00000000-0000-0000-0000-000000000000"

//type templateParameters map[string]string

// templateParameters carries the key/value pairs handed to HTML templates.
type templateParameters map[string]interface{}
// setUp tries to create the Objects/Users tables on the QL database for
// testing purposes, inserts a probe user, and removes it again.
// It returns an error only for failures that make the database unusable;
// duplicate-insert/missing-delete situations are merely logged.
func setUp(db *sql.DB) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	if _, err = tx.Exec(`
	CREATE TABLE IF NOT EXISTS Objects (
		UUID STRING NOT NULL,
		Name STRING NOT NULL,
		OwnerKey STRING NOT NULL,
		OwnerName STRING,
		PermURL STRING,
		Location STRING,
		Position STRING,
		Rotation STRING,
		Velocity STRING,
		LastUpdate STRING
	);
	CREATE UNIQUE INDEX IF NOT EXISTS ObjectsUUID ON Objects (UUID);
	CREATE TABLE IF NOT EXISTS Users (Email STRING NOT NULL, Password STRING NOT NULL);
	CREATE UNIQUE INDEX IF NOT EXISTS UsersEmail ON Users (Email);
	`); err != nil {
		return err
	}
	// create one user to make tests, encode password as MD5
	// NOTE(review): MD5 is not a safe password hash; acceptable for this
	// throwaway test row only — never use this scheme for real credentials.
	pwdmd5 := fmt.Sprintf("%x", md5.Sum([]byte("onetwothree")))
	if _, err = tx.Exec(`INSERT INTO Users (Email, Password) VALUES ("gwyneth.llewelyn@gwynethllewelyn.net", "` + pwdmd5 + `");`); err != nil {
		Log.Warningf("couldn't insert because probably this user already exists; in any case, database access worked. Error was: %q", err)
	} // should fail if already inserted
	if err = tx.Commit(); err != nil {
		if err == sql.ErrTxDone {
			Log.Warningf("insert: transaction failure: %q", err)
		} else {
			Log.Warningf("couldn't insert because probably this user already exists; in any case, database access worked. Error was: %q", err)
		}
	}
	// now remove this item, since on the next way round it will give an error if it still exists! (gwyneth 20211022)
	// BUG FIX: the original reused the already-committed transaction for the
	// DELETE, so the cleanup (and its Commit) always failed with ErrTxDone.
	// A committed *sql.Tx cannot be reused; open a fresh transaction.
	tx, err = db.Begin()
	if err != nil {
		return err
	}
	if _, err = tx.Exec(`DELETE FROM Users WHERE Email="gwyneth.llewelyn@gwynethllewelyn.net"`); err != nil {
		Log.Warningf("couldn't delete this user because probably it was deleted; in any case, database access worked. Error was: %q", err)
	}
	if err = tx.Commit(); err != nil {
		if err == sql.ErrTxDone {
			Log.Warningf("delete: transaction failure: %q", err)
		} else {
			Log.Warningf("couldn't delete this user because probably it was deleted; in any case, database access worked. Error was: %q", err)
		}
	}
	return nil
}
// loadConfiguration loads all the configuration from the config.toml file.
// It's a separate function because we want to be able to do a killall -HUP goslrental to force the configuration to be read again.
// Also, if the configuration file changes, this ought to read it back in again without the need of a HUP signal (20170811).
func loadConfiguration() {
fmt.Print("Reading goslrental configuration:") // note that we might not have go-logging active as yet, so we use fmt
// Open our config file and extract relevant data from there
err := viper.ReadInConfig() // Find and read the config file
if err != nil {
fmt.Println(" Error:", err)
// this is a bit weird, but we need it just in case the path comes with a tilde '~' or similar shortcuts
// requiring interpretation.
path, err := expandPath(PathToStaticFiles)
if err != nil {
fmt.Println("Error expanding path:", err)
path = "" // we might get away with this as well
}
PathToStaticFiles = path
// set this here or hell will break out later on
logFormat = logging.MustStringFormatter(`%{color}%{time:2006/01/02 15:04:05.0} %{shortfile} - %{shortfunc} ▶ %{level:.4s}%{color:reset} %{message}`)
} else {
// Without these set, we cannot do anything
Host = viper.GetString("goslrental.Host"); fmt.Print(".")
viper.SetDefault("goslrental.URLPathPrefix", "") // empty by default, but you might add a 'main' website for information later
URLPathPrefix = viper.GetString("goslrental.URLPathPrefix"); fmt.Print(".")
GoSLRentalDSN = viper.GetString("goslrental.GoSLRentalDSN"); fmt.Print(".")
viper.SetDefault("PDO_Prefix", "ql") // for now, nothing else will work anyway...
PDO_Prefix = viper.GetString("goslrental.PDO_Prefix"); fmt.Print(".")
viper.SetDefault("goslrental.PathToStaticFiles", ".")
path, err := expandPath(viper.GetString("goslrental.PathToStaticFiles")); fmt.Print(".")
if err != nil {
fmt.Println("Error expanding path:", err)
path = "" // we might get away with this as well
}
PathToStaticFiles = path
viper.SetDefault("goslrental.ServerPort", ":3333")
ServerPort = viper.GetString("goslrental.ServerPort"); fmt.Print(".")
FrontEnd = viper.GetString("goslrental.FrontEnd"); fmt.Print(".")
tlsKEY = viper.GetString("goslrental.tlsKEY"); fmt.Print(".")
tlsCRT = viper.GetString("goslrental.tlsCRT"); fmt.Print(".")
viper.SetDefault("goslrental.LSLSignaturePIN", generatePIN(4)) // better than no signature at all
LSLSignaturePIN = viper.GetString("opensim.LSLSignaturePIN"); fmt.Print(".")
// logging options
viper.SetDefault("log.FileName", "log/goslrental.log")
logFileName = viper.GetString("log.FileName"); fmt.Print(".")
viper.SetDefault("log.Format", `%{color}%{time:2006/01/02 15:04:05.0} %{shortfile} - %{shortfunc} ▶ %{level:.4s}%{color:reset} %{message}`)
logFormat = logging.MustStringFormatter(viper.GetString("log.Format")); fmt.Print(".")
viper.SetDefault("log.MaxSize", 500)
logMaxSize = viper.GetInt("log.MaxSize"); fmt.Print(".")
viper.SetDefault("log.MaxBackups", 3)
logMaxBackups = viper.GetInt("log.MaxBackups"); fmt.Print(".")
viper.SetDefault("log.MaxAge", 28)
logMaxAge = viper.GetInt("log.MaxAge"); fmt.Print(".")
viper.SetDefault("log.SeverityStderr", logging.DEBUG)
switch viper.GetString("log.SeverityStderr") {
case "CRITICAL":
logSeverityStderr = logging.CRITICAL
case "ERROR":
logSeverityStderr = logging.ERROR
case "WARNING":
logSeverityStderr = logging.WARNING
case "NOTICE":
logSeverityStderr = logging.NOTICE
case "INFO":
logSeverityStderr = logging.INFO
case "DEBUG":
logSeverityStderr = logging.DEBUG
// default case is handled directly by viper
}
fmt.Print(".")
viper.SetDefault("log.SeverityFile", logging.DEBUG)
switch viper.GetString("log.SeverityFile") {
case "CRITICAL":
logSeverityFile = logging.CRITICAL
case "ERROR":
logSeverityFile = logging.ERROR
case "WARNING":
logSeverityFile = logging.WARNING
case "NOTICE":
logSeverityFile = logging.NOTICE
case "INFO":
logSeverityFile = logging.INFO
case "DEBUG":
logSeverityFile = logging.DEBUG
}
fmt.Print(".")
viper.SetDefault("log.SeveritySyslog", logging.CRITICAL) // we don't want to swamp syslog with debugging messages!!
switch viper.GetString("log.SeveritySyslog") {
case "CRITICAL":
logSeveritySyslog = logging.CRITICAL
case "ERROR":
logSeveritySyslog = logging.ERROR
case "WARNING":
logSeveritySyslog = logging.WARNING
case "NOTICE":
logSeveritySyslog = logging.NOTICE
case "INFO":
logSeveritySyslog = logging.INFO
case "DEBUG":
logSeveritySyslog = logging.DEBUG
}
fmt.Print(".")
fmt.Println("read!") // note that we might not have go-logging active as yet, so we use fmt
}
// Setup the lumberjack rotating logger. This is because we need it for the go-logging logger when writing to files. (20170813)
rotatingLogger := &lumberjack.Logger{
Filename: logFileName, // this is an option set on the config.yaml file, eventually the others will be so, too.
MaxSize: logMaxSize, // megabytes
MaxBackups: logMaxBackups,
MaxAge: logMaxAge, //days
}
// Setup the go-logging Logger. (20170812) We have three loggers: one to stderr, one to a logfile, one to syslog for critical stuff. (20170813
backendStderr := logging.NewLogBackend(os.Stderr, "", 0)
backendFile := logging.NewLogBackend(rotatingLogger, "", 0)
backendSyslog,_ := logging.NewSyslogBackend("")
// Set formatting for stderr and file (basically the same). I'm assuming syslog has its own format, but I'll have to see what happens (20170813).
backendStderrFormatter := logging.NewBackendFormatter(backendStderr, logFormat)
backendFileFormatter := logging.NewBackendFormatter(backendFile, logFormat)
// Check if we're overriding the default severity for each backend. This is user-configurable. By default: DEBUG, DEBUG, CRITICAL.
// TODO(gwyneth): What about a WebSocket backend using https://github.com/cryptix/exp/wslog ? (20170813)
backendStderrLeveled := logging.AddModuleLevel(backendStderrFormatter)
backendStderrLeveled.SetLevel(logSeverityStderr, "goslrental")
backendFileLeveled := logging.AddModuleLevel(backendFileFormatter)
backendFileLeveled.SetLevel(logSeverityFile, "goslrental")
backendSyslogLeveled := logging.AddModuleLevel(backendSyslog)
backendSyslogLeveled.SetLevel(logSeveritySyslog, "goslrental")
// Set the backends to be used. Logging should commence now.
logging.SetBackend(backendStderrLeveled, backendFileLeveled, backendSyslogLeveled)
fmt.Println("Logging set up.")
Log.Debug("Logging set up.")
}
// main starts here.
// It wires up the default logger, viper configuration (with live reload),
// UNIX signal handling, the QL database, the HTML templates, and finally
// serves HTTP or HTTPS depending on the TLS configuration.
func main() {
	// to change the flags on the default logger
	// see https://stackoverflow.com/a/24809859/1035977
	log.SetFlags(log.LstdFlags | log.Lshortfile)
	// Config viper, which reads in the configuration file every time it's needed.
	// Note that we need some hard-coded variables for the path and config file name.
	viper.SetConfigName("config.ini")
	viper.SetConfigType("ini") // just to make sure; it's the same format as OpenSimulator (or MySQL) config files
	viper.AddConfigPath(".") // optionally look for config in the working directory
	viper.AddConfigPath("$HOME/go/src/goslrental/") // that's how I have it
	viper.AddConfigPath("$HOME/go/src/git.gwynethllewelyn.net/GwynethLlewelyn/goslrental/") // that's how you'll have it
	loadConfiguration() // this gets loaded always, on the first time it runs
	viper.WatchConfig() // if the config file is changed, this is supposed to reload it (20170811)
	viper.OnConfigChange(func(e fsnotify.Event) {
		if (Log == nil) {
			fmt.Println("Config file changed:", e.Name) // if we couldn't configure the logging subsystem, it's better to print it to the console
		} else {
			Log.Info("Config file changed:", e.Name)
		}
		loadConfiguration() // I think that this needs to be here, or else, how does Viper know what to call?
	})
	// prepares a special channel to look for termination signals
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGHUP, syscall.SIGUSR1, syscall.SIGUSR2, syscall.SIGCONT)
	// goroutine which listens to signals and calls the loadConfiguration() function if someone sends us a HUP
	// NOTE(review): all four recognised signals fall through empty cases, so
	// loadConfiguration() is never actually invoked from here; only unknown
	// signals get a warning. Confirm whether SIGHUP was meant to reload.
	go func() {
		for {
			sig := <-sigs
			Log.Notice("Got signal", sig)
			switch sig {
			case syscall.SIGUSR1:
			case syscall.SIGUSR2:
			case syscall.SIGHUP:
			case syscall.SIGCONT:
			default:
				Log.Warning("Unknown UNIX signal", sig, "caught!! Ignoring...")
			}
		}
	}()
	ql.RegisterDriver() // this should allow us to use the 'normal' SQL Go bindings to use QL.
	if (Log == nil) {
		log.Fatal("Could not set up alternative logger for some reason...")
	}
	// do some database tests. If it fails, it means the database is broken or corrupted and it's worthless
	// to run this application anyway!
	fmt.Printf("GoSLRentalDSN: '%v' PathToStaticFiles: '%v'\n", GoSLRentalDSN, PathToStaticFiles)
	Log.Info("Testing opening database connection at ", GoSLRentalDSN, "\nPath to static files is:", PathToStaticFiles)
	db, err := sql.Open(PDO_Prefix, GoSLRentalDSN)
	// check error before deferring db.Close()
	if err != nil {
		log.Fatalf("failed to open db: %s", err)
	}
	defer db.Close()
	if err = setUp(db); err != nil {
		log.Fatalf("failed to create table: %s", err)
	}
	if err = db.Ping(); err != nil {
		log.Fatalf("failed to ping db: %s", err)
	}
	// Now prepare the web interface
	// Check if path makes sense:
	Log.Info("Path is:", PathToStaticFiles + "/templates/*.tpl", "URL Path Prefix is:", URLPathPrefix, "Path to static files is:", PathToStaticFiles)
	// Load all templates
	err = GoSLRentalTemplates.init(PathToStaticFiles + "/templates/*.tpl")
	checkErr(err) // abort if templates are not found
	// Register in-world handlers for script calls.
	http.HandleFunc(URLPathPrefix + "/register/", registerObject)
	// Static files. This should be handled directly by nginx, but we include it here
	// for a standalone version...
	fslib := http.FileServer(http.Dir(PathToStaticFiles + "/lib"))
	http.Handle(URLPathPrefix + "/lib/", http.StripPrefix(URLPathPrefix + "/lib/", fslib))
	templatelib := http.FileServer(http.Dir(PathToStaticFiles + "/templates"))
	http.Handle(URLPathPrefix + "/templates/",
		http.StripPrefix(URLPathPrefix + "/templates/", templatelib)) // not sure if this is needed
	// Back-office (admin) routes; the bare "/" falls through to the login page.
	http.HandleFunc(URLPathPrefix + "/admin/logout/", backofficeLogout)
	http.HandleFunc(URLPathPrefix + "/admin/login/", backofficeLogin) // probably not necessary
	http.HandleFunc(URLPathPrefix + "/admin/user-management/", backofficeUserManagement)
	http.HandleFunc(URLPathPrefix + "/admin/lsl-register-object/", backofficeLSLRegisterObject)
	// fallthrough for admin
	http.HandleFunc(URLPathPrefix + "/admin/", backofficeMain)
	http.HandleFunc(URLPathPrefix + "/", backofficeLogin) // if not auth, then get auth
	// Serve over HTTPS when both a certificate and key are configured;
	// fall back to plain HTTP when TLS startup fails or is not configured.
	if (tlsCRT != "" && tlsKEY != "") {
		err = http.ListenAndServeTLS(ServerPort, tlsCRT, tlsKEY, nil) // if it works, it will never return
		if (err != nil) {
			log.Printf("[WARN] Could not run with TLS; either the certificate %q was not found, or the private key %q was not found, or either [maybe even both] are invalid.\n", tlsCRT, tlsKEY)
			log.Println("[INFO] Running _without_ TLS on the usual port")
			err = http.ListenAndServe(ServerPort, nil)
		}
	} else {
		log.Println("[INFO] Running with standard HTTP on the usual port, no TLS configuration detected")
		err = http.ListenAndServe(ServerPort, nil) // set listen port
	}
	checkErr(err) // if it can't listen to all the above, then it has to abort anyway
}
// checkErrPanic logs a fatal error — annotated with the caller's file, line,
// and program counter — and panics. No-op when err is nil.
func checkErrPanic(err error) {
	if err == nil {
		return
	}
	pc, file, line, ok := runtime.Caller(1)
	Log.Panic(filepath.Base(file), ":", line, ":", pc, ok, " - panic:", err)
}
// checkErr checks if there is an error, and if yes, it logs it out — tagged
// with the caller's file, line, and program counter — and continues.
// This is for 'normal' situations when we want a log entry if something goes
// wrong but do not need to panic.
func checkErr(err error) {
	if err == nil {
		return
	}
	pc, file, line, ok := runtime.Caller(1)
	Log.Error(filepath.Base(file), ":", line, ":", pc, ok, " - error:", err)
}
// expandPath expands a leading tilde to the current user's home directory.
// Based on http://stackoverflow.com/a/43578461/1035977, with a fix: only "~"
// by itself or a "~/..." prefix refer to the current user's home directory.
// BUG FIX: the original glued "~otheruser/..." onto *our* home directory
// (e.g. "~root/x" became "$HOME/root/x"); such paths are now returned
// unchanged, since resolving other users' homes is not supported here.
func expandPath(path string) (string, error) {
	if len(path) == 0 || path[0] != '~' {
		return path, nil
	}
	if len(path) > 1 && path[1] != '/' && path[1] != byte(filepath.Separator) {
		// "~something" — a different user's home; leave it untouched.
		return path, nil
	}
	usr, err := user.Current()
	if err != nil {
		return "", err
	}
	// filepath.Join cleans the doubled separator in "~/..." automatically.
	return filepath.Join(usr.HomeDir, path[1:]), nil
}
// generatePIN with `nr` digits (0-9)
func generatePIN(nr int) string {
const digits = "0123456789"
return uniuri.NewLenChars(nr, []byte(digits))
} |
package lcd
import (
"github.com/gorilla/mux"
"github.com/irisnet/irishub/client/context"
"github.com/irisnet/irishub/codec"
)
// RegisterRoutes - Central function to define routes that get registered by the main application
func RegisterRoutes(cliCtx context.CLIContext, r *mux.Router, cdc *codec.Codec) {
	// Set the address a delegator's rewards are withdrawn to.
	r.HandleFunc("/distribution/{delegatorAddr}/withdraw-address", SetWithdrawAddressHandlerFn(cdc, cliCtx)).Methods("POST")
	// Trigger withdrawal of the delegator's accumulated rewards.
	r.HandleFunc("/distribution/{delegatorAddr}/rewards/withdraw", WithdrawRewardsHandlerFn(cdc, cliCtx)).Methods("POST")
	// Query the currently configured withdraw address.
	r.HandleFunc("/distribution/{delegatorAddr}/withdraw-address",
		QueryWithdrawAddressHandlerFn(cliCtx)).Methods("GET")
	// Query pending rewards for the given address.
	r.HandleFunc("/distribution/{address}/rewards",
		QueryRewardsHandlerFn(cliCtx)).Methods("GET")
}
|
// Package isgd provides wrapper for url shortener services provided by `is.gd`
package isgd
import (
"encoding/json"
"errors"
"io/ioutil"
"net/http"
"net/url"
)
// Shorten calls the is.gd shortener service with the long URL provided and
// returns the shortened URL, plus an error for transport failures, non-200
// responses, or an unexpected response payload.
func Shorten(longUrl string) (string, error) {
	client := &http.Client{}
	parameters := url.Values{}
	parameters.Add("format", "json")
	parameters.Add("url", longUrl)
	req, err := http.NewRequest("GET", "http://is.gd/create.php?"+parameters.Encode(), nil)
	if err != nil {
		return "", err
	}
	resp, err := client.Do(req)
	// BUG FIX: the original read resp.StatusCode before checking err; when
	// Do fails, resp is nil and that dereference panics.
	if err != nil {
		return "", err
	}
	// BUG FIX: defer the close before the status check so the body is also
	// released on non-200 responses (the original leaked it there).
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", errors.New(resp.Status)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	var f interface{}
	if err = json.Unmarshal(body, &f); err != nil {
		return "", err
	}
	// BUG FIX: use checked type assertions so an unexpected payload shape
	// (e.g. an is.gd error object) yields an error instead of a panic.
	urlHash, ok := f.(map[string]interface{})
	if !ok {
		return "", errors.New("unexpected response from is.gd")
	}
	shortUrl, ok := urlHash["shorturl"].(string)
	if !ok {
		return "", errors.New("no shorturl in is.gd response")
	}
	return shortUrl, nil
}
|
package distutil
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
)
// WasmExecJsPath finds wasm_exec.js in the local Go distribution and returns
// its path. It returns an error if the go tool fails or the file is missing.
func WasmExecJsPath() (string, error) {
	// Use Output rather than CombinedOutput: anything the go tool prints to
	// stderr (warnings, toolchain notices) would otherwise be mixed into the
	// GOROOT path and corrupt it.
	b, err := exec.Command("go", "env", "GOROOT").Output()
	if err != nil {
		return "", err
	}
	bstr := strings.TrimSpace(string(b))
	if bstr == "" {
		return "", fmt.Errorf("failed to find wasm_exec.js, empty path from `go env GOROOT`")
	}
	// NOTE(review): this is the pre-Go-1.24 location; newer distributions
	// moved the file to lib/wasm — confirm against the targeted Go version.
	p := filepath.Join(bstr, "misc/wasm/wasm_exec.js")
	if _, err = os.Stat(p); err != nil {
		return "", err
	}
	return p, nil
}
// MustWasmExecJsPath finds wasm_exec.js in the local Go distribution and
// returns its path, panicking if it cannot be located.
func MustWasmExecJsPath() string {
	path, err := WasmExecJsPath()
	if err != nil {
		panic(err)
	}
	return path
}
|
package requests
import (
"errors"
"fmt"
"log"
"net"
"net/http"
"net/http/httptest"
"runtime"
"strings"
"testing"
"time"
"github.com/alessiosavi/Requests/datastructure"
)
// Remove comment for set the log at debug level
// req is the shared zero-value Request; tests use it both to exercise the
// uninitialised-request error paths and as a plain request sender.
var req Request // = InitDebugRequest()
// TestCreateHeaderList verifies that CreateHeaderList fails on an
// uninitialised request and succeeds — actually setting the header — on a
// properly initialised one.
func TestCreateHeaderList(t *testing.T) {
	t.Parallel()
	const headersKey, headersValue = `Content-Type`, `application/json`
	// The package-level req was never initialised, so this must fail.
	if err := req.CreateHeaderList(headersKey, headersValue); err == nil {
		t.Error("Error, request is not initialized!")
	}
	request, err := InitRequest("http://", "POST", nil, false, false)
	if err != nil {
		t.Error("Error!: ", err)
	}
	if err = request.CreateHeaderList(headersKey, headersValue); err != nil {
		t.Error("Error!", err)
	}
	// Confirm the header actually landed on the underlying http.Request.
	if request.Req.Header.Get(headersKey) != headersValue {
		t.Error("Headers key mismatch!")
	}
}
func TestSendRequest(t *testing.T) {
t.Parallel()
var resp *datastructure.Response
resp = makeBadRequestURL1()
if resp == nil || resp.Error == nil {
t.Fail()
} else {
t.Log("makeBadRequestURL1 Passed!")
}
// t.Log(resp.Dump())
resp = makeBadRequestURL2()
if resp == nil || resp.Error == nil {
t.Fail()
} else {
t.Log("makeBadRequestURL2 Passed!")
}
// t.Log(resp.Dump())
resp = makeOKRequestURL3()
if resp == nil || resp.Error != nil || resp.StatusCode != 200 {
t.Fail()
} else {
t.Log("makeOKRequestURL3 Passed!")
}
// t.Log(resp.Dump())
}
// BenchmarkRequestGETWithoutTLS measures one plain-HTTP GET per iteration.
// NOTE(review): assumes a server listening on 127.0.0.1:9999 — confirm.
func BenchmarkRequestGETWithoutTLS(t *testing.B) {
	var r Request
	for i := 0; i < t.N; i++ {
		r.SendRequest("http://127.0.0.1:9999", "GET", nil, []string{"Connection", "Close"}, false, 0)
	}
}
// BenchmarkRequestPOSTWithoutTLS measures one empty-body plain-HTTP POST per
// iteration. NOTE(review): assumes a server on 127.0.0.1:9999 — confirm.
func BenchmarkRequestPOSTWithoutTLS(t *testing.B) {
	var r Request
	for i := 0; i < t.N; i++ {
		r.SendRequest("http://127.0.0.1:9999", "POST", []byte{}, []string{"Connection", "Close"}, false, 0)
	}
}
// BenchmarkParallelRequestGETWithoutTLS builds t.N GET requests up front and
// then pushes the whole batch through ParallelRequest t.N times, using one
// worker per CPU.
// NOTE(review): running a t.N-sized batch inside a t.N loop makes the
// measured work quadratic in t.N — confirm this is intentional.
func BenchmarkParallelRequestGETWithoutTLS(t *testing.B) {
	var n = t.N
	var requests = make([]Request, n)
	for i := 0; i < n; i++ {
		req, err := InitRequest("http://127.0.0.1:9999", "GET", nil, true, false)
		if err == nil && req != nil {
			req.AddHeader("Connection", "Close")
			requests[i] = *req
		} else if err != nil {
			t.Error("error: ", err)
		}
	}
	for i := 0; i < t.N; i++ {
		ParallelRequest(requests, runtime.NumCPU())
	}
}
// BenchmarkParallelRequestPOSTWithoutTLS mirrors the GET variant with
// empty-body POST requests: a t.N-sized batch run through ParallelRequest
// t.N times with one worker per CPU.
// NOTE(review): the nested t.N loops make the work quadratic — confirm.
func BenchmarkParallelRequestPOSTWithoutTLS(t *testing.B) {
	var n = t.N
	var requests = make([]Request, n)
	for i := 0; i < n; i++ {
		req, err := InitRequest("http://127.0.0.1:9999", "POST", []byte{}, true, false)
		if err == nil && req != nil {
			req.AddHeader("Connection", "Close")
			requests[i] = *req
		} else if err != nil {
			t.Error("error: ", err)
		}
	}
	for i := 0; i < t.N; i++ {
		ParallelRequest(requests, runtime.NumCPU())
	}
}
// makeBadRequestURL1 issues a request with an unsupported "tcp://" scheme;
// callers expect the returned response to carry a non-nil Error.
func makeBadRequestURL1() *datastructure.Response {
	return req.SendRequest("tcp://google.it", "GET", nil, nil, true, 0)
}
// makeBadRequestURL2 issues a request with no URL scheme at all; callers
// expect the returned response to carry a non-nil Error.
func makeBadRequestURL2() *datastructure.Response {
	return req.SendRequest("google.it", "GET", nil, nil, true, 0)
}
// makeOKRequestURL3 issues a well-formed HTTPS GET (needs network access);
// callers expect a nil Error and status 200.
func makeOKRequestURL3() *datastructure.Response {
	return req.SendRequest("https://google.it", "GET", nil, nil, true, 0)
}
// headerTestCase is one CreateHeaderList scenario: the key/value input list,
// whether creation is expected to succeed, and a case number for messages.
type headerTestCase struct {
	input    []string // alternating header keys and values
	expected bool     // true when CreateHeaderList should succeed
	number   int      // case identifier used in failure messages
}
func TestRequest_CreateHeaderList(t *testing.T) {
t.Parallel()
var request *Request
request, err := InitRequest("http://", "POST", nil, false, false)
if err != nil {
t.Error("Error!", err)
}
cases := []headerTestCase{
{input: []string{"Content-Type", "text/plain"}, expected: true, number: 1},
{input: []string{"Content-Type"}, expected: false, number: 2},
{input: []string{"Content-Type", "text/plain", "Error"}, expected: false, number: 3},
}
for _, c := range cases {
err := request.CreateHeaderList(c.input...)
if (c.expected && err != nil) || (!c.expected && err == nil) {
t.Errorf("Expected %v for input %v [test n. %d]", c.expected, c.input, c.number)
}
}
}
// requestTestCase describes one request scenario and the error (if any) the
// call under test is expected to produce.
type requestTestCase struct {
	host     string // target URL (scheme included for the valid cases)
	method   string // HTTP verb
	body     []byte // request payload (nil for GET)
	skipTLS  bool   // skip TLS certificate verification
	expected error  // expected error; nil when the call should succeed
	number   int    // case identifier used in failure messages
}
// TestRequest_SendRequest starts a local HTTP server on 127.0.0.1:8082 and
// drives SendRequest through valid and invalid host/method combinations,
// comparing any returned error to the expected one by substring.
func TestRequest_SendRequest(t *testing.T) {
	t.Parallel()
	var request Request
	// create a listener with the desired port.
	l, err := net.Listen("tcp", "127.0.0.1:8082")
	if err != nil {
		t.Fatal(err)
	}
	ts := httptest.NewUnstartedServer(nil)
	// NewUnstartedServer creates a listener. Close that listener and replace
	// with the one we created.
	_ = ts.Listener.Close()
	ts.Listener = l
	// Start the server.
	ts.Start()
	cases := []requestTestCase{
		// GET
		{host: "http://localhost:8082/", method: "GET", body: nil, skipTLS: false, expected: nil, number: 1},
		{host: "http://localhost:8082/", method: "GET", body: nil, skipTLS: true, expected: nil, number: 2},
		{host: "localhost:8082/", method: "GET", body: nil, skipTLS: false, expected: errors.New("PREFIX_URL_NOT_VALID"), number: 3},
		// POST
		{host: "localhost:8082/", method: "POST", body: []byte{}, skipTLS: true, expected: errors.New("PREFIX_URL_NOT_VALID"), number: 4},
		{host: "localhost:8082/", method: "POST", body: nil, skipTLS: true, expected: errors.New("PREFIX_URL_NOT_VALID"), number: 5},
		{host: "http://localhost:8082/", method: "HEAD", body: nil, skipTLS: false, expected: errors.New("HTTP_METHOD_NOT_MANAGED"), number: 6},
		// Port 8080 has no server, so sending must fail.
		{host: "http://localhost:8080/", method: "GET", body: nil, skipTLS: false, expected: errors.New("ERROR_SENDING_REQUEST"), number: 7},
	}
	for _, c := range cases {
		resp := request.SendRequest(c.host, c.method, c.body, nil, c.skipTLS, 0)
		if c.expected != resp.Error {
			if c.expected != nil && resp.Error != nil {
				// Substring match: the implementation may wrap the message.
				if !strings.Contains(resp.Error.Error(), c.expected.Error()) {
					t.Errorf("Expected %v, received %v [test n. %d]", c.expected, resp.Error, c.number)
				}
			} else {
				t.Error("Url not reachable! Spawn a simple server (python3 -m http.server 8081 || python -m SimpleHTTPServer 8081)")
			}
		}
	}
	// Cleanup.
	ts.Close()
}
// TestRequest_InitRequest drives InitRequest through a table of valid and
// invalid host/method combinations and compares the returned error against
// the expectation by message text.
func TestRequest_InitRequest(t *testing.T) {
	t.Parallel()
	cases := []requestTestCase{
		// GET
		{host: "http://localhost:8081/", method: "GET", body: nil, skipTLS: false, expected: nil, number: 1},
		{host: "http://localhost:8081/", method: "GET", body: nil, skipTLS: true, expected: nil, number: 2},
		{host: "localhost:8081/", method: "GET", body: nil, skipTLS: false, expected: errors.New("PREFIX_URL_NOT_VALID"), number: 3},
		// POST
		{host: "localhost:8081/", method: "POST", body: []byte{}, skipTLS: true, expected: errors.New("PREFIX_URL_NOT_VALID"), number: 4},
		{host: "localhost:8081/", method: "POST", body: nil, skipTLS: true, expected: errors.New("PREFIX_URL_NOT_VALID"), number: 5},
		{host: "http://localhost:8081/", method: "HEAD", body: nil, skipTLS: false, expected: errors.New("HTTP_METHOD_NOT_MANAGED"), number: 6},
	}
	for _, c := range cases {
		_, err := InitRequest(c.host, c.method, c.body, c.skipTLS, false)
		// BUG FIX: the original called c.expected.Error()/err.Error() without
		// nil checks, panicking whenever exactly one of the two was nil
		// (i.e. precisely when the test should have reported a failure).
		switch {
		case c.expected == nil && err == nil:
			// Both nil: case passes.
		case c.expected == nil || err == nil:
			t.Errorf("Expected %v, received %v [test n. %d]", c.expected, err, c.number)
		case c.expected.Error() != err.Error():
			t.Errorf("Expected %v, received %v [test n. %d]", c.expected, err.Error(), c.number)
		}
	}
}
// Test_Headers starts a local server on 127.0.0.1:8083 that sets six custom
// response headers and verifies SendRequest surfaces at least six headers.
func Test_Headers(t *testing.T) {
	t.Parallel()
	var req Request
	// create a listener with the desired port.
	l, err := net.Listen("tcp", "127.0.0.1:8083")
	if err != nil {
		log.Fatal(err)
	}
	f := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("1", "1")
		w.Header().Set("2", "2")
		w.Header().Set("3", "3")
		w.Header().Set("4", "4")
		w.Header().Set("5", "5")
		w.Header().Set("6", "6")
		_, _ = fmt.Fprintf(w, "Hello, %s", r.Proto)
	})
	ts := httptest.NewUnstartedServer(f)
	// NewUnstartedServer creates a listener. Close that listener and replace
	// with the one we created.
	_ = ts.Listener.Close()
	ts.Listener = l
	// Start the server.
	ts.Start()
	// Give the server a brief moment to start accepting connections.
	time.Sleep(1 * time.Millisecond)
	url := `http://127.0.0.1:8083`
	resp := req.SendRequest(url, "GET", nil, nil, true, 1*time.Second)
	if resp.Error != nil {
		t.Error("Request failed: ", resp.Error)
	}
	// At minimum the six headers set by the handler must be present.
	if len(resp.Headers) < 6 {
		t.Error("Not enough headers: ", len(resp.Headers))
		t.Error(resp.Headers)
	}
	ts.CloseClientConnections()
	ts.Close()
}
// TestRequest_ExecuteRequest starts a local server on 127.0.0.1:8084 and,
// for every case whose InitRequest succeeds, executes the request with a
// shared http.Client and compares the error by substring.
// NOTE(review): cases where InitRequest itself fails (the invalid-prefix and
// invalid-method ones) are silently skipped — their expected errors are
// never asserted. Confirm whether that is intended.
func TestRequest_ExecuteRequest(t *testing.T) {
	t.Parallel() // create a listener with the desired port.
	l, err := net.Listen("tcp", "127.0.0.1:8084")
	if err != nil {
		log.Fatal(err)
	}
	ts := httptest.NewUnstartedServer(nil)
	// NewUnstartedServer creates a listener. Close that listener and replace
	// with the one we created.
	_ = ts.Listener.Close()
	ts.Listener = l
	// Start the server.
	ts.Start()
	cases := []requestTestCase{
		// GET
		{host: "http://localhost:8084/", method: "GET", body: nil, skipTLS: false, expected: nil, number: 1},
		{host: "http://localhost:8084/", method: "GET", body: nil, skipTLS: true, expected: nil, number: 2},
		{host: "localhost:8084/", method: "GET", body: nil, skipTLS: false, expected: errors.New("PREFIX_URL_NOT_VALID"), number: 3},
		// POST
		{host: "localhost:8084/", method: "POST", body: []byte{}, skipTLS: true, expected: errors.New("PREFIX_URL_NOT_VALID"), number: 4},
		{host: "localhost:8084/", method: "POST", body: nil, skipTLS: true, expected: errors.New("PREFIX_URL_NOT_VALID"), number: 5},
		{host: "http://localhost:8084/", method: "HEAD", body: nil, skipTLS: false, expected: errors.New("HTTP_METHOD_NOT_MANAGED"), number: 6},
		// Port 8080 has no server, so execution must fail.
		{host: "http://localhost:8080/", method: "GET", body: nil, skipTLS: false, expected: errors.New("ERROR_SENDING_REQUEST"), number: 7},
	}
	client := &http.Client{}
	for _, c := range cases {
		req, err := InitRequest(c.host, c.method, c.body, c.skipTLS, false)
		if err == nil {
			resp := req.ExecuteRequest(client)
			if c.expected != nil && resp.Error != nil {
				// Substring match: the implementation may wrap the message.
				if !strings.Contains(resp.Error.Error(), c.expected.Error()) {
					t.Errorf("Expected %v, received %v [test n. %d]", c.expected, resp.Error, c.number)
				}
			}
		}
	}
	// Cleanup.
	ts.Close()
}
// timeoutTestCase describes one timeout scenario: the endpoint and request
// shape, the timeout (in seconds) to configure, and a case number.
type timeoutTestCase struct {
	host    string // endpoint to hit
	method  string // HTTP verb
	body    []byte // request payload (nil for GET)
	skipTLS bool   // skip TLS certificate verification
	time    int    // timeout in seconds, applied via SetTimeout
	number  int    // case identifier used in failure messages
}
// TestRequest_Timeout sets a timeout larger than the test server's delay and
// checks both that the request succeeds and that it returned within the
// configured timeout.
// Need to run the server present in example/server_example.py
func TestRequest_Timeout(t *testing.T) {
	t.Parallel()
	cases := []timeoutTestCase{
		// GET
		{host: "https://localhost:5000/timeout", method: "GET", body: nil, skipTLS: true, time: 11, number: 1},
	}
	for _, c := range cases {
		var req Request // = InitDebugRequest()
		req.SetTimeout(time.Second * time.Duration(c.time))
		start := time.Now()
		resp := req.SendRequest(c.host, c.method, c.body, nil, c.skipTLS, 0)
		elapsed := time.Since(start)
		if resp.Error != nil {
			t.Errorf("Received an error -> %v [test n. %d].\n Be sure that the python server on ./example folder is up and running", resp.Error, c.number)
		}
		// The call must not have taken longer than the configured timeout.
		if time.Duration(c.time)*time.Second < elapsed {
			t.Error("Error timeout")
		}
	}
}
// TestParallelRequest fires 1000 GET requests at a local test server through
// ParallelRequest (one worker per CPU) and fails on any per-request error.
// Needs the example server from ./example running on https://127.0.0.1:5000.
func TestParallelRequest(t *testing.T) {
	t.Parallel()
	start := time.Now()
	// This array will contains the list of request
	var reqs []Request
	// This array will contains the response from the given request
	var response []datastructure.Response
	// Set to run at max N request in parallel (use CPU count for best effort)
	var N = runtime.NumCPU()
	// Create the list of request
	for i := 0; i < 1000; i++ {
		// Run against the `server_example.py` present in this folder
		req, err := InitRequest("https://127.0.0.1:5000", "GET", nil, true, false) // Alternate cert validation
		if err != nil {
			t.Error("Error request [", i, "]. Error: ", err)
		} else {
			req.SetTimeout(10 * time.Second)
			reqs = append(reqs, *req)
		}
	}
	// Run the request in parallel
	response = ParallelRequest(reqs, N)
	elapsed := time.Since(start)
	for i := range response {
		if response[i].Error != nil {
			t.Error("Error request [", i, "]. Error: ", response[i].Error)
		}
	}
	t.Logf("Sending %d Requests took %s", len(reqs), elapsed)
}
// Test_escapeURL checks that escapeURL percent-encodes unsafe characters
// (spaces) while leaving the rest of the URL untouched.
func Test_escapeURL(t *testing.T) {
	cases := []struct {
		name  string
		input string
		want  string
	}{
		// TODO: Add test cases.
		{
			name:  "testOK",
			input: "https://example.com/api/items?lang=en&search=escape this path",
			want:  "https://example.com/api/items?lang=en&search=escape%20this%20path",
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			if got := escapeURL(c.input); got != c.want {
				t.Errorf("escapeURL() = %v, want %v", got, c.want)
			}
		})
	}
}
|
package main
import "testing"
// TestGetNtpTime checks that querying a public NTP pool server succeeds.
// NOTE: this test needs outbound network access to pass.
func TestGetNtpTime(t *testing.T) {
	ntpServer := "0.beevik-ntp.pool.ntp.org"
	// The original guarded against an empty server name here, but the name
	// is a non-empty literal one line up — the check was dead code.
	_, e := GetNtpTime(ntpServer)
	if e != nil {
		// Include the underlying error so a failure is diagnosable.
		t.Fatalf("bad return value for ntp server %s: %v", ntpServer, e)
	}
}
|
package controller
import (
"encoding/json"
"fmt"
"net/http"
"github.com/ksw95/GoIndustrialProject/API/models"
"github.com/labstack/echo"
"golang.org/x/crypto/bcrypt"
)
// InsertUserCond decodes a UserCond from the request body and inserts a new
// row into the Condition table inside a transaction, rolling back on any
// failure and answering with a JSON status response either way.
func (dbHandler *DBHandler) InsertUserCond(c echo.Context) error {
	userC := models.UserCond{}
	if err := json.NewDecoder(c.Request().Body).Decode(&userC); err != nil {
		fmt.Println(err.Error())
		return newResponse(c, "Bad Request", "false", http.StatusBadRequest, nil)
	}
	// prepare statement to insert record
	// BUG FIX: use the receiver dbHandler, not the type name DBHandler.
	tx, err := dbHandler.DB.Begin()
	if err != nil {
		return err
	}
	// first statement
	// BUG FIX: removed the stray extra ')' that unbalanced the VALUES clause.
	stmt, err1 := tx.Prepare("INSERT INTO Condition VALUES (?, DATE_ADD(NOW(), INTERVAL 8 HOUR), ?, ?, ?, ?)")
	// BUG FIX: the original printed err1 only when it was nil (inverted
	// check) and then called Exec on a possibly-nil stmt; report the prepare
	// failure and roll back instead.
	if err1 != nil {
		fmt.Println(err1)
		tx.Rollback()
		return newResponse(c, "rolled back", "false", http.StatusBadRequest, nil)
	}
	_, err = stmt.Exec(userC.Username, userC.MaxCalories, userC.Diabetic, userC.Halal, userC.Vegan)
	stmt.Close()
	switch err {
	case nil:
		_ = tx.Commit()
		return newResponse(c, "ok", "true", http.StatusOK, nil)
	default:
		tx.Rollback()
		return newResponse(c, "rolled back", "false", http.StatusBadRequest, nil)
	}
}
// UpdateUserCond decodes a UserCond from the request body and updates the
// matching row in the Condition table inside a transaction, rolling back on
// failure and answering with a JSON status response either way.
func (dbHandler *DBHandler) UpdateUserCond(c echo.Context) error {
	userC := models.UserCond{}
	if err := json.NewDecoder(c.Request().Body).Decode(&userC); err != nil {
		fmt.Println(err.Error())
		return newResponse(c, "Bad Request", "false", http.StatusBadRequest, nil)
	}
	// prepare statement to update record
	// BUG FIX: use the receiver dbHandler, not the type name DBHandler.
	tx, err := dbHandler.DB.Begin()
	if err != nil {
		return err
	}
	// first statement
	stmt, err1 := tx.Prepare("UPDATE Condition " +
		"SET LastLogin=DATE_ADD(NOW(), INTERVAL 8 HOUR), MaxCalories=?, Diabetic=?, Halal=?, Vegan=? " +
		"WHERE Username=?")
	// BUG FIX: the original called stmt.Close() unconditionally, which
	// panics on a nil stmt when Prepare fails, and then silently committed
	// having done nothing; report the failure and roll back instead.
	if err1 != nil {
		fmt.Println(err1)
		tx.Rollback()
		return newResponse(c, "rolled back", "false", http.StatusBadRequest, nil)
	}
	_, err = stmt.Exec(userC.MaxCalories, userC.Diabetic, userC.Halal, userC.Vegan, userC.Username)
	stmt.Close()
	switch err {
	case nil:
		_ = tx.Commit()
		return newResponse(c, "ok", "true", http.StatusOK, nil)
	default:
		tx.Rollback()
		return newResponse(c, "rolled back", "false", http.StatusBadRequest, nil)
	}
}
// GetUserCond looks up a single user condition row by the Username query
// parameter and returns it as JSON.
func (dbHandler *DBHandler) GetUserCond(c echo.Context) error {
	userC := models.UserCond{}
	//get id param
	username := c.QueryParam("Username")
	if username == "" {
		return newResponse(c, "Bad Request", "false", http.StatusBadRequest, nil)
	}
	// query mysql
	// Bug fix: originally used the type name DBHandler instead of the
	// receiver dbHandler.
	// NOTE(review): this queries MemberType while the scanned columns match
	// the Condition table — confirm the intended table name.
	results, err1 := dbHandler.DB.Query("SELECT * FROM MemberType WHERE Username=?", username)
	if err1 != nil {
		fmt.Println(err1.Error())
		return newResponse(c, "Bad Request", "false", http.StatusBadRequest, nil)
	}
	defer results.Close()
	//scan mysql result
	// Bug fix: the Next() return value was ignored; report not-found instead
	// of scanning an empty result set.
	if !results.Next() {
		return newResponse(c, "Bad Request", "false", http.StatusBadRequest, nil)
	}
	err2 := results.Scan(&userC.Username, &userC.LastLogin, &userC.MaxCalories, &userC.Diabetic, &userC.Halal, &userC.Vegan)
	if err2 != nil {
		fmt.Println(err2.Error())
		return newResponse(c, "Bad Request", "false", http.StatusBadRequest, nil)
	}
	//return json
	// Bug fix: previously returned an undefined identifier `restaurant`.
	return newResponse(c, "ok", "true", http.StatusOK, &[]interface{}{userC})
}
|
package problem0079
import "testing"
// TestWordSearch logs the result of exist on a 3x4 board with a word that
// snakes through every cell of the grid.
func TestWordSearch(t *testing.T) {
	board := [][]byte{
		{'A', 'B', 'C', 'E'},
		{'S', 'F', 'E', 'S'},
		{'A', 'D', 'E', 'E'},
	}
	t.Log(exist(board, "ABCESEEEFS"))
}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
)
// TestAzureAuthProvider verifies construction of the azure auth provider from
// a kubeconfig-style string map, covering both rejected configurations and
// valid ones (including both config modes).
func TestAzureAuthProvider(t *testing.T) {
	t.Run("validate against invalid configurations", func(t *testing.T) {
		// Each cfg is expected to be rejected with an error message that
		// contains expectedError.
		vectors := []struct {
			cfg           map[string]string
			expectedError string
		}{
			{
				cfg: map[string]string{
					cfgClientID:    "foo",
					cfgApiserverID: "foo",
					cfgTenantID:    "foo",
					cfgConfigMode:  "-1",
				},
				expectedError: "config-mode:-1 is not a valid mode",
			},
			{
				cfg: map[string]string{
					cfgClientID:    "foo",
					cfgApiserverID: "foo",
					cfgTenantID:    "foo",
					cfgConfigMode:  "2",
				},
				expectedError: "config-mode:2 is not a valid mode",
			},
			{
				// Non-numeric mode: the strconv.Atoi failure is surfaced.
				cfg: map[string]string{
					cfgClientID:    "foo",
					cfgApiserverID: "foo",
					cfgTenantID:    "foo",
					cfgConfigMode:  "foo",
				},
				expectedError: "failed to parse config-mode, error: strconv.Atoi: parsing \"foo\": invalid syntax",
			},
		}
		for _, v := range vectors {
			persister := &fakePersister{}
			_, err := newAzureAuthProvider("", v.cfg, persister)
			if !strings.Contains(err.Error(), v.expectedError) {
				t.Errorf("cfg %v should fail with message containing '%s'. actual: '%s'", v.cfg, v.expectedError, err)
			}
		}
	})
	t.Run("it should return non-nil provider in happy cases", func(t *testing.T) {
		// An absent or "0" config-mode falls back to configModeDefault;
		// "1" selects configModeOmitSPNPrefix.
		vectors := []struct {
			cfg                map[string]string
			expectedConfigMode configMode
		}{
			{
				cfg: map[string]string{
					cfgClientID:    "foo",
					cfgApiserverID: "foo",
					cfgTenantID:    "foo",
				},
				expectedConfigMode: configModeDefault,
			},
			{
				cfg: map[string]string{
					cfgClientID:    "foo",
					cfgApiserverID: "foo",
					cfgTenantID:    "foo",
					cfgConfigMode:  "0",
				},
				expectedConfigMode: configModeDefault,
			},
			{
				cfg: map[string]string{
					cfgClientID:    "foo",
					cfgApiserverID: "foo",
					cfgTenantID:    "foo",
					cfgConfigMode:  "1",
				},
				expectedConfigMode: configModeOmitSPNPrefix,
			},
		}
		for _, v := range vectors {
			persister := &fakePersister{}
			provider, err := newAzureAuthProvider("", v.cfg, persister)
			if err != nil {
				t.Errorf("newAzureAuthProvider should not fail with '%s'", err)
			}
			if provider == nil {
				t.Fatalf("newAzureAuthProvider should return non-nil provider")
			}
			// Downcast to reach the internal token source and assert the
			// parsed config mode was propagated.
			azureProvider := provider.(*azureAuthProvider)
			if azureProvider == nil {
				t.Fatalf("newAzureAuthProvider should return an instance of type azureAuthProvider")
			}
			ts := azureProvider.tokenSource.(*azureTokenSource)
			if ts == nil {
				t.Fatalf("azureAuthProvider should be an instance of azureTokenSource")
			}
			if ts.configMode != v.expectedConfigMode {
				t.Errorf("expected configMode: %d, actual: %d", v.expectedConfigMode, ts.configMode)
			}
		}
	})
}
// TestTokenSourceDeviceCode verifies the constructor validation of
// azureTokenSourceDeviceCode: both config modes are accepted, while an empty
// client-id, tenant-id or apiserver-id is rejected with a specific message.
func TestTokenSourceDeviceCode(t *testing.T) {
	var (
		clientID    = "clientID"
		tenantID    = "tenantID"
		apiserverID = "apiserverID"
		configMode  = configModeDefault
		azureEnv    = azure.Environment{}
	)
	t.Run("validate to create azureTokenSourceDeviceCode", func(t *testing.T) {
		// Happy paths: both supported config modes construct without error.
		if _, err := newAzureTokenSourceDeviceCode(azureEnv, clientID, tenantID, apiserverID, configModeDefault); err != nil {
			t.Errorf("newAzureTokenSourceDeviceCode should not have failed. err: %s", err)
		}
		if _, err := newAzureTokenSourceDeviceCode(azureEnv, clientID, tenantID, apiserverID, configModeOmitSPNPrefix); err != nil {
			t.Errorf("newAzureTokenSourceDeviceCode should not have failed. err: %s", err)
		}
		// Each required identifier, when empty, produces its own error text.
		_, err := newAzureTokenSourceDeviceCode(azureEnv, "", tenantID, apiserverID, configMode)
		actual := "client-id is empty"
		if err.Error() != actual {
			t.Errorf("newAzureTokenSourceDeviceCode should have failed. expected: %s, actual: %s", actual, err)
		}
		_, err = newAzureTokenSourceDeviceCode(azureEnv, clientID, "", apiserverID, configMode)
		actual = "tenant-id is empty"
		if err.Error() != actual {
			t.Errorf("newAzureTokenSourceDeviceCode should have failed. expected: %s, actual: %s", actual, err)
		}
		_, err = newAzureTokenSourceDeviceCode(azureEnv, clientID, tenantID, "", configMode)
		actual = "apiserver-id is empty"
		if err.Error() != actual {
			t.Errorf("newAzureTokenSourceDeviceCode should have failed. expected: %s, actual: %s", actual, err)
		}
	})
}
// TestAzureTokenSource exercises azureTokenSource for both config modes:
// reading a token back from a cfg map, and the caching/persistence behavior
// of Token().
func TestAzureTokenSource(t *testing.T) {
	configModes := []configMode{configModeOmitSPNPrefix, configModeDefault}
	expectedConfigModes := []string{"1", "0"}
	for i, configMode := range configModes {
		t.Run(fmt.Sprintf("validate token from cfg with configMode %v", configMode), func(t *testing.T) {
			const (
				serverID     = "fakeServerID"
				clientID     = "fakeClientID"
				tenantID     = "fakeTenantID"
				accessToken  = "fakeToken"
				environment  = "fakeEnvironment"
				refreshToken = "fakeToken"
				expiresIn    = "foo"
				expiresOn    = "foo"
			)
			cfg := map[string]string{
				// Bug fix: string(configMode) converts the integer to a rune
				// (e.g. "\x00"), not its decimal representation; use
				// strconv.Itoa so the cfg holds "0"/"1" as intended.
				cfgConfigMode:   strconv.Itoa(int(configMode)),
				cfgApiserverID:  serverID,
				cfgClientID:     clientID,
				cfgTenantID:     tenantID,
				cfgEnvironment:  environment,
				cfgAccessToken:  accessToken,
				cfgRefreshToken: refreshToken,
				cfgExpiresIn:    expiresIn,
				cfgExpiresOn:    expiresOn,
			}
			fakeSource := fakeTokenSource{token: newFakeAzureToken("fakeToken", time.Now().Add(3600*time.Second))}
			persister := &fakePersister{cache: make(map[string]string)}
			tokenCache := newAzureTokenCache()
			tokenSource := newAzureTokenSource(&fakeSource, tokenCache, cfg, configMode, persister)
			azTokenSource := tokenSource.(*azureTokenSource)
			token, err := azTokenSource.retrieveTokenFromCfg()
			if err != nil {
				t.Errorf("failed to retrieve the token from cfg: %s", err)
			}
			if token.apiserverID != serverID {
				t.Errorf("expecting token.apiserverID: %s, actual: %s", serverID, token.apiserverID)
			}
			if token.clientID != clientID {
				t.Errorf("expecting token.clientID: %s, actual: %s", clientID, token.clientID)
			}
			if token.tenantID != tenantID {
				t.Errorf("expecting token.tenantID: %s, actual: %s", tenantID, token.tenantID)
			}
			// Default mode prefixes the audience with "spn:"; omit mode does not.
			expectedAudience := serverID
			if configMode == configModeDefault {
				expectedAudience = fmt.Sprintf("spn:%s", serverID)
			}
			if token.token.Resource != expectedAudience {
				t.Errorf("expecting adal token.Resource: %s, actual: %s", expectedAudience, token.token.Resource)
			}
		})
		t.Run("validate token against cache", func(t *testing.T) {
			fakeAccessToken := "fake token 1"
			fakeSource := fakeTokenSource{token: newFakeAzureToken(fakeAccessToken, time.Now().Add(3600*time.Second))}
			cfg := make(map[string]string)
			persister := &fakePersister{cache: make(map[string]string)}
			tokenCache := newAzureTokenCache()
			tokenSource := newAzureTokenSource(&fakeSource, tokenCache, cfg, configMode, persister)
			token, err := tokenSource.Token()
			if err != nil {
				t.Errorf("failed to retrieve the token from cache: %v", err)
			}
			// A freshly acquired token must land in the in-memory cache.
			wantCacheLen := 1
			if len(tokenCache.cache) != wantCacheLen {
				t.Errorf("Token() cache length error: got %v, want %v", len(tokenCache.cache), wantCacheLen)
			}
			if token != tokenCache.cache[azureTokenKey] {
				t.Error("Token() returned token != cached token")
			}
			// The persisted cfg must exactly mirror the token plus config mode.
			wantCfg := token2Cfg(token)
			wantCfg[cfgConfigMode] = expectedConfigModes[i]
			persistedCfg := persister.Cache()
			wantCfgLen := len(wantCfg)
			persistedCfgLen := len(persistedCfg)
			if wantCfgLen != persistedCfgLen {
				t.Errorf("wantCfgLen and persistedCfgLen do not match, wantCfgLen=%v, persistedCfgLen=%v", wantCfgLen, persistedCfgLen)
			}
			for k, v := range persistedCfg {
				if strings.Compare(v, wantCfg[k]) != 0 {
					t.Errorf("Token() persisted cfg %s: got %v, want %v", k, v, wantCfg[k])
				}
			}
			// A second call must hit the cache, not the underlying source.
			fakeSource.token = newFakeAzureToken("fake token 2", time.Now().Add(3600*time.Second))
			token, err = tokenSource.Token()
			if err != nil {
				t.Errorf("failed to retrieve the cached token: %v", err)
			}
			if token.token.AccessToken != fakeAccessToken {
				t.Errorf("Token() didn't return the cached token")
			}
		})
	}
}
// TestAzureTokenSourceScenarios drives azureTokenSource.Token() through a
// table of cache/config/refresh scenarios and asserts which collaborators
// (source, refresher, persister) were invoked and what was returned.
func TestAzureTokenSourceScenarios(t *testing.T) {
	expiredToken := newFakeAzureToken("expired token", time.Now().Add(-time.Second))
	extendedToken := newFakeAzureToken("extend token", time.Now().Add(1000*time.Second))
	fakeToken := newFakeAzureToken("fake token", time.Now().Add(1000*time.Second))
	wrongToken := newFakeAzureToken("wrong token", time.Now().Add(1000*time.Second))
	tests := []struct {
		name         string
		sourceToken  *azureToken
		refreshToken *azureToken
		cachedToken  *azureToken
		configToken  *azureToken
		expectToken  *azureToken
		tokenErr     error
		refreshErr   error
		expectErr    string
		tokenCalls   uint
		refreshCalls uint
		persistCalls uint
	}{
		{
			name:         "new config",
			sourceToken:  fakeToken,
			expectToken:  fakeToken,
			tokenCalls:   1,
			persistCalls: 1,
		},
		{
			name:        "load token from cache",
			sourceToken: wrongToken,
			cachedToken: fakeToken,
			configToken: wrongToken,
			expectToken: fakeToken,
		},
		{
			name:        "load token from config",
			sourceToken: wrongToken,
			configToken: fakeToken,
			expectToken: fakeToken,
		},
		{
			name:         "cached token timeout, extend success, config token should never load",
			cachedToken:  expiredToken,
			refreshToken: extendedToken,
			configToken:  wrongToken,
			expectToken:  extendedToken,
			refreshCalls: 1,
			persistCalls: 1,
		},
		{
			name:         "config token timeout, extend failure, acquire new token",
			configToken:  expiredToken,
			refreshErr:   fakeTokenRefreshError{message: "FakeError happened when refreshing"},
			sourceToken:  fakeToken,
			expectToken:  fakeToken,
			refreshCalls: 1,
			tokenCalls:   1,
			persistCalls: 1,
		},
		{
			name:         "unexpected error when extend",
			configToken:  expiredToken,
			refreshErr:   errors.New("unexpected refresh error"),
			sourceToken:  fakeToken,
			expectErr:    "unexpected refresh error",
			refreshCalls: 1,
		},
		{
			name:       "token error",
			tokenErr:   errors.New("tokenerr"),
			expectErr:  "tokenerr",
			tokenCalls: 1,
		},
		{
			name:        "Token() got expired token",
			sourceToken: expiredToken,
			expectErr:   "newly acquired token is expired",
			tokenCalls:  1,
		},
		{
			name:        "Token() got nil but no error",
			sourceToken: nil,
			expectErr:   "unable to acquire token",
			tokenCalls:  1,
		},
	}
	for _, tc := range tests {
		// Every scenario is run under both config modes.
		configModes := []configMode{configModeOmitSPNPrefix, configModeDefault}
		for _, configMode := range configModes {
			t.Run(fmt.Sprintf("%s with configMode: %v", tc.name, configMode), func(t *testing.T) {
				persister := newFakePersister()
				cfg := map[string]string{
					// Bug fix: string(configMode) yields a rune ("\x00"),
					// not a decimal string; use strconv.Itoa instead.
					cfgConfigMode: strconv.Itoa(int(configMode)),
				}
				if tc.configToken != nil {
					cfg = token2Cfg(tc.configToken)
				}
				tokenCache := newAzureTokenCache()
				if tc.cachedToken != nil {
					tokenCache.setToken(azureTokenKey, tc.cachedToken)
				}
				fakeSource := fakeTokenSource{
					token:        tc.sourceToken,
					tokenErr:     tc.tokenErr,
					refreshToken: tc.refreshToken,
					refreshErr:   tc.refreshErr,
				}
				tokenSource := newAzureTokenSource(&fakeSource, tokenCache, cfg, configMode, &persister)
				token, err := tokenSource.Token()
				// Typo fix in message: "apiservierID" -> "apiserverID".
				if token != nil && fakeSource.token != nil && token.apiserverID != fakeSource.token.apiserverID {
					t.Errorf("expecting apiserverID: %s, got: %s", fakeSource.token.apiserverID, token.apiserverID)
				}
				if fakeSource.tokenCalls != tc.tokenCalls {
					t.Errorf("expecting tokenCalls: %v, got: %v", tc.tokenCalls, fakeSource.tokenCalls)
				}
				if fakeSource.refreshCalls != tc.refreshCalls {
					t.Errorf("expecting refreshCalls: %v, got: %v", tc.refreshCalls, fakeSource.refreshCalls)
				}
				if persister.calls != tc.persistCalls {
					t.Errorf("expecting persister calls: %v, got: %v", tc.persistCalls, persister.calls)
				}
				if tc.expectErr != "" {
					if !strings.Contains(err.Error(), tc.expectErr) {
						t.Errorf("expecting error %v, got %v", tc.expectErr, err)
					}
					if token != nil {
						t.Errorf("token should be nil in err situation, got %v", token)
					}
				} else {
					if err != nil {
						t.Fatalf("error should be nil, got %v", err)
					}
					// Bug fix: the arguments were swapped relative to the
					// message ("should have X, got Y" now prints expected first).
					if token.token.AccessToken != tc.expectToken.token.AccessToken {
						t.Errorf("token should have accessToken %v, got %v", tc.expectToken.token.AccessToken, token.token.AccessToken)
					}
				}
			})
		}
	}
}
// fakePersister is a mutex-guarded in-memory Persister that records how many
// times Persist was invoked and keeps a private copy of the last cache.
type fakePersister struct {
	lock  sync.Mutex
	cache map[string]string
	calls uint
}

// newFakePersister returns a ready-to-use persister with an empty cache.
func newFakePersister() fakePersister {
	return fakePersister{cache: map[string]string{}, calls: 0}
}

// Persist copies cache into the persister, replacing any previous content,
// and bumps the call counter. It never fails.
func (p *fakePersister) Persist(cache map[string]string) error {
	p.lock.Lock()
	defer p.lock.Unlock()
	p.calls++
	fresh := make(map[string]string, len(cache))
	for key, val := range cache {
		fresh[key] = val
	}
	p.cache = fresh
	return nil
}

// Cache returns a defensive copy of the most recently persisted map.
func (p *fakePersister) Cache() map[string]string {
	p.lock.Lock()
	defer p.lock.Unlock()
	snapshot := make(map[string]string, len(p.cache))
	for key, val := range p.cache {
		snapshot[key] = val
	}
	return snapshot
}
// fakeTokenSource is a stub token source: Token and Refresh return the canned
// token/error fields and count how many times each method was invoked, so
// tests can assert which path azureTokenSource took.
type fakeTokenSource struct {
	token        *azureToken // returned by Token()
	tokenCalls   uint        // number of Token() invocations
	tokenErr     error       // error returned by Token()
	refreshToken *azureToken // returned by Refresh()
	refreshCalls uint        // number of Refresh() invocations
	refreshErr   error       // error returned by Refresh()
}

// Token returns the canned token and error, counting the call.
func (ts *fakeTokenSource) Token() (*azureToken, error) {
	ts.tokenCalls++
	return ts.token, ts.tokenErr
}

// Refresh ignores its argument and returns the canned refresh token and
// error, counting the call.
func (ts *fakeTokenSource) Refresh(*azureToken) (*azureToken, error) {
	ts.refreshCalls++
	return ts.refreshToken, ts.refreshErr
}
// token2Cfg flattens an azureToken into the kubeconfig-style string map used
// for persistence (the inverse of retrieveTokenFromCfg).
func token2Cfg(token *azureToken) map[string]string {
	return map[string]string{
		cfgAccessToken:  token.token.AccessToken,
		cfgRefreshToken: token.token.RefreshToken,
		cfgEnvironment:  token.environment,
		cfgClientID:     token.clientID,
		cfgTenantID:     token.tenantID,
		cfgApiserverID:  token.apiserverID,
		cfgExpiresIn:    string(token.token.ExpiresIn),
		cfgExpiresOn:    string(token.token.ExpiresOn),
	}
}
// newFakeAzureToken builds an azureToken with fixed fake identifiers and an
// embedded ADAL token whose expiry is expiresOnTime (as a Unix timestamp).
func newFakeAzureToken(accessToken string, expiresOnTime time.Time) *azureToken {
	return &azureToken{
		token:       newFakeADALToken(accessToken, strconv.FormatInt(expiresOnTime.Unix(), 10)),
		environment: "testenv",
		clientID:    "fake",
		tenantID:    "fake",
		apiserverID: "fake",
	}
}
// newFakeADALToken builds an adal.Token with the given access token and
// expiry (a Unix-seconds string); all remaining fields are fixed fakes.
func newFakeADALToken(accessToken string, expiresOn string) adal.Token {
	var tok adal.Token
	tok.AccessToken = accessToken
	tok.RefreshToken = "fake"
	tok.ExpiresIn = "3600"
	tok.ExpiresOn = json.Number(expiresOn)
	tok.NotBefore = json.Number(expiresOn)
	tok.Resource = "fake"
	tok.Type = "fake"
	return tok
}
// fakeTokenRefreshError mirrors the TokenRefreshError shape from
// go-autorest/adal so tests can simulate refresh failures.
type fakeTokenRefreshError struct {
	message string
	resp    *http.Response
}

// Error returns the canned failure message (satisfies the error interface,
// part of the TokenRefreshError contract).
func (e fakeTokenRefreshError) Error() string {
	return e.message
}

// Response returns the canned raw HTTP response of the failed refresh
// operation (the other half of the TokenRefreshError contract).
func (e fakeTokenRefreshError) Response() *http.Response {
	return e.resp
}
|
package main
import (
"fmt"
"fiber-gorm-books/book"
"fiber-gorm-books/database"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/template/html"
)
// main wires up the Fiber app: HTML views, database, routes, then serves on
// port 3000.
func main() {
	// Standard Go html templates live under ./views.
	viewEngine := html.New("./views", ".html")
	app := fiber.New(fiber.Config{
		Views: viewEngine,
	})
	initDatabase()
	app.Get("/", helloWorld)
	setupRoutes(app)
	if err := app.Listen(":3000"); err != nil {
		panic(err)
	}
}
// setupRoutes registers the CRUD endpoints for books plus a demo listing page.
func setupRoutes(app *fiber.App) {
	books := app.Group("/books")
	books.Get("/", book.GetBooks)
	books.Post("/", book.NewBook)
	books.Get("/:id", book.GetBook)
	books.Delete("/:id", book.DeleteBook)
	books.Patch("/:id", book.UpdateBook)
	// Demo page — try: http://127.0.0.1:3000/allbooks
	app.Get("/allbooks", book.DisplayAllBooks)
}
// helloWorld is the root handler; it replies with a plain greeting.
func helloWorld(c *fiber.Ctx) error {
	const greeting = "Hello, World! 👋"
	return c.SendString(greeting)
}
// initDatabase opens the database connection and auto-migrates the Book
// schema. On connection failure it logs and returns early.
func initDatabase() {
	if err := database.Open(); err != nil {
		// Bug fix: execution previously continued after a failed open, which
		// auto-migrated against an unopened connection and always printed
		// the success message.
		fmt.Println("Could not open Database Connection.")
		return
	}
	database.DB.AutoMigrate(&book.Book{})
	fmt.Println("Database successfully auto-migrated.")
}
|
package queries
import (
"context"
"github.com/graphql-go/graphql"
"go.mongodb.org/mongo-driver/bson"
database "graphql-mongo/data"
"graphql-mongo/types"
"os"
)
// todoStruct is the shape decoded from each MongoDB document.
// NOTE(review): the all-caps field names are non-idiomatic but are kept
// because the json tags drive both decoding and GraphQL resolution.
type todoStruct struct {
	NAME        string `json:"name"`
	DESCRIPTION string `json:"description"`
}

// GetNotTodos resolves the full list of "not todo" documents from the
// collection named by the DATABASE_NAME / DATABASE_COLLECTION env vars.
var GetNotTodos = &graphql.Field{
	Type:        graphql.NewList(types.NotTodo),
	Description: "Get all not todos",
	Resolve: func(params graphql.ResolveParams) (interface{}, error) {
		databaseName := os.Getenv("DATABASE_NAME")
		collection := os.Getenv("DATABASE_COLLECTION")
		notTodoCollection := database.Client.Database(databaseName).Collection(collection)
		ctx := context.Background()
		cursor, err := notTodoCollection.Find(ctx, bson.D{})
		if err != nil {
			// Bug fix: resolvers should surface errors, not panic.
			return nil, err
		}
		// Bug fix: the cursor was previously never closed.
		defer cursor.Close(ctx)
		var todosList []todoStruct
		for cursor.Next(ctx) {
			var todo todoStruct
			if err := cursor.Decode(&todo); err != nil {
				return nil, err
			}
			todosList = append(todosList, todo)
		}
		// Bug fix: iteration errors were previously silently dropped.
		if err := cursor.Err(); err != nil {
			return nil, err
		}
		return todosList, nil
	},
}
|
/*
Given an image, output the [width in pixels of a full vertical section]1 (if one exists). If no vertical section exists, output 0.
Input may be provided as a local file or a nested array. If you choose to take input as a nested array, white pixels should be represented by a truthy value while non-white pixels should be represented by a falsey value.
1. the number of contiguous, all-white columns
You can assume that
no image will be larger than 1000 square pixels
there will be no more than one full vertical section per image
*/
package main
import (
"flag"
"fmt"
"image"
"image/draw"
_ "image/gif"
_ "image/jpeg"
_ "image/png"
"log"
"os"
)
// main parses the single image-path argument, loads the image, and prints the
// width of the widest full-height run of all-white columns.
func main() {
	log.SetFlags(0)
	log.SetPrefix("verticals: ")
	parseflags()
	// flag.Arg(0) is guaranteed to exist: parseflags exits otherwise.
	img, err := loadimage(flag.Arg(0))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(count(img))
}
// parseflags parses the command line and exits (via usage) unless exactly one
// positional argument — the image path — was supplied.
func parseflags() {
	flag.Usage = usage
	flag.Parse()
	if flag.NArg() != 1 {
		usage()
	}
}
// usage prints the invocation synopsis and flag defaults to stderr, then
// exits with status 2 (conventional for usage errors).
func usage() {
	fmt.Fprintln(os.Stderr, "usage: [options] <image>")
	flag.PrintDefaults()
	os.Exit(2)
}
// loadimage decodes the named image file (any registered format) and converts
// it to RGBA so individual pixels can be addressed directly.
func loadimage(name string) (*image.RGBA, error) {
	f, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	src, _, err := image.Decode(f)
	if err != nil {
		return nil, err
	}
	bounds := src.Bounds()
	dst := image.NewRGBA(bounds)
	draw.Draw(dst, bounds, src, image.ZP, draw.Src)
	return dst, nil
}
func count(m *image.RGBA) int {
r := m.Bounds()
v := 0
c := 0
for x := r.Min.X; x < r.Max.X; x++ {
if c++; !vertical(m, x) {
c = 0
}
v = max(v, c)
}
return v
}
func vertical(m *image.RGBA, x int) bool {
r := m.Bounds()
for y := r.Min.Y; y < r.Max.Y; y++ {
c := m.RGBAAt(x, y)
if c.R != 255 || c.G != 255 || c.B != 255 {
return false
}
}
return true
}
func max(a, b int) int {
if a > b {
return a
}
return b
}
|
package controller
import (
"github.com/gin-gonic/gin"
"net/http"
"qipai/enum"
"qipai/game"
"qipai/middleware"
"qipai/model"
"qipai/srv"
"qipai/utils"
)
// user registers the /users routes: a public login endpoint plus a
// JWT-protected group for notice / roll-text / share-text updates.
func user() {
	users := R.Group("/users")
	users.POST("/login", userLoginFunc)
	authed := users.Group("")
	authed.Use(middleware.JWTAuth())
	authed.POST("/notice", postNoticeFunc)
	authed.POST("/rollText", postRollText)
	authed.POST("/shareText", postShareText)
}
// postRollText stores a new scrolling-marquee text and broadcasts it to all
// connected players.
func postRollText(c *gin.Context) {
	type ReqForm struct {
		RollText string `form:"rollText" json:"rollText" binding:"required"`
	}
	var form ReqForm
	if err := c.ShouldBind(&form); err != nil {
		c.JSON(http.StatusBadRequest, utils.Msg(err.Error()).Code(-1).GetData())
		return
	}
	if err := utils.Lv.Put("user_rollText", form.RollText); err != nil {
		c.JSON(http.StatusBadRequest, utils.Msg(err.Error()).Code(-1).GetData())
		// Bug fix: previously fell through after the error response and also
		// broadcast + wrote a second (success) response.
		return
	}
	players := game.GetPlayerList()
	for _, v := range players {
		utils.Msg("").AddData("rollText", form.RollText).Send(game.ResRollText, v.Session)
	}
	c.JSON(http.StatusOK, utils.Msg("发布滚动字幕成功").GetData())
}
// postShareText stores the share-text shown to users when they share the app.
func postShareText(c *gin.Context) {
	type ReqForm struct {
		ShareText string `form:"shareText" json:"shareText" binding:"required"`
	}
	var form ReqForm
	if err := c.ShouldBind(&form); err != nil {
		c.JSON(http.StatusBadRequest, utils.Msg(err.Error()).Code(-1).GetData())
		return
	}
	if err := utils.Lv.Put("user_shareText", form.ShareText); err != nil {
		c.JSON(http.StatusBadRequest, utils.Msg(err.Error()).Code(-1).GetData())
		// Bug fix: previously fell through and also wrote a success response.
		return
	}
	c.JSON(http.StatusOK, utils.Msg("更新分享内容成功").GetData())
}
// postNoticeFunc stores a new notice and pushes it to all connected players.
func postNoticeFunc(c *gin.Context) {
	type ReqForm struct {
		Notice string `form:"notice" json:"notice" binding:"required"`
	}
	var form ReqForm
	if err := c.ShouldBind(&form); err != nil {
		c.JSON(http.StatusBadRequest, utils.Msg(err.Error()).Code(-1).GetData())
		return
	}
	if err := utils.Lv.Put("user_notice", form.Notice); err != nil {
		c.JSON(http.StatusBadRequest, utils.Msg(err.Error()).Code(-1).GetData())
		// Bug fix: previously fell through after the error response and also
		// broadcast + wrote a second (success) response.
		return
	}
	players := game.GetPlayerList()
	for _, v := range players {
		utils.Msg("").AddData("notice", form.Notice).Send(game.ResNotice, v.Session)
	}
	c.JSON(http.StatusOK, utils.Msg("发布通知成功").GetData())
}
// userLoginFunc authenticates a user from the bound form and returns a JWT
// token plus the user record on success.
func userLoginFunc(c *gin.Context) {
	type LoginForm struct {
		UserType enum.UserType `form:"type" json:"type" binding:"required"`
		Name     string        `form:"name" json:"name" binding:"required"`
		Pass     string        `form:"pass" json:"pass" binding:"required"`
	}
	var form LoginForm
	if err := c.ShouldBind(&form); err != nil {
		c.JSON(http.StatusBadRequest, utils.Msg(err.Error()).Code(-1).GetData())
		return
	}
	auth := &model.Auth{UserType: form.UserType, Name: form.Name, Pass: form.Pass}
	token, user, err := srv.User.Login(auth)
	if err != nil {
		c.JSON(http.StatusInternalServerError, utils.Msg(err.Error()).Code(-1).GetData())
		return
	}
	c.JSON(http.StatusOK, utils.Msg("登录成功").AddData("token", token).AddData("user", user).GetData())
}
|
/*
Crie e utilize uma função anônima.
*/
package main
import (
"fmt"
)
// main demonstrates an anonymous variadic function: the slice is spread into
// the call and its elements are summed.
func main() {
	slice := []int{100, 200}
	sum := func(values ...int) int {
		total := 0
		for _, v := range values {
			total += v
		}
		return total
	}
	fmt.Println("A soma dos elementos da slice é:", sum(slice...))
}
|
package main
// countPrimes returns the number of primes strictly below n by materializing
// the prime list for [1, n-1] and taking its length.
// NOTE(review): this file declares countPrimes (and primeGenerator) twice —
// the duplicates must be reconciled before the file compiles as one package.
func countPrimes(n int) int {
	return len(primeGenerator(n - 1)) // the problem asks for primes in [1, n), hence n-1
}
// primeGenerator returns the primes in [1, n] using a straightforward
// (unoptimized) sieve: each prime found marks all of its multiples.
func primeGenerator(n int) []int {
	composite := make([]bool, n+1)
	primes := []int{}
	for candidate := 2; candidate <= n; candidate++ {
		if composite[candidate] {
			continue
		}
		primes = append(primes, candidate)
		// Mark every multiple of this prime as composite.
		for multiple := 2 * candidate; multiple <= n; multiple += candidate {
			composite[multiple] = true
		}
	}
	return primes
}
// Sieve of Eratosthenes, optimized variant: marking starts at i*i and the
// outer loop stops at sqrt(n); primes are collected in a second pass.
// NOTE(review): this redeclares primeGenerator from above — the file cannot
// compile with both definitions present.
func primeGenerator(n int) []int {
	composite := make([]bool, n+1)
	for p := 2; p*p <= n; p++ {
		if composite[p] {
			continue
		}
		for m := p * p; m <= n; m += p {
			composite[m] = true
		}
	}
	primes := []int{}
	for v := 2; v <= n; v++ {
		if !composite[v] {
			primes = append(primes, v)
		}
	}
	return primes
}
// countPrimes returns the number of primes strictly below n, delegating to
// primeCount over [1, n-1].
// NOTE(review): duplicate declaration of countPrimes in this file — keep only
// one version before compiling.
func countPrimes(n int) int {
	return primeCount(n - 1) // the problem asks for primes in [1, n), hence n-1
}
// primeCount returns how many primes lie in [1, n]. It sieves composites up
// to sqrt(n) and then counts survivors, avoiding the allocation of a prime
// list for better time/space efficiency.
func primeCount(n int) int {
	composite := make([]bool, n+1)
	for p := 2; p*p <= n; p++ {
		if composite[p] {
			continue
		}
		for m := p * p; m <= n; m += p {
			composite[m] = true
		}
	}
	total := 0
	for v := 2; v <= n; v++ {
		if !composite[v] {
			total++
		}
	}
	return total
}
/*
题目链接:
https://leetcode-cn.com/problems/count-primes/ 计数质数
*/
/*
总结
1. 第一版的素数筛时空效率有些差。
2. 如果为了AC的时空效率更好,其实可以不用记录下所有的质数,只需要个数就可以了。
*/
|
// Copyright 2020 Clivern. All rights reserved.
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.
package module
import (
"context"
"fmt"
"net/http"
"github.com/clivern/walrus/core/driver"
"github.com/clivern/walrus/core/model"
"github.com/clivern/walrus/core/service"
"github.com/clivern/walrus/core/util"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
)
// Wire coordinates communication between tower and agents: it carries the
// HTTP client plus the stores needed to route and track backup jobs.
type Wire struct {
	httpClient *service.HTTPClient // client used for tower/agent HTTP calls
	db         driver.Database     // backing database handle
	job        *model.Job          // job record store
	agent      *model.Agent        // agent record store
	option     *model.Option       // key/value option store
}

// BackupMessage is the payload sent to an agent to run a backup.
type BackupMessage struct {
	Action        string            `json:"action"`
	Cron          model.CronRecord  `json:"cron"`
	Job           model.JobRecord   `json:"job"`
	Settings      map[string]string `json:"settings"`
	CorrelationID string            `json:"CorrelationID"`
}

// PostbackRequest is the body an agent posts back to the tower to report a
// job's status.
type PostbackRequest struct {
	JobID    string `json:"jobId"`
	CronID   string `json:"cronId"`
	Status   string `json:"status"`
	Hostname string `json:"hostname"`
	AgentID  string `json:"agentID"`
}
// NewWire creates a Wire wired to the given HTTP client and database, with
// job/agent/option stores built on top of the database.
func NewWire(httpClient *service.HTTPClient, db driver.Database) *Wire {
	return &Wire{
		httpClient: httpClient,
		db:         db,
		job:        model.NewJobStore(db),
		agent:      model.NewAgentStore(db),
		option:     model.NewOptionStore(db),
	}
}
// AgentPostback trigger agent postback. It updates job status inside a tower
// by POSTing an encrypted PostbackRequest to the tower's agent_postback
// endpoint. Requires agent.tower.url, agent.tower.encryptionKey and
// agent.tower.apiKey to be configured.
func (w *Wire) AgentPostback(jobID, cronID, status string) error {
	log.Debug("Agent postback")
	hostname, err := util.GetHostname()
	if err != nil {
		return fmt.Errorf("Error while getting the hostname")
	}
	url := fmt.Sprintf(
		"%s/api/v1/action/agent_postback",
		viper.GetString("agent.tower.url"),
	)
	// Serialization failure is ignored here; the struct is plain strings.
	body, _ := util.ConvertToJSON(PostbackRequest{
		JobID:    jobID,
		CronID:   cronID,
		Status:   status,
		Hostname: hostname,
		AgentID:  viper.GetString("app.name"),
	})
	if viper.GetString("agent.tower.encryptionKey") == "" {
		return fmt.Errorf("Config agent.tower.encryptionKey is missing")
	}
	// The request body is encrypted; the header flags it for the tower.
	bodyByte, err := util.Encrypt(
		[]byte(body),
		viper.GetString("agent.tower.encryptionKey"),
	)
	if err != nil {
		return err
	}
	response, err := w.httpClient.Post(
		context.TODO(),
		url,
		string(bodyByte),
		map[string]string{},
		map[string]string{"X-API-Key": viper.GetString("agent.tower.apiKey"), "X-Encrypted-Request": "true"},
	)
	if err != nil {
		return err
	}
	// Anything other than 200 OK is treated as a failed postback.
	if w.httpClient.GetStatusCode(response) != http.StatusOK {
		return fmt.Errorf(
			"Invalid response code: %d",
			w.httpClient.GetStatusCode(response),
		)
	}
	return nil
}
// UpdateTowerJobStatus updates the stored status of the job identified by
// hostname and jobID.
func (w *Wire) UpdateTowerJobStatus(hostname, jobID, status string) error {
	log.Debug("Update tower job status")
	record, err := w.job.GetRecord(hostname, jobID)
	if err != nil {
		return err
	}
	record.Status = status
	return w.job.UpdateRecord(*record)
}
// SendJobToHostAgent picks a running agent on the message's host, attaches
// the S3 backup settings, and POSTs the encrypted backup message to the
// agent's process endpoint.
func (w *Wire) SendJobToHostAgent(message BackupMessage) error {
	agents, err := w.agent.GetAgents(message.Cron.Hostname)
	// Bug fix: this error was previously ignored, letting a lookup failure
	// masquerade as "no running agent".
	if err != nil {
		return err
	}
	agent := &model.AgentData{}
	// TODO: Select a random running agent
	for _, v := range agents {
		if v.Status == model.UpStatus {
			agent = v
			break
		}
	}
	if agent.ID == "" {
		return fmt.Errorf(
			"Unable to find running agent for host: %s",
			message.Cron.Hostname,
		)
	}
	// Copy each S3 backup option into the outgoing message settings
	// (previously five near-identical fetch blocks).
	s3Options := []string{
		"backup_s3_key",
		"backup_s3_secret",
		"backup_s3_endpoint",
		"backup_s3_region",
		"backup_s3_bucket",
	}
	for _, key := range s3Options {
		option, err := w.option.GetOptionByKey(key)
		if err != nil {
			return fmt.Errorf(
				"Error while getting option %s: %s",
				key,
				err.Error(),
			)
		}
		message.Settings[key] = option.Value
	}
	url := fmt.Sprintf(
		"%s/api/v1/process",
		agent.URL,
	)
	body, _ := util.ConvertToJSON(message)
	if viper.GetString("tower.api.encryptionKey") == "" {
		return fmt.Errorf("Config tower.api.encryptionKey is missing")
	}
	bodyByte, err := util.Encrypt(
		[]byte(body),
		viper.GetString("tower.api.encryptionKey"),
	)
	if err != nil {
		return err
	}
	response, err := w.httpClient.Post(
		context.TODO(),
		url,
		string(bodyByte),
		map[string]string{},
		map[string]string{"X-API-Key": agent.APIKey, "X-Encrypted-Request": "true"},
	)
	if err != nil {
		return err
	}
	// The agent acknowledges accepted work with 202.
	if w.httpClient.GetStatusCode(response) != http.StatusAccepted {
		return fmt.Errorf(
			"Invalid response code: %d",
			w.httpClient.GetStatusCode(response),
		)
	}
	return nil
}
|
// Copyright (c) 2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package bdb
import (
"fmt"
"time"
"github.com/btcsuite/btcwallet/walletdb"
)
const (
dbType = "bdb"
)
// parseArgs parses the arguments from the walletdb Open/Create methods.
func parseArgs(funcName string,
args ...interface{}) (string, bool, time.Duration, error) {
if len(args) != 3 {
return "", false, 0, fmt.Errorf("invalid arguments to %s.%s "+
"-- expected database path, no-freelist-sync and "+
"timeout option",
dbType, funcName)
}
dbPath, ok := args[0].(string)
if !ok {
return "", false, 0, fmt.Errorf("first argument to %s.%s is "+
"invalid -- expected database path string", dbType,
funcName)
}
noFreelistSync, ok := args[1].(bool)
if !ok {
return "", false, 0, fmt.Errorf("second argument to %s.%s is "+
"invalid -- expected no-freelist-sync bool", dbType,
funcName)
}
timeout, ok := args[2].(time.Duration)
if !ok {
return "", false, 0, fmt.Errorf("third argument to %s.%s is "+
"invalid -- expected timeout time.Duration", dbType,
funcName)
}
return dbPath, noFreelistSync, timeout, nil
}
// openDBDriver is the callback provided during driver registration that opens
// an existing database for use (create=false).
func openDBDriver(args ...interface{}) (walletdb.DB, error) {
	dbPath, noFreelistSync, timeout, err := parseArgs("Open", args...)
	if err != nil {
		return nil, err
	}
	return openDB(dbPath, noFreelistSync, false, timeout)
}
// createDBDriver is the callback provided during driver registration that
// creates, initializes, and opens a database for use (create=true).
func createDBDriver(args ...interface{}) (walletdb.DB, error) {
	dbPath, noFreelistSync, timeout, err := parseArgs("Create", args...)
	if err != nil {
		return nil, err
	}
	return openDB(dbPath, noFreelistSync, true, timeout)
}
// init registers the bdb driver with walletdb; registration failure is fatal
// because the package is unusable without it.
func init() {
	// Register the driver.
	driver := walletdb.Driver{
		DbType: dbType,
		Create: createDBDriver,
		Open:   openDBDriver,
	}
	if err := walletdb.RegisterDriver(driver); err != nil {
		// Typo fix: "regiser" -> "register".
		panic(fmt.Sprintf("Failed to register database driver '%s': %v",
			dbType, err))
	}
}
|
package main
import (
"time"
"fmt"
)
// server1 simulates a backend by sending its identifying message on ch.
func server1(ch chan string) {
	// Original experiment: uncomment to delay this server.
	//time.Sleep(time.Millisecond * 7000)
	msg := "from server 1"
	ch <- msg
}
// server2 simulates a second backend by sending its identifying message on ch.
func server2(ch chan string) {
	// Original experiment: uncomment to delay this server.
	//time.Sleep(time.Millisecond * 3000)
	msg := "from server 2"
	ch <- msg
}
func main(){
time.Sleep(time.Second)
output1 := make(chan string)
output2 := make(chan string)
go server1(output1)
go server2(output2)
select{
case s1 := <- output1:
fmt.Println(s1)
case s2 := <- output2:
fmt.Println(s2)
}
} |
package c29_break_sha1_length_extension

import (
	"bytes"
	"math/rand"
	"testing"

	"github.com/vodafon/cryptopals/set4/c28_sha1_key_mac"
)

// TestExploit verifies the SHA-1 length-extension attack: given only a valid
// MAC for inp (keyed with a random, unknown-length key), Exploit must forge a
// valid MAC for a message extended with ";admin=true;".
func TestExploit(t *testing.T) {
	inp := []byte("comment1=cooking%20MCs;userdata=foo;comment2=%20like%20a%20pound%20of%20bacon")

	// Random key of unknown length (10-49 bytes) so the attack cannot rely
	// on a fixed key size.
	key := make([]byte, 10+rand.Intn(40))
	rand.Read(key)
	sha1System := c28_sha1_key_mac.NewSHA1System(key)
	mac := sha1System.MAC(inp)

	// Sanity check: naively appending the payload must not verify.
	if sha1System.Verify(mac, append(inp, []byte(";admin=true;")...)) {
		// Fixed typo: "Incorect" -> "Incorrect".
		t.Errorf("Incorrect verification. Expected false")
	}
	mac2, msg, err := Exploit(sha1System, inp, []byte(";admin=true;"), mac)
	if err != nil {
		t.Errorf("Exploit error: %s\n", err)
	}
	if !sha1System.Verify(mac2, msg) {
		// Fixed typo: "Incorect" -> "Incorrect".
		t.Errorf("Incorrect result\n")
	}
	if !bytes.Contains(msg, []byte(";admin=true;")) {
		t.Errorf("Not admin")
	}
}
|
package main
import "fmt"
// main runs the ad-hoc bubble sort demonstration.
func main() {
	TestBubbleSort()
}
// TestBubbleSort sorts a fixed sample array in place with bubbleSort and
// prints the result for visual inspection.
func TestBubbleSort() {
	nums := [...]int{9, 8, 7, 4, 5, 2, 1, 3}
	bubbleSort(nums[:])
	fmt.Println(nums)
}
// bubbleSort sorts a in place in ascending order using bubble sort.
// Each outer pass bubbles the largest remaining element to the right end;
// a pass with no swaps means the slice is already sorted, so we stop early.
// Safe on nil and empty slices.
func bubbleSort(a []int) {
	for i := 0; i < len(a); i++ {
		swapped := false
		// Each pass fixes the largest unsorted element at the right end.
		for j := 1; j < len(a)-i; j++ {
			if a[j] < a[j-1] {
				a[j], a[j-1] = a[j-1], a[j]
				swapped = true
			}
		}
		// Early exit: no swaps means the remaining prefix is sorted.
		if !swapped {
			break
		}
	}
}
|
/*
Go Language Raspberry Pi Interface
(c) Copyright David Thorpe 2016-2017
All Rights Reserved
Documentation http://djthorpe.github.io/gopi/
For Licensing and Usage information, please see LICENSE.md
*/
package bme280
import (
"fmt"
)
////////////////////////////////////////////////////////////////////////////////
// STRINGIFY
// String implements fmt.Stringer for the BME280 sensor, reporting chip
// identity, sampling configuration, the bus in use and calibration data.
func (this *bme280) String() string {
	// Describe whichever bus is attached; when both are set the SPI bus
	// takes precedence, matching the original assignment order.
	bus := ""
	switch {
	case this.spi != nil:
		bus = fmt.Sprintf("%v", this.spi)
	case this.i2c != nil:
		bus = fmt.Sprintf("%v", this.i2c)
	}
	return fmt.Sprintf("<sensors.BME280>{ chipid=0x%02X version=0x%02X mode=%v filter=%v t_sb=%v spi3w_en=%v osrs_t=%v osrs_p=%v osrs_h=%v bus=%v calibration=%v }", this.chipid, this.version, this.mode, this.filter, this.t_sb, this.spi3w_en, this.osrs_t, this.osrs_p, this.osrs_h, bus, this.calibration)
}
// String implements fmt.Stringer for the sensor calibration coefficients
// (temperature T*, pressure P*, humidity H*).
// NOTE(review): the receiver type name "calibation" looks like a typo of
// "calibration"; it is declared elsewhere, so it is left untouched here.
func (this *calibation) String() string {
	return fmt.Sprintf("<Calibration>{ T1=%v T2=%v T3=%v P1=%v P2=%v P3=%v P4=%v P5=%v P6=%v P7=%v P8=%v P9=%v H1=%v H2=%v H3=%v H4=%v H5=%v H6=%v }", this.T1, this.T2, this.T3, this.P1, this.P2, this.P3, this.P4, this.P5, this.P6, this.P7, this.P8, this.P9, this.H1, this.H2, this.H3, this.H4, this.H5, this.H6)
}
|
package handlers
import (
"fmt"
"net/url"
"regexp"
"strings"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"github.com/valyala/fasthttp"
"github.com/authelia/authelia/v4/internal/authentication"
"github.com/authelia/authelia/v4/internal/authorization"
"github.com/authelia/authelia/v4/internal/middlewares"
"github.com/authelia/authelia/v4/internal/mocks"
"github.com/authelia/authelia/v4/internal/utils"
)
// TestRunLegacyAuthzSuite is the go test entry point that runs the legacy
// authz implementation suite.
func TestRunLegacyAuthzSuite(t *testing.T) {
	suite.Run(t, NewLegacyAuthzSuite())
}
// NewLegacyAuthzSuite builds the suite configured for the legacy authz
// implementation, wiring in the legacy request-setup helper.
func NewLegacyAuthzSuite() *LegacyAuthzSuite {
	base := &AuthzSuite{
		implementation: AuthzImplLegacy,
		setRequest:     setRequestLegacy,
	}

	return &LegacyAuthzSuite{AuthzSuite: base}
}
// LegacyAuthzSuite embeds AuthzSuite to run the shared authz tests against
// the legacy implementation.
type LegacyAuthzSuite struct {
	*AuthzSuite
}
// TestShouldHandleAllMethodsDeny checks that for every HTTP method an
// unauthenticated request to a one-factor protected target is redirected to
// the Authelia portal given by the rd query argument, with the original
// target and method encoded as rd/rm query parameters. Safe methods get 302
// Found; all others get 303 See Other.
func (s *LegacyAuthzSuite) TestShouldHandleAllMethodsDeny() {
	for _, method := range testRequestMethods {
		s.T().Run(fmt.Sprintf("Method%s", method), func(t *testing.T) {
			for _, pairURI := range []urlpair{
				{s.RequireParseRequestURI("https://one-factor.example.com/"), s.RequireParseRequestURI("https://auth.example.com/")},
				{s.RequireParseRequestURI("https://one-factor.example.com/subpath"), s.RequireParseRequestURI("https://auth.example.com/")},
				{s.RequireParseRequestURI("https://one-factor.example2.com/"), s.RequireParseRequestURI("https://auth.example2.com/")},
				{s.RequireParseRequestURI("https://one-factor.example2.com/subpath"), s.RequireParseRequestURI("https://auth.example2.com/")},
			} {
				t.Run(pairURI.TargetURI.String(), func(t *testing.T) {
					expected := s.RequireParseRequestURI(pairURI.AutheliaURI.String())

					authz := s.Builder().Build()

					mock := mocks.NewMockAutheliaCtx(t)

					defer mock.Close()

					s.ConfigureMockSessionProviderWithAutomaticAutheliaURLs(mock)

					// The portal URL is supplied via the rd query argument.
					mock.Ctx.RequestCtx.QueryArgs().Set(queryArgRD, pairURI.AutheliaURI.String())
					mock.Ctx.Request.Header.Set("X-Forwarded-Method", method)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedProto, pairURI.TargetURI.Scheme)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedHost, pairURI.TargetURI.Host)
					mock.Ctx.Request.Header.Set("X-Forwarded-URI", pairURI.TargetURI.Path)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderAccept, "text/html; charset=utf-8")

					authz.Handler(mock.Ctx)

					// Safe methods redirect with 302; others use 303 so the
					// client re-issues the redirected request as a GET.
					switch method {
					case fasthttp.MethodGet, fasthttp.MethodOptions, fasthttp.MethodHead:
						assert.Equal(t, fasthttp.StatusFound, mock.Ctx.Response.StatusCode())
					default:
						assert.Equal(t, fasthttp.StatusSeeOther, mock.Ctx.Response.StatusCode())
					}

					query := expected.Query()
					query.Set(queryArgRD, pairURI.TargetURI.String())
					query.Set(queryArgRM, method)
					expected.RawQuery = query.Encode()

					assert.Equal(t, expected.String(), string(mock.Ctx.Response.Header.Peek(fasthttp.HeaderLocation)))
				})
			}
		})
	}
}
// TestShouldHandleAllMethodsOverrideAutheliaURLDeny is the same denial
// scenario as TestShouldHandleAllMethodsDeny but the rd query argument points
// at an override portal URL, which must be honored in the redirect Location.
func (s *LegacyAuthzSuite) TestShouldHandleAllMethodsOverrideAutheliaURLDeny() {
	for _, method := range testRequestMethods {
		s.T().Run(fmt.Sprintf("Method%s", method), func(t *testing.T) {
			for _, pairURI := range []urlpair{
				{s.RequireParseRequestURI("https://one-factor.example.com/"), s.RequireParseRequestURI("https://auth-from-override.example.com/")},
				{s.RequireParseRequestURI("https://one-factor.example.com/subpath"), s.RequireParseRequestURI("https://auth-from-override.example.com/")},
				{s.RequireParseRequestURI("https://one-factor.example2.com/"), s.RequireParseRequestURI("https://auth-from-override.example2.com/")},
				{s.RequireParseRequestURI("https://one-factor.example2.com/subpath"), s.RequireParseRequestURI("https://auth-from-override.example2.com/")},
			} {
				t.Run(pairURI.TargetURI.String(), func(t *testing.T) {
					expected := s.RequireParseRequestURI(pairURI.AutheliaURI.String())

					authz := s.Builder().Build()

					mock := mocks.NewMockAutheliaCtx(t)

					defer mock.Close()

					s.ConfigureMockSessionProviderWithAutomaticAutheliaURLs(mock)

					// The override portal URL is supplied via rd and must win.
					mock.Ctx.RequestCtx.QueryArgs().Set(queryArgRD, pairURI.AutheliaURI.String())
					mock.Ctx.Request.Header.Set("X-Forwarded-Method", method)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedProto, pairURI.TargetURI.Scheme)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedHost, pairURI.TargetURI.Host)
					mock.Ctx.Request.Header.Set("X-Forwarded-URI", pairURI.TargetURI.Path)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderAccept, "text/html; charset=utf-8")

					authz.Handler(mock.Ctx)

					switch method {
					case fasthttp.MethodGet, fasthttp.MethodOptions, fasthttp.MethodHead:
						assert.Equal(t, fasthttp.StatusFound, mock.Ctx.Response.StatusCode())
					default:
						assert.Equal(t, fasthttp.StatusSeeOther, mock.Ctx.Response.StatusCode())
					}

					query := expected.Query()
					query.Set(queryArgRD, pairURI.TargetURI.String())
					query.Set(queryArgRM, method)
					expected.RawQuery = query.Encode()

					assert.Equal(t, expected.String(), string(mock.Ctx.Response.Header.Peek(fasthttp.HeaderLocation)))
				})
			}
		})
	}
}
// TestShouldHandleAllMethodsMissingAutheliaURLBypassStatus200 checks that a
// bypass-policy target returns 200 OK with no Location header for every
// method even when no Authelia portal URL is configured or provided.
func (s *LegacyAuthzSuite) TestShouldHandleAllMethodsMissingAutheliaURLBypassStatus200() {
	for _, method := range testRequestMethods {
		s.T().Run(fmt.Sprintf("Method%s", method), func(t *testing.T) {
			for _, targetURI := range []*url.URL{
				s.RequireParseRequestURI("https://bypass.example.com"),
				s.RequireParseRequestURI("https://bypass.example.com/subpath"),
				s.RequireParseRequestURI("https://bypass.example2.com"),
				s.RequireParseRequestURI("https://bypass.example2.com/subpath"),
			} {
				t.Run(targetURI.String(), func(t *testing.T) {
					authz := s.Builder().Build()

					mock := mocks.NewMockAutheliaCtx(t)

					defer mock.Close()

					// Note: no session provider / portal URL is configured here.
					mock.Ctx.Request.Header.Set("X-Forwarded-Method", method)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedProto, targetURI.Scheme)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedHost, targetURI.Host)
					mock.Ctx.Request.Header.Set("X-Forwarded-URI", targetURI.Path)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderAccept, "text/html; charset=utf-8")

					authz.Handler(mock.Ctx)

					assert.Equal(t, fasthttp.StatusOK, mock.Ctx.Response.StatusCode())
					assert.Equal(t, "", string(mock.Ctx.Response.Header.Peek(fasthttp.HeaderLocation)))
				})
			}
		})
	}
}
// TestShouldHandleAllMethodsMissingAutheliaURLOneFactorStatus401 checks that
// a one-factor protected target yields 401 Unauthorized (not a redirect) for
// every method when no Authelia portal URL is available to redirect to.
func (s *LegacyAuthzSuite) TestShouldHandleAllMethodsMissingAutheliaURLOneFactorStatus401() {
	for _, method := range testRequestMethods {
		s.T().Run(fmt.Sprintf("Method%s", method), func(t *testing.T) {
			for _, targetURI := range []*url.URL{
				s.RequireParseRequestURI("https://one-factor.example.com"),
				s.RequireParseRequestURI("https://one-factor.example.com/subpath"),
				s.RequireParseRequestURI("https://one-factor.example2.com"),
				s.RequireParseRequestURI("https://one-factor.example2.com/subpath"),
			} {
				t.Run(targetURI.String(), func(t *testing.T) {
					authz := s.Builder().Build()

					mock := mocks.NewMockAutheliaCtx(t)

					defer mock.Close()

					// Note: no session provider / portal URL is configured here.
					mock.Ctx.Request.Header.Set("X-Forwarded-Method", method)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedProto, targetURI.Scheme)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedHost, targetURI.Host)
					mock.Ctx.Request.Header.Set("X-Forwarded-URI", targetURI.Path)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderAccept, "text/html; charset=utf-8")

					authz.Handler(mock.Ctx)

					assert.Equal(t, fasthttp.StatusUnauthorized, mock.Ctx.Response.StatusCode())
					assert.Equal(t, "", string(mock.Ctx.Response.Header.Peek(fasthttp.HeaderLocation)))
				})
			}
		})
	}
}
// TestShouldHandleAllMethodsRDAutheliaURLOneFactorStatus302Or303 checks that
// when the portal URL is supplied via the rd query argument of the legacy
// /api/verify endpoint, a one-factor protected target redirects there with
// 302 (safe methods) or 303 (other methods) and rd/rm parameters set.
func (s *LegacyAuthzSuite) TestShouldHandleAllMethodsRDAutheliaURLOneFactorStatus302Or303() {
	for _, method := range testRequestMethods {
		s.T().Run(fmt.Sprintf("Method%s", method), func(t *testing.T) {
			for _, targetURI := range []*url.URL{
				s.RequireParseRequestURI("https://one-factor.example.com/"),
				s.RequireParseRequestURI("https://one-factor.example.com/subpath"),
			} {
				t.Run(targetURI.String(), func(t *testing.T) {
					authz := s.Builder().Build()

					mock := mocks.NewMockAutheliaCtx(t)

					defer mock.Close()

					mock.Ctx.Request.Header.Set("X-Forwarded-Method", method)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedProto, targetURI.Scheme)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedHost, targetURI.Host)
					mock.Ctx.Request.Header.Set("X-Forwarded-URI", targetURI.Path)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderAccept, "text/html; charset=utf-8")

					// Legacy endpoint form: portal URL URL-encoded in rd.
					mock.Ctx.Request.SetRequestURI("/api/verify?rd=https%3A%2F%2Fauth.example.com")

					authz.Handler(mock.Ctx)

					switch method {
					case fasthttp.MethodGet, fasthttp.MethodOptions, fasthttp.MethodHead:
						assert.Equal(t, fasthttp.StatusFound, mock.Ctx.Response.StatusCode())
					default:
						assert.Equal(t, fasthttp.StatusSeeOther, mock.Ctx.Response.StatusCode())
					}

					query := &url.Values{}
					query.Set("rd", targetURI.String())
					query.Set("rm", method)

					assert.Equal(t, fmt.Sprintf("https://auth.example.com/?%s", query.Encode()), string(mock.Ctx.Response.Header.Peek(fasthttp.HeaderLocation)))
				})
			}
		})
	}
}
// TestShouldHandleAllMethodsXHRDeny checks denial behavior for XHR-style
// requests: regardless of method, the response status is 401 Unauthorized
// while the Location header still carries the portal URL with rd/rm set.
// The testXHR table toggles whether the Accept/X-Requested-With headers are
// present.
func (s *LegacyAuthzSuite) TestShouldHandleAllMethodsXHRDeny() {
	for _, method := range testRequestMethods {
		s.T().Run(fmt.Sprintf("Method%s", method), func(t *testing.T) {
			for xname, x := range testXHR {
				t.Run(xname, func(t *testing.T) {
					for _, pairURI := range []urlpair{
						{s.RequireParseRequestURI("https://one-factor.example.com/"), s.RequireParseRequestURI("https://auth.example.com/")},
						{s.RequireParseRequestURI("https://one-factor.example.com/subpath"), s.RequireParseRequestURI("https://auth.example.com/")},
						{s.RequireParseRequestURI("https://one-factor.example2.com/"), s.RequireParseRequestURI("https://auth.example2.com/")},
						{s.RequireParseRequestURI("https://one-factor.example2.com/subpath"), s.RequireParseRequestURI("https://auth.example2.com/")},
					} {
						t.Run(pairURI.TargetURI.String(), func(t *testing.T) {
							expected := s.RequireParseRequestURI(pairURI.AutheliaURI.String())

							authz := s.Builder().Build()

							mock := mocks.NewMockAutheliaCtx(t)

							defer mock.Close()

							s.ConfigureMockSessionProviderWithAutomaticAutheliaURLs(mock)

							mock.Ctx.RequestCtx.QueryArgs().Set(queryArgRD, pairURI.AutheliaURI.String())
							mock.Ctx.Request.Header.Set("X-Forwarded-Method", method)
							mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedProto, pairURI.TargetURI.Scheme)
							mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedHost, pairURI.TargetURI.Host)
							mock.Ctx.Request.Header.Set("X-Forwarded-URI", pairURI.TargetURI.Path)

							// Only mark the request as an XHR when the table says so.
							if x {
								mock.Ctx.Request.Header.Set(fasthttp.HeaderAccept, "text/html; charset=utf-8")
								mock.Ctx.Request.Header.Set(fasthttp.HeaderXRequestedWith, "XMLHttpRequest")
							}

							authz.Handler(mock.Ctx)

							assert.Equal(t, fasthttp.StatusUnauthorized, mock.Ctx.Response.StatusCode())

							query := expected.Query()
							query.Set(queryArgRD, pairURI.TargetURI.String())
							query.Set(queryArgRM, method)
							expected.RawQuery = query.Encode()

							assert.Equal(t, expected.String(), string(mock.Ctx.Response.Header.Peek(fasthttp.HeaderLocation)))
						})
					}
				})
			}
		})
	}
}
// TestShouldHandleInvalidMethodCharsDeny appends a junk character to every
// known method name and checks such invalid methods are rejected with 401
// Unauthorized and no Location header, even for bypass-policy targets.
func (s *LegacyAuthzSuite) TestShouldHandleInvalidMethodCharsDeny() {
	for _, method := range testRequestMethods {
		// "GETz", "POSTz", etc. -- never a valid HTTP method.
		method += "z"

		s.T().Run(fmt.Sprintf("Method%s", method), func(t *testing.T) {
			for _, targetURI := range []*url.URL{
				s.RequireParseRequestURI("https://bypass.example.com"),
				s.RequireParseRequestURI("https://bypass.example.com/subpath"),
				s.RequireParseRequestURI("https://bypass.example2.com"),
				s.RequireParseRequestURI("https://bypass.example2.com/subpath"),
			} {
				t.Run(targetURI.String(), func(t *testing.T) {
					authz := s.Builder().Build()

					mock := mocks.NewMockAutheliaCtx(t)

					defer mock.Close()

					s.ConfigureMockSessionProviderWithAutomaticAutheliaURLs(mock)

					mock.Ctx.Request.Header.Set("X-Forwarded-Method", method)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedProto, targetURI.Scheme)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedHost, targetURI.Host)
					mock.Ctx.Request.Header.Set("X-Forwarded-URI", targetURI.Path)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderAccept, "text/html; charset=utf-8")

					authz.Handler(mock.Ctx)

					assert.Equal(t, fasthttp.StatusUnauthorized, mock.Ctx.Response.StatusCode())
					assert.Equal(t, []byte(nil), mock.Ctx.Response.Header.Peek(fasthttp.HeaderLocation))
				})
			}
		})
	}
}
// TestShouldHandleMissingHostDeny checks that a request lacking the
// X-Forwarded-Host header is rejected with 401 Unauthorized and no redirect,
// since the target cannot be determined.
func (s *LegacyAuthzSuite) TestShouldHandleMissingHostDeny() {
	for _, method := range testRequestMethods {
		s.T().Run(fmt.Sprintf("Method%s", method), func(t *testing.T) {
			authz := s.Builder().Build()

			mock := mocks.NewMockAutheliaCtx(t)

			defer mock.Close()

			s.ConfigureMockSessionProviderWithAutomaticAutheliaURLs(mock)

			mock.Ctx.Request.Header.Set("X-Forwarded-Method", method)
			mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedProto, "https")
			// Explicitly remove the host header to simulate a broken proxy.
			mock.Ctx.Request.Header.Del(fasthttp.HeaderXForwardedHost)
			mock.Ctx.Request.Header.Set("X-Forwarded-URI", "/")
			mock.Ctx.Request.Header.Set(fasthttp.HeaderAccept, "text/html; charset=utf-8")

			authz.Handler(mock.Ctx)

			assert.Equal(t, fasthttp.StatusUnauthorized, mock.Ctx.Response.StatusCode())
			assert.Equal(t, []byte(nil), mock.Ctx.Response.Header.Peek(fasthttp.HeaderLocation))
		})
	}
}
// TestShouldHandleAllMethodsAllow checks that bypass-policy targets return
// 200 OK with no Location header for every HTTP method.
func (s *LegacyAuthzSuite) TestShouldHandleAllMethodsAllow() {
	for _, method := range testRequestMethods {
		s.T().Run(fmt.Sprintf("Method%s", method), func(t *testing.T) {
			for _, targetURI := range []*url.URL{
				s.RequireParseRequestURI("https://bypass.example.com"),
				s.RequireParseRequestURI("https://bypass.example.com/subpath"),
				s.RequireParseRequestURI("https://bypass.example2.com"),
				s.RequireParseRequestURI("https://bypass.example2.com/subpath"),
			} {
				t.Run(targetURI.String(), func(t *testing.T) {
					authz := s.Builder().Build()

					mock := mocks.NewMockAutheliaCtx(t)

					defer mock.Close()

					s.ConfigureMockSessionProviderWithAutomaticAutheliaURLs(mock)

					mock.Ctx.Request.Header.Set("X-Forwarded-Method", method)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedProto, targetURI.Scheme)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedHost, targetURI.Host)
					mock.Ctx.Request.Header.Set("X-Forwarded-URI", targetURI.Path)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderAccept, "text/html; charset=utf-8")

					authz.Handler(mock.Ctx)

					assert.Equal(t, fasthttp.StatusOK, mock.Ctx.Response.StatusCode())
					assert.Equal(t, []byte(nil), mock.Ctx.Response.Header.Peek(fasthttp.HeaderLocation))
				})
			}
		})
	}
}
// TestShouldHandleAllMethodsWithMethodsACL exercises ACL rules that bypass
// only a single method (domains like bypass-get.example.com). A matching
// method gets 200 OK; any other method is redirected to the portal (302 for
// safe methods, 303 otherwise) with an HTML anchor body (empty for HEAD).
func (s *LegacyAuthzSuite) TestShouldHandleAllMethodsWithMethodsACL() {
	for _, method := range testRequestMethods {
		s.T().Run(fmt.Sprintf("Method%s", method), func(t *testing.T) {
			for _, methodACL := range testRequestMethods {
				// One target domain per ACL method, e.g. bypass-get.example.com.
				targetURI := s.RequireParseRequestURI(fmt.Sprintf("https://bypass-%s.example.com", strings.ToLower(methodACL)))
				t.Run(targetURI.String(), func(t *testing.T) {
					authz := s.Builder().Build()

					mock := mocks.NewMockAutheliaCtx(t)

					defer mock.Close()

					s.ConfigureMockSessionProviderWithAutomaticAutheliaURLs(mock)

					s.setRequest(mock.Ctx, method, targetURI, true, false)

					mock.Ctx.RequestCtx.QueryArgs().Set(queryArgRD, "https://auth.example.com")

					authz.Handler(mock.Ctx)

					if method == methodACL {
						assert.Equal(t, fasthttp.StatusOK, mock.Ctx.Response.StatusCode())
						assert.Equal(t, []byte(nil), mock.Ctx.Response.Header.Peek(fasthttp.HeaderLocation))
					} else {
						expected := s.RequireParseRequestURI("https://auth.example.com/")

						query := expected.Query()
						query.Set(queryArgRD, targetURI.String())
						query.Set(queryArgRM, method)
						expected.RawQuery = query.Encode()

						switch method {
						case fasthttp.MethodHead:
							// HEAD responses carry no body.
							assert.Equal(t, fasthttp.StatusFound, mock.Ctx.Response.StatusCode())
							assert.Nil(t, mock.Ctx.Response.Body())
						case fasthttp.MethodGet, fasthttp.MethodOptions:
							assert.Equal(t, fasthttp.StatusFound, mock.Ctx.Response.StatusCode())
							assert.Equal(t, fmt.Sprintf(`<a href="%s">%d %s</a>`, utils.StringHTMLEscape(expected.String()), fasthttp.StatusFound, fasthttp.StatusMessage(fasthttp.StatusFound)), string(mock.Ctx.Response.Body()))
						default:
							assert.Equal(t, fasthttp.StatusSeeOther, mock.Ctx.Response.StatusCode())
							assert.Equal(t, fmt.Sprintf(`<a href="%s">%d %s</a>`, utils.StringHTMLEscape(expected.String()), fasthttp.StatusSeeOther, fasthttp.StatusMessage(fasthttp.StatusSeeOther)), string(mock.Ctx.Response.Body()))
						}

						assert.Equal(t, expected.String(), string(mock.Ctx.Response.Header.Peek(fasthttp.HeaderLocation)))
					}
				})
			}
		})
	}
}
// TestShouldHandleAllMethodsAllowXHR checks bypass targets return 200 OK
// with no Location header for every method.
// NOTE(review): despite the XHR name, this body never sets the
// X-Requested-With header and is identical to TestShouldHandleAllMethodsAllow
// -- looks like a copy-paste; confirm whether the XHR headers were intended.
func (s *LegacyAuthzSuite) TestShouldHandleAllMethodsAllowXHR() {
	for _, method := range testRequestMethods {
		s.T().Run(fmt.Sprintf("Method%s", method), func(t *testing.T) {
			for _, targetURI := range []*url.URL{
				s.RequireParseRequestURI("https://bypass.example.com"),
				s.RequireParseRequestURI("https://bypass.example.com/subpath"),
				s.RequireParseRequestURI("https://bypass.example2.com"),
				s.RequireParseRequestURI("https://bypass.example2.com/subpath"),
			} {
				t.Run(targetURI.String(), func(t *testing.T) {
					authz := s.Builder().Build()

					mock := mocks.NewMockAutheliaCtx(t)

					defer mock.Close()

					s.ConfigureMockSessionProviderWithAutomaticAutheliaURLs(mock)

					mock.Ctx.Request.Header.Set("X-Forwarded-Method", method)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedProto, targetURI.Scheme)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedHost, targetURI.Host)
					mock.Ctx.Request.Header.Set("X-Forwarded-URI", targetURI.Path)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderAccept, "text/html; charset=utf-8")

					authz.Handler(mock.Ctx)

					assert.Equal(t, fasthttp.StatusOK, mock.Ctx.Response.StatusCode())
					assert.Equal(t, []byte(nil), mock.Ctx.Response.Header.Peek(fasthttp.HeaderLocation))
				})
			}
		})
	}
}
// TestShouldHandleLegacyBasicAuth checks the legacy auth=basic query mode:
// valid Basic credentials (john:password, base64 in the header) against a
// one-factor target authenticate inline and return 200 OK.
func (s *LegacyAuthzSuite) TestShouldHandleLegacyBasicAuth() { // TestShouldVerifyAuthBasicArgOk.
	authz := s.Builder().Build()

	mock := mocks.NewMockAutheliaCtx(s.T())

	defer mock.Close()

	s.ConfigureMockSessionProviderWithAutomaticAutheliaURLs(mock)

	mock.Ctx.QueryArgs().Add("auth", "basic")
	// "Basic am9objpwYXNzd29yZA==" decodes to "john:password".
	mock.Ctx.Request.Header.Set(fasthttp.HeaderAuthorization, "Basic am9objpwYXNzd29yZA==")
	mock.Ctx.Request.Header.Set("X-Original-URL", "https://one-factor.example.com")

	gomock.InOrder(
		mock.UserProviderMock.EXPECT().
			CheckUserPassword(gomock.Eq("john"), gomock.Eq("password")).
			Return(true, nil),
		mock.UserProviderMock.EXPECT().
			GetDetails(gomock.Eq("john")).
			Return(&authentication.UserDetails{
				Emails: []string{"john@example.com"},
				Groups: []string{"dev", "admins"},
			}, nil),
	)

	authz.Handler(mock.Ctx)

	s.Equal(fasthttp.StatusOK, mock.Ctx.Response.StatusCode())
}
// TestShouldHandleLegacyBasicAuthFailures is a table test over ways the
// legacy auth=basic mode can fail: absent/empty/misplaced Authorization
// header, wrong password, and authenticated but unauthorized user. All cases
// must return 401 Unauthorized with a Basic WWW-Authenticate challenge.
func (s *LegacyAuthzSuite) TestShouldHandleLegacyBasicAuthFailures() {
	testCases := []struct {
		name string
		// setup mutates the mock before the handler runs; nil means "no
		// Authorization header at all".
		setup func(mock *mocks.MockAutheliaCtx)
	}{
		{
			"HeaderAbsent", // TestShouldVerifyAuthBasicArgFailingNoHeader.
			nil,
		},
		{
			"HeaderEmpty", // TestShouldVerifyAuthBasicArgFailingEmptyHeader.
			func(mock *mocks.MockAutheliaCtx) {
				mock.Ctx.Request.Header.Set(fasthttp.HeaderAuthorization, "")
			},
		},
		{
			"HeaderIncorrect", // TestShouldVerifyAuthBasicArgFailingWrongHeader.
			func(mock *mocks.MockAutheliaCtx) {
				// Credentials in Proxy-Authorization must not be accepted here.
				mock.Ctx.Request.Header.Set(fasthttp.HeaderProxyAuthorization, "Basic am9objpwYXNzd29yZA==")
			},
		},
		{
			"IncorrectPassword", // TestShouldVerifyAuthBasicArgFailingWrongPassword.
			func(mock *mocks.MockAutheliaCtx) {
				mock.Ctx.Request.Header.Set(fasthttp.HeaderAuthorization, "Basic am9objpwYXNzd29yZA==")
				mock.UserProviderMock.EXPECT().
					CheckUserPassword(gomock.Eq("john"), gomock.Eq("password")).
					Return(false, fmt.Errorf("generic error"))
			},
		},
		{
			"NoAccess", // TestShouldVerifyAuthBasicArgFailingWrongPassword.
			func(mock *mocks.MockAutheliaCtx) {
				// Valid credentials, but the target requires a group the user
				// is not in, so authorization fails after authentication.
				mock.Ctx.Request.Header.Set(fasthttp.HeaderAuthorization, "Basic am9objpwYXNzd29yZA==")
				mock.Ctx.Request.Header.Set("X-Original-URL", "https://admin.example.com/")

				gomock.InOrder(
					mock.UserProviderMock.EXPECT().
						CheckUserPassword(gomock.Eq("john"), gomock.Eq("password")).
						Return(true, nil),
					mock.UserProviderMock.EXPECT().
						GetDetails(gomock.Eq("john")).
						Return(&authentication.UserDetails{
							Emails: []string{"john@example.com"},
							Groups: []string{"dev", "admin"},
						}, nil),
				)
			},
		},
	}

	authz := s.Builder().Build()

	for _, tc := range testCases {
		s.T().Run(tc.name, func(t *testing.T) {
			mock := mocks.NewMockAutheliaCtx(t)

			defer mock.Close()

			s.ConfigureMockSessionProviderWithAutomaticAutheliaURLs(mock)

			mock.Ctx.QueryArgs().Add("auth", "basic")
			mock.Ctx.Request.Header.Set("X-Original-URL", "https://one-factor.example.com")

			if tc.setup != nil {
				tc.setup(mock)
			}

			authz.Handler(mock.Ctx)

			assert.Equal(t, fasthttp.StatusUnauthorized, mock.Ctx.Response.StatusCode())
			assert.Equal(t, "401 Unauthorized", string(mock.Ctx.Response.Body()))
			assert.Regexp(t, regexp.MustCompile("^Basic realm="), string(mock.Ctx.Response.Header.Peek(fasthttp.HeaderWWWAuthenticate)))
		})
	}
}
// TestShouldHandleInvalidURLForCVE202132637 is a regression test for
// CVE-2021-32637: a forwarded host containing a NUL byte must never be
// authorized (401), while the same host without the NUL byte is allowed
// (200) under a bypass default policy.
func (s *LegacyAuthzSuite) TestShouldHandleInvalidURLForCVE202132637() {
	testCases := []struct {
		name         string
		scheme, host []byte
		path         string
		expected     int
	}{
		// The first byte in the host sequence is the null byte. This should never respond with 200 OK.
		{"Should401UnauthorizedWithNullByte",
			[]byte("https"), []byte{0, 110, 111, 116, 45, 111, 110, 101, 45, 102, 97, 99, 116, 111, 114, 46, 101, 120, 97, 109, 112, 108, 101, 46, 99, 111, 109}, "/path-example",
			fasthttp.StatusUnauthorized,
		},
		{"Should200OkWithoutNullByte",
			[]byte("https"), []byte{110, 111, 116, 45, 111, 110, 101, 45, 102, 97, 99, 116, 111, 114, 46, 101, 120, 97, 109, 112, 108, 101, 46, 99, 111, 109}, "/path-example",
			fasthttp.StatusOK,
		},
	}

	for _, tc := range testCases {
		s.T().Run(tc.name, func(t *testing.T) {
			for _, method := range testRequestMethods {
				t.Run(fmt.Sprintf("Method%s", method), func(t *testing.T) {
					authz := s.Builder().Build()

					mock := mocks.NewMockAutheliaCtx(t)

					defer mock.Close()

					// Default policy bypass so only the malformed host causes denial.
					mock.Ctx.Configuration.AccessControl.DefaultPolicy = testBypass
					mock.Ctx.Providers.Authorizer = authorization.NewAuthorizer(&mock.Ctx.Configuration)

					s.ConfigureMockSessionProviderWithAutomaticAutheliaURLs(mock)

					mock.Ctx.Request.Header.Set("X-Forwarded-Method", method)
					// SetBytesKV is used so the raw NUL byte reaches the header value.
					mock.Ctx.Request.Header.SetBytesKV([]byte(fasthttp.HeaderXForwardedProto), tc.scheme)
					mock.Ctx.Request.Header.SetBytesKV([]byte(fasthttp.HeaderXForwardedHost), tc.host)
					mock.Ctx.Request.Header.Set("X-Forwarded-URI", tc.path)
					mock.Ctx.Request.Header.Set(fasthttp.HeaderAccept, "text/html; charset=utf-8")

					authz.Handler(mock.Ctx)

					assert.Equal(t, tc.expected, mock.Ctx.Response.StatusCode())
					assert.Equal(t, []byte(nil), mock.Ctx.Response.Header.Peek(fasthttp.HeaderLocation))
				})
			}
		})
	}
}
// setRequestLegacy populates the request headers the legacy verification
// endpoint reads: the forwarded method, the original URL, and the
// Accept/X-Requested-With values controlled by accept and xhr.
func setRequestLegacy(ctx *middlewares.AutheliaCtx, method string, targetURI *url.URL, accept, xhr bool) {
	if len(method) != 0 {
		ctx.Request.Header.Set("X-Forwarded-Method", method)
	}

	if targetURI != nil {
		ctx.Request.Header.Set(testXOriginalUrl, targetURI.String())
	}

	setRequestXHRValues(ctx, accept, xhr)
}
|
/*
* Tencent is pleased to support the open source community by making Blueking Container Service available.
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package common
import (
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)
// GetBCSProjectID get projectID from annotations
func GetBCSProjectID(data map[string]string) string {
	// projectID is required in Authentication(hash id, not readable).
	// so ProjectController store projectID in AppProject.Meta,
	// indexer is bkbcs.tencent.com/projectID
	//
	// Indexing a nil map is legal in Go and returns the zero value, so the
	// previous explicit nil check and ok check were both redundant: a nil
	// map or missing key simply yields "".
	return data[ProjectIDKey]
}
// GetBCSProjectBusinessKey return the business id of project
func GetBCSProjectBusinessKey(data map[string]string) string {
	// Indexing a nil map is legal in Go and returns the zero value, so the
	// previous explicit nil check and ok check were both redundant: a nil
	// map or missing key simply yields "".
	return data[ProjectBusinessIDKey]
}
// AddCustomAnnotationForApplication add custom annotation for application
func AddCustomAnnotationForApplication(argoProj *v1alpha1.AppProject, app *v1alpha1.Application) {
	// Assigning into a nil map panics, so ensure the annotation map exists
	// before copying the project ID / business ID from the AppProject.
	if app.Annotations == nil {
		app.Annotations = make(map[string]string, 2)
	}
	app.Annotations[ProjectIDKey] = GetBCSProjectID(argoProj.Annotations)
	app.Annotations[ProjectBusinessIDKey] = GetBCSProjectBusinessKey(argoProj.Annotations)
}
|
package main
import (
"fmt"
"io/ioutil"
"os"
"github.com/JustinSo1/TVShowFinder/internal"
"github.com/JustinSo1/TVShowFinder/pkg/userinterface"
ui "github.com/gizak/termui/v3"
)
// main loads the input file named on the command line, initializes the
// terminal UI, displays the data, and blocks handling key and resize events
// until the user quits with q or Ctrl-C.
func main() {
	if len(os.Args) < 2 {
		fmt.Println("Missing parameter, provide file name!")
		return
	}

	contents, err := ioutil.ReadFile(os.Args[1])
	internal.HandleError(err)

	err = ui.Init()
	internal.HandleError(err)
	defer ui.Close()

	win := userinterface.NewTerminalWindow(contents)
	ui.Render(win.Grid())
	win.Display(contents)

	events := ui.PollEvents()
	for {
		select {
		case e := <-events:
			switch e.ID {
			case "q", "<C-c>":
				// User requested exit; deferred ui.Close restores the terminal.
				return
			case "<Resize>":
				// Re-fit the grid to the new terminal dimensions and redraw.
				payload := e.Payload.(ui.Resize)
				win.Grid().SetRect(0, 0, payload.Width, payload.Height)
				ui.Clear()
				ui.Render(win.Grid())
			}
		}
	}
}
|
package chartjs
// List maps template names to their Go html/text template sources. The
// "chartjs" template renders an optional centered title, a canvas sized by
// .Height, and a script that instantiates a Chart.js chart from .Js on the
// element identified by .ID.
var List = map[string]string{
	"chartjs": `{{define "chartjs"}}
{{if ne .Title ""}}
    <p class="text-center">
        <strong>{{langHtml .Title}}</strong>
    </p>
{{end}}
<div class="chart">
    <canvas id="{{.ID}}" style="height: {{.Height}}px;"></canvas>
</div>
<script>
new Chart(document.getElementById('{{.ID}}'), {{.Js}});
</script>
{{end}}`,
}
|
package main
// main constructs a Game and runs it.
// NOTE(review): Game is declared elsewhere; the positional literal
// {100, 0, ""} presumably seeds its three fields (e.g. starting total,
// counter, and an empty string state) -- confirm against the Game type.
func main() {
	game := Game{100, 0, ""}
	game.playGame()
}
|
// Copyright Jetstack Ltd. See LICENSE for details.
package kubernetes
import (
"testing"
"github.com/golang/mock/gomock"
vault "github.com/hashicorp/vault/api"
)
// tokenCreateRequestMatcher is a gomock matcher that matches a
// *vault.TokenCreateRequest by its ID field.
// NOTE(review): the name field is never read by String or Matches in this
// file -- it appears unused; confirm before removing.
type tokenCreateRequestMatcher struct {
	ID   string
	name string
}
// String implements gomock.Matcher, giving a short description used in
// mock failure messages.
func (tcrm *tokenCreateRequestMatcher) String() string {
	return "matcher"
}
// Matches implements gomock.Matcher: the argument matches when it is a
// *vault.TokenCreateRequest whose ID equals the expected ID.
func (tcrm *tokenCreateRequestMatcher) Matches(x interface{}) bool {
	tcr, ok := x.(*vault.TokenCreateRequest)
	return ok && tcrm.ID == tcr.ID
}
// tests a not yet existing init token, with random generated token
func TestInitToken_Ensure_NoExpectedToken_NotExisting(t *testing.T) {
fv := NewFakeVault(t)
defer fv.Finish()
fv.ExpectWrite()
i := &InitToken{
Role: "etcd",
Policies: []string{"etcd"},
kubernetes: fv.Kubernetes(),
ExpectedToken: "",
}
// expects a read and vault says secret is not existing
initTokenPath := "test-cluster-inside/secrets/init_token_etcd"
fv.fakeLogical.EXPECT().Read(initTokenPath).Return(
nil,
nil,
)
// expect a create new orphan
fv.fakeToken.EXPECT().CreateOrphan(&tokenCreateRequestMatcher{}).Return(&vault.Secret{
Auth: &vault.SecretAuth{
ClientToken: "my-new-random-token",
},
}, nil)
// expect a write of the new token
fv.fakeLogical.EXPECT().Write(initTokenPath, map[string]interface{}{"init_token": "my-new-random-token"}).Return(
nil,
nil,
)
fv.fakeToken.EXPECT().Lookup("my-new-random-token").Return(
nil,
nil,
)
fv.fakeToken.EXPECT().Renew("my-new-random-token", 0).Return(
nil,
nil,
)
InitTokenEnsure_EXPECTs(fv)
err := i.Ensure()
if err != nil {
t.Error("unexpected error: ", err)
}
token, err := i.InitToken()
if err != nil {
t.Error("unexpected error: ", err)
}
if exp, act := "my-new-random-token", token; exp != act {
t.Errorf("unexpected token: act=%s exp=%s", act, exp)
}
return
}
// expected token not set, init token already exists
// TestInitToken_Ensure_NoExpectedToken_AlreadyExisting: expected token not
// set, init token already exists in vault -- Ensure must reuse the stored
// token rather than creating a new one.
func TestInitToken_Ensure_NoExpectedToken_AlreadyExisting(t *testing.T) {
	fv := NewFakeVault(t)
	defer fv.Finish()
	fv.ExpectWrite()

	i := &InitToken{
		Role:          "etcd",
		Policies:      []string{"etcd"},
		kubernetes:    fv.Kubernetes(),
		ExpectedToken: "",
	}

	// expect a read and vault says secret is existing
	initTokenPath := "test-cluster-inside/secrets/init_token_etcd"
	fv.fakeLogical.EXPECT().Read(initTokenPath).Return(
		&vault.Secret{
			Data: map[string]interface{}{"init_token": "existing-token"},
		},
		nil,
	)

	fv.fakeToken.EXPECT().Lookup("existing-token").Return(
		nil,
		nil,
	)

	fv.fakeToken.EXPECT().Renew("existing-token", 0).Return(
		nil,
		nil,
	)

	InitTokenEnsure_EXPECTs(fv)

	err := i.Ensure()
	if err != nil {
		t.Error("unexpected error: ", err)
	}

	token, err := i.InitToken()
	if err != nil {
		t.Error("unexpected error: ", err)
	}
	if exp, act := "existing-token", token; exp != act {
		t.Errorf("unexpected token: act=%s exp=%s", act, exp)
	}
	// NOTE: the redundant trailing return was removed (staticcheck S1023).
}
// excpected token set, init token already exists and it's matching
func TestInitToken_Ensure_ExpectedToken_Existing_Match(t *testing.T) {
fv := NewFakeVault(t)
defer fv.Finish()
fv.ExpectWrite()
i := &InitToken{
Role: "etcd",
Policies: []string{"etcd"},
kubernetes: fv.Kubernetes(),
ExpectedToken: "expected-token",
}
// expect a read and vault says secret is existing
initTokenPath := "test-cluster-inside/secrets/init_token_etcd"
fv.fakeLogical.EXPECT().Read(initTokenPath).Return(
&vault.Secret{
Data: map[string]interface{}{"init_token": "expected-token"},
},
nil,
)
fv.fakeToken.EXPECT().Lookup("expected-token").Return(
nil,
nil,
)
fv.fakeToken.EXPECT().Renew("expected-token", 0).Return(
nil,
nil,
)
InitTokenEnsure_EXPECTs(fv)
err := i.Ensure()
if err != nil {
t.Error("unexpected error: ", err)
}
token, err := i.InitToken()
if err != nil {
t.Error("unexpected error: ", err)
}
if exp, act := "expected-token", token; exp != act {
t.Errorf("unexpected token: act=%s exp=%s", act, exp)
}
return
}
// expected token set, init token doesn't exist
func TestInitToken_Ensure_ExpectedToken_NotExisting(t *testing.T) {
fv := NewFakeVault(t)
defer fv.Finish()
fv.ExpectWrite()
i := &InitToken{
Role: "etcd",
Policies: []string{"etcd"},
kubernetes: fv.Kubernetes(),
ExpectedToken: "expected-token",
}
// expect a new token creation
fv.fakeToken.EXPECT().CreateOrphan(&tokenCreateRequestMatcher{ID: "expected-token"}).Return(&vault.Secret{
Auth: &vault.SecretAuth{
ClientToken: "expected-token",
},
}, nil)
// expect a read and vault says secret is not existing, then after it is written to return token
initTokenPath := "test-cluster-inside/secrets/init_token_etcd"
gomock.InOrder(
fv.fakeLogical.EXPECT().Read(initTokenPath).Return(
nil,
nil,
).MinTimes(1),
// expect a write of the new token from user flag
fv.fakeLogical.EXPECT().Write(initTokenPath, map[string]interface{}{"init_token": "expected-token"}).Return(
nil,
nil,
),
// allow read out of token from user
fv.fakeLogical.EXPECT().Read(initTokenPath).AnyTimes().Return(
&vault.Secret{
Data: map[string]interface{}{"init_token": "expected-token"},
},
nil,
),
)
fv.fakeToken.EXPECT().Lookup("expected-token").Return(
nil,
nil,
)
fv.fakeToken.EXPECT().Renew("expected-token", 0).Return(
nil,
nil,
)
InitTokenEnsure_EXPECTs(fv)
err := i.Ensure()
if err != nil {
t.Error("unexpected error: ", err)
}
token, err := i.InitToken()
if err != nil {
t.Error("unexpected error: ", err)
}
if exp, act := "expected-token", token; exp != act {
t.Errorf("unexpected token: act=%s exp=%s", act, exp)
}
return
}
// General policy and write calls when init token ensuring.
// InitTokenEnsure_EXPECTs registers the role-write and policy-put mock
// expectations shared by every InitToken Ensure test; AnyTimes() leaves
// the call counts unconstrained.
func InitTokenEnsure_EXPECTs(fv *fakeVault) {
	fv.fakeLogical.EXPECT().Write("auth/token/roles/test-cluster-inside-etcd", gomock.Any()).AnyTimes().Return(nil, nil)
	fv.fakeSys.EXPECT().PutPolicy(gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
}
|
/*
Doordog helps you watch your doors.
When somebody enters your room, you will be alerted by a beeping buzzer and a blinking led.
*/
package main
import (
"log"
"net/http"
"net/url"
"time"
"github.com/shanghuiyang/face-recognizer/face"
"github.com/shanghuiyang/go-speech/oauth"
"github.com/shanghuiyang/rpi-devices/dev"
"github.com/shanghuiyang/rpi-devices/util"
"github.com/stianeikeland/go-rpio"
)
const (
	// GPIO pin assignments (BCM numbering).
	pinTrig = 2  // HC-SR04 trigger pin
	pinEcho = 3  // HC-SR04 echo pin
	pinBtn  = 7  // push button that cancels an alert
	pinBzr  = 17 // buzzer
	pinLed  = 23 // LED
	// ifttAPI is the IFTTT webhook URL used to send notifications.
	ifttAPI = "your-iftt-api"
)
const (
	// the time of keeping alert in second
	alertTime = 60
	// the distance of triggering alert in cm
	alertDist = 80
	// groupID is the face-recognition user group to match against.
	groupID = "mygroup"
	// Baidu face-recognition API credentials (placeholders).
	baiduFaceRecognitionAppKey    = "your_face_app_key"
	baiduFaceRecognitionSecretKey = "your_face_secret_key"
)
var (
	// allowlist holds user IDs that may enter without triggering an alert.
	allowlist = []string{
		"p1",
		"p2",
		"p3",
		"p4",
		"p5",
	}
)
// main wires up the camera, buzzer, LED, button and distance meter, then
// runs the doordog service until a quit signal arrives.
func main() {
	if err := rpio.Open(); err != nil {
		// log.Fatalf exits the process, so the original `return` after it
		// was unreachable and has been removed.
		log.Fatalf("[doordog]failed to open rpio, error: %v", err)
	}
	defer rpio.Close()
	cam := dev.NewCamera()
	bzr := dev.NewBuzzer(pinBzr)
	led := dev.NewLed(pinLed)
	btn := dev.NewButton(pinBtn)
	dist := dev.NewHCSR04(pinTrig, pinEcho)
	if dist == nil {
		log.Printf("[doordog]failed to new a HCSR04")
		return
	}
	auth := oauth.New(baiduFaceRecognitionAppKey, baiduFaceRecognitionSecretKey, oauth.NewCacheMan())
	f := face.New(auth)
	dog := newDoordog(cam, dist, bzr, led, btn, f)
	util.WaitQuit(func() {
		dog.stop()
		rpio.Close()
	})
	dog.start()
}
// doordog bundles the devices and state of the door-watching service.
type doordog struct {
	cam    *dev.Camera // takes a photo when an object is detected
	dist   *dev.HCSR04 // ultrasonic distance meter watching the door
	buzzer *dev.Buzzer
	led    *dev.Led
	button *dev.Button // pressing it cancels an active alert
	face   *face.Face  // Baidu face-recognition client
	// alerting is set while an alert is active. NOTE(review): it is read
	// and written from several goroutines without synchronization —
	// consider an atomic or mutex.
	alerting bool
	chAlert  chan bool // detection events from detect() to alert()
}
// newDoordog assembles a doordog from its device dependencies; the alert
// channel is buffered so detections do not block the sensor loop.
func newDoordog(cam *dev.Camera, dist *dev.HCSR04, buzzer *dev.Buzzer, led *dev.Led, btn *dev.Button, face *face.Face) *doordog {
	d := doordog{
		cam:      cam,
		dist:     dist,
		buzzer:   buzzer,
		led:      led,
		button:   btn,
		face:     face,
		alerting: false,
		chAlert:  make(chan bool, 4),
	}
	return &d
}
// start launches the alert and button-watch goroutines and then blocks in
// the detection loop.
func (d *doordog) start() {
	log.Printf("[doordog]start to service")
	go d.alert()
	go d.stopAlert()
	d.detect()
}
// detect polls the distance meter every 300ms. When something comes closer
// than alertDist it takes a photo, tries to recognize the face, and raises
// an alert unless the person is on the allowlist.
func (d *doordog) detect() {
	// need to warm-up the ultrasonic distance meter first
	d.dist.Dist()
	time.Sleep(500 * time.Millisecond)
	t := 300 * time.Millisecond
	for {
		time.Sleep(t)
		dist := d.dist.Dist()
		// readings below 10cm are treated as sensor noise and dropped
		if dist < 10 {
			log.Printf("[doordog]bad data from distant meter, distance = %.2fcm", dist)
			continue
		}
		detected := (dist < alertDist)
		if detected {
			log.Printf("[doordog]detected objects, distance = %.2fcm", dist)
			// recognition failure: skip this round rather than alert
			who, err := d.RecoginzeFace()
			if err != nil {
				continue
			}
			log.Printf("[doordog]it is %v", who)
			if allowed(who) {
				continue
			}
			d.chAlert <- detected
			continue
		}
	}
}
// alert manages the alert window. A background goroutine beeps and blinks
// once per second while d.alerting is set; each detection event (re)arms
// the alert and notifies IFTTT, and the alert auto-expires after
// alertTime seconds.
func (d *doordog) alert() {
	trigTime := time.Now()
	go func() {
		for {
			if d.alerting {
				go d.buzzer.Beep(1, 200)
				go d.led.Blink(1, 200)
			}
			time.Sleep(1 * time.Second)
		}
	}()
	for detected := range d.chAlert {
		if detected {
			go ifttt()
			d.alerting = true
			trigTime = time.Now()
			continue
		}
		// time.Since is the idiomatic spelling of time.Now().Sub(t).
		timeout := time.Since(trigTime).Seconds() > alertTime
		if timeout && d.alerting {
			log.Printf("[doordog]timeout, stop alert")
			d.alerting = false
		}
	}
}
// RecoginzeFace takes a photo and asks the face service who is in it.
// It returns the recognized user ID when the match score exceeds 50, and
// "unknow" otherwise or on any error.
// NOTE(review): "RecoginzeFace" (method name), "phote" (log) and "unknow"
// (returned value) are misspellings; the strings are runtime values other
// code may compare against, so they are left untouched here.
func (d *doordog) RecoginzeFace() (name string, err error) {
	imgf, e := d.cam.TakePhoto()
	if e != nil {
		log.Printf("[doordog]failed to take phote, error: %v", e)
		name, err = "unknow", e
		return
	}
	users, e := d.face.Recognize(imgf, groupID)
	if e != nil {
		log.Printf("[doordog]failed to recognize the image, error: %v", e)
		name, err = "unknow", e
		return
	}
	// no face found in the photo
	if len(users) == 0 {
		name, err = "unknow", nil
		return
	}
	log.Printf("who: %v", *(users[0]))
	// only trust matches with a score above 50
	if users[0].Score > 50 {
		return users[0].UserID, nil
	}
	name, err = "unknow", nil
	return
}
// stopAlert polls the hardware button every 100ms; a press cancels any
// active alert and is debounced for one second.
func (d *doordog) stopAlert() {
	for {
		if !d.button.Pressed() {
			time.Sleep(100 * time.Millisecond)
			continue
		}
		log.Printf("[doordog]the button was pressed")
		if d.alerting {
			d.alerting = false
		}
		// debounce: ignore further presses for a second
		time.Sleep(1 * time.Second)
	}
}
// ifttt fires the configured IFTTT webhook with an empty form body; it is
// best-effort and only logs failures.
func ifttt() {
	resp, err := http.PostForm(ifttAPI, url.Values{})
	if err != nil {
		log.Printf("failed to request to ifttt, error: %v", err)
		return
	}
	// close the body so the transport can reuse the connection
	defer resp.Body.Close()
}
// stop silences the alert outputs; called on shutdown before rpio.Close.
func (d *doordog) stop() {
	d.buzzer.Off()
	d.led.Off()
}
// allowed reports whether user is on the allowlist.
func allowed(user string) bool {
	for i := range allowlist {
		if allowlist[i] == user {
			return true
		}
	}
	return false
}
|
package main
import (
"bytes"
"encoding/json"
"strconv"
"github.com/hyperledger/fabric-chaincode-go/shim"
sc "github.com/hyperledger/fabric-protos-go/peer"
)
// SmartContract implements the certificate-registry chaincode.
type SmartContract struct {
}

// Init is invoked at chaincode instantiation; no ledger state is seeded.
func (s *SmartContract) Init(stub shim.ChaincodeStubInterface) sc.Response {
	return shim.Success(nil)
}
// Invoke routes an incoming transaction to the handler named by the
// function argument; unknown names produce an error response.
func (s *SmartContract) Invoke(stub shim.ChaincodeStubInterface) sc.Response {
	// Retrieve the requested Smart Contract function and arguments
	function, args := stub.GetFunctionAndParameters()
	// A switch is the idiomatic Go form of this long if/else dispatch chain.
	switch function {
	case "init":
		return s.Init(stub)
	case "saveCertificate":
		return s.saveCertificate(stub, args)
	case "getAllCertificates":
		return s.getAllCertificates(stub, args)
	case "queryCertificate":
		return s.queryCertificate(stub, args)
	case "deleteCertificate":
		return s.deleteCertificate(stub, args)
	case "getCertificateHistory":
		return s.getCertificateHistory(stub, args)
	case "verifyCertificate":
		return s.verifyCertificate(stub, args)
	}
	return shim.Error("Invalid Smart Contract function name.")
}
// saveCertificate unmarshals the certificate JSON in args[0], stores it
// under its SerialNumber key and echoes the stored bytes back.
func (s *SmartContract) saveCertificate(stub shim.ChaincodeStubInterface, args []string) sc.Response {
	if len(args) != 1 {
		return shim.Error("Incorrect number of arguments. Expecting 1")
	}
	certificateJSON := Certificate{}
	err := json.Unmarshal([]byte(args[0]), &certificateJSON)
	if err != nil {
		return shim.Error("Invalid assetJSON")
	}
	certificateAsBytes, err := json.Marshal(certificateJSON)
	if err != nil {
		return shim.Error(err.Error())
	}
	// The original discarded the PutState error, silently losing writes.
	if err := stub.PutState(certificateJSON.SerialNumber, certificateAsBytes); err != nil {
		return shim.Error(err.Error())
	}
	return shim.Success(certificateAsBytes)
}
// verifyCertificate compares the stored hash for args[0] (serial number)
// against args[1] and answers with a small JSON verdict:
//
//	{"matched":bool,"serialNumber":bool}
//
// where serialNumber reports whether the certificate exists at all.
func (s *SmartContract) verifyCertificate(stub shim.ChaincodeStubInterface, args []string) sc.Response {
	if len(args) != 2 {
		return shim.Error("Incorrect number of arguments. Expecting 2")
	}
	serialNumber := args[0]
	certificateHash := args[1]
	var buffer bytes.Buffer
	certificateAsBytes, err := stub.GetState(serialNumber)
	if err != nil || len(certificateAsBytes) == 0 {
		// BUG FIX: bytes.Buffer.WriteString takes exactly one string; the
		// original passed two arguments, which does not compile. The two
		// JSON fields are joined with a comma instead.
		buffer.WriteString("{")
		buffer.WriteString("\"matched\":false,\"serialNumber\":false")
		buffer.WriteString("}")
		return shim.Success(buffer.Bytes())
	}
	certificateJSON := Certificate{}
	_ = json.Unmarshal(certificateAsBytes, &certificateJSON)
	buffer.WriteString("{")
	if certificateJSON.CertificateHash == certificateHash {
		buffer.WriteString("\"matched\":true,\"serialNumber\":true")
	} else {
		buffer.WriteString("\"matched\":false,\"serialNumber\":true")
	}
	buffer.WriteString("}")
	return shim.Success(buffer.Bytes())
}
// queryCertificate returns the stored certificate bytes for the serial
// number in args[0], or an error response if it does not exist.
func (s *SmartContract) queryCertificate(stub shim.ChaincodeStubInterface, args []string) sc.Response {
	if len(args) != 1 {
		return shim.Error("Incorrect number of arguments. Expecting 1")
	}
	serial := args[0]
	data, err := stub.GetState(serial)
	if err != nil || len(data) == 0 {
		return shim.Error("The SerialNumber " + serial + " does not exist")
	}
	return shim.Success(data)
}
// deleteCertificate removes the certificate stored under args[0] after
// confirming it exists.
func (s *SmartContract) deleteCertificate(stub shim.ChaincodeStubInterface, args []string) sc.Response {
	if len(args) != 1 {
		return shim.Error("Incorrect number of arguments. Expecting 1")
	}
	serialNumber := args[0]
	certificateAsBytes, err := stub.GetState(serialNumber)
	if err != nil || len(certificateAsBytes) == 0 {
		return shim.Error("The asset " + serialNumber + " does not exist")
	}
	// The original discarded the DelState error; surface it to the caller.
	if err := stub.DelState(serialNumber); err != nil {
		return shim.Error(err.Error())
	}
	return shim.Success(nil)
}
// getCertificateHistory returns every historic value of the key in
// args[0] as a JSON array (the raw stored documents, comma-separated).
func (s *SmartContract) getCertificateHistory(stub shim.ChaincodeStubInterface, args []string) sc.Response {
	if len(args) != 1 {
		return shim.Error("Incorrect number of arguments. Expecting 1")
	}
	serialNumber := args[0]
	resultsIterator, err := stub.GetHistoryForKey(serialNumber)
	if err != nil {
		return shim.Error("The serialNumber " + serialNumber + " does not exist")
	}
	defer resultsIterator.Close()
	// buffer is a JSON array containing QueryResults
	var buffer bytes.Buffer
	buffer.WriteString("[")
	first := true
	for resultsIterator.HasNext() {
		queryResponse, err := resultsIterator.Next()
		if err != nil {
			return shim.Error(err.Error())
		}
		// Add a comma before array members, suppress it for the first one.
		// (`if b == true` replaced with the idiomatic boolean test, and the
		// string round-trip replaced with a direct byte write.)
		if !first {
			buffer.WriteString(",")
		}
		buffer.Write(queryResponse.Value)
		first = false
	}
	buffer.WriteString("]")
	return shim.Success(buffer.Bytes())
}
// getAllCertificates pages over the key range [args[0], args[1]) and
// returns a JSON envelope with paging metadata plus up to args[2]
// certificates:
//
//	{"page":{"pageSize":N,...,"nextStartKey":"..."},"content":[...]}
//
// nextStartKey is the serial number of the first record past this page,
// or "" on the last page.
func (s *SmartContract) getAllCertificates(stub shim.ChaincodeStubInterface, args []string) sc.Response {
	if len(args) != 3 {
		return shim.Error("Incorrect number of arguments. Expecting 3")
	}
	startKey := args[0]
	endKey := args[1]
	// NOTE(review): a non-numeric args[2] silently becomes pageSize 0 (empty
	// content) and is interpolated verbatim into the JSON below, producing
	// invalid output — confirm callers always pass a number.
	pageSize, _ := strconv.Atoi(args[2])
	resultsIterator, err := stub.GetStateByRange(startKey, endKey)
	if err != nil {
		return shim.Error(err.Error())
	}
	defer resultsIterator.Close()
	// buffer is a JSON array containing QueryResults
	var buffer bytes.Buffer
	buffer.WriteString("[")
	first := true
	for resultsIterator.HasNext() && pageSize > 0 {
		queryResponse, err := resultsIterator.Next()
		if err != nil {
			return shim.Error(err.Error())
		}
		// Add a comma before array members, suppress it for the first one.
		if !first {
			buffer.WriteString(",")
		}
		buffer.Write(queryResponse.Value)
		first = false
		pageSize--
	}
	buffer.WriteString("]")
	// assemble the paging envelope around the content array
	var response bytes.Buffer
	response.WriteString("{")
	response.WriteString("\"page\": {")
	response.WriteString("\"pageSize\": " + args[2] + ",")
	response.WriteString("\"currentStartKey\": \"" + args[0] + "\",")
	response.WriteString("\"currentEndKey\": \"" + args[1] + "\",")
	response.WriteString("\"nextStartKey\": \"")
	if resultsIterator.HasNext() {
		queryResponse, err := resultsIterator.Next()
		if err != nil {
			return shim.Error(err.Error())
		}
		certificateJSON := Certificate{}
		_ = json.Unmarshal(queryResponse.Value, &certificateJSON)
		response.WriteString(certificateJSON.SerialNumber)
	}
	response.WriteString("\"")
	response.WriteString("},")
	response.WriteString("\"content\": " + buffer.String())
	response.WriteString("}")
	return shim.Success(response.Bytes())
}
|
package apierrtest
import (
"bytes"
"encoding/json"
"fmt"
"io"
"github.com/optiopay/x/apierr"
)
// APIValidationErrors is an error-slice alias that pretty prints one
// error per line.
type APIValidationErrors []error

// String renders every contained error on its own line using "%+v".
func (errs APIValidationErrors) String() string {
	var out bytes.Buffer
	for _, err := range errs {
		out.WriteString(fmt.Sprintf("%+v\n", err))
	}
	return out.String()
}
// HasAPIErrors deserialize given response body into api errors and check if
// all expected errors were provided, comparing type, code and param
// attributes. Message attribute is ignored.
func HasAPIErrors(expected apierr.Errors, body io.Reader) APIValidationErrors {
	var resp struct {
		Errors apierr.Errors `json:"errors"`
	}
	if err := json.NewDecoder(body).Decode(&resp); err != nil {
		return []error{fmt.Errorf("cannot decode response: %s", err)}
	}
	var problems []error
	// special case: when no errors are expected, every received one is a problem
	if expected == nil {
		for _, got := range resp.Errors {
			problems = append(problems, fmt.Errorf("unexpected error: %#v", got))
		}
		return problems
	}
	// every expected error must appear (matched by Param) and agree on Type/Code
	for _, want := range expected {
		matched := false
		for _, got := range resp.Errors {
			if want.Param == got.Param {
				problems = append(problems, compareErrors(want, got)...)
				matched = true
				break
			}
		}
		if !matched {
			problems = append(problems, fmt.Errorf("missing error for %q field", want.Param))
		}
	}
	// every received error must have been expected
	for _, got := range resp.Errors {
		wanted := false
		for _, want := range expected {
			if want.Param == got.Param {
				wanted = true
				break
			}
		}
		if !wanted {
			problems = append(problems, fmt.Errorf("unexpected error: %#v", got))
		}
	}
	return problems
}
// compareErrors reports mismatches between the Type and Code attributes
// of an expected and an actual API error (Message is intentionally ignored).
func compareErrors(expected, got apierr.Error) []error {
	var diffs []error
	if got.Type != expected.Type {
		diffs = append(diffs, fmt.Errorf("expected Type %q, got %q", expected.Type, got.Type))
	}
	if got.Code != expected.Code {
		diffs = append(diffs, fmt.Errorf("expected Code %q, got %q", expected.Code, got.Code))
	}
	return diffs
}
|
package checker
import (
"fmt"
"strings"
)
const (
	// commentPrefix marks translation-checker notes inside XML comments; the
	// trailing tabs match the well-defined indent of the XML structure.
	commentPrefix = " BBG-TRANSLATION-CHECKER-NOTES\n\t\t\t"
)
type (
	// File is one translation file under check, with its parsed rows and
	// any replacement rows produced by the checker.
	File struct {
		Filename     string
		Error        error
		Translations Translations
		rows         Translations
		replacements Translations
	}
	// Translation is a single translated message. The language may appear
	// under either an upper- or lowercase XML attribute (see Lang()).
	Translation struct {
		Comment   string `xml:",comment"`
		Tag       string `xml:",attr"`
		LangUpper string `xml:"Language,attr,omitempty"`
		LangLower string `xml:"language,attr,omitempty"`
		Message   string `xml:"Text"`
	}
	// Translations is an ordered collection of Translation rows.
	Translations []*Translation
)
// Lang returns the translation's language, preferring the uppercase
// "Language" attribute over the lowercase "language" one.
func (t Translation) Lang() string {
	if t.LangUpper == "" {
		return t.LangLower
	}
	return t.LangUpper
}
// AddReportToComment appends a checker report line to the translation's
// XML comment, initialising the comment header if it is missing.
func (t *Translation) AddReportToComment(r string) {
	// Make sure there is the starting part.
	if !strings.HasPrefix(t.Comment, commentPrefix) {
		t.Comment = commentPrefix
	}
	// xml.MarshalIndent does not indent multiline-comments. We do know the indent of the comment
	// since our xml-structure is well-defined. Still, this is just a workaround.
	t.Comment += fmt.Sprintf("\t\t%s\n\t\t\t", r)
}
// Copy returns a pointer to a duplicate of the translation. All fields
// are plain strings, so a value copy of the receiver is a full copy.
func (t Translation) Copy() *Translation {
	dup := t
	return &dup
}
// LookupByTag returns the first translation carrying tag, or nil when
// none matches.
func (ts Translations) LookupByTag(tag string) *Translation {
	for i := range ts {
		if ts[i].Tag == tag {
			return ts[i]
		}
	}
	return nil
}
// AllByTag collects every translation carrying tag; the result is never
// nil, only possibly empty.
func (ts Translations) AllByTag(tag string) []*Translation {
	matches := make([]*Translation, 0)
	for i := range ts {
		if ts[i].Tag == tag {
			matches = append(matches, ts[i])
		}
	}
	return matches
}
|
package lib
import (
"fmt"
"github.com/yamamoto-febc/jobq"
)
// SakuraCloudDefaultZones lists the zones targeted when none are given.
var SakuraCloudDefaultZones = []string{"tk1v", "is1a", "is1b", "tk1a"}

// Option holds the SakuraCloud API credentials and runtime switches.
type Option struct {
	AccessToken       string // API token (required)
	AccessTokenSecret string // API secret (required)
	Zones             []string
	TraceMode         bool
	ForceMode         bool
	JobQueueOption    *jobq.Option
}
// NewOption returns an Option preset with the default zones and a default
// job-queue configuration; credentials must be filled in by the caller.
func NewOption() *Option {
	return &Option{
		Zones:          SakuraCloudDefaultZones,
		JobQueueOption: jobq.NewOption(),
	}
}
// Validate checks the required credential fields, returning one error per
// missing value; an empty result means the options are valid.
func (o *Option) Validate() []error {
	var errs []error
	if len(o.AccessToken) == 0 {
		errs = append(errs, fmt.Errorf("[%s] is required", "token"))
	}
	if len(o.AccessTokenSecret) == 0 {
		errs = append(errs, fmt.Errorf("[%s] is required", "secret"))
	}
	return errs
}
|
package main
import "os"
import "flag"
import "time"
import "strings"
import "runtime"
import "net/http"
import "runtime/pprof"
import _ "net/http/pprof"
import "github.com/bnclabs/golog"
import "github.com/bnclabs/gostore/bogn"
import "github.com/bnclabs/gostore/bubt"
import "github.com/bnclabs/gostore/llrb"
// validate toggles result verification after the benchmark run.
var validate = false

// options collects every command-line knob of the dbperf tool; it is
// populated by optparse.
var options struct {
	db          string // storage engine under test (llrb|mvcc|bubt|bogn|lmdb|bolt|badger)
	cpu         int
	bogn        string // bogn operating mode
	memstore    string // in-memory store used by bogn
	period      int    // bogn flush period in seconds
	load        int    // initial load size
	inserts     int
	upserts     int
	deletes     int
	gets        int
	ranges      int
	limit       int // items per range iteration
	keylen      int
	vallen      int
	lsm         bool
	seed        int // randomness seed; 0 means derive from clock
	setas       string
	delas       string
	getas       string
	rngas       string
	flushratio  float64
	memcapacity int // llrb/mvcc memory cap in MB
	npaths      int // number of bubt directory paths
	msize       int // bubt m-block size
	zsize       int // bubt z-block size
	vsize       int // bubt v-block size
	mmap        bool
	log         string // comma-separated components to enable logging for
}
// optparse parses args into the package-level options struct, defaults the
// seed from the wall clock, and enables per-component logging.
func optparse(args []string) {
	f := flag.NewFlagSet("dbperf", flag.ExitOnError)
	// default to half the available cores
	cpu := runtime.GOMAXPROCS(-1) / 2
	f.StringVar(&options.db, "db", "llrb", "pick db storage to benchmark.")
	f.IntVar(&options.cpu, "cpu", cpu, "limit number of cores.")
	f.StringVar(&options.bogn, "bogn", "memonly", "memonly|durable|dgm|workset")
	f.StringVar(&options.memstore, "memstore", "mvcc", "llrb|mvcc for bogn")
	f.IntVar(&options.period, "period", 10, "bogn flush period, in seconds")
	f.IntVar(&options.load, "load", 0, "items to initially load")
	f.IntVar(&options.inserts, "inserts", 0, "new items to create")
	f.IntVar(&options.upserts, "upserts", 0, "items to update")
	f.IntVar(&options.deletes, "deletes", 0, "items to delete")
	f.IntVar(&options.gets, "gets", 0, "items to get")
	f.IntVar(&options.ranges, "ranges", 0, "items to iterate")
	f.IntVar(&options.limit, "limit", 100, "limit items per iteration")
	f.IntVar(&options.keylen, "klen", 32, "size of each key")
	f.IntVar(&options.vallen, "vlen", 32, "size of each value")
	f.BoolVar(&options.lsm, "lsm", true, "delete in lsm mode.")
	f.IntVar(&options.seed, "seed", 0, "seed value to generate randomness")
	f.StringVar(&options.setas, "setas", "set", "set|cas|txn|cur|all")
	f.StringVar(&options.delas, "delas", "del", "del|txn|cur|delcur|all")
	f.StringVar(&options.getas, "getas", "get", "get|txn|view|all")
	f.StringVar(&options.rngas, "rngas", "all", "tgn|tyn|vgn|vyn|all")
	f.IntVar(&options.memcapacity, "memcap", 0, "memory cap on llrb/mvcc in MB")
	f.IntVar(&options.npaths, "npaths", 1, "number of directory paths for bubt")
	// NOTE(review): this usage string looks copy-pasted from -msize; the
	// flag is a flush ratio, not a block size — confirm and fix the text.
	f.Float64Var(&options.flushratio, "flushratio", 0.25, "m-block size for bubt")
	f.IntVar(&options.msize, "msize", 4096, "m-block size for bubt")
	f.IntVar(&options.zsize, "zsize", 0, "z-block size for bubt")
	f.IntVar(&options.vsize, "vsize", 0, "v-block size for bubt")
	f.BoolVar(&options.mmap, "mmap", false, "enable mmap for z-blocks")
	f.StringVar(&options.log, "log", "", "llrb,mvcc,bubt,bogn")
	f.Parse(args)
	// a zero seed means "derive one from the clock"
	if options.seed == 0 {
		options.seed = int(time.Now().UnixNano())
	}
	// turn on logging for each requested component ("all" enables everything)
	for _, comp := range strings.Split(options.log, ",") {
		switch comp {
		case "bubt":
			bubt.LogComponents("self")
		case "bogn":
			bogn.LogComponents("self")
		case "llrb", "mvcc":
			llrb.LogComponents("self")
		case "all":
			bubt.LogComponents("all")
			bogn.LogComponents("all")
			llrb.LogComponents("all")
		}
	}
}
// main parses flags, exposes pprof over HTTP, wraps the run in CPU and
// heap profiles, and dispatches to the benchmark driver chosen by -db.
func main() {
	optparse(os.Args[1:])
	// live profiling endpoint (net/http/pprof side-effect import)
	go func() {
		log.Infof("%v", http.ListenAndServe("localhost:6060", nil))
	}()
	// cpu profile
	f1, err := os.Create("dbperf.pprof")
	if err != nil {
		log.Fatalf("%v", err)
	}
	defer f1.Close()
	pprof.StartCPUProfile(f1)
	defer pprof.StopCPUProfile()
	// mem profile, written at exit
	f2, err := os.Create("dbperf.mprof")
	if err != nil {
		log.Fatalf("%v", err)
	}
	defer f2.Close()
	defer pprof.WriteHeapProfile(f2)
	// run exactly one engine's benchmark
	switch options.db {
	case "lmdb":
		perflmdb()
	case "bolt":
		perfbolt()
	case "llrb":
		perfllrb()
	case "mvcc":
		perfmvcc()
	case "bubt":
		perfbubt()
	case "bogn":
		perfbogn()
	case "badger":
		perfbadger()
	}
}
|
package request

// AlipaySystemOauthTokenMethod is the Alipay API method name for the
// OAuth token exchange/refresh endpoint.
const AlipaySystemOauthTokenMethod = "alipay.system.oauth.token"

// AlipaySystemOauthTokenRequest carries the parameters of an
// alipay.system.oauth.token call.
// NOTE(review): only refresh_token is modeled here; the endpoint also has
// grant_type/code parameters — confirm this subset is intentional.
type AlipaySystemOauthTokenRequest struct {
	RefreshToken string `json:"refresh_token"`
}
|
package main
import (
"fmt"
"strconv"
"strings"
)
// decode translates a variable-width ASCII cipher: at each position a
// 3-digit prefix that lands in the letter ranges (65-90 upper, 97-122
// lower) consumes three digits; otherwise two digits are consumed as the
// character code. Any lone trailing digit is dropped (the original
// implementation could loop forever on such input).
func decode(cipher string) string {
	var out strings.Builder
	for len(cipher) >= 2 {
		if len(cipher) >= 3 {
			if n, err := strconv.Atoi(cipher[:3]); err == nil && ((n >= 65 && n <= 90) || (n >= 97 && n <= 122)) {
				out.WriteRune(rune(n))
				cipher = cipher[3:]
				continue
			}
		}
		// not a 3-digit letter code: take two digits
		n, _ := strconv.Atoi(cipher[:2])
		out.WriteRune(rune(n))
		cipher = cipher[2:]
	}
	return out.String()
}

// main decodes the hard-coded cipher and prints the plaintext. The
// decode logic is extracted into a helper; slicing the string directly
// replaces the original's O(n²) strings.Split/Join shuffling.
func main() {
	cipher := "11211111911310110810910097107108115111112119113101106107971101021101061021041149710511411497"
	fmt.Println(decode(cipher))
}
|
package textextract
import (
"regexp"
"strings"
)
const (
	// BLOCKSWIDTH is the number of consecutive lines grouped into one
	// block when measuring text density.
	BLOCKSWIDTH = 3
	// THRESHOLD is the minimum block length (in characters) treated as
	// body text. If blocks of headline links leak into the extracted body,
	// raise it: a larger value raises precision and lowers recall, while a
	// smaller value admits more noise but still captures one-sentence bodies.
	THRESHOLD = 86
)
// ExtractServer extracts the title and main body text from an HTML page.
type ExtractServer struct {
	source    string // raw HTML
	threshold int    // minimum block density counted as body text
}

// blockInfo holds the per-window density measurements used to locate the
// body text span.
type blockInfo struct {
	indexs    []int // character count of each BLOCKSWIDTH-line window
	maxIndex  int   // index of the densest window
	threshold int
}
// NewExtract builds an ExtractServer over the given HTML source using the
// default THRESHOLD.
func NewExtract(source string) *ExtractServer {
	return &ExtractServer{
		source:    source,
		threshold: THRESHOLD,
	}
}
// SetThreshold overrides the body-text density threshold and returns the
// receiver for call chaining.
func (this *ExtractServer) SetThreshold(threshold int) *ExtractServer {
	this.threshold = threshold
	return this
}
// ExtractTitle returns the contents of the document's <title> element, or
// "" when there is none. The match is case-insensitive.
// BUG FIX: the original stripped the tags with a case-SENSITIVE
// "</?title>" pattern after a case-insensitive match, so uppercase tags
// (e.g. <TITLE>) were left inside the returned string; using the capture
// group avoids the second pass entirely.
func (this *ExtractServer) ExtractTitle() string {
	m := regexp.MustCompile("(?is)<title>(.*?)</title>").FindStringSubmatch(this.source)
	if len(m) < 2 {
		return ""
	}
	return m[1]
}
// ExtractText extracts the main body text: clean the HTML, strip all
// whitespace per line, measure line-window densities, then expand from the
// densest window in both directions until density drops below threshold.
func (this *ExtractServer) ExtractText() string {
	source := cleanData(this.source)
	lines := removeAllSpace(source)
	// too few lines to form even a single block
	if len(lines) < BLOCKSWIDTH {
		return ""
	}
	blockLenIndexs, maxIndex := countBlockInfo(lines)
	b := &blockInfo{blockLenIndexs, maxIndex, this.threshold}
	startIndex := b.findStart()
	endIndex := b.findEnd()
	text := ""
	for i := startIndex; i <= endIndex; i++ {
		text += lines[i] + "\n"
	}
	return text
}
// whitespaceRE matches any run of whitespace; compiled once at package
// init instead of on every removeAllSpace call.
var whitespaceRE = regexp.MustCompile("\\s+")

// removeAllSpace splits source into lines and deletes every whitespace
// character within each line.
func removeAllSpace(source string) []string {
	lines := strings.Split(source, "\n")
	for i, line := range lines {
		lines[i] = whitespaceRE.ReplaceAllString(line, "")
	}
	return lines
}
// 块 长度统计信息及最大索引
func countBlockInfo(lines []string) ([]int, int) {
var wordsNum int
blockLenIndexs := make([]int, len(lines)-BLOCKSWIDTH+1)
for i := 0; i < len(lines)-BLOCKSWIDTH; i++ {
wordsNum = 0
for j := i; j < i+BLOCKSWIDTH; j++ {
wordsNum += len(lines[j])
}
blockLenIndexs[i] = wordsNum
}
// 长度最大的块的索引
count := len(blockLenIndexs)
maxIndex := 0
for k, v := range blockLenIndexs {
if v > count {
count = v
maxIndex = k
}
}
return blockLenIndexs, maxIndex
}
// findStart walks backwards from the densest window until a window falls
// below the threshold, and returns the first index still at or above it.
func (b *blockInfo) findStart() int {
	i := b.maxIndex - 1
	for i >= 0 && b.indexs[i] >= b.threshold {
		i--
	}
	return i + 1
}
// findEnd walks forward from the densest window until a window falls
// below the threshold, and returns the last index still at or above it.
func (b *blockInfo) findEnd() int {
	i := b.maxIndex + 1
	for i < len(b.indexs) && b.indexs[i] >= b.threshold {
		i++
	}
	return i - 1
}
// Precompiled patterns for cleanData; the original recompiled all seven
// regexps on every call, which is wasted work for constant patterns.
var (
	doctypeRE = regexp.MustCompile("(?is)<!DOCTYPE.*?>")
	commentRE = regexp.MustCompile("(?is)<!--.*?-->")
	scriptRE  = regexp.MustCompile("(?is)<script.*?>.*?</script>")
	styleRE   = regexp.MustCompile("(?is)<style.*?>.*?</style>")
	tagRE     = regexp.MustCompile("(?is)<.*?>")
	entityRE  = regexp.MustCompile("&.{2,5};|&#.{2,5};")
)

// cleanData strips non-body markup from an HTML document: doctype,
// comments, scripts, styles, remaining tags and character entities, then
// normalises CRLF line endings to LF.
func cleanData(source string) string {
	source = doctypeRE.ReplaceAllString(source, "")
	source = commentRE.ReplaceAllString(source, "")
	source = scriptRE.ReplaceAllString(source, "")
	source = styleRE.ReplaceAllString(source, "")
	source = tagRE.ReplaceAllString(source, "")    // remove HTML Tags
	source = entityRE.ReplaceAllString(source, "") // remove some special charcaters
	// plain string replacement needs no regexp machinery
	return strings.ReplaceAll(source, "\r\n", "\n")
}
|
package backends
import (
"errors"
"fmt"
"log"
"time"
"github.com/dchest/passwordreset"
"github.com/wealthworks/csmtp"
"github.com/liut/staffio/pkg/common"
"github.com/liut/staffio/pkg/models"
"github.com/liut/staffio/pkg/settings"
)
var (
	// ErrInvalidResetToken is returned when a password-reset verification
	// record cannot be found for a user.
	ErrInvalidResetToken = errors.New("invalid reset token or not found")
)
// getResetHash returns the stored verification code hash for uid; it is
// used by passwordreset as the per-user secret component.
func (s *serviceImpl) getResetHash(uid string) ([]byte, error) {
	if _, err := s.Get(uid); err != nil {
		return nil, fmt.Errorf("no such user %s", uid)
	}
	uv, err := s.LoadVerify(uid)
	if err != nil {
		return nil, ErrInvalidResetToken
	}
	return uv.CodeHashBytes(), nil
}
// PasswordForgot starts the forgot-password flow for uid. Only the email
// alias type is supported, and target must match the staff's registered
// email address.
func (s *serviceImpl) PasswordForgot(at common.AliasType, target, uid string) (err error) {
	var staff *models.Staff
	staff, err = s.Get(uid)
	if err != nil {
		return
	}
	if at != common.AtEmail {
		err = fmt.Errorf("invalid alias type %s", at.String())
		return
	}
	// BUG FIX: the original condition `at != common.AtEmail && target !=
	// staff.Email` was unreachable after the check above, so a mismatched
	// target address was silently accepted.
	if target != staff.Email {
		err = fmt.Errorf("incorrect email %s", target)
		return
	}
	return s.passwordForgotPrepare(staff)
}
// passwordForgotPrepare creates and persists a verification record for the
// staff member, then emails them a reset token that expires in 2 hours.
func (s *serviceImpl) passwordForgotPrepare(staff *models.Staff) (err error) {
	uv := models.NewVerify(common.AtEmail, staff.Email, staff.Uid)
	err = s.SaveVerify(uv)
	if err != nil {
		return
	}
	// audit-log failure is non-fatal for the reset flow
	err = WriteUserLog(staff.Uid, "password forgot", fmt.Sprintf("id %d, ch %d", uv.Id, uv.CodeHash))
	if err != nil {
		log.Printf("userLog ERR %s", err)
	}
	// Generate reset token that expires in 2 hours
	secret := []byte(settings.PwdSecret)
	token := passwordreset.NewToken(staff.Uid, 2*time.Hour, uv.CodeHashBytes(), secret)
	err = sendResetEmail(staff, token)
	return
}
// PasswordResetTokenVerify validates a reset token against the stored
// code hash and returns the uid embedded in it.
func (s *serviceImpl) PasswordResetTokenVerify(token string) (uid string, err error) {
	uid, err = passwordreset.VerifyToken(token, s.getResetHash, []byte(settings.PwdSecret))
	if err != nil {
		log.Printf("passwordreset.VerifyToken %q ERR %s", token, err)
	}
	return
}
// PasswordResetWithToken verifies the reset token, confirms it belongs to
// login, performs the password reset, and finally deletes the consumed
// password_reset record so the token cannot be replayed.
func (s *serviceImpl) PasswordResetWithToken(login, token, passwd string) (err error) {
	var uid string
	uid, err = s.PasswordResetTokenVerify(token)
	if err != nil {
		// verification failed, don't allow password reset
		return
	}
	// token must belong to the user requesting the reset
	if login != uid {
		return fmt.Errorf("invalid login %s", login)
	}
	// OK, reset password for uid (e.g. allow to change it)
	err = s.PasswordReset(uid, passwd)
	if err == nil {
		// consume the verification record inside a transaction
		qs := func(db dbTxer) error {
			rs, de := db.Exec("DELETE FROM password_reset WHERE uid = $1", uid)
			if de == nil {
				ra, _ := rs.RowsAffected()
				log.Printf("deleted %d", ra)
			}
			return de
		}
		err = withTxQuery(qs)
	}
	return
}
// SaveVerify stores a verification record for uv.Uid inside a single
// transaction, replacing any record that already exists for that uid, and
// writes the generated row id back into uv.Id.
func (s *serviceImpl) SaveVerify(uv *models.Verify) error {
	qs := func(db dbTxer) error {
		// at most one outstanding reset per uid: delete the previous row
		euv, err := s.LoadVerify(uv.Uid)
		if err == nil {
			str := `DELETE FROM password_reset WHERE id = $1`
			_, err = db.Exec(str, euv.Id)
			if err != nil {
				log.Printf("DELETE password_reset %s ERR %s", uv.Uid, err)
				return err
			}
		}
		str := `INSERT INTO password_reset(type_id, target, uid, code_hash, life_seconds)
	 VALUES($1, $2, $3, $4, $5) RETURNING id`
		var id int
		err = db.Get(&id, str, uv.Type, uv.Target, uv.Uid, uv.CodeHash, uv.LifeSeconds)
		if err == nil {
			log.Printf("new password_reset id: %d of %s(%s)", id, uv.Uid, uv.Target)
			// propagate the generated primary key to the caller's record
			if id > 0 {
				uv.Id = id
			}
			return nil
		}
		log.Printf("INSERT password_reset %s ERR %s", uv.Uid, err)
		return err
	}
	return withTxQuery(qs)
}
// LoadVerify fetches the most recently updated password_reset row for uid.
func (s *serviceImpl) LoadVerify(uid string) (*models.Verify, error) {
	var uv models.Verify
	err := withDbQuery(func(db dber) error {
		return db.Get(&uv, `SELECT id, uid, type_id, target, code_hash, life_seconds, created, updated FROM password_reset
	 WHERE uid = $1 ORDER BY updated DESC LIMIT 1`, uid)
	})
	if err != nil {
		log.Printf("query verify with uid %q ERR %s", uid, err)
	}
	return &uv, err
}
// InitSMTP configures the csmtp package-level mail sender from the
// application SMTP settings; call once before sending any mail.
func InitSMTP() {
	csmtp.Host = settings.SMTP.Host
	csmtp.Port = settings.SMTP.Port
	csmtp.Name = settings.SMTP.SenderName
	csmtp.From = settings.SMTP.SenderEmail
	csmtp.Auth(settings.SMTP.SenderPassword)
}
// sendResetEmail mails the password-reset link (built from
// tplPasswordReset) to the staff member; it is a no-op when SMTP is
// disabled in settings.
func sendResetEmail(staff *models.Staff, token string) error {
	if !settings.SMTP.Enabled {
		log.Print("smtp is disabled")
		return nil
	}
	log.Printf("sending reset email to %s via %s", staff.Email, csmtp.Host)
	body := fmt.Sprintf(tplPasswordReset, staff.Name(), settings.BaseURL, token)
	if err := csmtp.SendMail("Password reset request", body, staff.Email); err != nil {
		log.Printf("send reset email ERR %s", err)
		return err
	}
	log.Printf("send reset email of %q OK", staff.Email)
	return nil
}
const (
	// tplPasswordReset is the HTML body of the reset mail; the verbs are
	// filled with (staff name, base URL, token) by sendResetEmail.
	// An older variant using the /password/reset?rt= path is kept for reference:
	// tplPasswordReset = `Dear %s: <br/><br/>
	// To reset your password, pls <a href="%s/password/reset?rt=%s">click here</a>.`
	tplPasswordReset = `Dear %s: <br/><br/>
	To reset your password, pls <a href="%s/reset?token=%s">click here</a>.`
)
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"cmp"
"context"
"fmt"
"math"
"runtime/pprof"
"slices"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/opentracing/opentracing-go"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/ddl/schematracker"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/domain/infosync"
"github.com/pingcap/tidb/executor/internal/exec"
"github.com/pingcap/tidb/executor/internal/pdhelper"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/auth"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/terror"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/privilege"
"github.com/pingcap/tidb/resourcemanager/pool/workerpool"
poolutil "github.com/pingcap/tidb/resourcemanager/util"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/sessiontxn"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/admin"
"github.com/pingcap/tidb/util/channel"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/dbterror/exeerrors"
"github.com/pingcap/tidb/util/deadlockhistory"
"github.com/pingcap/tidb/util/disk"
"github.com/pingcap/tidb/util/execdetails"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/logutil/consistency"
"github.com/pingcap/tidb/util/mathutil"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/resourcegrouptag"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/pingcap/tidb/util/syncutil"
"github.com/pingcap/tidb/util/topsql"
topsqlstate "github.com/pingcap/tidb/util/topsql/state"
"github.com/pingcap/tidb/util/tracing"
tikverr "github.com/tikv/client-go/v2/error"
tikvstore "github.com/tikv/client-go/v2/kv"
tikvutil "github.com/tikv/client-go/v2/util"
atomicutil "go.uber.org/atomic"
"go.uber.org/zap"
)
// Compile-time assertions that each executor implements exec.Executor,
// plus the process-wide resource trackers.
var (
	_ exec.Executor = &CheckTableExec{}
	_ exec.Executor = &HashAggExec{}
	_ exec.Executor = &HashJoinExec{}
	_ exec.Executor = &IndexLookUpExecutor{}
	_ exec.Executor = &IndexReaderExecutor{}
	_ exec.Executor = &LimitExec{}
	_ exec.Executor = &MaxOneRowExec{}
	_ exec.Executor = &MergeJoinExec{}
	_ exec.Executor = &ProjectionExec{}
	_ exec.Executor = &SelectionExec{}
	_ exec.Executor = &SelectLockExec{}
	_ exec.Executor = &ShowNextRowIDExec{}
	_ exec.Executor = &ShowDDLExec{}
	_ exec.Executor = &ShowDDLJobsExec{}
	_ exec.Executor = &ShowDDLJobQueriesExec{}
	_ exec.Executor = &SortExec{}
	_ exec.Executor = &StreamAggExec{}
	_ exec.Executor = &TableDualExec{}
	_ exec.Executor = &TableReaderExecutor{}
	_ exec.Executor = &TableScanExec{}
	_ exec.Executor = &TopNExec{}
	_ exec.Executor = &UnionExec{}
	_ exec.Executor = &FastCheckTableExec{}
	// GlobalMemoryUsageTracker is the ancestor of all the Executors' memory tracker and GlobalMemory Tracker
	GlobalMemoryUsageTracker *memory.Tracker
	// GlobalDiskUsageTracker is the ancestor of all the Executors' disk tracker
	GlobalDiskUsageTracker *disk.Tracker
	// GlobalAnalyzeMemoryTracker is the ancestor of all the Analyze jobs' memory tracker and child of global Tracker
	GlobalAnalyzeMemoryTracker *memory.Tracker
)
// Compile-time assertions that each reader implements dataSourceExecutor.
var (
	_ dataSourceExecutor = &TableReaderExecutor{}
	_ dataSourceExecutor = &IndexReaderExecutor{}
	_ dataSourceExecutor = &IndexLookUpExecutor{}
	_ dataSourceExecutor = &IndexMergeReaderExecutor{}
	// CheckTableFastBucketSize is the bucket size of fast check table.
	CheckTableFastBucketSize = atomic.Int64{}
)
// dataSourceExecutor is a table DataSource converted Executor.
// Currently, there are TableReader/IndexReader/IndexLookUp/IndexMergeReader.
// Note, partition reader is special and the caller should handle it carefully.
type dataSourceExecutor interface {
	exec.Executor
	// Table returns the table this executor reads from.
	Table() table.Table
}
// Panic messages used by globalPanicOnExceed when a global quota is hit.
const (
	// globalPanicStorageExceed represents the panic message when out of storage quota.
	globalPanicStorageExceed string = "Out Of Quota For Local Temporary Space!"
	// globalPanicMemoryExceed represents the panic message when out of memory limit.
	globalPanicMemoryExceed string = "Out Of Global Memory Limit!"
	// globalPanicAnalyzeMemoryExceed represents the panic message when out of analyze memory limit.
	globalPanicAnalyzeMemoryExceed string = "Out Of Global Analyze Memory Limit!"
)
// globalPanicOnExceed panics when GlobalDisTracker storage usage exceeds storage quota.
type globalPanicOnExceed struct {
	memory.BaseOOMAction
	mutex syncutil.Mutex // For synchronization.
}
// init wires up the global memory/disk/analyze trackers, installs the
// panic-on-exceed action on each, and exposes the analyze memory quota
// through the variable package hooks.
func init() {
	action := &globalPanicOnExceed{}
	GlobalMemoryUsageTracker = memory.NewGlobalTracker(memory.LabelForGlobalMemory, -1)
	GlobalMemoryUsageTracker.SetActionOnExceed(action)
	// NOTE(review): "NewGlobalTrcaker" appears to be the disk package's actual
	// (misspelled) API name — do not "fix" the call site alone.
	GlobalDiskUsageTracker = disk.NewGlobalTrcaker(memory.LabelForGlobalStorage, -1)
	GlobalDiskUsageTracker.SetActionOnExceed(action)
	GlobalAnalyzeMemoryTracker = memory.NewTracker(memory.LabelForGlobalAnalyzeMemory, -1)
	GlobalAnalyzeMemoryTracker.SetActionOnExceed(action)
	// register quota funcs
	variable.SetMemQuotaAnalyze = GlobalAnalyzeMemoryTracker.SetBytesLimit
	variable.GetMemQuotaAnalyze = GlobalAnalyzeMemoryTracker.GetBytesLimit
	// TODO: do not attach now to avoid impact to global, will attach later when analyze memory track is stable
	//GlobalAnalyzeMemoryTracker.AttachToGlobalTracker(GlobalMemoryUsageTracker)
	schematracker.ConstructResultOfShowCreateDatabase = ConstructResultOfShowCreateDatabase
	schematracker.ConstructResultOfShowCreateTable = ConstructResultOfShowCreateTable
	// CheckTableFastBucketSize is used to set the fast analyze bucket size for check table.
	CheckTableFastBucketSize.Store(1024)
}
// Start starts the backend components (currently only the global PD helper).
func Start() {
	pdhelper.GlobalPDHelper.Start()
}
// Stop stops the backend components started by Start.
func Stop() {
	pdhelper.GlobalPDHelper.Stop()
}
// Action implements the OOM-action interface: it panics with a message that
// identifies which global quota (storage, memory, or analyze memory) was
// exceeded by tracker t.
func (a *globalPanicOnExceed) Action(t *memory.Tracker) {
	a.mutex.Lock()
	defer a.mutex.Unlock()
	// Start from the fallback message and narrow it down by label.
	msg := "Out of Unknown Resource Quota!"
	switch t.Label() {
	case memory.LabelForGlobalStorage:
		msg = globalPanicStorageExceed
	case memory.LabelForGlobalMemory:
		msg = globalPanicMemoryExceed
	case memory.LabelForGlobalAnalyzeMemory:
		msg = globalPanicAnalyzeMemoryExceed
	}
	panic(msg)
}
// GetPriority get the priority of the Action; panicking has the highest
// (DefPanicPriority) so it runs last among registered OOM actions.
func (*globalPanicOnExceed) GetPriority() int64 {
	return memory.DefPanicPriority
}
// newFirstChunk creates a fresh chunk sized by the executor's initial
// capacity and max chunk size, used to buffer the executor's first results.
func newFirstChunk(e exec.Executor) *chunk.Chunk {
	b := e.Base()
	return chunk.New(b.RetFieldTypes(), b.InitCap(), b.MaxChunkSize())
}
// tryNewCacheChunk asks the session for a (possibly cached/pooled) chunk with
// the executor's return types and capacity settings.
func tryNewCacheChunk(e exec.Executor) *chunk.Chunk {
	b := e.Base()
	sessVars := b.Ctx().GetSessionVars()
	return sessVars.GetNewChunkWithCapacity(b.RetFieldTypes(), b.InitCap(), b.MaxChunkSize(), b.AllocPool)
}
// newList creates a chunk.List to buffer the current executor's result,
// sized by the executor's initial capacity and max chunk size.
func newList(e exec.Executor) *chunk.List {
	b := e.Base()
	return chunk.NewList(b.RetFieldTypes(), b.InitCap(), b.MaxChunkSize())
}
// retTypes returns all output column types of executor e.
func retTypes(e exec.Executor) []*types.FieldType {
	return e.Base().RetFieldTypes()
}
// Next is a wrapper function on e.Next(), it handles some common codes.
//
// It records runtime stats when enabled, checks the session kill flag both
// before and after the call (so a kill arriving during a long Next still
// surfaces), traces the call, and registers the SQL/plan for Top SQL once
// per statement.
func Next(ctx context.Context, e exec.Executor, req *chunk.Chunk) error {
	base := e.Base()
	if base.RuntimeStats() != nil {
		start := time.Now()
		defer func() { base.RuntimeStats().Record(time.Since(start), req.NumRows()) }()
	}
	sessVars := base.Ctx().GetSessionVars()
	// NOTE(review): Killed==2 maps to max-exec-time, Killed==1 to an explicit
	// interrupt — inferred from the errors returned; confirm against the
	// Killed flag's definition.
	if atomic.LoadUint32(&sessVars.Killed) == 2 {
		return exeerrors.ErrMaxExecTimeExceeded
	}
	if atomic.LoadUint32(&sessVars.Killed) == 1 {
		return exeerrors.ErrQueryInterrupted
	}
	r, ctx := tracing.StartRegionEx(ctx, fmt.Sprintf("%T.Next", e))
	defer r.End()
	// Register SQL and plan for Top SQL exactly once per statement (CAS guards
	// against concurrent/repeated registration).
	if topsqlstate.TopSQLEnabled() && sessVars.StmtCtx.IsSQLAndPlanRegistered.CompareAndSwap(false, true) {
		registerSQLAndPlanInExecForTopSQL(sessVars)
	}
	err := e.Next(ctx, req)
	if err != nil {
		return err
	}
	// recheck whether the session/query is killed during the Next()
	if atomic.LoadUint32(&sessVars.Killed) == 2 {
		err = exeerrors.ErrMaxExecTimeExceeded
	}
	if atomic.LoadUint32(&sessVars.Killed) == 1 {
		err = exeerrors.ErrQueryInterrupted
	}
	return err
}
// CommandDDLJobsExec is the general struct for Cancel/Pause/Resume commands on
// DDL jobs. These command currently by admin have the very similar struct and
// operations, it should be a better idea to have them in the same struct.
type CommandDDLJobsExec struct {
	exec.BaseExecutor

	// cursor tracks how many job IDs have been emitted by Next so far.
	cursor int
	// jobIDs are the DDL job IDs the command operates on.
	jobIDs []int64
	// errs holds the per-job results produced by execute; nil entry = success.
	errs []error

	// execute is the concrete Cancel/Pause/Resume implementation, run in Open.
	execute func(se sessionctx.Context, ids []int64) (errs []error, err error)
}
// Open implements the Executor for all Cancel/Pause/Resume command on DDL jobs
// just with different processes. And, it should not be called directly by the
// Executor.
//
// It runs the whole command eagerly here; Next only formats the collected
// per-job results.
func (e *CommandDDLJobsExec) Open(context.Context) error {
	// We want to use a global transaction to execute the admin command, so we don't use e.Ctx() here.
	newSess, err := e.GetSysSession()
	if err != nil {
		return err
	}
	e.errs, err = e.execute(newSess, e.jobIDs)
	// Release the system session regardless of the execute outcome.
	e.ReleaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), newSess)
	return err
}
// Next implements the Executor Next interface for Cancel/Pause/Resume.
// It streams one row per job ID: the ID string plus either "successful" or
// the error recorded for that job during Open.
func (e *CommandDDLJobsExec) Next(_ context.Context, req *chunk.Chunk) error {
	req.GrowAndReset(e.MaxChunkSize())
	remaining := len(e.jobIDs) - e.cursor
	if remaining <= 0 {
		return nil
	}
	batch := mathutil.Min(req.Capacity(), remaining)
	end := e.cursor + batch
	for i := e.cursor; i < end; i++ {
		req.AppendString(0, strconv.FormatInt(e.jobIDs[i], 10))
		result := "successful"
		if e.errs != nil && e.errs[i] != nil {
			result = fmt.Sprintf("error: %v", e.errs[i])
		}
		req.AppendString(1, result)
	}
	e.cursor = end
	return nil
}
// CancelDDLJobsExec represents a cancel DDL jobs executor.
// It is a thin wrapper: all behavior lives in the embedded CommandDDLJobsExec.
type CancelDDLJobsExec struct {
	*CommandDDLJobsExec
}

// PauseDDLJobsExec indicates an Executor for Pause a DDL Job.
// It is a thin wrapper: all behavior lives in the embedded CommandDDLJobsExec.
type PauseDDLJobsExec struct {
	*CommandDDLJobsExec
}

// ResumeDDLJobsExec indicates an Executor for Resume a DDL Job.
// It is a thin wrapper: all behavior lives in the embedded CommandDDLJobsExec.
type ResumeDDLJobsExec struct {
	*CommandDDLJobsExec
}
// ShowNextRowIDExec represents a show the next row ID executor.
type ShowNextRowIDExec struct {
	exec.BaseExecutor
	// tblName is the table whose allocators are inspected.
	tblName *ast.TableName
	// done marks that the single result batch has already been produced.
	done bool
}
// Next implements the Executor Next interface.
//
// It emits one row per ID allocator of the table: schema, table, column name,
// the next global auto ID, and the allocator kind (_TIDB_ROWID /
// AUTO_INCREMENT / AUTO_RANDOM / SEQUENCE). All rows are produced in a single
// call; subsequent calls return an empty chunk.
func (e *ShowNextRowIDExec) Next(_ context.Context, req *chunk.Chunk) error {
	req.Reset()
	if e.done {
		return nil
	}
	is := domain.GetDomain(e.Ctx()).InfoSchema()
	tbl, err := is.TableByName(e.tblName.Schema, e.tblName.Name)
	if err != nil {
		return err
	}
	tblMeta := tbl.Meta()
	allocators := tbl.Allocators(e.Ctx())
	for _, alloc := range allocators.Allocs {
		nextGlobalID, err := alloc.NextGlobalAutoID()
		if err != nil {
			return err
		}

		var colName, idType string
		switch alloc.GetType() {
		case autoid.RowIDAllocType:
			idType = "_TIDB_ROWID"
			// When the primary key is the handle, report its auto-increment
			// column; otherwise the row ID is the hidden extra handle column.
			if tblMeta.PKIsHandle {
				if col := tblMeta.GetAutoIncrementColInfo(); col != nil {
					colName = col.Name.O
				}
			} else {
				colName = model.ExtraHandleName.O
			}
		case autoid.AutoIncrementType:
			idType = "AUTO_INCREMENT"
			if tblMeta.PKIsHandle {
				if col := tblMeta.GetAutoIncrementColInfo(); col != nil {
					colName = col.Name.O
				}
			} else {
				colName = model.ExtraHandleName.O
			}
		case autoid.AutoRandomType:
			idType = "AUTO_RANDOM"
			colName = tblMeta.GetPkName().O
		case autoid.SequenceType:
			idType = "SEQUENCE"
			// Sequences have no backing column.
			colName = ""
		default:
			return autoid.ErrInvalidAllocatorType.GenWithStackByArgs()
		}

		req.AppendString(0, e.tblName.Schema.O)
		req.AppendString(1, e.tblName.Name.O)
		req.AppendString(2, colName)
		req.AppendInt64(3, nextGlobalID)
		req.AppendString(4, idType)
	}
	e.done = true
	return nil
}
// ShowDDLExec represents a show DDL executor.
type ShowDDLExec struct {
	exec.BaseExecutor

	// ddlOwnerID is the ID of the current DDL owner node.
	ddlOwnerID string
	// selfID is this TiDB server's ID.
	selfID string
	// ddlInfo carries the schema version and the pending DDL jobs.
	ddlInfo *ddl.Info
	// done marks that the single result row has already been produced.
	done bool
}
// Next implements the Executor Next interface.
//
// It emits a single row describing the current DDL state: schema version,
// owner ID and address, the newline-joined pending job descriptions, this
// server's ID, and the newline-joined job queries.
func (e *ShowDDLExec) Next(ctx context.Context, req *chunk.Chunk) error {
	req.Reset()
	if e.done {
		return nil
	}

	// Join job descriptions and queries with "\n". strings.Builder avoids the
	// quadratic cost of repeated string concatenation when many jobs queue up.
	var ddlJobs, query strings.Builder
	for i, job := range e.ddlInfo.Jobs {
		if i > 0 {
			ddlJobs.WriteString("\n")
			query.WriteString("\n")
		}
		ddlJobs.WriteString(job.String())
		query.WriteString(job.Query)
	}

	serverInfo, err := infosync.GetServerInfoByID(ctx, e.ddlOwnerID)
	if err != nil {
		return err
	}

	serverAddress := serverInfo.IP + ":" +
		strconv.FormatUint(uint64(serverInfo.Port), 10)

	req.AppendInt64(0, e.ddlInfo.SchemaVer)
	req.AppendString(1, e.ddlOwnerID)
	req.AppendString(2, serverAddress)
	req.AppendString(3, ddlJobs.String())
	req.AppendString(4, e.selfID)
	req.AppendString(5, query.String())

	e.done = true
	return nil
}
// ShowDDLJobsExec represent a show DDL jobs executor.
type ShowDDLJobsExec struct {
	exec.BaseExecutor
	DDLJobRetriever

	// jobNumber is how many history jobs to show; 0 means the default count.
	jobNumber int
	is        infoschema.InfoSchema
	// sess is the system session acquired in Open and released in Close.
	sess sessionctx.Context
}
// DDLJobRetriever retrieve the DDLJobs.
// nolint:structcheck
type DDLJobRetriever struct {
	// runningJobs are the currently queued/running DDL jobs.
	runningJobs []*model.Job
	// historyJobIter iterates finished jobs from newest to oldest.
	historyJobIter meta.LastJobIterator
	// cursor counts rows already emitted (running first, then history).
	cursor int
	is     infoschema.InfoSchema
	// activeRoles are used for privilege checks when filtering rows.
	activeRoles []*auth.RoleIdentity
	// cacheJobs is a reusable buffer for batches of history jobs.
	cacheJobs []*model.Job
	// TZLoc is the session time zone used to render timestamps.
	TZLoc *time.Location
}
// initial loads the running DDL jobs and positions a history-job iterator,
// resetting the cursor so retrieval starts from the beginning.
func (e *DDLJobRetriever) initial(txn kv.Transaction, sess sessionctx.Context) error {
	runningJobs, err := ddl.GetAllDDLJobs(sess)
	if err != nil {
		return err
	}
	e.historyJobIter, err = ddl.GetLastHistoryDDLJobsIterator(meta.NewMeta(txn))
	if err != nil {
		return err
	}
	e.runningJobs = runningJobs
	e.cursor = 0
	return nil
}
// appendJobToChunk renders one DDL job as a row in req (and one extra row per
// sub-job for multi-schema-change jobs). Columns, in order: job ID, schema
// name, table name, type, schema state, schema ID, table ID, row count,
// create/start/finish times, and job state. Rows the user lacks privileges
// for are silently skipped when checker is non-nil.
func (e *DDLJobRetriever) appendJobToChunk(req *chunk.Chunk, job *model.Job, checker privilege.Manager) {
	schemaName := job.SchemaName
	tableName := ""
	finishTS := uint64(0)
	// Prefer names recorded in the binlog info: they reflect the state the
	// job actually ran against.
	if job.BinlogInfo != nil {
		finishTS = job.BinlogInfo.FinishedTS
		if job.BinlogInfo.TableInfo != nil {
			tableName = job.BinlogInfo.TableInfo.Name.L
		}
		if job.BinlogInfo.MultipleTableInfos != nil {
			// Join all affected table names with commas.
			tablenames := new(strings.Builder)
			for i, affect := range job.BinlogInfo.MultipleTableInfos {
				if i > 0 {
					fmt.Fprintf(tablenames, ",")
				}
				fmt.Fprintf(tablenames, "%s", affect.Name.L)
			}
			tableName = tablenames.String()
		}
		if len(schemaName) == 0 && job.BinlogInfo.DBInfo != nil {
			schemaName = job.BinlogInfo.DBInfo.Name.L
		}
	}
	if len(tableName) == 0 {
		tableName = job.TableName
	}
	// For compatibility, the old version of DDL Job wasn't store the schema name and table name.
	if len(schemaName) == 0 {
		schemaName = getSchemaName(e.is, job.SchemaID)
	}
	if len(tableName) == 0 {
		tableName = getTableName(e.is, job.TableID)
	}

	createTime := ts2Time(job.StartTS, e.TZLoc)
	startTime := ts2Time(job.RealStartTS, e.TZLoc)
	finishTime := ts2Time(finishTS, e.TZLoc)

	// Check the privilege.
	if checker != nil && !checker.RequestVerification(e.activeRoles, strings.ToLower(schemaName), strings.ToLower(tableName), "", mysql.AllPrivMask) {
		return
	}

	req.AppendInt64(0, job.ID)
	req.AppendString(1, schemaName)
	req.AppendString(2, tableName)
	req.AppendString(3, job.Type.String()+showAddIdxReorgTp(job))
	req.AppendString(4, job.SchemaState.String())
	req.AppendInt64(5, job.SchemaID)
	req.AppendInt64(6, job.TableID)
	req.AppendInt64(7, job.RowCount)
	req.AppendTime(8, createTime)
	// RealStartTS == 0 means the job has not actually started running yet.
	if job.RealStartTS > 0 {
		req.AppendTime(9, startTime)
	} else {
		req.AppendNull(9)
	}
	if finishTS > 0 {
		req.AppendTime(10, finishTime)
	} else {
		req.AppendNull(10)
	}
	req.AppendString(11, job.State.String())

	// A multi-schema-change job additionally lists each sub-job, marked with
	// a "/* subjob */" suffix in the type column.
	if job.Type == model.ActionMultiSchemaChange {
		for _, subJob := range job.MultiSchemaInfo.SubJobs {
			req.AppendInt64(0, job.ID)
			req.AppendString(1, schemaName)
			req.AppendString(2, tableName)
			req.AppendString(3, subJob.Type.String()+" /* subjob */"+showAddIdxReorgTpInSubJob(subJob))
			req.AppendString(4, subJob.SchemaState.String())
			req.AppendInt64(5, job.SchemaID)
			req.AppendInt64(6, job.TableID)
			req.AppendInt64(7, subJob.RowCount)
			req.AppendTime(8, createTime)
			if subJob.RealStartTS > 0 {
				realStartTS := ts2Time(subJob.RealStartTS, e.TZLoc)
				req.AppendTime(9, realStartTS)
			} else {
				req.AppendNull(9)
			}
			if finishTS > 0 {
				req.AppendTime(10, finishTime)
			} else {
				req.AppendNull(10)
			}
			req.AppendString(11, subJob.State.String())
		}
	}
}
// showAddIdxReorgTp returns a " /* <reorg-type> */" suffix for add-index /
// add-primary-key jobs that carry reorg metadata, and "" otherwise.
func showAddIdxReorgTp(job *model.Job) string {
	if job.Type != model.ActionAddIndex && job.Type != model.ActionAddPrimaryKey {
		return ""
	}
	if job.ReorgMeta == nil {
		return ""
	}
	if tp := job.ReorgMeta.ReorgTp.String(); len(tp) > 0 {
		return " /* " + tp + " */"
	}
	return ""
}
// showAddIdxReorgTpInSubJob is the sub-job variant of showAddIdxReorgTp:
// it returns a " /* <reorg-type> */" suffix for add-index / add-primary-key
// sub-jobs, and "" otherwise.
func showAddIdxReorgTpInSubJob(subJob *model.SubJob) string {
	if subJob.Type != model.ActionAddIndex && subJob.Type != model.ActionAddPrimaryKey {
		return ""
	}
	if tp := subJob.ReorgTp.String(); len(tp) > 0 {
		return " /* " + tp + " */"
	}
	return ""
}
// ts2Time converts a TSO timestamp into a types.Time in the given location,
// truncated to the default fractional-second precision.
func ts2Time(timestamp uint64, loc *time.Location) types.Time {
	// One "tick" at DefaultFsp precision (e.g. fsp=0 -> 1s).
	duration := time.Duration(math.Pow10(9-types.DefaultFsp)) * time.Nanosecond
	t := model.TSConvert2Time(timestamp)
	// BUG FIX: time.Time is immutable and Truncate returns the truncated
	// value. The original called t.Truncate(duration) and discarded the
	// result, so the time was never actually truncated.
	t = t.Truncate(duration)
	return types.NewTime(types.FromGoTime(t.In(loc)), mysql.TypeDatetime, types.DefaultFsp)
}
// ShowDDLJobQueriesExec represents a show DDL job queries executor.
// The jobs id that is given by 'admin show ddl job queries' statement,
// only be searched in the latest 10 history jobs.
type ShowDDLJobQueriesExec struct {
	exec.BaseExecutor

	// cursor tracks how far Next has scanned through jobs.
	cursor int
	// jobs is the deduplicated union of running and recent history jobs.
	jobs []*model.Job
	// jobIDs are the IDs requested by the statement.
	jobIDs []int64
}
// Open implements the Executor Open interface.
//
// It gathers the running DDL jobs plus the last DefNumHistoryJobs history
// jobs under a system-session transaction, deduplicates them by job ID
// (running jobs win), and stores the result in e.jobs for Next to scan.
func (e *ShowDDLJobQueriesExec) Open(ctx context.Context) error {
	var err error
	var jobs []*model.Job
	if err := e.BaseExecutor.Open(ctx); err != nil {
		return err
	}
	session, err := e.GetSysSession()
	if err != nil {
		return err
	}
	err = sessiontxn.NewTxn(context.Background(), session)
	if err != nil {
		return err
	}
	defer func() {
		// ReleaseSysSession will rollbacks txn automatically.
		e.ReleaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), session)
	}()
	txn, err := session.Txn(true)
	if err != nil {
		return err
	}
	session.GetSessionVars().SetInTxn(true)
	m := meta.NewMeta(txn)
	jobs, err = ddl.GetAllDDLJobs(session)
	if err != nil {
		return err
	}
	historyJobs, err := ddl.GetLastNHistoryDDLJobs(m, ddl.DefNumHistoryJobs)
	if err != nil {
		return err
	}
	appendedJobID := make(map[int64]struct{})
	// deduplicate job results
	// for situations when this operation happens at the same time with new DDLs being executed
	for _, job := range jobs {
		if _, ok := appendedJobID[job.ID]; !ok {
			appendedJobID[job.ID] = struct{}{}
			e.jobs = append(e.jobs, job)
		}
	}
	for _, historyJob := range historyJobs {
		if _, ok := appendedJobID[historyJob.ID]; !ok {
			appendedJobID[historyJob.ID] = struct{}{}
			e.jobs = append(e.jobs, historyJob)
		}
	}
	return nil
}
// Next implements the Executor Next interface.
// For each requested job ID found in the current batch of e.jobs, it appends
// that job's query text.
func (e *ShowDDLJobQueriesExec) Next(_ context.Context, req *chunk.Chunk) error {
	req.GrowAndReset(e.MaxChunkSize())
	if e.cursor >= len(e.jobs) {
		return nil
	}
	// NOTE(review): this early-out when more IDs are requested than jobs exist
	// returns no rows at all rather than the partial matches — confirm this is
	// the intended behavior.
	if len(e.jobIDs) >= len(e.jobs) {
		return nil
	}
	numCurBatch := mathutil.Min(req.Capacity(), len(e.jobs)-e.cursor)
	// O(len(jobIDs) * batch) scan; acceptable since both are small.
	for _, id := range e.jobIDs {
		for i := e.cursor; i < e.cursor+numCurBatch; i++ {
			if id == e.jobs[i].ID {
				req.AppendString(0, e.jobs[i].Query)
			}
		}
	}
	e.cursor += numCurBatch
	return nil
}
// ShowDDLJobQueriesWithRangeExec represents a show DDL job queries with range executor.
// The jobs id that is given by 'admin show ddl job queries' statement,
// can be searched within a specified range in history jobs using offset and limit.
type ShowDDLJobQueriesWithRangeExec struct {
	exec.BaseExecutor

	// cursor tracks how far Next has scanned; initialized to offset in Open.
	cursor int
	// jobs is the deduplicated union of running and history jobs.
	jobs []*model.Job
	// offset/limit bound the window of jobs to display.
	offset uint64
	limit  uint64
}
// Open implements the Executor Open interface.
//
// Like ShowDDLJobQueriesExec.Open, but fetches offset+limit history jobs and
// pre-positions the cursor at offset so Next starts inside the window.
func (e *ShowDDLJobQueriesWithRangeExec) Open(ctx context.Context) error {
	var err error
	var jobs []*model.Job
	if err := e.BaseExecutor.Open(ctx); err != nil {
		return err
	}
	session, err := e.GetSysSession()
	if err != nil {
		return err
	}
	err = sessiontxn.NewTxn(context.Background(), session)
	if err != nil {
		return err
	}
	defer func() {
		// ReleaseSysSession will rollbacks txn automatically.
		e.ReleaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), session)
	}()
	txn, err := session.Txn(true)
	if err != nil {
		return err
	}
	session.GetSessionVars().SetInTxn(true)
	m := meta.NewMeta(txn)
	jobs, err = ddl.GetAllDDLJobs(session)
	if err != nil {
		return err
	}
	// Fetch just enough history to cover the requested window.
	historyJobs, err := ddl.GetLastNHistoryDDLJobs(m, int(e.offset+e.limit))
	if err != nil {
		return err
	}
	appendedJobID := make(map[int64]struct{})
	// deduplicate job results
	// for situations when this operation happens at the same time with new DDLs being executed
	for _, job := range jobs {
		if _, ok := appendedJobID[job.ID]; !ok {
			appendedJobID[job.ID] = struct{}{}
			e.jobs = append(e.jobs, job)
		}
	}
	for _, historyJob := range historyJobs {
		if _, ok := appendedJobID[historyJob.ID]; !ok {
			appendedJobID[historyJob.ID] = struct{}{}
			e.jobs = append(e.jobs, historyJob)
		}
	}
	// Skip straight to the requested offset.
	if e.cursor < int(e.offset) {
		e.cursor = int(e.offset)
	}
	return nil
}
// Next implements the Executor Next interface.
// It emits (job ID, query) rows from the [offset, offset+limit) window of
// e.jobs, batch by batch.
func (e *ShowDDLJobQueriesWithRangeExec) Next(_ context.Context, req *chunk.Chunk) error {
	req.GrowAndReset(e.MaxChunkSize())
	if e.cursor >= len(e.jobs) {
		return nil
	}
	// The requested offset lies past all known jobs: nothing to show.
	if int(e.offset) > len(e.jobs) {
		return nil
	}
	numCurBatch := mathutil.Min(req.Capacity(), len(e.jobs)-e.cursor)
	for i := e.cursor; i < e.cursor+numCurBatch; i++ {
		// i is make true to be >= int(e.offset)
		if i >= int(e.offset+e.limit) {
			break
		}
		req.AppendString(0, strconv.FormatInt(e.jobs[i].ID, 10))
		req.AppendString(1, e.jobs[i].Query)
	}
	e.cursor += numCurBatch
	return nil
}
// Open implements the Executor Open interface.
//
// It acquires a system session, starts a transaction on it, and initializes
// the embedded DDLJobRetriever with the running jobs and a history iterator.
// The session is released in Close.
func (e *ShowDDLJobsExec) Open(ctx context.Context) error {
	if err := e.BaseExecutor.Open(ctx); err != nil {
		return err
	}
	e.DDLJobRetriever.is = e.is
	// 0 means "not specified": fall back to the default history job count.
	if e.jobNumber == 0 {
		e.jobNumber = ddl.DefNumHistoryJobs
	}
	sess, err := e.GetSysSession()
	if err != nil {
		return err
	}
	e.sess = sess
	err = sessiontxn.NewTxn(context.Background(), sess)
	if err != nil {
		return err
	}
	txn, err := sess.Txn(true)
	if err != nil {
		return err
	}
	sess.GetSessionVars().SetInTxn(true)
	err = e.DDLJobRetriever.initial(txn, sess)
	return err
}
// Next implements the Executor Next interface.
//
// It first drains the running jobs, then pages through history jobs via the
// iterator until jobNumber history rows have been produced. The cursor counts
// both kinds; (cursor - len(runningJobs)) is the number of history rows so far.
func (e *ShowDDLJobsExec) Next(_ context.Context, req *chunk.Chunk) error {
	req.GrowAndReset(e.MaxChunkSize())
	if (e.cursor - len(e.runningJobs)) >= e.jobNumber {
		return nil
	}
	count := 0

	// Append running ddl jobs.
	if e.cursor < len(e.runningJobs) {
		numCurBatch := mathutil.Min(req.Capacity(), len(e.runningJobs)-e.cursor)
		for i := e.cursor; i < e.cursor+numCurBatch; i++ {
			e.appendJobToChunk(req, e.runningJobs[i], nil)
		}
		e.cursor += numCurBatch
		count += numCurBatch
	}

	// Append history ddl jobs.
	var err error
	if count < req.Capacity() {
		num := req.Capacity() - count
		remainNum := e.jobNumber - (e.cursor - len(e.runningJobs))
		num = mathutil.Min(num, remainNum)
		e.cacheJobs, err = e.historyJobIter.GetLastJobs(num, e.cacheJobs)
		if err != nil {
			return err
		}
		for _, job := range e.cacheJobs {
			e.appendJobToChunk(req, job, nil)
		}
		e.cursor += len(e.cacheJobs)
	}
	return nil
}
// Close implements the Executor Close interface.
// It returns the system session acquired in Open (rolling back its txn) and
// then closes the base executor.
func (e *ShowDDLJobsExec) Close() error {
	e.ReleaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), e.sess)
	return e.BaseExecutor.Close()
}
// getSchemaName resolves a schema ID to its name via the info schema;
// it returns the empty string when the schema is unknown.
func getSchemaName(is infoschema.InfoSchema, id int64) string {
	if dbInfo, ok := is.SchemaByID(id); ok {
		return dbInfo.Name.O
	}
	return ""
}
// getTableName resolves a table ID to its name via the info schema;
// it returns the empty string when the table is unknown.
func getTableName(is infoschema.InfoSchema, id int64) string {
	if tbl, ok := is.TableByID(id); ok {
		return tbl.Meta().Name.O
	}
	return ""
}
// CheckTableExec represents a check table executor.
// It is built from the "admin check table" statement, and it checks if the
// index matches the records in the table.
type CheckTableExec struct {
	exec.BaseExecutor

	dbName string
	table  table.Table
	// indexInfos are the indexes to verify.
	indexInfos []*model.IndexInfo
	// srcs holds one IndexLookUpExecutor per index (and per partition).
	srcs []*IndexLookUpExecutor
	// done marks that the check has already run.
	done bool
	is   infoschema.InfoSchema
	// exitCh signals worker goroutines to stop; closed in Close.
	exitCh chan struct{}
	// retCh receives errors (or recovered panics) from worker goroutines.
	retCh chan error
	// checkIndex is true for "admin check index" (skips count reconciliation).
	checkIndex bool
}
// Open implements the Executor Open interface.
// It opens the base executor and every index source executor, and resets the
// done flag so Next performs the check.
func (e *CheckTableExec) Open(ctx context.Context) error {
	if err := e.BaseExecutor.Open(ctx); err != nil {
		return err
	}
	for _, src := range e.srcs {
		if err := src.Open(ctx); err != nil {
			return errors.Trace(err)
		}
	}
	e.done = false
	return nil
}
// Close implements the Executor Close interface.
// It signals workers to exit, closes every source executor, and reports the
// first close error encountered (later errors are dropped).
func (e *CheckTableExec) Close() error {
	var firstErr error
	close(e.exitCh)
	for _, src := range e.srcs {
		if err := src.Close(); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}
// checkTableIndexHandle runs checkIndexHandle on every source executor whose
// index matches idxInfo. A partitioned table builds one IndexLookUpExecutor
// per partition for the same index, so all matches must be checked.
func (e *CheckTableExec) checkTableIndexHandle(ctx context.Context, idxInfo *model.IndexInfo) error {
	for i := range e.srcs {
		src := e.srcs[i]
		if src.index.Name.L != idxInfo.Name.L {
			continue
		}
		if err := e.checkIndexHandle(ctx, src); err != nil {
			return err
		}
	}
	return nil
}
// checkIndexHandle drains src completely, chunk by chunk; the lookup executor
// itself performs the consistency verification as it scans. Any error is both
// pushed onto retCh (for concurrent callers in Next) and returned.
func (e *CheckTableExec) checkIndexHandle(ctx context.Context, src *IndexLookUpExecutor) error {
	cols := src.Schema().Columns
	retFieldTypes := make([]*types.FieldType, len(cols))
	for i := range cols {
		retFieldTypes[i] = cols[i].RetType
	}
	chk := chunk.New(retFieldTypes, e.InitCap(), e.MaxChunkSize())

	var err error
	for {
		err = Next(ctx, src, chk)
		if err != nil {
			e.retCh <- errors.Trace(err)
			break
		}
		// Empty chunk: source exhausted without error.
		if chk.NumRows() == 0 {
			break
		}
	}
	return errors.Trace(err)
}
// handlePanic converts a recovered panic value from a worker goroutine into
// an error on retCh so Next can report it.
func (e *CheckTableExec) handlePanic(r interface{}) {
	if r != nil {
		e.retCh <- errors.Errorf("%v", r)
	}
}
// Next implements the Executor Next interface.
//
// It first reconciles index/table row counts, then verifies each index by
// draining its IndexLookUpExecutor — sequentially for a single source, or via
// a small bounded worker pool otherwise. MV indexes additionally get a
// record-vs-index check. The check runs once; no rows are produced.
func (e *CheckTableExec) Next(ctx context.Context, _ *chunk.Chunk) error {
	if e.done || len(e.srcs) == 0 {
		return nil
	}
	defer func() { e.done = true }()

	// MV indexes can't be count-compared the same way, so exclude them here.
	idxNames := make([]string, 0, len(e.indexInfos))
	for _, idx := range e.indexInfos {
		if idx.MVIndex {
			continue
		}
		idxNames = append(idxNames, idx.Name.O)
	}
	greater, idxOffset, err := admin.CheckIndicesCount(e.Ctx(), e.dbName, e.table.Meta().Name.O, idxNames)
	// NOTE(review): the greater==... branches below are nested inside
	// err != nil, i.e. the detailed re-check only runs when the count check
	// reported a mismatch — confirm this nesting is intended.
	if err != nil {
		// For admin check index statement, for speed up and compatibility, doesn't do below checks.
		if e.checkIndex {
			return errors.Trace(err)
		}
		if greater == admin.IdxCntGreater {
			err = e.checkTableIndexHandle(ctx, e.indexInfos[idxOffset])
		} else if greater == admin.TblCntGreater {
			err = e.checkTableRecord(ctx, idxOffset)
		}
		return errors.Trace(err)
	}

	// The number of table rows is equal to the number of index rows.
	// TODO: Make the value of concurrency adjustable. And we can consider the number of records.
	if len(e.srcs) == 1 {
		err = e.checkIndexHandle(ctx, e.srcs[0])
		if err == nil && e.srcs[0].index.MVIndex {
			err = e.checkTableRecord(ctx, 0)
		}
		if err != nil {
			return err
		}
	}
	// Fan the sources out to at most 3 workers; the buffered channel holds
	// the whole work list so workers never block on send.
	taskCh := make(chan *IndexLookUpExecutor, len(e.srcs))
	failure := atomicutil.NewBool(false)
	concurrency := mathutil.Min(3, len(e.srcs))
	var wg util.WaitGroupWrapper
	for _, src := range e.srcs {
		taskCh <- src
	}
	for i := 0; i < concurrency; i++ {
		wg.Run(func() {
			util.WithRecovery(func() {
				for {
					if fail := failure.Load(); fail {
						return
					}
					select {
					case src := <-taskCh:
						err1 := e.checkIndexHandle(ctx, src)
						if err1 == nil && src.index.MVIndex {
							// Map the source back to its index offset for the
							// record-level check.
							for offset, idx := range e.indexInfos {
								if idx.ID == src.index.ID {
									err1 = e.checkTableRecord(ctx, offset)
									break
								}
							}
						}
						if err1 != nil {
							failure.Store(true)
							logutil.Logger(ctx).Info("check index handle failed", zap.Error(err1))
							return
						}
					case <-e.exitCh:
						return
					default:
						// taskCh drained: this worker is done.
						return
					}
				}
			}, e.handlePanic)
		})
	}
	wg.Wait()
	select {
	case err := <-e.retCh:
		return errors.Trace(err)
	default:
		return nil
	}
}
// checkTableRecord verifies that table records agree with the index at
// idxOffset. For a partitioned table the check runs partition by partition.
func (e *CheckTableExec) checkTableRecord(ctx context.Context, idxOffset int) error {
	idxInfo := e.indexInfos[idxOffset]
	txn, err := e.Ctx().Txn(true)
	if err != nil {
		return err
	}

	info := e.table.Meta().GetPartitionInfo()
	if info == nil {
		// Non-partitioned: one direct record-vs-index pass.
		idx := tables.NewIndex(e.table.Meta().ID, e.table.Meta(), idxInfo)
		return admin.CheckRecordAndIndex(ctx, e.Ctx(), txn, e.table, idx)
	}

	for _, def := range info.Definitions {
		partition := e.table.(table.PartitionedTable).GetPartition(def.ID)
		idx := tables.NewIndex(def.ID, e.table.Meta(), idxInfo)
		if err := admin.CheckRecordAndIndex(ctx, e.Ctx(), txn, partition, idx); err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
// ShowSlowExec represents the executor of showing the slow queries.
// It is build from the "admin show slow" statement:
//
//	admin show slow top [internal | all] N
//	admin show slow recent N
type ShowSlowExec struct {
	exec.BaseExecutor

	// ShowSlow carries the parsed statement options (kind, N, internal/all).
	ShowSlow *ast.ShowSlow
	// result is the slow-query list fetched from the domain in Open.
	result []*domain.SlowQueryInfo
	// cursor tracks how many result rows have been emitted.
	cursor int
}
// Open implements the Executor Open interface.
// It fetches the slow-query records from the domain eagerly; Next only
// formats them into chunks.
func (e *ShowSlowExec) Open(ctx context.Context) error {
	if err := e.BaseExecutor.Open(ctx); err != nil {
		return err
	}

	dom := domain.GetDomain(e.Ctx())
	e.result = dom.ShowSlowQuery(e.ShowSlow)
	return nil
}
// Next implements the Executor Next interface.
// It emits one row per slow query with 14 positional columns: SQL, start
// time, duration, details, success flag (0/1), connection ID, txn TS, user,
// DB, table IDs, index names, internal flag (0/1), digest, session alias.
func (e *ShowSlowExec) Next(_ context.Context, req *chunk.Chunk) error {
	req.Reset()
	if e.cursor >= len(e.result) {
		return nil
	}

	for e.cursor < len(e.result) && req.NumRows() < e.MaxChunkSize() {
		slow := e.result[e.cursor]
		req.AppendString(0, slow.SQL)
		req.AppendTime(1, types.NewTime(types.FromGoTime(slow.Start), mysql.TypeTimestamp, types.MaxFsp))
		req.AppendDuration(2, types.Duration{Duration: slow.Duration, Fsp: types.MaxFsp})
		req.AppendString(3, slow.Detail.String())
		// Booleans are surfaced as 0/1 integers.
		if slow.Succ {
			req.AppendInt64(4, 1)
		} else {
			req.AppendInt64(4, 0)
		}
		req.AppendUint64(5, slow.ConnID)
		req.AppendUint64(6, slow.TxnTS)
		req.AppendString(7, slow.User)
		req.AppendString(8, slow.DB)
		req.AppendString(9, slow.TableIDs)
		req.AppendString(10, slow.IndexNames)
		if slow.Internal {
			req.AppendInt64(11, 1)
		} else {
			req.AppendInt64(11, 0)
		}
		req.AppendString(12, slow.Digest)
		req.AppendString(13, slow.SessAlias)
		e.cursor++
	}
	return nil
}
// SelectLockExec represents a select lock executor.
// It is built from the "SELECT .. FOR UPDATE" or the "SELECT .. LOCK IN SHARE MODE" statement.
// For "SELECT .. FOR UPDATE" statement, it locks every row key from source Executor.
// After the execution, the keys are buffered in transaction, and will be sent to KV
// when doing commit. If there is any key already locked by another transaction,
// the transaction will rollback and retry.
type SelectLockExec struct {
	exec.BaseExecutor

	// Lock carries the parsed lock clause (type, wait seconds).
	Lock *ast.SelectLockInfo
	// keys accumulates the row keys to lock, across all Next calls.
	keys []kv.Key

	// The children may be a join of multiple tables, so we need a map.
	tblID2Handle map[int64][]plannercore.HandleCols

	// When SelectLock work on a partition table, we need the partition ID
	// (Physical Table ID) instead of the 'logical' table ID to calculate
	// the lock KV. In that case, the Physical Table ID is extracted
	// from the row key in the store and as an extra column in the chunk row.

	// tblID2PhyTblIDCol is used for partitioned tables.
	// The child executor need to return an extra column containing
	// the Physical Table ID (i.e. from which partition the row came from)
	// Used during building
	tblID2PhysTblIDCol map[int64]*expression.Column

	// Used during execution
	// Map from logic tableID to column index where the physical table id is stored
	// For dynamic prune mode, model.ExtraPhysTblID columns are requested from
	// storage and used for physical table id
	// For static prune mode, model.ExtraPhysTblID is still sent to storage/Protobuf
	// but could be filled in by the partitions TableReaderExecutor
	// due to issues with chunk handling between the TableReaderExecutor and the
	// SelectReader result.
	tblID2PhysTblIDColIdx map[int64]int
}
// Open implements the Executor Open interface.
// It builds tblID2PhysTblIDColIdx by matching each table's physical-table-ID
// column (by UniqueID) against the schema columns, scanning from the end
// since the extra columns are appended last.
func (e *SelectLockExec) Open(ctx context.Context) error {
	if len(e.tblID2PhysTblIDCol) > 0 {
		e.tblID2PhysTblIDColIdx = make(map[int64]int)
		cols := e.Schema().Columns
		for i := len(cols) - 1; i >= 0; i-- {
			if cols[i].ID == model.ExtraPhysTblID {
				for tblID, col := range e.tblID2PhysTblIDCol {
					if cols[i].UniqueID == col.UniqueID {
						e.tblID2PhysTblIDColIdx[tblID] = i
						break
					}
				}
			}
		}
	}
	return e.BaseExecutor.Open(ctx)
}
// Next implements the Executor Next interface.
//
// While the child still produces rows, it only collects the row keys to lock.
// Once the child is exhausted (empty chunk), it resolves the lock wait time,
// updates table deltas, and locks all collected keys in one batch.
func (e *SelectLockExec) Next(ctx context.Context, req *chunk.Chunk) error {
	req.GrowAndReset(e.MaxChunkSize())
	err := Next(ctx, e.Children(0), req)
	if err != nil {
		return err
	}
	// If there's no handle or it's not a `SELECT FOR UPDATE` statement.
	if len(e.tblID2Handle) == 0 || (!plannercore.IsSelectForUpdateLockType(e.Lock.LockType)) {
		return nil
	}

	if req.NumRows() > 0 {
		iter := chunk.NewIterator4Chunk(req)
		for row := iter.Begin(); row != iter.End(); row = iter.Next() {
			for tblID, cols := range e.tblID2Handle {
				for _, col := range cols {
					handle, err := col.BuildHandle(row)
					if err != nil {
						return err
					}
					physTblID := tblID
					if physTblColIdx, ok := e.tblID2PhysTblIDColIdx[tblID]; ok {
						physTblID = row.GetInt64(physTblColIdx)
						if physTblID == 0 {
							// select * from t1 left join t2 on t1.c = t2.c for update
							// The join right side might be added NULL in left join
							// In that case, physTblID is 0, so skip adding the lock.
							//
							// Note, we can't distinguish whether it's the left join case,
							// or a bug that TiKV return without correct physical ID column.
							continue
						}
					}
					e.keys = append(e.keys, tablecodec.EncodeRowKeyWithHandle(physTblID, handle))
				}
			}
		}
		return nil
	}
	// Child exhausted: perform the actual locking now.
	lockWaitTime := e.Ctx().GetSessionVars().LockWaitTimeout
	if e.Lock.LockType == ast.SelectLockForUpdateNoWait {
		lockWaitTime = tikvstore.LockNoWait
	} else if e.Lock.LockType == ast.SelectLockForUpdateWaitN {
		// WaitSec is in seconds; the lock API expects milliseconds.
		lockWaitTime = int64(e.Lock.WaitSec) * 1000
	}

	for id := range e.tblID2Handle {
		e.UpdateDeltaForTableID(id)
	}
	lockCtx, err := newLockCtx(e.Ctx(), lockWaitTime, len(e.keys))
	if err != nil {
		return err
	}
	return doLockKeys(ctx, e.Ctx(), lockCtx, e.keys...)
}
// newLockCtx builds a tikv LockCtx for pessimistic locking: it resolves the
// statement's for-update TS, wires session/statement counters and flags into
// the context, attaches a resource-group tagger and a deadlock recorder, and
// enables existence checks when assertions are on.
func newLockCtx(sctx sessionctx.Context, lockWaitTime int64, numKeys int) (*tikvstore.LockCtx, error) {
	seVars := sctx.GetSessionVars()
	forUpdateTS, err := sessiontxn.GetTxnManager(sctx).GetStmtForUpdateTS()
	if err != nil {
		return nil, err
	}
	lockCtx := tikvstore.NewLockCtx(forUpdateTS, lockWaitTime, seVars.StmtCtx.GetLockWaitStartTime())
	lockCtx.Killed = &seVars.Killed
	lockCtx.PessimisticLockWaited = &seVars.StmtCtx.PessimisticLockWaited
	lockCtx.LockKeysDuration = &seVars.StmtCtx.LockKeysDuration
	lockCtx.LockKeysCount = &seVars.StmtCtx.LockKeysCount
	lockCtx.LockExpired = &seVars.TxnCtx.LockExpire
	// Tag lock requests with SQL/plan digests for resource-group attribution.
	lockCtx.ResourceGroupTagger = func(req *kvrpcpb.PessimisticLockRequest) []byte {
		if req == nil {
			return nil
		}
		if len(req.Mutations) == 0 {
			return nil
		}
		if mutation := req.Mutations[0]; mutation != nil {
			label := resourcegrouptag.GetResourceGroupLabelByKey(mutation.Key)
			normalized, digest := seVars.StmtCtx.SQLDigest()
			if len(normalized) == 0 {
				return nil
			}
			_, planDigest := seVars.StmtCtx.GetPlanDigest()
			return resourcegrouptag.EncodeResourceGroupTag(digest, planDigest, label)
		}
		return nil
	}
	// Record deadlocks into the global history (retryable ones only if
	// configured to collect them).
	lockCtx.OnDeadlock = func(deadlock *tikverr.ErrDeadlock) {
		cfg := config.GetGlobalConfig()
		if deadlock.IsRetryable && !cfg.PessimisticTxn.DeadlockHistoryCollectRetryable {
			return
		}
		rec := deadlockhistory.ErrDeadlockToDeadlockRecord(deadlock)
		deadlockhistory.GlobalDeadlockHistory.Push(rec)
	}

	if lockCtx.ForUpdateTS > 0 && seVars.AssertionLevel != variable.AssertionLevelOff {
		lockCtx.InitCheckExistence(numKeys)
	}

	return lockCtx, nil
}
// doLockKeys is the main entry for pessimistic lock keys
// waitTime means the lock operation will wait in milliseconds if target key is already
// locked by others. used for (select for update nowait) situation
//
// It marks the txn as for-update (outside UPDATE/DELETE), filters out
// temporary-table and non-"admin lock"-listed keys, then locks the remainder
// and merges the lock-keys execution details into the statement context.
func doLockKeys(ctx context.Context, se sessionctx.Context, lockCtx *tikvstore.LockCtx, keys ...kv.Key) error {
	sessVars := se.GetSessionVars()
	sctx := sessVars.StmtCtx
	if !sctx.InUpdateStmt && !sctx.InDeleteStmt {
		atomic.StoreUint32(&se.GetSessionVars().TxnCtx.ForUpdate, 1)
	}
	// Lock keys only once when finished fetching all results.
	txn, err := se.Txn(true)
	if err != nil {
		return err
	}

	// Skip the temporary table keys.
	keys = filterTemporaryTableKeys(sessVars, keys)

	keys = filterLockTableKeys(sessVars.StmtCtx, keys)
	var lockKeyStats *tikvutil.LockKeysDetails
	ctx = context.WithValue(ctx, tikvutil.LockKeysDetailCtxKey, &lockKeyStats)
	err = txn.LockKeys(tikvutil.SetSessionID(ctx, se.GetSessionVars().ConnectionID), lockCtx, keys...)
	if lockKeyStats != nil {
		sctx.MergeLockKeysExecDetails(lockKeyStats)
	}
	return err
}
// filterTemporaryTableKeys drops keys that belong to the transaction's
// temporary tables (those need no KV locks). When the txn tracks no temporary
// tables, keys is returned unchanged.
func filterTemporaryTableKeys(vars *variable.SessionVars, keys []kv.Key) []kv.Key {
	txnCtx := vars.TxnCtx
	if txnCtx == nil || txnCtx.TemporaryTables == nil {
		return keys
	}

	// Filter in place: reuse the backing array, capped so appends can never
	// grow past the original slice.
	filtered := keys[:0:len(keys)]
	for _, k := range keys {
		if _, isTemp := txnCtx.TemporaryTables[tablecodec.DecodeTableID(k)]; !isTemp {
			filtered = append(filtered, k)
		}
	}
	return filtered
}
// filterLockTableKeys keeps only the keys whose table ID is listed in the
// statement's LockTableIDs; an empty list means no filtering at all.
func filterLockTableKeys(stmtCtx *stmtctx.StatementContext, keys []kv.Key) []kv.Key {
	if len(stmtCtx.LockTableIDs) == 0 {
		return keys
	}

	// In-place filter over the same backing array, capped at the original length.
	filtered := keys[:0:len(keys)]
	for _, k := range keys {
		if _, listed := stmtCtx.LockTableIDs[tablecodec.DecodeTableID(k)]; listed {
			filtered = append(filtered, k)
		}
	}
	return filtered
}
// LimitExec represents limit executor
// It ignores 'Offset' rows from src, then returns 'Count' rows at maximum.
type LimitExec struct {
	exec.BaseExecutor

	// begin/end are the half-open row window [begin, end) to return;
	// cursor counts rows consumed from the child so far.
	begin  uint64
	end    uint64
	cursor uint64

	// meetFirstBatch represents whether we have met the first valid Chunk from child.
	meetFirstBatch bool

	childResult *chunk.Chunk

	// columnIdxsUsedByChild keep column indexes of child executor used for inline projection
	columnIdxsUsedByChild []int

	// Log the close time when opentracing is enabled.
	span opentracing.Span
}
// Next implements the Executor Next interface.
func (e *LimitExec) Next(ctx context.Context, req *chunk.Chunk) error {
	req.Reset()
	// The whole [begin, end) window has already been produced.
	if e.cursor >= e.end {
		return nil
	}
	// Phase 1: skip whole child chunks until one overlaps the window.
	for !e.meetFirstBatch {
		// transfer req's requiredRows to childResult and then adjust it in childResult
		e.childResult = e.childResult.SetRequiredRows(req.RequiredRows(), e.MaxChunkSize())
		err := Next(ctx, e.Children(0), e.adjustRequiredRows(e.childResult))
		if err != nil {
			return err
		}
		batchSize := uint64(e.childResult.NumRows())
		// no more data.
		if batchSize == 0 {
			return nil
		}
		if newCursor := e.cursor + batchSize; newCursor >= e.begin {
			e.meetFirstBatch = true
			// The first overlapping chunk may need trimming on both sides.
			begin, end := e.begin-e.cursor, batchSize
			if newCursor > e.end {
				end = e.end - e.cursor
			}
			e.cursor += end
			// begin == end means the overlap is empty; fall through to phase 2.
			if begin == end {
				break
			}
			if e.columnIdxsUsedByChild != nil {
				req.Append(e.childResult.Prune(e.columnIdxsUsedByChild), int(begin), int(end))
			} else {
				req.Append(e.childResult, int(begin), int(end))
			}
			return nil
		}
		e.cursor += batchSize
	}
	// Phase 2: inside the window; fetch one chunk and pass it through,
	// truncating at the window's end when necessary.
	e.childResult.Reset()
	e.childResult = e.childResult.SetRequiredRows(req.RequiredRows(), e.MaxChunkSize())
	e.adjustRequiredRows(e.childResult)
	err := Next(ctx, e.Children(0), e.childResult)
	if err != nil {
		return err
	}
	batchSize := uint64(e.childResult.NumRows())
	// no more data.
	if batchSize == 0 {
		return nil
	}
	if e.cursor+batchSize > e.end {
		e.childResult.TruncateTo(int(e.end - e.cursor))
		batchSize = e.end - e.cursor
	}
	e.cursor += batchSize
	// Hand columns to the parent without copying; apply the inline
	// projection when the child exposes more columns than needed.
	if e.columnIdxsUsedByChild != nil {
		for i, childIdx := range e.columnIdxsUsedByChild {
			if err = req.SwapColumn(i, e.childResult, childIdx); err != nil {
				return err
			}
		}
	} else {
		req.SwapColumns(e.childResult)
	}
	return nil
}
// Open implements the Executor Open interface.
func (e *LimitExec) Open(ctx context.Context) error {
	if err := e.BaseExecutor.Open(ctx); err != nil {
		return err
	}
	e.childResult = tryNewCacheChunk(e.Children(0))
	e.cursor = 0
	// With no offset, the very first child chunk already overlaps the window.
	e.meetFirstBatch = e.begin == 0
	// Remember the active tracing span (if any) so Close can report its duration.
	if sp := opentracing.SpanFromContext(ctx); sp != nil && sp.Tracer() != nil {
		e.span = sp
	}
	return nil
}
// Close implements the Executor Close interface.
func (e *LimitExec) Close() error {
	start := time.Now()
	e.childResult = nil
	err := e.BaseExecutor.Close()
	// A slow close is worth logging, and worth a tracing span when enabled.
	if elapsed := time.Since(start); elapsed > time.Millisecond {
		logutil.BgLogger().Info("limit executor close takes a long time",
			zap.Duration("elapsed", elapsed))
		if e.span != nil {
			closeSpan := e.span.Tracer().StartSpan("limitExec.Close", opentracing.ChildOf(e.span.Context()), opentracing.StartTime(start))
			defer closeSpan.Finish()
		}
	}
	return err
}
// adjustRequiredRows caps the number of rows the child should produce for the
// next fetch: never more than what remains of the LIMIT window, and never more
// than the offset rows still to skip plus what the parent requested.
func (e *LimitExec) adjustRequiredRows(chk *chunk.Chunk) *chunk.Chunk {
	// Rows remaining before the window's end is reached.
	limitTotal := int(e.end - e.cursor)
	// Rows the parent wants, plus any rows still to be skipped for OFFSET.
	limitRequired := chk.RequiredRows()
	if e.cursor < e.begin {
		limitRequired += int(e.begin) - int(e.cursor)
	}
	return chk.SetRequiredRows(mathutil.Min(limitTotal, limitRequired), e.MaxChunkSize())
}
func init() {
	// While doing optimization in the plan package, we need to execute uncorrelated subquery,
	// but the plan package cannot import the executor package because of the dependency cycle.
	// So we assign a function implemented in the executor package to the plan package to avoid the dependency cycle.
	plannercore.EvalSubqueryFirstRow = func(ctx context.Context, p plannercore.PhysicalPlan, is infoschema.InfoSchema, sctx sessionctx.Context) ([]types.Datum, error) {
		// Record rewrite-phase bookkeeping on exit: such a query cannot be
		// plan-cached, and the time spent here counts as subquery preprocessing.
		defer func(begin time.Time) {
			s := sctx.GetSessionVars()
			s.StmtCtx.SetSkipPlanCache(errors.New("query has uncorrelated sub-queries is un-cacheable"))
			s.RewritePhaseInfo.PreprocessSubQueries++
			s.RewritePhaseInfo.DurationPreprocessSubQuery += time.Since(begin)
		}(time.Now())
		r, ctx := tracing.StartRegionEx(ctx, "executor.EvalSubQuery")
		defer r.End()
		// Build an executor for the subquery plan and run it for one chunk.
		e := newExecutorBuilder(sctx, is, nil)
		exec := e.build(p)
		if e.err != nil {
			return nil, e.err
		}
		err := exec.Open(ctx)
		// Close is deferred even when Open failed, matching the executor contract.
		defer terror.Call(exec.Close)
		if err != nil {
			return nil, err
		}
		if pi, ok := sctx.(processinfoSetter); ok {
			// Before executing the sub-query, we need update the processinfo to make the progress bar more accurate.
			// because the sub-query may take a long time.
			pi.UpdateProcessInfo()
		}
		chk := tryNewCacheChunk(exec)
		err = Next(ctx, exec, chk)
		if err != nil {
			return nil, err
		}
		// An empty result yields (nil, nil): the subquery produced no row.
		if chk.NumRows() == 0 {
			return nil, nil
		}
		// Only the first row is needed; callers enforce the one-row contract.
		row := chk.GetRow(0).GetDatumRow(retTypes(exec))
		return row, err
	}
}
// TableDualExec represents a dual table executor.
type TableDualExec struct {
	exec.BaseExecutor
	// numDualRows can only be 0 or 1.
	numDualRows int
	// numReturned tracks how many rows have been emitted so far, so Next
	// produces the dual row at most once.
	numReturned int
}
// Open implements the Executor Open interface.
// Note it deliberately does not open the base executor: a dual table has no
// children to open, only the emitted-row counter to reset.
func (e *TableDualExec) Open(context.Context) error {
	e.numReturned = 0
	return nil
}
// Next implements the Executor Next interface.
// It emits at most numDualRows (0 or 1) rows over its whole lifetime.
func (e *TableDualExec) Next(_ context.Context, req *chunk.Chunk) error {
	req.Reset()
	if e.numReturned >= e.numDualRows {
		return nil
	}
	// Mark the dual row as produced before filling the chunk.
	e.numReturned = e.numDualRows
	// With an empty schema there are no columns to fill, so report one
	// virtual row; otherwise emit a single all-NULL row.
	if e.Schema().Len() == 0 {
		req.SetNumVirtualRows(1)
		return nil
	}
	for i := range e.Schema().Columns {
		req.AppendNull(i)
	}
	return nil
}
// SelectionExec represents a filter executor.
type SelectionExec struct {
	exec.BaseExecutor
	// batched is true when all filters can be evaluated vectorized.
	batched bool
	// filters are the conjuncts applied to every child row.
	filters []expression.Expression
	// selected caches the vectorized-filter verdict per row of childResult.
	selected []bool
	// inputIter/inputRow track the position inside childResult between
	// successive Next calls.
	inputIter *chunk.Iterator4Chunk
	inputRow  chunk.Row
	// childResult buffers one chunk fetched from the child executor.
	childResult *chunk.Chunk
	// memTracker accounts for childResult's memory usage.
	memTracker *memory.Tracker
}
// Open implements the Executor Open interface.
func (e *SelectionExec) Open(ctx context.Context) error {
	if err := e.BaseExecutor.Open(ctx); err != nil {
		return err
	}
	// Test hook: simulate a failure right after the base executor opened.
	failpoint.Inject("mockSelectionExecBaseExecutorOpenReturnedError", func(val failpoint.Value) {
		if val.(bool) {
			failpoint.Return(errors.New("mock SelectionExec.baseExecutor.Open returned error"))
		}
	})
	return e.open(ctx)
}
// open initializes the per-execution state: memory tracker, child buffer,
// filter mode, and the input iterator.
func (e *SelectionExec) open(context.Context) error {
	// Reuse the tracker across executions when one already exists.
	if e.memTracker == nil {
		e.memTracker = memory.NewTracker(e.ID(), -1)
	} else {
		e.memTracker.Reset()
	}
	e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker)
	e.childResult = tryNewCacheChunk(e.Children(0))
	e.memTracker.Consume(e.childResult.MemoryUsage())
	e.batched = expression.Vectorizable(e.filters)
	if e.batched {
		e.selected = make([]bool, 0, chunk.InitialCapacity)
	}
	e.inputIter = chunk.NewIterator4Chunk(e.childResult)
	// Start exhausted so the first Next call fetches from the child.
	e.inputRow = e.inputIter.End()
	return nil
}
// Close implements plannercore.Plan Close interface.
func (e *SelectionExec) Close() error {
	// Release the buffered chunk and give its memory back to the tracker.
	if chk := e.childResult; chk != nil {
		e.memTracker.Consume(-chk.MemoryUsage())
		e.childResult = nil
	}
	e.selected = nil
	return e.BaseExecutor.Close()
}
// Next implements the Executor Next interface.
func (e *SelectionExec) Next(ctx context.Context, req *chunk.Chunk) error {
	req.GrowAndReset(e.MaxChunkSize())
	if !e.batched {
		return e.unBatchedNext(ctx, req)
	}
	for {
		// Copy the remaining selected rows of the current child chunk into req.
		// The iterator position persists across Next calls, so a full req can
		// resume exactly where it stopped.
		for ; e.inputRow != e.inputIter.End(); e.inputRow = e.inputIter.Next() {
			if req.IsFull() {
				return nil
			}
			if !e.selected[e.inputRow.Idx()] {
				continue
			}
			req.AppendRow(e.inputRow)
		}
		// Current chunk exhausted; fetch the next one and track the memory delta.
		mSize := e.childResult.MemoryUsage()
		err := Next(ctx, e.Children(0), e.childResult)
		e.memTracker.Consume(e.childResult.MemoryUsage() - mSize)
		if err != nil {
			return err
		}
		// no more data.
		if e.childResult.NumRows() == 0 {
			return nil
		}
		// Evaluate all filters vectorized; selected[i] records row i's verdict.
		e.selected, err = expression.VectorizedFilter(e.Ctx(), e.filters, e.inputIter, e.selected)
		if err != nil {
			return err
		}
		e.inputRow = e.inputIter.Begin()
	}
}
// unBatchedNext filters input rows one by one and returns once an input row is selected.
// For sql with "SETVAR" in filter and "GETVAR" in projection, for example: "SELECT @a FROM t WHERE (@a := 2) > 0",
// we have to set batch size to 1 to do the evaluation of filter and projection.
func (e *SelectionExec) unBatchedNext(ctx context.Context, chk *chunk.Chunk) error {
	for {
		// Scan the current child chunk row by row, evaluating the filter each time.
		for ; e.inputRow != e.inputIter.End(); e.inputRow = e.inputIter.Next() {
			selected, _, err := expression.EvalBool(e.Ctx(), e.filters, e.inputRow)
			if err != nil {
				return err
			}
			if selected {
				chk.AppendRow(e.inputRow)
				// Advance past the emitted row so the next call resumes after it.
				e.inputRow = e.inputIter.Next()
				return nil
			}
		}
		// Current chunk exhausted; fetch the next one and track the memory delta.
		mSize := e.childResult.MemoryUsage()
		err := Next(ctx, e.Children(0), e.childResult)
		e.memTracker.Consume(e.childResult.MemoryUsage() - mSize)
		if err != nil {
			return err
		}
		e.inputRow = e.inputIter.Begin()
		// no more data.
		if e.childResult.NumRows() == 0 {
			return nil
		}
	}
}
// TableScanExec is a table scan executor without result fields.
type TableScanExec struct {
	exec.BaseExecutor
	// t is the (virtual/memory) table being scanned.
	t table.Table
	// columns are the column metadata of the scanned table.
	columns []*model.ColumnInfo
	// virtualTableChunkList caches all rows of the virtual table, built
	// lazily on the first Next call.
	virtualTableChunkList *chunk.List
	// virtualTableChunkIdx is the index of the next cached chunk to return.
	virtualTableChunkIdx int
}
// Next implements the Executor Next interface.
// It delegates to nextChunk4InfoSchema: this executor only serves
// information-schema style virtual tables.
func (e *TableScanExec) Next(ctx context.Context, req *chunk.Chunk) error {
	req.GrowAndReset(e.MaxChunkSize())
	return e.nextChunk4InfoSchema(ctx, req)
}
// nextChunk4InfoSchema returns one chunk of a virtual (information-schema
// style) table. On the first call it materializes the whole table into
// virtualTableChunkList; later calls hand out the cached chunks one by one.
func (e *TableScanExec) nextChunk4InfoSchema(ctx context.Context, chk *chunk.Chunk) error {
	chk.GrowAndReset(e.MaxChunkSize())
	if e.virtualTableChunkList == nil {
		e.virtualTableChunkList = chunk.NewList(retTypes(e), e.InitCap(), e.MaxChunkSize())
		columns := make([]*table.Column, e.Schema().Len())
		for i, colInfo := range e.columns {
			columns[i] = table.ToColumn(colInfo)
		}
		mutableRow := chunk.MutRowFromTypes(retTypes(e))
		// Local interface so only the IterRecords capability of e.t is required.
		type tableIter interface {
			IterRecords(ctx context.Context, sctx sessionctx.Context, cols []*table.Column, fn table.RecordIterFunc) error
		}
		// Copy every record of the virtual table into the chunk list.
		err := (e.t.(tableIter)).IterRecords(ctx, e.Ctx(), columns, func(_ kv.Handle, rec []types.Datum, cols []*table.Column) (bool, error) {
			mutableRow.SetDatums(rec...)
			e.virtualTableChunkList.AppendRow(mutableRow.ToRow())
			return true, nil
		})
		if err != nil {
			return err
		}
	}
	// no more data.
	if e.virtualTableChunkIdx >= e.virtualTableChunkList.NumChunks() {
		return nil
	}
	virtualTableChunk := e.virtualTableChunkList.GetChunk(e.virtualTableChunkIdx)
	e.virtualTableChunkIdx++
	chk.SwapColumns(virtualTableChunk)
	return nil
}
// Open implements the Executor Open interface.
// It drops any cached virtual-table rows so Next rebuilds them lazily.
func (e *TableScanExec) Open(context.Context) error {
	e.virtualTableChunkList = nil
	return nil
}
// MaxOneRowExec checks if the number of rows that a query returns is at maximum one.
// It's built from subquery expression.
type MaxOneRowExec struct {
	exec.BaseExecutor
	// evaluated records whether the single allowed row has already been produced.
	evaluated bool
}
// Open implements the Executor Open interface.
func (e *MaxOneRowExec) Open(ctx context.Context) error {
	if err := e.BaseExecutor.Open(ctx); err != nil {
		return err
	}
	e.evaluated = false
	return nil
}
// Next implements the Executor Next interface.
func (e *MaxOneRowExec) Next(ctx context.Context, req *chunk.Chunk) error {
	req.Reset()
	// The single row (or the NULL substitute) was already produced.
	if e.evaluated {
		return nil
	}
	e.evaluated = true
	err := Next(ctx, e.Children(0), req)
	if err != nil {
		return err
	}
	if num := req.NumRows(); num == 0 {
		// An empty subquery result becomes one all-NULL row.
		for i := range e.Schema().Columns {
			req.AppendNull(i)
		}
		return nil
	} else if num != 1 {
		return exeerrors.ErrSubqueryMoreThan1Row
	}
	// Exactly one row so far: probe the child once more to make sure there
	// is no second row hiding in a later chunk.
	childChunk := tryNewCacheChunk(e.Children(0))
	err = Next(ctx, e.Children(0), childChunk)
	if err != nil {
		return err
	}
	if childChunk.NumRows() != 0 {
		return exeerrors.ErrSubqueryMoreThan1Row
	}
	return nil
}
// UnionExec pulls all it's children's result and returns to its parent directly.
// A "resultPuller" is started for every child to pull result from that child and push it to the "resultPool", the used
// "Chunk" is obtained from the corresponding "resourcePool". All resultPullers are running concurrently.
//
//	                          +----------------+
//	+---> resourcePool 1 ---> | resultPuller 1 |-----+
//	|     +----------------+  |
//	|                         |
//	|     +----------------+  v
//	+---> resourcePool 2 ---> | resultPuller 2 |-----> resultPool ---+
//	|     +----------------+  ^                                      |
//	|                  ......                                        |
//	|     +----------------+  |                                      |
//	+---> resourcePool n ---> | resultPuller n |-----+               |
//	|     +----------------+                                         |
//	|                                                                |
//	|    +-------------+                                             |
//	|----| main thread | <-------------------------------------------+
//	     +-------------+
type UnionExec struct {
	exec.BaseExecutor
	// concurrency is the number of resultPuller goroutines (capped by child count).
	concurrency int
	// childIDChan distributes child indexes to the pullers.
	childIDChan chan int
	// stopFetchData is a bool flag telling pullers to stop after an error.
	stopFetchData atomic.Value
	// finished is closed on Close to unblock pullers waiting for a resource chunk.
	finished chan struct{}
	// resourcePools[i] recycles the chunk used by puller i.
	resourcePools []chan *chunk.Chunk
	// resultPool carries filled chunks (or errors) to the main thread.
	resultPool chan *unionWorkerResult
	// results holds the one reusable chunk owned by each puller.
	results []*chunk.Chunk
	wg      sync.WaitGroup
	// initialized records whether the puller goroutines have been started.
	initialized bool
	// mu guards maxOpenedChildID, the highest child index a puller has opened;
	// Close only closes children up to that index.
	mu struct {
		*syncutil.Mutex
		maxOpenedChildID int
	}
	// childInFlightForTest is only used by the issue21441 failpoint tests.
	childInFlightForTest int32
}
// unionWorkerResult stores the result for a union worker.
// A "resultPuller" is started for every child to pull result from that child, unionWorkerResult is used to store that pulled result.
// "src" is used for Chunk reuse: after pulling result from "resultPool", main-thread must push a valid unused Chunk to "src" to
// enable the corresponding "resultPuller" continue to work.
type unionWorkerResult struct {
	chk *chunk.Chunk
	err error
	src chan<- *chunk.Chunk
}
// waitAllFinished closes resultPool once every resultPuller has exited,
// which signals end-of-data to the main thread in Next.
func (e *UnionExec) waitAllFinished() {
	e.wg.Wait()
	close(e.resultPool)
}
// Open implements the Executor Open interface.
// Children are opened lazily by the resultPullers, not here; this only
// resets the coordination state.
func (e *UnionExec) Open(context.Context) error {
	e.stopFetchData.Store(false)
	e.initialized = false
	e.finished = make(chan struct{})
	e.mu.Mutex = &syncutil.Mutex{}
	// -1 means no child has been opened yet (see Close).
	e.mu.maxOpenedChildID = -1
	return nil
}
// initialize starts the resultPuller goroutines and wires up the channels
// shown in the diagram on UnionExec. Called once, from the first Next.
func (e *UnionExec) initialize(ctx context.Context) {
	// Never run more pullers than there are children.
	if e.concurrency > e.ChildrenLen() {
		e.concurrency = e.ChildrenLen()
	}
	for i := 0; i < e.concurrency; i++ {
		e.results = append(e.results, newFirstChunk(e.Children(0)))
	}
	e.resultPool = make(chan *unionWorkerResult, e.concurrency)
	e.resourcePools = make([]chan *chunk.Chunk, e.concurrency)
	e.childIDChan = make(chan int, e.ChildrenLen())
	for i := 0; i < e.concurrency; i++ {
		// Seed each puller's resource pool with its single reusable chunk.
		e.resourcePools[i] = make(chan *chunk.Chunk, 1)
		e.resourcePools[i] <- e.results[i]
		e.wg.Add(1)
		go e.resultPuller(ctx, i)
	}
	// Queue every child index, then close so pullers exit when all are done.
	for i := 0; i < e.ChildrenLen(); i++ {
		e.childIDChan <- i
	}
	close(e.childIDChan)
	go e.waitAllFinished()
}
// resultPuller is the worker goroutine body: it takes child indexes from
// childIDChan, opens each child, and pumps its chunks into resultPool until
// the child is drained, an error occurs, or the executor is closed.
func (e *UnionExec) resultPuller(ctx context.Context, workerID int) {
	result := &unionWorkerResult{
		err: nil,
		chk: nil,
		src: e.resourcePools[workerID],
	}
	defer func() {
		// Convert a panic into an error result so the main thread sees it
		// instead of the process crashing.
		if r := recover(); r != nil {
			logutil.Logger(ctx).Error("resultPuller panicked", zap.Any("recover", r), zap.Stack("stack"))
			result.err = errors.Errorf("%v", r)
			e.resultPool <- result
			e.stopFetchData.Store(true)
		}
		e.wg.Done()
	}()
	for childID := range e.childIDChan {
		// Record the highest child index opened so Close knows how many to close.
		e.mu.Lock()
		if childID > e.mu.maxOpenedChildID {
			e.mu.maxOpenedChildID = childID
		}
		e.mu.Unlock()
		if err := e.Children(childID).Open(ctx); err != nil {
			result.err = err
			e.stopFetchData.Store(true)
			e.resultPool <- result
		}
		failpoint.Inject("issue21441", func() {
			atomic.AddInt32(&e.childInFlightForTest, 1)
		})
		for {
			if e.stopFetchData.Load().(bool) {
				return
			}
			// Wait for either shutdown or a recycled chunk to fill.
			select {
			case <-e.finished:
				return
			case result.chk = <-e.resourcePools[workerID]:
			}
			result.err = Next(ctx, e.Children(childID), result.chk)
			if result.err == nil && result.chk.NumRows() == 0 {
				// Child drained: return the unused chunk and move to the next child.
				e.resourcePools[workerID] <- result.chk
				break
			}
			failpoint.Inject("issue21441", func() {
				if int(atomic.LoadInt32(&e.childInFlightForTest)) > e.concurrency {
					panic("the count of child in flight is larger than e.concurrency unexpectedly")
				}
			})
			e.resultPool <- result
			if result.err != nil {
				e.stopFetchData.Store(true)
				return
			}
		}
		failpoint.Inject("issue21441", func() {
			atomic.AddInt32(&e.childInFlightForTest, -1)
		})
	}
}
// Next implements the Executor Next interface.
func (e *UnionExec) Next(ctx context.Context, req *chunk.Chunk) error {
	req.GrowAndReset(e.MaxChunkSize())
	// Lazily start the puller goroutines on the first call.
	if !e.initialized {
		e.initialize(ctx)
		e.initialized = true
	}
	result, ok := <-e.resultPool
	// resultPool closed means every puller finished: no more data.
	if !ok {
		return nil
	}
	if result.err != nil {
		return errors.Trace(result.err)
	}
	if result.chk.NumCols() != req.NumCols() {
		return errors.Errorf("Internal error: UnionExec chunk column count mismatch, req: %d, result: %d",
			req.NumCols(), result.chk.NumCols())
	}
	req.SwapColumns(result.chk)
	// Return the (now-empty) chunk to the puller so it can keep producing.
	result.src <- result.chk
	return nil
}
// Close implements the Executor Close interface.
func (e *UnionExec) Close() error {
	// Signal the pullers to stop waiting for recycled chunks.
	if e.finished != nil {
		close(e.finished)
	}
	e.results = nil
	// Drain the channels so blocked pullers can exit.
	if e.resultPool != nil {
		channel.Clear(e.resultPool)
	}
	e.resourcePools = nil
	if e.childIDChan != nil {
		channel.Clear(e.childIDChan)
	}
	// We do not need to acquire the e.mu.Lock since all the resultPuller can be
	// promised to exit when reaching here (e.childIDChan been closed).
	// Only close the children that were actually opened; return the first error.
	var firstErr error
	for i := 0; i <= e.mu.maxOpenedChildID; i++ {
		if err := e.Children(i).Close(); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}
// ResetContextOfStmt resets the StmtContext and session variables.
// Before every execution, we must clear statement context.
func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) {
	vars := ctx.GetSessionVars()
	var sc *stmtctx.StatementContext
	if vars.TxnCtx.CouldRetry || mysql.HasCursorExistsFlag(vars.Status) {
		// Must construct new statement context object, the retry history need context for every statement.
		// TODO: Maybe one day we can get rid of transaction retry, then this logic can be deleted.
		sc = &stmtctx.StatementContext{}
	} else {
		sc = vars.InitStatementContext()
	}
	// --- Per-statement fields that must start from a clean slate. ---
	sc.TimeZone = vars.Location()
	sc.TaskID = stmtctx.AllocateTaskID()
	sc.CTEStorageMap = map[int]*CTEStorages{}
	sc.IsStaleness = false
	sc.LockTableIDs = make(map[int64]struct{})
	sc.EnableOptimizeTrace = false
	sc.OptimizeTracer = nil
	sc.OptimizerCETrace = nil
	sc.IsSyncStatsFailed = false
	sc.IsExplainAnalyzeDML = false
	// Firstly we assume that UseDynamicPruneMode can be enabled according session variable, then we will check other conditions
	// in PlanBuilder.buildDataSource
	sc.UseDynamicPruneMode = vars.IsDynamicPartitionPruneEnabled()
	sc.StatsLoad.Timeout = 0
	sc.StatsLoad.NeededItems = nil
	sc.StatsLoad.ResultCh = nil
	sc.SysdateIsNow = vars.SysdateIsNow
	// --- Reset the session-level memory/disk trackers for this statement. ---
	vars.MemTracker.Detach()
	vars.MemTracker.UnbindActions()
	vars.MemTracker.SetBytesLimit(vars.MemQuotaQuery)
	vars.MemTracker.ResetMaxConsumed()
	vars.DiskTracker.Detach()
	vars.DiskTracker.ResetMaxConsumed()
	vars.MemTracker.SessionID.Store(vars.ConnectionID)
	vars.StmtCtx.TableStats = make(map[int64]interface{})
	// ANALYZE (direct or via EXECUTE) gets a dedicated, unlimited memory tracker.
	isAnalyze := false
	if execStmt, ok := s.(*ast.ExecuteStmt); ok {
		prepareStmt, err := plannercore.GetPreparedStmt(execStmt, vars)
		if err != nil {
			return err
		}
		_, isAnalyze = prepareStmt.PreparedAst.Stmt.(*ast.AnalyzeTableStmt)
	} else if _, ok := s.(*ast.AnalyzeTableStmt); ok {
		isAnalyze = true
	}
	if isAnalyze {
		sc.InitMemTracker(memory.LabelForAnalyzeMemory, -1)
		vars.MemTracker.SetBytesLimit(-1)
		vars.MemTracker.AttachTo(GlobalAnalyzeMemoryTracker)
	} else {
		sc.InitMemTracker(memory.LabelForSQLText, -1)
	}
	// --- Install the configured OOM action (cancel or log) on the tracker. ---
	logOnQueryExceedMemQuota := domain.GetDomain(ctx).ExpensiveQueryHandle().LogOnQueryExceedMemQuota
	switch variable.OOMAction.Load() {
	case variable.OOMActionCancel:
		action := &memory.PanicOnExceed{ConnID: vars.ConnectionID}
		action.SetLogHook(logOnQueryExceedMemQuota)
		vars.MemTracker.SetActionOnExceed(action)
	case variable.OOMActionLog:
		fallthrough
	default:
		action := &memory.LogOnExceed{ConnID: vars.ConnectionID}
		action.SetLogHook(logOnQueryExceedMemQuota)
		vars.MemTracker.SetActionOnExceed(action)
	}
	sc.MemTracker.SessionID.Store(vars.ConnectionID)
	sc.MemTracker.AttachTo(vars.MemTracker)
	sc.InitDiskTracker(memory.LabelForSQLText, -1)
	globalConfig := config.GetGlobalConfig()
	if variable.EnableTmpStorageOnOOM.Load() && sc.DiskTracker != nil {
		sc.DiskTracker.AttachTo(vars.DiskTracker)
		if GlobalDiskUsageTracker != nil {
			vars.DiskTracker.AttachTo(GlobalDiskUsageTracker)
		}
	}
	// --- EXECUTE statements: unwrap the prepared statement and register its digest. ---
	if execStmt, ok := s.(*ast.ExecuteStmt); ok {
		prepareStmt, err := plannercore.GetPreparedStmt(execStmt, vars)
		if err != nil {
			return err
		}
		s = prepareStmt.PreparedAst.Stmt
		sc.InitSQLDigest(prepareStmt.NormalizedSQL, prepareStmt.SQLDigest)
		// For `execute stmt` SQL, should reset the SQL digest with the prepare SQL digest.
		goCtx := context.Background()
		if variable.EnablePProfSQLCPU.Load() && len(prepareStmt.NormalizedSQL) > 0 {
			goCtx = pprof.WithLabels(goCtx, pprof.Labels("sql", util.QueryStrForLog(prepareStmt.NormalizedSQL)))
			pprof.SetGoroutineLabels(goCtx)
		}
		if topsqlstate.TopSQLEnabled() && prepareStmt.SQLDigest != nil {
			sc.IsSQLRegistered.Store(true)
			topsql.AttachAndRegisterSQLInfo(goCtx, prepareStmt.NormalizedSQL, prepareStmt.SQLDigest, vars.InRestrictedSQL)
		}
		if s, ok := prepareStmt.PreparedAst.Stmt.(*ast.SelectStmt); ok {
			if s.LockInfo == nil {
				sc.WeakConsistency = isWeakConsistencyRead(ctx, execStmt)
			}
		}
	}
	// execute missed stmtID uses empty sql
	sc.OriginalSQL = s.Text()
	// --- EXPLAIN wrappers: record the explain flags and unwrap the inner statement. ---
	if explainStmt, ok := s.(*ast.ExplainStmt); ok {
		sc.InExplainStmt = true
		sc.ExplainFormat = explainStmt.Format
		sc.InExplainAnalyzeStmt = explainStmt.Analyze
		sc.IgnoreExplainIDSuffix = strings.ToLower(explainStmt.Format) == types.ExplainFormatBrief
		sc.InVerboseExplain = strings.ToLower(explainStmt.Format) == types.ExplainFormatVerbose
		s = explainStmt.Stmt
	} else {
		sc.ExplainFormat = ""
	}
	if explainForStmt, ok := s.(*ast.ExplainForStmt); ok {
		sc.InExplainStmt = true
		sc.InExplainAnalyzeStmt = true
		sc.InVerboseExplain = strings.ToLower(explainForStmt.Format) == types.ExplainFormatVerbose
	}

	// TODO: Many same bool variables here.
	// We should set only two variables (
	// IgnoreErr and StrictSQLMode) to avoid setting the same bool variables and
	// pushing them down to TiKV as flags.
	sc.InRestrictedSQL = vars.InRestrictedSQL
	// --- Statement-kind specific flags (warning/error downgrade policies). ---
	switch stmt := s.(type) {
	case *ast.UpdateStmt:
		ResetUpdateStmtCtx(sc, stmt, vars)
	case *ast.DeleteStmt:
		ResetDeleteStmtCtx(sc, stmt, vars)
	case *ast.InsertStmt:
		sc.InInsertStmt = true
		// For insert statement (not for update statement), disabling the StrictSQLMode
		// should make TruncateAsWarning and DividedByZeroAsWarning,
		// but should not make DupKeyAsWarning.
		sc.DupKeyAsWarning = stmt.IgnoreErr
		sc.BadNullAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
		sc.IgnoreNoPartition = stmt.IgnoreErr
		sc.ErrAutoincReadFailedAsWarning = stmt.IgnoreErr
		sc.TruncateAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
		sc.DividedByZeroAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
		sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
		sc.IgnoreZeroInDate = !vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode || stmt.IgnoreErr || sc.AllowInvalidDate
		sc.Priority = stmt.Priority
	case *ast.CreateTableStmt, *ast.AlterTableStmt:
		sc.InCreateOrAlterStmt = true
		sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
		sc.IgnoreZeroInDate = !vars.SQLMode.HasNoZeroInDateMode() || !vars.StrictSQLMode || sc.AllowInvalidDate
		sc.NoZeroDate = vars.SQLMode.HasNoZeroDateMode()
		sc.TruncateAsWarning = !vars.StrictSQLMode
	case *ast.LoadDataStmt:
		sc.InLoadDataStmt = true
		// return warning instead of error when load data meet no partition for value
		sc.IgnoreNoPartition = true
	case *ast.SelectStmt:
		sc.InSelectStmt = true
		// see https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#sql-mode-strict
		// said "For statements such as SELECT that do not change data, invalid values
		// generate a warning in strict mode, not an error."
		// and https://dev.mysql.com/doc/refman/5.7/en/out-of-range-and-overflow.html
		sc.OverflowAsWarning = true
		// Return warning for truncate error in selection.
		sc.TruncateAsWarning = true
		sc.IgnoreZeroInDate = true
		sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
		if opts := stmt.SelectStmtOpts; opts != nil {
			sc.Priority = opts.Priority
			sc.NotFillCache = !opts.SQLCache
		}
		sc.WeakConsistency = isWeakConsistencyRead(ctx, stmt)
	case *ast.SetOprStmt:
		sc.InSelectStmt = true
		sc.OverflowAsWarning = true
		sc.TruncateAsWarning = true
		sc.IgnoreZeroInDate = true
		sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
	case *ast.ShowStmt:
		sc.IgnoreTruncate.Store(true)
		sc.IgnoreZeroInDate = true
		sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
		if stmt.Tp == ast.ShowWarnings || stmt.Tp == ast.ShowErrors || stmt.Tp == ast.ShowSessionStates {
			sc.InShowWarning = true
			sc.SetWarnings(vars.StmtCtx.GetWarnings())
		}
	case *ast.SplitRegionStmt:
		sc.IgnoreTruncate.Store(false)
		sc.IgnoreZeroInDate = true
		sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
	case *ast.SetSessionStatesStmt:
		sc.InSetSessionStatesStmt = true
		sc.IgnoreTruncate.Store(true)
		sc.IgnoreZeroInDate = true
		sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
	default:
		sc.IgnoreTruncate.Store(true)
		sc.IgnoreZeroInDate = true
		sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
	}
	sc.SkipUTF8Check = vars.SkipUTF8Check
	sc.SkipASCIICheck = vars.SkipASCIICheck
	sc.SkipUTF8MB4Check = !globalConfig.Instance.CheckMb4ValueInUTF8.Load()
	vars.PlanCacheParams.Reset()
	if priority := mysql.PriorityEnum(atomic.LoadInt32(&variable.ForcePriority)); priority != mysql.NoPriority {
		sc.Priority = priority
	}
	// Carry LAST_INSERT_ID and affected-row info over from the previous statement.
	if vars.StmtCtx.LastInsertID > 0 {
		sc.PrevLastInsertID = vars.StmtCtx.LastInsertID
	} else {
		sc.PrevLastInsertID = vars.StmtCtx.PrevLastInsertID
	}
	sc.PrevAffectedRows = 0
	if vars.StmtCtx.InUpdateStmt || vars.StmtCtx.InDeleteStmt || vars.StmtCtx.InInsertStmt || vars.StmtCtx.InSetSessionStatesStmt {
		sc.PrevAffectedRows = int64(vars.StmtCtx.AffectedRows())
	} else if vars.StmtCtx.InSelectStmt {
		sc.PrevAffectedRows = -1
	}
	if globalConfig.Instance.EnableCollectExecutionInfo.Load() {
		// In ExplainFor case, RuntimeStatsColl should not be reset for reuse,
		// because ExplainFor need to display the last statement information.
		reuseObj := vars.StmtCtx.RuntimeStatsColl
		if _, ok := s.(*ast.ExplainForStmt); ok {
			reuseObj = nil
		}
		sc.RuntimeStatsColl = execdetails.NewRuntimeStatsColl(reuseObj)
	}
	sc.TblInfo2UnionScan = make(map[*model.TableInfo]bool)
	errCount, warnCount := vars.StmtCtx.NumErrorWarnings()
	vars.SysErrorCount = errCount
	vars.SysWarningCount = warnCount
	vars.ExchangeChunkStatus()
	// Swap in the freshly prepared statement context and reset plan-cache state.
	vars.StmtCtx = sc
	vars.PrevFoundInPlanCache = vars.FoundInPlanCache
	vars.FoundInPlanCache = false
	vars.ClearStmtVars()
	vars.PrevFoundInBinding = vars.FoundInBinding
	vars.FoundInBinding = false
	vars.DurationWaitTS = 0
	vars.CurrInsertBatchExtraCols = nil
	vars.CurrInsertValues = chunk.Row{}
	return
}
// registerSQLAndPlanInExecForTopSQL register the sql and plan information if it doesn't register before execution.
// This uses to catch the running SQL when Top SQL is enabled in execution.
func registerSQLAndPlanInExecForTopSQL(sessVars *variable.SessionVars) {
	stmtCtx := sessVars.StmtCtx
	sql, sqlDigest := stmtCtx.SQLDigest()
	topsql.RegisterSQL(sql, sqlDigest, sessVars.InRestrictedSQL)
	// The plan digest is only available after planning; register it when present.
	if plan, planDigest := stmtCtx.GetPlanDigest(); len(plan) > 0 {
		topsql.RegisterPlan(plan, planDigest)
	}
}
// ResetUpdateStmtCtx resets statement context for UpdateStmt.
func ResetUpdateStmtCtx(sc *stmtctx.StatementContext, stmt *ast.UpdateStmt, vars *variable.SessionVars) {
	// Errors downgrade to warnings when not in strict mode or when IGNORE is given.
	lenient := !vars.StrictSQLMode || stmt.IgnoreErr
	sc.InUpdateStmt = true
	sc.DupKeyAsWarning = stmt.IgnoreErr
	sc.BadNullAsWarning = lenient
	sc.TruncateAsWarning = lenient
	sc.DividedByZeroAsWarning = lenient
	sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
	sc.IgnoreZeroInDate = !vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || lenient || sc.AllowInvalidDate
	sc.Priority = stmt.Priority
	sc.IgnoreNoPartition = stmt.IgnoreErr
}
// ResetDeleteStmtCtx resets statement context for DeleteStmt.
func ResetDeleteStmtCtx(sc *stmtctx.StatementContext, stmt *ast.DeleteStmt, vars *variable.SessionVars) {
	// Errors downgrade to warnings when not in strict mode or when IGNORE is given.
	lenient := !vars.StrictSQLMode || stmt.IgnoreErr
	sc.InDeleteStmt = true
	sc.DupKeyAsWarning = stmt.IgnoreErr
	sc.BadNullAsWarning = lenient
	sc.TruncateAsWarning = lenient
	sc.DividedByZeroAsWarning = lenient
	sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
	sc.IgnoreZeroInDate = !vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || lenient || sc.AllowInvalidDate
	sc.Priority = stmt.Priority
}
// setOptionForTopSQL attaches the Top SQL resource-group tagger (and RPC
// interceptor, when counting is enabled) to the given snapshot.
func setOptionForTopSQL(sc *stmtctx.StatementContext, snapshot kv.Snapshot) {
	if snapshot == nil {
		return
	}
	snapshot.SetOption(kv.ResourceGroupTagger, sc.GetResourceGroupTagger())
	if counter := sc.KvExecCounter; counter != nil {
		snapshot.SetOption(kv.RPCInterceptor, counter.RPCInterceptor())
	}
}
// isWeakConsistencyRead reports whether the statement may be executed as a
// weak-consistency read: a user connection with weak read consistency set,
// inside an auto-commit transaction, running a read-only statement.
func isWeakConsistencyRead(ctx sessionctx.Context, node ast.Node) bool {
	sv := ctx.GetSessionVars()
	if sv.ConnectionID == 0 || !sv.ReadConsistency.IsWeak() {
		return false
	}
	return plannercore.IsAutoCommitTxn(ctx) && plannercore.IsReadOnly(node, sv)
}
// FastCheckTableExec represents a check table executor.
// It is built from the "admin check table" statement, and it checks if the
// index matches the records in the table.
// It uses a new algorithms to check table data, which is faster than the old one(CheckTableExec).
type FastCheckTableExec struct {
	exec.BaseExecutor
	// dbName and table identify the table being checked.
	dbName string
	table  table.Table
	// indexInfos are the indexes to verify against the table data.
	indexInfos []*model.IndexInfo
	// done records whether the check has already run for this statement.
	done bool
	is   infoschema.InfoSchema
	// err holds the first error saved by any check worker.
	err *atomic.Pointer[error]
	wg  sync.WaitGroup
	// contextCtx is the context captured in Open for use by the workers.
	contextCtx context.Context
}
// Open implements the Executor Open interface.
func (e *FastCheckTableExec) Open(ctx context.Context) error {
	err := e.BaseExecutor.Open(ctx)
	if err != nil {
		return err
	}
	// Keep the context for the check workers and reset the per-statement flag.
	e.contextCtx = ctx
	e.done = false
	return nil
}
// checkIndexTask is one unit of work for a checkIndexWorker: verify the
// index at indexOffset within the executor's indexInfos slice.
type checkIndexTask struct {
	indexOffset int
}
// checkIndexWorker verifies one index against the table data on behalf of
// a FastCheckTableExec.
type checkIndexWorker struct {
	sctx       sessionctx.Context
	dbName     string
	table      table.Table
	indexInfos []*model.IndexInfo
	// e is the owning executor; used for error saving and session management.
	e *FastCheckTableExec
}
// groupByChecksum is one row of the checksum query result: the XOR-ed
// checksum and row count of one hash bucket.
type groupByChecksum struct {
	bucket   uint64
	checksum uint64
	count    int64
}
// getCheckSum runs the given checksum SQL as an internal admin statement and
// returns one groupByChecksum per result row (checksum, bucket, count).
func getCheckSum(ctx context.Context, se sessionctx.Context, sql string) ([]groupByChecksum, error) {
	ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnAdmin)
	rs, err := se.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql)
	if err != nil {
		return nil, err
	}
	defer func() {
		// A close failure is only logged; the query result is already drained.
		if closeErr := rs.Close(); closeErr != nil {
			logutil.BgLogger().Error("close record set failed", zap.Error(closeErr))
		}
	}()
	rows, err := sqlexec.DrainRecordSet(ctx, rs, 256)
	if err != nil {
		return nil, err
	}
	checksums := make([]groupByChecksum, 0, len(rows))
	for _, row := range rows {
		checksums = append(checksums, groupByChecksum{
			checksum: row.GetUint64(0),
			bucket:   row.GetUint64(1),
			count:    row.GetInt64(2),
		})
	}
	return checksums, nil
}
// HandleTask implements the Worker interface.
//
// It checks the consistency of one index (w.indexInfos[task.indexOffset])
// against the table data with a checksum-and-narrow strategy:
//
//  1. Build SQL expressions that hash (handle + index columns) and the
//     handle alone with crc32(md5(concat_ws(0x2, ...))).
//  2. Group rows into buckets by the handle hash and compare the per-bucket
//     XOR of checksums between the table side and the index side.
//  3. On a mismatch, drill into the first differing bucket (by updating
//     offset/mod) and repeat until the suspect row count drops to
//     lookupCheckThreshold or the round limit is hit.
//  4. Fetch the remaining suspect rows from both sides ordered by handle
//     and merge-compare them, reporting any divergence through the
//     consistency.Reporter.
//
// The first error encountered is stored into w.e.err via trySaveErr.
func (w *checkIndexWorker) HandleTask(task checkIndexTask) (_ workerpool.None) {
	defer w.e.wg.Done()
	idxInfo := w.indexInfos[task.indexOffset]
	bucketSize := int(CheckTableFastBucketSize.Load())

	ctx := kv.WithInternalSourceType(w.e.contextCtx, kv.InternalTxnAdmin)

	// First error wins: workers race on w.e.err with a compare-and-swap.
	trySaveErr := func(err error) {
		w.e.err.CompareAndSwap(nil, &err)
	}

	se, err := w.e.Base().GetSysSession()
	if err != nil {
		trySaveErr(err)
		return
	}
	// Allow the optimizer to use invisible indexes so they are checked too;
	// restore the flag before the session goes back to the pool.
	se.GetSessionVars().OptimizerUseInvisibleIndexes = true
	defer func() {
		se.GetSessionVars().OptimizerUseInvisibleIndexes = false
		w.e.Base().ReleaseSysSession(ctx, se)
	}()

	// Collect the handle columns: either the clustered (common handle) PK
	// columns, the single integer PK column, or the hidden _tidb_rowid.
	// pkTypes is only populated for the common-handle case, where it is
	// needed later to decode handle datums from query rows.
	var pkCols []string
	var pkTypes []*types.FieldType
	switch {
	case w.e.table.Meta().IsCommonHandle:
		pkColsInfo := w.e.table.Meta().GetPrimaryKey().Columns
		for _, colInfo := range pkColsInfo {
			colStr := colInfo.Name.O
			pkCols = append(pkCols, colStr)
			pkTypes = append(pkTypes, &w.e.table.Meta().Columns[colInfo.Offset].FieldType)
		}
	case w.e.table.Meta().PKIsHandle:
		pkCols = append(pkCols, w.e.table.Meta().GetPkName().O)
	default: // support decoding _tidb_rowid.
		pkCols = append(pkCols, model.ExtraHandleName.O)
	}

	// CheckSum of (handle + index columns).
	var md5HandleAndIndexCol strings.Builder
	md5HandleAndIndexCol.WriteString("crc32(md5(concat_ws(0x2, ")
	for _, col := range pkCols {
		md5HandleAndIndexCol.WriteString(ColumnName(col))
		md5HandleAndIndexCol.WriteString(", ")
	}
	for offset, col := range idxInfo.Columns {
		tblCol := w.table.Meta().Columns[col.Offset]
		if tblCol.IsGenerated() && !tblCol.GeneratedStored {
			// Virtual generated columns are not stored in the row, so
			// checksum the generating expression instead of the column name.
			md5HandleAndIndexCol.WriteString(tblCol.GeneratedExprString)
		} else {
			md5HandleAndIndexCol.WriteString(ColumnName(col.Name.O))
		}
		if offset != len(idxInfo.Columns)-1 {
			md5HandleAndIndexCol.WriteString(", ")
		}
	}
	md5HandleAndIndexCol.WriteString(")))")

	// Used to group by and order.
	var md5Handle strings.Builder
	md5Handle.WriteString("crc32(md5(concat_ws(0x2, ")
	for i, col := range pkCols {
		md5Handle.WriteString(ColumnName(col))
		if i != len(pkCols)-1 {
			md5Handle.WriteString(", ")
		}
	}
	md5Handle.WriteString(")))")

	handleColumnField := strings.Join(pkCols, ", ")
	var indexColumnField strings.Builder
	for offset, col := range idxInfo.Columns {
		indexColumnField.WriteString(ColumnName(col.Name.O))
		if offset != len(idxInfo.Columns)-1 {
			indexColumnField.WriteString(", ")
		}
	}

	// Narrowing state: (hash - offset) % mod == 0 selects the current
	// suspect bucket; each round multiplies mod by bucketSize to subdivide.
	tableRowCntToCheck := int64(0)
	offset := 0
	mod := 1
	meetError := false

	lookupCheckThreshold := int64(100)
	checkOnce := false

	// Propagate an explicit snapshot read from the user session, and reset
	// it on the sys session afterwards.
	if w.e.Ctx().GetSessionVars().SnapshotTS != 0 {
		se.GetSessionVars().SnapshotTS = w.e.Ctx().GetSessionVars().SnapshotTS
		defer func() {
			se.GetSessionVars().SnapshotTS = 0
		}()
	}
	// Run all queries inside one transaction so the table-side and
	// index-side reads see the same data version.
	_, err = se.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "begin")
	if err != nil {
		trySaveErr(err)
		return
	}

	times := 0
	const maxTimes = 10
	// Keep narrowing while the suspect row count is above the lookup
	// threshold; always run at least one full pass (checkOnce).
	for tableRowCntToCheck > lookupCheckThreshold || !checkOnce {
		times++
		// Bounded number of narrowing rounds; note the check fires before
		// the round runs, so at most maxTimes-1 rounds complete.
		if times == maxTimes {
			logutil.BgLogger().Warn("compare checksum by group reaches time limit", zap.Int("times", times))
			break
		}
		whereKey := fmt.Sprintf("((cast(%s as signed) - %d) %% %d)", md5Handle.String(), offset, mod)
		groupByKey := fmt.Sprintf("((cast(%s as signed) - %d) div %d %% %d)", md5Handle.String(), offset, mod, bucketSize)
		// On the first pass scan everything: "0 = 0" matches all rows.
		if !checkOnce {
			whereKey = "0"
		}
		checkOnce = true

		tblQuery := fmt.Sprintf("select /*+ read_from_storage(tikv[%s]) */ bit_xor(%s), %s, count(*) from %s use index() where %s = 0 group by %s", TableName(w.e.dbName, w.e.table.Meta().Name.String()), md5HandleAndIndexCol.String(), groupByKey, TableName(w.e.dbName, w.e.table.Meta().Name.String()), whereKey, groupByKey)
		idxQuery := fmt.Sprintf("select bit_xor(%s), %s, count(*) from %s use index(`%s`) where %s = 0 group by %s", md5HandleAndIndexCol.String(), groupByKey, TableName(w.e.dbName, w.e.table.Meta().Name.String()), idxInfo.Name, whereKey, groupByKey)

		logutil.BgLogger().Info("fast check table by group", zap.String("table name", w.table.Meta().Name.String()), zap.String("index name", idxInfo.Name.String()), zap.Int("times", times), zap.Int("current offset", offset), zap.Int("current mod", mod), zap.String("table sql", tblQuery), zap.String("index sql", idxQuery))

		// compute table side checksum.
		tableChecksum, err := getCheckSum(w.e.contextCtx, se, tblQuery)
		if err != nil {
			trySaveErr(err)
			return
		}
		slices.SortFunc(tableChecksum, func(i, j groupByChecksum) int {
			return cmp.Compare(i.bucket, j.bucket)
		})

		// compute index side checksum.
		indexChecksum, err := getCheckSum(w.e.contextCtx, se, idxQuery)
		if err != nil {
			trySaveErr(err)
			return
		}
		slices.SortFunc(indexChecksum, func(i, j groupByChecksum) int {
			return cmp.Compare(i.bucket, j.bucket)
		})

		currentOffset := 0

		// Every checksum in table side should be the same as the index side.
		i := 0
		for i < len(tableChecksum) && i < len(indexChecksum) {
			if tableChecksum[i].bucket != indexChecksum[i].bucket || tableChecksum[i].checksum != indexChecksum[i].checksum {
				// Take the smaller bucket id as the next bucket to drill
				// into, along with its row count.
				if tableChecksum[i].bucket <= indexChecksum[i].bucket {
					currentOffset = int(tableChecksum[i].bucket)
					tableRowCntToCheck = tableChecksum[i].count
				} else {
					currentOffset = int(indexChecksum[i].bucket)
					tableRowCntToCheck = indexChecksum[i].count
				}
				meetError = true
				break
			}
			i++
		}

		if !meetError && i < len(indexChecksum) && i == len(tableChecksum) {
			// Table side has fewer buckets.
			currentOffset = int(indexChecksum[i].bucket)
			tableRowCntToCheck = indexChecksum[i].count
			meetError = true
		} else if !meetError && i < len(tableChecksum) && i == len(indexChecksum) {
			// Index side has fewer buckets.
			currentOffset = int(tableChecksum[i].bucket)
			tableRowCntToCheck = tableChecksum[i].count
			meetError = true
		}

		if !meetError {
			if times != 1 {
				logutil.BgLogger().Error("unexpected result, no error detected in this round, but an error is detected in the previous round", zap.Int("times", times), zap.Int("offset", offset), zap.Int("mod", mod))
			}
			break
		}

		// Narrow the selection to the mismatching bucket for the next round.
		offset += currentOffset * mod
		mod *= bucketSize
	}

	// queryToRow executes sql on the sys session and drains all result rows.
	queryToRow := func(se sessionctx.Context, sql string) ([]chunk.Row, error) {
		rs, err := se.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql)
		if err != nil {
			return nil, err
		}
		row, err := sqlexec.DrainRecordSet(ctx, rs, 4096)
		if err != nil {
			return nil, err
		}
		err = rs.Close()
		if err != nil {
			logutil.BgLogger().Warn("close result set failed", zap.Error(err))
		}
		return row, nil
	}

	if meetError {
		// Fetch the suspect rows (handle, index columns, checksum) from both
		// sides, ordered by handle, and merge-compare them.
		groupByKey := fmt.Sprintf("((cast(%s as signed) - %d) %% %d)", md5Handle.String(), offset, mod)
		indexSQL := fmt.Sprintf("select %s, %s, %s from %s use index(`%s`) where %s = 0 order by %s", handleColumnField, indexColumnField.String(), md5HandleAndIndexCol.String(), TableName(w.e.dbName, w.e.table.Meta().Name.String()), idxInfo.Name, groupByKey, handleColumnField)
		tableSQL := fmt.Sprintf("select /*+ read_from_storage(tikv[%s]) */ %s, %s, %s from %s use index() where %s = 0 order by %s", TableName(w.e.dbName, w.e.table.Meta().Name.String()), handleColumnField, indexColumnField.String(), md5HandleAndIndexCol.String(), TableName(w.e.dbName, w.e.table.Meta().Name.String()), groupByKey, handleColumnField)

		idxRow, err := queryToRow(se, indexSQL)
		if err != nil {
			trySaveErr(err)
			return
		}
		tblRow, err := queryToRow(se, tableSQL)
		if err != nil {
			trySaveErr(err)
			return
		}

		// getHandleFromRow decodes the leading handle columns of a result
		// row into a kv.Handle (common handle or int handle).
		getHandleFromRow := func(row chunk.Row) (kv.Handle, error) {
			handleDatum := make([]types.Datum, 0)
			for i, t := range pkTypes {
				handleDatum = append(handleDatum, row.GetDatum(i, t))
			}
			if w.table.Meta().IsCommonHandle {
				handleBytes, err := codec.EncodeKey(w.sctx.GetSessionVars().StmtCtx, nil, handleDatum...)
				if err != nil {
					return nil, err
				}
				return kv.NewCommonHandle(handleBytes)
			}
			return kv.IntHandle(row.GetInt64(0)), nil
		}
		// getValueFromRow extracts the index column datums, which follow
		// the handle columns in the select list.
		getValueFromRow := func(row chunk.Row) ([]types.Datum, error) {
			valueDatum := make([]types.Datum, 0)
			for i, t := range idxInfo.Columns {
				valueDatum = append(valueDatum, row.GetDatum(i+len(pkCols), &w.table.Meta().Columns[t.Offset].FieldType))
			}
			return valueDatum, nil
		}
		// ir builds a consistency.Reporter wired with encoders for this
		// table's record keys and this index's keys.
		ir := func() *consistency.Reporter {
			return &consistency.Reporter{
				HandleEncode: func(handle kv.Handle) kv.Key {
					return tablecodec.EncodeRecordKey(w.table.RecordPrefix(), handle)
				},
				IndexEncode: func(idxRow *consistency.RecordData) kv.Key {
					var idx table.Index
					for _, v := range w.table.Indices() {
						if strings.EqualFold(v.Meta().Name.String(), idxInfo.Name.O) {
							idx = v
							break
						}
					}
					if idx == nil {
						return nil
					}
					k, _, err := idx.GenIndexKey(w.sctx.GetSessionVars().StmtCtx, idxRow.Values[:len(idx.Meta().Columns)], idxRow.Handle, nil)
					if err != nil {
						return nil
					}
					return k
				},
				Tbl:  w.table.Meta(),
				Idx:  idxInfo,
				Sctx: w.sctx,
			}
		}
		// getCheckSum reads the checksum column, which is the last column
		// after handle + index columns. NOTE: this closure shadows the
		// package-level getCheckSum used in the narrowing loop above.
		getCheckSum := func(row chunk.Row) uint64 {
			return row.GetUint64(len(pkCols) + len(idxInfo.Columns))
		}

		var handle kv.Handle
		var tableRecord *consistency.RecordData
		var lastTableRecord *consistency.RecordData
		var indexRecord *consistency.RecordData
		// Merge-compare the two row streams; both are ordered by handle.
		i := 0
		for i < len(tblRow) || i < len(idxRow) {
			if i == len(tblRow) {
				// No more rows in table side.
				tableRecord = nil
			} else {
				handle, err = getHandleFromRow(tblRow[i])
				if err != nil {
					trySaveErr(err)
					return
				}
				value, err := getValueFromRow(tblRow[i])
				if err != nil {
					trySaveErr(err)
					return
				}
				tableRecord = &consistency.RecordData{Handle: handle, Values: value}
			}
			if i == len(idxRow) {
				// No more rows in index side.
				indexRecord = nil
			} else {
				indexHandle, err := getHandleFromRow(idxRow[i])
				if err != nil {
					trySaveErr(err)
					return
				}
				indexValue, err := getValueFromRow(idxRow[i])
				if err != nil {
					trySaveErr(err)
					return
				}
				indexRecord = &consistency.RecordData{Handle: indexHandle, Values: indexValue}
			}

			if tableRecord == nil {
				// Index row with no table row: dangling index entry, unless
				// it matches the previously seen table row (duplicate index
				// entries pointing at the same handle).
				if lastTableRecord != nil && lastTableRecord.Handle.Equal(indexRecord.Handle) {
					tableRecord = lastTableRecord
				}
				err = ir().ReportAdminCheckInconsistent(w.e.contextCtx, indexRecord.Handle, indexRecord, tableRecord)
			} else if indexRecord == nil {
				// Table row with no index row: missing index entry.
				err = ir().ReportAdminCheckInconsistent(w.e.contextCtx, tableRecord.Handle, indexRecord, tableRecord)
			} else if tableRecord.Handle.Equal(indexRecord.Handle) && getCheckSum(tblRow[i]) != getCheckSum(idxRow[i]) {
				// Same handle but diverging column checksums.
				err = ir().ReportAdminCheckInconsistent(w.e.contextCtx, tableRecord.Handle, indexRecord, tableRecord)
			} else if !tableRecord.Handle.Equal(indexRecord.Handle) {
				if tableRecord.Handle.Compare(indexRecord.Handle) < 0 {
					// Table side is behind: this table row lacks an index entry.
					err = ir().ReportAdminCheckInconsistent(w.e.contextCtx, tableRecord.Handle, nil, tableRecord)
				} else {
					// Index side is behind: dangling index entry, possibly a
					// duplicate of the previous table row's handle.
					if lastTableRecord != nil && lastTableRecord.Handle.Equal(indexRecord.Handle) {
						err = ir().ReportAdminCheckInconsistent(w.e.contextCtx, indexRecord.Handle, indexRecord, lastTableRecord)
					} else {
						err = ir().ReportAdminCheckInconsistent(w.e.contextCtx, indexRecord.Handle, indexRecord, nil)
					}
				}
			}
			if err != nil {
				trySaveErr(err)
				return
			}
			i++
			if tableRecord != nil {
				lastTableRecord = &consistency.RecordData{Handle: tableRecord.Handle, Values: tableRecord.Values}
			} else {
				lastTableRecord = nil
			}
		}
	}
	return
}
// Close implements the Worker interface. The worker acquires and releases
// its sys session per task in HandleTask, so there is nothing to clean up.
func (*checkIndexWorker) Close() {}
// createWorker builds a checkIndexWorker bound to this executor's session
// context, database name, table, and the index metadata to verify.
func (e *FastCheckTableExec) createWorker() workerpool.Worker[checkIndexTask, workerpool.None] {
	worker := &checkIndexWorker{
		sctx:       e.Ctx(),
		dbName:     e.dbName,
		table:      e.table,
		indexInfos: e.indexInfos,
		e:          e,
	}
	return worker
}
// Next implements the Executor Next interface.
//
// It fans the per-index consistency checks out to a small worker pool,
// waits for all of them, and surfaces the first error any worker saved.
// The check runs at most once per executor instance.
func (e *FastCheckTableExec) Next(context.Context, *chunk.Chunk) error {
	if e.done || len(e.indexInfos) == 0 {
		return nil
	}
	defer func() { e.done = true }()

	// Invisible indexes must be verified as well, so temporarily let the
	// optimizer use them; restore the flag when the check finishes.
	e.Ctx().GetSessionVars().OptimizerUseInvisibleIndexes = true
	defer func() {
		e.Ctx().GetSessionVars().OptimizerUseInvisibleIndexes = false
	}()

	pool := workerpool.NewWorkerPool[checkIndexTask]("checkIndex",
		poolutil.CheckTable, 3, e.createWorker)
	pool.Start()

	e.wg.Add(len(e.indexInfos))
	for idx := range e.indexInfos {
		pool.AddTask(checkIndexTask{indexOffset: idx})
	}
	e.wg.Wait()
	pool.ReleaseAndWait()

	if errPtr := e.err.Load(); errPtr != nil {
		return *errPtr
	}
	return nil
}
// TableName returns `schema`.`table`
func TableName(schema, table string) string {
return fmt.Sprintf("`%s`.`%s`", escapeName(schema), escapeName(table))
}
// ColumnName returns `column`
func ColumnName(column string) string {
return fmt.Sprintf("`%s`", escapeName(column))
}
// escapeName doubles every backtick in name so the result can be embedded
// safely between backtick quotes in a SQL identifier.
func escapeName(name string) string {
	var b strings.Builder
	b.Grow(len(name))
	// Byte-wise scan: backtick is a single-byte ASCII character, so this
	// is byte-for-byte equivalent to replacing "`" with "``".
	for i := 0; i < len(name); i++ {
		if name[i] == '`' {
			b.WriteByte('`')
		}
		b.WriteByte(name[i])
	}
	return b.String()
}
|
package main
import (
"strings"
"golang.org/x/tour/wc"
//"fmt"
)
// WordCount returns a map from each whitespace-separated word in s to the
// number of times it occurs. An input with no words yields an empty map.
func WordCount(s string) map[string]int {
	counts := make(map[string]int)
	// Indexing a missing key yields the zero value, so a plain increment
	// covers both the first and subsequent occurrences of a word; the
	// original exists/else branch did a redundant second map lookup.
	for _, word := range strings.Fields(s) {
		counts[word]++
	}
	return counts
}
// main runs the Go tour's word-count checker against WordCount; wc.Test
// prints a PASS/FAIL line per built-in test case.
func main() {
	// Manual smoke test, kept for reference:
	// s := "this is a string is a string"
	// WordCount(s)
	wc.Test(WordCount)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.