text
stringlengths 11
4.05M
|
|---|
package zbar
// #include <stdlib.h>
// #include <zbar.h>
import "C"
import "unsafe"
// Image wraps a zbar image handle (zbar_image_t). An Image owns a
// C-side resource; release it with Destroy when no longer needed.
type Image struct {
	c_image *C.zbar_image_t // underlying C image object
}
// NewImage allocates a fresh zbar image via the C library and wraps it
// in an Image.
func NewImage() *Image {
	return &Image{c_image: C.zbar_image_create()}
}
// Destroy releases the underlying C image object. The receiver must not
// be used after Destroy returns.
//
// Fix: the original also assigned `i = nil`, which only rebinds the
// method's local copy of the receiver pointer and has no effect for
// callers; the misleading statement is removed.
func (i *Image) Destroy() {
	C.zbar_image_destroy(i.c_image)
}
// Ref adjusts the image's reference count by refs.
// NOTE(review): zbar_image_ref takes a delta (may be negative) — confirm
// callers pass deltas rather than absolute counts.
func (i *Image) Ref(refs int) {
	C.zbar_image_ref(i.c_image, C.int(refs))
}
// Convert converts the image to the given fourcc format and returns a
// new Image wrapping the converted zbar image.
func (i *Image) Convert(format uint64) *Image {
	converted := C.zbar_image_convert(i.c_image, C.ulong(format))
	return &Image{c_image: converted}
}
// ConvertResize converts the image format while cropping/padding the
// image to the requested width and height, returning a new Image.
func (i *Image) ConvertResize(format uint64, width, height uint) *Image {
	converted := C.zbar_image_convert_resize(i.c_image, C.ulong(format), C.unsigned(width), C.unsigned(height))
	return &Image{c_image: converted}
}
// GetFormat returns the fourcc image format code of the sample data.
func (i *Image) GetFormat() uint64 {
	return uint64(C.zbar_image_get_format(i.c_image))
}

// GetSequence returns the "sequence" (page/frame) number associated with
// this image.
func (i *Image) GetSequence() uint {
	return uint(C.zbar_image_get_sequence(i.c_image))
}

// GetWidth returns the width of the image in pixels.
func (i *Image) GetWidth() uint {
	return uint(C.zbar_image_get_width(i.c_image))
}

// GetHeight returns the height of the image in pixels.
func (i *Image) GetHeight() uint {
	return uint(C.zbar_image_get_height(i.c_image))
}

// GetData returns the image sample data.
// NOTE(review): this boxes the raw C pointer in an interface{}, it does
// not copy the pixels into a Go byte slice — callers must treat the
// result as an opaque C pointer; confirm intended usage.
func (i *Image) GetData() interface{} {
	return C.zbar_image_get_data(i.c_image)
}

// GetDataLength returns the size of the image sample data in bytes.
func (i *Image) GetDataLength() uint64 {
	return uint64(C.zbar_image_get_data_length(i.c_image))
}
// GetSymbols returns the decoded result set for this image, or nil when
// no symbol set is attached.
func (i *Image) GetSymbols() *SymbolSet {
	cSet := C.zbar_image_get_symbols(i.c_image)
	if cSet == nil {
		return nil
	}
	return &SymbolSet{c_symbol_set: cSet}
}
// SetSymbols associates the specified symbol set with the image,
// replacing any existing results.
// NOTE(review): passing a nil *SymbolSet panics on the symbols.c_symbol_set
// dereference — confirm whether callers ever pass nil.
func (i *Image) SetSymbols(symbols *SymbolSet) {
	C.zbar_image_set_symbols(i.c_image, symbols.c_symbol_set)
}
// FirstSymbol returns the first decoded symbol of the image-scanner
// result iterator, or nil when the image has no decoded symbols.
func (i *Image) FirstSymbol() *Symbol {
	cSym := C.zbar_image_first_symbol(i.c_image)
	if cSym == nil {
		return nil
	}
	return &Symbol{c_symbol: cSym}
}
// SetFormat specifies the fourcc image format code for the image sample
// data.
func (i *Image) SetFormat(format uint64) {
	C.zbar_image_set_format(i.c_image, C.ulong(format))
}

// SetSequence associates a "sequence" (page/frame) number with this
// image.
func (i *Image) SetSequence(sequenceNum uint) {
	C.zbar_image_set_sequence(i.c_image, C.unsigned(sequenceNum))
}

// SetSize specifies the pixel size (width x height) of the image.
func (i *Image) SetSize(width, height uint) {
	C.zbar_image_set_size(i.c_image, C.unsigned(width), C.unsigned(height))
}
// Specify image sample data.
// func (i *Image) SetData() {
//
// }
// FreeData invokes zbar's built-in cleanup handler, releasing the
// image's sample data buffer.
func (i *Image) FreeData() {
	C.zbar_image_free_data(i.c_image)
}
// Write dumps the raw image data to a file (named from filebase) for
// debugging, returning the C library's status code.
// NOTE(review): exact file naming and the meaning of the return value
// follow zbar_image_write — confirm against the zbar documentation.
func (i *Image) Write(filebase string) int {
	cfilebase := C.CString(filebase)
	defer C.free(unsafe.Pointer(cfilebase)) // release the C copy of the string
	return int(C.zbar_image_write(i.c_image, cfilebase))
}
// Read back an image in the format written by zbar_image_write()
// func (i *Image) Read(filename string) *Image {
// cfilename := C.CString(filename)
// defer C.free(unsafe.Pointer(cfilename))
// i := Image{}
// i.c_image = C.zbar_image_read(cfilename)
// return &i
// }
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package session_test
import (
"fmt"
"strconv"
"testing"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/testkit"
"github.com/stretchr/testify/require"
)
// TestSysVarTTLJobEnable checks that setting @@global.tidb_ttl_job_enable
// updates variable.EnableTTLJob and is observable through both global and
// session scoped reads. The original value is restored on exit so other
// tests are unaffected.
func TestSysVarTTLJobEnable(t *testing.T) {
	origEnableDDL := variable.EnableTTLJob.Load()
	defer func() {
		variable.EnableTTLJob.Store(origEnableDDL)
	}()
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	// off -> on -> off; each transition must be visible in the Go-side
	// variable and via both variable scopes in SQL.
	tk.MustExec("set @@global.tidb_ttl_job_enable=0")
	require.False(t, variable.EnableTTLJob.Load())
	tk.MustQuery("select @@global.tidb_ttl_job_enable").Check(testkit.Rows("0"))
	tk.MustQuery("select @@tidb_ttl_job_enable").Check(testkit.Rows("0"))
	tk.MustExec("set @@global.tidb_ttl_job_enable=1")
	require.True(t, variable.EnableTTLJob.Load())
	tk.MustQuery("select @@global.tidb_ttl_job_enable").Check(testkit.Rows("1"))
	tk.MustQuery("select @@tidb_ttl_job_enable").Check(testkit.Rows("1"))
	tk.MustExec("set @@global.tidb_ttl_job_enable=0")
	require.False(t, variable.EnableTTLJob.Load())
	tk.MustQuery("select @@global.tidb_ttl_job_enable").Check(testkit.Rows("0"))
	tk.MustQuery("select @@tidb_ttl_job_enable").Check(testkit.Rows("0"))
}
// TestSysVarTTLScanBatchSize checks tidb_ttl_scan_batch_size: values are
// stored in variable.TTLScanBatchSize and clamped to [1,
// DefTiDBTTLScanBatchMaxSize]. The original value is restored on exit.
func TestSysVarTTLScanBatchSize(t *testing.T) {
	origScanBatchSize := variable.TTLScanBatchSize.Load()
	defer func() {
		variable.TTLScanBatchSize.Store(origScanBatchSize)
	}()
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	// In-range value is stored as-is.
	tk.MustExec("set @@global.tidb_ttl_scan_batch_size=789")
	require.Equal(t, int64(789), variable.TTLScanBatchSize.Load())
	tk.MustQuery("select @@global.tidb_ttl_scan_batch_size").Check(testkit.Rows("789"))
	tk.MustQuery("select @@tidb_ttl_scan_batch_size").Check(testkit.Rows("789"))
	// Below-minimum values are clamped to 1.
	tk.MustExec("set @@global.tidb_ttl_scan_batch_size=0")
	require.Equal(t, int64(1), variable.TTLScanBatchSize.Load())
	tk.MustQuery("select @@global.tidb_ttl_scan_batch_size").Check(testkit.Rows("1"))
	tk.MustQuery("select @@tidb_ttl_scan_batch_size").Check(testkit.Rows("1"))
	// Above-maximum values are clamped to the maximum.
	maxVal := int64(variable.DefTiDBTTLScanBatchMaxSize)
	tk.MustExec(fmt.Sprintf("set @@global.tidb_ttl_scan_batch_size=%d", maxVal+1))
	require.Equal(t, maxVal, variable.TTLScanBatchSize.Load())
	tk.MustQuery("select @@global.tidb_ttl_scan_batch_size").Check(testkit.Rows(strconv.FormatInt(maxVal, 10)))
	tk.MustQuery("select @@tidb_ttl_scan_batch_size").Check(testkit.Rows(strconv.FormatInt(maxVal, 10)))
}
// TestSysVarTTLScanDeleteBatchSize checks tidb_ttl_delete_batch_size:
// values are stored in variable.TTLDeleteBatchSize and clamped to
// [1, DefTiDBTTLDeleteBatchMaxSize].
//
// Fix: the original saved and restored TTLScanBatchSize while the test
// actually mutates TTLDeleteBatchSize, leaving the delete batch size
// polluted for any test that runs afterwards. Save/restore the variable
// that is modified.
func TestSysVarTTLScanDeleteBatchSize(t *testing.T) {
	origDeleteBatchSize := variable.TTLDeleteBatchSize.Load()
	defer func() {
		variable.TTLDeleteBatchSize.Store(origDeleteBatchSize)
	}()
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	// In-range value is stored as-is.
	tk.MustExec("set @@global.tidb_ttl_delete_batch_size=789")
	require.Equal(t, int64(789), variable.TTLDeleteBatchSize.Load())
	tk.MustQuery("select @@global.tidb_ttl_delete_batch_size").Check(testkit.Rows("789"))
	tk.MustQuery("select @@tidb_ttl_delete_batch_size").Check(testkit.Rows("789"))
	// Below-minimum values are clamped to 1.
	tk.MustExec("set @@global.tidb_ttl_delete_batch_size=0")
	require.Equal(t, int64(1), variable.TTLDeleteBatchSize.Load())
	tk.MustQuery("select @@global.tidb_ttl_delete_batch_size").Check(testkit.Rows("1"))
	tk.MustQuery("select @@tidb_ttl_delete_batch_size").Check(testkit.Rows("1"))
	// Above-maximum values are clamped to the maximum.
	maxVal := int64(variable.DefTiDBTTLDeleteBatchMaxSize)
	tk.MustExec(fmt.Sprintf("set @@global.tidb_ttl_delete_batch_size=%d", maxVal+1))
	require.Equal(t, maxVal, variable.TTLDeleteBatchSize.Load())
	tk.MustQuery("select @@global.tidb_ttl_delete_batch_size").Check(testkit.Rows(strconv.FormatInt(maxVal, 10)))
	tk.MustQuery("select @@tidb_ttl_delete_batch_size").Check(testkit.Rows(strconv.FormatInt(maxVal, 10)))
}
// TestSysVarTTLScanDeleteLimit checks tidb_ttl_delete_rate_limit:
// default 0 (no limit), positive values stored as-is, and -1 normalized
// to 0. The original value is restored on exit.
// NOTE(review): the function name says "ScanDeleteLimit" but the variable
// under test is the delete *rate* limit — consider renaming.
func TestSysVarTTLScanDeleteLimit(t *testing.T) {
	origDeleteLimit := variable.TTLDeleteRateLimit.Load()
	defer func() {
		variable.TTLDeleteRateLimit.Store(origDeleteLimit)
	}()
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	// Default value.
	tk.MustQuery("select @@global.tidb_ttl_delete_rate_limit").Check(testkit.Rows("0"))
	tk.MustExec("set @@global.tidb_ttl_delete_rate_limit=100000")
	require.Equal(t, int64(100000), variable.TTLDeleteRateLimit.Load())
	tk.MustQuery("select @@global.tidb_ttl_delete_rate_limit").Check(testkit.Rows("100000"))
	tk.MustQuery("select @@tidb_ttl_delete_rate_limit").Check(testkit.Rows("100000"))
	tk.MustExec("set @@global.tidb_ttl_delete_rate_limit=0")
	require.Equal(t, int64(0), variable.TTLDeleteRateLimit.Load())
	tk.MustQuery("select @@global.tidb_ttl_delete_rate_limit").Check(testkit.Rows("0"))
	tk.MustQuery("select @@tidb_ttl_delete_rate_limit").Check(testkit.Rows("0"))
	// Negative input is normalized to 0.
	tk.MustExec("set @@global.tidb_ttl_delete_rate_limit=-1")
	require.Equal(t, int64(0), variable.TTLDeleteRateLimit.Load())
	tk.MustQuery("select @@global.tidb_ttl_delete_rate_limit").Check(testkit.Rows("0"))
	tk.MustQuery("select @@tidb_ttl_delete_rate_limit").Check(testkit.Rows("0"))
}
|
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package myoohoohoo2
import (
"appengine"
//"appengine/blobstore"
"appengine/datastore"
"encoding/json"
"fmt"
//"io"
"net/http"
//"os"
. "github.com/qiniu/api/conf"
//"github.com/qiniu/api/io"
"github.com/qiniu/api/rs"
"strconv"
"time"
)
// UserStruct is the datastore entity for a registered user.
// Password and TokenTime are excluded from JSON responses.
type UserStruct struct {
	Uid       int       // numeric user id (100000 + registration order)
	UserName  string    // unique login name
	Password  string    `json:"-"` // NOTE(review): stored and compared in plaintext
	Sex       string
	Date      time.Time // registration time
	TokenTime time.Time `json:"-"` // when the last upload token was issued
}
// AudioStruct is the datastore entity for one uploaded audio clip.
// IsValid is excluded from JSON; deleting a clip sets IsValid = false
// (soft delete) rather than removing the entity.
type AudioStruct struct {
	Aid        int    // audio id (count-based, see recqiniu)
	Uid        int    // uploading user's id
	UserName   string
	AudioKey   string // Qiniu object key
	AudioTitle string
	IsValid    bool `json:"-"` // false once soft-deleted via delaudio
	Favorite   int
	Date       time.Time
	Size       int // size in bytes as reported by Qiniu ($(fsize))
}
// AudioSlice wraps a list of audios for JSON marshaling in query
// responses.
type AudioSlice struct {
	Audios []AudioStruct
}
// CollectionAudio records that user Uid has collected (favorited) audio
// Aid; one entity per (Uid, Aid) pair.
type CollectionAudio struct {
	Uid int
	Aid int
}
//func main() {
// mux := http.NewServeMux()
// r := mux.NewRouter()
// r.HandleFunc("/", root)
// r.HandleFunc("/register", register)
// r.HandleFunc("/login", login)
// r.HandleFunc("/generate", generate) //get toekn
// r.HandleFunc("/checkvalid", checkvalid)
// r.HandleFunc("/query", query)
// r.HandleFunc("/delaudio", delaudio) //delete audio=set IsValid = false
// r.HandleFunc("/recqiniu", recqiniu) //get sth from qiniu
//}
//建表记录设备型号,用户来源国家地区,系统
// init registers all HTTP routes for the App Engine app and configures
// the Qiniu API credentials used by uptoken.
//
// SECURITY(review): the Qiniu access/secret keys are hard-coded and
// committed to source control — they should be rotated and loaded from
// configuration or the environment instead.
func init() {
	ACCESS_KEY = "iN7NgwM31j4-BZacMjPrOQBs34UG1maYCAQmhdCV"
	SECRET_KEY = "6QTOr2Jg1gcZEWDQXKOGZh5PziC2MCV5KsntT70j"
	http.HandleFunc("/", root)
	http.HandleFunc("/register", register)
	http.HandleFunc("/login", login)
	http.HandleFunc("/generate", generate) // issue a Qiniu upload token
	http.HandleFunc("/checkvalid", checkvalid)
	http.HandleFunc("/query", query)
	http.HandleFunc("/delaudio", delaudio) // soft delete: sets IsValid = false
	http.HandleFunc("/recqiniu", recqiniu) // upload callback from Qiniu
	http.HandleFunc("/addcollect", addcollect)
	http.HandleFunc("/delcollect", delcollect)
	//http.HandleFunc("/getinfo", getinfo)//for myself to check users audios count,need xxx=yyy?
}
// linkJson builds the tiny JSON envelope used by every handler:
// {"status":<status>,"<subKey>":<val>}. The caller is responsible for
// quoting val when it must be a JSON string.
func linkJson(status string, subKey string, val string) string {
	return fmt.Sprintf("{\"status\":%s,\"%s\":%s}", status, subKey, val)
}
// root is the catch-all handler; it simply confirms the service is
// alive by writing a fixed banner.
func root(w http.ResponseWriter, r *http.Request) {
	const banner = "root!"
	fmt.Fprint(w, banner)
}
// uptoken builds a Qiniu upload token for bucketName on behalf of uid.
// The callback body tells Qiniu which magic/custom variables to POST
// back to /recqiniu after a successful upload.
//
// Fix: the original appended "&gentime=" + string(time.Now().Unix()),
// which converts the int64 to a single Unicode rune rather than its
// decimal representation; use strconv.FormatInt.
func uptoken(bucketName string, uid string) string {
	body := "uid=$(x:uid)&username=$(x:username)&audiotitle=$(x:audiotitle)&key=$(etag)&size=$(fsize)" +
		"&gentime=" + strconv.FormatInt(time.Now().Unix(), 10)
	putPolicy := rs.PutPolicy{
		Scope:        bucketName,
		CallbackUrl:  "http://www.oohoohoo.com/recqiniu?", // http://<your domain>/recqiniu
		CallbackBody: body,
		EndUser:      uid,
		Expires:      3600 * 24 * 7, // one week, in seconds
	}
	return putPolicy.Token(nil)
}
// generate issues a Qiniu upload token for an existing user.
// POST form: uid. On success it refreshes the user's TokenTime and
// responds {"status":1,"uploadToken":"..."}; otherwise status 0.
// Non-POST requests receive no response body.
func generate(w http.ResponseWriter, r *http.Request) {
	if "POST" == r.Method {
		uid, _ := strconv.Atoi(r.FormValue("uid")) // parse failure leaves uid == 0
		c := appengine.NewContext(r)
		q1 := datastore.NewQuery("UserStruct").Filter("Uid =", uid)
		existUser := make([]UserStruct, 0, 1)
		if _, err := q1.GetAll(c, &existUser); err != nil {
			fmt.Fprint(w, "{\"status\":\"0\"}")
			return
		}
		if len(existUser) > 0 {
			token := uptoken("qtestbucket", r.FormValue("uid"))
			// Refresh the token issue time on the stored user record.
			thisUser := existUser[0]
			thisUser = UserStruct{
				Uid:       thisUser.Uid,
				UserName:  thisUser.UserName,
				Password:  thisUser.Password,
				Sex:       thisUser.Sex,
				Date:      thisUser.Date,
				TokenTime: time.Now(),
			}
			// Deterministic key "UserStruct<uid>" so this Put overwrites
			// the existing entity rather than creating a duplicate.
			key_str := "UserStruct" + r.FormValue("uid")
			key := datastore.NewKey(c, "UserStruct", key_str, 0, nil)
			_, err1 := datastore.Put(c, key, &thisUser)
			if err1 != nil {
				fmt.Fprintln(w, linkJson("0", "uploadToken", "\"\""))
			} else {
				fmt.Fprintln(w, linkJson("1", "uploadToken", "\""+token+"\""))
			}
		} else {
			fmt.Fprintln(w, linkJson("0", "uploadToken", "\"\"")) // unknown uid
		}
	}
}
// checkvalid reports whether the user's previously issued upload token
// is still inside its validity window (see isvalid).
// POST form: uid. Responds status 1 when still valid, 0 otherwise.
func checkvalid(w http.ResponseWriter, r *http.Request) {
	if "POST" == r.Method {
		uid, _ := strconv.Atoi(r.FormValue("uid"))
		c := appengine.NewContext(r)
		q1 := datastore.NewQuery("UserStruct").Filter("Uid =", uid)
		existUser := make([]UserStruct, 0, 1)
		if _, err := q1.GetAll(c, &existUser); err != nil {
			fmt.Fprint(w, "{\"status\":\"0\"}")
			return
		}
		if len(existUser) > 0 {
			thisUser := existUser[0]
			result := isvalid(thisUser.TokenTime)
			if result {
				fmt.Fprintln(w, linkJson("1", "msg", "\"success\""))
			} else {
				fmt.Fprintln(w, linkJson("0", "msg", "\"fail\"")) // client must request a new token
			}
		} else {
			// NOTE(review): the empty val produces invalid JSON
			// ({"status":0,"uploadToken":}) — callers likely expect "\"\""
			// here, as in generate.
			fmt.Fprintln(w, linkJson("0", "uploadToken", "")) // unknown uid
		}
	}
}
func isvalid(gentime time.Time) bool {
//now is before valid time is right
if time.Now().Before(gentime.Add(1000 * 1000 * 1000 * 3600 * 24 * 7)) {
return true
} else {
return false
}
return false
}
//http://requestb.in/ 测试七牛返回数据
// recqiniu is the Qiniu upload-callback endpoint: after a successful
// upload Qiniu POSTs the callback body built in uptoken (uid, username,
// key, audiotitle, size), and we persist a new AudioStruct entity.
//
// NOTE(review): a response line is written before any work is done and a
// second status line afterwards, so the callback receives two JSON
// objects; the early write looks like leftover debugging.
// NOTE(review): Aid is derived from a full count of AudioStruct entities
// — O(n) per upload and racy under concurrent callbacks.
func recqiniu(w http.ResponseWriter, r *http.Request) {
	if "POST" == r.Method {
		uid := r.FormValue("uid")
		username := r.FormValue("username")
		audiokey := r.FormValue("key")
		audiotitle := r.FormValue("audiotitle")
		size := r.FormValue("size")
		fmt.Fprintln(w, linkJson("1", "audiotitle", "\""+audiotitle+"\""))
		c := appengine.NewContext(r)
		q := datastore.NewQuery("AudioStruct") // fetch everything just to count
		audios := make([]AudioStruct, 0, 10)
		if _, err := q.GetAll(c, &audios); err != nil {
			return
		}
		count := len(audios)
		uid_int, _ := strconv.Atoi(uid)
		aid_int := count + 1
		size_int, _ := strconv.Atoi(size)
		audio := AudioStruct{
			Aid:        aid_int,
			Uid:        uid_int,
			UserName:   username,
			AudioKey:   audiokey,
			AudioTitle: audiotitle,
			IsValid:    true,
			Favorite:   0,
			Date:       time.Now(),
			Size:       size_int,
		}
		// Deterministic key "AudioStruct<aid>" for later lookups/updates.
		aid_str := strconv.Itoa(aid_int)
		key_str := "AudioStruct" + aid_str
		key := datastore.NewKey(c, "AudioStruct", key_str, 0, nil)
		_, err1 := datastore.Put(c, key, &audio)
		if err1 != nil {
			fmt.Fprintln(w, linkJson("0", "msg", "\"fail\""))
			return
		} else {
			fmt.Fprintln(w, linkJson("1", "msg", "\"success\""))
		}
	}
}
//http://localhost:8080/register?username=aaa&password=123456&sex=1
// register creates a new user account.
// POST form: username, password, sex. Rejects duplicate usernames
// ({"status":"0","msg":"1"}); on success responds
// {"status":1,"userinfo":{...}} (without the password).
//
// NOTE(review): the uid is 100000 + (total user count) + 1, computed from
// a GetAll of every user — racy under concurrent registrations and O(n)
// per request; a datastore Count or allocated ID would be safer.
func register(w http.ResponseWriter, r *http.Request) {
	if "POST" == r.Method {
		c := appengine.NewContext(r)
		q := datastore.NewQuery("UserStruct") // fetched only to count users
		users := make([]UserStruct, 0, 10)
		if _, err := q.GetAll(c, &users); err != nil {
			fmt.Fprint(w, "{\"status\":\"0\"}")
			return
		}
		// Duplicate username check.
		q1 := datastore.NewQuery("UserStruct").Filter("UserName =", r.FormValue("username"))
		existUser := make([]UserStruct, 0, 1)
		if _, err := q1.GetAll(c, &existUser); err != nil {
			fmt.Fprint(w, "{\"status\":\"0\"}")
			return
		}
		if len(existUser) > 0 {
			// msg "1": username already taken.
			fmt.Fprint(w, "{\"status\":\"0\",\"msg\":\"1\"}")
			return
		}
		count := len(users)
		uid_int := 100000 + count + 1
		u := UserStruct{
			Uid:      uid_int,
			UserName: r.FormValue("username"),
			Password: r.FormValue("password"), // NOTE(review): stored in plaintext
			Sex:      r.FormValue("sex"),
			Date:     time.Now(),
		}
		uid_str := strconv.Itoa(uid_int)
		key_str := "UserStruct" + uid_str // deterministic key "UserStruct<uid>"
		key := datastore.NewKey(c, "UserStruct", key_str, 0, nil)
		_, err := datastore.Put(c, key, &u)
		if err != nil {
			fmt.Fprint(w, "{\"status\":\"0\"}")
			return
		} else {
			// Echo back the public user fields (no password).
			var s = make(map[string]interface{})
			s["Uid"] = u.Uid
			s["UserName"] = u.UserName
			s["Sex"] = u.Sex
			s["Date"] = u.Date.Format("2006-01-02 15:04:05")
			b, err := json.Marshal(s)
			if err != nil {
				fmt.Println(err)
				return
			}
			fmt.Fprintln(w, linkJson("1", "userinfo", string(b)))
		}
	} else {
		fmt.Fprintln(w, "get")
	}
}
// login authenticates a user by username/password.
// POST form: username, password. On success responds
// {"status":1,"userinfo":{...}}; unknown username gets status 0.
//
// NOTE(review): a wrong password produces no response body at all —
// clients cannot distinguish it from a network failure.
// NOTE(review): passwords are compared in plaintext.
func login(w http.ResponseWriter, r *http.Request) {
	if "POST" == r.Method {
		c := appengine.NewContext(r)
		q := datastore.NewQuery("UserStruct").Filter("UserName =", r.FormValue("username"))
		users := make([]UserStruct, 0, 1)
		if _, err := q.GetAll(c, &users); err != nil {
			return
		}
		if len(users) > 0 {
			realPwd := users[0].Password
			if r.FormValue("password") == realPwd {
				// Return the public user fields (no password).
				var s = make(map[string]interface{})
				s["Uid"] = users[0].Uid
				s["UserName"] = users[0].UserName
				s["Sex"] = users[0].Sex
				s["Date"] = users[0].Date.Format("2006-01-02 15:04:05")
				b, err := json.Marshal(s)
				if err != nil {
					fmt.Println(err)
					return
				}
				fmt.Fprintln(w, linkJson("1", "userinfo", string(b)))
			}
		} else {
			fmt.Fprint(w, "{\"status\":\"0\"}")
		}
	} else {
		fmt.Fprintln(w, "login get")
	}
}
// Sizer is satisfied by values that can report their size in bytes.
// NOTE(review): unused within this file — possibly left over from
// removed upload code.
type Sizer interface {
	Size() int64
}
// query returns a page of valid (non-deleted) audios, newest first,
// 20 per page. POST form: page (1-based). Responds
// {"status":1,"audios":{...}} or a status-0 message.
func query(w http.ResponseWriter, r *http.Request) {
	if "POST" == r.Method {
		c := appengine.NewContext(r)
		page, _ := strconv.Atoi(r.FormValue("page"))
		countbegin := (page - 1) * 20 // offset of the first item on this page
		q := datastore.NewQuery("AudioStruct").Filter("IsValid =", true).Order("-Date").Limit(20).Offset(countbegin)
		audios := make([]AudioStruct, 0, 10)
		if _, err := q.GetAll(c, &audios); err != nil {
			fmt.Fprintln(w, linkJson("0", "msg", "\"db error\""))
			return
		}
		if len(audios) > 0 {
			// Re-build each element so json:"-" fields (IsValid) are
			// dropped and only public fields are serialized.
			var s AudioSlice
			for _, value := range audios {
				s.Audios = append(s.Audios, AudioStruct{Aid: value.Aid, Uid: value.Uid, UserName: value.UserName, AudioKey: value.AudioKey, AudioTitle: value.AudioTitle, Favorite: value.Favorite, Date: value.Date, Size: value.Size})
			}
			b, err := json.Marshal(s)
			if err != nil {
				fmt.Fprintln(w, linkJson("0", "msg", "\"json error\""))
				return
			}
			fmt.Fprintln(w, linkJson("1", "audios", string(b)))
		} else {
			fmt.Fprintln(w, linkJson("0", "msg", "\"no audio\""))
		}
	}
}
// delaudio soft-deletes an audio: it rewrites the entity with
// IsValid=false so query no longer returns it. POST form: aid.
//
// NOTE(review): if the aid is unknown, or the initial GetAll fails, no
// response body is written at all.
func delaudio(w http.ResponseWriter, r *http.Request) {
	if "POST" == r.Method {
		c := appengine.NewContext(r)
		aid, _ := strconv.Atoi(r.FormValue("aid"))
		q := datastore.NewQuery("AudioStruct").Filter("Aid =", aid)
		audios := make([]AudioStruct, 0, 1)
		if _, err := q.GetAll(c, &audios); err != nil {
			return
		}
		if len(audios) > 0 {
			// Copy every field, flipping only IsValid.
			thisAudio := audios[0]
			thisAudio = AudioStruct{
				Aid:        thisAudio.Aid,
				Uid:        thisAudio.Uid,
				UserName:   thisAudio.UserName,
				AudioKey:   thisAudio.AudioKey,
				AudioTitle: thisAudio.AudioTitle,
				IsValid:    false,
				Favorite:   thisAudio.Favorite,
				Date:       thisAudio.Date,
				Size:       thisAudio.Size,
			}
			// Same deterministic key used by recqiniu, so this overwrites.
			key_str := "AudioStruct" + r.FormValue("aid")
			key := datastore.NewKey(c, "AudioStruct", key_str, 0, nil)
			_, err1 := datastore.Put(c, key, &thisAudio)
			if err1 != nil {
				fmt.Fprintln(w, linkJson("0", "msg", "\"delete failed\""))
			} else {
				fmt.Fprintln(w, linkJson("1", "msg", "\"delete succeed\""))
			}
		}
	}
}
// addcollect records that a user collected (favorited) an audio.
// POST form: uid, aid. Duplicate collections are rejected with
// {"status":"0","msg":"1"}.
func addcollect(w http.ResponseWriter, r *http.Request) {
	if "POST" == r.Method {
		c := appengine.NewContext(r)
		uid, _ := strconv.Atoi(r.FormValue("uid"))
		aid, _ := strconv.Atoi(r.FormValue("aid"))
		// Check whether this uid already collected this aid.
		q := datastore.NewQuery("CollectionAudio").Filter("Uid =", uid).Filter("Aid =", aid)
		existC := make([]CollectionAudio, 0, 1)
		if _, err := q.GetAll(c, &existC); err != nil {
			fmt.Fprint(w, "{\"status\":\"0\"}")
			return
		}
		if len(existC) > 0 {
			// msg "1": already collected.
			fmt.Fprint(w, "{\"status\":\"0\",\"msg\":\"1\"}")
			return
		}
		collect := CollectionAudio{
			Uid: uid,
			Aid: aid,
		}
		// Deterministic key "CollectionAudio<uid><aid>".
		// NOTE(review): plain concatenation is ambiguous (uid=1, aid=23 vs
		// uid=12, aid=3) — a separator would make keys unique.
		key_str := "CollectionAudio" + r.FormValue("uid") + r.FormValue("aid")
		key := datastore.NewKey(c, "CollectionAudio", key_str, 0, nil)
		_, err := datastore.Put(c, key, &collect)
		if err != nil {
			fmt.Fprint(w, "{\"status\":\"0\"}")
			return
		} else {
			fmt.Fprintln(w, "{\"status\":\"1\"}") // collected successfully
		}
	}
}
// delcollect removes a user's collection record for an audio.
// POST form: uid, aid. Responds {"status":"1"} when the record existed
// and was deleted, {"status":"0"} otherwise.
func delcollect(w http.ResponseWriter, r *http.Request) {
	if "POST" == r.Method {
		c := appengine.NewContext(r)
		uid, _ := strconv.Atoi(r.FormValue("uid"))
		aid, _ := strconv.Atoi(r.FormValue("aid"))
		q := datastore.NewQuery("CollectionAudio").Filter("Uid =", uid).Filter("Aid =", aid)
		existC := make([]CollectionAudio, 0, 1)
		if _, err := q.GetAll(c, &existC); err != nil {
			fmt.Fprint(w, "{\"status\":\"0\"}")
			return
		}
		if len(existC) > 0 {
			// Same deterministic key used by addcollect.
			key_str := "CollectionAudio" + r.FormValue("uid") + r.FormValue("aid")
			key := datastore.NewKey(c, "CollectionAudio", key_str, 0, nil)
			err := datastore.Delete(c, key)
			if err != nil {
				fmt.Fprint(w, "{\"status\":\"0\"}") // delete failed
				return
			} else {
				fmt.Fprint(w, "{\"status\":\"1\"}") // delete succeeded
				return
			}
		} else {
			fmt.Fprint(w, "{\"status\":\"0\"}") // nothing to delete
		}
	}
}
//c := appengine.NewContext(r)
//uid, _ := strconv.Atoi(r.FormValue("uid"))
//q := datastore.NewQuery("UserStruct").Filter("Uid =", uid)
//q := datastore.NewQuery("UserStruct").Order("-Date")
//q = q.Filter("Sex =", "1")
//users := make([]UserStruct, 0, 10)
//if _, err := q.GetAll(c, &users); err != nil {
// return
//}
//if len(users) > 0 {
// fmt.Fprint(w, "has user")
// //getUser := users[0].TokenTime
// name := users[0].UserName
// fmt.Fprint(w, name)
// fmt.Fprint(w, strconv.Itoa(len(users)))
//} else {
// fmt.Fprint(w, "no user")
//}
//return
//type Server struct {
// ServerName string
// ServerIP string
//}
//type Serverslice struct {
// Servers []Server
//}
//func main() {
// var s Serverslice
// s.Servers = append(s.Servers, Server{ServerName: "Shanghai_VPN", ServerIP: "127.0.0.1"})
// s.Servers = append(s.Servers, Server{ServerName: "Beijing_VPN", ServerIP: "127.0.0.2"})
// b, err := json.Marshal(s)
// if err != nil {
// fmt.Println("json err:", err)
// }
// fmt.Println(string(b))
//}
//http://hi.baidu.com/liuhelishuang/item/035bc33f23c389c21b9696a7
//http://golang.usr.cc/thread-52517-1-1.html//可能有用,上传file的defer后正确的
//https://github.com/jimmykuu/gopher/blob/master/src/gopher/account.go
//c := appengine.NewContext(r)
// audio := AudioStruct{
// Aid: 2,
// Uid: 2,
// UserName: "yjmiyf",
// AudioKey: " hmjg ",
// AudioTitle: "hjmhgj",
// IsValid: true,
// Favorite: 0,
// Date: time.Now(),
// Size: 1233,
// }
// //_, err1 := datastore.Put(c, datastore.NewIncompleteKey(c, "AudioStruct", nil), &audio)
// aid_str := strconv.Itoa(2)
// key_str := "AudioStruct" + aid_str //应该从七牛返回的来存,这个key,或者从手机传出去的时候就规则好key
// key := datastore.NewKey(c, "AudioStruct", key_str, 0, nil)
// _, err1 := datastore.Put(c, key, &audio)
// if err1 != nil {
// fmt.Fprintln(w, linkJson("0", "msg", "\"fail\""))
// return
// } else {
// fmt.Fprintln(w, linkJson("1", "msg", "\"success\""))
// }
|
package chapter2
import "fmt"
// init demonstrates Go arrays and slices: fixed-size arrays, array
// literals, slice literals, append, make, and sub-slicing.
func init() {
	fmt.Println("=== Slices ===")
	// Fixed-size array filled element by element.
	var carTypes [3]string
	carTypes[0] = "Toyota"
	carTypes[1] = "Ford"
	carTypes[2] = "Nissan"
	fmt.Println(carTypes[1])
	// Array literal.
	carTypes2 := [3]string{"Toyota", "Ford", "Nissan"}
	fmt.Println(carTypes2[0])
	// Slice literal, grown with append.
	carTypesSlice := []string{"Toyota", "Ford", "Nissan"}
	fmt.Println(carTypesSlice[2])
	carTypesSlice = append(carTypesSlice, "Telsa")
	carTypesSlice = append(carTypesSlice, "Append")
	fmt.Println("carTypesSlice len = ", len(carTypesSlice))
	// Slice created with make (length 3).
	carTypesSliceMake := make([]string, 3)
	fmt.Println("carTypesSliceMake len = ", len(carTypesSliceMake))
	carTypesSliceMake[0] = "Toyota"
	carTypesSliceMake[1] = "Ford"
	carTypesSliceMake[2] = "Nissan"
	// Take elements 2 through 3 (half-open range [2:4]) of carTypesSlice.
	carTypesSlice2 := carTypesSlice[2:4]
	fmt.Println("carTypesSlice2 len ", len(carTypesSlice2))
	fmt.Println("slice[2:4] = ", carTypesSlice2)
}
|
package pipeline
import (
"fmt"
"time"
"github.com/sherifabdlnaby/prism/app/component"
"github.com/sherifabdlnaby/prism/app/config"
"github.com/sherifabdlnaby/prism/app/pipeline/persistence"
"github.com/sherifabdlnaby/prism/pkg/job"
"go.uber.org/zap"
)
// wrapper couples a pipeline with the channel jobs are submitted on.
type wrapper struct {
	*pipeline
	jobChan chan job.Job // input channel; closed by Manager.Stop to drain
}
// Manager owns every configured pipeline, its persistence repository,
// the component registry, and a named logger.
type Manager struct {
	pipelines   map[string]wrapper     // keyed by pipeline name
	persistence persistence.Repository // async-job persistence backing store
	registry    component.Registry
	logger      zap.SugaredLogger
}
// Pipelines returns the manager's pipelines keyed by name.
// NOTE(review): this exported method returns a map of the unexported
// wrapper type, which callers outside the package cannot name.
func (m *Manager) Pipelines() map[string]wrapper {
	return m.pipelines
}
// initPipelines Initialize and build all configured pipelines
func NewManager(c config.Pipelines, registry component.Registry, logger zap.SugaredLogger) (*Manager, error) {
m := Manager{
pipelines: make(map[string]wrapper),
registry: registry,
logger: zap.SugaredLogger{},
}
m.logger = *logger.Named("pipeline")
repo, err := persistence.NewRepository(config.EnvPrismDataDir.Lookup(), m.logger)
if err != nil {
return nil, fmt.Errorf("error occurred when constructing pipeline persistence: %s", err.Error())
}
m.persistence = *repo
for name, pipConfig := range c.Pipelines {
// check if pipeline already exists
_, ok := m.pipelines[name]
if ok {
return nil, fmt.Errorf("pipeline with name [%s] already declared", name)
}
pip, err := m.NewPipeline(name, *pipConfig)
if err != nil {
return nil, fmt.Errorf("error occurred when constructing pipeline [%s]: %s", name, err.Error())
}
m.pipelines[name] = *pip
}
return &m, nil
}
// PipelinesReceiveChan exposes each pipeline's job channel as a
// send-only channel, keyed by pipeline name.
func (m *Manager) PipelinesReceiveChan() map[string]chan<- job.Job {
	out := make(map[string]chan<- job.Job, len(m.pipelines))
	for name, wr := range m.pipelines {
		out[name] = wr.jobChan
	}
	return out
}
// startPipelines start all pipelines and start accepting input
func (m *Manager) Start(name string) error {
var err error
pipeline, ok := m.pipelines[name]
if !ok {
err = fmt.Errorf("pipeline %s doesn't exist", name)
m.logger.Error(err.Error())
return err
}
err = pipeline.Start()
if err != nil {
m.logger.Error(err.Error())
return err
}
return nil
}
// stopPipelines Stop pipelines by calling their Stop() function, any request to these pipelines will return error.
func (m *Manager) Stop(name string) error {
var err error
pipeline, ok := m.pipelines[name]
if !ok {
err = fmt.Errorf("pipeline %s doesn't exist", name)
m.logger.Error(err.Error())
return err
}
errChan := make(chan error)
go func() {
close(pipeline.jobChan)
err = pipeline.Stop()
if err != nil {
m.logger.Error(err.Error())
}
errChan <- err
}()
for {
select {
case <-time.Tick(50 * time.Millisecond):
// Print how many active job for visibility
m.logger.Infof("stopping pipeline [%s]... (jobs in progress: %d)", pipeline.name, pipeline.ActiveJobs())
case err := <-errChan:
return err
}
}
}
// stopPipelines Stop pipelines by calling their Stop() function, any request to these pipelines will return error.
func (m *Manager) Recover(name string) error {
var err error
pipeline, ok := m.pipelines[name]
if !ok {
err = fmt.Errorf("pipeline %s doesn't exist", name)
m.logger.Error(err.Error())
return err
}
err = pipeline.recoverAsyncJobs()
if err != nil {
m.logger.Error(err.Error())
return err
}
return nil
}
// startPipelines start all pipelines and start accepting input
func (m *Manager) StartAll() error {
errChan := make(chan error)
// stop pipelines concurrently
for name := range m.pipelines {
go func(name string) {
err := m.Start(name)
errChan <- err
}(name)
}
// wait for errors
var err error
for i := 0; i < len(m.pipelines); i++ {
err1 := <-errChan
if err1 != nil {
err = err1
}
}
return err
}
// stopPipelines Stop pipelines by calling their Stop() function, any request to these pipelines will return error.
func (m *Manager) StopAll() error {
errChan := make(chan error)
// stop pipelines concurrently
for name := range m.pipelines {
go func(name string) {
err := m.Stop(name)
errChan <- err
}(name)
}
// wait for errors
var err error
for i := 0; i < len(m.pipelines); i++ {
err1 := <-errChan
if err1 != nil {
err = err1
}
}
return err
}
// stopPipelines Stop pipelines by calling their Stop() function, any request to these pipelines will return error.
func (m *Manager) RecoverAsyncAll() error {
errChan := make(chan error)
// recover pipelines concurrently
for name := range m.pipelines {
go func(name string) {
err := m.Recover(name)
errChan <- err
}(name)
}
// wait for errors
var err error
for i := 0; i < len(m.pipelines); i++ {
err1 := <-errChan
if err1 != nil {
err = err1
}
}
return err
}
|
package main
import (
"fmt"
)
// Greeter holds a configurable greeting phrase.
type Greeter struct {
	helloPhrase string // the message Hello writes to stdout
}
// Hello writes the configured greeting phrase to standard output.
func (g Greeter) Hello() {
	fmt.Println(g.helloPhrase)
}
// main constructs a Greeter with a fixed phrase and prints its greeting.
func main() {
	greeter := Greeter{helloPhrase: "Hey everyone!"}
	greeter.Hello()
}
|
package script
import "reflect"
// Table is the script runtime's table (string-keyed map) type; it embeds
// the generic Type representation.
type Table struct {
	Type
}
// Make builds the runtime representation of a table collection.
//
// collection must be a pointer to a struct with a literal `L` field
// whose element type determines the table's value type; Make panics
// otherwise. It installs:
//   - a Table runtime value whose zero value is an empty map keyed by
//     string,
//   - an Insert method (if the struct declares one) forwarding to
//     q.Insert, and
//   - a Lookup method (if declared) wrapping q.Lookup's result in the
//     element type.
//
// NOTE(review): the sizes variadic parameter is currently unused.
func (*Table) Make(q Ctx, collection Collection, sizes ...int) {
	var T = reflect.TypeOf(collection).Elem()
	var V = reflect.ValueOf(collection).Elem()
	var L, ok = T.FieldByName("L")
	if !ok {
		panic("table type must have literal `L` field")
	}
	var ElementType = L.Type.Elem()
	// Map type: string -> Go runtime type of the element's zero Value.
	var ZeroType = reflect.MapOf(reflect.TypeOf(""),
		GoTypeOf(reflect.Zero(ElementType).Interface().(Value)))
	var Zero = reflect.MakeMap(ZeroType).Interface()
	// Create a runtime representation of the table.
	V.FieldByName("Table").Set(reflect.ValueOf(Table{
		NewType(q, func() interface{} {
			return Zero
		}),
	}))
	// Create the Insert (mutate) method when the collection declares one.
	if Mutate, ok := T.FieldByName("Insert"); ok {
		V.FieldByName("Insert").Set(reflect.MakeFunc(Mutate.Type,
			func(args []reflect.Value) []reflect.Value {
				q.Insert(collection, args[0].Interface().(String), args[1].Interface().(Value))
				return nil
			}))
	}
	// Create the Lookup (index) method when the collection declares one.
	if Index, ok := T.FieldByName("Lookup"); ok {
		V.FieldByName("Lookup").Set(reflect.MakeFunc(Index.Type,
			func(args []reflect.Value) (returns []reflect.Value) {
				var result = reflect.New(ElementType).Elem()
				result.FieldByName("Type").Set(reflect.ValueOf(Type{
					Ctx:     q,
					Runtime: q.Lookup(collection, args[0].Interface().(String)),
				}))
				returns = append(returns, result)
				return
			}))
	}
}
|
package main
import (
"os"
"text/template"
)
// person is the data model rendered by the greeting template.
type person struct {
	Name string // rendered via {{ .Name }}
	Age  int    // rendered via {{ .Age }}
}
// main renders a greeting template for a sample person to stdout.
//
// Fix: the original discarded the error from template.Parse with a blank
// identifier; a malformed template would then surface only as a
// confusing failure in Execute. Both errors are now checked.
func main() {
	p := person{"kamil", 35}
	tpl, err := template.New("test").Parse("Hello {{ .Name }}, you are {{ .Age }} years old")
	if err != nil {
		panic(err)
	}
	if err := tpl.Execute(os.Stdout, p); err != nil {
		panic(err)
	}
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package beta
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"strings"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
)
// validate checks that the Bucket's required fields (Project, location, name)
// are set and recursively validates every populated nested message.
func (r *Bucket) validate() error {
	if err := dcl.RequiredParameter(r.Project, "Project"); err != nil {
		return err
	}
	if err := dcl.Required(r, "location"); err != nil {
		return err
	}
	if err := dcl.Required(r, "name"); err != nil {
		return err
	}
	// Nested messages are only validated when present.
	if !dcl.IsEmptyValueIndirect(r.Lifecycle) {
		if err := r.Lifecycle.validate(); err != nil {
			return err
		}
	}
	if !dcl.IsEmptyValueIndirect(r.Logging) {
		if err := r.Logging.validate(); err != nil {
			return err
		}
	}
	if !dcl.IsEmptyValueIndirect(r.Versioning) {
		if err := r.Versioning.validate(); err != nil {
			return err
		}
	}
	if !dcl.IsEmptyValueIndirect(r.Website) {
		if err := r.Website.validate(); err != nil {
			return err
		}
	}
	return nil
}
// validate is a no-op: BucketCors declares no required fields.
func (r *BucketCors) validate() error {
	return nil
}
// validate is a no-op: BucketLifecycle declares no required fields.
func (r *BucketLifecycle) validate() error {
	return nil
}
// validate recursively validates the rule's Action and Condition when present.
func (r *BucketLifecycleRule) validate() error {
	if !dcl.IsEmptyValueIndirect(r.Action) {
		if err := r.Action.validate(); err != nil {
			return err
		}
	}
	if !dcl.IsEmptyValueIndirect(r.Condition) {
		if err := r.Condition.validate(); err != nil {
			return err
		}
	}
	return nil
}
// validate is a no-op: BucketLifecycleRuleAction declares no required fields.
func (r *BucketLifecycleRuleAction) validate() error {
	return nil
}
// validate is a no-op: BucketLifecycleRuleCondition declares no required fields.
func (r *BucketLifecycleRuleCondition) validate() error {
	return nil
}
// validate is a no-op: BucketLogging declares no required fields.
func (r *BucketLogging) validate() error {
	return nil
}
// validate is a no-op: BucketVersioning declares no required fields.
func (r *BucketVersioning) validate() error {
	return nil
}
// validate is a no-op: BucketWebsite declares no required fields.
func (r *BucketWebsite) validate() error {
	return nil
}
// basePath returns the root endpoint of the GCS JSON API; the URL template has
// no substitution parameters, so an empty map is supplied.
func (r *Bucket) basePath() string {
	return dcl.Nprintf("https://www.googleapis.com/storage/v1/", map[string]interface{}{})
}
// getURL returns the URL used to GET this Bucket resource, substituting the
// normalized name and project into the path/query template.
func (r *Bucket) getURL(userBasePath string) (string, error) {
	normalized := r.urlNormalized()
	fields := map[string]interface{}{
		"project": dcl.ValueOrEmptyString(normalized.Project),
		"name":    dcl.ValueOrEmptyString(normalized.Name),
	}
	return dcl.URL("b/{{name}}?userProject={{project}}", normalized.basePath(), userBasePath, fields), nil
}
// listURL returns the URL used to list Buckets in this resource's project.
func (r *Bucket) listURL(userBasePath string) (string, error) {
	normalized := r.urlNormalized()
	fields := map[string]interface{}{
		"project": dcl.ValueOrEmptyString(normalized.Project),
	}
	return dcl.URL("b?project={{project}}", normalized.basePath(), userBasePath, fields), nil
}
// createURL returns the URL used to POST (create) a Bucket in this project.
func (r *Bucket) createURL(userBasePath string) (string, error) {
	normalized := r.urlNormalized()
	fields := map[string]interface{}{
		"project": dcl.ValueOrEmptyString(normalized.Project),
	}
	return dcl.URL("b?project={{project}}", normalized.basePath(), userBasePath, fields), nil
}
// deleteURL returns the URL used to DELETE this Bucket resource.
func (r *Bucket) deleteURL(userBasePath string) (string, error) {
	normalized := r.urlNormalized()
	fields := map[string]interface{}{
		"project": dcl.ValueOrEmptyString(normalized.Project),
		"name":    dcl.ValueOrEmptyString(normalized.Name),
	}
	return dcl.URL("b/{{name}}?userProject={{project}}", normalized.basePath(), userBasePath, fields), nil
}
// SetPolicyURL returns the URL for setting the IAM policy on this Bucket.
// Uses dcl.ValueOrEmptyString rather than dereferencing nr.Name directly, so a
// nil Name yields an (invalid) empty substitution instead of a panic — this
// matches the nil-safe pattern used by getURL and deleteURL above.
func (r *Bucket) SetPolicyURL(userBasePath string) string {
	nr := r.urlNormalized()
	fields := map[string]interface{}{
		"name": dcl.ValueOrEmptyString(nr.Name),
	}
	return dcl.URL("b/{{name}}/iam", nr.basePath(), userBasePath, fields)
}
// SetPolicyVerb returns the HTTP verb used when setting this resource's IAM policy.
func (r *Bucket) SetPolicyVerb() string {
	return "PUT"
}
// getPolicyURL returns the URL for fetching the IAM policy of this Bucket.
// Uses dcl.ValueOrEmptyString rather than dereferencing nr.Name directly, so a
// nil Name yields an (invalid) empty substitution instead of a panic — this
// matches the nil-safe pattern used by getURL and deleteURL above.
func (r *Bucket) getPolicyURL(userBasePath string) string {
	nr := r.urlNormalized()
	fields := map[string]interface{}{
		"name": dcl.ValueOrEmptyString(nr.Name),
	}
	return dcl.URL("b/{{name}}/iam", nr.basePath(), userBasePath, fields)
}
// IAMPolicyVersion returns the IAM policy schema version requested for this resource.
func (r *Bucket) IAMPolicyVersion() int {
	return 3
}
// bucketApiOperation represents a mutable operation in the underlying REST
// API such as Create, Update, or Delete.
type bucketApiOperation interface {
	// do performs the operation against the API for the given resource using the client.
	do(context.Context, *Bucket, *Client) error
}
// newUpdateBucketUpdateRequest creates a request for an
// Bucket resource's update update type by filling in the update
// fields based on the intended state of the resource.
// Only fields that expand to non-empty values are included in the request map.
func newUpdateBucketUpdateRequest(ctx context.Context, f *Bucket, c *Client) (map[string]interface{}, error) {
	req := map[string]interface{}{}
	res := f
	_ = res
	// Cors is a slice: included whenever the expansion is non-nil (even if empty).
	if v, err := expandBucketCorsSlice(c, f.Cors, res); err != nil {
		return nil, fmt.Errorf("error expanding Cors into cors: %w", err)
	} else if v != nil {
		req["cors"] = v
	}
	if v, err := expandBucketLifecycle(c, f.Lifecycle, res); err != nil {
		return nil, fmt.Errorf("error expanding Lifecycle into lifecycle: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		req["lifecycle"] = v
	}
	if v, err := expandBucketLogging(c, f.Logging, res); err != nil {
		return nil, fmt.Errorf("error expanding Logging into logging: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		req["logging"] = v
	}
	// StorageClass is a scalar; no expansion needed.
	if v := f.StorageClass; !dcl.IsEmptyValueIndirect(v) {
		req["storageClass"] = v
	}
	if v, err := expandBucketVersioning(c, f.Versioning, res); err != nil {
		return nil, fmt.Errorf("error expanding Versioning into versioning: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		req["versioning"] = v
	}
	if v, err := expandBucketWebsite(c, f.Website, res); err != nil {
		return nil, fmt.Errorf("error expanding Website into website: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		req["website"] = v
	}
	return req, nil
}
// marshalUpdateBucketUpdateRequest converts the update into
// the final JSON request body.
func marshalUpdateBucketUpdateRequest(c *Client, m map[string]interface{}) ([]byte, error) {
	return json.Marshal(m)
}
// updateBucketUpdateOperation carries the state needed to apply the "update"
// update type to a Bucket.
type updateBucketUpdateOperation struct {
	// If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated.
	// Usually it will be nil - this is to prevent us from accidentally depending on apply
	// options, which should usually be unnecessary.
	ApplyOptions []dcl.ApplyOption
	// FieldDiffs lists the differences that triggered this update.
	FieldDiffs []*dcl.FieldDiff
}
// do creates a request and sends it to the appropriate URL. In most operations,
// do will transcribe a subset of the resource into a request object and send a
// PUT request to a single URL.
// Here the update is sent as a PATCH; the preliminary Get confirms the resource exists.
func (op *updateBucketUpdateOperation) do(ctx context.Context, r *Bucket, c *Client) error {
	_, err := c.GetBucket(ctx, r)
	if err != nil {
		return err
	}
	u, err := r.updateURL(c.Config.BasePath, "update")
	if err != nil {
		return err
	}
	req, err := newUpdateBucketUpdateRequest(ctx, r, c)
	if err != nil {
		return err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req)
	body, err := marshalUpdateBucketUpdateRequest(c, req)
	if err != nil {
		return err
	}
	_, err = dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider)
	if err != nil {
		return err
	}
	return nil
}
// listBucketRaw performs one page of the Bucket list call and returns the raw
// response body. pageToken/pageSize are added as query parameters when set
// (pageSize equal to BucketMaxPage means "use the server default").
func (c *Client) listBucketRaw(ctx context.Context, r *Bucket, pageToken string, pageSize int32) ([]byte, error) {
	u, err := r.urlNormalized().listURL(c.Config.BasePath)
	if err != nil {
		return nil, err
	}
	m := make(map[string]string)
	if pageToken != "" {
		m["pageToken"] = pageToken
	}
	if pageSize != BucketMaxPage {
		m["pageSize"] = fmt.Sprintf("%v", pageSize)
	}
	u, err = dcl.AddQueryParams(u, m)
	if err != nil {
		return nil, err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
	if err != nil {
		return nil, err
	}
	defer resp.Response.Body.Close()
	return ioutil.ReadAll(resp.Response.Body)
}
// listBucketOperation models the JSON shape of a Bucket list response page.
type listBucketOperation struct {
	Items []map[string]interface{} `json:"items"`
	Token string                   `json:"nextPageToken"`
}
// listBucket fetches one page of Buckets, unmarshals each item into a *Bucket,
// and returns the page's items along with the next-page token.
func (c *Client) listBucket(ctx context.Context, r *Bucket, pageToken string, pageSize int32) ([]*Bucket, string, error) {
	b, err := c.listBucketRaw(ctx, r, pageToken, pageSize)
	if err != nil {
		return nil, "", err
	}
	var m listBucketOperation
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, "", err
	}
	var l []*Bucket
	for _, v := range m.Items {
		res, err := unmarshalMapBucket(v, c, r)
		if err != nil {
			return nil, m.Token, err
		}
		// The list response does not echo the project; carry it over from the request.
		res.Project = r.Project
		l = append(l, res)
	}
	return l, m.Token, nil
}
// deleteAllBucket deletes every Bucket in resources for which the filter f
// returns true. Individual deletion failures are collected rather than
// aborting the sweep, so one failed delete does not stop the rest; the
// collected failure messages (if any) are returned as a single error.
func (c *Client) deleteAllBucket(ctx context.Context, f func(*Bucket) bool, resources []*Bucket) error {
	var errors []string
	for _, res := range resources {
		if !f(res) {
			continue
		}
		// We do not want deleteAll to fail on a deletion or else it will stop deleting other resources.
		if err := c.DeleteBucket(ctx, res); err != nil {
			errors = append(errors, err.Error())
		}
	}
	if len(errors) > 0 {
		return fmt.Errorf("%v", strings.Join(errors, "\n"))
	}
	return nil
}
// deleteBucketOperation is the stateless bucketApiOperation that deletes a Bucket.
type deleteBucketOperation struct{}
// do deletes the Bucket. A resource that is already gone is treated as success.
// After the DELETE, it polls Get with a bounded retry budget to confirm the
// resource is no longer visible.
func (op *deleteBucketOperation) do(ctx context.Context, r *Bucket, c *Client) error {
	r, err := c.GetBucket(ctx, r)
	if err != nil {
		if dcl.IsNotFound(err) {
			c.Config.Logger.InfoWithContextf(ctx, "Bucket not found, returning. Original error: %v", err)
			return nil
		}
		c.Config.Logger.WarningWithContextf(ctx, "GetBucket checking for existence. error: %v", err)
		return err
	}
	u, err := r.deleteURL(c.Config.BasePath)
	if err != nil {
		return err
	}
	// Delete should never have a body
	body := &bytes.Buffer{}
	_, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider)
	if err != nil {
		return fmt.Errorf("failed to delete Bucket: %w", err)
	}
	// We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration.
	// This is the reason we are adding retry to handle that case.
	retriesRemaining := 10
	// NOTE(review): the error returned by dcl.Do is discarded, so a resource
	// that never disappears does not fail this call — confirm that is intended.
	dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) {
		_, err := c.GetBucket(ctx, r)
		if dcl.IsNotFound(err) {
			return nil, nil
		}
		if retriesRemaining > 0 {
			retriesRemaining--
			return &dcl.RetryDetails{}, dcl.OperationNotDone{}
		}
		return nil, dcl.NotDeletedError{ExistingResource: r}
	}, c.Config.RetryProvider)
	return nil
}
// Create operations are similar to Update operations, although they do not have
// specific request objects. The Create request object is the json encoding of
// the resource, which is modified by res.marshal to form the base request body.
type createBucketOperation struct {
	// response holds the decoded JSON body of the create call for later inspection.
	response map[string]interface{}
}
// FirstResponse returns the create call's decoded response body and whether one was recorded.
func (op *createBucketOperation) FirstResponse() (map[string]interface{}, bool) {
	return op.response, len(op.response) > 0
}
// do POSTs the marshaled resource to the create URL, records the decoded
// response on the operation, and verifies the resource with a follow-up Get.
func (op *createBucketOperation) do(ctx context.Context, r *Bucket, c *Client) error {
	c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r)
	u, err := r.createURL(c.Config.BasePath)
	if err != nil {
		return err
	}
	req, err := r.marshal(c)
	if err != nil {
		return err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider)
	if err != nil {
		return err
	}
	o, err := dcl.ResponseBodyAsJSON(resp)
	if err != nil {
		return fmt.Errorf("error decoding response body into JSON: %w", err)
	}
	op.response = o
	// Confirm the resource is actually readable before reporting success.
	if _, err := c.GetBucket(ctx, r); err != nil {
		c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err)
		return err
	}
	return nil
}
// getBucketRaw issues a GET for the Bucket identified by r and returns the raw
// response body. The body is returned directly from ioutil.ReadAll, matching
// the style of listBucketRaw (the previous explicit err-check before returning
// (b, nil) was redundant).
func (c *Client) getBucketRaw(ctx context.Context, r *Bucket) ([]byte, error) {
	u, err := r.getURL(c.Config.BasePath)
	if err != nil {
		return nil, err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
	if err != nil {
		return nil, err
	}
	defer resp.Response.Body.Close()
	return ioutil.ReadAll(resp.Response.Body)
}
// bucketDiffsForRawDesired fetches the current state of the Bucket (honoring a
// state hint from opts if present), canonicalizes both the initial and desired
// states, and returns them together with the field diffs between them.
// If the resource does not exist, initial and diffs are nil and only the
// canonicalized desired state is returned.
func (c *Client) bucketDiffsForRawDesired(ctx context.Context, rawDesired *Bucket, opts ...dcl.ApplyOption) (initial, desired *Bucket, diffs []*dcl.FieldDiff, err error) {
	c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...")
	// First, let us see if the user provided a state hint. If they did, we will start fetching based on that.
	var fetchState *Bucket
	if sh := dcl.FetchStateHint(opts); sh != nil {
		if r, ok := sh.(*Bucket); !ok {
			c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Bucket, got %T", sh)
		} else {
			fetchState = r
		}
	}
	if fetchState == nil {
		fetchState = rawDesired
	}
	// 1.2: Retrieval of raw initial state from API
	rawInitial, err := c.GetBucket(ctx, fetchState)
	if rawInitial == nil {
		if !dcl.IsNotFound(err) {
			c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Bucket resource already exists: %s", err)
			return nil, nil, nil, fmt.Errorf("failed to retrieve Bucket resource: %v", err)
		}
		c.Config.Logger.InfoWithContext(ctx, "Found that Bucket resource did not exist.")
		// Perform canonicalization to pick up defaults.
		desired, err = canonicalizeBucketDesiredState(rawDesired, rawInitial)
		return nil, desired, nil, err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Bucket: %v", rawInitial)
	c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Bucket: %v", rawDesired)
	// The Get call applies postReadExtract and so the result may contain fields that are not part of API version.
	if err := extractBucketFields(rawInitial); err != nil {
		return nil, nil, nil, err
	}
	// 1.3: Canonicalize raw initial state into initial state.
	initial, err = canonicalizeBucketInitialState(rawInitial, rawDesired)
	if err != nil {
		return nil, nil, nil, err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Bucket: %v", initial)
	// 1.4: Canonicalize raw desired state into desired state.
	desired, err = canonicalizeBucketDesiredState(rawDesired, rawInitial, opts...)
	if err != nil {
		return nil, nil, nil, err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Bucket: %v", desired)
	// 2.1: Comparison of initial and desired state.
	diffs, err = diffBucket(c, desired, initial, opts...)
	return initial, desired, diffs, err
}
// canonicalizeBucketInitialState currently returns the fetched state unchanged.
func canonicalizeBucketInitialState(rawInitial, rawDesired *Bucket) (*Bucket, error) {
	// TODO(magic-modules-eng): write canonicalizer once relevant traits are added.
	return rawInitial, nil
}
/*
* Canonicalizers
*
* These are responsible for converting either a user-specified config or a
* GCP API response to a standard format that can be used for difference checking.
* */
// canonicalizeBucketDesiredState merges the user's desired state with the
// fetched initial state: fields that are equivalent keep the server's value so
// spurious diffs are avoided, otherwise the desired value wins.
func canonicalizeBucketDesiredState(rawDesired, rawInitial *Bucket, opts ...dcl.ApplyOption) (*Bucket, error) {
	if rawInitial == nil {
		// Since the initial state is empty, the desired state is all we have.
		// We canonicalize the remaining nested objects with nil to pick up defaults.
		rawDesired.Lifecycle = canonicalizeBucketLifecycle(rawDesired.Lifecycle, nil, opts...)
		rawDesired.Logging = canonicalizeBucketLogging(rawDesired.Logging, nil, opts...)
		rawDesired.Versioning = canonicalizeBucketVersioning(rawDesired.Versioning, nil, opts...)
		rawDesired.Website = canonicalizeBucketWebsite(rawDesired.Website, nil, opts...)
		return rawDesired, nil
	}
	canonicalDesired := &Bucket{}
	if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) {
		canonicalDesired.Project = rawInitial.Project
	} else {
		canonicalDesired.Project = rawDesired.Project
	}
	if dcl.StringCanonicalize(rawDesired.Location, rawInitial.Location) {
		canonicalDesired.Location = rawInitial.Location
	} else {
		canonicalDesired.Location = rawDesired.Location
	}
	if dcl.StringCanonicalize(rawDesired.Name, rawInitial.Name) {
		canonicalDesired.Name = rawInitial.Name
	} else {
		canonicalDesired.Name = rawDesired.Name
	}
	canonicalDesired.Cors = canonicalizeBucketCorsSlice(rawDesired.Cors, rawInitial.Cors, opts...)
	canonicalDesired.Lifecycle = canonicalizeBucketLifecycle(rawDesired.Lifecycle, rawInitial.Lifecycle, opts...)
	canonicalDesired.Logging = canonicalizeBucketLogging(rawDesired.Logging, rawInitial.Logging, opts...)
	if dcl.IsZeroValue(rawDesired.StorageClass) || (dcl.IsEmptyValueIndirect(rawDesired.StorageClass) && dcl.IsEmptyValueIndirect(rawInitial.StorageClass)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		canonicalDesired.StorageClass = rawInitial.StorageClass
	} else {
		canonicalDesired.StorageClass = rawDesired.StorageClass
	}
	canonicalDesired.Versioning = canonicalizeBucketVersioning(rawDesired.Versioning, rawInitial.Versioning, opts...)
	canonicalDesired.Website = canonicalizeBucketWebsite(rawDesired.Website, rawInitial.Website, opts...)
	return canonicalDesired, nil
}
// canonicalizeBucketNewState reconciles the state returned by the API after a
// mutation (rawNew) with the desired state that produced it, preferring the
// desired representation whenever the two are equivalent.
func canonicalizeBucketNewState(c *Client, rawNew, rawDesired *Bucket) (*Bucket, error) {
	rawNew.Project = rawDesired.Project
	if dcl.IsEmptyValueIndirect(rawNew.Location) && dcl.IsEmptyValueIndirect(rawDesired.Location) {
		rawNew.Location = rawDesired.Location
	} else {
		if dcl.StringCanonicalize(rawDesired.Location, rawNew.Location) {
			rawNew.Location = rawDesired.Location
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) {
		rawNew.Name = rawDesired.Name
	} else {
		if dcl.StringCanonicalize(rawDesired.Name, rawNew.Name) {
			rawNew.Name = rawDesired.Name
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.Cors) && dcl.IsEmptyValueIndirect(rawDesired.Cors) {
		rawNew.Cors = rawDesired.Cors
	} else {
		rawNew.Cors = canonicalizeNewBucketCorsSlice(c, rawDesired.Cors, rawNew.Cors)
	}
	if dcl.IsEmptyValueIndirect(rawNew.Lifecycle) && dcl.IsEmptyValueIndirect(rawDesired.Lifecycle) {
		rawNew.Lifecycle = rawDesired.Lifecycle
	} else {
		rawNew.Lifecycle = canonicalizeNewBucketLifecycle(c, rawDesired.Lifecycle, rawNew.Lifecycle)
	}
	if dcl.IsEmptyValueIndirect(rawNew.Logging) && dcl.IsEmptyValueIndirect(rawDesired.Logging) {
		rawNew.Logging = rawDesired.Logging
	} else {
		rawNew.Logging = canonicalizeNewBucketLogging(c, rawDesired.Logging, rawNew.Logging)
	}
	if dcl.IsEmptyValueIndirect(rawNew.StorageClass) && dcl.IsEmptyValueIndirect(rawDesired.StorageClass) {
		rawNew.StorageClass = rawDesired.StorageClass
	} else {
		// Intentionally empty: a non-empty StorageClass reported by the API is kept as-is.
	}
	if dcl.IsEmptyValueIndirect(rawNew.Versioning) && dcl.IsEmptyValueIndirect(rawDesired.Versioning) {
		rawNew.Versioning = rawDesired.Versioning
	} else {
		rawNew.Versioning = canonicalizeNewBucketVersioning(c, rawDesired.Versioning, rawNew.Versioning)
	}
	if dcl.IsEmptyValueIndirect(rawNew.Website) && dcl.IsEmptyValueIndirect(rawDesired.Website) {
		rawNew.Website = rawDesired.Website
	} else {
		rawNew.Website = canonicalizeNewBucketWebsite(c, rawDesired.Website, rawNew.Website)
	}
	return rawNew, nil
}
// canonicalizeBucketCors merges a desired BucketCors with its initial (server)
// counterpart, keeping the initial value where the two are equivalent.
func canonicalizeBucketCors(des, initial *BucketCors, opts ...dcl.ApplyOption) *BucketCors {
	if des == nil {
		return initial
	}
	if des.empty {
		// An explicitly-empty desired object is returned untouched.
		return des
	}
	if initial == nil {
		return des
	}
	cDes := &BucketCors{}
	if dcl.IsZeroValue(des.MaxAgeSeconds) || (dcl.IsEmptyValueIndirect(des.MaxAgeSeconds) && dcl.IsEmptyValueIndirect(initial.MaxAgeSeconds)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.MaxAgeSeconds = initial.MaxAgeSeconds
	} else {
		cDes.MaxAgeSeconds = des.MaxAgeSeconds
	}
	if dcl.StringArrayCanonicalize(des.Method, initial.Method) {
		cDes.Method = initial.Method
	} else {
		cDes.Method = des.Method
	}
	if dcl.StringArrayCanonicalize(des.Origin, initial.Origin) {
		cDes.Origin = initial.Origin
	} else {
		cDes.Origin = des.Origin
	}
	if dcl.StringArrayCanonicalize(des.ResponseHeader, initial.ResponseHeader) {
		cDes.ResponseHeader = initial.ResponseHeader
	} else {
		cDes.ResponseHeader = des.ResponseHeader
	}
	return cDes
}
// canonicalizeBucketCorsSlice canonicalizes each desired element; initial
// elements are matched positionally only when the slice lengths agree.
func canonicalizeBucketCorsSlice(des, initial []BucketCors, opts ...dcl.ApplyOption) []BucketCors {
	if des == nil {
		return initial
	}
	if len(des) != len(initial) {
		// Lengths differ: canonicalize each desired element against nil.
		items := make([]BucketCors, 0, len(des))
		for _, d := range des {
			cd := canonicalizeBucketCors(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}
	items := make([]BucketCors, 0, len(des))
	for i, d := range des {
		cd := canonicalizeBucketCors(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items
}
// canonicalizeNewBucketCors reconciles an API-returned BucketCors (nw) with the
// desired value, preferring the desired string slices when equivalent.
func canonicalizeNewBucketCors(c *Client, des, nw *BucketCors) *BucketCors {
	if des == nil {
		return nw
	}
	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for BucketCors while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}
	if dcl.StringArrayCanonicalize(des.Method, nw.Method) {
		nw.Method = des.Method
	}
	if dcl.StringArrayCanonicalize(des.Origin, nw.Origin) {
		nw.Origin = des.Origin
	}
	if dcl.StringArrayCanonicalize(des.ResponseHeader, nw.ResponseHeader) {
		nw.ResponseHeader = des.ResponseHeader
	}
	return nw
}
// canonicalizeNewBucketCorsSet treats the slices as sets: each desired element
// is matched (by zero-diff comparison) against an actual element, canonicalized,
// and the match removed from nw; unmatched actual elements are appended at the end.
func canonicalizeNewBucketCorsSet(c *Client, des, nw []BucketCors) []BucketCors {
	if des == nil {
		return nw
	}
	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []BucketCors
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareBucketCorsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewBucketCors(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)
	return items
}
// canonicalizeNewBucketCorsSlice canonicalizes the actual slice element-by-element
// against the desired slice. If the lengths differ a diff will surface later,
// so the actual slice is returned untouched.
func canonicalizeNewBucketCorsSlice(c *Client, des, nw []BucketCors) []BucketCors {
	if des == nil {
		return nw
	}
	if len(des) != len(nw) {
		return nw
	}
	var items []BucketCors
	for idx := range des {
		actual := nw[idx]
		items = append(items, *canonicalizeNewBucketCors(c, &des[idx], &actual))
	}
	return items
}
// canonicalizeBucketLifecycle merges a desired BucketLifecycle with its initial
// counterpart by canonicalizing the nested Rule slice.
func canonicalizeBucketLifecycle(des, initial *BucketLifecycle, opts ...dcl.ApplyOption) *BucketLifecycle {
	if des == nil {
		return initial
	}
	if des.empty {
		// An explicitly-empty desired object is returned untouched.
		return des
	}
	if initial == nil {
		return des
	}
	cDes := &BucketLifecycle{}
	cDes.Rule = canonicalizeBucketLifecycleRuleSlice(des.Rule, initial.Rule, opts...)
	return cDes
}
// canonicalizeBucketLifecycleSlice canonicalizes each desired element; initial
// elements are matched positionally only when the slice lengths agree.
func canonicalizeBucketLifecycleSlice(des, initial []BucketLifecycle, opts ...dcl.ApplyOption) []BucketLifecycle {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}
	if len(des) != len(initial) {
		// Lengths differ: canonicalize each desired element against nil.
		items := make([]BucketLifecycle, 0, len(des))
		for _, d := range des {
			cd := canonicalizeBucketLifecycle(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}
	items := make([]BucketLifecycle, 0, len(des))
	for i, d := range des {
		cd := canonicalizeBucketLifecycle(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items
}
// canonicalizeNewBucketLifecycle reconciles an API-returned BucketLifecycle with
// the desired value by canonicalizing the nested Rule slice.
func canonicalizeNewBucketLifecycle(c *Client, des, nw *BucketLifecycle) *BucketLifecycle {
	if des == nil {
		return nw
	}
	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for BucketLifecycle while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}
	nw.Rule = canonicalizeNewBucketLifecycleRuleSlice(c, des.Rule, nw.Rule)
	return nw
}
// canonicalizeNewBucketLifecycleSet treats the slices as sets: each desired
// element is matched (by zero-diff comparison) against an actual element,
// canonicalized, and the match removed from nw; unmatched actual elements are
// appended at the end.
func canonicalizeNewBucketLifecycleSet(c *Client, des, nw []BucketLifecycle) []BucketLifecycle {
	if des == nil {
		return nw
	}
	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []BucketLifecycle
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareBucketLifecycleNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewBucketLifecycle(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)
	return items
}
// canonicalizeNewBucketLifecycleSlice canonicalizes the actual slice
// element-by-element against the desired slice. If the lengths differ a diff
// will surface later, so the actual slice is returned untouched.
func canonicalizeNewBucketLifecycleSlice(c *Client, des, nw []BucketLifecycle) []BucketLifecycle {
	if des == nil {
		return nw
	}
	if len(des) != len(nw) {
		return nw
	}
	var items []BucketLifecycle
	for idx := range des {
		actual := nw[idx]
		items = append(items, *canonicalizeNewBucketLifecycle(c, &des[idx], &actual))
	}
	return items
}
// canonicalizeBucketLifecycleRule merges a desired rule with its initial
// counterpart by canonicalizing the nested Action and Condition.
func canonicalizeBucketLifecycleRule(des, initial *BucketLifecycleRule, opts ...dcl.ApplyOption) *BucketLifecycleRule {
	if des == nil {
		return initial
	}
	if des.empty {
		// An explicitly-empty desired object is returned untouched.
		return des
	}
	if initial == nil {
		return des
	}
	cDes := &BucketLifecycleRule{}
	cDes.Action = canonicalizeBucketLifecycleRuleAction(des.Action, initial.Action, opts...)
	cDes.Condition = canonicalizeBucketLifecycleRuleCondition(des.Condition, initial.Condition, opts...)
	return cDes
}
// canonicalizeBucketLifecycleRuleSlice canonicalizes each desired element;
// initial elements are matched positionally only when the slice lengths agree.
func canonicalizeBucketLifecycleRuleSlice(des, initial []BucketLifecycleRule, opts ...dcl.ApplyOption) []BucketLifecycleRule {
	if des == nil {
		return initial
	}
	if len(des) != len(initial) {
		// Lengths differ: canonicalize each desired element against nil.
		items := make([]BucketLifecycleRule, 0, len(des))
		for _, d := range des {
			cd := canonicalizeBucketLifecycleRule(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}
	items := make([]BucketLifecycleRule, 0, len(des))
	for i, d := range des {
		cd := canonicalizeBucketLifecycleRule(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items
}
// canonicalizeNewBucketLifecycleRule reconciles an API-returned rule with the
// desired value by canonicalizing the nested Action and Condition.
func canonicalizeNewBucketLifecycleRule(c *Client, des, nw *BucketLifecycleRule) *BucketLifecycleRule {
	if des == nil {
		return nw
	}
	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for BucketLifecycleRule while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}
	nw.Action = canonicalizeNewBucketLifecycleRuleAction(c, des.Action, nw.Action)
	nw.Condition = canonicalizeNewBucketLifecycleRuleCondition(c, des.Condition, nw.Condition)
	return nw
}
// canonicalizeNewBucketLifecycleRuleSet treats the slices as sets: each desired
// element is matched (by zero-diff comparison) against an actual element,
// canonicalized, and the match removed from nw; unmatched actual elements are
// appended at the end.
func canonicalizeNewBucketLifecycleRuleSet(c *Client, des, nw []BucketLifecycleRule) []BucketLifecycleRule {
	if des == nil {
		return nw
	}
	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []BucketLifecycleRule
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareBucketLifecycleRuleNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewBucketLifecycleRule(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)
	return items
}
// canonicalizeNewBucketLifecycleRuleSlice canonicalizes the actual slice
// element-by-element against the desired slice. If the lengths differ a diff
// will surface later, so the actual slice is returned untouched.
func canonicalizeNewBucketLifecycleRuleSlice(c *Client, des, nw []BucketLifecycleRule) []BucketLifecycleRule {
	if des == nil {
		return nw
	}
	if len(des) != len(nw) {
		return nw
	}
	var items []BucketLifecycleRule
	for idx := range des {
		actual := nw[idx]
		items = append(items, *canonicalizeNewBucketLifecycleRule(c, &des[idx], &actual))
	}
	return items
}
// canonicalizeBucketLifecycleRuleAction merges a desired action with its
// initial counterpart, keeping the initial value where the two are equivalent.
func canonicalizeBucketLifecycleRuleAction(des, initial *BucketLifecycleRuleAction, opts ...dcl.ApplyOption) *BucketLifecycleRuleAction {
	if des == nil {
		return initial
	}
	if des.empty {
		// An explicitly-empty desired object is returned untouched.
		return des
	}
	if initial == nil {
		return des
	}
	cDes := &BucketLifecycleRuleAction{}
	if dcl.StringCanonicalize(des.StorageClass, initial.StorageClass) || dcl.IsZeroValue(des.StorageClass) {
		cDes.StorageClass = initial.StorageClass
	} else {
		cDes.StorageClass = des.StorageClass
	}
	if dcl.IsZeroValue(des.Type) || (dcl.IsEmptyValueIndirect(des.Type) && dcl.IsEmptyValueIndirect(initial.Type)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.Type = initial.Type
	} else {
		cDes.Type = des.Type
	}
	return cDes
}
// canonicalizeBucketLifecycleRuleActionSlice canonicalizes each desired element;
// initial elements are matched positionally only when the slice lengths agree.
func canonicalizeBucketLifecycleRuleActionSlice(des, initial []BucketLifecycleRuleAction, opts ...dcl.ApplyOption) []BucketLifecycleRuleAction {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}
	if len(des) != len(initial) {
		// Lengths differ: canonicalize each desired element against nil.
		items := make([]BucketLifecycleRuleAction, 0, len(des))
		for _, d := range des {
			cd := canonicalizeBucketLifecycleRuleAction(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}
	items := make([]BucketLifecycleRuleAction, 0, len(des))
	for i, d := range des {
		cd := canonicalizeBucketLifecycleRuleAction(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items
}
// canonicalizeNewBucketLifecycleRuleAction reconciles an API-returned action
// with the desired value, preferring the desired StorageClass when equivalent.
func canonicalizeNewBucketLifecycleRuleAction(c *Client, des, nw *BucketLifecycleRuleAction) *BucketLifecycleRuleAction {
	if des == nil {
		return nw
	}
	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for BucketLifecycleRuleAction while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}
	if dcl.StringCanonicalize(des.StorageClass, nw.StorageClass) {
		nw.StorageClass = des.StorageClass
	}
	return nw
}
// canonicalizeNewBucketLifecycleRuleActionSet treats the slices as sets: each
// desired element is matched (by zero-diff comparison) against an actual
// element, canonicalized, and the match removed from nw; unmatched actual
// elements are appended at the end.
func canonicalizeNewBucketLifecycleRuleActionSet(c *Client, des, nw []BucketLifecycleRuleAction) []BucketLifecycleRuleAction {
	if des == nil {
		return nw
	}
	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []BucketLifecycleRuleAction
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareBucketLifecycleRuleActionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewBucketLifecycleRuleAction(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)
	return items
}
// canonicalizeNewBucketLifecycleRuleActionSlice canonicalizes the actual slice
// element-by-element against the desired slice. If the lengths differ a diff
// will surface later, so the actual slice is returned untouched.
func canonicalizeNewBucketLifecycleRuleActionSlice(c *Client, des, nw []BucketLifecycleRuleAction) []BucketLifecycleRuleAction {
	if des == nil {
		return nw
	}
	if len(des) != len(nw) {
		return nw
	}
	var items []BucketLifecycleRuleAction
	for idx := range des {
		actual := nw[idx]
		items = append(items, *canonicalizeNewBucketLifecycleRuleAction(c, &des[idx], &actual))
	}
	return items
}
// canonicalizeBucketLifecycleRuleCondition merges a desired condition with its
// initial counterpart, keeping the initial value where the two are equivalent.
func canonicalizeBucketLifecycleRuleCondition(des, initial *BucketLifecycleRuleCondition, opts ...dcl.ApplyOption) *BucketLifecycleRuleCondition {
	if des == nil {
		return initial
	}
	if des.empty {
		// An explicitly-empty desired object is returned untouched.
		return des
	}
	if initial == nil {
		return des
	}
	cDes := &BucketLifecycleRuleCondition{}
	if dcl.IsZeroValue(des.Age) || (dcl.IsEmptyValueIndirect(des.Age) && dcl.IsEmptyValueIndirect(initial.Age)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.Age = initial.Age
	} else {
		cDes.Age = des.Age
	}
	if dcl.IsZeroValue(des.CreatedBefore) || (dcl.IsEmptyValueIndirect(des.CreatedBefore) && dcl.IsEmptyValueIndirect(initial.CreatedBefore)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.CreatedBefore = initial.CreatedBefore
	} else {
		cDes.CreatedBefore = des.CreatedBefore
	}
	if dcl.IsZeroValue(des.WithState) || (dcl.IsEmptyValueIndirect(des.WithState) && dcl.IsEmptyValueIndirect(initial.WithState)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.WithState = initial.WithState
	} else {
		cDes.WithState = des.WithState
	}
	if dcl.StringArrayCanonicalize(des.MatchesStorageClass, initial.MatchesStorageClass) {
		cDes.MatchesStorageClass = initial.MatchesStorageClass
	} else {
		cDes.MatchesStorageClass = des.MatchesStorageClass
	}
	if dcl.IsZeroValue(des.NumNewerVersions) || (dcl.IsEmptyValueIndirect(des.NumNewerVersions) && dcl.IsEmptyValueIndirect(initial.NumNewerVersions)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.NumNewerVersions = initial.NumNewerVersions
	} else {
		cDes.NumNewerVersions = des.NumNewerVersions
	}
	return cDes
}
// canonicalizeBucketLifecycleRuleConditionSlice canonicalizes each desired
// element; initial elements are matched positionally only when the slice
// lengths agree.
func canonicalizeBucketLifecycleRuleConditionSlice(des, initial []BucketLifecycleRuleCondition, opts ...dcl.ApplyOption) []BucketLifecycleRuleCondition {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}
	if len(des) != len(initial) {
		// Lengths differ: canonicalize each desired element against nil.
		items := make([]BucketLifecycleRuleCondition, 0, len(des))
		for _, d := range des {
			cd := canonicalizeBucketLifecycleRuleCondition(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}
	items := make([]BucketLifecycleRuleCondition, 0, len(des))
	for i, d := range des {
		cd := canonicalizeBucketLifecycleRuleCondition(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items
}
// canonicalizeNewBucketLifecycleRuleCondition reconciles an API-returned
// condition with the desired value, preferring the desired MatchesStorageClass
// slice when equivalent.
func canonicalizeNewBucketLifecycleRuleCondition(c *Client, des, nw *BucketLifecycleRuleCondition) *BucketLifecycleRuleCondition {
	if des == nil {
		return nw
	}
	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for BucketLifecycleRuleCondition while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}
	if dcl.StringArrayCanonicalize(des.MatchesStorageClass, nw.MatchesStorageClass) {
		nw.MatchesStorageClass = des.MatchesStorageClass
	}
	return nw
}
// canonicalizeNewBucketLifecycleRuleConditionSet treats the slices as sets:
// each desired element is matched (by zero-diff comparison) against an actual
// element, canonicalized, and the match removed from nw; unmatched actual
// elements are appended at the end.
func canonicalizeNewBucketLifecycleRuleConditionSet(c *Client, des, nw []BucketLifecycleRuleCondition) []BucketLifecycleRuleCondition {
	if des == nil {
		return nw
	}
	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []BucketLifecycleRuleCondition
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareBucketLifecycleRuleConditionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewBucketLifecycleRuleCondition(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)
	return items
}
// canonicalizeNewBucketLifecycleRuleConditionSlice canonicalizes nw against
// des element-by-element. Unequal lengths are left alone so the later diff
// can surface the discrepancy.
func canonicalizeNewBucketLifecycleRuleConditionSlice(c *Client, des, nw []BucketLifecycleRuleCondition) []BucketLifecycleRuleCondition {
	if des == nil || len(des) != len(nw) {
		return nw
	}
	var items []BucketLifecycleRuleCondition
	for i := range des {
		// Work on copies so the inputs are never mutated in place.
		d, n := des[i], nw[i]
		items = append(items, *canonicalizeNewBucketLifecycleRuleCondition(c, &d, &n))
	}
	return items
}
// canonicalizeBucketLogging computes the canonical desired logging config,
// preferring the initial (server) value wherever the desired value is
// equivalent or unset.
func canonicalizeBucketLogging(des, initial *BucketLogging, opts ...dcl.ApplyOption) *BucketLogging {
	if des == nil {
		return initial
	}
	if des.empty || initial == nil {
		return des
	}
	cDes := &BucketLogging{}
	cDes.LogBucket = des.LogBucket
	if dcl.StringCanonicalize(des.LogBucket, initial.LogBucket) || dcl.IsZeroValue(des.LogBucket) {
		cDes.LogBucket = initial.LogBucket
	}
	cDes.LogObjectPrefix = des.LogObjectPrefix
	if dcl.StringCanonicalize(des.LogObjectPrefix, initial.LogObjectPrefix) || dcl.IsZeroValue(des.LogObjectPrefix) {
		cDes.LogObjectPrefix = initial.LogObjectPrefix
	}
	return cDes
}
// canonicalizeBucketLoggingSlice canonicalizes each desired logging config
// against its positional counterpart in initial. When the lengths differ,
// each desired item is canonicalized without an initial counterpart.
func canonicalizeBucketLoggingSlice(des, initial []BucketLogging, opts ...dcl.ApplyOption) []BucketLogging {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}
	pairwise := len(des) == len(initial)
	items := make([]BucketLogging, 0, len(des))
	for i := range des {
		var init *BucketLogging
		if pairwise {
			init = &initial[i]
		}
		d := des[i]
		if cd := canonicalizeBucketLogging(&d, init, opts...); cd != nil {
			items = append(items, *cd)
		}
	}
	return items
}
// canonicalizeNewBucketLogging merges canonical values from the desired
// object into the actual (nw) object returned by the API.
func canonicalizeNewBucketLogging(c *Client, des, nw *BucketLogging) *BucketLogging {
	if des == nil {
		return nw
	}
	if nw == nil {
		if !dcl.IsEmptyValueIndirect(des) {
			return nil
		}
		c.Config.Logger.Info("Found explicitly empty value for BucketLogging while comparing non-nil desired to nil actual. Returning desired object.")
		return des
	}
	if dcl.StringCanonicalize(des.LogBucket, nw.LogBucket) {
		nw.LogBucket = des.LogBucket
	}
	if dcl.StringCanonicalize(des.LogObjectPrefix, nw.LogObjectPrefix) {
		nw.LogObjectPrefix = des.LogObjectPrefix
	}
	return nw
}
// canonicalizeNewBucketLoggingSet treats des and nw as sets: each desired
// element that matches an actual element (no diffs) is canonicalized against
// it and the match is consumed; unmatched actual elements are appended last.
func canonicalizeNewBucketLoggingSet(c *Client, des, nw []BucketLogging) []BucketLogging {
	if des == nil {
		return nw
	}
	var items []BucketLogging
	for _, d := range des {
		for i := range nw {
			diffs, _ := compareBucketLoggingNewStyle(&d, &nw[i], dcl.FieldName{})
			if len(diffs) != 0 {
				continue
			}
			items = append(items, *canonicalizeNewBucketLogging(c, &d, &nw[i]))
			nw = append(nw[:i], nw[i+1:]...)
			break
		}
	}
	return append(items, nw...)
}
// canonicalizeNewBucketLoggingSlice canonicalizes nw against des
// element-by-element. Unequal lengths are left alone so the later diff can
// surface the discrepancy.
func canonicalizeNewBucketLoggingSlice(c *Client, des, nw []BucketLogging) []BucketLogging {
	if des == nil || len(des) != len(nw) {
		return nw
	}
	var items []BucketLogging
	for i := range des {
		// Work on copies so the inputs are never mutated in place.
		d, n := des[i], nw[i]
		items = append(items, *canonicalizeNewBucketLogging(c, &d, &n))
	}
	return items
}
// canonicalizeBucketVersioning computes the canonical desired versioning
// config, preferring the initial (server) value when the desired value is
// equivalent or unset.
func canonicalizeBucketVersioning(des, initial *BucketVersioning, opts ...dcl.ApplyOption) *BucketVersioning {
	if des == nil {
		return initial
	}
	if des.empty || initial == nil {
		return des
	}
	cDes := &BucketVersioning{}
	cDes.Enabled = des.Enabled
	if dcl.BoolCanonicalize(des.Enabled, initial.Enabled) || dcl.IsZeroValue(des.Enabled) {
		cDes.Enabled = initial.Enabled
	}
	return cDes
}
// canonicalizeBucketVersioningSlice canonicalizes each desired versioning
// config against its positional counterpart in initial. When the lengths
// differ, each desired item is canonicalized without an initial counterpart.
func canonicalizeBucketVersioningSlice(des, initial []BucketVersioning, opts ...dcl.ApplyOption) []BucketVersioning {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}
	pairwise := len(des) == len(initial)
	items := make([]BucketVersioning, 0, len(des))
	for i := range des {
		var init *BucketVersioning
		if pairwise {
			init = &initial[i]
		}
		d := des[i]
		if cd := canonicalizeBucketVersioning(&d, init, opts...); cd != nil {
			items = append(items, *cd)
		}
	}
	return items
}
// canonicalizeNewBucketVersioning merges canonical values from the desired
// object into the actual (nw) object returned by the API.
func canonicalizeNewBucketVersioning(c *Client, des, nw *BucketVersioning) *BucketVersioning {
	if des == nil {
		return nw
	}
	if nw == nil {
		if !dcl.IsEmptyValueIndirect(des) {
			return nil
		}
		c.Config.Logger.Info("Found explicitly empty value for BucketVersioning while comparing non-nil desired to nil actual. Returning desired object.")
		return des
	}
	if dcl.BoolCanonicalize(des.Enabled, nw.Enabled) {
		nw.Enabled = des.Enabled
	}
	return nw
}
// canonicalizeNewBucketVersioningSet treats des and nw as sets: each desired
// element that matches an actual element (no diffs) is canonicalized against
// it and the match is consumed; unmatched actual elements are appended last.
func canonicalizeNewBucketVersioningSet(c *Client, des, nw []BucketVersioning) []BucketVersioning {
	if des == nil {
		return nw
	}
	var items []BucketVersioning
	for _, d := range des {
		for i := range nw {
			diffs, _ := compareBucketVersioningNewStyle(&d, &nw[i], dcl.FieldName{})
			if len(diffs) != 0 {
				continue
			}
			items = append(items, *canonicalizeNewBucketVersioning(c, &d, &nw[i]))
			nw = append(nw[:i], nw[i+1:]...)
			break
		}
	}
	return append(items, nw...)
}
// canonicalizeNewBucketVersioningSlice canonicalizes nw against des
// element-by-element. Unequal lengths are left alone so the later diff can
// surface the discrepancy.
func canonicalizeNewBucketVersioningSlice(c *Client, des, nw []BucketVersioning) []BucketVersioning {
	if des == nil || len(des) != len(nw) {
		return nw
	}
	var items []BucketVersioning
	for i := range des {
		// Work on copies so the inputs are never mutated in place.
		d, n := des[i], nw[i]
		items = append(items, *canonicalizeNewBucketVersioning(c, &d, &n))
	}
	return items
}
// canonicalizeBucketWebsite computes the canonical desired website config,
// preferring the initial (server) value wherever the desired value is
// equivalent or unset.
func canonicalizeBucketWebsite(des, initial *BucketWebsite, opts ...dcl.ApplyOption) *BucketWebsite {
	if des == nil {
		return initial
	}
	if des.empty || initial == nil {
		return des
	}
	cDes := &BucketWebsite{}
	cDes.MainPageSuffix = des.MainPageSuffix
	if dcl.StringCanonicalize(des.MainPageSuffix, initial.MainPageSuffix) || dcl.IsZeroValue(des.MainPageSuffix) {
		cDes.MainPageSuffix = initial.MainPageSuffix
	}
	cDes.NotFoundPage = des.NotFoundPage
	if dcl.StringCanonicalize(des.NotFoundPage, initial.NotFoundPage) || dcl.IsZeroValue(des.NotFoundPage) {
		cDes.NotFoundPage = initial.NotFoundPage
	}
	return cDes
}
// canonicalizeBucketWebsiteSlice canonicalizes each desired website config
// against its positional counterpart in initial. When the lengths differ,
// each desired item is canonicalized without an initial counterpart.
func canonicalizeBucketWebsiteSlice(des, initial []BucketWebsite, opts ...dcl.ApplyOption) []BucketWebsite {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}
	pairwise := len(des) == len(initial)
	items := make([]BucketWebsite, 0, len(des))
	for i := range des {
		var init *BucketWebsite
		if pairwise {
			init = &initial[i]
		}
		d := des[i]
		if cd := canonicalizeBucketWebsite(&d, init, opts...); cd != nil {
			items = append(items, *cd)
		}
	}
	return items
}
// canonicalizeNewBucketWebsite merges canonical values from the desired
// object into the actual (nw) object returned by the API.
func canonicalizeNewBucketWebsite(c *Client, des, nw *BucketWebsite) *BucketWebsite {
	if des == nil {
		return nw
	}
	if nw == nil {
		if !dcl.IsEmptyValueIndirect(des) {
			return nil
		}
		c.Config.Logger.Info("Found explicitly empty value for BucketWebsite while comparing non-nil desired to nil actual. Returning desired object.")
		return des
	}
	if dcl.StringCanonicalize(des.MainPageSuffix, nw.MainPageSuffix) {
		nw.MainPageSuffix = des.MainPageSuffix
	}
	if dcl.StringCanonicalize(des.NotFoundPage, nw.NotFoundPage) {
		nw.NotFoundPage = des.NotFoundPage
	}
	return nw
}
// canonicalizeNewBucketWebsiteSet treats des and nw as sets: each desired
// element that matches an actual element (no diffs) is canonicalized against
// it and the match is consumed; unmatched actual elements are appended last.
func canonicalizeNewBucketWebsiteSet(c *Client, des, nw []BucketWebsite) []BucketWebsite {
	if des == nil {
		return nw
	}
	var items []BucketWebsite
	for _, d := range des {
		for i := range nw {
			diffs, _ := compareBucketWebsiteNewStyle(&d, &nw[i], dcl.FieldName{})
			if len(diffs) != 0 {
				continue
			}
			items = append(items, *canonicalizeNewBucketWebsite(c, &d, &nw[i]))
			nw = append(nw[:i], nw[i+1:]...)
			break
		}
	}
	return append(items, nw...)
}
// canonicalizeNewBucketWebsiteSlice canonicalizes nw against des
// element-by-element. Unequal lengths are left alone so the later diff can
// surface the discrepancy.
func canonicalizeNewBucketWebsiteSlice(c *Client, des, nw []BucketWebsite) []BucketWebsite {
	if des == nil || len(des) != len(nw) {
		return nw
	}
	var items []BucketWebsite
	for i := range des {
		// Work on copies so the inputs are never mutated in place.
		d, n := des[i], nw[i]
		items = append(items, *canonicalizeNewBucketWebsite(c, &d, &n))
	}
	return items
}
// The differ returns a list of diffs, along with a list of operations that should be taken
// to remedy them. Right now, it does not attempt to consolidate operations - if several
// fields can be fixed with a patch update, it will perform the patch several times.
// Diffs on some fields will be ignored if the `desired` state has an empty (nil)
// value. This empty value indicates that the user does not care about the state for
// the field. Empty fields on the actual object will cause diffs.
// TODO(magic-modules-eng): for efficiency in some resources, add batching.
func diffBucket(c *Client, desired, actual *Bucket, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) {
	if desired == nil || actual == nil {
		return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual)
	}
	c.Config.Logger.Infof("Diff function called with desired state: %v", desired)
	c.Config.Logger.Infof("Diff function called with actual state: %v", actual)
	var fn dcl.FieldName
	var newDiffs []*dcl.FieldDiff
	// collect folds one field's diff result into newDiffs, propagating errors.
	// Appending an empty ds is a no-op, so no length guard is needed.
	collect := func(ds []*dcl.FieldDiff, err error) error {
		if err != nil {
			return err
		}
		newDiffs = append(newDiffs, ds...)
		return nil
	}
	// New style diffs, field by field. Identity fields require recreation;
	// the rest trigger the update operation.
	if err := collect(dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project"))); err != nil {
		return nil, err
	}
	if err := collect(dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location"))); err != nil {
		return nil, err
	}
	if err := collect(dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name"))); err != nil {
		return nil, err
	}
	if err := collect(dcl.Diff(desired.Cors, actual.Cors, dcl.DiffInfo{ObjectFunction: compareBucketCorsNewStyle, EmptyObject: EmptyBucketCors, OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("Cors"))); err != nil {
		return nil, err
	}
	if err := collect(dcl.Diff(desired.Lifecycle, actual.Lifecycle, dcl.DiffInfo{ObjectFunction: compareBucketLifecycleNewStyle, EmptyObject: EmptyBucketLifecycle, OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("Lifecycle"))); err != nil {
		return nil, err
	}
	if err := collect(dcl.Diff(desired.Logging, actual.Logging, dcl.DiffInfo{ObjectFunction: compareBucketLoggingNewStyle, EmptyObject: EmptyBucketLogging, OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("Logging"))); err != nil {
		return nil, err
	}
	if err := collect(dcl.Diff(desired.StorageClass, actual.StorageClass, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("StorageClass"))); err != nil {
		return nil, err
	}
	if err := collect(dcl.Diff(desired.Versioning, actual.Versioning, dcl.DiffInfo{ObjectFunction: compareBucketVersioningNewStyle, EmptyObject: EmptyBucketVersioning, OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("Versioning"))); err != nil {
		return nil, err
	}
	if err := collect(dcl.Diff(desired.Website, actual.Website, dcl.DiffInfo{ObjectFunction: compareBucketWebsiteNewStyle, EmptyObject: EmptyBucketWebsite, OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("Website"))); err != nil {
		return nil, err
	}
	if len(newDiffs) > 0 {
		c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs)
	}
	return newDiffs, nil
}
// compareBucketCorsNewStyle diffs two BucketCors values (passed as values or
// pointers) field by field and returns the accumulated field diffs.
func compareBucketCorsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	desired, ok := d.(*BucketCors)
	if !ok {
		dv, dok := d.(BucketCors)
		if !dok {
			return nil, fmt.Errorf("obj %v is not a BucketCors or *BucketCors", d)
		}
		desired = &dv
	}
	actual, ok := a.(*BucketCors)
	if !ok {
		av, aok := a.(BucketCors)
		if !aok {
			return nil, fmt.Errorf("obj %v is not a BucketCors", a)
		}
		actual = &av
	}
	var diffs []*dcl.FieldDiff
	// collect folds one field's diff result into diffs, propagating errors.
	collect := func(ds []*dcl.FieldDiff, err error) error {
		if err != nil {
			return err
		}
		diffs = append(diffs, ds...)
		return nil
	}
	if err := collect(dcl.Diff(desired.MaxAgeSeconds, actual.MaxAgeSeconds, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("MaxAgeSeconds"))); err != nil {
		return nil, err
	}
	if err := collect(dcl.Diff(desired.Method, actual.Method, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("Method"))); err != nil {
		return nil, err
	}
	if err := collect(dcl.Diff(desired.Origin, actual.Origin, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("Origin"))); err != nil {
		return nil, err
	}
	if err := collect(dcl.Diff(desired.ResponseHeader, actual.ResponseHeader, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("ResponseHeader"))); err != nil {
		return nil, err
	}
	return diffs, nil
}
// compareBucketLifecycleNewStyle diffs two BucketLifecycle values (passed as
// values or pointers) field by field and returns the accumulated field diffs.
func compareBucketLifecycleNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	desired, ok := d.(*BucketLifecycle)
	if !ok {
		dv, dok := d.(BucketLifecycle)
		if !dok {
			return nil, fmt.Errorf("obj %v is not a BucketLifecycle or *BucketLifecycle", d)
		}
		desired = &dv
	}
	actual, ok := a.(*BucketLifecycle)
	if !ok {
		av, aok := a.(BucketLifecycle)
		if !aok {
			return nil, fmt.Errorf("obj %v is not a BucketLifecycle", a)
		}
		actual = &av
	}
	var diffs []*dcl.FieldDiff
	ds, err := dcl.Diff(desired.Rule, actual.Rule, dcl.DiffInfo{ObjectFunction: compareBucketLifecycleRuleNewStyle, EmptyObject: EmptyBucketLifecycleRule, OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("Rule"))
	if err != nil {
		return nil, err
	}
	diffs = append(diffs, ds...)
	return diffs, nil
}
// compareBucketLifecycleRuleNewStyle diffs two BucketLifecycleRule values
// (passed as values or pointers) field by field.
func compareBucketLifecycleRuleNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	desired, ok := d.(*BucketLifecycleRule)
	if !ok {
		dv, dok := d.(BucketLifecycleRule)
		if !dok {
			return nil, fmt.Errorf("obj %v is not a BucketLifecycleRule or *BucketLifecycleRule", d)
		}
		desired = &dv
	}
	actual, ok := a.(*BucketLifecycleRule)
	if !ok {
		av, aok := a.(BucketLifecycleRule)
		if !aok {
			return nil, fmt.Errorf("obj %v is not a BucketLifecycleRule", a)
		}
		actual = &av
	}
	var diffs []*dcl.FieldDiff
	// collect folds one field's diff result into diffs, propagating errors.
	collect := func(ds []*dcl.FieldDiff, err error) error {
		if err != nil {
			return err
		}
		diffs = append(diffs, ds...)
		return nil
	}
	if err := collect(dcl.Diff(desired.Action, actual.Action, dcl.DiffInfo{ObjectFunction: compareBucketLifecycleRuleActionNewStyle, EmptyObject: EmptyBucketLifecycleRuleAction, OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("Action"))); err != nil {
		return nil, err
	}
	if err := collect(dcl.Diff(desired.Condition, actual.Condition, dcl.DiffInfo{ObjectFunction: compareBucketLifecycleRuleConditionNewStyle, EmptyObject: EmptyBucketLifecycleRuleCondition, OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("Condition"))); err != nil {
		return nil, err
	}
	return diffs, nil
}
// compareBucketLifecycleRuleActionNewStyle diffs two BucketLifecycleRuleAction
// values (passed as values or pointers) field by field.
func compareBucketLifecycleRuleActionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	desired, ok := d.(*BucketLifecycleRuleAction)
	if !ok {
		dv, dok := d.(BucketLifecycleRuleAction)
		if !dok {
			return nil, fmt.Errorf("obj %v is not a BucketLifecycleRuleAction or *BucketLifecycleRuleAction", d)
		}
		desired = &dv
	}
	actual, ok := a.(*BucketLifecycleRuleAction)
	if !ok {
		av, aok := a.(BucketLifecycleRuleAction)
		if !aok {
			return nil, fmt.Errorf("obj %v is not a BucketLifecycleRuleAction", a)
		}
		actual = &av
	}
	var diffs []*dcl.FieldDiff
	// collect folds one field's diff result into diffs, propagating errors.
	collect := func(ds []*dcl.FieldDiff, err error) error {
		if err != nil {
			return err
		}
		diffs = append(diffs, ds...)
		return nil
	}
	if err := collect(dcl.Diff(desired.StorageClass, actual.StorageClass, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("StorageClass"))); err != nil {
		return nil, err
	}
	if err := collect(dcl.Diff(desired.Type, actual.Type, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("Type"))); err != nil {
		return nil, err
	}
	return diffs, nil
}
// compareBucketLifecycleRuleConditionNewStyle diffs two
// BucketLifecycleRuleCondition values (passed as values or pointers) field by field.
func compareBucketLifecycleRuleConditionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	desired, ok := d.(*BucketLifecycleRuleCondition)
	if !ok {
		dv, dok := d.(BucketLifecycleRuleCondition)
		if !dok {
			return nil, fmt.Errorf("obj %v is not a BucketLifecycleRuleCondition or *BucketLifecycleRuleCondition", d)
		}
		desired = &dv
	}
	actual, ok := a.(*BucketLifecycleRuleCondition)
	if !ok {
		av, aok := a.(BucketLifecycleRuleCondition)
		if !aok {
			return nil, fmt.Errorf("obj %v is not a BucketLifecycleRuleCondition", a)
		}
		actual = &av
	}
	var diffs []*dcl.FieldDiff
	// collect folds one field's diff result into diffs, propagating errors.
	collect := func(ds []*dcl.FieldDiff, err error) error {
		if err != nil {
			return err
		}
		diffs = append(diffs, ds...)
		return nil
	}
	if err := collect(dcl.Diff(desired.Age, actual.Age, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("Age"))); err != nil {
		return nil, err
	}
	if err := collect(dcl.Diff(desired.CreatedBefore, actual.CreatedBefore, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("CreatedBefore"))); err != nil {
		return nil, err
	}
	// NOTE(review): WithState is diffed under the nest name "IsLive" —
	// presumably an API field rename handled by the generator; confirm
	// against the generator config before changing.
	if err := collect(dcl.Diff(desired.WithState, actual.WithState, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("IsLive"))); err != nil {
		return nil, err
	}
	if err := collect(dcl.Diff(desired.MatchesStorageClass, actual.MatchesStorageClass, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("MatchesStorageClass"))); err != nil {
		return nil, err
	}
	if err := collect(dcl.Diff(desired.NumNewerVersions, actual.NumNewerVersions, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("NumNewerVersions"))); err != nil {
		return nil, err
	}
	return diffs, nil
}
// compareBucketLoggingNewStyle diffs two BucketLogging values (passed as
// values or pointers) field by field.
func compareBucketLoggingNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	desired, ok := d.(*BucketLogging)
	if !ok {
		dv, dok := d.(BucketLogging)
		if !dok {
			return nil, fmt.Errorf("obj %v is not a BucketLogging or *BucketLogging", d)
		}
		desired = &dv
	}
	actual, ok := a.(*BucketLogging)
	if !ok {
		av, aok := a.(BucketLogging)
		if !aok {
			return nil, fmt.Errorf("obj %v is not a BucketLogging", a)
		}
		actual = &av
	}
	var diffs []*dcl.FieldDiff
	// collect folds one field's diff result into diffs, propagating errors.
	collect := func(ds []*dcl.FieldDiff, err error) error {
		if err != nil {
			return err
		}
		diffs = append(diffs, ds...)
		return nil
	}
	if err := collect(dcl.Diff(desired.LogBucket, actual.LogBucket, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("LogBucket"))); err != nil {
		return nil, err
	}
	if err := collect(dcl.Diff(desired.LogObjectPrefix, actual.LogObjectPrefix, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("LogObjectPrefix"))); err != nil {
		return nil, err
	}
	return diffs, nil
}
// compareBucketVersioningNewStyle diffs two BucketVersioning values (passed
// as values or pointers) field by field.
func compareBucketVersioningNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	desired, ok := d.(*BucketVersioning)
	if !ok {
		dv, dok := d.(BucketVersioning)
		if !dok {
			return nil, fmt.Errorf("obj %v is not a BucketVersioning or *BucketVersioning", d)
		}
		desired = &dv
	}
	actual, ok := a.(*BucketVersioning)
	if !ok {
		av, aok := a.(BucketVersioning)
		if !aok {
			return nil, fmt.Errorf("obj %v is not a BucketVersioning", a)
		}
		actual = &av
	}
	var diffs []*dcl.FieldDiff
	ds, err := dcl.Diff(desired.Enabled, actual.Enabled, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("Enabled"))
	if err != nil {
		return nil, err
	}
	diffs = append(diffs, ds...)
	return diffs, nil
}
// compareBucketWebsiteNewStyle diffs two BucketWebsite values (passed as
// values or pointers) field by field.
func compareBucketWebsiteNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	desired, ok := d.(*BucketWebsite)
	if !ok {
		dv, dok := d.(BucketWebsite)
		if !dok {
			return nil, fmt.Errorf("obj %v is not a BucketWebsite or *BucketWebsite", d)
		}
		desired = &dv
	}
	actual, ok := a.(*BucketWebsite)
	if !ok {
		av, aok := a.(BucketWebsite)
		if !aok {
			return nil, fmt.Errorf("obj %v is not a BucketWebsite", a)
		}
		actual = &av
	}
	var diffs []*dcl.FieldDiff
	// collect folds one field's diff result into diffs, propagating errors.
	collect := func(ds []*dcl.FieldDiff, err error) error {
		if err != nil {
			return err
		}
		diffs = append(diffs, ds...)
		return nil
	}
	if err := collect(dcl.Diff(desired.MainPageSuffix, actual.MainPageSuffix, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("MainPageSuffix"))); err != nil {
		return nil, err
	}
	if err := collect(dcl.Diff(desired.NotFoundPage, actual.NotFoundPage, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateBucketUpdateOperation")}, fn.AddNest("NotFoundPage"))); err != nil {
		return nil, err
	}
	return diffs, nil
}
// urlNormalized returns a copy of the resource struct with values normalized
// for URL substitutions. For instance, it converts long-form self-links to
// short-form so they can be substituted in.
func (r *Bucket) urlNormalized() *Bucket {
	n := dcl.Copy(*r).(Bucket)
	n.Project = dcl.SelfLinkToName(r.Project)
	n.Location = dcl.SelfLinkToName(r.Location)
	n.Name = dcl.SelfLinkToName(r.Name)
	return &n
}
// updateURL returns the request URL for the named update operation on this
// resource, substituting normalized field values into the URL template.
func (r *Bucket) updateURL(userBasePath, updateName string) (string, error) {
	nr := r.urlNormalized()
	switch updateName {
	case "update":
		fields := map[string]interface{}{
			"name": dcl.ValueOrEmptyString(nr.Name),
		}
		return dcl.URL("b/{{name}}", nr.basePath(), userBasePath, fields), nil
	}
	return "", fmt.Errorf("unknown update name: %s", updateName)
}
// marshal encodes the Bucket resource into JSON for a Create request, and
// performs transformations from the resource schema to the API schema if
// necessary.
func (r *Bucket) marshal(c *Client) ([]byte, error) {
	m, err := expandBucket(c, r)
	if err == nil {
		return json.Marshal(m)
	}
	return nil, fmt.Errorf("error marshalling Bucket: %w", err)
}
// unmarshalBucket decodes JSON responses into the Bucket resource schema.
func unmarshalBucket(b []byte, c *Client, res *Bucket) (*Bucket, error) {
	m := map[string]interface{}{}
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, err
	}
	return unmarshalMapBucket(m, c, res)
}
// unmarshalMapBucket flattens an already-decoded JSON map into the Bucket
// resource schema, erroring on an empty/unflattenable object.
func unmarshalMapBucket(m map[string]interface{}, c *Client, res *Bucket) (*Bucket, error) {
	if flattened := flattenBucket(c, m, res); flattened != nil {
		return flattened, nil
	}
	return nil, fmt.Errorf("attempted to flatten empty json object")
}
// expandBucket expands Bucket into a JSON request object.
func expandBucket(c *Client, f *Bucket) (map[string]interface{}, error) {
	m := map[string]interface{}{}
	res := f
	_ = res
	// Project is expanded via dcl.EmptyValue (the value is not sent directly).
	project, err := dcl.EmptyValue()
	if err != nil {
		return nil, fmt.Errorf("error expanding Project into project: %w", err)
	}
	if !dcl.IsEmptyValueIndirect(project) {
		m["project"] = project
	}
	if dcl.ValueShouldBeSent(f.Location) {
		m["location"] = f.Location
	}
	if dcl.ValueShouldBeSent(f.Name) {
		m["name"] = f.Name
	}
	cors, err := expandBucketCorsSlice(c, f.Cors, res)
	if err != nil {
		return nil, fmt.Errorf("error expanding Cors into cors: %w", err)
	}
	if cors != nil {
		m["cors"] = cors
	}
	lifecycle, err := expandBucketLifecycle(c, f.Lifecycle, res)
	if err != nil {
		return nil, fmt.Errorf("error expanding Lifecycle into lifecycle: %w", err)
	}
	if !dcl.IsEmptyValueIndirect(lifecycle) {
		m["lifecycle"] = lifecycle
	}
	logging, err := expandBucketLogging(c, f.Logging, res)
	if err != nil {
		return nil, fmt.Errorf("error expanding Logging into logging: %w", err)
	}
	if !dcl.IsEmptyValueIndirect(logging) {
		m["logging"] = logging
	}
	if dcl.ValueShouldBeSent(f.StorageClass) {
		m["storageClass"] = f.StorageClass
	}
	versioning, err := expandBucketVersioning(c, f.Versioning, res)
	if err != nil {
		return nil, fmt.Errorf("error expanding Versioning into versioning: %w", err)
	}
	if !dcl.IsEmptyValueIndirect(versioning) {
		m["versioning"] = versioning
	}
	website, err := expandBucketWebsite(c, f.Website, res)
	if err != nil {
		return nil, fmt.Errorf("error expanding Website into website: %w", err)
	}
	if !dcl.IsEmptyValueIndirect(website) {
		m["website"] = website
	}
	return m, nil
}
// flattenBucket flattens Bucket from a JSON request object into the
// Bucket type. Returns nil for non-map or empty input.
func flattenBucket(c *Client, i interface{}, res *Bucket) *Bucket {
	m, ok := i.(map[string]interface{})
	if !ok || len(m) == 0 {
		return nil
	}
	return &Bucket{
		Project:      dcl.FlattenString(m["project"]),
		Location:     dcl.FlattenString(m["location"]),
		Name:         dcl.FlattenString(m["name"]),
		Cors:         flattenBucketCorsSlice(c, m["cors"], res),
		Lifecycle:    flattenBucketLifecycle(c, m["lifecycle"], res),
		Logging:      flattenBucketLogging(c, m["logging"], res),
		StorageClass: flattenBucketStorageClassEnum(m["storageClass"]),
		Versioning:   flattenBucketVersioning(c, m["versioning"], res),
		Website:      flattenBucketWebsite(c, m["website"], res),
	}
}
// expandBucketCorsMap expands the contents of BucketCors into a JSON
// request object, keyed identically to the input map.
func expandBucketCorsMap(c *Client, f map[string]BucketCors, res *Bucket) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}
	items := map[string]interface{}{}
	for k := range f {
		item := f[k]
		i, err := expandBucketCors(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i == nil {
			continue
		}
		items[k] = i
	}
	return items, nil
}
// expandBucketCorsSlice expands the contents of BucketCors into a JSON
// request object, preserving element order.
func expandBucketCorsSlice(c *Client, f []BucketCors, res *Bucket) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}
	items := make([]map[string]interface{}, 0, len(f))
	for idx := range f {
		item := f[idx]
		i, err := expandBucketCors(c, &item, res)
		if err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	return items, nil
}
// flattenBucketCorsMap flattens the contents of BucketCors from a JSON
// response object. Non-map or empty input yields an empty (non-nil) map.
func flattenBucketCorsMap(c *Client, i interface{}, res *Bucket) map[string]BucketCors {
	a, ok := i.(map[string]interface{})
	if !ok || len(a) == 0 {
		return map[string]BucketCors{}
	}
	items := make(map[string]BucketCors, len(a))
	for k, item := range a {
		items[k] = *flattenBucketCors(c, item.(map[string]interface{}), res)
	}
	return items
}
// flattenBucketCorsSlice flattens the contents of BucketCors from a JSON
// response object. Non-slice or empty input yields an empty (non-nil) slice.
func flattenBucketCorsSlice(c *Client, i interface{}, res *Bucket) []BucketCors {
	a, ok := i.([]interface{})
	if !ok || len(a) == 0 {
		return []BucketCors{}
	}
	items := make([]BucketCors, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenBucketCors(c, item.(map[string]interface{}), res))
	}
	return items
}
// expandBucketCors expands an instance of BucketCors into a JSON
// request object, omitting unset fields.
func expandBucketCors(c *Client, f *BucketCors, res *Bucket) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}
	m := map[string]interface{}{}
	if !dcl.IsEmptyValueIndirect(f.MaxAgeSeconds) {
		m["maxAgeSeconds"] = f.MaxAgeSeconds
	}
	if f.Method != nil {
		m["method"] = f.Method
	}
	if f.Origin != nil {
		m["origin"] = f.Origin
	}
	if f.ResponseHeader != nil {
		m["responseHeader"] = f.ResponseHeader
	}
	return m, nil
}
// flattenBucketCors flattens an instance of BucketCors from a JSON
// response object. An empty-indirect input maps to the EmptyBucketCors sentinel.
func flattenBucketCors(c *Client, i interface{}, res *Bucket) *BucketCors {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}
	if dcl.IsEmptyValueIndirect(i) {
		return EmptyBucketCors
	}
	return &BucketCors{
		MaxAgeSeconds:  dcl.FlattenInteger(m["maxAgeSeconds"]),
		Method:         dcl.FlattenStringSlice(m["method"]),
		Origin:         dcl.FlattenStringSlice(m["origin"]),
		ResponseHeader: dcl.FlattenStringSlice(m["responseHeader"]),
	}
}
// expandBucketLifecycleMap expands the contents of BucketLifecycle into a JSON
// request object, keyed identically to the input map.
func expandBucketLifecycleMap(c *Client, f map[string]BucketLifecycle, res *Bucket) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}
	items := map[string]interface{}{}
	for k := range f {
		item := f[k]
		i, err := expandBucketLifecycle(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i == nil {
			continue
		}
		items[k] = i
	}
	return items, nil
}
// expandBucketLifecycleSlice expands the contents of BucketLifecycle into a JSON
// request object, preserving element order.
func expandBucketLifecycleSlice(c *Client, f []BucketLifecycle, res *Bucket) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}
	items := make([]map[string]interface{}, 0, len(f))
	for idx := range f {
		item := f[idx]
		i, err := expandBucketLifecycle(c, &item, res)
		if err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	return items, nil
}
// flattenBucketLifecycleMap flattens the contents of BucketLifecycle from a JSON
// response object. Non-map or empty input yields an empty (non-nil) map.
func flattenBucketLifecycleMap(c *Client, i interface{}, res *Bucket) map[string]BucketLifecycle {
	a, ok := i.(map[string]interface{})
	if !ok || len(a) == 0 {
		return map[string]BucketLifecycle{}
	}
	items := make(map[string]BucketLifecycle, len(a))
	for k, item := range a {
		items[k] = *flattenBucketLifecycle(c, item.(map[string]interface{}), res)
	}
	return items
}
// flattenBucketLifecycleSlice flattens the contents of BucketLifecycle from a JSON
// response object. Non-slice or empty input yields an empty (non-nil) slice.
func flattenBucketLifecycleSlice(c *Client, i interface{}, res *Bucket) []BucketLifecycle {
	a, ok := i.([]interface{})
	if !ok || len(a) == 0 {
		return []BucketLifecycle{}
	}
	items := make([]BucketLifecycle, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenBucketLifecycle(c, item.(map[string]interface{}), res))
	}
	return items
}
// expandBucketLifecycle expands an instance of BucketLifecycle into a JSON
// request object.
func expandBucketLifecycle(c *Client, f *BucketLifecycle, res *Bucket) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}
	m := map[string]interface{}{}
	rule, err := expandBucketLifecycleRuleSlice(c, f.Rule, res)
	if err != nil {
		return nil, fmt.Errorf("error expanding Rule into rule: %w", err)
	}
	if rule != nil {
		m["rule"] = rule
	}
	return m, nil
}
// flattenBucketLifecycle flattens an instance of BucketLifecycle from a JSON
// response object. Returns nil if the input is not a JSON object, and the
// canonical EmptyBucketLifecycle sentinel for an empty object.
func flattenBucketLifecycle(c *Client, i interface{}, res *Bucket) *BucketLifecycle {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}
	r := &BucketLifecycle{}
	if dcl.IsEmptyValueIndirect(i) {
		return EmptyBucketLifecycle
	}
	r.Rule = flattenBucketLifecycleRuleSlice(c, m["rule"], res)
	return r
}
// expandBucketLifecycleRuleMap expands the contents of BucketLifecycleRule into a JSON
// request object. Nil input yields nil; entries expanding to nil are dropped.
func expandBucketLifecycleRuleMap(c *Client, f map[string]BucketLifecycleRule, res *Bucket) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}
	items := make(map[string]interface{})
	for k, item := range f {
		// item is a loop copy, so taking its address is safe across iterations.
		i, err := expandBucketLifecycleRule(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}
	return items, nil
}
// expandBucketLifecycleRuleSlice expands the contents of BucketLifecycleRule into a JSON
// request object. Unlike the map variant, nil expansions are appended so
// output indexes stay aligned with the input.
func expandBucketLifecycleRuleSlice(c *Client, f []BucketLifecycleRule, res *Bucket) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}
	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandBucketLifecycleRule(c, &item, res)
		if err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	return items, nil
}
// flattenBucketLifecycleRuleMap flattens the contents of BucketLifecycleRule from a JSON
// response object. Non-map or empty input yields an empty (non-nil) map.
func flattenBucketLifecycleRuleMap(c *Client, i interface{}, res *Bucket) map[string]BucketLifecycleRule {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]BucketLifecycleRule{}
	}
	if len(a) == 0 {
		return map[string]BucketLifecycleRule{}
	}
	items := make(map[string]BucketLifecycleRule)
	for k, item := range a {
		// NOTE(review): assumes every element is a JSON object; a non-object
		// element would panic on the type assertion — presumed guaranteed by the API.
		items[k] = *flattenBucketLifecycleRule(c, item.(map[string]interface{}), res)
	}
	return items
}
// flattenBucketLifecycleRuleSlice flattens the contents of BucketLifecycleRule from a JSON
// response object. Non-slice or empty input yields an empty (non-nil) slice.
func flattenBucketLifecycleRuleSlice(c *Client, i interface{}, res *Bucket) []BucketLifecycleRule {
	a, ok := i.([]interface{})
	if !ok {
		return []BucketLifecycleRule{}
	}
	if len(a) == 0 {
		return []BucketLifecycleRule{}
	}
	items := make([]BucketLifecycleRule, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenBucketLifecycleRule(c, item.(map[string]interface{}), res))
	}
	return items
}
// expandBucketLifecycleRule expands an instance of BucketLifecycleRule into a JSON
// request object containing "action" and "condition" sub-objects.
func expandBucketLifecycleRule(c *Client, f *BucketLifecycleRule, res *Bucket) (map[string]interface{}, error) {
	// NOTE(review): this checks f == nil rather than dcl.IsEmptyValueIndirect(f)
	// as the sibling expanders do, so a pointer to a zero value still expands.
	if f == nil {
		return nil, nil
	}
	m := make(map[string]interface{})
	if v, err := expandBucketLifecycleRuleAction(c, f.Action, res); err != nil {
		return nil, fmt.Errorf("error expanding Action into action: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["action"] = v
	}
	if v, err := expandBucketLifecycleRuleCondition(c, f.Condition, res); err != nil {
		return nil, fmt.Errorf("error expanding Condition into condition: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["condition"] = v
	}
	return m, nil
}
// flattenBucketLifecycleRule flattens an instance of BucketLifecycleRule from a JSON
// response object. Returns nil for non-object input and the canonical empty
// sentinel for an empty object.
func flattenBucketLifecycleRule(c *Client, i interface{}, res *Bucket) *BucketLifecycleRule {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}
	r := &BucketLifecycleRule{}
	if dcl.IsEmptyValueIndirect(i) {
		return EmptyBucketLifecycleRule
	}
	r.Action = flattenBucketLifecycleRuleAction(c, m["action"], res)
	r.Condition = flattenBucketLifecycleRuleCondition(c, m["condition"], res)
	return r
}
// expandBucketLifecycleRuleActionMap expands the contents of BucketLifecycleRuleAction into a JSON
// request object. Nil input yields nil; entries expanding to nil are dropped.
func expandBucketLifecycleRuleActionMap(c *Client, f map[string]BucketLifecycleRuleAction, res *Bucket) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}
	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandBucketLifecycleRuleAction(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}
	return items, nil
}
// expandBucketLifecycleRuleActionSlice expands the contents of BucketLifecycleRuleAction into a JSON
// request object, preserving element order (nil expansions included).
func expandBucketLifecycleRuleActionSlice(c *Client, f []BucketLifecycleRuleAction, res *Bucket) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}
	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandBucketLifecycleRuleAction(c, &item, res)
		if err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	return items, nil
}
// flattenBucketLifecycleRuleActionMap flattens the contents of BucketLifecycleRuleAction from a JSON
// response object. Non-map or empty input yields an empty (non-nil) map.
func flattenBucketLifecycleRuleActionMap(c *Client, i interface{}, res *Bucket) map[string]BucketLifecycleRuleAction {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]BucketLifecycleRuleAction{}
	}
	if len(a) == 0 {
		return map[string]BucketLifecycleRuleAction{}
	}
	items := make(map[string]BucketLifecycleRuleAction)
	for k, item := range a {
		// Elements are assumed to be JSON objects per the API response shape.
		items[k] = *flattenBucketLifecycleRuleAction(c, item.(map[string]interface{}), res)
	}
	return items
}
// flattenBucketLifecycleRuleActionSlice flattens the contents of BucketLifecycleRuleAction from a JSON
// response object. Non-slice or empty input yields an empty (non-nil) slice.
func flattenBucketLifecycleRuleActionSlice(c *Client, i interface{}, res *Bucket) []BucketLifecycleRuleAction {
	a, ok := i.([]interface{})
	if !ok {
		return []BucketLifecycleRuleAction{}
	}
	if len(a) == 0 {
		return []BucketLifecycleRuleAction{}
	}
	items := make([]BucketLifecycleRuleAction, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenBucketLifecycleRuleAction(c, item.(map[string]interface{}), res))
	}
	return items
}
// expandBucketLifecycleRuleAction expands an instance of BucketLifecycleRuleAction
// into a JSON request object with "storageClass" and "type" keys. Empty fields
// are omitted; an empty struct expands to nil.
func expandBucketLifecycleRuleAction(c *Client, f *BucketLifecycleRuleAction, res *Bucket) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}
	out := map[string]interface{}{}
	if sc := f.StorageClass; !dcl.IsEmptyValueIndirect(sc) {
		out["storageClass"] = sc
	}
	if typ := f.Type; !dcl.IsEmptyValueIndirect(typ) {
		out["type"] = typ
	}
	return out, nil
}
// flattenBucketLifecycleRuleAction flattens an instance of BucketLifecycleRuleAction from a JSON
// response object. Returns nil for non-object input and the canonical empty
// sentinel for an empty object.
func flattenBucketLifecycleRuleAction(c *Client, i interface{}, res *Bucket) *BucketLifecycleRuleAction {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}
	r := &BucketLifecycleRuleAction{}
	if dcl.IsEmptyValueIndirect(i) {
		return EmptyBucketLifecycleRuleAction
	}
	r.StorageClass = dcl.FlattenString(m["storageClass"])
	r.Type = flattenBucketLifecycleRuleActionTypeEnum(m["type"])
	return r
}
// expandBucketLifecycleRuleConditionMap expands the contents of BucketLifecycleRuleCondition into a JSON
// request object. Nil input yields nil; entries expanding to nil are dropped.
func expandBucketLifecycleRuleConditionMap(c *Client, f map[string]BucketLifecycleRuleCondition, res *Bucket) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}
	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandBucketLifecycleRuleCondition(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}
	return items, nil
}
// expandBucketLifecycleRuleConditionSlice expands the contents of BucketLifecycleRuleCondition into a JSON
// request object, preserving element order (nil expansions included).
func expandBucketLifecycleRuleConditionSlice(c *Client, f []BucketLifecycleRuleCondition, res *Bucket) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}
	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandBucketLifecycleRuleCondition(c, &item, res)
		if err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	return items, nil
}
// flattenBucketLifecycleRuleConditionMap flattens the contents of BucketLifecycleRuleCondition from a JSON
// response object. Non-map or empty input yields an empty (non-nil) map.
func flattenBucketLifecycleRuleConditionMap(c *Client, i interface{}, res *Bucket) map[string]BucketLifecycleRuleCondition {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]BucketLifecycleRuleCondition{}
	}
	if len(a) == 0 {
		return map[string]BucketLifecycleRuleCondition{}
	}
	items := make(map[string]BucketLifecycleRuleCondition)
	for k, item := range a {
		// Elements are assumed to be JSON objects per the API response shape.
		items[k] = *flattenBucketLifecycleRuleCondition(c, item.(map[string]interface{}), res)
	}
	return items
}
// flattenBucketLifecycleRuleConditionSlice flattens the contents of BucketLifecycleRuleCondition from a JSON
// response object. Non-slice or empty input yields an empty (non-nil) slice.
func flattenBucketLifecycleRuleConditionSlice(c *Client, i interface{}, res *Bucket) []BucketLifecycleRuleCondition {
	a, ok := i.([]interface{})
	if !ok {
		return []BucketLifecycleRuleCondition{}
	}
	if len(a) == 0 {
		return []BucketLifecycleRuleCondition{}
	}
	items := make([]BucketLifecycleRuleCondition, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenBucketLifecycleRuleCondition(c, item.(map[string]interface{}), res))
	}
	return items
}
// expandBucketLifecycleRuleCondition expands an instance of BucketLifecycleRuleCondition into a JSON
// request object. Note the field-name translation: WithState is serialized
// under the API key "isLive".
func expandBucketLifecycleRuleCondition(c *Client, f *BucketLifecycleRuleCondition, res *Bucket) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}
	m := make(map[string]interface{})
	if v := f.Age; !dcl.IsEmptyValueIndirect(v) {
		m["age"] = v
	}
	if v := f.CreatedBefore; !dcl.IsEmptyValueIndirect(v) {
		m["createdBefore"] = v
	}
	// WithState goes through a custom expander that maps the enum onto the
	// boolean-valued "isLive" API field.
	if v, err := expandStorageBucketLifecycleWithState(c, f.WithState, res); err != nil {
		return nil, fmt.Errorf("error expanding WithState into isLive: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["isLive"] = v
	}
	// Uses a plain nil check (not IsEmptyValueIndirect) so an explicitly empty
	// slice is still sent to the API.
	if v := f.MatchesStorageClass; v != nil {
		m["matchesStorageClass"] = v
	}
	if v := f.NumNewerVersions; !dcl.IsEmptyValueIndirect(v) {
		m["numNewerVersions"] = v
	}
	return m, nil
}
// flattenBucketLifecycleRuleCondition flattens an instance of BucketLifecycleRuleCondition from a JSON
// response object. The API's "isLive" key is flattened back into WithState.
func flattenBucketLifecycleRuleCondition(c *Client, i interface{}, res *Bucket) *BucketLifecycleRuleCondition {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}
	r := &BucketLifecycleRuleCondition{}
	if dcl.IsEmptyValueIndirect(i) {
		return EmptyBucketLifecycleRuleCondition
	}
	r.Age = dcl.FlattenInteger(m["age"])
	r.CreatedBefore = dcl.FlattenString(m["createdBefore"])
	r.WithState = flattenStorageBucketLifecycleWithState(c, m["isLive"], res)
	r.MatchesStorageClass = dcl.FlattenStringSlice(m["matchesStorageClass"])
	r.NumNewerVersions = dcl.FlattenInteger(m["numNewerVersions"])
	return r
}
// expandBucketLoggingMap expands the contents of BucketLogging into a JSON
// request object. Nil input yields nil; entries expanding to nil are dropped.
func expandBucketLoggingMap(c *Client, f map[string]BucketLogging, res *Bucket) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}
	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandBucketLogging(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}
	return items, nil
}
// expandBucketLoggingSlice expands the contents of BucketLogging into a JSON
// request object, preserving element order (nil expansions included).
func expandBucketLoggingSlice(c *Client, f []BucketLogging, res *Bucket) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}
	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandBucketLogging(c, &item, res)
		if err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	return items, nil
}
// flattenBucketLoggingMap flattens the contents of BucketLogging from a JSON
// response object. Non-map or empty input yields an empty (non-nil) map.
func flattenBucketLoggingMap(c *Client, i interface{}, res *Bucket) map[string]BucketLogging {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]BucketLogging{}
	}
	if len(a) == 0 {
		return map[string]BucketLogging{}
	}
	items := make(map[string]BucketLogging)
	for k, item := range a {
		// Elements are assumed to be JSON objects per the API response shape.
		items[k] = *flattenBucketLogging(c, item.(map[string]interface{}), res)
	}
	return items
}
// flattenBucketLoggingSlice flattens the contents of BucketLogging from a JSON
// response object. Non-slice or empty input yields an empty (non-nil) slice.
func flattenBucketLoggingSlice(c *Client, i interface{}, res *Bucket) []BucketLogging {
	a, ok := i.([]interface{})
	if !ok {
		return []BucketLogging{}
	}
	if len(a) == 0 {
		return []BucketLogging{}
	}
	items := make([]BucketLogging, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenBucketLogging(c, item.(map[string]interface{}), res))
	}
	return items
}
// expandBucketLogging expands an instance of BucketLogging into a JSON request
// object with "logBucket" and "logObjectPrefix" keys. Empty fields are
// omitted; an empty struct expands to nil.
func expandBucketLogging(c *Client, f *BucketLogging, res *Bucket) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}
	out := map[string]interface{}{}
	if bucket := f.LogBucket; !dcl.IsEmptyValueIndirect(bucket) {
		out["logBucket"] = bucket
	}
	if prefix := f.LogObjectPrefix; !dcl.IsEmptyValueIndirect(prefix) {
		out["logObjectPrefix"] = prefix
	}
	return out, nil
}
// flattenBucketLogging flattens an instance of BucketLogging from a JSON
// response object. Returns nil for non-object input and the canonical empty
// sentinel for an empty object.
func flattenBucketLogging(c *Client, i interface{}, res *Bucket) *BucketLogging {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}
	r := &BucketLogging{}
	if dcl.IsEmptyValueIndirect(i) {
		return EmptyBucketLogging
	}
	r.LogBucket = dcl.FlattenString(m["logBucket"])
	r.LogObjectPrefix = dcl.FlattenString(m["logObjectPrefix"])
	return r
}
// expandBucketVersioningMap expands the contents of BucketVersioning into a JSON
// request object. Nil input yields nil; entries expanding to nil are dropped.
func expandBucketVersioningMap(c *Client, f map[string]BucketVersioning, res *Bucket) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}
	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandBucketVersioning(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}
	return items, nil
}
// expandBucketVersioningSlice expands the contents of BucketVersioning into a JSON
// request object, preserving element order (nil expansions included).
func expandBucketVersioningSlice(c *Client, f []BucketVersioning, res *Bucket) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}
	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandBucketVersioning(c, &item, res)
		if err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	return items, nil
}
// flattenBucketVersioningMap flattens the contents of BucketVersioning from a JSON
// response object. Non-map or empty input yields an empty (non-nil) map.
func flattenBucketVersioningMap(c *Client, i interface{}, res *Bucket) map[string]BucketVersioning {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]BucketVersioning{}
	}
	if len(a) == 0 {
		return map[string]BucketVersioning{}
	}
	items := make(map[string]BucketVersioning)
	for k, item := range a {
		// Elements are assumed to be JSON objects per the API response shape.
		items[k] = *flattenBucketVersioning(c, item.(map[string]interface{}), res)
	}
	return items
}
// flattenBucketVersioningSlice flattens the contents of BucketVersioning from a JSON
// response object. Non-slice or empty input yields an empty (non-nil) slice.
func flattenBucketVersioningSlice(c *Client, i interface{}, res *Bucket) []BucketVersioning {
	a, ok := i.([]interface{})
	if !ok {
		return []BucketVersioning{}
	}
	if len(a) == 0 {
		return []BucketVersioning{}
	}
	items := make([]BucketVersioning, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenBucketVersioning(c, item.(map[string]interface{}), res))
	}
	return items
}
// expandBucketVersioning expands an instance of BucketVersioning into a JSON
// request object with an "enabled" key. An empty struct expands to nil.
func expandBucketVersioning(c *Client, f *BucketVersioning, res *Bucket) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}
	out := map[string]interface{}{}
	if enabled := f.Enabled; !dcl.IsEmptyValueIndirect(enabled) {
		out["enabled"] = enabled
	}
	return out, nil
}
// flattenBucketVersioning flattens an instance of BucketVersioning from a JSON
// response object. Returns nil for non-object input and the canonical empty
// sentinel for an empty object.
func flattenBucketVersioning(c *Client, i interface{}, res *Bucket) *BucketVersioning {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}
	r := &BucketVersioning{}
	if dcl.IsEmptyValueIndirect(i) {
		return EmptyBucketVersioning
	}
	r.Enabled = dcl.FlattenBool(m["enabled"])
	return r
}
// expandBucketWebsiteMap expands the contents of BucketWebsite into a JSON
// request object. Nil input yields nil; entries expanding to nil are dropped.
func expandBucketWebsiteMap(c *Client, f map[string]BucketWebsite, res *Bucket) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}
	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandBucketWebsite(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}
	return items, nil
}
// expandBucketWebsiteSlice expands the contents of BucketWebsite into a JSON
// request object, preserving element order (nil expansions included).
func expandBucketWebsiteSlice(c *Client, f []BucketWebsite, res *Bucket) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}
	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandBucketWebsite(c, &item, res)
		if err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	return items, nil
}
// flattenBucketWebsiteMap flattens the contents of BucketWebsite from a JSON
// response object. Non-map or empty input yields an empty (non-nil) map.
func flattenBucketWebsiteMap(c *Client, i interface{}, res *Bucket) map[string]BucketWebsite {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]BucketWebsite{}
	}
	if len(a) == 0 {
		return map[string]BucketWebsite{}
	}
	items := make(map[string]BucketWebsite)
	for k, item := range a {
		// Elements are assumed to be JSON objects per the API response shape.
		items[k] = *flattenBucketWebsite(c, item.(map[string]interface{}), res)
	}
	return items
}
// flattenBucketWebsiteSlice flattens the contents of BucketWebsite from a JSON
// response object. Non-slice or empty input yields an empty (non-nil) slice.
func flattenBucketWebsiteSlice(c *Client, i interface{}, res *Bucket) []BucketWebsite {
	a, ok := i.([]interface{})
	if !ok {
		return []BucketWebsite{}
	}
	if len(a) == 0 {
		return []BucketWebsite{}
	}
	items := make([]BucketWebsite, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenBucketWebsite(c, item.(map[string]interface{}), res))
	}
	return items
}
// expandBucketWebsite expands an instance of BucketWebsite into a JSON request
// object with "mainPageSuffix" and "notFoundPage" keys. Empty fields are
// omitted; an empty struct expands to nil.
func expandBucketWebsite(c *Client, f *BucketWebsite, res *Bucket) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}
	m := make(map[string]interface{})
	if v := f.MainPageSuffix; !dcl.IsEmptyValueIndirect(v) {
		m["mainPageSuffix"] = v
	}
	if v := f.NotFoundPage; !dcl.IsEmptyValueIndirect(v) {
		m["notFoundPage"] = v
	}
	return m, nil
}
// flattenBucketWebsite flattens an instance of BucketWebsite from a JSON
// response object. Returns nil for non-object input and the canonical empty
// sentinel for an empty object.
func flattenBucketWebsite(c *Client, i interface{}, res *Bucket) *BucketWebsite {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}
	r := &BucketWebsite{}
	if dcl.IsEmptyValueIndirect(i) {
		return EmptyBucketWebsite
	}
	r.MainPageSuffix = dcl.FlattenString(m["mainPageSuffix"])
	r.NotFoundPage = dcl.FlattenString(m["notFoundPage"])
	return r
}
// flattenBucketLifecycleRuleActionTypeEnumMap flattens the contents of BucketLifecycleRuleActionTypeEnum from a JSON
// response object. Non-map or empty input yields an empty (non-nil) map.
func flattenBucketLifecycleRuleActionTypeEnumMap(c *Client, i interface{}, res *Bucket) map[string]BucketLifecycleRuleActionTypeEnum {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]BucketLifecycleRuleActionTypeEnum{}
	}
	if len(a) == 0 {
		return map[string]BucketLifecycleRuleActionTypeEnum{}
	}
	items := make(map[string]BucketLifecycleRuleActionTypeEnum)
	for k, item := range a {
		// item is already an interface{}; the redundant item.(interface{})
		// assertion was removed. NOTE(review): a non-string element still
		// panics on the nil dereference — assumed impossible per the API.
		items[k] = *flattenBucketLifecycleRuleActionTypeEnum(item)
	}
	return items
}
// flattenBucketLifecycleRuleActionTypeEnumSlice flattens the contents of BucketLifecycleRuleActionTypeEnum from a JSON
// response object. Non-slice or empty input yields an empty (non-nil) slice.
func flattenBucketLifecycleRuleActionTypeEnumSlice(c *Client, i interface{}, res *Bucket) []BucketLifecycleRuleActionTypeEnum {
	a, ok := i.([]interface{})
	if !ok {
		return []BucketLifecycleRuleActionTypeEnum{}
	}
	if len(a) == 0 {
		return []BucketLifecycleRuleActionTypeEnum{}
	}
	items := make([]BucketLifecycleRuleActionTypeEnum, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenBucketLifecycleRuleActionTypeEnum(item))
	}
	return items
}
// flattenBucketLifecycleRuleActionTypeEnum asserts that an interface is a string, and returns a
// pointer to a *BucketLifecycleRuleActionTypeEnum with the same value as that string.
// Returns nil when the input is not a string.
func flattenBucketLifecycleRuleActionTypeEnum(i interface{}) *BucketLifecycleRuleActionTypeEnum {
	s, ok := i.(string)
	if !ok {
		return nil
	}
	return BucketLifecycleRuleActionTypeEnumRef(s)
}
// flattenBucketLifecycleRuleConditionWithStateEnumMap flattens the contents of BucketLifecycleRuleConditionWithStateEnum from a JSON
// response object. Non-map or empty input yields an empty (non-nil) map.
func flattenBucketLifecycleRuleConditionWithStateEnumMap(c *Client, i interface{}, res *Bucket) map[string]BucketLifecycleRuleConditionWithStateEnum {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]BucketLifecycleRuleConditionWithStateEnum{}
	}
	if len(a) == 0 {
		return map[string]BucketLifecycleRuleConditionWithStateEnum{}
	}
	items := make(map[string]BucketLifecycleRuleConditionWithStateEnum)
	for k, item := range a {
		// item is already an interface{}; the redundant item.(interface{})
		// assertion was removed.
		items[k] = *flattenBucketLifecycleRuleConditionWithStateEnum(item)
	}
	return items
}
// flattenBucketLifecycleRuleConditionWithStateEnumSlice flattens the contents of BucketLifecycleRuleConditionWithStateEnum from a JSON
// response object. Non-slice or empty input yields an empty (non-nil) slice.
func flattenBucketLifecycleRuleConditionWithStateEnumSlice(c *Client, i interface{}, res *Bucket) []BucketLifecycleRuleConditionWithStateEnum {
	a, ok := i.([]interface{})
	if !ok {
		return []BucketLifecycleRuleConditionWithStateEnum{}
	}
	if len(a) == 0 {
		return []BucketLifecycleRuleConditionWithStateEnum{}
	}
	items := make([]BucketLifecycleRuleConditionWithStateEnum, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenBucketLifecycleRuleConditionWithStateEnum(item))
	}
	return items
}
// flattenBucketLifecycleRuleConditionWithStateEnum asserts that an interface is a string, and returns a
// pointer to a *BucketLifecycleRuleConditionWithStateEnum with the same value as that string.
// Returns nil when the input is not a string.
func flattenBucketLifecycleRuleConditionWithStateEnum(i interface{}) *BucketLifecycleRuleConditionWithStateEnum {
	s, ok := i.(string)
	if !ok {
		return nil
	}
	return BucketLifecycleRuleConditionWithStateEnumRef(s)
}
// flattenBucketStorageClassEnumMap flattens the contents of BucketStorageClassEnum from a JSON
// response object. Non-map or empty input yields an empty (non-nil) map.
func flattenBucketStorageClassEnumMap(c *Client, i interface{}, res *Bucket) map[string]BucketStorageClassEnum {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]BucketStorageClassEnum{}
	}
	if len(a) == 0 {
		return map[string]BucketStorageClassEnum{}
	}
	items := make(map[string]BucketStorageClassEnum)
	for k, item := range a {
		// item is already an interface{}; the redundant item.(interface{})
		// assertion was removed.
		items[k] = *flattenBucketStorageClassEnum(item)
	}
	return items
}
// flattenBucketStorageClassEnumSlice flattens the contents of BucketStorageClassEnum from a JSON
// response object. Non-slice or empty input yields an empty (non-nil) slice.
func flattenBucketStorageClassEnumSlice(c *Client, i interface{}, res *Bucket) []BucketStorageClassEnum {
	a, ok := i.([]interface{})
	if !ok {
		return []BucketStorageClassEnum{}
	}
	if len(a) == 0 {
		return []BucketStorageClassEnum{}
	}
	items := make([]BucketStorageClassEnum, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenBucketStorageClassEnum(item))
	}
	return items
}
// flattenBucketStorageClassEnum asserts that an interface is a string, and returns a
// pointer to a *BucketStorageClassEnum with the same value as that string.
// Returns nil when the input is not a string.
func flattenBucketStorageClassEnum(i interface{}) *BucketStorageClassEnum {
	s, ok := i.(string)
	if !ok {
		return nil
	}
	return BucketStorageClassEnumRef(s)
}
// This function returns a matcher that checks whether a serialized resource matches this resource
// in its parameters (as defined by the fields in a Get, which definitionally define resource
// identity). This is useful in extracting the element from a List call.
func (r *Bucket) matcher(c *Client) func([]byte) bool {
	return func(b []byte) bool {
		cr, err := unmarshalBucket(b, c, r)
		if err != nil {
			// Unparseable candidates never match.
			c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.")
			return false
		}
		// Compare URL-normalized forms so formatting differences in identity
		// fields do not cause spurious mismatches.
		nr := r.urlNormalized()
		ncr := cr.urlNormalized()
		c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr)
		// Identity fields: both nil counts as equal, exactly one nil as unequal.
		if nr.Project == nil && ncr.Project == nil {
			c.Config.Logger.Info("Both Project fields null - considering equal.")
		} else if nr.Project == nil || ncr.Project == nil {
			c.Config.Logger.Info("Only one Project field is null - considering unequal.")
			return false
		} else if *nr.Project != *ncr.Project {
			return false
		}
		if nr.Name == nil && ncr.Name == nil {
			c.Config.Logger.Info("Both Name fields null - considering equal.")
		} else if nr.Name == nil || ncr.Name == nil {
			c.Config.Logger.Info("Only one Name field is null - considering unequal.")
			return false
		} else if *nr.Name != *ncr.Name {
			return false
		}
		return true
	}
}
// bucketDiff describes a single change detected between the desired and actual
// state of a Bucket, and how to reconcile it.
type bucketDiff struct {
	// The diff should include one or the other of RequiresRecreate or UpdateOp.
	RequiresRecreate bool
	UpdateOp         bucketApiOperation
	FieldName        string // used for error logging
}
// convertFieldDiffsToBucketDiffs groups field diffs by the operation needed to
// apply them and returns one bucketDiff per operation name. The sentinel
// operation name "Recreate" marks the diff as requiring resource recreation.
func convertFieldDiffsToBucketDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]bucketDiff, error) {
	opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff)
	// Map each operation name to the field diffs associated with it.
	for _, fd := range fds {
		for _, ro := range fd.ResultingOperation {
			if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok {
				fieldDiffs = append(fieldDiffs, fd)
				opNamesToFieldDiffs[ro] = fieldDiffs
			} else {
				// Log only the first diff that triggers each operation.
				config.Logger.Infof("%s required due to diff: %v", ro, fd)
				opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd}
			}
		}
	}
	var diffs []bucketDiff
	// For each operation name, create a bucketDiff which contains the operation.
	for opName, fieldDiffs := range opNamesToFieldDiffs {
		// Use the first field diff's field name for logging required recreate error.
		diff := bucketDiff{FieldName: fieldDiffs[0].FieldName}
		if opName == "Recreate" {
			diff.RequiresRecreate = true
		} else {
			apiOp, err := convertOpNameToBucketApiOperation(opName, fieldDiffs, opts...)
			if err != nil {
				return diffs, err
			}
			diff.UpdateOp = apiOp
		}
		diffs = append(diffs, diff)
	}
	return diffs, nil
}
// convertOpNameToBucketApiOperation maps an operation name produced by the
// diff engine to a concrete bucketApiOperation carrying the field diffs.
// Unrecognized names are an error.
func convertOpNameToBucketApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (bucketApiOperation, error) {
	switch opName {
	case "updateBucketUpdateOperation":
		return &updateBucketUpdateOperation{FieldDiffs: fieldDiffs}, nil
	default:
		return nil, fmt.Errorf("no such operation with name: %v", opName)
	}
}
// extractBucketFields runs the per-subfield extract hooks over each nested
// object of the Bucket, substituting a scratch empty object for nil fields so
// the hooks can populate them; the field is written back only if the hook left
// it non-empty.
func extractBucketFields(r *Bucket) error {
	vLifecycle := r.Lifecycle
	if vLifecycle == nil {
		// note: explicitly not the empty object.
		vLifecycle = &BucketLifecycle{}
	}
	if err := extractBucketLifecycleFields(r, vLifecycle); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vLifecycle) {
		r.Lifecycle = vLifecycle
	}
	vLogging := r.Logging
	if vLogging == nil {
		// note: explicitly not the empty object.
		vLogging = &BucketLogging{}
	}
	if err := extractBucketLoggingFields(r, vLogging); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vLogging) {
		r.Logging = vLogging
	}
	vVersioning := r.Versioning
	if vVersioning == nil {
		// note: explicitly not the empty object.
		vVersioning = &BucketVersioning{}
	}
	if err := extractBucketVersioningFields(r, vVersioning); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vVersioning) {
		r.Versioning = vVersioning
	}
	vWebsite := r.Website
	if vWebsite == nil {
		// note: explicitly not the empty object.
		vWebsite = &BucketWebsite{}
	}
	if err := extractBucketWebsiteFields(r, vWebsite); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vWebsite) {
		r.Website = vWebsite
	}
	return nil
}
// extractBucketCorsFields is a no-op extract hook for BucketCors.
func extractBucketCorsFields(r *Bucket, o *BucketCors) error {
	return nil
}
// extractBucketLifecycleFields is a no-op extract hook for BucketLifecycle.
func extractBucketLifecycleFields(r *Bucket, o *BucketLifecycle) error {
	return nil
}
// extractBucketLifecycleRuleFields runs the extract hooks for the nested
// Action and Condition objects, substituting scratch empty objects for nil
// fields and writing them back only if left non-empty.
func extractBucketLifecycleRuleFields(r *Bucket, o *BucketLifecycleRule) error {
	vAction := o.Action
	if vAction == nil {
		// note: explicitly not the empty object.
		vAction = &BucketLifecycleRuleAction{}
	}
	if err := extractBucketLifecycleRuleActionFields(r, vAction); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vAction) {
		o.Action = vAction
	}
	vCondition := o.Condition
	if vCondition == nil {
		// note: explicitly not the empty object.
		vCondition = &BucketLifecycleRuleCondition{}
	}
	if err := extractBucketLifecycleRuleConditionFields(r, vCondition); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vCondition) {
		o.Condition = vCondition
	}
	return nil
}
// extractBucketLifecycleRuleActionFields is a no-op extract hook for BucketLifecycleRuleAction.
func extractBucketLifecycleRuleActionFields(r *Bucket, o *BucketLifecycleRuleAction) error {
	return nil
}
// extractBucketLifecycleRuleConditionFields is a no-op extract hook for BucketLifecycleRuleCondition.
func extractBucketLifecycleRuleConditionFields(r *Bucket, o *BucketLifecycleRuleCondition) error {
	return nil
}
// extractBucketLoggingFields is a no-op extract hook for BucketLogging.
func extractBucketLoggingFields(r *Bucket, o *BucketLogging) error {
	return nil
}
// extractBucketVersioningFields is a no-op extract hook for BucketVersioning.
func extractBucketVersioningFields(r *Bucket, o *BucketVersioning) error {
	return nil
}
// extractBucketWebsiteFields is a no-op extract hook for BucketWebsite.
func extractBucketWebsiteFields(r *Bucket, o *BucketWebsite) error {
	return nil
}
// postReadExtractBucketFields runs the post-read extract hooks over each
// nested object of the Bucket, substituting a scratch empty object for nil
// fields; the field is written back only if the hook left it non-empty.
func postReadExtractBucketFields(r *Bucket) error {
	vLifecycle := r.Lifecycle
	if vLifecycle == nil {
		// note: explicitly not the empty object.
		vLifecycle = &BucketLifecycle{}
	}
	if err := postReadExtractBucketLifecycleFields(r, vLifecycle); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vLifecycle) {
		r.Lifecycle = vLifecycle
	}
	vLogging := r.Logging
	if vLogging == nil {
		// note: explicitly not the empty object.
		vLogging = &BucketLogging{}
	}
	if err := postReadExtractBucketLoggingFields(r, vLogging); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vLogging) {
		r.Logging = vLogging
	}
	vVersioning := r.Versioning
	if vVersioning == nil {
		// note: explicitly not the empty object.
		vVersioning = &BucketVersioning{}
	}
	if err := postReadExtractBucketVersioningFields(r, vVersioning); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vVersioning) {
		r.Versioning = vVersioning
	}
	vWebsite := r.Website
	if vWebsite == nil {
		// note: explicitly not the empty object.
		vWebsite = &BucketWebsite{}
	}
	if err := postReadExtractBucketWebsiteFields(r, vWebsite); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vWebsite) {
		r.Website = vWebsite
	}
	return nil
}
// postReadExtractBucketCorsFields is a no-op post-read extract hook for BucketCors.
func postReadExtractBucketCorsFields(r *Bucket, o *BucketCors) error {
	return nil
}
// postReadExtractBucketLifecycleFields is a no-op post-read extract hook for BucketLifecycle.
func postReadExtractBucketLifecycleFields(r *Bucket, o *BucketLifecycle) error {
	return nil
}
// postReadExtractBucketLifecycleRuleFields runs the post-read extract hooks
// for the nested Action and Condition objects, substituting scratch empty
// objects for nil fields and writing them back only if left non-empty.
// Fixed to invoke the postReadExtract* hooks (it previously called the
// plain extract* hooks), matching postReadExtractBucketFields; both hook
// sets are currently no-ops, so observable behavior is unchanged.
func postReadExtractBucketLifecycleRuleFields(r *Bucket, o *BucketLifecycleRule) error {
	vAction := o.Action
	if vAction == nil {
		// note: explicitly not the empty object.
		vAction = &BucketLifecycleRuleAction{}
	}
	if err := postReadExtractBucketLifecycleRuleActionFields(r, vAction); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vAction) {
		o.Action = vAction
	}
	vCondition := o.Condition
	if vCondition == nil {
		// note: explicitly not the empty object.
		vCondition = &BucketLifecycleRuleCondition{}
	}
	if err := postReadExtractBucketLifecycleRuleConditionFields(r, vCondition); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vCondition) {
		o.Condition = vCondition
	}
	return nil
}
// postReadExtractBucketLifecycleRuleActionFields is a no-op post-read extract hook for BucketLifecycleRuleAction.
func postReadExtractBucketLifecycleRuleActionFields(r *Bucket, o *BucketLifecycleRuleAction) error {
	return nil
}
// postReadExtractBucketLifecycleRuleConditionFields is a no-op post-read extract hook for BucketLifecycleRuleCondition.
func postReadExtractBucketLifecycleRuleConditionFields(r *Bucket, o *BucketLifecycleRuleCondition) error {
	return nil
}
// postReadExtractBucketLoggingFields is a no-op post-read extract hook for BucketLogging.
func postReadExtractBucketLoggingFields(r *Bucket, o *BucketLogging) error {
	return nil
}
// postReadExtractBucketVersioningFields is a no-op post-read extract hook for BucketVersioning.
func postReadExtractBucketVersioningFields(r *Bucket, o *BucketVersioning) error {
	return nil
}
// postReadExtractBucketWebsiteFields is a no-op post-read extract hook for BucketWebsite.
func postReadExtractBucketWebsiteFields(r *Bucket, o *BucketWebsite) error {
	return nil
}
|
// Copyright 2017 Yahoo Holdings Inc.
// Licensed under the terms of the 3-Clause BSD License.
package provider
import (
"errors"
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/tools/cache"
)
var (
	// a is the shared ATS provider instance exercised by every test in this file.
	a = NewATSProvider()
)
// TestATSName verifies that the provider reports the canonical ATS name.
func TestATSName(t *testing.T) {
	// assert.Equal takes (t, expected, actual); expected goes first so failure
	// messages are not inverted.
	assert.Equal(t, ATS, a.Name(), "should return ATS")
}
// TestATSServesIngress verifies ServesIngress for the three ingress-class
// cases: annotation absent, annotation naming a different provider, and
// annotation naming ATS itself.
func TestATSServesIngress(t *testing.T) {
	tests := []struct {
		name     string
		input    *v1beta1.Ingress
		expected bool
	}{
		{
			"should return true when annotation not present",
			&v1beta1.Ingress{},
			true,
		},
		{
			"should return false when annotation set to different provider",
			&v1beta1.Ingress{
				ObjectMeta: v1.ObjectMeta{
					Name:      "test-ingress",
					Namespace: "test-namespace",
					Annotations: map[string]string{
						string(IngressClass): "other",
					},
				},
			},
			false,
		},
		{
			"should return true when ATS annotation is defined",
			&v1beta1.Ingress{
				ObjectMeta: v1.ObjectMeta{
					Name:      "test-ingress",
					Namespace: "test-namespace",
					Annotations: map[string]string{
						string(IngressClass): ATS,
					},
				},
			},
			true,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// Expected value first, per testify convention and for
			// consistency with TestATSGetDomains; the original passed
			// (actual, expected).
			assert.Equal(t, test.expected, a.ServesIngress(test.input), test.name)
		})
	}
}
// TestATSGetDomains verifies that GetDomains collects the default domain and
// every comma-separated alias from the ingress annotations, and returns an
// empty slice for an unannotated ingress.
func TestATSGetDomains(t *testing.T) {
	tests := []struct {
		name     string
		input    *v1beta1.Ingress
		expected []string
	}{
		{
			"should return empty for an empty ingress spec",
			&v1beta1.Ingress{},
			[]string{},
		},
		{
			// Note the space after the comma in the aliases annotation:
			// GetDomains is expected to trim it.
			"should return the domains for an ingress with default domain and aliases",
			&v1beta1.Ingress{
				ObjectMeta: v1.ObjectMeta{
					Name: "test-ingress",
					Annotations: map[string]string{
						string(DefaultDomain): "test1.company.com",
						string(Aliases):       "test2.company.com, test3.company.com",
						string(Ports):         "80",
					},
				},
			},
			[]string{
				"test1.company.com",
				"test2.company.com",
				"test3.company.com",
			},
		},
		{
			"should return the domains for an ingress only with default domain",
			&v1beta1.Ingress{
				ObjectMeta: v1.ObjectMeta{
					Name: "test-ingress",
					Annotations: map[string]string{
						string(DefaultDomain): "test1.company.com",
					},
				},
			},
			[]string{
				"test1.company.com",
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			assert.Equal(t, test.expected, a.GetDomains(test.input), test.name)
		})
	}
}
// TestATSDomainsIndexFunc verifies the cache index function: it must reject
// non-Ingress objects with an error, and otherwise return the ingress's
// domain list for indexing.
func TestATSDomainsIndexFunc(t *testing.T) {
	type output struct {
		domains []string
		err     error
	}
	tests := []struct {
		name     string
		input    interface{}
		expected output
	}{
		{
			// A Deployment stands in for "any non-Ingress resource".
			"should return error for a non Ingress interface",
			&v1beta1.Deployment{
				Spec: v1beta1.DeploymentSpec{
					Paused: true,
				},
			},
			output{
				nil,
				errors.New("Resource is not an Ingress kind."),
			},
		},
		{
			"should return empty for an empty ingress spec",
			&v1beta1.Ingress{
				ObjectMeta: v1.ObjectMeta{
					Name:        "test-ingress",
					Annotations: map[string]string{},
				},
			},
			output{
				[]string{},
				nil,
			},
		},
		{
			"should return domains for an ATS ingress with domains",
			&v1beta1.Ingress{
				ObjectMeta: v1.ObjectMeta{
					Name: "test-ingress",
					Annotations: map[string]string{
						string(DefaultDomain): "test1.company.com",
						string(Aliases):       "test2.company.com",
						string(Ports):         "80",
					},
				},
			},
			output{
				[]string{
					"test1.company.com",
					"test2.company.com",
				},
				nil,
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			var actual output
			actual.domains, actual.err = a.DomainsIndexFunc(test.input)
			assert.Equal(t, test.expected.err, actual.err, test.name)
			assert.Equal(t, test.expected.domains, actual.domains, test.name)
		})
	}
}
// TestATSValidateSemantics verifies the per-ingress semantic checks: an ATS
// ingress must carry a default backend, a ports annotation, and a
// default_domain annotation; non-ATS ingresses are ignored entirely.
func TestATSValidateSemantics(t *testing.T) {
	tests := []struct {
		name     string
		input    *v1beta1.Ingress
		expected error
	}{
		{
			"should pass for a non ATS ingress spec",
			&v1beta1.Ingress{
				ObjectMeta: v1.ObjectMeta{
					Name: "test-ingress",
					Annotations: map[string]string{
						string(IngressClass): Istio,
					},
				},
			},
			nil,
		},
		{
			"should pass for an ATS ingress with default domain, aliases and ports",
			&v1beta1.Ingress{
				ObjectMeta: v1.ObjectMeta{
					Name: "test-ingress",
					Annotations: map[string]string{
						string(DefaultDomain): "test1.company.com",
						string(Aliases):       "test2.company.com",
						string(Ports):         "80",
					},
				},
				Spec: v1beta1.IngressSpec{
					Backend: &v1beta1.IngressBackend{
						ServiceName: "test2-svc",
						ServicePort: intstr.FromInt(80),
					},
				},
			},
			nil,
		},
		{
			// Annotations are complete but Spec.Backend is missing.
			"should fail for an ATS ingress without default backend",
			&v1beta1.Ingress{
				ObjectMeta: v1.ObjectMeta{
					Name:      "test-ingress2",
					Namespace: "test-ns2",
					Annotations: map[string]string{
						string(DefaultDomain): "test1.company.com",
						string(Aliases):       "test2.company.com, test3.company.com",
						string(Ports):         "80",
					},
				},
			},
			errors.New("Ingress test-ingress2 in namespace test-ns2 does not have a default backend " +
				"specified."),
		},
		{
			"should fail for an ATS ingress without ports",
			&v1beta1.Ingress{
				ObjectMeta: v1.ObjectMeta{
					Name:      "test-ingress2",
					Namespace: "test-ns2",
					Annotations: map[string]string{
						string(DefaultDomain): "test1.company.com",
						string(Aliases):       "test2.company.com, test3.company.com",
					},
				},
				Spec: v1beta1.IngressSpec{
					Backend: &v1beta1.IngressBackend{
						ServiceName: "test2-svc",
						ServicePort: intstr.FromInt(80),
					},
				},
			},
			errors.New("Ingress test-ingress2 in namespace test-ns2 does not have a ports annotation " +
				"specified."),
		},
		{
			"should fail for an ATS ingress without a default domain",
			&v1beta1.Ingress{
				ObjectMeta: v1.ObjectMeta{
					Name:      "test-ingress2",
					Namespace: "test-ns2",
					Annotations: map[string]string{
						string(Aliases): "test2.company.com, test3.company.com",
						string(Ports):   "80",
					},
				},
				Spec: v1beta1.IngressSpec{
					Backend: &v1beta1.IngressBackend{
						ServiceName: "test2-svc",
						ServicePort: intstr.FromInt(80),
					},
				},
			},
			errors.New("Ingress test-ingress2 in namespace test-ns2 does not have a default_domain " +
				"annotation specified."),
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			err := a.ValidateSemantics(test.input)
			// Compare by message so wrapped error types still match.
			if test.expected == nil {
				assert.Nil(t, err, test.name)
			} else if assert.NotNil(t, err, test.name) {
				assert.Equal(t, test.expected.Error(), err.Error(), test.name)
			}
		})
	}
}
// TestATSValidateDomainClaims verifies cross-ingress domain-ownership checks
// against a pre-populated indexer: an ATS ingress may not claim a domain
// already owned by another ATS ingress, may update itself, and does not
// conflict with domains served by an Istio ingress.
func TestATSValidateDomainClaims(t *testing.T) {
	refIng := &v1beta1.Ingress{
		ObjectMeta: v1.ObjectMeta{
			Name:      "test-ingress-ref",
			Namespace: "test-ns-ref",
			Annotations: map[string]string{
				string(DefaultDomain): "test-ref1.company.com",
				string(Aliases):       "test-ref2.company.com",
				string(Ports):         "80",
			},
		},
		Spec: v1beta1.IngressSpec{
			Backend: &v1beta1.IngressBackend{
				ServiceName: "test2-svc",
				ServicePort: intstr.FromInt(80),
			},
		},
	}
	refIstioIng := &v1beta1.Ingress{
		ObjectMeta: v1.ObjectMeta{
			Name:      "test-istio-ingress-ref",
			Namespace: "test-ns-ref",
			Annotations: map[string]string{
				string(IngressClass): Istio,
			},
		},
		Spec: v1beta1.IngressSpec{
			Rules: []v1beta1.IngressRule{
				{
					Host: "test-istio-ref1.company.com",
				},
				{
					Host: "test-istio-ref2.company.com",
				},
			},
		},
	}
	helper.SetIndexer(cache.NewIndexer(
		cache.DeletionHandlingMetaNamespaceKeyFunc,
		cache.Indexers{
			ATS: helper.GetProviderByName(ATS).DomainsIndexFunc,
		}))
	// Fail fast if the fixtures cannot be indexed; the original discarded
	// these errors, which would have made every case below meaningless.
	assert.NoError(t, helper.indexer.Add(refIng))
	assert.NoError(t, helper.indexer.Add(refIstioIng))
	tests := []struct {
		name     string
		input    *v1beta1.Ingress
		expected error
	}{
		{
			"should pass for a non ATS ingress spec",
			&v1beta1.Ingress{
				ObjectMeta: v1.ObjectMeta{
					Name: "test-ingress",
					Annotations: map[string]string{
						string(IngressClass): Istio,
					},
				},
			},
			nil,
		},
		{
			"should pass for an ATS ingress with no duplicate domains",
			&v1beta1.Ingress{
				ObjectMeta: v1.ObjectMeta{
					Name:      "test-ingress",
					Namespace: "test-namespace",
					Annotations: map[string]string{
						string(DefaultDomain): "test1.company.com",
						string(Aliases):       "test2.company.com,test3.company.com",
						string(Ports):         "80",
					},
				},
				Spec: v1beta1.IngressSpec{
					Backend: &v1beta1.IngressBackend{
						ServiceName: "test2-svc",
						ServicePort: intstr.FromInt(80),
					},
				},
			},
			nil,
		},
		{
			// Same name/namespace as refIng: re-claiming your own domains
			// on update must be allowed.
			"should pass for an ATS ingress update on same object",
			&v1beta1.Ingress{
				ObjectMeta: v1.ObjectMeta{
					Name:      "test-ingress-ref",
					Namespace: "test-ns-ref",
					Annotations: map[string]string{
						string(DefaultDomain): "test-ref1.company.com",
						string(Aliases):       "test-ref2.company.com, test-ref3.company.com",
						string(Ports):         "80",
					},
				},
				Spec: v1beta1.IngressSpec{
					Backend: &v1beta1.IngressBackend{
						ServiceName: "test2-svc",
						ServicePort: intstr.FromInt(80),
					},
				},
			},
			nil,
		},
		{
			"should fail for an ATS ingress with duplicate domains",
			&v1beta1.Ingress{
				ObjectMeta: v1.ObjectMeta{
					Name:      "test-ingress",
					Namespace: "test-namespace",
					Annotations: map[string]string{
						string(DefaultDomain): "test1.company.com",
						string(Aliases):       "test2.company.com,test-ref2.company.com",
						string(Ports):         "80",
					},
				},
				Spec: v1beta1.IngressSpec{
					Backend: &v1beta1.IngressBackend{
						ServiceName: "test2-svc",
						ServicePort: intstr.FromInt(80),
					},
				},
			},
			errors.New("Domain test-ref2.company.com already exists. Ingress test-ingress-ref in namespace " +
				"test-ns-ref owns this domain."),
		},
		{
			"should fail for an ATS ingress with duplicate domains on the same namespace",
			&v1beta1.Ingress{
				ObjectMeta: v1.ObjectMeta{
					Name:      "test-ingress",
					Namespace: "test-ns-ref",
					Annotations: map[string]string{
						string(DefaultDomain): "test1.company.com",
						string(Aliases):       "test2.company.com,test-ref2.company.com",
						string(Ports):         "80",
					},
				},
				Spec: v1beta1.IngressSpec{
					Backend: &v1beta1.IngressBackend{
						ServiceName: "test2-svc",
						ServicePort: intstr.FromInt(80),
					},
				},
			},
			errors.New("Domain test-ref2.company.com already exists. Ingress test-ingress-ref in namespace " +
				"test-ns-ref owns this domain."),
		},
		{
			// Only ATS-owned domains conflict; Istio hosts are not claimed.
			"should pass for an ATS ingress with hosts same as Istio hosts",
			&v1beta1.Ingress{
				ObjectMeta: v1.ObjectMeta{
					Name:      "test-ingress2",
					Namespace: "test-ns2",
					Annotations: map[string]string{
						string(DefaultDomain): "test-istio-ref1.company.com",
						string(Aliases):       "test-istio-ref2.company.com",
						string(Ports):         "80",
					},
				},
				Spec: v1beta1.IngressSpec{
					Backend: &v1beta1.IngressBackend{
						ServiceName: "test2-svc",
						ServicePort: intstr.FromInt(80),
					},
				},
			},
			nil,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			err := a.ValidateDomainClaims(test.input)
			if test.expected == nil {
				assert.Nil(t, err, test.name)
			} else if assert.NotNil(t, err, test.name) {
				assert.Equal(t, test.expected.Error(), err.Error(), test.name)
			}
		})
	}
	// Clean up the shared indexer; surface (rather than ignore) failures.
	assert.NoError(t, helper.indexer.Delete(refIng))
	assert.NoError(t, helper.indexer.Delete(refIstioIng))
}
|
package main
import (
"flag"
"os"
"github.com/projectdiscovery/gologger"
"github.com/projectdiscovery/httpx/common/customheader"
customport "github.com/projectdiscovery/httpx/common/customports"
"github.com/projectdiscovery/httpx/common/fileutil"
)
// Options contains the configuration options for an httpx run, populated
// from command line flags by ParseOptions.
// (The previous comment said "chaos client", which does not match this tool.)
type Options struct {
	RawRequestFile      string
	VHost               bool
	Smuggling           bool
	ExtractTitle        bool
	StatusCode          bool
	ContentLength       bool
	Retries             int
	Threads             int
	Timeout             int
	CustomHeaders       customheader.CustomHeaders
	CustomPorts         customport.CustomPorts
	Output              string
	FollowRedirects     bool
	StoreResponse       bool
	StoreResponseDir    string
	HttpProxy           string
	SocksProxy          string
	JSONOutput          bool
	InputFile           string
	Method              string
	Silent              bool
	Version             bool
	Verbose             bool
	NoColor             bool
	OutputServerHeader  bool
	// NOTE(review): unexported unlike every other field — confirm whether
	// this is intentional (it is only used with the -json output mode).
	responseInStdout    bool
	FollowHostRedirects bool
}
// ParseOptions parses the command line options for application.
// It registers every flag, parses the arguments, configures logging output,
// prints the banner, handles -version (exits), validates the remaining
// options (fatal on failure), and returns the populated Options.
func ParseOptions() *Options {
	options := &Options{}
	flag.IntVar(&options.Threads, "threads", 50, "Number of threads")
	flag.IntVar(&options.Retries, "retries", 0, "Number of retries")
	flag.IntVar(&options.Timeout, "timeout", 5, "Timeout in seconds")
	flag.StringVar(&options.Output, "o", "", "File to write output to (optional)")
	flag.BoolVar(&options.VHost, "vhost", false, "Check for VHOSTs")
	flag.BoolVar(&options.ExtractTitle, "title", false, "Extracts title")
	flag.BoolVar(&options.StatusCode, "status-code", false, "Extracts Status Code")
	// Repeatable flags implement flag.Value via their custom types.
	flag.Var(&options.CustomHeaders, "H", "Custom Header")
	flag.Var(&options.CustomPorts, "ports", "ports range (nmap syntax: eg 1,2-10,11)")
	flag.BoolVar(&options.ContentLength, "content-length", false, "Content Length")
	flag.BoolVar(&options.StoreResponse, "store-response", false, "Store Response as domain.txt")
	flag.StringVar(&options.StoreResponseDir, "store-response-dir", ".", "Store Response Directory (default current directory)")
	flag.BoolVar(&options.FollowRedirects, "follow-redirects", false, "Follow Redirects")
	flag.BoolVar(&options.FollowHostRedirects, "follow-host-redirects", false, "Only follow redirects on the same host")
	flag.StringVar(&options.HttpProxy, "http-proxy", "", "Http Proxy")
	flag.BoolVar(&options.JSONOutput, "json", false, "JSON Output")
	flag.StringVar(&options.InputFile, "l", "", "File containing domains")
	flag.StringVar(&options.Method, "x", "GET", "Request Method")
	flag.BoolVar(&options.Silent, "silent", false, "Silent mode")
	flag.BoolVar(&options.Version, "version", false, "Show version of httpx")
	flag.BoolVar(&options.Verbose, "verbose", false, "Verbose Mode")
	flag.BoolVar(&options.NoColor, "no-color", false, "No Color")
	flag.BoolVar(&options.OutputServerHeader, "web-server", false, "Prints out the Server header content")
	flag.BoolVar(&options.responseInStdout, "response-in-json", false, "Server response directly in the tool output (-json only)")
	flag.Parse()
	// Read the inputs and configure the logging
	options.configureOutput()
	showBanner()
	if options.Version {
		gologger.Infof("Current Version: %s\n", Version)
		os.Exit(0)
	}
	options.validateOptions()
	return options
}
// validateOptions checks that the parsed options are usable; it terminates
// the process via gologger.Fatalf when an input file was given but is absent.
func (options *Options) validateOptions() {
	if options.InputFile == "" {
		return
	}
	if !fileutil.FileExists(options.InputFile) {
		gologger.Fatalf("File %s does not exist!\n", options.InputFile)
	}
}
// configureOutput configures the logger from the parsed flags: silent mode
// wins over verbose mode, and colors can be disabled independently.
func (options *Options) configureOutput() {
	switch {
	case options.Silent:
		// Silent overrides verbose, matching the original assignment order.
		gologger.MaxLevel = gologger.Silent
	case options.Verbose:
		gologger.MaxLevel = gologger.Verbose
	}
	if options.NoColor {
		gologger.UseColors = false
	}
}
|
// +build windows
package distribution
import (
"encoding/json"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/docker/image"
)
// setupBaseLayer rewrites the oldest entry of a schema1 manifest history so
// that its v1 compatibility JSON points at the Windows base layer: the entry
// is decoded, its "parent" field is set to the encoded base layer ID, and the
// entry is re-encoded in place.
func setupBaseLayer(history []schema1.History, rootFS image.RootFS) error {
	last := len(history) - 1
	var cfg map[string]*json.RawMessage
	if err := json.Unmarshal([]byte(history[last].V1Compatibility), &cfg); err != nil {
		return err
	}
	rawID, err := json.Marshal(rootFS.BaseLayerID())
	if err != nil {
		return err
	}
	cfg["parent"] = (*json.RawMessage)(&rawID)
	encoded, err := json.Marshal(cfg)
	if err != nil {
		return err
	}
	history[last].V1Compatibility = string(encoded)
	return nil
}
|
package main
import (
"text/template"
"os"
"fmt"
)
// bikes describes a bike by display name and numeric model code.
type bikes struct{
	Name string
	Model int
}
// cars describes a car by display name and numeric model code.
type cars struct{
	Name string
	Model int
}
// vehicles groups the bike and car slices handed to the template as one value.
type vehicles struct {
	Bike1 []bikes
	Car1  []cars
}
// file holds the parsed template, loaded once at program start.
var file *template.Template

// init parses passstruct.gohtml; template.Must panics if the file is missing
// or contains a syntax error.
func init(){
	file = template.Must(template.ParseFiles("passstruct.gohtml"))
}
// main builds a vehicles value from two bikes and two cars and renders it to
// stdout with the pre-parsed template.
func main() {
	fz := bikes{"Fz", 12345}
	duke := bikes{"Duke", 67896}
	i10 := cars{"i10", 7476}
	santro := cars{"santro", 5789}
	v := vehicles{
		Bike1: []bikes{fz, duke},
		Car1:  []cars{i10, santro},
	}
	// The .gohtml template ranges over both slices and prints each struct's
	// fields individually.
	if err := file.Execute(os.Stdout, v); err != nil {
		fmt.Println(err)
	}
}
|
package stats
import (
"sync"
"testing"
)
// benchmarkChannelsRoutine performs 6*b.N channel-based Increment calls
// spread over three buckets, then signals completion on e.
func benchmarkChannelsRoutine(b *testing.B, e chan bool) {
	for i := 0; i < b.N; i++ {
		Increment("abc123", 5)
		Increment("def456", 5)
		Increment("ghi789", 5)
		Increment("abc123", 5)
		Increment("def456", 5)
		Increment("ghi789", 5)
	}
	// Unbuffered channel: the benchmark blocks until this send is received.
	e <- true
}
// BenchmarkChannels measures channel-based counter increments performed by
// five concurrent goroutines.
func BenchmarkChannels(b *testing.B) {
	b.StopTimer()
	CounterInitialize()
	done := make(chan bool)
	b.StartTimer()
	const workers = 5
	for i := 0; i < workers; i++ {
		go benchmarkChannelsRoutine(b, done)
	}
	// Wait for every worker to signal completion.
	for i := 0; i < workers; i++ {
		<-done
	}
}
// mux guards all access to the shared counter map m.
var mux sync.Mutex

// m holds the mutex-benchmark counters keyed by bucket name; it is
// initialized by BenchmarkMutex before use.
var m map[string]int

// benchmarkMutexIncrement adds value to the named bucket under the mutex.
func benchmarkMutexIncrement(bucket string, value int) {
	mux.Lock()
	defer mux.Unlock()
	m[bucket] += value
}
// benchmarkMutexRoutine performs 6*b.N mutex-guarded increments spread over
// three buckets, then signals completion on e.
func benchmarkMutexRoutine(b *testing.B, e chan bool) {
	for i := 0; i < b.N; i++ {
		benchmarkMutexIncrement("abc123", 5)
		benchmarkMutexIncrement("def456", 5)
		benchmarkMutexIncrement("ghi789", 5)
		benchmarkMutexIncrement("abc123", 5)
		benchmarkMutexIncrement("def456", 5)
		benchmarkMutexIncrement("ghi789", 5)
	}
	e <- true
}
// BenchmarkMutex measures mutex-guarded map increments performed by five
// concurrent goroutines, mirroring BenchmarkChannels so the two results are
// directly comparable.
//
// The original additionally ran the same 6*b.N increments serially inside the
// timed region before launching the goroutines — work BenchmarkChannels does
// not do — which inflated this benchmark and made the comparison invalid;
// that leftover loop has been removed.
func BenchmarkMutex(b *testing.B) {
	b.StopTimer()
	m = make(map[string]int)
	e := make(chan bool)
	b.StartTimer()
	go benchmarkMutexRoutine(b, e)
	go benchmarkMutexRoutine(b, e)
	go benchmarkMutexRoutine(b, e)
	go benchmarkMutexRoutine(b, e)
	go benchmarkMutexRoutine(b, e)
	<-e
	<-e
	<-e
	<-e
	<-e
}
|
package task
import (
"bankBigData/AutomaticTask/db"
"bankBigData/AutomaticTask/dbConfig/tableTaskFile"
"bankBigData/AutomaticTask/entity/config"
"bankBigData/_public/ftp"
"bankBigData/_public/log"
"bankBigData/_public/util"
"bufio"
"fmt"
"gitee.com/johng/gf/g"
"io"
"os"
"strconv"
"strings"
"sync"
)
// Structure describes one column of a table structure file:
// its zero-based position, lower-cased key, and display name.
type Structure struct {
	Index int    `json:"index"`
	Key   string `json:"key"`
	Name  string `json:"name"`
}
// ReadStructureFile reads the table-structure file for the given task entry
// (identified by table name and date) and returns one Structure per line.
// Start/end timestamps are recorded on the task-file row around the scan.
func ReadStructureFile(entityFile c_entity.TableTaskFile, filePath string) ([]Structure, error) {
	structureArr := []Structure{}
	log.Instance().Println("正在分析结构文件")
	fileData, err := os.Open(filePath)
	if err != nil {
		return structureArr, err
	}
	// Close only after a successful Open; the original deferred Close before
	// the error check, calling Close on a nil *os.File when Open failed.
	defer fileData.Close()
	_, _ = dbc_tableTaskFile.Update(entityFile.Date, entityFile.FileName, g.Map{"db_do_start_time": util.GetLocalNowTimeStr()}, g.Map{})
	buf := bufio.NewReader(fileData)
	for {
		// Read one line; on io.EOF, ReadBytes may still return a final
		// partial line (no trailing '\n'), which the original silently
		// dropped.
		line, err := buf.ReadBytes('\n')
		if len(line) > 0 {
			// Convert GBK to UTF-8, then parse the line into a Structure.
			structureArr = append(structureArr, FormatStructureLineData(util.ConvertToString(string(line), "GBK", "UTF-8")))
		}
		if err != nil {
			break
		}
	}
	_, _ = dbc_tableTaskFile.Update(entityFile.Date, entityFile.FileName, g.Map{"db_do_end_time": util.GetLocalNowTimeStr()}, g.Map{})
	return structureArr, nil
}
// readTableDataFile reads a table data file line by line, parses each row
// against the structure definition, batches rows into g.Lists of 500, and
// streams every 10 batches (plus one final flush) to the consumer through the
// readNum/listMap channel pair. Both channels are closed on return.
func readTableDataFile(entityFile, dataFile c_entity.TableTaskFile, readNum chan int, listMap chan []g.List) {
	e := error(nil)
	defer close(listMap)
	defer close(readNum)
	hasDataFile := false
	hasEntityFile := false
	dataFileStr := dataFile.FileName
	// Strip the ".gz" suffix — presumably the file has already been
	// decompressed at this point; confirm against the download step.
	if dataFile.IsGz {
		dataFileStr = dataFileStr[:len(dataFileStr)-3]
	}
	basePath := strings.Join([]string{pub_ftp.FtpFolder, entityFile.Date}, "/")
	dataFilePath := strings.Join([]string{basePath, dataFileStr}, "/")
	entityFilePath := strings.Join([]string{basePath, entityFile.FileName}, "/")
	hasEntityFile, _ = util.PathExists(entityFilePath)
	hasDataFile, _ = util.PathExists(dataFilePath)
	structure := []Structure{}
	fileData := &os.File{}
	temp := g.List{}
	box := []g.List{}
	if hasEntityFile && hasDataFile {
		structure, e = ReadStructureFile(entityFile, entityFilePath)
		if len(structure) > 0 && e == nil {
			log.Instance().Println("正在分析数据文件")
			// Open the data file and stamp the processing start time.
			fileData, e = os.Open(dataFilePath)
			_, _ = dbc_tableTaskFile.Update(dataFile.Date, dataFile.FileName, g.Map{"db_do_start_time": util.GetLocalNowTimeStr()}, g.Map{})
			defer fileData.Close()
		}
		if len(structure) > 0 && e == nil {
			buf := bufio.NewReader(fileData)
			num := 0
			tempNum := 0
			for num = 0; true; num++ {
				// Read one line.
				// NOTE(review): a final line without a trailing '\n' is
				// discarded here (ReadBytes returns it together with io.EOF)
				// — confirm data files always end with a newline.
				line, err := buf.ReadBytes('\n')
				if err != nil || err == io.EOF {
					break
				}
				// Skip rows already processed in a previous (resumed) run.
				if num < dataFile.LineNum {
					continue
				}
				// Every 500 rows, seal the current batch.
				if num != 0 && num%500 == 0 {
					box = append(box, temp)
					temp = g.List{}
					tempNum += 1
				}
				// Every 10 batches, hand them to the consumer.
				if tempNum == 10 {
					tempNum = 0
					readNum <- num
					listMap <- box
					box = []g.List{}
				}
				// Convert GBK to UTF-8 and map the row onto the structure.
				temp = append(temp, FormatTableLineData(util.ConvertToString(string(line), "GBK", "UTF-8"), structure))
			}
			log.Instance().Println("本次分析数据数量:", num)
			// Flush any remaining partial batch.
			if len(temp) > 0 {
				box = append(box, temp)
			}
			if len(box) > 0 {
				readNum <- num
				listMap <- box
			}
		}
	} else {
		log.Instance().Println("文件不存在:", entityFilePath, dataFilePath)
	}
	structure = nil
	fileData = nil
	temp = nil
	box = nil
}
// WriteData drains batches from the readNum/listMap channel pair and writes
// each batch to the database concurrently ("add" → AddUpdate, anything else →
// AllUpdate), recording progress and failures on the task-file row. When both
// channels are exhausted it reports overall failure status on err.
func WriteData(tbMap c_entity.Table, dataMap c_entity.TableTaskFile, readNum chan int, listMap chan []g.List, err chan bool) {
	e := error(nil)
	wg := sync.WaitGroup{}
	for {
		rv, rok := <-readNum
		if list, ok := <-listMap; ok && rok && len(list) > 0 {
			swg := sync.WaitGroup{}
			for _, v := range list {
				if len(v) == 0 {
					continue
				}
				wg.Add(1)
				swg.Add(1)
				// Pass the batch as an argument: the original closure
				// captured the shared loop variable v, so concurrently
				// running writers could all observe the same (last) batch.
				go func(rows g.List) {
					hasErr := error(nil)
					switch tbMap.Type {
					case "add":
						hasErr = db.AddUpdate(tbMap.TableName, rows)
					default:
						hasErr = db.AllUpdate(tbMap.TableName, rows)
					}
					if hasErr != nil {
						log.Instance().Error("写表错误:", tbMap.TableName, " 原因:", hasErr)
						_, _ = dbc_tableTaskFile.Update(dataMap.Date, dataMap.FileName, g.Map{"has_err": 1, "err_msg": fmt.Sprintf("%v", hasErr)}, g.Map{})
					}
					wg.Done()
					swg.Done()
				}(v)
			}
			swg.Wait()
			// NOTE(review): e is never assigned anywhere, so this break is
			// currently unreachable and err always reports false — confirm
			// whether write failures were meant to set e.
			if e != nil {
				break
			}
			// Persist the high-water mark so a restart can resume.
			_, _ = dbc_tableTaskFile.Update(dataMap.Date, dataMap.FileName, g.Map{"line_num": rv}, g.Map{})
		} else {
			break
		}
	}
	wg.Wait()
	err <- (e != nil)
}
// FormatStructureLineData parses one "|"-separated structure line into a
// Structure: the first field is a 1-based column index (stored zero-based;
// an unparsable index yields -1), the second field is the column key
// (lower-cased), and the last field is the column name with newlines removed.
func FormatStructureLineData(str string) Structure {
	parts := strings.Split(str, "|")
	idx, convErr := strconv.Atoi(parts[0])
	if convErr != nil {
		idx = 0
	}
	key := strings.ToLower(parts[1])
	name := strings.Replace(parts[len(parts)-1], "\n", "", -1)
	return Structure{Index: idx - 1, Key: key, Name: name}
}
// FormatTableLineData splits one "|$|"-separated data line into a g.Map keyed
// by the structure's column keys; the trailing newline is stripped from the
// last field.
func FormatTableLineData(str string, structure []Structure) g.Map {
	tableData := g.Map{}
	tableArr := strings.Split(str, "|$|")
	tableArr[len(tableArr)-1] = strings.Replace(tableArr[len(tableArr)-1], "\n", "", -1)
	for index, item := range structure {
		// Guard against short rows: the original indexed tableArr blindly and
		// panicked with index-out-of-range when a line had fewer columns than
		// the structure definition.
		if index >= len(tableArr) {
			break
		}
		// item.Key is already a string; the original's string(item.Key)
		// conversion was a no-op.
		tableData[item.Key] = tableArr[index]
	}
	return tableData
}
|
package app
import (
"net/http"
"github.com/gin-gonic/gin"
)
// pongHandler writes a plain-text "pong" response and sets the "rendered"
// flag on the context (presumably read by rendering middleware — confirm).
func pongHandler(c *gin.Context) {
	c.Set("rendered", true)
	c.String(http.StatusOK, "pong")
}
// blankHandler only tags the context with a controller/action pair; it writes
// no response body itself.
func blankHandler(c *gin.Context) {
	c.Set("controller", `blank`)
	c.Set("action", `index`)
}
// simplePugHandler loads all users from the database and exposes them to the
// view layer through the "variables" map on the gin context.
func simplePugHandler(c *gin.Context) {
	db, err := NewGormDB(c)
	if err != nil {
		// The original discarded this error and then called Close/Find on a
		// possibly-nil handle, which would panic; abort the request instead.
		_ = c.AbortWithError(http.StatusInternalServerError, err)
		return
	}
	defer db.Close()
	var users []User
	db.Debug().Find(&users)
	variables := map[string]interface{}{}
	variables[`users`] = users
	c.Set(`variables`, variables)
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package beta
import (
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
)
// DCLServerTlsPolicySchema returns the declarative-client-library schema for
// the NetworkSecurity ServerTlsPolicy resource: resource metadata, the CRUD
// path definitions, and the full property tree used for (de)serialization.
func DCLServerTlsPolicySchema() *dcl.Schema {
	return &dcl.Schema{
		Info: &dcl.Info{
			Title:       "NetworkSecurity/ServerTlsPolicy",
			Description: "The NetworkSecurity ServerTlsPolicy resource",
			StructName:  "ServerTlsPolicy",
			HasIAM:      true,
		},
		// CRUD endpoints; list and delete-all are scoped by project+location.
		Paths: &dcl.Paths{
			Get: &dcl.Path{
				Description: "The function used to get information about a ServerTlsPolicy",
				Parameters: []dcl.PathParameters{
					dcl.PathParameters{
						Name:        "serverTlsPolicy",
						Required:    true,
						Description: "A full instance of a ServerTlsPolicy",
					},
				},
			},
			Apply: &dcl.Path{
				Description: "The function used to apply information about a ServerTlsPolicy",
				Parameters: []dcl.PathParameters{
					dcl.PathParameters{
						Name:        "serverTlsPolicy",
						Required:    true,
						Description: "A full instance of a ServerTlsPolicy",
					},
				},
			},
			Delete: &dcl.Path{
				Description: "The function used to delete a ServerTlsPolicy",
				Parameters: []dcl.PathParameters{
					dcl.PathParameters{
						Name:        "serverTlsPolicy",
						Required:    true,
						Description: "A full instance of a ServerTlsPolicy",
					},
				},
			},
			DeleteAll: &dcl.Path{
				Description: "The function used to delete all ServerTlsPolicy",
				Parameters: []dcl.PathParameters{
					dcl.PathParameters{
						Name:     "project",
						Required: true,
						Schema: &dcl.PathParametersSchema{
							Type: "string",
						},
					},
					dcl.PathParameters{
						Name:     "location",
						Required: true,
						Schema: &dcl.PathParametersSchema{
							Type: "string",
						},
					},
				},
			},
			List: &dcl.Path{
				Description: "The function used to list information about many ServerTlsPolicy",
				Parameters: []dcl.PathParameters{
					dcl.PathParameters{
						Name:     "project",
						Required: true,
						Schema: &dcl.PathParametersSchema{
							Type: "string",
						},
					},
					dcl.PathParameters{
						Name:     "location",
						Required: true,
						Schema: &dcl.PathParametersSchema{
							Type: "string",
						},
					},
				},
			},
		},
		// The resource's full property schema.
		Components: &dcl.Components{
			Schemas: map[string]*dcl.Component{
				"ServerTlsPolicy": &dcl.Component{
					Title:           "ServerTlsPolicy",
					ID:              "projects/{{project}}/locations/{{location}}/serverTlsPolicies/{{name}}",
					ParentContainer: "project",
					LabelsField:     "labels",
					HasCreate:       true,
					HasIAM:          true,
					SchemaProperty: dcl.Property{
						Type: "object",
						Required: []string{
							"name",
							"project",
							"location",
						},
						Properties: map[string]*dcl.Property{
							"allowOpen": &dcl.Property{
								Type:        "boolean",
								GoName:      "AllowOpen",
								Description: "Optional. Determines if server allows plaintext connections. If set to true, server allows plain text connections. By default, it is set to false. This setting is not exclusive of other encryption modes. For example, if allow_open and mtls_policy are set, server allows both plain text and mTLS connections. See documentation of other encryption modes to confirm compatibility.",
							},
							"createTime": &dcl.Property{
								Type:        "string",
								Format:      "date-time",
								GoName:      "CreateTime",
								ReadOnly:    true,
								Description: "Output only. The timestamp when the resource was created.",
								Immutable:   true,
							},
							"description": &dcl.Property{
								Type:        "string",
								GoName:      "Description",
								Description: "Optional. Free-text description of the resource.",
							},
							"labels": &dcl.Property{
								Type: "object",
								AdditionalProperties: &dcl.Property{
									Type: "string",
								},
								GoName:      "Labels",
								Description: "Optional. Set of label tags associated with the resource.",
							},
							"location": &dcl.Property{
								Type:        "string",
								GoName:      "Location",
								Description: "The location for the resource",
								Immutable:   true,
							},
							"mtlsPolicy": &dcl.Property{
								Type:        "object",
								GoName:      "MtlsPolicy",
								GoType:      "ServerTlsPolicyMtlsPolicy",
								Description: "Optional. Defines a mechanism to provision peer validation certificates for peer to peer authentication (Mutual TLS - mTLS). If not specified, client certificate will not be requested. The connection is treated as TLS and not mTLS. If allow_open and mtls_policy are set, server allows both plain text and mTLS connections.",
								Required: []string{
									"clientValidationCa",
								},
								Properties: map[string]*dcl.Property{
									"clientValidationCa": &dcl.Property{
										Type:        "array",
										GoName:      "ClientValidationCa",
										Description: "Required. Defines the mechanism to obtain the Certificate Authority certificate to validate the client certificate.",
										SendEmpty:   true,
										ListType:    "list",
										Items: &dcl.Property{
											Type:   "object",
											GoType: "ServerTlsPolicyMtlsPolicyClientValidationCa",
											Properties: map[string]*dcl.Property{
												// certificateProviderInstance and grpcEndpoint are
												// mutually exclusive (see Conflicts below).
												"certificateProviderInstance": &dcl.Property{
													Type:        "object",
													GoName:      "CertificateProviderInstance",
													GoType:      "ServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstance",
													Description: "The certificate provider instance specification that will be passed to the data plane, which will be used to load necessary credential information.",
													Conflicts: []string{
														"grpcEndpoint",
													},
													Required: []string{
														"pluginInstance",
													},
													Properties: map[string]*dcl.Property{
														"pluginInstance": &dcl.Property{
															Type:        "string",
															GoName:      "PluginInstance",
															Description: "Required. Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to \"google_cloud_private_spiffe\" to use Certificate Authority Service certificate provider instance.",
														},
													},
												},
												"grpcEndpoint": &dcl.Property{
													Type:        "object",
													GoName:      "GrpcEndpoint",
													GoType:      "ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpoint",
													Description: "gRPC specific configuration to access the gRPC server to obtain the CA certificate.",
													Conflicts: []string{
														"certificateProviderInstance",
													},
													Required: []string{
														"targetUri",
													},
													Properties: map[string]*dcl.Property{
														"targetUri": &dcl.Property{
															Type:        "string",
															GoName:      "TargetUri",
															Description: "Required. The target URI of the gRPC endpoint. Only UDS path is supported, and should start with “unix:”.",
														},
													},
												},
											},
										},
									},
								},
							},
							"name": &dcl.Property{
								Type:        "string",
								GoName:      "Name",
								Description: "Required. Name of the ServerTlsPolicy resource.",
								Immutable:   true,
							},
							"project": &dcl.Property{
								Type:        "string",
								GoName:      "Project",
								Description: "The project for the resource",
								Immutable:   true,
								ResourceReferences: []*dcl.PropertyResourceReference{
									&dcl.PropertyResourceReference{
										Resource: "Cloudresourcemanager/Project",
										Field:    "name",
										Parent:   true,
									},
								},
							},
							"serverCertificate": &dcl.Property{
								Type:        "object",
								GoName:      "ServerCertificate",
								GoType:      "ServerTlsPolicyServerCertificate",
								Description: "Optional. Defines a mechanism to provision server identity (public and private keys). Cannot be combined with allow_open as a permissive mode that allows both plain text and TLS is not supported.",
								Properties: map[string]*dcl.Property{
									"certificateProviderInstance": &dcl.Property{
										Type:        "object",
										GoName:      "CertificateProviderInstance",
										GoType:      "ServerTlsPolicyServerCertificateCertificateProviderInstance",
										Description: "The certificate provider instance specification that will be passed to the data plane, which will be used to load necessary credential information.",
										Conflicts: []string{
											"grpcEndpoint",
										},
										Required: []string{
											"pluginInstance",
										},
										Properties: map[string]*dcl.Property{
											"pluginInstance": &dcl.Property{
												Type:        "string",
												GoName:      "PluginInstance",
												Description: "Required. Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to \"google_cloud_private_spiffe\" to use Certificate Authority Service certificate provider instance.",
											},
										},
									},
									"grpcEndpoint": &dcl.Property{
										Type:        "object",
										GoName:      "GrpcEndpoint",
										GoType:      "ServerTlsPolicyServerCertificateGrpcEndpoint",
										Description: "gRPC specific configuration to access the gRPC server to obtain the cert and private key.",
										Conflicts: []string{
											"certificateProviderInstance",
										},
										Required: []string{
											"targetUri",
										},
										Properties: map[string]*dcl.Property{
											"targetUri": &dcl.Property{
												Type:        "string",
												GoName:      "TargetUri",
												Description: "Required. The target URI of the gRPC endpoint. Only UDS path is supported, and should start with “unix:”.",
											},
										},
									},
								},
							},
							"updateTime": &dcl.Property{
								Type:        "string",
								Format:      "date-time",
								GoName:      "UpdateTime",
								ReadOnly:    true,
								Description: "Output only. The timestamp when the resource was updated.",
								Immutable:   true,
							},
						},
					},
				},
			},
		},
	}
}
|
package main
import (
"fmt"
"time"
"github.com/dgrijalva/jwt-go"
)
// SigningKey is the HMAC secret used to sign the demo token.
// NOTE(review): a hard-coded secret must not ship to production — load it
// from configuration or the environment instead.
const SigningKey = "somethingsupersecret"
// main creates an HS256-signed JWT with a 96-hour expiry claim and prints the
// encoded token to stdout.
func main() {
	// New web token.
	token := jwt.New(jwt.SigningMethodHS256)
	// Set a header and a claim
	token.Header["typ"] = "JWT"
	token.Claims["exp"] = time.Now().Add(time.Hour * 96).Unix()
	// Generate encoded token. The original discarded the signing error and
	// would silently print an empty string on failure.
	t, err := token.SignedString([]byte(SigningKey))
	if err != nil {
		fmt.Println("signing token:", err)
		return
	}
	fmt.Println(t)
}
|
package kui
import (
"fmt"
"testing"
)
// Race returns how long a pursuer moving at v1 needs to close a head start of
// g on a target moving at v2, as [hours, minutes, seconds] (each component
// truncated, not rounded). When v1 >= v2 the gap never closes and
// [-1, -1, -1] is returned.
func Race(v1, v2, g int) [3]int {
	// The pursuer only catches up when it is strictly faster.
	if v1 >= v2 {
		return [3]int{-1, -1, -1}
	}
	// Catch-up condition: v1*t + g = v2*t  =>  t = g / (v2 - v1) hours.
	t := float64(g) / float64(v2-v1)
	hours := int(t)
	minutes := int(t*60) % 60
	seconds := int(t*60*60) % 60
	return [3]int{hours, minutes, seconds}
}
// TestRace pins the sample computation: closing a 37-unit gap at an 11-unit
// speed advantage takes 3h 21m 49s (truncated).
// The original test only printed the result and could never fail.
func TestRace(t *testing.T) {
	r := Race(80, 91, 37)
	fmt.Println(r)
	if want := [3]int{3, 21, 49}; r != want {
		t.Fatalf("Race(80, 91, 37) = %v, want %v", r, want)
	}
}
|
// SPDX-License-Identifier: Apache-2.0
// Copyright © 2020 Intel Corporation
package af
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
)
// Linger please
// (generated-client idiom: the blank assignment keeps the context import
// referenced even when no method in this file uses it directly)
var (
	_ context.Context
)
// PfdManagementTransactionAppDeleteAPIService handles deletion of a single
// application entry within a PFD management transaction on the NEF.
type PfdManagementTransactionAppDeleteAPIService service
// handlePfdAppDeleteResp converts a non-2xx NEF response into an error. For
// the documented NEF error codes the body is decoded as ProblemDetails and
// wrapped in a GenericError (refreshing the auth token first on 401); any
// other status produces a plain formatted error.
func (a *PfdManagementTransactionAppDeleteAPIService) handlePfdAppDeleteResp(
	r *http.Response, body []byte) error {
	newErr := GenericError{
		body:  body,
		error: r.Status,
	}
	switch r.StatusCode {
	case 400, 401, 403, 404, 429, 500, 503:
		var v ProblemDetails
		if r.StatusCode == 401 {
			// Best effort token refresh; failure is only logged.
			if fetchNEFAuthorizationToken() != nil {
				log.Infoln("Token refresh failed")
			}
		}
		err := json.Unmarshal(body, &v)
		if err != nil {
			newErr.error = err.Error()
			return newErr
		}
		newErr.model = v
		return newErr
	default:
		// Use the already-read body: the caller has fully consumed r.Body
		// before calling this function, so the original's second
		// ioutil.ReadAll(r.Body) here always yielded an empty payload.
		err := fmt.Errorf("NEF returned error - %s, %s", r.Status, string(body))
		return err
	}
}
// PfdAppTransactionDelete deletes an already existing PFD transaction for
// one application identifier.
//
//   - ctx: for authentication, logging, cancellation, deadlines, tracing,
//     etc. Passed from http.Request or context.Background().
//   - afID: identifier of the AF.
//   - pfdTrans: identifier of the subscription resource.
//   - appID: external application identifier.
func (a *PfdManagementTransactionAppDeleteAPIService) PfdAppTransactionDelete(
	ctx context.Context, afID string, pfdTrans string, appID string) (
	*http.Response, error) {
	var (
		method     = strings.ToUpper("Delete")
		deleteBody interface{} // DELETE carries no request payload
	)
	// Build the NEF resource path:
	// {proto}://{host}{port}{base}/{afID}/transactions/{trans}/applications/{app}
	path := a.client.cfg.Protocol + "://" + a.client.cfg.NEFHostname +
		a.client.cfg.NEFPort + a.client.cfg.NEFPFDBasePath + "/" + afID +
		"/transactions/" + pfdTrans + "/applications/" + appID
	headerParams := make(map[string]string)
	headerParams["Content-Type"] = contentType
	headerParams["Accept"] = contentType
	r, err := a.client.prepareRequest(ctx, path, method,
		deleteBody, headerParams)
	if err != nil {
		return nil, err
	}
	resp, err := a.client.callAPI(r)
	if err != nil || resp == nil {
		return resp, err
	}
	respBody, err := ioutil.ReadAll(resp.Body)
	// Close the body on every return path below; the read above already
	// consumed the stream, so only the close error is interesting here.
	defer func() {
		err = resp.Body.Close()
		if err != nil {
			log.Errf("response body was not closed properly")
		}
	}()
	if err != nil {
		log.Errf("http response body could not be read")
		return resp, err
	}
	// Anything above 300 is treated as a NEF-side failure and mapped to an
	// error by the shared response handler.
	if resp.StatusCode > 300 {
		if err = a.handlePfdAppDeleteResp(resp,
			respBody); err != nil {
			return resp, err
		}
	}
	return resp, nil
}
|
package neatly_test
import (
"github.com/stretchr/testify/assert"
"github.com/viant/neatly"
"github.com/viant/toolbox/data"
"testing"
)
// TestFieldExpression_Set verifies that neatly field expressions write
// values into a data.Map: plain fields, nested array components ("[]"),
// root-anchored fields ("/") and virtual fields (":").
func TestFieldExpression_Set(t *testing.T) {
	{
		// Simple top-level field.
		var object = data.NewMap()
		field1 := neatly.NewField("Field1")
		field1.Set(123, object)
		assert.Equal(t, 123, object.GetInt("Field1"))
	}
	{
		// "Req.[]Array.H": the middle component is an array; Set with an
		// extra index argument addresses the i-th element of that array.
		var object = data.NewMap()
		field1 := neatly.NewField("Req.[]Array.H")
		assert.True(t, field1.HasArrayComponent)
		assert.False(t, field1.IsArray)
		assert.True(t, field1.Child.IsArray)
		assert.Equal(t, "H", field1.Child.Child.Field)
		field1.Set("v1H", object)
		field1.Set("v2H", object, 1)
		field2 := neatly.NewField("Req.[]Array.A")
		field2.Set("v1A", object)
		field2.Set("v2A", object, 1)
		field3 := neatly.NewField("Req.Field")
		field3.Set("v", object)
		assert.True(t, object.Has("Req"))
		var reqObject = object.GetMap("Req")
		assert.NotNil(t, reqObject)
		assert.Equal(t, "v", reqObject.GetString("Field"))
		assert.True(t, reqObject.Has("Array"))
		assert.True(t, reqObject.Has("Field"))
		array := reqObject.GetCollection("Array")
		assert.NotNil(t, array)
		assert.Equal(t, 2, len(*array))
		// Each array element should have collected both the H and A values
		// written at the matching index.
		err := array.RangeMap(func(item data.Map, index int) (bool, error) {
			switch index {
			case 0:
				assert.Equal(t, "v1H", item.GetString("H"))
				assert.Equal(t, "v1A", item.GetString("A"))
			case 1:
				assert.Equal(t, "v2H", item.GetString("H"))
				assert.Equal(t, "v2A", item.GetString("A"))
			}
			return true, nil
		})
		assert.Nil(t, err)
	}
	{
		// A "/" prefix anchors the field at the root of the map.
		var object = data.NewMap()
		field1 := neatly.NewField("/Field1")
		field1.Set(123, object)
		assert.Equal(t, 123, object.GetInt("Field1"))
		assert.True(t, field1.IsRoot)
	}
	{
		// A ":" prefix marks the field as virtual.
		var object = data.NewMap()
		field1 := neatly.NewField(":Field1")
		field1.Set(123, object)
		assert.Equal(t, 123, object.GetInt("Field1"))
		assert.True(t, field1.IsVirtual)
	}
}
|
// Copyright (C) 2019 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cc
import (
"android/soong/android"
)
// RegisterRequiredBuildComponentsForTest registers the module types and
// mutators that practically every cc test depends on: prebuilt handling
// first, then the core cc, binary and library components, followed by the
// individual module types listed below.
func RegisterRequiredBuildComponentsForTest(ctx android.RegistrationContext) {
	RegisterPrebuiltBuildComponents(ctx)
	android.RegisterPrebuiltMutators(ctx)
	RegisterCCBuildComponents(ctx)
	RegisterBinaryBuildComponents(ctx)
	RegisterLibraryBuildComponents(ctx)
	RegisterLibraryHeadersBuildComponents(ctx)
	ctx.RegisterModuleType("toolchain_library", ToolchainLibraryFactory)
	ctx.RegisterModuleType("llndk_library", LlndkLibraryFactory)
	ctx.RegisterModuleType("cc_object", ObjectFactory)
	ctx.RegisterModuleType("ndk_prebuilt_shared_stl", NdkPrebuiltSharedStlFactory)
	ctx.RegisterModuleType("ndk_prebuilt_object", NdkPrebuiltObjectFactory)
}
// GatherRequiredDepsForTest returns a Blueprint snippet declaring the
// modules (toolchain libraries, bionic stubs, crt objects, NDK prebuilts,
// STL variants, ...) that cc modules depend on implicitly, so tests can
// append it to their own blueprint. For each OS passed in, extra
// OS-specific support modules (Fuchsia, Windows) are appended as well.
func GatherRequiredDepsForTest(oses ...android.OsType) string {
	ret := `
toolchain_library {
name: "libatomic",
vendor_available: true,
recovery_available: true,
native_bridge_supported: true,
src: "",
}
toolchain_library {
name: "libcompiler_rt-extras",
vendor_available: true,
recovery_available: true,
src: "",
}
toolchain_library {
name: "libclang_rt.builtins-arm-android",
vendor_available: true,
recovery_available: true,
native_bridge_supported: true,
src: "",
}
toolchain_library {
name: "libclang_rt.builtins-aarch64-android",
vendor_available: true,
recovery_available: true,
native_bridge_supported: true,
src: "",
}
cc_prebuilt_library_shared {
name: "libclang_rt.hwasan-aarch64-android",
nocrt: true,
vendor_available: true,
recovery_available: true,
system_shared_libs: [],
stl: "none",
srcs: [""],
check_elf_files: false,
sanitize: {
never: true,
},
}
toolchain_library {
name: "libclang_rt.builtins-i686-android",
vendor_available: true,
recovery_available: true,
native_bridge_supported: true,
src: "",
}
toolchain_library {
name: "libclang_rt.builtins-x86_64-android",
vendor_available: true,
recovery_available: true,
native_bridge_supported: true,
src: "",
}
toolchain_library {
name: "libclang_rt.fuzzer-arm-android",
vendor_available: true,
recovery_available: true,
src: "",
}
toolchain_library {
name: "libclang_rt.fuzzer-aarch64-android",
vendor_available: true,
recovery_available: true,
src: "",
}
toolchain_library {
name: "libclang_rt.fuzzer-i686-android",
vendor_available: true,
recovery_available: true,
src: "",
}
toolchain_library {
name: "libclang_rt.fuzzer-x86_64-android",
vendor_available: true,
recovery_available: true,
src: "",
}
toolchain_library {
name: "libclang_rt.fuzzer-x86_64",
vendor_available: true,
recovery_available: true,
src: "",
}
// Needed for sanitizer
cc_prebuilt_library_shared {
name: "libclang_rt.ubsan_standalone-aarch64-android",
vendor_available: true,
recovery_available: true,
system_shared_libs: [],
srcs: [""],
}
toolchain_library {
name: "libgcc",
vendor_available: true,
recovery_available: true,
src: "",
}
toolchain_library {
name: "libgcc_stripped",
vendor_available: true,
recovery_available: true,
sdk_version: "current",
src: "",
}
cc_library {
name: "libc",
no_libcrt: true,
nocrt: true,
stl: "none",
system_shared_libs: [],
recovery_available: true,
stubs: {
versions: ["27", "28", "29"],
},
}
llndk_library {
name: "libc",
symbol_file: "",
sdk_version: "current",
}
cc_library {
name: "libm",
no_libcrt: true,
nocrt: true,
stl: "none",
system_shared_libs: [],
recovery_available: true,
stubs: {
versions: ["27", "28", "29"],
},
apex_available: [
"//apex_available:platform",
"myapex"
],
}
llndk_library {
name: "libm",
symbol_file: "",
sdk_version: "current",
}
cc_library {
name: "libdl",
no_libcrt: true,
nocrt: true,
stl: "none",
system_shared_libs: [],
recovery_available: true,
stubs: {
versions: ["27", "28", "29"],
},
apex_available: [
"//apex_available:platform",
"myapex"
],
}
llndk_library {
name: "libdl",
symbol_file: "",
sdk_version: "current",
}
cc_library {
name: "libft2",
no_libcrt: true,
nocrt: true,
system_shared_libs: [],
recovery_available: true,
}
llndk_library {
name: "libft2",
symbol_file: "",
vendor_available: false,
sdk_version: "current",
}
cc_library {
name: "libc++_static",
no_libcrt: true,
nocrt: true,
system_shared_libs: [],
stl: "none",
vendor_available: true,
recovery_available: true,
host_supported: true,
apex_available: [
"//apex_available:platform",
"//apex_available:anyapex",
],
}
cc_library {
name: "libc++",
no_libcrt: true,
nocrt: true,
system_shared_libs: [],
stl: "none",
vendor_available: true,
recovery_available: true,
host_supported: true,
vndk: {
enabled: true,
support_system_process: true,
},
apex_available: [
"//apex_available:platform",
"myapex"
],
}
cc_library {
name: "libc++demangle",
no_libcrt: true,
nocrt: true,
system_shared_libs: [],
stl: "none",
host_supported: false,
vendor_available: true,
recovery_available: true,
apex_available: [
"//apex_available:platform",
"//apex_available:anyapex",
],
}
cc_library {
name: "libunwind_llvm",
no_libcrt: true,
nocrt: true,
system_shared_libs: [],
stl: "none",
vendor_available: true,
recovery_available: true,
}
cc_defaults {
name: "crt_defaults",
recovery_available: true,
vendor_available: true,
native_bridge_supported: true,
stl: "none",
apex_available: [
"//apex_available:platform",
"//apex_available:anyapex",
],
}
cc_object {
name: "crtbegin_so",
defaults: ["crt_defaults"],
recovery_available: true,
vendor_available: true,
native_bridge_supported: true,
stl: "none",
}
cc_object {
name: "crtbegin_dynamic",
defaults: ["crt_defaults"],
recovery_available: true,
vendor_available: true,
native_bridge_supported: true,
stl: "none",
}
cc_object {
name: "crtbegin_static",
defaults: ["crt_defaults"],
recovery_available: true,
vendor_available: true,
native_bridge_supported: true,
stl: "none",
}
cc_object {
name: "crtend_so",
defaults: ["crt_defaults"],
recovery_available: true,
vendor_available: true,
native_bridge_supported: true,
stl: "none",
}
cc_object {
name: "crtend_android",
defaults: ["crt_defaults"],
recovery_available: true,
vendor_available: true,
native_bridge_supported: true,
stl: "none",
}
cc_library {
name: "libprotobuf-cpp-lite",
}
cc_library {
name: "ndk_libunwind",
sdk_version: "current",
stl: "none",
system_shared_libs: [],
}
cc_library {
name: "libc.ndk.current",
sdk_version: "current",
stl: "none",
system_shared_libs: [],
}
cc_library {
name: "libm.ndk.current",
sdk_version: "current",
stl: "none",
system_shared_libs: [],
}
cc_library {
name: "libdl.ndk.current",
sdk_version: "current",
stl: "none",
system_shared_libs: [],
}
ndk_prebuilt_object {
name: "ndk_crtbegin_so.27",
sdk_version: "27",
}
ndk_prebuilt_object {
name: "ndk_crtend_so.27",
sdk_version: "27",
}
ndk_prebuilt_object {
name: "ndk_crtbegin_dynamic.27",
sdk_version: "27",
}
ndk_prebuilt_object {
name: "ndk_crtend_android.27",
sdk_version: "27",
}
ndk_prebuilt_shared_stl {
name: "ndk_libc++_shared",
}
`
	// Append per-OS support modules for every OS the test targets.
	for _, os := range oses {
		if os == android.Fuchsia {
			ret += `
cc_library {
name: "libbioniccompat",
stl: "none",
}
cc_library {
name: "libcompiler_rt",
stl: "none",
}
`
		}
		if os == android.Windows {
			ret += `
toolchain_library {
name: "libwinpthread",
host_supported: true,
enabled: false,
target: {
windows: {
enabled: true,
},
},
src: "",
}
`
		}
	}
	return ret
}
// GatherRequiredFilesForTest adds any files needed by the modules from
// GatherRequiredDepsForTest to the given mock filesystem. Currently none
// are required, so this is a no-op kept for symmetry with other helpers.
func GatherRequiredFilesForTest(fs map[string][]byte) {
}
// TestConfig returns an android.Config for cc tests, built from the given
// blueprint, environment and mock filesystem, with the modules required by
// the compiler/linker appended automatically.
func TestConfig(buildDir string, os android.OsType, env map[string]string,
	bp string, fs map[string][]byte) android.Config {
	// Append modules that are required by the compiler and/or linker.
	bp += GatherRequiredDepsForTest(os)

	mockFS := make(map[string][]byte, len(fs))
	GatherRequiredFilesForTest(mockFS)
	for name, contents := range fs {
		mockFS[name] = contents
	}

	if os == android.Fuchsia {
		return android.TestArchConfigFuchsia(buildDir, env, bp, mockFS)
	}
	return android.TestArchConfig(buildDir, env, bp, mockFS)
}
// CreateTestContext builds an android.TestContext with every cc-related
// module type, mutator and singleton that the cc tests need registered.
func CreateTestContext() *android.TestContext {
	ctx := android.NewTestArchContext()
	ctx.RegisterModuleType("cc_fuzz", FuzzFactory)
	ctx.RegisterModuleType("cc_test", TestFactory)
	ctx.RegisterModuleType("llndk_headers", llndkHeadersFactory)
	ctx.RegisterModuleType("ndk_library", NdkLibraryFactory)
	ctx.RegisterModuleType("vendor_public_library", vendorPublicLibraryFactory)
	ctx.RegisterModuleType("filegroup", android.FileGroupFactory)
	ctx.RegisterModuleType("vndk_prebuilt_shared", VndkPrebuiltSharedFactory)
	ctx.RegisterModuleType("vndk_libraries_txt", VndkLibrariesTxtFactory)
	// Defaults handling is registered before the arch mutators run.
	ctx.PreArchMutators(android.RegisterDefaultsPreArchMutators)
	RegisterRequiredBuildComponentsForTest(ctx)
	ctx.RegisterSingletonType("vndk-snapshot", VndkSnapshotSingleton)
	ctx.RegisterSingletonType("vendor-snapshot", VendorSnapshotSingleton)
	return ctx
}
|
package handlers
import (
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"github.com/authelia/authelia/v4/internal/mocks"
)
// LogoutSuite exercises the logout endpoint against a mocked Authelia
// context that SetupTest prepares with an authenticated session.
type LogoutSuite struct {
	suite.Suite
	// mock provides the fake request/response context used by each test.
	mock *mocks.MockAutheliaCtx
}
// SetupTest creates a fresh mocked context and saves an authenticated
// session for testUsername so each test starts from a logged-in state.
func (s *LogoutSuite) SetupTest() {
	s.mock = mocks.NewMockAutheliaCtx(s.T())

	sessionProvider, err := s.mock.Ctx.GetSessionProvider()
	s.Assert().NoError(err)

	session, err := sessionProvider.GetSession(s.mock.Ctx.RequestCtx)
	s.Assert().NoError(err)

	session.Username = testUsername
	s.Assert().NoError(sessionProvider.SaveSession(s.mock.Ctx.RequestCtx, session))
}
// TearDownTest releases the mocked context created in SetupTest.
func (s *LogoutSuite) TearDownTest() {
	s.mock.Close()
}
// TestShouldDestroySession checks that logging out resets the session
// cookie: the value is cleared and the cookie is expired by setting its
// date one minute in the past.
func (s *LogoutSuite) TestShouldDestroySession() {
	LogoutPOST(s.mock.Ctx)

	cookie := string(s.mock.Ctx.Response.Header.PeekCookie("authelia_session"))
	assert.True(s.T(), strings.HasPrefix(cookie, "authelia_session=;"))
}
func TestRunLogoutSuite(t *testing.T) {
s := new(LogoutSuite)
suite.Run(t, s)
}
|
package email
import (
// "k8sproject/config"
"github.com/connext-cs/pub/config"
"fmt"
"net"
"net/smtp"
"strings"
)
// MailInfo holds SMTP connection details plus the message being composed.
// It embeds loginAuth (which stores the credentials and implements the
// LOGIN challenge/response) and unencryptedAuth (whose promoted Auth field
// is the value actually passed to smtp.SendMail).
type MailInfo struct {
	loginAuth
	unencryptedAuth
	host    string // "hostname:port" of the SMTP server
	content string // accumulated body, CRLF-terminated lines
	title   string // message subject
}
type unencryptedAuth struct {
smtp.Auth
}
func (a unencryptedAuth) Start(server *smtp.ServerInfo) (string, []byte, error) {
s := *server
s.TLS = true
_, resp, th := a.Auth.Start(&s)
return "LOGIN", resp, th
}
type loginAuth struct {
username, password string
}
func LoginAuth(username, password string) smtp.Auth {
return &loginAuth{username, password}
}
func (a *loginAuth) Start(server *smtp.ServerInfo) (string, []byte, error) {
return "LOGIN", nil, nil
}
func (a *loginAuth) Next(fromServer []byte, more bool) ([]byte, error) {
command := string(fromServer)
command = strings.TrimSpace(command)
command = strings.TrimSuffix(command, ":")
command = strings.ToLower(command)
if more {
if command == "username" {
return []byte(fmt.Sprintf("%s", a.username)), nil
} else if command == "password" {
return []byte(fmt.Sprintf("%s", a.password)), nil
} else {
// We've already sent everything.
return nil, fmt.Errorf("unexpected server challenge: %s", command)
}
}
return nil, nil
}
// InitHostInfo loads the SMTP server address and credentials from the
// CVM Sonar e-mail configuration.
func (mailInfo *MailInfo) InitHostInfo() {
	mailInfo.SetServer(config.CVMSonarEMailHost(), fmt.Sprintf("%d", config.CVMSonarEMailPort()))
	mailInfo.SetUser(config.CVMSonarEMailUser(), config.CVMSonarEMailPassword())
}
// SetTitle sets the message subject.
func (mailInfo *MailInfo) SetTitle(title string) {
	mailInfo.title = title
}

// ClearEMail discards any body content accumulated so far.
func (mailInfo *MailInfo) ClearEMail() {
	mailInfo.content = ""
}
// AddContent appends one line to the mail body, terminating it with CRLF
// as SMTP message bodies require.
func (mailInfo *MailInfo) AddContent(contentLine string) {
	// Plain concatenation instead of the old fmt.Sprintf round-trip: same
	// resulting string, no formatting machinery on every appended line.
	mailInfo.content += contentLine + "\r\n"
}
// SendMail delivers the accumulated message to receiveAddr (a ";"-separated
// recipient list), printing the error to stdout if delivery fails.
func (mailInfo *MailInfo) SendMail(receiveAddr string) error {
	if err := mailInfo.send(receiveAddr); err != nil {
		fmt.Printf("err:%+v\n", err)
		return err
	}
	return nil
}
// SetServer records the SMTP server address as "host:port".
func (mailInfo *MailInfo) SetServer(hostname, port string) {
	// hostname = "smtp.qq.com"        // QQ mail server address
	// port = "587"                    // QQ mail SMTP port
	// hostname = "smtp.office365.com" // Outlook server address
	// port = "587"                    // Outlook SMTP port
	mailInfo.host = net.JoinHostPort(hostname, port)
}
// SetUser stores the sender name and installs a LOGIN authenticator with
// the given credentials; the password lives only inside the Auth value.
func (mailInfo *MailInfo) SetUser(username, password string) {
	mailInfo.Auth = LoginAuth(username, password)
	mailInfo.username = username
}
// send builds a minimal To/Subject message from the stored title and
// content and submits it via smtp.SendMail to every recipient in the
// ";"-separated receiveAddr list, printing any delivery error to stdout.
func (mailInfo *MailInfo) send(receiveAddr string) error {
	recipients := strings.Split(receiveAddr, ";")
	msg := []byte("To: " + receiveAddr + "\r\n" + "Subject:" + mailInfo.title + "\r\n" + "\r\n" + mailInfo.content + "\r\n")
	if err := smtp.SendMail(mailInfo.host, mailInfo.Auth, mailInfo.username, recipients, msg); err != nil {
		fmt.Printf("err:%v\n", err)
		return err
	}
	return nil
}
// sonarQuberEMail is the lazily-created shared mail client (SonarEMail).
var sonarQuberEMail *MailInfo

// GetSonarQuberEMail returns the shared mail client, creating and
// configuring it from the Sonar e-mail settings on first use.
// NOTE(review): the lazy init is not goroutine-safe; if this can be called
// from multiple goroutines, guard it with sync.Once.
func GetSonarQuberEMail() *MailInfo {
	if sonarQuberEMail == nil {
		sonarQuberEMail = &MailInfo{}
		sonarQuberEMail.InitHostInfo()
	}
	return sonarQuberEMail
}
|
package main
import (
"log"
"os"
"github.com/brutella/hc"
"github.com/RonMelkhior/homekit-lightify/lightify"
"github.com/brutella/hc/accessory"
_ "github.com/joho/godotenv/autoload"
)
// main discovers Lightify devices, exposes every non-gateway device as a
// HomeKit accessory and serves them over an hc IP transport until the
// process is terminated.
func main() {
	log.SetFlags(log.LstdFlags | log.Lshortfile)

	if err := lightify.Init(); err != nil {
		log.Fatal(err)
	}

	devices, err := lightify.GetDevices()
	if err != nil {
		log.Fatal(err)
	}

	var accessories []*accessory.Accessory
	for _, device := range devices {
		// The gateway itself is not exposed to HomeKit.
		if device.Type == "GATEWAY" {
			continue
		}
		accessories = append(accessories, device.InitializeAccessory())
	}
	// BUG FIX: accessories[0] below panicked with an index-out-of-range
	// when no usable (non-gateway) device was found.
	if len(accessories) == 0 {
		log.Fatal("no Lightify devices found to expose to HomeKit")
	}

	config := hc.Config{
		Port:        os.Getenv("HOMEKIT_PORT"),
		Pin:         os.Getenv("HOMEKIT_PIN"),
		StoragePath: "./db",
	}
	t, err := hc.NewIPTransport(config, accessories[0], accessories[1:]...)
	if err != nil {
		log.Fatal(err)
	}

	hc.OnTermination(func() {
		<-t.Stop()
	})
	t.Start()
}
|
package main
import "exchange_websocket/bitfinex_websocket"
// main connects to the Bitfinex websocket, subscribes to the kline channel
// and reads messages until the connection drops, then reconnects forever.
func main() {
	bf := bitfinex_websocket.BitfinexWebsocketInit()
	bf.BFKlineWebsocket()
	for { // idiomatic form of the old `for true`
		bf.WsConnect()
		// Keep the connection alive while ReadMessage blocks below;
		// `go bf.Ping()` replaces the redundant closure wrapper.
		go bf.Ping()
		bf.Subscribe("kline")
		bf.ReadMessage()
	}
}
|
package middleware
import "sync"
import "math"
import "time"
import "log"
import "errors"
import "reflect"
import "fmt"
import "github.com/nu7hatch/gouuid"
import . "../packet"
import . "../message"
import . "../client_request_handler"
// Subscribed maps a topic name to the listeners subscribed to it.
type Subscribed struct {
	Map map[string][]MessageListener
}
// Init allocates the underlying map; it must run before any other method.
func (sd *Subscribed) Init() {
	sd.Map = make(map[string][]MessageListener)
}
// Get returns the listeners registered for key and whether the key exists.
func (sd *Subscribed) Get(key string) ([]MessageListener, bool) {
	listeners, ok := sd.Map[key]
	return listeners, ok
}
// Set replaces the full listener list for key.
func (sd *Subscribed) Set(key string, listeners []MessageListener) {
	sd.Map[key] = listeners
}
// Add registers fu as a listener for key, creating the listener list on
// first use and ignoring duplicates (compared with reflect.DeepEqual).
func (sd *Subscribed) Add(key string, fu MessageListener) {
	if _, ok := sd.Map[key]; !ok {
		sd.Map[key] = make([]MessageListener, 0)
	}
	for _, existing := range sd.Map[key] {
		if reflect.DeepEqual(existing, fu) {
			return // already subscribed
		}
	}
	sd.Map[key] = append(sd.Map[key], fu)
}
// Remove unregisters fu (compared with reflect.DeepEqual) from key's
// listener list, reporting whether a listener was removed. The order of
// the remaining listeners is not preserved (swap-with-last removal).
func (sd *Subscribed) Remove(key string, fu MessageListener) bool {
	listeners, ok := sd.Map[key]
	if !ok {
		return false
	}
	for i, elem := range listeners {
		if reflect.DeepEqual(elem, fu) {
			last := len(listeners) - 1
			listeners[i] = listeners[last]
			listeners[last] = nil // drop the reference so it can be collected
			// BUG FIX: the truncated slice used to be stored only in a local
			// variable, so the map kept the old length with a trailing nil
			// listener. Write the shortened slice back into the map.
			sd.Map[key] = listeners[:last]
			return true
		}
	}
	return false
}
// Connection is a client-side broker connection. It owns two underlying
// ClientRequestHandler channels (receive and send), the per-topic listener
// registry, the set of messages awaiting an ACK, and the sessions created
// from it.
type Connection struct {
	Lock               sync.Mutex // shared lock behind both conditions below
	MessageSent        sync.Cond  // signalled when a message joins WaitingACK
	AckReceived        sync.Cond  // signalled when an ACK removes a message
	ClientID           string
	HostIp             string
	HostPort           string
	HostProtocol       string
	ReceiverConnection ClientRequestHandler
	SenderConnection   ClientRequestHandler
	Subscribed         Subscribed     // topic name -> listeners
	WaitingACK         WaitingACKSafe // messages sent but not yet ACKed
	Sessions           []TopicSession
	Stopped            bool // when true, OnPacket drops incoming packets
	Open               bool // false until Start succeeds / after Close
	Modified           bool // set once the connection has been used
	PacketIDGenerator  int  // monotonically increasing packet id
}
// CreateConnection initializes the connection state for the given broker
// endpoint: both condition variables share cnn.Lock, a fresh UUID becomes
// the client id, and the connection starts closed/stopped until Start runs.
func (cnn *Connection) CreateConnection(host_ip string, host_port string, host_protocol string) {
	println("==> Conection created!")
	cnn.Lock = sync.Mutex{}
	cnn.MessageSent = sync.Cond{L: &cnn.Lock}
	cnn.AckReceived = sync.Cond{L: &cnn.Lock}
	cnn.HostIp = host_ip
	cnn.HostPort = host_port
	cnn.HostProtocol = host_protocol
	cnn.WaitingACK.Init()
	cnn.Subscribed.Init()
	cnn.Sessions = make([]TopicSession, 0)
	uuid_, _ := uuid.NewV4()
	cnn.ClientID = uuid_.String()
	cnn.Stopped = true
	cnn.Open = false
	cnn.Modified = false
	cnn.PacketIDGenerator = 0
}
// IsOpen returns nil when the connection is open and an error otherwise.
func (cnn *Connection) IsOpen() error {
	if cnn.Open {
		return nil
	}
	return errors.New("Operation not allowed in closed connection.")
}
// SetModified marks the connection as used; afterwards SetClientID refuses
// to change the client id.
func (cnn *Connection) SetModified() {
	cnn.Modified = true
}
// GetClientID returns the connection's client id.
func (cnn *Connection) GetClientID() string {
	return cnn.ClientID
}
// SetClientID overrides the generated client id. It is only allowed while
// the connection has not been used yet (Modified == false), because the id
// is embedded in every packet already exchanged with the broker.
func (cnn *Connection) SetClientID(clientid string) error {
	if cnn.Modified {
		// BUG FIX: the previous message contained the typo "afthe the".
		return errors.New("Changing the client id is not allowed after the connection has been modified.")
	}
	cnn.ClientID = clientid
	return nil
}
// Close marks the connection closed, waits (on AckReceived) until every
// outstanding message has been ACKed, then closes both underlying handlers.
func (cnn *Connection) Close() {
	println("+++ Conection [CLOSE]")
	cnn.SetModified()
	cnn.AckReceived.L.Lock()
	cnn.Open = false
	// Block until OnPacket has drained the waiting-ACK set.
	for cnn.WaitingACK.Len() > 0 {
		cnn.AckReceived.Wait()
	}
	cnn.ReceiverConnection.Close()
	cnn.SenderConnection.Close()
	cnn.AckReceived.L.Unlock()
}
// CreateSession creates a new TopicSession bound to this connection and
// records it in cnn.Sessions.
//
// BUG FIX: the receiver used to be a value (cnn Connection), which copied
// the entire struct - including its sync.Mutex, which must not be copied -
// and silently discarded both SetModified() and the append to cnn.Sessions.
// A pointer receiver makes those mutations stick.
func (cnn *Connection) CreateSession() TopicSession {
	println("+++ Conection create[SESSION]")
	cnn.SetModified()
	tp := TopicSession{}
	tp.CreateSession(*cnn)
	cnn.Sessions = append(cnn.Sessions, tp)
	return tp
}
// SendMessage sends msg to its destination, registering it in WaitingACK
// (with a retry timestamp) before transmission and waking ProcessACKS via
// the MessageSent condition. Returns an error if the connection is closed.
func (cnn *Connection) SendMessage(msg Message) error{
	err := cnn.IsOpen()
	if(err != nil){
		fmt.Println(err)
		return err
	}
	// Track the message before sending so an early ACK cannot be missed.
	// NOTE(review): the deadline mixes Unix seconds with a 5*1000 offset -
	// looks like milliseconds were intended; confirm against ProcessACKS.
	cnn.WaitingACK.Add(msg.MessageID, MessageWaitingAck{msg, int32(time.Now().Unix() + (5 * 1000)), msg.MessageID})
	cnn.MessageSent.L.Lock()
	//Broadcasting that there is new messages waiting for an ACK
	cnn.MessageSent.Broadcast()
	cnn.MessageSent.L.Unlock()
	cnn.SetModified()
	pkt := Packet{}
	params := []string{cnn.ClientID, msg.GetDestination()}
	pkt.CreatePacket(MESSAGE.Ordinal(), cnn.PacketIDGenerator, params, msg)
	cnn.PacketIDGenerator++
	cnn.SenderConnection.Send(pkt)
	return nil
}
// SubscribeSessionToDestination records fu as a local listener for topic
// under the connection lock. It does not notify the broker (see Subscribe).
func (cnn *Connection) SubscribeSessionToDestination(topic Topic, fu MessageListener) {
	println("+++ Conection subscribe[SESSION_TO_DESTINATION]")
	cnn.Lock.Lock()
	defer cnn.Lock.Unlock()
	cnn.Subscribed.Add(topic.GetTopicName(), fu)
}
// UnsubscribeSessionToDestination removes fu as a local listener for topic
// under the connection lock, reporting whether it was registered.
func (cnn *Connection) UnsubscribeSessionToDestination(topic Topic, fu MessageListener) bool {
	println("+++ Conection UNsubscribe[SESSION_TO_DESTINATION]")
	cnn.Lock.Lock()
	defer cnn.Lock.Unlock()
	return cnn.Subscribed.Remove(topic.GetTopicName(), fu)
}
// Subscribe registers fu locally for topic and notifies the broker with a
// SUBSCRIBE packet. Fails if the connection is closed.
func (cnn *Connection) Subscribe(topic Topic, fu MessageListener) error{
	println("+++ Conection [SUBSCRIBE]")
	err := cnn.IsOpen()
	if(err != nil){
		log.Print(err)
		return err
	}
	cnn.SetModified()
	cnn.SubscribeSessionToDestination(topic, fu)
	pkt := Packet{}
	params := []string{cnn.ClientID, topic.GetTopicName()}
	pkt.CreatePacket(SUBSCRIBE.Ordinal(), cnn.PacketIDGenerator, params, Message{})
	cnn.PacketIDGenerator++
	return cnn.SenderConnection.Send(pkt)
}
// Unsubscribe removes fu as a local listener for topic and, only if it was
// actually registered, informs the broker with an UNSUBSCRIBE packet.
func (cnn *Connection) Unsubscribe(topic Topic, fu MessageListener) error{
	println("+++ Conection [UNSUBSCRIBE]")
	err := cnn.IsOpen()
	if(err != nil){
		log.Print(err)
		return err
	}
	cnn.SetModified()
	result := cnn.UnsubscribeSessionToDestination(topic, fu)
	if (result){
		pkt := Packet{}
		params := []string{cnn.ClientID, topic.GetTopicName()}
		pkt.CreatePacket(UNSUBSCRIBE.Ordinal(), cnn.PacketIDGenerator, params, Message{})
		cnn.PacketIDGenerator++
		return cnn.SenderConnection.Send(pkt)
	}
	return nil
}
// AcknowledgeMessage tells the broker (asynchronously) that msg has been
// consumed by the given session.
func (cnn *Connection) AcknowledgeMessage(msg Message, ts TopicSession) error{
	println("+++ Conection [ACK_MESSAGE]")
	err := cnn.IsOpen()
	if(err != nil){
		log.Print(err)
		return err
	}
	cnn.SetModified()
	pkt := Packet{}
	params := []string{cnn.ClientID, msg.MessageID}
	pkt.CreatePacket(ACK.Ordinal(), cnn.PacketIDGenerator, params, Message{})
	cnn.PacketIDGenerator++
	cnn.SenderConnection.SendAsync(pkt)
	return nil
}
// CloseSession removes every listener equal (reflect.DeepEqual) to ts from
// every topic's listener list, using swap-with-last removal.
// NOTE(review): the range keeps iterating the original slice header while v
// is being shortened; with several matches under one key an entry can be
// skipped or revisited - verify whether multiple matches are possible.
func (cnn *Connection) CloseSession(ts TopicSession){
	println("+++ Conection [CLOSE_SESSION]")
	for k, v := range cnn.Subscribed.Map{
		for i, e := range v{
			if(reflect.DeepEqual(e,ts)){
				v[i] = v[len(v)-1]
				v[len(v)-1] = nil
				v = v[:len(v)-1]
				cnn.Subscribed.Set(k, v )
			}
		}
	}
}
// CreateTopic asks the broker to create the given topic. Unlike the other
// operations it does not mark the connection as modified.
func (cnn *Connection) CreateTopic(tp Topic) error{
	println("+++ Conection create[TOPIC]")
	err := cnn.IsOpen()
	if(err != nil){
		log.Print(err)
		return err
	}
	pkt := Packet{}
	params := []string{cnn.ClientID, tp.GetTopicName()}
	pkt.CreatePacket(CREATE_TOPIC.Ordinal(), cnn.PacketIDGenerator, params, Message{})
	cnn.PacketIDGenerator++
	cnn.SenderConnection.Send(pkt)
	return nil
}
// ProcessACKS starts a background goroutine that re-sends messages whose
// ACK has not arrived before their deadline. While there is nothing to
// watch it parks on the MessageSent condition, and it exits once the
// connection is closed (IsOpen is re-checked around every blocking step).
func (cnn *Connection) ProcessACKS(){
	println("+++ Conection process[ACKS]")
	go func() {
		for{
			err := cnn.IsOpen()
			if(err != nil){
				log.Print(err)
				break
			}
			if (cnn.WaitingACK.Len() == 0){
				println("Sem ACKS")
				cnn.MessageSent.L.Lock()
				err := cnn.IsOpen()
				if(err != nil){
					log.Print(err)
					// NOTE(review): this break leaves MessageSent.L locked;
					// confirm no other goroutine still needs that lock.
					break
				}
				cnn.MessageSent.Wait() //Waiting for messages to be sent before stat to process ACKS again
				cnn.MessageSent.L.Unlock()
			}else{
				println(cnn.WaitingACK.Len())
			}
			err = cnn.IsOpen()
			if(err != nil){
				log.Print(err)
				break
			}
			key, maa, f := cnn.WaitingACK.Peek()
			if(f){
				curr := int32(time.Now().Unix())
				if(maa.TimeStamp <= curr){
					// Deadline passed without an ACK: drop and resend.
					cnn.WaitingACK.Remove(key)
					println("RESENDING MESSAGE TIMEDOUT!")
					cnn.SendMessage(maa.Message)
				}else{
					// NOTE(review): this sleeps for `curr` microseconds, i.e. a
					// duration derived from the current Unix time rather than
					// the remaining timeout - confirm the intended duration.
					time.Sleep(time.Microsecond * time.Duration(curr))
				}
			}
			err = cnn.IsOpen()
			if(err != nil){
				log.Print(err)
				break
			}
		}
	}()
}
// OnPacket dispatches one incoming packet: MESSAGE packets are delivered to
// every listener subscribed to the message's destination, ACK packets
// remove the acknowledged message from WaitingACK and wake anyone blocked
// in Close. Packets arriving while the connection is stopped are dropped.
func (cnn *Connection) OnPacket(pkt Packet) {
	println("+++ Conection [ON_PACKET]")
	if cnn.Stopped {
		println("Connection stopped!")
		return
	}
	if pkt.IsMessage() {
		msg := pkt.GetMessage()
		destination := msg.Destination
		cnn.Lock.Lock()
		sessions, found := cnn.Subscribed.Get(destination)
		if found {
			for _, session := range sessions {
				session.OnMessage(msg)
				println("Shamou on message de", session)
			}
		} else {
			println("No sessions")
		}
		cnn.Lock.Unlock()
	} else if pkt.IsACK() {
		fmt.Println("Precessing packet [OnPacket] ", pkt)
		// BUG FIX: the ACK id is read from pkt.Params[1], so the slice needs
		// at least two elements; the previous check (len < 1) still allowed
		// an index-out-of-range panic on a one-element Params slice.
		if len(pkt.Params) < 2 {
			fmt.Println(errors.New("Params (an slice) of packet has no ACK index"))
		} else {
			key := pkt.Params[1]
			cnn.Lock.Lock()
			cnn.WaitingACK.Remove(key)
			cnn.AckReceived.Broadcast()
			cnn.Lock.Unlock()
		}
	}
}
// Start opens the receiver and sender handlers with exponential backoff
// (up to 6 attempts), wires them back to this connection, begins listening
// for incoming packets and launches the ACK processor. It is a no-op on an
// already-open connection.
// NOTE(review): ProcessACKS already spawns its own goroutine, so the
// `go cnn.ProcessACKS()` below starts a goroutine whose only job is to
// start another - harmless but redundant.
func (cnn *Connection) Start(){
	println("+++ Conection [START]")
	if(!cnn.Open){
		tries := 0
		for tries <= 5 {
			tries++
			errr := cnn.ReceiverConnection.NewCRH(cnn.HostProtocol, cnn.HostIp, cnn.HostPort, true, cnn.GetClientID())
			errs := cnn.SenderConnection.NewCRH(cnn.HostProtocol, cnn.HostIp, cnn.HostPort, false, cnn.GetClientID())
			if(errr != nil || errs != nil){
				// Either channel failed: tear both down and retry after 2^tries seconds.
				cnn.ReceiverConnection.Close()
				cnn.SenderConnection.Close()
				delay := math.Pow(2,float64(tries))
				time.Sleep(time.Second * time.Duration(delay))
				log.Print("Error stablishing connection for client ", cnn.ClientID, ", trying again in ", delay, " seconds...")
				continue
			}
			cnn.Open = true
			cnn.Stopped = false
			println("+++ Conection [ReceiverConnection]SetConnection")
			cnn.ReceiverConnection.SetConnection(cnn)
			println("+++ Conection [SenderConnection]SetConnection")
			cnn.SenderConnection.SetConnection(cnn)
			println("+++ Conection [ReceiverConnection]ListenIncomingPackets")
			cnn.ReceiverConnection.ListenIncomingPackets()
			go cnn.ProcessACKS()
			break
		}
	}
}
// Stop makes OnPacket drop all further incoming packets; the underlying
// handlers stay open (see Close for a full shutdown).
func (cnn *Connection) Stop(){
	println("+++ Conection [STOP]")
	cnn.Stopped = true
}
|
package useatomic
import (
"sync/atomic"
"testing"
"time"
)
// TestAtomic demonstrates atomic add operations. The first argument of an
// atomic operation is a pointer to the operated value: the functions need
// its address rather than a copy, because argument values are copied on call.
func TestAtomic(t *testing.T) {
	// Signed addition.
	num := int32(18)
	t.Logf("the num is %d\n", num)
	atomic.AddInt32(&num, int32(3))
	t.Logf("the num is %d\n", num)
	// Subtracting from an unsigned value: convert the negative signed delta.
	uintNum := uint32(18)
	t.Logf("the uintNum is %d\n", uintNum)
	delta := int32(-3)
	atomic.AddUint32(&uintNum, uint32(delta))
	t.Logf("the uintNum is %d\n", uintNum)
	// The same subtraction written via the two's-complement identity
	// ^uint32(n-1) == uint32(-n).
	uintNum = uint32(18)
	t.Logf("the uintNum is %d\n", uintNum)
	atomic.AddUint32(&uintNum, ^uint32(-(-3)-1))
	t.Logf("the uintNum is %d\n", uintNum)
	// Two's complement representation of -3.
	t.Logf("The two's complement of %d: %b\n", delta, uint32(int32(delta)))
	t.Logf("The equivalent:%b\n", ^uint32(-(-3)-1))
}
// TestForAndCAS1 demonstrates a compare-and-swap spin loop: one goroutine
// periodically adds 2 to num, another spins until num reaches 10 and then
// atomically swaps it back to zero.
func TestForAndCAS1(t *testing.T) {
	sign := make(chan struct{}, 2)
	num := int32(0)
	t.Logf("The number:%d\n", num)
	// Periodically increase num until it hits 10.
	go func() {
		defer func() {
			sign <- struct{}{}
		}()
		for {
			time.Sleep(time.Millisecond * 500)
			newNum := atomic.AddInt32(&num, 2)
			t.Logf("The number:%d\n", newNum)
			if newNum == 10 {
				break
			}
		}
	}()
	// Periodically check num: compare the value at &num with 10 and, when
	// equal, swap in 0 (CompareAndSwapInt32 returns true on success).
	go func() {
		defer func() {
			sign <- struct{}{}
		}()
		for {
			if atomic.CompareAndSwapInt32(&num, 10, 0) {
				t.Log("The number has gone to zero.")
				break
			}
			time.Sleep(time.Millisecond * 500)
		}
	}()
	<-sign
	<-sign
	t.Logf("The number has been swaped to :%d\n", num)
}
// TestForAndCAS2 demonstrates optimistic locking: two goroutines each read
// num, compute a new value, and only commit it via CompareAndSwap if num is
// still unchanged, retrying otherwise until max is reached.
func TestForAndCAS2(t *testing.T) {
	sign := make(chan struct{}, 2)
	num := int32(0)
	t.Logf("The number:%d\n", num)
	max := int32(20)
	// First goroutine: periodically try to add 2 to num via CAS.
	go func(id int, max int32) {
		defer func() {
			sign <- struct{}{}
		}()
		for i := 0; ; i++ {
			curNum := atomic.LoadInt32(&num)
			if curNum >= max {
				break
			}
			newNum := curNum + 2
			time.Sleep(time.Millisecond * 200)
			if atomic.CompareAndSwapInt32(&num, curNum, newNum) {
				t.Logf("The number: %d [%d-%d]\n", newNum, id, i)
			} else {
				t.Logf("The CAS operation failed. [%d-%d]\n", id, i)
			}
		}
	}(1, max)
	// Second goroutine: identical loop competing for the same value.
	go func(id int, max int32) {
		defer func() {
			sign <- struct{}{}
		}()
		for j := 0; ; j++ {
			curNum := atomic.LoadInt32(&num)
			if curNum >= max {
				break
			}
			newNum := curNum + 2
			time.Sleep(time.Millisecond * 200)
			if atomic.CompareAndSwapInt32(&num, curNum, newNum) {
				t.Logf("The number:%d [%d-%d]\n", newNum, id, j)
			} else {
				t.Logf("The CAS operation failed. [%d-%d]\n", id, j)
			}
		}
	}(2, max)
	<-sign
	<-sign
}
|
package fetcher
import (
"fmt"
)
// newRedHatFetchRequests builds one fetchRequest per RHEL version, pointing
// at Red Hat's bzip2-compressed OVAL feed for that release.
func newRedHatFetchRequests(target []string) (reqs []fetchRequest) {
	const urlFormat = "https://www.redhat.com/security/data/oval/com.redhat.rhsa-RHEL%s.xml.bz2"
	for _, version := range target {
		reqs = append(reqs, fetchRequest{
			target:       version,
			url:          fmt.Sprintf(urlFormat, version),
			bzip2:        true,
			concurrently: false,
		})
	}
	return reqs
}
// FetchRedHatFiles fetches OVAL definition files from Red Hat for the given
// RHEL versions, returning one FetchResult per version. It is an error to
// pass an empty version list.
func FetchRedHatFiles(versions []string) ([]FetchResult, error) {
	reqs := newRedHatFetchRequests(versions)
	if len(reqs) == 0 {
		return nil,
			fmt.Errorf("There are no versions to fetch")
	}
	results, err := fetchFeedFiles(reqs)
	if err != nil {
		// Wrap with %w (instead of the old %s) so callers can still inspect
		// the underlying cause via errors.Is / errors.As.
		return nil,
			fmt.Errorf("Failed to fetch. err: %w", err)
	}
	return results, nil
}
|
package storage
import (
"context"
"github.com/mongodb/mongo-go-driver/bson"
)
// GetByName queries MongoDB for the item whose "name" field matches name,
// returning the decoded item or the driver's lookup/decode error.
func (m *MongoStorage) GetByName(ctx context.Context, name string) (*Item, error) {
	coll := m.Client.Database(m.DB).Collection(m.Collection)
	item := &Item{}
	if err := coll.FindOne(ctx, bson.M{"name": name}).Decode(item); err != nil {
		return nil, err
	}
	return item, nil
}
// Put inserts the given item into the configured MongoDB collection.
func (m *MongoStorage) Put(ctx context.Context, i *Item) error {
	coll := m.Client.Database(m.DB).Collection(m.Collection)
	if _, err := coll.InsertOne(ctx, i); err != nil {
		return err
	}
	return nil
}
|
package main
import (
"ethos/altEthos"
"ethos/syscall"
"ethos/kernelTypes"
"ethos/defined"
"log"
"strings"
)
// userName is the current user's name.
// NOTE(review): appears unused in this file - verify before removing.
var userName string

// currentTransactionID is the id of the in-progress transaction, or -1
// when no transaction is active.
var currentTransactionID int64
// init wires the RPC reply handlers for every operation this client issues.
func init() {
	SetupMyRpcTransactionStartIReply(transactionStartIReply)
	SetupMyRpcTransactionEndIReply(transactionEndIReply)
	SetupMyRpcReadIReply(readIReply)
	SetupMyRpcWriteIReply(writeIReply)
	SetupMyRpcAbortIReply(abortIReply)
}
// transactionStartIReply handles the reply to a transaction-start request;
// status "1" means the server opened a transaction with the given id.
func transactionStartIReply(transactionID int64, status string) MyRpcProcedure {
	if status == "1" {
		currentTransactionID = transactionID
		printToScreen("Started a new transaction\n")
	}
	return nil
}

// transactionEndIReply handles the reply to a commit request. Any status
// other than "-1" ends the local transaction; "2" and "3" carry detail.
func transactionEndIReply(status string) MyRpcProcedure {
	if status != "-1" {
		currentTransactionID = -1
	}
	switch status {
	case "2":
		printToScreen("Nothing to commit for the current transaction\n")
	case "3":
		printToScreen("Committed all changes to the database\n")
	}
	return nil
}

// readIReply prints the value returned by a successful read (status "1").
func readIReply(value string, status string) MyRpcProcedure {
	if status != "1" {
		return nil
	}
	printToScreen("Value: ")
	printToScreen(kernelTypes.String(value))
	printToScreen("\n")
	return nil
}

// writeIReply confirms a successful write (status "1").
func writeIReply(status string) MyRpcProcedure {
	if status != "1" {
		return nil
	}
	printToScreen("Updated the value\n")
	return nil
}

// abortIReply handles the reply to an abort request; on success the local
// transaction state is cleared.
func abortIReply(status string) MyRpcProcedure {
	if status != "1" {
		return nil
	}
	currentTransactionID = -1
	printToScreen("Aborted the transaction\n")
	return nil
}
// sendCall opens (or reuses) the "myRpc" IPC channel and performs the given
// RPC call, exiting the process on any IPC failure.
func sendCall(call defined.Rpc) {
	fd, status := altEthos.IpcRepeat("myRpc", "", nil)
	if status != syscall.StatusOk {
		log.Printf("Ipc failed: %v\n", status)
		altEthos.Exit(status)
	}
	status = altEthos.ClientCall(fd, call)
	if status != syscall.StatusOk {
		log.Printf("clientCall failed: %v\n", status)
		altEthos.Exit(status)
	}
}
func startTransaction() {
call := MyRpcTransactionStartI{}
sendCall(&call)
}
// endTransaction commits the current transaction via RPC; refuses to run
// when no transaction is open.
func endTransaction() {
	if currentTransactionID == -1 {
		printToScreen("Please start a transaction before any queries\n")
		return
	}
	sendCall(&MyRpcTransactionEndI{currentTransactionID})
}
// readDatabase prompts for a variable name and issues a read RPC within
// the current transaction. Requires an open transaction.
func readDatabase() {
	if currentTransactionID == -1 {
		printToScreen("Please start a transaction before any queries\n")
		return
	}
	printToScreen("Enter variable Name: ")
	var variableName kernelTypes.String
	status := altEthos.ReadStream(syscall.Stdin, &variableName)
	if status != syscall.StatusOk {
		// Previously the read error was only logged and the RPC was still
		// sent with an empty variable name; abort the query instead.
		log.Printf("Error while reading syscall.Stdin: %v", status)
		return
	}
	// TrimRight already returns a string; the old extra string() wrapper
	// was redundant.
	call := MyRpcReadI{currentTransactionID, strings.TrimRight(string(variableName), "\n")}
	sendCall(&call)
}
// writeDatabase prompts for a variable name and value and issues a write
// RPC within the current transaction. Requires an open transaction.
func writeDatabase() {
	if currentTransactionID == -1 {
		printToScreen("Please start a transaction before any queries\n")
		return
	}
	printToScreen("Enter variable Name: ")
	var variableName kernelTypes.String
	status := altEthos.ReadStream(syscall.Stdin, &variableName)
	if status != syscall.StatusOk {
		// Previously a failed read was only logged and the RPC was still
		// sent with empty input; abort the query instead.
		log.Printf("Error while reading syscall.Stdin: %v", status)
		return
	}
	printToScreen("Enter variable value: ")
	var variableValue kernelTypes.String
	status = altEthos.ReadStream(syscall.Stdin, &variableValue)
	if status != syscall.StatusOk {
		log.Printf("Error while reading syscall.Stdin: %v", status)
		return
	}
	call := MyRpcWriteI{currentTransactionID, strings.TrimRight(string(variableName), "\n"), strings.TrimRight(string(variableValue), "\n")}
	sendCall(&call)
}
// abortTransaction rolls back the current transaction via RPC; refuses to
// run when no transaction is open.
func abortTransaction() {
	if currentTransactionID == -1 {
		printToScreen("Please start a transaction before any queries\n")
		return
	}
	sendCall(&MyRpcAbortI{currentTransactionID})
}
// printToScreen writes prompt to stdout; a write failure is logged but
// otherwise ignored so the interactive loop keeps running.
func printToScreen(prompt kernelTypes.String) {
	statusW := altEthos.WriteStream(syscall.Stdout, &prompt)
	if statusW != syscall.StatusOk {
		log.Printf("Error writing to syscall.Stdout: %v", statusW)
	}
}
// printCommands prints the interactive help menu.
func printCommands() {
	printToScreen("\n\nCommands\n")
	printToScreen("---------------------\n")
	printToScreen("-start : Start a transaction\n")
	printToScreen("-end : End a transaction\n")
	// fixed user-facing typo: "varible" -> "variable"
	printToScreen("-read : Read a variable from the database\n")
	printToScreen("-write : Write to a variable in the database\n")
	printToScreen("-abort : Abort a transaction\n")
	printToScreen("---------------------\n\n")
}
func userInputHandler(userInput string) {
if (userInput == "\n"){
printCommands()
} else if (userInput == "??\n") {
printCommands()
} else if (userInput == "-start\n"){
startTransaction()
} else if (userInput == "-end\n") {
endTransaction()
} else if (userInput == "-read\n") {
readDatabase()
} else if (userInput == "-write\n") {
writeDatabase()
} else if (userInput == "-abort\n") {
abortTransaction()
} else {
printCommands()
}
}
// getInput loops forever: prompt, read one line from stdin, dispatch it.
// A read failure is logged and the (possibly empty) input is still
// dispatched, matching the original behaviour.
func getInput() {
	for {
		printToScreen("Enter Input (?? for commands) : ")
		var userInput kernelTypes.String
		if status := altEthos.ReadStream(syscall.Stdin, &userInput); status != syscall.StatusOk {
			log.Printf("Error while reading syscall.Stdin: %v", status)
		}
		userInputHandler(string(userInput))
	}
}
// main sets up logging, records the current user, and enters the
// interactive command loop.
func main () {
	altEthos.LogToDirectory("test/myRpcClient")
	log.Printf("Database Service: before call\n")
	userName = altEthos.GetUser()
	getInput()
	// NOTE(review): getInput loops forever, so this line is unreachable.
	log.Printf("Database Service: done\n")
}
|
package order
import (
"context"
"time"
"tpay_backend/merchantapi/internal/common"
"tpay_backend/model"
"tpay_backend/utils"
"tpay_backend/merchantapi/internal/svc"
"tpay_backend/merchantapi/internal/types"
"github.com/tal-tech/go-zero/core/logx"
)
// TransferOrderNotifyLogic re-sends the asynchronous payment notification
// for a transfer (payout) order to the merchant's configured notify URL.
type TransferOrderNotifyLogic struct {
	logx.Logger
	ctx context.Context
	svcCtx *svc.ServiceContext
}
// NewTransferOrderNotifyLogic builds the logic handler with a
// request-scoped logger bound to ctx.
func NewTransferOrderNotifyLogic(ctx context.Context, svcCtx *svc.ServiceContext) TransferOrderNotifyLogic {
	return TransferOrderNotifyLogic{
		Logger: logx.WithContext(ctx),
		ctx: ctx,
		svcCtx: svcCtx,
	}
}
// TransferOrderNotify re-delivers the asynchronous result notification of
// a transfer (payout) order to the merchant's notify URL. Only orders in
// the paid or failed state, with a notify URL configured, can be notified.
// The raw notify response body is returned to the caller.
func (l *TransferOrderNotifyLogic) TransferOrderNotify(merchantId int64, req types.TransferOrderNotifyRequest) (*types.TransferOrderNotifyResponse, error) {
	order, err := model.NewTransferOrderModel(l.svcCtx.DbEngine).FindByMerchantId(merchantId, req.OrderNo)
	if err != nil {
		if err == model.ErrRecordNotFound {
			l.Errorf("代付订单[%v]不存在", req.OrderNo)
			return nil, common.NewCodeError(common.OrderNotExist)
		}
		// fixed: the verb was "[%]", which logs "%!](MISSING)" instead of
		// the order number.
		l.Errorf("查询代付订单[%v]失败, err=%v", req.OrderNo, err)
		return nil, common.NewCodeError(common.SystemInternalErr)
	}
	if order.OrderNo == "" {
		l.Errorf("代付订单[%v]不存在", req.OrderNo)
		return nil, common.NewCodeError(common.OrderNotExist)
	}
	// Check the order status: only orders that finished paying
	// (success or failure) may be notified.
	if order.OrderStatus != model.TransferOrderStatusPaid && order.OrderStatus != model.TransferOrderStatusFail {
		// fixed: same "[%]" format-verb bug as above.
		l.Errorf("代付订单[%v]当前支付状态[%v]不能进行通知", order.OrderNo, order.OrderStatus)
		return nil, common.NewCodeError(common.OrderNotOp)
	}
	// The order must carry an asynchronous notify URL to deliver to.
	if order.NotifyUrl == "" {
		l.Errorf("代付订单[%v]缺少异步通知地址", order.OrderNo)
		return nil, common.NewCodeError(common.OrderMissingNotifyUrl)
	}
	merchant, err := model.NewMerchantModel(l.svcCtx.DbEngine).FindOneById(merchantId)
	if err != nil {
		l.Errorf("查询商户失败, MerchantNo=%v, err=%v", order.MerchantNo, err)
		return nil, common.NewCodeError(common.SystemInternalErr)
	}
	postData := &utils.PackTransferNotifyParamsRequest{
		MerchantNo: order.MerchantNo,
		Timestamp: time.Now().Unix(),
		NotifyType: utils.TransferNotifyType,
		OrderNo: order.OrderNo,
		MerchantOrderNo: order.MerchantOrderNo,
		ReqAmount: order.ReqAmount,
		Currency: order.Currency,
		OrderStatus: order.OrderStatus,
		PayTime: order.UpdateTime,
	}
	param, err := utils.PackTransferNotifyParams(postData, merchant.Md5Key)
	if err != nil {
		l.Errorf("打包参数失败, data=%v, err=%v", order.MerchantNo, err)
		return nil, common.NewCodeError(common.SystemInternalErr)
	}
	body, err := utils.PostForm(order.NotifyUrl, param)
	if err != nil {
		l.Errorf("发送数据失败, url=%v, param=%v, err=%v", order.NotifyUrl, param, err)
		return nil, common.NewCodeError(common.OrderNotifyFail)
	}
	bodyStr := string(body)
	if bodyStr != "success" {
		l.Errorf("通知失败, body=%v", bodyStr)
	} else {
		// fixed: the success log previously ran unconditionally, even
		// right after logging a failure.
		l.Infof("代付订单[%v]通知成功, body:%v", order.OrderNo, bodyStr)
	}
	return &types.TransferOrderNotifyResponse{
		NotifyResponse: bodyStr,
	}, nil
}
|
package scanner
import (
"fmt"
"go/token"
"io/ioutil"
"runtime"
"sort"
"testing"
"h12.io/gombi/scan"
)
// sampleGoFile points at the stdlib scanner source shipped with the local
// Go toolchain; used as a realistic input corpus by the tests below.
var sampleGoFile = runtime.GOROOT() + "/src/go/scanner/scanner.go"
// TestSingle is an empty scratch test kept as a placeholder for ad-hoc probing.
func TestSingle(t *testing.T) {
	// fmt.Println(int(token.INT))
}
// sortItem pairs a token with its occurrence count.
type sortItem struct {
	count int
	tok token.Token
}
// sortItems implements sort.Interface, ordered by ascending count.
type sortItems []sortItem
func (rs sortItems) Len() int { return len(rs) }
func (rs sortItems) Less(i, j int) bool { return rs[i].count < rs[j].count }
func (rs sortItems) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] }
// ATestCount scans the sample Go file, counts token occurrences, and
// prints them in ascending frequency order. The "A" prefix keeps it out
// of the normal "Test*" run.
func ATestCount(t *testing.T) {
	src, err := ioutil.ReadFile(sampleGoFile)
	if err != nil {
		panic(err)
	}
	fset := token.NewFileSet()
	file := fset.AddFile(sampleGoFile, fset.Base(), len(src))
	var s Scanner
	s.Init(file, src, nil, ScanComments)
	m := make(map[token.Token]int)
	for {
		_, tok, _ := s.Scan()
		m[tok]++
		if tok == token.EOF {
			break
		}
	}
	items := sortItems{}
	for k, c := range m {
		items = append(items, sortItem{c, k})
	}
	sort.Sort(items)
	for _, item := range items {
		fmt.Println(item.count, item.tok)
	}
}
// BenchmarkSpec measures spec(), which is defined elsewhere in this package.
func BenchmarkSpec(b *testing.B) {
	for i := 0; i < b.N; i++ {
		spec()
	}
}
// test sketches the Go identifier grammar using gombi/scan combinators;
// kept only as a usage example (the result is discarded).
func test() {
	var (
		c = scan.Char
		or = scan.Or
		class = scan.CharClass
		unicodeLetter = class(`L`)
		unicodeDigit = class(`Nd`)
		letter = or(unicodeLetter, c(`_`))
		ident = or(letter, unicodeDigit)
	)
	_ = ident
}
|
package main
import (
"fmt"
"os"
"sort"
"strconv"
)
// pf ("parseFloat") parses s as a 64-bit float. A value that fails to
// parse is reported on stderr and falls back to 0 so sorting can continue.
func pf(s string) float64 {
	value, parseErr := strconv.ParseFloat(s, 64)
	if parseErr == nil {
		return value
	}
	fmt.Fprintf(os.Stderr, "\nError converting '%s' to float\n\n", s)
	return 0.0000
}
// sortSpotPrice stably orders entries by their numeric spot price; prices
// that fail to parse sort as 0 (see pf).
func sortSpotPrice(entry []spotPriceItem, ascending bool) {
	sort.SliceStable(entry, func(i, j int) bool {
		if ascending {
			return pf(entry[i].SpotPrice) < pf(entry[j].SpotPrice)
		}
		return pf(entry[i].SpotPrice) > pf(entry[j].SpotPrice)
	})
}
// sortAvailabilityZone stably orders entries by availability zone,
// ascending or descending.
func sortAvailabilityZone(entry []spotPriceItem, ascending bool) {
	sort.SliceStable(entry, func(i, j int) bool {
		a, b := entry[i].AvailabilityZone, entry[j].AvailabilityZone
		if ascending {
			return a < b
		}
		return a > b
	})
}
// sortInstanceType stably orders entries by instance type, ascending or
// descending.
func sortInstanceType(entry []spotPriceItem, ascending bool) {
	sort.SliceStable(entry, func(i, j int) bool {
		left, right := entry[i].InstanceType, entry[j].InstanceType
		if ascending {
			return left < right
		}
		return left > right
	})
}
// sortRegion stably orders entries by region, ascending or descending.
func sortRegion(entry []spotPriceItem, ascending bool) {
	sort.SliceStable(entry, func(i, j int) bool {
		ri, rj := entry[i].Region, entry[j].Region
		if ascending {
			return ri < rj
		}
		return ri > rj
	})
}
// sortProductDescription stably orders entries by product description,
// ascending or descending.
func sortProductDescription(entry []spotPriceItem, ascending bool) {
	sort.SliceStable(entry, func(i, j int) bool {
		di, dj := entry[i].ProductDescription, entry[j].ProductDescription
		if ascending {
			return di < dj
		}
		return di > dj
	})
}
|
package models
import (
"errors"
)
// ErrInvalidEmailCode is returned when an email verification code is
// unknown or has expired.
var ErrInvalidEmailCode = errors.New("Invalid or expired email code")
// ErrSmtpNotEnabled is returned when mail is requested but no SMTP
// transport is configured.
var ErrSmtpNotEnabled = errors.New("SMTP not configured, check your grafana.ini config file's [smtp] section")
// EmailAttachFile is a definition of the attached files without path
type EmailAttachFile struct {
	Name string
	Content []byte
}
// EmailContent describes one outgoing message: recipients, the template
// used to render it, and any embedded or attached files.
type EmailContent struct {
	To []string
	// SingleEmail sends one message to all recipients instead of one per recipient.
	SingleEmail bool
	Template string
	Subject string
	// Data holds template variables used when rendering Template.
	Data map[string]interface{}
	Info string
	ReplyTo []string
	// NOTE(review): "Embeded" is misspelled but kept — renaming the
	// exported field would break callers.
	EmbededFiles []string
	AttachedFiles []*EmailAttachFile
}
|
package routers
import (
"github.com/astaxie/beego"
)
// init registers the annotated REST routes for the Artist and Album
// controllers with beego's global controller router.
func init() {
	// register appends one route entry for the named controller.
	register := func(controller, method, router string, verbs []string) {
		key := "GoldenTimes-web/controllers:" + controller
		beego.GlobalControllerRouter[key] = append(beego.GlobalControllerRouter[key],
			beego.ControllerComments{
				method,
				router,
				verbs,
				nil})
	}
	register("ArtistController", "CreateArtist", `/artist`, []string{"post"})
	register("ArtistController", "ReadArtists", `/artist`, []string{"get"})
	register("ArtistController", "ReadArtist", `/artist/:id`, []string{"get"})
	register("ArtistController", "UpdateArtist", `/artist/:id`, []string{"put"})
	register("ArtistController", "DeleteArtist", `/artist/:id`, []string{"delete"})
	register("AlbumController", "CreateAlbum", `/album`, []string{"post"})
	register("AlbumController", "ReadAlbums", `/album`, []string{"get"})
	register("AlbumController", "ReadAlbum", `/album/:id`, []string{"get"})
	// fixed: the route was `/album:id` — missing the path separator before
	// the :id parameter, so UpdateAlbum could never be matched.
	register("AlbumController", "UpdateAlbum", `/album/:id`, []string{"put"})
	register("AlbumController", "DeleteAlbum", `/album/:id`, []string{"delete"})
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// TODO combine with the pkg/kv package outside.
package kv
import (
"context"
"sync/atomic"
"github.com/pingcap/tidb/meta/autoid"
)
// panickingAllocator is an ID allocator which panics on all operations except Rebase
type panickingAllocator struct {
	autoid.Allocator
	// base is shared by all three allocator instances created in
	// NewPanickingAllocators, so every ID type advances one high-water mark.
	base *int64
	ty autoid.AllocatorType
}
// NewPanickingAllocators creates a PanickingAllocator shared by all allocation types.
// we use this to collect the max id(either _tidb_rowid or auto_increment id or auto_random) used
// during import, and we will use this info to do ALTER TABLE xxx AUTO_RANDOM_BASE or AUTO_INCREMENT
// on post-process phase.
func NewPanickingAllocators(base int64) autoid.Allocators {
	// All three allocators alias the same base so any of them can raise it.
	sharedBase := &base
	return autoid.NewAllocators(
		false,
		&panickingAllocator{base: sharedBase, ty: autoid.RowIDAllocType},
		&panickingAllocator{base: sharedBase, ty: autoid.AutoIncrementType},
		&panickingAllocator{base: sharedBase, ty: autoid.AutoRandomType},
	)
}
// Rebase implements the autoid.Allocator interface
// It monotonically raises the shared base to newBase; calls with a smaller
// or equal newBase are no-ops. Always returns nil.
func (alloc *panickingAllocator) Rebase(_ context.Context, newBase int64, _ bool) error {
	// CAS loop: retry until newBase is no longer larger, or our swap wins.
	for {
		oldBase := atomic.LoadInt64(alloc.base)
		if newBase <= oldBase {
			break
		}
		if atomic.CompareAndSwapInt64(alloc.base, oldBase, newBase) {
			break
		}
	}
	return nil
}
// Base implements the autoid.Allocator interface
// It returns the current shared high-water mark.
func (alloc *panickingAllocator) Base() int64 {
	return atomic.LoadInt64(alloc.base)
}
// GetType reports which allocation type this instance represents.
func (alloc *panickingAllocator) GetType() autoid.AllocatorType {
	return alloc.ty
}
|
package meta_test
import (
"testing"
"time"
"github.com/BurntSushi/toml"
"github.com/messagedb/messagedb/meta"
)
// TestConfig_Parse decodes a TOML snippet into meta.Config and checks that
// every duration field is parsed with its unit intact.
func TestConfig_Parse(t *testing.T) {
	// Parse configuration.
	var c meta.Config
	if _, err := toml.Decode(`
dir = "/tmp/foo"
election-timeout = "10s"
heartbeat-timeout = "20s"
leader-lease-timeout = "30h"
commit-timeout = "40m"
`, &c); err != nil {
		t.Fatal(err)
	}
	// Validate configuration.
	if c.Dir != "/tmp/foo" {
		t.Fatalf("unexpected dir: %s", c.Dir)
	} else if time.Duration(c.ElectionTimeout) != 10*time.Second {
		t.Fatalf("unexpected election timeout: %v", c.ElectionTimeout)
	} else if time.Duration(c.HeartbeatTimeout) != 20*time.Second {
		t.Fatalf("unexpected heartbeat timeout: %v", c.HeartbeatTimeout)
	} else if time.Duration(c.LeaderLeaseTimeout) != 30*time.Hour {
		t.Fatalf("unexpected leader lease timeout: %v", c.LeaderLeaseTimeout)
	} else if time.Duration(c.CommitTimeout) != 40*time.Minute {
		t.Fatalf("unexpected commit timeout: %v", c.CommitTimeout)
	}
}
|
/*
Package logger sets up logging for the application, based on Uber zap's logger.
*/
package logger
import (
"go.uber.org/zap"
"sync"
)
// Package internal variable to implement singleton
var (
	innerLogger *zap.Logger        // built once by GetLogger
	innerSugar *zap.SugaredLogger  // sugared view of innerLogger
	onceLogger sync.Once           // guards the one-time construction
)
// GetLogger lazily builds the process-wide zap logger pair exactly once
// and returns the same (*zap.Logger, *zap.SugaredLogger) on every call.
// Construction failure panics, as the application cannot run unlogged.
func GetLogger() (*zap.Logger, *zap.SugaredLogger) {
	onceLogger.Do(func() {
		logger, err := zap.NewDevelopment()
		if err != nil {
			panic("Unable to create logger. Quitting application.")
		}
		innerLogger = logger
		innerSugar = logger.Sugar()
	})
	return innerLogger, innerSugar
}
|
package data
import (
"database/sql"
"errors"
"github.com/google/wire"
_ "github.com/go-sql-driver/mysql"
xerrors "github.com/pkg/errors"
"geektime/Go-000/Week04/internal/biz"
)
const (
	// MYSQLSRC is the MySQL DSN.
	// NOTE(review): credentials are hard-coded in source; load them from
	// configuration/secrets before production use (see TODO in NewDB).
	MYSQLSRC = "root:123456@tcp(192.168.141.180:3306)/test?charset=utf8"
)
// ErrRecordNotFound is the sentinel returned when a query matches no rows.
var ErrRecordNotFound = errors.New("record not found")
// Provider wires this package's constructors into google/wire.
var Provider = wire.NewSet(NewDB, NewUserRepo)
// NewUserRepo wraps the given connection pool in a biz.UserRepo.
func NewUserRepo(db *sql.DB) biz.UserRepo {
	return &userRepo{db: db}
}
// NewDB opens the MySQL connection pool and returns it together with a
// cleanup function that closes the pool.
// TODO: load the DSN from configuration instead of the MYSQLSRC constant.
func NewDB() (db *sql.DB, cf func(), err error) {
	db, err = sql.Open("mysql", MYSQLSRC)
	if err != nil {
		// fixed: the error was previously returned alongside a cleanup
		// closure that would have called Close on a nil *sql.DB.
		return nil, func() {}, err
	}
	cf = func() {
		db.Close()
	}
	return db, cf, nil
}
// userRepo is the SQL-backed implementation of biz.UserRepo.
type userRepo struct {
	db *sql.DB
}
// GetUserById loads a single user row by primary key.
// It returns a wrapped ErrRecordNotFound when no row matches.
func (u *userRepo) GetUserById(id int64) (*biz.User, error) {
	user := &biz.User{}
	// A one-shot query gains nothing from explicit Prepare/Close round
	// trips; QueryRow prepares, executes and releases internally.
	row := u.db.QueryRow("select id, user_name, mobile from tbl_user where id=? limit 1", id)
	if err := row.Scan(&user.Id, &user.Name, &user.Mobile); err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, xerrors.Wrap(ErrRecordNotFound, "no result found in sql")
		}
		return nil, xerrors.Wrap(err, "queryRow failed")
	}
	return user, nil
}
|
package main
import (
"fmt"
"io/ioutil"
"strings"
)
// part1 reads the puzzle input, fully reacts the polymer, and prints the
// number of surviving units.
func part1() {
	// Assumes current working directory is `day-03/`!
	fileContent, err := ioutil.ReadFile("puzzle-input.txt")
	if err != nil {
		fmt.Println(err)
	}
	// NOTE(review): on read failure the error is printed and processing
	// continues with empty content.
	polymerUnits := strings.Split(string(fileContent), "")
	processedUnits := processUnits(polymerUnits)
	fmt.Println("POLYMERS: ", len(processedUnits))
}
func processUnits(polymerUnits []string) []string {
index := 0
trimmedUnits := []string{}
for index < len(polymerUnits) {
currentUnit := polymerUnits[index]
if index == len(polymerUnits)-1 {
trimmedUnits = append(trimmedUnits, currentUnit)
index++
continue
}
nextUnit := polymerUnits[index+1]
if currentUnit != nextUnit {
isCurrentUpperCase := currentUnit == strings.ToUpper(currentUnit)
isNextUpperCase := nextUnit == strings.ToUpper(nextUnit)
if isCurrentUpperCase && !isNextUpperCase {
if currentUnit == strings.ToUpper(nextUnit) {
index += 2
continue
}
} else if !isCurrentUpperCase && isNextUpperCase {
if currentUnit == strings.ToLower(nextUnit) {
index += 2
continue
}
}
}
trimmedUnits = append(trimmedUnits, currentUnit)
index++
}
if len(polymerUnits) == len(trimmedUnits) {
return trimmedUnits
}
return processUnits(trimmedUnits)
}
|
/*
* Copyright 2018-present Open Networking Foundation
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
import (
"context"
"github.com/stretchr/testify/assert"
"testing"
)
// TestSaramaClientEnableLivenessChannel verifies that enabling the
// liveness channel immediately queues a single "true" heartbeat.
func TestSaramaClientEnableLivenessChannel(t *testing.T) {
	// Note: This doesn't actually start the client
	client := NewSaramaClient()
	ch := client.EnableLivenessChannel(context.Background(), true)
	// The channel should have one "true" message on it
	assert.NotEmpty(t, ch)
	select {
	case stuff := <-ch:
		assert.True(t, stuff)
	default:
		t.Error("Failed to read from the channel")
	}
}
|
package database
import (
"memoapp/model"
"net/url"
)
// Client is the database client interface for memo storage backends.
type Client interface {
	Set(*model.Memo) ([]byte, error)
	Get(url.Values) ([]byte, error)
	// NOTE(review): DEL breaks the naming convention of the other methods
	// (Set/Get/Exists); renaming would break implementers, so it is kept.
	DEL(url.Values) ([]byte, error)
	Exists(url.Values) (bool, error)
	SetByte(url.Values, []byte) error
	Close() error
}
var (
	// pkgName labels log/error output from this package.
	pkgName = "database"
)
// CheckCache checks whether a cache backend is available.
// NOTE(review): currently a stub that always reports no cache and no error.
func CheckCache() (Client, error) {
	return nil, nil
}
|
package models
import (
"fmt"
"time"
"github.com/go-redis/redis"
)
// SaveUrlRecord stores longURL under uniqueKey with a 5-minute TTL and
// returns the raw Redis status for the caller to inspect.
func SaveUrlRecord(uniqueKey, longURL string) *redis.StatusCmd {
	status := DB.Set(uniqueKey, longURL, 5*time.Minute)
	return status
}
// IsUniqueKeyAlreadyUsed reports whether uniqueKey already exists in Redis.
func IsUniqueKeyAlreadyUsed(uniqueKey uint64) bool {
	existing := DB.Exists(fmt.Sprint(uniqueKey))
	return int(existing.Val()) != 0
}
|
package events
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"os"
"strings"
"time"
cloudevents "github.com/cloudevents/sdk-go/v2"
"github.com/cloudevents/sdk-go/v2/binding/format"
root "github.com/direktiv/direktiv/cmd/exec/cmd"
"github.com/spf13/cobra"
goutil "golang.org/x/tools/godoc/util"
)
// Flag values for the send command; bound to cobra flags in init below.
var (
	Source string
	Type string
	Id string
	Specversion string
	ContentType string
	Attachment string
)
// eventsCmd is the parent "events" command; configuration is loaded before
// any subcommand runs.
var eventsCmd = &cobra.Command{
	Use: "events",
	Short: "Event-related commands",
	PersistentPreRun: root.InitConfiguration,
}
// sendEventCmd posts a cloudevent to the namespace broadcast endpoint,
// optionally routed through a named event filter (--filter).
var sendEventCmd = &cobra.Command{
	Use: "send EVENT DATA",
	Short: "Remotely trigger direktiv events",
	Args: cobra.MaximumNArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		urlExecuteEvent := fmt.Sprintf("%s/broadcast", root.UrlPrefix)
		filter, err := cmd.Flags().GetString("filter")
		if err != nil {
			root.Fail(cmd, "could not parse event filter %v", err.Error())
		}
		if filter != "" {
			urlExecuteEvent += "/" + strings.TrimPrefix(filter, "/")
		}
		cmd.Printf("sending events to %s\n", urlExecuteEvent)
		event, err := executeEvent(cmd, urlExecuteEvent, args)
		if err != nil {
			root.Fail(cmd, "failed to trigger event: %s %v\n", event, err)
		}
		cmd.Printf("successfully triggered event: %s\n", event)
	},
}
// executeEvent builds a cloudevent — optionally loaded from the file named
// in args, then overridden by the id/specversion/source/type flags and the
// optional attachment — and POSTs it to url. It returns the JSON that was
// sent.
func executeEvent(cmd *cobra.Command, url string, args []string) (string, error) {
	event := cloudevents.NewEvent()
	// read event file in if provided
	if len(args) > 0 {
		cmd.Printf("reading cloudevent file %s\n", args[0])
		e, err := os.ReadFile(args[0])
		if err != nil {
			return "", err
		}
		// we only do json http
		err = format.Unmarshal("application/cloudevents+json", e, &event)
		if err != nil {
			return "", err
		}
	}
	// overwrite data if provided
	if Id != "" {
		event.SetID(Id)
	}
	if Specversion != "" {
		event.SetSpecVersion(Specversion)
	}
	if Source != "" {
		event.SetSource(Source)
	}
	if Type != "" {
		event.SetType(Type)
	}
	// attach data
	if len(Attachment) > 0 {
		attachment, err := os.ReadFile(Attachment)
		if err != nil {
			return "", err
		}
		// attach and guess attachment type
		ct := ContentType
		var attach interface{}
		err = json.Unmarshal(attachment, &attach)
		// it is not json: we guess the content type if not set
		if err != nil {
			if ct == "" {
				ct = http.DetectContentType(attachment)
			}
			if goutil.IsText(attachment) {
				attach = string(attachment)
			} else {
				attach = attachment
			}
		} else {
			// if not set we assume json
			// reason for not setting it static: it could be something like whatever+json
			if ct == "" {
				ct = "application/json"
			}
			// we leave attach as the decoded object so it is serialized
			// as json and not as an escaped json string
		}
		err = event.SetData(ct, attach)
		if err != nil {
			return "", err
		}
	}
	b, err := format.JSON.Marshal(&event)
	if err != nil {
		return "", err
	}
	req, err := http.NewRequestWithContext(
		context.Background(),
		http.MethodPost,
		url,
		bytes.NewReader(b),
	)
	if err != nil {
		return "", err
	}
	req.Header.Add("Content-Type", "application/cloudevents+json")
	root.AddAuthHeaders(req)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	// fixed: the response body was never closed, leaking the connection
	defer resp.Body.Close()
	// the root command checks if the namespace exists
	// this not found has to be a wrong filter
	if resp.StatusCode == http.StatusNotFound {
		return "", fmt.Errorf("eventfilter does not exist")
	} else if resp.StatusCode == http.StatusForbidden {
		return "", fmt.Errorf("access to server forbidden")
	} else if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("server responded with status %d", resp.StatusCode)
	}
	return string(b), err
}
// setFilterCmd creates (or, with --force, updates) a named event filter
// from a script given as a file argument or on stdin.
var setFilterCmd = &cobra.Command{
	Use: "set-filter NAME SCRIPT",
	Short: "Define an event filter.",
	Args: cobra.MinimumNArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		filterName := args[0]
		var (
			inputData *bytes.Buffer
			err error
		)
		// Read input data as arg or stdin
		if len(args) > 1 {
			inputData, err = root.SafeLoadFile(args[1])
			if err != nil {
				root.Fail(cmd, "Failed to load input file: %v", err)
			}
		} else {
			inputData, err = root.SafeLoadStdIn()
			if err != nil {
				root.Fail(cmd, "Failed to load stdin: %v", err)
			}
		}
		// fail if there is nothing to create
		if inputData.Len() == 0 {
			root.Fail(cmd, "no filter function provided")
		}
		// PATCH (force) updates an existing filter; POST creates a new one
		force, err := cmd.Flags().GetBool("force")
		if err != nil {
			root.Fail(cmd, "can not read force flag: %s", err.Error())
		}
		method := http.MethodPost
		if force {
			method = http.MethodPatch
		}
		err = executeCreateCloudEventFilter(filterName, inputData, method)
		if err != nil {
			root.Fail(cmd, "can not create filter: %s\n", err.Error())
		}
		cmd.Printf("successfully created cloud event filter: %s\n", filterName)
	},
}
// executeCreateCloudEventFilter uploads the filter script read from data
// under filterName. method is POST for create and PATCH for forced update.
func executeCreateCloudEventFilter(filterName string, data io.Reader, method string) error {
	if filterName == "" {
		return errors.New("filter name not set")
	}
	url := fmt.Sprintf("%s/eventfilter/%s", root.UrlPrefix, filterName)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	req, err := http.NewRequestWithContext(
		ctx,
		method,
		url,
		data,
	)
	if err != nil {
		return err
	}
	root.AddAuthHeaders(req)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	// fixed: the response body was never closed, leaking the connection
	defer resp.Body.Close()
	// this can not happen on update
	if resp.StatusCode == http.StatusConflict {
		return fmt.Errorf("event filter %s already exists", filterName)
	}
	if resp.StatusCode == http.StatusBadRequest {
		return fmt.Errorf("event filter %s invalid. check syntax", filterName)
	}
	// NOTE(review): every other status is treated as success — confirm the
	// server only answers 200/409/400 here.
	return nil
}
// listFiltersResp mirrors the server's JSON response for the filter list.
type listFiltersResp struct {
	EventFilter []struct {
		Name string `json:"name"`
	} `json:"eventFilter"`
}
// listFilterCmd prints the name of every event filter in the namespace.
var listFilterCmd = &cobra.Command{
	Use: "list-filters",
	Short: "List event filters for namespace.",
	Args: cobra.ExactArgs(0),
	Run: func(cmd *cobra.Command, args []string) {
		resp, err := executeListCloudEventFilter()
		if err != nil {
			root.Fail(cmd, "can not fetch event filter: %v\n", err)
		}
		var eventfilter listFiltersResp
		err = json.Unmarshal(resp, &eventfilter)
		if err != nil {
			root.Fail(cmd, "can not unmarshall event filter response: %v\n", err)
		}
		for i := range eventfilter.EventFilter {
			cmd.Println(eventfilter.EventFilter[i].Name)
		}
	},
}
// executeListCloudEventFilter fetches the raw JSON list of event filters
// for the current namespace.
func executeListCloudEventFilter() ([]byte, error) {
	url := fmt.Sprintf("%s/eventfilter", root.UrlPrefix)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	req, err := http.NewRequestWithContext(
		ctx,
		http.MethodGet,
		url,
		nil,
	)
	if err != nil {
		return nil, err
	}
	root.AddAuthHeaders(req)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	// fixed: the response body was never closed, leaking the connection
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("failed to list filters (rejected by server)")
	}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return body, nil
}
// deleteFilterCmd removes a named event filter.
var deleteFilterCmd = &cobra.Command{
	Use: "delete-filter NAME",
	Short: "Delete an event filter.",
	Args: cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		filterName := args[0]
		err := executeDeleteCloudEventFilter(filterName)
		if err != nil {
			root.Fail(cmd, "error: %v\n", err)
		}
		cmd.Printf("successfully deleted cloud event filter: %s\n", filterName)
	},
}
// executeDeleteCloudEventFilter removes the named event filter on the
// server; it is an error if the filter does not exist.
func executeDeleteCloudEventFilter(filterName string) error {
	if filterName == "" {
		return fmt.Errorf("filtername was not set")
	}
	url := fmt.Sprintf("%s/eventfilter/%s", root.UrlPrefix, filterName)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	req, err := http.NewRequestWithContext(
		ctx,
		http.MethodDelete,
		url,
		nil,
	)
	if err != nil {
		return err
	}
	root.AddAuthHeaders(req)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	// fixed: the response body was never closed, leaking the connection
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotFound {
		// fixed: use a format verb instead of string concatenation
		return fmt.Errorf("filter %s does not exist", filterName)
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("failed to delete filter: %s (error code: %d)", filterName, resp.StatusCode)
	}
	return nil
}
// getFilterResp mirrors the server's JSON response for a single filter.
type getFilterResp struct {
	Filtername string `json:"filtername"`
	JsCode string `json:"jsCode"`
}
// getFilterCmd prints the name and script of a single event filter.
var getFilterCmd = &cobra.Command{
	Use: "get-filter NAME",
	Short: "Get an event filter.",
	Args: cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		filterName := args[0]
		resp, err := executeGetCloudEventFilter(filterName)
		if err != nil {
			root.Fail(cmd, "error: %v\n", err)
		}
		var eventfilter getFilterResp
		err = json.Unmarshal(resp, &eventfilter)
		if err != nil {
			root.Fail(cmd, "error: %v\n", err)
		}
		cmd.Printf("filtername: %s\n", eventfilter.Filtername)
		cmd.Printf("script: %s\n", eventfilter.JsCode)
	},
}
// executeGetCloudEventFilter fetches the named filter's JSON definition.
func executeGetCloudEventFilter(filterName string) ([]byte, error) {
	if filterName == "" {
		return nil, fmt.Errorf("filter name was not set")
	}
	url := fmt.Sprintf("%s/eventfilter/%s", root.UrlPrefix, filterName)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	req, err := http.NewRequestWithContext(
		ctx,
		http.MethodGet,
		url,
		nil,
	)
	if err != nil {
		return nil, err
	}
	// removed: a redundant req.WithContext(ctx) — NewRequestWithContext
	// already attached ctx.
	root.AddAuthHeaders(req)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	// fixed: the response body was never closed, leaking the connection
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotFound {
		// fixed: use a format verb instead of string concatenation
		return nil, fmt.Errorf("filter %s does not exist", filterName)
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("failed to get filter: %s (error code: %d)", filterName, resp.StatusCode)
	}
	return io.ReadAll(resp.Body)
}
// init registers the events command tree and binds all command-line flags.
func init() {
	root.RootCmd.AddCommand(eventsCmd)
	eventsCmd.AddCommand(sendEventCmd)
	eventsCmd.AddCommand(setFilterCmd)
	eventsCmd.AddCommand(deleteFilterCmd)
	eventsCmd.AddCommand(getFilterCmd)
	eventsCmd.AddCommand(listFilterCmd)
	sendEventCmd.Flags().StringVar(&Attachment, "attachment", "", "Path to file used as data of the cloud event.")
	sendEventCmd.Flags().StringVar(&Source, "source", "", "Cloudevent source.")
	sendEventCmd.Flags().StringVar(&Type, "type", "", "CloudEvent type.")
	sendEventCmd.Flags().StringVar(&Id, "id", "", "Clouedevent ID. Required by spec but automatically set if not provided.")
	sendEventCmd.Flags().StringVar(&ContentType, "contentType", "", "Content type of attachment if read from file. Guessing if it is not set.")
	sendEventCmd.Flags().StringVar(&Specversion, "specversion", "", "The version of the CloudEvents specification which the event uses.")
	sendEventCmd.Flags().String("filter", "", "Custom filter for CloudEvents.")
	setFilterCmd.PersistentFlags().BoolP("force", "f", false, "Forced update for event filter if it already exists.")
}
|
package linkaja
import "fmt"
// GenerateItems renders items as a JSON-like array of
// ["name", "price", "quantity"] triples, e.g. [["a", "1", "2"],["b", "3", "4"]].
func GenerateItems(items []PublicTokenItemRequest) string {
	joined := ""
	for idx, item := range items {
		entry := fmt.Sprintf("[\"%v\", \"%v\", \"%v\"]", item.Name, item.Price, item.Quantity)
		if idx == 0 {
			joined = entry
		} else {
			joined = joined + "," + entry
		}
	}
	return fmt.Sprintf("[%v]", joined)
}
|
package reminder
import (
"fmt"
"os/exec"
"time"
)
// Reminder runs the configured reminder tasks until stopped.
type Reminder interface {
	Start()
	Stop()
}
// reminder is the concrete Reminder; done signals shutdown to all goroutines.
type reminder struct {
	done chan struct{}
	config Config
}
// Task is one notification: what to show and how often.
type Task struct {
	Title string `json:"title"`
	Message string `json:"message"`
	// Interval is parsed with time.ParseDuration (e.g. "30m", "1h").
	Interval string `json:"interval"`
}
// Config is the full set of reminder tasks.
type Config struct {
	Reminders []Task `json:"reminders"`
}
// notify shows a macOS desktop notification via osascript; it returns an
// error when osascript is not on PATH or the command fails.
func notify(title, message string) error {
	osa, err := exec.LookPath("osascript")
	if err != nil {
		return err
	}
	cmd := exec.Command(osa, "-e", fmt.Sprintf(`display notification "%s" with title "%s"`, message, title))
	return cmd.Run()
}
// remind displays each task arriving on r and hands it back on t so the
// timer can schedule the next occurrence; it returns when d is closed.
// NOTE(review): the error from notify is deliberately ignored here.
func remind(r <-chan Task, t chan<- Task, d <-chan struct{}) {
	for {
		select {
		case re := <-r:
			notify(re.Title, re.Message)
			t <- re
		case <-d:
			return
		}
	}
}
// ticker waits one interval and then hands the task back to the reminder
// channel exactly once; timer() starts a fresh ticker for the next cycle.
// A malformed interval silently drops the task (as before).
func ticker(re Task, r chan<- Task, d <-chan struct{}) {
	dur, err := time.ParseDuration(re.Interval)
	if err != nil {
		return
	}
	// fixed: time.Tick leaked one ticker per reminder cycle because this
	// function fires only once; a single stopped timer does not leak.
	t := time.NewTimer(dur)
	defer t.Stop()
	select {
	case <-t.C:
		r <- re
	case <-d:
	}
}
// timer launches a one-shot ticker goroutine for every task received on t;
// it returns when d is closed.
func timer(t <-chan Task, r chan<- Task, d <-chan struct{}) {
	for {
		select {
		case re := <-t:
			go ticker(re, r, d)
		case <-d:
			return
		}
	}
}
// Start wires the remind/timer pipeline, seeds it with every configured
// task, posts a startup notification, and then blocks servicing timers
// until Stop is called.
func (r *reminder) Start() {
	// Buffers sized to the task count so seeding below cannot block.
	reminderChan := make(chan Task, len(r.config.Reminders))
	timerChan := make(chan Task, len(r.config.Reminders))
	go remind(reminderChan, timerChan, r.done)
	for _, re := range r.config.Reminders {
		timerChan <- re
	}
	notify("Starting", "Have a good day.")
	timer(timerChan, reminderChan, r.done)
}
// Stop closes done, unblocking Start and every helper goroutine.
func (r *reminder) Stop() {
	close(r.done)
}
// New builds a Reminder for the given config; call Start to begin.
func New(config Config) Reminder {
	return &reminder{
		config: config,
		done: make(chan struct{}),
	}
}
|
package main
import (
"log"
"math/rand"
"sync"
"sync/atomic"
"time"
)
type (
	// semaphore is a channel of empty structs; declaring it as a named
	// type lets it act both as a channel and as a method receiver.
	semaphore chan struct{}
	readerWriter struct {
		name string
		write sync.WaitGroup
		readerControl semaphore
		shutdown chan struct{}
		reportShutdown sync.WaitGroup // waits for all readers and the writer to finish
		maxReads int
		maxReaders int
		currentReads int32
	}
)
// init seeds math/rand so each run interleaves differently.
func init() {
	rand.Seed(time.Now().Unix())
}
// main demonstrates a channel-based semaphore that allows multiple
// concurrent readers but only one writer at a time.
func main() {
	log.Println("Starting Process")
	// "First" allows at most 3 concurrent reads across 6 readers;
	// "Second" allows 2 across 2.
	first := start("First", 3, 6)
	second := start("Second", 2, 2)
	time.Sleep(2 * time.Second)
	// shutdown invokes each readerWriter's stop method, which waits
	// (via reportShutdown) for all readers and the writer to finish.
	shutdown(first, second)
	log.Println("Process End")
}
// start builds a readerWriter named name that permits at most
// maxReads concurrent reads, spawns maxReaders reader goroutines plus
// one writer goroutine, and returns it so the caller can stop it
// later.
func start(name string, maxReads int, maxReaders int) *readerWriter {
	rw := readerWriter{
		name: name,
		shutdown: make(chan struct{}),
		maxReads: maxReads,
		maxReaders: maxReaders,
		// readerControl's buffer size is what bounds the number of
		// concurrent readers (at most maxReads).
		readerControl: make(semaphore, maxReads),
	}
	// Counted down by each reader goroutine; stop() waits on this.
	rw.reportShutdown.Add(maxReaders)
	// Launch the reader goroutines.
	for goroutine := 0; goroutine < maxReaders; goroutine++ {
		go rw.reader(goroutine)
	}
	rw.reportShutdown.Add(1)
	go rw.writer()
	return &rw
}
// shutdown stops every given readerWriter concurrently and blocks
// until each one has fully drained its goroutines.
func shutdown(writer ...*readerWriter) {
	var wg sync.WaitGroup
	for _, rw := range writer {
		wg.Add(1)
		go rw.stop(&wg)
	}
	wg.Wait()
}
// stop closes the shutdown channel so every reader/writer goroutine
// exits, then blocks until all of them have reported completion.
func (rw *readerWriter) stop(group *sync.WaitGroup) {
	defer group.Done()
	log.Printf("%s\t: #####> Stop", rw.name)
	// A closed channel is always ready in a select, so closing
	// rw.shutdown wakes every goroutine watching it.
	close(rw.shutdown)
	// Wait for all readers and the writer to finish.
	rw.reportShutdown.Wait()
	log.Printf("%s\t: #####> Stopped", rw.name)
}
// reader loops performing reads until shutdown is signalled.
func (rw *readerWriter) reader(reader int) {
	defer rw.reportShutdown.Done()
	for {
		select {
		case <-rw.shutdown: // ready once rw.shutdown is closed
			log.Printf("%s\t: #> Reader Shutdown", rw.name)
			return
		default:
			rw.performRead(reader)
		}
	}
}
// performRead acquires one read slot, simulates some read work, and
// releases the slot. currentReads is purely a diagnostic counter.
func (rw *readerWriter) performRead(reader int) {
	rw.ReadLock(reader)
	count := atomic.AddInt32(&rw.currentReads, 1)
	log.Printf("%s\t: [%d] Start\t- [%d] Reads\n", rw.name, reader, count)
	time.Sleep(time.Duration(rand.Intn(1000)) * time.Millisecond)
	count = atomic.AddInt32(&rw.currentReads, -1)
	log.Printf("%s\t: [%d] Finish\t- [%d] Reads\n", rw.name, reader, count)
	rw.ReadUnlock(reader)
}
// writer loops performing writes until shutdown is signalled.
func (rw *readerWriter) writer() {
	defer rw.reportShutdown.Done()
	for {
		select {
		case <-rw.shutdown: // ready once rw.shutdown is closed
			log.Printf("%s\t: #> Writer Shutdown", rw.name)
			return
		default:
			rw.performWrite()
		}
	}
}
// performWrite waits a random moment, then takes the exclusive write
// lock, simulates the write, and releases it.
func (rw *readerWriter) performWrite() {
	time.Sleep(time.Duration(rand.Intn(1000)) * time.Millisecond)
	log.Printf("%s\t: *****> Writing Pending\n", rw.name)
	rw.WriteLock()
	log.Printf("%s\t: *****> Writing Start", rw.name)
	time.Sleep(time.Duration(rand.Intn(1000)) * time.Millisecond)
	log.Printf("%s\t: *****> Writing Finish", rw.name)
	rw.WriteUnlock()
}
// ReadLock blocks while a write is pending or in progress, then
// claims one read slot from the semaphore.
func (rw *readerWriter) ReadLock(reader int) {
	// write is a WaitGroup the writer Adds to before a write and Dones
	// after it; waiting here keeps new reads out during a write.
	rw.write.Wait()
	// Acquire sends into the buffered channel; once the buffer (of
	// size maxReads) is full, further sends block — which is exactly
	// what bounds the number of concurrent reads.
	rw.readerControl.Acquire(1)
}
// ReadUnlock returns the read slot taken by ReadLock.
func (rw *readerWriter) ReadUnlock(reader int) {
	// Release receives from the channel, freeing one buffer slot.
	rw.readerControl.Release(1)
}
// WriteLock blocks new readers and then waits for in-flight reads to
// drain by filling the entire semaphore buffer.
func (rw *readerWriter) WriteLock() {
	// Add(1) makes ReadLock's Wait block, so no new read can start
	// while the write is pending or running.
	rw.write.Add(1)
	// Filling every buffer slot blocks until all current readers have
	// released; afterwards any further Acquire blocks as well.
	rw.readerControl.Acquire(rw.maxReads)
}
// WriteUnlock drains the semaphore buffer and lets readers proceed
// again.
func (rw *readerWriter) WriteUnlock() {
	rw.readerControl.Release(rw.maxReads)
	rw.write.Done()
}
// Acquire claims the given number of buffer slots, blocking whenever
// the channel's buffer is full.
func (s semaphore) Acquire(buffers int) {
	for i := 0; i < buffers; i++ {
		s <- struct{}{}
	}
}
// Release frees the given number of previously acquired slots by
// draining that many values from the channel.
func (s semaphore) Release(buffers int) {
	for i := buffers; i > 0; i-- {
		<-s
	}
}
|
package interfaces
// PasswordServiceProvider abstracts password encoding so the
// application core does not depend on a concrete implementation.
type PasswordServiceProvider interface {
	// EncodePassword returns the encoded (e.g. hashed) form of the
	// given plain-text password.
	EncodePassword(password string) string
}
|
/*
Copyright Greg Haskins <gregory.haskins@gmail.com> 2017, All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/spf13/viper"
)
// SetupCoreYAMLConfig configures the global viper instance: it reads
// the file named "config" from the ./config directory and maps
// CORE_-prefixed environment variables (with "_" standing in for ".")
// onto configuration keys. It panics if the file cannot be read.
func SetupCoreYAMLConfig() {
	viper.SetConfigName("config")
	viper.SetEnvPrefix("CORE")
	viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	viper.AutomaticEnv()
	viper.AddConfigPath("config")
	// err := config.AddDevConfigPath(nil)
	// if err != nil {
	// panic(fmt.Errorf("Fatal error adding dev dir: %s \n", err))
	// }
	err := viper.ReadInConfig()
	if err != nil { // Handle errors reading the config file
		panic(fmt.Errorf("fatal error config file: %s", err))
	}
}
// dirExists reports whether path can be stat'ed, i.e. the entry
// exists and is reachable (despite the name, it does not check that
// the entry is a directory).
func dirExists(path string) bool {
	if _, err := os.Stat(path); err != nil {
		return false
	}
	return true
}
// addConfigPath appends a config search path to v, or to the global
// viper instance when v is nil.
func addConfigPath(v *viper.Viper, p string) {
	if v != nil {
		v.AddConfigPath(p)
	} else {
		viper.AddConfigPath(p)
	}
}
// GetDevConfigDir returns the first existing directory listed in the
// GOPATH environment variable. It is only meaningful to call from a
// test/development context, and fails when GOPATH is unset or none of
// its entries exist.
func GetDevConfigDir() (string, error) {
	gopath := os.Getenv("GOPATH")
	if gopath == "" {
		return "", fmt.Errorf("GOPATH not set")
	}
	for _, entry := range filepath.SplitList(gopath) {
		candidate := filepath.Join(entry, ".")
		if dirExists(candidate) {
			return candidate, nil
		}
	}
	return "", fmt.Errorf("DevConfigDir not found in %s", gopath)
}
// TranslatePath resolves p relative to base — typically the directory
// of the config file that specified it. Absolute paths are returned
// untouched.
func TranslatePath(base, p string) string {
	if !filepath.IsAbs(p) {
		return filepath.Join(base, p)
	}
	return p
}
// TranslatePathInPlace rewrites *p so that it becomes fully qualified
// relative to base (the directory of the config file that specified
// it); absolute paths are left as-is.
func TranslatePathInPlace(base string, p *string) {
	translated := TranslatePath(base, *p)
	*p = translated
}
// GetPath reads the string at key and, when it is a relative path,
// resolves it against the directory of the config file currently in
// use. An unset key yields "".
//
// Example: with the config at /etc/hyperledger/fabric/core.yaml and
// key "msp.configPath" = "msp/config.yaml", GetPath returns
// /etc/hyperledger/fabric/msp/config.yaml.
func GetPath(key string) string {
	p := viper.GetString(key)
	if p == "" {
		return ""
	}
	return TranslatePath(filepath.Dir(viper.ConfigFileUsed()), p)
}
// OfficialPath is the system-wide configuration directory consulted
// last by InitViper.
const OfficialPath = "/etc/cuproad"
// InitViper establishes the config search paths on v (or on the
// global viper instance when v is nil) and sets the config file name.
//
// When CUPROAD_CFG_PATH is set it is the only path considered;
// otherwise the search order is: current directory, the GOPATH-based
// development tree, and finally OfficialPath.
func InitViper(v *viper.Viper, configName string) error {
	var altPath = os.Getenv("CUPROAD_CFG_PATH")
	if altPath != "" {
		// If the user has overridden the path with an envvar, its the only path
		// we will consider — and it must exist.
		if !dirExists(altPath) {
			return fmt.Errorf("CUPROAD_CFG_PATH %s does not exist", altPath)
		}
		addConfigPath(v, altPath)
	} else {
		// Default paths in priority order:
		//
		// *) CWD
		// *) The $GOPATH based development tree
		// *) OfficialPath
		//
		// CWD
		addConfigPath(v, "./")
		// DevConfigPath. NOTE(review): the returned error is ignored —
		// a missing dev tree is presumably normal outside development;
		// confirm that is the intent.
		AddDevConfigPath(v)
		// And finally, the official path, if it exists.
		if dirExists(OfficialPath) {
			addConfigPath(v, OfficialPath)
		}
	}
	// Now set the configuration file name (without extension).
	if v != nil {
		v.SetConfigName(configName)
	} else {
		viper.SetConfigName(configName)
	}
	return nil
}
// AddDevConfigPath adds the GOPATH-derived development config
// directory to the viper search path; v == nil targets the global
// instance. It fails when no dev directory can be resolved.
func AddDevConfigPath(v *viper.Viper) error {
	devPath, err := GetDevConfigDir()
	if err != nil {
		return err
	}
	addConfigPath(v, devPath)
	return nil
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cloudresourcemanager
import (
"context"
"fmt"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
dclService "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured"
iamUnstruct "github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured/google/iam"
)
// Project adapts the DCL cloudresourcemanager Project resource to the
// generic unstructured.Resource interface.
type Project struct{}
// ProjectToUnstructured converts a typed DCL Project into its
// unstructured map representation; nil fields are omitted from the
// map.
func ProjectToUnstructured(r *dclService.Project) *unstructured.Resource {
	u := &unstructured.Resource{
		STV: unstructured.ServiceTypeVersion{
			Service: "cloudresourcemanager",
			Version: "ga",
			Type:    "Project",
		},
		Object: make(map[string]interface{}),
	}
	if r.DisplayName != nil {
		// NOTE(review): key is all-lowercase "displayname" while the
		// other keys are camelCase. UnstructuredToProject matches it,
		// so normalizing it would break round-tripping — confirm
		// against the code generator before changing.
		u.Object["displayname"] = *r.DisplayName
	}
	if r.Labels != nil {
		rLabels := make(map[string]interface{})
		for k, v := range r.Labels {
			rLabels[k] = v
		}
		u.Object["labels"] = rLabels
	}
	if r.LifecycleState != nil {
		u.Object["lifecycleState"] = string(*r.LifecycleState)
	}
	if r.Name != nil {
		u.Object["name"] = *r.Name
	}
	if r.Parent != nil {
		u.Object["parent"] = *r.Parent
	}
	if r.ProjectNumber != nil {
		u.Object["projectNumber"] = *r.ProjectNumber
	}
	return u
}
// UnstructuredToProject converts the unstructured map representation
// back into a typed DCL Project, validating the dynamic type of every
// field it recognizes and erroring on a mismatch.
func UnstructuredToProject(u *unstructured.Resource) (*dclService.Project, error) {
	r := &dclService.Project{}
	if _, ok := u.Object["displayname"]; ok {
		if s, ok := u.Object["displayname"].(string); ok {
			r.DisplayName = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.DisplayName: expected string")
		}
	}
	if _, ok := u.Object["labels"]; ok {
		if rLabels, ok := u.Object["labels"].(map[string]interface{}); ok {
			m := make(map[string]string)
			// Non-string label values are silently skipped.
			for k, v := range rLabels {
				if s, ok := v.(string); ok {
					m[k] = s
				}
			}
			r.Labels = m
		} else {
			return nil, fmt.Errorf("r.Labels: expected map[string]interface{}")
		}
	}
	if _, ok := u.Object["lifecycleState"]; ok {
		if s, ok := u.Object["lifecycleState"].(string); ok {
			r.LifecycleState = dclService.ProjectLifecycleStateEnumRef(s)
		} else {
			return nil, fmt.Errorf("r.LifecycleState: expected string")
		}
	}
	if _, ok := u.Object["name"]; ok {
		if s, ok := u.Object["name"].(string); ok {
			r.Name = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Name: expected string")
		}
	}
	if _, ok := u.Object["parent"]; ok {
		if s, ok := u.Object["parent"].(string); ok {
			r.Parent = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Parent: expected string")
		}
	}
	if _, ok := u.Object["projectNumber"]; ok {
		if i, ok := u.Object["projectNumber"].(int64); ok {
			r.ProjectNumber = dcl.Int64(i)
		} else {
			return nil, fmt.Errorf("r.ProjectNumber: expected int64")
		}
	}
	return r, nil
}
// GetProject fetches the current state of the project described by u.
func GetProject(ctx context.Context, config *dcl.Config, u *unstructured.Resource) (*unstructured.Resource, error) {
	c := dclService.NewClient(config)
	r, err := UnstructuredToProject(u)
	if err != nil {
		return nil, err
	}
	r, err = c.GetProject(ctx, r)
	if err != nil {
		return nil, err
	}
	return ProjectToUnstructured(r), nil
}
// ListProject returns every project under parent, following all
// result pages.
func ListProject(ctx context.Context, config *dcl.Config, parent string) ([]*unstructured.Resource, error) {
	c := dclService.NewClient(config)
	l, err := c.ListProject(ctx, parent)
	if err != nil {
		return nil, err
	}
	var resources []*unstructured.Resource
	for {
		for _, r := range l.Items {
			resources = append(resources, ProjectToUnstructured(r))
		}
		if !l.HasNext() {
			break
		}
		if err := l.Next(ctx, c); err != nil {
			return nil, err
		}
	}
	return resources, nil
}
// ApplyProject creates or updates the project to match u, honoring a
// prior-state hint when one is supplied in opts.
func ApplyProject(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	c := dclService.NewClient(config)
	r, err := UnstructuredToProject(u)
	if err != nil {
		return nil, err
	}
	if ush := unstructured.FetchStateHint(opts); ush != nil {
		sh, err := UnstructuredToProject(ush)
		if err != nil {
			return nil, err
		}
		opts = append(opts, dcl.WithStateHint(sh))
	}
	r, err = c.ApplyProject(ctx, r, opts...)
	if err != nil {
		return nil, err
	}
	return ProjectToUnstructured(r), nil
}
// ProjectHasDiff reports whether applying u would change anything: it
// runs an apply with all lifecycle changes blocked and interprets an
// "infeasible" result as "a change would be required".
func ProjectHasDiff(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	c := dclService.NewClient(config)
	r, err := UnstructuredToProject(u)
	if err != nil {
		return false, err
	}
	if ush := unstructured.FetchStateHint(opts); ush != nil {
		sh, err := UnstructuredToProject(ush)
		if err != nil {
			return false, err
		}
		opts = append(opts, dcl.WithStateHint(sh))
	}
	opts = append(opts, dcl.WithLifecycleParam(dcl.BlockDestruction), dcl.WithLifecycleParam(dcl.BlockCreation), dcl.WithLifecycleParam(dcl.BlockModification))
	_, err = c.ApplyProject(ctx, r, opts...)
	if err != nil {
		if _, ok := err.(dcl.ApplyInfeasibleError); ok {
			return true, nil
		}
		return false, err
	}
	return false, nil
}
// DeleteProject removes the project described by u.
func DeleteProject(ctx context.Context, config *dcl.Config, u *unstructured.Resource) error {
	c := dclService.NewClient(config)
	r, err := UnstructuredToProject(u)
	if err != nil {
		return err
	}
	return c.DeleteProject(ctx, r)
}
// ProjectID returns the canonical identifier of the project in u.
func ProjectID(u *unstructured.Resource) (string, error) {
	r, err := UnstructuredToProject(u)
	if err != nil {
		return "", err
	}
	return r.ID()
}
// STV identifies this adapter to the unstructured registry as the
// cloudresourcemanager/Project resource at version "ga".
func (r *Project) STV() unstructured.ServiceTypeVersion {
	// Use named fields: ProjectToUnstructured above populates the same
	// struct by name, and a positional literal silently misassigns
	// values if the struct's field declaration order ever differs.
	return unstructured.ServiceTypeVersion{
		Service: "cloudresourcemanager",
		Type:    "Project",
		Version: "ga",
	}
}
// SetPolicyProject replaces the project's IAM policy with p and
// returns the policy as stored by the service.
func SetPolicyProject(ctx context.Context, config *dcl.Config, u *unstructured.Resource, p *unstructured.Resource) (*unstructured.Resource, error) {
	r, err := UnstructuredToProject(u)
	if err != nil {
		return nil, err
	}
	policy, err := iamUnstruct.UnstructuredToPolicy(p)
	if err != nil {
		return nil, err
	}
	policy.Resource = r
	iamClient := iam.NewClient(config)
	newPolicy, err := iamClient.SetPolicy(ctx, policy)
	if err != nil {
		return nil, err
	}
	return iamUnstruct.PolicyToUnstructured(newPolicy), nil
}
// SetPolicyWithEtagProject is SetPolicyProject with optimistic
// concurrency: the update only succeeds if the policy's etag matches.
func SetPolicyWithEtagProject(ctx context.Context, config *dcl.Config, u *unstructured.Resource, p *unstructured.Resource) (*unstructured.Resource, error) {
	r, err := UnstructuredToProject(u)
	if err != nil {
		return nil, err
	}
	policy, err := iamUnstruct.UnstructuredToPolicy(p)
	if err != nil {
		return nil, err
	}
	policy.Resource = r
	iamClient := iam.NewClient(config)
	newPolicy, err := iamClient.SetPolicyWithEtag(ctx, policy)
	if err != nil {
		return nil, err
	}
	return iamUnstruct.PolicyToUnstructured(newPolicy), nil
}
// GetPolicyProject fetches the project's current IAM policy.
func GetPolicyProject(ctx context.Context, config *dcl.Config, u *unstructured.Resource) (*unstructured.Resource, error) {
	r, err := UnstructuredToProject(u)
	if err != nil {
		return nil, err
	}
	iamClient := iam.NewClient(config)
	policy, err := iamClient.GetPolicy(ctx, r)
	if err != nil {
		return nil, err
	}
	return iamUnstruct.PolicyToUnstructured(policy), nil
}
// SetPolicyMemberProject adds/updates a single member binding on the
// project's IAM policy and returns the resulting policy.
func SetPolicyMemberProject(ctx context.Context, config *dcl.Config, u *unstructured.Resource, m *unstructured.Resource) (*unstructured.Resource, error) {
	r, err := UnstructuredToProject(u)
	if err != nil {
		return nil, err
	}
	member, err := iamUnstruct.UnstructuredToMember(m)
	if err != nil {
		return nil, err
	}
	member.Resource = r
	iamClient := iam.NewClient(config)
	policy, err := iamClient.SetMember(ctx, member)
	if err != nil {
		return nil, err
	}
	return iamUnstruct.PolicyToUnstructured(policy), nil
}
// GetPolicyMemberProject fetches a single role/member binding from
// the project's IAM policy.
func GetPolicyMemberProject(ctx context.Context, config *dcl.Config, u *unstructured.Resource, role, member string) (*unstructured.Resource, error) {
	r, err := UnstructuredToProject(u)
	if err != nil {
		return nil, err
	}
	iamClient := iam.NewClient(config)
	policyMember, err := iamClient.GetMember(ctx, r, role, member)
	if err != nil {
		return nil, err
	}
	return iamUnstruct.MemberToUnstructured(policyMember), nil
}
// DeletePolicyMemberProject removes a member binding from the
// project's IAM policy.
func DeletePolicyMemberProject(ctx context.Context, config *dcl.Config, u *unstructured.Resource, m *unstructured.Resource) error {
	r, err := UnstructuredToProject(u)
	if err != nil {
		return err
	}
	member, err := iamUnstruct.UnstructuredToMember(m)
	if err != nil {
		return err
	}
	member.Resource = r
	iamClient := iam.NewClient(config)
	if err := iamClient.DeleteMember(ctx, member); err != nil {
		return err
	}
	return nil
}
// The methods below bind *Project to the unstructured resource
// interface by delegating to the package-level helpers above.
func (r *Project) SetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) (*unstructured.Resource, error) {
	return SetPolicyMemberProject(ctx, config, resource, member)
}
func (r *Project) GetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, role, member string) (*unstructured.Resource, error) {
	return GetPolicyMemberProject(ctx, config, resource, role, member)
}
func (r *Project) DeletePolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) error {
	return DeletePolicyMemberProject(ctx, config, resource, member)
}
func (r *Project) SetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return SetPolicyProject(ctx, config, resource, policy)
}
func (r *Project) SetPolicyWithEtag(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return SetPolicyWithEtagProject(ctx, config, resource, policy)
}
func (r *Project) GetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return GetPolicyProject(ctx, config, resource)
}
func (r *Project) Get(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return GetProject(ctx, config, resource)
}
func (r *Project) Apply(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	return ApplyProject(ctx, config, resource, opts...)
}
func (r *Project) HasDiff(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	return ProjectHasDiff(ctx, config, resource, opts...)
}
func (r *Project) Delete(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) error {
	return DeleteProject(ctx, config, resource)
}
func (r *Project) ID(resource *unstructured.Resource) (string, error) {
	return ProjectID(resource)
}
// init registers the Project adapter with the unstructured resource
// registry at package load time.
func init() {
	unstructured.Register(&Project{})
}
|
package translate
import (
"context"
"fmt"
"strings"
"cloud.google.com/go/translate"
"golang.org/x/text/language"
)
// Translator is the minimal translation interface used by callers:
// translate text, detect its language, and release the client.
type Translator interface {
	Close()
	// Translate renders input (written in source, or auto-detected
	// when source is empty) into the target language.
	Translate(ctx context.Context, input, source, target string) (string, error)
	// DetectLanguage returns the detected language code for input.
	DetectLanguage(ctx context.Context, input string) (string, error)
}
// NewClient wraps a Cloud Translation API client in the Translator
// interface; credentials are resolved by the underlying library.
func NewClient(ctx context.Context) (Translator, error) {
	c, err := translate.NewClient(ctx)
	if err != nil {
		return nil, err
	}
	return &Client{Client: c}, nil
}
// Client embeds the Cloud Translation client to satisfy Translator.
type Client struct {
	*translate.Client
}
// Close releases the resources held by the embedded Cloud Translation
// client.
//
// The previous implementation called c.Close() on itself, recursing
// infinitely (stack overflow) instead of closing the embedded client;
// the promoted method must be reached through the embedded field.
func (c *Client) Close() {
	c.Client.Close()
}
// Translate translates input into the target language, trimming the
// trailing newline the API may append. When source is empty the
// service auto-detects the input language.
func (c *Client) Translate(ctx context.Context, input, source, target string) (string, error) {
	opt := &translate.Options{
		Format: translate.Text,
	}
	if source != "" {
		opt.Source = language.Make(source)
	}
	translated, err := c.Client.Translate(ctx, []string{input}, language.Make(target), opt)
	if err != nil {
		return "", err
	}
	if len(translated) == 0 {
		return "", fmt.Errorf("have no response translated text: %s", input)
	}
	return strings.TrimRight(translated[0].Text, "\n"), nil
}
// DetectLanguage returns the language code detected for input.
func (c *Client) DetectLanguage(ctx context.Context, input string) (string, error) {
	detections, err := c.Client.DetectLanguage(ctx, []string{input})
	if err != nil {
		return "", err
	}
	if len(detections) == 0 {
		return "", fmt.Errorf("have no response detected language: %s", input)
	}
	// NOTE(review): detections[0][0] assumes at least one candidate
	// per input; an empty inner slice would panic — confirm the API
	// guarantees a candidate.
	return detections[0][0].Language.String(), nil
}
|
package server
import (
"net/http"
"net/url"
"os"
"path"
"strings"
"github.com/cinus-ue/securekit/internal/webapps/fileserver/util"
)
// archiveCallback is invoked for each node visited while building an
// archive; f may be nil when the node could not be opened.
type archiveCallback func(f *os.File, fInfo os.FileInfo, relPath string) error
// matchSelection checks info's name against the requested selections.
// An empty selection list matches everything. matchName is set when a
// selection names this entry exactly; matchPrefix is set when a
// selection of the form "name/child..." starts at this entry, and the
// remainders after the slash are returned in childSelections for the
// recursive descent.
func matchSelection(info os.FileInfo, selections []string) (matchName, matchPrefix bool, childSelections []string) {
	if len(selections) == 0 {
		return true, false, nil
	}
	name := info.Name()
	// Name comparison strategy is chosen per entry (presumably
	// case-sensitivity; see getIsNameEqualFunc).
	isNameEqual := getIsNameEqualFunc(info)
	for _, selName := range selections {
		if isNameEqual(selName, name) {
			matchName = true
			continue
		}
		slashIndex := strings.IndexByte(selName, '/')
		if slashIndex <= 0 {
			continue
		}
		selNamePart1 := selName[:slashIndex]
		if isNameEqual(selNamePart1, name) {
			childSel := selName[slashIndex+1:]
			if len(childSel) > 0 {
				matchPrefix = true
				childSelections = append(childSelections, childSel)
			}
			continue
		}
	}
	return
}
// visitTreeNode walks the tree rooted at fsPath, invoking
// archiveCallback for every entry matching childSelections and
// recursing into directories (following URL aliases).
//
// statNode controls whether the node is actually opened/stat'ed; when
// false a placeholder directory entry is used instead.
func (h *handler) visitTreeNode(
	fsPath, rawReqPath, relPath string,
	statNode bool,
	childSelections []string,
	archiveCallback archiveCallback,
) {
	var fInfo os.FileInfo
	var childInfos []os.FileInfo
	// wrap func to run defer ASAP
	err := func() error {
		var f *os.File
		var err error
		if statNode {
			f, err = os.Open(fsPath)
			if f != nil {
				defer f.Close()
			}
			if h.errHandler.LogError(err) {
				// NOTE(review): os.IsExist is almost never true for an
				// os.Open error; this looks like it may have been meant
				// as a not-exist check — confirm intent.
				if os.IsExist(err) {
					return err
				}
				fInfo = createPlaceholderFileInfo(path.Base(fsPath), true) // prefix path for alias
			} else {
				fInfo, err = f.Stat()
				if h.errHandler.LogError(err) {
					return err
				}
			}
		} else {
			fInfo = createPlaceholderFileInfo(path.Base(fsPath), true)
		}
		// relPath is empty only for the archive root, which itself is
		// not written into the archive.
		if len(relPath) > 0 {
			if err := archiveCallback(f, fInfo, relPath); err != nil {
				return err
			}
		}
		if f != nil && fInfo.IsDir() {
			childInfos, err = f.Readdir(0)
			if h.errHandler.LogError(err) {
				return err
			}
		}
		return nil
	}()
	if err != nil {
		return
	}
	if fInfo.IsDir() {
		// ":=" intentionally shadows the outer childInfos with the
		// alias-merged listing for the rest of this branch.
		childInfos, _, _ := h.mergeAlias(rawReqPath, fInfo, childInfos, true)
		childInfos = h.FilterItems(childInfos)
		// childInfo can be regular dir/file, or aliased item that shadows regular dir/file
		for _, childInfo := range childInfos {
			matchChildName, matchChildPrefix, childChildSelections := matchSelection(childInfo, childSelections)
			if !matchChildName && !matchChildPrefix {
				continue
			}
			childPath := "/" + childInfo.Name()
			childFsPath := fsPath + childPath
			childRawReqPath := util.CleanUrlPath(rawReqPath + childPath)
			childRelPath := relPath + childPath
			if childAlias, hasChildAlias := h.aliases.byUrlPath(childRawReqPath); hasChildAlias {
				h.visitTreeNode(childAlias.fsPath(), childRawReqPath, childRelPath, true, childChildSelections, archiveCallback)
			} else {
				h.visitTreeNode(childFsPath, childRawReqPath, childRelPath, statNode, childChildSelections, archiveCallback)
			}
		}
	}
}
// archive streams the current directory (or just the selected entries
// in it) as an archive; the concrete format is determined by
// fileSuffix, contentType and the per-file writer cbWriteFile.
func (h *handler) archive(
	w http.ResponseWriter,
	r *http.Request,
	pageData *responseData,
	selections []string,
	fileSuffix string,
	contentType string,
	cbWriteFile archiveCallback,
) {
	// Prefer the alias name for the downloaded file when the request
	// path is aliased.
	var itemName string
	_, hasAlias := h.aliases.byUrlPath(pageData.rawReqPath)
	if hasAlias {
		itemName = path.Base(pageData.rawReqPath)
	}
	if len(itemName) == 0 || itemName == "/" {
		itemName = pageData.ItemName
	}
	targetFilename := itemName + fileSuffix
	writeArchiveHeader(w, contentType, targetFilename)
	// e.g. HEAD requests get headers only.
	if !needResponseBody(r.Method) {
		return
	}
	h.visitTreeNode(
		path.Clean(h.root+pageData.handlerReqPath),
		pageData.rawReqPath,
		"",
		pageData.Item != nil, // not empty root
		selections,
		func(f *os.File, fInfo os.FileInfo, relPath string) error {
			h.logArchive(targetFilename, relPath, r)
			err := cbWriteFile(f, fInfo, relPath)
			h.errHandler.LogError(err)
			return err
		},
	)
}
func writeArchiveHeader(w http.ResponseWriter, contentType, filename string) {
filename = url.PathEscape(filename)
header := w.Header()
header.Set("Content-Type", contentType)
header.Set("Content-Disposition", "attachment; filename*=UTF-8''"+filename)
header.Set("Cache-Control", "public, max-age=0")
w.WriteHeader(http.StatusOK)
}
// normalizeArchiveSelections extracts the "name" form values that
// select which entries to archive, cleaning each into a safe relative
// path. ok is false on a form-parse error or an illegal path; an
// empty selection (nil, true) means "archive everything".
func (h *handler) normalizeArchiveSelections(r *http.Request) ([]string, bool) {
	if h.errHandler.LogError(r.ParseForm()) {
		return nil, false
	}
	inputs := r.Form["name"]
	if len(inputs) == 0 {
		return nil, true
	}
	count := len(inputs)
	selections := make([]string, count)
	for i := 0; i < count; i++ {
		var ok bool
		selections[i], ok = getCleanDirFilePath(inputs[i])
		if !ok {
			h.logger.Error("archive: illegal path " + inputs[i])
			return nil, false
		}
	}
	return selections, true
}
|
package slice
import "github.com/cheekybits/genny/generic"
// T is the genny placeholder for the input element type.
type T generic.Type
// V is the genny placeholder for the output element type.
type V generic.Type
// Map_T_V applies f to every element of sl and returns the results in
// the same order. The result is always non-nil (an empty, allocated
// slice for empty input), matching the original behavior.
func Map_T_V(sl []T, f func(e T) V) []V {
	// Pre-size the output: the result has exactly len(sl) elements,
	// so this avoids repeated growth reallocations during append.
	res := make([]V, 0, len(sl))
	for _, e := range sl {
		res = append(res, f(e))
	}
	return res
}
|
package system
import "errors"
// ErrorSizesDoesNotMatch reports that fewer bytes than expected were transferred.
var ErrorSizesDoesNotMatch = errors.New("could not load full file")
// ErrorCreateFile reports a failure to create a file.
var ErrorCreateFile = errors.New("could not create file")
// ErrorWriteFile reports a failure while writing to a file.
var ErrorWriteFile = errors.New("could not write to file")
// ErrorLoading reports an unexpected failure while loading a file.
var ErrorLoading = errors.New("unexpected error occurred while loading file")
// ErrorOpening reports a failure while opening a file.
var ErrorOpening = errors.New("error while opening file")
// ErrorSeeking reports a failure while seeking within a file.
var ErrorSeeking = errors.New("error while seeking file")
|
package goSolution
// numMatchingSubseq counts how many of the given words are
// subsequences of s. It scans s once, advancing one cursor per word
// (O(len(s)*len(words)) comparisons, O(len(words)) extra space).
func numMatchingSubseq(s string, words []string) int {
	progress := make([]int, len(words))
	for i := 0; i < len(s); i++ {
		c := s[i]
		for w, word := range words {
			if progress[w] < len(word) && word[progress[w]] == c {
				progress[w]++
			}
		}
	}
	matched := 0
	for w, word := range words {
		if progress[w] == len(word) {
			matched++
		}
	}
	return matched
}
|
package worldx
import (
"testing"
)
// TestDataItem pairs a City receiver with the AvailableDirs result it
// is expected to produce.
type TestDataItem struct {
	receiver City
	result []Direction
}
// TestAvailableDirs table-tests City.AvailableDirs for no exits, each
// single direction, and all four directions at once.
func TestAvailableDirs(t *testing.T) {
	testDataItems := []TestDataItem{
		{City{name: "Foo"}, []Direction{}},
		{City{name: "Foo", north: "some"}, []Direction{North}},
		{City{name: "Foo", south: "some"}, []Direction{South}},
		{City{name: "Foo", west: "some"}, []Direction{West}},
		{City{name: "Foo", east: "some"}, []Direction{East}},
		{City{name: "Foo", north: "some", south: "some", west: "some", east: "some"}, []Direction{North, South, West, East}},
	}
	for _, item := range testDataItems {
		result := item.receiver.AvailableDirs()
		if testEq(result, item.result) {
			t.Logf("%v.AvailableDirs(): PASSED", item.receiver)
		} else {
			t.Errorf("%v.AvailableDirs(): FAILED, expected %v but got %v", item.receiver, item.result, result)
		}
	}
}
// testEq reports whether two Direction slices are equal: identical
// nil-ness, identical length, and pairwise-equal elements in order.
func testEq(a, b []Direction) bool {
	switch {
	case (a == nil) != (b == nil):
		return false
	case len(a) != len(b):
		return false
	}
	for i, v := range a {
		if v != b[i] {
			return false
		}
	}
	return true
}
|
package main
import (
"github.com/dearcj/golangproj/bitmask"
"github.com/dearcj/golangproj/msutil"
pb "github.com/dearcj/golangproj/network"
)
// Player is the player component attached to a game Object. It keeps
// gun/angle state, flags for pending network notifications, and the
// wire representation in NetworkData.
type Player struct {
	additionalMoves uint32
	emotion bitmask.Bitmask
	parent *Object // owning object; set in onInit, cleared in onDestroy
	currentGun *Gun
	angle1 int32
	angle2 int32
	needNotifyAngles bool // set when the angles changed and must be broadcast
	needNotifyShoot bool // set when a shot must be broadcast
	shootTarget int
	NetworkData *pb.Player
	PlayerRelative PlayerRelativeInserter
}
// PlayerRelativeInserter inserts player-relative data; it holds a
// back-reference to its Player.
type PlayerRelativeInserter struct {
	p *Player
}
// onCollide is a no-op: players produce no actions on collision.
// NOTE(review): this is the only value receiver on Player (all other
// methods use *Player) — confirm that is intentional.
func (a Player) onCollide(me *Object, col *Object) FList { return nil }
// processIteration is a no-op for players.
func (l *Player) processIteration(me *Object, inum uint32, iprop float32) FList {
	return nil
}
// InsertToList ensures this player's data is present in the outgoing
// server message: it returns the already-present entry when one with
// the same network ID exists, otherwise it appends NetworkData and
// returns nil.
func (l *Player) InsertToList(m *msutil.XServerDataMsg) *pb.Player {
	msg := m.WriteToMsg()
	players := msg.Players
	var obj *pb.Player
	if players != nil {
		for inx, v := range players {
			if v.NetworkObject.ID == l.parent.NetworkObject.ID {
				obj = players[inx]
				break
			}
		}
	}
	if obj == nil {
		players = append(players, l.NetworkData)
	}
	msg.Players = players
	return obj
}
// Insert adds this player to the message, discarding the lookup
// result.
func (l *Player) Insert(m *msutil.XServerDataMsg) {
	l.InsertToList(m)
}
//todo: maybe change it later
// process is currently a no-op per-tick update.
func (a *Player) process(me *Object, dt float64) {
}
// onInit wires the component to its owning object.
func (a *Player) onInit(o *Object) {
	a.parent = o
	a.PlayerRelative.p = a
}
// onDestroy drops the back-reference so the object can be collected.
func (a *Player) onDestroy() {
	a.parent = nil
}
// getTypeId returns the component-type bitmask for players.
func (a *Player) getTypeId() bitmask.Bitmask {
	return config.Components.Player
}
// SetAngle records a new pair of gun angles and marks them for
// network notification.
//
// The previous condition used &&, so a change to only one of the two
// angles was silently dropped (no state update, no notification);
// either angle changing must trigger the update.
func (a *Player) SetAngle(i int32, i2 int32) {
	if a.angle1 != i || a.angle2 != i2 {
		a.needNotifyAngles = true
		a.angle1 = i
		a.angle2 = i2
	}
}
// MakeBet deducts bet from the session balance and returns the
// corresponding effect for broadcasting.
func (a *Player) MakeBet(bet float32) msutil.Insertable {
	a.parent.session.account.Balance -= float64(bet)
	return a.parent.Effect(confActions.MakeBet).V(bet)
}
// AddMoney credits amount to the session balance and returns the
// money-change effect.
func (a *Player) AddMoney(amount float32) msutil.Insertable {
	a.parent.session.account.Balance += float64(amount)
	return a.parent.Effect(confActions.MoneyChange).V(amount)
}
// ShootFx builds the shoot effect for the given target and gun.
func (a *Player) ShootFx(target int, gun int) msutil.Insertable {
	return a.parent.Effect(confActions.Shoot).V(float32(target)).V2(float32(gun))
}
// AngleChangedFx builds the angle-change effect from the current
// angles.
func (a *Player) AngleChangedFx() msutil.Insertable {
	return a.parent.Effect(confActions.AngleChange).V(float32(a.angle1)).V2(float32(a.angle2))
}
// Shoot marks that a shot must be broadcast on the next update.
func (a *Player) Shoot() {
	a.needNotifyShoot = true
}
// CreateDefaultPlayer builds a fresh unit at full HP from the given
// progress, paired with an empty Player component.
func CreateDefaultPlayer(p *Progress) (*Unit, *Player) {
	return &Unit{
		BaseCharacter: BaseCharacter{
			HP: p.MaxHP,
			MaxHP: p.MaxHP,
		},
	}, &Player{}
}
|
package rpcdb
import (
"fmt"
"github.com/alioygur/gores"
"golang.org/x/net/context"
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
"testing"
)
// TestDebugContext verifies a session built from request headers can
// be attached to and extracted from a context.
func TestDebugContext(t *testing.T) {
	req, _ := http.NewRequest("GET", "/", nil)
	req.Header.Add("debug-breakpoint", "request example:*")
	req.Header.Add("debug-session", "http://example/123")
	session, _ := BuildSession("example", req.Header)
	ctx := AttachSession(context.Background(), session)
	session, ok := ExtractSession(ctx)
	if !ok {
		t.Errorf("session not found on context!")
	}
	if session.Name != "example" {
		t.Errorf("wrong name!")
	}
}
// TestResponseHook verifies that a response breakpoint routes the
// upstream response through the debug server, which may replace the
// body.
func TestResponseHook(t *testing.T) {
	// target of client request
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gores.String(w, 200, "hello world")
	}))
	defer ts.Close()
	// debug server transforming body
	ds := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gores.JSON(w, 200, ResponseBody{
			Body: "TRANSFORMED",
		})
	}))
	defer ds.Close()
	// sadly, easiest to make session this way!
	// TODO make instantiating a session less convoluted!
	// TODO client breakpoint definitions are on call to, or call from?
	req, _ := http.NewRequest("GET", "/", nil)
	req.Header.Add("debug-breakpoint", "response example:/")
	req.Header.Add("debug-session", ds.URL)
	session, _ := BuildSession("example", req.Header)
	c := NewClient(http.DefaultClient)
	ctx := AttachSession(context.Background(), session)
	r, err := c.Get(ctx, fmt.Sprintf("%s/", ts.URL))
	if err != nil {
		t.Errorf("error issuing request: %s", err)
	}
	defer r.Body.Close()
	body, _ := ioutil.ReadAll(r.Body)
	if string(body) != "TRANSFORMED" {
		t.Errorf("expected body to be TRANSFORMED, it was '%s'", body)
	}
}
// TestRequestHook verifies that a request breakpoint lets the debug
// server rewrite the outgoing request body before it reaches the
// target (which echoes what it received).
func TestRequestHook(t *testing.T) {
	// target of client request; echoes the request body back
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		buf, _ := ioutil.ReadAll(r.Body)
		defer r.Body.Close()
		gores.String(w, 200, string(buf))
	}))
	defer ts.Close()
	// debug server transforming body
	ds := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gores.JSON(w, 200, RequestBody{
			Body: "TRANSFORMED",
		})
	}))
	defer ds.Close()
	// TODO make instantiating a session less convoluted!
	// TODO client breakpoint definitions are on call to, or call from?
	req, _ := http.NewRequest("GET", "/", nil)
	req.Header.Add("debug-breakpoint", "request example:/")
	req.Header.Add("debug-session", ds.URL)
	session, _ := BuildSession("example", req.Header)
	c := NewClient(http.DefaultClient)
	ctx := AttachSession(context.Background(), session)
	r, err := c.Post(ctx, fmt.Sprintf("%s/", ts.URL), "text/plain", strings.NewReader("hello world"))
	if err != nil {
		t.Errorf("error issuing request: %s", err)
	}
	defer r.Body.Close()
	body, _ := ioutil.ReadAll(r.Body)
	if string(body) != "TRANSFORMED" {
		t.Errorf("expected body to be TRANSFORMED, it was '%s'", body)
	}
}
|
package mathhelper
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestIsEqualFloat32 verifies IsEqual's float32 tolerance: a difference of
// ~1e-5 is treated as equal, ~5e-4 as different, symmetrically in both
// argument orders.
func TestIsEqualFloat32(t *testing.T) {
	// Given
	// When
	val1 := IsEqual(float32(10.0001), float32(10.00009))
	val2 := IsEqual(float32(10.0005), float32(10.00001))
	val3 := IsEqual(float32(10.00009), float32(10.0001))
	val4 := IsEqual(float32(10.00001), float32(10.0005))

	// Then
	// assert.True/False state the expectation directly. The original
	// assert.Equal(t, valN, true/false) calls also passed the actual value
	// in testify's "expected" slot, which inverts failure messages.
	assert.True(t, val1)
	assert.False(t, val2)
	assert.True(t, val3)
	assert.False(t, val4)
}
|
package lib
import (
"fmt"
"github.com/keptn/go-utils/pkg/api/models"
api "github.com/keptn/go-utils/pkg/api/utils"
"github.com/keptn/keptn/distributor/pkg/config"
"strings"
"sync"
)
// ControlPlane manages the registration lifecycle of this distributor as a
// Keptn uniform integration.
type ControlPlane struct {
	UniformHandler *api.UniformHandler // client used to (un)register the integration
	EnvConfig      config.EnvConfig    // environment-derived registration data
	currentID      string              // ID returned by the last successful Register; "" when unregistered
	mux            sync.Mutex          // serializes Register/Unregister and guards currentID
}
// Register registers this distributor as a uniform integration and records
// the returned integration ID for a later Unregister call. It returns that
// ID, or an error when the registration request fails.
func (c *ControlPlane) Register() (string, error) {
	c.mux.Lock()
	defer c.mux.Unlock()

	id, err := c.UniformHandler.RegisterIntegration(c.getRegistrationDataFromEnv())
	if err != nil {
		return "", err
	}
	c.currentID = id
	return id, nil
}
// Unregister removes the previously registered integration and clears the
// stored ID. Calling it without a prior successful Register is an error.
func (c *ControlPlane) Unregister() error {
	c.mux.Lock()
	defer c.mux.Unlock()

	if c.currentID == "" {
		return fmt.Errorf("tried to unregister integration without being registered first")
	}
	if err := c.UniformHandler.UnregisterIntegration(c.currentID); err != nil {
		return err
	}
	c.currentID = ""
	return nil
}
// getRegistrationDataFromEnv assembles the integration registration payload
// from the environment configuration. An empty PubSubTopic yields an empty
// (non-nil) topic list; otherwise the comma-separated value is split.
func (c *ControlPlane) getRegistrationDataFromEnv() models.Integration {
	var topics []string
	if c.EnvConfig.PubSubTopic == "" {
		// Explicit empty slice (not nil) so the field serializes as [].
		topics = []string{}
	} else {
		topics = strings.Split(c.EnvConfig.PubSubTopic, ",")
	}
	return models.Integration{
		Name: c.EnvConfig.K8sDeploymentName,
		MetaData: models.MetaData{
			Hostname:           c.EnvConfig.K8sNodeName,
			IntegrationVersion: c.EnvConfig.Version,
			DistributorVersion: c.EnvConfig.DistributorVersion,
			Location:           c.EnvConfig.Location,
			KubernetesMetaData: models.KubernetesMetaData{
				Namespace:      c.EnvConfig.K8sNamespace,
				PodName:        c.EnvConfig.K8sPodName,
				DeploymentName: c.EnvConfig.K8sDeploymentName,
			},
		},
		Subscription: models.Subscription{
			Topics: topics,
			Filter: models.SubscriptionFilter{
				Project: c.EnvConfig.ProjectFilter,
				Stage:   c.EnvConfig.StageFilter,
				Service: c.EnvConfig.ServiceFilter,
			},
		},
	}
}
|
package atomix
import (
"reflect"
"testing"
)
// mustEqual fails the test immediately unless got and want are deeply equal.
func mustEqual(tb testing.TB, got, want interface{}) {
	tb.Helper()
	if reflect.DeepEqual(got, want) {
		return
	}
	tb.Fatalf("got: %v, want: %v", got, want)
}
|
package gojson
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
)
// Marshal has no documentation
// Marshal encodes v using the custom encoder and then re-marshals the raw
// result through encoding/json, which validates and compacts it.
func Marshal(v interface{}) ([]byte, error) {
	enc := &encoder{buf: new(bytes.Buffer)}
	raw, err := enc.marshal(reflect.ValueOf(v))
	if err != nil {
		return nil, err
	}
	return json.Marshal(json.RawMessage(raw))
}
// MarshalIndent is like Marshal but indents the output with the given
// prefix and indent strings, mirroring encoding/json.MarshalIndent.
func MarshalIndent(v interface{}, prefix string, indent string) ([]byte, error) {
	enc := &encoder{buf: new(bytes.Buffer)}
	raw, err := enc.marshal(reflect.ValueOf(v))
	if err != nil {
		return nil, err
	}
	return json.MarshalIndent(json.RawMessage(raw), prefix, indent)
}
// encoder carries per-marshal state.
// NOTE(review): buf is initialized by Marshal/MarshalIndent but never read
// in the visible marshal path, which returns byte slices directly — confirm
// it is used by marshalMap/marshalSlice/marshalStruct before removing.
type encoder struct {
	buf *bytes.Buffer
}
// marshal recursively encodes v as JSON, dispatching on the value's kind.
// Nil pointers, nil slices and nil interfaces encode as JSON null; scalar
// kinds defer to encoding/json; maps, slices/arrays and structs are handled
// by the dedicated marshalMap/marshalSlice/marshalStruct helpers. Any other
// kind is an error.
func (enc *encoder) marshal(v reflect.Value) ([]byte, error) {
	switch v.Type().Kind() {
	case reflect.Ptr:
		if v.IsNil() {
			return []byte("null"), nil
		}
		return enc.marshal(v.Elem())
	case reflect.String, reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64:
		// Plain scalars: the standard library already does the right thing.
		return json.Marshal(v.Interface())
	case reflect.Map:
		return enc.marshalMap(v)
	case reflect.Slice:
		if v.IsNil() {
			return []byte("null"), nil
		}
		// Non-nil slices share the array path.
		fallthrough
	case reflect.Array:
		return enc.marshalSlice(v)
	case reflect.Struct:
		return enc.marshalStruct(v)
	case reflect.Interface:
		if v.IsNil() {
			return []byte("null"), nil
		}
		return enc.marshal(v.Elem())
	default:
		return nil, fmt.Errorf("unsupported type %v (kind: %v)", v.Type(), v.Type().Kind())
	}
}
|
package main
/*
* @lc app=leetcode.cn id=84 lang=golang
*
* [84] 柱状图中最大的矩形
*/
// Monotonically increasing stack; avoids the repeated work of the brute-force solution.
// @lc code=start
// largestRectangleArea returns the area of the largest rectangle that fits
// entirely under the histogram described by heights.
//
// Monotonic-stack approach, O(n) time and space: for each bar k, left[k] is
// the index of the nearest strictly-shorter bar to the left (-1 if none)
// and right[k] the nearest bar to the right that breaks the stack invariant
// (len(heights) if none), so a rectangle of height heights[k] can span
// right[k]-left[k]-1 columns.
func largestRectangleArea(heights []int) int {
	n := len(heights)
	left := make([]int, n)
	right := make([]int, n)

	var stack []int // indices of bars forming an increasing-height stack
	for i := 0; i < n; i++ {
		// Pop bars that are at least as tall; they cannot bound bar i on
		// the left. (Ties are popped too; the overall maximum is still
		// found at the leftmost bar of each equal-height run.)
		for len(stack) > 0 && heights[stack[len(stack)-1]] >= heights[i] {
			stack = stack[:len(stack)-1]
		}
		if len(stack) > 0 {
			left[i] = stack[len(stack)-1]
		} else {
			left[i] = -1
		}
		stack = append(stack, i)
	}

	stack = stack[:0] // reuse the backing array for the right-to-left pass
	for i := n - 1; i >= 0; i-- {
		for len(stack) > 0 && heights[stack[len(stack)-1]] >= heights[i] {
			stack = stack[:len(stack)-1]
		}
		if len(stack) > 0 {
			right[i] = stack[len(stack)-1]
		} else {
			right[i] = n
		}
		stack = append(stack, i)
	}

	maxArea := 0
	for k := 0; k < n; k++ {
		if area := (right[k] - left[k] - 1) * heights[k]; area > maxArea {
			maxArea = area
		}
	}
	return maxArea
}
// @lc code=end
|
package test
import (
"fmt"
)
// test2 prints the marker string "test2" to stdout.
func test2() {
	const marker = "test2"
	fmt.Println(marker)
}
|
package jarviscore
import "errors"
// Sentinel errors for the jarviscore package; compare with errors.Is.
//
// NOTE(review): several messages start with a capital letter (staticcheck
// ST1005) and a few contain typos ("md5tring", "unknow"). They are kept
// byte-for-byte because callers or tests may match on the message text.
var (
	// ErrLoadFileReadSize - loadfile invalid file read size
	ErrLoadFileReadSize = errors.New("loadfile invalid file read size")
	// ErrNotConnectNode - not connect node
	ErrNotConnectNode = errors.New("not connect node")
	// ErrNoCtrlCmd - no ctrl cmd
	ErrNoCtrlCmd = errors.New("no ctrl cmd")
	// ErrCoreDBNoAddr - coredb no addr
	ErrCoreDBNoAddr = errors.New("coredb no addr")
	// ErrSign - sign err
	ErrSign = errors.New("sign err")
	// ErrInvalidAddr - invalid addr
	ErrInvalidAddr = errors.New("invalid addr")
	// ErrInvalidPublishKey - invalid publish key
	ErrInvalidPublishKey = errors.New("invalid publish key")
	// ErrExistCtrlID - exist ctrlid
	ErrExistCtrlID = errors.New("exist ctrlid")
	// ErrAlreadyJoin - already join
	ErrAlreadyJoin = errors.New("already join")
	// ErrNotConnectMe - not connect me
	ErrNotConnectMe = errors.New("not connect me")
	// ErrGRPCPeerFromContext - grpc.peer.FromContext err
	ErrGRPCPeerFromContext = errors.New("grpc.peer.FromContext err")
	// ErrGRPCPeerAddr - grpc.peer.Addr err
	ErrGRPCPeerAddr = errors.New("grpc.peer.Addr err")
	// ErrPublicKeyAddr - public key and address do not match
	ErrPublicKeyAddr = errors.New("public key and address do not match")
	// ErrPublicKeyVerify - public key verify err
	ErrPublicKeyVerify = errors.New("public key verify err")
	// ErrJarvisMsgTimeOut - JarvisMsg timeout
	ErrJarvisMsgTimeOut = errors.New("JarvisMsg timeout")
	// ErrStreamNil - stream nil
	ErrStreamNil = errors.New("stream nil")
	// ErrInvalidMsgType - invalid msgtype
	ErrInvalidMsgType = errors.New("invalid msgtype")
	// ErrServAddrIsMe - servaddr is me
	ErrServAddrIsMe = errors.New("servaddr is me")
	// ErrInvalidEvent - invalid event
	ErrInvalidEvent = errors.New("invalid event")
	// ErrInvalidServAddr - invalid servaddr
	ErrInvalidServAddr = errors.New("invalid servaddr")
	// ErrInvalidNodeName - invalid nodename
	ErrInvalidNodeName = errors.New("invalid nodename")
	// ErrUnknowNode - unknown node (message keeps the original spelling)
	ErrUnknowNode = errors.New("unknow node")
	// ErrInvalidMsgID - invalid msgid
	ErrInvalidMsgID = errors.New("invalid msgid")
	// ErrDuplicateMsgID - duplicate msgid
	ErrDuplicateMsgID = errors.New("duplicate msgid")
	// ErrInvalidRequestData4Node - invalid requestData4Node
	ErrInvalidRequestData4Node = errors.New("invalid requestData4Node")
	// ErrInvalidRequestNodeData - invalid requestNodeData
	ErrInvalidRequestNodeData = errors.New("invalid requestNodeData")
	// ErrFuncOnSendMsgResultLength - FuncOnSendMsgResult length err
	ErrFuncOnSendMsgResultLength = errors.New("FuncOnSendMsgResult length err")
	// ErrAutoUpdateClosed - auto update closed
	ErrAutoUpdateClosed = errors.New("auto update closed")
	// ErrServAddrConnFail - the servaddr connect fail
	ErrServAddrConnFail = errors.New("the servaddr connect fail")
	// ErrAssertGetNode - assert(GetNode() is not nil)
	ErrAssertGetNode = errors.New("assert(GetNode() is not nil)")
	// ErrDeprecatedNode - deprecated node
	ErrDeprecatedNode = errors.New("deprecated node")
	// ErrNoFileData - no filedata
	ErrNoFileData = errors.New("no filedata")
	// ErrFileDataNoMD5String - filedata has no md5 string (message keeps the original typo)
	ErrFileDataNoMD5String = errors.New("filedata no md5tring")
	// ErrInvalidFileDataMD5String - invalid filedata md5 string (message keeps the original typo)
	ErrInvalidFileDataMD5String = errors.New("invalid filedata md5tring")
	// ErrNotConnectedNode - not connected node
	ErrNotConnectedNode = errors.New("not connected node")
	// ErrInvalidReadFileLength - invalid readfile length
	ErrInvalidReadFileLength = errors.New("invalid readfile length")
	// ErrInvalidSeekFileOffset - invalid seekfile offset
	ErrInvalidSeekFileOffset = errors.New("invalid seekfile offset")
	// ErrNoConnOrInvalidConn - no connection or invalid connection
	ErrNoConnOrInvalidConn = errors.New("no connection or invalid connection")
	// ErrNoCtrlInfo - no ctrlinfo
	ErrNoCtrlInfo = errors.New("no ctrlinfo")
	// ErrCannotFindNodeWithAddr - can not find node with addr
	ErrCannotFindNodeWithAddr = errors.New("can not find node with addr")
	// ErrNoFuncOnFileData - no FuncOnFileData
	ErrNoFuncOnFileData = errors.New("no FuncOnFileData")
	// ErrNoProcMsgResultData - no ProcMsgResultData
	ErrNoProcMsgResultData = errors.New("no ProcMsgResultData")
	// ErrInvalidProcMsgResultData - invalid ProcMsgResultData
	ErrInvalidProcMsgResultData = errors.New("invalid ProcMsgResultData")
	// ErrDuplicateProcMsgResultData - duplicate ProcMsgResultData
	ErrDuplicateProcMsgResultData = errors.New("duplicate ProcMsgResultData")
	// ErrNoCtrl - no ctrl
	ErrNoCtrl = errors.New("no ctrl")
	// ErrUnknownCtrlError - unknown ctrl error
	ErrUnknownCtrlError = errors.New("unknown ctrl error")
	// ErrProcMsgStreamNil - ProcMsgStream return nil
	ErrProcMsgStreamNil = errors.New("ProcMsgStream return nil")
	// ErrIDontTrustYou - I don't trust you
	ErrIDontTrustYou = errors.New("I don't trust you")
	// ErrInvalidTimer - Invalid timer
	ErrInvalidTimer = errors.New("Invalid timer")
	// ErrInvalidTimerFunc - Invalid timer func
	ErrInvalidTimerFunc = errors.New("Invalid timer func")
	// ErrInvalidWait4MyReplyMsgID - Invalid wait for my reply msgid
	ErrInvalidWait4MyReplyMsgID = errors.New("Invalid wait for my reply msgid")
	// ErrInvalidWait4MyReplyAddr - Invalid wait for my reply addr
	ErrInvalidWait4MyReplyAddr = errors.New("Invalid wait for my reply addr")
	// ErrInvalidJarvisMsgReplyStreamDestAddr - Invalid JarvisMsgReplyStream dest addr
	ErrInvalidJarvisMsgReplyStreamDestAddr = errors.New("Invalid JarvisMsgReplyStream dest addr")
	// ErrInvalidJarvisMsgReplyStreamSendMsg - Invalid JarvisMsgReplyStream sendmsg
	ErrInvalidJarvisMsgReplyStreamSendMsg = errors.New("Invalid JarvisMsgReplyStream sendmsg")
	// ErrJarvisMsgReplyStreamSent - JarvisMsgReplyStream has been sent
	ErrJarvisMsgReplyStreamSent = errors.New("JarvisMsgReplyStream has been sent")
	// ErrInvalidJarvisMsgReplyStreamReplyMsgID - Invalid JarvisMsgReplyStream ReplyMsgID
	ErrInvalidJarvisMsgReplyStreamReplyMsgID = errors.New("Invalid JarvisMsgReplyStream ReplyMsgID")
	// ErrInvalidStreamMsgTransferFile2 - Invalid StreamMsg TRANSFER_FILE2
	ErrInvalidStreamMsgTransferFile2 = errors.New("Invalid StreamMsg TRANSFER_FILE2")
	// ErrInvalidCMDStdOutErrErrFile - Invalid CMDStdOutErr errfile
	ErrInvalidCMDStdOutErrErrFile = errors.New("Invalid CMDStdOutErr errfile")
	// ErrCfgInvalidUpdateScript - Invalid UpdateScript
	ErrCfgInvalidUpdateScript = errors.New("Invalid UpdateScript")
)
|
package config
import (
"time"
"gopkg.in/ini.v1"
)
// AppConfig holds the application settings: the top-level ini section plus
// the embedded [etcd] section.
type AppConfig struct {
	Release bool `ini:"release"` // run in release mode when true
	Port    uint `ini:"port"`    // HTTP listen port
	*EtcdConfig `ini:"etcd"`
}

// EtcdConfig describes how to reach the etcd cluster ([etcd] ini section).
type EtcdConfig struct {
	Endpoints   []string      `ini:"endpoints"` // cluster member addresses
	DialTimeout time.Duration `ini:"timeout"`   // connection timeout
	Key         string        `ini:"key"`       // key (or key prefix — confirm with callers) watched/read by the app
}
// Conf is the process-wide configuration, populated by Init.
var Conf = new(AppConfig)

// Init loads the ini file at the given path into Conf.
func Init(file string) error {
	return ini.MapTo(Conf, file)
}
|
package boom
import (
"testing"
"go.mercari.io/datastore/v2/internal/testutils"
)
// TestBoom_NewTransaction drives a manual transaction end to end: Get of a
// pre-existing entity, Put of a new one (whose ID stays pending until
// commit) and Delete, then verifies Commit resolves the pending key.
func TestBoom_NewTransaction(t *testing.T) {
	ctx, client, cleanUp := testutils.SetupCloudDatastore(t)
	defer cleanUp()

	type Data struct {
		ID  int64 `datastore:"-" boom:"id"`
		Str string
	}

	bm := FromClient(ctx, client)

	key, err := bm.Put(&Data{Str: "Str1"})
	if err != nil {
		t.Fatal(err)
	}

	tx, err := bm.NewTransaction()
	if err != nil {
		t.Fatal(err)
	}

	obj := &Data{ID: key.ID()}
	err = tx.Get(obj)
	if err != nil {
		t.Fatal(err)
	}
	if v := obj.Str; v != "Str1" {
		t.Errorf("unexpected: %v", v)
	}

	obj = &Data{Str: "Str2"}
	_, err = tx.Put(obj)
	if err != nil {
		t.Fatal(err)
	}
	// Key is PendingKey state still...
	if v := obj.ID; v != 0 {
		t.Errorf("unexpected: %v", v)
	}

	err = tx.Delete(key)
	if err != nil {
		t.Fatal(err)
	}

	_, err = tx.Commit()
	if err != nil {
		t.Fatal(err)
	}
	// After Commit the pending key must be resolved into a real, non-zero ID.
	if v := obj.ID; v == 0 {
		t.Errorf("unexpected: %v", v)
	}
}
// TestBoom_RunInTransaction repeats the NewTransaction scenario through the
// RunInTransaction callback API: the pending key created inside the closure
// must be resolved once RunInTransaction returns successfully.
func TestBoom_RunInTransaction(t *testing.T) {
	ctx, client, cleanUp := testutils.SetupCloudDatastore(t)
	defer cleanUp()

	type Data struct {
		ID  int64 `datastore:"-" boom:"id"`
		Str string
	}

	bm := FromClient(ctx, client)

	key, err := bm.Put(&Data{Str: "Str1"})
	if err != nil {
		t.Fatal(err)
	}

	// pObj escapes the closure so the post-commit ID can be checked.
	var pObj *Data
	_, err = bm.RunInTransaction(func(tx *Transaction) error {
		obj := &Data{ID: key.ID()}
		err = tx.Get(obj)
		if err != nil {
			t.Fatal(err)
		}
		if v := obj.Str; v != "Str1" {
			t.Errorf("unexpected: %v", v)
		}

		pObj = &Data{Str: "Str2"}
		_, err = tx.Put(pObj)
		if err != nil {
			t.Fatal(err)
		}
		// Key is PendingKey state still...
		if v := pObj.ID; v != 0 {
			t.Errorf("unexpected: %v", v)
		}

		err = tx.Delete(key)
		if err != nil {
			t.Fatal(err)
		}

		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	// The commit performed by RunInTransaction must have filled in the ID.
	if v := pObj.ID; v == 0 {
		t.Errorf("unexpected: %v", v)
	}
}
// TestBoom_TxRollback checks that a transaction containing a Put can be
// rolled back without error.
func TestBoom_TxRollback(t *testing.T) {
	ctx, client, cleanUp := testutils.SetupCloudDatastore(t)
	defer cleanUp()

	type Data struct {
		ID  int64 `datastore:"-" boom:"id"`
		Str string
	}

	bm := FromClient(ctx, client)

	tx, err := bm.NewTransaction()
	if err != nil {
		t.Fatal(err)
	}

	_, err = tx.Put(&Data{Str: "Str1"})
	if err != nil {
		t.Fatal(err)
	}

	err = tx.Rollback()
	if err != nil {
		t.Fatal(err)
	}
}
// TestBoom_TxWithCompleteKey puts an entity whose string ID is already
// complete (no pending key involved) inside a transaction and commits.
func TestBoom_TxWithCompleteKey(t *testing.T) {
	ctx, client, cleanUp := testutils.SetupCloudDatastore(t)
	defer cleanUp()

	bm := FromClient(ctx, client)

	type Data struct {
		ID string `boom:"id" datastore:"-"`
	}

	tx, err := bm.NewTransaction()
	if err != nil {
		t.Fatal(err)
	}

	_, err = tx.PutMulti([]*Data{{ID: "hoge"}})
	if err != nil {
		t.Fatal(err)
	}

	_, err = tx.Commit()
	if err != nil {
		t.Fatal(err)
	}
}
|
package api
import (
"bytes"
"fmt"
"log"
routing "github.com/qiangxue/fasthttp-routing"
"github.com/guilhermesteves/aclow"
"github.com/valyala/fasthttp"
)
// RegisterRoutes installs the logging, panic-recovery and CORS middleware
// (in that order) on the app's router and wires up the todo CRUD endpoints.
func RegisterRoutes(app *aclow.App) {
	router := app.Resources["router"].(*routing.Router)
	router.Use(logHandler(), panicHandler(), corsHandler())

	listToDo(app, router)
	createToDo(app, router)
	loadToDo(app, router)
	updateToDo(app, router)
	deleteToDo(app, router)
}
// corsHandler adds the Access-Control headers to every response and answers
// CORS preflight (OPTIONS) requests directly with 200, short-circuiting the
// rest of the handler chain.
func corsHandler() routing.Handler {
	return func(c *routing.Context) (err error) {
		c.Response.Header.Set("Access-Control-Allow-Origin", "*")
		c.Response.Header.Set("Access-Control-Allow-Headers", "Authorization")
		if !bytes.Equal(c.Method(), []byte("OPTIONS")) {
			return c.Next()
		}
		c.SetStatusCode(fasthttp.StatusOK)
		return nil
	}
}
// logHandler logs the request line before the rest of the chain runs and
// the response status code afterwards.
func logHandler() routing.Handler {
	return func(c *routing.Context) (err error) {
		log.Println("Request: ", string(c.Method())+" "+string(c.Request.RequestURI()))
		err = c.Next()
		log.Println("Response Status: ", c.Response.StatusCode())
		return err
	}
}
// panicHandler recovers from panics in downstream handlers, reporting them
// as a 500 with the panic value in the body.
//
// The original asserted e.(error), which itself panics when a handler calls
// panic with a non-error value (e.g. panic("msg")); %v formats any value.
// NOTE(review): the original also shadowed the named return with :=, so the
// handler reports the failure via status/body and returns nil to the chain;
// that observable behavior is preserved here.
func panicHandler() routing.Handler {
	return func(c *routing.Context) (err error) {
		defer func() {
			if e := recover(); e != nil {
				c.Response.Header.SetStatusCode(fasthttp.StatusInternalServerError)
				c.SetBody([]byte(fmt.Sprintf("%v", e)))
			}
		}()
		return c.Next()
	}
}
|
package ffprobe
import (
"log"
"os"
"os/exec"
"runtime"
"strings"
)
// Prober has logic that changes based on platform.
type Prober interface {
	// getDevicesCmd returns the shell command used to list capture devices.
	getDevicesCmd() string
	// getFfmpegCmd builds the ffmpeg invocation for the given common options.
	getFfmpegCmd(ProberCommon) ([]string, error)
}

// Devices has information about ffmpeg multimedia devices.
type Devices struct {
	Audios []string // audio capture device names
	Videos []string // video capture device names
}
// plt enumerates the supported platforms.
type plt int

const (
	lin plt = iota // linux
	mac            // macOS //TODO use this
)

var (
	cfgname     = "common_presets.toml" // shared preset definitions
	uiOptsFname = "uiopts.toml"         // persisted UI options
	presetFname = "presets.toml"        // user presets
	// Package loggers: info to stdout, errors to stderr.
	logi = log.New(os.Stdout, "INFO: ", log.Lshortfile|log.Ltime)
	loge = log.New(os.Stderr, "ERROR: ", log.Lshortfile|log.Ltime)
)
// ProberCommon has all options; common and platform prober.
type ProberCommon struct {
	devicesKey string // substring marking a device-type section header in the listing output
	deviceKey  string // input device — substring used to keep relevant output lines
	opts       *Options
	cmd        *exec.Cmd
	prober     Prober // platform-specific behavior
	config     *tomlConfig
	Devices
}

// opts is shared mutable package state read/written by SetInputs/GetInputs.
// TODO1 move to devcommon
var opts *Options = &Options{}
// SetInputs to set configure input streams.
//
// NOTE(review): value receiver — the receiver copy is discarded; this only
// takes effect because opts is a package global and pc.config is a shared
// pointer. Panics if config is nil (i.e. before NewProber completed).
func (pc ProberCommon) SetInputs(uiips []UIInput, resumeCount int) {
	opts.UIInputs = uiips
	pc.config.resumeCount = resumeCount
}
// GetInputs gets the UI input streams previously stored via SetInputs
// (package-level state).
func GetInputs() []UIInput {
	return opts.UIInputs
}
// GetLoggers returns info and error logger (stdout and stderr respectively).
func GetLoggers() (*log.Logger, *log.Logger) {
	return logi, loge
}
// filterList returns the elements of ss for which f reports true, in their
// original order. The result is nil when nothing matches.
func filterList(ss []string, f func(string) bool) []string {
	var kept []string
	for _, item := range ss {
		if f(item) {
			kept = append(kept, item)
		}
	}
	return kept
}
// GetFfmpegDevices returns the audio and video capture devices reported by
// the platform's device-listing command.
func GetFfmpegDevices(p ProberCommon) Devices {
	return Devices{
		Audios: parseFfmpegDeviceType(p, "audio"),
		Videos: parseFfmpegDeviceType(p, "video"),
	}
}
// parseFfmpegDeviceType runs the platform's device-listing command and
// returns the device names reported for dtype ("audio" or "video").
func parseFfmpegDeviceType(p ProberCommon, dtype string) []string {
	devs := map[string][]string{}
	res := runCmdStr(p.prober.getDevicesCmd(), true)
	resLines := strings.Split(res, "\n")
	// Keep only lines emitted by the capture component (marked by deviceKey).
	filterfn := func(s string) bool { return strings.Contains(s, p.deviceKey) }
	resLines = filterList(resLines, filterfn)

	var currDevType, dtypeKey string
	for _, ln := range resLines {
		if strings.Contains(ln, p.devicesKey) {
			// Section header; remember the one matching the requested dtype
			// so the corresponding map bucket can be returned at the end.
			currDevType = ln
			if strings.Contains(ln, dtype) {
				dtypeKey = ln
			}
		} else if currDevType != "" {
			// Device entries appear to have the form "[...] [index] name";
			// keep the part after the first "] [" and strip closing brackets.
			lnprsds := strings.Split(ln, "] [")
			if len(lnprsds) == 2 {
				lnprsd := lnprsds[1]
				lnprsd = strings.Replace(lnprsd, "]", " ", -1)
				devs[currDevType] = append(devs[currDevType], lnprsd)
			}
		}
	}
	logi.Printf("%s: %+v\n", dtype, devs[dtypeKey])
	return devs[dtypeKey]
}
// GetVersion returns ffmpeg version.
func GetVersion() string {
	const version = "ffmpeg 1234.22" // TODO: query the real binary
	return version
}
// getCommand asks the platform prober to build the ffmpeg command line for
// the given common options.
func getCommand(prober Prober, pc ProberCommon) ([]string, error) {
	return prober.getFfmpegCmd(pc)
}
// NewProber returns prober for correct platform.
//
// Side effects: panics on unsupported platforms, (re)creates the package
// output channel Ffoutchan, probes defaults and devices, and loads the
// preset configuration.
func NewProber() ProberCommon {
	var dc = ProberCommon{}
	switch runtime.GOOS {
	case "darwin":
		dc.prober = newProberMac()
	default:
		// Only macOS is wired up, even though the lin constant exists.
		panic("OS not supported")
	}
	dc.opts = opts
	Ffoutchan = make(chan Ffoutmsg)
	dc.probeDefaults()
	GetFfmpegDevices(dc) // NOTE(review): result is discarded — confirm intended
	dc.config = loadCommonConfig(presetTomlStr())
	return dc
}
|
package unit
// UnitFileState classifies a systemd unit file enablement state.
type UnitFileState int

const (
	// UnitFileStateError (-1) marks a state string not present in MapUnitFileState.
	UnitFileStateError UnitFileState = iota - 1
	UnitFileStateDisabled
	UnitFileStateEnabled
	UnitFileStateStatic
	UnitFileStateMasked
	UnitFileStateLinked
)

// MapUnitFileState maps systemd state strings onto UnitFileState values;
// the "-runtime" variants collapse into the same state as their base form.
var MapUnitFileState = map[string]UnitFileState{
	"disabled":        UnitFileStateDisabled,
	"enabled":         UnitFileStateEnabled,
	"enabled-runtime": UnitFileStateEnabled,
	"static":          UnitFileStateStatic,
	"masked":          UnitFileStateMasked,
	"masked-runtime":  UnitFileStateMasked,
	"linked":          UnitFileStateLinked,
	"linked-runtime":  UnitFileStateLinked,
}
|
//author xinbing
//time 2018/8/28 14:18
// String utilities.
package utilities
import (
"math/rand"
"time"
)
// randomStrSource is the alphabet used by GetRandomStr: the digits plus the
// lowercase latin letters.
var randomStrSource = []byte("0123456789abcdefghijklmnopqrstuvwxyz")

// GetRandomStr returns a random string of the given length drawn from
// randomStrSource. Not cryptographically secure: it uses math/rand, seeded
// with the current time plus a draw from the global source to reduce the
// chance that two calls in the same nanosecond produce identical output.
func GetRandomStr(length int) string {
	result := make([]byte, length)
	r := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63()))
	for i := 0; i < length; i++ {
		result[i] = randomStrSource[r.Intn(len(randomStrSource))]
	}
	return string(result)
}
// GetRandomNumStr returns a random string of the given length containing
// only the digits '0'-'9'. Not cryptographically secure (math/rand, seeded
// with the current time plus a draw from the global source).
func GetRandomNumStr(length int) string {
	result := make([]byte, length)
	r := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63()))
	for i := 0; i < length; i++ {
		result[i] = byte('0' + r.Intn(10)) // 0 - 9
	}
	return string(result)
}
|
package bitbar
import (
"fmt"
"strconv"
"time"
"github.com/DennisDenuto/igrb/data/diskstore"
"github.com/DennisDenuto/igrb/multicast"
"github.com/concourse/atc"
"github.com/git-duet/git-duet"
"strings"
)
// Painter accumulates rendered BitBar menu entries and prints them.
type Painter struct {
	MainItems []string // one rendered entry per main-menu item
}

// AddMainMenuItems appends a rendered entry to the main menu.
func (p *Painter) AddMainMenuItems(item string) {
	p.MainItems = append(p.MainItems, item)
}
// JobToString renders one build as a BitBar menu entry with "I got it!" and
// "Ignore" actions, how long the job has been red, and — when another dev
// has claimed the build — a footer naming them (with a grey icon).
func JobToString(targetUrl string, build atc.Build) string {
	timeElapsed := time.Since(time.Unix(build.EndTime, 0)) // time.Since replaces time.Now().Sub
	commandToInvestigate := fmt.Sprintf("bash=/usr/local/bin/igrb param1=send param2=\"%s\" param3=\"%s\" param4=\"%s\" param5=\"%d\" terminal=false refresh=true", GetGitUser(), build.PipelineName, build.JobName, build.ID)
	commandToIgnore := fmt.Sprintf("bash=/usr/local/bin/igrb param1=ignore param2=\"%s\" param3=\"%s\" param4=\"%s\" param5=\"%d\" terminal=false refresh=true", "_", build.PipelineName, build.JobName, build.ID)

	icon := ":exclamation:"
	buildTakenByDev := &multicast.DevLookingIntoBuild{}
	// Best effort: when no claim record exists the zero value is kept and the
	// default icon is used, so the read error is deliberately ignored.
	diskstore.NewDiskPersistor().ReadAndUnmarshal(strconv.Itoa(build.ID), buildTakenByDev)
	if buildTakenByDev.DevName != "" {
		icon = ":grey_question:"
	}

	buildSummaryText := fmt.Sprintf(`---
%s %s/%s %s | href=%s
--I got it! | %s
--Ignore | %s
Time red: %s`, icon, build.PipelineName, build.JobName, build.Status, targetUrl+build.URL, commandToInvestigate, commandToIgnore, timeElapsed)

	var buildFooter string
	if buildTakenByDev.DevName != "" {
		buildFooter = fmt.Sprintf("%s is looking into it!", buildTakenByDev.DevName)
	}
	return buildSummaryText + "\n" + buildFooter
}
// Print writes the BitBar menu to stdout: a red-circle header carrying the
// item count, a separator, then each registered main-menu item on its own
// line. (fmt.Printf replaces the redundant Println(Sprintf(...)) pair; the
// output bytes are unchanged.)
func (p *Painter) Print() {
	fmt.Printf("%d :red_circle: | color=red\n", len(p.MainItems))
	fmt.Println("---")
	for _, item := range p.MainItems {
		fmt.Println(item)
	}
}
// GetGitUser returns the current committer name: the git-duet author (and
// committers, joined as "author & committer1&committer2") when a duet is
// configured, otherwise the plain git user.name. Returns "" (after printing
// the error) when the configuration cannot be read.
func GetGitUser() string {
	configuration, err := duet.NewConfiguration()
	if err != nil {
		fmt.Println(err)
		return ""
	}
	gitConfig := &duet.GitConfig{Namespace: "user", SetUserConfig: configuration.SetGitUserConfig}
	gitDuetConfig := &duet.GitConfig{Namespace: configuration.Namespace, SetUserConfig: configuration.SetGitUserConfig}

	author, err := gitDuetConfig.GetAuthor()
	if err == nil && author != nil {
		committers, err := gitDuetConfig.GetCommitters()
		if err == nil && len(committers) > 0 {
			return author.Name + " & " + getCommitterNames(committers)
		}
		return author.Name
	}

	// Fall back to plain git config; the original ignored this error and
	// could return an undefined name.
	name, err := gitConfig.GetKey("name")
	if err != nil {
		fmt.Println(err)
		return ""
	}
	return name
}
// getCommitterNames joins the names of all pairs with "&".
func getCommitterNames(pairs []*duet.Pair) string {
	names := make([]string, 0, len(pairs))
	for _, pair := range pairs {
		names = append(names, pair.Name)
	}
	return strings.Join(names, "&")
}
|
// Copyright (c) 2017-2018 The qitmeer developers
// Copyright (c) 2014-2016 The btcsuite developers
// Copyright (c) 2015-2017 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package params
import (
"time"
"math/big"
"github.com/Qitmeer/qitmeer-lib/common"
"github.com/Qitmeer/qitmeer-lib/core/protocol"
)
// testNetPowLimit is the highest proof of work value a block can
// have for the test network. It is the value 2^232 - 1.
var testNetPowLimit = new(big.Int).Sub(new(big.Int).Lsh(common.Big1, 232), common.Big1)

// TestNetParams defines the network parameters for the test network.
var TestNetParams = Params{
	Name:        "testnet",
	Net:         protocol.TestNet,
	DefaultPort: "18130",
	DNSSeeds: []DNSSeed{
		{"testnet-seed.hlcwallet.info", true},
		{"testnet-seed.qitmeer.xyz", true},
		{"testnet-seed.qitmeer.top", true},
	},

	// Chain parameters
	GenesisBlock:             &testNetGenesisBlock,
	GenesisHash:              &testNetGenesisHash,
	PowLimit:                 testNetPowLimit,
	PowLimitBits:             0x1e00ffff,
	ReduceMinDifficulty:      false,
	MinDiffReductionTime:     0, // Does not apply since ReduceMinDifficulty false
	GenerateSupported:        true,
	WorkDiffAlpha:            1,
	WorkDiffWindowSize:       144,
	WorkDiffWindows:          20,
	MaximumBlockSizes:        []int{1310720},
	MaxTxSize:                1000000,
	TargetTimePerBlock:       time.Minute * 2,
	TargetTimespan:           time.Minute * 2 * 144, // TimePerBlock * WindowSize
	RetargetAdjustmentFactor: 4,

	// Subsidy parameters.
	BaseSubsidy:              2500000000, // 25 Coin
	MulSubsidy:               100,
	DivSubsidy:               101,
	SubsidyReductionInterval: 2048,
	WorkRewardProportion:     10,
	StakeRewardProportion:    0,
	BlockTaxProportion:       0,

	// Checkpoints ordered from oldest to newest. Intentionally empty: no
	// checkpoints are defined for the test network yet.
	Checkpoints: []Checkpoint{
	},

	// Consensus rule change deployments. None active on testnet.
	//
	Deployments: map[uint32][]ConsensusDeployment{
	},

	// Address encoding magics
	NetworkAddressPrefix: "T",
	PubKeyAddrID:         [2]byte{0x0f, 0x0f}, // starts with Tk
	PubKeyHashAddrID:     [2]byte{0x0f, 0x12}, // starts with Tm
	PKHEdwardsAddrID:     [2]byte{0x0f, 0x01}, // starts with Te
	PKHSchnorrAddrID:     [2]byte{0x0f, 0x1e}, // starts with Tr
	ScriptHashAddrID:     [2]byte{0x0e, 0xe2}, // starts with TS
	PrivateKeyID:         [2]byte{0x0c, 0xe2}, // starts with Pt

	// BIP32 hierarchical deterministic extended key magics
	HDPrivateKeyID: [4]byte{0x04, 0x35, 0x83, 0x97}, // starts with tprv
	HDPublicKeyID:  [4]byte{0x04, 0x35, 0x87, 0xd1}, // starts with tpub

	// BIP44 coin type used in the hierarchical deterministic path for
	// address generation.
	HDCoinType: 223,

	CoinbaseMaturity: 16,
	//OrganizationPkScript: hexMustDecode("76a914868b9b6bc7e4a9c804ad3d3d7a2a6be27476941e88ac"),
}
|
package files
import (
"errors"
"os"
"path/filepath"
shared "github.com/cazier/resume/pkg/shared"
)
// Exists reports whether path exists and matches the requested kind:
// is_file=true expects a non-directory entry, is_file=false a directory.
// Permission errors terminate the process via shared.Exit; any other stat
// failure (including not-exist) reports false.
func Exists(path string, is_file bool) bool {
	resp, err := os.Stat(path)
	if err != nil {
		if errors.Is(err, os.ErrPermission) {
			shared.Exit(1, "The destination path (%s) has bad permissions", path)
		}
		// The original returned false only for os.IsNotExist and fell
		// through for other errors, dereferencing the nil FileInfo below.
		// Treat every stat failure as "does not exist".
		return false
	}
	return is_file != resp.IsDir()
}
// MakeDirectories creates the directory chain for path with mode 0775.
// When include is true and path has no file extension, path itself is
// treated as the final directory; otherwise only its parent is created.
func MakeDirectories(path string, include bool) {
	target := filepath.Dir(path)
	if include && filepath.Ext(path) == "" {
		target = path
	}
	err := os.MkdirAll(target, 0775)
	shared.HandleError(err)
}
|
package main
import (
"flag"
"github.com/gin-gonic/gin"
"net/http"
"os"
)
// addr is the listen address, overridable with -addr.
var addr = flag.String("addr", ":8080", "address")

// main starts a gin server whose single route greets the caller with the
// pod and node the process is running on (POD/NODE env vars).
func main() {
	flag.Parse()
	r := gin.New()
	r.GET("/", func(c *gin.Context) {
		c.String(http.StatusOK, "hello %s this message from %s/%s", c.Query("name"), os.Getenv("POD"), os.Getenv("NODE"))
	})
	// r.Run blocks; a non-nil return means the listener failed. The
	// original silently ignored this error.
	if err := r.Run(*addr); err != nil {
		panic(err)
	}
}
|
package main
import (
"strings"
"sync"
"testing"
"github.com/go-test/deep"
"golang.org/x/tools/go/loader"
)
// programCache memoizes loader.Program results by path so multiple tests
// share one (expensive) load. The embedded mutex makes it safe for
// concurrent use.
type programCache struct {
	sync.Mutex
	loadedProgs map[string]*loader.Program
}
// load returns the cached program for path, loading and caching it on the
// first call. The map is lazily allocated; a failed load stores nil and is
// retried on the next call.
func (p *programCache) load(path string) (*loader.Program, error) {
	p.Lock()
	defer p.Unlock()

	if p.loadedProgs == nil {
		p.loadedProgs = make(map[string]*loader.Program)
	}
	if cached := p.loadedProgs[path]; cached != nil {
		return cached, nil
	}

	prog, _, err := progFromArgs([]string{path})
	p.loadedProgs[path] = prog
	return prog, err
}
// programs caches loaded programs across the tests in this file.
var programs = &programCache{}

// examples is the fixture file containing the structs under test.
const examples = "testdata/examples.go"
// TestRecordFromStructErrors checks which struct names are expected to fail
// conversion: missing types, interfaces and empty structs are errors, while
// ordinary field-bearing structs succeed.
func TestRecordFromStructErrors(t *testing.T) {
	prog, err := programs.load(examples)
	if err != nil {
		t.Fatal(err)
	}
	var tests = []struct {
		name          string
		errorExpected bool
	}{
		{"DoesNotExist", true},
		{"AnInterface", true},
		{"Empty", true},
		{"Strings", false},
		{"OptionalValues", false},
	}
	for _, tt := range tests {
		structType, err := structFromProg(prog, "main", tt.name)
		if err == nil {
			// Only attempt record conversion when the struct was found.
			_, err = recordFromStruct(NewResolver(make(TypeNamePairs)), structType, tt.name)
		}
		got := err != nil
		if got != tt.errorExpected {
			t.Errorf("recordFromStruct(%q): got error %v, want error %v\nerror was: %v",
				tt.name, got, tt.errorExpected, err)
		}
	}
}
// TestRecordFromStructAbbrev verifies that Go initialisms are normalized in
// the generated Elm record name: "JSONObject" becomes "JsonObject".
func TestRecordFromStructAbbrev(t *testing.T) {
	prog, err := programs.load(examples)
	if err != nil {
		t.Fatal(err)
	}
	input := "JSONObject"
	want := "JsonObject"
	structType, err := structFromProg(prog, "main", input)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	record, err := recordFromStruct(NewResolver(make(TypeNamePairs)), structType, input)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	got := record.Name()
	if got != want {
		t.Errorf("Got record name %q, want %q", got, want)
	}
}
// TestRecordFromStructNameConversions checks field-name handling on the
// "Strings" fixture: JSON names come from the tag when present (falling
// back to the Go name), Elm names are lowerCamelCase, and ",omitempty"-style
// optional fields are flagged Optional.
func TestRecordFromStructNameConversions(t *testing.T) {
	prog, err := programs.load(examples)
	if err != nil {
		t.Fatal(err)
	}
	name := "Strings"
	want := &ElmRecord{
		name: name,
		Fields: []*ElmField{
			{
				JSONName: "ExportedBareString",
				ElmName:  "exportedBareString",
				ElmType:  elmString,
			},
			{
				JSONName: "exported-tagged-string",
				ElmName:  "exportedTaggedString",
				ElmType:  elmString,
			},
			{
				JSONName: "exported-optional-string",
				ElmName:  "exportedOptionalString",
				ElmType:  elmString,
				Optional: true,
			},
			{
				JSONName: "AnotherOptionalString",
				ElmName:  "anotherOptionalString",
				ElmType:  elmString,
				Optional: true,
			},
		},
	}
	structType, err := structFromProg(prog, "main", name)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	got, err := recordFromStruct(NewResolver(make(TypeNamePairs)), structType, name)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	// deep.Equal reports field-level diffs; Equal additionally compares
	// ElmType values that deep.Equal may not distinguish.
	if diff := deep.Equal(got, want); diff != nil {
		t.Fatal("ElmRecord struct did not match expectations:\n" + strings.Join(diff, "\n"))
	}
	if !got.Equal(want) {
		t.Error("ElmRecord struct did not match expectations, likely in an ElmType field.")
	}
}
// TestParseStructMultipleNames checks the "MultiNames" fixture: every field
// declared in a multi-name declaration (e.g. "One, Two, Three string") must
// produce its own Elm record field.
func TestParseStructMultipleNames(t *testing.T) {
	prog, err := programs.load(examples)
	if err != nil {
		t.Fatal(err)
	}
	name := "MultiNames"
	want := &ElmRecord{
		name: name,
		Fields: []*ElmField{
			{
				JSONName: "One",
				ElmName:  "one",
				ElmType:  elmString,
			},
			{
				JSONName: "Two",
				ElmName:  "two",
				ElmType:  elmString,
			},
			{
				JSONName: "Three",
				ElmName:  "three",
				ElmType:  elmString,
			},
		},
	}
	structType, err := structFromProg(prog, "main", name)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	got, err := recordFromStruct(NewResolver(make(TypeNamePairs)), structType, name)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	if diff := deep.Equal(got, want); diff != nil {
		t.Fatal("ElmRecord struct did not match expectations:\n" + strings.Join(diff, "\n"))
	}
	if !got.Equal(want) {
		t.Error("ElmRecord struct did not match expectations, likely in an ElmType field.")
	}
}
// TestRecordFromStructTypeConversions checks scalar type mapping on the
// "OtherTypes" fixture: Go ints (any width) map to elmInt, floats to
// elmFloat and bools to elmBool.
func TestRecordFromStructTypeConversions(t *testing.T) {
	prog, err := programs.load(examples)
	if err != nil {
		t.Fatal(err)
	}
	name := "OtherTypes"
	want := &ElmRecord{
		name: name,
		Fields: []*ElmField{
			{
				JSONName: "AnInteger",
				ElmName:  "anInteger",
				ElmType:  elmInt,
			},
			{
				JSONName: "BigInteger",
				ElmName:  "bigInteger",
				ElmType:  elmInt,
			},
			{
				JSONName: "AFloat",
				ElmName:  "aFloat",
				ElmType:  elmFloat,
			},
			{
				JSONName: "BigFloat",
				ElmName:  "bigFloat",
				ElmType:  elmFloat,
			},
			{
				JSONName: "NoNoNo",
				ElmName:  "noNoNo",
				ElmType:  elmBool,
			},
		},
	}
	structType, err := structFromProg(prog, "main", name)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	got, err := recordFromStruct(NewResolver(make(TypeNamePairs)), structType, name)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	if diff := deep.Equal(got, want); diff != nil {
		t.Fatal("ElmRecord struct did not match expectations:\n" + strings.Join(diff, "\n"))
	}
	if !got.Equal(want) {
		t.Error("ElmRecord struct did not match expectations, likely in an ElmType field.")
	}
}
// TestRecordFromStructSlices checks that Go slices on the "SliceTypes"
// fixture map to ElmList values wrapping the element type.
func TestRecordFromStructSlices(t *testing.T) {
	prog, err := programs.load(examples)
	if err != nil {
		t.Fatal(err)
	}
	name := "SliceTypes"
	want := &ElmRecord{
		name: name,
		Fields: []*ElmField{
			{
				JSONName: "Bools",
				ElmName:  "bools",
				ElmType:  &ElmList{elem: elmBool},
			},
			{
				JSONName: "Floats",
				ElmName:  "floats",
				ElmType:  &ElmList{elem: elmFloat},
			},
			{
				JSONName: "Strings",
				ElmName:  "strings",
				ElmType:  &ElmList{elem: elmString},
			},
		},
	}
	structType, err := structFromProg(prog, "main", name)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	got, err := recordFromStruct(NewResolver(make(TypeNamePairs)), structType, name)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	if diff := deep.Equal(got, want); diff != nil {
		t.Fatal("ElmRecord struct did not match expectations:\n" + strings.Join(diff, "\n"))
	}
	if !got.Equal(want) {
		t.Error("ElmRecord struct did not match expectations, likely in an ElmType field.")
	}
}
// TestRecordFromStructOptionals verifies that fields of the "OptionalValues"
// example struct are marked Optional and that an explicit JSON tag (the
// "opt-string" name) is honored.
func TestRecordFromStructOptionals(t *testing.T) {
	prog, err := programs.load(examples)
	if err != nil {
		t.Fatal(err)
	}
	name := "OptionalValues"
	want := &ElmRecord{
		name: name,
		Fields: []*ElmField{
			{
				// JSON name differs from the Go field: comes from a struct tag.
				JSONName: "opt-string",
				ElmName:  "optString",
				ElmType:  elmString,
				Optional: true,
			},
			{
				JSONName: "OptInt",
				ElmName:  "optInt",
				ElmType:  elmInt,
				Optional: true,
			},
			{
				JSONName: "OptBool",
				ElmName:  "optBool",
				ElmType:  elmBool,
				Optional: true,
			},
		},
	}
	structType, err := structFromProg(prog, "main", name)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	got, err := recordFromStruct(NewResolver(make(TypeNamePairs)), structType, name)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	if diff := deep.Equal(got, want); diff != nil {
		t.Fatal("ElmRecord struct did not match expectations:\n" + strings.Join(diff, "\n"))
	}
	if !got.Equal(want) {
		t.Error("ElmRecord struct did not match expectations, likely in an ElmType field.")
	}
}
// TestRecordFromStructNullables verifies that pointer-typed fields of the
// "NullableValues" example struct become ElmPointer wrappers, including a
// pointer to a nested struct and a field that is both nullable and optional.
func TestRecordFromStructNullables(t *testing.T) {
	prog, err := programs.load(examples)
	if err != nil {
		t.Fatal(err)
	}
	name := "NullableValues"
	// The nested record pointed to by the NullStruct field.
	innerType := &ElmRecord{
		name: "InnerStruct",
		Fields: []*ElmField{
			{
				JSONName: "Value",
				ElmName:  "value",
				ElmType:  elmString,
			},
		},
	}
	want := &ElmRecord{
		name: name,
		Fields: []*ElmField{
			{
				JSONName: "NullString",
				ElmName:  "nullString",
				ElmType:  &ElmPointer{elem: elmString},
			},
			{
				// Both nullable (pointer) and optional (tag).
				JSONName: "OptNullString",
				ElmName:  "optNullString",
				ElmType:  &ElmPointer{elem: elmString},
				Optional: true,
			},
			{
				JSONName: "NullInt",
				ElmName:  "nullInt",
				ElmType:  &ElmPointer{elem: elmInt},
			},
			{
				JSONName: "NullStruct",
				ElmName:  "nullStruct",
				ElmType:  &ElmPointer{elem: innerType},
			},
		},
	}
	structType, err := structFromProg(prog, "main", name)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	got, err := recordFromStruct(NewResolver(make(TypeNamePairs)), structType, name)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	if diff := deep.Equal(got, want); diff != nil {
		t.Fatal("ElmRecord struct did not match expectations:\n" + strings.Join(diff, "\n"))
	}
	if !got.Equal(want) {
		t.Error("ElmRecord struct did not match expectations, likely in an ElmType field.")
	}
}
// TestRecordFromStructNested verifies that struct-typed fields of the
// "NestedStructs" example become nested ElmRecord values, and that two
// fields of the same struct type map to the same record definition.
func TestRecordFromStructNested(t *testing.T) {
	prog, err := programs.load(examples)
	if err != nil {
		t.Fatal(err)
	}
	name := "NestedStructs"
	// The record expected for both inner fields.
	innerType := &ElmRecord{
		name: "InnerStruct",
		Fields: []*ElmField{
			{
				JSONName: "Value",
				ElmName:  "value",
				ElmType:  elmString,
			},
		},
	}
	want := &ElmRecord{
		name: name,
		Fields: []*ElmField{
			{
				JSONName: "OuterName",
				ElmName:  "outerName",
				ElmType:  elmString,
			},
			{
				JSONName: "InnerValue1",
				ElmName:  "innerValue1",
				ElmType:  innerType,
			},
			{
				JSONName: "InnerValue2",
				ElmName:  "innerValue2",
				ElmType:  innerType,
			},
		},
	}
	structType, err := structFromProg(prog, "main", name)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	got, err := recordFromStruct(NewResolver(make(TypeNamePairs)), structType, name)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	if diff := deep.Equal(got, want); diff != nil {
		t.Fatal("ElmRecord struct did not match expectations:\n" + strings.Join(diff, "\n"))
	}
	// Log the decoded inner record's fields to help diagnose ElmType
	// mismatches, which the deep diff above cannot display.
	if innerRecord, ok := got.Fields[1].ElmType.(*ElmRecord); ok {
		t.Logf("innerRecord: %#v", innerRecord)
		for i, f := range innerRecord.Fields {
			t.Logf("innerRecord[%v]: %#v", i, f)
			t.Logf("innerRecord[%v].ElmType: %s", i, f.ElmType.Name())
		}
	} else {
		t.Errorf("Fields[1].ElmType was %T, not *ElmRecord", got.Fields[1].ElmType)
	}
	if !got.Equal(want) {
		t.Error("ElmRecord struct did not match expectations, likely in an ElmType field.")
	}
}
// TestRecordFromStructNestedRenames verifies that TypeNamePairs renames
// ("old:new") are applied both to the outer record and to a nested record
// when building from the "NestedStructs" example.
func TestRecordFromStructNestedRenames(t *testing.T) {
	prog, err := programs.load(examples)
	if err != nil {
		t.Fatal(err)
	}
	renames := make(TypeNamePairs)
	renames.Add("NestedStructs:NewOuter")
	// Note the lowercase source name here — presumably exercising
	// case-insensitive matching; confirm against TypeNamePairs.Add.
	renames.Add("innerStruct:NewInner")
	name := "NestedStructs"
	structType, err := structFromProg(prog, "main", name)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	got, err := recordFromStruct(NewResolver(renames), structType, name)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	// Outer record takes the renamed name.
	wantName := "NewOuter"
	gotName := got.name
	if gotName != wantName {
		t.Errorf("got name %q, want %q", gotName, wantName)
	}
	// The nested record (field 1) takes its renamed name too.
	wantName = "NewInner"
	gotType, ok := got.Fields[1].ElmType.(*ElmRecord)
	if !ok {
		t.Fatalf("want type *ElmRecord, got %T", got.Fields[1].ElmType)
	}
	gotName = gotType.name
	if gotName != wantName {
		t.Errorf("got name %q, want %q", gotName, wantName)
	}
}
|
package biz
import (
"context"
"github.com/go-kratos/kratos/v2/log"
)
// User is the domain model for a user account.
type User struct {
	Name  string
	Email string
}
// UserRepo persists users; CreateUser returns the new user's id.
type UserRepo interface {
	CreateUser(ctx context.Context, a *User) (int64, error)
}
// CardRepo persists cards; CreateCard creates a card for the user with the
// given id and returns the new card's id.
type CardRepo interface {
	CreateCard(ctx context.Context, id int64) (int64, error)
}
// UserUsecase coordinates user and card creation inside a single
// transaction managed by tm.
type UserUsecase struct {
	userRepo UserRepo
	cardRepo CardRepo
	tm       Transaction
}
// NewUserUsecase wires a UserUsecase with its user repository, card
// repository, and transaction manager.
//
// NOTE(review): the log.Logger argument is currently unused (UserUsecase has
// no logger field); the blank parameter makes that explicit. The parameter
// is kept so existing dependency-injection call sites keep compiling.
func NewUserUsecase(user UserRepo, card CardRepo, tm Transaction, _ log.Logger) *UserUsecase {
	return &UserUsecase{userRepo: user, cardRepo: card, tm: tm}
}
// CreateUser stores the user and an associated card atomically: both writes
// run inside one transaction via tm.ExecTx, so either both succeed or
// neither is persisted. On success it returns the new user's id.
func (u *UserUsecase) CreateUser(ctx context.Context, m *User) (int, error) {
	var id int64
	txErr := u.tm.ExecTx(ctx, func(ctx context.Context) error {
		newID, err := u.userRepo.CreateUser(ctx, m)
		if err != nil {
			return err
		}
		id = newID
		if _, err = u.cardRepo.CreateCard(ctx, id); err != nil {
			return err
		}
		return nil
	})
	if txErr != nil {
		return 0, txErr
	}
	return int(id), nil
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"context"
"slices"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/distsql"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/sessionctx"
transaction "github.com/pingcap/tidb/store/driver/txn"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/rowcodec"
"github.com/pingcap/tidb/util/tracing"
)
// memReader is implemented by the in-memory readers below. Implementations
// produce either fully decoded rows (getMemRows) or just the row handles
// (getMemRowsHandle) from the transaction's memory buffer.
type memReader interface {
	getMemRows(ctx context.Context) ([][]types.Datum, error)
	getMemRowsHandle() ([]kv.Handle, error)
}
// Compile-time assertions that every reader satisfies memReader.
var (
	_ memReader = &memIndexReader{}
	_ memReader = &memTableReader{}
	_ memReader = &memIndexLookUpReader{}
	_ memReader = &memIndexMergeReader{}
)
// memIndexReader decodes index entries buffered in the current transaction
// (and, optionally, a cached-table snapshot) for the ranges in kvRanges.
type memIndexReader struct {
	ctx        sessionctx.Context
	index      *model.IndexInfo
	table      *model.TableInfo
	kvRanges   []kv.KeyRange
	conditions []expression.Expression // filters applied to each decoded row
	addedRows  [][]types.Datum         // rows accumulated by getMemRows
	// addedRowsLen sizes the handle slice in getMemRowsHandle.
	addedRowsLen  int
	retFieldTypes []*types.FieldType
	outputOffset  []int // offsets of output columns within the index entry
	cacheTable    kv.MemBuffer
	keepOrder     bool
	compareExec
}
// buildMemIndexReader creates a memIndexReader that replays, for a
// UnionScanExec, the index entries buffered in the current transaction for
// the ranges of the given IndexReaderExecutor.
func buildMemIndexReader(ctx context.Context, us *UnionScanExec, idxReader *IndexReaderExecutor) *memIndexReader {
	defer tracing.StartRegion(ctx, "buildMemIndexReader").End()
	kvRanges := idxReader.kvRanges
	outputOffset := make([]int, 0, len(us.columns))
	for _, col := range idxReader.outputColumns {
		outputOffset = append(outputOffset, col.Index)
	}
	if us.desc {
		// Descending scan: visit the ranges back to front.
		slices.Reverse(kvRanges)
	}
	return &memIndexReader{
		ctx:           us.Ctx(),
		index:         idxReader.index,
		table:         idxReader.table.Meta(),
		kvRanges:      kvRanges,
		conditions:    us.conditions,
		retFieldTypes: retTypes(us),
		outputOffset:  outputOffset,
		cacheTable:    us.cacheTable,
		keepOrder:     us.keepOrder,
		compareExec:   us.compareExec,
	}
}
// getMemRowsIter materializes all buffered rows eagerly and wraps them in a
// defaultRowsIter.
func (m *memIndexReader) getMemRowsIter(ctx context.Context) (memRowsIter, error) {
	rows, err := m.getMemRows(ctx)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &defaultRowsIter{data: rows}, nil
}
// getMemRows scans the transaction memory buffer over m.kvRanges, decodes
// each index entry into datums, filters by m.conditions, and returns the
// surviving rows. When keepOrder is set on a partitioned table, the rows
// are re-sorted because entries from different partitions interleave.
func (m *memIndexReader) getMemRows(ctx context.Context) ([][]types.Datum, error) {
	defer tracing.StartRegion(ctx, "memIndexReader.getMemRows").End()
	// Field types of the indexed columns, plus one more for the handle.
	tps := make([]*types.FieldType, 0, len(m.index.Columns)+1)
	cols := m.table.Columns
	for _, col := range m.index.Columns {
		tps = append(tps, &cols[col.Offset].FieldType)
	}
	// Append the handle's type, which depends on how the table is keyed.
	switch {
	case m.table.PKIsHandle:
		for _, col := range m.table.Columns {
			if mysql.HasPriKeyFlag(col.GetFlag()) {
				tps = append(tps, &(col.FieldType))
				break
			}
		}
	case m.table.IsCommonHandle:
		pkIdx := tables.FindPrimaryIndex(m.table)
		for _, pkCol := range pkIdx.Columns {
			colInfo := m.table.Columns[pkCol.Offset]
			tps = append(tps, &colInfo.FieldType)
		}
	default: // ExtraHandle Column tp.
		tps = append(tps, types.NewFieldType(mysql.TypeLonglong))
	}
	mutableRow := chunk.MutRowFromTypes(m.retFieldTypes)
	err := iterTxnMemBuffer(m.ctx, m.cacheTable, m.kvRanges, m.desc, func(key, value []byte) error {
		data, err := m.decodeIndexKeyValue(key, value, tps)
		if err != nil {
			return err
		}
		mutableRow.SetDatums(data...)
		matched, _, err := expression.EvalBool(m.ctx, m.conditions, mutableRow.ToRow())
		if err != nil || !matched {
			// Drop rows that fail the filter; propagate any eval error.
			return err
		}
		m.addedRows = append(m.addedRows, data)
		return nil
	})
	if err != nil {
		return nil, err
	}
	if m.keepOrder && m.table.GetPartitionInfo() != nil {
		slices.SortFunc(m.addedRows, func(a, b []types.Datum) int {
			ret, err1 := m.compare(m.ctx.GetSessionVars().StmtCtx, a, b)
			if err1 != nil {
				// Capture the comparison error; SortFunc itself cannot fail.
				err = err1
			}
			return ret
		})
		return m.addedRows, err
	}
	return m.addedRows, nil
}
// decodeIndexKeyValue decodes one index key/value pair into the datums of
// the output columns, in m.outputOffset order. tps must match the layout
// built in getMemRows (indexed columns followed by the handle type).
func (m *memIndexReader) decodeIndexKeyValue(key, value []byte, tps []*types.FieldType) ([]types.Datum, error) {
	// The last entry of tps is the handle; its signedness decides decoding.
	hdStatus := tablecodec.HandleDefault
	if mysql.HasUnsignedFlag(tps[len(tps)-1].GetFlag()) {
		hdStatus = tablecodec.HandleIsUnsigned
	}
	colInfos := tables.BuildRowcodecColInfoForIndexColumns(m.index, m.table)
	colInfos = tables.TryAppendCommonHandleRowcodecColInfos(colInfos, m.table)
	values, err := tablecodec.DecodeIndexKV(key, value, len(m.index.Columns), hdStatus, colInfos)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Project only the requested output columns.
	ds := make([]types.Datum, 0, len(m.outputOffset))
	for _, offset := range m.outputOffset {
		d, err := tablecodec.DecodeColumnValue(values[offset], tps[offset], m.ctx.GetSessionVars().Location())
		if err != nil {
			return nil, err
		}
		ds = append(ds, d)
	}
	return ds, nil
}
// memTableReader decodes table rows buffered in the current transaction
// (and, optionally, a cached-table snapshot) for the ranges in kvRanges.
type memTableReader struct {
	ctx           sessionctx.Context
	table         *model.TableInfo
	columns       []*model.ColumnInfo
	kvRanges      []kv.KeyRange
	conditions    []expression.Expression // filters applied to each decoded row
	addedRows     [][]types.Datum         // rows accumulated by getMemRows
	retFieldTypes []*types.FieldType
	colIDs        map[int64]int // column id -> position in columns
	buffer        allocBuf      // reusable decoding scratch space
	pkColIDs      []int64       // common-handle pk column ids, or {-1}
	cacheTable    kv.MemBuffer
	offsets       []int // per-column value offsets, filled before decoding
	keepOrder     bool
	compareExec
}
// allocBuf bundles reusable decoding buffers so row decoding does not
// allocate per row.
type allocBuf struct {
	// cache for decode handle.
	handleBytes []byte
	rd          *rowcodec.BytesDecoder
	cd          *rowcodec.ChunkDecoder
}
// buildMemTableReader creates a memTableReader that replays, for a
// UnionScanExec, the table rows buffered in the current transaction over
// the given kv ranges.
func buildMemTableReader(ctx context.Context, us *UnionScanExec, kvRanges []kv.KeyRange) *memTableReader {
	defer tracing.StartRegion(ctx, "buildMemTableReader").End()
	colIDs := make(map[int64]int, len(us.columns))
	for i, col := range us.columns {
		colIDs[col.ID] = i
	}

	colInfo := make([]rowcodec.ColInfo, 0, len(us.columns))
	for i := range us.columns {
		col := us.columns[i]
		colInfo = append(colInfo, rowcodec.ColInfo{
			ID:         col.ID,
			IsPKHandle: us.table.Meta().PKIsHandle && mysql.HasPriKeyFlag(col.GetFlag()),
			Ft:         rowcodec.FieldTypeFromModelColumn(col),
		})
	}

	pkColIDs := tables.TryGetCommonPkColumnIds(us.table.Meta())
	if len(pkColIDs) == 0 {
		// Sentinel for "no common-handle primary key".
		pkColIDs = []int64{-1}
	}

	// defVal supplies a column's encoded default when the stored row omits it.
	defVal := func(i int) ([]byte, error) {
		d, err := table.GetColOriginDefaultValueWithoutStrictSQLMode(us.Ctx(), us.columns[i])
		if err != nil {
			return nil, err
		}
		return tablecodec.EncodeValue(us.Ctx().GetSessionVars().StmtCtx, nil, d)
	}
	cd := NewRowDecoder(us.Ctx(), us.Schema(), us.table.Meta())
	rd := rowcodec.NewByteDecoder(colInfo, pkColIDs, defVal, us.Ctx().GetSessionVars().Location())
	if us.desc {
		// Descending scan: visit the ranges back to front.
		slices.Reverse(kvRanges)
	}
	return &memTableReader{
		ctx:           us.Ctx(),
		table:         us.table.Meta(),
		columns:       us.columns,
		kvRanges:      kvRanges,
		conditions:    us.conditions,
		retFieldTypes: retTypes(us),
		colIDs:        colIDs,
		buffer: allocBuf{
			handleBytes: make([]byte, 0, 16),
			rd:          rd,
			cd:          cd,
		},
		pkColIDs:    pkColIDs,
		cacheTable:  us.cacheTable,
		keepOrder:   us.keepOrder,
		compareExec: us.compareExec,
	}
}
// txnMemBufferIter lazily iterates rows from the transaction memory buffer
// (merged with the cached-table snapshot, if any), one kv range at a time.
type txnMemBufferIter struct {
	*memTableReader
	txn  kv.Transaction
	idx  int         // index of the current range in kvRanges
	curr kv.Iterator // iterator over the current range, nil when exhausted
	// reverse selects descending iteration over each range.
	reverse  bool
	cd       *rowcodec.ChunkDecoder
	chk      *chunk.Chunk
	datumRow []types.Datum // reusable output row buffer
}
// Next returns the next matching row, or nil when all ranges are exhausted.
// It lazily opens an iterator per kv range and advances to the next range
// once the current one is drained.
func (iter *txnMemBufferIter) Next() ([]types.Datum, error) {
	var ret []types.Datum
	for iter.idx < len(iter.kvRanges) {
		if iter.curr == nil {
			// Open an iterator over the current range, in scan direction.
			rg := iter.kvRanges[iter.idx]
			var tmp kv.Iterator
			if !iter.reverse {
				tmp = iter.txn.GetMemBuffer().SnapshotIter(rg.StartKey, rg.EndKey)
			} else {
				tmp = iter.txn.GetMemBuffer().SnapshotIterReverse(rg.EndKey, rg.StartKey)
			}
			snapCacheIter, err := getSnapIter(iter.ctx, iter.cacheTable, rg, iter.reverse)
			if err != nil {
				return nil, err
			}
			if snapCacheIter != nil {
				// Merge the buffer iterator with the snapshot iterator.
				tmp, err = transaction.NewUnionIter(tmp, snapCacheIter, iter.reverse)
				if err != nil {
					return nil, err
				}
			}
			iter.curr = tmp
		} else {
			var err error
			ret, err = iter.next()
			if err != nil {
				return nil, errors.Trace(err)
			}
			if ret != nil {
				break
			}
			// Current range drained; move on to the next one.
			iter.idx++
			iter.curr = nil
		}
	}
	return ret, nil
}
// next advances the current range's iterator until it finds a row that
// passes iter.conditions, returning that row, or (nil, nil) when the range
// is exhausted. It handles both the new row format (chunk decoding) and the
// legacy format.
func (iter *txnMemBufferIter) next() ([]types.Datum, error) {
	var err error
	curr := iter.curr
	for ; err == nil && curr.Valid(); err = curr.Next() {
		// check whether the key was been deleted.
		if len(curr.Value()) == 0 {
			continue
		}
		handle, err := tablecodec.DecodeRowKey(curr.Key())
		if err != nil {
			return nil, errors.Trace(err)
		}
		iter.chk.Reset()
		if !rowcodec.IsNewFormat(curr.Value()) {
			// TODO: remove the legacy code!
			// fallback to the old way.
			iter.datumRow, err = iter.decodeRecordKeyValue(curr.Key(), curr.Value(), &iter.datumRow)
			if err != nil {
				return nil, errors.Trace(err)
			}
			mutableRow := chunk.MutRowFromTypes(iter.retFieldTypes)
			mutableRow.SetDatums(iter.datumRow...)
			matched, _, err := expression.EvalBool(iter.ctx, iter.conditions, mutableRow.ToRow())
			if err != nil {
				return nil, errors.Trace(err)
			}
			if !matched {
				continue
			}
			// Advance past this row before returning it.
			return iter.datumRow, curr.Next()
		}
		err = iter.cd.DecodeToChunk(curr.Value(), handle, iter.chk)
		if err != nil {
			return nil, errors.Trace(err)
		}
		row := iter.chk.GetRow(0)
		matched, _, err := expression.EvalBool(iter.ctx, iter.conditions, row)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if !matched {
			continue
		}
		ret := row.GetDatumRowWithBuffer(iter.retFieldTypes, iter.datumRow)
		// Advance past this row before returning it.
		return ret, curr.Next()
	}
	return nil, err
}
// getMemRowsIter returns a lazy txnMemBufferIter when possible, falling
// back to eagerly materializing all rows for the keepOrder + partitioned
// case (which must sort the full result first).
func (m *memTableReader) getMemRowsIter(ctx context.Context) (memRowsIter, error) {
	// txnMemBufferIter not supports keepOrder + partitionTable.
	if m.keepOrder && m.table.GetPartitionInfo() != nil {
		data, err := m.getMemRows(ctx)
		if err != nil {
			return nil, errors.Trace(err)
		}
		return &defaultRowsIter{data: data}, nil
	}

	// Map each output column to its value offset before decoding starts.
	m.offsets = make([]int, len(m.columns))
	for i, col := range m.columns {
		m.offsets[i] = m.colIDs[col.ID]
	}
	txn, err := m.ctx.Txn(true)
	if err != nil {
		return nil, err
	}
	return &txnMemBufferIter{
		memTableReader: m,
		txn:            txn,
		cd:             m.buffer.cd,
		chk:            chunk.New(m.retFieldTypes, 1, 1),
		datumRow:       make([]types.Datum, len(m.retFieldTypes)),
		reverse:        m.desc,
	}, nil
}
// TODO: Try to make memXXXReader lazy, There is no need to decode many rows when parent operator only need 1 row.

// getMemRows scans the transaction memory buffer over m.kvRanges, decodes
// each record into datums, filters by m.conditions, and returns the
// surviving rows. When keepOrder is set on a partitioned table, the rows
// are re-sorted because entries from different partitions interleave.
func (m *memTableReader) getMemRows(ctx context.Context) ([][]types.Datum, error) {
	defer tracing.StartRegion(ctx, "memTableReader.getMemRows").End()
	mutableRow := chunk.MutRowFromTypes(m.retFieldTypes)
	resultRows := make([]types.Datum, len(m.columns))
	// Map each output column to its value offset before decoding starts.
	m.offsets = make([]int, len(m.columns))
	for i, col := range m.columns {
		m.offsets[i] = m.colIDs[col.ID]
	}
	err := iterTxnMemBuffer(m.ctx, m.cacheTable, m.kvRanges, m.desc, func(key, value []byte) error {
		var err error
		resultRows, err = m.decodeRecordKeyValue(key, value, &resultRows)
		if err != nil {
			return err
		}

		mutableRow.SetDatums(resultRows...)
		matched, _, err := expression.EvalBool(m.ctx, m.conditions, mutableRow.ToRow())
		if err != nil || !matched {
			return err
		}
		m.addedRows = append(m.addedRows, resultRows)
		// The kept row now owns resultRows; allocate a fresh buffer.
		resultRows = make([]types.Datum, len(m.columns))
		return nil
	})
	if err != nil {
		return nil, err
	}

	if m.keepOrder && m.table.GetPartitionInfo() != nil {
		slices.SortFunc(m.addedRows, func(a, b []types.Datum) int {
			ret, err1 := m.compare(m.ctx.GetSessionVars().StmtCtx, a, b)
			if err1 != nil {
				// Capture the comparison error; SortFunc itself cannot fail.
				err = err1
			}
			return ret
		})
		return m.addedRows, err
	}
	return m.addedRows, nil
}
// decodeRecordKeyValue extracts the handle from a record key, then decodes
// the row value into resultRows.
func (m *memTableReader) decodeRecordKeyValue(key, value []byte, resultRows *[]types.Datum) ([]types.Datum, error) {
	h, err := tablecodec.DecodeRowKey(key)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return m.decodeRowData(h, value, resultRows)
}
// decodeRowData uses to decode row data value.
// It splits the raw value into per-column byte slices, then decodes each
// output column (in m.offsets order) into resultRows, reusing its storage.
func (m *memTableReader) decodeRowData(handle kv.Handle, value []byte, resultRows *[]types.Datum) ([]types.Datum, error) {
	values, err := m.getRowData(handle, value)
	if err != nil {
		return nil, err
	}
	for i, col := range m.columns {
		var datum types.Datum
		err := tablecodec.DecodeColumnValueWithDatum(values[m.offsets[i]], &col.FieldType, m.ctx.GetSessionVars().Location(), &datum)
		if err != nil {
			return nil, err
		}
		(*resultRows)[i] = datum
	}
	return *resultRows, nil
}
// getRowData decodes raw byte slice to row data.
// For the new row format it delegates to the cached BytesDecoder; for the
// legacy format it cuts the row manually, then fills in handle-derived
// columns and nil markers for columns absent from the stored value.
func (m *memTableReader) getRowData(handle kv.Handle, value []byte) ([][]byte, error) {
	colIDs := m.colIDs
	pkIsHandle := m.table.PKIsHandle
	buffer := &m.buffer
	ctx := m.ctx.GetSessionVars().StmtCtx
	if rowcodec.IsNewFormat(value) {
		return buffer.rd.DecodeToBytes(colIDs, handle, value, buffer.handleBytes)
	}
	values, err := tablecodec.CutRowNew(value, colIDs)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if values == nil {
		values = make([][]byte, len(colIDs))
	}
	// Fill the handle and null columns.
	for _, col := range m.columns {
		id := col.ID
		offset := colIDs[id]
		if m.table.IsCommonHandle {
			for i, colID := range m.pkColIDs {
				if colID == col.ID && !types.NeedRestoredData(&col.FieldType) {
					// Only try to decode handle when there is no corresponding column in the value.
					// This is because the information in handle may be incomplete in some cases.
					// For example, prefixed clustered index like 'primary key(col1(1))' only store the leftmost 1 char in the handle.
					if values[offset] == nil {
						values[offset] = handle.EncodedCol(i)
						break
					}
				}
			}
		} else if (pkIsHandle && mysql.HasPriKeyFlag(col.GetFlag())) || id == model.ExtraHandleID {
			var handleDatum types.Datum
			if mysql.HasUnsignedFlag(col.GetFlag()) {
				// PK column is Unsigned.
				handleDatum = types.NewUintDatum(uint64(handle.IntValue()))
			} else {
				handleDatum = types.NewIntDatum(handle.IntValue())
			}
			handleData, err1 := codec.EncodeValue(ctx, buffer.handleBytes, handleDatum)
			if err1 != nil {
				return nil, errors.Trace(err1)
			}
			values[offset] = handleData
			continue
		}
		if hasColVal(values, colIDs, id) {
			continue
		}
		// no need to fill default value.
		values[offset] = []byte{codec.NilFlag}
	}

	return values, nil
}
// getMemRowsHandle is called when memIndexMergeReader.partialPlans[i] is TableScan.
// It collects only the row handles (decoded from the record keys) for the
// buffered entries in m.kvRanges, without decoding row values.
func (m *memTableReader) getMemRowsHandle() ([]kv.Handle, error) {
	handles := make([]kv.Handle, 0, 16)
	err := iterTxnMemBuffer(m.ctx, m.cacheTable, m.kvRanges, m.desc, func(key, value []byte) error {
		handle, err := tablecodec.DecodeRowKey(key)
		if err != nil {
			return err
		}
		handles = append(handles, handle)
		return nil
	})
	if err != nil {
		return nil, err
	}
	return handles, nil
}
// hasColVal reports whether the cut row data already holds a non-nil value
// for the column identified by id.
func hasColVal(data [][]byte, colIDs map[int64]int, id int64) bool {
	offset, ok := colIDs[id]
	return ok && data[offset] != nil
}
// processKVFunc is the callback invoked by iterTxnMemBuffer for each
// non-deleted key/value pair.
type processKVFunc func(key, value []byte) error
// iterTxnMemBuffer walks the transaction memory buffer (merged with the
// cached-table snapshot, if any) over every range in kvRanges, in the given
// direction, invoking fn for each non-deleted key/value pair.
func iterTxnMemBuffer(ctx sessionctx.Context, cacheTable kv.MemBuffer, kvRanges []kv.KeyRange, reverse bool, fn processKVFunc) error {
	txn, err := ctx.Txn(true)
	if err != nil {
		return err
	}

	for _, rg := range kvRanges {
		var iter kv.Iterator
		if !reverse {
			iter = txn.GetMemBuffer().SnapshotIter(rg.StartKey, rg.EndKey)
		} else {
			iter = txn.GetMemBuffer().SnapshotIterReverse(rg.EndKey, rg.StartKey)
		}
		snapCacheIter, err := getSnapIter(ctx, cacheTable, rg, reverse)
		if err != nil {
			return err
		}
		if snapCacheIter != nil {
			// Merge buffered writes with the snapshot for cached tables.
			iter, err = transaction.NewUnionIter(iter, snapCacheIter, reverse)
			if err != nil {
				return err
			}
		}
		// The post-statement assigns the error from advancing; it is checked
		// at the top of the next iteration.
		for ; iter.Valid(); err = iter.Next() {
			if err != nil {
				return err
			}
			// check whether the key was been deleted.
			if len(iter.Value()) == 0 {
				continue
			}
			err = fn(iter.Key(), iter.Value())
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// getSnapIter returns a snapshot iterator over rg, preferring the session's
// temporary-table data and falling back to the cached table; it returns nil
// when neither source exists.
func getSnapIter(ctx sessionctx.Context, cacheTable kv.MemBuffer, rg kv.KeyRange, reverse bool) (snapCacheIter kv.Iterator, err error) {
	var cacheIter, snapIter kv.Iterator
	tempTableData := ctx.GetSessionVars().TemporaryTableData
	if tempTableData != nil {
		if !reverse {
			snapIter, err = tempTableData.Iter(rg.StartKey, rg.EndKey)
		} else {
			snapIter, err = tempTableData.IterReverse(rg.EndKey, rg.StartKey)
		}
		if err != nil {
			return nil, err
		}
		snapCacheIter = snapIter
	} else if cacheTable != nil {
		if !reverse {
			cacheIter, err = cacheTable.Iter(rg.StartKey, rg.EndKey)
		} else {
			cacheIter, err = cacheTable.IterReverse(rg.EndKey, rg.StartKey)
		}
		if err != nil {
			return nil, errors.Trace(err)
		}
		snapCacheIter = cacheIter
	}
	return snapCacheIter, nil
}
// getMemRowsHandle collects the row handles of the buffered index entries
// in m.kvRanges, converting int handles back to common handles for
// common-handle tables where necessary.
func (m *memIndexReader) getMemRowsHandle() ([]kv.Handle, error) {
	handles := make([]kv.Handle, 0, m.addedRowsLen)
	err := iterTxnMemBuffer(m.ctx, m.cacheTable, m.kvRanges, m.desc, func(key, value []byte) error {
		handle, err := tablecodec.DecodeIndexHandle(key, value, len(m.index.Columns))
		if err != nil {
			return err
		}
		// For https://github.com/pingcap/tidb/issues/41827,
		// When handle type is year, tablecodec.DecodeIndexHandle will convert it to IntHandle instead of CommonHandle
		if m.table.IsCommonHandle && handle.IsInt() {
			b, err := codec.EncodeKey(m.ctx.GetSessionVars().StmtCtx, nil, types.NewDatum(handle.IntValue()))
			if err != nil {
				return err
			}
			handle, err = kv.NewCommonHandle(b)
			if err != nil {
				return err
			}
		}
		handles = append(handles, handle)
		return nil
	})
	if err != nil {
		return nil, err
	}
	return handles, nil
}
// memIndexLookUpReader replays an index-lookup in memory: an inner
// memIndexReader produces handles, which are then looked up through a
// memTableReader built in getMemRows.
type memIndexLookUpReader struct {
	ctx           sessionctx.Context
	index         *model.IndexInfo
	columns       []*model.ColumnInfo
	table         table.Table
	conditions    []expression.Expression
	retFieldTypes []*types.FieldType

	idxReader *memIndexReader

	// partition mode
	partitionMode     bool                  // if it is accessing a partition table
	partitionTables   []table.PhysicalTable // partition tables to access
	partitionKVRanges [][]kv.KeyRange       // kv ranges for these partition tables

	cacheTable kv.MemBuffer

	keepOrder bool
	compareExec
}
// buildMemIndexLookUpReader creates a memIndexLookUpReader for a
// UnionScanExec on top of an IndexLookUpExecutor, wiring an inner
// memIndexReader whose only output column is the handle.
func buildMemIndexLookUpReader(ctx context.Context, us *UnionScanExec, idxLookUpReader *IndexLookUpExecutor) *memIndexLookUpReader {
	defer tracing.StartRegion(ctx, "buildMemIndexLookUpReader").End()

	kvRanges := idxLookUpReader.kvRanges
	// The handle sits right after the index columns in a decoded entry.
	outputOffset := []int{len(idxLookUpReader.index.Columns)}
	memIdxReader := &memIndexReader{
		ctx:           us.Ctx(),
		index:         idxLookUpReader.index,
		table:         idxLookUpReader.table.Meta(),
		kvRanges:      kvRanges,
		retFieldTypes: retTypes(us),
		outputOffset:  outputOffset,
		cacheTable:    us.cacheTable,
	}

	return &memIndexLookUpReader{
		ctx:           us.Ctx(),
		index:         idxLookUpReader.index,
		columns:       idxLookUpReader.columns,
		table:         idxLookUpReader.table,
		conditions:    us.conditions,
		retFieldTypes: retTypes(us),
		idxReader:     memIdxReader,

		partitionMode:     idxLookUpReader.partitionTableMode,
		partitionKVRanges: idxLookUpReader.partitionKVRanges,
		partitionTables:   idxLookUpReader.prunedPartitions,
		cacheTable:        us.cacheTable,

		keepOrder:   idxLookUpReader.keepOrder,
		compareExec: us.compareExec,
	}
}
// getMemRowsIter materializes all buffered rows eagerly and wraps them in a
// defaultRowsIter.
func (m *memIndexLookUpReader) getMemRowsIter(ctx context.Context) (memRowsIter, error) {
	rows, err := m.getMemRows(ctx)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &defaultRowsIter{data: rows}, nil
}
// getMemRows reads handles from the inner memIndexReader (per partition when
// in partition mode), converts them to table kv ranges, and decodes the
// corresponding rows through a freshly built memTableReader.
func (m *memIndexLookUpReader) getMemRows(ctx context.Context) ([][]types.Datum, error) {
	r, ctx := tracing.StartRegionEx(ctx, "memIndexLookUpReader.getMemRows")
	defer r.End()

	kvRanges := [][]kv.KeyRange{m.idxReader.kvRanges}
	tbls := []table.Table{m.table}
	if m.partitionMode {
		// One set of index ranges per pruned partition.
		kvRanges = m.partitionKVRanges
		tbls = tbls[:0]
		for _, p := range m.partitionTables {
			tbls = append(tbls, p)
		}
	}

	tblKVRanges := make([]kv.KeyRange, 0, 16)
	numHandles := 0
	for i, tbl := range tbls {
		m.idxReader.kvRanges = kvRanges[i]
		handles, err := m.idxReader.getMemRowsHandle()
		if err != nil {
			return nil, err
		}
		if len(handles) == 0 {
			continue
		}
		numHandles += len(handles)
		ranges, _ := distsql.TableHandlesToKVRanges(getPhysicalTableID(tbl), handles)
		tblKVRanges = append(tblKVRanges, ranges...)
	}
	if numHandles == 0 {
		return nil, nil
	}
	if m.desc {
		// Descending scan: visit the table ranges back to front.
		slices.Reverse(tblKVRanges)
	}

	colIDs, pkColIDs, rd := getColIDAndPkColIDs(m.ctx, m.table, m.columns)
	memTblReader := &memTableReader{
		ctx:           m.ctx,
		table:         m.table.Meta(),
		columns:       m.columns,
		kvRanges:      tblKVRanges,
		conditions:    m.conditions,
		addedRows:     make([][]types.Datum, 0, numHandles),
		retFieldTypes: m.retFieldTypes,
		colIDs:        colIDs,
		pkColIDs:      pkColIDs,
		buffer: allocBuf{
			handleBytes: make([]byte, 0, 16),
			rd:          rd,
		},
		cacheTable:  m.cacheTable,
		keepOrder:   m.keepOrder,
		compareExec: m.compareExec,
	}

	return memTblReader.getMemRows(ctx)
}
// getMemRowsHandle is unsupported for memIndexLookUpReader; it exists only
// to satisfy the memReader interface.
func (*memIndexLookUpReader) getMemRowsHandle() ([]kv.Handle, error) {
	return nil, errors.New("getMemRowsHandle has not been implemented for memIndexLookUpReader")
}
// memIndexMergeReader replays an index-merge in memory: each partial path
// has its own memReader producing handles, which are combined by union or
// intersection and then looked up through a memTableReader.
type memIndexMergeReader struct {
	ctx              sessionctx.Context
	columns          []*model.ColumnInfo
	table            table.Table
	conditions       []expression.Expression
	retFieldTypes    []*types.FieldType
	indexMergeReader *IndexMergeReaderExecutor
	memReaders       []memReader
	// isIntersection selects intersectionHandles over unionHandles.
	isIntersection bool

	// partition mode
	partitionMode     bool                  // if it is accessing a partition table
	partitionTables   []table.PhysicalTable // partition tables to access
	partitionKVRanges [][][]kv.KeyRange     // kv ranges for these partition tables

	keepOrder bool
	compareExec
}
// buildMemIndexMergeReader creates a memIndexMergeReader for a
// UnionScanExec on top of an IndexMergeReaderExecutor, building one inner
// memReader per partial path (a memTableReader for a table-scan path, a
// memIndexReader otherwise). kvRanges are filled in later, per partition,
// by unionHandles/intersectionHandles.
func buildMemIndexMergeReader(ctx context.Context, us *UnionScanExec, indexMergeReader *IndexMergeReaderExecutor) *memIndexMergeReader {
	defer tracing.StartRegion(ctx, "buildMemIndexMergeReader").End()
	indexCount := len(indexMergeReader.indexes)
	memReaders := make([]memReader, 0, indexCount)
	for i := 0; i < indexCount; i++ {
		if indexMergeReader.indexes[i] == nil {
			// A nil index marks a table-scan partial path.
			colIDs, pkColIDs, rd := getColIDAndPkColIDs(indexMergeReader.Ctx(), indexMergeReader.table, indexMergeReader.columns)
			memReaders = append(memReaders, &memTableReader{
				ctx:           us.Ctx(),
				table:         indexMergeReader.table.Meta(),
				columns:       indexMergeReader.columns,
				kvRanges:      nil,
				conditions:    us.conditions,
				addedRows:     make([][]types.Datum, 0),
				retFieldTypes: retTypes(us),
				colIDs:        colIDs,
				pkColIDs:      pkColIDs,
				buffer: allocBuf{
					handleBytes: make([]byte, 0, 16),
					rd:          rd,
				},
			})
		} else {
			// The handle sits right after the index columns in a decoded entry.
			outputOffset := []int{len(indexMergeReader.indexes[i].Columns)}
			memReaders = append(memReaders, &memIndexReader{
				ctx:           us.Ctx(),
				index:         indexMergeReader.indexes[i],
				table:         indexMergeReader.table.Meta(),
				kvRanges:      nil,
				compareExec:   compareExec{desc: indexMergeReader.descs[i]},
				retFieldTypes: retTypes(us),
				outputOffset:  outputOffset,
			})
		}
	}

	return &memIndexMergeReader{
		ctx:              us.Ctx(),
		table:            indexMergeReader.table,
		columns:          indexMergeReader.columns,
		conditions:       us.conditions,
		retFieldTypes:    retTypes(us),
		indexMergeReader: indexMergeReader,
		memReaders:       memReaders,
		isIntersection:   indexMergeReader.isIntersection,

		partitionMode:     indexMergeReader.partitionTableMode,
		partitionTables:   indexMergeReader.prunedPartitions,
		partitionKVRanges: indexMergeReader.partitionKeyRanges,

		keepOrder:   us.keepOrder,
		compareExec: us.compareExec,
	}
}
// memRowsIter yields decoded rows one at a time; Next returns (nil, nil)
// when the iterator is exhausted.
type memRowsIter interface {
	Next() ([]types.Datum, error)
}
// defaultRowsIter is a memRowsIter over an already-materialized row slice.
type defaultRowsIter struct {
	data   [][]types.Datum
	cursor int // index of the next row to return
}
// Next returns the next materialized row, or (nil, nil) once every row has
// been consumed.
func (iter *defaultRowsIter) Next() ([]types.Datum, error) {
	if iter.cursor >= len(iter.data) {
		return nil, nil
	}
	row := iter.data[iter.cursor]
	iter.cursor++
	return row, nil
}
// getMemRowsIter materializes all buffered rows eagerly and wraps them in a
// defaultRowsIter.
func (m *memIndexMergeReader) getMemRowsIter(ctx context.Context) (memRowsIter, error) {
	rows, err := m.getMemRows(ctx)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &defaultRowsIter{data: rows}, nil
}
// getMemRows combines the handles produced by every partial-path reader
// (union or intersection, per table/partition), converts them to table kv
// ranges, decodes the rows through a memTableReader, and re-sorts the
// result when keepOrder is required.
func (m *memIndexMergeReader) getMemRows(ctx context.Context) ([][]types.Datum, error) {
	r, ctx := tracing.StartRegionEx(ctx, "memIndexMergeReader.getMemRows")
	defer r.End()
	tbls := []table.Table{m.table}
	// [partNum][indexNum][rangeNum]
	var kvRanges [][][]kv.KeyRange
	if m.partitionMode {
		tbls = tbls[:0]
		for _, p := range m.partitionTables {
			tbls = append(tbls, p)
		}
		kvRanges = m.partitionKVRanges
	} else {
		kvRanges = append(kvRanges, m.indexMergeReader.keyRanges)
	}
	if len(kvRanges) != len(tbls) {
		return nil, errors.Errorf("length of tbls(size: %d) should be equals to length of kvRanges(size: %d)", len(tbls), len(kvRanges))
	}

	tblKVRanges := make([]kv.KeyRange, 0, 16)
	numHandles := 0
	var handles []kv.Handle
	var err error
	for i, tbl := range tbls {
		// Combine the partial paths' handles for this table/partition.
		if m.isIntersection {
			handles, err = m.intersectionHandles(kvRanges[i])
		} else {
			handles, err = m.unionHandles(kvRanges[i])
		}
		if err != nil {
			return nil, err
		}
		if len(handles) == 0 {
			continue
		}
		numHandles += len(handles)
		ranges, _ := distsql.TableHandlesToKVRanges(getPhysicalTableID(tbl), handles)
		tblKVRanges = append(tblKVRanges, ranges...)
	}
	if numHandles == 0 {
		return nil, nil
	}

	colIDs, pkColIDs, rd := getColIDAndPkColIDs(m.ctx, m.table, m.columns)

	memTblReader := &memTableReader{
		ctx:           m.ctx,
		table:         m.table.Meta(),
		columns:       m.columns,
		kvRanges:      tblKVRanges,
		conditions:    m.conditions,
		addedRows:     make([][]types.Datum, 0, numHandles),
		retFieldTypes: m.retFieldTypes,
		colIDs:        colIDs,
		pkColIDs:      pkColIDs,
		buffer: allocBuf{
			handleBytes: make([]byte, 0, 16),
			rd:          rd,
		},
	}

	rows, err := memTblReader.getMemRows(ctx)
	if err != nil {
		return nil, err
	}

	// Didn't set keepOrder = true for memTblReader,
	// In indexMerge, non-partitioned tables are also need reordered.
	if m.keepOrder {
		slices.SortFunc(rows, func(a, b []types.Datum) int {
			ret, err1 := m.compare(m.ctx.GetSessionVars().StmtCtx, a, b)
			if err1 != nil {
				// Capture the comparison error; SortFunc itself cannot fail.
				err = err1
			}
			return ret
		})
	}

	return rows, err
}
// Union all handles of all partial paths.
// kvRanges holds one range set per partial-path reader; the result keeps
// first-occurrence order and drops duplicate handles.
func (m *memIndexMergeReader) unionHandles(kvRanges [][]kv.KeyRange) (finalHandles []kv.Handle, err error) {
	if len(m.memReaders) != len(kvRanges) {
		return nil, errors.Errorf("len(kvRanges) should be equal to len(memReaders)")
	}

	hMap := kv.NewHandleMap()
	var handles []kv.Handle
	for i, reader := range m.memReaders {
		// Point each reader at its own range set before collecting handles.
		switch r := reader.(type) {
		case *memTableReader:
			r.kvRanges = kvRanges[i]
		case *memIndexReader:
			r.kvRanges = kvRanges[i]
		default:
			return nil, errors.New("memReader have to be memTableReader or memIndexReader")
		}
		if handles, err = reader.getMemRowsHandle(); err != nil {
			return nil, err
		}
		// Filter same row.
		for _, h := range handles {
			if _, ok := hMap.Get(h); !ok {
				finalHandles = append(finalHandles, h)
				hMap.Set(h, true)
			}
		}
	}
	return finalHandles, nil
}
// Intersect handles of each partial paths.
// A handle survives only if every partial-path reader produced it; the
// count per handle is tracked in hMap.
func (m *memIndexMergeReader) intersectionHandles(kvRanges [][]kv.KeyRange) (finalHandles []kv.Handle, err error) {
	if len(m.memReaders) != len(kvRanges) {
		return nil, errors.Errorf("len(kvRanges) should be equal to len(memReaders)")
	}

	hMap := kv.NewHandleMap()
	var handles []kv.Handle
	for i, reader := range m.memReaders {
		// Point each reader at its own range set before collecting handles.
		switch r := reader.(type) {
		case *memTableReader:
			r.kvRanges = kvRanges[i]
		case *memIndexReader:
			r.kvRanges = kvRanges[i]
		default:
			return nil, errors.New("memReader have to be memTableReader or memIndexReader")
		}
		if handles, err = reader.getMemRowsHandle(); err != nil {
			return nil, err
		}
		for _, h := range handles {
			if cntPtr, ok := hMap.Get(h); !ok {
				cnt := 1
				hMap.Set(h, &cnt)
			} else {
				*(cntPtr.(*int))++
			}
		}
	}
	// Keep only handles seen by every reader.
	hMap.Range(func(h kv.Handle, val interface{}) bool {
		if *(val.(*int)) == len(m.memReaders) {
			finalHandles = append(finalHandles, h)
		}
		return true
	})
	return finalHandles, nil
}
// getMemRowsHandle is unsupported for memIndexMergeReader; it exists only
// to satisfy the memReader interface.
func (*memIndexMergeReader) getMemRowsHandle() ([]kv.Handle, error) {
	return nil, errors.New("getMemRowsHandle has not been implemented for memIndexMergeReader")
}
// getColIDAndPkColIDs builds the column-ID -> column-offset map, the list of
// primary-key column IDs, and a row decoder for the given columns of tbl.
// When the table has no common-handle primary key, pkColIDs is []int64{-1}.
func getColIDAndPkColIDs(ctx sessionctx.Context, tbl table.Table, columns []*model.ColumnInfo) (map[int64]int, []int64, *rowcodec.BytesDecoder) {
	colIDs := make(map[int64]int, len(columns))
	for i, col := range columns {
		colIDs[col.ID] = i
	}
	tblInfo := tbl.Meta()
	colInfos := make([]rowcodec.ColInfo, 0, len(columns))
	for i := range columns {
		col := columns[i]
		colInfos = append(colInfos, rowcodec.ColInfo{
			ID:         col.ID,
			IsPKHandle: tblInfo.PKIsHandle && mysql.HasPriKeyFlag(col.GetFlag()),
			Ft:         rowcodec.FieldTypeFromModelColumn(col),
		})
	}
	pkColIDs := tables.TryGetCommonPkColumnIds(tblInfo)
	if len(pkColIDs) == 0 {
		pkColIDs = []int64{-1}
	}
	// defVal supplies an encoded default value for column i when a row is
	// missing it. Strict SQL mode is temporarily disabled so fetching the
	// default cannot fail on a NOT NULL column without default, then restored.
	defVal := func(i int) ([]byte, error) {
		sessVars := ctx.GetSessionVars()
		originStrict := sessVars.StrictSQLMode
		sessVars.StrictSQLMode = false
		d, err := table.GetColOriginDefaultValue(ctx, columns[i])
		sessVars.StrictSQLMode = originStrict
		if err != nil {
			return nil, err
		}
		return tablecodec.EncodeValue(ctx.GetSessionVars().StmtCtx, nil, d)
	}
	rd := rowcodec.NewByteDecoder(colInfos, pkColIDs, defVal, ctx.GetSessionVars().Location())
	return colIDs, pkColIDs, rd
}
|
package gorden
// strategies is the package-level registry of named strategies.
// NOTE(review): the map is not guarded by a mutex — concurrent AddStrategy
// calls would race; confirm registration only happens during startup.
var strategies = make(map[string]Strategy)

// AddStrategy registers strategy under name, overwriting any previous
// entry registered with the same name.
func AddStrategy(name string, strategy Strategy) {
	strategies[name] = strategy
}
|
package middlewares
import (
"fmt"
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
)
// SetCorsMiddlewares installs a CORS middleware on e that allows any origin,
// the GET and POST methods, and a fixed set of request headers.
func SetCorsMiddlewares(e *echo.Echo) {
	fmt.Println("masuk ke CORS")
	corsConfig := middleware.CORSConfig{
		AllowOrigins: []string{"*"},
		AllowHeaders: []string{echo.HeaderOrigin, echo.HeaderContentType, echo.HeaderAccept, echo.HeaderAuthorization},
		AllowMethods: []string{"GET", "POST"},
	}
	e.Use(middleware.CORSWithConfig(corsConfig))
}
|
package main
import (
"fmt"
)
// Idea: the left pointer searches for an odd element, the right pointer
// searches for an even element, then the two are swapped.

// sortArrayByParity reorders A in place so every even element precedes
// every odd element, and returns A.
func sortArrayByParity(A []int) []int {
	left, right := 0, len(A)-1
	for left < right {
		switch {
		case A[left]%2 == 0:
			left++
		case A[right]%2 != 0:
			right--
		default:
			A[left], A[right] = A[right], A[left]
		}
	}
	return A
}
// main demonstrates sortArrayByParity on a fixed sample slice.
func main() {
	data := []int{3, 6, 8, 89, 4, 98, 12}
	fmt.Println(sortArrayByParity(data))
}
|
package redis
import (
"github.com/astaxie/beego"
"github.com/go-redis/redis"
"time"
)
// Client is a thin wrapper around a go-redis *redis.Client that exposes
// a simplified, error-swallowing API.
type Client struct {
	baseClient *redis.Client // underlying go-redis connection
}
// RedisClient builds a *Client for the given config class (internal use).
// Connection parameters are read from the beego app config. Only "user"
// has a dedicated case today, and it reads the same "common_*" keys as
// the default branch, so every class currently yields the same client.
func RedisClient(class string) *Client {
	Addr := ""
	Password := ""
	DB := 0
	switch class {
	case "user":
		Addr = beego.AppConfig.String("common_addr")
		Password = beego.AppConfig.String("common_password")
		DB, _ = beego.AppConfig.Int("common_db")
		// NOTE: the redundant `break` was removed — Go switch cases do
		// not fall through, so `break` is a no-op.
	default:
		Addr = beego.AppConfig.String("common_addr")
		Password = beego.AppConfig.String("common_password")
		DB, _ = beego.AppConfig.Int("common_db")
	}
	redisClient := redis.NewClient(&redis.Options{
		Addr:     Addr,
		Password: Password, // no password set
		DB:       DB,       // use default DB
	})
	//fmt.Println(Addr, Password, DB)
	client := &Client{
		baseClient: redisClient,
	}
	return client
}
// For every new instance, add a getter method like this one.

// GetCommonRedis returns a client for the common instance.
// NOTE(review): it passes "default", which is not a switch case in
// RedisClient and so hits the default (common) branch — confirm intended.
func GetCommonRedis() *Client {
	return RedisClient("default")
}
// GetOrderRedis returns a client for the order instance.
// NOTE(review): "order" has no case in RedisClient's switch, so this
// currently falls through to the common settings — confirm intended.
func GetOrderRedis() *Client {
	return RedisClient("order")
}
// Set stores value under key. The caller passes expiration as a plain
// number of seconds: it is multiplied by time.Second here. Errors from
// redis are silently discarded.
func (c *Client) Set(key string, value interface{}, expiration time.Duration) interface{} {
	ret, _ := c.baseClient.Set(key, value, expiration*time.Second).Result()
	return ret
}
// Get returns the string value stored at key ("" on miss or error;
// errors are discarded).
func (c Client) Get(key string) string {
	ret, _ := c.baseClient.Get(key).Result()
	return ret
}
// Del deletes key and returns the redis reply; errors are discarded.
func (c Client) Del(key string) interface{} {
	ret, _ := c.baseClient.Del(key).Result()
	return ret
}
//hash

// HSet sets field of hash key to value; errors are discarded.
// NOTE(review): expiration is accepted but never applied — confirm
// whether a TTL was intended here.
func (c Client) HSet(key string, field string, value interface{}, expiration time.Duration) interface{} {
	ret, _ := c.baseClient.HSet(key, field, value).Result()
	return ret
}
// HGet returns the value of field in hash key; errors are discarded.
func (c Client) HGet(key string, field string) interface{} {
	ret, _ := c.baseClient.HGet(key, field).Result()
	return ret
}
// HGetAll returns all field/value pairs of hash key; errors are discarded.
func (c Client) HGetAll(key string) map[string]string {
	ret, _ := c.baseClient.HGetAll(key).Result()
	return ret
}
// HLen returns the number of fields in hash key (0 on error; errors are
// discarded).
func (c Client) HLen(key string) int {
	ret, _ := c.baseClient.HLen(key).Result()
	return int(ret)
}
// HDel issues HDEL on key.
// NOTE(review): no field names are passed to the underlying variadic
// HDel, so this appears to delete nothing — confirm a fields parameter
// was intended.
func (c Client) HDel(key string) interface{} {
	ret, _ := c.baseClient.HDel(key).Result()
	return ret
}
// HExists reports whether field exists in hash key; errors are discarded.
func (c Client) HExists(key string, field string) bool {
	ret, _ := c.baseClient.HExists(key, field).Result()
	return ret
}
// HIncrBy increments field of hash key by num and returns the redis
// reply; errors are discarded.
// NOTE(review): expiration is accepted but never applied — confirm
// whether a TTL was intended here.
func (c Client) HIncrBy(key string, field string, num int64, expiration time.Duration) interface{} {
	// num is already int64, so the original int64(num) conversion was a no-op.
	ret, _ := c.baseClient.HIncrBy(key, field, num).Result()
	return ret
}
// HMget returns the value of a single field of hash key via HMGET;
// errors are discarded.
func (c Client) HMget(key string, fields string) interface{} {
	ret, _ := c.baseClient.HMGet(key, fields).Result()
	return ret
}
// HMset sets multiple field/value pairs on hash key; errors are discarded.
func (c Client) HMset(key string, fields map[string]interface{}) interface{} {
	ret, _ := c.baseClient.HMSet(key, fields).Result()
	return ret
}
// HVals returns all values of hash key; errors are discarded.
func (c Client) HVals(key string) interface{} {
	ret, _ := c.baseClient.HVals(key).Result()
	return ret
}
//list

// HPush pushes value onto the head of list key (LPUSH); errors are
// discarded. NOTE(review): despite the H prefix this is a list
// operation, not a hash one.
func (c Client) HPush(key string, value interface{}) interface{} {
	ret, _ := c.baseClient.LPush(key, value).Result()
	return ret
}
// HPop pops from the tail of list key (RPOP); errors are discarded.
// Combined with HPush this gives FIFO semantics.
func (c Client) HPop(key string) interface{} {
	ret, _ := c.baseClient.RPop(key).Result()
	return ret
}
// LPush appends value to the tail of list key.
// NOTE(review): despite its name this calls RPush — confirm the naming
// is intentional.
func (c Client) LPush(key string, value interface{}) interface{} {
	ret, _ := c.baseClient.RPush(key, value).Result()
	return ret
}
// LPop pops from the head of list key; errors are discarded.
func (c Client) LPop(key string) interface{} {
	ret, _ := c.baseClient.LPop(key).Result()
	return ret
}
// LLen returns the length of list key; errors are discarded.
func (c Client) LLen(key string) interface{} {
	ret, _ := c.baseClient.LLen(key).Result()
	return ret
}
//set
//zset
|
package atomic
import (
"sync"
"sync/atomic"
)
// Ordinal holds a global value whose initial value
// can only be set once; reads and updates are atomic.
type Ordinal struct {
	ordinal uint64     // current value; accessed only via sync/atomic
	once    *sync.Once // guards the one-time Init
}
// NewOrdinal returns an Ordinal with its once guard
// set up and the value at zero.
func NewOrdinal() *Ordinal {
	return &Ordinal{once: &sync.Once{}}
}
// Init sets the ordinal value. Only the first call has any effect;
// subsequent calls are no-ops because of the sync.Once guard.
func (o *Ordinal) Init(val uint64) {
	o.once.Do(func() {
		atomic.StoreUint64(&o.ordinal, val)
	})
}
// GetOrdinal will return the current
// ordinal value using an atomic load.
func (o *Ordinal) GetOrdinal() uint64 {
	return atomic.LoadUint64(&o.ordinal)
}
// Increment will atomically increment the current
// ordinal by one; it is not limited by the once guard.
func (o *Ordinal) Increment() {
	atomic.AddUint64(&o.ordinal, 1)
}
|
package gencoder
import (
"io"
"time"
"unsafe"
pb "github.com/bgokden/veri/veriservice"
)
// Blank assignments keep the unsafe/io/time imports referenced even when
// the generated code below does not use all of them.
var (
	_ = unsafe.Sizeof(0)
	_ = io.ReadFull
	_ = time.Now()
)
//////////
// SizeKey returns the number of bytes MarshalKey will produce for d:
// a varint length plus 4 bytes per Feature element, a varint length plus
// the GroupLabel bytes, and 16 bytes for the four fixed uint32 fields.
func SizeKey(d *pb.DatumKey) (s uint64) {
	{
		l := uint64(len(d.Feature))
		// Varint size of the length prefix: one byte per 7 bits.
		{
			t := l
			for t >= 0x80 {
				t >>= 7
				s++
			}
			s++
		}
		s += 4 * l
	}
	{
		l := uint64(len(d.GroupLabel))
		{
			t := l
			for t >= 0x80 {
				t >>= 7
				s++
			}
			s++
		}
		s += l
	}
	// Size1, Size2, Dim1, Dim2: 4 bytes each.
	s += 16
	return
}
// MarshalKey serializes d into a fresh buffer: varint-prefixed Feature
// (each float32 bit-cast and stored little-endian), varint-prefixed
// GroupLabel, then Size1/Size2/Dim1/Dim2 as little-endian uint32s.
// The error result is always nil.
func MarshalKey(d *pb.DatumKey) ([]byte, error) {
	size := SizeKey(d)
	buf := make([]byte, size)
	i := uint64(0)
	{
		l := uint64(len(d.Feature))
		// Varint-encode the element count.
		{
			t := uint64(l)
			for t >= 0x80 {
				buf[i+0] = byte(t) | 0x80
				t >>= 7
				i++
			}
			buf[i+0] = byte(t)
			i++
		}
		for k0 := range d.Feature {
			{
				// Bit-cast the float32 to uint32, then write little-endian.
				v := *(*uint32)(unsafe.Pointer(&(d.Feature[k0])))
				buf[i+0+0] = byte(v >> 0)
				buf[i+1+0] = byte(v >> 8)
				buf[i+2+0] = byte(v >> 16)
				buf[i+3+0] = byte(v >> 24)
			}
			i += 4
		}
	}
	{
		l := uint64(len(d.GroupLabel))
		{
			t := uint64(l)
			for t >= 0x80 {
				buf[i+0] = byte(t) | 0x80
				t >>= 7
				i++
			}
			buf[i+0] = byte(t)
			i++
		}
		copy(buf[i+0:], d.GroupLabel)
		i += l
	}
	// The four fixed uint32 fields at offsets i, i+4, i+8, i+12.
	{
		buf[i+0+0] = byte(d.Size1 >> 0)
		buf[i+1+0] = byte(d.Size1 >> 8)
		buf[i+2+0] = byte(d.Size1 >> 16)
		buf[i+3+0] = byte(d.Size1 >> 24)
	}
	{
		buf[i+0+4] = byte(d.Size2 >> 0)
		buf[i+1+4] = byte(d.Size2 >> 8)
		buf[i+2+4] = byte(d.Size2 >> 16)
		buf[i+3+4] = byte(d.Size2 >> 24)
	}
	{
		buf[i+0+8] = byte(d.Dim1 >> 0)
		buf[i+1+8] = byte(d.Dim1 >> 8)
		buf[i+2+8] = byte(d.Dim1 >> 16)
		buf[i+3+8] = byte(d.Dim1 >> 24)
	}
	{
		buf[i+0+12] = byte(d.Dim2 >> 0)
		buf[i+1+12] = byte(d.Dim2 >> 8)
		buf[i+2+12] = byte(d.Dim2 >> 16)
		buf[i+3+12] = byte(d.Dim2 >> 24)
	}
	return buf[:i+16], nil
}
// UnmarshalKey decodes the MarshalKey wire format from buf into d,
// reusing existing Feature/GroupLabel capacity where possible, and
// returns the number of bytes consumed. The error result is always nil;
// a short buffer panics on an out-of-range index rather than erroring.
func UnmarshalKey(d *pb.DatumKey, buf []byte) (uint64, error) {
	i := uint64(0)
	{
		// Varint-decode the Feature element count.
		l := uint64(0)
		{
			bs := uint8(7)
			t := uint64(buf[i+0] & 0x7F)
			for buf[i+0]&0x80 == 0x80 {
				i++
				t |= uint64(buf[i+0]&0x7F) << bs
				bs += 7
			}
			i++
			l = t
		}
		// Reuse the existing backing array when it is large enough.
		if uint64(cap(d.Feature)) >= l {
			d.Feature = d.Feature[:l]
		} else {
			d.Feature = make([]float32, l)
		}
		for k0 := range d.Feature {
			{
				// Little-endian uint32, bit-cast back to float32.
				v := 0 | (uint32(buf[i+0+0]) << 0) | (uint32(buf[i+1+0]) << 8) | (uint32(buf[i+2+0]) << 16) | (uint32(buf[i+3+0]) << 24)
				d.Feature[k0] = *(*float32)(unsafe.Pointer(&v))
			}
			i += 4
		}
	}
	{
		// Varint-decode the GroupLabel length, then copy the bytes.
		l := uint64(0)
		{
			bs := uint8(7)
			t := uint64(buf[i+0] & 0x7F)
			for buf[i+0]&0x80 == 0x80 {
				i++
				t |= uint64(buf[i+0]&0x7F) << bs
				bs += 7
			}
			i++
			l = t
		}
		if uint64(cap(d.GroupLabel)) >= l {
			d.GroupLabel = d.GroupLabel[:l]
		} else {
			d.GroupLabel = make([]byte, l)
		}
		copy(d.GroupLabel, buf[i+0:])
		i += l
	}
	// The four fixed little-endian uint32 fields.
	{
		d.Size1 = 0 | (uint32(buf[i+0+0]) << 0) | (uint32(buf[i+1+0]) << 8) | (uint32(buf[i+2+0]) << 16) | (uint32(buf[i+3+0]) << 24)
	}
	{
		d.Size2 = 0 | (uint32(buf[i+0+4]) << 0) | (uint32(buf[i+1+4]) << 8) | (uint32(buf[i+2+4]) << 16) | (uint32(buf[i+3+4]) << 24)
	}
	{
		d.Dim1 = 0 | (uint32(buf[i+0+8]) << 0) | (uint32(buf[i+1+8]) << 8) | (uint32(buf[i+2+8]) << 16) | (uint32(buf[i+3+8]) << 24)
	}
	{
		d.Dim2 = 0 | (uint32(buf[i+0+12]) << 0) | (uint32(buf[i+1+12]) << 8) | (uint32(buf[i+2+12]) << 16) | (uint32(buf[i+3+12]) << 24)
	}
	return i + 16, nil
}
////////////////////////
// SizeValue returns the number of bytes MarshalValue will produce for d:
// a varint length plus the Label bytes, plus 8 bytes for Version.
func SizeValue(d *pb.DatumValue) (s uint64) {
	{
		l := uint64(len(d.Label))
		// Varint size of the length prefix: one byte per 7 bits.
		{
			t := l
			for t >= 0x80 {
				t >>= 7
				s++
			}
			s++
		}
		s += l
	}
	// Version: fixed 8 bytes.
	s += 8
	return
}
// MarshalValue serializes d: Version as a little-endian uint64 in the
// first 8 bytes, then a varint-prefixed Label (all Label offsets are
// written relative to i+8). The error result is always nil.
func MarshalValue(d *pb.DatumValue) ([]byte, error) {
	size := SizeValue(d)
	buf := make([]byte, size)
	i := uint64(0)
	{
		// Fixed 8-byte Version at the start of the buffer.
		buf[0+0] = byte(d.Version >> 0)
		buf[1+0] = byte(d.Version >> 8)
		buf[2+0] = byte(d.Version >> 16)
		buf[3+0] = byte(d.Version >> 24)
		buf[4+0] = byte(d.Version >> 32)
		buf[5+0] = byte(d.Version >> 40)
		buf[6+0] = byte(d.Version >> 48)
		buf[7+0] = byte(d.Version >> 56)
	}
	{
		l := uint64(len(d.Label))
		// Varint-encode the Label length after the Version block.
		{
			t := uint64(l)
			for t >= 0x80 {
				buf[i+8] = byte(t) | 0x80
				t >>= 7
				i++
			}
			buf[i+8] = byte(t)
			i++
		}
		copy(buf[i+8:], d.Label)
		i += l
	}
	return buf[:i+8], nil
}
// UnmarshalValue decodes the MarshalValue wire format from buf into d
// (8-byte Version, then varint-prefixed Label) and returns the number of
// bytes consumed. The error result is always nil.
func UnmarshalValue(d *pb.DatumValue, buf []byte) (uint64, error) {
	i := uint64(0)
	{
		d.Version = 0 | (uint64(buf[i+0+0]) << 0) | (uint64(buf[i+1+0]) << 8) | (uint64(buf[i+2+0]) << 16) | (uint64(buf[i+3+0]) << 24) | (uint64(buf[i+4+0]) << 32) | (uint64(buf[i+5+0]) << 40) | (uint64(buf[i+6+0]) << 48) | (uint64(buf[i+7+0]) << 56)
	}
	{
		// Varint-decode the Label length (offsets relative to i+8).
		l := uint64(0)
		{
			bs := uint8(7)
			t := uint64(buf[i+8] & 0x7F)
			for buf[i+8]&0x80 == 0x80 {
				i++
				t |= uint64(buf[i+8]&0x7F) << bs
				bs += 7
			}
			i++
			l = t
		}
		// Reuse the existing backing array when it is large enough.
		if uint64(cap(d.Label)) >= l {
			d.Label = d.Label[:l]
		} else {
			d.Label = make([]byte, l)
		}
		copy(d.Label, buf[i+8:])
		i += l
	}
	return i + 8, nil
}
// DatumScore wraps a single float64 score with a fixed-size 8-byte
// binary encoding.
type DatumScore struct {
	Score float64
}
// Size returns the encoded size of a DatumScore: always 8 bytes.
func (d *DatumScore) Size() (s uint64) {
	s += 8
	return
}
// Marshal serializes the score by bit-casting the float64 to uint64 and
// writing it little-endian into an 8-byte buffer. The error result is
// always nil. (i stays 0 here, so buf[:i+8] is the whole buffer.)
func (d *DatumScore) Marshal() ([]byte, error) {
	size := d.Size()
	buf := make([]byte, size)
	i := uint64(0)
	{
		v := *(*uint64)(unsafe.Pointer(&(d.Score)))
		buf[0+0] = byte(v >> 0)
		buf[1+0] = byte(v >> 8)
		buf[2+0] = byte(v >> 16)
		buf[3+0] = byte(v >> 24)
		buf[4+0] = byte(v >> 32)
		buf[5+0] = byte(v >> 40)
		buf[6+0] = byte(v >> 48)
		buf[7+0] = byte(v >> 56)
	}
	return buf[:i+8], nil
}
// Unmarshal decodes a little-endian uint64 from the first 8 bytes of buf
// and bit-casts it back to the float64 score, returning the 8 bytes
// consumed. The error result is always nil.
func (d *DatumScore) Unmarshal(buf []byte) (uint64, error) {
	i := uint64(0)
	{
		v := 0 | (uint64(buf[0+0]) << 0) | (uint64(buf[1+0]) << 8) | (uint64(buf[2+0]) << 16) | (uint64(buf[3+0]) << 24) | (uint64(buf[4+0]) << 32) | (uint64(buf[5+0]) << 40) | (uint64(buf[6+0]) << 48) | (uint64(buf[7+0]) << 56)
		d.Score = *(*float64)(unsafe.Pointer(&v))
	}
	return i + 8, nil
}
|
package commands
import (
"fmt"
"github.com/getkin/kin-openapi/openapi3"
"github.com/michaelsauter/go-oas-server/pkg/generator"
)
// Generate renders Go files based on the OpenAPI specification in file
// into directory outputDir. It returns an error if the specification
// cannot be loaded or rendering fails.
func Generate(file string, outputDir string) error {
	spec, err := openapi3.NewSwaggerLoader().LoadSwaggerFromFile(file)
	if err != nil {
		// Wrap with %w so callers can inspect the cause via errors.Is/As.
		return fmt.Errorf("could not load OpenAPI specification from %s: %w", file, err)
	}
	return generator.New(spec).Render(outputDir)
}
|
package main
import (
"strconv"
)
/**
二进制求和
给你两个二进制字符串,返回它们的和(用二进制表示)。
输入为 非空 字符串且只包含数字 `1` 和 `0`。
示例1:
```
输入: a = "11", b = "1"
输出: "100"
```
示例2:
```
输入: a = "1010", b = "1011"
输出: "10101"
```
提示:
- 每个字符串仅由字符 `'0'` 或 `'1'` 组成。
- `1 <= a.length, b.length <= 10^4`
- 字符串如果不是 `"0"` ,就都不含前导零。
*/
// AddBinary returns the sum of the binary strings a and b, itself as a
// binary string. Digits are added column by column from the right with a
// running carry; a leading "1" is prepended when the final carry is set.
func AddBinary(a string, b string) string {
	longest := len(a)
	if len(b) > longest {
		longest = len(b)
	}
	result := ""
	carry := false
	for pos := 0; pos < longest; pos++ {
		digit := 0
		if pos < len(a) {
			digit += int(a[len(a)-1-pos] - '0')
		}
		if pos < len(b) {
			digit += int(b[len(b)-1-pos] - '0')
		}
		if carry {
			digit++
		}
		carry = digit >= 2
		if carry {
			digit -= 2
		}
		result = strconv.Itoa(digit) + result
	}
	if carry {
		result = "1" + result
	}
	return result
}
// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
|
package aoc2015
import (
"testing"
aoc "github.com/janreggie/aoc/internal"
"github.com/stretchr/testify/assert"
)
// day13sampleScenario parses the sample input into a tableScenario.
func day13sampleScenario() *tableScenario {
	// guaranteed not to error
	scenario, _ := newTableScenario(day13sampleInput)
	return scenario
}
// day13myScenario parses the puzzle input into a tableScenario.
// The error is ignored for the same reason as day13sampleScenario.
func day13myScenario() *tableScenario {
	scenario, _ := newTableScenario(day13myInput)
	return scenario
}
// Test_excludeVisitors checks that excludeVisitors returns a copy with
// the named visitor removed and leaves the input slice untouched.
func Test_excludeVisitors(t *testing.T) {
	assert := assert.New(t)
	oldVisitors := []visitor{"Alice", "Bob", "Carol", "David", "Eric"}
	newVisitors := excludeVisitors(oldVisitors, "Carol")
	assert.ElementsMatch([]visitor{"Alice", "Bob", "Carol", "David", "Eric"}, oldVisitors)
	assert.ElementsMatch([]visitor{"Alice", "Bob", "David", "Eric"}, newVisitors)
}
// Test_newTableScenario checks that parsing the sample input yields the
// expected visitor list and pairwise happiness table.
func Test_newTableScenario(t *testing.T) {
	scenario := day13sampleScenario()
	assert := assert.New(t)
	assert.ElementsMatch([]visitor{"Alice", "Bob", "Carol", "David"},
		scenario.visitors)
	assert.Equal(map[visitorPair]happiness{
		{"Alice", "Bob"}:   137,
		{"Alice", "Carol"}: -141,
		{"Alice", "David"}: 44,
		{"Bob", "Carol"}:   53,
		{"Bob", "David"}:   -70,
		{"Carol", "David"}: 96,
	}, scenario.potentialHappiness)
}
// Test_tableScenario_happiestExhaustive checks the exhaustive solver
// against the sample input's known optimum of 330.
func Test_tableScenario_happiestExhaustive(t *testing.T) {
	assert := assert.New(t)
	scenario := day13sampleScenario()
	assert.EqualValues(330, scenario.happiestExhaustive())
}
// Test_tableScenario_happiestPermutative checks the permutative solver
// against the same sample optimum of 330.
func Test_tableScenario_happiestPermutative(t *testing.T) {
	assert := assert.New(t)
	scenario := day13sampleScenario()
	assert.EqualValues(330, scenario.happiestPermutative())
}
// Test_seatingArrangement builds an arrangement step by step on the
// puzzle-input scenario, checking the running state after every add and
// that duplicate or unknown visitors are rejected.
func Test_seatingArrangement(t *testing.T) {
	assert := assert.New(t)
	scenario := day13myScenario()
	arrangement, err := newSeatingArrangement("Alice", "Bob", scenario)
	assert.NoError(err)
	assert.Equal(seatingArrangement{
		basis:     scenario,
		raw:       []visitor{"Alice", "Bob"},
		remaining: []visitor{"Carol", "David", "Eric", "Frank", "George", "Mallory"},
		happiness: 42,
	}, arrangement, "added Alice and Bob")
	arrangement, err = arrangement.add("David")
	assert.NoError(err)
	assert.Equal(seatingArrangement{
		basis:     scenario,
		raw:       []visitor{"Alice", "Bob", "David"},
		remaining: []visitor{"Carol", "Eric", "Frank", "George", "Mallory"},
		happiness: 9,
	}, arrangement, "added David")
	// Unknown and already-seated visitors must be rejected.
	_, err = arrangement.add("Earl")
	assert.Error(err, "added nonexistent Earl")
	_, err = arrangement.add("David")
	assert.Error(err, "added already present David")
	arrangement, err = arrangement.add("Mallory")
	assert.NoError(err)
	assert.Equal(seatingArrangement{
		basis:     scenario,
		raw:       []visitor{"Alice", "Bob", "David", "Mallory"},
		remaining: []visitor{"Carol", "Eric", "Frank", "George"},
		happiness: -30,
	}, arrangement, "added Mallory")
	_, err = arrangement.add("Mallory")
	assert.Error(err, "added already present Mallory")
	arrangement, err = arrangement.add("George")
	assert.NoError(err)
	assert.Equal(seatingArrangement{
		basis:     scenario,
		raw:       []visitor{"Alice", "Bob", "David", "Mallory", "George"},
		remaining: []visitor{"Carol", "Eric", "Frank"},
		happiness: -22,
	}, arrangement, "added George")
	arrangement, err = arrangement.add("Eric")
	assert.NoError(err)
	assert.Equal(seatingArrangement{
		basis:     scenario,
		raw:       []visitor{"Alice", "Bob", "David", "Mallory", "George", "Eric"},
		remaining: []visitor{"Carol", "Frank"},
		happiness: -59,
	}, arrangement, "added Eric")
	// Every visitor already seated is still rejected at this point.
	_, err = arrangement.add("Eric")
	assert.Error(err, "added already present Eric")
	_, err = arrangement.add("Alice")
	assert.Error(err, "added already present Alice")
	_, err = arrangement.add("Bob")
	assert.Error(err, "added already present Bob")
	_, err = arrangement.add("David")
	assert.Error(err, "added already present David")
	_, err = arrangement.add("Mallory")
	assert.Error(err, "added already present Mallory")
	arrangement, err = arrangement.add("Carol")
	assert.NoError(err)
	assert.Equal(seatingArrangement{
		basis:     scenario,
		raw:       []visitor{"Alice", "Bob", "David", "Mallory", "George", "Eric", "Carol"},
		remaining: []visitor{"Frank"},
		happiness: 136,
	}, arrangement, "added Carol")
	// Adding the last visitor closes the circle.
	arrangement, err = arrangement.add("Frank")
	assert.NoError(err)
	assert.Equal(seatingArrangement{
		basis:     scenario,
		raw:       []visitor{"Alice", "Bob", "David", "Mallory", "George", "Eric", "Carol", "Frank"},
		remaining: []visitor{},
		happiness: 228,
	}, arrangement, "added Frank and went full circle")
}
// Test_seatingArrangementQueue pushes three starting arrangements rooted
// at Alice onto a queue and checks its contents.
func Test_seatingArrangementQueue(t *testing.T) {
	assert := assert.New(t)
	scenario := day13sampleScenario()
	queue := newSeatingArrangementQueue()
	// Seating Alice next to herself must error; the valid pairs below
	// must not.
	arrangement, err := newSeatingArrangement("Alice", "Alice", scenario)
	assert.Error(err)
	arrangement, err = newSeatingArrangement("Alice", "Bob", scenario)
	assert.NoError(err)
	queue.push(arrangement)
	arrangement, err = newSeatingArrangement("Alice", "Carol", scenario)
	assert.NoError(err)
	queue.push(arrangement)
	arrangement, err = newSeatingArrangement("Alice", "David", scenario)
	assert.NoError(err)
	queue.push(arrangement)
	assert.ElementsMatch([]seatingArrangement{
		{scenario, []visitor{"Alice", "Carol"}, []visitor{"Bob", "David"}, -141},
		{scenario, []visitor{"Alice", "David"}, []visitor{"Bob", "Carol"}, 44},
		{scenario, []visitor{"Alice", "Bob"}, []visitor{"Carol", "David"}, 137},
	}, queue)
}
// TestDay13 runs the full Day13 solver against the sample and puzzle
// inputs, checking both parts' answers.
func TestDay13(t *testing.T) {
	assert := assert.New(t)
	testCases := []aoc.TestCase{
		{Details: "Y2015D13 sample input",
			Input:   day13sampleInput,
			Result1: "330",
			Result2: "286"},
		{Details: "Y2015D13 my input",
			Input:   day13myInput,
			Result1: "733",
			Result2: "725"},
	}
	for _, tt := range testCases {
		tt.Test(Day13, assert)
	}
}
// Benchmark_tableScenario_happiestExhaustive benchmarks the exhaustive
// solver (input parsing is included in each iteration).
func Benchmark_tableScenario_happiestExhaustive(b *testing.B) {
	for ii := 0; ii < b.N; ii++ {
		day13myScenario().happiestExhaustive()
	}
}
// Benchmark_tableScenario_happiestPermutative benchmarks the permutative
// solver (input parsing is included in each iteration).
func Benchmark_tableScenario_happiestPermutative(b *testing.B) {
	for ii := 0; ii < b.N; ii++ {
		day13myScenario().happiestPermutative()
	}
}
// BenchmarkDay13 benchmarks the full Day13 solver on the puzzle input.
func BenchmarkDay13(b *testing.B) {
	aoc.Benchmark(Day13, b, day13myInput)
}
|
/*
Copyright 2018 The HAWQ Team.
*/
package controller
|
package config
import (
"fmt"
"io/ioutil"
"path/filepath"
"github.com/fsnotify/fsnotify"
)
// init registers the file provider with the package-level registry at load time.
func init() {
	RegisterProvider(newFileProvider())
}
// newFileProvider builds a FileProvider. If an fsnotify watcher can be
// created, change watching is enabled and the event loop is started;
// otherwise the provider silently degrades to read-only behavior.
func newFileProvider() *FileProvider {
	fp := &FileProvider{
		cache:           make(map[string]string),
		cb:              make(chan ProviderCallback),
		disabledWatcher: true,
	}
	if watcher, err := fsnotify.NewWatcher(); err == nil {
		fp.disabledWatcher = false
		fp.watcher = watcher
		go fp.run()
	}
	return fp
}
// FileProvider reads configuration content from the file system and can
// notify callbacks when watched files change.
type FileProvider struct {
	disabledWatcher bool               // true when fsnotify is unavailable
	watcher         *fsnotify.Watcher  // file-change watcher (nil when disabled)
	cb              chan ProviderCallback // delivers registered callbacks to run()
	cache           map[string]string  // cleaned path -> original path as passed to Read
}
// Name returns the provider's registry name, "file".
func (*FileProvider) Name() string {
	return "file"
}
// Read reads the given file. When watching is enabled the path is also
// added to the watcher and remembered (keyed by its cleaned form) so
// change events can be mapped back to the original path.
func (fp *FileProvider) Read(path string) ([]byte, error) {
	if !fp.disabledWatcher {
		if err := fp.watcher.Add(path); err != nil {
			return nil, err
		}
		fp.cache[filepath.Clean(path)] = path
	}
	data, err := ioutil.ReadFile(path)
	if err != nil {
		// BUG FIX: the log line was missing its trailing newline.
		fmt.Printf("Failed to read file %v\n", err)
		return nil, err
	}
	return data, nil
}
// Watch registers a file-change callback. It is a no-op when the watcher
// is disabled; otherwise the callback is handed to the run() loop.
func (fp *FileProvider) Watch(cb ProviderCallback) {
	if !fp.disabledWatcher {
		fp.cb <- cb
	}
}
// run is the watcher event loop: it collects registered callbacks and,
// for each fsnotify event on a cached path, re-reads the file and
// invokes every callback in its own goroutine.
// NOTE(review): fp.watcher.Errors is never drained, and there is no
// shutdown path — confirm the provider lives for the whole process.
func (fp *FileProvider) run() {
	fn := make([]ProviderCallback, 0)
	for {
		select {
		case i := <-fp.cb:
			fn = append(fn, i)
		case e := <-fp.watcher.Events:
			if data, err := ioutil.ReadFile(e.Name); err == nil {
				// Only paths previously registered through Read are reported.
				if path, ok := fp.cache[e.Name]; ok {
					for _, f := range fn {
						go f(path, data)
					}
				}
			}
		}
	}
}
|
package pacs
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00200102 is the XML document wrapper for a pacs.002.001.02
// PaymentStatusReport message.
type Document00200102 struct {
	XMLName xml.Name                `xml:"urn:iso:std:iso:20022:tech:xsd:pacs.002.001.02 Document"`
	Message *PaymentStatusReportV02 `xml:"pacs.002.001.02"`
}
// AddMessage allocates the document's PaymentStatusReportV02 payload and
// returns it so callers can populate it.
func (d *Document00200102) AddMessage() *PaymentStatusReportV02 {
	d.Message = new(PaymentStatusReportV02)
	return d.Message
}
// Scope
// The PaymentStatusReport message is sent by an instructed agent to the previous party in the payment chain. It is used to inform this party about the positive or negative status of an instruction (either single or file). It is also used to report on a pending instruction.
// Usage
// The PaymentStatusReport message is exchanged between agents to provide status information about instructions previously sent. Its usage will always be governed by a bilateral agreement between the agents.
// The PaymentStatusReport message can be used to provide information about the status (e.g. rejection, acceptance) of a credit transfer instruction, a direct debit instruction, as well as other intra-agent instructions (e.g. PaymentCancellationRequest).
// The PaymentStatusReport message refers to the original instruction(s) by means of references only or by means of references and a set of elements from the original instruction.
// The PaymentStatusReport message can be used in domestic and cross-border scenarios.
// The PaymentStatusReport message exchanged between agents is identified in the schema as follows:
// urn:iso:std:iso:20022:tech:xsd:pacs.002.001.02
type PaymentStatusReportV02 struct {
	// Set of characteristics shared by all individual transactions included in the message.
	GroupHeader *iso20022.GroupHeader5 `xml:"GrpHdr"`
	// Original group information concerning the group of transactions, to which the message refers to.
	OriginalGroupInformationAndStatus *iso20022.OriginalGroupInformation1 `xml:"OrgnlGrpInfAndSts"`
	// Information concerning the original transactions, to which the status report message refers.
	// Omitted from the XML when empty (omitempty).
	TransactionInformationAndStatus []*iso20022.PaymentTransactionInformation1 `xml:"TxInfAndSts,omitempty"`
}
// AddGroupHeader allocates the message's group header and returns it.
func (p *PaymentStatusReportV02) AddGroupHeader() *iso20022.GroupHeader5 {
	p.GroupHeader = new(iso20022.GroupHeader5)
	return p.GroupHeader
}
// AddOriginalGroupInformationAndStatus allocates the original-group
// information element and returns it.
func (p *PaymentStatusReportV02) AddOriginalGroupInformationAndStatus() *iso20022.OriginalGroupInformation1 {
	p.OriginalGroupInformationAndStatus = new(iso20022.OriginalGroupInformation1)
	return p.OriginalGroupInformationAndStatus
}
// AddTransactionInformationAndStatus appends a new, empty transaction
// status element to the message and returns it for population.
func (p *PaymentStatusReportV02) AddTransactionInformationAndStatus() *iso20022.PaymentTransactionInformation1 {
	newValue := new(iso20022.PaymentTransactionInformation1)
	p.TransactionInformationAndStatus = append(p.TransactionInformationAndStatus, newValue)
	return newValue
}
|
package main
import (
"context"
"encoding/json"
"fmt"
"net/http"
"os"
pb "github.com/polarbroadband/gnmi/pkg/gnmiprobe"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"github.com/kr/pretty"
log "github.com/sirupsen/logrus"
)
// Package-level configuration, mostly sourced from the environment.
var (
	ENCODING = "JSON"
	// container image release
	RELEASE = os.Getenv("RELEASE_CLNT")
	// container name
	HOST = os.Getenv("HOST_CLNT")
	// probe hostname used for both HTTP checks and the gRPC dial
	PROBE = os.Getenv("HOST_PROBE")
	// JWT shared secret
	TOKENSEC = []byte(os.Getenv("BACKEND_TOKEN"))
)
// init configures the package-level default logger: JSON format to
// stdout at trace level.
func init() {
	// config package level default logger
	log.SetFormatter(&log.JSONFormatter{})
	log.SetOutput(os.Stdout)
	log.SetLevel(log.TraceLevel)
}
// main checks the probe's HTTP healtz and ready endpoints, dials its
// gRPC port over TLS, issues a Healtz RPC, then blocks forever.
func main() {
	if res, err := http.Get("http://" + PROBE + ":8060/healtz"); err != nil {
		log.Fatalf("%s healtz check fail: %v", PROBE, err)
	} else {
		var rb map[string]interface{}
		json.NewDecoder(res.Body).Decode(&rb)
		res.Body.Close()
		pretty.Printf("\n*** probe healtz check ***\n%# v\n", rb)
	}
	if res, err := http.Get("http://" + PROBE + ":8060/ready"); err != nil {
		log.Fatalf("%s load check fail: %v", PROBE, err)
	} else {
		var rb map[string]interface{}
		json.NewDecoder(res.Body).Decode(&rb)
		res.Body.Close()
		pretty.Printf("\n*** probe load check ***\n%# v\n", rb)
	}
	caCer, err := credentials.NewClientTLSFromFile("/appsrc/cert/ca.cert", "")
	if err != nil {
		log.Fatalf("unable to import ca certificate: %v", err)
	}
	// Set up TLS connection to the server
	probeConn, err := grpc.Dial(PROBE+":50051", grpc.WithTransportCredentials(caCer))
	if err != nil {
		// BUG FIX: was log.Fatal with printf-style arguments, which logs
		// the raw format string instead of formatting it.
		log.Fatalf("unable to connect %s: %v", PROBE, err)
	}
	defer probeConn.Close()
	probe := pb.NewProbeClient(probeConn)
	gCtx, gCancel := context.WithCancel(context.Background())
	defer gCancel()
	if res, err := probe.Healtz(gCtx, &pb.HealtzReq{}); err != nil {
		log.Fatalf("%s gRPC healtz check fail: %v", PROBE, err)
	} else {
		fmt.Printf("\n*** probe gRPC healtz check ***\nHost: %s\nRel: %s\nLoad: %v\n", res.GetHost(), res.GetRelease(), res.GetLoad())
	}
	// Block forever so the connection and deferred cleanup stay alive.
	hold := make(chan struct{})
	<-hold
}
|
package robot
import (
"encoding/json"
"fmt"
"robot-go/robot/msg"
"strconv"
)
// SendLogin sends the first message of the login flow, "bind_user",
// carrying the robot's user, hall game, and client identifiers.
func SendLogin(rb *Robot) {
	_msg := msg.NewMsgRequest("bind_user", "")
	_msg.SetParam("userId", rb.userId)
	_msg.SetParam("gameId", HALL_GAMEID)
	_msg.SetParam("clientId", rb.clientId)
	fmt.Println("SendLogin==", _msg.GetInfo())
	rb.Write(_msg)
}
// ReceiveLogin drives the login state machine from incoming messages:
// on "_connect_" it (re)sends the login, on "user_info" it enters the
// hall, on the hall's "game_data" it enters the target game, and on the
// target game's "game_data" it marks the robot as logged in.
func ReceiveLogin(mo *msg.Msg, rb *Robot) {
	fmt.Println("ReceiveLogin==", rb.logined)
	if rb.logined {
		return
	}
	cmd := mo.GetCmd().(string)
	if cmd == "_connect_" {
		SendLogin(rb)
		return
	}
	fmt.Println("gameId:", mo.GetResult("gameId"))
	// NOTE(review): when "gameId" is absent or not a json.Number this
	// returns early, which also skips the "user_info" branch below —
	// confirm user_info replies always carry a gameId.
	number, ok := mo.GetResult("gameId").(json.Number)
	if !ok {
		return
	}
	gameId, _ := strconv.Atoi(string(number))
	if cmd == "user_info" {
		_msg := msg.NewMsgRequest("game", "enter")
		_msg.SetParam("userId", rb.userId)
		_msg.SetParam("gameId", HALL_GAMEID)
		_msg.SetParam("clientId", rb.clientId)
		rb.Write(_msg)
	}
	if cmd == "game_data" && gameId == HALL_GAMEID {
		_msg := msg.NewMsgRequest("game", "enter")
		_msg.SetParam("userId", rb.userId)
		_msg.SetParam("gameId", GAMEID)
		_msg.SetParam("clientId", rb.clientId)
		rb.Write(_msg)
	}
	if cmd == "game_data" && gameId == GAMEID {
		// rb.gameData = mo.GetResult("result").(map[string]interface{})
		rb.logined = true
	}
}
|
package middleware
import (
"github.com/BukkitAPI-Translation-Group/docsbox/api"
"github.com/BukkitAPI-Translation-Group/docsbox/conf"
"github.com/labstack/echo"
"github.com/labstack/echo-contrib/session"
"net/http"
"strings"
)
// Auth returns an echo middleware that rejects requests whose session has
// no logged-in user name; when adminRequired is true the user must also
// be listed as an administrator.
func Auth(adminRequired bool) echo.MiddlewareFunc {
	return func(next echo.HandlerFunc) echo.HandlerFunc {
		return func(c echo.Context) error {
			sess, _ := session.Get("session", c)
			if _, ok := sess.Values["name"]; !ok {
				return c.JSON(http.StatusUnauthorized, api.Response{
					Code:    401,
					Message: "请登录后在进行操作.",
				})
			}
			// BUG FIX: the original used an unchecked type assertion that
			// panics if the session value is not a string; use the
			// comma-ok form and treat a non-string value like an empty name.
			name, ok := sess.Values["name"].(string)
			if !ok || len(name) == 0 {
				return c.JSON(http.StatusUnauthorized, api.Response{
					Code:    401,
					Message: "session中name值为空.",
				})
			}
			if adminRequired {
				if !isAdmin(name) {
					return c.JSON(http.StatusUnauthorized, api.Response{
						Code:    401,
						Message: "只有管理员才可以完成此操作.",
					})
				}
			}
			return next(c)
		}
	}
}
// isAdmin reports whether user appears in the comma-separated
// administrator list from the server configuration.
func isAdmin(user string) bool {
	for _, admin := range strings.Split(conf.Conf.Server.Administrators, ",") {
		if admin == user {
			return true
		}
	}
	return false
}
|
package resource
import (
"blog/resource/log"
"github.com/go-jar/mysql"
"blog/conf"
)
// MysqlClientPool is the shared MySQL connection pool; populated by InitMysql.
var MysqlClientPool *mysql.Pool

// InitMysql builds the global MySQL pool from the application's MySQL
// configuration (pool size and idle timeout).
func InitMysql() {
	config := &mysql.PoolConfig{NewClientFunc: NewMysqlClient}
	config.MaxConns = conf.MysqlConf.PoolSize
	config.MaxIdleTime = conf.MysqlConf.PoolClientMaxIdleTime
	MysqlClientPool = mysql.NewPool(config)
}
// NewMysqlClient creates one MySQL client from the application config;
// it is used by the pool as its client factory. The same timeout value
// is applied to both reads and writes.
func NewMysqlClient() (*mysql.Client, error) {
	config := mysql.NewConfig(conf.MysqlConf.User, conf.MysqlConf.Pass, conf.MysqlConf.Host, conf.MysqlConf.Port, conf.MysqlConf.Name)
	config.LogLevel = conf.MysqlConf.LogLevel
	config.ReadTimeout = conf.MysqlConf.RWTimeout
	config.WriteTimeout = conf.MysqlConf.RWTimeout
	return mysql.NewClient(config, log.TraceLogger)
}
|
package feed
import "camp/skel/model"
// Del deletes the feed identified by id and txt by delegating to the
// model layer; the model's error (if any) is returned unchanged.
func (feeds *Feeds) Del(id int, txt string) (err error) {
	feedsModel := model.NewFeed()
	feedsModel.Id = id
	feedsModel.Txt = txt
	// The original if/return/return dance added nothing; return directly.
	return feedsModel.Del()
}
|
package raft
import (
"bytes"
"math/rand"
"sync"
"sync/atomic"
"time"
"fmt"
"../labgob"
"../labrpc"
)
//
// Constants
//
const (
	// Server states.
	Candidate = 0
	Follower  = 1
	Leader    = 2
	// Timings in milliseconds: heartbeat period, election timeout base,
	// and the random range added on top of the base.
	HeartBeatInterval       = 100
	ElectionTimeout         = 150
	ElectionRandomTimeRange = 150
)
//
// Raft holds the per-peer state: persistence, leader bookkeeping, and
// the volatile commit/apply indexes.
//
type Raft struct {
	mu        sync.Mutex          // Lock to protect shared access to this peer's state
	peers     []*labrpc.ClientEnd // RPC end points of all peers
	persister *Persister          // Object to hold this peer's persisted state
	me        int                 // this peer's index into peers[]
	dead      int32               // set by Kill()

	state           int           // current role: Leader, Candidate or Follower
	leaderId        int           // lets a follower redirect client RPCs to the leader
	voteCount       int           // votes received; a majority makes this peer leader
	timestamp       time.Time     // start point for the election timeout
	electionTimeout time.Duration // current randomized election timeout

	// Persisted state.
	currentTerm int        // latest term this peer has seen
	votedFor    int        // candidate voted for in currentTerm
	logs        []LogEntry // all log entries; read-only once appended

	applyCh chan ApplyMsg // committed entries are delivered here to the state machine

	// Leader-only volatile state.
	// NOTE(review): the original comments on these two fields appear
	// swapped relative to their use in Start() — nextIndexes is set to
	// index+1 and matchIndexes to index there.
	matchIndexes []int // per-peer highest log index known to be replicated
	nextIndexes  []int // per-peer index of the next entry to send

	// Volatile on all servers.
	commitIndex int // index of the highest committed log entry
	lastApplied int // index of the highest applied log entry

	// Lab 3b: real log index = offset + slice index.
	offset    int
	discardCh chan bool
}
// LogEntry is one replicated log record.
type LogEntry struct {
	Term         int         // term in which the entry was created
	Command      interface{} // client command payload
	CommandIndex int         // log index of the command
}
// persist encodes the durable fields (logs, currentTerm, votedFor) and
// saves them through the persister. Callers are responsible for holding
// any locks needed for a consistent snapshot.
func (rf *Raft) persist() {
	w := new(bytes.Buffer)
	e := labgob.NewEncoder(w)
	e.Encode(rf.logs)
	e.Encode(rf.currentTerm)
	e.Encode(rf.votedFor)
	data := w.Bytes()
	rf.persister.SaveRaftState(data)
}
// readPersist restores logs, currentTerm and votedFor from a previous
// persist() snapshot. Decoding must read fields in the same order they
// were encoded; a decode failure panics.
func (rf *Raft) readPersist(data []byte) {
	if data == nil || len(data) < 1 {
		return
	}
	r := bytes.NewBuffer(data)
	d := labgob.NewDecoder(r)
	var logs []LogEntry
	var currentTerm int
	var votedFor int
	if d.Decode(&logs) != nil ||
		d.Decode(&currentTerm) != nil ||
		d.Decode(&votedFor) != nil {
		panic("Decode error")
	} else {
		rf.logs = logs
		rf.currentTerm = currentTerm
		rf.votedFor = votedFor
	}
}
// getLastLogTermAndIndex returns the term of the last log entry and its
// slice index, under the peer's mutex. Must not be called while rf.mu is
// already held (the mutex is not reentrant).
func (rf *Raft) getLastLogTermAndIndex() (int, int) {
	rf.mu.Lock()
	defer rf.mu.Unlock()
	term := rf.logs[len(rf.logs)-1].Term
	index := len(rf.logs) - 1
	return term, index
}
// GetState returns the peer's current term and whether it believes it is
// the leader, read atomically under the mutex.
func (rf *Raft) GetState() (int, bool) {
	rf.mu.Lock()
	defer rf.mu.Unlock()
	return rf.currentTerm, rf.state == Leader
}
// Start asks this peer to append command to the replicated log. If the
// peer is the leader it appends a new entry locally, updates its own
// next/match bookkeeping, persists, and returns the entry's index; a
// non-leader returns index -1. Returns (index, term, isLeader).
func (rf *Raft) Start(command interface{}) (int, int, bool) {
	index := -1
	// Only the leader may accept client commands.
	term, isLeader := rf.GetState()
	if isLeader {
		_, lastLogIndex := rf.getLastLogTermAndIndex()
		rf.mu.Lock()
		// BUG FIX: the original redeclared index/term with := here, so
		// the shadowed values never reached the return statement and
		// Start always reported index == -1 even as leader.
		term = rf.currentTerm
		index = lastLogIndex + 1
		newEntry := LogEntry{
			Term:         term,
			Command:      command,
			CommandIndex: index,
		}
		rf.logs = append(rf.logs, newEntry)
		DPrintf("append new command to leader local logs, leader current term is %+v ,command index is %+v", term, index)
		// Update the leader's own replication bookkeeping.
		rf.nextIndexes[rf.me] = index + 1
		rf.matchIndexes[rf.me] = index
		rf.persist()
		rf.mu.Unlock()
	}
	return index, term, isLeader
}
// Kill marks the peer dead; long-running goroutines poll killed() and exit.
func (rf *Raft) Kill() {
	atomic.StoreInt32(&rf.dead, 1)
	// Your code here, if desired.
}
// killed reports whether Kill has been called on this peer.
func (rf *Raft) killed() bool {
	return atomic.LoadInt32(&rf.dead) == 1
}
//
// Snapshot support (Lab 3B).
//

// InstallSnapshotArgs carries a leader's snapshot to a follower.
type InstallSnapshotArgs struct {
	Term     int    // leader's term
	LeaderId int    // so the follower can track the leader
	State    []byte // raft state bytes accompanying the snapshot
	Snapshot []byte // serialized state-machine snapshot
}
// InstallSnapshotReply returns the follower's term so a stale leader can
// step down.
type InstallSnapshotReply struct {
	Term int
}
// DiscardPreviousLog drops log entries before serveridx and restarts the
// index bookkeeping at the snapshot boundary, then persists state and
// snapshot together and signals completion on discardCh.
// serveridx is a server-level log index; raftidx = serveridx - offset is
// the position inside rf.logs.
// NOTE(review): rf.mu is never taken here, and the raftidx < 1 early
// return sends nothing on discardCh (unlike the serveridx == -1 path) —
// confirm callers handle both.
func (rf *Raft) DiscardPreviousLog(serveridx int, snapshot []byte) {
	if serveridx == -1 {
		rf.discardCh <- false
		return
	}
	fmt.Printf("%v discard log before %v\n", rf.me, serveridx)
	raftidx := serveridx - rf.offset
	if raftidx < 1 {
		fmt.Printf("%v log index < 1 \n", rf.me)
		return
	}
	if raftidx <= len(rf.logs) {
		// Keep the entry at raftidx-1 as the new base of the log.
		rf.logs = rf.logs[raftidx - 1: ]
	} else {
		panic("discard wrong")
	}
	rf.offset += raftidx - 1
	// Encode votedFor, currentTerm, offset, the log length, then each
	// entry, and save it atomically with the snapshot.
	w := new(bytes.Buffer)
	e := labgob.NewEncoder(w)
	e.Encode(rf.votedFor)
	e.Encode(rf.currentTerm)
	e.Encode(rf.offset)
	nLog := len(rf.logs)
	e.Encode(nLog)
	for i := 0; i < nLog; i++ {
		entry := rf.logs[i]
		e.Encode(entry)
	}
	state := w.Bytes()
	rf.persister.SaveStateAndSnapshot(state, snapshot)
	rf.discardCh <- true
}
//
// Leader election.
//

// RequestVoteArgs is a candidate's vote request.
type RequestVoteArgs struct {
	Term         int // candidate's current term
	CandidateId  int // candidate asking for the vote
	LastLogIndex int // index of candidate's last log entry, for up-to-date check
	LastLogTerm  int // term of candidate's last log entry, for up-to-date check
}
// RequestVoteReply is a voter's answer.
type RequestVoteReply struct {
	Term        int  // voter's term, so a stale candidate can update itself
	VoteGranted bool // true when the voter granted its vote to the candidate
}
// RequestVote is the vote-request RPC handler. It grants a vote when the
// candidate's log is at least as up to date as ours and we have not yet
// voted for someone else this term; it also adopts a newer term and
// falls back to follower when the candidate's term is ahead.
func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {
	rf.mu.Lock()
	// BUG FIX: defers run LIFO, so persist() must be registered after
	// Unlock to execute while the lock is still held; the original order
	// persisted after unlocking, racing with concurrent mutations.
	defer rf.mu.Unlock()
	defer rf.persist()
	reply.VoteGranted = false
	// Reject candidates whose log is behind ours.
	if !isCandidateUpToDate(args, rf) {
		reply.Term = rf.currentTerm
	} else {
		// Grant the vote if we have not voted this term, or already
		// voted for this same candidate.
		if rf.votedFor == -1 || rf.votedFor == args.CandidateId {
			DPrintf("vote for %+v , current term is %+v ...", args.CandidateId, args.Term)
			reply.VoteGranted = true
			rf.votedFor = args.CandidateId
			// BUG FIX: the original called rf.resetElectionTimeout(),
			// which takes rf.mu — already held here — and so
			// self-deadlocked (Go mutexes are not reentrant). Reset inline.
			rf.electionTimeout = time.Duration(rand.Intn(ElectionRandomTimeRange)+ElectionTimeout) * time.Millisecond
			rf.timestamp = time.Now()
		}
	}
	// Adopt a newer term and step down to follower; report our term back.
	if args.Term > rf.currentTerm {
		rf.currentTerm = args.Term
		// BUG FIX: rf.changeState() also locks rf.mu and would deadlock
		// here; assign the state directly while holding the lock.
		rf.state = Follower
		reply.Term = rf.currentTerm
	}
}
// sendRequestVote issues the Raft.RequestVote RPC to the given peer and
// reports whether the RPC round-trip completed.
func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {
	return rf.peers[server].Call("Raft.RequestVote", args, reply)
}
// isCandidateUpToDate implements the Raft election restriction (§5.4.1):
// the candidate's log is up-to-date if its last entry has a strictly newer
// term, or the same term and an index at least as large as ours.
func isCandidateUpToDate(args *RequestVoteArgs, rf *Raft) bool {
	lastLog := rf.logs[len(rf.logs)-1]
	// Compare last-entry terms first; only equal terms fall through to the
	// index comparison.
	if args.LastLogTerm != lastLog.Term {
		// BUG FIX: the original only rejected on a strictly older term and
		// then still applied the index test, wrongly rejecting candidates
		// with a newer last-log term but a shorter log. Also, the debug log
		// printed rf.currentTerm instead of our last log term.
		if args.LastLogTerm < lastLog.Term {
			DPrintf("reject vote for %+v, my latest log term : candidate latest log term is %+v : %+v", args.CandidateId, lastLog.Term, args.LastLogTerm)
			return false
		}
		return true
	}
	if args.LastLogIndex >= lastLog.CommandIndex {
		return true
	}
	DPrintf("reject vote for %+v, my latest log index : candidate latest log index is %+v : %+v", args.CandidateId, lastLog.CommandIndex, args.LastLogIndex)
	return false
}
// changeState sets the server's role (Follower/Candidate/Leader) under the
// lock. Callers must NOT already hold rf.mu: the mutex is acquired here.
func (rf *Raft) changeState(state int) {
	rf.mu.Lock()
	defer rf.mu.Unlock()
	rf.state = state
}
// isElectionTimeout reports whether the election timer has expired since
// it was last reset.
func (rf *Raft) isElectionTimeout() bool {
	rf.mu.Lock()
	defer rf.mu.Unlock()
	// Idiom: time.Since(t) replaces time.Now().Sub(t).
	return time.Since(rf.timestamp) > rf.electionTimeout
}
// resetElectionTimeout restarts the election timer with a fresh randomized
// timeout, so peers do not all time out simultaneously.
// Callers must NOT hold rf.mu: the lock is acquired here.
func (rf *Raft) resetElectionTimeout() {
	rf.mu.Lock()
	defer rf.mu.Unlock()
	jitterMs := rand.Intn(ElectionRandomTimeRange) + ElectionTimeout
	rf.electionTimeout = time.Duration(jitterMs) * time.Millisecond
	rf.timestamp = time.Now()
}
// runLeader performs one leader iteration: broadcast a round of heartbeats,
// then idle for one heartbeat interval.
func (rf *Raft) runLeader() {
	rf.sendHeartbeat()
	interval := time.Duration(HeartBeatInterval) * time.Millisecond
	time.Sleep(interval)
}
// runCandidate performs one candidate iteration: run an election, retry
// once on timeout, and promote to leader on a majority of votes.
func (rf *Raft) runCandidate() {
	// 1. Start a leader election.
	rf.startLeaderElection()
	// 2. Retry once if this round timed out.
	if rf.isElectionTimeout() {
		rf.startLeaderElection()
	}
	// 3. Become leader when more than half the peers voted for us.
	rf.mu.Lock()
	if rf.voteCount > len(rf.peers)/2 {
		// BUG FIX: rf.changeState(Leader) re-acquires rf.mu (non-reentrant)
		// and deadlocks; assign the state directly while holding the lock.
		rf.state = Leader
		rf.persist()
	}
	rf.mu.Unlock()
	// 4. Brief pause before the next state-machine iteration.
	time.Sleep(10 * time.Millisecond)
}
// runFollower performs one follower iteration: become a candidate when the
// election timer has expired.
//
// BUG FIX: the original called rf.resetElectionTimeout() immediately before
// the check, so the timer was re-armed on every iteration and could never
// expire — a follower never became a candidate. The timer is reset by
// incoming RPCs (RequestVote / AppendLogEntries), not here.
func (rf *Raft) runFollower() {
	if rf.isElectionTimeout() {
		rf.changeState(Candidate)
	}
	// Brief pause before the next state-machine iteration.
	time.Sleep(10 * time.Millisecond)
}
// Run drives the server's role state machine until the peer is killed,
// dispatching each iteration to the handler for the current role.
func (rf *Raft) Run() {
	for !rf.killed() {
		rf.mu.Lock()
		state := rf.state
		rf.mu.Unlock()
		switch state {
		case Candidate:
			rf.runCandidate()
		case Follower:
			rf.runFollower()
		case Leader:
			// BUG FIX: the original dispatched Leader to rf.runFollower(),
			// so an elected leader never sent heartbeats.
			rf.runLeader()
		default:
			panic("wrong state")
		}
	}
}
// startLeaderElection runs one election round: bump the term, vote for
// ourselves, and request votes from every other peer in parallel.
func (rf *Raft) startLeaderElection() {
	// 1. Reset the election timer for this round.
	rf.resetElectionTimeout()
	DPrintf("server %+v : start election", rf.me)
	// 2. Enter the new term as a candidate voting for ourselves.
	rf.mu.Lock()
	rf.voteCount = 1
	rf.currentTerm += 1
	rf.votedFor = rf.me
	// currentTerm and votedFor are persistent state; save them before
	// any RPC can observe the new term.
	rf.persist()
	rf.mu.Unlock()
	// 3. Request a vote from every peer except ourselves.
	for i := range rf.peers {
		if i == rf.me {
			continue
		}
		// 4. Abort when we are no longer a candidate (stepped down, or won).
		rf.mu.Lock()
		state := rf.state
		rf.mu.Unlock()
		if state != Candidate {
			// BUG FIX: the original tested 'state != Leader', which is true
			// for a candidate too, so the loop returned before sending a
			// single vote request and no election could ever succeed.
			DPrintf("server %+v : fail to be elected because of state", rf.me)
			return
		}
		go func(index int) {
			// 5. Build the request from our latest log entry.
			rf.mu.Lock()
			lastLog := rf.logs[len(rf.logs)-1]
			requestArgs := RequestVoteArgs{
				Term:        rf.currentTerm,
				CandidateId: rf.me,
				// BUG FIX: send the entry's CommandIndex (the server-side log
				// index) rather than the slice position; voters compare
				// against CommandIndex in isCandidateUpToDate, and the two
				// diverge once a snapshot offset exists.
				LastLogIndex: lastLog.CommandIndex,
				LastLogTerm:  lastLog.Term,
			}
			rf.mu.Unlock()
			reply := RequestVoteReply{}
			// 6. Send the vote request.
			if rf.sendRequestVote(index, &requestArgs, &reply) {
				// 7. Process the reply.
				rf.mu.Lock()
				if reply.VoteGranted {
					// 7.1 Vote received.
					rf.voteCount += 1
				} else if reply.Term > rf.currentTerm {
					// 7.2 A newer term exists: adopt it, step down, persist.
					// (A rejection with an equal term just means the peer
					// voted for someone else and is ignored.)
					// BUG FIX: rf.changeState(Follower) would re-acquire
					// rf.mu (non-reentrant) and deadlock; assign directly.
					rf.currentTerm = reply.Term
					rf.state = Follower
					rf.persist()
				}
				rf.mu.Unlock()
			}
		}(i)
	}
}
//
// Log replication
//
// ApplyMsg is the message sent on applyCh for every committed log entry so
// the service above Raft can apply the command.
type ApplyMsg struct {
	CommandValid bool        // true when Command carries a newly committed entry
	Command      interface{} // the command to apply
	CommandIndex int         // log index of the command
}

// Every committed log entry is converted into an ApplyMsg and pushed onto
// applyCh to await application. Commands are registered on arrival; once a
// message is known to be executed, the service can use the information it
// carries to locate the originating client and reply that the command ran.
// AppendLogEntriesArgs is the argument of the AppendEntries RPC
// (heartbeats and log replication share the same message).
type AppendLogEntriesArgs struct {
	Term         int        // leader's term
	LeaderId     int        // lets followers redirect misdirected clients to the leader
	PreLogIndex  int        // index of the entry immediately preceding the new ones, for the consistency check
	PreLogTerm   int        // term of the PreLogIndex entry, for the consistency check
	Entries      []LogEntry // entries to append (possibly several at once for efficiency; empty for a pure heartbeat)
	LeaderCommit int        // leader's commitIndex
}

// AppendLogEntriesReply is the result of the AppendEntries RPC.
type AppendLogEntriesReply struct {
	Term    int  // receiver's term, so a stale leader can update itself and step down
	XTerm   int  // term of the conflicting entry (-1 when none)
	XIndex  int  // index of the first entry carrying XTerm (-1 when none)
	Success bool // true when the follower's log matched PreLogIndex/PreLogTerm
}
// AppendLogEntries is the AppendEntries RPC handler: it performs the log
// consistency check, appends/overwrites entries, advances commitIndex, and
// resets the election timer.
//
// rf.mu is held for the whole handler, so helpers that lock rf.mu
// themselves (changeState, resetElectionTimeout) must not be called here.
func (rf *Raft) AppendLogEntries(args *AppendLogEntriesArgs, reply *AppendLogEntriesReply) {
	// 1. Initialize the reply.
	reply.XTerm = -1
	reply.XIndex = -1
	reply.Success = false
	// 2. Reject a leader from an older term and report our term.
	rf.mu.Lock()
	defer rf.mu.Unlock()
	defer rf.persist() // any log/term mutation below must be persisted
	if rf.currentTerm > args.Term {
		reply.Term = rf.currentTerm
		return
	}
	// 3. Log consistency check. Two failure shapes:
	//    - our log is too short to contain PreLogIndex at all, or
	//    - the entry at PreLogIndex carries a conflicting term.
	if args.PreLogIndex >= len(rf.logs) {
		// BUG FIX: the original used '>' and then indexed
		// rf.logs[len(rf.logs)], which is always out of range, and also
		// panicked when PreLogIndex == len(rf.logs).
		reply.XTerm = rf.logs[len(rf.logs)-1].Term
	} else if rf.logs[args.PreLogIndex].Term != args.PreLogTerm {
		// BUG FIX: the original compared the entry's term against
		// args.PreLogIndex (an index), not args.PreLogTerm.
		reply.XTerm = rf.logs[args.PreLogIndex].Term
		// Walk back to the first entry of the conflicting term.
		xIndex := args.PreLogIndex
		for xIndex > 1 && rf.logs[xIndex-1].Term == reply.XTerm {
			xIndex -= 1
		}
		reply.XIndex = xIndex
	} else {
		// 3.2/3.3 PreLog matches: append the new entries, truncating any
		// conflicting suffix we may hold beyond PreLog.
		reply.Success = true
		if len(args.Entries) > 0 {
			for _, entry := range args.Entries {
				if entry.CommandIndex < len(rf.logs) {
					// Conflicting existing entry: drop it and everything after.
					if rf.logs[entry.CommandIndex].Term != entry.Term {
						rf.logs = rf.logs[:entry.CommandIndex]
						rf.logs = append(rf.logs, entry)
					}
				} else {
					rf.logs = append(rf.logs, entry)
				}
			}
			rf.nextIndexes[rf.me] = len(rf.logs)
			rf.matchIndexes[rf.me] = len(rf.logs) - 1
		}
		// 4. commitIndex = min(leaderCommit, index of our last entry).
		if args.LeaderCommit > rf.commitIndex {
			if args.LeaderCommit < len(rf.logs)-1 {
				rf.commitIndex = args.LeaderCommit
			} else {
				rf.commitIndex = len(rf.logs) - 1
			}
		}
	}
	// 5. Adopt a newer term and step down (we might be a candidate).
	if args.Term > rf.currentTerm {
		rf.currentTerm = args.Term
		// BUG FIX: rf.changeState(Follower) would re-acquire rf.mu
		// (non-reentrant) and deadlock; assign directly.
		rf.state = Follower
	}
	// 6. Report our (possibly updated) term.
	reply.Term = rf.currentTerm
	// 7. Apply any committed-but-unapplied entries.
	if rf.commitIndex > rf.lastApplied {
		rf.sendApplyMsg()
	}
	// 8. Every AppendEntries RPC resets the election timer.
	// BUG FIX: rf.resetElectionTimeout() locks rf.mu and would deadlock
	// here; reset the timer inline instead.
	rf.electionTimeout = time.Duration(rand.Intn(ElectionRandomTimeRange)+ElectionTimeout) * time.Millisecond
	rf.timestamp = time.Now()
}
// sendAppendLogEntries issues the Raft.AppendLogEntries RPC to the given
// peer and reports whether the RPC round-trip completed.
func (rf *Raft) sendAppendLogEntries(server int, args *AppendLogEntriesArgs, reply *AppendLogEntriesReply) bool {
	return rf.peers[server].Call("Raft.AppendLogEntries", args, reply)
}
// sendApplyMsg pushes every committed-but-unapplied log entry onto applyCh
// from a background goroutine, advancing lastApplied as each one is sent.
// NOTE(review): the entries slice aliases rf.logs and is read without the
// lock inside the goroutine, and overlapping calls could apply the same
// entries twice — presumably callers serialize invocations; TODO confirm.
func (rf *Raft) sendApplyMsg() {
	// Send commands in (lastApplied, commitIndex] to the apply channel.
	if rf.commitIndex > rf.lastApplied {
		// 1. The pending entries are those committed but not yet applied.
		unusedEntries := rf.logs[rf.lastApplied+1 : rf.commitIndex+1]
		go func(entries []LogEntry) {
			for _, entry := range entries {
				// 2. Convert each log entry into an ApplyMsg.
				msg := ApplyMsg{
					CommandValid: true,
					Command:      entry.Command,
					CommandIndex: entry.CommandIndex,
				}
				// 3. Push it onto the apply channel in log order.
				rf.applyCh <- msg
				// 4. Advance lastApplied under the lock.
				rf.mu.Lock()
				rf.lastApplied = msg.CommandIndex
				rf.mu.Unlock()
			}
		}(unusedEntries)
	}
}
//
// Shared leader machinery
//
// sendHeartbeat broadcasts AppendEntries RPCs (heartbeats that may carry
// log entries) to every peer, processes the replies, and finally tries to
// advance commitIndex once a majority of matchIndexes has moved past it.
func (rf *Raft) sendHeartbeat() {
	// 1. One goroutine per peer, skipping ourselves.
	for i := range rf.peers {
		if rf.me == i {
			continue
		}
		go func(index int) {
			// 2. Only a leader may send AppendEntries.
			rf.mu.Lock()
			if rf.state != Leader {
				rf.mu.Unlock()
				return
			}
			// 3. Build the arguments from this peer's nextIndex; copy the
			// entries so the RPC does not alias rf.logs.
			preLogIndex := rf.nextIndexes[index] - 1
			entries := make([]LogEntry, len(rf.logs[preLogIndex+1:]))
			copy(entries, rf.logs[preLogIndex+1:])
			args := AppendLogEntriesArgs{
				Term:         rf.currentTerm,
				LeaderId:     rf.me,
				PreLogIndex:  preLogIndex,
				PreLogTerm:   rf.logs[preLogIndex].Term,
				Entries:      entries,
				LeaderCommit: rf.commitIndex,
			}
			rf.mu.Unlock()
			reply := AppendLogEntriesReply{}
			// 4. Send and handle the reply.
			if rf.sendAppendLogEntries(index, &args, &reply) {
				rf.mu.Lock()
				if reply.Success {
					// 5.1 Success: advance this peer's match/next indexes.
					rf.matchIndexes[index] = args.PreLogIndex + len(args.Entries)
					rf.nextIndexes[index] = rf.matchIndexes[index] + 1
				} else if reply.Term > rf.currentTerm {
					// 5.2.1 We are stale: adopt the newer term, step down,
					// and persist.
					// BUG FIX: rf.changeState(Follower) would re-acquire
					// rf.mu (non-reentrant) and deadlock; assign directly.
					rf.currentTerm = reply.Term
					rf.state = Follower
					rf.persist()
				} else {
					// 5.2.2 Log inconsistency: back nextIndex off using the
					// XTerm/XIndex hints from the follower.
					if reply.XIndex != -1 {
						// Follower reported a conflicting term: retry from
						// the first entry of that term.
						rf.nextIndexes[index] = reply.XIndex + 1
					} else {
						// Follower's log is shorter than PreLogIndex: scan
						// our log backwards for the last entry of XTerm.
						nextIndex := reply.XIndex
						for j := args.PreLogIndex; j >= 1; j-- {
							if rf.logs[j].Term < reply.XTerm {
								break
							}
							if rf.logs[j].Term == reply.XTerm {
								nextIndex = j
							}
						}
						// BUG FIX: when XTerm is absent from our log the
						// original left nextIndex at -1, so the next round
						// computed preLogIndex = -2 and panicked; clamp to
						// the first real log position.
						if nextIndex < 1 {
							nextIndex = 1
						}
						rf.nextIndexes[index] = nextIndex
					}
				}
				rf.mu.Unlock()
			}
		}(i)
	}
	// 6. Advance commitIndex: find the smallest matchIndex above the current
	// commitIndex that a majority of peers has reached.
	rf.mu.Lock()
	if rf.commitIndex < rf.matchIndexes[rf.me] {
		c := 0
		minIndex := rf.matchIndexes[rf.me]
		for i := range rf.peers {
			if rf.matchIndexes[i] > rf.commitIndex {
				c += 1
				if rf.matchIndexes[i] < minIndex {
					minIndex = rf.matchIndexes[i]
				}
			}
		}
		// Only entries from the current term may be committed by counting
		// replicas (Raft §5.4.2).
		if c > len(rf.peers)/2 && rf.logs[minIndex].Term == rf.currentTerm {
			rf.commitIndex = minIndex
			rf.sendApplyMsg()
		}
	}
	rf.mu.Unlock()
}
// Make creates a Raft peer over the given RPC endpoints. me is this peer's
// index into peers, persister stores durable state, and committed entries
// are delivered on applyCh. Make returns quickly; the long-running state
// machine runs in its own goroutine.
func Make(peers []*labrpc.ClientEnd, me int,
	persister *Persister, applyCh chan ApplyMsg) *Raft {
	rf := &Raft{
		peers:        peers,
		persister:    persister,
		me:           me,
		state:        Follower,
		currentTerm:  0,
		votedFor:     -1,
		applyCh:      applyCh,
		matchIndexes: make([]int, len(peers)),
		nextIndexes:  make([]int, len(peers)),
		commitIndex:  0,
		lastApplied:  0,
	}
	// Sentinel entry at index 0 keeps log indexing 1-based.
	rf.logs = append(rf.logs, LogEntry{})
	rf.resetElectionTimeout()
	// Restore any state persisted before a crash.
	rf.readPersist(persister.ReadRaftState())
	go rf.Run()
	return rf
}
|
package blockchain
import "testing"
// TestCreateSimpleBlockchain verifies NewBlockchain returns a non-nil chain.
func TestCreateSimpleBlockchain(t *testing.T) {
	chain := NewBlockchain()
	if chain == nil {
		t.Error("Error creating Blockchain object")
	}
}
// TestCreateNextBlock verifies that a fresh chain can mint its next block.
func TestCreateNextBlock(t *testing.T) {
	chain := NewBlockchain()
	if next := chain.NextBlock(); next == nil {
		t.Error("Error creating next block in Blockchain")
	}
}
// TestCreateAndPrintBlockchain mints ten blocks and smoke-tests Print.
func TestCreateAndPrintBlockchain(t *testing.T) {
	chain := NewBlockchain()
	for n := 0; n < 10; n++ {
		chain.NextBlock()
	}
	chain.Print()
}
|
package annotations
import (
"strings"
"time"
"github.com/haproxytech/config-parser/v3/types"
"github.com/haproxytech/kubernetes-ingress/controller/haproxy/api"
"github.com/haproxytech/kubernetes-ingress/controller/store"
)
// GlobalHardStopAfter handles the global hard-stop-after annotation.
type GlobalHardStopAfter struct {
	name   string            // annotation name this handler answers to
	data   *types.StringC    // parsed value in HAProxy time format; nil means unset
	client api.HAProxyClient // configuration client the value is pushed through
}
// NewGlobalHardStopAfter returns a hard-stop-after annotation handler
// named n that applies changes through client c.
func NewGlobalHardStopAfter(n string, c api.HAProxyClient) *GlobalHardStopAfter {
	handler := GlobalHardStopAfter{
		name:   n,
		client: c,
	}
	return &handler
}
// GetName returns the annotation name this handler is registered under.
func (a *GlobalHardStopAfter) GetName() string {
	return a.name
}
// Parse validates the annotation value as a Go duration and stores it,
// with zero trailing units trimmed for readability ("1h0m0s" -> "1h",
// "5m0s" -> "5m"). An EMPTY status short-circuits unless forceParse is
// set; a DELETED status is accepted with no stored value.
func (a *GlobalHardStopAfter) Parse(input store.StringW, forceParse bool) error {
	if input.Status == store.EMPTY && !forceParse {
		return ErrEmptyStatus
	}
	if input.Status == store.DELETED {
		return nil
	}
	after, err := time.ParseDuration(input.Value)
	if err != nil {
		return err
	}
	// time.Duration.String() always spells out minutes/seconds; drop the
	// zero-valued suffixes ("...m0s", then "...h0m").
	duration := after.String()
	if strings.HasSuffix(duration, "m0s") {
		duration = duration[:len(duration)-2]
	}
	if strings.HasSuffix(duration, "h0m") {
		duration = duration[:len(duration)-2]
	}
	// BUG FIX: removed a duplicate 'if err != nil' check that was dead
	// code — err was already handled immediately after ParseDuration and
	// cannot change afterwards.
	a.data = &types.StringC{Value: duration}
	return nil
}
// Delete removes the hard-stop-after directive by pushing a nil value.
func (a *GlobalHardStopAfter) Delete() error {
	return a.client.GlobalHardStopAfter(nil)
}
// Update pushes the parsed hard-stop-after value to HAProxy; when no value
// was parsed, the directive is removed instead.
func (a *GlobalHardStopAfter) Update() error {
	if a.data != nil {
		logger.Infof("Setting hard-stop-after to %s", a.data.Value)
		return a.client.GlobalHardStopAfter(a.data)
	}
	logger.Infof("Removing hard-stop-after timeout")
	return a.client.GlobalHardStopAfter(nil)
}
|
package executor
import (
"bytes"
"encoding/json"
"fmt"
"sort"
"sync"
"sync/atomic"
"time"
"github.com/cbergoon/merkletree"
"github.com/meshplus/bitxhub-core/agency"
"github.com/meshplus/bitxhub-kit/crypto"
"github.com/meshplus/bitxhub-kit/crypto/asym"
"github.com/meshplus/bitxhub-kit/types"
"github.com/meshplus/bitxhub-model/pb"
"github.com/meshplus/bitxhub/internal/ledger"
"github.com/meshplus/bitxhub/internal/model/events"
"github.com/meshplus/bitxhub/pkg/vm"
"github.com/meshplus/bitxhub/pkg/vm/boltvm"
"github.com/meshplus/bitxhub/pkg/vm/wasm"
"github.com/sirupsen/logrus"
)
// processExecuteEvent executes all transactions of a block, fills in the
// block header roots (tx/receipt/state), records metrics, and returns the
// assembled BlockData. Merkle-root failures panic — presumably treated as
// unrecoverable consensus faults; TODO confirm that policy.
func (exec *BlockExecutor) processExecuteEvent(block *pb.Block) *ledger.BlockData {
	current := time.Now()
	block = exec.verifyProofs(block)
	receipts := exec.txsExecutor.ApplyTransactions(block.Transactions)
	applyTxsDuration.Observe(float64(time.Since(current)) / float64(time.Second))
	exec.logger.WithFields(logrus.Fields{
		"time":  time.Since(current),
		"count": len(block.Transactions),
	}).Debug("Apply transactions elapsed")
	// Build the two-level tx Merkle tree and the receipt root.
	calcMerkleStart := time.Now()
	l1Root, l2Roots, err := exec.buildTxMerkleTree(block.Transactions)
	if err != nil {
		panic(err)
	}
	receiptRoot, err := exec.calcReceiptMerkleRoot(receipts)
	if err != nil {
		panic(err)
	}
	calcMerkleDuration.Observe(float64(time.Since(calcMerkleStart)) / float64(time.Second))
	// Populate the header; the state root comes from the ledger journal.
	block.BlockHeader.TxRoot = l1Root
	block.BlockHeader.ReceiptRoot = receiptRoot
	block.BlockHeader.ParentHash = exec.currentBlockHash
	accounts, journal := exec.ledger.FlushDirtyDataAndComputeJournal()
	block.BlockHeader.StateRoot = journal.ChangedHash
	block.BlockHash = block.Hash()
	exec.logger.WithFields(logrus.Fields{
		"tx_root":      block.BlockHeader.TxRoot.String(),
		"receipt_root": block.BlockHeader.ReceiptRoot.String(),
		"state_root":   block.BlockHeader.StateRoot.String(),
	}).Debug("block meta")
	calcBlockSize.Observe(float64(block.Size()))
	executeBlockDuration.Observe(float64(time.Since(current)) / float64(time.Second))
	// Snapshot the per-block interchain counters into the block metadata.
	counter := make(map[string]*pb.Uint64Slice)
	for k, v := range exec.txsExecutor.GetInterchainCounter() {
		counter[k] = &pb.Uint64Slice{Slice: v}
	}
	interchainMeta := &pb.InterchainMeta{
		Counter: counter,
		L2Roots: l2Roots,
	}
	// Reset per-block ledger state and advance the executor's cursor.
	exec.clear()
	exec.currentHeight = block.BlockHeader.Number
	exec.currentBlockHash = block.BlockHash
	return &ledger.BlockData{
		Block:          block,
		Receipts:       receipts,
		Accounts:       accounts,
		Journal:        journal,
		InterchainMeta: interchainMeta,
	}
}
// listenPreExecuteEvent pulls blocks from preBlockC, verifies transaction
// signatures (dropping invalid ones), and forwards each block to blockC.
// It exits when the executor's context is cancelled.
func (exec *BlockExecutor) listenPreExecuteEvent() {
	for {
		select {
		case block := <-exec.preBlockC:
			now := time.Now()
			// verifySign removes transactions whose signatures fail.
			block = exec.verifySign(block)
			exec.logger.WithFields(logrus.Fields{
				"height": block.BlockHeader.Number,
				"count":  len(block.Transactions),
				"elapse": time.Since(now),
			}).Infof("Verified signature")
			exec.blockC <- block
		case <-exec.ctx.Done():
			return
		}
	}
}
// buildTxMerkleTree computes the block's two-level transaction Merkle
// tree: one L2 root per interchain address group (computed in parallel)
// plus one L2 root for the normal transactions, then an L1 root over the
// sorted L2 roots. Returns the L1 root and the sorted L2 roots.
func (exec *BlockExecutor) buildTxMerkleTree(txs []*pb.Transaction) (*types.Hash, []types.Hash, error) {
	var (
		groupCnt = len(exec.txsExecutor.GetInterchainCounter()) + 1
		wg       = sync.WaitGroup{}
		lock     = sync.Mutex{}
		l2Roots  = make([]types.Hash, 0, groupCnt)
		errorCnt = int32(0)
	)
	wg.Add(groupCnt - 1)
	// One goroutine per interchain address group.
	for addr, txIndexes := range exec.txsExecutor.GetInterchainCounter() {
		go func(addr string, txIndexes []uint64) {
			defer wg.Done()
			txHashes := make([]merkletree.Content, 0, len(txIndexes))
			for _, txIndex := range txIndexes {
				txHashes = append(txHashes, txs[txIndex].TransactionHash)
			}
			hash, err := calcMerkleRoot(txHashes)
			if err != nil {
				atomic.AddInt32(&errorCnt, 1)
				return
			}
			lock.Lock()
			defer lock.Unlock()
			l2Roots = append(l2Roots, *hash)
		}(addr, txIndexes)
	}
	// The normal (non-interchain) transactions form the final group.
	txHashes := make([]merkletree.Content, 0, len(exec.txsExecutor.GetNormalTxs()))
	for _, txHash := range exec.txsExecutor.GetNormalTxs() {
		txHashes = append(txHashes, txHash)
	}
	hash, err := calcMerkleRoot(txHashes)
	if err != nil {
		atomic.AddInt32(&errorCnt, 1)
	} else {
		// BUG FIX: the original dereferenced hash unconditionally, so an
		// error from calcMerkleRoot caused a nil-pointer panic here before
		// the errorCnt check below could report it.
		lock.Lock()
		l2Roots = append(l2Roots, *hash)
		lock.Unlock()
	}
	wg.Wait()
	if errorCnt != 0 {
		return nil, nil, fmt.Errorf("build tx merkle tree error")
	}
	// Sort the L2 roots so the L1 root is deterministic across nodes.
	sort.Slice(l2Roots, func(i, j int) bool {
		return bytes.Compare(l2Roots[i].Bytes(), l2Roots[j].Bytes()) < 0
	})
	contents := make([]merkletree.Content, 0, groupCnt)
	for i := range l2Roots {
		// BUG FIX: the original appended &l2Root — the address of the loop
		// variable — so under pre-Go-1.22 loop semantics every element of
		// contents aliased the same (last) root. Take the address of the
		// slice element instead.
		contents = append(contents, &l2Roots[i])
	}
	root, err := calcMerkleRoot(contents)
	if err != nil {
		return nil, nil, err
	}
	return root, l2Roots, nil
}
// verifySign checks the secp256k1 signature of every transaction in the
// block in parallel and removes the transactions that fail verification.
// Block number 1 is exempt from verification.
func (exec *BlockExecutor) verifySign(block *pb.Block) *pb.Block {
	if block.BlockHeader.Number == 1 {
		return block
	}
	var (
		wg    sync.WaitGroup
		mutex sync.Mutex
		index []int // positions of transactions whose signature is invalid
	)
	txs := block.Transactions
	wg.Add(len(txs))
	for i, tx := range txs {
		go func(i int, tx *pb.Transaction) {
			defer wg.Done()
			// The verification error is deliberately discarded: any failure
			// simply marks the transaction invalid.
			ok, _ := asym.Verify(crypto.Secp256k1, tx.Signature, tx.SignHash().Bytes(), *tx.From)
			if !ok {
				mutex.Lock()
				defer mutex.Unlock()
				index = append(index, i)
			}
		}(i, tx)
	}
	wg.Wait()
	if len(index) > 0 {
		// Delete from the highest index down so earlier removals do not
		// shift the positions of later ones.
		sort.Sort(sort.Reverse(sort.IntSlice(index)))
		for _, idx := range index {
			txs = append(txs[:idx], txs[idx+1:]...)
		}
		block.Transactions = txs
	}
	return block
}
// applyTx executes one transaction and builds its receipt. Interchain
// events emitted during execution are folded into the executor's
// interchain counter; a transaction that emits none is recorded as a
// normal transaction.
func (exec *BlockExecutor) applyTx(index int, tx *pb.Transaction, opt *agency.TxOpt) *pb.Receipt {
	receipt := &pb.Receipt{
		Version: tx.Version,
		TxHash:  tx.TransactionHash,
	}
	normalTx := true
	ret, err := exec.applyTransaction(index, tx, opt)
	if err != nil {
		receipt.Status = pb.Receipt_FAILED
		receipt.Ret = []byte(err.Error())
	} else {
		receipt.Status = pb.Receipt_SUCCESS
		receipt.Ret = ret
	}
	// Attach any events the ledger recorded under this transaction hash.
	events := exec.ledger.Events(tx.TransactionHash.String())
	if len(events) != 0 {
		receipt.Events = events
		for _, ev := range events {
			if ev.Interchain {
				// Interchain event data is a JSON object of key -> count;
				// a malformed payload is treated as a fatal fault.
				m := make(map[string]uint64)
				err := json.Unmarshal(ev.Data, &m)
				if err != nil {
					panic(err)
				}
				for k, v := range m {
					exec.txsExecutor.AddInterchainCounter(k, v)
				}
				normalTx = false
			}
		}
	}
	if normalTx {
		exec.txsExecutor.AddNormalTx(tx.TransactionHash)
	}
	return receipt
}
// postBlockEvent asynchronously publishes a NewBlockEvent to subscribers
// of the executor's block feed.
func (exec *BlockExecutor) postBlockEvent(block *pb.Block, interchainMeta *pb.InterchainMeta) {
	go exec.blockFeed.Send(events.NewBlockEvent{Block: block, InterchainMeta: interchainMeta})
}
// applyTransaction dispatches a single transaction to the appropriate
// engine: IBTP transactions go to BoltVM's IBTP handler, NORMAL
// transactions are plain balance transfers, and everything else runs in
// BoltVM or WASM depending on the declared VmType.
func (exec *BlockExecutor) applyTransaction(i int, tx *pb.Transaction, opt *agency.TxOpt) ([]byte, error) {
	if tx.IsIBTP() {
		// IBTP transactions carry no payload data (nil context data).
		ctx := vm.NewContext(tx, uint64(i), nil, exec.ledger, exec.logger)
		instance := boltvm.New(ctx, exec.validationEngine, exec.getContracts(opt))
		return instance.HandleIBTP(tx.IBTP)
	}
	if tx.Payload == nil {
		return nil, fmt.Errorf("empty transaction data")
	}
	data := &pb.TransactionData{}
	if err := data.Unmarshal(tx.Payload); err != nil {
		return nil, err
	}
	switch data.Type {
	case pb.TransactionData_NORMAL:
		// Plain value transfer between two accounts.
		err := exec.transfer(tx.From, tx.To, data.Amount)
		return nil, err
	default:
		var instance vm.VM
		switch data.VmType {
		case pb.TransactionData_BVM:
			ctx := vm.NewContext(tx, uint64(i), data, exec.ledger, exec.logger)
			instance = boltvm.New(ctx, exec.validationEngine, exec.getContracts(opt))
		case pb.TransactionData_XVM:
			ctx := vm.NewContext(tx, uint64(i), data, exec.ledger, exec.logger)
			imports, err := wasm.EmptyImports()
			if err != nil {
				return nil, err
			}
			instance, err = wasm.New(ctx, imports, exec.wasmInstances)
			if err != nil {
				return nil, err
			}
		default:
			return nil, fmt.Errorf("wrong vm type")
		}
		return instance.Run(data.Payload)
	}
}
// clear resets the ledger via its Clear method after a block has been
// executed (called at the end of processExecuteEvent).
func (exec *BlockExecutor) clear() {
	exec.ledger.Clear()
}
// transfer moves value from one ledger account to another. A zero value is
// a no-op; an insufficient source balance yields an error and no mutation.
func (exec *BlockExecutor) transfer(from, to *types.Address, value uint64) error {
	if value == 0 {
		return nil
	}
	fromBalance := exec.ledger.GetBalance(from)
	if fromBalance < value {
		return fmt.Errorf("not sufficient funds for %s", from.String())
	}
	toBalance := exec.ledger.GetBalance(to)
	exec.ledger.SetBalance(from, fromBalance-value)
	exec.ledger.SetBalance(to, toBalance+value)
	return nil
}
// calcReceiptMerkleRoot computes the Merkle root over all receipt hashes
// and logs how long the computation took.
func (exec *BlockExecutor) calcReceiptMerkleRoot(receipts []*pb.Receipt) (*types.Hash, error) {
	start := time.Now()
	hashes := make([]merkletree.Content, 0, len(receipts))
	for _, r := range receipts {
		hashes = append(hashes, r.Hash())
	}
	root, err := calcMerkleRoot(hashes)
	if err != nil {
		return nil, err
	}
	exec.logger.WithField("time", time.Since(start)).Debug("Calculate receipt merkle roots")
	return root, nil
}
// calcMerkleRoot builds a Merkle tree over contents and returns its root
// hash; an empty input yields the zero hash.
func calcMerkleRoot(contents []merkletree.Content) (*types.Hash, error) {
	if len(contents) == 0 {
		return &types.Hash{}, nil
	}
	t, err := merkletree.NewTree(contents)
	if err != nil {
		return nil, err
	}
	rootBytes := t.MerkleRoot()
	return types.NewHash(rootBytes), nil
}
// getContracts returns the contract set supplied in opt when present,
// falling back to the executor's built-in Bolt contracts.
func (exec *BlockExecutor) getContracts(opt *agency.TxOpt) map[string]agency.Contract {
	if opt == nil || opt.Contracts == nil {
		return exec.txsExecutor.GetBoltContracts()
	}
	return opt.Contracts
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.