text stringlengths 11 4.05M |
|---|
package pgsql
import (
"testing"
)
// TestText round-trips PostgreSQL `text` values through the test harness,
// once as a Go string and once as a []byte; input and output are expected
// to be byte-identical in both representations.
func TestText(t *testing.T) {
	testlist2{{
		data: []testdata{
			{input: string("foo bar"), output: string("foo bar")},
		},
	}, {
		data: []testdata{
			{input: []byte("foo bar"), output: []byte("foo bar")},
		},
	}}.execute(t, "text")
}
|
package 链表
// --------------------- Iterative version ---------------------
// mergeTwoLists splices two sorted linked lists into one sorted list,
// reusing the existing nodes. On equal values the node from listA is
// taken first. Returns the head of the merged list.
func mergeTwoLists(listA *ListNode, listB *ListNode) *ListNode {
	sentinel := &ListNode{-1, nil}
	tail := sentinel
	for listA != nil && listB != nil {
		if listB.Val < listA.Val {
			tail.Next = listB
			listB = listB.Next
		} else {
			tail.Next = listA
			listA = listA.Next
		}
		tail = tail.Next
	}
	// At most one of the two lists still has nodes; attach the remainder.
	if listA != nil {
		tail.Next = listA
	} else {
		tail.Next = listB
	}
	return sentinel.Next
}
// --------------------- Recursive version ---------------------
// mergeTwoLists merges two sorted linked lists recursively, reusing the
// existing nodes. On equal values the node from listA is taken first.
func mergeTwoLists(listA *ListNode, listB *ListNode) *ListNode {
	if listA == nil {
		return listB
	}
	if listB == nil {
		return listA
	}
	if listA.Val > listB.Val {
		listB.Next = mergeTwoLists(listA, listB.Next)
		return listB
	}
	// Consistency fix: recurse with the chosen list's remainder in the same
	// argument position as the branch above (the original called
	// mergeTwoLists(listB, listA.Next), swapping the argument order).
	listA.Next = mergeTwoLists(listA.Next, listB)
	return listA
}
|
package handlers
import (
"bytes"
"log"
"net/http"
"net/http/httptest"
"os"
"testing"
"github.com/gorilla/mux"
"github.com/saurabmish/Coffee-Shop/data"
"github.com/stretchr/testify/assert"
)
// MiddlewareRouter builds a test router with the product-validation
// middleware installed and the create (POST /coffee/add) and modify
// (PUT /coffee/modify/{id}) routes registered against a fresh handler.
func MiddlewareRouter() *mux.Router {
	l := log.New(os.Stdout, "[TEST] Coffee shop API service ", log.LstdFlags)
	v := data.NewValidation()
	coffeeHandler := NewProducts(l, v)
	serveMux := mux.NewRouter()
	serveMux.HandleFunc("/coffee/add", coffeeHandler.Add).Methods("POST")
	serveMux.HandleFunc("/coffee/modify/{id:[0-9]+}", coffeeHandler.Modify).Methods("PUT")
	serveMux.Use(coffeeHandler.MiddlewareProductValidation)
	return serveMux
}
// TestProductWithInsufficientFieldsUsingUpdate sends an update payload that
// is missing required fields and expects the validation middleware to reject
// it with 422 Unprocessable Entity.
func TestProductWithInsufficientFieldsUsingUpdate(t *testing.T) {
	payload := []byte(`{"name": "Mocha", "sku": "COF-MOC-VAR-LAT"}`)
	request, _ := http.NewRequest("PUT", "/coffee/modify/1", bytes.NewBuffer(payload))
	response := httptest.NewRecorder()
	MiddlewareRouter().ServeHTTP(response, request)
	assert.Equal(t, http.StatusUnprocessableEntity, response.Code) // http.StatusUnprocessableEntity = 422
	assert.Equal(t, "application/json", response.Header().Get("Content-Type"))
}
// TestProductWithWrongValueTypeUsingUpdate sends an update payload whose
// "name" is a number instead of a string and expects 400 Bad Request.
func TestProductWithWrongValueTypeUsingUpdate(t *testing.T) {
	payload := []byte(`{"name": 1234, "description": "Chocolate-flavoured variant of latte", "sku": "COF-MOC-VAR-LAT"}`)
	request, _ := http.NewRequest("PUT", "/coffee/modify/1", bytes.NewBuffer(payload))
	response := httptest.NewRecorder()
	MiddlewareRouter().ServeHTTP(response, request)
	assert.Equal(t, http.StatusBadRequest, response.Code) // http.StatusBadRequest = 400
	assert.Equal(t, "application/json", response.Header().Get("Content-Type"))
}
// TestProductWithExtraFieldsUsingCreate posts a valid product with unknown
// extra fields ("blob1", "blob2") and expects them to be silently discarded,
// so creation still succeeds with 201.
func TestProductWithExtraFieldsUsingCreate(t *testing.T) {
	payload := []byte(`{"name": "Caramel Macchiato", "blob1": 12, "description": "Freshly steamed milk with vanilla-flavored syrup marked with espresso and topped with caramel drizzle.", "price": 4.0, "sku": "CAR-MAC-VAN-ESP", "blob2": 2}`)
	request, _ := http.NewRequest("POST", "/coffee/add", bytes.NewBuffer(payload))
	response := httptest.NewRecorder()
	MiddlewareRouter().ServeHTTP(response, request)
	assert.Equal(t, http.StatusCreated, response.Code, "Extra fields will be discarded")
	assert.Equal(t, "application/json", response.Header().Get("Content-Type"))
}
// TestProductWithWrongValueTypeUsingCreate posts a payload whose description
// is not valid JSON (`None` is a Python literal, not a JSON token) to the
// create endpoint and expects 400 Bad Request.
// Fix: the test name and intent say "UsingCreate", but the original sent a
// PUT to the modify route; it now POSTs to /coffee/add like the other
// create-path tests.
func TestProductWithWrongValueTypeUsingCreate(t *testing.T) {
	payload := []byte(`{"name": "Caramel Macchiato", "blob1": 12, "description": None, "price": 4.0, "sku": "CAR-MAC-VAN-ESP", "blob2": 2}`)
	request, _ := http.NewRequest("POST", "/coffee/add", bytes.NewBuffer(payload))
	response := httptest.NewRecorder()
	MiddlewareRouter().ServeHTTP(response, request)
	assert.Equal(t, http.StatusBadRequest, response.Code)
	assert.Equal(t, "application/json", response.Header().Get("Content-Type"))
}
|
package pgtune
import (
"fmt"
"strconv"
"strings"
"github.com/timescale/timescaledb-tune/internal/parse"
)
const (
	// errUnrecognizedBoolValue is the fmt format string used when a
	// boolean-style setting value cannot be interpreted; %s receives the
	// offending (already lowercased/unquoted) value.
	errUnrecognizedBoolValue = "unrecognized bool value: %s"
)
// FloatParser converts a configuration setting's string value into a
// float64. The first argument is the setting key, the second its raw value.
type FloatParser interface {
	ParseFloat(string, string) (float64, error)
}
// bytesFloatParser parses PostgreSQL byte-size strings (e.g. "8GB") via
// parse.PGFormatToBytes.
type bytesFloatParser struct{}

// ParseFloat returns the value of s expressed in bytes. The key argument is
// unused; it exists to satisfy the FloatParser interface.
func (v *bytesFloatParser) ParseFloat(key string, s string) (float64, error) {
	temp, err := parse.PGFormatToBytes(s)
	return float64(temp), err
}
// numericFloatParser handles settings whose values are plain numbers.
type numericFloatParser struct{}

// ParseFloat parses s as a base-10 64-bit float. The key argument is unused;
// it exists to satisfy the FloatParser interface.
func (v *numericFloatParser) ParseFloat(key string, s string) (float64, error) {
	value, err := strconv.ParseFloat(s, 64)
	return value, err
}
// boolFloatParser maps PostgreSQL boolean-style setting values to floats.
type boolFloatParser struct{}

// ParseFloat interprets s (case-insensitively, with surrounding single or
// double quotes stripped) as a boolean: true-ish values yield 1.0, false-ish
// values 0.0, anything else an error. The key argument is unused; it exists
// to satisfy the FloatParser interface.
func (v *boolFloatParser) ParseFloat(key string, s string) (float64, error) {
	s = strings.Trim(strings.ToLower(s), `"'`)
	switch s {
	case "on", "true", "yes", "1":
		return 1.0, nil
	case "off", "false", "no", "0":
		return 0.0, nil
	}
	return 0.0, fmt.Errorf(errUnrecognizedBoolValue, s)
}
// GetFloatParser returns the correct FloatParser for a given Recommender.
func GetFloatParser(r Recommender) FloatParser {
	switch r.(type) {
	case *MemoryRecommender:
		return &bytesFloatParser{}
	case *WALRecommender, *PromscaleWALRecommender:
		return &WALFloatParser{}
	}
	// PromscaleBgwriterRecommender, ParallelRecommender, and any other
	// recommender all use plain numeric parsing.
	return &numericFloatParser{}
}
|
package main
import (
"encoding/json"
"time"
)
// Task is a queued unit of work exposed over the JSON API.
type Task struct {
	ID int `json:"id"`
	// Progress is a fraction in [0, 1]; 1 means complete.
	Progress float64 `json:"progress"`
	// ResourceLocation points at the created resource; must be non-empty
	// before progress may reach 1 (see updateProgress).
	ResourceLocation string `json:"resource_location"`
	CreatedOn time.Time `json:"created_on"`
	ExpiresOn time.Time `json:"expires_on"`
	ProgressCheck ProgressCheck `json:"progress_check"`
}
// ProgressCheck describes how/where to poll for task progress.
// NOTE(review): both fields are unexported, so encoding/json silently ignores
// them despite the json tags (go vet flags this); exporting them would change
// the struct's interface, so confirm intent before fixing.
type ProgressCheck struct {
	endpoint string `json:"endpoint"`
	interval int `json:"interval"`
}
// NewTask creates a new task, stamps its creation/expiry times, sets its
// location and progress, and saves it in the queue, which generates an id
// for it. The populated copy is returned.
func (q *Queue) NewTask(progress float64, location string, expires time.Time) Task {
	t := Task{}
	t.setTime(expires)
	t.updateLocation(location)
	t.updateProgress(progress)
	q.insertNewTask(&t)
	return t
}
// UpdateTask updates a task that already exists under the given id, applying
// the new expiry (ignored if zero), location (ignored if empty), and progress
// delta, then persists and returns it.
func (q *Queue) UpdateTask(id int, progress float64, location string, expires time.Time) Task {
	t := q.readTask(id)
	t.updateExpiry(expires)
	t.updateLocation(location)
	t.updateProgress(progress)
	q.insertTask(&t)
	return t
}
// setTime stamps the task's creation time with the current time and sets its
// expiration. A zero expires value defaults to 24 hours from now.
func (t *Task) setTime(expires time.Time) {
	t.CreatedOn = time.Now()
	if !expires.IsZero() {
		t.ExpiresOn = expires
		return
	}
	t.ExpiresOn = time.Now().Add(24 * time.Hour)
}
// updateExpiry replaces the task's expiry time. A zero value is ignored;
// a time in the past panics.
func (t *Task) updateExpiry(newExpiry time.Time) {
	if newExpiry.IsZero() {
		return
	}
	if time.Now().After(newExpiry) {
		panic("Cannot set expiry to time in the past")
	}
	t.ExpiresOn = newExpiry
}
/*
updateProgress adds newProgress to the task's progress. A task cannot reach
completion (progress >= 1) while no resource location is set; that case
panics. NOTE(review): the original comment said this "should throw an error"
— panicking is what the code actually does; confirm the intended contract.
*/
func (t *Task) updateProgress(newProgress float64) {
	total := t.Progress + newProgress
	if total >= 1 && t.ResourceLocation == "" {
		panic("Cannot set progress to 100% unless a location is set for the completed resource")
	}
	t.Progress = total
}
// updateLocation sets the location of the task's created resource; empty
// strings are ignored so an existing location is never cleared.
func (t *Task) updateLocation(newLocation string) {
	if newLocation == "" {
		return
	}
	t.ResourceLocation = newLocation
}
// toJSON marshals the task to a JSON byte slice, panicking if marshaling
// fails (all Task fields are marshalable, so this indicates a programmer bug).
func (t *Task) toJSON() []byte {
	data, err := json.Marshal(t)
	if err != nil {
		panic(err)
	}
	return data
}
|
package ionic
import (
"github.com/franela/goblin"
. "github.com/onsi/gomega"
"testing"
)
// TestAppliedRulesets verifies AppliedRulesetSummary.SummarizeEvaluation:
// a "pass" evaluation summary yields ("low", true); a failed or missing
// evaluation summary yields ("high", false).
func TestAppliedRulesets(t *testing.T) {
	g := goblin.Goblin(t)
	RegisterFailHandler(func(m string, _ ...int) { g.Fail(m) })
	g.Describe("Applied Ruleset Summary", func() {
		g.It("should return low risk and passed if the evaluation summary passed", func() {
			ar := AppliedRulesetSummary{
				RuleEvaluationSummary: &RuleEvaluationSummary{
					Summary: "pass",
				},
			}
			r, p := ar.SummarizeEvaluation()
			Expect(r).To(Equal("low"))
			Expect(p).To(Equal(true))
		})
		g.It("should return high risk and failed if the evaluation summary failed", func() {
			ar := AppliedRulesetSummary{
				RuleEvaluationSummary: &RuleEvaluationSummary{},
			}
			r, p := ar.SummarizeEvaluation()
			Expect(r).To(Equal("high"))
			Expect(p).To(Equal(false))
		})
		g.It("should return high risk and failed if the evaluation summary doesn't exist", func() {
			ar := AppliedRulesetSummary{}
			r, p := ar.SummarizeEvaluation()
			Expect(r).To(Equal("high"))
			Expect(p).To(Equal(false))
		})
	})
}
|
package main
import (
"log"
"strings"
)
// Profile tracks one user: their index in id_list, their name, and the set
// of distinct users who have reported them.
type Profile struct {
	Id       int
	Name     string
	Reporter map[string]bool
}

// solution implements the Kakao "reported users" problem. Each report is a
// space-separated "reporter reported" pair; duplicate reports from the same
// reporter are ignored. When a user accumulates exactly k distinct reports,
// every one of their reporters earns a point; each further distinct report
// earns only that new reporter a point. Returns one point total per id_list
// entry, in id_list order.
// Fix: removed the leftover debug log.Printf/log.Println calls that spammed
// stderr on every report.
func solution(id_list []string, report []string, k int) []int {
	points := make([]int, len(id_list))

	// Index profiles by user name.
	idMap := make(map[string]*Profile, len(id_list))
	for idx, name := range id_list {
		idMap[name] = &Profile{
			Id:       idx,
			Name:     name,
			Reporter: make(map[string]bool),
		}
	}

	for _, r := range report {
		pair := strings.Split(r, " ")
		reporter := idMap[pair[0]]
		reported := idMap[pair[1]]
		if reported.Reporter[reporter.Name] {
			continue // duplicate report from the same reporter
		}
		reported.Reporter[reporter.Name] = true

		switch count := len(reported.Reporter); {
		case count == k:
			// Threshold just reached: award everyone who reported this user.
			for name := range reported.Reporter {
				points[idMap[name].Id]++
			}
		case count > k:
			// Already past the threshold: only the new reporter is awarded.
			points[reporter.Id]++
		}
	}
	return points
}
// main runs the two sample cases from the problem statement; the expected
// outputs are [2 1 1 0] and [0 0].
func main() {
	log.Println(solution(
		[]string{"muzi", "frodo", "apeach", "neo"},
		[]string{"muzi frodo", "apeach frodo", "frodo neo", "muzi neo", "apeach muzi"},
		2))
	log.Println(solution([]string{"con", "ryan"}, []string{"ryan con", "ryan con", "ryan con", "ryan con"}, 3))
}
|
package pgsql
import (
"database/sql"
"database/sql/driver"
"strconv"
)
// PointFromFloat64Array2 returns a driver.Valuer that produces a PostgreSQL point from the given Go [2]float64.
func PointFromFloat64Array2(val [2]float64) driver.Valuer {
	return pointFromFloat64Array2{val: val}
}
// PointToFloat64Array2 returns an sql.Scanner that converts a PostgreSQL point into a Go [2]float64 and sets it to val.
func PointToFloat64Array2(val *[2]float64) sql.Scanner {
	return pointToFloat64Array2{val: val}
}
// pointFromFloat64Array2 adapts a [2]float64 to driver.Valuer, rendering it
// in PostgreSQL point syntax "(x,y)".
type pointFromFloat64Array2 struct {
	val [2]float64
}

// Value formats the point as a []byte like "(x,y)", with each coordinate in
// the shortest exact 'f' representation.
func (v pointFromFloat64Array2) Value() (driver.Value, error) {
	buf := append(make([]byte, 0, 8), '(')
	buf = strconv.AppendFloat(buf, v.val[0], 'f', -1, 64)
	buf = append(buf, ',')
	buf = strconv.AppendFloat(buf, v.val[1], 'f', -1, 64)
	buf = append(buf, ')')
	return buf, nil
}
// pointToFloat64Array2 adapts a *[2]float64 destination to sql.Scanner.
type pointToFloat64Array2 struct {
	val *[2]float64
}

// Scan parses a PostgreSQL point value like "(x,y)" into the destination
// array. A nil source leaves the destination untouched; the destination is
// only written once both coordinates parse successfully.
func (v pointToFloat64Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil || data == nil {
		return err
	}
	coords := pgParsePoint(data)
	var parsed [2]float64
	for i := 0; i < 2; i++ {
		f, err := strconv.ParseFloat(string(coords[i]), 64)
		if err != nil {
			return err
		}
		parsed[i] = f
	}
	*v.val = parsed
	return nil
}
|
package test
import (
// "net/http"
"testing"
)
//____________________________ INSERT ________________________________________//
// TestUserEntityInsertWRONGBODY posts a malformed sign-up body and expects
// the wrong-body error response.
func TestUserEntityInsertWRONGBODY(t *testing.T) {
	resp := sendPost("http://localhost:8080/SignUp", APPJASON_UTF_8, UserEntityInsertWRONGBODY)
	response := responseToString(resp)
	compareResults(t, response, wrongBody)
}
// TestUserEntityInsertSUCCESS posts a valid sign-up body and expects the
// success response.
func TestUserEntityInsertSUCCESS(t *testing.T) {
	resp := sendPost("http://localhost:8080/SignUp", APPJASON_UTF_8, UserEntityInsertSUCCESS)
	response := responseToString(resp)
	compareResults(t, response, Success)
}
//____________________________ VERIFY ________________________________________//
// TestUserEntityVerifyWRONGBODY posts a malformed login body and expects the
// wrong-body error response.
func TestUserEntityVerifyWRONGBODY(t *testing.T) {
	resp := sendPost("http://localhost:8080/Login", APPJASON_UTF_8, UserEntityVerifyWRONGBODY)
	response := responseToString(resp)
	compareResults(t, response, wrongBody)
}
// TestUserEntityVerifyWRONGPASSWORD logs in with a bad password and expects
// the database-error response.
func TestUserEntityVerifyWRONGPASSWORD(t *testing.T) {
	resp := sendPost("http://localhost:8080/Login", APPJASON_UTF_8, UserEntityVerifyWRONGPASSWORD)
	response := responseToString(resp)
	compareResults(t, response, errorDatabase)
}
// TestUserEntityVerifySUCCESS logs in successfully and stores the response
// in the package-level auth variable so later tests can reuse its
// Authorization header. NOTE(review): this makes test order significant.
func TestUserEntityVerifySUCCESS(t *testing.T) {
	auth = sendPost("http://localhost:8080/Login", APPJASON_UTF_8, UserEntityVerifySUCCESS)
	response := responseToString(auth)
	compareResults(t, response, Success)
}
//____________________________ UPDATE _________________________________//
func TestUserEntityUpdateWRONGBODY(t *testing.T) {
resp, _ := sendPut("http://localhost:8080/YourAccount/Update/username=username", UserEntityUpdateWRONGBODY, auth.Header.Get("Authorization"))
response := responseToString(resp)
compareResults(t, response, wrongBody)
}
func TestUserEntityUpdateWRONGBODY(t *testing.T) {
resp, _ := sendPut("http://localhost:8080/YourAccount/Update/username=username", UserEntityUpdateWRONGBODY, auth.Header.Get("Authorization"))
response := responseToString(resp)
compareResults(t, response, wrongBody)
}
func TestUserEntityUpdateWRONGBODY(t *testing.T) {
resp, _ := sendPut("http://localhost:8080/YourAccount/Update/username=username", UserEntityUpdateWRONGBODY, auth.Header.Get("Authorization"))
response := responseToString(resp)
compareResults(t, response, wrongBody)
}
// TestUserEntityUpdateSingleWRONGBODY sends a malformed single-field update
// and expects the wrong-body error response.
func TestUserEntityUpdateSingleWRONGBODY(t *testing.T) {
	resp, _ := sendPut("http://localhost:8080/YourAccount/UpdateSingle/username=username", UserEntityUpdateSingleWRONGBODY, auth.Header.Get("Authorization"))
	response := responseToString(resp)
	compareResults(t, response, wrongBody)
}
/*
//____________________________ UPDATE SINGLE _________________________________//
func TestUserEntityUpdateSingleWRONGIMAGE(t *testing.T) {
req := sendRequestPut("http://localhost:8080/YourAccount/UpdateSingle/username=username", UserEntityUpdateSingleWRONGIMAGE)
response := requestToString(req)
compareResults(t, response, wrongValidation)
}
func TestUserEntityUpdateSingleSUCCESS(t *testing.T) {
req := sendRequestPut("http://localhost:8080/YourAccount/UpdateSingle/username=username", UserEntityVerifySUCCESS)
response := requestToString(req)
compareResults(t, response, Success)
}
*/ /*
func TestUserValidUpdate(t *testing.T) {
_, _ = PostRequest(user_path["crud"], user_responses["user"])
authRequest, _ := PostRequest(user_path["login"], user_responses["login"])
request := `{"email":"useraccounttestupdate@gmail.com" }`
res, _ := PutRequest(user_path["crud-user"], request, authRequest.Header.Get("Authorization"))
response := ReaderToString(res.Body)
assertEqual(t, response, ExpectedResponses["updated"])
}
func PutRequest(path string, request string, token string) (res *http.Response, err error) {
client := &http.Client{}
req, _ := http.NewRequest(http.MethodPut, "http://"+config.SERVER_HOST+path, StringToReader(request))
if token != "" {
req.Header.Set("Authorization", token)
}
return client.Do(req)
}
*/
|
package main
import (
"fmt"
"github.com/devfeel/dotweb"
//"net/http"
"db"
)
// S is shorthand for a string-keyed map of string slices.
// NOTE(review): S appears unused in this file — confirm before removing.
type S map[string][]string
// main demonstrates a buffered channel, then wires up the dotweb HTTP app,
// exercises the db package and Summary, and starts the server on port 3000.
func main() {
	c := make(chan int, 2) // capacity 1 would deadlock (second send blocks); 3 also runs fine
	c <- 1
	c <- 2
	fmt.Println(<-c)
	fmt.Println(<-c)
	// initialize the app
	dotapp := dotweb.New()
	dotapp.SetLogPath("/Users/liangsijun/go/log")
	InitRouter(dotapp.HttpServer)
	db.Insert()
	db.Search("测试1")
	s:=Summary("Harry")
	fmt.Printf("Summary(address):%v\r\n",s)
	fmt.Printf("Summary(content):%v\r\n",*s)
	// StartServer blocks; it only returns on failure.
	err := dotapp.StartServer(3000)
	fmt.Println("dotweb.StartServer error => ", err)
}
// Summary returns a pointer to its string argument. The parameter escapes to
// the heap, so the pointer remains valid after the call returns.
// Fix: gofmt-clean signature (was `(s*string)`) and the redundant named
// result + temp assignment are dropped; the return type is unchanged.
func Summary(aa string) *string {
	return &aa
}
// Index renders the site's index template.
// NOTE(review): the render error from ctx.View is discarded.
func Index(ctx *dotweb.HttpContext) {
	ctx.View("/Users/liangsijun/go/localhost/self/template/index.html")
}
// Search reads the "keyword" query parameter, looks it up via db.Search,
// and writes the result as JSON. NOTE(review): WriteJson's error is ignored.
func Search(ctx *dotweb.HttpContext ) {
	keyword := ctx.QueryString("keyword")
	result := db.Search(keyword)
	//fmt.Printf("search result is %v ",result)
	ctx.WriteJson(result)
}
// InitRouter registers the application's routes: the index page, the search
// endpoint, and a static-file handler.
func InitRouter(server *dotweb.HttpServer) {
	server.Router().GET("/index", Index)
	server.Router().GET("/search", Search)
	server.Router().ServerFile("/static/*filepath", "static")
}
package main
// Method 1: track both directions in a single scan.
// isMonotonic reports whether A is entirely non-decreasing or entirely
// non-increasing (empty and single-element arrays count as monotonic).
func isMonotonic(A []int) bool {
	nonDecreasing, nonIncreasing := true, true
	for i := 1; i < len(A); i++ {
		if A[i-1] > A[i] {
			nonDecreasing = false
		}
		if A[i-1] < A[i] {
			nonIncreasing = false
		}
	}
	return nonDecreasing || nonIncreasing
}
// Method 2: one pass, direction decided by comparing the endpoints first.
// isMonotonic reports whether A is entirely non-decreasing or entirely
// non-increasing (empty and single-element arrays count as monotonic).
func isMonotonic(A []int) bool {
	if len(A) == 0 {
		return true
	}
	firstNum := A[0]
	lastNum := A[len(A)-1]
	if firstNum == lastNum {
		// Equal endpoints: monotonic only if ALL elements are equal.
		// BUG FIX: the original loop started at i == 0 and indexed A[i-1],
		// panicking with index out of range for any array of length > 1.
		for i := 1; i < len(A); i++ {
			if A[i-1] != A[i] {
				return false
			}
		}
	} else if firstNum < lastNum {
		// Overall rising: every adjacent pair must be non-decreasing.
		for i := 1; i < len(A); i++ {
			if A[i-1] > A[i] {
				return false
			}
		}
	} else {
		// Overall falling: every adjacent pair must be non-increasing.
		for i := 1; i < len(A); i++ {
			if A[i-1] < A[i] {
				return false
			}
		}
	}
	return true
}
// Method 3: one pass, comparing each step's direction against the overall
// endpoint direction (an elegant refinement of method 2).
// isMonotonic reports whether A is entirely non-decreasing or entirely
// non-increasing.
func isMonotonic(A []int) bool {
	if len(A) < 2 {
		return true
	}
	overall := compare(A[0], A[len(A)-1])
	for i := 1; i < len(A); i++ {
		step := compare(A[i-1], A[i])
		// Flat steps are always allowed; any other step must match the
		// overall direction.
		if step != 0 && step != overall {
			return false
		}
	}
	return true
}

// compare is a three-way comparison:
// a == b -> 0, a > b -> 1, a < b -> -1.
func compare(a, b int) int {
	switch {
	case a > b:
		return 1
	case a < b:
		return -1
	default:
		return 0
	}
}
/*
题目链接:
https://leetcode-cn.com/problems/monotonic-array/submissions/ 单调数列
*/
|
package cmd
import (
"github.com/emi1997/con-app/client"
"github.com/spf13/cobra"
)
// init registers the addIndex command on rootCmd (from root.go) so it is
// available as a subcommand of the CLI.
func init() {
	rootCmd.AddCommand(addIndex)
}
// addIndex is the "newind" subcommand; it delegates to client.AddIndex to
// create a new index.
var addIndex = &cobra.Command{
	Use: "newind",
	Short: "Add a new index",
	Long: `Use this command to add a new index of your choice.`,
	Run: func(cmd *cobra.Command, args []string) {
		client.AddIndex()
	},
}
|
package config
import (
"encoding/json"
)
// Cache stores string values indexed by a cache name and a cache key.
// Get returns the zero string for absent entries; Flush discards everything.
type Cache interface {
	Flush()
	Put(cacheName, key, value string)
	Get(cacheName, key string) string
}
// MapCache is an in-memory Cache implementation backed by a two-level map:
// cache name -> key -> value.
type MapCache struct {
	data map[string]map[string]string
}

// NewMapCache returns an empty, ready-to-use MapCache.
func NewMapCache() *MapCache {
	return &MapCache{data: map[string]map[string]string{}}
}

// Put stores v under cacheName/key, creating the named sub-map on first use.
func (c *MapCache) Put(cacheName, key, v string) {
	bucket, ok := c.data[cacheName]
	if !ok {
		bucket = make(map[string]string)
		c.data[cacheName] = bucket
	}
	bucket[key] = v
}

// Get returns the value stored under cacheName/key, or "" if absent.
func (c *MapCache) Get(cacheName, key string) string {
	return c.data[cacheName][key]
}

// Flush removes all values from the cache.
func (c *MapCache) Flush() {
	c.data = map[string]map[string]string{}
}

// MarshalJSON makes MapCache implement json.Marshaler by encoding the
// underlying map directly.
func (c *MapCache) MarshalJSON() ([]byte, error) {
	return json.Marshal(c.data)
}

// UnmarshalJSON makes MapCache implement json.Unmarshaler; the cache
// contents are replaced only when decoding succeeds.
func (c *MapCache) UnmarshalJSON(data []byte) error {
	decoded := make(map[string]map[string]string)
	if err := json.Unmarshal(data, &decoded); err != nil {
		return err
	}
	c.data = decoded
	return nil
}
|
package atlas
import (
"errors"
"fmt"
"image"
"math"
"os"
)
// GenerateParams includes parameters that can be passed to the Generate function.
type GenerateParams struct {
	Name string // base name for output atlases; "" defaults to "atlas"
	Descriptor DescriptorFormat // descriptor format; DESC_INVALID defaults to DESC_KIWI
	Packer Packer // packing strategy; nil defaults to PackGrowing
	Sorter Sorter // input ordering; nil defaults to GetSorterFromString(SORT_DEFAULT)
	MaxWidth, MaxHeight int // atlas size limits; 0 means effectively unbounded (MaxInt32)
	MaxAtlases int // NOTE(review): not consulted by Generate in this file — confirm intent
	Padding, Gutter int // spacing added around each packed image (applied to both sides)
}
// GenerateResult includes details of the result of a texture atlas Generate request.
type GenerateResult struct {
	Files []*File
	Atlases []*Atlas
}
// Generate produces a series of texture atlases using the given files as
// input, writing the results to outputDir with the given parameters. It
// returns an error if any IO operation fails or if the GenerateParams
// represent an invalid configuration.
// Fixes vs. the original: each opened image file is now closed (they were
// leaked); unreadable-format files no longer leave nil holes in res.Files
// (which were handed to the Sorter); atlases honor params.Descriptor instead
// of a hard-coded DESC_KIWI.
func Generate(files []string, outputDir string, params *GenerateParams) (res *GenerateResult, err error) {
	// Apply any default parameters.
	if params == nil {
		params = &GenerateParams{}
	}
	if params.Name == "" {
		params.Name = "atlas"
	}
	if params.Descriptor == DESC_INVALID {
		params.Descriptor = DESC_KIWI
	}
	if params.Packer == nil {
		params.Packer = PackGrowing
	}
	if params.Sorter == nil {
		params.Sorter = GetSorterFromString(SORT_DEFAULT)
	}
	if params.MaxWidth == 0 {
		params.MaxWidth = math.MaxInt32
	}
	if params.MaxHeight == 0 {
		params.MaxHeight = math.MaxInt32
	}

	res = &GenerateResult{}
	// Padding and gutter surround the image on both sides (top & bottom,
	// left & right), hence the *2.
	border := params.Padding*2 + params.Gutter*2

	for _, filename := range files {
		file, err := loadAtlasFile(filename, border, params)
		if err != nil {
			return nil, err
		}
		if file != nil {
			res.Files = append(res.Files, file)
		}
	}
	if len(res.Files) == 0 {
		fmt.Printf("No files to pack\n")
		return res, nil
	}

	res.Atlases = make([]*Atlas, 0)
	pending := params.Sorter(res.Files)
	for i := 0; len(pending) > 0; i++ {
		atlas := &Atlas{
			Name:       fmt.Sprintf("%s-%d", params.Name, (i + 1)),
			MaxWidth:   params.MaxWidth,
			MaxHeight:  params.MaxHeight,
			Descriptor: params.Descriptor, // fix: was hard-coded DESC_KIWI
			Padding:    params.Padding,
			Gutter:     params.Gutter,
		}
		res.Atlases = append(res.Atlases, atlas)
		params.Packer(atlas, pending)
		pending = getRemainingFiles(pending)
		fmt.Printf("Writing atlas named %s to %s\n", atlas.Name, outputDir)
		err = atlas.Write(outputDir)
		if err != nil {
			return nil, err
		}
	}
	return res, nil
}

// loadAtlasFile opens and decodes one input image and returns its File entry
// with the border added to its dimensions. It returns (nil, nil) for files
// whose format is not recognized, and an error for IO failures or images
// exceeding the configured maximum. The file handle is always closed.
func loadAtlasFile(filename string, border int, params *GenerateParams) (*File, error) {
	r, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	decoded, _, err := image.Decode(r)
	if err == image.ErrFormat {
		fmt.Printf("Incorrect format for file: %s\n", filename)
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	size := decoded.Bounds().Size()
	if size.X+border > params.MaxWidth ||
		size.Y+border > params.MaxHeight {
		// (errors.New+Sprintf kept so the package's errors import stays used.)
		return nil, errors.New(fmt.Sprintf("File %s exceeds maximum size of atlas (%dx%d)",
			filename, size.X, size.Y))
	}
	// The border is added once to each dimension; adding it per-image-side
	// would double the gaps between neighboring images in the atlas.
	return &File{
		FileName: filename,
		Width:    size.X + border,
		Height:   size.Y + border,
	}, nil
}
// getRemainingFiles filters files down to those not yet assigned to an
// atlas, returning a new (possibly empty, never nil) slice.
func getRemainingFiles(files []*File) (remaining []*File) {
	remaining = []*File{}
	for _, f := range files {
		if f.Atlas == nil {
			remaining = append(remaining, f)
		}
	}
	return remaining
}
|
/*
An electric circuit uses exclusively identical capacitors of the same value C.
The capacitors can be connected in series or in parallel to form sub-units, which can then be connected in series or in parallel with other capacitors or other sub-units to form larger sub-units, and so on up to a final circuit.
Using this simple procedure and up to n identical capacitors, we can make circuits having a range of different total capacitances. For example, using up to n=3 capacitors of 60 uF each, we can obtain the following 7 distinct total capacitance values:
[60, 60, 60] = 180uF
[60, 60] = 120uF
[[60, 60], 60] = 90uF
[60uF] = 60uF
[60, [60, 60]] = 40uF
[[60, 60]] = 30uF
[[60, 60, 60]] = 20uF
If we denote by D(n) the number of distinct total capacitance values we can obtain when using up to n equal-valued capacitors and the simple procedure described above, we have: D(1)=1, D(2)=3, D(3)=7 ...
Find D(18).
Reminder : When connecting capacitors C1, C2 etc in parallel, the total capacitance is CT = C1 + C2 +...,
whereas when connecting them in series, the overall capacitance is given by:
1/Ct = 1/C1 + 1/C2 + ...
*/
package main
import "fmt"
// main prints D(18): the number of distinct total capacitances obtainable
// with up to 18 identical capacitors (see the problem statement above).
func main() {
	fmt.Println(solve(18))
}
// https://oeis.org/A153588
// solve returns D(n) — the number of distinct capacitance values reachable
// with up to n identical capacitors — via a precomputed lookup table.
// Returns -1 when n is outside the tabulated range [1, 25].
func solve(n int) int {
	table := []int{1, 3, 7, 15, 35, 77, 179, 429, 1039, 2525, 6235, 15463, 38513, 96231, 241519, 607339, 1529533, 3857447, 9743247, 24634043, 62335495, 157885967, 400211085, 1015080877, 2576308943}
	if n >= 1 && n <= len(table) {
		return table[n-1]
	}
	return -1
}
|
package main
import (
"learn6/map"
)
// main delegates to demo.Test().
// NOTE(review): the import path is "learn6/map" but the identifier used is
// `demo` — presumably that directory declares `package demo`; confirm.
func main() {
	demo.Test()
}
|
package main
import (
"errors"
"image"
"image/png"
"io"
"os"
"strconv"
)
/*
Maps a FieldObject to a RGBA color: each sentinel pixel value below marks
one kind of field object in the map image.
*/
var PIXEL_WALL_SOLID = newPixel(0, 0, 0, 255)
var PIXEL_WALL_WEAK = newPixel(66, 65, 66, 255)
var PIXEL_ITEM_BOOST = newPixel(0, 230, 255, 255)
var PIXEL_ITEM_SLOW = newPixel(255, 115, 0, 255)
var PIXEL_ITEM_GHOST = newPixel(0, 26, 255, 255)
/*
CreateMapFromImage gets a Map and a path to an image that has the same pixel
dimensions as the field array. It loops over all pixels and adds a
FieldObject to the Map according to each pixel's RGBA value.
Fixes vs. the original: the file was opened a second time for getPixels and
that handle was never closed (the deferred Close had already captured the
first handle) — we now Seek back to the start of the single handle instead;
the decoded image no longer shadows the `image` package name.
*/
func CreateMapFromImage(m Map, imagePath string) error {
	image.RegisterFormat("png", "png", png.Decode, png.DecodeConfig)
	file, err := os.Open(imagePath)
	if err != nil {
		return err
	}
	defer file.Close()
	img, err := png.Decode(file)
	if err != nil {
		return err
	}
	if img.Bounds().Dx() != MAP_SIZE || img.Bounds().Dy() != MAP_SIZE {
		return errors.New("Creating map from image failed: png needs to have the height " + strconv.Itoa(MAP_SIZE) + " and width " + strconv.Itoa(MAP_SIZE))
	}
	// Rewind so getPixels can re-decode from the same handle.
	if _, err := file.Seek(0, io.SeekStart); err != nil {
		return err
	}
	pixels, err := getPixels(file)
	if err != nil {
		return err
	}
	wSolid := NewWall(false)
	wWeak := NewWall(true)
	i0 := NewItem(FieldObjectItemBoost)
	i1 := NewItem(FieldObjectItemSlow)
	i2 := NewItem(FieldObjectItemGhost)
	// Portals are placed at fixed, hard-coded positions.
	p0 := NewPortal(newPosition(9, 3), newPosition(8, 8))
	p1 := NewPortal(newPosition(10, 3), newPosition(11, 8))
	p2 := NewPortal(newPosition(8, 11), newPosition(9, 16))
	p3 := NewPortal(newPosition(11, 11), newPosition(10, 16))
	m.addPortal(&p0)
	m.addPortal(&p1)
	m.addPortal(&p2)
	m.addPortal(&p3)
	// pixels is indexed [row][column] while Fields is addressed [j][i],
	// transposing the axes. (Original author note: "j und i vertauscht?" —
	// "j and i swapped?" — confirm the intended orientation.)
	for i := 0; i < len(pixels); i++ {
		for j := 0; j < len(pixels[i]); j++ {
			if pixels[i][j] == PIXEL_WALL_SOLID {
				m.Fields[j][i].addWall(wSolid)
			}
			if pixels[i][j] == PIXEL_WALL_WEAK {
				m.Fields[j][i].addWall(wWeak)
			}
			if pixels[i][j] == PIXEL_ITEM_BOOST {
				m.Fields[j][i].addItem(&i0)
			}
			if pixels[i][j] == PIXEL_ITEM_SLOW {
				m.Fields[j][i].addItem(&i1)
			}
			if pixels[i][j] == PIXEL_ITEM_GHOST {
				m.Fields[j][i].addItem(&i2)
			}
		}
	}
	return nil
}
/*
Converts a PNG to a two dimensional Pixel-Array.
*/
func getPixels(file io.Reader) ([][]Pixel, error) {
img, _, err := image.Decode(file)
if err != nil {
return nil, err
}
bounds := img.Bounds()
width, height := bounds.Max.X, bounds.Max.Y
var pixels [][]Pixel
for y := 0; y < height; y++ {
var row []Pixel
for x := 0; x < width; x++ {
row = append(row, rgbaToPixel(img.At(x, y).RGBA()))
}
pixels = append(pixels, row)
}
return pixels, nil
}
/*
Converts "img.At(x, y).RGBA()" to a Pixel.
"img.At(x, y).RGBA()" returns four uint32 values, we need a Pixel.
*/
func rgbaToPixel(r uint32, g uint32, b uint32, a uint32) Pixel {
return Pixel{int(r / 257), int(g / 257), int(b / 257), int(a / 257)}
}
/*
Represents a Pixel with RGBA values.
*/
type Pixel struct {
R int
G int
B int
A int
}
func newPixel(r int, g int, b int, a int) Pixel {
return Pixel{
R: r,
G: g,
B: b,
A: a,
}
}
|
package stages
import (
"fmt"
mortarpb "github.com/SoftwareDefinedBuildings/mortar/proto"
"github.com/pkg/errors"
"time"
)
// validateFetchRequest checks a FetchRequest for structural validity:
// non-empty Sites; each stream named, with either a Definition+DataVars or a
// UUID list, and a non-invalid aggregation; and, when a Time block is
// present (required if DataFrames are requested), RFC3339 start/end stamps.
// Returns a descriptive error for the first violation found, nil otherwise.
func validateFetchRequest(req *mortarpb.FetchRequest) error {
	// check the list of sites is non-empty
	if len(req.Sites) == 0 {
		return errors.New("Need to include non-empty request.Sites")
	}
	//TODO: add collection + selection tests
	//// check that there are non-zero number of streams
	//if len(req.Streams) == 0 {
	// return errors.New("Need to include non-empty request.Streams")
	//}
	//hasWindowAgg := false
	for idx, stream := range req.Streams {
		// streams must have a name
		if stream.Name == "" {
			return fmt.Errorf("Stream %d must have a .Name", idx)
		}
		// streams EITHER have a definition (requiring Definition and DataVars)
		// or they have a list of UUIDs
		if stream.Definition != "" && len(stream.DataVars) == 0 {
			return fmt.Errorf("If stream %d has a Definition, it also needs a list of DataVars", idx)
		} else if stream.Definition == "" && len(stream.Uuids) == 0 {
			return fmt.Errorf("Stream %d has no Definition, so it needs a list of UUIDS", idx)
		}
		if stream.Aggregation == mortarpb.AggFunc_AGG_FUNC_INVALID {
			return fmt.Errorf("Stream %d has no aggregation function (can be RAW)", idx)
		}
		//if stream.Aggregation != mortarpb.AggFunc_AGG_FUNC_RAW {
		// hasWindowAgg = true
		//}
		// TODO: check units?
	}
	// check time params
	if len(req.DataFrames) > 0 && req.Time == nil {
		return errors.New("Need to include non-empty request.Time")
	}
	if req.Time != nil {
		// parse the times to check
		if _, err := time.Parse(time.RFC3339, req.Time.Start); err != nil {
			return errors.Wrapf(err, "request.Time.Start is not RFC3339-formatted timestamp (%s)", req.Time.Start)
		}
		if _, err := time.Parse(time.RFC3339, req.Time.End); err != nil {
			return errors.Wrapf(err, "request.Time.End is not RFC3339-formatted timestamp (%s)", req.Time.End)
		}
		//if hasWindowAgg && req.Time.Window == "" {
		// return errors.New("One of your stream uses a windowed aggregation function e.g. MEAN. Need to provide a valid request.Time.Window")
		//}
	}
	return nil
}
// validateQualifyRequest currently accepts every QualifyRequest; it exists
// as a placeholder so the validation call sites are symmetric with fetch.
func validateQualifyRequest(req *mortarpb.QualifyRequest) error {
	return nil
}
|
package main
import (
"github.com/gin-gonic/gin"
"net/http"
)
/*
A small RESTful user API implemented with gin.
*/
// User is the JSON-serializable user record.
type User struct {
	ID int `json:"id"`
	Name string `json:"name"`
}
// users is the in-memory user store seeded with three sample entries.
// NOTE(review): mutated by handlers without synchronization — gin serves
// requests concurrently; confirm before production use.
var users = []User{
	{ID: 1, Name: "111"},
	{ID: 2, Name: "222"},
	{ID: 3, Name: "333"},
}
// main wires the (currently only active) create-user route and starts the
// server on :8090. The list/get routes are kept commented out below.
func main() {
	r := gin.Default()
	//r.GET("/users",listUser)
	//r.GET("/users/:id", getUser)
	r.POST("/users", createUsesr)
	r.Run(":8090")
}
//--------------获取用户GET
//func listUser(c *gin.Context) {
// c.JSON(200, users)
//}
//-----------------获取特定的用户
//func getUser(c *gin.Context) {
// id:= c.Param("id")
// var user User
// found := false
// for _,u := range users {
// if strings.EqualFold(id, strconv.Itoa(u.ID)){
// user = u
// found = true
// break
// }
// }
// if found {
// c.JSON(200, user)
//
// }else {
// c.JSON(404, gin.H{
// "message":"用户不存在",
// })
// }
//}
//--------------------- create user
// createUsesr handles POST /users: it reads the "name" form field, appends a
// new User, and responds 201 with the created record. (The function name
// keeps the original's typo because main registers it under this name.)
// Fixes: a missing name now returns 400 Bad Request instead of 200 OK, and
// the response key is spelled "message" (was "mesage").
func createUsesr(c *gin.Context) {
	name := c.DefaultPostForm("name", "")
	if name == "" {
		c.JSON(http.StatusBadRequest, gin.H{
			"message": "请输入用户名",
		})
		return
	}
	u := User{ID: len(users) + 1, Name: name}
	users = append(users, u)
	c.JSON(http.StatusCreated, u)
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !codes
package testutil
import (
"math/rand"
"testing"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/collate"
"github.com/stretchr/testify/require"
)
// DatumEqual verifies that the actual value is equal to the expected value. For string datum, they are compared by the binary collation.
func DatumEqual(t testing.TB, expected, actual types.Datum, msgAndArgs ...interface{}) {
	sc := new(stmtctx.StatementContext)
	// Compare returns 0 on equality; any error or nonzero result fails.
	res, err := actual.Compare(sc, &expected, collate.GetBinaryCollator())
	require.NoError(t, err, msgAndArgs)
	require.Zero(t, res, msgAndArgs)
}
// HandleEqual verifies that the actual handle is equal to the expected
// handle, comparing both the int-ness flag and the string representation.
func HandleEqual(t testing.TB, expected, actual kv.Handle, msgAndArgs ...interface{}) {
	require.Equal(t, expected.IsInt(), actual.IsInt(), msgAndArgs)
	require.Equal(t, expected.String(), actual.String(), msgAndArgs)
}
// CompareUnorderedStringSlice reports whether a and b contain exactly the
// same elements with the same multiplicities, ignoring order. Two nil slices
// compare equal; a nil slice never equals a non-nil one (even if empty).
func CompareUnorderedStringSlice(a []string, b []string) bool {
	if a == nil || b == nil {
		return a == nil && b == nil
	}
	if len(a) != len(b) {
		return false
	}
	// Count occurrences in a, then consume them with b.
	counts := make(map[string]int, len(a))
	for _, s := range a {
		counts[s]++
	}
	for _, s := range b {
		n, ok := counts[s]
		if !ok {
			return false
		}
		if n == 1 {
			delete(counts, s)
		} else {
			counts[s] = n - 1
		}
	}
	return len(counts) == 0
}
// letterRunes is the alphabet RandStringRunes draws from: ASCII letters only.
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")

// RandStringRunes generates a random string of length n composed of ASCII letters.
func RandStringRunes(n int) string {
	out := make([]rune, n)
	for i := 0; i < len(out); i++ {
		out[i] = letterRunes[rand.Intn(len(letterRunes))]
	}
	return string(out)
}
|
package isaac
import (
"time"
)
const (
	// actionLogsBasePath is the REST path of the scenario action-log collection.
	actionLogsBasePath = "/api/v1/logs/scenariosLog"
)

// ActionLogsService describes the operations available on action logs.
type ActionLogsService interface {
	Add(NewActionLog) (ActionLog, error)
	Get(ID) (ActionLog, error)
	List() ([]ActionLog, error)
}

// ActionLogsServiceOp implements ActionLogsService against the HTTP API.
type ActionLogsServiceOp struct {
	client *Client
}

// NewActionLog carries the caller-supplied fields of a log entry to be created.
type NewActionLog struct {
	Origin string `json:"origin"`
	Action string `json:"action"`
	Text string `json:"text"`
	DisplayName string `json:"displayName"`
}

// ActionLog is a stored entry: the submitted fields plus the server-assigned
// ID and timestamp.
type ActionLog struct {
	NewActionLog `json:",inline"`
	ID ID `json:"_id"`
	Time time.Time `json:"time"`
}

// ref returns the entry's ID rendered as a string.
func (item ActionLog) ref() string {
	return item.ID.String()
}

// getApiV1LogsActionLogsResponseWrapper mirrors the paginated envelope returned
// by the list endpoint; only Logs is consumed by List.
type getApiV1LogsActionLogsResponseWrapper struct {
	Page int `json:"page"`
	PerPage int `json:"perPage"`
	TotalPages int `json:"totalPages"`
	TotalLogs int `json:"totalLogs"`
	Logs []ActionLog `json:"logs"`
}
// Add creates a new action log entry and returns the stored log as echoed back
// by the server (including its assigned ID and time).
func (s *ActionLogsServiceOp) Add(entry NewActionLog) (ActionLog, error) {
	var created ActionLog
	if err := s.client.genericPost(actionLogsBasePath, entry, &created); err != nil {
		return created, err
	}
	return created, nil
}
// Get fetches a single action log by its ID.
func (s *ActionLogsServiceOp) Get(id ID) (ActionLog, error) {
	var fetched ActionLog
	if err := s.client.genericGetID(actionLogsBasePath, id, &fetched); err != nil {
		return fetched, err
	}
	return fetched, nil
}
// List fetches all action logs. The server wraps them in a paginated envelope;
// only the Logs field is returned to the caller.
func (s *ActionLogsServiceOp) List() ([]ActionLog, error) {
	var envelope getApiV1LogsActionLogsResponseWrapper
	err := s.client.genericGet(actionLogsBasePath, &envelope)
	return envelope.Logs, err
}
|
package kucoin
import (
"net/http"
)
// A WithdrawalModel represents a withdrawal.
// CreatedAt/UpdatedAt are epoch timestamps (presumably milliseconds — confirm
// against the KuCoin API reference).
type WithdrawalModel struct {
	Chain string `json:"chain"`
	Id string `json:"id"`
	Address string `json:"address"`
	Memo string `json:"memo"`
	Currency string `json:"currency"`
	Amount string `json:"amount"`
	Fee string `json:"fee"`
	WalletTxId string `json:"walletTxId"`
	IsInner bool `json:"isInner"`
	Status string `json:"status"`
	Remark string `json:"remark"`
	CreatedAt int64 `json:"createdAt"`
	UpdatedAt int64 `json:"updatedAt"`
}

// A WithdrawalsModel is the set of *WithdrawalModel.
type WithdrawalsModel []*WithdrawalModel
// Withdrawals returns a list of withdrawals, applying the given pagination.
// NOTE(review): pagination is dereferenced unconditionally — callers appear to
// always pass a non-nil PaginationParam; confirm before passing nil.
func (as *ApiService) Withdrawals(params map[string]string, pagination *PaginationParam) (*ApiResponse, error) {
	pagination.ReadParam(params)
	return as.Call(NewRequest(http.MethodGet, "/api/v1/withdrawals", params))
}
// A V1WithdrawalModel represents a v1 historical withdrawal.
// CreateAt is an epoch timestamp (unit per the KuCoin v1 API — confirm).
type V1WithdrawalModel struct {
	Address string `json:"address"`
	Amount string `json:"amount"`
	Currency string `json:"currency"`
	IsInner bool `json:"isInner"`
	WalletTxId string `json:"walletTxId"`
	Status string `json:"status"`
	CreateAt int64 `json:"createAt"`
}

// A V1WithdrawalsModel is the set of *V1WithdrawalModel.
type V1WithdrawalsModel []*V1WithdrawalModel
// V1Withdrawals returns a list of v1 historical withdrawals, applying the given
// pagination (which, as elsewhere, must be non-nil).
func (as *ApiService) V1Withdrawals(params map[string]string, pagination *PaginationParam) (*ApiResponse, error) {
	pagination.ReadParam(params)
	return as.Call(NewRequest(http.MethodGet, "/api/v1/hist-withdrawals", params))
}
// A WithdrawalQuotasModel represents the withdrawal quotas for a currency.
// Amounts are decimal strings as returned by the API.
type WithdrawalQuotasModel struct {
	Currency string `json:"currency"`
	AvailableAmount string `json:"availableAmount"`
	RemainAmount string `json:"remainAmount"`
	WithdrawMinSize string `json:"withdrawMinSize"`
	LimitBTCAmount string `json:"limitBTCAmount"`
	InnerWithdrawMinFee string `json:"innerWithdrawMinFee"`
	UsedBTCAmount string `json:"usedBTCAmount"`
	IsWithdrawEnabled bool `json:"isWithdrawEnabled"`
	WithdrawMinFee string `json:"withdrawMinFee"`
	Precision uint8 `json:"precision"`
	Chain string `json:"chain"`
}
// WithdrawalQuotas returns the withdrawal quotas for a currency. The chain
// parameter is optional and only sent when non-empty.
func (as *ApiService) WithdrawalQuotas(currency, chain string) (*ApiResponse, error) {
	query := map[string]string{"currency": currency}
	if chain != "" {
		query["chain"] = chain
	}
	return as.Call(NewRequest(http.MethodGet, "/api/v1/withdrawals/quotas", query))
}
// ApplyWithdrawalResultModel represents the result of ApplyWithdrawal():
// the server-assigned identifier of the newly created withdrawal.
type ApplyWithdrawalResultModel struct {
	WithdrawalId string `json:"withdrawalId"`
}
// ApplyWithdrawal applies a withdrawal of amount currency to address. Optional
// API parameters are merged from options last, so an option key can override a
// required key (preserving the original merge order).
func (as *ApiService) ApplyWithdrawal(currency, address, amount string, options map[string]string) (*ApiResponse, error) {
	payload := map[string]string{
		"currency": currency,
		"address":  address,
		"amount":   amount,
	}
	for key, value := range options {
		payload[key] = value
	}
	return as.Call(NewRequest(http.MethodPost, "/api/v1/withdrawals", payload))
}
// CancelWithdrawalResultModel represents the result of CancelWithdrawal():
// the identifiers of the withdrawals that were actually cancelled.
type CancelWithdrawalResultModel struct {
	CancelledWithdrawIds []string `json:"cancelledWithdrawIds"`
}
// CancelWithdrawal cancels a pending withdrawal identified by withdrawalId.
func (as *ApiService) CancelWithdrawal(withdrawalId string) (*ApiResponse, error) {
	endpoint := "/api/v1/withdrawals/" + withdrawalId
	return as.Call(NewRequest(http.MethodDelete, endpoint, nil))
}
|
// Package unions illustrates how to implement tagged unions in Go
// Let's start with simple enums that you're probably familiar with.
package enums
import (
"fmt"
)
// The simplest form of enums using consts and `iota`.
// Here, we encode the cardinal directions as ints.
const (
	// North as in the North Star
	North int = iota
	// South as in South L.A.
	South
	// East as in East Coast
	East
	// West as in Best
	West
)

// Cardinal maps a cardinal-direction constant to its display name. Because the
// enum is a plain int, nothing stops callers from passing values outside the
// defined range, so unknown inputs fall back to "Unknown".
func Cardinal(card int) string {
	names := map[int]string{
		North: "North",
		South: "South",
		East:  "East",
		West:  "West",
	}
	if name, ok := names[card]; ok {
		return name
	}
	return "Unknown"
}
// Example1 shows how to use the Cardinal function to print names.
func Example1() {
	// >>> "North South East West"
	fmt.Println(Cardinal(North), Cardinal(South), Cardinal(East), Cardinal(West))
	// But nothing stops you from trying to print non-existent constants,
	// which both fall back to "Unknown".
	fmt.Println(Cardinal(10), Cardinal(-1))
}
// Direction is a newtype over int, introduced to mitigate the problem of
// arbitrary ints being accepted as enum values.
type Direction int

const (
	// Left like all your stuff
	Left Direction = iota
	// Right as in not Wrong
	Right
	// Up as in Pixar
	Up
	// Down as in Down Under
	Down
)

// String shows the side benefit of using a newtype: we can implement the
// standard String() method that fmt.Println already understands.
// The compiler still can't prove that Direction(5) was never constructed
// somewhere, so out-of-range values map to "Unknown".
func (d Direction) String() string {
	switch d {
	case Left, Right, Up, Down:
		return [...]string{"Left", "Right", "Up", "Down"}[d]
	}
	return "Unknown"
}
// icon is a private type that hides the enum constructor entirely, making it
// impossible for another package to accidentally construct an invalid value.
type icon int

// Icon is public so external packages can store values of the icon type. Its
// only method is private, preventing other packages from implementing the
// interface themselves.
type Icon interface {
	markIcon()
}

func (i icon) markIcon() {}

const (
	// Circle is a round shape
	Circle icon = iota
	// Square or Box
	Square
	// Arrow is pointy
	Arrow
)

// String renders the icon as a two-character ASCII glyph.
func (i icon) String() string {
	switch i {
	case Circle:
		return "()"
	case Square:
		return "[]"
	case Arrow:
		return "=>"
	}
	// The compiler still can't prove away this fallback: it results from
	// mimicking the sealed-enum pattern instead of having compiler support.
	return ""
}
|
package paths
import (
"errors"
"fmt"
"os"
"path/filepath"
)
// GetPaths assembles the directory layout used by the application, following
// the XDG base-directory convention with the usual ~/.local/share, ~/.config
// and ~/.cache fallbacks. At most one override for locating the resources
// directory may be supplied; passing more than one is an error.
func GetPaths(getResourcesPathFuncs ...func() (string, error)) (Paths, error) {
	resourcesPathFunc := getResourcesPath
	switch len(getResourcesPathFuncs) {
	case 0:
		// keep the default lookup
	case 1:
		resourcesPathFunc = getResourcesPathFuncs[0]
	default:
		return Paths{}, errors.New("you can only pass one function in getResourcesPathFuncs arg")
	}
	homeDir, err := os.UserHomeDir()
	if err != nil {
		return Paths{}, fmt.Errorf("failed to get user home directory: %w", err)
	}
	// xdgDir resolves an XDG_* variable, falling back to the given path
	// segments under the home directory when the variable is unset.
	xdgDir := func(envVar string, fallback ...string) string {
		if dir := os.Getenv(envVar); dir != "" {
			return dir
		}
		return filepath.Join(append([]string{homeDir}, fallback...)...)
	}
	dataHome := xdgDir("XDG_DATA_HOME", ".local", "share")
	configHome := xdgDir("XDG_CONFIG_HOME", ".config")
	cacheHome := xdgDir("XDG_CACHE_HOME", ".cache")
	altAppHome := filepath.Join(homeDir, ".rd")
	paths := Paths{
		AppHome:                 filepath.Join(configHome, appName),
		AltAppHome:              altAppHome,
		Config:                  filepath.Join(configHome, appName),
		Cache:                   filepath.Join(cacheHome, appName),
		Lima:                    filepath.Join(dataHome, appName, "lima"),
		Integration:             filepath.Join(altAppHome, "bin"),
		DeploymentProfileSystem: filepath.Join("/etc", appName),
		DeploymentProfileUser:   configHome,
		ExtensionRoot:           filepath.Join(dataHome, appName, "extensions"),
		Snapshots:               filepath.Join(dataHome, appName, "snapshots"),
	}
	// RD_LOGS_DIR overrides the default log location when set.
	paths.Logs = os.Getenv("RD_LOGS_DIR")
	if paths.Logs == "" {
		paths.Logs = filepath.Join(dataHome, appName, "logs")
	}
	paths.Resources, err = resourcesPathFunc()
	if err != nil {
		return Paths{}, fmt.Errorf("failed to find resources directory: %w", err)
	}
	return paths, nil
}
|
package node
import (
"github.com/wcong/ants-go/ants/crawler"
"github.com/wcong/ants-go/ants/http"
"github.com/wcong/ants-go/ants/util"
)
// NodeInfo describes a crawler node's identity and configuration.
type NodeInfo struct {
	Name string
	Ip string
	Port int
	Settings *util.Settings
}

// Node is the behaviour of a crawler cluster member: spider lifecycle control,
// request distribution, result reporting, and master election.
// NOTE(review): "AddToCrawlingQuene" misspells "Queue"; it is kept as-is because
// renaming an interface method breaks every implementation and caller.
type Node interface {
	GetNodeInfo() *NodeInfo
	StartSpider(spiderName string) (bool, string)
	CloseSpider(spiderName string)
	AcceptRequest(request *http.Request)
	IsMe(nodeName string) bool
	DistributeRequest(request *http.Request)
	AddToCrawlingQuene(request *http.Request)
	ReportToMaster(result *crawler.ScrapeResult)
	AcceptResult(scrapyResult *crawler.ScrapeResult)
	CanWeStopSpider(spiderName string) bool
	IsStop() bool
	StopCrawl()
	MakeMasterNode(nodeName string)
	IsMasterNode() bool
	Join()
	Ready()
	StartCrawl()
	PauseCrawl()
	UnpauseCrawl()
	GetSpidersName() []string
}
|
// Package mock provides a mock implementation of session store and loader.
package mock
import (
"net/http"
"github.com/pomerium/pomerium/internal/encoding"
"github.com/pomerium/pomerium/internal/encoding/jws"
"github.com/pomerium/pomerium/internal/sessions"
)
var (
	// Compile-time checks that Store satisfies both session interfaces.
	_ sessions.SessionStore = &Store{}
	_ sessions.SessionLoader = &Store{}
)

// Store is a mock implementation of the SessionStore interface.
// Each field configures what the corresponding method returns.
type Store struct {
	ResponseSession string
	Session *sessions.State
	SaveError error
	LoadError error
	Secret []byte
	Encrypted bool
}

// ClearSession clears the ResponseSession.
func (ms *Store) ClearSession(http.ResponseWriter, *http.Request) {
	ms.ResponseSession = ""
}
// LoadSession returns the configured Session serialized as a signed JWT using
// the mock Secret, together with the configured LoadError. Signer construction
// and marshal errors are deliberately ignored: this is a test double.
func (ms Store) LoadSession(*http.Request) (string, error) {
	var signer encoding.MarshalUnmarshaler
	signer, _ = jws.NewHS256Signer(ms.Secret)
	token, _ := signer.Marshal(ms.Session)
	return string(token), ms.LoadError
}
// SaveSession does nothing except return the configured SaveError, letting
// tests simulate a failing (or succeeding) session save.
func (ms Store) SaveSession(http.ResponseWriter, *http.Request, interface{}) error {
	return ms.SaveError
}
|
package gh
import (
"context"
"github.com/google/go-github/github"
)
// publicReposFilter restricts org listing to public repositories.
var publicReposFilter = github.RepositoryListByOrgOptions{Type: "public"}

// openIssuesFilter restricts issue listing to open issues.
var openIssuesFilter = github.IssueListByRepoOptions{State: "open"}

// Client wraps a go-github client with org-wide aggregation helpers.
type Client struct {
	GithubClient *github.Client
}

// NewClient returns a Client backed by the given go-github client.
func NewClient(githubClient *github.Client) *Client {
	return &Client{
		GithubClient: githubClient,
	}
}
// PublicRepositories lists every public repository of org, following the API's
// pagination until an empty page or no next page is reported.
func (client *Client) PublicRepositories(ctx context.Context, org string) ([]*github.Repository, error) {
	opts := publicReposFilter // copy, so the shared filter is never mutated
	var repos []*github.Repository
	for {
		page, resp, err := client.GithubClient.Repositories.ListByOrg(
			ctx,
			org,
			&opts,
		)
		if err != nil {
			return nil, err
		}
		if len(page) == 0 {
			break
		}
		repos = append(repos, page...)
		if resp.NextPage == 0 {
			break
		}
		opts.ListOptions.Page = resp.NextPage
	}
	return repos, nil
}
// AllIssuesForOrganization collects the open issues of every public repository
// in org; any repository-level error aborts the whole aggregation.
func (client *Client) AllIssuesForOrganization(ctx context.Context, org string) ([]*github.Issue, error) {
	repos, err := client.PublicRepositories(ctx, org)
	if err != nil {
		return nil, err
	}
	var collected []*github.Issue
	for _, repo := range repos {
		repoIssues, err := client.AllIssues(ctx, repo)
		if err != nil {
			return nil, err
		}
		collected = append(collected, repoIssues...)
	}
	return collected, nil
}
// AllRepositoryCommentsForOrganization collects the commit comments of every
// public repository in org.
func (client *Client) AllRepositoryCommentsForOrganization(ctx context.Context, org string) ([]*github.RepositoryComment, error) {
	repos, err := client.PublicRepositories(ctx, org)
	if err != nil {
		return nil, err
	}
	var collected []*github.RepositoryComment
	for _, repo := range repos {
		comments, err := client.AllCommentsForRepository(ctx, repo)
		if err != nil {
			return nil, err
		}
		collected = append(collected, comments...)
	}
	return collected, nil
}
// AllIssueCommentsForOrganization collects the issue comments of every public
// repository in org.
func (client *Client) AllIssueCommentsForOrganization(ctx context.Context, org string) ([]*github.IssueComment, error) {
	repos, err := client.PublicRepositories(ctx, org)
	if err != nil {
		return nil, err
	}
	var collected []*github.IssueComment
	for _, repo := range repos {
		comments, err := client.AllIssueCommentsForRepository(ctx, repo)
		if err != nil {
			return nil, err
		}
		collected = append(collected, comments...)
	}
	return collected, nil
}
// AllIssues pages through the open issues of a single repository.
func (client *Client) AllIssues(ctx context.Context, repo *github.Repository) ([]*github.Issue, error) {
	opts := openIssuesFilter // copy, so the shared filter is never mutated
	var collected []*github.Issue
	for {
		page, resp, err := client.GithubClient.Issues.ListByRepo(
			ctx,
			*repo.Owner.Login,
			*repo.Name,
			&opts,
		)
		if err != nil {
			return nil, err
		}
		if len(page) == 0 {
			break
		}
		collected = append(collected, page...)
		if resp.NextPage == 0 {
			break
		}
		opts.ListOptions.Page = resp.NextPage
	}
	return collected, nil
}
// AllCommentsForRepository pages through every commit comment in a repository.
func (client *Client) AllCommentsForRepository(
	ctx context.Context,
	repo *github.Repository,
) ([]*github.RepositoryComment, error) {
	opts := &github.ListOptions{}
	var collected []*github.RepositoryComment
	for {
		page, resp, err := client.GithubClient.Repositories.ListComments(
			ctx,
			*repo.Owner.Login,
			*repo.Name,
			opts,
		)
		if err != nil {
			return nil, err
		}
		if len(page) == 0 {
			break
		}
		collected = append(collected, page...)
		if resp.NextPage == 0 {
			break
		}
		opts.Page = resp.NextPage
	}
	return collected, nil
}
// AllIssueCommentsForRepository pages through every issue comment in a
// repository. Passing 0 as the issue number asks go-github for comments
// across all issues in the repository rather than a single issue.
func (client *Client) AllIssueCommentsForRepository(
	ctx context.Context,
	repo *github.Repository,
) ([]*github.IssueComment, error) {
	const everyIssue = 0 // issue number 0 = comments for the whole repository
	opts := &github.IssueListCommentsOptions{}
	var collected []*github.IssueComment
	for {
		page, resp, err := client.GithubClient.Issues.ListComments(
			ctx,
			*repo.Owner.Login,
			*repo.Name,
			everyIssue,
			opts,
		)
		if err != nil {
			return nil, err
		}
		if len(page) == 0 {
			break
		}
		collected = append(collected, page...)
		if resp.NextPage == 0 {
			break
		}
		opts.Page = resp.NextPage
	}
	return collected, nil
}
|
package main
// emulates some C functions
import (
"fmt"
"os"
"unicode"
)
// UINT_MAX mirrors C's UINT_MAX (2^32 - 1).
const UINT_MAX = 4294967295

// pseudoStdin emulates C's stdin: the input is buffered up front and consumed
// byte-by-byte via an index.
type pseudoStdin struct {
	buf []byte // raw input, zero-padded to 1 MiB
	i int // read cursor into buf
}

// stdin is the process-wide emulated stdin instance.
var stdin *pseudoStdin

// initStdin populates the global stdin from the real os.Stdin.
func initStdin() {
	stdin = newStdin()
}

// newStdin slurps bytes from os.Stdin into a zero-padded 1 MiB buffer; getc
// treats the first NUL byte as EOF.
// NOTE(review): a single Read may return fewer bytes than are available, and
// its count/error are ignored — input delivered across multiple reads (or over
// 1 MiB) would be truncated. Confirm whether io.ReadFull-style semantics are
// wanted here.
func newStdin() *pseudoStdin {
	s := &pseudoStdin{}
	s.buf = make([]byte, 1024*1024)
	os.Stdin.Read(s.buf)
	return s
}
// getc returns the next byte of the emulated stdin and advances the cursor.
// A NUL byte (the buffer's zero padding) is treated as end-of-input and
// reported via the "EOL" error without advancing.
func getc(stdin *pseudoStdin) (byte, error) {
	next := stdin.buf[stdin.i]
	if next == 0 {
		return next, fmt.Errorf("EOL")
	}
	stdin.i++
	return next, nil
}
// ungetc steps the read cursor back one byte. The c parameter is unused; it
// exists only to mirror the C ungetc signature.
func ungetc(c byte, stdin *pseudoStdin) {
	stdin.i--
}
// isspace reports whether c is whitespace, per Unicode's definition.
func isspace(c byte) bool {
	return unicode.IsSpace(rune(c))
}

// isdigit reports whether c is a decimal digit.
func isdigit(c byte) bool {
	return unicode.IsDigit(rune(c))
}

// isalpha reports whether c is an ASCII letter (A-Z or a-z only; bytes above
// 0x7F are deliberately excluded, matching C's default-locale isalpha).
func isalpha(c byte) bool {
	folded := c | 0x20 // fold ASCII upper case onto lower case
	return 'a' <= folded && folded <= 'z'
}

// isalnum reports whether c is an ASCII letter or a decimal digit.
func isalnum(c byte) bool {
	return isalpha(c) || ('0' <= c && c <= '9')
}
// printf forwards to fmt.Printf, mirroring C's printf.
func printf(format string, args ...interface{}) {
	fmt.Printf(format, args...)
}
|
package handler
import (
"context"
"fmt"
"math"
"time"
"github.com/jinmukeji/jiujiantang-services/service/auth"
"github.com/golang/protobuf/ptypes"
"github.com/jinmukeji/go-pkg/v2/age"
corepb "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/core/v1"
subscriptionpb "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/subscription/v1"
jinmuidpb "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/user/v1"
)
const (
	// RegisterTypeUsername marks an account that was registered with a username.
	RegisterTypeUsername = "username"
)
// UserSignIn signs a user in with username/password credentials and resolves
// the user's currently selected subscription to fill in expiry information.
//
// Flow: delegate authentication to the jinmuid service; propagate the access
// token and user id; fetch subscriptions (a lookup failure or empty list yields
// zero remaining days rather than an error); activate the selected subscription
// if it is not yet activated; finally compute remaining days from the expiry.
func (j *JinmuHealth) UserSignIn(ctx context.Context, req *corepb.UserSignInRequest, resp *corepb.UserSignInResponse) error {
	reqUserSignInByUsernamePassword := new(jinmuidpb.UserSignInByUsernamePasswordRequest)
	reqUserSignInByUsernamePassword.Username = req.SignInKey
	reqUserSignInByUsernamePassword.HashedPassword = req.PasswordHash
	reqUserSignInByUsernamePassword.Seed = ""
	reqUserSignInByUsernamePassword.Ip = req.Ip
	respUserSignInByUsernamePassword, errUserSignInByUsernamePassword := j.jinmuidSvc.UserSignInByUsernamePassword(ctx, reqUserSignInByUsernamePassword)
	if errUserSignInByUsernamePassword != nil {
		return errUserSignInByUsernamePassword
	}
	resp.AccessToken = respUserSignInByUsernamePassword.AccessToken
	resp.UserId = respUserSignInByUsernamePassword.UserId
	reqGetUserSubscriptions := new(subscriptionpb.GetUserSubscriptionsRequest)
	reqGetUserSubscriptions.UserId = respUserSignInByUsernamePassword.UserId
	// Subsequent subscription calls are made on the user's behalf.
	ctx = auth.AddContextToken(ctx, respUserSignInByUsernamePassword.AccessToken)
	respGetUserSubscriptions, errGetUserSubscriptions := j.subscriptionSvc.GetUserSubscriptions(ctx, reqGetUserSubscriptions)
	if errGetUserSubscriptions != nil || len(respGetUserSubscriptions.Subscriptions) == 0 {
		// No usable subscription: report a zeroed expiry instead of failing the sign-in.
		resp.RemainDays = 0
		resp.ExpireTime = ptypes.TimestampNow()
		return nil
	}
	// Pick the subscription currently in use (the last selected one wins).
	selectedSubscription := new(subscriptionpb.Subscription)
	for _, item := range respGetUserSubscriptions.Subscriptions {
		if item.IsSelected {
			selectedSubscription = item
		}
	}
	// Activate the subscription on first use.
	if !selectedSubscription.Activated {
		reqActivateSubscription := new(subscriptionpb.ActivateSubscriptionRequest)
		reqActivateSubscription.SubscriptionId = selectedSubscription.SubscriptionId
		respActivateSubscription, errActivateSubscription := j.subscriptionSvc.ActivateSubscription(ctx, reqActivateSubscription)
		// BUG FIX: the error must be checked before using the response;
		// previously respActivateSubscription.ExpiredTime was read first,
		// dereferencing a nil response whenever activation failed.
		if errActivateSubscription != nil {
			return errActivateSubscription
		}
		selectedSubscription.ExpiredTime = respActivateSubscription.ExpiredTime
	}
	// NOTE(review): the conversion error is deliberately ignored; an invalid
	// proto timestamp degrades to the zero time.
	expiredAt, _ := ptypes.Timestamp(selectedSubscription.ExpiredTime)
	resp.RemainDays = getRemainDays(expiredAt.UTC())
	resp.ExpireTime = selectedSubscription.ExpiredTime
	resp.AccessTokenExpiredTime = respUserSignInByUsernamePassword.ExpiredTime
	return nil
}
// 得到剩余时间
func getRemainDays(expiredAt time.Time) int32 {
return int32(math.Ceil(time.Until(expiredAt).Hours() / 24))
}
// GetUserByUserName looks a user up by username and maps the database record
// onto the protobuf User, including the nested profile.
func (j *JinmuHealth) GetUserByUserName(ctx context.Context, req *corepb.GetUserByUserNameRequest, resp *corepb.GetUserByUserNameResponse) error {
	u, err := j.datastore.FindUserByUsername(ctx, req.Username)
	if err != nil {
		return NewError(ErrDatabase, fmt.Errorf("failed to find user by username %s: %s", req.Username, err.Error()))
	}
	// NOTE(review): the conversion error is ignored — an invalid birthday
	// degrades to a nil timestamp in the profile.
	birth, _ := ptypes.TimestampProto(u.Birthday)
	gender, errMapDBGenderToProto := mapDBGenderToProto(u.Gender)
	if errMapDBGenderToProto != nil {
		return NewError(ErrInvalidGender, errMapDBGenderToProto)
	}
	resp.User = &corepb.User{
		UserId: int32(u.UserID),
		Username: u.Username,
		RegisterType: u.RegisterType,
		IsProfileCompleted: u.IsProfileCompleted,
		IsRemovable: u.IsRemovable,
		Profile: &corepb.UserProfile{
			Nickname: u.Nickname,
			BirthdayTime: birth,
			// Age is derived from the stored birthday at response time.
			Age: int32(age.Age(u.Birthday)),
			Gender: gender,
			Height: int32(u.Height),
			Weight: int32(u.Weight),
			Phone: u.Phone,
			Email: u.Email,
			UserDefinedCode: u.UserDefinedCode,
			Remark: u.Remark,
			State: u.State,
			City: u.City,
			Street: u.Street,
			Country: u.Country,
		},
	}
	return nil
}
|
package c34_mitm_diffie_hellman
import (
"math/big"
)
// Point models one endpoint of a Diffie-Hellman echo exchange: it can register
// a peer, announce its DH parameters and public key, exchange keys, and
// send/receive/echo messages. The exact encryption contract lives in the
// implementations.
type Point interface {
	SetReceiver(p Point)
	SendPGK()
	ReceivePGK(p, g, pKA *big.Int)
	SendK()
	ReceiveK(pKB *big.Int)
	SendMessage([]byte)
	ReceiveMessage([]byte)
	ReturnMessage()
}
// EchoStream wires A and B directly to each other, runs the DH handshake
// (A announces p/g/public key, B answers with its key), then has A send msg
// and B echo it back.
func EchoStream(uA, uB Point, msg []byte) {
	uA.SetReceiver(uB)
	uB.SetReceiver(uA)
	// Handshake: parameters/key from A, key from B.
	uA.SendPGK()
	uB.SendK()
	// Message round-trip.
	uA.SendMessage(msg)
	uB.ReturnMessage()
}
// EchoMITMStream runs the same exchange as EchoStream but with M interposed
// between A and B, so M can observe or tamper with the handshake and traffic.
// NOTE(review): uM.SetReceiver is called twice (with uA, then uB); unless the
// MITM implementation tracks both sides internally, the second call overwrites
// the first — confirm against the concrete uM Point implementation.
func EchoMITMStream(uA, uM, uB Point, msg []byte) {
	uA.SetReceiver(uM)
	uM.SetReceiver(uA)
	uM.SetReceiver(uB)
	uB.SetReceiver(uM)
	uA.SendPGK()
	uB.SendK()
	uA.SendMessage(msg)
	uB.ReturnMessage()
}
|
package main
import (
"../core"
)
// main creates a 25x50 world and runs the simulation to completion.
// NOTE(review): whether (25, 50) means (rows, cols) or (width, height) depends
// on core.NewWorld — confirm before changing the values.
func main() {
	w := core.NewWorld(25, 50)
	w.Run()
}
|
package main
// main exercises largestRectangleArea on a sample histogram; the result is
// discarded, so this entry point only serves as a smoke test.
func main() {
	largestRectangleArea([]int{1, 3, 4, 56, 6, 2})
}
// largestRectangleArea returns the area of the largest axis-aligned rectangle
// that fits under the histogram described by heights (LeetCode 84).
//
// A monotonic index stack is used: sentinels -1 (front) and 0 (back) guarantee
// the stack is never emptied and that every bar is eventually popped. When a
// bar is popped, the current index and the new stack top bound the widest span
// in which that bar is the minimum height. Runs in O(n).
func largestRectangleArea(heights []int) int {
	padded := make([]int, 0, len(heights)+2)
	padded = append(padded, -1)
	padded = append(padded, heights...)
	padded = append(padded, 0)
	best := 0
	stack := []int{0} // indices into padded, heights non-decreasing
	for i := 1; i < len(padded); i++ {
		// Pop every bar taller than the current one; each pop finalizes
		// that bar's maximal rectangle.
		for padded[i] < padded[stack[len(stack)-1]] {
			top := padded[stack[len(stack)-1]]
			stack = stack[:len(stack)-1]
			if area := (i - stack[len(stack)-1] - 1) * top; area > best {
				best = area
			}
		}
		stack = append(stack, i)
	}
	return best
}
|
package player
// NotePlayer is an interface for playing notes. This allows for swapping out
// different types of players.
// PlayNotes plays the named notes and returns an implementation-defined value;
// the interface{} return exists so unit tests can inspect the output.
type NotePlayer interface {
	PlayNotes(noteNames []string) interface{}
}
|
package etcdraft
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/golang/mock/gomock"
crypto2 "github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/meshplus/bitxhub-kit/crypto"
"github.com/meshplus/bitxhub-kit/crypto/asym"
"github.com/meshplus/bitxhub-kit/crypto/asym/ecdsa"
"github.com/meshplus/bitxhub-kit/log"
"github.com/meshplus/bitxhub-kit/types"
"github.com/meshplus/bitxhub-model/pb"
"github.com/meshplus/bitxhub/internal/ledger/mock_ledger"
"github.com/meshplus/bitxhub/internal/model/events"
"github.com/meshplus/bitxhub/internal/repo"
"github.com/meshplus/bitxhub/pkg/cert"
"github.com/meshplus/bitxhub/pkg/order"
"github.com/meshplus/bitxhub/pkg/peermgr"
"github.com/meshplus/bitxhub/pkg/peermgr/mock_peermgr"
ma "github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// to is the fixed recipient address used by generateTx.
const to = "0x3f9d18f7c3a6e5e4c0b877fe3e688ab08840b997"
// TestNode_Start boots a single-node raft order from a temp repo, submits one
// transaction, and expects it to be committed in block #2.
func TestNode_Start(t *testing.T) {
	repoRoot, err := ioutil.TempDir("", "node")
	assert.Nil(t, err)
	defer os.RemoveAll(repoRoot)
	var ID uint64 = 1
	nodes := make(map[uint64]types.Address)
	hash := types.NewAddressByStr("000000000000000000000000000000000000000a")
	nodes[ID] = *hash
	fileData, err := ioutil.ReadFile("../../../config/order.toml")
	require.Nil(t, err)
	err = ioutil.WriteFile(filepath.Join(repoRoot, "order.toml"), fileData, 0644)
	require.Nil(t, err)
	mockCtl := gomock.NewController(t)
	mockPeermgr := mock_peermgr.NewMockPeerManager(mockCtl)
	peers := make(map[uint64]*peer.AddrInfo)
	mockPeermgr.EXPECT().Peers().Return(peers).AnyTimes()
	// NOTE(review): this short declaration shadows the imported `order` package
	// for the rest of the function; the package is still usable inside the
	// argument list because the new variable's scope begins after the statement.
	order, err := NewNode(
		order.WithRepoRoot(repoRoot),
		order.WithID(ID),
		order.WithNodes(nodes),
		order.WithPeerManager(mockPeermgr),
		order.WithStoragePath(repo.GetStoragePath(repoRoot, "order")),
		order.WithLogger(log.NewWithModule("consensus")),
		order.WithApplied(1),
	)
	require.Nil(t, err)
	err = order.Start()
	require.Nil(t, err)
	// Poll until the raft node reports ready.
	for {
		time.Sleep(200 * time.Millisecond)
		if order.Ready() {
			break
		}
	}
	tx := generateTx()
	err = order.Prepare(tx)
	require.Nil(t, err)
	// Genesis is block 1, so the first committed block is number 2.
	block := <-order.Commit()
	require.Equal(t, uint64(2), block.BlockHeader.Number)
	require.Equal(t, 1, len(block.Transactions))
	order.Stop()
}
// TestMulti_Node_Start boots a four-node raft cluster over real libp2p swarms,
// submits one transaction to node 0, and expects every node to commit it in
// block #2.
func TestMulti_Node_Start(t *testing.T) {
	peerCnt := 4
	swarms, nodes := newSwarms(t, peerCnt)
	repoRoot, err := ioutil.TempDir("", "nodes")
	// BUG FIX: the TempDir error was previously never checked before use.
	require.Nil(t, err)
	defer os.RemoveAll(repoRoot)
	fileData, err := ioutil.ReadFile("../../../config/order.toml")
	require.Nil(t, err)
	orders := make([]order.Order, 0)
	for i := 0; i < peerCnt; i++ {
		nodePath := fmt.Sprintf("node%d", i)
		nodeRepo := filepath.Join(repoRoot, nodePath)
		err := os.Mkdir(nodeRepo, 0744)
		require.Nil(t, err)
		orderPath := filepath.Join(nodeRepo, "order.toml")
		err = ioutil.WriteFile(orderPath, fileData, 0744)
		require.Nil(t, err)
		ID := i + 1
		// The short declaration shadows the `order` package below this point;
		// package functions remain visible inside the argument list.
		order, err := NewNode(
			order.WithRepoRoot(nodeRepo),
			order.WithID(uint64(ID)),
			order.WithNodes(nodes),
			order.WithPeerManager(swarms[i]),
			order.WithStoragePath(repo.GetStoragePath(nodeRepo, "order")),
			order.WithLogger(log.NewWithModule("consensus")),
			order.WithApplied(1),
		)
		require.Nil(t, err)
		err = order.Start()
		require.Nil(t, err)
		orders = append(orders, order)
		// Pump swarm order messages into this node for the rest of the test.
		go listen(t, order, swarms[i])
	}
	// Wait for the first node to report ready before submitting work.
	for {
		time.Sleep(200 * time.Millisecond)
		if orders[0].Ready() {
			break
		}
	}
	tx := generateTx()
	err = orders[0].Prepare(tx)
	require.Nil(t, err)
	for i := 0; i < len(orders); i++ {
		block := <-orders[i].Commit()
		require.Equal(t, uint64(2), block.BlockHeader.Number)
		require.Equal(t, 1, len(block.Transactions))
	}
}
// listen pumps order messages received on the swarm into the raft node. It
// loops forever; the subscription is released only when the goroutine dies
// with the test process.
func listen(t *testing.T, order order.Order, swarm *peermgr.Swarm) {
	orderMsgCh := make(chan events.OrderMessageEvent)
	sub := swarm.SubscribeOrderMessage(orderMsgCh)
	defer sub.Unsubscribe()
	for {
		select {
		case ev := <-orderMsgCh:
			err := order.Step(context.Background(), ev.Data)
			require.Nil(t, err)
		}
	}
}
// generateTx builds a minimal signed transaction from a freshly generated
// Secp256k1 key to the fixed `to` address, with nonce 1 and the current time.
// Key-generation, address and signing errors are ignored — acceptable in a test
// helper where the inputs are known-good.
func generateTx() *pb.Transaction {
	privKey, _ := asym.GenerateKeyPair(crypto.Secp256k1)
	from, _ := privKey.PublicKey().Address()
	tx := &pb.Transaction{
		From: from,
		To: types.NewAddressByStr(to),
		Timestamp: time.Now().UnixNano(),
		Nonce: 1,
	}
	_ = tx.Sign(privKey)
	tx.TransactionHash = tx.Hash()
	return tx
}
// genKeysAndConfig generates, for each of peerCnt peers: a libp2p node key
// (ECDSA P-256), a Secp256k1 account key, and a local multiaddr on sequential
// TCP ports starting at 7001.
func genKeysAndConfig(t *testing.T, peerCnt int) ([]crypto2.PrivKey, []crypto.PrivateKey, []string) {
	var nodeKeys []crypto2.PrivKey
	var privKeys []crypto.PrivateKey
	var peers []string
	port := 7001
	for i := 0; i < peerCnt; i++ {
		key, err := asym.GenerateKeyPair(crypto.ECDSA_P256)
		require.Nil(t, err)
		libp2pKey, err := convertToLibp2pPrivKey(key)
		require.Nil(t, err)
		nodeKeys = append(nodeKeys, libp2pKey)
		id, err := peer.IDFromPublicKey(libp2pKey.GetPublic())
		require.Nil(t, err)
		// NOTE(review): this local shadows the imported `peer` package for the
		// rest of the iteration; the package is not referenced again below.
		peer := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d/p2p/%s", port, id)
		peers = append(peers, peer)
		port++
		privKey, err := asym.GenerateKeyPair(crypto.Secp256k1)
		require.Nil(t, err)
		privKeys = append(privKeys, privKey)
	}
	return nodeKeys, privKeys, peers
}
// convertToLibp2pPrivKey adapts a bitxhub ECDSA private key into a libp2p
// private key; any other key type is rejected.
func convertToLibp2pPrivKey(privateKey crypto.PrivateKey) (crypto2.PrivKey, error) {
	ecdsaKey, ok := privateKey.(*ecdsa.PrivateKey)
	if !ok {
		return nil, fmt.Errorf("convert to libp2p private key: not ecdsa private key")
	}
	libp2pKey, _, err := crypto2.ECDSAKeyPairFromKey(ecdsaKey.K)
	if err != nil {
		return nil, err
	}
	return libp2pKey, nil
}
// otherPeers builds the AddrInfo map of every peer except the node identified
// by id, keyed by 1-based member index. Parse errors are ignored, matching the
// test fixture's assumption that all addresses are well-formed.
func otherPeers(id uint64, addrs []string) map[uint64]*peer.AddrInfo {
	peersByID := make(map[uint64]*peer.AddrInfo, len(addrs))
	for i, raw := range addrs {
		memberID := uint64(i + 1)
		if memberID == id {
			continue
		}
		maddr, _ := ma.NewMultiaddr(raw)
		info, _ := peer.AddrInfoFromP2pAddr(maddr)
		peersByID[memberID] = info
	}
	return peersByID
}
// newSwarms constructs peerCnt started libp2p swarms with freshly generated
// keys and shared test certificates, returning the swarms and the map from
// node ID to the address derived from each node's Secp256k1 key.
func newSwarms(t *testing.T, peerCnt int) ([]*peermgr.Swarm, map[uint64]types.Address) {
	var swarms []*peermgr.Swarm
	nodes := make(map[uint64]types.Address)
	nodeKeys, privKeys, addrs := genKeysAndConfig(t, peerCnt)
	mockCtl := gomock.NewController(t)
	mockLedger := mock_ledger.NewMockLedger(mockCtl)
	agencyData, err := ioutil.ReadFile("testdata/agency.cert")
	require.Nil(t, err)
	nodeData, err := ioutil.ReadFile("testdata/node.cert")
	require.Nil(t, err)
	caData, err := ioutil.ReadFile("testdata/ca.cert")
	require.Nil(t, err)
	// NOTE(review): this shadows the imported `cert` package below this point.
	cert, err := cert.ParseCert(caData)
	require.Nil(t, err)
	for i := 0; i < peerCnt; i++ {
		ID := i + 1
		// NOTE(review): `repo` likewise shadows the repo package inside the loop.
		repo := &repo.Repo{
			Key: &repo.Key{},
			NetworkConfig: &repo.NetworkConfig{
				N: uint64(peerCnt),
				ID: uint64(ID),
			},
			Certs: &repo.Certs{
				NodeCertData: nodeData,
				AgencyCertData: agencyData,
				CACert: cert,
			},
		}
		var local string
		id, err := peer.IDFromPublicKey(nodeKeys[i].GetPublic())
		require.Nil(t, err)
		// Strip the trailing /p2p/<id> suffix to obtain the listen address.
		if strings.HasSuffix(addrs[i], id.String()) {
			idx := strings.LastIndex(addrs[i], "/p2p/")
			local = addrs[i][:idx]
		}
		repo.NetworkConfig.LocalAddr = local
		repo.Key.Libp2pPrivKey = nodeKeys[i]
		repo.Key.PrivKey = privKeys[i]
		repo.NetworkConfig.OtherNodes = otherPeers(uint64(ID), addrs)
		address, err := privKeys[i].PublicKey().Address()
		require.Nil(t, err)
		nodes[uint64(ID)] = *address
		swarm, err := peermgr.New(repo, log.NewWithModule("p2p"), mockLedger)
		require.Nil(t, err)
		err = swarm.Start()
		require.Nil(t, err)
		swarms = append(swarms, swarm)
	}
	return swarms, nodes
}
package main
import (
"fmt"
"io/ioutil"
"net/http"
)
// main sends an authenticated GET to a local streaming-server admin endpoint
// (default credentials admin:admin), instructing it to point the relay prefs
// at /etc/streaming/relayconfig.xml, and prints the response body.
func main() {
	client := &http.Client{}
	request, err := http.NewRequest("GET", "http://127.0.0.1:554/modules/admin/server/qtssSvrModuleObjects/QTSSRelayModule/qtssModPrefs/relay_prefs_file?command=set+value=\"/etc/streaming/relayconfig.xml\"", nil)
	if err != nil {
		fmt.Println("build request:", err)
		return
	}
	// Hard-coded "admin:admin" basic-auth header.
	request.Header.Add("Authorization", `Basic YWRtaW46YWRtaW4=`)
	response, err := client.Do(request)
	if err != nil {
		// BUG FIX: this error used to be ignored, so an unreachable server
		// caused a nil-pointer dereference on response.Body below.
		fmt.Println("request failed:", err)
		return
	}
	defer response.Body.Close()
	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		fmt.Println("read body:", err)
		return
	}
	fmt.Println(string(body))
}
|
package znet
import (
"errors"
"log"
"net"
"sync"
"zinxWebsocket/utils"
"zinxWebsocket/ziface"
"github.com/gorilla/websocket"
)
// Connection manages a single websocket connection: its identity, read/write
// channels, router binding, and per-connection properties.
type Connection struct {
	// WsServer is the server this connection belongs to.
	WsServer ziface.IServer
	// Conn is the underlying gorilla websocket connection.
	Conn *websocket.Conn
	// ConnID is the connection's identifier.
	ConnID uint32
	// isClosed records whether the connection has been stopped.
	isClosed bool
	// ExitChan signals that the connection should exit/stop; the reader
	// goroutine is the producer of this signal.
	ExitChan chan bool
	// msgChan carries unbuffered (capacity-1) writes to the writer goroutine.
	msgChan chan string
	// msgBuffChan carries buffered writes to the writer goroutine.
	msgBuffChan chan string
	// MsgHandle is the router that binds message IDs to handler APIs.
	MsgHandle ziface.IMsgHandle
	// property holds arbitrary per-connection attributes.
	property map[string]interface{}
	// propertyLock protects property.
	propertyLock sync.RWMutex
	// messageType is websocket.TextMessage or websocket.BinaryMessage.
	// NOTE(review): a json tag on an unexported field has no effect.
	messageType int `json:"messageType"`
}
// NewConnection initializes a Connection bound to the given server, socket,
// ID and router, and registers it with the server's connection manager. The
// message type defaults to the text protocol.
func NewConnection(server ziface.IServer, conn *websocket.Conn, connID uint32, mh ziface.IMsgHandle) *Connection {
	connection := &Connection{
		WsServer:    server,
		Conn:        conn,
		ConnID:      connID,
		MsgHandle:   mh,
		isClosed:    false,
		msgChan:     make(chan string, 1),
		msgBuffChan: make(chan string, utils.GlobalObject.MaxMsgChanLen),
		ExitChan:    make(chan bool, 1),
		property:    make(map[string]interface{}),
		messageType: websocket.TextMessage, // text protocol by default
	}
	// Register with the server's connection manager.
	connection.WsServer.GetConnMgr().Add(connection)
	return connection
}
// StartReader is the read pump: it receives frames from the websocket, wraps
// each payload in a Request, and hands it to the router (or the worker pool
// when one is configured). A read error ends the loop and the deferred Stop
// tears the connection down.
func (c *Connection) StartReader() {
	log.Println("connection StartReader start connid:", c.ConnID)
	defer log.Println("connection StartReader exit connid:", c.ConnID, " remoteip:", c.Conn.RemoteAddr())
	defer c.Stop()
	// Read loop.
	for {
		// messageType is TextMessage or BinaryMessage.
		messageType, data, err := c.Conn.ReadMessage()
		if err != nil {
			log.Println("connection startReader read err:", err)
			break
		}
		c.messageType = messageType // adopt whatever type the client sends
		log.Println("connection StartReader recv from client1:", string(data))
		// Package the payload as a request for the router.
		req := &Request{
			conn: c,
			message: string(data),
		}
		// Dispatch via the worker pool when one is configured.
		if utils.GlobalObject.WorkerPoolSize > 0 {
			c.MsgHandle.SendMsgToTaskQueue(req)
		} else {
			// Per the gorilla/websocket docs this must not run concurrently,
			// so the handler is invoked inline rather than in a goroutine.
			// go c.MsgHandle.DoMsgHandler(req)
			c.MsgHandle.DoMsgHandler(req)
		}
	}
}
// StartWriter is the write pump: it forwards messages from msgChan and
// msgBuffChan to the websocket peer until a write fails, the buffered channel
// is closed, or ExitChan signals that the reader has stopped. Exiting the
// goroutine triggers the deferred Stop, which tears the connection down.
func (c *Connection) StartWriter() {
	log.Println("connection StartWriter start")
	defer log.Println("connection StartWriter exit connid:", c.ConnID, " remoteip:", c.Conn.RemoteAddr())
	defer c.Stop()
	for {
		select {
		case msg := <-c.msgChan:
			// Message from the unbuffered channel.
			if err := c.Conn.WriteMessage(c.messageType, []byte(msg)); err != nil {
				// A failed write closes the connection.
				log.Println("connection StartWriter msgchan err:", err)
				return
			}
		case msg, ok := <-c.msgBuffChan:
			if !ok {
				// BUG FIX: `break` here only left the select statement, so a
				// closed msgBuffChan made this loop spin forever re-reading the
				// closed channel; the goroutine must return instead.
				log.Println("connection StartWriter msgbuff is closed")
				return
			}
			if err := c.Conn.WriteMessage(c.messageType, []byte(msg)); err != nil {
				// A failed write closes the connection.
				log.Println("connection StartWriter msgbuff err:", err)
				return
			}
		case <-c.ExitChan:
			// The reader side has exited.
			return
		}
	}
}
// Start puts the connection to work: it launches the read and write pumps
// (per the gorilla/websocket docs, exactly one goroutine for each direction)
// and then invokes the developer-supplied on-connect callback.
func (c *Connection) Start() {
	log.Println("connection Start connid:", c.ConnID)
	// Launch the read pump.
	go c.StartReader()
	// Launch the write pump.
	go c.StartWriter()
	// Invoke the developer-supplied on-connect callback.
	c.WsServer.CallOnConnStart(c)
}
// Stop shuts the connection down: it runs the developer's on-stop callback,
// closes the websocket, signals the writer, deregisters from the connection
// manager, and closes all channels.
//
// NOTE(review): isClosed is read and written without holding a lock, while
// both the reader and writer goroutines defer Stop — the guard prevents the
// double-close only when the two calls do not race. Confirm whether a mutex
// or sync.Once is needed here.
func (c *Connection) Stop() {
	log.Println("connection stop start connid:", c.ConnID, " remoteAddr:", c.RemoteAddr())
	// Already closed?
	if c.isClosed == true {
		return
	}
	c.isClosed = true
	// Run the developer's on-stop callback before the socket goes away.
	c.WsServer.CallOnConnStop(c)
	// Close the websocket itself.
	c.Conn.Close()
	// Tell the writer to exit (ExitChan has capacity 1, so this does not block).
	c.ExitChan <- true
	// Deregister from the connection manager.
	c.WsServer.GetConnMgr().Remove(c)
	// Release the channels.
	close(c.ExitChan)
	close(c.msgChan)
	close(c.msgBuffChan)
	log.Println("connection stop end connid:", c.ConnID, " isClosed:", c.isClosed)
}
// GetWsConnection returns the underlying gorilla websocket connection.
func (c *Connection) GetWsConnection() *websocket.Conn {
	return c.Conn
}
// GetConnID returns this connection's unique id.
func (c *Connection) GetConnID() uint32 {
	return c.ConnID
}
// RemoteAddr returns the remote client's network address.
// (Further client info such as the user agent could be added later.)
func (c *Connection) RemoteAddr() net.Addr {
	return c.Conn.RemoteAddr()
}
// SendMsg queues data for delivery to the remote client via the unbuffered
// channel; it blocks until the writer goroutine picks the message up.
// Returns an error when the connection has already been closed.
// NOTE(review): the isClosed check and the channel send are not atomic —
// a concurrent Stop closing msgChan in between would panic; confirm.
func (c *Connection) SendMsg(data string) error {
	if c.isClosed {
		return errors.New("connection sendmsg is closed1")
	}
	// Hand the message to the writer goroutine.
	c.msgChan <- data
	return nil
}
// SendByteMsg sends raw bytes to the remote client via the unbuffered path.
func (c *Connection) SendByteMsg(data []byte) error {
	return c.SendMsg(string(data))
}
// SendBuffByteMsg sends raw bytes to the remote client via the buffered path.
func (c *Connection) SendBuffByteMsg(data []byte) error {
	return c.SendBuffMsg(string(data))
}
// SendBuffMsg queues data for delivery via the buffered channel, so it only
// blocks once the buffer is full. Returns an error when the connection has
// already been closed.
func (c *Connection) SendBuffMsg(data string) error {
	if c.isClosed {
		return errors.New("connection SendBuffMsg is closed1")
	}
	// Hand the message to the writer goroutine.
	c.msgBuffChan <- data
	return nil
}
// SetProperty stores a user-defined property on the connection under key.
// Safe for concurrent use (guarded by propertyLock).
func (c *Connection) SetProperty(key string, value interface{}) {
	c.propertyLock.Lock()
	defer c.propertyLock.Unlock()
	c.property[key] = value
}
// GetProperty returns the user-defined property stored under key, or an
// error when no such property exists. Safe for concurrent use.
func (c *Connection) GetProperty(key string) (interface{}, error) {
	c.propertyLock.RLock()
	defer c.propertyLock.RUnlock()
	value, ok := c.property[key]
	if !ok {
		return nil, errors.New("connection getproperty get error key:" + key)
	}
	return value, nil
}
// RemoveProperty deletes the user-defined property stored under key.
// Deleting a missing key is a no-op. Safe for concurrent use.
func (c *Connection) RemoveProperty(key string) {
	c.propertyLock.Lock()
	defer c.propertyLock.Unlock()
	delete(c.property, key)
}
// //SetMessageType sets the websocket message type (kept for reference).
// // func (c *Connection) SetMessageType(mt int) {
// // 	c.messageType = mt
// // }
// GetMessageType returns the websocket message type (Text/Binary) last
// observed from the client; outgoing writes mirror this type.
func (c *Connection) GetMessageType() int {
	return c.messageType
}
// IsClosed reports whether this connection has been stopped.
func (c *Connection) IsClosed() bool {
	return c.isClosed
}
|
package std
import (
"fmt"
"github.com/gopherjs/gopherjs/js"
"github.com/iansmith/tropical"
)
//This implementation assumes that the browser is doing double buffering so there
//no need to do that ourselves.
//http://stackoverflow.com/questions/2795269/does-html5-canvas-support-double-buffering
// canvasImpl wraps an HTML <canvas> element and its 2D drawing context,
// caching the element's width/height attributes read at construction time.
type canvasImpl struct {
	element, context      *js.Object
	htmlWidth, htmlHeight int
}
// NewCanvas looks up the <canvas> element with the given id and wraps it,
// together with its "2d" drawing context, in a tropical.Canvas. It panics
// when the element cannot be found, since that means the Go code and the
// HTML page are out of sync.
func NewCanvas(elementName string) tropical.Canvas {
	elem := js.Global.Get("document").Call("getElementById", elementName)
	if elem == nil {
		// BUG FIX: the original used fmt.Sprint with a %s format string,
		// which does no substitution; Sprintf includes the element name.
		// NOTE(review): getElementById on a missing id yields JS null, which
		// may not compare equal to Go nil under GopherJS — confirm this
		// guard actually fires.
		panic(fmt.Sprintf("your code and html are out of sync, missing reference: %s", elementName))
	}
	ctx := elem.Call("getContext", "2d")
	result := &canvasImpl{
		element:    elem,
		context:    ctx,
		htmlWidth:  elem.Get("width").Int(),
		htmlHeight: elem.Get("height").Int(),
	}
	return result
}
//
// DOM Level Methods
//
// Width returns the canvas element's width attribute as read at creation.
func (c *canvasImpl) Width() int {
	return c.htmlWidth
}
// Height returns the canvas element's height attribute as read at creation.
func (c *canvasImpl) Height() int {
	return c.htmlHeight
}
// Context returns the underlying 2D drawing context object.
func (c *canvasImpl) Context() *js.Object {
	return c.context
}
// Element returns the underlying canvas DOM element.
func (c *canvasImpl) Element() *js.Object {
	return c.element
}
//
// Convenience Methods
//
// FillRectangle draws a filled rectangle at (x, y) with size w×h using the
// current fill style.
func (c *canvasImpl) FillRectangle(x, y, w, h int) {
	c.BeginPath()
	c.Rectangle(x, y, w, h)
	c.Fill()
}
// DrawLine strokes a straight line from (x1, y1) to (x2, y2) using the
// current stroke style.
func (c *canvasImpl) DrawLine(x1, y1, x2, y2 int) {
	c.BeginPath()
	c.MoveTo(x1, y1)
	c.LineTo(x2, y2)
	c.Stroke()
}
//
// Pass through functions to the 2d drawing context
//
// Each method below forwards directly to the equivalent CanvasRenderingContext2D
// call (or property) on the wrapped context object.
func (c *canvasImpl) Translate(x, y int) {
	c.context.Call("translate", x, y)
}
func (c *canvasImpl) MoveTo(x, y int) {
	c.context.Call("moveTo", x, y)
}
func (c *canvasImpl) LineTo(x, y int) {
	c.context.Call("lineTo", x, y)
}
func (c *canvasImpl) Save() {
	c.context.Call("save")
}
func (c *canvasImpl) Fill() {
	c.context.Call("fill")
}
func (c *canvasImpl) Stroke() {
	c.context.Call("stroke")
}
func (c *canvasImpl) Restore() {
	c.context.Call("restore")
}
func (c *canvasImpl) BeginPath() {
	c.context.Call("beginPath")
}
func (c *canvasImpl) Rectangle(x, y, w, h int) {
	c.context.Call("rect", x, y, w, h)
}
func (c *canvasImpl) Clip() {
	c.context.Call("clip")
}
// SetFillColor sets the fill style to any CSS color string (e.g. "#fff",
// "rgb(1,2,3)").
func (c *canvasImpl) SetFillColor(rgbish string) {
	c.context.Set("fillStyle", rgbish)
}
// SetStrokeColor sets the stroke style to any CSS color string.
func (c *canvasImpl) SetStrokeColor(rgbish string) {
	c.context.Set("strokeStyle", rgbish)
}
// DrawImageById draws the image element with the given DOM id at (x, y).
func (c *canvasImpl) DrawImageById(id string, x, y int) {
	img := js.Global.Get("document").Call("getElementById", id)
	c.context.Call("drawImage", img, x, y)
}
// Arc adds a clockwise arc centered at (x, y) with the given radius between
// startAngle and finishAngle (radians).
func (c *canvasImpl) Arc(x, y, radius int, startAngle, finishAngle float64) {
	c.context.Call("arc", x, y, radius, startAngle, finishAngle, false)
}
|
// Copyright 2019-2023 The sakuracloud_exporter Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector
import (
"context"
"fmt"
"log/slog"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/sacloud/iaas-api-go/types"
"github.com/sacloud/packages-go/newsfeed"
"github.com/sacloud/sakuracloud_exporter/platform"
)
// DatabaseCollector collects metrics about all databases.
type DatabaseCollector struct {
	ctx    context.Context
	logger *slog.Logger
	// errors counts collection failures, labeled by collector name.
	errors *prometheus.CounterVec
	client platform.DatabaseClient

	// Up is 1 while the database instance is running, 0 otherwise.
	Up           *prometheus.Desc
	// DatabaseInfo is a constant-1 metric carrying descriptive labels.
	DatabaseInfo *prometheus.Desc
	CPUTime      *prometheus.Desc
	MemoryUsed   *prometheus.Desc
	MemoryTotal  *prometheus.Desc
	// NICInfo is a constant-1 metric carrying NIC labels.
	NICInfo    *prometheus.Desc
	NICReceive *prometheus.Desc
	NICSend    *prometheus.Desc
	// Disk usage/throughput descriptors.
	SystemDiskUsed  *prometheus.Desc
	SystemDiskTotal *prometheus.Desc
	BackupDiskUsed  *prometheus.Desc
	BackupDiskTotal *prometheus.Desc
	BinlogUsed      *prometheus.Desc
	DiskRead        *prometheus.Desc
	DiskWrite       *prometheus.Desc

	ReplicationDelay *prometheus.Desc

	// Maintenance schedule descriptors (populated from the news feed).
	MaintenanceScheduled *prometheus.Desc
	MaintenanceInfo      *prometheus.Desc
	MaintenanceStartTime *prometheus.Desc
	MaintenanceEndTime   *prometheus.Desc
}
// NewDatabaseCollector returns a new DatabaseCollector.
// It primes the "database" error counter at zero (so the series exists
// before the first failure) and pre-builds every metric descriptor.
func NewDatabaseCollector(ctx context.Context, logger *slog.Logger, errors *prometheus.CounterVec, client platform.DatabaseClient) *DatabaseCollector {
	errors.WithLabelValues("database").Add(0)

	// Base labels shared by every database metric.
	databaseLabels := []string{"id", "name", "zone"}
	// Extra labels for the constant info metrics.
	databaseInfoLabels := append(databaseLabels,
		"plan", "host",
		"database_type", "database_revision", "database_version",
		"web_ui", "replication_enabled", "replication_role", "tags", "description")
	nicInfoLabels := append(databaseLabels, "upstream_type", "upstream_id", "upstream_name", "ipaddress", "nw_mask_len", "gateway")

	return &DatabaseCollector{
		ctx:    ctx,
		logger: logger,
		errors: errors,
		client: client,
		Up: prometheus.NewDesc(
			"sakuracloud_database_up",
			"If 1 the database is up and running, 0 otherwise",
			databaseLabels, nil,
		),
		DatabaseInfo: prometheus.NewDesc(
			"sakuracloud_database_info",
			"A metric with a constant '1' value labeled by database information",
			databaseInfoLabels, nil,
		),
		CPUTime: prometheus.NewDesc(
			"sakuracloud_database_cpu_time",
			"Database's CPU time(unit:ms)",
			databaseLabels, nil,
		),
		MemoryUsed: prometheus.NewDesc(
			"sakuracloud_database_memory_used",
			"Database's used memory size(unit:GB)",
			databaseLabels, nil,
		),
		MemoryTotal: prometheus.NewDesc(
			"sakuracloud_database_memory_total",
			"Database's total memory size(unit:GB)",
			databaseLabels, nil,
		),
		NICInfo: prometheus.NewDesc(
			"sakuracloud_database_nic_info",
			"A metric with a constant '1' value labeled by nic information",
			nicInfoLabels, nil,
		),
		NICReceive: prometheus.NewDesc(
			"sakuracloud_database_nic_receive",
			"NIC's receive bytes(unit: Kbps)",
			databaseLabels, nil,
		),
		NICSend: prometheus.NewDesc(
			"sakuracloud_database_nic_send",
			"NIC's send bytes(unit: Kbps)",
			databaseLabels, nil,
		),
		SystemDiskUsed: prometheus.NewDesc(
			"sakuracloud_database_disk_system_used",
			"Database's used system-disk size(unit:GB)",
			databaseLabels, nil,
		),
		SystemDiskTotal: prometheus.NewDesc(
			"sakuracloud_database_disk_system_total",
			"Database's total system-disk size(unit:GB)",
			databaseLabels, nil,
		),
		BackupDiskUsed: prometheus.NewDesc(
			"sakuracloud_database_disk_backup_used",
			"Database's used backup-disk size(unit:GB)",
			databaseLabels, nil,
		),
		BackupDiskTotal: prometheus.NewDesc(
			"sakuracloud_database_disk_backup_total",
			"Database's total backup-disk size(unit:GB)",
			databaseLabels, nil,
		),
		BinlogUsed: prometheus.NewDesc(
			"sakuracloud_database_binlog_used",
			"Database's used binlog size(unit:GB)",
			databaseLabels, nil,
		),
		DiskRead: prometheus.NewDesc(
			"sakuracloud_database_disk_read",
			"Disk's read bytes(unit: KBps)",
			databaseLabels, nil,
		),
		DiskWrite: prometheus.NewDesc(
			"sakuracloud_database_disk_write",
			"Disk's write bytes(unit: KBps)",
			databaseLabels, nil,
		),
		ReplicationDelay: prometheus.NewDesc(
			"sakuracloud_database_replication_delay",
			"Replication delay time(unit:second)",
			databaseLabels, nil,
		),
		MaintenanceScheduled: prometheus.NewDesc(
			"sakuracloud_database_maintenance_scheduled",
			"If 1 the database has scheduled maintenance info, 0 otherwise",
			databaseLabels, nil,
		),
		MaintenanceInfo: prometheus.NewDesc(
			"sakuracloud_database_maintenance_info",
			"A metric with a constant '1' value labeled by maintenance information",
			append(databaseLabels, "info_url", "info_title", "description", "start_date", "end_date"), nil,
		),
		MaintenanceStartTime: prometheus.NewDesc(
			"sakuracloud_database_maintenance_start",
			"Scheduled maintenance start time in seconds since epoch (1970)",
			databaseLabels, nil,
		),
		MaintenanceEndTime: prometheus.NewDesc(
			"sakuracloud_database_maintenance_end",
			"Scheduled maintenance end time in seconds since epoch (1970)",
			databaseLabels, nil,
		),
	}
}
// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector.
func (c *DatabaseCollector) Describe(ch chan<- *prometheus.Desc) {
	descs := []*prometheus.Desc{
		c.Up,
		c.DatabaseInfo,
		c.CPUTime,
		c.MemoryUsed,
		c.MemoryTotal,
		c.NICInfo,
		c.NICReceive,
		c.NICSend,
		c.SystemDiskUsed,
		c.SystemDiskTotal,
		c.BackupDiskUsed,
		c.BackupDiskTotal,
		c.BinlogUsed,
		c.DiskRead,
		c.DiskWrite,
		c.ReplicationDelay,
		c.MaintenanceScheduled,
		c.MaintenanceInfo,
		c.MaintenanceStartTime,
		c.MaintenanceEndTime,
	}
	for _, d := range descs {
		ch <- d
	}
}
// Collect is called by the Prometheus registry when collecting metrics.
// For each database it emits the static info metrics synchronously, then
// fans out one goroutine per monitor-API call (system, CPU, disk, NIC,
// maintenance) and waits for all of them before returning.
func (c *DatabaseCollector) Collect(ch chan<- prometheus.Metric) {
	databases, err := c.client.Find(c.ctx)
	if err != nil {
		c.errors.WithLabelValues("database").Add(1)
		c.logger.Warn(
			"can't list databases",
			slog.Any("err", err),
		)
	}

	var wg sync.WaitGroup
	wg.Add(len(databases))

	for i := range databases {
		// NOTE: this closure is invoked synchronously (no `go`); only the
		// per-metric monitor calls inside it run concurrently.
		func(database *platform.Database) {
			defer wg.Done()
			databaseLabels := c.databaseLabels(database)

			var up float64
			if database.InstanceStatus.IsUp() {
				up = 1.0
			}
			ch <- prometheus.MustNewConstMetric(
				c.Up,
				prometheus.GaugeValue,
				up,
				databaseLabels...,
			)
			ch <- prometheus.MustNewConstMetric(
				c.DatabaseInfo,
				prometheus.GaugeValue,
				float64(1.0),
				c.databaseInfoLabels(database)...,
			)
			ch <- prometheus.MustNewConstMetric(
				c.NICInfo,
				prometheus.GaugeValue,
				float64(1.0),
				c.nicInfoLabels(database)...,
			)

			// Monitor metrics are only meaningful for a running instance.
			if database.Availability.IsAvailable() && database.InstanceStatus.IsUp() {
				now := time.Now()

				// system info
				wg.Add(1)
				go func() {
					c.collectDatabaseMetrics(ch, database, now)
					wg.Done()
				}()

				// cpu-time
				wg.Add(1)
				go func() {
					c.collectCPUTime(ch, database, now)
					wg.Done()
				}()

				// Disk read/write
				wg.Add(1)
				go func() {
					c.collectDiskMetrics(ch, database, now)
					wg.Done()
				}()

				// NICs
				wg.Add(1)
				go func() {
					c.collectNICMetrics(ch, database, now)
					wg.Done()
				}()

				// maintenance info: a non-empty host-info URL signals a
				// scheduled maintenance window.
				var maintenanceScheduled float64
				if database.InstanceHostInfoURL != "" {
					maintenanceScheduled = 1.0
					wg.Add(1)
					go func() {
						c.collectMaintenanceInfo(ch, database)
						wg.Done()
					}()
				}
				ch <- prometheus.MustNewConstMetric(
					c.MaintenanceScheduled,
					prometheus.GaugeValue,
					maintenanceScheduled,
					databaseLabels...,
				)
			}
		}(databases[i])
	}

	wg.Wait()
}
// databaseLabels returns the base label values (id, name, zone) for the
// given database, in the same order as the descriptors' label names.
func (c *DatabaseCollector) databaseLabels(database *platform.Database) []string {
	return []string{
		database.ID.String(),
		database.Name,
		database.ZoneName,
	}
}

// databasePlanLabels maps a database plan ID to its human-readable size.
var databasePlanLabels = map[types.ID]string{
	types.DatabasePlans.DB10GB:  "10GB",
	types.DatabasePlans.DB30GB:  "30GB",
	types.DatabasePlans.DB90GB:  "90GB",
	types.DatabasePlans.DB240GB: "240GB",
	types.DatabasePlans.DB500GB: "500GB",
	types.DatabasePlans.DB1TB:   "1TB",
}
// databaseInfoLabels returns the label values for the DatabaseInfo metric:
// base labels plus plan, host, engine type/revision/version, web UI,
// replication settings, tags, and description.
func (c *DatabaseCollector) databaseInfoLabels(database *platform.Database) []string {
	labels := c.databaseLabels(database)

	instanceHost := "-"
	if database.InstanceHostName != "" {
		instanceHost = database.InstanceHostName
	}

	// Replication labels: empty role and "0" when replication is off.
	replEnabled := "0"
	replRole := ""
	if database.ReplicationSetting != nil {
		replEnabled = "1"
		if database.ReplicationSetting.Model == types.DatabaseReplicationModels.MasterSlave {
			replRole = "master"
		} else {
			replRole = "slave"
		}
	}

	return append(labels,
		databasePlanLabels[database.PlanID],
		instanceHost,
		database.Conf.DatabaseName,
		database.Conf.DatabaseRevision,
		database.Conf.DatabaseVersion,
		"", // TODO libsacloud v2 doesn't support WebUI URL
		replEnabled,
		replRole,
		flattenStringSlice(database.Tags),
		database.Description,
	)
}
// nicInfoLabels returns the label values for the NICInfo metric: base labels
// plus upstream switch info, primary IP address, mask length, and gateway.
// Missing values are rendered as empty strings.
func (c *DatabaseCollector) nicInfoLabels(database *platform.Database) []string {
	labels := c.databaseLabels(database)

	var upstreamType, upstreamID, upstreamName string
	if len(database.Interfaces) > 0 {
		nic := database.Interfaces[0]
		upstreamType = nic.UpstreamType.String()
		if !nic.SwitchID.IsEmpty() {
			upstreamID = nic.SwitchID.String()
			upstreamName = nic.SwitchName
		}
	}

	// ROBUSTNESS FIX: the original indexed IPAddresses[0] unconditionally,
	// which panics for a database without any assigned address.
	ipAddress := ""
	if len(database.IPAddresses) > 0 {
		ipAddress = database.IPAddresses[0]
	}

	nwMaskLen := database.NetworkMaskLen
	strMaskLen := ""
	if nwMaskLen > 0 {
		strMaskLen = fmt.Sprintf("%d", nwMaskLen)
	}

	return append(labels,
		upstreamType,
		upstreamID,
		upstreamName,
		ipAddress,
		strMaskLen,
		database.DefaultRoute,
	)
}
// collectCPUTime fetches the CPU-time monitor value for the database and
// emits it (converted from seconds to milliseconds) stamped with the sample
// time. API failures increment the error counter; a nil result is skipped.
func (c *DatabaseCollector) collectCPUTime(ch chan<- prometheus.Metric, database *platform.Database, now time.Time) {
	values, err := c.client.MonitorCPU(c.ctx, database.ZoneName, database.ID, now)
	if err != nil {
		c.errors.WithLabelValues("database").Add(1)
		c.logger.Warn(
			fmt.Sprintf("can't get database's cpu time: DatabaseID=%d", database.ID),
			slog.Any("err", err),
		)
		return
	}
	if values == nil {
		return
	}

	m := prometheus.MustNewConstMetric(
		c.CPUTime,
		prometheus.GaugeValue,
		values.CPUTime*1000,
		c.databaseLabels(database)...,
	)
	ch <- prometheus.NewMetricWithTimestamp(values.Time, m)
}
// collectDiskMetrics fetches the disk monitor values for the database and
// emits read/write throughput (converted from bytes to KB) stamped with the
// sample time. API failures increment the error counter; nil is skipped.
func (c *DatabaseCollector) collectDiskMetrics(ch chan<- prometheus.Metric, database *platform.Database, now time.Time) {
	values, err := c.client.MonitorDisk(c.ctx, database.ZoneName, database.ID, now)
	if err != nil {
		c.errors.WithLabelValues("database").Add(1)
		c.logger.Warn(
			fmt.Sprintf("can't get disk's metrics: DatabaseID=%d", database.ID),
			slog.Any("err", err),
		)
		return
	}
	if values == nil {
		return
	}

	m := prometheus.MustNewConstMetric(
		c.DiskRead,
		prometheus.GaugeValue,
		values.Read/1024,
		c.databaseLabels(database)...,
	)
	ch <- prometheus.NewMetricWithTimestamp(values.Time, m)

	m = prometheus.MustNewConstMetric(
		c.DiskWrite,
		prometheus.GaugeValue,
		values.Write/1024,
		c.databaseLabels(database)...,
	)
	ch <- prometheus.NewMetricWithTimestamp(values.Time, m)
}
// collectNICMetrics fetches the NIC monitor values for the database and
// emits receive/send throughput (converted from bytes/s to Kbps: *8/1000)
// stamped with the sample time. API failures increment the error counter.
func (c *DatabaseCollector) collectNICMetrics(ch chan<- prometheus.Metric, database *platform.Database, now time.Time) {
	values, err := c.client.MonitorNIC(c.ctx, database.ZoneName, database.ID, now)
	if err != nil {
		c.errors.WithLabelValues("database").Add(1)
		c.logger.Warn(
			fmt.Sprintf("can't get database's NIC metrics: DatabaseID=%d", database.ID),
			slog.Any("err", err),
		)
		return
	}
	if values == nil {
		return
	}

	m := prometheus.MustNewConstMetric(
		c.NICReceive,
		prometheus.GaugeValue,
		values.Receive*8/1000,
		c.databaseLabels(database)...,
	)
	ch <- prometheus.NewMetricWithTimestamp(values.Time, m)

	m = prometheus.MustNewConstMetric(
		c.NICSend,
		prometheus.GaugeValue,
		values.Send*8/1000,
		c.databaseLabels(database)...,
	)
	ch <- prometheus.NewMetricWithTimestamp(values.Time, m)
}
// collectDatabaseMetrics fetches the system monitor values for the database
// and emits memory, system/backup disk, binlog, and replication-delay gauges
// stamped with the sample time. API failures increment the error counter;
// a nil result is skipped.
func (c *DatabaseCollector) collectDatabaseMetrics(ch chan<- prometheus.Metric, database *platform.Database, now time.Time) {
	values, err := c.client.MonitorDatabase(c.ctx, database.ZoneName, database.ID, now)
	if err != nil {
		c.errors.WithLabelValues("database").Add(1)
		c.logger.Warn(
			fmt.Sprintf("can't get database's system metrics: DatabaseID=%d", database.ID),
			slog.Any("err", err),
		)
		return
	}
	if values == nil {
		return
	}

	labels := c.databaseLabels(database)

	// emit sends one gauge sample stamped with the monitor sample time.
	emit := func(desc *prometheus.Desc, v float64) {
		m := prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, v, labels...)
		ch <- prometheus.NewMetricWithTimestamp(values.Time, m)
	}
	// scaled divides positive raw values by 1024*1024 (the descriptors
	// document the result as GB); non-positive values pass through as-is.
	scaled := func(v float64) float64 {
		if v > 0 {
			return v / 1024 / 1024
		}
		return v
	}

	// DECOMPOSITION: the original repeated the same convert-and-emit
	// sequence eight times; the helpers keep the emission order identical.
	emit(c.MemoryTotal, scaled(values.TotalMemorySize))
	emit(c.MemoryUsed, scaled(values.UsedMemorySize))
	emit(c.SystemDiskTotal, scaled(values.TotalDisk1Size))
	emit(c.SystemDiskUsed, scaled(values.UsedDisk1Size))
	emit(c.BackupDiskTotal, scaled(values.TotalDisk2Size))
	emit(c.BackupDiskUsed, scaled(values.UsedDisk2Size))
	emit(c.BinlogUsed, scaled(values.BinlogUsedSizeKiB))
	// Replication delay is already in seconds; no scaling.
	emit(c.ReplicationDelay, values.DelayTimeSec)
}
// maintenanceInfoLabels returns the label values for the MaintenanceInfo
// metric: base labels plus the feed item's URL, title, description, and
// start/end timestamps rendered as Unix seconds.
func (c *DatabaseCollector) maintenanceInfoLabels(resource *platform.Database, info *newsfeed.FeedItem) []string {
	labels := c.databaseLabels(resource)
	return append(labels,
		info.URL,
		info.Title,
		info.Description,
		fmt.Sprintf("%d", info.EventStart().Unix()),
		fmt.Sprintf("%d", info.EventEnd().Unix()),
	)
}
// collectMaintenanceInfo fetches the maintenance news-feed item referenced
// by the database's host-info URL and emits the info metric plus start/end
// timestamps. It is a no-op when no maintenance is scheduled; fetch errors
// increment the error counter.
func (c *DatabaseCollector) collectMaintenanceInfo(ch chan<- prometheus.Metric, resource *platform.Database) {
	if resource.InstanceHostInfoURL == "" {
		return
	}
	info, err := c.client.MaintenanceInfo(resource.InstanceHostInfoURL)
	if err != nil {
		c.errors.WithLabelValues("database").Add(1)
		c.logger.Warn(
			fmt.Sprintf("can't get database's maintenance info: ID=%d", resource.ID),
			slog.Any("err", err),
		)
		return
	}
	infoLabels := c.maintenanceInfoLabels(resource, info)

	// info
	ch <- prometheus.MustNewConstMetric(
		c.MaintenanceInfo,
		prometheus.GaugeValue,
		1.0,
		infoLabels...,
	)
	// start
	ch <- prometheus.MustNewConstMetric(
		c.MaintenanceStartTime,
		prometheus.GaugeValue,
		float64(info.EventStart().Unix()),
		c.databaseLabels(resource)...,
	)
	// end
	ch <- prometheus.MustNewConstMetric(
		c.MaintenanceEndTime,
		prometheus.GaugeValue,
		float64(info.EventEnd().Unix()),
		c.databaseLabels(resource)...,
	)
}
|
package sms
import (
"MI/pkg/cache"
"context"
"time"
)
// SmsSet stores the verification code (value) for a phone number (key) in
// redis with a 60-second TTL.
func SmsSet(key, value string) error {
	return cache.Set(context.Background(), key, value, 60*time.Second)
}

// SmsGet returns the verification code previously stored for key.
func SmsGet(key string) (string, error) {
	return cache.Get(context.Background(), key)
}
package handlers
import (
"crypto/sha256"
"encoding/json"
"fmt"
"html/template"
"log"
"net/http"
"strings"
"github.com/Neffats/final-scenes/models"
"github.com/Neffats/final-scenes/stores"
)
// HTTP bundles the handlers' shared dependencies: the film store queried by
// the game endpoints and a logger for request diagnostics.
type HTTP struct {
	Films  *stores.FilmStore
	Logger *log.Logger
}
// HandleGuess checks a user's film-title guess against the expected answer.
// It accepts only POST requests with a JSON body; the stored answer
// (guess.Question) is a SHA-256 hex digest, so the guess is lowercased and
// hashed the same way before comparison. Responds with a JSON GuessResponse.
func (h *HTTP) HandleGuess(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		h.Logger.Printf("received bad guess request: unsupported method: %s\n", r.Method)
		http.Error(w, "Only POST requests are supported", http.StatusMethodNotAllowed)
		return
	}
	contentType := r.Header.Get("Content-Type")
	if contentType != "application/json" {
		h.Logger.Printf(
			"received bad guess request: unsupported Content-Type: %s\n", contentType)
		http.Error(w, "Unsupported Content-Type", http.StatusBadRequest)
		return
	}

	var guess models.GuessAttempt
	var resp models.GuessResponse

	err := json.NewDecoder(r.Body).Decode(&guess)
	if err != nil {
		h.Logger.Printf("failed to unmarshal guess: %v\n", err)
		http.Error(w, "something went wrong", http.StatusInternalServerError)
		return
	}

	// IDIOM: assign the comparison result directly instead of the original
	// if/else that set the boolean in two branches.
	hashedGuess := fmt.Sprintf("%x", sha256.Sum256([]byte(strings.ToLower(guess.Guess))))
	resp.Answer = hashedGuess == guess.Question

	byteResp, err := json.Marshal(resp)
	if err != nil {
		h.Logger.Printf("failed to marshal guess response: %v\n", err)
		http.Error(w, "something went wrong", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	// NOTE: the Write error is intentionally ignored; the response is
	// already committed and there is no recovery at this point.
	w.Write(byteResp)
}
// GiveUpRequest is the JSON body of a give-up request, identifying the film
// by its hash.
type GiveUpRequest struct {
	FilmHash string
}

// GiveUpResponse is the JSON reply to a give-up request, revealing the
// film's name.
type GiveUpResponse struct {
	FilmName string
}
// HandleGiveUp resolves a film hash back to the film's name when the user
// gives up guessing. It accepts only POST requests with a JSON GiveUpRequest
// body and responds with a JSON GiveUpResponse.
func (h *HTTP) HandleGiveUp(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		h.Logger.Printf("received bad give up request: unsupported method: %s\n", r.Method)
		http.Error(w, "Only POST requests are supported", http.StatusMethodNotAllowed)
		return
	}
	contentType := r.Header.Get("Content-Type")
	if contentType != "application/json" {
		h.Logger.Printf(
			"received bad give up request: unsupported Content-Type: %s\n", contentType)
		http.Error(w, "Unsupported Content-Type", http.StatusBadRequest)
		return
	}

	var guRequest GiveUpRequest
	err := json.NewDecoder(r.Body).Decode(&guRequest)
	if err != nil {
		h.Logger.Printf("failed to unmarshal give up request: %v\n", err)
		http.Error(w, "something went wrong", http.StatusInternalServerError)
		return
	}
	if guRequest.FilmHash == "" {
		// BUG FIX: the original passed a stray err argument (nil at this
		// point) to Printf with no matching verb, producing
		// "%!(EXTRA ...)" noise in the log.
		h.Logger.Printf("give up request missing hash in request\n")
		http.Error(w, "something went wrong", http.StatusInternalServerError)
		return
	}

	film, err := h.Films.Find(stores.ByHash(guRequest.FilmHash))
	if err != nil {
		// Distinguish an unknown hash from other store failures in the log,
		// but return the same opaque error to the client either way.
		if err, ok := err.(*stores.FilmNotFoundError); ok {
			h.Logger.Printf("film hash provided in give up request does not exist in store: %v\n", err)
			http.Error(w, "something went wrong", http.StatusInternalServerError)
			return
		}
		h.Logger.Printf("failed to find film hash in store: %v\n", err)
		http.Error(w, "something went wrong", http.StatusInternalServerError)
		return
	}

	guResponse := GiveUpResponse{FilmName: film.Name}
	byteResp, err := json.Marshal(guResponse)
	if err != nil {
		h.Logger.Printf("failed to marshal give up response: %v\n", err)
		http.Error(w, "something went wrong", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(byteResp)
}
// HandleTemplate renders the index page from templates/index.gohtml with the
// full film list, registering an "inc" helper (x+1) for 1-based numbering.
// NOTE(review): the template is parsed from disk on every request; parsing
// once at startup would be cheaper — confirm whether hot-reloading during
// development is the reason it is done here.
func (h *HTTP) HandleTemplate(w http.ResponseWriter, r *http.Request) {
	t, err := template.New("index.gohtml").Funcs(template.FuncMap{
		"inc": func(x int) int {
			return x + 1
		},
	}).ParseFiles("templates/index.gohtml")
	if err != nil {
		h.Logger.Printf("failed to parse template file: %v\n", err)
		http.Error(w, "something went wrong", http.StatusInternalServerError)
		return
	}
	films := h.Films.All()
	err = t.Execute(w, films)
	if err != nil {
		// The response may be partially written at this point; the error
		// status is best-effort.
		h.Logger.Printf("failed to execute template file: %v\n", err)
		http.Error(w, "something went wrong", http.StatusInternalServerError)
		return
	}
}
|
package rws
import (
"testing"
"io/ioutil"
)
// TestGet exercises the Get helper and reports the response body when the
// status is not 200 OK.
func TestGet(t *testing.T) {
	resp, err := Get()
	if err != nil {
		// BUG FIX: the original called t.Error with a format string (which
		// needs t.Errorf) and then continued, dereferencing a nil resp.
		t.Fatalf("Connection error: %s", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		content, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			t.Fatal(err)
		}
		t.Errorf("unexpected status %d: %s", resp.StatusCode, content)
	}
}
|
package main
// http://go101.org/article/unsafe.html
// main is an annotated reading list for the go101 unsafe-pointer article;
// it intentionally contains no executable statements.
func main() {
	// Fact 1: Unsafe Pointers Are Pointers And Uintptr Values Are Intergers
	// Fact 2: Unused Values May Be Collected At Any Time
	// Fact 3: We Can Use A runtime.KeepAlive Function Call To Mark A Value As Still In Using (Reachable) Currently
	// Fact 4: *unsafe.Pointer Is A General Safe Pointer Type

	// Pattern 1: Convert *T1 To Unsafe Poniter, Then Convert The Unsafe Pointer Value To *T2
	// Pattern 2: Convert Unsafe Pointer To Uintptr, Then Use The Uintptr Value
	// Pattern 3: Convert Unsafe Pointer To Uintptr, Do Arithmetic Operations With The Uintptr Value, Then Convert Back
	// Pattern 4: Convert Unsafe Pointer To uintptr When Calling syscall.Syscall
	// Pattern 5: Convert The uintptr Result Of reflect.Value.Pointer Or reflect.Value.UnsafeAddr Method Call To Unsafe Pointer
	// Pattern 6: Convert A reflect.SliceHeader.Data Or reflect.StringHeader.Data Field To Unsafe Pointer, And The Inverse.

	// See slice_pointer.go for concrete examples.
}
|
package main
import "math"
import "fmt"
// isPrime reports whether nb is a prime number. It trial-divides by every
// integer from 2 up to floor(sqrt(nb)), which suffices because any composite
// number has a divisor no greater than its square root.
func isPrime(nb int) bool {
	// BUG FIX: numbers below 2 (0, 1, negatives) are not prime; the original
	// returned true for them because the divisor loop never executed.
	if nb < 2 {
		return false
	}
	limit := int(math.Sqrt(float64(nb))) // int: can't use % on floats
	for i := 2; i <= limit; i++ {
		if nb%i == 0 {
			return false
		}
	}
	return true
}
// main reads a rank N from stdin and prints the N-th prime number, found by
// counting primes upward from 2 (Project Euler problem 7).
func main() {
	var rank int

	fmt.Print("The rank of the prime number you want : ")
	fmt.Scan(&rank)

	// Start below the first prime; each outer iteration advances primeNb to
	// the next prime, so after rank iterations it holds the rank-th prime.
	primeNb := 1
	for cpt := 1; cpt <= rank; cpt++ {
		primeNb++
		for !isPrime(primeNb) {
			primeNb++
		}
	}
	fmt.Println("\nThe", rank, "th prime number is ", primeNb)
}
/* Problem 7
By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
What is the 10 001st prime number?
-> 104743
*/
|
package persistence
import (
"fmt"
"gopetstore/src/domain"
"testing"
)
// TestInsertAccount verifies that a fully-populated account row can be
// inserted without error. NOTE(review): this hits the real database and
// re-running it may fail on a duplicate user name — confirm.
func TestInsertAccount(t *testing.T) {
	err := InsertAccount(&domain.Account{
		UserName:            "1234",
		Password:            "1234",
		Email:               "1234",
		FirstName:           "1234",
		LastName:            "1234",
		Status:              "",
		Address1:            "1234",
		Address2:            "1234",
		City:                "1234",
		State:               "1234",
		Zip:                 "1234",
		Country:             "1234",
		Phone:               "1234",
		FavouriteCategoryId: "1234",
		LanguagePreference:  "1234",
		ListOption:          false,
		BannerOption:        false,
		BannerName:          "",
	})
	if err != nil {
		t.Error(err.Error())
	}
}
// TestUpdateAccountByUserName verifies that the account stored under user
// name "test" can be overwritten with new field values without error.
func TestUpdateAccountByUserName(t *testing.T) {
	err := UpdateAccountByUserName(&domain.Account{
		UserName:            "test",
		Password:            "hahaha",
		Email:               "hahaha",
		FirstName:           "test",
		LastName:            "test",
		Status:              "",
		Address1:            "test",
		Address2:            "test",
		City:                "test",
		State:               "test",
		Zip:                 "test",
		Country:             "test",
		Phone:               "1234",
		FavouriteCategoryId: "test",
		LanguagePreference:  "test",
		ListOption:          true,
		BannerOption:        true,
		BannerName:          "",
	}, "test")
	if err != nil {
		t.Error(err.Error())
	}
}
// TestGetAccountByUserName verifies that an account can be fetched by user
// name and dumps the fetched value for inspection.
func TestGetAccountByUserName(t *testing.T) {
	a, err := GetAccountByUserName("test")
	if err != nil {
		// BUG FIX: the original used t.Error and fell through to print a
		// zero-value account; Fatal stops the test at the failed lookup.
		t.Fatal(err.Error())
	}
	fmt.Println(a)
}
|
package main
import "fmt"
// Equivalent version taking the function type inline, kept for reference:
//func sayHelloWithFilter(name string, filter func(string) string) {
//	nameFiltered := filter(name)
//	fmt.Println("Hello", nameFiltered)
//}

// Filter is a named function type used as a parameter, so the signature of
// sayHelloWithFilter stays short and readable.
type Filter func(string) string
// sayHelloWithFilter prints a greeting for name after passing it through
// the supplied filter.
func sayHelloWithFilter(name string, filter Filter) {
	fmt.Println("Hello", filter(name))
}
// spamFilter censors the known spam word to an ellipsis and passes every
// other name through unchanged.
func spamFilter(name string) string {
	if name != "Anjing" {
		return name
	}
	return "..."
}
// main demonstrates passing a function as a parameter: directly by name and
// via a variable of the named Filter type.
func main() {
	sayHelloWithFilter("Eko", spamFilter)
	sayHelloWithFilter("Anjing", spamFilter)

	// A function value assigned to a variable works the same way.
	filter := spamFilter
	sayHelloWithFilter("Anjing", filter)
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schemacmp
import (
"math/bits"
"strings"
"github.com/pingcap/tidb/parser/charset"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/types"
)
const (
	// Flag bits marking a column as part of a key (primary/unique/multiple).
	flagMaskKeys = mysql.PriKeyFlag | mysql.UniqueKeyFlag | mysql.MultipleKeyFlag
	// Flag bits related to a column's default value.
	flagMaskDefVal = mysql.AutoIncrementFlag | mysql.NoDefaultValueFlag
	// Encoded anti-keys value meaning "not part of any key" (all bits set,
	// i.e. the maximum — see encodeAntiKeys).
	notPartOfKeys = ^byte(0)
)

// Please ensure this list is synchronized with the order of Tuple{} in encodeFieldTypeToLattice().
const (
	fieldTypeTupleIndexTp = iota
	fieldTypeTupleIndexFlen
	fieldTypeTupleIndexDec
	fieldTypeTupleIndexFlagSingleton
	fieldTypeTupleIndexFlagNull
	fieldTypeTupleIndexFlagAntiKeys
	fieldTypeTupleIndexFlagDefVal
	fieldTypeTupleIndexCharset
	fieldTypeTupleIndexCollate
	fieldTypeTupleIndexElems

	// ErrMsgAutoTypeWithoutKey is the IncompatibleError message produced by
	// typ.Join for an AUTO_INCREMENT column that is not part of any key.
	ErrMsgAutoTypeWithoutKey = "auto type but not defined as a key"
)
// encodeAntiKeys packs the key-related flag bits into a single byte whose
// natural ordering is inverted relative to key "strength".
func encodeAntiKeys(flag uint) byte {
	// this ensure we get this order:
	// 1. "not part of keys" (flag = 0) is the maximum
	// 2. multiple keys (8) > unique key (4) > primary key (2).
	return ^bits.Reverse8(byte(flag & flagMaskKeys))
}

// decodeAntiKeys is the inverse of encodeAntiKeys, recovering the original
// key flag bits from the encoded byte.
func decodeAntiKeys(encoded byte) uint {
	return uint(bits.Reverse8(^encoded))
}
// encodeFieldTypeToLattice encodes a column's FieldType as a lattice Tuple
// so that two versions of the column can be compared/joined element-wise.
// The element order must stay in sync with the fieldTypeTupleIndex* consts.
func encodeFieldTypeToLattice(ft *types.FieldType) Tuple {
	var flen, dec Lattice
	// DECIMAL length/precision must match exactly (Singleton); for other
	// types a larger flen/dec is acceptable (Int orders them).
	if ft.GetType() == mysql.TypeNewDecimal {
		flen = Singleton(ft.GetFlen())
		dec = Singleton(ft.GetDecimal())
	} else {
		flen = Int(ft.GetFlen())
		dec = Int(ft.GetDecimal())
	}

	// Maybe(nil) marks a column with no usable default value.
	var defVal Lattice
	if mysql.HasAutoIncrementFlag(ft.GetFlag()) || !mysql.HasNoDefaultValueFlag(ft.GetFlag()) {
		defVal = Maybe(Singleton(ft.GetFlag() & flagMaskDefVal))
	} else {
		defVal = Maybe(nil)
	}

	return Tuple{
		FieldTp(ft.GetType()),
		flen,
		dec,

		// TODO: recognize if the remaining flags can be merged or not.
		Singleton(ft.GetFlag() &^ (flagMaskDefVal | mysql.NotNullFlag | flagMaskKeys)),
		Bool(!mysql.HasNotNullFlag(ft.GetFlag())),
		Byte(encodeAntiKeys(ft.GetFlag())),
		defVal,

		Singleton(ft.GetCharset()),
		Singleton(ft.GetCollate()),
		StringList(ft.GetElems()),
	}
}
// decodeFieldTypeFromLattice reconstructs a FieldType from an encoded Tuple,
// reassembling the flag word from its split representations (singleton
// flags, anti-keys byte, null bool, and optional default-value flags).
func decodeFieldTypeFromLattice(tup Tuple) *types.FieldType {
	lst := tup.Unwrap().([]interface{})

	flags := lst[fieldTypeTupleIndexFlagSingleton].(uint)
	flags |= decodeAntiKeys(lst[fieldTypeTupleIndexFlagAntiKeys].(byte))
	if !lst[fieldTypeTupleIndexFlagNull].(bool) {
		flags |= mysql.NotNullFlag
	}
	// A missing defVal entry (Maybe(nil)) means "no default value".
	if x, ok := lst[fieldTypeTupleIndexFlagDefVal].(uint); ok {
		flags |= x
	} else {
		flags |= mysql.NoDefaultValueFlag
	}

	return types.NewFieldTypeBuilder().SetType(lst[fieldTypeTupleIndexTp].(byte)).SetFlen(lst[fieldTypeTupleIndexFlen].(int)).SetDecimal(lst[fieldTypeTupleIndexDec].(int)).SetFlag(flags).SetCharset(lst[fieldTypeTupleIndexCharset].(string)).SetCollate(lst[fieldTypeTupleIndexCollate].(string)).SetElems(lst[fieldTypeTupleIndexElems].([]string)).BuildP()
}
// typ wraps the lattice Tuple encoding of a column type.
type typ struct{ Tuple }

// Type is to create type.
func Type(ft *types.FieldType) typ {
	return typ{Tuple: encodeFieldTypeToLattice(ft)}
}

// hasDefault reports whether the encoded type carries a usable default
// value (i.e. its defVal slot is not Maybe(nil)).
func (a typ) hasDefault() bool {
	return a.Tuple[fieldTypeTupleIndexFlagDefVal].Unwrap() != nil
}
// setFlagForMissingColumn adjusts the flags of the type for filling in a
// missing column: the column is marked as not part of any key and its
// NO DEFAULT VALUE flag is cleared. Returns whether the column had no
// default value before the adjustment.
// NOTE(review): the original comment claimed this returns an incompatible
// error for AUTO_INCREMENT columns, but the signature returns only a bool;
// that check appears to live in typ.Join — confirm.
func (a typ) setFlagForMissingColumn() (hadNoDefault bool) {
	a.Tuple[fieldTypeTupleIndexFlagAntiKeys] = Byte(notPartOfKeys)

	// A missing defVal entry (ok == false) also counts as "no default".
	defVal, ok := a.Tuple[fieldTypeTupleIndexFlagDefVal].Unwrap().(uint)
	if !ok || mysql.HasNoDefaultValueFlag(defVal) {
		a.Tuple[fieldTypeTupleIndexFlagDefVal] = Maybe(Singleton(defVal &^ mysql.NoDefaultValueFlag))
		return true
	}
	return false
}
// isNotNull reports whether the column carries the NOT NULL constraint
// (the null slot stores nullability, so it is inverted here).
func (a typ) isNotNull() bool {
	return !a.Tuple[fieldTypeTupleIndexFlagNull].Unwrap().(bool)
}

// inAutoIncrement reports whether the column is AUTO_INCREMENT.
func (a typ) inAutoIncrement() bool {
	defVal, ok := a.Tuple[fieldTypeTupleIndexFlagDefVal].Unwrap().(uint)
	return ok && mysql.HasAutoIncrementFlag(defVal)
}

// setAntiKeyFlags overwrites the encoded key flags with the given flag bits.
func (a typ) setAntiKeyFlags(flag uint) {
	a.Tuple[fieldTypeTupleIndexFlagAntiKeys] = Byte(encodeAntiKeys(flag))
}
// getStandardDefaultValue returns the standard "zero" default used to
// backfill a column of this type: numeric zero, zero date/time (with the
// type's fractional precision appended), first enum element, a zero-filled
// binary string, or "" otherwise.
func (a typ) getStandardDefaultValue() interface{} {
	var tail string
	// Append ".000..." with the column's declared fractional precision so
	// temporal defaults match the decimal width.
	if dec := a.Tuple[fieldTypeTupleIndexDec].Unwrap().(int); dec > 0 {
		tail = "." + strings.Repeat("0", dec)
	}
	switch a.Tuple[fieldTypeTupleIndexTp].Unwrap().(byte) {
	case mysql.TypeTiny, mysql.TypeInt24, mysql.TypeShort, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeFloat, mysql.TypeDouble, mysql.TypeNewDecimal:
		return "0"
	case mysql.TypeTimestamp, mysql.TypeDatetime:
		return "0000-00-00 00:00:00" + tail
	case mysql.TypeDate:
		return "0000-00-00"
	case mysql.TypeDuration:
		return "00:00:00" + tail
	case mysql.TypeYear:
		return "0000"
	case mysql.TypeJSON:
		return "null"
	case mysql.TypeEnum:
		// NOTE(review): this indexes the StringList lattice directly, without
		// .Unwrap() as the sibling accessors do — confirm StringList is meant
		// to be indexed this way.
		return a.Tuple[fieldTypeTupleIndexElems].(StringList)[0]
	case mysql.TypeString:
		// ref https://github.com/pingcap/tidb/blob/66948b2fd9bec8ea11644770a2fa746c7eba1a1f/ddl/ddl_api.go#L3916
		// Binary-collated CHAR(n) defaults to n zero bytes.
		if a.Tuple[fieldTypeTupleIndexCollate].Unwrap().(string) == charset.CollationBin {
			return string(make([]byte, a.Tuple[fieldTypeTupleIndexFlen].Unwrap().(int)))
		}
		return ""
	default:
		return ""
	}
}
// clone returns a typ backed by a fresh copy of the underlying tuple, so
// in-place flag mutations on the clone do not leak into the original.
func (a typ) clone() typ {
	duplicate := make(Tuple, len(a.Tuple))
	copy(duplicate, a.Tuple)
	return typ{Tuple: duplicate}
}
// Unwrap decodes the tuple back into the *types.FieldType it encodes.
func (a typ) Unwrap() interface{} {
	return decodeFieldTypeFromLattice(a.Tuple)
}
// Compare orders two encoded field types; it fails with a type-mismatch
// error when other is not a typ.
func (a typ) Compare(other Lattice) (int, error) {
	b, ok := other.(typ)
	if !ok {
		return 0, typeMismatchError(a, other)
	}
	return a.Tuple.Compare(b.Tuple)
}
// Join computes the least upper bound of two encoded field types. It fails
// with a type-mismatch error when other is not a typ, and with an
// IncompatibleError when the joined type would be AUTO_INCREMENT without
// being part of any key.
func (a typ) Join(other Lattice) (Lattice, error) {
	if b, ok := other.(typ); ok {
		genJoin, err := a.Tuple.Join(b.Tuple)
		if err != nil {
			return nil, err
		}
		join := genJoin.(Tuple)
		// Special check: we can't have an AUTO_INCREMENT column without being a KEY.
		x, ok := join[fieldTypeTupleIndexFlagDefVal].Unwrap().(uint)
		if ok && x&mysql.AutoIncrementFlag != 0 && join[fieldTypeTupleIndexFlagAntiKeys].Unwrap() == notPartOfKeys {
			return nil, &IncompatibleError{Msg: ErrMsgAutoTypeWithoutKey}
		}
		return typ{Tuple: join}, nil
	}
	return nil, typeMismatchError(a, other)
}
|
package dht
import (
"time"
"net/http"
// "net/http/httptest"
"testing"
// "github.com/julienschmidt/httprouter"
"fmt"
"log"
//"io/ioutil"
"bytes"
"strconv"
)
// handlerStruct is a small test helper; the pointed-to bool is meant to flip
// when a handler has run. (NOTE(review): "handeled" is a misspelling of
// "handled"; left unchanged since the field name is part of the package's
// internal interface.)
type handlerStruct struct {
	handeled *bool
}
// Test_1 is an end-to-end smoke test: it boots eight DHT nodes (each with its
// own HTTP server on ports 8080-8087), joins them into a ring with generous
// sleeps between joins, seeds the ring with data, then exercises the STORAGE
// REST endpoints (GET, PUT, DELETE, POST) against the ring.
//
// NOTE(review): the trailing "()" in the signature is an empty result list —
// legal Go, but gofmt would drop it. Most http.Get/Do errors are unchecked,
// response bodies are never closed, and the fixed sleeps make the test slow
// and timing-sensitive.
func Test_1(t *testing.T)() {
	// Start eight nodes, each paired with an HTTP front end.
	node0 := makeDHTNode(generateNodeId(), "127.0.0.1", "4242")
	go makeserver(node0, "8080")
	node1 := makeDHTNode(generateNodeId(), "127.0.0.1", "4243")
	go makeserver(node1, "8081")
	node2 := makeDHTNode(generateNodeId(), "127.0.0.1", "4244")
	go makeserver(node2, "8082")
	node3 := makeDHTNode(generateNodeId(), "127.0.0.1", "4245")
	go makeserver(node3, "8083")
	node4 := makeDHTNode(generateNodeId(), "127.0.0.1", "4246")
	go makeserver(node4, "8084")
	node5 := makeDHTNode(generateNodeId(), "127.0.0.1", "4247")
	go makeserver(node5, "8085")
	node6 := makeDHTNode(generateNodeId(), "127.0.0.1", "4248")
	go makeserver(node6, "8086")
	node7 := makeDHTNode(generateNodeId(), "127.0.0.1", "4249")
	go makeserver(node7, "8087")
	//Need to do that since node 0 is not added to the ring:
	go node0.transport.sendHeartBeat()
	go node0.transport.heartBeatListen()
	// Join the other seven nodes through node0, sleeping so each join can
	// propagate before the next one starts.
	node0.addToRing(node1)
	time.Sleep(2000 * time.Millisecond)
	node0.addToRing(node2)
	time.Sleep(2000 * time.Millisecond)
	node0.addToRing(node3)
	time.Sleep(2000 * time.Millisecond)
	node0.addToRing(node4)
	time.Sleep(2000 * time.Millisecond)
	node0.addToRing(node5)
	time.Sleep(2000 * time.Millisecond)
	node0.addToRing(node6)
	time.Sleep(2000 * time.Millisecond)
	node0.addToRing(node7)
	time.Sleep(2000 * time.Millisecond)
	// fmt.Println("node0:")
	// node0.printNode(false)
	// fmt.Println("node1:")
	// node1.printNode(false)
	// fmt.Println("node2:")
	// node2.printNode(false)
	// fmt.Println("node3:")
	// node3.printNode(false)
	// fmt.Println("node4:")
	// node4.printNode(false)
	// fmt.Println("node5:")
	// node5.printNode(false)
	// fmt.Println("node6:")
	// node6.printNode(false)
	// fmt.Println("node7:")
	// node7.printNode(false)
	//message := printRingMsg(node0.coordinates, node0.successor, node0.coordinates, 0)
	//node0.printRing(message)
	//node0.updateFingers()
	//time.Sleep(5000 * time.Millisecond)
	// data := "0"
	// node7.addNewDataToNode(data)
	// Seed the ring with six values via different entry nodes.
	data := "0"
	node0.addNewData(data)
	time.Sleep(1000*time.Millisecond)
	data = "1"
	node1.addNewData(data)
	time.Sleep(1000*time.Millisecond)
	data = "2"
	node2.addNewData(data)
	time.Sleep(1000*time.Millisecond)
	data = "3"
	node3.addNewData(data)
	time.Sleep(1000*time.Millisecond)
	data = "4"
	node4.addNewData(data)
	time.Sleep(1000*time.Millisecond)
	data = "5"
	node5.addNewData(data)
	time.Sleep(2000 * time.Millisecond)
	// Keep inserting data until node0 ends up owning an "original" entry we
	// can address over HTTP; key holds that entry's map key.
	var key string
	i:=6
	for {
		if len(node0.data.StoredData)!=0{
			for k, _ := range node0.data.StoredData {
				//fmt.Println(node0.data.StoredData[k].Value, node0.data.StoredData[k].Original)
				if node0.data.StoredData[k].Original{
					// key=node0.data.StoredData[k].Key
					key=k
					break
				}
			}
			if key!=""{
				break
			}
		}
		data=strconv.Itoa(i)
		node0.addNewData(data)
		i = i+1
		time.Sleep(1000*time.Millisecond)
	}
	//node0.data.printData()
	// var key string
	// for k, _ := range node0.data.StoredData {
	// 	//fmt.Println(node0.data.StoredData[k].Value, node0.data.StoredData[k].Original)
	// 	if node0.data.StoredData[k].Original{
	// 		key=node0.data.StoredData[k].Key
	// 		break
	// 	}
	// }
	// if key==""{
	// 	for k, _ := range node0.data.StoredData {
	// 		//fmt.Println(node1.data.StoredData[k].Value, node1.data.StoredData[k].Original)
	// 		if node0.data.StoredData[k].Original {
	// 			key = node0.data.StoredData[k].Key
	// 			break
	// 		}
	// 	}
	// }
	fmt.Println("Key:",key)
	time.Sleep(2000 * time.Millisecond)
	/*for k, _ := range node0.data.StoredData {
		key[i] = node0.data.StoredData[k].Key
		i++
	}*/
	//GET REQUEST
	// NOTE(review): err is ignored here and the response body is not closed.
	fmt.Println("\n****GET REQUEST****")
	_, err := http.Get("http://127.0.0.1:8080/STORAGE/"+key)
	time.Sleep(2000 * time.Millisecond)
	//PUT REQUEST
	fmt.Println("\n****PUT REQUEST****")
	var jsonStr = []byte(`{"Value":"Helloword"}`)
	req, err := http.NewRequest("PUT", "http://127.0.0.1:8080/STORAGE/"+key, bytes.NewBuffer(jsonStr))
	req.Header.Set("X-Custom-Header", "myvalue")
	req.Header.Set("Content-Type", "application/json")
	_, err = http.DefaultClient.Do(req)
	time.Sleep(3000 * time.Millisecond)
	//DELETE REQUEST
	fmt.Println("\n****DELETE REQUEST****")
	req, err = http.NewRequest("DELETE", "http://127.0.0.1:8080/STORAGE/"+key, nil)
	_, err = http.DefaultClient.Do(req)
	// Only the DELETE error is actually checked.
	if err != nil {
		log.Fatal(err)
	}
	time.Sleep(2000 * time.Millisecond)
	//POST REQUEST
	fmt.Println("\n****POST REQUEST****")
	key2 := generateNodeId()
	jsonStr = []byte(`{"Key":"`+key2+`","Value":"Helloword"}`)
	req, err = http.NewRequest("POST", "http://127.0.0.1:8081/STORAGE/", bytes.NewBuffer(jsonStr))
	req.Header.Set("X-Custom-Header", "myvalue")
	req.Header.Set("Content-Type", "application/json")
	_, err = http.DefaultClient.Do(req)
}
|
package chance_test
import (
"regexp"
"testing"
. "github.com/smartystreets/goconvey/convey"
"github.com/victorquinn/chancego"
)
// TestChar checks that chance.Char exists, always yields a lowercase letter,
// covers the whole alphabet over 2600 draws, and is roughly uniform.
func TestChar(t *testing.T) {
	Convey("Existence", t, func() {
		So(chance.Char(), ShouldNotBeNil)
	})
	Convey("Generates random character", t, func() {
		lowercase := regexp.MustCompile(`[a-z]`)
		counts := make(map[string]int)
		for draw := 0; draw < 2600; draw++ {
			ch := chance.Char()
			So(lowercase.MatchString(ch), ShouldBeTrue)
			counts[ch]++
		}
		Convey("Generated some of each character", func() {
			So(len(counts), ShouldEqual, 26)
		})
		Convey("And is relatively evenly distributed", func() {
			for _, n := range counts {
				So(n, ShouldAlmostEqual, 100, 30)
			}
		})
	})
}
// TestCharFromPool checks that chance.CharFromPool exists, panics on an empty
// pool, only yields characters from the pool, covers all pool characters over
// 500 draws, and is roughly uniform.
func TestCharFromPool(t *testing.T) {
	Convey("Existence", t, func() {
		So(chance.CharFromPool("a"), ShouldNotBeNil)
	})
	Convey("Should throw error if called with empty pool", t, func() {
		So(func() {
			ch := chance.CharFromPool("")
			// This next line should never get hit but need to use ch
			// or Go complains
			So(len(ch), ShouldBeZeroValue)
		}, ShouldPanic)
	})
	Convey("Generates random character", t, func() {
		poolRegex := regexp.MustCompile(`[a-e]`)
		counts := make(map[string]int)
		for draw := 0; draw < 500; draw++ {
			ch := chance.CharFromPool("abcde")
			So(poolRegex.MatchString(ch), ShouldBeTrue)
			counts[ch]++
		}
		Convey("Generated some of each character", func() {
			So(len(counts), ShouldEqual, 5)
		})
		Convey("And is relatively evenly distributed", func() {
			for _, n := range counts {
				So(n, ShouldAlmostEqual, 100, 30)
			}
		})
	})
}
|
// Copyright © 2018 NAME HERE <EMAIL ADDRESS>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"github.com/7cthunder/agenda/entity"
"github.com/spf13/cobra"
)
// registerCmd represents the register command.
// It validates the supplied flags, rejects duplicate usernames, and creates
// the new user in storage.
var registerCmd = &cobra.Command{
	Use:   "register -u=[username] -p=[password] -e=[email] -t=[phone]",
	Short: "Register a new account with username, password, email and phone",
	Long: `Register a new account with username, password, email and phone:
1. If the username you enter has been registered, you should change another username`,
	Run: func(cmd *cobra.Command, args []string) {
		username, _ := cmd.Flags().GetString("username")
		password, _ := cmd.Flags().GetString("password")
		email, _ := cmd.Flags().GetString("email")
		phone, _ := cmd.Flags().GetString("phone")
		logger := entity.NewLogger("[register]")
		// SECURITY FIX: never write the plaintext password to the log.
		logger.Println("You are calling register -u=" + username + " -p=****" + " -e=" + email + " -t=" + phone)
		instance := entity.GetStorage()
		if username == "" {
			logger.Println("ERROR: You do not enter username, please input again!")
			return
		}
		// Reject usernames that are already registered.
		filter := func(u *entity.User) bool {
			return u.GetName() == username
		}
		ulist := instance.QueryUser(filter)
		if len(ulist) > 0 {
			logger.Println("ERROR: Duplicate username, please change another one!")
			return
		}
		if password == "" {
			logger.Println("ERROR: You do not enter password, please input again!")
			return
		}
		if email == "" {
			logger.Println("ERROR: You do not enter email, please input again!")
			return
		}
		if phone == "" {
			logger.Println("ERROR: You do not enter phone, please input again!")
			return
		}
		instance.CreateUser(*entity.NewUser(username, password, email, phone))
		logger.Println("Register new user successfully!")
	},
}
// init attaches the register command to the root command and declares its
// four string flags (all defaulting to empty; Run validates they are set).
func init() {
	rootCmd.AddCommand(registerCmd)
	registerCmd.Flags().StringP("username", "u", "", "username message")
	registerCmd.Flags().StringP("password", "p", "", "password message")
	registerCmd.Flags().StringP("email", "e", "", "email message")
	registerCmd.Flags().StringP("phone", "t", "", "phone message")
}
|
package utils
import (
"errors"
"github.com/kataras/iris/v12/sessions/sessiondb/redis"
"strings"
)
// CheckLoginStatus reports whether the token stored in Redis under the
// authSid session for userId matches the supplied token.
// It returns (false, error) when no token is stored (not logged in) or when
// the stored token differs.
//
// FIX: the parameter was previously named "redis", shadowing the imported
// redis package; and the value.(string) assertion was unchecked, which would
// panic on a non-string value.
func CheckLoginStatus(db *redis.Database, authSid string, userId string, token string) (isLogin bool, err error) {
	value := db.Get(authSid, userId)
	if value == nil {
		return false, errors.New("authError:didn't login")
	}
	// Guard the type assertion; a non-string stored value is treated as a
	// mismatch instead of panicking.
	stored, ok := value.(string)
	if !ok {
		return false, errors.New("token don't match")
	}
	if strings.Compare(token, stored) == 0 {
		return true, nil
	}
	return false, errors.New("token don't match")
}
|
package ptp
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"io/ioutil"
"strconv"
"time"
"gopkg.in/yaml.v2"
)
// CryptoKey represents a key and it's expiration date.
// TTLConfig and KeyConfig come straight from the YAML key file;
// EnrichKeyValues derives Until and Key from them.
type CryptoKey struct {
	TTLConfig string `yaml:"ttl"` // expiration as a Unix-timestamp string
	KeyConfig string `yaml:"key"` // raw key material as text
	Until     time.Time           // parsed expiration time
	Key       []byte              // parsed key bytes
}
// Crypto is a object used by crypto subsystem.
// Keys holds every loaded key; Active reports whether encryption is enabled.
// (NOTE(review): ActiveKey is never assigned in the code visible here —
// confirm where it is selected.)
type Crypto struct {
	Keys      []CryptoKey
	ActiveKey CryptoKey
	Active    bool
}
// EnrichKeyValues update information about current and feature keys.
// It fills ckey.Key from key and ckey.Until from datetime (a Unix-timestamp
// string); when datetime cannot be parsed it falls back to now + 1 hour.
//
// FIX: the original checked the same parse error twice, logging both a
// Warning and an Error for a single failure; the redundant second check is
// removed (the returned value was identical on both paths).
func (c Crypto) EnrichKeyValues(ckey CryptoKey, key, datetime string) CryptoKey {
	ckey.Key = []byte(key)
	// Default value is +1 hour
	ckey.Until = time.Now().Add(60 * time.Minute)
	i, err := strconv.ParseInt(datetime, 10, 64)
	if err != nil {
		Log(Warning, "Failed to parse TTL. Falling back to default value of 1 hour")
		return ckey
	}
	ckey.Until = time.Unix(i, 0)
	return ckey
}
// ReadKeysFromFile read a file stored in a file system and extracts keys to be used.
// On success it appends the enriched key to c.Keys and marks c.Active; on any
// read/parse failure it clears c.Active.
//
// FIX 1: the receiver was a value (c Crypto), so every assignment to
// c.Active/c.Keys was silently discarded; it must be a pointer receiver.
// FIX 2: yaml.Unmarshal was given ckey by value — it requires a pointer to
// populate the struct (and errors otherwise).
func (c *Crypto) ReadKeysFromFile(filepath string) {
	yamlFile, err := ioutil.ReadFile(filepath)
	if err != nil {
		Log(Error, "Failed to read key file yaml: %v", err)
		c.Active = false
		return
	}
	var ckey CryptoKey
	err = yaml.Unmarshal(yamlFile, &ckey)
	if err != nil {
		Log(Error, "Failed to parse config: %v", err)
		c.Active = false
		return
	}
	ckey = c.EnrichKeyValues(ckey, ckey.KeyConfig, ckey.TTLConfig)
	c.Active = true
	c.Keys = append(c.Keys, ckey)
}
// encrypt AES-CBC-encrypts data with key. The returned slice is a random IV
// (one block) followed by the ciphertext. Input that is not a whole number of
// blocks is padded PKCS#7-style; note decrypt does not strip the padding.
//
// FIX: the padding condition was `len(data) != aes.BlockSize`, which skipped
// padding only for exactly-16-byte inputs while appending a full extra block
// to 32-, 48-byte, ... inputs. Pad precisely when the length is not a
// multiple of the block size (CryptBlocks requires a block multiple).
func (c Crypto) encrypt(key []byte, data []byte) ([]byte, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	if len(data)%aes.BlockSize != 0 {
		padding := aes.BlockSize - len(data)%aes.BlockSize
		data = append(data, bytes.Repeat([]byte{byte(padding)}, padding)...)
	}
	encData := make([]byte, aes.BlockSize+len(data))
	iv := encData[:aes.BlockSize]
	if _, err = rand.Read(iv); err != nil {
		return nil, err
	}
	mode := cipher.NewCBCEncrypter(block, iv)
	mode.CryptBlocks(encData[aes.BlockSize:], data)
	return encData, nil
}
// Decrypt decrypts data.
// The input layout is IV (first aes.BlockSize bytes) followed by ciphertext;
// decryption happens in place over the ciphertext portion and the (still
// padded) plaintext is returned.
//
// NOTE(review): there is no length validation — input shorter than one block,
// or a ciphertext that is not a block multiple, makes the slicing or
// CryptBlocks panic. Callers must validate; returning an error here would
// need the errors/fmt package, which this file does not currently import.
func (c Crypto) decrypt(key []byte, data []byte) ([]byte, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	encData := data[aes.BlockSize:]
	mode := cipher.NewCBCDecrypter(block, data[:aes.BlockSize])
	mode.CryptBlocks(encData, encData)
	return encData, nil
}
|
package tasks
import (
"github.com/emicklei/go-restful"
api "github.com/emicklei/go-restful-openapi"
"data-importer/dbcentral/etcd"
"data-importer/dbcentral/pg"
"data-importer/types"
"grm-service/mq"
. "grm-service/util"
)
// TasksSvc serves the data-importer task REST API. It bundles the storage
// backends, the message queue, and the directories its handlers use.
type TasksSvc struct {
	SysDB     *pg.SystemDB
	MetaDB    *pg.MetaDB
	DynamicDB *etcd.DynamicDB
	MsgQueue  *mq.RabbitMQ
	DataDir   string
	ConfigDir string
}
// WebService creates a new service that can handle REST requests for resources.
// It registers the /tasks routes: list tasks, terminate a task, delete tasks,
// and fetch a task's stdout/stderr logs.
func (s TasksSvc) WebService() *restful.WebService {
	ws := new(restful.WebService)
	ws.Path("/tasks")
	//ws.Consumes(restful.MIME_JSON, restful.MIME_XML)
	// FIX: Produces previously listed MIME_JSON twice — the duplicate was a
	// no-op (the commented Consumes suggests MIME_XML may have been intended;
	// kept JSON-only to preserve behavior).
	ws.Produces(restful.MIME_JSON)
	tags := []string{TR("data importer tasks")}
	// Get the task list.
	ws.Route(ws.GET("/{task-type}").To(s.getTask).
		Doc(TR("get task(s) info")).
		Param(ws.PathParameter("task-type", TR("task type"))).
		Param(ws.QueryParameter("limit", "limit").DataType("string").Required(false)).
		Param(ws.QueryParameter("offset", "offset").DataType("string").Required(false)).
		Param(ws.QueryParameter("sort", "sort").DataType("string").Required(false)).
		Param(ws.QueryParameter("order", "order").DataType("string").Required(false)).
		Metadata(api.KeyOpenAPITags, tags).
		Writes(types.TaskList{}))
	// Terminate a task.
	ws.Route(ws.PUT("/{task-id}/status").To(s.terminateTask).
		Doc(TR("terminate task by id")).
		Param(ws.PathParameter("task-id", TR("task id"))).
		Metadata(api.KeyOpenAPITags, tags))
	// Remove tasks.
	ws.Route(ws.DELETE("/{task-type}").To(s.deleteTask).
		Doc(TR("delete task by task id or a range of time")).
		Param(ws.PathParameter("task-type", TR("task type")).Required(true)).
		Param(ws.QueryParameter("task-id", TR("task id")).Required(false)).
		Param(ws.QueryParameter("start-time", TR("start time")).Required(false)).
		Param(ws.QueryParameter("end-time", TR("end time")).Required(false)).
		Metadata(api.KeyOpenAPITags, tags))
	// Get task output (logs).
	ws.Route(ws.GET("/{task-id}/logs/{log-type}").To(s.getTaskLog).
		Doc(TR("get task logs")).
		Param(ws.PathParameter("task-id", TR("task id"))).
		Param(ws.PathParameter("log-type", TR("log type: stdout/stderr"))).
		Metadata(api.KeyOpenAPITags, tags).
		Writes(TaskLogReply{}))
	return ws
}
|
package ast
// HasFields is an AST node that can hold Field children.
type HasFields interface {
	// AddField adds a Field to the node.
	AddField(*Field)
}
|
package socialmedia
import (
"time"
)
//go:generate stringer -type=MoodState
// MoodState identifies the author's mood attached to a post; String() is
// provided by the stringer directive above.
type MoodState int

// Here we define all the possible mood states using an
// iota enumerator.
const (
	MoodStateNeutral MoodState = iota
	MoodStateHappy
	MoodStateSad
	MoodStateAngry
	MoodStateHopeful
	MoodStateThrilled
	MoodStateBored
	MoodStateShy
	MoodStateComical
	MoodStateOnCloudNine
)
// AuditableContent is embedded into types we want to keep a check on for
// auditing purposes: it records who created/modified the value and when.
type AuditableContent struct {
	TimeCreated  time.Time // set by NewPost at creation
	TimeModified time.Time
	CreatedBy    string // set by NewPost to the author's username
	ModifiedBy   string
}
// Post represents a Social Media Post, including its audit trail.
type Post struct {
	AuditableContent // Embedded type providing the audit fields
	Caption          string
	MessageBody      string
	URL              string
	ImageURI         string
	ThumbnailURI     string
	Keywords         []string
	Likers           []string
	AuthorMood       MoodState
}
// Moods maps string aliases to their respective mood state.
var Moods map[string]MoodState

// init populates the mood alias map.
//
// FIX: the neutral alias was only registered under the misspelled key
// "netural". The correct spelling "neutral" is added; the legacy key is kept
// so existing callers keep working.
func init() {
	Moods = map[string]MoodState{
		"neutral":   MoodStateNeutral,
		"netural":   MoodStateNeutral, // legacy typo, retained for backward compatibility
		"happy":     MoodStateHappy,
		"sad":       MoodStateSad,
		"angry":     MoodStateAngry,
		"hopeful":   MoodStateHopeful,
		"thrilled":  MoodStateThrilled,
		"bored":     MoodStateBored,
		"shy":       MoodStateShy,
		"comical":   MoodStateComical,
		"cloudnine": MoodStateOnCloudNine,
	}
}
// NewPost creates a new social media post authored by username, stamping the
// audit trail with the author and the current time.
func NewPost(username string, mood MoodState, caption string, messageBody string, url string, imageURI string, thumbnailURI string, keywords []string) *Post {
	audit := AuditableContent{
		CreatedBy:   username,
		TimeCreated: time.Now(),
	}
	post := &Post{
		AuditableContent: audit,
		Caption:          caption,
		MessageBody:      messageBody,
		URL:              url,
		ImageURI:         imageURI,
		ThumbnailURI:     thumbnailURI,
		Keywords:         keywords,
		AuthorMood:       mood,
	}
	return post
}
|
package swarm_test
import (
"context"
"fmt"
"sync"
"testing"
"time"
. "gx/ipfs/QmTJCJaS8Cpjc2MkoS32iwr4zMZtbLkaF9GJsUgH1uwtN9/go-libp2p-swarm"
peer "gx/ipfs/QmPJxxDsX2UbchSHobbYuvz7qnyJTFKvaKMzE2rZWJ4x5B/go-libp2p-peer"
)
// getMockDialFunc returns a controllable DialFunc plus three handles:
// a release func that (exactly once) unblocks any pending dial so it returns
// a fresh Conn; a context that is cancelled as soon as the first dial
// finishes either way; and a channel that receives one value per dial call.
func getMockDialFunc() (DialFunc, func(), context.Context, <-chan struct{}) {
	dfcalls := make(chan struct{}, 512) // buffer it large enough that we won't care
	dialctx, cancel := context.WithCancel(context.Background())
	ch := make(chan struct{})
	f := func(ctx context.Context, p peer.ID) (*Conn, error) {
		dfcalls <- struct{}{}
		// Cancel dialctx when this dial attempt completes (success or error).
		defer cancel()
		select {
		case <-ch:
			return new(Conn), nil
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
	// sync.Once makes the release func safe to call multiple times.
	o := new(sync.Once)
	return f, func() { o.Do(func() { close(ch) }) }, dialctx, dfcalls
}
// TestBasicDialSync verifies that two concurrent DialLock calls for the same
// peer are coalesced into a single invocation of the underlying dial func.
func TestBasicDialSync(t *testing.T) {
	df, done, _, callsch := getMockDialFunc()
	dsync := NewDialSync(df)
	p := peer.ID("testpeer")
	ctx := context.Background()
	finished := make(chan struct{})
	// Two goroutines race to dial the same peer.
	go func() {
		_, err := dsync.DialLock(ctx, p)
		if err != nil {
			t.Error(err)
		}
		finished <- struct{}{}
	}()
	go func() {
		_, err := dsync.DialLock(ctx, p)
		if err != nil {
			t.Error(err)
		}
		finished <- struct{}{}
	}()
	// short sleep just to make sure we've moved around in the scheduler
	time.Sleep(time.Millisecond * 20)
	done()
	<-finished
	<-finished
	// The mock records every dial call; coalescing means at most one.
	if len(callsch) > 1 {
		t.Fatal("should only have called dial func once!")
	}
}
// TestDialSyncCancel verifies that cancelling one waiter's context does not
// disturb another waiter that joined the same in-flight dial.
func TestDialSyncCancel(t *testing.T) {
	df, done, _, dcall := getMockDialFunc()
	dsync := NewDialSync(df)
	p := peer.ID("testpeer")
	ctx1, cancel1 := context.WithCancel(context.Background())
	finished := make(chan struct{})
	// First waiter uses a cancellable context and should see its ctx error.
	go func() {
		_, err := dsync.DialLock(ctx1, p)
		if err != ctx1.Err() {
			t.Error("should have gotten context error")
		}
		finished <- struct{}{}
	}()
	// make sure the above makes it through the wait code first
	select {
	case <-dcall:
	case <-time.After(time.Second):
		t.Fatal("timed out waiting for dial to start")
	}
	// Add a second dialwait in so two actors are waiting on the same dial
	go func() {
		_, err := dsync.DialLock(context.Background(), p)
		if err != nil {
			t.Error(err)
		}
		finished <- struct{}{}
	}()
	time.Sleep(time.Millisecond * 20)
	// cancel the first dialwait, it should not affect the second at all
	cancel1()
	select {
	case <-finished:
	case <-time.After(time.Second):
		t.Fatal("timed out waiting for wait to exit")
	}
	// short sleep just to make sure we've moved around in the scheduler
	time.Sleep(time.Millisecond * 20)
	// Release the dial so the second waiter can complete successfully.
	done()
	<-finished
}
// TestDialSyncAllCancel verifies that when every waiter cancels, the
// underlying dial is aborted, and that the peer can be dialed again
// successfully afterwards.
func TestDialSyncAllCancel(t *testing.T) {
	df, done, dctx, _ := getMockDialFunc()
	dsync := NewDialSync(df)
	p := peer.ID("testpeer")
	ctx1, cancel1 := context.WithCancel(context.Background())
	finished := make(chan struct{})
	// Both waiters share ctx1, so one cancel abandons the whole dial.
	go func() {
		_, err := dsync.DialLock(ctx1, p)
		if err != ctx1.Err() {
			t.Error("should have gotten context error")
		}
		finished <- struct{}{}
	}()
	// Add a second dialwait in so two actors are waiting on the same dial
	go func() {
		_, err := dsync.DialLock(ctx1, p)
		if err != ctx1.Err() {
			t.Error("should have gotten context error")
		}
		finished <- struct{}{}
	}()
	cancel1()
	for i := 0; i < 2; i++ {
		select {
		case <-finished:
		case <-time.After(time.Second):
			t.Fatal("timed out waiting for wait to exit")
		}
	}
	// the dial should have exited now
	select {
	case <-dctx.Done():
	case <-time.After(time.Second):
		t.Fatal("timed out waiting for dial to return")
	}
	// should be able to successfully dial that peer again
	done()
	_, err := dsync.DialLock(context.Background(), p)
	if err != nil {
		t.Fatal(err)
	}
}
// TestFailFirst verifies that a failed dial is not cached: the first attempt
// errors, the second succeeds with a real connection.
func TestFailFirst(t *testing.T) {
	// count is mutated without synchronization; presumably DialSync runs at
	// most one dial at a time per peer, making this safe — confirm under -race.
	var count int
	f := func(ctx context.Context, p peer.ID) (*Conn, error) {
		if count > 0 {
			return new(Conn), nil
		}
		count++
		return nil, fmt.Errorf("gophers ate the modem")
	}
	ds := NewDialSync(f)
	p := peer.ID("testing")
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	_, err := ds.DialLock(ctx, p)
	if err == nil {
		t.Fatal("expected gophers to have eaten the modem")
	}
	c, err := ds.DialLock(ctx, p)
	if err != nil {
		t.Fatal(err)
	}
	if c == nil {
		t.Fatal("should have gotten a 'real' conn back")
	}
}
// TestStressActiveDial hammers DialLock from 100 goroutines x 10000 calls for
// the same peer to shake out races/deadlocks in the dial coalescing logic.
// Results and errors are deliberately ignored; the test passes by completing.
func TestStressActiveDial(t *testing.T) {
	ds := NewDialSync(func(ctx context.Context, p peer.ID) (*Conn, error) {
		return nil, nil
	})
	wg := sync.WaitGroup{}
	pid := peer.ID("foo")
	makeDials := func() {
		for i := 0; i < 10000; i++ {
			ds.DialLock(context.Background(), pid)
		}
		wg.Done()
	}
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go makeDials()
	}
	wg.Wait()
}
|
package decodeways
import (
"fmt"
"strconv"
)
// toKey renders a working buffer as a memoization key by shifting every byte
// into the rune 'a'+value (mirroring the original "%c" formatting, including
// byte wrap-around for the shift).
//
// FIX: the previous implementation re-formatted the whole accumulated string
// with fmt.Sprintf on every iteration (quadratic); build the key in one pass
// and one allocation instead.
func toKey(arr []byte) string {
	runes := make([]rune, len(arr))
	for i, byt := range arr {
		runes[i] = rune(byt + 'a')
	}
	return string(runes)
}
// decoder counts the distinct decodings reachable from s by repeatedly
// collapsing an adjacent digit pair whose combined value is <= 26 into a
// single token. memo marks states that were already expanded so each unique
// decoding is counted exactly once across the whole recursion.
//
// FIX: removed the leftover debug fmt.Printf that wrote every visited state
// to stdout on each recursive call.
func decoder(s []byte, memo map[string]int) int {
	key := toKey(s)
	if _, saw := memo[key]; saw {
		// This state was already fully counted via another branch;
		// contribute nothing to avoid double counting.
		return 0
	}
	var count int
	currentIncrement := 1
	for i := 0; i < len(s)-1; i++ {
		inDigit := s[i] - '0'
		if inDigit == 0 && i == 0 {
			// A leading zero can never start a valid decoding.
			return 0
		}
		if inDigit == 0 {
			// An interior zero must be absorbed by the previous digit, so
			// this state itself is not a complete decoding.
			currentIncrement--
		}
		nDigit := s[i+1] - '0'
		fmted := fmt.Sprintf("%d%d", inDigit, nDigit)
		toDigit, _ := strconv.Atoi(fmted)
		switch {
		case toDigit <= 26:
			// Collapse the pair into one token and recurse on the new state.
			nArray := make([]byte, 0, len(s)-1)
			nArray = append(nArray, s[:i]...)
			nArray = append(nArray, byte(toDigit))
			count += decoder(append(nArray, s[i+2:]...), memo)
		case toDigit > 26 && inDigit == 0:
			// A zero that cannot pair with its predecessor kills the state.
			return 0
		}
	}
	memo[key] = count + currentIncrement
	return count + currentIncrement
}
// NumDecodings will return the amount of ways s can be decoded.
func NumDecodings(s string) int {
	if s == "" {
		return 0
	}
	memo := make(map[string]int, len(s))
	return decoder([]byte(s), memo)
}
|
package context
import (
"testing"
)
// init installs a fresh in-memory context before any test in this file runs.
func init() {
	SetContext(NewMemoryContext())
}
// firstIndependentStruct is a leaf dependency with no dependencies of its own.
type firstIndependentStruct struct {
	val string
}
// secondIndependentStruct is a second leaf dependency; the tests register it
// last to exercise asynchronous dependency resolution.
type secondIndependentStruct struct {
	val string
}
// dependentStruct depends on both independent structs; the tests build it
// from instances delivered through dependency waiters.
type dependentStruct struct {
	firstDep  *firstIndependentStruct
	secondDep *secondIndependentStruct
}
// TestMemoryContext_DefaultContext registers a dependent struct in the default
// scope BEFORE one of its dependencies exists, then checks the context still
// resolves it once the late dependency is registered.
func TestMemoryContext_DefaultContext(t *testing.T) {
	GetContext().Reg((*firstIndependentStruct)(nil), func() interface{} {
		t.Log("Start init firstIndependentStruct")
		return &firstIndependentStruct{"firstTestString"}
	})
	firstDep := Dep((*firstIndependentStruct)(nil))
	secondDep := Dep((*secondIndependentStruct)(nil))
	// dependentStruct's factory blocks on both waiters.
	GetContext().Reg((*dependentStruct)(nil), func() interface{} {
		t.Log("Start init dependentStruct")
		return &dependentStruct{
			firstDep:  (<-firstDep.Waiter).(*firstIndependentStruct),
			secondDep: (<-secondDep.Waiter).(*secondIndependentStruct),
		}
	}, firstDep, secondDep)
	// Deliberately registered AFTER the dependent struct.
	GetContext().Reg((*secondIndependentStruct)(nil), func() interface{} {
		t.Log("Start init secondIndependentStruct")
		return &secondIndependentStruct{"secondTestString"}
	})
	t.Log("Start waiting for dependentStruct")
	actualInst := (<-GetContext().Ask((*dependentStruct)(nil))).(*dependentStruct)
	if actualInst.firstDep.val == "firstTestString" && actualInst.secondDep.val == "secondTestString" {
		t.Log("Initialized")
		return
	}
	t.Errorf("Expected values %v %v", "firstTestString", "secondTestString")
}
// TestMemoryContext_CustomContext mirrors the default-scope test but runs the
// same registration/resolution dance inside a named ("custom") scope.
func TestMemoryContext_CustomContext(t *testing.T) {
	const customScopeName = "custom"
	GetContext().RegScoped(customScopeName, (*firstIndependentStruct)(nil), func() interface{} {
		t.Log("Start init firstIndependentStruct")
		return &firstIndependentStruct{"firstTestString"}
	})
	firstDep := DepScoped(customScopeName, (*firstIndependentStruct)(nil))
	secondDep := DepScoped(customScopeName, (*secondIndependentStruct)(nil))
	// dependentStruct's factory blocks on both waiters.
	GetContext().RegScoped(customScopeName, (*dependentStruct)(nil), func() interface{} {
		t.Log("Start init dependentStruct")
		return &dependentStruct{
			firstDep:  (<-firstDep.Waiter).(*firstIndependentStruct),
			secondDep: (<-secondDep.Waiter).(*secondIndependentStruct),
		}
	}, firstDep, secondDep)
	// Deliberately registered AFTER the dependent struct.
	GetContext().RegScoped(customScopeName, (*secondIndependentStruct)(nil), func() interface{} {
		t.Log("Start init secondIndependentStruct")
		return &secondIndependentStruct{"secondTestString"}
	})
	t.Log("Start waiting for dependentStruct")
	actualInst := (<-GetContext().AskScoped(customScopeName, (*dependentStruct)(nil))).(*dependentStruct)
	if actualInst.firstDep.val == "firstTestString" && actualInst.secondDep.val == "secondTestString" {
		t.Log("Initialized")
		return
	}
	t.Errorf("Expected values %v %v", "firstTestString", "secondTestString")
}
|
package broker
import (
"fmt"
)
// Err is a broker error carrying a numeric code and a human-readable message.
type Err struct {
	code int
	msg  string
}

// Error renders the error as "<code> <msg>", satisfying the error interface.
func (e Err) Error() string {
	return fmt.Sprintf("%v %v", e.code, e.msg)
}
|
// Copyright 2021 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
containerpb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/container/container_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/container"
)
// Server implements the gRPC interface for Cluster.
// (NOTE(review): this file looks like generated glue code — prefer changing
// the generator over hand-editing.)
type ClusterServer struct{}
// ProtoToClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnum converts a ClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnum enum from its proto representation.
func ProtoToContainerClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnum(e containerpb.ContainerClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnum) *container.ClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnum {
	// Proto enum value 0 is the unspecified sentinel; represent it as unset.
	if e == 0 {
		return nil
	}
	// Look up the proto name, then strip the proto type prefix to recover
	// the DCL enum literal.
	if n, ok := containerpb.ContainerClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnum_name[int32(e)]; ok {
		e := container.ClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnum(n[len("ContainerClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnum"):])
		return &e
	}
	return nil
}
// ProtoToClusterNodePoolsConfigWorkloadMetadataConfigModeEnum converts a ClusterNodePoolsConfigWorkloadMetadataConfigModeEnum enum from its proto representation.
func ProtoToContainerClusterNodePoolsConfigWorkloadMetadataConfigModeEnum(e containerpb.ContainerClusterNodePoolsConfigWorkloadMetadataConfigModeEnum) *container.ClusterNodePoolsConfigWorkloadMetadataConfigModeEnum {
	// Proto enum value 0 is the unspecified sentinel; represent it as unset.
	if e == 0 {
		return nil
	}
	// Strip the proto type prefix to recover the DCL enum literal.
	if n, ok := containerpb.ContainerClusterNodePoolsConfigWorkloadMetadataConfigModeEnum_name[int32(e)]; ok {
		e := container.ClusterNodePoolsConfigWorkloadMetadataConfigModeEnum(n[len("ContainerClusterNodePoolsConfigWorkloadMetadataConfigModeEnum"):])
		return &e
	}
	return nil
}
// ProtoToClusterNodePoolsConfigTaintsEffectEnum converts a ClusterNodePoolsConfigTaintsEffectEnum enum from its proto representation.
func ProtoToContainerClusterNodePoolsConfigTaintsEffectEnum(e containerpb.ContainerClusterNodePoolsConfigTaintsEffectEnum) *container.ClusterNodePoolsConfigTaintsEffectEnum {
	// Proto enum value 0 is the unspecified sentinel; represent it as unset.
	if e == 0 {
		return nil
	}
	// Strip the proto type prefix to recover the DCL enum literal.
	if n, ok := containerpb.ContainerClusterNodePoolsConfigTaintsEffectEnum_name[int32(e)]; ok {
		e := container.ClusterNodePoolsConfigTaintsEffectEnum(n[len("ContainerClusterNodePoolsConfigTaintsEffectEnum"):])
		return &e
	}
	return nil
}
// ProtoToClusterNodePoolsConfigSandboxConfigTypeEnum converts a ClusterNodePoolsConfigSandboxConfigTypeEnum enum from its proto representation.
func ProtoToContainerClusterNodePoolsConfigSandboxConfigTypeEnum(e containerpb.ContainerClusterNodePoolsConfigSandboxConfigTypeEnum) *container.ClusterNodePoolsConfigSandboxConfigTypeEnum {
	// Proto enum value 0 is the unspecified sentinel; represent it as unset.
	if e == 0 {
		return nil
	}
	// Strip the proto type prefix to recover the DCL enum literal.
	if n, ok := containerpb.ContainerClusterNodePoolsConfigSandboxConfigTypeEnum_name[int32(e)]; ok {
		e := container.ClusterNodePoolsConfigSandboxConfigTypeEnum(n[len("ContainerClusterNodePoolsConfigSandboxConfigTypeEnum"):])
		return &e
	}
	return nil
}
// ProtoToClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnum converts a ClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnum enum from its proto representation.
func ProtoToContainerClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnum(e containerpb.ContainerClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnum) *container.ClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnum {
	// Proto enum value 0 is the unspecified sentinel; represent it as unset.
	if e == 0 {
		return nil
	}
	// Strip the proto type prefix to recover the DCL enum literal.
	if n, ok := containerpb.ContainerClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnum_name[int32(e)]; ok {
		e := container.ClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnum(n[len("ContainerClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnum"):])
		return &e
	}
	return nil
}
// ProtoToClusterNodePoolsStatusEnum converts a ClusterNodePoolsStatusEnum enum from its proto representation.
func ProtoToContainerClusterNodePoolsStatusEnum(e containerpb.ContainerClusterNodePoolsStatusEnum) *container.ClusterNodePoolsStatusEnum {
	// Proto enum value 0 is the unspecified sentinel; represent it as unset.
	if e == 0 {
		return nil
	}
	// Strip the proto type prefix to recover the DCL enum literal.
	if n, ok := containerpb.ContainerClusterNodePoolsStatusEnum_name[int32(e)]; ok {
		e := container.ClusterNodePoolsStatusEnum(n[len("ContainerClusterNodePoolsStatusEnum"):])
		return &e
	}
	return nil
}
// ProtoToClusterNodePoolsConditionsCodeEnum converts a ClusterNodePoolsConditionsCodeEnum enum from its proto representation.
func ProtoToContainerClusterNodePoolsConditionsCodeEnum(e containerpb.ContainerClusterNodePoolsConditionsCodeEnum) *container.ClusterNodePoolsConditionsCodeEnum {
	// Proto enum value 0 is the unspecified sentinel; represent it as unset.
	if e == 0 {
		return nil
	}
	// Strip the proto type prefix to recover the DCL enum literal.
	if n, ok := containerpb.ContainerClusterNodePoolsConditionsCodeEnum_name[int32(e)]; ok {
		e := container.ClusterNodePoolsConditionsCodeEnum(n[len("ContainerClusterNodePoolsConditionsCodeEnum"):])
		return &e
	}
	return nil
}
// ProtoToClusterNodePoolsConditionsCanonicalCodeEnum converts a ClusterNodePoolsConditionsCanonicalCodeEnum enum from its proto representation.
func ProtoToContainerClusterNodePoolsConditionsCanonicalCodeEnum(e containerpb.ContainerClusterNodePoolsConditionsCanonicalCodeEnum) *container.ClusterNodePoolsConditionsCanonicalCodeEnum {
	// Proto enum value 0 is the unspecified sentinel; represent it as unset.
	if e == 0 {
		return nil
	}
	// Strip the proto type prefix to recover the DCL enum literal.
	if n, ok := containerpb.ContainerClusterNodePoolsConditionsCanonicalCodeEnum_name[int32(e)]; ok {
		e := container.ClusterNodePoolsConditionsCanonicalCodeEnum(n[len("ContainerClusterNodePoolsConditionsCanonicalCodeEnum"):])
		return &e
	}
	return nil
}
// ProtoToClusterNetworkPolicyProviderEnum converts a ClusterNetworkPolicyProviderEnum enum from its proto representation.
func ProtoToContainerClusterNetworkPolicyProviderEnum(e containerpb.ContainerClusterNetworkPolicyProviderEnum) *container.ClusterNetworkPolicyProviderEnum {
	// Proto enum value 0 is the unspecified sentinel; represent it as unset.
	if e == 0 {
		return nil
	}
	// Strip the proto type prefix to recover the DCL enum literal.
	if n, ok := containerpb.ContainerClusterNetworkPolicyProviderEnum_name[int32(e)]; ok {
		e := container.ClusterNetworkPolicyProviderEnum(n[len("ContainerClusterNetworkPolicyProviderEnum"):])
		return &e
	}
	return nil
}
// ProtoToClusterNetworkConfigPrivateIPv6GoogleAccessEnum converts a ClusterNetworkConfigPrivateIPv6GoogleAccessEnum enum from its proto representation.
func ProtoToContainerClusterNetworkConfigPrivateIPv6GoogleAccessEnum(e containerpb.ContainerClusterNetworkConfigPrivateIPv6GoogleAccessEnum) *container.ClusterNetworkConfigPrivateIPv6GoogleAccessEnum {
	// Zero is the proto "unspecified" sentinel; it maps to a nil pointer.
	if e == 0 {
		return nil
	}
	name, ok := containerpb.ContainerClusterNetworkConfigPrivateIPv6GoogleAccessEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the generated type-name prefix to recover the bare enum value.
	v := container.ClusterNetworkConfigPrivateIPv6GoogleAccessEnum(name[len("ContainerClusterNetworkConfigPrivateIPv6GoogleAccessEnum"):])
	return &v
}
// ProtoToClusterDatabaseEncryptionStateEnum converts a ClusterDatabaseEncryptionStateEnum enum from its proto representation.
func ProtoToContainerClusterDatabaseEncryptionStateEnum(e containerpb.ContainerClusterDatabaseEncryptionStateEnum) *container.ClusterDatabaseEncryptionStateEnum {
	// Zero is the proto "unspecified" sentinel; it maps to a nil pointer.
	if e == 0 {
		return nil
	}
	name, ok := containerpb.ContainerClusterDatabaseEncryptionStateEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the generated type-name prefix to recover the bare enum value.
	v := container.ClusterDatabaseEncryptionStateEnum(name[len("ContainerClusterDatabaseEncryptionStateEnum"):])
	return &v
}
// ProtoToClusterConditionsCanonicalCodeEnum converts a ClusterConditionsCanonicalCodeEnum enum from its proto representation.
func ProtoToContainerClusterConditionsCanonicalCodeEnum(e containerpb.ContainerClusterConditionsCanonicalCodeEnum) *container.ClusterConditionsCanonicalCodeEnum {
	// Zero is the proto "unspecified" sentinel; it maps to a nil pointer.
	if e == 0 {
		return nil
	}
	name, ok := containerpb.ContainerClusterConditionsCanonicalCodeEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the generated type-name prefix to recover the bare enum value.
	v := container.ClusterConditionsCanonicalCodeEnum(name[len("ContainerClusterConditionsCanonicalCodeEnum"):])
	return &v
}
// ProtoToClusterNodeConfigWorkloadMetadataConfigModeEnum converts a ClusterNodeConfigWorkloadMetadataConfigModeEnum enum from its proto representation.
func ProtoToContainerClusterNodeConfigWorkloadMetadataConfigModeEnum(e containerpb.ContainerClusterNodeConfigWorkloadMetadataConfigModeEnum) *container.ClusterNodeConfigWorkloadMetadataConfigModeEnum {
	// Zero is the proto "unspecified" sentinel; it maps to a nil pointer.
	if e == 0 {
		return nil
	}
	name, ok := containerpb.ContainerClusterNodeConfigWorkloadMetadataConfigModeEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the generated type-name prefix to recover the bare enum value.
	v := container.ClusterNodeConfigWorkloadMetadataConfigModeEnum(name[len("ContainerClusterNodeConfigWorkloadMetadataConfigModeEnum"):])
	return &v
}
// ProtoToClusterNodeConfigTaintsEffectEnum converts a ClusterNodeConfigTaintsEffectEnum enum from its proto representation.
func ProtoToContainerClusterNodeConfigTaintsEffectEnum(e containerpb.ContainerClusterNodeConfigTaintsEffectEnum) *container.ClusterNodeConfigTaintsEffectEnum {
	// Zero is the proto "unspecified" sentinel; it maps to a nil pointer.
	if e == 0 {
		return nil
	}
	name, ok := containerpb.ContainerClusterNodeConfigTaintsEffectEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the generated type-name prefix to recover the bare enum value.
	v := container.ClusterNodeConfigTaintsEffectEnum(name[len("ContainerClusterNodeConfigTaintsEffectEnum"):])
	return &v
}
// ProtoToClusterNodeConfigSandboxConfigTypeEnum converts a ClusterNodeConfigSandboxConfigTypeEnum enum from its proto representation.
func ProtoToContainerClusterNodeConfigSandboxConfigTypeEnum(e containerpb.ContainerClusterNodeConfigSandboxConfigTypeEnum) *container.ClusterNodeConfigSandboxConfigTypeEnum {
	// Zero is the proto "unspecified" sentinel; it maps to a nil pointer.
	if e == 0 {
		return nil
	}
	name, ok := containerpb.ContainerClusterNodeConfigSandboxConfigTypeEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the generated type-name prefix to recover the bare enum value.
	v := container.ClusterNodeConfigSandboxConfigTypeEnum(name[len("ContainerClusterNodeConfigSandboxConfigTypeEnum"):])
	return &v
}
// ProtoToClusterNodeConfigReservationAffinityConsumeReservationTypeEnum converts a ClusterNodeConfigReservationAffinityConsumeReservationTypeEnum enum from its proto representation.
func ProtoToContainerClusterNodeConfigReservationAffinityConsumeReservationTypeEnum(e containerpb.ContainerClusterNodeConfigReservationAffinityConsumeReservationTypeEnum) *container.ClusterNodeConfigReservationAffinityConsumeReservationTypeEnum {
	// Zero is the proto "unspecified" sentinel; it maps to a nil pointer.
	if e == 0 {
		return nil
	}
	name, ok := containerpb.ContainerClusterNodeConfigReservationAffinityConsumeReservationTypeEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the generated type-name prefix to recover the bare enum value.
	v := container.ClusterNodeConfigReservationAffinityConsumeReservationTypeEnum(name[len("ContainerClusterNodeConfigReservationAffinityConsumeReservationTypeEnum"):])
	return &v
}
// ProtoToClusterReleaseChannelChannelEnum converts a ClusterReleaseChannelChannelEnum enum from its proto representation.
func ProtoToContainerClusterReleaseChannelChannelEnum(e containerpb.ContainerClusterReleaseChannelChannelEnum) *container.ClusterReleaseChannelChannelEnum {
	// Zero is the proto "unspecified" sentinel; it maps to a nil pointer.
	if e == 0 {
		return nil
	}
	name, ok := containerpb.ContainerClusterReleaseChannelChannelEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the generated type-name prefix to recover the bare enum value.
	v := container.ClusterReleaseChannelChannelEnum(name[len("ContainerClusterReleaseChannelChannelEnum"):])
	return &v
}
// ProtoToClusterMasterAuth converts a ClusterMasterAuth resource from its proto representation.
func ProtoToContainerClusterMasterAuth(p *containerpb.ContainerClusterMasterAuth) *container.ClusterMasterAuth {
	if p == nil {
		return nil
	}
	// Scalar fields become nil-able pointers; the nested message is converted recursively.
	return &container.ClusterMasterAuth{
		Username:                dcl.StringOrNil(p.Username),
		Password:                dcl.StringOrNil(p.Password),
		ClientCertificateConfig: ProtoToContainerClusterMasterAuthClientCertificateConfig(p.GetClientCertificateConfig()),
		ClusterCaCertificate:    dcl.StringOrNil(p.ClusterCaCertificate),
		ClientCertificate:       dcl.StringOrNil(p.ClientCertificate),
		ClientKey:               dcl.StringOrNil(p.ClientKey),
	}
}
// ProtoToClusterMasterAuthClientCertificateConfig converts a ClusterMasterAuthClientCertificateConfig resource from its proto representation.
func ProtoToContainerClusterMasterAuthClientCertificateConfig(p *containerpb.ContainerClusterMasterAuthClientCertificateConfig) *container.ClusterMasterAuthClientCertificateConfig {
	if p == nil {
		return nil
	}
	return &container.ClusterMasterAuthClientCertificateConfig{
		IssueClientCertificate: dcl.Bool(p.IssueClientCertificate),
	}
}
// ProtoToClusterAddonsConfig converts a ClusterAddonsConfig resource from its proto representation.
func ProtoToContainerClusterAddonsConfig(p *containerpb.ContainerClusterAddonsConfig) *container.ClusterAddonsConfig {
	if p == nil {
		return nil
	}
	// Each addon sub-message is converted by its own helper; nil protos yield nil fields.
	return &container.ClusterAddonsConfig{
		HttpLoadBalancing:                ProtoToContainerClusterAddonsConfigHttpLoadBalancing(p.GetHttpLoadBalancing()),
		HorizontalPodAutoscaling:         ProtoToContainerClusterAddonsConfigHorizontalPodAutoscaling(p.GetHorizontalPodAutoscaling()),
		KubernetesDashboard:              ProtoToContainerClusterAddonsConfigKubernetesDashboard(p.GetKubernetesDashboard()),
		NetworkPolicyConfig:              ProtoToContainerClusterAddonsConfigNetworkPolicyConfig(p.GetNetworkPolicyConfig()),
		CloudRunConfig:                   ProtoToContainerClusterAddonsConfigCloudRunConfig(p.GetCloudRunConfig()),
		DnsCacheConfig:                   ProtoToContainerClusterAddonsConfigDnsCacheConfig(p.GetDnsCacheConfig()),
		ConfigConnectorConfig:            ProtoToContainerClusterAddonsConfigConfigConnectorConfig(p.GetConfigConnectorConfig()),
		GcePersistentDiskCsiDriverConfig: ProtoToContainerClusterAddonsConfigGcePersistentDiskCsiDriverConfig(p.GetGcePersistentDiskCsiDriverConfig()),
	}
}
// ProtoToClusterAddonsConfigHttpLoadBalancing converts a ClusterAddonsConfigHttpLoadBalancing resource from its proto representation.
func ProtoToContainerClusterAddonsConfigHttpLoadBalancing(p *containerpb.ContainerClusterAddonsConfigHttpLoadBalancing) *container.ClusterAddonsConfigHttpLoadBalancing {
	if p == nil {
		return nil
	}
	return &container.ClusterAddonsConfigHttpLoadBalancing{
		Disabled: dcl.Bool(p.Disabled),
	}
}
// ProtoToClusterAddonsConfigHorizontalPodAutoscaling converts a ClusterAddonsConfigHorizontalPodAutoscaling resource from its proto representation.
func ProtoToContainerClusterAddonsConfigHorizontalPodAutoscaling(p *containerpb.ContainerClusterAddonsConfigHorizontalPodAutoscaling) *container.ClusterAddonsConfigHorizontalPodAutoscaling {
	if p == nil {
		return nil
	}
	return &container.ClusterAddonsConfigHorizontalPodAutoscaling{
		Disabled: dcl.Bool(p.Disabled),
	}
}
// ProtoToClusterAddonsConfigKubernetesDashboard converts a ClusterAddonsConfigKubernetesDashboard resource from its proto representation.
func ProtoToContainerClusterAddonsConfigKubernetesDashboard(p *containerpb.ContainerClusterAddonsConfigKubernetesDashboard) *container.ClusterAddonsConfigKubernetesDashboard {
	if p == nil {
		return nil
	}
	return &container.ClusterAddonsConfigKubernetesDashboard{
		Disabled: dcl.Bool(p.Disabled),
	}
}
// ProtoToClusterAddonsConfigNetworkPolicyConfig converts a ClusterAddonsConfigNetworkPolicyConfig resource from its proto representation.
func ProtoToContainerClusterAddonsConfigNetworkPolicyConfig(p *containerpb.ContainerClusterAddonsConfigNetworkPolicyConfig) *container.ClusterAddonsConfigNetworkPolicyConfig {
	if p == nil {
		return nil
	}
	return &container.ClusterAddonsConfigNetworkPolicyConfig{
		Disabled: dcl.Bool(p.Disabled),
	}
}
// ProtoToClusterAddonsConfigCloudRunConfig converts a ClusterAddonsConfigCloudRunConfig resource from its proto representation.
func ProtoToContainerClusterAddonsConfigCloudRunConfig(p *containerpb.ContainerClusterAddonsConfigCloudRunConfig) *container.ClusterAddonsConfigCloudRunConfig {
	if p == nil {
		return nil
	}
	return &container.ClusterAddonsConfigCloudRunConfig{
		Disabled:         dcl.Bool(p.Disabled),
		LoadBalancerType: ProtoToContainerClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnum(p.GetLoadBalancerType()),
	}
}
// ProtoToClusterAddonsConfigDnsCacheConfig converts a ClusterAddonsConfigDnsCacheConfig resource from its proto representation.
func ProtoToContainerClusterAddonsConfigDnsCacheConfig(p *containerpb.ContainerClusterAddonsConfigDnsCacheConfig) *container.ClusterAddonsConfigDnsCacheConfig {
	if p == nil {
		return nil
	}
	return &container.ClusterAddonsConfigDnsCacheConfig{
		Enabled: dcl.Bool(p.Enabled),
	}
}
// ProtoToClusterAddonsConfigConfigConnectorConfig converts a ClusterAddonsConfigConfigConnectorConfig resource from its proto representation.
func ProtoToContainerClusterAddonsConfigConfigConnectorConfig(p *containerpb.ContainerClusterAddonsConfigConfigConnectorConfig) *container.ClusterAddonsConfigConfigConnectorConfig {
	if p == nil {
		return nil
	}
	return &container.ClusterAddonsConfigConfigConnectorConfig{
		Enabled: dcl.Bool(p.Enabled),
	}
}
// ProtoToClusterAddonsConfigGcePersistentDiskCsiDriverConfig converts a ClusterAddonsConfigGcePersistentDiskCsiDriverConfig resource from its proto representation.
func ProtoToContainerClusterAddonsConfigGcePersistentDiskCsiDriverConfig(p *containerpb.ContainerClusterAddonsConfigGcePersistentDiskCsiDriverConfig) *container.ClusterAddonsConfigGcePersistentDiskCsiDriverConfig {
	if p == nil {
		return nil
	}
	return &container.ClusterAddonsConfigGcePersistentDiskCsiDriverConfig{
		Enabled: dcl.Bool(p.Enabled),
	}
}
// ProtoToClusterNodePools converts a ClusterNodePools resource from its proto representation.
func ProtoToContainerClusterNodePools(p *containerpb.ContainerClusterNodePools) *container.ClusterNodePools {
	if p == nil {
		return nil
	}
	out := &container.ClusterNodePools{
		Name:              dcl.StringOrNil(p.Name),
		Config:            ProtoToContainerClusterNodePoolsConfig(p.GetConfig()),
		InitialNodeCount:  dcl.Int64OrNil(p.InitialNodeCount),
		SelfLink:          dcl.StringOrNil(p.SelfLink),
		Version:           dcl.StringOrNil(p.Version),
		Status:            ProtoToContainerClusterNodePoolsStatusEnum(p.GetStatus()),
		StatusMessage:     dcl.StringOrNil(p.StatusMessage),
		Autoscaling:       ProtoToContainerClusterNodePoolsAutoscaling(p.GetAutoscaling()),
		Management:        ProtoToContainerClusterNodePoolsManagement(p.GetManagement()),
		MaxPodsConstraint: ProtoToContainerClusterNodePoolsMaxPodsConstraint(p.GetMaxPodsConstraint()),
		PodIPv4CidrSize:   dcl.Int64OrNil(p.PodIpv4CidrSize),
		UpgradeSettings:   ProtoToContainerClusterNodePoolsUpgradeSettings(p.GetUpgradeSettings()),
	}
	// Plain string slices are copied element-for-element; an empty proto list leaves the field nil.
	out.Locations = append(out.Locations, p.GetLocations()...)
	out.InstanceGroupUrls = append(out.InstanceGroupUrls, p.GetInstanceGroupUrls()...)
	// Repeated messages are converted individually and stored by value.
	for _, cond := range p.GetConditions() {
		out.Conditions = append(out.Conditions, *ProtoToContainerClusterNodePoolsConditions(cond))
	}
	return out
}
// ProtoToClusterNodePoolsConfig converts a ClusterNodePoolsConfig resource from its proto representation.
func ProtoToContainerClusterNodePoolsConfig(p *containerpb.ContainerClusterNodePoolsConfig) *container.ClusterNodePoolsConfig {
	if p == nil {
		return nil
	}
	out := &container.ClusterNodePoolsConfig{
		MachineType:            dcl.StringOrNil(p.MachineType),
		DiskSizeGb:             dcl.Int64OrNil(p.DiskSizeGb),
		ServiceAccount:         dcl.StringOrNil(p.ServiceAccount),
		ImageType:              dcl.StringOrNil(p.ImageType),
		LocalSsdCount:          dcl.Int64OrNil(p.LocalSsdCount),
		Preemptible:            dcl.Bool(p.Preemptible),
		DiskType:               dcl.StringOrNil(p.DiskType),
		MinCpuPlatform:         dcl.StringOrNil(p.MinCpuPlatform),
		WorkloadMetadataConfig: ProtoToContainerClusterNodePoolsConfigWorkloadMetadataConfig(p.GetWorkloadMetadataConfig()),
		SandboxConfig:          ProtoToContainerClusterNodePoolsConfigSandboxConfig(p.GetSandboxConfig()),
		NodeGroup:              dcl.StringOrNil(p.NodeGroup),
		ReservationAffinity:    ProtoToContainerClusterNodePoolsConfigReservationAffinity(p.GetReservationAffinity()),
		ShieldedInstanceConfig: ProtoToContainerClusterNodePoolsConfigShieldedInstanceConfig(p.GetShieldedInstanceConfig()),
		LinuxNodeConfig:        ProtoToContainerClusterNodePoolsConfigLinuxNodeConfig(p.GetLinuxNodeConfig()),
		KubeletConfig:          ProtoToContainerClusterNodePoolsConfigKubeletConfig(p.GetKubeletConfig()),
		BootDiskKmsKey:         dcl.StringOrNil(p.BootDiskKmsKey),
	}
	// Plain string slices are copied element-for-element; an empty proto list leaves the field nil.
	out.OAuthScopes = append(out.OAuthScopes, p.GetOauthScopes()...)
	out.Tags = append(out.Tags, p.GetTags()...)
	// Repeated messages are converted individually and stored by value.
	for _, acc := range p.GetAccelerators() {
		out.Accelerators = append(out.Accelerators, *ProtoToContainerClusterNodePoolsConfigAccelerators(acc))
	}
	for _, taint := range p.GetTaints() {
		out.Taints = append(out.Taints, *ProtoToContainerClusterNodePoolsConfigTaints(taint))
	}
	return out
}
// ProtoToClusterNodePoolsConfigAccelerators converts a ClusterNodePoolsConfigAccelerators resource from its proto representation.
func ProtoToContainerClusterNodePoolsConfigAccelerators(p *containerpb.ContainerClusterNodePoolsConfigAccelerators) *container.ClusterNodePoolsConfigAccelerators {
	if p == nil {
		return nil
	}
	return &container.ClusterNodePoolsConfigAccelerators{
		AcceleratorCount: dcl.Int64OrNil(p.AcceleratorCount),
		AcceleratorType:  dcl.StringOrNil(p.AcceleratorType),
	}
}
// ProtoToClusterNodePoolsConfigWorkloadMetadataConfig converts a ClusterNodePoolsConfigWorkloadMetadataConfig resource from its proto representation.
func ProtoToContainerClusterNodePoolsConfigWorkloadMetadataConfig(p *containerpb.ContainerClusterNodePoolsConfigWorkloadMetadataConfig) *container.ClusterNodePoolsConfigWorkloadMetadataConfig {
	if p == nil {
		return nil
	}
	return &container.ClusterNodePoolsConfigWorkloadMetadataConfig{
		Mode: ProtoToContainerClusterNodePoolsConfigWorkloadMetadataConfigModeEnum(p.GetMode()),
	}
}
// ProtoToClusterNodePoolsConfigTaints converts a ClusterNodePoolsConfigTaints resource from its proto representation.
func ProtoToContainerClusterNodePoolsConfigTaints(p *containerpb.ContainerClusterNodePoolsConfigTaints) *container.ClusterNodePoolsConfigTaints {
	if p == nil {
		return nil
	}
	return &container.ClusterNodePoolsConfigTaints{
		Key:    dcl.StringOrNil(p.Key),
		Value:  dcl.StringOrNil(p.Value),
		Effect: ProtoToContainerClusterNodePoolsConfigTaintsEffectEnum(p.GetEffect()),
	}
}
// ProtoToClusterNodePoolsConfigSandboxConfig converts a ClusterNodePoolsConfigSandboxConfig resource from its proto representation.
func ProtoToContainerClusterNodePoolsConfigSandboxConfig(p *containerpb.ContainerClusterNodePoolsConfigSandboxConfig) *container.ClusterNodePoolsConfigSandboxConfig {
	if p == nil {
		return nil
	}
	return &container.ClusterNodePoolsConfigSandboxConfig{
		Type: ProtoToContainerClusterNodePoolsConfigSandboxConfigTypeEnum(p.GetType()),
	}
}
// ProtoToClusterNodePoolsConfigReservationAffinity converts a ClusterNodePoolsConfigReservationAffinity resource from its proto representation.
func ProtoToContainerClusterNodePoolsConfigReservationAffinity(p *containerpb.ContainerClusterNodePoolsConfigReservationAffinity) *container.ClusterNodePoolsConfigReservationAffinity {
	if p == nil {
		return nil
	}
	out := &container.ClusterNodePoolsConfigReservationAffinity{
		ConsumeReservationType: ProtoToContainerClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnum(p.GetConsumeReservationType()),
		Key:                    dcl.StringOrNil(p.Key),
	}
	// Copy the repeated string values; an empty proto list leaves the field nil.
	out.Values = append(out.Values, p.GetValues()...)
	return out
}
// ProtoToClusterNodePoolsConfigShieldedInstanceConfig converts a ClusterNodePoolsConfigShieldedInstanceConfig resource from its proto representation.
func ProtoToContainerClusterNodePoolsConfigShieldedInstanceConfig(p *containerpb.ContainerClusterNodePoolsConfigShieldedInstanceConfig) *container.ClusterNodePoolsConfigShieldedInstanceConfig {
	if p == nil {
		return nil
	}
	return &container.ClusterNodePoolsConfigShieldedInstanceConfig{
		EnableSecureBoot:          dcl.Bool(p.EnableSecureBoot),
		EnableIntegrityMonitoring: dcl.Bool(p.EnableIntegrityMonitoring),
	}
}
// ProtoToClusterNodePoolsConfigLinuxNodeConfig converts a ClusterNodePoolsConfigLinuxNodeConfig resource from its proto representation.
func ProtoToContainerClusterNodePoolsConfigLinuxNodeConfig(p *containerpb.ContainerClusterNodePoolsConfigLinuxNodeConfig) *container.ClusterNodePoolsConfigLinuxNodeConfig {
	if p == nil {
		return nil
	}
	// No fields are mapped for this message; presence of the proto still yields a non-nil struct.
	return &container.ClusterNodePoolsConfigLinuxNodeConfig{}
}
// ProtoToClusterNodePoolsConfigKubeletConfig converts a ClusterNodePoolsConfigKubeletConfig resource from its proto representation.
func ProtoToContainerClusterNodePoolsConfigKubeletConfig(p *containerpb.ContainerClusterNodePoolsConfigKubeletConfig) *container.ClusterNodePoolsConfigKubeletConfig {
	if p == nil {
		return nil
	}
	return &container.ClusterNodePoolsConfigKubeletConfig{
		CpuManagerPolicy:  dcl.StringOrNil(p.CpuManagerPolicy),
		CpuCfsQuota:       dcl.Bool(p.CpuCfsQuota),
		CpuCfsQuotaPeriod: dcl.StringOrNil(p.CpuCfsQuotaPeriod),
	}
}
// ProtoToClusterNodePoolsAutoscaling converts a ClusterNodePoolsAutoscaling resource from its proto representation.
func ProtoToContainerClusterNodePoolsAutoscaling(p *containerpb.ContainerClusterNodePoolsAutoscaling) *container.ClusterNodePoolsAutoscaling {
	if p == nil {
		return nil
	}
	return &container.ClusterNodePoolsAutoscaling{
		Enabled:         dcl.Bool(p.Enabled),
		MinNodeCount:    dcl.Int64OrNil(p.MinNodeCount),
		MaxNodeCount:    dcl.Int64OrNil(p.MaxNodeCount),
		Autoprovisioned: dcl.Bool(p.Autoprovisioned),
	}
}
// ProtoToClusterNodePoolsManagement converts a ClusterNodePoolsManagement resource from its proto representation.
func ProtoToContainerClusterNodePoolsManagement(p *containerpb.ContainerClusterNodePoolsManagement) *container.ClusterNodePoolsManagement {
	if p == nil {
		return nil
	}
	return &container.ClusterNodePoolsManagement{
		AutoUpgrade:    dcl.Bool(p.AutoUpgrade),
		AutoRepair:     dcl.Bool(p.AutoRepair),
		UpgradeOptions: ProtoToContainerClusterNodePoolsManagementUpgradeOptions(p.GetUpgradeOptions()),
	}
}
// ProtoToClusterNodePoolsManagementUpgradeOptions converts a ClusterNodePoolsManagementUpgradeOptions resource from its proto representation.
func ProtoToContainerClusterNodePoolsManagementUpgradeOptions(p *containerpb.ContainerClusterNodePoolsManagementUpgradeOptions) *container.ClusterNodePoolsManagementUpgradeOptions {
	if p == nil {
		return nil
	}
	return &container.ClusterNodePoolsManagementUpgradeOptions{
		AutoUpgradeStartTime: dcl.StringOrNil(p.AutoUpgradeStartTime),
		Description:          dcl.StringOrNil(p.Description),
	}
}
// ProtoToClusterNodePoolsMaxPodsConstraint converts a ClusterNodePoolsMaxPodsConstraint resource from its proto representation.
func ProtoToContainerClusterNodePoolsMaxPodsConstraint(p *containerpb.ContainerClusterNodePoolsMaxPodsConstraint) *container.ClusterNodePoolsMaxPodsConstraint {
	if p == nil {
		return nil
	}
	return &container.ClusterNodePoolsMaxPodsConstraint{
		MaxPodsPerNode: dcl.Int64OrNil(p.MaxPodsPerNode),
	}
}
// ProtoToClusterNodePoolsConditions converts a ClusterNodePoolsConditions resource from its proto representation.
func ProtoToContainerClusterNodePoolsConditions(p *containerpb.ContainerClusterNodePoolsConditions) *container.ClusterNodePoolsConditions {
	if p == nil {
		return nil
	}
	return &container.ClusterNodePoolsConditions{
		Code:          ProtoToContainerClusterNodePoolsConditionsCodeEnum(p.GetCode()),
		Message:       dcl.StringOrNil(p.Message),
		CanonicalCode: ProtoToContainerClusterNodePoolsConditionsCanonicalCodeEnum(p.GetCanonicalCode()),
	}
}
// ProtoToClusterNodePoolsUpgradeSettings converts a ClusterNodePoolsUpgradeSettings resource from its proto representation.
func ProtoToContainerClusterNodePoolsUpgradeSettings(p *containerpb.ContainerClusterNodePoolsUpgradeSettings) *container.ClusterNodePoolsUpgradeSettings {
	if p == nil {
		return nil
	}
	return &container.ClusterNodePoolsUpgradeSettings{
		MaxSurge:       dcl.Int64OrNil(p.MaxSurge),
		MaxUnavailable: dcl.Int64OrNil(p.MaxUnavailable),
	}
}
// ProtoToClusterLegacyAbac converts a ClusterLegacyAbac resource from its proto representation.
func ProtoToContainerClusterLegacyAbac(p *containerpb.ContainerClusterLegacyAbac) *container.ClusterLegacyAbac {
	if p == nil {
		return nil
	}
	return &container.ClusterLegacyAbac{
		Enabled: dcl.Bool(p.Enabled),
	}
}
// ProtoToClusterNetworkPolicy converts a ClusterNetworkPolicy resource from its proto representation.
func ProtoToContainerClusterNetworkPolicy(p *containerpb.ContainerClusterNetworkPolicy) *container.ClusterNetworkPolicy {
	if p == nil {
		return nil
	}
	return &container.ClusterNetworkPolicy{
		Provider: ProtoToContainerClusterNetworkPolicyProviderEnum(p.GetProvider()),
		Enabled:  dcl.Bool(p.Enabled),
	}
}
// ProtoToClusterIPAllocationPolicy converts a ClusterIPAllocationPolicy resource from its proto representation.
func ProtoToContainerClusterIPAllocationPolicy(p *containerpb.ContainerClusterIPAllocationPolicy) *container.ClusterIPAllocationPolicy {
	if p == nil {
		return nil
	}
	// Note the casing difference: proto fields use "Ipv4", resource fields use "IPv4".
	return &container.ClusterIPAllocationPolicy{
		UseIPAliases:               dcl.Bool(p.UseIpAliases),
		CreateSubnetwork:           dcl.Bool(p.CreateSubnetwork),
		SubnetworkName:             dcl.StringOrNil(p.SubnetworkName),
		ClusterSecondaryRangeName:  dcl.StringOrNil(p.ClusterSecondaryRangeName),
		ServicesSecondaryRangeName: dcl.StringOrNil(p.ServicesSecondaryRangeName),
		ClusterIPv4CidrBlock:       dcl.StringOrNil(p.ClusterIpv4CidrBlock),
		NodeIPv4CidrBlock:          dcl.StringOrNil(p.NodeIpv4CidrBlock),
		ServicesIPv4CidrBlock:      dcl.StringOrNil(p.ServicesIpv4CidrBlock),
		TPUIPv4CidrBlock:           dcl.StringOrNil(p.TpuIpv4CidrBlock),
		ClusterIPv4Cidr:            dcl.StringOrNil(p.ClusterIpv4Cidr),
		NodeIPv4Cidr:               dcl.StringOrNil(p.NodeIpv4Cidr),
		ServicesIPv4Cidr:           dcl.StringOrNil(p.ServicesIpv4Cidr),
		UseRoutes:                  dcl.Bool(p.UseRoutes),
	}
}
// ProtoToClusterMasterAuthorizedNetworksConfig converts a ClusterMasterAuthorizedNetworksConfig resource from its proto representation.
func ProtoToContainerClusterMasterAuthorizedNetworksConfig(p *containerpb.ContainerClusterMasterAuthorizedNetworksConfig) *container.ClusterMasterAuthorizedNetworksConfig {
	if p == nil {
		return nil
	}
	out := &container.ClusterMasterAuthorizedNetworksConfig{
		Enabled: dcl.Bool(p.Enabled),
	}
	// Repeated CIDR block messages are converted individually and stored by value.
	for _, blk := range p.GetCidrBlocks() {
		out.CidrBlocks = append(out.CidrBlocks, *ProtoToContainerClusterMasterAuthorizedNetworksConfigCidrBlocks(blk))
	}
	return out
}
// ProtoToClusterMasterAuthorizedNetworksConfigCidrBlocks converts a ClusterMasterAuthorizedNetworksConfigCidrBlocks resource from its proto representation.
func ProtoToContainerClusterMasterAuthorizedNetworksConfigCidrBlocks(p *containerpb.ContainerClusterMasterAuthorizedNetworksConfigCidrBlocks) *container.ClusterMasterAuthorizedNetworksConfigCidrBlocks {
	if p == nil {
		return nil
	}
	return &container.ClusterMasterAuthorizedNetworksConfigCidrBlocks{
		DisplayName: dcl.StringOrNil(p.DisplayName),
		CidrBlock:   dcl.StringOrNil(p.CidrBlock),
	}
}
// ProtoToClusterBinaryAuthorization converts a ClusterBinaryAuthorization resource from its proto representation.
func ProtoToContainerClusterBinaryAuthorization(p *containerpb.ContainerClusterBinaryAuthorization) *container.ClusterBinaryAuthorization {
	if p == nil {
		return nil
	}
	return &container.ClusterBinaryAuthorization{
		Enabled: dcl.Bool(p.Enabled),
	}
}
// ProtoToClusterAutoscaling converts a ClusterAutoscaling resource from its proto representation.
func ProtoToContainerClusterAutoscaling(p *containerpb.ContainerClusterAutoscaling) *container.ClusterAutoscaling {
	if p == nil {
		return nil
	}
	out := &container.ClusterAutoscaling{
		EnableNodeAutoprovisioning:       dcl.Bool(p.EnableNodeAutoprovisioning),
		AutoprovisioningNodePoolDefaults: ProtoToContainerClusterAutoscalingAutoprovisioningNodePoolDefaults(p.GetAutoprovisioningNodePoolDefaults()),
	}
	// Repeated resource-limit messages are converted individually and stored by value.
	for _, lim := range p.GetResourceLimits() {
		out.ResourceLimits = append(out.ResourceLimits, *ProtoToContainerClusterAutoscalingResourceLimits(lim))
	}
	// Locations are plain strings; an empty proto list leaves the field nil.
	out.AutoprovisioningLocations = append(out.AutoprovisioningLocations, p.GetAutoprovisioningLocations()...)
	return out
}
// ProtoToClusterAutoscalingResourceLimits converts a ClusterAutoscalingResourceLimits resource from its proto representation.
func ProtoToContainerClusterAutoscalingResourceLimits(p *containerpb.ContainerClusterAutoscalingResourceLimits) *container.ClusterAutoscalingResourceLimits {
	if p == nil {
		return nil
	}
	return &container.ClusterAutoscalingResourceLimits{
		ResourceType: dcl.StringOrNil(p.ResourceType),
		Minimum:      dcl.Int64OrNil(p.Minimum),
		Maximum:      dcl.Int64OrNil(p.Maximum),
	}
}
// ProtoToClusterAutoscalingAutoprovisioningNodePoolDefaults converts a ClusterAutoscalingAutoprovisioningNodePoolDefaults resource from its proto representation.
func ProtoToContainerClusterAutoscalingAutoprovisioningNodePoolDefaults(p *containerpb.ContainerClusterAutoscalingAutoprovisioningNodePoolDefaults) *container.ClusterAutoscalingAutoprovisioningNodePoolDefaults {
	if p == nil {
		return nil
	}
	out := &container.ClusterAutoscalingAutoprovisioningNodePoolDefaults{
		ServiceAccount:         dcl.StringOrNil(p.ServiceAccount),
		UpgradeSettings:        ProtoToContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsUpgradeSettings(p.GetUpgradeSettings()),
		Management:             ProtoToContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsManagement(p.GetManagement()),
		MinCpuPlatform:         dcl.StringOrNil(p.MinCpuPlatform),
		DiskSizeGb:             dcl.Int64OrNil(p.DiskSizeGb),
		DiskType:               dcl.StringOrNil(p.DiskType),
		ShieldedInstanceConfig: ProtoToContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsShieldedInstanceConfig(p.GetShieldedInstanceConfig()),
		BootDiskKmsKey:         dcl.StringOrNil(p.BootDiskKmsKey),
	}
	// OAuth scopes are plain strings; an empty proto list leaves the field nil.
	out.OAuthScopes = append(out.OAuthScopes, p.GetOauthScopes()...)
	return out
}
// ProtoToClusterAutoscalingAutoprovisioningNodePoolDefaultsUpgradeSettings converts a ClusterAutoscalingAutoprovisioningNodePoolDefaultsUpgradeSettings resource from its proto representation.
func ProtoToContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsUpgradeSettings(p *containerpb.ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsUpgradeSettings) *container.ClusterAutoscalingAutoprovisioningNodePoolDefaultsUpgradeSettings {
	if p == nil {
		return nil
	}
	return &container.ClusterAutoscalingAutoprovisioningNodePoolDefaultsUpgradeSettings{
		MaxSurge:       dcl.Int64OrNil(p.MaxSurge),
		MaxUnavailable: dcl.Int64OrNil(p.MaxUnavailable),
	}
}
// ProtoToClusterAutoscalingAutoprovisioningNodePoolDefaultsManagement converts a ClusterAutoscalingAutoprovisioningNodePoolDefaultsManagement resource from its proto representation.
func ProtoToContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsManagement(p *containerpb.ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsManagement) *container.ClusterAutoscalingAutoprovisioningNodePoolDefaultsManagement {
	if p == nil {
		return nil
	}
	return &container.ClusterAutoscalingAutoprovisioningNodePoolDefaultsManagement{
		AutoUpgrade:    dcl.Bool(p.AutoUpgrade),
		AutoRepair:     dcl.Bool(p.AutoRepair),
		UpgradeOptions: ProtoToContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsManagementUpgradeOptions(p.GetUpgradeOptions()),
	}
}
// ProtoToClusterAutoscalingAutoprovisioningNodePoolDefaultsManagementUpgradeOptions converts a ClusterAutoscalingAutoprovisioningNodePoolDefaultsManagementUpgradeOptions resource from its proto representation.
func ProtoToContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsManagementUpgradeOptions(p *containerpb.ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsManagementUpgradeOptions) *container.ClusterAutoscalingAutoprovisioningNodePoolDefaultsManagementUpgradeOptions {
	if p == nil {
		return nil
	}
	return &container.ClusterAutoscalingAutoprovisioningNodePoolDefaultsManagementUpgradeOptions{
		AutoUpgradeStartTime: dcl.StringOrNil(p.AutoUpgradeStartTime),
		Description:          dcl.StringOrNil(p.Description),
	}
}
// ProtoToClusterAutoscalingAutoprovisioningNodePoolDefaultsShieldedInstanceConfig converts a ClusterAutoscalingAutoprovisioningNodePoolDefaultsShieldedInstanceConfig resource from its proto representation.
func ProtoToContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsShieldedInstanceConfig(p *containerpb.ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsShieldedInstanceConfig) *container.ClusterAutoscalingAutoprovisioningNodePoolDefaultsShieldedInstanceConfig {
	if p == nil {
		return nil
	}
	return &container.ClusterAutoscalingAutoprovisioningNodePoolDefaultsShieldedInstanceConfig{
		EnableSecureBoot:          dcl.Bool(p.EnableSecureBoot),
		EnableIntegrityMonitoring: dcl.Bool(p.EnableIntegrityMonitoring),
	}
}
// ProtoToClusterNetworkConfig converts a ClusterNetworkConfig resource from its proto representation.
func ProtoToContainerClusterNetworkConfig(p *containerpb.ContainerClusterNetworkConfig) *container.ClusterNetworkConfig {
	if p == nil {
		return nil
	}
	return &container.ClusterNetworkConfig{
		Network:                   dcl.StringOrNil(p.Network),
		Subnetwork:                dcl.StringOrNil(p.Subnetwork),
		EnableIntraNodeVisibility: dcl.Bool(p.EnableIntraNodeVisibility),
		DefaultSnatStatus:         ProtoToContainerClusterNetworkConfigDefaultSnatStatus(p.GetDefaultSnatStatus()),
		PrivateIPv6GoogleAccess:   ProtoToContainerClusterNetworkConfigPrivateIPv6GoogleAccessEnum(p.GetPrivateIpv6GoogleAccess()),
	}
}
// ProtoToClusterNetworkConfigDefaultSnatStatus converts a ClusterNetworkConfigDefaultSnatStatus resource from its proto representation.
func ProtoToContainerClusterNetworkConfigDefaultSnatStatus(p *containerpb.ContainerClusterNetworkConfigDefaultSnatStatus) *container.ClusterNetworkConfigDefaultSnatStatus {
	if p == nil {
		return nil
	}
	return &container.ClusterNetworkConfigDefaultSnatStatus{
		Disabled: dcl.Bool(p.Disabled),
	}
}
// ProtoToClusterMaintenancePolicy converts a ClusterMaintenancePolicy resource from its proto representation.
func ProtoToContainerClusterMaintenancePolicy(p *containerpb.ContainerClusterMaintenancePolicy) *container.ClusterMaintenancePolicy {
	if p == nil {
		return nil
	}
	return &container.ClusterMaintenancePolicy{
		Window:          ProtoToContainerClusterMaintenancePolicyWindow(p.GetWindow()),
		ResourceVersion: dcl.StringOrNil(p.ResourceVersion),
	}
}
// ProtoToClusterMaintenancePolicyWindow converts a ClusterMaintenancePolicyWindow resource from its proto representation.
func ProtoToContainerClusterMaintenancePolicyWindow(p *containerpb.ContainerClusterMaintenancePolicyWindow) *container.ClusterMaintenancePolicyWindow {
	if p == nil {
		return nil
	}
	return &container.ClusterMaintenancePolicyWindow{
		DailyMaintenanceWindow: ProtoToContainerClusterMaintenancePolicyWindowDailyMaintenanceWindow(p.GetDailyMaintenanceWindow()),
		RecurringWindow:        ProtoToContainerClusterMaintenancePolicyWindowRecurringWindow(p.GetRecurringWindow()),
	}
}
// ProtoToClusterMaintenancePolicyWindowDailyMaintenanceWindow converts a ClusterMaintenancePolicyWindowDailyMaintenanceWindow resource from its proto representation.
func ProtoToContainerClusterMaintenancePolicyWindowDailyMaintenanceWindow(p *containerpb.ContainerClusterMaintenancePolicyWindowDailyMaintenanceWindow) *container.ClusterMaintenancePolicyWindowDailyMaintenanceWindow {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterMaintenancePolicyWindowDailyMaintenanceWindow{
		StartTime: dcl.StringOrNil(p.GetStartTime()),
		Duration:  dcl.StringOrNil(p.Duration),
	}
}
// ProtoToClusterMaintenancePolicyWindowRecurringWindow converts a ClusterMaintenancePolicyWindowRecurringWindow resource from its proto representation.
func ProtoToContainerClusterMaintenancePolicyWindowRecurringWindow(p *containerpb.ContainerClusterMaintenancePolicyWindowRecurringWindow) *container.ClusterMaintenancePolicyWindowRecurringWindow {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterMaintenancePolicyWindowRecurringWindow{
		Window:     ProtoToContainerClusterMaintenancePolicyWindowRecurringWindowWindow(p.GetWindow()),
		Recurrence: dcl.StringOrNil(p.Recurrence),
	}
}
// ProtoToClusterMaintenancePolicyWindowRecurringWindowWindow converts a ClusterMaintenancePolicyWindowRecurringWindowWindow resource from its proto representation.
func ProtoToContainerClusterMaintenancePolicyWindowRecurringWindowWindow(p *containerpb.ContainerClusterMaintenancePolicyWindowRecurringWindowWindow) *container.ClusterMaintenancePolicyWindowRecurringWindowWindow {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterMaintenancePolicyWindowRecurringWindowWindow{
		StartTime: dcl.StringOrNil(p.GetStartTime()),
		EndTime:   dcl.StringOrNil(p.GetEndTime()),
	}
}
// ProtoToClusterDefaultMaxPodsConstraint converts a ClusterDefaultMaxPodsConstraint resource from its proto representation.
func ProtoToContainerClusterDefaultMaxPodsConstraint(p *containerpb.ContainerClusterDefaultMaxPodsConstraint) *container.ClusterDefaultMaxPodsConstraint {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterDefaultMaxPodsConstraint{
		MaxPodsPerNode: dcl.StringOrNil(p.MaxPodsPerNode),
	}
}
// ProtoToClusterResourceUsageExportConfig converts a ClusterResourceUsageExportConfig resource from its proto representation.
func ProtoToContainerClusterResourceUsageExportConfig(p *containerpb.ContainerClusterResourceUsageExportConfig) *container.ClusterResourceUsageExportConfig {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterResourceUsageExportConfig{
		BigqueryDestination:           ProtoToContainerClusterResourceUsageExportConfigBigqueryDestination(p.GetBigqueryDestination()),
		EnableNetworkEgressMonitoring: dcl.Bool(p.EnableNetworkEgressMonitoring),
		ConsumptionMeteringConfig:     ProtoToContainerClusterResourceUsageExportConfigConsumptionMeteringConfig(p.GetConsumptionMeteringConfig()),
		EnableNetworkEgressMetering:   dcl.Bool(p.EnableNetworkEgressMetering),
	}
}
// ProtoToClusterResourceUsageExportConfigBigqueryDestination converts a ClusterResourceUsageExportConfigBigqueryDestination resource from its proto representation.
func ProtoToContainerClusterResourceUsageExportConfigBigqueryDestination(p *containerpb.ContainerClusterResourceUsageExportConfigBigqueryDestination) *container.ClusterResourceUsageExportConfigBigqueryDestination {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterResourceUsageExportConfigBigqueryDestination{
		DatasetId: dcl.StringOrNil(p.DatasetId),
	}
}
// ProtoToClusterResourceUsageExportConfigConsumptionMeteringConfig converts a ClusterResourceUsageExportConfigConsumptionMeteringConfig resource from its proto representation.
func ProtoToContainerClusterResourceUsageExportConfigConsumptionMeteringConfig(p *containerpb.ContainerClusterResourceUsageExportConfigConsumptionMeteringConfig) *container.ClusterResourceUsageExportConfigConsumptionMeteringConfig {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterResourceUsageExportConfigConsumptionMeteringConfig{
		Enabled: dcl.Bool(p.Enabled),
	}
}
// ProtoToClusterAuthenticatorGroupsConfig converts a ClusterAuthenticatorGroupsConfig resource from its proto representation.
func ProtoToContainerClusterAuthenticatorGroupsConfig(p *containerpb.ContainerClusterAuthenticatorGroupsConfig) *container.ClusterAuthenticatorGroupsConfig {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterAuthenticatorGroupsConfig{
		Enabled:       dcl.Bool(p.Enabled),
		SecurityGroup: dcl.StringOrNil(p.SecurityGroup),
	}
}
// ProtoToClusterPrivateClusterConfig converts a ClusterPrivateClusterConfig resource from its proto representation.
func ProtoToContainerClusterPrivateClusterConfig(p *containerpb.ContainerClusterPrivateClusterConfig) *container.ClusterPrivateClusterConfig {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterPrivateClusterConfig{
		EnablePrivateNodes:       dcl.Bool(p.EnablePrivateNodes),
		EnablePrivateEndpoint:    dcl.Bool(p.EnablePrivateEndpoint),
		MasterIPv4CidrBlock:      dcl.StringOrNil(p.MasterIpv4CidrBlock),
		PrivateEndpoint:          dcl.StringOrNil(p.PrivateEndpoint),
		PublicEndpoint:           dcl.StringOrNil(p.PublicEndpoint),
		PeeringName:              dcl.StringOrNil(p.PeeringName),
		MasterGlobalAccessConfig: ProtoToContainerClusterPrivateClusterConfigMasterGlobalAccessConfig(p.GetMasterGlobalAccessConfig()),
	}
}
// ProtoToClusterPrivateClusterConfigMasterGlobalAccessConfig converts a ClusterPrivateClusterConfigMasterGlobalAccessConfig resource from its proto representation.
func ProtoToContainerClusterPrivateClusterConfigMasterGlobalAccessConfig(p *containerpb.ContainerClusterPrivateClusterConfigMasterGlobalAccessConfig) *container.ClusterPrivateClusterConfigMasterGlobalAccessConfig {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterPrivateClusterConfigMasterGlobalAccessConfig{
		Enabled: dcl.Bool(p.Enabled),
	}
}
// ProtoToClusterDatabaseEncryption converts a ClusterDatabaseEncryption resource from its proto representation.
func ProtoToContainerClusterDatabaseEncryption(p *containerpb.ContainerClusterDatabaseEncryption) *container.ClusterDatabaseEncryption {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterDatabaseEncryption{
		State:   ProtoToContainerClusterDatabaseEncryptionStateEnum(p.GetState()),
		KeyName: dcl.StringOrNil(p.KeyName),
	}
}
// ProtoToClusterVerticalPodAutoscaling converts a ClusterVerticalPodAutoscaling resource from its proto representation.
func ProtoToContainerClusterVerticalPodAutoscaling(p *containerpb.ContainerClusterVerticalPodAutoscaling) *container.ClusterVerticalPodAutoscaling {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterVerticalPodAutoscaling{
		Enabled: dcl.Bool(p.Enabled),
	}
}
// ProtoToClusterShieldedNodes converts a ClusterShieldedNodes resource from its proto representation.
func ProtoToContainerClusterShieldedNodes(p *containerpb.ContainerClusterShieldedNodes) *container.ClusterShieldedNodes {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterShieldedNodes{
		Enabled: dcl.Bool(p.Enabled),
	}
}
// ProtoToClusterConditions converts a ClusterConditions resource from its proto representation.
func ProtoToContainerClusterConditions(p *containerpb.ContainerClusterConditions) *container.ClusterConditions {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterConditions{
		Code:          dcl.StringOrNil(p.Code),
		Message:       dcl.StringOrNil(p.Message),
		CanonicalCode: ProtoToContainerClusterConditionsCanonicalCodeEnum(p.GetCanonicalCode()),
	}
}
// ProtoToClusterAutopilot converts a ClusterAutopilot resource from its proto representation.
func ProtoToContainerClusterAutopilot(p *containerpb.ContainerClusterAutopilot) *container.ClusterAutopilot {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterAutopilot{
		Enabled: dcl.Bool(p.Enabled),
	}
}
// ProtoToClusterNodeConfig converts a ClusterNodeConfig resource from its proto representation.
func ProtoToContainerClusterNodeConfig(p *containerpb.ContainerClusterNodeConfig) *container.ClusterNodeConfig {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	out := &container.ClusterNodeConfig{
		MachineType:            dcl.StringOrNil(p.MachineType),
		DiskSizeGb:             dcl.Int64OrNil(p.DiskSizeGb),
		ServiceAccount:         dcl.StringOrNil(p.ServiceAccount),
		ImageType:              dcl.StringOrNil(p.ImageType),
		LocalSsdCount:          dcl.Int64OrNil(p.LocalSsdCount),
		Preemptible:            dcl.Bool(p.Preemptible),
		DiskType:               dcl.StringOrNil(p.DiskType),
		MinCpuPlatform:         dcl.StringOrNil(p.MinCpuPlatform),
		WorkloadMetadataConfig: ProtoToContainerClusterNodeConfigWorkloadMetadataConfig(p.GetWorkloadMetadataConfig()),
		SandboxConfig:          ProtoToContainerClusterNodeConfigSandboxConfig(p.GetSandboxConfig()),
		NodeGroup:              dcl.StringOrNil(p.NodeGroup),
		ReservationAffinity:    ProtoToContainerClusterNodeConfigReservationAffinity(p.GetReservationAffinity()),
		ShieldedInstanceConfig: ProtoToContainerClusterNodeConfigShieldedInstanceConfig(p.GetShieldedInstanceConfig()),
		LinuxNodeConfig:        ProtoToContainerClusterNodeConfigLinuxNodeConfig(p.GetLinuxNodeConfig()),
		KubeletConfig:          ProtoToContainerClusterNodeConfigKubeletConfig(p.GetKubeletConfig()),
		BootDiskKmsKey:         dcl.StringOrNil(p.BootDiskKmsKey),
	}
	// Plain string slices can be copied in one append; nested messages need
	// element-wise conversion.
	out.OAuthScopes = append(out.OAuthScopes, p.GetOauthScopes()...)
	out.Tags = append(out.Tags, p.GetTags()...)
	for _, a := range p.GetAccelerators() {
		out.Accelerators = append(out.Accelerators, *ProtoToContainerClusterNodeConfigAccelerators(a))
	}
	for _, t := range p.GetTaints() {
		out.Taints = append(out.Taints, *ProtoToContainerClusterNodeConfigTaints(t))
	}
	return out
}
// ProtoToClusterNodeConfigAccelerators converts a ClusterNodeConfigAccelerators resource from its proto representation.
func ProtoToContainerClusterNodeConfigAccelerators(p *containerpb.ContainerClusterNodeConfigAccelerators) *container.ClusterNodeConfigAccelerators {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterNodeConfigAccelerators{
		AcceleratorCount: dcl.Int64OrNil(p.AcceleratorCount),
		AcceleratorType:  dcl.StringOrNil(p.AcceleratorType),
	}
}
// ProtoToClusterNodeConfigWorkloadMetadataConfig converts a ClusterNodeConfigWorkloadMetadataConfig resource from its proto representation.
func ProtoToContainerClusterNodeConfigWorkloadMetadataConfig(p *containerpb.ContainerClusterNodeConfigWorkloadMetadataConfig) *container.ClusterNodeConfigWorkloadMetadataConfig {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterNodeConfigWorkloadMetadataConfig{
		Mode: ProtoToContainerClusterNodeConfigWorkloadMetadataConfigModeEnum(p.GetMode()),
	}
}
// ProtoToClusterNodeConfigTaints converts a ClusterNodeConfigTaints resource from its proto representation.
func ProtoToContainerClusterNodeConfigTaints(p *containerpb.ContainerClusterNodeConfigTaints) *container.ClusterNodeConfigTaints {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterNodeConfigTaints{
		Key:    dcl.StringOrNil(p.Key),
		Value:  dcl.StringOrNil(p.Value),
		Effect: ProtoToContainerClusterNodeConfigTaintsEffectEnum(p.GetEffect()),
	}
}
// ProtoToClusterNodeConfigSandboxConfig converts a ClusterNodeConfigSandboxConfig resource from its proto representation.
func ProtoToContainerClusterNodeConfigSandboxConfig(p *containerpb.ContainerClusterNodeConfigSandboxConfig) *container.ClusterNodeConfigSandboxConfig {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterNodeConfigSandboxConfig{
		Type: ProtoToContainerClusterNodeConfigSandboxConfigTypeEnum(p.GetType()),
	}
}
// ProtoToClusterNodeConfigReservationAffinity converts a ClusterNodeConfigReservationAffinity resource from its proto representation.
func ProtoToContainerClusterNodeConfigReservationAffinity(p *containerpb.ContainerClusterNodeConfigReservationAffinity) *container.ClusterNodeConfigReservationAffinity {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	out := &container.ClusterNodeConfigReservationAffinity{
		ConsumeReservationType: ProtoToContainerClusterNodeConfigReservationAffinityConsumeReservationTypeEnum(p.GetConsumeReservationType()),
		Key:                    dcl.StringOrNil(p.Key),
	}
	// Values is a plain string slice; copy it with a single append.
	out.Values = append(out.Values, p.GetValues()...)
	return out
}
// ProtoToClusterNodeConfigShieldedInstanceConfig converts a ClusterNodeConfigShieldedInstanceConfig resource from its proto representation.
func ProtoToContainerClusterNodeConfigShieldedInstanceConfig(p *containerpb.ContainerClusterNodeConfigShieldedInstanceConfig) *container.ClusterNodeConfigShieldedInstanceConfig {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterNodeConfigShieldedInstanceConfig{
		EnableSecureBoot:          dcl.Bool(p.EnableSecureBoot),
		EnableIntegrityMonitoring: dcl.Bool(p.EnableIntegrityMonitoring),
	}
}
// ProtoToClusterNodeConfigLinuxNodeConfig converts a ClusterNodeConfigLinuxNodeConfig resource from its proto representation.
func ProtoToContainerClusterNodeConfigLinuxNodeConfig(p *containerpb.ContainerClusterNodeConfigLinuxNodeConfig) *container.ClusterNodeConfigLinuxNodeConfig {
	// A nil proto maps to a nil resource; otherwise the resource currently
	// carries no scalar fields of its own.
	if p == nil {
		return nil
	}
	return &container.ClusterNodeConfigLinuxNodeConfig{}
}
// ProtoToClusterNodeConfigKubeletConfig converts a ClusterNodeConfigKubeletConfig resource from its proto representation.
func ProtoToContainerClusterNodeConfigKubeletConfig(p *containerpb.ContainerClusterNodeConfigKubeletConfig) *container.ClusterNodeConfigKubeletConfig {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterNodeConfigKubeletConfig{
		CpuManagerPolicy:  dcl.StringOrNil(p.CpuManagerPolicy),
		CpuCfsQuota:       dcl.Bool(p.CpuCfsQuota),
		CpuCfsQuotaPeriod: dcl.StringOrNil(p.CpuCfsQuotaPeriod),
	}
}
// ProtoToClusterReleaseChannel converts a ClusterReleaseChannel resource from its proto representation.
func ProtoToContainerClusterReleaseChannel(p *containerpb.ContainerClusterReleaseChannel) *container.ClusterReleaseChannel {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterReleaseChannel{
		Channel: ProtoToContainerClusterReleaseChannelChannelEnum(p.GetChannel()),
	}
}
// ProtoToClusterWorkloadIdentityConfig converts a ClusterWorkloadIdentityConfig resource from its proto representation.
func ProtoToContainerClusterWorkloadIdentityConfig(p *containerpb.ContainerClusterWorkloadIdentityConfig) *container.ClusterWorkloadIdentityConfig {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterWorkloadIdentityConfig{
		WorkloadPool: dcl.StringOrNil(p.WorkloadPool),
	}
}
// ProtoToClusterNotificationConfig converts a ClusterNotificationConfig resource from its proto representation.
func ProtoToContainerClusterNotificationConfig(p *containerpb.ContainerClusterNotificationConfig) *container.ClusterNotificationConfig {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterNotificationConfig{
		Pubsub: ProtoToContainerClusterNotificationConfigPubsub(p.GetPubsub()),
	}
}
// ProtoToClusterNotificationConfigPubsub converts a ClusterNotificationConfigPubsub resource from its proto representation.
func ProtoToContainerClusterNotificationConfigPubsub(p *containerpb.ContainerClusterNotificationConfigPubsub) *container.ClusterNotificationConfigPubsub {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterNotificationConfigPubsub{
		Enabled: dcl.Bool(p.Enabled),
		Topic:   dcl.StringOrNil(p.Topic),
	}
}
// ProtoToClusterConfidentialNodes converts a ClusterConfidentialNodes resource from its proto representation.
func ProtoToContainerClusterConfidentialNodes(p *containerpb.ContainerClusterConfidentialNodes) *container.ClusterConfidentialNodes {
	// A nil proto maps to a nil resource.
	if p == nil {
		return nil
	}
	return &container.ClusterConfidentialNodes{
		Enabled: dcl.Bool(p.Enabled),
	}
}
// ProtoToCluster converts a Cluster resource from its proto representation.
// A nil proto maps to a nil resource, matching the behavior of every other
// converter in this file; previously a nil input would panic on the direct
// field accesses below (p.Name, p.Status, ...), since unlike the Get* proto
// accessors, raw field reads are not nil-safe.
func ProtoToCluster(p *containerpb.ContainerCluster) *container.Cluster {
	if p == nil {
		return nil
	}
	obj := &container.Cluster{
		Name:                           dcl.StringOrNil(p.Name),
		Description:                    dcl.StringOrNil(p.Description),
		InitialNodeCount:               dcl.Int64OrNil(p.InitialNodeCount),
		MasterAuth:                     ProtoToContainerClusterMasterAuth(p.GetMasterAuth()),
		LoggingService:                 dcl.StringOrNil(p.LoggingService),
		MonitoringService:              dcl.StringOrNil(p.MonitoringService),
		Network:                        dcl.StringOrNil(p.Network),
		ClusterIPv4Cidr:                dcl.StringOrNil(p.ClusterIpv4Cidr),
		AddonsConfig:                   ProtoToContainerClusterAddonsConfig(p.GetAddonsConfig()),
		Subnetwork:                     dcl.StringOrNil(p.Subnetwork),
		EnableKubernetesAlpha:          dcl.Bool(p.EnableKubernetesAlpha),
		LabelFingerprint:               dcl.StringOrNil(p.LabelFingerprint),
		LegacyAbac:                     ProtoToContainerClusterLegacyAbac(p.GetLegacyAbac()),
		NetworkPolicy:                  ProtoToContainerClusterNetworkPolicy(p.GetNetworkPolicy()),
		IPAllocationPolicy:             ProtoToContainerClusterIPAllocationPolicy(p.GetIpAllocationPolicy()),
		MasterAuthorizedNetworksConfig: ProtoToContainerClusterMasterAuthorizedNetworksConfig(p.GetMasterAuthorizedNetworksConfig()),
		BinaryAuthorization:            ProtoToContainerClusterBinaryAuthorization(p.GetBinaryAuthorization()),
		Autoscaling:                    ProtoToContainerClusterAutoscaling(p.GetAutoscaling()),
		NetworkConfig:                  ProtoToContainerClusterNetworkConfig(p.GetNetworkConfig()),
		MaintenancePolicy:              ProtoToContainerClusterMaintenancePolicy(p.GetMaintenancePolicy()),
		DefaultMaxPodsConstraint:       ProtoToContainerClusterDefaultMaxPodsConstraint(p.GetDefaultMaxPodsConstraint()),
		ResourceUsageExportConfig:      ProtoToContainerClusterResourceUsageExportConfig(p.GetResourceUsageExportConfig()),
		AuthenticatorGroupsConfig:      ProtoToContainerClusterAuthenticatorGroupsConfig(p.GetAuthenticatorGroupsConfig()),
		PrivateClusterConfig:           ProtoToContainerClusterPrivateClusterConfig(p.GetPrivateClusterConfig()),
		DatabaseEncryption:             ProtoToContainerClusterDatabaseEncryption(p.GetDatabaseEncryption()),
		VerticalPodAutoscaling:         ProtoToContainerClusterVerticalPodAutoscaling(p.GetVerticalPodAutoscaling()),
		ShieldedNodes:                  ProtoToContainerClusterShieldedNodes(p.GetShieldedNodes()),
		Endpoint:                       dcl.StringOrNil(p.Endpoint),
		MasterVersion:                  dcl.StringOrNil(p.MasterVersion),
		CreateTime:                     dcl.StringOrNil(p.GetCreateTime()),
		Status:                         dcl.StringOrNil(p.Status),
		StatusMessage:                  dcl.StringOrNil(p.StatusMessage),
		NodeIPv4CidrSize:               dcl.Int64OrNil(p.NodeIpv4CidrSize),
		ServicesIPv4Cidr:               dcl.StringOrNil(p.ServicesIpv4Cidr),
		ExpireTime:                     dcl.StringOrNil(p.GetExpireTime()),
		Location:                       dcl.StringOrNil(p.Location),
		EnableTPU:                      dcl.Bool(p.EnableTpu),
		TPUIPv4CidrBlock:               dcl.StringOrNil(p.TpuIpv4CidrBlock),
		Autopilot:                      ProtoToContainerClusterAutopilot(p.GetAutopilot()),
		Project:                        dcl.StringOrNil(p.Project),
		NodeConfig:                     ProtoToContainerClusterNodeConfig(p.GetNodeConfig()),
		ReleaseChannel:                 ProtoToContainerClusterReleaseChannel(p.GetReleaseChannel()),
		WorkloadIdentityConfig:         ProtoToContainerClusterWorkloadIdentityConfig(p.GetWorkloadIdentityConfig()),
		NotificationConfig:             ProtoToContainerClusterNotificationConfig(p.GetNotificationConfig()),
		ConfidentialNodes:              ProtoToContainerClusterConfidentialNodes(p.GetConfidentialNodes()),
		SelfLink:                       dcl.StringOrNil(p.SelfLink),
		Zone:                           dcl.StringOrNil(p.Zone),
		InitialClusterVersion:          dcl.StringOrNil(p.InitialClusterVersion),
		CurrentMasterVersion:           dcl.StringOrNil(p.CurrentMasterVersion),
		CurrentNodeVersion:             dcl.StringOrNil(p.CurrentNodeVersion),
		CurrentNodeCount:               dcl.Int64OrNil(p.CurrentNodeCount),
		Id:                             dcl.StringOrNil(p.Id),
	}
	// Repeated message fields need element-wise conversion.
	for _, r := range p.GetNodePools() {
		obj.NodePools = append(obj.NodePools, *ProtoToContainerClusterNodePools(r))
	}
	for _, r := range p.GetConditions() {
		obj.Conditions = append(obj.Conditions, *ProtoToContainerClusterConditions(r))
	}
	// Plain string slices can be copied with a single append.
	obj.Locations = append(obj.Locations, p.GetLocations()...)
	obj.InstanceGroupUrls = append(obj.InstanceGroupUrls, p.GetInstanceGroupUrls()...)
	return obj
}
// ClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnumToProto converts a ClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnum enum to its proto representation.
func ContainerClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnumToProto(e *container.ClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnum) containerpb.ContainerClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnum {
	// Unknown or nil values fall back to the zero (unspecified) enum.
	if e == nil {
		return containerpb.ContainerClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnum(0)
	}
	v, ok := containerpb.ContainerClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnum_value["ClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnum"+string(*e)]
	if !ok {
		return containerpb.ContainerClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnum(0)
	}
	return containerpb.ContainerClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnum(v)
}
// ClusterNodePoolsConfigWorkloadMetadataConfigModeEnumToProto converts a ClusterNodePoolsConfigWorkloadMetadataConfigModeEnum enum to its proto representation.
func ContainerClusterNodePoolsConfigWorkloadMetadataConfigModeEnumToProto(e *container.ClusterNodePoolsConfigWorkloadMetadataConfigModeEnum) containerpb.ContainerClusterNodePoolsConfigWorkloadMetadataConfigModeEnum {
	// Unknown or nil values fall back to the zero (unspecified) enum.
	if e == nil {
		return containerpb.ContainerClusterNodePoolsConfigWorkloadMetadataConfigModeEnum(0)
	}
	v, ok := containerpb.ContainerClusterNodePoolsConfigWorkloadMetadataConfigModeEnum_value["ClusterNodePoolsConfigWorkloadMetadataConfigModeEnum"+string(*e)]
	if !ok {
		return containerpb.ContainerClusterNodePoolsConfigWorkloadMetadataConfigModeEnum(0)
	}
	return containerpb.ContainerClusterNodePoolsConfigWorkloadMetadataConfigModeEnum(v)
}
// ClusterNodePoolsConfigTaintsEffectEnumToProto converts a ClusterNodePoolsConfigTaintsEffectEnum enum to its proto representation.
func ContainerClusterNodePoolsConfigTaintsEffectEnumToProto(e *container.ClusterNodePoolsConfigTaintsEffectEnum) containerpb.ContainerClusterNodePoolsConfigTaintsEffectEnum {
	// Unknown or nil values fall back to the zero (unspecified) enum.
	if e == nil {
		return containerpb.ContainerClusterNodePoolsConfigTaintsEffectEnum(0)
	}
	v, ok := containerpb.ContainerClusterNodePoolsConfigTaintsEffectEnum_value["ClusterNodePoolsConfigTaintsEffectEnum"+string(*e)]
	if !ok {
		return containerpb.ContainerClusterNodePoolsConfigTaintsEffectEnum(0)
	}
	return containerpb.ContainerClusterNodePoolsConfigTaintsEffectEnum(v)
}
// ClusterNodePoolsConfigSandboxConfigTypeEnumToProto converts a ClusterNodePoolsConfigSandboxConfigTypeEnum enum to its proto representation.
func ContainerClusterNodePoolsConfigSandboxConfigTypeEnumToProto(e *container.ClusterNodePoolsConfigSandboxConfigTypeEnum) containerpb.ContainerClusterNodePoolsConfigSandboxConfigTypeEnum {
	// Unknown or nil values fall back to the zero (unspecified) enum.
	if e == nil {
		return containerpb.ContainerClusterNodePoolsConfigSandboxConfigTypeEnum(0)
	}
	v, ok := containerpb.ContainerClusterNodePoolsConfigSandboxConfigTypeEnum_value["ClusterNodePoolsConfigSandboxConfigTypeEnum"+string(*e)]
	if !ok {
		return containerpb.ContainerClusterNodePoolsConfigSandboxConfigTypeEnum(0)
	}
	return containerpb.ContainerClusterNodePoolsConfigSandboxConfigTypeEnum(v)
}
// ClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnumToProto converts a ClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnum enum to its proto representation.
func ContainerClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnumToProto(e *container.ClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnum) containerpb.ContainerClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnum {
	// Unknown or nil values fall back to the zero (unspecified) enum.
	if e == nil {
		return containerpb.ContainerClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnum(0)
	}
	v, ok := containerpb.ContainerClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnum_value["ClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnum"+string(*e)]
	if !ok {
		return containerpb.ContainerClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnum(0)
	}
	return containerpb.ContainerClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnum(v)
}
// ClusterNodePoolsStatusEnumToProto converts a ClusterNodePoolsStatusEnum enum to its proto representation.
func ContainerClusterNodePoolsStatusEnumToProto(e *container.ClusterNodePoolsStatusEnum) containerpb.ContainerClusterNodePoolsStatusEnum {
	// Unknown or nil values fall back to the zero (unspecified) enum.
	if e == nil {
		return containerpb.ContainerClusterNodePoolsStatusEnum(0)
	}
	v, ok := containerpb.ContainerClusterNodePoolsStatusEnum_value["ClusterNodePoolsStatusEnum"+string(*e)]
	if !ok {
		return containerpb.ContainerClusterNodePoolsStatusEnum(0)
	}
	return containerpb.ContainerClusterNodePoolsStatusEnum(v)
}
// ClusterNodePoolsConditionsCodeEnumToProto converts a ClusterNodePoolsConditionsCodeEnum enum to its proto representation.
func ContainerClusterNodePoolsConditionsCodeEnumToProto(e *container.ClusterNodePoolsConditionsCodeEnum) containerpb.ContainerClusterNodePoolsConditionsCodeEnum {
	// Unknown or nil values fall back to the zero (unspecified) enum.
	if e == nil {
		return containerpb.ContainerClusterNodePoolsConditionsCodeEnum(0)
	}
	v, ok := containerpb.ContainerClusterNodePoolsConditionsCodeEnum_value["ClusterNodePoolsConditionsCodeEnum"+string(*e)]
	if !ok {
		return containerpb.ContainerClusterNodePoolsConditionsCodeEnum(0)
	}
	return containerpb.ContainerClusterNodePoolsConditionsCodeEnum(v)
}
// ClusterNodePoolsConditionsCanonicalCodeEnumToProto converts a ClusterNodePoolsConditionsCanonicalCodeEnum enum to its proto representation.
func ContainerClusterNodePoolsConditionsCanonicalCodeEnumToProto(e *container.ClusterNodePoolsConditionsCanonicalCodeEnum) containerpb.ContainerClusterNodePoolsConditionsCanonicalCodeEnum {
	// Unknown or nil values fall back to the zero (unspecified) enum.
	if e == nil {
		return containerpb.ContainerClusterNodePoolsConditionsCanonicalCodeEnum(0)
	}
	v, ok := containerpb.ContainerClusterNodePoolsConditionsCanonicalCodeEnum_value["ClusterNodePoolsConditionsCanonicalCodeEnum"+string(*e)]
	if !ok {
		return containerpb.ContainerClusterNodePoolsConditionsCanonicalCodeEnum(0)
	}
	return containerpb.ContainerClusterNodePoolsConditionsCanonicalCodeEnum(v)
}
// ClusterNetworkPolicyProviderEnumToProto converts a ClusterNetworkPolicyProviderEnum enum to its proto representation.
func ContainerClusterNetworkPolicyProviderEnumToProto(e *container.ClusterNetworkPolicyProviderEnum) containerpb.ContainerClusterNetworkPolicyProviderEnum {
	// Unknown or nil values fall back to the zero (unspecified) enum.
	if e == nil {
		return containerpb.ContainerClusterNetworkPolicyProviderEnum(0)
	}
	v, ok := containerpb.ContainerClusterNetworkPolicyProviderEnum_value["ClusterNetworkPolicyProviderEnum"+string(*e)]
	if !ok {
		return containerpb.ContainerClusterNetworkPolicyProviderEnum(0)
	}
	return containerpb.ContainerClusterNetworkPolicyProviderEnum(v)
}
// ClusterNetworkConfigPrivateIPv6GoogleAccessEnumToProto converts a ClusterNetworkConfigPrivateIPv6GoogleAccessEnum enum to its proto representation.
func ContainerClusterNetworkConfigPrivateIPv6GoogleAccessEnumToProto(e *container.ClusterNetworkConfigPrivateIPv6GoogleAccessEnum) containerpb.ContainerClusterNetworkConfigPrivateIPv6GoogleAccessEnum {
	// Unknown or nil values fall back to the zero (unspecified) enum.
	if e == nil {
		return containerpb.ContainerClusterNetworkConfigPrivateIPv6GoogleAccessEnum(0)
	}
	v, ok := containerpb.ContainerClusterNetworkConfigPrivateIPv6GoogleAccessEnum_value["ClusterNetworkConfigPrivateIPv6GoogleAccessEnum"+string(*e)]
	if !ok {
		return containerpb.ContainerClusterNetworkConfigPrivateIPv6GoogleAccessEnum(0)
	}
	return containerpb.ContainerClusterNetworkConfigPrivateIPv6GoogleAccessEnum(v)
}
// ClusterDatabaseEncryptionStateEnumToProto converts a ClusterDatabaseEncryptionStateEnum enum to its proto representation.
func ContainerClusterDatabaseEncryptionStateEnumToProto(e *container.ClusterDatabaseEncryptionStateEnum) containerpb.ContainerClusterDatabaseEncryptionStateEnum {
	// Unknown or nil values fall back to the zero (unspecified) enum.
	if e == nil {
		return containerpb.ContainerClusterDatabaseEncryptionStateEnum(0)
	}
	v, ok := containerpb.ContainerClusterDatabaseEncryptionStateEnum_value["ClusterDatabaseEncryptionStateEnum"+string(*e)]
	if !ok {
		return containerpb.ContainerClusterDatabaseEncryptionStateEnum(0)
	}
	return containerpb.ContainerClusterDatabaseEncryptionStateEnum(v)
}
// ClusterConditionsCanonicalCodeEnumToProto converts a ClusterConditionsCanonicalCodeEnum enum to its proto representation.
func ContainerClusterConditionsCanonicalCodeEnumToProto(e *container.ClusterConditionsCanonicalCodeEnum) containerpb.ContainerClusterConditionsCanonicalCodeEnum {
	// Unknown or nil values fall back to the zero (unspecified) enum.
	if e == nil {
		return containerpb.ContainerClusterConditionsCanonicalCodeEnum(0)
	}
	v, ok := containerpb.ContainerClusterConditionsCanonicalCodeEnum_value["ClusterConditionsCanonicalCodeEnum"+string(*e)]
	if !ok {
		return containerpb.ContainerClusterConditionsCanonicalCodeEnum(0)
	}
	return containerpb.ContainerClusterConditionsCanonicalCodeEnum(v)
}
// ClusterNodeConfigWorkloadMetadataConfigModeEnumToProto converts a ClusterNodeConfigWorkloadMetadataConfigModeEnum enum to its proto representation.
func ContainerClusterNodeConfigWorkloadMetadataConfigModeEnumToProto(e *container.ClusterNodeConfigWorkloadMetadataConfigModeEnum) containerpb.ContainerClusterNodeConfigWorkloadMetadataConfigModeEnum {
	// Unknown or nil values fall back to the zero (unspecified) enum.
	if e == nil {
		return containerpb.ContainerClusterNodeConfigWorkloadMetadataConfigModeEnum(0)
	}
	v, ok := containerpb.ContainerClusterNodeConfigWorkloadMetadataConfigModeEnum_value["ClusterNodeConfigWorkloadMetadataConfigModeEnum"+string(*e)]
	if !ok {
		return containerpb.ContainerClusterNodeConfigWorkloadMetadataConfigModeEnum(0)
	}
	return containerpb.ContainerClusterNodeConfigWorkloadMetadataConfigModeEnum(v)
}
// ClusterNodeConfigTaintsEffectEnumToProto converts a ClusterNodeConfigTaintsEffectEnum enum to its proto representation.
func ContainerClusterNodeConfigTaintsEffectEnumToProto(e *container.ClusterNodeConfigTaintsEffectEnum) containerpb.ContainerClusterNodeConfigTaintsEffectEnum {
	// Unknown or nil values fall back to the zero (unspecified) enum.
	if e == nil {
		return containerpb.ContainerClusterNodeConfigTaintsEffectEnum(0)
	}
	v, ok := containerpb.ContainerClusterNodeConfigTaintsEffectEnum_value["ClusterNodeConfigTaintsEffectEnum"+string(*e)]
	if !ok {
		return containerpb.ContainerClusterNodeConfigTaintsEffectEnum(0)
	}
	return containerpb.ContainerClusterNodeConfigTaintsEffectEnum(v)
}
// ClusterNodeConfigSandboxConfigTypeEnumToProto converts a ClusterNodeConfigSandboxConfigTypeEnum enum to its proto representation.
func ContainerClusterNodeConfigSandboxConfigTypeEnumToProto(e *container.ClusterNodeConfigSandboxConfigTypeEnum) containerpb.ContainerClusterNodeConfigSandboxConfigTypeEnum {
	// Unknown or nil values fall back to the zero (unspecified) enum.
	if e == nil {
		return containerpb.ContainerClusterNodeConfigSandboxConfigTypeEnum(0)
	}
	v, ok := containerpb.ContainerClusterNodeConfigSandboxConfigTypeEnum_value["ClusterNodeConfigSandboxConfigTypeEnum"+string(*e)]
	if !ok {
		return containerpb.ContainerClusterNodeConfigSandboxConfigTypeEnum(0)
	}
	return containerpb.ContainerClusterNodeConfigSandboxConfigTypeEnum(v)
}
// ClusterNodeConfigReservationAffinityConsumeReservationTypeEnumToProto converts a ClusterNodeConfigReservationAffinityConsumeReservationTypeEnum enum to its proto representation.
func ContainerClusterNodeConfigReservationAffinityConsumeReservationTypeEnumToProto(e *container.ClusterNodeConfigReservationAffinityConsumeReservationTypeEnum) containerpb.ContainerClusterNodeConfigReservationAffinityConsumeReservationTypeEnum {
	// Unknown or nil values fall back to the zero (unspecified) enum.
	if e == nil {
		return containerpb.ContainerClusterNodeConfigReservationAffinityConsumeReservationTypeEnum(0)
	}
	v, ok := containerpb.ContainerClusterNodeConfigReservationAffinityConsumeReservationTypeEnum_value["ClusterNodeConfigReservationAffinityConsumeReservationTypeEnum"+string(*e)]
	if !ok {
		return containerpb.ContainerClusterNodeConfigReservationAffinityConsumeReservationTypeEnum(0)
	}
	return containerpb.ContainerClusterNodeConfigReservationAffinityConsumeReservationTypeEnum(v)
}
// ClusterReleaseChannelChannelEnumToProto converts a ClusterReleaseChannelChannelEnum enum to its proto representation.
func ContainerClusterReleaseChannelChannelEnumToProto(e *container.ClusterReleaseChannelChannelEnum) containerpb.ContainerClusterReleaseChannelChannelEnum {
	// Unknown or nil values fall back to the zero (unspecified) enum.
	if e == nil {
		return containerpb.ContainerClusterReleaseChannelChannelEnum(0)
	}
	v, ok := containerpb.ContainerClusterReleaseChannelChannelEnum_value["ClusterReleaseChannelChannelEnum"+string(*e)]
	if !ok {
		return containerpb.ContainerClusterReleaseChannelChannelEnum(0)
	}
	return containerpb.ContainerClusterReleaseChannelChannelEnum(v)
}
// ContainerClusterMasterAuthToProto converts a ClusterMasterAuth resource to its proto representation.
func ContainerClusterMasterAuthToProto(o *container.ClusterMasterAuth) *containerpb.ContainerClusterMasterAuth {
	if o == nil {
		return nil
	}
	// Optional scalar fields collapse to their zero values; the nested config
	// converter handles its own nil case.
	return &containerpb.ContainerClusterMasterAuth{
		Username:                dcl.ValueOrEmptyString(o.Username),
		Password:                dcl.ValueOrEmptyString(o.Password),
		ClientCertificateConfig: ContainerClusterMasterAuthClientCertificateConfigToProto(o.ClientCertificateConfig),
		ClusterCaCertificate:    dcl.ValueOrEmptyString(o.ClusterCaCertificate),
		ClientCertificate:       dcl.ValueOrEmptyString(o.ClientCertificate),
		ClientKey:               dcl.ValueOrEmptyString(o.ClientKey),
	}
}
// ContainerClusterMasterAuthClientCertificateConfigToProto converts a ClusterMasterAuthClientCertificateConfig resource to its proto representation.
func ContainerClusterMasterAuthClientCertificateConfigToProto(o *container.ClusterMasterAuthClientCertificateConfig) *containerpb.ContainerClusterMasterAuthClientCertificateConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterMasterAuthClientCertificateConfig{
		IssueClientCertificate: dcl.ValueOrEmptyBool(o.IssueClientCertificate),
	}
}
// ContainerClusterAddonsConfigToProto converts a ClusterAddonsConfig resource to its proto representation.
func ContainerClusterAddonsConfigToProto(o *container.ClusterAddonsConfig) *containerpb.ContainerClusterAddonsConfig {
	if o == nil {
		return nil
	}
	// Each addon has its own nil-safe sub-converter.
	return &containerpb.ContainerClusterAddonsConfig{
		HttpLoadBalancing:                ContainerClusterAddonsConfigHttpLoadBalancingToProto(o.HttpLoadBalancing),
		HorizontalPodAutoscaling:         ContainerClusterAddonsConfigHorizontalPodAutoscalingToProto(o.HorizontalPodAutoscaling),
		KubernetesDashboard:              ContainerClusterAddonsConfigKubernetesDashboardToProto(o.KubernetesDashboard),
		NetworkPolicyConfig:              ContainerClusterAddonsConfigNetworkPolicyConfigToProto(o.NetworkPolicyConfig),
		CloudRunConfig:                   ContainerClusterAddonsConfigCloudRunConfigToProto(o.CloudRunConfig),
		DnsCacheConfig:                   ContainerClusterAddonsConfigDnsCacheConfigToProto(o.DnsCacheConfig),
		ConfigConnectorConfig:            ContainerClusterAddonsConfigConfigConnectorConfigToProto(o.ConfigConnectorConfig),
		GcePersistentDiskCsiDriverConfig: ContainerClusterAddonsConfigGcePersistentDiskCsiDriverConfigToProto(o.GcePersistentDiskCsiDriverConfig),
	}
}
// ContainerClusterAddonsConfigHttpLoadBalancingToProto converts a ClusterAddonsConfigHttpLoadBalancing resource to its proto representation.
func ContainerClusterAddonsConfigHttpLoadBalancingToProto(o *container.ClusterAddonsConfigHttpLoadBalancing) *containerpb.ContainerClusterAddonsConfigHttpLoadBalancing {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterAddonsConfigHttpLoadBalancing{
		Disabled: dcl.ValueOrEmptyBool(o.Disabled),
	}
}
// ContainerClusterAddonsConfigHorizontalPodAutoscalingToProto converts a ClusterAddonsConfigHorizontalPodAutoscaling resource to its proto representation.
func ContainerClusterAddonsConfigHorizontalPodAutoscalingToProto(o *container.ClusterAddonsConfigHorizontalPodAutoscaling) *containerpb.ContainerClusterAddonsConfigHorizontalPodAutoscaling {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterAddonsConfigHorizontalPodAutoscaling{
		Disabled: dcl.ValueOrEmptyBool(o.Disabled),
	}
}
// ContainerClusterAddonsConfigKubernetesDashboardToProto converts a ClusterAddonsConfigKubernetesDashboard resource to its proto representation.
func ContainerClusterAddonsConfigKubernetesDashboardToProto(o *container.ClusterAddonsConfigKubernetesDashboard) *containerpb.ContainerClusterAddonsConfigKubernetesDashboard {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterAddonsConfigKubernetesDashboard{
		Disabled: dcl.ValueOrEmptyBool(o.Disabled),
	}
}
// ContainerClusterAddonsConfigNetworkPolicyConfigToProto converts a ClusterAddonsConfigNetworkPolicyConfig resource to its proto representation.
func ContainerClusterAddonsConfigNetworkPolicyConfigToProto(o *container.ClusterAddonsConfigNetworkPolicyConfig) *containerpb.ContainerClusterAddonsConfigNetworkPolicyConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterAddonsConfigNetworkPolicyConfig{
		Disabled: dcl.ValueOrEmptyBool(o.Disabled),
	}
}
// ContainerClusterAddonsConfigCloudRunConfigToProto converts a ClusterAddonsConfigCloudRunConfig resource to its proto representation.
func ContainerClusterAddonsConfigCloudRunConfigToProto(o *container.ClusterAddonsConfigCloudRunConfig) *containerpb.ContainerClusterAddonsConfigCloudRunConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterAddonsConfigCloudRunConfig{
		Disabled:         dcl.ValueOrEmptyBool(o.Disabled),
		LoadBalancerType: ContainerClusterAddonsConfigCloudRunConfigLoadBalancerTypeEnumToProto(o.LoadBalancerType),
	}
}
// ContainerClusterAddonsConfigDnsCacheConfigToProto converts a ClusterAddonsConfigDnsCacheConfig resource to its proto representation.
func ContainerClusterAddonsConfigDnsCacheConfigToProto(o *container.ClusterAddonsConfigDnsCacheConfig) *containerpb.ContainerClusterAddonsConfigDnsCacheConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterAddonsConfigDnsCacheConfig{
		Enabled: dcl.ValueOrEmptyBool(o.Enabled),
	}
}
// ContainerClusterAddonsConfigConfigConnectorConfigToProto converts a ClusterAddonsConfigConfigConnectorConfig resource to its proto representation.
func ContainerClusterAddonsConfigConfigConnectorConfigToProto(o *container.ClusterAddonsConfigConfigConnectorConfig) *containerpb.ContainerClusterAddonsConfigConfigConnectorConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterAddonsConfigConfigConnectorConfig{
		Enabled: dcl.ValueOrEmptyBool(o.Enabled),
	}
}
// ContainerClusterAddonsConfigGcePersistentDiskCsiDriverConfigToProto converts a ClusterAddonsConfigGcePersistentDiskCsiDriverConfig resource to its proto representation.
func ContainerClusterAddonsConfigGcePersistentDiskCsiDriverConfigToProto(o *container.ClusterAddonsConfigGcePersistentDiskCsiDriverConfig) *containerpb.ContainerClusterAddonsConfigGcePersistentDiskCsiDriverConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterAddonsConfigGcePersistentDiskCsiDriverConfig{
		Enabled: dcl.ValueOrEmptyBool(o.Enabled),
	}
}
// ContainerClusterNodePoolsToProto converts a ClusterNodePools resource to its proto representation.
func ContainerClusterNodePoolsToProto(o *container.ClusterNodePools) *containerpb.ContainerClusterNodePools {
	if o == nil {
		return nil
	}
	p := &containerpb.ContainerClusterNodePools{
		Name:              dcl.ValueOrEmptyString(o.Name),
		Config:            ContainerClusterNodePoolsConfigToProto(o.Config),
		InitialNodeCount:  dcl.ValueOrEmptyInt64(o.InitialNodeCount),
		SelfLink:          dcl.ValueOrEmptyString(o.SelfLink),
		Version:           dcl.ValueOrEmptyString(o.Version),
		Status:            ContainerClusterNodePoolsStatusEnumToProto(o.Status),
		StatusMessage:     dcl.ValueOrEmptyString(o.StatusMessage),
		Autoscaling:       ContainerClusterNodePoolsAutoscalingToProto(o.Autoscaling),
		Management:        ContainerClusterNodePoolsManagementToProto(o.Management),
		MaxPodsConstraint: ContainerClusterNodePoolsMaxPodsConstraintToProto(o.MaxPodsConstraint),
		PodIpv4CidrSize:   dcl.ValueOrEmptyInt64(o.PodIPv4CidrSize),
		UpgradeSettings:   ContainerClusterNodePoolsUpgradeSettingsToProto(o.UpgradeSettings),
	}
	// Plain string slices can be copied in a single variadic append instead of
	// element by element.
	p.Locations = append(p.Locations, o.Locations...)
	p.InstanceGroupUrls = append(p.InstanceGroupUrls, o.InstanceGroupUrls...)
	// Index the slice so we take the address of the stored element rather than
	// copying each ClusterNodePoolsConditions into a loop variable.
	for i := range o.Conditions {
		p.Conditions = append(p.Conditions, ContainerClusterNodePoolsConditionsToProto(&o.Conditions[i]))
	}
	return p
}
// ContainerClusterNodePoolsConfigToProto converts a ClusterNodePoolsConfig resource to its proto representation.
func ContainerClusterNodePoolsConfigToProto(o *container.ClusterNodePoolsConfig) *containerpb.ContainerClusterNodePoolsConfig {
	if o == nil {
		return nil
	}
	p := &containerpb.ContainerClusterNodePoolsConfig{
		MachineType:            dcl.ValueOrEmptyString(o.MachineType),
		DiskSizeGb:             dcl.ValueOrEmptyInt64(o.DiskSizeGb),
		ServiceAccount:         dcl.ValueOrEmptyString(o.ServiceAccount),
		ImageType:              dcl.ValueOrEmptyString(o.ImageType),
		LocalSsdCount:          dcl.ValueOrEmptyInt64(o.LocalSsdCount),
		Preemptible:            dcl.ValueOrEmptyBool(o.Preemptible),
		DiskType:               dcl.ValueOrEmptyString(o.DiskType),
		MinCpuPlatform:         dcl.ValueOrEmptyString(o.MinCpuPlatform),
		WorkloadMetadataConfig: ContainerClusterNodePoolsConfigWorkloadMetadataConfigToProto(o.WorkloadMetadataConfig),
		SandboxConfig:          ContainerClusterNodePoolsConfigSandboxConfigToProto(o.SandboxConfig),
		NodeGroup:              dcl.ValueOrEmptyString(o.NodeGroup),
		ReservationAffinity:    ContainerClusterNodePoolsConfigReservationAffinityToProto(o.ReservationAffinity),
		ShieldedInstanceConfig: ContainerClusterNodePoolsConfigShieldedInstanceConfigToProto(o.ShieldedInstanceConfig),
		LinuxNodeConfig:        ContainerClusterNodePoolsConfigLinuxNodeConfigToProto(o.LinuxNodeConfig),
		KubeletConfig:          ContainerClusterNodePoolsConfigKubeletConfigToProto(o.KubeletConfig),
		BootDiskKmsKey:         dcl.ValueOrEmptyString(o.BootDiskKmsKey),
	}
	// Plain string slices can be copied with a single variadic append.
	p.OauthScopes = append(p.OauthScopes, o.OAuthScopes...)
	// Maps are always allocated (even for a nil source) to preserve the
	// original behavior of emitting an empty, non-nil map.
	p.Metadata = make(map[string]string)
	for k, r := range o.Metadata {
		p.Metadata[k] = r
	}
	p.Labels = make(map[string]string)
	for k, r := range o.Labels {
		p.Labels[k] = r
	}
	p.Tags = append(p.Tags, o.Tags...)
	// Index the slices so each element is converted without copying the struct
	// into a loop variable.
	for i := range o.Accelerators {
		p.Accelerators = append(p.Accelerators, ContainerClusterNodePoolsConfigAcceleratorsToProto(&o.Accelerators[i]))
	}
	for i := range o.Taints {
		p.Taints = append(p.Taints, ContainerClusterNodePoolsConfigTaintsToProto(&o.Taints[i]))
	}
	return p
}
// ContainerClusterNodePoolsConfigAcceleratorsToProto converts a ClusterNodePoolsConfigAccelerators resource to its proto representation.
func ContainerClusterNodePoolsConfigAcceleratorsToProto(o *container.ClusterNodePoolsConfigAccelerators) *containerpb.ContainerClusterNodePoolsConfigAccelerators {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNodePoolsConfigAccelerators{
		AcceleratorCount: dcl.ValueOrEmptyInt64(o.AcceleratorCount),
		AcceleratorType:  dcl.ValueOrEmptyString(o.AcceleratorType),
	}
}
// ContainerClusterNodePoolsConfigWorkloadMetadataConfigToProto converts a ClusterNodePoolsConfigWorkloadMetadataConfig resource to its proto representation.
func ContainerClusterNodePoolsConfigWorkloadMetadataConfigToProto(o *container.ClusterNodePoolsConfigWorkloadMetadataConfig) *containerpb.ContainerClusterNodePoolsConfigWorkloadMetadataConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNodePoolsConfigWorkloadMetadataConfig{
		Mode: ContainerClusterNodePoolsConfigWorkloadMetadataConfigModeEnumToProto(o.Mode),
	}
}
// ContainerClusterNodePoolsConfigTaintsToProto converts a ClusterNodePoolsConfigTaints resource to its proto representation.
func ContainerClusterNodePoolsConfigTaintsToProto(o *container.ClusterNodePoolsConfigTaints) *containerpb.ContainerClusterNodePoolsConfigTaints {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNodePoolsConfigTaints{
		Key:    dcl.ValueOrEmptyString(o.Key),
		Value:  dcl.ValueOrEmptyString(o.Value),
		Effect: ContainerClusterNodePoolsConfigTaintsEffectEnumToProto(o.Effect),
	}
}
// ContainerClusterNodePoolsConfigSandboxConfigToProto converts a ClusterNodePoolsConfigSandboxConfig resource to its proto representation.
func ContainerClusterNodePoolsConfigSandboxConfigToProto(o *container.ClusterNodePoolsConfigSandboxConfig) *containerpb.ContainerClusterNodePoolsConfigSandboxConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNodePoolsConfigSandboxConfig{
		Type: ContainerClusterNodePoolsConfigSandboxConfigTypeEnumToProto(o.Type),
	}
}
// ContainerClusterNodePoolsConfigReservationAffinityToProto converts a ClusterNodePoolsConfigReservationAffinity resource to its proto representation.
func ContainerClusterNodePoolsConfigReservationAffinityToProto(o *container.ClusterNodePoolsConfigReservationAffinity) *containerpb.ContainerClusterNodePoolsConfigReservationAffinity {
	if o == nil {
		return nil
	}
	p := &containerpb.ContainerClusterNodePoolsConfigReservationAffinity{
		ConsumeReservationType: ContainerClusterNodePoolsConfigReservationAffinityConsumeReservationTypeEnumToProto(o.ConsumeReservationType),
		Key:                    dcl.ValueOrEmptyString(o.Key),
	}
	// A single variadic append replaces the element-by-element copy loop.
	p.Values = append(p.Values, o.Values...)
	return p
}
// ContainerClusterNodePoolsConfigShieldedInstanceConfigToProto converts a ClusterNodePoolsConfigShieldedInstanceConfig resource to its proto representation.
func ContainerClusterNodePoolsConfigShieldedInstanceConfigToProto(o *container.ClusterNodePoolsConfigShieldedInstanceConfig) *containerpb.ContainerClusterNodePoolsConfigShieldedInstanceConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNodePoolsConfigShieldedInstanceConfig{
		EnableSecureBoot:          dcl.ValueOrEmptyBool(o.EnableSecureBoot),
		EnableIntegrityMonitoring: dcl.ValueOrEmptyBool(o.EnableIntegrityMonitoring),
	}
}
// ContainerClusterNodePoolsConfigLinuxNodeConfigToProto converts a ClusterNodePoolsConfigLinuxNodeConfig resource to its proto representation.
func ContainerClusterNodePoolsConfigLinuxNodeConfigToProto(o *container.ClusterNodePoolsConfigLinuxNodeConfig) *containerpb.ContainerClusterNodePoolsConfigLinuxNodeConfig {
	if o == nil {
		return nil
	}
	// The map is always allocated (non-nil even for an empty source), sized to
	// the source to avoid rehashing.
	p := &containerpb.ContainerClusterNodePoolsConfigLinuxNodeConfig{
		Sysctls: make(map[string]string, len(o.Sysctls)),
	}
	for key, value := range o.Sysctls {
		p.Sysctls[key] = value
	}
	return p
}
// ContainerClusterNodePoolsConfigKubeletConfigToProto converts a ClusterNodePoolsConfigKubeletConfig resource to its proto representation.
func ContainerClusterNodePoolsConfigKubeletConfigToProto(o *container.ClusterNodePoolsConfigKubeletConfig) *containerpb.ContainerClusterNodePoolsConfigKubeletConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNodePoolsConfigKubeletConfig{
		CpuManagerPolicy:  dcl.ValueOrEmptyString(o.CpuManagerPolicy),
		CpuCfsQuota:       dcl.ValueOrEmptyBool(o.CpuCfsQuota),
		CpuCfsQuotaPeriod: dcl.ValueOrEmptyString(o.CpuCfsQuotaPeriod),
	}
}
// ContainerClusterNodePoolsAutoscalingToProto converts a ClusterNodePoolsAutoscaling resource to its proto representation.
func ContainerClusterNodePoolsAutoscalingToProto(o *container.ClusterNodePoolsAutoscaling) *containerpb.ContainerClusterNodePoolsAutoscaling {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNodePoolsAutoscaling{
		Enabled:         dcl.ValueOrEmptyBool(o.Enabled),
		MinNodeCount:    dcl.ValueOrEmptyInt64(o.MinNodeCount),
		MaxNodeCount:    dcl.ValueOrEmptyInt64(o.MaxNodeCount),
		Autoprovisioned: dcl.ValueOrEmptyBool(o.Autoprovisioned),
	}
}
// ContainerClusterNodePoolsManagementToProto converts a ClusterNodePoolsManagement resource to its proto representation.
func ContainerClusterNodePoolsManagementToProto(o *container.ClusterNodePoolsManagement) *containerpb.ContainerClusterNodePoolsManagement {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNodePoolsManagement{
		AutoUpgrade:    dcl.ValueOrEmptyBool(o.AutoUpgrade),
		AutoRepair:     dcl.ValueOrEmptyBool(o.AutoRepair),
		UpgradeOptions: ContainerClusterNodePoolsManagementUpgradeOptionsToProto(o.UpgradeOptions),
	}
}
// ContainerClusterNodePoolsManagementUpgradeOptionsToProto converts a ClusterNodePoolsManagementUpgradeOptions resource to its proto representation.
func ContainerClusterNodePoolsManagementUpgradeOptionsToProto(o *container.ClusterNodePoolsManagementUpgradeOptions) *containerpb.ContainerClusterNodePoolsManagementUpgradeOptions {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNodePoolsManagementUpgradeOptions{
		AutoUpgradeStartTime: dcl.ValueOrEmptyString(o.AutoUpgradeStartTime),
		Description:          dcl.ValueOrEmptyString(o.Description),
	}
}
// ContainerClusterNodePoolsMaxPodsConstraintToProto converts a ClusterNodePoolsMaxPodsConstraint resource to its proto representation.
func ContainerClusterNodePoolsMaxPodsConstraintToProto(o *container.ClusterNodePoolsMaxPodsConstraint) *containerpb.ContainerClusterNodePoolsMaxPodsConstraint {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNodePoolsMaxPodsConstraint{
		MaxPodsPerNode: dcl.ValueOrEmptyInt64(o.MaxPodsPerNode),
	}
}
// ContainerClusterNodePoolsConditionsToProto converts a ClusterNodePoolsConditions resource to its proto representation.
func ContainerClusterNodePoolsConditionsToProto(o *container.ClusterNodePoolsConditions) *containerpb.ContainerClusterNodePoolsConditions {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNodePoolsConditions{
		Code:          ContainerClusterNodePoolsConditionsCodeEnumToProto(o.Code),
		Message:       dcl.ValueOrEmptyString(o.Message),
		CanonicalCode: ContainerClusterNodePoolsConditionsCanonicalCodeEnumToProto(o.CanonicalCode),
	}
}
// ContainerClusterNodePoolsUpgradeSettingsToProto converts a ClusterNodePoolsUpgradeSettings resource to its proto representation.
func ContainerClusterNodePoolsUpgradeSettingsToProto(o *container.ClusterNodePoolsUpgradeSettings) *containerpb.ContainerClusterNodePoolsUpgradeSettings {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNodePoolsUpgradeSettings{
		MaxSurge:       dcl.ValueOrEmptyInt64(o.MaxSurge),
		MaxUnavailable: dcl.ValueOrEmptyInt64(o.MaxUnavailable),
	}
}
// ContainerClusterLegacyAbacToProto converts a ClusterLegacyAbac resource to its proto representation.
func ContainerClusterLegacyAbacToProto(o *container.ClusterLegacyAbac) *containerpb.ContainerClusterLegacyAbac {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterLegacyAbac{
		Enabled: dcl.ValueOrEmptyBool(o.Enabled),
	}
}
// ContainerClusterNetworkPolicyToProto converts a ClusterNetworkPolicy resource to its proto representation.
func ContainerClusterNetworkPolicyToProto(o *container.ClusterNetworkPolicy) *containerpb.ContainerClusterNetworkPolicy {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNetworkPolicy{
		Provider: ContainerClusterNetworkPolicyProviderEnumToProto(o.Provider),
		Enabled:  dcl.ValueOrEmptyBool(o.Enabled),
	}
}
// ContainerClusterIPAllocationPolicyToProto converts a ClusterIPAllocationPolicy resource to its proto representation.
func ContainerClusterIPAllocationPolicyToProto(o *container.ClusterIPAllocationPolicy) *containerpb.ContainerClusterIPAllocationPolicy {
	if o == nil {
		return nil
	}
	// Note the deliberate casing difference: DCL fields use "IPv4"/"IP" while
	// the generated proto fields use "Ipv4"/"Ip".
	return &containerpb.ContainerClusterIPAllocationPolicy{
		UseIpAliases:               dcl.ValueOrEmptyBool(o.UseIPAliases),
		CreateSubnetwork:           dcl.ValueOrEmptyBool(o.CreateSubnetwork),
		SubnetworkName:             dcl.ValueOrEmptyString(o.SubnetworkName),
		ClusterSecondaryRangeName:  dcl.ValueOrEmptyString(o.ClusterSecondaryRangeName),
		ServicesSecondaryRangeName: dcl.ValueOrEmptyString(o.ServicesSecondaryRangeName),
		ClusterIpv4CidrBlock:       dcl.ValueOrEmptyString(o.ClusterIPv4CidrBlock),
		NodeIpv4CidrBlock:          dcl.ValueOrEmptyString(o.NodeIPv4CidrBlock),
		ServicesIpv4CidrBlock:      dcl.ValueOrEmptyString(o.ServicesIPv4CidrBlock),
		TpuIpv4CidrBlock:           dcl.ValueOrEmptyString(o.TPUIPv4CidrBlock),
		ClusterIpv4Cidr:            dcl.ValueOrEmptyString(o.ClusterIPv4Cidr),
		NodeIpv4Cidr:               dcl.ValueOrEmptyString(o.NodeIPv4Cidr),
		ServicesIpv4Cidr:           dcl.ValueOrEmptyString(o.ServicesIPv4Cidr),
		UseRoutes:                  dcl.ValueOrEmptyBool(o.UseRoutes),
	}
}
// ContainerClusterMasterAuthorizedNetworksConfigToProto converts a ClusterMasterAuthorizedNetworksConfig resource to its proto representation.
func ContainerClusterMasterAuthorizedNetworksConfigToProto(o *container.ClusterMasterAuthorizedNetworksConfig) *containerpb.ContainerClusterMasterAuthorizedNetworksConfig {
	if o == nil {
		return nil
	}
	p := &containerpb.ContainerClusterMasterAuthorizedNetworksConfig{
		Enabled: dcl.ValueOrEmptyBool(o.Enabled),
	}
	// Index the slice so each element is converted without copying the struct
	// into a loop variable.
	for i := range o.CidrBlocks {
		p.CidrBlocks = append(p.CidrBlocks, ContainerClusterMasterAuthorizedNetworksConfigCidrBlocksToProto(&o.CidrBlocks[i]))
	}
	return p
}
// ContainerClusterMasterAuthorizedNetworksConfigCidrBlocksToProto converts a ClusterMasterAuthorizedNetworksConfigCidrBlocks resource to its proto representation.
func ContainerClusterMasterAuthorizedNetworksConfigCidrBlocksToProto(o *container.ClusterMasterAuthorizedNetworksConfigCidrBlocks) *containerpb.ContainerClusterMasterAuthorizedNetworksConfigCidrBlocks {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterMasterAuthorizedNetworksConfigCidrBlocks{
		DisplayName: dcl.ValueOrEmptyString(o.DisplayName),
		CidrBlock:   dcl.ValueOrEmptyString(o.CidrBlock),
	}
}
// ContainerClusterBinaryAuthorizationToProto converts a ClusterBinaryAuthorization resource to its proto representation.
func ContainerClusterBinaryAuthorizationToProto(o *container.ClusterBinaryAuthorization) *containerpb.ContainerClusterBinaryAuthorization {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterBinaryAuthorization{
		Enabled: dcl.ValueOrEmptyBool(o.Enabled),
	}
}
// ContainerClusterAutoscalingToProto converts a ClusterAutoscaling resource to its proto representation.
func ContainerClusterAutoscalingToProto(o *container.ClusterAutoscaling) *containerpb.ContainerClusterAutoscaling {
	if o == nil {
		return nil
	}
	p := &containerpb.ContainerClusterAutoscaling{
		EnableNodeAutoprovisioning:       dcl.ValueOrEmptyBool(o.EnableNodeAutoprovisioning),
		AutoprovisioningNodePoolDefaults: ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsToProto(o.AutoprovisioningNodePoolDefaults),
	}
	// Index the slice so each element is converted without copying the struct
	// into a loop variable.
	for i := range o.ResourceLimits {
		p.ResourceLimits = append(p.ResourceLimits, ContainerClusterAutoscalingResourceLimitsToProto(&o.ResourceLimits[i]))
	}
	// A single variadic append replaces the element-by-element copy loop.
	p.AutoprovisioningLocations = append(p.AutoprovisioningLocations, o.AutoprovisioningLocations...)
	return p
}
// ContainerClusterAutoscalingResourceLimitsToProto converts a ClusterAutoscalingResourceLimits resource to its proto representation.
func ContainerClusterAutoscalingResourceLimitsToProto(o *container.ClusterAutoscalingResourceLimits) *containerpb.ContainerClusterAutoscalingResourceLimits {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterAutoscalingResourceLimits{
		ResourceType: dcl.ValueOrEmptyString(o.ResourceType),
		Minimum:      dcl.ValueOrEmptyInt64(o.Minimum),
		Maximum:      dcl.ValueOrEmptyInt64(o.Maximum),
	}
}
// ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsToProto converts a ClusterAutoscalingAutoprovisioningNodePoolDefaults resource to its proto representation.
func ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsToProto(o *container.ClusterAutoscalingAutoprovisioningNodePoolDefaults) *containerpb.ContainerClusterAutoscalingAutoprovisioningNodePoolDefaults {
	if o == nil {
		return nil
	}
	p := &containerpb.ContainerClusterAutoscalingAutoprovisioningNodePoolDefaults{
		ServiceAccount:         dcl.ValueOrEmptyString(o.ServiceAccount),
		UpgradeSettings:        ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsUpgradeSettingsToProto(o.UpgradeSettings),
		Management:             ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsManagementToProto(o.Management),
		MinCpuPlatform:         dcl.ValueOrEmptyString(o.MinCpuPlatform),
		DiskSizeGb:             dcl.ValueOrEmptyInt64(o.DiskSizeGb),
		DiskType:               dcl.ValueOrEmptyString(o.DiskType),
		ShieldedInstanceConfig: ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsShieldedInstanceConfigToProto(o.ShieldedInstanceConfig),
		BootDiskKmsKey:         dcl.ValueOrEmptyString(o.BootDiskKmsKey),
	}
	// A single variadic append replaces the element-by-element copy loop.
	p.OauthScopes = append(p.OauthScopes, o.OAuthScopes...)
	return p
}
// ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsUpgradeSettingsToProto converts a ClusterAutoscalingAutoprovisioningNodePoolDefaultsUpgradeSettings resource to its proto representation.
func ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsUpgradeSettingsToProto(o *container.ClusterAutoscalingAutoprovisioningNodePoolDefaultsUpgradeSettings) *containerpb.ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsUpgradeSettings {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsUpgradeSettings{
		MaxSurge:       dcl.ValueOrEmptyInt64(o.MaxSurge),
		MaxUnavailable: dcl.ValueOrEmptyInt64(o.MaxUnavailable),
	}
}
// ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsManagementToProto converts a ClusterAutoscalingAutoprovisioningNodePoolDefaultsManagement resource to its proto representation.
func ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsManagementToProto(o *container.ClusterAutoscalingAutoprovisioningNodePoolDefaultsManagement) *containerpb.ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsManagement {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsManagement{
		AutoUpgrade:    dcl.ValueOrEmptyBool(o.AutoUpgrade),
		AutoRepair:     dcl.ValueOrEmptyBool(o.AutoRepair),
		UpgradeOptions: ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsManagementUpgradeOptionsToProto(o.UpgradeOptions),
	}
}
// ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsManagementUpgradeOptionsToProto converts a ClusterAutoscalingAutoprovisioningNodePoolDefaultsManagementUpgradeOptions resource to its proto representation.
func ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsManagementUpgradeOptionsToProto(o *container.ClusterAutoscalingAutoprovisioningNodePoolDefaultsManagementUpgradeOptions) *containerpb.ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsManagementUpgradeOptions {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsManagementUpgradeOptions{
		AutoUpgradeStartTime: dcl.ValueOrEmptyString(o.AutoUpgradeStartTime),
		Description:          dcl.ValueOrEmptyString(o.Description),
	}
}
// ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsShieldedInstanceConfigToProto converts a ClusterAutoscalingAutoprovisioningNodePoolDefaultsShieldedInstanceConfig resource to its proto representation.
func ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsShieldedInstanceConfigToProto(o *container.ClusterAutoscalingAutoprovisioningNodePoolDefaultsShieldedInstanceConfig) *containerpb.ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsShieldedInstanceConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterAutoscalingAutoprovisioningNodePoolDefaultsShieldedInstanceConfig{
		EnableSecureBoot:          dcl.ValueOrEmptyBool(o.EnableSecureBoot),
		EnableIntegrityMonitoring: dcl.ValueOrEmptyBool(o.EnableIntegrityMonitoring),
	}
}
// ContainerClusterNetworkConfigToProto converts a ClusterNetworkConfig resource to its proto representation.
func ContainerClusterNetworkConfigToProto(o *container.ClusterNetworkConfig) *containerpb.ContainerClusterNetworkConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNetworkConfig{
		Network:                   dcl.ValueOrEmptyString(o.Network),
		Subnetwork:                dcl.ValueOrEmptyString(o.Subnetwork),
		EnableIntraNodeVisibility: dcl.ValueOrEmptyBool(o.EnableIntraNodeVisibility),
		DefaultSnatStatus:         ContainerClusterNetworkConfigDefaultSnatStatusToProto(o.DefaultSnatStatus),
		PrivateIpv6GoogleAccess:   ContainerClusterNetworkConfigPrivateIPv6GoogleAccessEnumToProto(o.PrivateIPv6GoogleAccess),
	}
}
// ContainerClusterNetworkConfigDefaultSnatStatusToProto converts a ClusterNetworkConfigDefaultSnatStatus resource to its proto representation.
func ContainerClusterNetworkConfigDefaultSnatStatusToProto(o *container.ClusterNetworkConfigDefaultSnatStatus) *containerpb.ContainerClusterNetworkConfigDefaultSnatStatus {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNetworkConfigDefaultSnatStatus{
		Disabled: dcl.ValueOrEmptyBool(o.Disabled),
	}
}
// ContainerClusterMaintenancePolicyToProto converts a ClusterMaintenancePolicy resource to its proto representation.
func ContainerClusterMaintenancePolicyToProto(o *container.ClusterMaintenancePolicy) *containerpb.ContainerClusterMaintenancePolicy {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterMaintenancePolicy{
		Window:          ContainerClusterMaintenancePolicyWindowToProto(o.Window),
		ResourceVersion: dcl.ValueOrEmptyString(o.ResourceVersion),
	}
}
// ContainerClusterMaintenancePolicyWindowToProto converts a ClusterMaintenancePolicyWindow resource to its proto representation.
func ContainerClusterMaintenancePolicyWindowToProto(o *container.ClusterMaintenancePolicyWindow) *containerpb.ContainerClusterMaintenancePolicyWindow {
	if o == nil {
		return nil
	}
	// The exclusions map is always allocated (non-nil even for an empty
	// source), sized to the source to avoid rehashing.
	p := &containerpb.ContainerClusterMaintenancePolicyWindow{
		DailyMaintenanceWindow: ContainerClusterMaintenancePolicyWindowDailyMaintenanceWindowToProto(o.DailyMaintenanceWindow),
		RecurringWindow:        ContainerClusterMaintenancePolicyWindowRecurringWindowToProto(o.RecurringWindow),
		MaintenanceExclusions:  make(map[string]string, len(o.MaintenanceExclusions)),
	}
	for key, value := range o.MaintenanceExclusions {
		p.MaintenanceExclusions[key] = value
	}
	return p
}
// ContainerClusterMaintenancePolicyWindowDailyMaintenanceWindowToProto converts a ClusterMaintenancePolicyWindowDailyMaintenanceWindow resource to its proto representation.
func ContainerClusterMaintenancePolicyWindowDailyMaintenanceWindowToProto(o *container.ClusterMaintenancePolicyWindowDailyMaintenanceWindow) *containerpb.ContainerClusterMaintenancePolicyWindowDailyMaintenanceWindow {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterMaintenancePolicyWindowDailyMaintenanceWindow{
		StartTime: dcl.ValueOrEmptyString(o.StartTime),
		Duration:  dcl.ValueOrEmptyString(o.Duration),
	}
}
// ContainerClusterMaintenancePolicyWindowRecurringWindowToProto converts a ClusterMaintenancePolicyWindowRecurringWindow resource to its proto representation.
func ContainerClusterMaintenancePolicyWindowRecurringWindowToProto(o *container.ClusterMaintenancePolicyWindowRecurringWindow) *containerpb.ContainerClusterMaintenancePolicyWindowRecurringWindow {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterMaintenancePolicyWindowRecurringWindow{
		Window:     ContainerClusterMaintenancePolicyWindowRecurringWindowWindowToProto(o.Window),
		Recurrence: dcl.ValueOrEmptyString(o.Recurrence),
	}
}
// ContainerClusterMaintenancePolicyWindowRecurringWindowWindowToProto converts a ClusterMaintenancePolicyWindowRecurringWindowWindow resource to its proto representation.
func ContainerClusterMaintenancePolicyWindowRecurringWindowWindowToProto(o *container.ClusterMaintenancePolicyWindowRecurringWindowWindow) *containerpb.ContainerClusterMaintenancePolicyWindowRecurringWindowWindow {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterMaintenancePolicyWindowRecurringWindowWindow{
		StartTime: dcl.ValueOrEmptyString(o.StartTime),
		EndTime:   dcl.ValueOrEmptyString(o.EndTime),
	}
}
// ContainerClusterDefaultMaxPodsConstraintToProto converts a ClusterDefaultMaxPodsConstraint resource to its proto representation.
func ContainerClusterDefaultMaxPodsConstraintToProto(o *container.ClusterDefaultMaxPodsConstraint) *containerpb.ContainerClusterDefaultMaxPodsConstraint {
	if o == nil {
		return nil
	}
	// NOTE(review): MaxPodsPerNode is converted as a string here, while the
	// node-pool MaxPodsConstraint converters use Int64 — presumably the proto
	// declares this field as a string; confirm against the generated proto.
	return &containerpb.ContainerClusterDefaultMaxPodsConstraint{
		MaxPodsPerNode: dcl.ValueOrEmptyString(o.MaxPodsPerNode),
	}
}
// ContainerClusterResourceUsageExportConfigToProto converts a ClusterResourceUsageExportConfig resource to its proto representation.
func ContainerClusterResourceUsageExportConfigToProto(o *container.ClusterResourceUsageExportConfig) *containerpb.ContainerClusterResourceUsageExportConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterResourceUsageExportConfig{
		BigqueryDestination:           ContainerClusterResourceUsageExportConfigBigqueryDestinationToProto(o.BigqueryDestination),
		EnableNetworkEgressMonitoring: dcl.ValueOrEmptyBool(o.EnableNetworkEgressMonitoring),
		ConsumptionMeteringConfig:     ContainerClusterResourceUsageExportConfigConsumptionMeteringConfigToProto(o.ConsumptionMeteringConfig),
		EnableNetworkEgressMetering:   dcl.ValueOrEmptyBool(o.EnableNetworkEgressMetering),
	}
}
// ContainerClusterResourceUsageExportConfigBigqueryDestinationToProto converts a ClusterResourceUsageExportConfigBigqueryDestination resource to its proto representation.
func ContainerClusterResourceUsageExportConfigBigqueryDestinationToProto(o *container.ClusterResourceUsageExportConfigBigqueryDestination) *containerpb.ContainerClusterResourceUsageExportConfigBigqueryDestination {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterResourceUsageExportConfigBigqueryDestination{
		DatasetId: dcl.ValueOrEmptyString(o.DatasetId),
	}
}
// ContainerClusterResourceUsageExportConfigConsumptionMeteringConfigToProto converts a ClusterResourceUsageExportConfigConsumptionMeteringConfig resource to its proto representation.
func ContainerClusterResourceUsageExportConfigConsumptionMeteringConfigToProto(o *container.ClusterResourceUsageExportConfigConsumptionMeteringConfig) *containerpb.ContainerClusterResourceUsageExportConfigConsumptionMeteringConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterResourceUsageExportConfigConsumptionMeteringConfig{
		Enabled: dcl.ValueOrEmptyBool(o.Enabled),
	}
}
// ContainerClusterAuthenticatorGroupsConfigToProto converts a ClusterAuthenticatorGroupsConfig resource to its proto representation.
func ContainerClusterAuthenticatorGroupsConfigToProto(o *container.ClusterAuthenticatorGroupsConfig) *containerpb.ContainerClusterAuthenticatorGroupsConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterAuthenticatorGroupsConfig{
		Enabled:       dcl.ValueOrEmptyBool(o.Enabled),
		SecurityGroup: dcl.ValueOrEmptyString(o.SecurityGroup),
	}
}
// ContainerClusterPrivateClusterConfigToProto converts a ClusterPrivateClusterConfig resource to its proto representation.
func ContainerClusterPrivateClusterConfigToProto(o *container.ClusterPrivateClusterConfig) *containerpb.ContainerClusterPrivateClusterConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterPrivateClusterConfig{
		EnablePrivateNodes:       dcl.ValueOrEmptyBool(o.EnablePrivateNodes),
		EnablePrivateEndpoint:    dcl.ValueOrEmptyBool(o.EnablePrivateEndpoint),
		MasterIpv4CidrBlock:      dcl.ValueOrEmptyString(o.MasterIPv4CidrBlock),
		PrivateEndpoint:          dcl.ValueOrEmptyString(o.PrivateEndpoint),
		PublicEndpoint:           dcl.ValueOrEmptyString(o.PublicEndpoint),
		PeeringName:              dcl.ValueOrEmptyString(o.PeeringName),
		MasterGlobalAccessConfig: ContainerClusterPrivateClusterConfigMasterGlobalAccessConfigToProto(o.MasterGlobalAccessConfig),
	}
}
// ContainerClusterPrivateClusterConfigMasterGlobalAccessConfigToProto converts a ClusterPrivateClusterConfigMasterGlobalAccessConfig resource to its proto representation.
func ContainerClusterPrivateClusterConfigMasterGlobalAccessConfigToProto(o *container.ClusterPrivateClusterConfigMasterGlobalAccessConfig) *containerpb.ContainerClusterPrivateClusterConfigMasterGlobalAccessConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterPrivateClusterConfigMasterGlobalAccessConfig{
		Enabled: dcl.ValueOrEmptyBool(o.Enabled),
	}
}
// ContainerClusterDatabaseEncryptionToProto converts a ClusterDatabaseEncryption resource to its proto representation.
func ContainerClusterDatabaseEncryptionToProto(o *container.ClusterDatabaseEncryption) *containerpb.ContainerClusterDatabaseEncryption {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterDatabaseEncryption{
		State:   ContainerClusterDatabaseEncryptionStateEnumToProto(o.State),
		KeyName: dcl.ValueOrEmptyString(o.KeyName),
	}
}
// ContainerClusterVerticalPodAutoscalingToProto converts a ClusterVerticalPodAutoscaling resource to its proto representation.
func ContainerClusterVerticalPodAutoscalingToProto(o *container.ClusterVerticalPodAutoscaling) *containerpb.ContainerClusterVerticalPodAutoscaling {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterVerticalPodAutoscaling{
		Enabled: dcl.ValueOrEmptyBool(o.Enabled),
	}
}
// ContainerClusterShieldedNodesToProto converts a ClusterShieldedNodes resource to its proto representation.
func ContainerClusterShieldedNodesToProto(o *container.ClusterShieldedNodes) *containerpb.ContainerClusterShieldedNodes {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterShieldedNodes{
		Enabled: dcl.ValueOrEmptyBool(o.Enabled),
	}
}
// ContainerClusterConditionsToProto converts a ClusterConditions resource to its proto representation.
func ContainerClusterConditionsToProto(o *container.ClusterConditions) *containerpb.ContainerClusterConditions {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterConditions{
		Code:          dcl.ValueOrEmptyString(o.Code),
		Message:       dcl.ValueOrEmptyString(o.Message),
		CanonicalCode: ContainerClusterConditionsCanonicalCodeEnumToProto(o.CanonicalCode),
	}
}
// ContainerClusterAutopilotToProto converts a ClusterAutopilot resource to its proto representation.
func ContainerClusterAutopilotToProto(o *container.ClusterAutopilot) *containerpb.ContainerClusterAutopilot {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterAutopilot{
		Enabled: dcl.ValueOrEmptyBool(o.Enabled),
	}
}
// ContainerClusterNodeConfigToProto converts a ClusterNodeConfig resource to its proto representation.
func ContainerClusterNodeConfigToProto(o *container.ClusterNodeConfig) *containerpb.ContainerClusterNodeConfig {
	if o == nil {
		return nil
	}
	p := &containerpb.ContainerClusterNodeConfig{
		MachineType:            dcl.ValueOrEmptyString(o.MachineType),
		DiskSizeGb:             dcl.ValueOrEmptyInt64(o.DiskSizeGb),
		ServiceAccount:         dcl.ValueOrEmptyString(o.ServiceAccount),
		ImageType:              dcl.ValueOrEmptyString(o.ImageType),
		LocalSsdCount:          dcl.ValueOrEmptyInt64(o.LocalSsdCount),
		Preemptible:            dcl.ValueOrEmptyBool(o.Preemptible),
		DiskType:               dcl.ValueOrEmptyString(o.DiskType),
		MinCpuPlatform:         dcl.ValueOrEmptyString(o.MinCpuPlatform),
		WorkloadMetadataConfig: ContainerClusterNodeConfigWorkloadMetadataConfigToProto(o.WorkloadMetadataConfig),
		SandboxConfig:          ContainerClusterNodeConfigSandboxConfigToProto(o.SandboxConfig),
		NodeGroup:              dcl.ValueOrEmptyString(o.NodeGroup),
		ReservationAffinity:    ContainerClusterNodeConfigReservationAffinityToProto(o.ReservationAffinity),
		ShieldedInstanceConfig: ContainerClusterNodeConfigShieldedInstanceConfigToProto(o.ShieldedInstanceConfig),
		LinuxNodeConfig:        ContainerClusterNodeConfigLinuxNodeConfigToProto(o.LinuxNodeConfig),
		KubeletConfig:          ContainerClusterNodeConfigKubeletConfigToProto(o.KubeletConfig),
		BootDiskKmsKey:         dcl.ValueOrEmptyString(o.BootDiskKmsKey),
	}
	// Repeated scalar fields copy directly.
	p.OauthScopes = append(p.OauthScopes, o.OAuthScopes...)
	p.Tags = append(p.Tags, o.Tags...)
	// Map fields are always materialized, even when the source map is empty.
	p.Metadata = make(map[string]string)
	for k, v := range o.Metadata {
		p.Metadata[k] = v
	}
	p.Labels = make(map[string]string)
	for k, v := range o.Labels {
		p.Labels[k] = v
	}
	// Index-based iteration avoids copying each element before taking its address.
	for i := range o.Accelerators {
		p.Accelerators = append(p.Accelerators, ContainerClusterNodeConfigAcceleratorsToProto(&o.Accelerators[i]))
	}
	for i := range o.Taints {
		p.Taints = append(p.Taints, ContainerClusterNodeConfigTaintsToProto(&o.Taints[i]))
	}
	return p
}
// ContainerClusterNodeConfigAcceleratorsToProto converts a ClusterNodeConfigAccelerators resource to its proto representation.
func ContainerClusterNodeConfigAcceleratorsToProto(o *container.ClusterNodeConfigAccelerators) *containerpb.ContainerClusterNodeConfigAccelerators {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNodeConfigAccelerators{
		AcceleratorCount: dcl.ValueOrEmptyInt64(o.AcceleratorCount),
		AcceleratorType:  dcl.ValueOrEmptyString(o.AcceleratorType),
	}
}
// ContainerClusterNodeConfigWorkloadMetadataConfigToProto converts a ClusterNodeConfigWorkloadMetadataConfig resource to its proto representation.
func ContainerClusterNodeConfigWorkloadMetadataConfigToProto(o *container.ClusterNodeConfigWorkloadMetadataConfig) *containerpb.ContainerClusterNodeConfigWorkloadMetadataConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNodeConfigWorkloadMetadataConfig{
		Mode: ContainerClusterNodeConfigWorkloadMetadataConfigModeEnumToProto(o.Mode),
	}
}
// ContainerClusterNodeConfigTaintsToProto converts a ClusterNodeConfigTaints resource to its proto representation.
func ContainerClusterNodeConfigTaintsToProto(o *container.ClusterNodeConfigTaints) *containerpb.ContainerClusterNodeConfigTaints {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNodeConfigTaints{
		Key:    dcl.ValueOrEmptyString(o.Key),
		Value:  dcl.ValueOrEmptyString(o.Value),
		Effect: ContainerClusterNodeConfigTaintsEffectEnumToProto(o.Effect),
	}
}
// ContainerClusterNodeConfigSandboxConfigToProto converts a ClusterNodeConfigSandboxConfig resource to its proto representation.
func ContainerClusterNodeConfigSandboxConfigToProto(o *container.ClusterNodeConfigSandboxConfig) *containerpb.ContainerClusterNodeConfigSandboxConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNodeConfigSandboxConfig{
		Type: ContainerClusterNodeConfigSandboxConfigTypeEnumToProto(o.Type),
	}
}
// ContainerClusterNodeConfigReservationAffinityToProto converts a ClusterNodeConfigReservationAffinity resource to its proto representation.
func ContainerClusterNodeConfigReservationAffinityToProto(o *container.ClusterNodeConfigReservationAffinity) *containerpb.ContainerClusterNodeConfigReservationAffinity {
	if o == nil {
		return nil
	}
	p := &containerpb.ContainerClusterNodeConfigReservationAffinity{
		ConsumeReservationType: ContainerClusterNodeConfigReservationAffinityConsumeReservationTypeEnumToProto(o.ConsumeReservationType),
		Key:                    dcl.ValueOrEmptyString(o.Key),
	}
	p.Values = append(p.Values, o.Values...)
	return p
}
// ContainerClusterNodeConfigShieldedInstanceConfigToProto converts a ClusterNodeConfigShieldedInstanceConfig resource to its proto representation.
func ContainerClusterNodeConfigShieldedInstanceConfigToProto(o *container.ClusterNodeConfigShieldedInstanceConfig) *containerpb.ContainerClusterNodeConfigShieldedInstanceConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNodeConfigShieldedInstanceConfig{
		EnableSecureBoot:          dcl.ValueOrEmptyBool(o.EnableSecureBoot),
		EnableIntegrityMonitoring: dcl.ValueOrEmptyBool(o.EnableIntegrityMonitoring),
	}
}
// ContainerClusterNodeConfigLinuxNodeConfigToProto converts a ClusterNodeConfigLinuxNodeConfig resource to its proto representation.
func ContainerClusterNodeConfigLinuxNodeConfigToProto(o *container.ClusterNodeConfigLinuxNodeConfig) *containerpb.ContainerClusterNodeConfigLinuxNodeConfig {
	if o == nil {
		return nil
	}
	p := &containerpb.ContainerClusterNodeConfigLinuxNodeConfig{}
	// The Sysctls map is always materialized, even when the source map is empty.
	p.Sysctls = make(map[string]string)
	for key, value := range o.Sysctls {
		p.Sysctls[key] = value
	}
	return p
}
// ContainerClusterNodeConfigKubeletConfigToProto converts a ClusterNodeConfigKubeletConfig resource to its proto representation.
func ContainerClusterNodeConfigKubeletConfigToProto(o *container.ClusterNodeConfigKubeletConfig) *containerpb.ContainerClusterNodeConfigKubeletConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNodeConfigKubeletConfig{
		CpuManagerPolicy:  dcl.ValueOrEmptyString(o.CpuManagerPolicy),
		CpuCfsQuota:       dcl.ValueOrEmptyBool(o.CpuCfsQuota),
		CpuCfsQuotaPeriod: dcl.ValueOrEmptyString(o.CpuCfsQuotaPeriod),
	}
}
// ContainerClusterReleaseChannelToProto converts a ClusterReleaseChannel resource to its proto representation.
func ContainerClusterReleaseChannelToProto(o *container.ClusterReleaseChannel) *containerpb.ContainerClusterReleaseChannel {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterReleaseChannel{
		Channel: ContainerClusterReleaseChannelChannelEnumToProto(o.Channel),
	}
}
// ContainerClusterWorkloadIdentityConfigToProto converts a ClusterWorkloadIdentityConfig resource to its proto representation.
func ContainerClusterWorkloadIdentityConfigToProto(o *container.ClusterWorkloadIdentityConfig) *containerpb.ContainerClusterWorkloadIdentityConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterWorkloadIdentityConfig{
		WorkloadPool: dcl.ValueOrEmptyString(o.WorkloadPool),
	}
}
// ContainerClusterNotificationConfigToProto converts a ClusterNotificationConfig resource to its proto representation.
func ContainerClusterNotificationConfigToProto(o *container.ClusterNotificationConfig) *containerpb.ContainerClusterNotificationConfig {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNotificationConfig{
		Pubsub: ContainerClusterNotificationConfigPubsubToProto(o.Pubsub),
	}
}
// ContainerClusterNotificationConfigPubsubToProto converts a ClusterNotificationConfigPubsub resource to its proto representation.
func ContainerClusterNotificationConfigPubsubToProto(o *container.ClusterNotificationConfigPubsub) *containerpb.ContainerClusterNotificationConfigPubsub {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterNotificationConfigPubsub{
		Enabled: dcl.ValueOrEmptyBool(o.Enabled),
		Topic:   dcl.ValueOrEmptyString(o.Topic),
	}
}
// ContainerClusterConfidentialNodesToProto converts a ClusterConfidentialNodes resource to its proto representation.
func ContainerClusterConfidentialNodesToProto(o *container.ClusterConfidentialNodes) *containerpb.ContainerClusterConfidentialNodes {
	if o == nil {
		return nil
	}
	return &containerpb.ContainerClusterConfidentialNodes{
		Enabled: dcl.ValueOrEmptyBool(o.Enabled),
	}
}
// ClusterToProto converts a Cluster resource to its proto representation.
func ClusterToProto(resource *container.Cluster) *containerpb.ContainerCluster {
	p := &containerpb.ContainerCluster{
		Name:                           dcl.ValueOrEmptyString(resource.Name),
		Description:                    dcl.ValueOrEmptyString(resource.Description),
		InitialNodeCount:               dcl.ValueOrEmptyInt64(resource.InitialNodeCount),
		MasterAuth:                     ContainerClusterMasterAuthToProto(resource.MasterAuth),
		LoggingService:                 dcl.ValueOrEmptyString(resource.LoggingService),
		MonitoringService:              dcl.ValueOrEmptyString(resource.MonitoringService),
		Network:                        dcl.ValueOrEmptyString(resource.Network),
		ClusterIpv4Cidr:                dcl.ValueOrEmptyString(resource.ClusterIPv4Cidr),
		AddonsConfig:                   ContainerClusterAddonsConfigToProto(resource.AddonsConfig),
		Subnetwork:                     dcl.ValueOrEmptyString(resource.Subnetwork),
		EnableKubernetesAlpha:          dcl.ValueOrEmptyBool(resource.EnableKubernetesAlpha),
		LabelFingerprint:               dcl.ValueOrEmptyString(resource.LabelFingerprint),
		LegacyAbac:                     ContainerClusterLegacyAbacToProto(resource.LegacyAbac),
		NetworkPolicy:                  ContainerClusterNetworkPolicyToProto(resource.NetworkPolicy),
		IpAllocationPolicy:             ContainerClusterIPAllocationPolicyToProto(resource.IPAllocationPolicy),
		MasterAuthorizedNetworksConfig: ContainerClusterMasterAuthorizedNetworksConfigToProto(resource.MasterAuthorizedNetworksConfig),
		BinaryAuthorization:            ContainerClusterBinaryAuthorizationToProto(resource.BinaryAuthorization),
		Autoscaling:                    ContainerClusterAutoscalingToProto(resource.Autoscaling),
		NetworkConfig:                  ContainerClusterNetworkConfigToProto(resource.NetworkConfig),
		MaintenancePolicy:              ContainerClusterMaintenancePolicyToProto(resource.MaintenancePolicy),
		DefaultMaxPodsConstraint:       ContainerClusterDefaultMaxPodsConstraintToProto(resource.DefaultMaxPodsConstraint),
		ResourceUsageExportConfig:      ContainerClusterResourceUsageExportConfigToProto(resource.ResourceUsageExportConfig),
		AuthenticatorGroupsConfig:      ContainerClusterAuthenticatorGroupsConfigToProto(resource.AuthenticatorGroupsConfig),
		PrivateClusterConfig:           ContainerClusterPrivateClusterConfigToProto(resource.PrivateClusterConfig),
		DatabaseEncryption:             ContainerClusterDatabaseEncryptionToProto(resource.DatabaseEncryption),
		VerticalPodAutoscaling:         ContainerClusterVerticalPodAutoscalingToProto(resource.VerticalPodAutoscaling),
		ShieldedNodes:                  ContainerClusterShieldedNodesToProto(resource.ShieldedNodes),
		Endpoint:                       dcl.ValueOrEmptyString(resource.Endpoint),
		MasterVersion:                  dcl.ValueOrEmptyString(resource.MasterVersion),
		CreateTime:                     dcl.ValueOrEmptyString(resource.CreateTime),
		Status:                         dcl.ValueOrEmptyString(resource.Status),
		StatusMessage:                  dcl.ValueOrEmptyString(resource.StatusMessage),
		NodeIpv4CidrSize:               dcl.ValueOrEmptyInt64(resource.NodeIPv4CidrSize),
		ServicesIpv4Cidr:               dcl.ValueOrEmptyString(resource.ServicesIPv4Cidr),
		ExpireTime:                     dcl.ValueOrEmptyString(resource.ExpireTime),
		Location:                       dcl.ValueOrEmptyString(resource.Location),
		EnableTpu:                      dcl.ValueOrEmptyBool(resource.EnableTPU),
		TpuIpv4CidrBlock:               dcl.ValueOrEmptyString(resource.TPUIPv4CidrBlock),
		Autopilot:                      ContainerClusterAutopilotToProto(resource.Autopilot),
		Project:                        dcl.ValueOrEmptyString(resource.Project),
		NodeConfig:                     ContainerClusterNodeConfigToProto(resource.NodeConfig),
		ReleaseChannel:                 ContainerClusterReleaseChannelToProto(resource.ReleaseChannel),
		WorkloadIdentityConfig:         ContainerClusterWorkloadIdentityConfigToProto(resource.WorkloadIdentityConfig),
		NotificationConfig:             ContainerClusterNotificationConfigToProto(resource.NotificationConfig),
		ConfidentialNodes:              ContainerClusterConfidentialNodesToProto(resource.ConfidentialNodes),
		SelfLink:                       dcl.ValueOrEmptyString(resource.SelfLink),
		Zone:                           dcl.ValueOrEmptyString(resource.Zone),
		InitialClusterVersion:          dcl.ValueOrEmptyString(resource.InitialClusterVersion),
		CurrentMasterVersion:           dcl.ValueOrEmptyString(resource.CurrentMasterVersion),
		CurrentNodeVersion:             dcl.ValueOrEmptyString(resource.CurrentNodeVersion),
		CurrentNodeCount:               dcl.ValueOrEmptyInt64(resource.CurrentNodeCount),
		Id:                             dcl.ValueOrEmptyString(resource.Id),
	}
	// Index-based iteration avoids copying each struct before taking its address.
	for i := range resource.NodePools {
		p.NodePools = append(p.NodePools, ContainerClusterNodePoolsToProto(&resource.NodePools[i]))
	}
	for i := range resource.Conditions {
		p.Conditions = append(p.Conditions, ContainerClusterConditionsToProto(&resource.Conditions[i]))
	}
	// Repeated scalar fields copy directly.
	p.Locations = append(p.Locations, resource.Locations...)
	p.InstanceGroupUrls = append(p.InstanceGroupUrls, resource.InstanceGroupUrls...)
	return p
}
// applyCluster handles the gRPC request by passing it to the underlying Cluster Apply() method.
func (s *ClusterServer) applyCluster(ctx context.Context, c *container.Client, request *containerpb.ApplyContainerClusterRequest) (*containerpb.ContainerCluster, error) {
	res, err := c.ApplyCluster(ctx, ProtoToCluster(request.GetResource()))
	if err != nil {
		return nil, err
	}
	return ClusterToProto(res), nil
}
// ApplyContainerCluster handles the gRPC request by passing it to the underlying Cluster Apply() method.
func (s *ClusterServer) ApplyContainerCluster(ctx context.Context, request *containerpb.ApplyContainerClusterRequest) (*containerpb.ContainerCluster, error) {
	cl, err := createConfigCluster(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return s.applyCluster(ctx, cl, request)
}
// DeleteContainerCluster handles the gRPC request by passing it to the underlying Cluster Delete() method.
func (s *ClusterServer) DeleteContainerCluster(ctx context.Context, request *containerpb.DeleteContainerClusterRequest) (*emptypb.Empty, error) {
	cl, err := createConfigCluster(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	// Empty is always returned; the delete error (if any) rides alongside it.
	err = cl.DeleteCluster(ctx, ProtoToCluster(request.GetResource()))
	return &emptypb.Empty{}, err
}
// ListContainerCluster handles the gRPC request by passing it to the underlying ClusterList() method.
func (s *ClusterServer) ListContainerCluster(ctx context.Context, request *containerpb.ListContainerClusterRequest) (*containerpb.ListContainerClusterResponse, error) {
	cl, err := createConfigCluster(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListCluster(ctx, request.Project, request.Location)
	if err != nil {
		return nil, err
	}
	var protos []*containerpb.ContainerCluster
	for _, item := range resources.Items {
		protos = append(protos, ClusterToProto(item))
	}
	return &containerpb.ListContainerClusterResponse{Items: protos}, nil
}
// createConfigCluster builds a DCL container client authenticated with the
// given service-account credentials file.
// Fix: parameter renamed from snake_case (service_account_file) to Go-idiomatic
// camelCase; Go parameter names are not part of the call interface, so callers
// are unaffected.
func createConfigCluster(ctx context.Context, serviceAccountFile string) (*container.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return container.NewClient(conf), nil
}
|
package models
// Todo represents a single to-do list item.
type Todo struct {
	ID    string // unique identifier of the item
	Title string // human-readable description of the task
	Done  bool   // whether the task has been completed
}
|
package cmd
// x is a placeholder hook; it intentionally does nothing.
// Fix: removed the redundant bare `return` at the end of a void function
// (staticcheck S1023).
func x() {
}
|
package main
import (
"errors"
"github.com/shiningacg/apicore"
)
// init registers the Login handler for the "/login" route with apicore.
// The factory returns a fresh *Login each time it is invoked — presumably once
// per request, so decoded credentials are not shared; confirm against apicore.
func init() {
	apicore.AddHandler(apicore.NewMatcher("/login"), func() apicore.Handler {
		return &Login{}
	})
}
// Login carries the credentials decoded from a /login request body.
type Login struct {
	UserName string `json:"user_name"` // account name
	UserPWD  string `json:"user_pwd"`  // account password
}
// Handle answers a login attempt on ctx: success for the one accepted
// credential pair, a client error otherwise.
// NOTE(review): credentials are hard-coded below — replace with a real
// credential store before production use.
func (l *Login) Handle(ctx apicore.Conn) {
	// NOTE(review): the CORS origin lacks a scheme ("127.0.0.1:3000"); browsers
	// usually expect e.g. "http://127.0.0.1:3000" — confirm with the frontend.
	ctx.SetHead("Access-Control-Allow-Origin", "127.0.0.1:3000")
	if l.UserName != "shlande" || l.UserPWD != "shiningacg" {
		ctx.SetRsp(apicore.NewClientErrorResponse(errors.New("账号错误")))
		return
	}
	ctx.SetRsp(apicore.NewSuccessResponse(nil))
}
// IsValid reports whether both credential fields were supplied; it returns an
// error when either is empty.
func (l *Login) IsValid() error {
	if l.UserName != "" && l.UserPWD != "" {
		return nil
	}
	return errors.New("无效的输入")
}
|
package pipenv
import (
"os"
"path"
"sort"
"strings"
"testing"
"github.com/aquasecurity/go-dep-parser/pkg/types"
"github.com/kylelemons/godebug/pretty"
)
// TestParse checks Parse against several Pipfile.lock fixtures, comparing the
// parsed library list (name/version) order-insensitively via a shared sort.
// Fix: the opened fixture file was never closed, leaking one file handle per
// subtest; added `defer f.Close()`.
func TestParse(t *testing.T) {
	vectors := []struct {
		file      string // Test input file
		libraries []types.Library
	}{
		{
			file:      "testdata/Pipfile_normal.lock",
			libraries: PipenvNormal,
		},
		{
			file:      "testdata/Pipfile_django.lock",
			libraries: PipenvDjango,
		},
		{
			file:      "testdata/Pipfile_many.lock",
			libraries: PipenvMany,
		},
	}
	for _, v := range vectors {
		t.Run(path.Base(v.file), func(t *testing.T) {
			f, err := os.Open(v.file)
			if err != nil {
				t.Fatalf("Open() error: %v", err)
			}
			defer f.Close()
			libList, err := Parse(f)
			if err != nil {
				t.Fatalf("Parse() error: %v", err)
			}
			// Sort both sides by (Name, Version) so comparison is order-insensitive.
			sort.Slice(libList, func(i, j int) bool {
				ret := strings.Compare(libList[i].Name, libList[j].Name)
				if ret == 0 {
					return libList[i].Version < libList[j].Version
				}
				return ret < 0
			})
			sort.Slice(v.libraries, func(i, j int) bool {
				ret := strings.Compare(v.libraries[i].Name, v.libraries[j].Name)
				if ret == 0 {
					return v.libraries[i].Version < v.libraries[j].Version
				}
				return ret < 0
			})
			if len(libList) != len(v.libraries) {
				t.Fatalf("lib length: %s", pretty.Compare(libList, v.libraries))
			}
			for i, got := range libList {
				want := v.libraries[i]
				if want.Name != got.Name {
					t.Errorf("%d: Name: got %s, want %s", i, got.Name, want.Name)
				}
				if want.Version != got.Version {
					t.Errorf("%d: Version: got %s, want %s", i, got.Version, want.Version)
				}
			}
		})
	}
}
|
package main
import (
"fmt"
"strconv"
)
// restoreIpAddresses returns every valid dotted-quad IPv4 address that can be
// formed by inserting three dots into s, in depth-first order of segment widths.
// Strings whose length cannot form an address (len < 4 or > 12) yield an empty
// (non-nil) slice.
func restoreIpAddresses(s string) []string {
	n := len(s)
	if n < 4 || n > 12 {
		return []string{}
	}
	// cuts[d] records the end index (exclusive) of segment d.
	cuts := [4]int{}
	var res []string
	var dfs func(start, depth int)
	dfs = func(start, depth int) {
		if start == n && depth == 4 {
			// All four segments placed: join them with dots.
			res = append(res, s[:cuts[0]]+"."+s[cuts[0]:cuts[1]]+"."+s[cuts[1]:cuts[2]]+"."+s[cuts[2]:])
			return
		}
		if start >= n || depth >= 4 {
			return
		}
		// A leading zero forces a single-digit "0" segment.
		if s[start] == '0' {
			cuts[depth] = start + 1
			dfs(start+1, depth+1)
			return
		}
		for width := 1; width <= 3 && start+width <= n; width++ {
			if v, err := strconv.Atoi(s[start : start+width]); err == nil && v > 0 && v <= 255 {
				cuts[depth] = start + width
				dfs(start+width, depth+1)
			}
		}
	}
	dfs(0, 0)
	return res
}
// main runs restoreIpAddresses over the sample inputs, printing a header and
// the result for each case.
func main() {
	inputs := []string{
		"25525511135",
	}
	for i, in := range inputs {
		fmt.Println("## case", i)
		// solve
		fmt.Println(restoreIpAddresses(in))
	}
}
|
package main
import (
"github.com/gopherchina/website/controllers"
"github.com/gopherchina/website/models"
"github.com/astaxie/beego"
"github.com/beego/i18n"
)
// main wires up the routes for the GopherChina website, initializes locales,
// models, and template helpers, then starts the beego server.
func main() {
	beego.Router("/", &controllers.MainController{})
	// Catch-all parameterized routes for top-level pages and docs.
	beego.Router("/:name", &controllers.MainController{})
	beego.Router("/:name/:id", &controllers.DocsController{})
	// Documentation images are served by a custom static handler run before routing.
	beego.InsertFilter("/images/:all", beego.BeforeRouter, controllers.DocsStatic)
	controllers.InitLocales()
	models.InitModels()
	// Expose the i18n translation helper to templates as {{i18n ...}}.
	beego.AddFuncMap("i18n", i18n.Tr)
	beego.Run()
}
|
package kademlia
import (
"testing"
)
// TestStorage exercises the basic Storage lifecycle: store, re-store, read
// back, and delete.
func TestStorage(t *testing.T) {
	storage := NewStorage("TEST")
	// Stored twice on purpose — presumably to check that re-storing the same
	// file is harmless; confirm against Storage.Store semantics.
	storage.Store("test.txt", []byte("bonjour"), false)
	storage.Store("test.txt", []byte("bonjour"), false)
	out := storage.Read("test.txt")
	if string(out) != "bonjour" {
		t.Error("Invalid content")
	}
	storage.deleteFile("test.txt")
	if storage.Exists("test.txt") {
		t.Error("Should not exists")
	}
}
|
package payment
type NotFoundAccountError struct {
err error
}
func (e *NotFoundAccountError) Error() string {
return e.err.Error()
}
type SameEmailAccountAlreadyExistError struct {
err error
}
func (e *SameEmailAccountAlreadyExistError) Error() string {
return e.err.Error()
}
type NotFoundAccountRepositoryError struct {
err error
}
func (e *NotFoundAccountRepositoryError) Error() string {
return e.err.Error()
}
|
package main
import (
"fmt"
"regexp"
)
// main demonstrates a regexp match with a capture group: `N(\w)l` matches "NAl".
// Fix: use a raw string literal to avoid double-escaping, and drop the
// redundant character class ([\w] is identical to \w).
func main() {
	regex := regexp.MustCompile(`N(\w)l`)
	fmt.Println(regex.MatchString("NAl"))
}
|
package route
import (
"fmt"
"RelationshipMatch/model"
"RelationshipMatch/repository"
"github.com/gin-gonic/gin"
)
// CreateUserRelationshipReq is the JSON request body for
//
//	PUT /users/:user_id/relationships/:other_user_id
//
// Example:
//
//	{
//	  "user_id": "21341231231",
//	  "state": "liked",
//	  "type": "relationship"
//	}
type CreateUserRelationshipReq struct {
	UserId string `json:"user_id"` // NOTE(review): the handler reads user ids from the URL path; this field looks unused — confirm.
	State  string `json:"state"`   // must be "liked" or "disliked" (validated by the handler)
	Type   string `json:"type"`    // relationship type, stored as-is
}
func (api *RestApi) CreateUserRelationship(c *gin.Context) {
user_id := c.Param("user_id")
other_user_id := c.Param("other_user_id")
params := CreateUserRelationshipReq{}
err := c.BindJSON(¶ms)
if err != nil {
c.JSON(200, gin.H{
"result": "Json format error.",
})
return
}
// State only allowed fields liked | disliked
if params.State != "liked" && params.State != "disliked" {
c.JSON(200, gin.H{
"result": "State format error.",
})
return
}
// validate user_id and other_user_id
is_user_exist, err := repository.IsUserExist(api.PG, user_id)
if err != nil {
result := fmt.Sprintf("Validate user error: %s.", err)
c.JSON(200, gin.H{
"result": result,
})
return
}
if !is_user_exist {
result := fmt.Sprintf("User id %s is not exist.", user_id)
c.JSON(200, gin.H{
"result": result,
})
return
}
is_other_user_id_exist, err := repository.IsUserExist(api.PG, other_user_id)
if err != nil {
result := fmt.Sprintf("Validate user error: %s.", err)
c.JSON(200, gin.H{
"result": result,
})
return
}
if !is_other_user_id_exist {
result := fmt.Sprintf("User id %s is not exist.", other_user_id)
c.JSON(200, gin.H{
"result": result,
})
return
}
relationship := &model.Relationship{
UserId: user_id,
OtherId: other_user_id,
State: params.State,
Type: params.Type,
}
// add relationship to database
res, err := repository.CreateUserRelationship(api.PG, relationship)
if err != nil {
result := fmt.Sprintf("Create user relationship error: %s.", err)
c.JSON(200, gin.H{
"result": result,
})
return
}
if !res {
c.JSON(200, gin.H{
"result": "Create relationship failed.",
})
return
}
c.JSON(200, gin.H{
"result": "Create relationship successed.",
})
return
}
|
package routerHandler
import (
"encoding/json"
"fmt"
"net"
"cmpeax.tech/lower-machine/lib/DataParser"
"cmpeax.tech/lower-machine/lib/routerDI"
"cmpeax.tech/lower-machine/struct/ACS"
)
// WSServiceExport returns the websocket callback table mapping message codes to
// their handlers. Each handler builds the corresponding ACS message, tags it
// with its code, and sends it GBK-encoded over the connection.
// Fixes: repaired the garbled log strings ("0x03调e用" / "0x04e调用" → "…调用"),
// stopped silently ignoring conn.Write errors, and factored the duplicated
// marshal-and-send body into a helper.
func WSServiceExport() routerDI.MapOfWSCallbackJSONFunc {
	return routerDI.MapOfWSCallbackJSONFunc{
		"0x03": func(jsonData routerDI.Message, conn net.Conn) {
			m := ACS.NewACS0x03("123456")
			m.Code = "0x03"
			sendGbkJSON(conn, m, "0x03")
		},
		"0x04": func(jsonData routerDI.Message, conn net.Conn) {
			m := ACS.NewACS0x04("123456")
			m.Code = "0x04"
			sendGbkJSON(conn, m, "0x04")
		},
	}
}

// sendGbkJSON marshals m to JSON, writes it to conn GBK-encoded, and logs the
// invocation. Marshal and write errors are printed (matching the original
// best-effort behavior) rather than aborting.
func sendGbkJSON(conn net.Conn, m interface{}, code string) {
	jsonBytes, err := json.Marshal(m)
	if err != nil {
		fmt.Println(err)
	}
	if _, werr := conn.Write([]byte(DataParser.ParserToGbk(string(jsonBytes)))); werr != nil {
		fmt.Println(werr)
	}
	fmt.Println(code + "调用")
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//456. 132 Pattern
//Given a sequence of n integers a1, a2, ..., an, a 132 pattern is a subsequence ai, aj, ak such that i < j < k and ai < ak < aj. Design an algorithm that takes a list of n numbers as input and checks whether there is a 132 pattern in the list.
//Note: n will be less than 15,000.
//Example 1:
//Input: [1, 2, 3, 4]
//Output: False
//Explanation: There is no 132 pattern in the sequence.
//Example 2:
//Input: [3, 1, 4, 2]
//Output: True
//Explanation: There is a 132 pattern in the sequence: [1, 4, 2].
//Example 3:
//Input: [-1, 3, 2, 0]
//Output: True
//Explanation: There are three 132 patterns in the sequence: [-1, 3, 2], [-1, 3, 0] and [-1, 2, 0].
//func find132pattern(nums []int) bool {
//}
// Time Is Money |
// Copyright 2020 beego
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"github.com/beego/beego/v2/server/web"
)
// main registers the example route and starts the beego HTTP server on :8080.
func main() {
	ctrl := &MainController{}
	// GET http://localhost:8080/hello => ctrl.Hello()
	web.Router("/hello", ctrl, "get:Hello")
	web.Run()
}
// MainController is the example's only controller.
// A controller must implement web.ControllerInterface; embedding
// web.Controller provides the default implementation.
type MainController struct {
	web.Controller
}
// Hello serves GET http://localhost:8080/hello by rendering the hello_world
// template with a sample name value.
func (ctrl *MainController) Hello() {
	ctrl.Data["name"] = "Hello()"
	// Template file: web-example/views/hello_world.html
	ctrl.TplName = "hello_world.html"
	// Render must be invoked explicitly here; don't forget it.
	_ = ctrl.Render()
}
|
package rest
import (
"flag"
"fmt"
"metabnb/lib/configuration"
"net/http"
"github.com/julienschmidt/httprouter"
"metabnb/controllers"
"metabnb/lib/persistence/mongolayer"
)
// Server loads configuration, connects to MongoDB, and serves the REST API.
// It blocks in ListenAndServe and only returns on a startup or serve error.
// Fixes: the previous version discarded the configuration and database errors
// with `_`, and assigned the DB handle to an unused variable (`dbhandler`
// declared and not used — a compile error in Go).
func Server() error {
	confPath := flag.String("conf", `.\configuration\config.json`, "flag to set the path to the configuration json file")
	flag.Parse()
	// Extract configuration; an unreadable config file is fatal.
	config, err := configuration.ExtractConfiguration(*confPath)
	if err != nil {
		return err
	}
	fmt.Println("Connecting to database")
	dbhandler, err := mongolayer.NewMongoDBLayer(config.DBConnection)
	if err != nil {
		return err
	}
	// TODO(review): wire dbhandler into the controllers once they accept a
	// persistence layer; it is currently established but unused.
	_ = dbhandler
	router := httprouter.New()
	router.GET("/", controllers.GetListings)
	return http.ListenAndServe(config.RestfulEndpoint, router)
}
|
package informer
import "context"
// Interface is used to access remote resources.
// It may be implemented by an HTTP API, MySQL, etc.
type Interface[ObjectContent any] interface {
	// Create stores a new object and returns the resulting object.
	Create(ctx context.Context, object Object[ObjectContent]) (Object[ObjectContent], error)
	// List returns all objects.
	List(ctx context.Context) ([]Object[ObjectContent], error)
	// Get returns the object with the given name.
	Get(ctx context.Context, name string) (Object[ObjectContent], error)
	// Update replaces an existing object and returns the resulting object.
	Update(ctx context.Context, object Object[ObjectContent]) (Object[ObjectContent], error)
	// Delete removes the object with the given name.
	Delete(ctx context.Context, name string) error
}
// todo[maybe]: Watchable Interface
|
package sort
/*
Notes:
Shell sort is insertion sort performed over gap-separated groups.
The choice of the gap (increment) sequence drives the algorithm's efficiency.
The Hibbard sequence 1, 3, 7, ..., 2^n - 1 is proven broadly applicable, with O(N^1.5) time complexity.
Overall, shell sort's complexity ranges roughly from O(N^1.3) to O(N^2).
Shell sort is not stable.
*/
// shellSort sorts nums in place using shell sort with the gap sequence
// n/2, n/4, ..., 1.
// Fix: the inner loop previously advanced i by `step`, so at each gap only the
// interleave group starting at index 0 was insertion-sorted; all other groups
// were skipped and correctness relied entirely on the final gap=1 full pass.
// Advancing i by 1 performs the standard gapped insertion sort over every
// group, restoring shell sort's intended behavior and performance.
func shellSort(nums []int) {
	for gap := len(nums) / 2; gap >= 1; gap /= 2 {
		// Gapped insertion sort: each element is sifted back within its own
		// gap-separated group.
		for i := gap; i < len(nums); i++ {
			for j := i - gap; j >= 0; j -= gap {
				if nums[j+gap] >= nums[j] {
					break
				}
				nums[j+gap], nums[j] = nums[j], nums[j+gap]
			}
		}
	}
}
|
package main
import (
"fmt"
"net/http"
"os"
"github.com/czerwonk/ping_exporter/config"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/log"
"gopkg.in/alecthomas/kingpin.v2"
)
// version is the exporter release reported by --version and in log output.
const version string = "0.5.0"

// Command-line flags; defaults mirror the documented exporter behavior.
var (
	showVersion   = kingpin.Flag("version", "Print version information").Default().Bool()
	listenAddress = kingpin.Flag("web.listen-address", "Address on which to expose metrics and web interface").Default(":9427").String()
	metricsPath   = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics").Default("/metrics").String()
	configFile    = kingpin.Flag("config.path", "Path to config file").Default("").String()
	pingInterval  = kingpin.Flag("ping.interval", "Interval for ICMP echo requests").Default("5s").Duration()
	pingTimeout   = kingpin.Flag("ping.timeout", "Timeout for ICMP echo request").Default("4s").Duration()
	pingSize      = kingpin.Flag("ping.size", "Payload size for ICMP echo requests").Default("56").Uint16()
	historySize   = kingpin.Flag("ping.history-size", "Number of results to remember per target").Default("10").Int()
	dnsRefresh    = kingpin.Flag("dns.refresh", "Interval for refreshing DNS records and updating targets accordingly (0 if disabled)").Default("1m").Duration()
	dnsNameServer = kingpin.Flag("dns.nameserver", "DNS server used to resolve hostname of targets").Default("").String()
	logLevel      = kingpin.Flag("log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]").Default("info").String()
	// Fix: flag name was misspelled "ipv4.disalbe". NOTE(review): this is a
	// CLI-visible rename — scripts using the old misspelling must be updated.
	disableIPv4 = kingpin.Flag("ipv4.disable", "Disable IPv4 requests").Default("false").Bool()
	disableIPv6 = kingpin.Flag("ipv6.disable", "Disable IPv6 requests").Default("false").Bool()
	debug       = kingpin.Flag("debug", "Enables debug output").Default("false").Bool()
	targets     = kingpin.Arg("targets", "A list of targets to ping").Strings()
)
// init parses the command line flags before main runs so that every
// package-level consumer sees the resolved flag values.
func init() {
	kingpin.Parse()
}
// main validates flags and config, resolves the ping targets, and starts
// the HTTP metrics server. It exits early for --version and on any
// configuration error.
func main() {
	if *showVersion {
		printVersion()
		os.Exit(0)
	}
	// Configure log verbosity before anything else logs.
	err := log.Logger.SetLevel(log.Base(), *logLevel)
	if err != nil {
		log.Errorln(err)
		os.Exit(1)
	}
	// Normalize the telemetry path: default an empty value to /metrics and
	// force a leading slash so http.Handle gets a valid pattern.
	if mpath := *metricsPath; mpath == "" {
		log.Warnln("web.telemetry-path is empty, correcting to `/metrics`")
		mpath = "/metrics"
		metricsPath = &mpath
	} else if mpath[0] != '/' {
		mpath = "/" + mpath
		metricsPath = &mpath
	}
	cfg, err := loadConfig()
	if err != nil {
		kingpin.FatalUsage("could not load config.path: %v", err)
	}
	// Flags override the IP family toggles and debug regardless of config.
	cfg.IP.IPv4Enabled = !*disableIPv4
	cfg.IP.IPv6Enabled = !*disableIPv6
	cfg.Debug = *debug
	if cfg.Ping.History < 1 {
		kingpin.FatalUsage("ping.history-size must be greater than 0")
	}
	// NOTE(review): the ping.size flag is a Uint16; if cfg.Ping.Size is also
	// unsigned, the "< 0" half of this check can never trigger — confirm.
	if cfg.Ping.Size < 0 || cfg.Ping.Size > 65500 {
		kingpin.FatalUsage("ping.size must be between 0 and 65500")
	}
	if len(cfg.Targets) == 0 {
		kingpin.FatalUsage("No targets specified")
	}
	// Resolve each configured target; failures are logged but do not abort
	// startup, leaving that slice entry nil.
	targets := make([]*target, len(cfg.Targets))
	for i := 0; i < len(cfg.Targets); i++ {
		t := cfg.Targets[i]
		targets[i], err = newTarget(t, cfg)
		if err != nil {
			log.Errorf("could not add target %s: %v", t, err)
		}
	}
	startServer(targets)
}
// printVersion writes the program name, version, authors, and a short
// description to stdout, one item per line.
func printVersion() {
	fmt.Print("ping-exporter\n")
	fmt.Print("Version: " + version + "\n")
	fmt.Print("Author(s): Philip Berndroth, Daniel Czerwonk\n")
	fmt.Print("Metric exporter for go-icmp\n")
}
// startServer registers the landing page and the Prometheus metrics handler,
// then serves HTTP on the configured listen address. It blocks until the
// listener fails, at which point log.Fatal terminates the process.
func startServer(targets []*target) {
	log.Infof("Starting ping exporter (Version: %s)", version)
	// Landing page linking to the metrics path.
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, indexHTML, *metricsPath)
	})
	// A dedicated registry exposes only this exporter's collector, not the
	// default Go/process metrics.
	reg := prometheus.NewRegistry()
	reg.MustRegister(&pingCollector{
		targets: targets})
	h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorLog:      log.NewErrorLogger(),
		ErrorHandling: promhttp.ContinueOnError})
	http.Handle(*metricsPath, h)
	log.Infof("Listening for %s on %s", *metricsPath, *listenAddress)
	log.Fatal(http.ListenAndServe(*listenAddress, nil))
}
// loadConfig returns the effective configuration. Without a config file it
// builds one purely from command line flags; otherwise it parses the YAML
// file and fills any values the file left unset from the flags.
func loadConfig() (*config.Config, error) {
	if *configFile == "" {
		cfg := config.Config{}
		addFlagToConfig(&cfg)
		return &cfg, nil
	}
	f, err := os.Open(*configFile)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	cfg, err := config.FromYAML(f)
	if err != nil {
		return nil, err
	}
	// FIX: the original called addFlagToConfig only when YAML parsing
	// FAILED (potentially on a nil cfg) and skipped it on success, so
	// flags were ignored whenever a config file parsed cleanly — the
	// opposite of addFlagToConfig's documented fill-in-the-gaps purpose.
	addFlagToConfig(cfg)
	return cfg, nil
}
// addFlagToConfig updates cfg with command line flag values, unless the
// config has non-zero values.
// Each field is copied from its flag only when the config left it at the
// zero value, so explicit config-file settings always win over flags.
func addFlagToConfig(cfg *config.Config) {
	if len(cfg.Targets) == 0 {
		cfg.Targets = *targets
	}
	if cfg.Ping.History == 0 {
		cfg.Ping.History = *historySize
	}
	if cfg.Ping.Interval == 0 {
		cfg.Ping.Interval.Set(*pingInterval)
	}
	if cfg.Ping.Timeout == 0 {
		cfg.Ping.Timeout.Set(*pingTimeout)
	}
	if cfg.Ping.Size == 0 {
		cfg.Ping.Size = *pingSize
	}
	if cfg.DNS.Refresh == 0 {
		cfg.DNS.Refresh.Set(*dnsRefresh)
	}
	if cfg.DNS.Nameserver == "" {
		cfg.DNS.Nameserver = *dnsNameServer
	}
}
// indexHTML is the landing page served at "/"; its single %s verb is
// filled with the configured metrics path by startServer.
const indexHTML = `<!doctype html>
<html>
<head>
<meta charset="UTF-8">
<title>ping Exporter (Version ` + version + `)</title>
</head>
<body>
<h1>ping Exporter</h1>
<p><a href="%s">Metrics</a></p>
<h2>More information:</h2>
<p><a href="https://github.com/czerwonk/ping_exporter">github.com/czerwonk/ping_exporter</a></p>
</body>
</html>
`
|
package main
// twoSum returns the 1-based indices of the two entries in the ascending
// sorted slice numbers whose values sum to target. It returns a nil slice
// when numbers is empty or no such pair exists.
func twoSum(numbers []int, target int) []int {
	var res []int
	if len(numbers) == 0 {
		return res
	}
	// Classic two-pointer walk from both ends of the sorted slice.
	lo, hi := 0, len(numbers)-1
	for lo < hi {
		switch s := numbers[lo] + numbers[hi]; {
		case s < target:
			lo++
		case s > target:
			hi--
		default:
			return append(res, lo+1, hi+1)
		}
	}
	return res
}
|
require 'json'
require 'aws-sdk'
# Lambda handler: computes, for the user named in the query string, the
# total time spent per activity tag and a score derived from the tag
# percentages, reading the user's plan from the DynamoDB table 'Data'.
def lambda_handler(event:, context:)
  name = event["queryStringParameters"]["name"]
  # Hash accumulating total time per tag (keys "0".."9").
  hash = Hash.new
  10.times { |i| hash[i.to_s] = 0 }
  dynamoDB = Aws::DynamoDB::Resource.new(region: 'ap-northeast-1')
  table = dynamoDB.table('Data')
  # Fetch the user's record.
  resp = table.get_item({ key: { 'name' => name }})
  # Sum the duration ("duar") of each plan entry into its tag bucket.
  resp["item"]["plan"].each { |a| hash[a["tag"]] += a["duar"].to_i }
  # Percentage of total time per tag.
  # PERF FIX: the grand total was recomputed with inject(:+) on every
  # iteration; it is invariant, so compute it once up front.
  total = hash.values.inject(:+)
  hash_rate = Hash.new
  hash.each { |k, v| hash_rate[k] = v * 100.0 / total }
  # Compute the score.
  point = 0
  hash_rate.each do |k, v| # development ("2") and study ("1") count in full
    if k == "1" || k == "2"
      point += v
    elsif k == "0" || k == "4" || k == "7" # sleep, exercise and meals count half
      point += v / 2
    end
  end
  # # Tag-to-task lookup table (kept for reference):
  # task=["睡眠","勉強","開発","ゲーム","運動","スマホ・PC","テレビ","食事","遊び","その他"]
  # task_hash=Hash.new
  # 10.times{|i|task_hash[i.to_s]=task[i]}
  # The frontend cannot use bare digits as variable names, so build a hash
  # whose keys are prefixed with a letter ("a0".."a9") as a workaround.
  new_hash = Hash.new
  hash.each { |k, v| new_hash["a" + k] = v }
  p new_hash
  # JSON payload: per-tag totals plus the integer score.
  resp = { hash: new_hash, point: point.to_i }
  return { statusCode: 200, body: resp.to_json }
end
|
package tars
import (
"bytes"
"context"
"fmt"
"strings"
"time"
githubql "github.com/shurcooL/githubv4"
"github.com/sirupsen/logrus"
"k8s.io/test-infra/prow/config"
"k8s.io/test-infra/prow/github"
"k8s.io/test-infra/prow/pluginhelp"
"k8s.io/test-infra/prow/pluginhelp/externalplugins"
"k8s.io/test-infra/prow/plugins"
tiexternalplugins "github.com/ti-community-infra/tichi/internal/pkg/externalplugins"
)
const (
	// PluginName is the name of this plugin.
	PluginName = "ti-community-tars"
	// branchRefsPrefix specifies the prefix of branch refs.
	// See also: https://docs.github.com/en/rest/reference/git#references.
	branchRefsPrefix = "refs/heads/"
)

// configInfoAutoUpdatedMessagePrefix prefixes the configured auto-update
// message in the generated plugin help text.
const configInfoAutoUpdatedMessagePrefix = "Auto updated message: "

// searchQueryPrefix is the base GitHub search query: open, unarchived PRs,
// oldest first. Callers append repo/base/label filters.
const searchQueryPrefix = "archived:false is:pr is:open sort:created-asc"

// sleep is an indirection over time.Sleep so tests can stub out delays.
var sleep = time.Sleep

// githubClient is the subset of GitHub API operations this plugin uses,
// narrowed to an interface so tests can supply fakes.
type githubClient interface {
	CreateComment(org, repo string, number int, comment string) error
	BotUserChecker() (func(candidate string) bool, error)
	DeleteStaleComments(org, repo string, number int,
		comments []github.IssueComment, isStale func(github.IssueComment) bool) error
	GetPullRequest(org, repo string, number int) (*github.PullRequest, error)
	GetSingleCommit(org, repo, SHA string) (github.RepositoryCommit, error)
	ListPRCommits(org, repo string, number int) ([]github.RepositoryCommit, error)
	UpdatePullRequestBranch(org, repo string, number int, expectedHeadSha *string) error
	Query(context.Context, interface{}, map[string]interface{}) error
}
// See: https://developer.github.com/v4/object/pullrequest/.
// pullRequest mirrors the subset of the GraphQL PullRequest object this
// plugin needs: identity, author, base branch, the last commit with its
// parents (used to detect an out-of-date PR), and up to 100 labels.
type pullRequest struct {
	Number     githubql.Int
	Repository struct {
		Name  githubql.String
		Owner struct {
			Login githubql.String
		}
	}
	Author struct {
		Login githubql.String
	}
	BaseRef struct {
		Name githubql.String
	}
	// Commits fetches only the most recent commit (last:1) together with
	// its parent OIDs.
	Commits struct {
		Nodes []struct {
			Commit struct {
				OID     githubql.GitObjectID `graphql:"oid"`
				Parents struct {
					Nodes []struct {
						OID githubql.GitObjectID `graphql:"oid"`
					}
				} `graphql:"parents(first:100)"`
			}
		}
	} `graphql:"commits(last:1)"`
	Labels struct {
		Nodes []struct {
			Name githubql.String
		}
	} `graphql:"labels(first:100)"`
}
// searchQuery is the GraphQL query shape for a paginated PR search.
// RateLimit reports query cost, and PageInfo drives cursor pagination in
// search(); results arrive 100 at a time.
type searchQuery struct {
	RateLimit struct {
		Cost      githubql.Int
		Remaining githubql.Int
	}
	Search struct {
		PageInfo struct {
			HasNextPage githubql.Boolean
			EndCursor   githubql.String
		}
		Nodes []struct {
			PullRequest pullRequest `graphql:"... on PullRequest"`
		}
	} `graphql:"search(type: ISSUE, first: 100, after: $searchCursor, query: $query)"`
}
// HelpProvider constructs the PluginHelp for this plugin that takes into
// account enabled repositories. It returns a closure matching the
// externalplugins.ExternalPluginHelpProvider function type.
func HelpProvider(epa *tiexternalplugins.ConfigAgent) externalplugins.ExternalPluginHelpProvider {
	return func(enabledRepos []config.OrgRepo) (*pluginhelp.PluginHelp, error) {
		configInfo := map[string]string{}
		cfg := epa.Config()
		// Build per-repo config info; only repos with a non-empty
		// auto-update message are considered configured and included.
		for _, repo := range enabledRepos {
			opts := cfg.TarsFor(repo.Org, repo.Repo)
			var isConfigured bool
			var configInfoStrings []string
			configInfoStrings = append(configInfoStrings, "The plugin has these configurations:<ul>")
			if len(opts.Message) != 0 {
				isConfigured = true
			}
			configInfoStrings = append(configInfoStrings, "<li>"+configInfoAutoUpdatedMessagePrefix+opts.Message+"</li>")
			configInfoStrings = append(configInfoStrings, "</ul>")
			if isConfigured {
				configInfo[repo.String()] = strings.Join(configInfoStrings, "\n")
			}
		}
		// Generate an example YAML snippet for the help page; failure is
		// non-fatal and only logged.
		yamlSnippet, err := plugins.CommentMap.GenYaml(&tiexternalplugins.Configuration{
			TiCommunityTars: []tiexternalplugins.TiCommunityTars{
				{
					Repos:         []string{"ti-community-infra/test-dev"},
					Message:       "Your PR was out of date, I have automatically updated it for you.",
					OnlyWhenLabel: "status/can-merge",
					ExcludeLabels: []string{"do-not-merge/hold"},
				},
			},
		})
		if err != nil {
			logrus.WithError(err).Warnf("cannot generate comments for %s plugin", PluginName)
		}
		pluginHelp := &pluginhelp.PluginHelp{
			Description: `The tars plugin help you update your out-of-date PR.`,
			Config:      configInfo,
			Snippet:     yamlSnippet,
			Events:      []string{tiexternalplugins.IssueCommentEvent, tiexternalplugins.PushEvent},
		}
		return pluginHelp, nil
	}
}
// HandleIssueCommentEvent handles a GitHub issue comment event and update the PR
// if the issue is a PR based on whether the PR out-of-date.
// Comment events on plain issues are ignored.
func HandleIssueCommentEvent(log *logrus.Entry, ghc githubClient, ice *github.IssueCommentEvent,
	cfg *tiexternalplugins.Configuration) error {
	if !ice.Issue.IsPullRequest() {
		return nil
	}
	// Delay for a few seconds to give GitHub time to add or remove the label,
	// as the comment may be a command related to a PR merge(such as /hold or /merge).
	// See: https://github.com/ti-community-infra/tichi/issues/524.
	sleep(time.Second * 5)
	pr, err := ghc.GetPullRequest(ice.Repo.Owner.Login, ice.Repo.Name, ice.Issue.Number)
	if err != nil {
		return err
	}
	return handlePullRequest(log, ghc, pr, cfg)
}
// handlePullRequest updates a single PR (fetched via the REST API) when it
// carries the configured trigger label, carries none of the exclude labels,
// and its head is not already based on the current tip of the base branch.
func handlePullRequest(log *logrus.Entry, ghc githubClient,
	pr *github.PullRequest, cfg *tiexternalplugins.Configuration) error {
	org := pr.Base.Repo.Owner.Login
	repo := pr.Base.Repo.Name
	number := pr.Number
	updated := false
	tars := cfg.TarsFor(org, repo)
	// Only act on PRs carrying the configured trigger label.
	hasTriggerLabel := false
	for _, label := range pr.Labels {
		if label.Name == tars.OnlyWhenLabel {
			hasTriggerLabel = true
		}
	}
	if !hasTriggerLabel {
		log.Infof("Ignore PR %s/%s#%d without trigger label %s.", org, repo, number, tars.OnlyWhenLabel)
		return nil
	}
	// Skip PRs carrying any configured exclude label (e.g. hold).
	for _, label := range pr.Labels {
		for _, excludeLabel := range tars.ExcludeLabels {
			if label.Name == excludeLabel {
				log.Infof("Ignore PR %s/%s#%d with exclude label %s.", org, repo, number, label.Name)
				return nil
			}
		}
	}
	prCommits, err := ghc.ListPRCommits(org, repo, pr.Number)
	if err != nil {
		return err
	}
	if len(prCommits) == 0 {
		return nil
	}
	// Check if we update the base into PR.
	// The PR is considered up to date when any of its commits has the
	// current base tip as a parent.
	currentBaseCommit, err := ghc.GetSingleCommit(org, repo, pr.Base.Ref)
	if err != nil {
		return err
	}
	for _, prCommit := range prCommits {
		for _, parentCommit := range prCommit.Parents {
			if parentCommit.SHA == currentBaseCommit.SHA {
				updated = true
			}
		}
	}
	if updated {
		return nil
	}
	return takeAction(log, ghc, org, repo, number, pr.User.Login, tars.Message)
}
// HandlePushEvent handles a GitHub push event and update the PR.
// After a branch push it searches the pushed repo for open PRs targeting
// that branch with the trigger label, and updates at most one of them.
func HandlePushEvent(log *logrus.Entry, ghc githubClient, pe *github.PushEvent,
	cfg *tiexternalplugins.Configuration) error {
	// Only branch pushes are relevant; ignore tags and other refs.
	if !strings.HasPrefix(pe.Ref, branchRefsPrefix) {
		log.Infof("Ignoring ref %s push event.", pe.Ref)
		return nil
	}
	org := pe.Repo.Owner.Login
	repo := pe.Repo.Name
	branch := getRefBranch(pe.Ref)
	tars := cfg.TarsFor(org, repo)
	log.Infof("Checking %s/%s/%s PRs.", org, repo, branch)
	// Compose the search query: this repo, this base branch, trigger label
	// present, exclude labels absent.
	var buf bytes.Buffer
	fmt.Fprintf(&buf, " repo:\"%s/%s\"", org, repo)
	fmt.Fprintf(&buf, " base:\"%s\"", branch)
	fmt.Fprintf(&buf, searchQueryPrefix+" label:\"%s\"", tars.OnlyWhenLabel)
	for _, label := range tars.ExcludeLabels {
		fmt.Fprintf(&buf, " -label:\"%s\"", label)
	}
	prs, err := search(context.Background(), log, ghc, buf.String())
	if err != nil {
		return err
	}
	log.Infof("Considering %d PRs.", len(prs))
	for i := range prs {
		pr := prs[i]
		org := string(pr.Repository.Owner.Login)
		repo := string(pr.Repository.Name)
		num := int(pr.Number)
		l := log.WithFields(logrus.Fields{
			"org":  org,
			"repo": repo,
			"pr":   num,
		})
		takenAction, err := handle(l, ghc, &pr, cfg)
		if err != nil {
			l.WithError(err).Error("Error handling PR.")
			continue
		}
		// Only one PR is processed at a time, because even if other PRs are updated,
		// they still need to be queued for another update and merge.
		// To save testing resources we only process one PR at a time.
		if takenAction {
			l.Info("Successfully updated and completed this push event response process.")
			break
		}
	}
	return nil
}
// getRefBranch strips the "refs/heads/" prefix from a Git ref, yielding
// the bare branch name; refs without that prefix are returned unchanged.
func getRefBranch(ref string) string {
	if strings.HasPrefix(ref, branchRefsPrefix) {
		return ref[len(branchRefsPrefix):]
	}
	return ref
}
// HandleAll checks all orgs and repos that enabled this plugin for open PRs to
// determine if the issue is a PR based on whether the PR out-of-date.
// At most one PR per base branch is updated per run to avoid flooding CI.
func HandleAll(log *logrus.Entry, ghc githubClient, config *plugins.Configuration,
	externalConfig *tiexternalplugins.Configuration) error {
	log.Info("Checking all PRs.")
	_, repos := config.EnabledReposForExternalPlugin(PluginName)
	// FIX: the empty check was duplicated (a second identical
	// `if len(repos) == 0` block was dead code); one check suffices.
	if len(repos) == 0 {
		log.Warnf("No repos have been configured for the %s plugin", PluginName)
		return nil
	}
	// Do _not_ parallelize this. It will trigger GitHub's abuse detection and we don't really care anyways except
	// when developing.
	for _, repo := range repos {
		// Construct the query.
		var reposQuery bytes.Buffer
		fmt.Fprint(&reposQuery, searchQueryPrefix)
		slashSplit := strings.Split(repo, "/")
		if n := len(slashSplit); n != 2 {
			log.WithField("repo", repo).Warn("Found repo that was not in org/repo format, ignoring...")
			continue
		}
		org := slashSplit[0]
		repoName := slashSplit[1]
		tars := externalConfig.TarsFor(org, repoName)
		fmt.Fprintf(&reposQuery, " label:\"%s\" repo:\"%s\"", tars.OnlyWhenLabel, repo)
		for _, label := range tars.ExcludeLabels {
			fmt.Fprintf(&reposQuery, " -label:\"%s\"", label)
		}
		query := reposQuery.String()
		prs, err := search(context.Background(), log, ghc, query)
		if err != nil {
			log.WithError(err).Error("Error was encountered when querying GitHub, " +
				"but the remaining repositories will be processed anyway.")
			continue
		}
		log.Infof("Considering %d PRs of %s.", len(prs), repo)
		// branches tracks, per base branch, whether a PR was already updated.
		branches := make(map[string]bool)
		for i := range prs {
			pr := prs[i]
			org := string(pr.Repository.Owner.Login)
			repo := string(pr.Repository.Name)
			num := int(pr.Number)
			base := string(pr.BaseRef.Name)
			l := log.WithFields(logrus.Fields{
				"org":  org,
				"repo": repo,
				"pr":   num,
				"base": base,
			})
			// Process only one PR for per branch at a time, because even if other PRs are updated,
			// they cannot be merged and will generate DOS attacks on the CI system.
			updated, ok := branches[base]
			if ok {
				if updated {
					continue
				}
			} else {
				branches[base] = false
			}
			// Try to update.
			takenAction, err := handle(l, ghc, &pr, externalConfig)
			if err != nil {
				l.WithError(err).Error("The PR update failed, but the remaining PRs will be processed anyway.")
				continue
			}
			if takenAction {
				// Mark this base branch as already having an updated PR.
				branches[base] = takenAction
				l.Info("Successfully updated.")
			}
		}
	}
	return nil
}
// handle updates one PR (from a GraphQL search result) when its single
// fetched commit is not based on the current tip of the base branch.
// It reports whether an update action was taken.
func handle(log *logrus.Entry, ghc githubClient, pr *pullRequest, cfg *tiexternalplugins.Configuration) (bool, error) {
	org := string(pr.Repository.Owner.Login)
	repo := string(pr.Repository.Name)
	number := int(pr.Number)
	updated := false
	tars := cfg.TarsFor(org, repo)
	// Must have exactly the one last commit requested by the query
	// (commits(last:1)).
	// FIX: the original condition `== 0 || != 1` was redundant — zero
	// already satisfies != 1.
	if len(pr.Commits.Nodes) != 1 {
		return false, nil
	}
	// Check if we update the base into PR: the PR is current when the base
	// tip appears among the last commit's parents.
	currentBaseCommit, err := ghc.GetSingleCommit(org, repo, string(pr.BaseRef.Name))
	if err != nil {
		return false, err
	}
	for _, prCommitParent := range pr.Commits.Nodes[0].Commit.Parents.Nodes {
		if string(prCommitParent.OID) == currentBaseCommit.SHA {
			updated = true
		}
	}
	if updated {
		return false, nil
	}
	return true, takeAction(log, ghc, org, repo, number, string(pr.Author.Login), tars.Message)
}
// search runs the given GitHub search query, following cursor pagination
// until exhausted, and returns every matching pull request. It also logs
// the total rate-limit cost of the query.
func search(ctx context.Context, log *logrus.Entry, ghc githubClient, q string) ([]pullRequest, error) {
	var ret []pullRequest
	vars := map[string]interface{}{
		"query":        githubql.String(q),
		"searchCursor": (*githubql.String)(nil),
	}
	var totalCost int
	var remaining int
	for {
		sq := searchQuery{}
		if err := ghc.Query(ctx, &sq, vars); err != nil {
			return nil, err
		}
		totalCost += int(sq.RateLimit.Cost)
		remaining = int(sq.RateLimit.Remaining)
		for _, n := range sq.Search.Nodes {
			ret = append(ret, n.PullRequest)
		}
		if !sq.Search.PageInfo.HasNextPage {
			break
		}
		// Advance the cursor to the next page.
		vars["searchCursor"] = githubql.NewString(sq.Search.PageInfo.EndCursor)
	}
	log.Infof("Search for query \"%s\" cost %d point(s). %d remaining.", q, totalCost, remaining)
	return ret, nil
}
// takeAction updates the PR and comments on it. When a message is
// configured it first prunes stale bot comments containing that message,
// then posts a fresh reply after the branch update.
func takeAction(log *logrus.Entry, ghc githubClient, org, repo string, num int,
	author string, message string) error {
	botUserChecker, err := ghc.BotUserChecker()
	if err != nil {
		return err
	}
	needsReply := len(message) != 0
	if needsReply {
		err = ghc.DeleteStaleComments(org, repo, num, nil, shouldPrune(botUserChecker, message))
		if err != nil {
			return err
		}
	}
	log.Infof("Update PR %s/%s#%d.", org, repo, num)
	err = ghc.UpdatePullRequestBranch(org, repo, num, nil)
	if err != nil {
		return err
	}
	if needsReply {
		// Delay the reply because we may trigger the test in the reply.
		// See: https://github.com/ti-community-infra/tichi/issues/181.
		sleep(time.Second * 5)
		msg := tiexternalplugins.FormatSimpleResponse(author, message)
		return ghc.CreateComment(org, repo, num, msg)
	}
	return nil
}
// shouldPrune returns a predicate matching comments posted by the bot that
// contain the configured auto-update message; such comments are considered
// stale and eligible for deletion before a new one is posted.
func shouldPrune(isBot func(string) bool, message string) func(github.IssueComment) bool {
	return func(ic github.IssueComment) bool {
		return isBot(ic.User.Login) &&
			strings.Contains(ic.Body, message)
	}
}
|
package main
import (
"fmt"
)
// main demonstrates functions as first-class values: foo returns a string
// directly, while bar returns a closure that is then invoked for its value.
func main() {
	s := foo()
	fmt.Printf("Type of value returned by foo() :: %T\n", s)
	fmt.Println("String returned by foo() ::", s)
	f := bar()
	fmt.Printf("Type of value returned by bar() :: %T\n", f)
	x := f()
	fmt.Println("Value returned by the function returned by bar() :: ", x)
}
// foo returns a fixed greeting string.
func foo() string {
	const greeting = "Hello from foo()"
	return greeting
}
// bar() returns a function that returns an integer; the returned closure
// always yields 451.
func bar() func() int {
	answer := func() int { return 451 }
	return answer
}
|
// Basic Layout Example
// http://qt-project.org/doc/qt-5.1/qtquickcontrols/basiclayouts.html
package main
import (
"fmt"
"github.com/niemeyer/qml"
"os"
)
// main loads the basiclayout.qml file, exposes a Control instance to QML
// under the name "ctrl", shows the window, and blocks until it closes.
func main() {
	qml.Init(nil)
	engine := qml.NewEngine()
	// Exit the process when QML signals quit.
	engine.On("quit", func() {
		fmt.Println("quit")
		os.Exit(0)
	})
	component, err := engine.LoadFile("basiclayout.qml")
	if err != nil {
		fmt.Println(err)
		return
	}
	ctrl := Control{Polje1: "Hello from Go!", Polje2: "tekst2"}
	context := engine.Context()
	// Make ctrl accessible from QML as the "ctrl" variable.
	context.SetVar("ctrl", &ctrl)
	window := component.CreateWindow(nil)
	ctrl.Root = window.Root()
	window.Show()
	window.Wait()
}
// Control is the Go-side view model exposed to QML as "ctrl".
// ("Polje" is presumably "field" in the author's language — TODO confirm.)
type Control struct {
	Root   qml.Object // root object of the created window
	Polje1 string     // initial text for the first field
	Polje2 string     // initial text for the second field
}
// ChangedPolje1 is invoked from QML when the first field changes; it logs
// the field's current "text" property.
func (ctrl *Control) ChangedPolje1(text qml.Object) {
	fmt.Printf("changed %#v\n", text.String("text"))
}
|
package db
import (
"runtime"
"sync"
"sync/atomic"
)
// RWLocker is the union of sync.Locker and the read-lock half of
// sync.RWMutex, letting callers swap between locking strategies
// (no-op, spinning, or mutex-backed) behind one interface.
type RWLocker interface {
	sync.Locker
	RLock()
	RUnlock()
}
// brokenLocker implements RWLocker with no-op methods, i.e. it provides no
// synchronization at all — presumably for single-threaded use or
// benchmarking the locking overhead; confirm before using concurrently.
type brokenLocker struct{}

// newBrokenLocker returns a no-op locker.
func newBrokenLocker() brokenLocker { return brokenLocker{} }

func (brokenLocker) Lock()    {}
func (brokenLocker) Unlock()  {}
func (brokenLocker) RLock()   {}
func (brokenLocker) RUnlock() {}
// eagerLocker is a spin lock built on one atomic word: val is 0 when
// unlocked and 1 when locked. Readers and writers are treated the same,
// so the R-variants simply alias Lock/Unlock.
type eagerLocker struct {
	val uint32
}

// newEagerLocker returns an unlocked spin lock.
func newEagerLocker() *eagerLocker { return &eagerLocker{} }

// Lock busy-waits, yielding the processor between attempts, until it
// flips val from 0 to 1.
func (el *eagerLocker) Lock() {
	for {
		if atomic.CompareAndSwapUint32(&el.val, 0, 1) {
			return
		}
		runtime.Gosched()
	}
}

// Unlock releases the lock by resetting val to 0.
func (el *eagerLocker) Unlock() {
	atomic.StoreUint32(&el.val, 0)
}

// RLock acquires the same exclusive spin lock as Lock.
func (el *eagerLocker) RLock() { el.Lock() }

// RUnlock releases the lock acquired by RLock.
func (el *eagerLocker) RUnlock() { el.Unlock() }
// locker implements RWLocker on top of a plain sync.Mutex. Note that
// RLock/RUnlock take the same exclusive mutex — readers do not run
// concurrently with each other as they would with a sync.RWMutex.
type locker struct {
	mutex sync.Mutex
}

// newLocker returns an unlocked mutex-backed locker.
func newLocker() *locker { return &locker{} }

func (l *locker) Lock() {
	l.mutex.Lock()
}

func (l *locker) Unlock() {
	l.mutex.Unlock()
}

// RLock acquires the exclusive mutex (no shared read mode).
func (l *locker) RLock() {
	l.mutex.Lock()
}

// RUnlock releases the exclusive mutex.
func (l *locker) RUnlock() {
	l.mutex.Unlock()
}
|
package cmd
import (
"flag"
"fmt"
"os"
"strings"
"github.com/mmbros/quote/internal/quote"
"github.com/mmbros/quote/pkg/simpleflag"
)
const (
	// defaultConfigType is assumed when the config file has no extension.
	defaultConfigType = "yaml"
	// defaultMode is the default result mode ("1": first success or last error).
	defaultMode = "1"
)

// appArgs collects every command line option of all sub-commands; each
// simpleflag value records both the parsed value and whether the flag was
// explicitly passed.
type appArgs struct {
	config     simpleflag.String
	configType simpleflag.String
	database   simpleflag.String
	dryrun     simpleflag.Bool
	isins      simpleflag.Strings
	proxy      simpleflag.String
	sources    simpleflag.Strings
	workers    simpleflag.Int
	mode       simpleflag.String
}
// Usage texts printed by the app and its sub-commands.
const (
	usageApp = `Usage:
    quote <command> [options]

Available Commands:
  get        Get the quotes of the specified isins
  sources    Show available sources
  tor        Checks if Tor network will be used
`
	usageGet = `Usage:
    quote get [options]

Options:
    -c, --config      path     config file (default is $HOME/.quote.yaml)
        --config-type string   used if config file does not have the extension in the name;
                               accepted values are: YAML, TOML and JSON
    -i, --isins       strings  list of isins to get the quotes
    -n, --dry-run              perform a trial run with no request/updates made
    -p, --proxy       url      default proxy
    -s, --sources     strings  list of sources to get the quotes from
    -w, --workers     int      number of workers (default 1)
    -d, --database    dns      sqlite3 database used to save the quotes
    -m, --mode        char     result mode: "1" first success or last error (default)
                                            "U" all errors until first success
                                            "A" all
`
	usageTor = `Usage:
    quote tor [options]

Checks if Tor network will be used to get the quote.

To use the Tor network the proxy must be defined through:
1. proxy argument parameter
2. proxy config file parameter
3. HTTP_PROXY, HTTPS_PROXY and NOPROXY enviroment variables.

Options:
    -c, --config      path     config file (default is $HOME/.quote.yaml)
        --config-type string   used if config file does not have the extension in the name;
                               accepted values are: YAML, TOML and JSON
    -p, --proxy       url      proxy to test the Tor network
`
	usageSources = `Usage:
    quote sources

Prints list of available sources.
`
)
// initCommandGet builds the "get" (alias "g") sub-command, binding its
// flags to the shared args struct.
func initCommandGet(args *appArgs) *simpleflag.Command {
	flags := []*simpleflag.Flag{
		{Value: &args.config, Names: "c,config"},
		{Value: &args.configType, Names: "config-type"},
		{Value: &args.database, Names: "d,database"},
		{Value: &args.dryrun, Names: "n,dryrun,dry-run"},
		{Value: &args.isins, Names: "i,isins"},
		{Value: &args.proxy, Names: "p,proxy"},
		{Value: &args.sources, Names: "s,sources"},
		{Value: &args.workers, Names: "w,workers"},
		{Value: &args.mode, Names: "m,mode"},
	}
	cmd := &simpleflag.Command{
		Names: "get,g",
		Usage: usageGet,
		Flags: flags,
	}
	return cmd
}
// initCommandTor builds the "tor" (alias "t") sub-command, binding its
// flags to the shared args struct.
func initCommandTor(args *appArgs) *simpleflag.Command {
	flags := []*simpleflag.Flag{
		{Value: &args.config, Names: "c,config"},
		// FIX: the long flag name was misspelled "config-ype", so the
		// documented "quote tor --config-type ..." option did not work
		// (and was inconsistent with the "get" command).
		{Value: &args.configType, Names: "config-type"},
		{Value: &args.proxy, Names: "p,proxy"},
	}
	cmd := &simpleflag.Command{
		Names: "tor,t",
		Usage: usageTor,
		Flags: flags,
	}
	return cmd
}
// initCommandSources builds the "sources" (alias "s") sub-command.
// args is unused (the command takes no flags) but kept so all
// initCommand* constructors share the same signature.
func initCommandSources(args *appArgs) *simpleflag.Command {
	cmd := &simpleflag.Command{
		Names: "sources,s",
		Usage: usageSources,
	}
	return cmd
}
// initApp assembles the top-level CLI application from the three
// sub-commands, exiting on parse errors (flag.ExitOnError).
func initApp(args *appArgs) *simpleflag.App {
	app := &simpleflag.App{
		ErrorHandling: flag.ExitOnError,
		Name:          "quote",
		Usage:         usageApp,
		Commands: []*simpleflag.Command{
			initCommandGet(args),
			initCommandTor(args),
			initCommandSources(args),
		},
	}
	return app
}
// execTor checks whether the configured proxy routes traffic through the
// Tor network and prints the check result. A non-nil error from TorCheck
// is returned to the caller; the ok/ko message is only printed on success.
func execTor(args *appArgs, cfg *Config) error {
	if args.config.Passed {
		fmt.Printf("Using configuration file %q\n", args.config.Value)
	}
	proxy := cfg.Proxy
	// proxy = "x://\\"
	fmt.Printf("Checking Tor connection with proxy %q\n", proxy)
	_, msg, err := quote.TorCheck(proxy)
	if err == nil {
		// ok checking Tor network:
		// prints the result: it can be ok or ko
		fmt.Println(msg)
	}
	return err
}
// execGet retrieves the quotes for the configured source/isin pairs and
// stores them in the configured database. With --dry-run it only prints
// what would be done.
func execGet(args *appArgs, cfg *Config) error {
	sis := cfg.SourceIsinsList()
	if args.dryrun.Value {
		// fmt.Printf("ARGS: %v\n", args)
		if args.config.Passed {
			fmt.Printf("Using configuration file %q\n", args.config.Value)
		}
		if cfg.Database != "" {
			fmt.Printf("Database: %q\n", cfg.Database)
		}
		// if cfg.Mode != "" {
		// 	fmt.Printf("Mode: %q\n", cfg.Mode)
		// }
		fmt.Printf("Mode: %q (%d)\n", cfg.Mode, cfg.mode)
		fmt.Println("Tasks:", jsonString(sis))
		return nil
	}
	// do retrieves the quotes
	return quote.Get(sis, cfg.Database, cfg.mode)
}
// execSources prints the comma-separated list of available quote sources,
// each wrapped in double quotes. args and cfg are unused but keep the
// function signature uniform with the other exec* commands.
func execSources(args *appArgs, cfg *Config) error {
	joined := strings.Join(quote.Sources(), `", "`)
	fmt.Printf("Available sources: \"%s\"\n", joined)
	return nil
}
// Execute is the main function
// It parses os.Args, loads the configuration, dispatches to the selected
// sub-command, and exits with status 1 on any error.
func Execute() {
	arguments := os.Args[1:]
	// arguments := strings.Split("get -c user.quote.yaml -s morningstarit:33", " ")
	var (
		app  *simpleflag.App
		cfg  *Config // TODO rename in appConfig
		args *appArgs
	)
	args = &appArgs{}
	app = initApp(args)
	// NOTE: if app.Parse ha success, as a side effect
	// the args struct is initialized
	err := app.Parse(arguments)
	// get configuration
	if err == nil {
		cfg, err = GetConfig(args, quote.Sources())
	}
	if err == nil {
		// Dispatch on the canonical command name resolved by the parser.
		switch app.CommandName() {
		case "get":
			err = execGet(args, cfg)
		case "tor":
			err = execTor(args, cfg)
		case "sources":
			err = execSources(args, cfg)
		}
	}
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
|
package main
import (
"bytes"
"fmt"
"github.com/astaxie/beego"
"github.com/astaxie/beego/plugins/cors"
"time"
)
// Bc is the global block chain instance shared with the HTTP handlers.
var Bc BlockChain

// Sleeptime is the record-polling interval. NOTE(review): it is declared
// as a Duration holding the bare count 60 and multiplied by time.Second at
// the call site; `60 * time.Second` here would be clearer — confirm no
// other call site uses it as a real Duration.
var Sleeptime time.Duration = 60

// Id is the global record-index counter, starting from zero.
var Id uint64 = 0
// main creates the block chain, starts the background block producer, and
// serves the beego HTTP API (with permissive CORS) on localhost:8000.
func main() {
	Bc = *CreateBlockChain()
	// Periodically packages pending records into new blocks.
	go MakeRecordBlock(&Bc)
	defer Bc.db.Close()
	beego.InsertFilter("*", beego.BeforeRouter, cors.Allow(&cors.Options{
		AllowAllOrigins:  true,
		AllowMethods:     []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
		AllowHeaders:     []string{"Origin", "Authorization", "Access-Control-Allow-Origin", "Access-Control-Allow-Headers", "Content-Type"},
		ExposeHeaders:    []string{"Content-Length", "Access-Control-Allow-Origin", "Access-Control-Allow-Headers", "Content-Type"},
		AllowCredentials: true,
	}))
	beego.Run("localhost:8000")
}
// MakeRecordBlock polls the pending-record store forever, packaging newly
// found records into blocks on bc and persisting the consumed-record index
// after each block. It is meant to run in its own goroutine.
func MakeRecordBlock(bc *BlockChain) {
	// Resume from the last persisted record index.
	var id uint64 = GetIdindex()
	// Read at most recordStep records per poll.
	var recordStep uint64 = 2
	fmt.Printf("id%v", id)
	for {
		records := FindeRecords(id, recordStep)
		// FIX: the local was named "len", shadowing the builtin len().
		n := len(records)
		if n > 0 && n <= 2 {
			id += uint64(n)
			bc.AddBlock(records)
			fmt.Printf("%v\n", id)
			SaveIdindex(id)
		}
		fmt.Printf("%vwawawawa\n", id)
		time.Sleep(Sleeptime * time.Second)
	}
}
// display walks the chain from the newest block back to the genesis block
// (identified by an empty PreBlockHash) and prints each block's hashes,
// timestamp, and records for debugging.
func display(bc *BlockChain) {
	it := bc.NewIterator()
	for {
		block := it.Next()
		fmt.Printf("++++++++++++++++++++++++++++\n")
		fmt.Printf("PreBlockHash:%x\n", block.PreBlockHash)
		fmt.Printf("MerKleRoot::%x\n", block.MerKleRoot)
		timeFormat := time.Unix(int64(block.TimeStamp), 0).Format("2006-01-02 15:04:05")
		fmt.Printf("TimeStamp:%s\n", timeFormat)
		fmt.Printf("Hash::%x\n", block.Hash)
		records := block.Records
		for _, v := range records {
			fmt.Printf("%v %v %v %v\n", v.Id, v.UserId, v.Type, v.Good)
		}
		// Genesis block reached: its previous-hash field is empty.
		if bytes.Equal(block.PreBlockHash, []byte{}) {
			fmt.Printf("遍历结果\n")
			break
		}
	}
}
package main
import (
"bytes"
"testing"
)
// TestPKCS7Pad checks PKCS#7 padding for partially-filled and exactly-full
// inputs: an input that already fills the last block must gain a whole
// extra block of padding bytes.
func TestPKCS7Pad(t *testing.T) {
	for _, tc := range []struct {
		in        []byte
		blockSize int
		want      []byte
	}{
		{[]byte{0}, 3, []byte{0, 2, 2}},
		{[]byte{0, 0}, 3, []byte{0, 0, 1}},
		{[]byte{0, 0, 0}, 3, []byte{0, 0, 0, 3, 3, 3}},
	} {
		if got := PKCS7Pad(tc.in, tc.blockSize); !bytes.Equal(got, tc.want) {
			t.Errorf("got %v, want %v", got, tc.want)
		}
	}
}
|
package service
// queue is the default Queue implementation; it delegates every write to
// the injected QueueWriter.
type queue struct {
	Writer QueueWriter
}

// Queue accepts events for publication.
type Queue interface {
	Write(event Event) error
}

// QueueWriter is the underlying transport that actually publishes events;
// abstracted as an interface so tests can supply fakes.
type QueueWriter interface {
	Publish(event Event) error
}
// NewQueue wraps writer in a Queue implementation.
func NewQueue(writer QueueWriter) Queue {
	q := queue{Writer: writer}
	return &q
}

// Write publishes event through the underlying writer and returns its error.
func (qs *queue) Write(event Event) error {
	return qs.Writer.Publish(event)
}
|
package trace
import (
"github.com/stretchr/testify/assert"
"testing"
)
// TestTrace checks the formatted output of Trace for several parameter
// types (string, int, map, slice) and for variadic arguments, including
// the caller name embedded in the message.
func TestTrace(t *testing.T) {
	assert.Equal(t, "message=[test string] call=[github.com/atsttk84/goutils/trace.TestTrace] parameters=[string]", Trace("test string", "string"))
	assert.Equal(t, "message=[test int] call=[github.com/atsttk84/goutils/trace.TestTrace] parameters=[1]", Trace("test int", 1))
	assert.Equal(t, "message=[test map] call=[github.com/atsttk84/goutils/trace.TestTrace] parameters=[map[k:1]]", Trace("test map", map[string]int{"k": 1}))
	assert.Equal(t, "message=[test slice] call=[github.com/atsttk84/goutils/trace.TestTrace] parameters=[[1 2 3]]", Trace("test slice", []int{1, 2, 3}))
	assert.Equal(t, "message=[test variadic argument] call=[github.com/atsttk84/goutils/trace.TestTrace] parameters=[1 2 3]", Trace("test variadic argument", 1, 2, 3))
}
// TestTraceLine checks that TraceLine appends the caller's file and line.
// NOTE(review): the expected value hard-codes the absolute GOPATH path and
// line number 17, so this test breaks if the file moves or lines shift —
// consider matching with a regexp instead.
func TestTraceLine(t *testing.T) {
	assert.Equal(t, "message=[test string] call=[github.com/atsttk84/goutils/trace.TestTraceLine] parameters=[string] line=[/go/src/github.com/atsttk84/goutils/trace/trace_test.go:17]", TraceLine("test string", "string"))
}
|
package ansi
// Mode is an ANSI terminal mode constant.
type Mode uint64

// Mode bit fields
const (
	// ModePrivate marks a mode as a DEC private mode; Set/Reset strip this
	// bit and emit the private-mode control sequence instead.
	ModePrivate Mode = 1 << 63
)
// Set returns a control sequence for enabling the mode.
// Private modes (ModePrivate bit set) use the private SM variant with the
// marker bit masked off.
func (mode Mode) Set() Seq {
	if mode&ModePrivate == 0 {
		return SM.WithInts(int(mode))
	}
	return SMprivate.WithInts(int(mode & ^ModePrivate))
}
// Reset returns a control sequence for disabling the mode.
// Private modes (ModePrivate bit set) use the private RM variant with the
// marker bit masked off.
func (mode Mode) Reset() Seq {
	if mode&ModePrivate == 0 {
		return RM.WithInts(int(mode))
	}
	return RMprivate.WithInts(int(mode & ^ModePrivate))
}
// private mode constants
// TODO more coverage
const (
	ModeMouseX10 Mode = 9
)

// xterm mode constants; see http://invisible-island.net/xterm/ctlseqs/ctlseqs.html.
// All are DEC private modes, hence the ModePrivate bit.
const (
	ModeMouseVt200          = ModePrivate | 1000
	ModeMouseVt200Highlight = ModePrivate | 1001
	ModeMouseBtnEvent       = ModePrivate | 1002
	ModeMouseAnyEvent       = ModePrivate | 1003
	ModeMouseFocusEvent     = ModePrivate | 1004
	ModeMouseExt            = ModePrivate | 1005
	ModeMouseSgrExt         = ModePrivate | 1006
	ModeMouseUrxvtExt       = ModePrivate | 1015
	ModeAlternateScroll     = ModePrivate | 1007
	ModeMetaReporting       = ModePrivate | 1036
	ModeAlternateScreen     = ModePrivate | 1049
	ModeBracketedPaste      = ModePrivate | 2004
)

// TODO http://www.disinterest.org/resource/MUD-Dev/1997q1/000244.html and others
const (
	// ShowCursor (DECTCEM) controls cursor visibility.
	ShowCursor = ModePrivate | 25
)
|
package main
import (
"encoding/json"
"flag"
"math/rand"
"net/http"
"time"
"github.com/fblanco/talks/rtb/bid"
)
// port is the HTTP listen port, settable via -port.
var port = flag.String("port", "9090", "http server port")

// main seeds the RNG, parses flags, and serves the /bid endpoint until the
// listener fails.
func main() {
	rand.Seed(time.Now().UnixNano())
	flag.Parse()
	http.HandleFunc("/bid", bidder)
	// FIX: the ListenAndServe error (e.g. port already in use) was
	// silently discarded, letting the program exit 0 with no server ever
	// running; surface it instead.
	if err := http.ListenAndServe(":"+*port, nil); err != nil {
		panic(err)
	}
}
// bidder simulates an RTB bidder: it sleeps a random amount (< 100ms) and
// responds with a JSON Bid carrying a random CPM and the elapsed handling
// time in milliseconds.
func bidder(w http.ResponseWriter, r *http.Request) {
	t := time.Now()
	time.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond)
	json.NewEncoder(w).Encode(bid.Bid{BidderName: "bidder-" + *port, CPM: rand.Float64() * 10, ElapsedTime: time.Since(t) / time.Millisecond})
}
|
package proxy
import (
"context"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/pomerium/pomerium/config"
hpke_handlers "github.com/pomerium/pomerium/pkg/hpke/handlers"
)
// testOptions builds a minimal valid proxy configuration for tests: one
// policy, fixed shared/cookie secrets, and an httptest server standing in
// for the authenticate service's HPKE public key endpoint (cleaned up when
// the test ends).
func testOptions(t *testing.T) *config.Options {
	t.Helper()
	opts := config.NewDefaultOptions()
	to, err := config.ParseWeightedUrls("https://example.example")
	require.NoError(t, err)

	testPolicy := config.Policy{From: "https://corp.example.example", To: to}
	opts.Policies = []config.Policy{testPolicy}
	opts.InsecureServer = true
	opts.CookieSecure = false
	opts.Services = config.ServiceAll
	opts.SharedKey = "80ldlrU2d7w+wVpKNfevk6fmb8otEx6CqOfshj2LwhQ="
	opts.CookieSecret = "OromP1gurwGWjQPYb1nNgSxtbVB5NnLzX6z5WOKr0Yw="
	hpkePrivateKey, err := opts.GetHPKEPrivateKey()
	require.NoError(t, err)
	authnSrv := httptest.NewServer(hpke_handlers.HPKEPublicKeyHandler(hpkePrivateKey.PublicKey()))
	t.Cleanup(authnSrv.Close)
	opts.AuthenticateURLString = authnSrv.URL
	require.NoError(t, opts.Validate())
	return opts
}
// TestOptions_Validate exercises ValidateOptions against good and
// deliberately broken option sets.
// NOTE(review): badAuthURL, authenticateBadScheme, and missingPolicy are
// constructed but never added to the tests table below — confirm whether
// these cases were meant to be covered.
func TestOptions_Validate(t *testing.T) {
	t.Parallel()
	good := testOptions(t)
	badAuthURL := testOptions(t)
	badAuthURL.AuthenticateURLString = "BAD_URL"
	authenticateBadScheme := testOptions(t)
	authenticateBadScheme.AuthenticateURLString = "authenticate.corp.beyondperimeter.com"
	invalidCookieSecret := testOptions(t)
	invalidCookieSecret.CookieSecret = "OromP1gurwGWjQPYb1nNgSxtbVB5NnLzX6z5WOKr0Yw^"
	shortCookieLength := testOptions(t)
	shortCookieLength.CookieSecret = "gN3xnvfsAwfCXxnJorGLKUG4l2wC8sS8nfLMhcStPg=="
	badSharedKey := testOptions(t)
	badSharedKey.Services = "proxy"
	badSharedKey.SharedKey = ""
	sharedKeyBadBas64 := testOptions(t)
	sharedKeyBadBas64.SharedKey = "%(*@389"
	missingPolicy := testOptions(t)
	missingPolicy.Policies = []config.Policy{}

	tests := []struct {
		name    string
		o       *config.Options
		wantErr bool
	}{
		{"good - minimum options", good, false},
		{"nil options", &config.Options{}, true},
		{"invalid cookie secret", invalidCookieSecret, true},
		{"short cookie secret", shortCookieLength, true},
		{"no shared secret", badSharedKey, true},
		{"shared secret bad base64", sharedKeyBadBas64, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			o := tt.o
			if err := ValidateOptions(o); (err != nil) != tt.wantErr {
				t.Errorf("Options.Validate() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
// TestNew exercises proxy construction from both valid and invalid options,
// checking that a proxy struct is only returned for valid configuration.
func TestNew(t *testing.T) {
	t.Parallel()
	good := testOptions(t)
	shortCookieLength := testOptions(t)
	shortCookieLength.CookieSecret = "gN3xnvfsAwfCXxnJorGLKUG4l2wC8sS8nfLMhcStPg=="
	badCookie := testOptions(t)
	badCookie.CookieName = ""
	tests := []struct {
		name      string
		opts      *config.Options
		wantProxy bool
		wantErr   bool
	}{
		{"good", good, true, false},
		{"empty options", &config.Options{}, false, true},
		{"short secret/validate sanity check", shortCookieLength, false, true},
		// The cookie store requires a non-empty cookie name.
		{"empty cookie name", badCookie, false, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := New(&config.Config{Options: tt.opts})
			if (err != nil) != tt.wantErr {
				t.Errorf("New() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got == nil && tt.wantProxy {
				t.Errorf("New() expected valid proxy struct")
			}
		})
	}
}
func Test_UpdateOptions(t *testing.T) {
t.Parallel()
good := testOptions(t)
to, err := config.ParseWeightedUrls("http://foo.example")
require.NoError(t, err)
newPolicy := config.Policy{To: to, From: "http://bar.example"}
newPolicies := testOptions(t)
newPolicies.Policies = []config.Policy{newPolicy}
require.NoError(t, newPolicy.Validate())
toFoo, err := config.ParseWeightedUrls("http://foo.example")
require.NoError(t, err)
badPolicyURL := config.Policy{To: []config.WeightedURL{{URL: url.URL{Scheme: "http", Path: "/"}}}, From: "http://bar.example"}
badNewPolicy := testOptions(t)
badNewPolicy.Policies = []config.Policy{
badPolicyURL,
}
disableTLSPolicy := config.Policy{To: toFoo, From: "http://bar.example", TLSSkipVerify: true}
disableTLSPolicies := testOptions(t)
disableTLSPolicies.Policies = []config.Policy{disableTLSPolicy}
customCAPolicies := testOptions(t)
customCAPolicies.Policies = []config.Policy{{To: toFoo, From: "http://bar.example", TLSCustomCA: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURlVENDQW1HZ0F3SUJBZ0lKQUszMmhoR0JIcmFtTUEwR0NTcUdTSWIzRFFFQkN3VUFNR0l4Q3pBSkJnTlYKQkFZVEFsVlRNUk13RVFZRFZRUUlEQXBEWVd4cFptOXlibWxoTVJZd0ZBWURWUVFIREExVFlXNGdSbkpoYm1OcApjMk52TVE4d0RRWURWUVFLREFaQ1lXUlRVMHd4RlRBVEJnTlZCQU1NRENvdVltRmtjM05zTG1OdmJUQWVGdzB4Ck9UQTJNVEl4TlRNeE5UbGFGdzB5TVRBMk1URXhOVE14TlRsYU1HSXhDekFKQmdOVkJBWVRBbFZUTVJNd0VRWUQKVlFRSURBcERZV3hwWm05eWJtbGhNUll3RkFZRFZRUUhEQTFUWVc0Z1JuSmhibU5wYzJOdk1ROHdEUVlEVlFRSwpEQVpDWVdSVFUwd3hGVEFUQmdOVkJBTU1EQ291WW1Ga2MzTnNMbU52YlRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCCkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU1JRTdQaU03Z1RDczloUTFYQll6Sk1ZNjF5b2FFbXdJclg1bFo2eEt5eDIKUG16QVMyQk1UT3F5dE1BUGdMYXcrWExKaGdMNVhFRmRFeXQvY2NSTHZPbVVMbEEzcG1jY1lZejJRVUxGUnRNVwpoeWVmZE9zS25SRlNKaUZ6YklSTWVWWGswV3ZvQmoxSUZWS3RzeWpicXY5dS8yQ1ZTbmRyT2ZFazBURzIzVTNBCnhQeFR1VzFDcmJWOC9xNzFGZEl6U09jaWNjZkNGSHBzS09vM1N0L3FiTFZ5dEg1YW9oYmNhYkZYUk5zS0VxdmUKd3c5SGRGeEJJdUdhK1J1VDVxMGlCaWt1c2JwSkhBd25ucVA3aS9kQWNnQ3NrZ2paakZlRVU0RUZ5K2IrYTFTWQpRQ2VGeHhDN2MzRHZhUmhCQjBWVmZQbGtQejBzdzZsODY1TWFUSWJSeW9VQ0F3RUFBYU15TURBd0NRWURWUjBUCkJBSXdBREFqQmdOVkhSRUVIREFhZ2d3cUxtSmhaSE56YkM1amIyMkNDbUpoWkhOemJDNWpiMjB3RFFZSktvWkkKaHZjTkFRRUxCUUFEZ2dFQkFJaTV1OXc4bWdUNnBwQ2M3eHNHK0E5ZkkzVzR6K3FTS2FwaHI1bHM3MEdCS2JpWQpZTEpVWVpoUGZXcGgxcXRra1UwTEhGUG04M1ZhNTJlSUhyalhUMFZlNEt0TzFuMElBZkl0RmFXNjJDSmdoR1luCmp6dzByeXpnQzRQeUZwTk1uTnRCcm9QdS9iUGdXaU1nTE9OcEVaaGlneDRROHdmMVkvVTlzK3pDQ3hvSmxhS1IKTVhidVE4N1g3bS85VlJueHhvNk56NVpmN09USFRwTk9JNlZqYTBCeGJtSUFVNnlyaXc5VXJnaWJYZk9qM2o2bgpNVExCdWdVVklCMGJCYWFzSnNBTUsrdzRMQU52YXBlWjBET1NuT1I0S0syNEowT3lvRjVmSG1wNTllTTE3SW9GClFxQmh6cG1RVWd1bmVjRVc4QlRxck5wRzc5UjF1K1YrNHd3Y2tQYz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="}}
badCustomCAPolicies := testOptions(t)
badCustomCAPolicies.Policies = []config.Policy{{To: toFoo, From: "http://bar.example", TLSCustomCA: "=@@"}}
goodClientCertPolicies := testOptions(t)
goodClientCertPolicies.Policies = []config.Policy{{To: toFoo, From: "http://bar.example", TLSClientKey: "LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcGdJQkFBS0NBUUVBNjdLanFtUVlHcTBNVnRBQ1ZwZUNtWG1pbmxRYkRQR0xtc1pBVUV3dWVIUW5ydDNXCnR2cERPbTZBbGFKTVVuVytIdTU1ampva2FsS2VWalRLbWdZR2JxVXpWRG9NYlBEYUhla2x0ZEJUTUdsT1VGc1AKNFVKU0RyTzR6ZE4rem80MjhUWDJQbkcyRkNkVktHeTRQRThpbEhiV0xjcjg3MVlqVjUxZnc4Q0xEWDlQWkpOdQo4NjFDRjdWOWlFSm02c1NmUWxtbmhOOGozK1d6VmJQUU55MVdzUjdpOWU5ajYzRXFLdDIyUTlPWEwrV0FjS3NrCm9JU21DTlZSVUFqVThZUlZjZ1FKQit6UTM0QVFQbHowT3A1Ty9RTi9NZWRqYUY4d0xTK2l2L3p2aVM4Y3FQYngKbzZzTHE2Rk5UbHRrL1FreGVDZUtLVFFlLzNrUFl2UUFkbmw2NVFJREFRQUJBb0lCQVFEQVQ0eXN2V2pSY3pxcgpKcU9SeGFPQTJEY3dXazJML1JXOFhtQWhaRmRTWHV2MkNQbGxhTU1yelBmTG41WUlmaHQzSDNzODZnSEdZc3pnClo4aWJiYWtYNUdFQ0t5N3lRSDZuZ3hFS3pRVGpiampBNWR3S0h0UFhQUnJmamQ1Y2FMczVpcDcxaWxCWEYxU3IKWERIaXUycnFtaC9kVTArWGRMLzNmK2VnVDl6bFQ5YzRyUm84dnZueWNYejFyMnVhRVZ2VExsWHVsb2NpeEVrcgoySjlTMmxveWFUb2tFTnNlMDNpSVdaWnpNNElZcVowOGJOeG9IWCszQXVlWExIUStzRkRKMlhaVVdLSkZHMHUyClp3R2w3YlZpRTFQNXdiQUdtZzJDeDVCN1MrdGQyUEpSV3Frb2VxY3F2RVdCc3RFL1FEcDFpVThCOHpiQXd0Y3IKZHc5TXZ6Q2hBb0dCQVBObzRWMjF6MGp6MWdEb2tlTVN5d3JnL2E4RkJSM2R2Y0xZbWV5VXkybmd3eHVucnFsdwo2U2IrOWdrOGovcXEvc3VQSDhVdzNqSHNKYXdGSnNvTkVqNCt2b1ZSM3UrbE5sTEw5b21rMXBoU0dNdVp0b3huCm5nbUxVbkJUMGI1M3BURkJ5WGsveE5CbElreWdBNlg5T2MreW5na3RqNlRyVnMxUERTdnVJY0s1QW9HQkFQZmoKcEUzR2F6cVFSemx6TjRvTHZmQWJBdktCZ1lPaFNnemxsK0ZLZkhzYWJGNkdudFd1dWVhY1FIWFpYZTA1c2tLcApXN2xYQ3dqQU1iUXI3QmdlazcrOSszZElwL1RnYmZCYnN3Syt6Vng3Z2doeWMrdytXRWExaHByWTZ6YXdxdkFaCkhRU2lMUEd1UGp5WXBQa1E2ZFdEczNmWHJGZ1dlTmd4SkhTZkdaT05Bb0dCQUt5WTF3MUM2U3Y2c3VuTC8vNTcKQ2Z5NTAwaXlqNUZBOWRqZkRDNWt4K1JZMnlDV0ExVGsybjZyVmJ6dzg4czBTeDMrYS9IQW1CM2dMRXBSRU5NKwo5NHVwcENFWEQ3VHdlcGUxUnlrTStKbmp4TzlDSE41c2J2U25sUnBQWlMvZzJRTVhlZ3grK2trbkhXNG1ITkFyCndqMlRrMXBBczFXbkJ0TG9WaGVyY01jSkFvR0JBSTYwSGdJb0Y5SysvRUcyY21LbUg5SDV1dGlnZFU2eHEwK0IKWE0zMWMzUHE0amdJaDZlN3pvbFRxa2d0dWtTMjBraE45dC9ibkI2TmhnK1N1WGVwSXFWZldVUnlMejVwZE9ESgo2V1BMTTYzcDdCR3cwY3RPbU1NYi9VRm5
Yd0U4OHlzRlNnOUF6VjdVVUQvU0lDYkI5ZHRVMWh4SHJJK0pZRWdWCkFrZWd6N2lCQW9HQkFJRncrQVFJZUIwM01UL0lCbGswNENQTDJEak0rNDhoVGRRdjgwMDBIQU9mUWJrMEVZUDEKQ2FLR3RDbTg2MXpBZjBzcS81REtZQ0l6OS9HUzNYRk00Qm1rRk9nY1NXVENPNmZmTGdLM3FmQzN4WDJudlpIOQpYZGNKTDQrZndhY0x4c2JJKzhhUWNOVHRtb3pkUjEzQnNmUmIrSGpUL2o3dkdrYlFnSkhCT0syegotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=", TLSClientCert: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVJVENDQWdtZ0F3SUJBZ0lSQVBqTEJxS1lwcWU0ekhQc0dWdFR6T0F3RFFZSktvWklodmNOQVFFTEJRQXcKRWpFUU1BNEdBMVVFQXhNSFoyOXZaQzFqWVRBZUZ3MHhPVEE0TVRBeE9EUTVOREJhRncweU1UQXlNVEF4TnpRdwpNREZhTUJNeEVUQVBCZ05WQkFNVENIQnZiV1Z5YVhWdE1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBCk1JSUJDZ0tDQVFFQTY3S2pxbVFZR3EwTVZ0QUNWcGVDbVhtaW5sUWJEUEdMbXNaQVVFd3VlSFFucnQzV3R2cEQKT202QWxhSk1VblcrSHU1NWpqb2thbEtlVmpUS21nWUdicVV6VkRvTWJQRGFIZWtsdGRCVE1HbE9VRnNQNFVKUwpEck80emROK3pvNDI4VFgyUG5HMkZDZFZLR3k0UEU4aWxIYldMY3I4NzFZalY1MWZ3OENMRFg5UFpKTnU4NjFDCkY3VjlpRUptNnNTZlFsbW5oTjhqMytXelZiUFFOeTFXc1I3aTllOWo2M0VxS3QyMlE5T1hMK1dBY0tza29JU20KQ05WUlVBalU4WVJWY2dRSkIrelEzNEFRUGx6ME9wNU8vUU4vTWVkamFGOHdMUytpdi96dmlTOGNxUGJ4bzZzTApxNkZOVGx0ay9Ra3hlQ2VLS1RRZS8za1BZdlFBZG5sNjVRSURBUUFCbzNFd2J6QU9CZ05WSFE4QkFmOEVCQU1DCkE3Z3dIUVlEVlIwbEJCWXdGQVlJS3dZQkJRVUhBd0VHQ0NzR0FRVUZCd01DTUIwR0ExVWREZ1FXQkJRQ1FYbWIKc0hpcS9UQlZUZVhoQ0dpNjhrVy9DakFmQmdOVkhTTUVHREFXZ0JSNTRKQ3pMRlg0T0RTQ1J0dWNBUGZOdVhWegpuREFOQmdrcWhraUc5dzBCQVFzRkFBT0NBZ0VBcm9XL2trMllleFN5NEhaQXFLNDVZaGQ5ay9QVTFiaDlFK1BRCk5jZFgzTUdEY2NDRUFkc1k4dll3NVE1cnhuMGFzcSt3VGFCcGxoYS9rMi9VVW9IQ1RqUVp1Mk94dEF3UTdPaWIKVE1tMEorU3NWT3d4YnFQTW9rK1RqVE16NFdXaFFUTzVwRmNoZDZXZXNCVHlJNzJ0aG1jcDd1c2NLU2h3YktIegpQY2h1QTQ4SzhPdi96WkxmZnduQVNZb3VCczJjd1ZiRDI3ZXZOMzdoMGFzR1BrR1VXdm1PSDduTHNVeTh3TTdqCkNGL3NwMmJmTC9OYVdNclJnTHZBMGZMS2pwWTQrVEpPbkVxQmxPcCsrbHlJTEZMcC9qMHNybjRNUnlKK0t6UTEKR1RPakVtQ1QvVEFtOS9XSThSL0FlYjcwTjEzTytYNEtaOUJHaDAxTzN3T1Vqd3BZZ3lxSnNoRnNRUG50VmMrSQpKQmF4M2VQU3NicUcwTFkzcHdHUkpRNmMrd1lxdGk2Y0tNTjliYlRkMDhCNUk1N1RRTHhNcUoycTFnWmw1R1VUCmVFZGNWRXltMnZmd0NPd0lrbGNBbThxTm5kZGZK
V1FabE5VaHNOVWFBMkVINnlDeXdaZm9aak9hSDEwTXowV20KeTNpZ2NSZFQ3Mi9NR2VkZk93MlV0MVVvRFZmdEcxcysrditUQ1lpNmpUQU05dkZPckJ4UGlOeGFkUENHR2NZZAowakZIc2FWOGFPV1dQQjZBQ1JteHdDVDdRTnRTczM2MlpIOUlFWWR4Q00yMDUrZmluVHhkOUcwSmVRRTd2Kyt6CldoeWo2ZmJBWUIxM2wvN1hkRnpNSW5BOGxpekdrVHB2RHMxeTBCUzlwV3ppYmhqbVFoZGZIejdCZGpGTHVvc2wKZzlNZE5sND0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="}}
goodClientCertPolicies.Validate()
customServerName := testOptions(t)
customServerName.Policies = []config.Policy{{To: toFoo, From: "http://bar.example", TLSServerName: "test"}}
emptyPolicies := testOptions(t)
emptyPolicies.Policies = nil
allowWebSockets := testOptions(t)
allowWebSockets.Policies = []config.Policy{{To: toFoo, From: "http://bar.example", AllowWebsockets: true}}
customTimeout := testOptions(t)
ten := 10 * time.Second
customTimeout.Policies = []config.Policy{{To: toFoo, From: "http://bar.example", UpstreamTimeout: &ten}}
corsPreflight := testOptions(t)
corsPreflight.Policies = []config.Policy{{To: toFoo, From: "http://bar.example", CORSAllowPreflight: true}}
disableAuth := testOptions(t)
disableAuth.Policies = []config.Policy{{To: toFoo, From: "http://bar.example", AllowPublicUnauthenticatedAccess: true}}
reqHeaders := testOptions(t)
reqHeaders.Policies = []config.Policy{{To: toFoo, From: "http://bar.example", SetRequestHeaders: map[string]string{"x": "y"}}}
preserveHostHeader := testOptions(t)
preserveHostHeader.Policies = []config.Policy{{To: toFoo, From: "http://bar.example", PreserveHostHeader: true}}
tests := []struct {
name string
originalOptions *config.Options
updatedOptions *config.Options
host string
wantErr bool
wantRoute bool
}{
{"good no change", good, good, "https://corp.example.example", false, true},
{"changed", good, newPolicies, "https://bar.example", false, true},
{"changed and missing", good, newPolicies, "https://corp.example.example", false, false},
{"disable tls verification", good, disableTLSPolicies, "https://bar.example", false, true},
{"custom root ca", good, customCAPolicies, "https://bar.example", false, true},
{"good client certs", good, goodClientCertPolicies, "https://bar.example", false, true},
{"custom server name", customServerName, customServerName, "https://bar.example", false, true},
{"good no policies to start", emptyPolicies, good, "https://corp.example.example", false, true},
{"allow websockets", good, allowWebSockets, "https://corp.example.example", false, true},
{"no websockets, custom timeout", good, customTimeout, "https://corp.example.example", false, true},
{"enable cors preflight", good, corsPreflight, "https://corp.example.example", false, true},
{"disable auth", good, disableAuth, "https://corp.example.example", false, true},
{"set request headers", good, reqHeaders, "https://corp.example.example", false, true},
{"preserve host headers", preserveHostHeader, preserveHostHeader, "https://corp.example.example", false, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p, err := New(&config.Config{Options: tt.originalOptions})
if err != nil {
t.Fatal(err)
}
p.OnConfigChange(context.Background(), &config.Config{Options: tt.updatedOptions})
r := httptest.NewRequest(http.MethodGet, tt.host, nil)
w := httptest.NewRecorder()
p.ServeHTTP(w, r)
if tt.wantRoute && w.Code != http.StatusNotFound {
t.Errorf("Failed to find route handler")
return
}
})
}
// Test nil
var p *Proxy
p.OnConfigChange(context.Background(), &config.Config{})
}
|
package accounts
import (
"testing"
uuid "github.com/satori/go.uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestNewAccount checks that a freshly built account carries a valid UUID,
// the organisation ID it was given, and the "accounts" resource type.
func TestNewAccount(t *testing.T) {
	account := NewAccount("123", "GB", []string{"Samantha Holder"}, &AccountAttributes{BankID: "xxx"})
	_, err := uuid.FromString(account.Data.ID)
	require.Nil(t, err)
	assert.Equal(t, "accounts", account.Data.Type)
	assert.Equal(t, "123", account.Data.OrganisationID)
}
// TestNewAccountRequiredParameters checks that the country passed as a
// required parameter wins over the one set inside the attributes struct.
func TestNewAccountRequiredParameters(t *testing.T) {
	account := NewAccount("123", "XX", []string{"Samantha Holder"}, &AccountAttributes{Country: "GB"})
	assert.Equal(t, "XX", account.Data.Attributes.Country)
}
// TestNewAccountWithoutAttributes checks that a nil attributes argument is
// replaced with a populated AccountAttributes carrying the given country.
func TestNewAccountWithoutAttributes(t *testing.T) {
	account := NewAccount("123", "GB", []string{"Samantha Holder"}, nil)
	assert.IsType(t, &AccountAttributes{}, account.Data.Attributes)
	assert.Equal(t, "GB", account.Data.Attributes.Country)
}
|
package fileWatcher

// Folder describes a directory to watch for file changes.
type Folder struct {
	// Path is the filesystem path of the directory.
	Path string
	// Recursive indicates whether subdirectories are watched as well.
	Recursive bool
}
|
package main
import (
"strings"
"testing"
)
// TestDetectCircularDependencySimple builds a two-node cycle (a <-> b)
// reachable from the root and expects the detector to report it, naming
// the cycle in the error message.
func TestDetectCircularDependencySimple(t *testing.T) {
	a := NewNode(Job{}, "a")
	b := NewNode(Job{}, "b")
	a.Dependents[b] = struct{}{}
	b.Dependents[a] = struct{}{}
	root := NewNode(Job{}, "root")
	root.Dependents[a] = struct{}{}
	expectedCycle := "a->b->a"
	err := detectCircularDependency(root)
	if err == nil {
		t.Fatalf("failed to detect a circular dependency: %s", expectedCycle)
	}
	msg := err.Error()
	if !strings.Contains(msg, expectedCycle) {
		// BUG FIX: failure message previously misspelled "expecting".
		t.Fatalf("expecting the error message to contain \"%s\", but got \"%s\"", expectedCycle, msg)
	}
}
// TestDetectCircularDependencyMoreThanOneDependents builds a graph where
// node e is reachable through two branches and participates in a d <-> e
// cycle; either cycle report is acceptable.
func TestDetectCircularDependencyMoreThanOneDependents(t *testing.T) {
	a := NewNode(Job{}, "a")
	b := NewNode(Job{}, "b")
	c := NewNode(Job{}, "c")
	d := NewNode(Job{}, "d")
	e := NewNode(Job{}, "e")
	root := NewNode(Job{}, "root")
	root.Dependents[a] = struct{}{}
	root.Dependents[b] = struct{}{}
	a.Dependents[c] = struct{}{}
	a.Dependents[d] = struct{}{}
	d.Dependents[e] = struct{}{}
	b.Dependents[e] = struct{}{}
	e.Dependents[d] = struct{}{}
	expectedCycles := []string{"d->e->d->a", "e->d->e->b"}
	err := detectCircularDependency(root)
	if err == nil {
		t.Fatalf("failed to detect a circular dependency: \"%s\" or \"%s\"", expectedCycles[0], expectedCycles[1])
	}
	msg := err.Error()
	var found bool
	for _, expectedCycle := range expectedCycles {
		if strings.Contains(msg, expectedCycle) {
			found = true
			break // one matching cycle is enough
		}
	}
	if !found {
		// BUG FIX: failure message previously misspelled "expecting".
		t.Fatalf("expecting the error message to contain \"%s\" or \"%s\", but got \"%s\"", expectedCycles[0], expectedCycles[1], msg)
	}
}
// TestDetectCircularDependencyNoCycle builds a DAG whose branches share a
// node but contain no cycle, and verifies the detector stays silent.
func TestDetectCircularDependencyNoCycle(t *testing.T) {
	nodeA := NewNode(Job{}, "a")
	nodeB := NewNode(Job{}, "b")
	nodeC := NewNode(Job{}, "c")
	nodeD := NewNode(Job{}, "d")
	nodeE := NewNode(Job{}, "e")
	nodeF := NewNode(Job{}, "f")
	root := NewNode(Job{}, "root")

	// root -> {a, b}; a -> {c, d}; d -> e; b -> e; e -> f
	root.Dependents[nodeA] = struct{}{}
	root.Dependents[nodeB] = struct{}{}
	nodeA.Dependents[nodeC] = struct{}{}
	nodeA.Dependents[nodeD] = struct{}{}
	nodeD.Dependents[nodeE] = struct{}{}
	nodeB.Dependents[nodeE] = struct{}{}
	nodeE.Dependents[nodeF] = struct{}{}

	if err := detectCircularDependency(root); err != nil {
		t.Fatalf("expected no cycle, but got \"%s\"", err.Error())
	}
}
// TestDetectCircularDependencyWhenNil verifies a nil root is accepted
// without error.
func TestDetectCircularDependencyWhenNil(t *testing.T) {
	if err := detectCircularDependency(nil); err != nil {
		t.Fatal(err)
	}
}
func TestNewGraphWithoutJobs(t *testing.T) {
var cfg Config
_, err := NewGraph(cfg)
if err == nil {
t.Fatal("expected to get an error due to no jobs")
}
}
// TestNewGraphWithNoDependencies checks that independent jobs each become a
// direct dependent of the graph root, and nothing else appears.
func TestNewGraphWithNoDependencies(t *testing.T) {
	cfg := Config{Jobs: map[string]Job{
		"job1": {},
		"job2": {},
	}}
	graph, err := NewGraph(cfg)
	if err != nil {
		t.Fatal(err)
	}
	if len(graph.Dependents) != 2 {
		t.Fatalf("expected to have 2 dependents, but got %d", len(graph.Dependents))
	}
	// Tick off each root dependent; anything unexpected is a failure.
	remaining := map[string]struct{}{
		"job1": {},
		"job2": {},
	}
	for node := range graph.Dependents {
		if _, ok := remaining[node.ID]; !ok {
			t.Fatalf("%s is not an expected label", node.ID)
		}
		delete(remaining, node.ID)
	}
}
// TestNewGraphWithDependencies checks that a job with a "needs" entry hangs
// off its dependency rather than the root, and that labels match.
func TestNewGraphWithDependencies(t *testing.T) {
	cfg := Config{Jobs: map[string]Job{
		"job1": {},
		"job2": {},
		"job3": {Needs: []string{"job1"}},
	}}
	graph, err := NewGraph(cfg)
	if err != nil {
		t.Fatal(err)
	}
	if len(graph.Dependents) != 2 {
		t.Fatalf("expected to have 2 dependents, but got %d", len(graph.Dependents))
	}
	// Expected children per root-level node: job3 hangs off job1.
	wantChildren := map[string][]string{
		"job1": {"job3"},
		"job2": nil,
	}
	for node := range graph.Dependents {
		children, ok := wantChildren[node.ID]
		if !ok {
			t.Fatalf("%s is not an expected label", node.ID)
		}
		if len(node.Dependents) != len(children) {
			t.Fatalf("expected to have %d dependents, but got %d dependents", len(children), len(node.Dependents))
		}
		if len(children) > 0 {
			child := children[0]
			for d := range node.Dependents {
				if d.ID != child {
					t.Fatalf("expected the child to be labeled \"%s\", but instead \"%s\"", child, d.ID)
				}
			}
		}
		delete(wantChildren, node.ID)
	}
}
// TestNewGraphWithCircularDependency checks that two jobs needing each
// other cause graph construction to fail.
func TestNewGraphWithCircularDependency(t *testing.T) {
	cfg := Config{Jobs: map[string]Job{
		"job1": {Needs: []string{"job2"}},
		"job2": {Needs: []string{"job1"}},
	}}
	if _, err := NewGraph(cfg); err == nil {
		t.Fatal("expected to get an error about circular dependency")
	}
}
// TestNewGraphDependencyNotExist checks that a job needing an undefined job
// causes graph construction to fail.
func TestNewGraphDependencyNotExist(t *testing.T) {
	cfg := Config{Jobs: map[string]Job{
		"job1": {Needs: []string{"job2"}},
	}}
	if _, err := NewGraph(cfg); err == nil {
		t.Fatal("expected to get an error")
	}
}
|
// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
package export
import (
"testing"
"github.com/pingcap/tidb/util/promutil"
)
func TestMetricsRegistration(t *testing.T) {
m := newMetrics(promutil.NewDefaultFactory(), nil)
registry := promutil.NewDefaultRegistry()
m.registerTo(registry)
m.unregisterFrom(registry)
}
|
package solution
import "testing"
// TestRomanToInt verifies romanToInt against a handful of known numerals,
// including subtractive forms (IV, IX, CM, XC).
func TestRomanToInt(t *testing.T) {
	cases := []struct {
		input    string
		expected int
	}{
		{"III", 3},
		{"IV", 4},
		{"IX", 9},
		{"LVIII", 58},
		{"MCMXCIV", 1994},
	}
	for _, tc := range cases {
		if got := romanToInt(tc.input); got != tc.expected {
			t.Errorf("romanToInt(%s) == %d, want %d", tc.input, got, tc.expected)
		}
	}
}
|
package schema
// PaginationMetadata represents the meta data of a paginated response.
type PaginationMetadata struct {
	// TotalElements is the total number of elements available.
	TotalElements int `json:"total_elements"`
	// DisplayedElements is the number of elements in the current page.
	DisplayedElements int `json:"displayed_elements"`
}
|
package main
import (
"log"
"net"
)
// nLEDs is the number of LEDs on the strip; each frame carries 3 bytes
// (RGB) per LED plus a 1-byte header.
const nLEDs = 92

// main connects to the LED controller and streams 15 identical frames of
// mid-brightness (0x80) pixels, writing in a loop until each frame is
// fully flushed.
func main() {
	// Open the serial port
	conn, err := net.Dial("tcp", "localhost:9996")
	if err != nil {
		log.Fatalln("Couldn't open localhost: ", err)
	}
	defer conn.Close()
	const frameLen = nLEDs*3 + 1
	buf := make([]byte, frameLen)
	// The frame content never changes, so build it once outside the loop
	// (previously rebuilt on every iteration).
	buf[0] = 0x84 // frame header byte expected by the controller
	for i := 0; i < nLEDs; i++ {
		buf[1+i*3], buf[2+i*3], buf[3+i*3] = 0x80, 0x80, 0x80
	}
	for j := 0; j < 15; j++ {
		// net.Conn.Write may write fewer bytes than requested; loop until
		// the whole frame is out.
		total := 0
		for total < frameLen {
			n, err := conn.Write(buf[total:])
			if err != nil {
				log.Fatalln("Failed to write to connection: ", err)
			}
			total += n
			log.Println(total)
		}
	}
}
|
package mempool
import (
"testing"
"github.com/meshplus/bitxhub-model/pb"
"github.com/stretchr/testify/assert"
)
// TestForward feeds five transactions with a nonce gap (1,2,3,4,6) into the
// mempool and checks index sizes, then forwards past nonce 3 and checks the
// removed transactions.
func TestForward(t *testing.T) {
	is := assert.New(t)
	pool, _ := mockMempoolImpl()
	defer cleanTestData()
	key := genPrivKey()
	account, _ := key.PublicKey().Address()
	// Nonce 5 is intentionally skipped so one tx lands in the parking lot.
	txs := []*pb.Transaction{
		constructTx(uint64(1), &key),
		constructTx(uint64(2), &key),
		constructTx(uint64(3), &key),
		constructTx(uint64(4), &key),
		constructTx(uint64(6), &key),
	}
	is.Nil(pool.processTransactions(txs))
	list := pool.txStore.allTxs[account.String()]
	is.Equal(5, list.index.size())
	is.Equal(4, pool.txStore.priorityIndex.size())
	is.Equal(1, pool.txStore.parkingLotIndex.size())
	removed := list.forward(uint64(3))
	is.Equal(1, len(removed))
	is.Equal(2, len(removed[account.String()]))
	is.Equal(uint64(1), removed[account.String()][0].Nonce)
	is.Equal(uint64(2), removed[account.String()][1].Nonce)
}
|
package factory
import (
"fmt"
)
// Factory identifiers accepted by BuildFactory.
const (
	ItalianType = 1
)

// Vehicle model identifiers accepted by ItalianFactory.Build.
const (
	FerrariModel          = 1
	CarWithFiveWheelModel = 2
)
// Vehicle is the product built by a VehicleFactory.
type Vehicle interface {
	// NumOfWheels reports how many wheels the vehicle has.
	NumOfWheels() int
	// GetModelName returns the vehicle's display name.
	GetModelName() string
}

// VehicleFactory builds a Vehicle from a model identifier.
type VehicleFactory interface {
	Build(v int) (Vehicle, error)
}
// ItalianFactory builds Italian vehicle models.
type ItalianFactory struct{}

// CarWithFiveWheelType is a five-wheeled car model named "Star".
type CarWithFiveWheelType struct{}

// NumOfWheels reports the wheel count of the five-wheel model.
func (c *CarWithFiveWheelType) NumOfWheels() int {
	return 5
}

// GetModelName returns the model's display name.
func (c *CarWithFiveWheelType) GetModelName() string {
	return "Star"
}
// FerrariModelType is a four-wheeled Ferrari model.
type FerrariModelType struct{}

// NumOfWheels reports the wheel count of a Ferrari.
func (m *FerrariModelType) NumOfWheels() int {
	return 4
}

// GetModelName returns the model's display name.
func (m *FerrariModelType) GetModelName() string {
	return "Ferrari"
}
// Build returns the Italian vehicle for the given model identifier, or an
// error for unknown models.
func (i *ItalianFactory) Build(v int) (Vehicle, error) {
	switch v {
	case FerrariModel:
		return new(FerrariModelType), nil
	case CarWithFiveWheelModel:
		return new(CarWithFiveWheelType), nil
	}
	// Error strings are lowercase with no trailing newline, per Go
	// convention (the old message was capitalized and "\n"-terminated).
	return nil, fmt.Errorf("no Italian cars of type %d", v)
}
// BuildFactory returns the VehicleFactory for the given factory identifier,
// or an error for unknown identifiers.
func BuildFactory(f int) (VehicleFactory, error) {
	switch f {
	case ItalianType:
		return new(ItalianFactory), nil
	default:
		// Error strings are lowercase with no trailing newline, per Go
		// convention (the old message was capitalized and "\n"-terminated).
		return nil, fmt.Errorf("no factory with id %d", f)
	}
}
|
package model
import (
"Seaman/utils"
"time"
)
// TplExportHistoryT is the xorm-mapped row of the template export history
// table. Column comments inside the xorm tags are kept verbatim (they are
// written into the database schema).
type TplExportHistoryT struct {
	Id int `xorm:"not null pk autoincr INT(11)"`
	// TypeId identifies the export type.
	TypeId int `xorm:"not null comment('导出类型ID') INT(11)"`
	// Status is the export status flag.
	Status int `xorm:"not null comment('状态') TINYINT(3)"`
	// Params holds the export parameters as a JSON string, e.g. {"a":"b"}.
	Params string `xorm:"comment('参数列表,使用json格式,如{"a":"b"}') VARCHAR(512)"`
	// FilePath is the path of the file to export.
	FilePath string `xorm:"comment('要导出的文件地址') VARCHAR(512)"`
	// CreateUserId is the ID of the creating user.
	CreateUserId int64 `xorm:"not null comment('创建用户ID') index BIGINT(20)"`
	// CreateDate is the creation timestamp.
	CreateDate time.Time `xorm:"comment('创建时间') DATETIME"`
	// LastUpdateUserId is the ID of the last updating user.
	LastUpdateUserId int64 `xorm:"comment('最后更新用户ID') BIGINT(20)"`
	// LastUpdateDate is the last modification timestamp.
	LastUpdateDate time.Time `xorm:"comment('最后修改时间') DATETIME"`
	// Revision is the optimistic-locking version number.
	Revision int64 `xorm:"comment('版本号') BIGINT(20)"`
	// AppName is the owning application name.
	AppName string `xorm:"comment('应用名') VARCHAR(32)"`
	// TenantId is the multi-tenant identifier.
	TenantId string `xorm:"comment('多租户ID') VARCHAR(32)"`
	// AppScope is the application group name.
	AppScope string `xorm:"comment('系统群名') VARCHAR(32)"`
	// FileId references the stored file path record.
	FileId int `xorm:"comment('文件路径ID') INT(11)"`
}
// tplExportHistoryTToRespDesc converts the database row into the map shape
// required by request/response JSON: snake_case keys and datetimes
// formatted via utils.FormatDatetime.
func (h *TplExportHistoryT) tplExportHistoryTToRespDesc() interface{} {
	return map[string]interface{}{
		"id":                  h.Id,
		"type_id":             h.TypeId,
		"status":              h.Status,
		"params":              h.Params,
		"file_path":           h.FilePath,
		"file_id":             h.FileId,
		"revision":            h.Revision,
		"tenant_id":           h.TenantId,
		"app_name":            h.AppName,
		"app_scope":           h.AppScope,
		"create_date":         utils.FormatDatetime(h.CreateDate),
		"last_update_date":    utils.FormatDatetime(h.LastUpdateDate),
		"create_user_id":      h.CreateUserId,
		"last_update_user_id": h.LastUpdateUserId,
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.