text stringlengths 11 4.05M |
|---|
package printer
// PrinterContext pairs a Printer implementation with the output file it
// should write and the class of code (client or server) to generate.
type PrinterContext struct {
	outFile string  // destination path for the generated output
	p       Printer // printer implementation that produces the output stream
	name    string  // human-readable name used in log messages
	class   int     // 1 = client 2 = server
}
// Start runs the context's printer against g and writes the resulting
// stream to outFile. It returns false when the printer produced no
// stream or the file could not be written, true otherwise.
//
// (Receiver renamed from `self` to `pc`: Go convention uses a short
// receiver name, never `self`/`this`.)
func (pc *PrinterContext) Start(g *Globals) bool {
	log.Infof("[%s] %s\n", pc.name, pc.outFile)
	bf := pc.p.Run(g, pc.class)
	if bf == nil {
		return false
	}
	return bf.WriteFile(pc.outFile) == nil
}
// Printer generates an output stream for the given globals.
// outPutClass selects what to generate (1 = client, 2 = server,
// matching PrinterContext.class).
type Printer interface {
	Run(g *Globals, outPutClass int) *Stream
}

// printerByExt maps a file extension to its registered Printer.
// Populated via RegisterPrinter, typically from package init functions.
var printerByExt = make(map[string]Printer)
// RegisterPrinter associates p with a file extension. It panics if a
// printer is already registered for ext, since a duplicate registration
// is a programming error at init time.
func RegisterPrinter(ext string, p Printer) {
	if _, ok := printerByExt[ext]; ok {
		// Include the extension so the offending registration is identifiable.
		panic("duplicate printer for extension: " + ext)
	}
	printerByExt[ext] = p
}
|
package middleware
import "net/http"
// User represents a user of the system.
type User struct {
	ID       int    // unique numeric identifier
	UserName string // login name
}
// GetAuthenticatedUser returns the current user given a request, or nil
// if the user is not currently authenticated. This is just for demo
// purposes: normally you would use your sessions package to get the
// currently authenticated user.
func GetAuthenticatedUser(r *http.Request) (*User, error) {
	// Keyed fields so the literal survives struct reordering (go vet composites).
	return &User{ID: 1, UserName: "test"}, nil
}
//TODO: define a type for authenticated handler functions
//that take a `*User` as a third parameter
//TODO: create an adapter function that can adapt an
//authenticated handler function into a regular http
//handler function
|
package mbr
import (
"fmt"
"os"
"os/exec"
)
// decode reads the first sector of file into an Mbr and returns it when
// the 0x55AA boot signature is present at offsets 0x1FE-0x1FF.
//
// NOTE(review): when the signature is absent this returns (nil, nil) —
// a nil *Mbr with a nil error. Callers must check the pointer as well
// as the error or they will dereference nil; consider returning a
// sentinel error instead.
func decode(file *os.File) (*Mbr, error) {
	var mbr Mbr
	if _, err := file.ReadAt(mbr.raw[:], 0); err != nil {
		return nil, err
	}
	if mbr.raw[0x1FE] == 0x55 && mbr.raw[0x1FF] == 0xAA {
		return &mbr, nil
	}
	return nil, nil
}
// read32LE decodes a 32-bit little-endian unsigned integer from the
// first four bytes of address and widens it to uint64.
func read32LE(address []byte) uint64 {
	var value uint64
	for i := 3; i >= 0; i-- {
		value = value<<8 | uint64(address[i])
	}
	return value
}
// write32LE stores the low 32 bits of value into the first four bytes
// of address in little-endian byte order.
func write32LE(address []byte, value uint64) {
	for i := 0; i < 4; i++ {
		address[i] = byte(value >> (8 * i))
	}
}
// writeDefault writes a default partition table of the given type to
// filename by shelling out to parted: one primary bootable ext2
// partition spanning "1" to "100%" (parted's default units).
func writeDefault(filename string, tableType TableType) error {
	label, err := tableType.lookupString()
	if err != nil {
		return err
	}
	fmt.Printf("making table type: %d (%s)\n", tableType, label)
	cmd := exec.Command("parted", "-s", "-a", "optimal", filename,
		"mklabel", label,
		"mkpart", "primary", "ext2", "1", "100%",
		"set", "1", "boot", "on",
	)
	// CombinedOutput captures stdout+stderr so parted's diagnostics end up
	// in the returned error.
	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("error partitioning: %s: %s: %s",
			filename, err, output)
	}
	return nil
}
// getPartitionOffset returns the byte offset of partition index, decoded
// from the 32-bit start-sector field (entry offset +8) of its MBR table
// entry and scaled by the 512-byte sector size.
func (mbr *Mbr) getPartitionOffset(index uint) uint64 {
	entry := mbr.raw[0x1BE+0x10*index:]
	return read32LE(entry[8:]) * 512
}
// getPartitionSize returns the size in bytes of partition index, decoded
// from the 32-bit sector-count field (entry offset +12) of its MBR table
// entry and scaled by the 512-byte sector size.
func (mbr *Mbr) getPartitionSize(index uint) uint64 {
	entry := mbr.raw[0x1BE+0x10*index:]
	return read32LE(entry[12:]) * 512
}
// setPartitionOffset stores offset (in bytes) as the start-sector field
// of partition index. offset must be an exact multiple of the 512-byte
// sector size and index must be a valid partition number.
func (mbr *Mbr) setPartitionOffset(index uint, offset uint64) error {
	if index >= mbr.GetNumPartitions() {
		return fmt.Errorf("invalid partition index: %d", index)
	}
	sector := offset >> 9
	if sector<<9 != offset {
		return fmt.Errorf("offset: %d is not an integral multiple of blocks",
			offset)
	}
	write32LE(mbr.raw[0x1BE+0x10*index+8:], sector)
	return nil
}
// setPartitionSize stores size (in bytes) as the sector-count field of
// partition index. size must be an exact multiple of the 512-byte sector
// size and index must be a valid partition number.
func (mbr *Mbr) setPartitionSize(index uint, size uint64) error {
	if index >= mbr.GetNumPartitions() {
		return fmt.Errorf("invalid partition index: %d", index)
	}
	sector := size >> 9
	if sector<<9 != size {
		return fmt.Errorf("size: %d is not an integral multiple of blocks",
			size)
	}
	write32LE(mbr.raw[0x1BE+0x10*index+12:], sector)
	return nil
}
// write persists the in-memory MBR sector to filename, which must
// already exist (the file is opened write-only without O_CREATE).
func (mbr *Mbr) write(filename string) error {
	file, err := os.OpenFile(filename, os.O_WRONLY, 0622)
	if err != nil {
		return err
	}
	defer file.Close()
	length, err := file.Write(mbr.raw[:])
	if err != nil {
		return err
	}
	if length != len(mbr.raw) {
		return fmt.Errorf("short write: %d", length)
	}
	return nil
}
|
package index
import (
"log"
"testing"
"github.com/stretchr/testify/assert"
)
// Test_newIndexTree_generates_an_indexTree_from_a_path builds an index
// tree from the on-disk fixtures under test_fixtures/root and compares
// it against the expected nested section/page structure.
func Test_newIndexTree_generates_an_indexTree_from_a_path(t *testing.T) {
	testDir := "test_fixtures/root"
	actual, err := newIndexTree(testDir)
	if err != nil {
		// NOTE(review): t.Fatal(err) would be more idiomatic than
		// log.Println + t.FailNow (but dropping log would orphan the import).
		log.Println(err)
		t.FailNow()
	}
	// Expected tree mirrors the fixture directory layout: directories become
	// sections, markdown files become pages.
	expected := section{
		title: "Root",
		url:   "test_fixtures/root",
		contents: []node{
			section{
				title: "Level1",
				url:   "test_fixtures/root/level1",
				contents: []node{
					page{title: "Level1 Other Page", url: "test_fixtures/root/level1/level1_other_page.md"},
					page{title: "Level1 Page", url: "test_fixtures/root/level1/level1_page.md"},
					section{
						title: "Level2",
						url:   "test_fixtures/root/level1/level2",
						contents: []node{
							page{title: "Level2 Page", url: "test_fixtures/root/level1/level2/level2_page.md"},
						},
					},
				},
			},
			page{title: "Root Page", url: "test_fixtures/root/root_page.md"},
		},
	}
	assert.Equal(t, expected, actual, "")
}
|
package plumber
import (
"context"
"time"
"github.com/batchcorp/plumber-schemas/build/go/protos/encoding"
"github.com/batchcorp/plumber-schemas/build/go/protos/records"
"github.com/batchcorp/plumber/backends"
"github.com/batchcorp/plumber/validate"
"github.com/batchcorp/plumber/writer"
"github.com/pkg/errors"
"github.com/posthog/posthog-go"
)
// HandleWriteCmd handles write mode: it validates the CLI write options,
// creates a backend, generates the value(s) to write, and performs the
// write under a 5-second timeout, displaying the first error record
// received (if any).
func (p *Plumber) HandleWriteCmd() error {
	if err := validate.WriteOptionsForCLI(p.CLIOptions.Write); err != nil {
		// Fixed copy-paste: this validates *write* options, not read options.
		return errors.Wrap(err, "unable to validate write options")
	}
	backend, err := backends.New(p.cliConnOpts)
	if err != nil {
		return errors.Wrap(err, "unable to create new backend")
	}
	value, err := writer.GenerateWriteValue(p.CLIOptions.Write, p.cliFDS)
	if err != nil {
		return errors.Wrap(err, "unable to generate write value")
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	errorCh := make(chan *records.ErrorRecord, 1)
	// Fire off a goroutine to (potentially) post usage telemetry
	go p.doWriteTelemetry(backend.Name())
	go func() {
		if err := backend.Write(ctx, p.CLIOptions.Write, errorCh, value...); err != nil {
			p.log.Errorf("unable to complete write(s): %s", err)
		}
		// Unblock the select below once the write completes (success or not).
		cancel()
	}()
	var errRecord *records.ErrorRecord
MAIN:
	for {
		select {
		case errRecord = <-errorCh:
			// NOTE(review): DisplayError's return value is intentionally not
			// propagated; the write itself already failed.
			err = backend.DisplayError(errRecord)
			break MAIN
		case <-ctx.Done():
			p.log.Debug("received quit from context - exiting write")
			break MAIN
		}
	}
	if errRecord == nil {
		p.log.Infof("Successfully wrote '%d' message(s)", len(value))
	}
	return nil
}
// doWriteTelemetry enqueues a "command_write" PostHog event describing
// how the write command was invoked: backend name, input source
// (argument/file/stdin), and encoding details when protobuf/JSONPB
// encoding is configured.
func (p *Plumber) doWriteTelemetry(backend string) {
	event := posthog.Capture{
		Event:      "command_write",
		DistinctId: p.PersistentConfig.PlumberID,
		Properties: map[string]interface{}{
			"backend":              backend,
			"encode_type":          "unset", // overwritten below when EncodeOptions is set
			"input_as_json_array":  p.CLIOptions.Write.XCliOptions.InputAsJsonArray,
			"input_metadata_items": len(p.CLIOptions.Write.Record.InputMetadata),
		},
	}
	// Record where the payload came from; checks are ordered by precedence.
	if p.CLIOptions.Write.Record.Input != "" {
		event.Properties["input_type"] = "argument"
	} else if p.CLIOptions.Write.XCliOptions.InputFile != "" {
		event.Properties["input_type"] = "file"
	} else if len(p.CLIOptions.Write.XCliOptions.InputStdin) > 0 {
		event.Properties["input_type"] = "stdin"
	}
	if p.CLIOptions.Write.EncodeOptions != nil {
		event.Properties["encode_type"] = p.CLIOptions.Write.EncodeOptions.EncodeType.String()
		if p.CLIOptions.Write.EncodeOptions.EncodeType == encoding.EncodeType_ENCODE_TYPE_JSONPB {
			// Using FD's or dir?
			if p.CLIOptions.Write.EncodeOptions.ProtobufSettings.ProtobufDescriptorSet != "" {
				event.Properties["protobuf_type"] = "fds"
			} else {
				event.Properties["protobuf_type"] = "dir"
			}
			// Set envelope info
			event.Properties["protobuf_envelope"] = p.CLIOptions.Write.EncodeOptions.ProtobufSettings.ProtobufEnvelopeType.String()
		}
	}
	p.Config.Telemetry.Enqueue(event)
}
|
package utils
import (
"math"
)
const _DELTA = 0.000001
func step(z, x float64) float64 {
return z - (z*z-x)/(2*z)
}
func NeotainSqrt(x float64) float64 {
z := 1.0
for newZ := step(z, x); math.Abs(newZ-z) > _DELTA; {
z = newZ
newZ = step(z, x)
}
return z
}
|
package day02
import (
"fmt"
"strconv"
"strings"
)
// passwordRule describes one policy of the form "1-3 a". The two numbers
// are an occurrence range for part 1 and 1-based character positions for
// part 2 of the puzzle.
type passwordRule struct {
	Letter         string
	MinOccurrences int
	MaxOccurrences int
}

// EvaluateLegacyRule reports whether input contains r.Letter between
// MinOccurrences and MaxOccurrences times, inclusive (part 1 semantics).
func (r *passwordRule) EvaluateLegacyRule(input string) bool {
	actualOccurrences := strings.Count(input, r.Letter)
	return actualOccurrences >= r.MinOccurrences && actualOccurrences <= r.MaxOccurrences
}

// Evaluate reports whether exactly one of the two 1-based positions
// (MinOccurrences and MaxOccurrences) holds r.Letter (part 2 semantics).
//
// Fix: positions outside the input now return false; the original
// indexed blindly and panicked on short passwords.
func (r *passwordRule) Evaluate(input string) bool {
	if r.MinOccurrences < 1 || r.MaxOccurrences < 1 ||
		r.MinOccurrences > len(input) || r.MaxOccurrences > len(input) {
		return false
	}
	letter := r.Letter[0]
	pos1 := input[r.MinOccurrences-1]
	pos2 := input[r.MaxOccurrences-1]
	// XOR: exactly one of the two positions must match.
	return (pos1 == letter) != (pos2 == letter)
}
// passwordEntry is one parsed input line: the policy and the password it
// applies to (e.g. "1-3 a: abcde").
type passwordEntry struct {
	Rule     passwordRule
	Password string
}
// Run parses every input line and prints how many passwords satisfy the
// part 1 (occurrence-count) and part 2 (position) rules.
func Run(lines []string) error {
	var legacyValid, newValid int
	for _, line := range lines {
		entry, err := parsePasswordEntry(line)
		if err != nil {
			return fmt.Errorf("error parsing password entry '%s': %v", line, err)
		}
		if entry.Rule.EvaluateLegacyRule(entry.Password) {
			legacyValid++
		}
		if entry.Rule.Evaluate(entry.Password) {
			newValid++
		}
	}
	fmt.Println("Part 1:", legacyValid)
	fmt.Println("Part 2:", newValid)
	return nil
}
// parsePasswordEntry splits a line of the form "1-3 a: abcde" into a
// rule (before the colon) and a password (after the ": " separator).
func parsePasswordEntry(line string) (passwordEntry, error) {
	colonIndex := strings.Index(line, ":")
	if colonIndex < 0 {
		return passwordEntry{}, fmt.Errorf("invalid entry, missing ':'")
	}
	rule, err := parsePasswordRule(line[:colonIndex])
	if err != nil {
		return passwordEntry{}, fmt.Errorf("error parsing rule: %v", err)
	}
	// Guard: the original sliced line[colonIndex+2:] unconditionally and
	// panicked when nothing followed the colon.
	if colonIndex+2 > len(line) {
		return passwordEntry{}, fmt.Errorf("invalid entry, missing password after ':'")
	}
	password := line[colonIndex+2:]
	return passwordEntry{Rule: rule, Password: password}, nil
}
// parsePasswordRule parses the rule half of an entry, e.g. "1-3 a": the
// dash-separated numbers become MinOccurrences/MaxOccurrences and the
// single character after the space becomes Letter.
func parsePasswordRule(input string) (passwordRule, error) {
	spaceIdx := strings.Index(input, " ")
	if spaceIdx < 0 {
		return passwordRule{}, fmt.Errorf("invalid entry, missing ' '")
	}
	// Guard: the original sliced input[spaceIdx+1:spaceIdx+2] unconditionally
	// and panicked when no letter followed the space.
	if spaceIdx+2 > len(input) {
		return passwordRule{}, fmt.Errorf("invalid entry, missing letter after ' '")
	}
	occurrences := input[:spaceIdx]
	character := input[spaceIdx+1 : spaceIdx+2]
	dashIdx := strings.Index(occurrences, "-")
	if dashIdx < 0 {
		return passwordRule{}, fmt.Errorf("invalid entry, missing '-'")
	}
	// Renamed from min/max to avoid shadowing the Go 1.21 builtins.
	minOcc, err := strconv.Atoi(occurrences[:dashIdx])
	if err != nil {
		return passwordRule{}, fmt.Errorf("error parsing occurrence count '%s': %v", occurrences[:dashIdx], err)
	}
	maxOcc, err := strconv.Atoi(occurrences[dashIdx+1:])
	if err != nil {
		return passwordRule{}, fmt.Errorf("error parsing occurrence count '%s': %v", occurrences[dashIdx+1:], err)
	}
	return passwordRule{
		Letter:         character,
		MinOccurrences: minOcc,
		MaxOccurrences: maxOcc,
	}, nil
}
|
// DO NOT EDIT. This file was generated by "github.com/frk/gosql".
package testdata
import (
"github.com/frk/gosql"
"github.com/frk/gosql/internal/testdata/common"
)
// Exec runs a fixed SELECT over "test_user" exercising LEFT/RIGHT/FULL/
// CROSS joins, scans each row into a common.User, and appends it to
// q.Users. (Generated by github.com/frk/gosql — regenerate rather than
// hand-editing the query.)
func (q *SelectWithJoinBlockSliceQuery) Exec(c gosql.Conn) error {
	const queryString = `SELECT
	u."id"
	, u."email"
	, u."full_name"
	, u."created_at"
	FROM "test_user" AS u
	LEFT JOIN "test_post" AS p ON p."user_id" = u."id"
	LEFT JOIN "test_join1" AS j1 ON j1."post_id" = p."id"
	RIGHT JOIN "test_join2" AS j2 ON j2."join1_id" = j1."id"
	FULL JOIN "test_join3" AS j3 ON j3."join2_id" = j2."id"
	CROSS JOIN "test_join4" AS j4
	WHERE p."is_spam" IS TRUE` // `
	rows, err := c.Query(queryString)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		v := new(common.User)
		// Scan order must match the SELECT column order above.
		err := rows.Scan(
			&v.Id,
			&v.Email,
			&v.FullName,
			&v.CreatedAt,
		)
		if err != nil {
			return err
		}
		q.Users = append(q.Users, v)
	}
	return rows.Err()
}
|
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package expression
import (
"bytes"
"fmt"
"slices"
"strings"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
)
// ExplainInfo implements the Expression interface. It renders the
// function call with full (non-normalized) argument details.
func (expr *ScalarFunction) ExplainInfo() string {
	return expr.explainInfo(false)
}
// explainInfo renders the function call as "name(arg, arg, ...)". For
// CAST, the target type is appended after each argument instead of a
// plain comma-separated list. normalized selects the normalized form of
// each argument's explain info.
func (expr *ScalarFunction) explainInfo(normalized bool) string {
	var buffer bytes.Buffer
	fmt.Fprintf(&buffer, "%s(", expr.FuncName.L)
	argInfo := func(arg Expression) string {
		if normalized {
			return arg.ExplainNormalizedInfo()
		}
		return arg.ExplainInfo()
	}
	if expr.FuncName.L == ast.Cast {
		for _, arg := range expr.GetArgs() {
			buffer.WriteString(argInfo(arg))
			buffer.WriteString(", ")
			buffer.WriteString(expr.RetType.String())
		}
	} else {
		for i, arg := range expr.GetArgs() {
			if i > 0 {
				buffer.WriteString(", ")
			}
			buffer.WriteString(argInfo(arg))
		}
	}
	buffer.WriteString(")")
	return buffer.String()
}
// ExplainNormalizedInfo implements the Expression interface. It renders
// the function call with normalized arguments (constants become "?").
func (expr *ScalarFunction) ExplainNormalizedInfo() string {
	return expr.explainInfo(true)
}
// ExplainInfo implements the Expression interface, using the column's
// string representation directly.
func (col *Column) ExplainInfo() string {
	return col.String()
}
// ExplainNormalizedInfo implements the Expression interface. The
// column's original name is preferred; columns without one normalize
// to "?".
func (col *Column) ExplainNormalizedInfo() string {
	if col.OrigName == "" {
		return "?"
	}
	return col.OrigName
}
// ExplainInfo implements the Expression interface: it evaluates the
// constant against an empty row and formats the resulting datum.
func (expr *Constant) ExplainInfo() string {
	dt, err := expr.Eval(chunk.Row{})
	if err != nil {
		// Fixed typo: previously read "not recognized const vanue".
		return "not recognized const value"
	}
	return expr.format(dt)
}
// ExplainNormalizedInfo implements the Expression interface. Constants
// always normalize to the placeholder "?".
func (expr *Constant) ExplainNormalizedInfo() string {
	return "?"
}
// format renders a constant datum for explain output: NULL for null
// values, double-quoted %v for string-like kinds, and plain %v for
// everything else.
func (expr *Constant) format(dt types.Datum) string {
	kind := dt.Kind()
	if kind == types.KindNull {
		return "NULL"
	}
	switch kind {
	case types.KindString, types.KindBytes, types.KindMysqlEnum, types.KindMysqlSet,
		types.KindMysqlJSON, types.KindBinaryLiteral, types.KindMysqlBit:
		return fmt.Sprintf("\"%v\"", dt.GetValue())
	default:
		return fmt.Sprintf("%v", dt.GetValue())
	}
}
// ExplainExpressionList generates explain information for a list of
// expressions. Each element renders as "expr->outputColumn"; a bare
// column omits the arrow unless it was re-projected under a different
// representation, and constants longer than 64 bytes are truncated with
// their length appended.
func ExplainExpressionList(exprs []Expression, schema *Schema) string {
	var sb strings.Builder
	for i, expr := range exprs {
		if i > 0 {
			sb.WriteString(", ")
		}
		target := schema.Columns[i].String()
		switch expr.(type) {
		case *Column, *CorrelatedColumn:
			src := expr.String()
			sb.WriteString(src)
			if src != target {
				// simple col projected again with another uniqueID without origin name.
				sb.WriteString("->")
				sb.WriteString(target)
			}
		case *Constant:
			v := expr.String()
			const maxLen = 64
			if len(v) < maxLen {
				sb.WriteString(v)
			} else {
				sb.WriteString(v[:maxLen])
				fmt.Fprintf(&sb, "(len:%d)", len(v))
			}
			sb.WriteString("->")
			sb.WriteString(target)
		default:
			sb.WriteString(expr.String())
			sb.WriteString("->")
			sb.WriteString(target)
		}
	}
	return sb.String()
}
// SortedExplainExpressionList generates explain information for a list of expressions in order.
// In some scenarios, the expr's order may not be stable when executing multiple times.
// So we add a sort to make its explain result stable.
func SortedExplainExpressionList(exprs []Expression) []byte {
	return sortedExplainExpressionList(exprs, false)
}
// sortedExplainExpressionList renders each expression's explain info
// (normalized or full), sorts the fragments so output is stable across
// runs, and joins them with ", ".
func sortedExplainExpressionList(exprs []Expression, normalized bool) []byte {
	exprInfos := make([]string, 0, len(exprs))
	for _, expr := range exprs {
		var info string
		if normalized {
			info = expr.ExplainNormalizedInfo()
		} else {
			info = expr.ExplainInfo()
		}
		exprInfos = append(exprInfos, info)
	}
	slices.Sort(exprInfos)
	return []byte(strings.Join(exprInfos, ", "))
}
// SortedExplainNormalizedExpressionList is the same as
// SortedExplainExpressionList, but generates normalized information
// (constants rendered as "?").
func SortedExplainNormalizedExpressionList(exprs []Expression) []byte {
	return sortedExplainExpressionList(exprs, true)
}
// SortedExplainNormalizedScalarFuncList is the same as
// SortedExplainExpressionList, but takes scalar functions and generates
// normalized information.
func SortedExplainNormalizedScalarFuncList(exprs []*ScalarFunction) []byte {
	// Widen []*ScalarFunction to []Expression for the shared helper.
	expressions := make([]Expression, 0, len(exprs))
	for _, sf := range exprs {
		expressions = append(expressions, sf)
	}
	return sortedExplainExpressionList(expressions, true)
}
// ExplainColumnList generates explain information for a list of columns,
// joined with ", " in the given order.
func ExplainColumnList(cols []*Column) []byte {
	infos := make([]string, len(cols))
	for i, col := range cols {
		infos[i] = col.ExplainInfo()
	}
	return []byte(strings.Join(infos, ", "))
}
|
package types
import (
"database/sql/driver"
"fmt"
"strings"
"golang.org/x/text/language"
)
// Language represents an ISO639 language. Its SQL type could be varchar(3).
type Language struct {
	base language.Base // underlying x/text base language
}
// NewLanguage creates a Language wrapping the given language.Base.
func NewLanguage(base language.Base) Language {
	return Language{base: base}
}
// ParseLanguage parses a language code (either 2-letter or 3-letter).
// On failure it returns the zero Language together with the parse error.
func ParseLanguage(code string) (Language, error) {
	base, err := language.ParseBase(code)
	if err != nil {
		return Language{}, err
	}
	return Language{base: base}, nil
}
// UnmarshalText parses a language code from raw text, implementing
// encoding.TextUnmarshaler. The receiver is only modified on success.
func (l *Language) UnmarshalText(text []byte) error {
	parsed, err := ParseLanguage(string(text))
	if err != nil {
		return err
	}
	*l = parsed
	return nil
}
// MustParseLanguage parses a language code (either 2-letter or 3-letter).
// Panics on failure.
func MustParseLanguage(code string) Language {
	// Local renamed from "language", which shadowed the imported
	// golang.org/x/text/language package.
	l, err := ParseLanguage(code)
	if err != nil {
		panic(err)
	}
	return l
}
// String returns the 2-letter representation of the language
// (same as ISO2, implements the fmt.Stringer interface)
func (l *Language) String() string {
	return l.ISO2()
}

// ISO2 returns the 2-letter representation of the language
func (l *Language) ISO2() string {
	return l.base.String()
}

// ISO3 returns the 3-letter representation of the language
func (l *Language) ISO3() string {
	return l.base.ISO3()
}
// Languages represents an array of languages, serialised as
// space-separated 2-letter codes (its SQL type should be text or
// varchar).
type Languages []Language
// NewLanguages creates a list of languages from the given bases,
// preserving their order.
func NewLanguages(bases []language.Base) Languages {
	languages := make(Languages, 0, len(bases))
	for _, base := range bases {
		languages = append(languages, NewLanguage(base))
	}
	return languages
}
// ParseLanguages parses languages from a space-separated list of
// language codes. Empty fields (from repeated, leading, or trailing
// spaces) are skipped.
//
// Fix: the original returned the full-length slice even when empty
// fields were skipped, leaving zero-value Language entries at the tail.
func ParseLanguages(spaceSeparatedCodes string) (Languages, error) {
	codes := strings.Split(spaceSeparatedCodes, " ")
	languages := make(Languages, 0, len(codes))
	for _, code := range codes {
		if code == "" {
			continue
		}
		l, err := ParseLanguage(code)
		if err != nil {
			return nil, err
		}
		languages = append(languages, l)
	}
	return languages, nil
}
// MustParseLanguages parses languages from a space-separated list of
// language codes, and panics on failure.
func MustParseLanguages(spaceSeparatedCodes string) Languages {
	languages, err := ParseLanguages(spaceSeparatedCodes)
	if err != nil {
		panic(err)
	}
	return languages
}
// String represents the languages as 2-letter codes separated by spaces.
func (l Languages) String() string {
	var sb strings.Builder
	for i := range l {
		if i > 0 {
			sb.WriteString(" ")
		}
		sb.WriteString(l[i].String())
	}
	return sb.String()
}
// Scan deserialises the object from raw database data, implementing
// sql.Scanner. It accepts string or []byte input containing
// space-separated language codes.
func (l *Languages) Scan(src interface{}) error {
	var codes string
	switch data := src.(type) {
	case string:
		codes = data
	case []byte:
		codes = string(data)
	default:
		return fmt.Errorf("unknown languages type")
	}
	var err error
	*l, err = ParseLanguages(codes)
	return err
}
// Scan deserialises the object from raw database data, implementing
// sql.Scanner. It accepts string or []byte input containing a single
// language code.
func (l *Language) Scan(src interface{}) error {
	var code string
	switch data := src.(type) {
	case string:
		code = data
	case []byte:
		code = string(data)
	default:
		return fmt.Errorf("unknown language type")
	}
	var err error
	*l, err = ParseLanguage(code)
	return err
}
// Value serialises the object to raw database data (the 2-letter code),
// implementing driver.Valuer.
func (l Language) Value() (driver.Value, error) {
	return l.String(), nil
}

// Value serialises the object to raw database data (space-separated
// 2-letter codes), implementing driver.Valuer.
func (l Languages) Value() (driver.Value, error) {
	return l.String(), nil
}
|
package main
import (
"fmt"
"math"
"unsafe"
)
// main demonstrates Go's basic numeric types: sizes, conversions,
// overflow truncation, and fmt verb formatting for integers and floats.
// (Comments translated to English; printed strings left unchanged.)
func main() {
	var num int = 10
	fmt.Printf("num = %v num 是 %T", num, num)
	fmt.Println()
	var num2 = 12
	// Size in bytes of the platform-dependent int type.
	fmt.Println(unsafe.Sizeof(num2))
	var a1 int16 = 10
	var a2 int32 = 12
	// Mixed-width arithmetic requires an explicit conversion.
	var a3 = int32(a1) + a2
	fmt.Println(a3)
	// 130 does not fit in int8: the conversion truncates (wraps to -126).
	var n1 int16 = 130
	fmt.Println(int8(n1))
	fmt.Println("不同类型的输出")
	var number = 17
	// Default format
	fmt.Printf("%v\n", number)
	// Decimal
	fmt.Printf("%d\n", number)
	// Octal
	fmt.Printf("%o\n", number)
	// Binary
	fmt.Printf("%b\n", number)
	// Hexadecimal
	fmt.Printf("%x\n", number)
	fmt.Println("浮点类型")
	var pi = math.Pi
	// Print a float with the default 6 decimal places
	fmt.Printf("%f\n", pi)
	// Print a float with 2 decimal places
	fmt.Printf("%.2f\n", pi)
}
|
// package main
// import (
// "fmt"
// "log"
// "net/http"
// "github.com/gorilla/mux"
// "github.com/jinzhu/gorm"
// _ "github.com/jinzhu/gorm/dialects/postgres"
// "github.com/rs/cors"
// )
// var db *gorm.DB
// var err error
// type Admindetails struct {
// gorm.Model
// Name string
// Email string
// Pass string
// Roll string
// }
// type Busdetails struct {
// gorm.Model
// Start string
// End string
// Name string
// Desc string
// Type string
// Wifi string
// Water string
// Refreshments string
// Capacity string
// Fare string
// Img string
// }
// // type Account struct {
// // gorm.Model
// // Email string
// // Pass string
// // }
// func InitialMigration() {
// db, err = gorm.Open("postgres", "port=5432 user=postgres dbname=user password=55021007 sslmode=disable")
// if err != nil {
// fmt.Println(err.Error())
// panic("Failed to connect")
// } else {
// fmt.Println("Connected successfully")
// }
// defer db.Close()
// db.AutoMigrate(&Admindetails{})
// db.AutoMigrate(&Busdetails{})
// // db.AutoMigrate(&Account{})
// }
// func helloworld(w http.ResponseWriter, r *http.Request) {
// fmt.Fprintf(w, "Helloworld")
// }
// func handleRequests() {
// myRouter := mux.NewRouter().StrictSlash(true)
// myRouter.HandleFunc("/", helloworld).Methods("GET")
// myRouter.HandleFunc("/admindetails", Allusers).Methods("GET")
// myRouter.HandleFunc("/admindetails/{Name}/{Email}/{Pass}/{Roll}", Newuser).Methods("POST")
// // myRouter.HandleFunc("/admindetails/{Email}", Deleteuser).Methods("DELETE")
// // myRouter.HandleFunc("/admindetails/{Name}/{Email}/{Pass}/{roll}", Updateuser).Methods("PUT")
// myRouter.HandleFunc("/busdetails", Allbus).Methods("GET")
// myRouter.HandleFunc("/busdetails/{Start}/{End}/{Name}/{Desc}/{Type}/{Wifi}/{Water}/{Refreshments}/{Capacity}/{Fare}/{Img}", Newbus).Methods("POST")
// // myRouter.HandleFunc("/busdetails/{Name}", Deletebus).Methods("DELETE")
// // myRouter.HandleFunc("/busdetails/{Name}/{Start}/{End}", Updatebus).Methods("PUT")
// // myRouter.HandleFunc("/account", Alladmin).Methods("GET")
// // myRouter.HandleFunc("/account/{Email}/{Pass}", Newadmin).Methods("POST")
// log.Fatal(http.ListenAndServe(":8000", cors.Default().Handler(myRouter)))
// }
// func main1() {
// // fmt.Println("Started")
// InitialMigration()
// handleRequests()
// }
|
package middlewares
import (
"github.com/valyala/fasthttp"
)
// LogRequest provides trace logging for all requests: a trace line when
// the request arrives and another, with the response status code, after
// the wrapped handler returns.
func LogRequest(next fasthttp.RequestHandler) fasthttp.RequestHandler {
	return func(ctx *fasthttp.RequestCtx) {
		logger := NewRequestLogger(&AutheliaCtx{RequestCtx: ctx})
		logger.Trace("Request hit")
		next(ctx)
		logger.Tracef("Replied (status=%d)", ctx.Response.StatusCode())
	}
}
|
// Copyright (c) 2020 TomoChain
package services
import (
"context"
"encoding/hex"
"github.com/tomochain/tomochain"
"github.com/tomochain/tomochain-rosetta-gateway/common"
tc "github.com/tomochain/tomochain-rosetta-gateway/tomochain-client"
tomochaincommon "github.com/tomochain/tomochain/common"
"math/big"
"github.com/coinbase/rosetta-sdk-go/server"
"github.com/coinbase/rosetta-sdk-go/types"
"github.com/golang/protobuf/proto"
"github.com/spf13/cast"
"github.com/tomochain/tomochain/crypto"
)
// constructionAPIService implements the Rosetta Construction API on top
// of a TomoChain client.
type constructionAPIService struct {
	client tc.TomoChainClient // chain access for validation, metadata, and submission
}

// NewConstructionAPIService creates a new instance of an ConstructionAPIService.
func NewConstructionAPIService(client tc.TomoChainClient) server.ConstructionAPIServicer {
	return &constructionAPIService{
		client: client,
	}
}
// ConstructionCombine implements the /construction/combine endpoint: it
// is meant to merge an unsigned transaction with a caller-supplied
// secp256k1 signature into a signed transaction.
//
// NOTE(review): this implementation is incomplete. `hash` (the decoded
// unsigned transaction) and the recovered R/S/V components are never
// used, the actual signing step is still a TODO, and `msg` is a string
// passed to hex.EncodeToString (which takes []byte) — this function
// does not compile as written.
func (s *constructionAPIService) ConstructionCombine(
	ctx context.Context,
	request *types.ConstructionCombineRequest,
) (*types.ConstructionCombineResponse, *types.Error) {
	if terr := common.ValidateNetworkIdentifier(ctx, s.client, request.NetworkIdentifier); terr != nil {
		return nil, terr
	}
	hash, err := hex.DecodeString(request.UnsignedTransaction)
	if err != nil {
		terr := common.ErrInvalidInputParam
		terr.Message += err.Error()
		return nil, terr
	}
	// Exactly one 65-byte signature (R || S || recovery id) is expected.
	if len(request.Signatures) != 1 {
		terr := common.ErrInvalidInputParam
		terr.Message += "need exact 1 signature"
		return nil, terr
	}
	rawSig := request.Signatures[0].Bytes
	if len(rawSig) != 65 {
		terr := common.ErrInvalidInputParam
		terr.Message += "invalid signature length"
		return nil, terr
	}
	// +27 converts the recovery id into the Ethereum-style V value.
	R := new(big.Int).SetBytes(rawSig[:32])
	S := new(big.Int).SetBytes(rawSig[32:64])
	V := new(big.Int).SetBytes([]byte{rawSig[64] + 27})
	// TODO: sign transaction
	msg := ""
	return &types.ConstructionCombineResponse{
		SignedTransaction: hex.EncodeToString(msg),
	}, nil
}
// ConstructionDerive implements the /construction/derive endpoint: it
// derives a TomoChain address from a secp256k1 public key by taking the
// last 20 bytes of Keccak256 over the key without its format prefix.
func (s *constructionAPIService) ConstructionDerive(
	ctx context.Context,
	request *types.ConstructionDeriveRequest,
) (*types.ConstructionDeriveResponse, *types.Error) {
	if terr := common.ValidateNetworkIdentifier(ctx, s.client, request.NetworkIdentifier); terr != nil {
		return nil, terr
	}
	if len(request.PublicKey.Bytes) == 0 || request.PublicKey.CurveType != types.Secp256k1 {
		terr := common.ErrInvalidInputParam
		terr.Message += "unsupported public key type"
		return nil, terr
	}
	// rawPub[1:] skips the leading format byte of the uncompressed key.
	// NOTE(review): the length of rawPub (expected 65 for an uncompressed
	// key) is not validated here.
	rawPub := request.PublicKey.Bytes
	addr := tomochaincommon.BytesToAddress(crypto.Keccak256(rawPub[1:])[12:])
	return &types.ConstructionDeriveResponse{
		Address: addr.String(),
	}, nil
}
// ConstructionHash implements the /construction/hash endpoint: it
// decodes a hex-encoded signed transaction and returns its hash.
//
// NOTE(review): `hash.Hash256b` is not defined or imported in this file
// (it looks like a leftover from the IoTeX gateway this was ported
// from), and the TransactionIdentifier literal uses an unkeyed field
// (go vet: should be Hash: ...). This function does not compile as-is.
func (s *constructionAPIService) ConstructionHash(
	ctx context.Context,
	request *types.ConstructionHashRequest,
) (*types.TransactionIdentifierResponse, *types.Error) {
	if terr := common.ValidateNetworkIdentifier(ctx, s.client, request.NetworkIdentifier); terr != nil {
		return nil, terr
	}
	tran, err := hex.DecodeString(request.SignedTransaction)
	if err != nil {
		terr := common.ErrInvalidInputParam
		terr.Message += "invalid signed transaction format: " + err.Error()
		return nil, terr
	}
	h := hash.Hash256b(tran)
	return &types.TransactionIdentifierResponse{
		TransactionIdentifier: &types.TransactionIdentifier{
			hex.EncodeToString(h[:]),
		},
	}, nil
}
// metadataInputOptions collects the optional knobs accepted by the
// /construction/metadata endpoint. Pointer fields are nil when the
// caller did not supply the option.
type metadataInputOptions struct {
	senderAddress string
	gasLimit      *uint64
	gasPrice      *uint64
	maxFee        *big.Int
	feeMultiplier *float64
	typ           common.TransactionLogType
}
// parseMetadataInputOptions builds a tomochain.CallMsg from the untyped
// options map supplied to /construction/metadata.
//
// Required options:
//   - sender (string): address of sender
//   - to (string): destination address
//   - gas_limit (uint64): gas limit of the transaction
//
// Optional (defaulted when absent):
//   - gas_price (uint64): gas price in wei
//   - value (uint64): amount to transfer
//   - data ([]byte): call data (method selector + arguments for contracts)
//
// NOTE(review): the type assertions below (sender.(string),
// gasLimit.(uint64), ...) panic if a caller supplies a differently-typed
// value (e.g. float64 from JSON decoding) — confirm upstream guarantees
// these exact types, or convert defensively.
func parseMetadataInputOptions(options map[string]interface{}) (tomochain.CallMsg, *types.Error) {
	sender, ok := options[common.METADATA_SENDER]
	if !ok {
		terr := common.ErrInvalidInputParam
		terr.Message += "empty sender address"
		return tomochain.CallMsg{}, terr
	}
	to, ok := options[common.METADATA_RECIPIENT]
	if !ok {
		terr := common.ErrInvalidInputParam
		// Fixed copy-paste: this check is about the recipient, not the sender.
		terr.Message += "empty recipient address"
		return tomochain.CallMsg{}, terr
	}
	destinationAddress := tomochaincommon.HexToAddress(to.(string))
	gasLimit, ok := options[common.METADATA_GAS_LIMIT]
	if !ok {
		terr := common.ErrInvalidInputParam
		terr.Message += "empty gasLimit"
		return tomochain.CallMsg{}, terr
	}
	gasPrice, ok := options[common.METADATA_GAS_PRICE]
	if !ok {
		gasPrice = big.NewInt(tomochaincommon.DefaultMinGasPrice)
	}
	v, ok := options[common.METADATA_TRANSACTION_VALUE]
	if !ok {
		v = big.NewInt(0)
	}
	d, ok := options[common.METADATA_TRANSACTION_DATA]
	if !ok {
		d = []byte{}
	}
	// NOTE(review): the gas_price/value defaults above are *big.Int but are
	// asserted to uint64 below — a request relying on those defaults will
	// panic. Verify the expected option types and unify them.
	callMsg := tomochain.CallMsg{
		From:            tomochaincommon.HexToAddress(sender.(string)),
		To:              &destinationAddress,
		Gas:             gasLimit.(uint64),
		GasPrice:        new(big.Int).SetUint64(gasPrice.(uint64)),
		Value:           new(big.Int).SetUint64(v.(uint64)),
		Data:            d.([]byte),
		BalanceTokenFee: nil,
	}
	return callMsg, nil
}
// ConstructionMetadata implements the /construction/metadata endpoint:
// it parses the request options into a call message, estimates gas,
// fetches the sender account, and returns account metadata augmented
// with gas limit/price plus a suggested fee (estimated gas * gas price).
func (s *constructionAPIService) ConstructionMetadata(
	ctx context.Context,
	request *types.ConstructionMetadataRequest,
) (*types.ConstructionMetadataResponse, *types.Error) {
	if terr := common.ValidateNetworkIdentifier(ctx, s.client, request.NetworkIdentifier); terr != nil {
		return nil, terr
	}
	callMsg, terr := parseMetadataInputOptions(request.Options)
	if terr != nil {
		return nil, terr
	}
	estimateGas, err := s.client.EstimateGas(ctx, callMsg)
	if err != nil {
		return nil, common.ErrUnableToEstimateGas
	}
	account, err := s.client.GetAccount(ctx, nil, callMsg.From.String())
	if err != nil {
		terr := common.ErrUnableToGetAccount
		terr.Message += err.Error()
		return nil, terr
	}
	// NOTE(review): assumes account.Metadata is a non-nil map — writing to a
	// nil map would panic; confirm GetAccount always initialises it.
	meta := account.Metadata
	meta[common.METADATA_GAS_LIMIT] = callMsg.Gas
	meta[common.METADATA_GAS_PRICE] = callMsg.GasPrice
	suggestedFee := new(big.Int).Mul(new(big.Int).SetUint64(estimateGas), callMsg.GasPrice)
	return &types.ConstructionMetadataResponse{
		Metadata: meta,
		SuggestedFee: []*types.Amount{
			{
				Value:    suggestedFee.String(),
				Currency: common.TomoNativeCoin,
			},
		},
	}, nil
}
// ConstructionParse implements the /construction/parse endpoint: it
// decodes a (signed or unsigned) transaction back into Rosetta
// operations and metadata.
//
// NOTE(review): this body is an unported leftover from the IoTeX
// gateway: `iotextypes`, `checkIoAction`, and `ioActionToOps` are not
// defined or imported here, and ValidateNetworkIdentifier is called
// without the s.client argument used by every other endpoint in this
// file. This function does not compile as-is.
func (s *constructionAPIService) ConstructionParse(
	ctx context.Context,
	request *types.ConstructionParseRequest,
) (*types.ConstructionParseResponse, *types.Error) {
	if terr := common.ValidateNetworkIdentifier(ctx, request.NetworkIdentifier); terr != nil {
		return nil, terr
	}
	tran, err := hex.DecodeString(request.Transaction)
	if err != nil {
		return nil, common.ErrUnableToParseTx
	}
	act := &iotextypes.Action{}
	if err := proto.Unmarshal(tran, act); err != nil {
		return nil, common.ErrUnableToParseTx
	}
	sender, terr := s.checkIoAction(act, request.Signed)
	if terr != nil {
		return nil, terr
	}
	ops, meta := s.ioActionToOps(sender, act)
	resp := &types.ConstructionParseResponse{
		Operations: ops,
		Metadata:   meta,
	}
	// Signed transactions expose the recovered sender as the signer.
	if request.Signed {
		resp.Signers = []string{sender}
	}
	return resp, nil
}
// ConstructionPayloads implements the /construction/payloads endpoint:
// it converts Rosetta operations into an unsigned transaction plus the
// signing payload (hash of the transaction core) for the sender.
//
// NOTE(review): like ConstructionParse, this is an unported IoTeX
// leftover: `s.checkOperationAndMeta`, `s.opsToIoAction`,
// `hash.Hash256b`, and `SignatureType` are not defined or imported
// here, and ValidateNetworkIdentifier is called without s.client. This
// function does not compile as-is.
func (s *constructionAPIService) ConstructionPayloads(
	ctx context.Context,
	request *types.ConstructionPayloadsRequest,
) (*types.ConstructionPayloadsResponse, *types.Error) {
	if err := common.ValidateNetworkIdentifier(ctx, request.NetworkIdentifier); err != nil {
		return nil, err
	}
	if err := s.checkOperationAndMeta(request.Operations, request.Metadata, true); err != nil {
		return nil, err
	}
	act := s.opsToIoAction(request.Operations, request.Metadata)
	msg, err := proto.Marshal(act)
	if err != nil {
		terr := common.ErrServiceInternal
		terr.Message += err.Error()
		return nil, terr
	}
	unsignedTx := hex.EncodeToString(msg)
	core, err := proto.Marshal(act.GetCore())
	if err != nil {
		terr := common.ErrServiceInternal
		terr.Message += err.Error()
		return nil, terr
	}
	// The signing payload is the hash of the serialized transaction core.
	h := hash.Hash256b(core)
	return &types.ConstructionPayloadsResponse{
		UnsignedTransaction: unsignedTx,
		Payloads: []*types.SigningPayload{
			&types.SigningPayload{
				Address:       request.Operations[0].Account.Address,
				Bytes:         h[:],
				SignatureType: SignatureType,
			},
		},
	}, nil
}
// ConstructionPreprocess implements the /construction/preprocess
// endpoint: it extracts from the request's operations the options that
// /construction/metadata will need (sender, recipient, amount, and
// optional gas settings).
//
// NOTE(review): Operations[0] and Operations[1] are indexed without a
// length check — a request with fewer than two operations panics.
func (s *constructionAPIService) ConstructionPreprocess(
	ctx context.Context,
	request *types.ConstructionPreprocessRequest,
) (*types.ConstructionPreprocessResponse, *types.Error) {
	if err := common.ValidateNetworkIdentifier(ctx, s.client, request.NetworkIdentifier); err != nil {
		return nil, err
	}
	// Operation 0 is assumed to be the sender side, operation 1 the
	// recipient side of the transfer.
	options := make(map[string]interface{})
	options[common.METADATA_SENDER] = request.Operations[0].Account.Address
	options[common.METADATA_TRANSACTION_TYPE] = request.Operations[0].Type
	options["amount"] = request.Operations[1].Amount.Value
	options["symbol"] = request.Operations[1].Amount.Currency.Symbol
	options["decimals"] = request.Operations[1].Amount.Currency.Decimals
	options[common.METADATA_RECIPIENT] = request.Operations[1].Account.Address
	// XXX it is unclear where these meta data should be
	if request.Metadata[common.METADATA_GAS_LIMIT] != nil {
		options[common.METADATA_GAS_LIMIT] = request.Metadata[common.METADATA_GAS_LIMIT]
	}
	if request.Metadata[common.METADATA_GAS_PRICE] != nil {
		options[common.METADATA_GAS_PRICE] = request.Metadata[common.METADATA_GAS_PRICE]
	}
	return &types.ConstructionPreprocessResponse{
		Options: options,
	}, nil
}
// ConstructionSubmit implements the /construction/submit endpoint.
//
// The signed transaction arrives hex-encoded; it is decoded, broadcast via
// the underlying client, and the resulting transaction hash is returned.
func (s *constructionAPIService) ConstructionSubmit(
	ctx context.Context,
	request *types.ConstructionSubmitRequest,
) (*types.TransactionIdentifierResponse, *types.Error) {
	if terr := common.ValidateNetworkIdentifier(ctx, s.client, request.NetworkIdentifier); terr != nil {
		return nil, terr
	}
	tran, err := hex.DecodeString(request.SignedTransaction)
	if err != nil {
		terr := common.ErrInvalidInputParam
		terr.Message += err.Error()
		return nil, terr
	}
	txID, err := s.client.SubmitTx(ctx, tran)
	if err != nil {
		terr := common.ErrUnableToSubmitTx
		terr.Message += err.Error()
		return nil, terr
	}
	txIdentifier := &types.TransactionIdentifier{Hash: txID}
	return &types.TransactionIdentifierResponse{
		TransactionIdentifier: txIdentifier,
	}, nil
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//748. Shortest Completing Word
//Find the minimum length word from a given dictionary words, which has all the letters from the string licensePlate. Such a word is said to complete the given string licensePlate
//Here, for letters we ignore case. For example, "P" on the licensePlate still matches "p" on the word.
//It is guaranteed an answer exists. If there are multiple answers, return the one that occurs first in the array.
//The license plate might have the same letter occurring multiple times. For example, given a licensePlate of "PP", the word "pair" does not complete the licensePlate, but the word "supper" does.
//Example 1:
//Input: licensePlate = "1s3 PSt", words = ["step", "steps", "stripe", "stepple"]
//Output: "steps"
//Explanation: The smallest length word that contains the letters "S", "P", "S", and "T".
//Note that the answer is not "step", because the letter "s" must occur in the word twice.
//Also note that we ignored case for the purposes of comparing whether a letter exists in the word.
//Example 2:
//Input: licensePlate = "1s3 456", words = ["looks", "pest", "stew", "show"]
//Output: "pest"
//Explanation: There are 3 smallest-length words that contain the letter "s".
//We return the one that occurred first.
//Note:
//licensePlate will be a string with length in range [1, 7].
//licensePlate will contain digits, spaces, or letters (uppercase or lowercase).
//words will have a length in the range [10, 1000].
//Every words[i] will consist of lowercase letters, and have length in range [1, 15].
//func shortestCompletingWord(licensePlate string, words []string) string {
//}
// Time Is Money |
package controllers
import (
"github.com/goadesign/goa"
)
var (
	// duplicatedEmailErr is the goa error class (code 1000) for
	// duplicate-email failures; presumably returned when a client tries
	// to reuse an existing email address — confirm against callers.
	duplicatedEmailErr = goa.NewErrorClass("duplicated_email", 1000)
)
// unexpectedError logs err on the service's logger and returns a generic
// internal error so implementation details are not leaked to API clients.
func unexpectedError(service *goa.Service, err error) error {
	service.LogError("Unexpected error", "err", err)
	internal := goa.ErrInternal("unexpected error")
	return internal
}
|
// Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package legacyrpc
import (
"bytes"
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"sync"
"time"
"github.com/btcsuite/btcd/btcec/v2/ecdsa"
"github.com/btcsuite/btcd/btcjson"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/rpcclient"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcwallet/chain"
"github.com/btcsuite/btcwallet/waddrmgr"
"github.com/btcsuite/btcwallet/wallet"
"github.com/btcsuite/btcwallet/wallet/txrules"
"github.com/btcsuite/btcwallet/wtxmgr"
)
const (
	// defaultAccountName is the name of the wallet's default account,
	// used by handlers when a request does not specify an account.
	defaultAccountName = "default"
)
// confirms returns the number of confirmations for a transaction in a block at
// height txHeight (or -1 for an unconfirmed tx) given the chain height
// curHeight.
func confirms(txHeight, curHeight int32) int32 {
	// Unmined transactions, and those in blocks past the current tip,
	// have no confirmations.
	if txHeight == -1 || txHeight > curHeight {
		return 0
	}
	// A transaction in the tip block has exactly one confirmation.
	return curHeight - txHeight + 1
}
// requestHandler is a handler function to handle an unmarshaled and parsed
// request into a marshalable response. If the error is a *btcjson.RPCError
// or any of the above special error classes, the server will respond with
// the JSON-RPC appropriate error code. All other errors use the wallet
// catch-all error code, btcjson.ErrRPCWallet.
type requestHandler func(interface{}, *wallet.Wallet) (interface{}, error)

// requestHandlerChainRequired is a requestHandler that also takes a
// parameter for the consensus RPC client, for methods that cannot be
// serviced by the wallet alone.
type requestHandlerChainRequired func(interface{}, *wallet.Wallet, *chain.RPCClient) (interface{}, error)
// rpcHandlers maps each supported JSON-RPC method name to the function that
// services it. Exactly one of handler or handlerWithChain is set per entry.
var rpcHandlers = map[string]struct {
	handler          requestHandler
	handlerWithChain requestHandlerChainRequired

	// Function variables cannot be compared against anything but nil, so
	// use a boolean to record whether help generation is necessary. This
	// is used by the tests to ensure that help can be generated for every
	// implemented method.
	//
	// A single map and this bool is here is used rather than several maps
	// for the unimplemented handlers so every method has exactly one
	// handler function.
	noHelp bool
}{
	// Reference implementation wallet methods (implemented)
	"addmultisigaddress":     {handler: addMultiSigAddress},
	"createmultisig":         {handler: createMultiSig},
	"dumpprivkey":            {handler: dumpPrivKey},
	"getaccount":             {handler: getAccount},
	"getaccountaddress":      {handler: getAccountAddress},
	"getaddressesbyaccount":  {handler: getAddressesByAccount},
	"getbalance":             {handler: getBalance},
	"getbestblockhash":       {handler: getBestBlockHash},
	"getblockcount":          {handler: getBlockCount},
	"getinfo":                {handlerWithChain: getInfo},
	"getnewaddress":          {handler: getNewAddress},
	"getrawchangeaddress":    {handler: getRawChangeAddress},
	"getreceivedbyaccount":   {handler: getReceivedByAccount},
	"getreceivedbyaddress":   {handler: getReceivedByAddress},
	"gettransaction":         {handler: getTransaction},
	"help":                   {handler: helpNoChainRPC, handlerWithChain: helpWithChainRPC},
	"importprivkey":          {handler: importPrivKey},
	"keypoolrefill":          {handler: keypoolRefill},
	"listaccounts":           {handler: listAccounts},
	"listlockunspent":        {handler: listLockUnspent},
	"listreceivedbyaccount":  {handler: listReceivedByAccount},
	"listreceivedbyaddress":  {handler: listReceivedByAddress},
	"listsinceblock":         {handlerWithChain: listSinceBlock},
	"listtransactions":       {handler: listTransactions},
	"listunspent":            {handler: listUnspent},
	"lockunspent":            {handler: lockUnspent},
	"sendfrom":               {handlerWithChain: sendFrom},
	"sendmany":               {handler: sendMany},
	"sendtoaddress":          {handler: sendToAddress},
	"settxfee":               {handler: setTxFee},
	"signmessage":            {handler: signMessage},
	"signrawtransaction":     {handlerWithChain: signRawTransaction},
	"validateaddress":        {handler: validateAddress},
	"verifymessage":          {handler: verifyMessage},
	"walletlock":             {handler: walletLock},
	"walletpassphrase":       {handler: walletPassphrase},
	"walletpassphrasechange": {handler: walletPassphraseChange},

	// Reference implementation methods (still unimplemented)
	"backupwallet":         {handler: unimplemented, noHelp: true},
	"dumpwallet":           {handler: unimplemented, noHelp: true},
	"getwalletinfo":        {handler: unimplemented, noHelp: true},
	"importwallet":         {handler: unimplemented, noHelp: true},
	"listaddressgroupings": {handler: unimplemented, noHelp: true},

	// Reference methods which can't be implemented by btcwallet due to
	// design decision differences
	"encryptwallet": {handler: unsupported, noHelp: true},
	"move":          {handler: unsupported, noHelp: true},
	"setaccount":    {handler: unsupported, noHelp: true},

	// Extensions to the reference client JSON-RPC API
	"createnewaccount": {handler: createNewAccount},
	"getbestblock":     {handler: getBestBlock},
	// This was an extension but the reference implementation added it as
	// well, but with a different API (no account parameter). It's listed
	// here because it hasn't been updated to use the reference
	// implementation's API.
	"getunconfirmedbalance":   {handler: getUnconfirmedBalance},
	"listaddresstransactions": {handler: listAddressTransactions},
	"listalltransactions":     {handler: listAllTransactions},
	"renameaccount":           {handler: renameAccount},
	"walletislocked":          {handler: walletIsLocked},
}
// unimplemented handles an unimplemented RPC request with the
// appropriate error.
func unimplemented(interface{}, *wallet.Wallet) (interface{}, error) {
	rpcErr := &btcjson.RPCError{
		Code:    btcjson.ErrRPCUnimplemented,
		Message: "Method unimplemented",
	}
	return nil, rpcErr
}
// unsupported handles a standard bitcoind RPC request which is
// unsupported by btcwallet due to design differences.
func unsupported(interface{}, *wallet.Wallet) (interface{}, error) {
	rpcErr := &btcjson.RPCError{
		Code:    -1,
		Message: "Request unsupported by btcwallet",
	}
	return nil, rpcErr
}
// lazyHandler is a closure over a requestHandler or passthrough request with
// the RPC server's wallet and chain server variables as part of the closure
// context. Invoking it performs the actual request handling.
type lazyHandler func() (interface{}, *btcjson.RPCError)
// lazyApplyHandler looks up the best request handler func for the method,
// returning a closure that will execute it with the (required) wallet and
// (optional) consensus RPC server. If no handlers are found and the
// chainClient is not nil, the returned handler performs RPC passthrough.
func lazyApplyHandler(request *btcjson.Request, w *wallet.Wallet, chainClient chain.Interface) lazyHandler {
	handlerData, ok := rpcHandlers[request.Method]
	// Prefer the chain-requiring handler when both a wallet and a chain
	// client are available.
	if ok && handlerData.handlerWithChain != nil && w != nil && chainClient != nil {
		return func() (interface{}, *btcjson.RPCError) {
			cmd, err := btcjson.UnmarshalCmd(request)
			if err != nil {
				return nil, btcjson.ErrRPCInvalidRequest
			}
			// Only the btcd-backed RPC client supports these
			// handlers; other chain.Interface implementations
			// cannot service them.
			switch client := chainClient.(type) {
			case *chain.RPCClient:
				resp, err := handlerData.handlerWithChain(cmd,
					w, client)
				if err != nil {
					return nil, jsonError(err)
				}
				return resp, nil
			default:
				return nil, &btcjson.RPCError{
					Code:    -1,
					Message: "Chain RPC is inactive",
				}
			}
		}
	}
	// Wallet-only handler.
	if ok && handlerData.handler != nil && w != nil {
		return func() (interface{}, *btcjson.RPCError) {
			cmd, err := btcjson.UnmarshalCmd(request)
			if err != nil {
				return nil, btcjson.ErrRPCInvalidRequest
			}
			resp, err := handlerData.handler(cmd, w)
			if err != nil {
				return nil, jsonError(err)
			}
			return resp, nil
		}
	}
	// Fallback to RPC passthrough
	return func() (interface{}, *btcjson.RPCError) {
		if chainClient == nil {
			return nil, &btcjson.RPCError{
				Code:    -1,
				Message: "Chain RPC is inactive",
			}
		}
		switch client := chainClient.(type) {
		case *chain.RPCClient:
			resp, err := client.RawRequest(request.Method,
				request.Params)
			if err != nil {
				return nil, jsonError(err)
			}
			return &resp, nil
		default:
			return nil, &btcjson.RPCError{
				Code:    -1,
				Message: "Chain RPC is inactive",
			}
		}
	}
}
// makeResponse makes the JSON-RPC response struct for the result and error
// returned by a requestHandler. The returned response is not ready for
// marshaling and sending off to a client, but must be marshaled by the
// caller first.
func makeResponse(id, result interface{}, err error) btcjson.Response {
	idPtr := idPointer(id)
	if err != nil {
		return btcjson.Response{
			ID:    idPtr,
			Error: jsonError(err),
		}
	}
	resultBytes, err := json.Marshal(result)
	if err != nil {
		// The handler produced a value that cannot be marshaled;
		// report it as an internal error rather than dropping it.
		return btcjson.Response{
			ID: idPtr,
			Error: &btcjson.RPCError{
				Code:    btcjson.ErrRPCInternal.Code,
				Message: "Unexpected error marshalling result",
			},
		}
	}
	return btcjson.Response{
		ID:     idPtr,
		Result: json.RawMessage(resultBytes),
	}
}
// jsonError creates a JSON-RPC error from the Go error.
func jsonError(err error) *btcjson.RPCError {
	if err == nil {
		return nil
	}

	// RPC errors (by pointer or by value) are passed through untouched.
	if rpcErr, ok := err.(*btcjson.RPCError); ok {
		return rpcErr
	}
	if rpcErr, ok := err.(btcjson.RPCError); ok {
		return &rpcErr
	}

	// Map known error classes to their JSON-RPC codes, defaulting to the
	// wallet catch-all code.
	code := btcjson.ErrRPCWallet
	switch e := err.(type) {
	case DeserializationError:
		code = btcjson.ErrRPCDeserialization
	case InvalidParameterError:
		code = btcjson.ErrRPCInvalidParameter
	case ParseError:
		code = btcjson.ErrRPCParse.Code
	case waddrmgr.ManagerError:
		if e.ErrorCode == waddrmgr.ErrWrongPassphrase {
			code = btcjson.ErrRPCWalletPassphraseIncorrect
		}
	}
	return &btcjson.RPCError{
		Code:    code,
		Message: err.Error(),
	}
}
// makeMultiSigScript is a helper function to combine common logic for
// AddMultiSig and CreateMultiSig. It resolves each key string to a pubkey
// address and builds the nRequired-of-len(keys) multisig script.
func makeMultiSigScript(w *wallet.Wallet, keys []string, nRequired int) ([]byte, error) {
	keysesPrecious := make([]*btcutil.AddressPubKey, len(keys))

	// The address list will be made up either of addresses (pubkey hash),
	// for which we need to look up the keys in wallet, straight pubkeys,
	// or a mixture of the two.
	for i, a := range keys {
		// try to parse as pubkey address
		a, err := decodeAddress(a, w.ChainParams())
		if err != nil {
			return nil, err
		}

		switch addr := a.(type) {
		case *btcutil.AddressPubKey:
			keysesPrecious[i] = addr
		default:
			// Pubkey-hash address: look the public key up in the
			// wallet and convert it to a pubkey address.
			pubKey, err := w.PubKeyForAddress(addr)
			if err != nil {
				return nil, err
			}
			pubKeyAddr, err := btcutil.NewAddressPubKey(
				pubKey.SerializeCompressed(), w.ChainParams())
			if err != nil {
				return nil, err
			}
			keysesPrecious[i] = pubKeyAddr
		}
	}

	return txscript.MultiSigScript(keysesPrecious, nRequired)
}
// addMultiSigAddress handles an addmultisigaddress request by adding a
// multisig address to the given wallet.
func addMultiSigAddress(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.AddMultisigAddressCmd)

	// If an account is specified, ensure that is the imported account.
	if cmd.Account != nil && *cmd.Account != waddrmgr.ImportedAddrAccountName {
		return nil, &ErrNotImportedAccount
	}

	// Decode every key string before touching the wallet.
	secp256k1Addrs := make([]btcutil.Address, 0, len(cmd.Keys))
	for _, key := range cmd.Keys {
		addr, err := decodeAddress(key, w.ChainParams())
		if err != nil {
			return nil, ParseError{err}
		}
		secp256k1Addrs = append(secp256k1Addrs, addr)
	}

	script, err := w.MakeMultiSigScript(secp256k1Addrs, cmd.NRequired)
	if err != nil {
		return nil, err
	}

	p2shAddr, err := w.ImportP2SHRedeemScript(script)
	if err != nil {
		return nil, err
	}
	return p2shAddr.EncodeAddress(), nil
}
// createMultiSig handles an createmultisig request by returning a
// multisig address for the given inputs.
func createMultiSig(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.CreateMultisigCmd)

	script, err := makeMultiSigScript(w, cmd.Keys, cmd.NRequired)
	if err != nil {
		return nil, ParseError{err}
	}

	p2shAddr, err := btcutil.NewAddressScriptHash(script, w.ChainParams())
	if err != nil {
		// The script above is known valid, so this shouldn't happen.
		return nil, err
	}

	return btcjson.CreateMultiSigResult{
		Address:      p2shAddr.EncodeAddress(),
		RedeemScript: hex.EncodeToString(script),
	}, nil
}
// dumpPrivKey handles a dumpprivkey request with the private key
// for a single address, or an appropriate error if the wallet
// is locked.
func dumpPrivKey(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.DumpPrivKeyCmd)

	addr, err := decodeAddress(cmd.Address, w.ChainParams())
	if err != nil {
		return nil, err
	}

	wifKey, err := w.DumpWIFPrivateKey(addr)
	if waddrmgr.IsError(err, waddrmgr.ErrLocked) {
		// The address exists, but the private key cannot be read
		// while the wallet is locked.
		return nil, &ErrWalletUnlockNeeded
	}
	return wifKey, err
}
// getAddressesByAccount handles a getaddressesbyaccount request by returning
// all addresses for an account, or an error if the requested account does
// not exist.
func getAddressesByAccount(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.GetAddressesByAccountCmd)

	account, err := w.AccountNumber(waddrmgr.KeyScopeBIP0044, cmd.Account)
	if err != nil {
		return nil, err
	}
	addrs, err := w.AccountAddresses(account)
	if err != nil {
		return nil, err
	}

	// Encode each address into its string form for the response.
	encoded := make([]string, 0, len(addrs))
	for _, addr := range addrs {
		encoded = append(encoded, addr.EncodeAddress())
	}
	return encoded, nil
}
// getBalance handles a getbalance request by returning the balance for an
// account (wallet), or an error if the requested account does not
// exist.
func getBalance(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.GetBalanceCmd)

	accountName := "*"
	if cmd.Account != nil {
		accountName = *cmd.Account
	}

	// The wildcard account name means the balance across all accounts.
	if accountName == "*" {
		balance, err := w.CalculateBalance(int32(*cmd.MinConf))
		if err != nil {
			return nil, err
		}
		return balance.ToBTC(), nil
	}

	account, err := w.AccountNumber(waddrmgr.KeyScopeBIP0044, accountName)
	if err != nil {
		return nil, err
	}
	bals, err := w.CalculateAccountBalances(account, int32(*cmd.MinConf))
	if err != nil {
		return nil, err
	}
	return bals.Spendable.ToBTC(), nil
}
// getBestBlock handles a getbestblock request by returning a JSON object
// with the height and hash of the most recently processed block.
func getBestBlock(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	syncedTo := w.Manager.SyncedTo()
	return &btcjson.GetBestBlockResult{
		Hash:   syncedTo.Hash.String(),
		Height: syncedTo.Height,
	}, nil
}
// getBestBlockHash handles a getbestblockhash request by returning the hash
// of the most recently processed block.
func getBestBlockHash(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	syncedTo := w.Manager.SyncedTo()
	return syncedTo.Hash.String(), nil
}
// getBlockCount handles a getblockcount request by returning the chain height
// of the most recently processed block.
func getBlockCount(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	syncedTo := w.Manager.SyncedTo()
	return syncedTo.Height, nil
}
// getInfo handles a getinfo request by returning a structure containing
// information about the current state of btcwallet, merged with chain
// information obtained from the consensus RPC server.
func getInfo(icmd interface{}, w *wallet.Wallet, chainClient *chain.RPCClient) (interface{}, error) {
	// Call down to btcd for all of the information in this command known
	// by them.
	info, err := chainClient.GetInfo()
	if err != nil {
		return nil, err
	}

	// Balance with at least one confirmation.
	bal, err := w.CalculateBalance(1)
	if err != nil {
		return nil, err
	}

	// TODO(davec): This should probably have a database version as opposed
	// to using the manager version.
	info.WalletVersion = int32(waddrmgr.LatestMgrVersion)
	info.Balance = bal.ToBTC()
	info.PaytxFee = float64(txrules.DefaultRelayFeePerKb)
	// We don't set the following since they don't make much sense in the
	// wallet architecture:
	//  - unlocked_until
	//  - errors

	return info, nil
}
// decodeAddress decodes s into an address for the given network parameters,
// returning a btcjson invalid-address error if decoding fails or the
// address belongs to a different network.
func decodeAddress(s string, params *chaincfg.Params) (btcutil.Address, error) {
	addr, err := btcutil.DecodeAddress(s, params)
	if err != nil {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCInvalidAddressOrKey,
			Message: fmt.Sprintf("Invalid address %q: decode failed with %#q", s, err),
		}
	}
	if !addr.IsForNet(params) {
		return nil, &btcjson.RPCError{
			Code: btcjson.ErrRPCInvalidAddressOrKey,
			Message: fmt.Sprintf("Invalid address %q: not intended for use on %s",
				addr, params.Name),
		}
	}
	return addr, nil
}
// getAccount handles a getaccount request by returning the account name
// associated with a single address.
func getAccount(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.GetAccountCmd)

	addr, err := decodeAddress(cmd.Address, w.ChainParams())
	if err != nil {
		return nil, err
	}

	// Resolve the owning account, then its human-readable name.
	account, err := w.AccountOfAddress(addr)
	if err != nil {
		return nil, &ErrAddressNotInWallet
	}
	name, err := w.AccountName(waddrmgr.KeyScopeBIP0044, account)
	if err != nil {
		return nil, &ErrAccountNameNotFound
	}
	return name, nil
}
// getAccountAddress handles a getaccountaddress by returning the most
// recently-created chained address that has not yet been used (does not yet
// appear in the blockchain, or any tx that has arrived in the btcd mempool).
// If the most recently-requested address has been used, a new address (the
// next chained address in the keypool) is used. This can fail if the keypool
// runs out (and will return btcjson.ErrRPCWalletKeypoolRanOut if that happens).
func getAccountAddress(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.GetAccountAddressCmd)
	account, err := w.AccountNumber(waddrmgr.KeyScopeBIP0044, cmd.Account)
	if err != nil {
		return nil, err
	}
	addr, err := w.CurrentAddress(account, waddrmgr.KeyScopeBIP0044)
	if err != nil {
		return nil, err
	}
	// err is known to be nil at this point; return an explicit nil so the
	// success path doesn't return a stale variable.
	return addr.EncodeAddress(), nil
}
// getUnconfirmedBalance handles a getunconfirmedbalance extension request
// by returning the current unconfirmed balance of an account.
func getUnconfirmedBalance(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.GetUnconfirmedBalanceCmd)

	accountName := defaultAccountName
	if cmd.Account != nil {
		accountName = *cmd.Account
	}

	account, err := w.AccountNumber(waddrmgr.KeyScopeBIP0044, accountName)
	if err != nil {
		return nil, err
	}
	bals, err := w.CalculateAccountBalances(account, 1)
	if err != nil {
		return nil, err
	}

	// Unconfirmed is the total minus the spendable portion.
	unconfirmed := bals.Total - bals.Spendable
	return unconfirmed.ToBTC(), nil
}
// importPrivKey handles an importprivkey request by parsing
// a WIF-encoded private key and adding it to an account.
func importPrivKey(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.ImportPrivKeyCmd)

	// Ensure that private keys are only imported to the correct account.
	//
	// Yes, Label is the account name.
	if cmd.Label != nil && *cmd.Label != waddrmgr.ImportedAddrAccountName {
		return nil, &ErrNotImportedAccount
	}

	wif, err := btcutil.DecodeWIF(cmd.PrivKey)
	if err != nil {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCInvalidAddressOrKey,
			Message: "WIF decode failed: " + err.Error(),
		}
	}
	// Reject keys encoded for a different network.
	if !wif.IsForNet(w.ChainParams()) {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCInvalidAddressOrKey,
			Message: "Key is not intended for " + w.ChainParams().Name,
		}
	}

	// Import the private key, handling any errors.
	_, err = w.ImportPrivateKey(waddrmgr.KeyScopeBIP0044, wif, nil, *cmd.Rescan)
	switch {
	case waddrmgr.IsError(err, waddrmgr.ErrDuplicateAddress):
		// Do not return duplicate key errors to the client.
		return nil, nil
	case waddrmgr.IsError(err, waddrmgr.ErrLocked):
		return nil, &ErrWalletUnlockNeeded
	}

	return nil, err
}
// keypoolRefill handles the keypoolrefill command. Since we handle the keypool
// automatically this does nothing since refilling is never manually required.
func keypoolRefill(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	// Intentionally a no-op: the wallet manages its own address pool.
	return nil, nil
}
// createNewAccount handles a createnewaccount request by creating and
// returning a new account. If the last account has no transaction history
// as per BIP 0044 a new account cannot be created so an error will be returned.
func createNewAccount(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.CreateNewAccountCmd)

	// The wildcard * is reserved by the rpc server with the special meaning
	// of "all accounts", so disallow naming accounts to this string.
	if cmd.Account == "*" {
		return nil, &ErrReservedAccountName
	}

	_, err := w.NextAccount(waddrmgr.KeyScopeBIP0044, cmd.Account)
	if !waddrmgr.IsError(err, waddrmgr.ErrLocked) {
		// err is nil on success and passed through otherwise.
		return nil, err
	}
	return nil, &btcjson.RPCError{
		Code: btcjson.ErrRPCWalletUnlockNeeded,
		Message: "Creating an account requires the wallet to be unlocked. " +
			"Enter the wallet passphrase with walletpassphrase to unlock",
	}
}
// renameAccount handles a renameaccount request by renaming an account.
// If the account does not exist an appropriate error will be returned.
func renameAccount(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.RenameAccountCmd)

	// The wildcard * is reserved by the rpc server with the special meaning
	// of "all accounts", so disallow naming accounts to this string.
	if cmd.NewAccount == "*" {
		return nil, &ErrReservedAccountName
	}

	// Resolve the old account first so a missing account is reported
	// before attempting the rename.
	oldAccount, err := w.AccountNumber(waddrmgr.KeyScopeBIP0044, cmd.OldAccount)
	if err != nil {
		return nil, err
	}
	return nil, w.RenameAccount(waddrmgr.KeyScopeBIP0044, oldAccount, cmd.NewAccount)
}
// getNewAddress handles a getnewaddress request by returning a new
// address for an account. If the account does not exist an appropriate
// error is returned.
// TODO: Follow BIP 0044 and warn if number of unused addresses exceeds
// the gap limit.
func getNewAddress(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.GetNewAddressCmd)

	acctName := defaultAccountName
	if cmd.Account != nil {
		acctName = *cmd.Account
	}

	// Map the optional address type onto a key scope; "legacy" is the
	// default BIP0044 scope.
	scope := waddrmgr.KeyScopeBIP0044
	if cmd.AddressType != nil {
		switch *cmd.AddressType {
		case "legacy": // default if unset
		case "p2sh-segwit":
			scope = waddrmgr.KeyScopeBIP0049Plus
		case "bech32":
			scope = waddrmgr.KeyScopeBIP0084
		default:
			return nil, &ErrAddressTypeUnknown
		}
	}

	account, err := w.AccountNumber(scope, acctName)
	if err != nil {
		return nil, err
	}
	addr, err := w.NewAddress(account, scope)
	if err != nil {
		return nil, err
	}

	// Return the new payment address string.
	return addr.EncodeAddress(), nil
}
// getRawChangeAddress handles a getrawchangeaddress request by creating
// and returning a new change address for an account.
//
// Note: bitcoind allows specifying the account as an optional parameter,
// but ignores the parameter.
func getRawChangeAddress(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.GetRawChangeAddressCmd)

	acctName := defaultAccountName
	if cmd.Account != nil {
		acctName = *cmd.Account
	}

	// Map the optional address type onto a key scope; "legacy" is the
	// default BIP0044 scope.
	scope := waddrmgr.KeyScopeBIP0044
	if cmd.AddressType != nil {
		switch *cmd.AddressType {
		case "legacy": // default if unset
		case "p2sh-segwit":
			scope = waddrmgr.KeyScopeBIP0049Plus
		case "bech32":
			scope = waddrmgr.KeyScopeBIP0084
		default:
			return nil, &ErrAddressTypeUnknown
		}
	}

	account, err := w.AccountNumber(scope, acctName)
	if err != nil {
		return nil, err
	}
	addr, err := w.NewChangeAddress(account, scope)
	if err != nil {
		return nil, err
	}

	// Return the new payment address string.
	return addr.EncodeAddress(), nil
}
// getReceivedByAccount handles a getreceivedbyaccount request by returning
// the total amount received by addresses of an account.
func getReceivedByAccount(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.GetReceivedByAccountCmd)

	account, err := w.AccountNumber(waddrmgr.KeyScopeBIP0044, cmd.Account)
	if err != nil {
		return nil, err
	}

	// TODO: This is more inefficient that it could be, but the entire
	// algorithm is already dominated by reading every transaction in the
	// wallet's history.
	results, err := w.TotalReceivedForAccounts(
		waddrmgr.KeyScopeBIP0044, int32(*cmd.MinConf),
	)
	if err != nil {
		return nil, err
	}

	// The imported account is placed last in the results slice, so its
	// number cannot be used directly as an index.
	acctIndex := int(account)
	if account == waddrmgr.ImportedAddrAccount {
		acctIndex = len(results) - 1
	}
	return results[acctIndex].TotalReceived.ToBTC(), nil
}
// getReceivedByAddress handles a getreceivedbyaddress request by returning
// the total amount received by a single address.
func getReceivedByAddress(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.GetReceivedByAddressCmd)

	addr, err := decodeAddress(cmd.Address, w.ChainParams())
	if err != nil {
		return nil, err
	}

	received, err := w.TotalReceivedForAddr(addr, int32(*cmd.MinConf))
	if err != nil {
		return nil, err
	}
	return received.ToBTC(), nil
}
// getTransaction handles a gettransaction request by returning details about
// a single transaction saved by wallet.
func getTransaction(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.GetTransactionCmd)
	txHash, err := chainhash.NewHashFromStr(cmd.Txid)
	if err != nil {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCDecodeHexString,
			Message: "Transaction hash string decode failed: " + err.Error(),
		}
	}
	details, err := wallet.UnstableAPI(w).TxDetails(txHash)
	if err != nil {
		return nil, err
	}
	// A nil details with a nil error means the tx is unknown to the wallet.
	if details == nil {
		return nil, &ErrNoTransactionInfo
	}
	syncBlock := w.Manager.SyncedTo()
	// TODO: The serialized transaction is already in the DB, so
	// reserializing can be avoided here.
	var txBuf bytes.Buffer
	txBuf.Grow(details.MsgTx.SerializeSize())
	err = details.MsgTx.Serialize(&txBuf)
	if err != nil {
		return nil, err
	}
	// TODO: Add a "generated" field to this result type. "generated":true
	// is only added if the transaction is a coinbase.
	ret := btcjson.GetTransactionResult{
		TxID:            cmd.Txid,
		Hex:             hex.EncodeToString(txBuf.Bytes()),
		Time:            details.Received.Unix(),
		TimeReceived:    details.Received.Unix(),
		WalletConflicts: []string{}, // Not saved
		//Generated: blockchain.IsCoinBaseTx(&details.MsgTx),
	}
	// Mined transactions carry block info and a confirmation count;
	// unmined transactions use height -1.
	if details.Block.Height != -1 {
		ret.BlockHash = details.Block.Hash.String()
		ret.BlockTime = details.Block.Time.Unix()
		ret.Confirmations = int64(confirms(details.Block.Height, syncBlock.Height))
	}
	var (
		debitTotal  btcutil.Amount
		creditTotal btcutil.Amount // Excludes change
		fee         btcutil.Amount
		feeF64      float64
	)
	for _, deb := range details.Debits {
		debitTotal += deb.Amount
	}
	for _, cred := range details.Credits {
		if !cred.Change {
			creditTotal += cred.Amount
		}
	}
	// Fee can only be determined if every input is a debit.
	if len(details.Debits) == len(details.MsgTx.TxIn) {
		var outputTotal btcutil.Amount
		for _, output := range details.MsgTx.TxOut {
			outputTotal += btcutil.Amount(output.Value)
		}
		fee = debitTotal - outputTotal
		feeF64 = fee.ToBTC()
	}
	if len(details.Debits) == 0 {
		// Credits must be set later, but since we know the full length
		// of the details slice, allocate it with the correct cap.
		ret.Details = make([]btcjson.GetTransactionDetailsResult, 0, len(details.Credits))
	} else {
		// The first detail entry summarizes the send side of the tx.
		ret.Details = make([]btcjson.GetTransactionDetailsResult, 1, len(details.Credits)+1)
		ret.Details[0] = btcjson.GetTransactionDetailsResult{
			// Fields left zeroed:
			//   InvolvesWatchOnly
			//   Account
			//   Address
			//   Vout
			//
			// TODO(jrick): Address and Vout should always be set,
			// but we're doing the wrong thing here by not matching
			// core.  Instead, gettransaction should only be adding
			// details for transaction outputs, just like
			// listtransactions (but using the short result format).
			Category: "send",
			Amount:   (-debitTotal).ToBTC(), // negative since it is a send
			Fee:      &feeF64,
		}
		ret.Fee = feeF64
	}
	credCat := wallet.RecvCategory(details, syncBlock.Height, w.ChainParams()).String()
	for _, cred := range details.Credits {
		// Change is ignored.
		if cred.Change {
			continue
		}
		// Best-effort resolution of the credited address and its
		// account name; failures leave the fields empty.
		var address string
		var accountName string
		_, addrs, _, err := txscript.ExtractPkScriptAddrs(
			details.MsgTx.TxOut[cred.Index].PkScript, w.ChainParams())
		if err == nil && len(addrs) == 1 {
			addr := addrs[0]
			address = addr.EncodeAddress()
			account, err := w.AccountOfAddress(addr)
			if err == nil {
				name, err := w.AccountName(waddrmgr.KeyScopeBIP0044, account)
				if err == nil {
					accountName = name
				}
			}
		}
		ret.Details = append(ret.Details, btcjson.GetTransactionDetailsResult{
			// Fields left zeroed:
			//   InvolvesWatchOnly
			//   Fee
			Account:  accountName,
			Address:  address,
			Category: credCat,
			Amount:   cred.Amount.ToBTC(),
			Vout:     cred.Index,
		})
	}
	ret.Amount = creditTotal.ToBTC()
	return ret, nil
}
// These generators create the following global variables in this package:
//
//   var localeHelpDescs map[string]func() map[string]string
//   var requestUsages string
//
// localeHelpDescs maps from locale strings (e.g. "en_US") to a function that
// builds a map of help texts for each RPC server method.  This prevents help
// text maps for every locale map from being rooted and created during init.
// Instead, the appropriate function is looked up when help text is first needed
// using the current locale and saved to the global below for further reuse.
//
// requestUsages contains single line usages for every supported request,
// separated by newlines.  It is set during init.  These usages are used for all
// locales.
//
//go:generate go run ../../internal/rpchelp/genrpcserverhelp.go legacyrpc
//go:generate gofmt -w rpcserverhelp.go

// helpDescs caches the per-method help text for the current locale; it is
// populated lazily on the first help request (see the comment above).
var helpDescs map[string]string
var helpDescsMu sync.Mutex // Help may execute concurrently, so synchronize access.
// helpWithChainRPC handles the help request when the RPC server has been
// associated with a consensus RPC client.  The additional RPC client is used to
// include help messages for methods implemented by the consensus server via RPC
// passthrough.
func helpWithChainRPC(icmd interface{}, w *wallet.Wallet, chainClient *chain.RPCClient) (interface{}, error) {
	// Delegate to the shared help implementation with the chain client.
	return help(icmd, w, chainClient)
}
// helpNoChainRPC handles the help request when the RPC server has not been
// associated with a consensus RPC client. No help messages are included for
// passthrough requests. It is a thin wrapper that forwards to help with a
// nil chain client.
func helpNoChainRPC(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	return help(icmd, w, nil)
}
// help handles the help request by returning one line usage of all available
// methods, or full help for a specific method. The chainClient is optional,
// and this is simply a helper function for the HelpNoChainRPC and
// HelpWithChainRPC handlers.
func help(icmd interface{}, _ *wallet.Wallet, chainClient *chain.RPCClient) (interface{}, error) {
	cmd := icmd.(*btcjson.HelpCmd)

	// btcd returns different help messages depending on the kind of
	// connection the client is using. Only methods available to HTTP POST
	// clients are available to be used by wallet clients, even though
	// wallet itself is a websocket client to btcd. Therefore, create a
	// POST client as needed.
	//
	// Returns nil if chainClient is currently nil or there is an error
	// creating the client.
	//
	// This is hacky and is probably better handled by exposing help usage
	// texts in a non-internal btcd package.
	postClient := func() *rpcclient.Client {
		if chainClient == nil {
			return nil
		}
		c, err := chainClient.POSTClient()
		if err != nil {
			return nil
		}
		return c
	}

	if cmd.Command == nil || *cmd.Command == "" {
		// Prepend chain server usage if it is available.
		usages := requestUsages
		client := postClient()
		if client != nil {
			rawChainUsage, err := client.RawRequest("help", nil)
			var chainUsage string
			if err == nil {
				_ = json.Unmarshal([]byte(rawChainUsage), &chainUsage)
			}
			if chainUsage != "" {
				usages = "Chain server usage:\n\n" + chainUsage + "\n\n" +
					"Wallet server usage (overrides chain requests):\n\n" +
					requestUsages
			}
		}
		return usages, nil
	}

	// BUG FIX: the mutex must be locked before the unlock is deferred.
	// The previous order (defer Unlock, then Lock) happened to work in
	// straight-line code but would panic with "unlock of unlocked mutex"
	// if anything between the two statements panicked, and it reads
	// backwards. Lock-then-defer-Unlock is the correct idiom.
	helpDescsMu.Lock()
	defer helpDescsMu.Unlock()
	if helpDescs == nil {
		// TODO: Allow other locales to be set via config or determine
		// this from environment variables. For now, hardcode US
		// English.
		helpDescs = localeHelpDescs["en_US"]()
	}
	helpText, ok := helpDescs[*cmd.Command]
	if ok {
		return helpText, nil
	}

	// Return the chain server's detailed help if possible. The command
	// name is wrapped in double quotes to form a JSON string parameter.
	var chainHelp string
	client := postClient()
	if client != nil {
		param := make([]byte, len(*cmd.Command)+2)
		param[0] = '"'
		copy(param[1:], *cmd.Command)
		param[len(param)-1] = '"'
		rawChainHelp, err := client.RawRequest("help", []json.RawMessage{param})
		if err == nil {
			_ = json.Unmarshal([]byte(rawChainHelp), &chainHelp)
		}
	}
	if chainHelp != "" {
		return chainHelp, nil
	}
	return nil, &btcjson.RPCError{
		Code:    btcjson.ErrRPCInvalidParameter,
		Message: fmt.Sprintf("No help for method '%s'", *cmd.Command),
	}
}
// listAccounts handles a listaccounts request by returning a map of account
// names to their balances (in BTC) under the BIP0044 key scope, subject to
// the request's minimum confirmation count.
func listAccounts(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.ListAccountsCmd)

	results, err := w.AccountBalances(waddrmgr.KeyScopeBIP0044, int32(*cmd.MinConf))
	if err != nil {
		return nil, err
	}

	// The map is marshaled into a JSON object in the response.
	balances := make(map[string]float64, len(results))
	for _, res := range results {
		balances[res.AccountName] = res.AccountBalance.ToBTC()
	}
	return balances, nil
}
// listLockUnspent handles a listlockunspent request by returning a slice of
// all currently locked outpoints, as tracked by the wallet.
func listLockUnspent(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	return w.LockedOutpoints(), nil
}
// listReceivedByAccount handles a listreceivedbyaccount request by returning
// a slice of objects, each one containing:
//  "account": the receiving account;
//  "amount": total amount received by the account;
//  "confirmations": number of confirmations of the most recent transaction.
// It takes two parameters:
//  "minconf": minimum number of confirmations to consider a transaction -
//             default: one;
//  "includeempty": whether or not to include addresses that have no
//                  transactions - default: false.
func listReceivedByAccount(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.ListReceivedByAccountCmd)

	totals, err := w.TotalReceivedForAccounts(
		waddrmgr.KeyScopeBIP0044, int32(*cmd.MinConf),
	)
	if err != nil {
		return nil, err
	}

	out := make([]btcjson.ListReceivedByAccountResult, len(totals))
	for i, total := range totals {
		out[i] = btcjson.ListReceivedByAccountResult{
			Account:       total.AccountName,
			Amount:        total.TotalReceived.ToBTC(),
			Confirmations: uint64(total.LastConfirmation),
		}
	}
	return out, nil
}
// listReceivedByAddress handles a listreceivedbyaddress request by returning
// a slice of objects, each one containing:
// "account": the account of the receiving address;
// "address": the receiving address;
// "amount": total amount received by the address;
// "confirmations": number of confirmations of the most recent transaction.
// It takes two parameters:
// "minconf": minimum number of confirmations to consider a transaction -
// default: one;
// "includeempty": whether or not to include addresses that have no transactions -
// default: false.
func listReceivedByAddress(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.ListReceivedByAddressCmd)
	// Intermediate data for each address.
	type AddrData struct {
		// Total amount received.
		amount btcutil.Amount
		// Number of confirmations of the last transaction.
		confirmations int32
		// Hashes of transactions which include an output paying to the address
		tx []string
		// Account which the address belongs to
		// NOTE(review): this field is never assigned anywhere below, so
		// the Account member of every returned result is always the
		// empty string.
		account string
	}
	syncBlock := w.Manager.SyncedTo()
	// Intermediate data for all addresses.
	allAddrData := make(map[string]AddrData)
	// Create an AddrData entry for each active address in the account.
	// Otherwise we'll just get addresses from transactions later.
	sortedAddrs, err := w.SortedActivePaymentAddresses()
	if err != nil {
		return nil, err
	}
	for _, address := range sortedAddrs {
		// There might be duplicates, just overwrite them.
		allAddrData[address] = AddrData{}
	}
	// With minconf > 0, only transactions mined at or before endHeight can
	// have the required number of confirmations. An endHeight of -1
	// presumably includes unmined transactions — confirm against the
	// RangeTransactions contract.
	minConf := *cmd.MinConf
	var endHeight int32
	if minConf == 0 {
		endHeight = -1
	} else {
		endHeight = syncBlock.Height - int32(minConf) + 1
	}
	err = wallet.UnstableAPI(w).RangeTransactions(0, endHeight, func(details []wtxmgr.TxDetails) (bool, error) {
		// All details in a call share one block, so one confirmation
		// count applies to the whole batch.
		confirmations := confirms(details[0].Block.Height, syncBlock.Height)
		for _, tx := range details {
			for _, cred := range tx.Credits {
				pkScript := tx.MsgTx.TxOut[cred.Index].PkScript
				_, addrs, _, err := txscript.ExtractPkScriptAddrs(
					pkScript, w.ChainParams())
				if err != nil {
					// Non standard script, skip.
					continue
				}
				for _, addr := range addrs {
					addrStr := addr.EncodeAddress()
					addrData, ok := allAddrData[addrStr]
					if ok {
						addrData.amount += cred.Amount
						// Always overwrite confirmations with newer ones.
						addrData.confirmations = confirmations
					} else {
						addrData = AddrData{
							amount: cred.Amount,
							confirmations: confirmations,
						}
					}
					addrData.tx = append(addrData.tx, tx.Hash.String())
					allAddrData[addrStr] = addrData
				}
			}
		}
		// Returning false continues the iteration over all ranges.
		return false, nil
	})
	if err != nil {
		return nil, err
	}
	// Massage address data into output format.
	numAddresses := len(allAddrData)
	ret := make([]btcjson.ListReceivedByAddressResult, numAddresses)
	idx := 0
	for address, addrData := range allAddrData {
		ret[idx] = btcjson.ListReceivedByAddressResult{
			Address: address,
			Amount: addrData.amount.ToBTC(),
			Confirmations: uint64(addrData.confirmations),
			TxIDs: addrData.tx,
			Account: addrData.account,
		}
		idx++
	}
	return ret, nil
}
// listSinceBlock handles a listsinceblock request by returning an array of maps
// with details of sent and received wallet transactions since the given block.
func listSinceBlock(icmd interface{}, w *wallet.Wallet, chainClient *chain.RPCClient) (interface{}, error) {
	cmd := icmd.(*btcjson.ListSinceBlockCmd)
	syncBlock := w.Manager.SyncedTo()
	targetConf := int64(*cmd.TargetConfirmations)
	// For the result we need the block hash for the last block counted
	// in the blockchain due to confirmations. We send this off now so that
	// it can arrive asynchronously while we figure out the rest.
	gbh := chainClient.GetBlockHashAsync(int64(syncBlock.Height) + 1 - targetConf)
	// The listing starts at the block after the one named in the request,
	// or at height 0 when no block hash was supplied.
	var start int32
	if cmd.BlockHash != nil {
		hash, err := chainhash.NewHashFromStr(*cmd.BlockHash)
		if err != nil {
			return nil, DeserializationError{err}
		}
		block, err := chainClient.GetBlockVerboseTx(hash)
		if err != nil {
			return nil, err
		}
		start = int32(block.Height) + 1
	}
	txInfoList, err := w.ListSinceBlock(start, -1, syncBlock.Height)
	if err != nil {
		return nil, err
	}
	// Done with work, get the response.
	blockHash, err := gbh.Receive()
	if err != nil {
		return nil, err
	}
	res := btcjson.ListSinceBlockResult{
		Transactions: txInfoList,
		LastBlock: blockHash.String(),
	}
	return res, nil
}
// listTransactions handles a listtransactions request by returning an
// array of maps with details of sent and received wallet transactions.
func listTransactions(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.ListTransactionsCmd)

	// TODO: ListTransactions does not currently understand the difference
	// between transactions pertaining to one account from another. This
	// will be resolved when wtxmgr is combined with the waddrmgr namespace.
	if cmd.Account == nil || *cmd.Account == "*" {
		return w.ListTransactions(*cmd.From, *cmd.Count)
	}

	// For now, don't bother trying to continue if the user specified an
	// account, since this can't be (easily or efficiently) calculated.
	return nil, &btcjson.RPCError{
		Code:    btcjson.ErrRPCWallet,
		Message: "Transactions are not yet grouped by account",
	}
}
// listAddressTransactions handles a listaddresstransactions request by
// returning an array of maps with details of spent and received wallet
// transactions. The form of the reply is identical to listtransactions,
// but the array elements are limited to transaction details which are
// about the addresses included in the request.
func listAddressTransactions(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.ListAddressTransactionsCmd)

	if cmd.Account != nil && *cmd.Account != "*" {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCInvalidParameter,
			Message: "Listing transactions for addresses may only be done for all accounts",
		}
	}

	// Build the set of hash160s for the requested addresses.
	hash160Map := make(map[string]struct{}, len(cmd.Addresses))
	for _, encoded := range cmd.Addresses {
		addr, err := decodeAddress(encoded, w.ChainParams())
		if err != nil {
			return nil, err
		}
		hash160Map[string(addr.ScriptAddress())] = struct{}{}
	}

	return w.ListAddressTransactions(hash160Map)
}
// listAllTransactions handles a listalltransactions request by returning
// a map with details of sent and received wallet transactions. This is
// similar to ListTransactions, except it takes only a single optional
// argument for the account name and replies with all transactions.
func listAllTransactions(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.ListAllTransactionsCmd)

	// Only the wildcard account is supported.
	if cmd.Account == nil || *cmd.Account == "*" {
		return w.ListAllTransactions()
	}

	return nil, &btcjson.RPCError{
		Code:    btcjson.ErrRPCInvalidParameter,
		Message: "Listing all transactions may only be done for all accounts",
	}
}
// listUnspent handles the listunspent command, returning the wallet's
// unspent outputs within the requested confirmation range.
func listUnspent(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.ListUnspentCmd)

	// Address filtering is no longer supported; reject requests that use it.
	if cmd.Addresses != nil && len(*cmd.Addresses) > 0 {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCInvalidParameter,
			Message: "Filtering by addresses has been deprecated",
		}
	}

	minConf, maxConf := int32(*cmd.MinConf), int32(*cmd.MaxConf)
	return w.ListUnspent(minConf, maxConf, "")
}
// lockUnspent handles the lockunspent command, locking or unlocking the
// listed outpoints. Unlocking with an empty outpoint list clears every lock.
func lockUnspent(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.LockUnspentCmd)

	if cmd.Unlock && len(cmd.Transactions) == 0 {
		w.ResetLockedOutpoints()
		return true, nil
	}

	for _, in := range cmd.Transactions {
		hash, err := chainhash.NewHashFromStr(in.Txid)
		if err != nil {
			return nil, ParseError{err}
		}
		outPoint := wire.OutPoint{Hash: *hash, Index: in.Vout}
		if cmd.Unlock {
			w.UnlockOutpoint(outPoint)
		} else {
			w.LockOutpoint(outPoint)
		}
	}
	return true, nil
}
// makeOutputs creates a slice of transaction outputs from a pair of address
// strings to amounts. This is used to create the outputs to include in newly
// created transactions from a JSON object describing the output destinations
// and amounts.
func makeOutputs(pairs map[string]btcutil.Amount, chainParams *chaincfg.Params) ([]*wire.TxOut, error) {
	outputs := make([]*wire.TxOut, 0, len(pairs))
	for addrStr, amt := range pairs {
		addr, err := btcutil.DecodeAddress(addrStr, chainParams)
		if err != nil {
			// Wrap with %w so callers can inspect the underlying
			// decode error (the %s form discarded the chain).
			return nil, fmt.Errorf("cannot decode address: %w", err)
		}
		pkScript, err := txscript.PayToAddrScript(addr)
		if err != nil {
			return nil, fmt.Errorf("cannot create txout script: %w", err)
		}
		outputs = append(outputs, wire.NewTxOut(int64(amt), pkScript))
	}
	return outputs, nil
}
// sendPairs creates and sends payment transactions.
// It returns the transaction hash in string format upon success.
// All errors are returned in btcjson.RPCError format.
func sendPairs(w *wallet.Wallet, amounts map[string]btcutil.Amount,
	keyScope waddrmgr.KeyScope, account uint32, minconf int32,
	feeSatPerKb btcutil.Amount) (string, error) {

	outputs, err := makeOutputs(amounts, w.ChainParams())
	if err != nil {
		return "", err
	}

	tx, err := w.SendOutputs(
		outputs, &keyScope, account, minconf, feeSatPerKb,
		wallet.CoinSelectionLargest, "",
	)
	// Map wallet errors onto the RPC error space.
	switch {
	case err == nil:
		// Success; fall through to return the hash.
	case err == txrules.ErrAmountNegative:
		return "", ErrNeedPositiveAmount
	case waddrmgr.IsError(err, waddrmgr.ErrLocked):
		return "", &ErrWalletUnlockNeeded
	default:
		// Pass through errors that are already RPC errors; wrap
		// anything else as an internal error.
		if _, ok := err.(btcjson.RPCError); ok {
			return "", err
		}
		return "", &btcjson.RPCError{
			Code:    btcjson.ErrRPCInternal.Code,
			Message: err.Error(),
		}
	}

	txHashStr := tx.TxHash().String()
	log.Infof("Successfully sent transaction %v", txHashStr)
	return txHashStr, nil
}
// isNilOrEmpty reports whether s is a nil pointer or points to the empty
// string.
func isNilOrEmpty(s *string) bool {
	if s == nil {
		return true
	}
	return len(*s) == 0
}
// sendFrom handles a sendfrom RPC request by creating a new transaction
// spending unspent transaction outputs for a wallet to another payment
// address. Leftover inputs not sent to the payment address or a fee for
// the miner are sent back to a new address in the wallet. Upon success,
// the TxID for the created transaction is returned.
func sendFrom(icmd interface{}, w *wallet.Wallet, chainClient *chain.RPCClient) (interface{}, error) {
	cmd := icmd.(*btcjson.SendFromCmd)

	// Transaction comments are not yet supported. Error instead of
	// pretending to save them.
	if !isNilOrEmpty(cmd.Comment) || !isNilOrEmpty(cmd.CommentTo) {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCUnimplemented,
			Message: "Transaction comments are not yet supported",
		}
	}

	account, err := w.AccountNumber(
		waddrmgr.KeyScopeBIP0044, cmd.FromAccount,
	)
	if err != nil {
		return nil, err
	}

	// Reject negative signed-integer parameters before doing any work.
	if cmd.Amount < 0 {
		return nil, ErrNeedPositiveAmount
	}
	minConf := int32(*cmd.MinConf)
	if minConf < 0 {
		return nil, ErrNeedPositiveMinconf
	}

	amt, err := btcutil.NewAmount(cmd.Amount)
	if err != nil {
		return nil, err
	}

	// A single destination address receives the entire amount.
	return sendPairs(w, map[string]btcutil.Amount{cmd.ToAddress: amt},
		waddrmgr.KeyScopeBIP0044, account, minConf,
		txrules.DefaultRelayFeePerKb)
}
// sendMany handles a sendmany RPC request by creating a new transaction
// spending unspent transaction outputs for a wallet to any number of
// payment addresses. Leftover inputs not sent to the payment address
// or a fee for the miner are sent back to a new address in the wallet.
// Upon success, the TxID for the created transaction is returned.
func sendMany(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.SendManyCmd)

	// Transaction comments are not yet supported. Error instead of
	// pretending to save them.
	if !isNilOrEmpty(cmd.Comment) {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCUnimplemented,
			Message: "Transaction comments are not yet supported",
		}
	}

	account, err := w.AccountNumber(waddrmgr.KeyScopeBIP0044, cmd.FromAccount)
	if err != nil {
		return nil, err
	}

	// Check that minconf is positive.
	minConf := int32(*cmd.MinConf)
	if minConf < 0 {
		return nil, ErrNeedPositiveMinconf
	}

	// Convert each destination's floating-point amount to btcutil.Amount.
	pairs := make(map[string]btcutil.Amount, len(cmd.Amounts))
	for addr, v := range cmd.Amounts {
		amt, err := btcutil.NewAmount(v)
		if err != nil {
			return nil, err
		}
		pairs[addr] = amt
	}

	return sendPairs(w, pairs, waddrmgr.KeyScopeBIP0044, account, minConf,
		txrules.DefaultRelayFeePerKb)
}
// sendToAddress handles a sendtoaddress RPC request by creating a new
// transaction spending unspent transaction outputs for a wallet to another
// payment address. Leftover inputs not sent to the payment address or a fee
// for the miner are sent back to a new address in the wallet. Upon success,
// the TxID for the created transaction is returned.
func sendToAddress(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.SendToAddressCmd)

	// Transaction comments are not yet supported. Error instead of
	// pretending to save them.
	if !isNilOrEmpty(cmd.Comment) || !isNilOrEmpty(cmd.CommentTo) {
		return nil, &btcjson.RPCError{
			Code:    btcjson.ErrRPCUnimplemented,
			Message: "Transaction comments are not yet supported",
		}
	}

	amt, err := btcutil.NewAmount(cmd.Amount)
	if err != nil {
		return nil, err
	}
	// Check that signed integer parameters are positive.
	if amt < 0 {
		return nil, ErrNeedPositiveAmount
	}

	// sendtoaddress always spends from the default account with a minconf
	// of one; this matches bitcoind.
	return sendPairs(w, map[string]btcutil.Amount{cmd.Address: amt},
		waddrmgr.KeyScopeBIP0044, waddrmgr.DefaultAccountNum, 1,
		txrules.DefaultRelayFeePerKb)
}
// setTxFee sets the transaction fee per kilobyte added to transactions.
//
// NOTE(review): the amount is only range-checked here; no wallet fee
// setting is updated before returning success. Confirm whether this
// no-op behavior is intentional.
func setTxFee(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.SetTxFeeCmd)
	// Check that amount is not negative.
	if cmd.Amount < 0 {
		return nil, ErrNeedPositiveAmount
	}
	// A boolean true result is returned upon success.
	return true, nil
}
// signMessage signs the given message with the private key for the given
// address, returning the compact signature base64-encoded.
func signMessage(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.SignMessageCmd)

	addr, err := decodeAddress(cmd.Address, w.ChainParams())
	if err != nil {
		return nil, err
	}
	privKey, err := w.PrivKeyForAddress(addr)
	if err != nil {
		return nil, err
	}

	// Double-SHA256 the standard Bitcoin signed-message envelope around
	// the message. Buffer writes cannot fail, so errors are discarded.
	var msgBuf bytes.Buffer
	_ = wire.WriteVarString(&msgBuf, 0, "Bitcoin Signed Message:\n")
	_ = wire.WriteVarString(&msgBuf, 0, cmd.Message)
	digest := chainhash.DoubleHashB(msgBuf.Bytes())

	sig, err := ecdsa.SignCompact(privKey, digest, true)
	if err != nil {
		return nil, err
	}
	return base64.StdEncoding.EncodeToString(sig), nil
}
// signRawTransaction handles the signrawtransaction command.
//
// The transaction to sign is given as a hex-encoded serialization. Input
// scripts may be supplied explicitly via cmd.Inputs; any remaining input
// scripts are fetched from the consensus RPC server. If private keys are
// supplied, only those keys are used for signing; otherwise keys are looked
// up from the wallet.
func signRawTransaction(icmd interface{}, w *wallet.Wallet, chainClient *chain.RPCClient) (interface{}, error) {
	cmd := icmd.(*btcjson.SignRawTransactionCmd)
	serializedTx, err := decodeHexStr(cmd.RawTx)
	if err != nil {
		return nil, err
	}
	var tx wire.MsgTx
	err = tx.Deserialize(bytes.NewBuffer(serializedTx))
	if err != nil {
		e := errors.New("TX decode failed")
		return nil, DeserializationError{e}
	}
	// Map the sighash flag string onto the corresponding txscript constant.
	var hashType txscript.SigHashType
	switch *cmd.Flags {
	case "ALL":
		hashType = txscript.SigHashAll
	case "NONE":
		hashType = txscript.SigHashNone
	case "SINGLE":
		hashType = txscript.SigHashSingle
	case "ALL|ANYONECANPAY":
		hashType = txscript.SigHashAll | txscript.SigHashAnyOneCanPay
	case "NONE|ANYONECANPAY":
		hashType = txscript.SigHashNone | txscript.SigHashAnyOneCanPay
	case "SINGLE|ANYONECANPAY":
		hashType = txscript.SigHashSingle | txscript.SigHashAnyOneCanPay
	default:
		e := errors.New("invalid sighash parameter")
		return nil, InvalidParameterError{e}
	}
	// TODO: really we probably should look these up with btcd anyway to
	// make sure that they match the blockchain if present.
	inputs := make(map[wire.OutPoint][]byte)
	scripts := make(map[string][]byte)
	var cmdInputs []btcjson.RawTxInput
	if cmd.Inputs != nil {
		cmdInputs = *cmd.Inputs
	}
	for _, rti := range cmdInputs {
		inputHash, err := chainhash.NewHashFromStr(rti.Txid)
		if err != nil {
			return nil, DeserializationError{err}
		}
		script, err := decodeHexStr(rti.ScriptPubKey)
		if err != nil {
			return nil, err
		}
		// redeemScript is only actually used iff the user provided
		// private keys. In which case, it is used to get the scripts
		// for signing. If the user did not provide keys then we always
		// get scripts from the wallet.
		// Empty strings are ok for this one and hex.DecodeString will
		// DTRT.
		if cmd.PrivKeys != nil && len(*cmd.PrivKeys) != 0 {
			redeemScript, err := decodeHexStr(rti.RedeemScript)
			if err != nil {
				return nil, err
			}
			addr, err := btcutil.NewAddressScriptHash(redeemScript,
				w.ChainParams())
			if err != nil {
				return nil, DeserializationError{err}
			}
			scripts[addr.String()] = redeemScript
		}
		inputs[wire.OutPoint{
			Hash: *inputHash,
			Index: rti.Vout,
		}] = script
	}
	// Now we go and look for any inputs that we were not provided by
	// querying btcd with getrawtransaction. We queue up a bunch of async
	// requests and will wait for replies after we have checked the rest of
	// the arguments.
	requested := make(map[wire.OutPoint]rpcclient.FutureGetTxOutResult)
	for _, txIn := range tx.TxIn {
		// Did we get this outpoint from the arguments?
		if _, ok := inputs[txIn.PreviousOutPoint]; ok {
			continue
		}
		// Asynchronously request the output script.
		requested[txIn.PreviousOutPoint] = chainClient.GetTxOutAsync(
			&txIn.PreviousOutPoint.Hash, txIn.PreviousOutPoint.Index,
			true)
	}
	// Parse list of private keys, if present. If there are any keys here
	// they are the keys that we may use for signing. If empty we will
	// use any keys known to us already.
	var keys map[string]*btcutil.WIF
	if cmd.PrivKeys != nil {
		keys = make(map[string]*btcutil.WIF)
		for _, key := range *cmd.PrivKeys {
			wif, err := btcutil.DecodeWIF(key)
			if err != nil {
				return nil, DeserializationError{err}
			}
			if !wif.IsForNet(w.ChainParams()) {
				s := "key network doesn't match wallet's"
				return nil, DeserializationError{errors.New(s)}
			}
			addr, err := btcutil.NewAddressPubKey(wif.SerializePubKey(),
				w.ChainParams())
			if err != nil {
				return nil, DeserializationError{err}
			}
			keys[addr.EncodeAddress()] = wif
		}
	}
	// We have checked the rest of the args. now we can collect the async
	// txs. TODO: If we don't mind the possibility of wasting work we could
	// move waiting to the following loop and be slightly more asynchronous.
	for outPoint, resp := range requested {
		result, err := resp.Receive()
		if err != nil {
			return nil, err
		}
		script, err := hex.DecodeString(result.ScriptPubKey.Hex)
		if err != nil {
			return nil, err
		}
		inputs[outPoint] = script
	}
	// All args collected. Now we can sign all the inputs that we can.
	// `complete' denotes that we successfully signed all outputs and that
	// all scripts will run to completion. This is returned as part of the
	// reply.
	signErrs, err := w.SignTransaction(&tx, hashType, inputs, keys, scripts)
	if err != nil {
		return nil, err
	}
	var buf bytes.Buffer
	buf.Grow(tx.SerializeSize())
	// All returned errors (not OOM, which panics) encountered during
	// bytes.Buffer writes are unexpected.
	if err = tx.Serialize(&buf); err != nil {
		panic(err)
	}
	// Convert per-input signing errors into the JSON result shape.
	signErrors := make([]btcjson.SignRawTransactionError, 0, len(signErrs))
	for _, e := range signErrs {
		input := tx.TxIn[e.InputIndex]
		signErrors = append(signErrors, btcjson.SignRawTransactionError{
			TxID: input.PreviousOutPoint.Hash.String(),
			Vout: input.PreviousOutPoint.Index,
			ScriptSig: hex.EncodeToString(input.SignatureScript),
			Sequence: input.Sequence,
			Error: e.Error.Error(),
		})
	}
	return btcjson.SignRawTransactionResult{
		Hex: hex.EncodeToString(buf.Bytes()),
		Complete: len(signErrors) == 0,
		Errors: signErrors,
	}, nil
}
// validateAddress handles the validateaddress command. The result always
// includes IsValid; additional fields are filled in only when the address
// decodes and, further, when the wallet knows the address ("ismine").
func validateAddress(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.ValidateAddressCmd)
	result := btcjson.ValidateAddressWalletResult{}
	addr, err := decodeAddress(cmd.Address, w.ChainParams())
	if err != nil {
		// Use result zero value (IsValid=false).
		return result, nil
	}
	// We could put whether or not the address is a script here,
	// by checking the type of "addr", however, the reference
	// implementation only puts that information if the script is
	// "ismine", and we follow that behaviour.
	result.Address = addr.EncodeAddress()
	result.IsValid = true
	ainfo, err := w.AddressInfo(addr)
	if err != nil {
		if waddrmgr.IsError(err, waddrmgr.ErrAddressNotFound) {
			// No additional information available about the address.
			return result, nil
		}
		return nil, err
	}
	// The address lookup was successful which means there is further
	// information about it available and it is "mine".
	result.IsMine = true
	acctName, err := w.AccountName(
		waddrmgr.KeyScopeBIP0044, ainfo.InternalAccount(),
	)
	if err != nil {
		return nil, &ErrAccountNameNotFound
	}
	result.Account = acctName
	switch ma := ainfo.(type) {
	case waddrmgr.ManagedPubKeyAddress:
		result.IsCompressed = ma.Compressed()
		result.PubKey = ma.ExportPubKey()
	case waddrmgr.ManagedScriptAddress:
		result.IsScript = true
		// The script is only available if the manager is unlocked, so
		// just break out now if there is an error.
		script, err := ma.Script()
		if err != nil {
			break
		}
		result.Hex = hex.EncodeToString(script)
		// This typically shouldn't fail unless an invalid script was
		// imported. However, if it fails for any reason, there is no
		// further information available, so just set the script type
		// a non-standard and break out now.
		class, addrs, reqSigs, err := txscript.ExtractPkScriptAddrs(
			script, w.ChainParams())
		if err != nil {
			result.Script = txscript.NonStandardTy.String()
			break
		}
		addrStrings := make([]string, len(addrs))
		for i, a := range addrs {
			addrStrings[i] = a.EncodeAddress()
		}
		result.Addresses = addrStrings
		// Multi-signature scripts also provide the number of required
		// signatures.
		result.Script = class.String()
		if class == txscript.MultiSigTy {
			result.SigsRequired = int32(reqSigs)
		}
	}
	return result, nil
}
// verifyMessage handles the verifymessage command by verifying the provided
// compact signature for the given address and message. It returns true only
// when the public key recovered from the signature corresponds to the given
// address.
func verifyMessage(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.VerifyMessageCmd)
	addr, err := decodeAddress(cmd.Address, w.ChainParams())
	if err != nil {
		return nil, err
	}
	// decode base64 signature
	sig, err := base64.StdEncoding.DecodeString(cmd.Signature)
	if err != nil {
		return nil, err
	}
	// Validate the signature - this just shows that it was valid at all.
	// we will compare it with the key next.
	var buf bytes.Buffer
	_ = wire.WriteVarString(&buf, 0, "Bitcoin Signed Message:\n")
	_ = wire.WriteVarString(&buf, 0, cmd.Message)
	expectedMessageHash := chainhash.DoubleHashB(buf.Bytes())
	pk, wasCompressed, err := ecdsa.RecoverCompact(sig, expectedMessageHash)
	if err != nil {
		return nil, err
	}
	// Serialize the recovered key in the same form (compressed or not)
	// that the signature committed to.
	var serializedPubKey []byte
	if wasCompressed {
		serializedPubKey = pk.SerializeCompressed()
	} else {
		serializedPubKey = pk.SerializeUncompressed()
	}
	// Verify that the signed-by address matches the given address
	switch checkAddr := addr.(type) {
	case *btcutil.AddressPubKeyHash: // ok
		return bytes.Equal(btcutil.Hash160(serializedPubKey), checkAddr.Hash160()[:]), nil
	case *btcutil.AddressPubKey: // ok
		return string(serializedPubKey) == checkAddr.String(), nil
	default:
		return nil, errors.New("address type not supported")
	}
}
// walletIsLocked handles the walletislocked extension request by
// returning the current lock state (false for unlocked, true for locked)
// of an account. The state comes directly from the wallet.
func walletIsLocked(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	return w.Locked(), nil
}
// walletLock handles a walletlock request by locking the all account
// wallets, returning an error if any wallet is not encrypted (for example,
// a watching-only wallet). The JSON result is null on success.
func walletLock(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	w.Lock()
	return nil, nil
}
// walletPassphrase responds to the walletpassphrase request by unlocking
// the wallet. The decryption key is saved in the wallet until timeout
// seconds expires, after which the wallet is locked.
func walletPassphrase(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.WalletPassphraseCmd)

	// A zero timeout leaves the channel nil, which never fires, so the
	// wallet stays unlocked indefinitely.
	var lockAfter <-chan time.Time
	if d := time.Second * time.Duration(cmd.Timeout); d != 0 {
		lockAfter = time.After(d)
	}
	return nil, w.Unlock([]byte(cmd.Passphrase), lockAfter)
}
// walletPassphraseChange responds to the walletpassphrasechange request
// by unlocking all accounts with the provided old passphrase, and
// re-encrypting each private key with an AES key derived from the new
// passphrase.
//
// If the old passphrase is correct and the passphrase is changed, all
// wallets will be immediately locked.
func walletPassphraseChange(icmd interface{}, w *wallet.Wallet) (interface{}, error) {
	cmd := icmd.(*btcjson.WalletPassphraseChangeCmd)

	err := w.ChangePrivatePassphrase(
		[]byte(cmd.OldPassphrase), []byte(cmd.NewPassphrase),
	)
	// Translate a wrong-passphrase failure into the dedicated RPC error;
	// everything else (including nil) is returned unchanged.
	if !waddrmgr.IsError(err, waddrmgr.ErrWrongPassphrase) {
		return nil, err
	}
	return nil, &btcjson.RPCError{
		Code:    btcjson.ErrRPCWalletPassphraseIncorrect,
		Message: "Incorrect passphrase",
	}
}
// decodeHexStr decodes the hex encoding of a string, possibly prepending a
// leading '0' character if there is an odd number of bytes in the hex string.
// This is to prevent an error for an invalid hex string when using an odd
// number of bytes when calling hex.Decode.
func decodeHexStr(hexStr string) ([]byte, error) {
	// Pad odd-length input so hex.DecodeString does not reject it.
	if len(hexStr)%2 == 1 {
		hexStr = "0" + hexStr
	}
	decoded, err := hex.DecodeString(hexStr)
	if err == nil {
		return decoded, nil
	}
	return nil, &btcjson.RPCError{
		Code:    btcjson.ErrRPCDecodeHexString,
		Message: "Hex string decode failed: " + err.Error(),
	}
}
|
package main
import (
"encoding/xml"
"fmt"
"io/ioutil"
"os"
"strings"
"util"
"github.com/tidwall/gjson"
)
// Spider downloads remote resources (APIs, HTML pages, XML listings) and
// stores them under Path.
type Spider struct {
	Path string // base directory for downloaded files
	Chs []chan int // per-download completion channels, indexed by URL position
	UrlList, FileName []string // parallel slices: UrlList[i] is saved as FileName[i]
	An util.Analysis // fetch/filter helper shared by all downloads
}
// getAPI fetches every configured API endpoint concurrently: one goroutine
// per URL in UrlList, each signaling completion on its channel in Chs.
func (s *Spider) getAPI() {
	for i, v := range s.UrlList {
		s.Chs[i] = make(chan int)
		s.An.Path = s.Path
		go s.An.GetContent(v, s.FileName[i], i, s.Chs[i])
	}
}
// getLocalHTML reads previously downloaded JSON files from Path, extracts a
// list of URLs from each file at the gjson path given by tag, and fetches
// every URL as an HTML page via getHtml.
func (s *Spider) getLocalHTML(filter string, tag string, fileNameFilter string) {
	// Read the list of files from the local directory.
	dir := new(util.Dir)
	dir.FilePath = s.Path
	fileList := dir.GetFileList()
	for _, file := range fileList {
		// Load the local file's content into memory.
		// NOTE(review): the read error is discarded; a missing or
		// unreadable file silently yields empty content.
		cache := new(util.File)
		cache.FilePath = s.Path
		cache.FileName = file.Name()
		content, _ := ioutil.ReadFile(cache.FilePath + cache.FileName)
		cache.Content = string(content)
		// Parse the JSON content with the gjson library.
		value := gjson.Get(cache.Content, tag)
		urls := make([]string, 0, 10)
		for _, url := range value.Array() {
			urls = append(urls, url.Str)
		}
		s.getHtml(urls, filter, fileNameFilter)
	}
}
// getHtml fetches every URL in value concurrently, filtering the fetched
// content with filter and deriving each stored file's name from the URL via
// fileNameFilter.
// NOTE(review): s.Chs is indexed by the URL's position, which assumes
// len(s.Chs) >= len(value) — confirm callers size Chs accordingly.
func (s *Spider) getHtml(value []string, filter string, fileNameFilter string) {
	for key, url := range value {
		s.Chs[key] = make(chan int)
		s.An.Path = s.Path
		s.An.ConFilter.Grep = filter
		// Derive the file name used for storage.
		nameFilter := new(util.Filter)
		nameFilter.Grep = fileNameFilter
		nameFilter.Content = url
		filename := nameFilter.Filter()
		go s.An.GetContent(url, filename, key, s.Chs[key])
	}
}
// getXML fetches every URL in UrlList concurrently with no content filter,
// storing each response under the fixed file name "xml". Completion is
// signaled on the matching channel in Chs.
func (s *Spider) getXML() {
	for key, url := range s.UrlList {
		s.Chs[key] = make(chan int)
		s.An.Path = s.Path
		s.An.ConFilter.Grep = ""
		// The previous version constructed a util.Filter here whose
		// result was commented out and never used; that dead code has
		// been removed.
		go s.An.GetContent(url, "xml", key, s.Chs[key])
	}
}
// getXMLContent reads a previously downloaded bucket listing from the local
// file <Path>data, unmarshals it as a ListBucketResult XML document, and
// downloads every listed object via getRs. Errors are printed and abort the
// method without propagating.
func (s *Spider) getXMLContent() {
	file, err := os.Open(s.Path + "data")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer file.Close()
	data, err := ioutil.ReadAll(file)
	if err != nil {
		fmt.Println(err)
		return
	}
	// fmt.Println(string(data))
	v := SListBucketResult{}
	err = xml.Unmarshal(data, &v)
	if err != nil {
		fmt.Printf("error: %v", err)
		return
	}
	// fmt.Println(v)
	getRs(s, v)
}
// getRs downloads every object listed in the bucket result, synchronously,
// from the fixed archive host.
func getRs(s *Spider, v SListBucketResult) {
	// NOTE(review): ch is never initialized, so a nil channel is passed to
	// GetContent. A send on a nil channel blocks forever — confirm GetContent
	// treats the channel as optional when called synchronously like this.
	var ch chan int
	for key, element := range v.Content {
		s.An.Path = s.Path
		s.An.ConFilter.Grep = ""
		fmt.Println("=================================>")
		fmt.Println("http://archive.bbx.com/" + element.Key)
		s.An.GetContent("http://archive.bbx.com/"+element.Key, getFileName(element.Key), key, ch)
	}
}
// getFileName returns the final path segment of url — everything after the
// last '/', or the whole string when no '/' is present.
func getFileName(url string) string {
	idx := strings.LastIndex(url, "/")
	return url[idx+1:]
}
// SListBucketResult mirrors an S3-style ListBucketResult XML document.
type SListBucketResult struct {
	XMLName xml.Name   `xml:"ListBucketResult"`
	Content []SContent `xml:"Contents"` // one entry per stored object
}

// SContent is a single <Contents> entry; Key is the object's path in the bucket.
type SContent struct {
	Key string `xml:"Key"`
}
|
package vips
import (
"fmt"
"github.com/sherifabdlnaby/bimg"
cfg "github.com/sherifabdlnaby/prism/pkg/config"
"github.com/sherifabdlnaby/prism/pkg/payload"
)
// rotate applies a configured rotation angle to an image operation.
type rotate struct {
	Raw rotateRawConfig `mapstructure:",squash"`
	// angle is the compiled selector built from Raw.Angle in Init.
	angle cfg.Selector
}

// rotateRawConfig is the raw (pre-evaluation) plugin configuration.
type rotateRawConfig struct {
	Angle string
}
// Init compiles the configured angle selector. It reports false (operation
// inactive) when the configuration equals the defaults.
func (o *rotate) Init() (bool, error) {
	if o.Raw == *rotateDefaults() {
		return false, nil
	}
	selector, err := cfg.NewSelector(o.Raw.Angle)
	if err != nil {
		return false, err
	}
	o.angle = selector
	return true, nil
}
// Apply evaluates the angle selector against the payload and sets the
// corresponding bimg rotation. Only "0", "90", "180", and "270" are valid.
func (o *rotate) Apply(p *bimg.Options, data payload.Data) error {
	angle, err := o.angle.Evaluate(data)
	if err != nil {
		return err
	}
	switch angle {
	case "0":
		p.Rotate = bimg.D0
	case "90":
		p.Rotate = bimg.D90
	case "180":
		p.Rotate = bimg.D180
	case "270":
		p.Rotate = bimg.D270
	default:
		return fmt.Errorf("invalid value for field [angle], got: %s", angle)
	}
	return nil
}
|
package main
import (
"log"
"net/http"
"storage/conf"
"storage/objects"
"storage/util/heartBeat"
"strconv"
)
// main wires up configuration, starts the heartbeat and locate background
// loops, and serves the object API on the configured port.
func main() {
	conf.InitConfig()
	go heartBeat.StartHeartBeat()
	go heartBeat.StartLocate()
	http.HandleFunc("/objects/", objects.Handler)
	addr := ":" + strconv.Itoa(conf.GetConfig().Env.Port)
	log.Fatal(http.ListenAndServe(addr, nil))
}
|
package main
import (
"fmt"
)
// maxSlidingWindow returns the maximum of every contiguous window of size k
// in nums. A monotonically decreasing deque of indices means each element is
// pushed and popped at most once, so the whole pass is O(n).
// It returns nil when k is non-positive or larger than len(nums).
func maxSlidingWindow(nums []int, k int) []int {
	// Bug fix: the original only rejected len(nums) < k, so k <= 0 either
	// panicked (make with negative capacity) or produced garbage output.
	if k <= 0 || len(nums) < k {
		return nil
	}
	// window holds indices whose values are strictly decreasing.
	window := make([]int, 0, k)
	res := make([]int, 0, len(nums)-k+1)
	for i, v := range nums {
		// Drop indices whose values can never be a window maximum again.
		for len(window) != 0 && nums[window[len(window)-1]] <= v {
			window = window[:len(window)-1]
		}
		window = append(window, i)
		// Evict the front index once it falls outside the current window.
		if window[0] == i-k {
			window = window[1:]
		}
		// Record a maximum once the first full window has formed.
		if i >= k-1 {
			res = append(res, nums[window[0]])
		}
	}
	return res
}
// main demonstrates maxSlidingWindow on a small example.
func main() {
	input := []int{1, 3, -1, -3, 5, 3, 6, 7}
	fmt.Printf("%v", maxSlidingWindow(input, 3))
}
|
package main
import (
"embed"
"fmt"
"text/template"
"github.com/authelia/authelia/v4/internal/templates"
)
//go:embed templates/*
var templatesFS embed.FS
// Generator templates parsed from the embedded templates/ directory.
// Any parse failure panics at package init via template.Must.
var (
	tmplCodeConfigurationSchemaKeys = template.Must(newTMPL("internal_configuration_schema_keys.go"))
	tmplGitHubIssueTemplateBug      = template.Must(newTMPL("github_issue_template_bug_report.yml"))
	tmplIssueTemplateFeature        = template.Must(newTMPL("github_issue_template_feature.yml"))
	tmplWebI18NIndex                = template.Must(newTMPL("web_i18n_index.ts"))
	tmplDotCommitLintRC             = template.Must(newTMPL("dot_commitlintrc.js"))
	tmplDocsCommitMessageGuidelines = template.Must(newTMPL("docs-contributing-development-commitmsg.md"))
	tmplScriptsGen                  = template.Must(newTMPL("cmd-authelia-scripts-gen.go"))
	tmplServer                      = template.Must(newTMPL("server_gen.go"))
)
// newTMPL parses the named embedded template with the project template
// helpers (plus the joinX string helper) attached.
func newTMPL(name string) (tmpl *template.Template, err error) {
	funcMap := templates.FuncMap()
	funcMap["joinX"] = templates.FuncStringJoinX
	t := template.New(name).Funcs(funcMap)
	return t.Parse(mustLoadTmplFS(name))
}
// mustLoadTmplFS returns the contents of templates/<tmpl>.tmpl from the
// embedded filesystem, panicking when the file is missing (programmer error
// caught at startup).
func mustLoadTmplFS(tmpl string) string {
	content, err := templatesFS.ReadFile(fmt.Sprintf("templates/%s.tmpl", tmpl))
	if err != nil {
		panic(err)
	}
	return string(content)
}
|
/*
* Copyright (c) 2020 - present Kurtosis Technologies LLC.
* All Rights Reserved.
*/
package services
/*
The identifier used for services within the network.
*/
type ServiceID string

/*
The developer should implement their own use-case-specific interface that extends this one.
*/
type Service interface {
	// GetServiceID returns the unique identifier of this service.
	GetServiceID() ServiceID
	// GetIPAddress returns the IP address of the service.
	GetIPAddress() string
	// IsAvailable returns true if the service is available.
	IsAvailable() bool
}
|
package osversion
import (
"io/ioutil"
"os"
"testing"
)
// TestVersion exercises Detector.Version against two CentOS release-file
// formats, reusing one temp file between the cases.
// Fixes: the version-mismatch messages printed the os value instead of the
// version value, and the local `os` variable shadowed the os package.
func TestVersion(t *testing.T) {
	tmpFile, err := ioutil.TempFile("", "os-release")
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(tmpFile.Name())
	versionString := "CentOS release 8.8 (Final) "
	if _, err = tmpFile.Write([]byte(versionString)); err != nil {
		t.Fatalf("Couldn't write version string to release file: %s", err)
	}
	detector := NewDetector(tmpFile.Name())
	osName, version, err := detector.Version()
	if err != nil {
		t.Fatal(err)
	}
	if osName != "CentOS" {
		t.Errorf("Expected os to be 'CentOS', was '%s'", osName)
	}
	if version != "8.8" {
		t.Errorf("Expected version to be '8.8', was '%s'", version)
	}
	// Rewind and rewrite the file with the second release format.
	if err = tmpFile.Truncate(0); err != nil {
		t.Fatalf("Unable to truncate release file: %s", err)
	}
	if _, err = tmpFile.Seek(0, 0); err != nil {
		t.Fatalf("Unable to seek to the beginning of the file to write another version: %s", err)
	}
	versionString = "CentOS Linux release 9.0.091761 (Core)"
	if _, err = tmpFile.Write([]byte(versionString)); err != nil {
		t.Fatalf("Couldn't write version string to release file: %s", err)
	}
	detector = NewDetector(tmpFile.Name())
	osName, version, err = detector.Version()
	if err != nil {
		t.Fatal(err)
	}
	if osName != "CentOS" {
		t.Errorf("Expected os to be 'CentOS', was '%s'", osName)
	}
	if version != "9.0.091761" {
		t.Errorf("Expected version to be '9.0.091761', was '%s'", version)
	}
}
|
package cli
import (
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
// newDumpCmd groups the `tilt dump` subcommands used to inspect internal state.
func newDumpCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "dump",
		Short: "dump internal Tilt state",
		Long: `Dumps internal Tilt state to stdout.
Intended to help Tilt developers inspect Tilt when things go wrong,
and figure out better ways to expose this info to Tilt users.
The format of the dump state does not make any API or compatibility promises,
and may change frequently.
`,
	}
	for _, sub := range []*cobra.Command{
		newDumpWebviewCmd(),
		newDumpEngineCmd(),
		newDumpLogStoreCmd(),
	} {
		cmd.AddCommand(sub)
	}
	return cmd
}
// newDumpWebviewCmd builds the `dump webview` subcommand.
func newDumpWebviewCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "webview",
		Short: "dump the state backing the webview",
		Run:   dumpWebview,
	}
	cmd.Long = `Dumps the state backing the webview to stdout.
The webview is the JSON used to render the React UX.
The format of the dump state does not make any API or compatibility promises,
and may change frequently.
`
	cmd.Flags().IntVar(&webPort, "port", DefaultWebPort, "Port for the Tilt HTTP server")
	return cmd
}
// newDumpEngineCmd builds the `dump engine` subcommand.
func newDumpEngineCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "engine",
		Short: "dump the engine state",
		Run:   dumpEngine,
	}
	cmd.Long = `Dumps the state of the Tilt engine to stdout.
The engine state is the central store where Tilt keeps all information about
the build specification, build history, and deployed resources.
The format of the dump state does not make any API or compatibility promises,
and may change frequently.
Excludes logs.
`
	cmd.Flags().IntVar(&webPort, "port", DefaultWebPort, "Port for the Tilt HTTP server")
	return cmd
}
// newDumpLogStoreCmd builds the `dump logstore` subcommand.
func newDumpLogStoreCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "logstore",
		Short: "dump the log store",
		Run:   dumpLogStore,
	}
	cmd.Long = `Dumps the state of the Tilt log store to stdout.
Every log of a Tilt-managed resource is aggregated into a central structured log
store before display. Dumps the JSON representation of this store.
The format of the dump state does not make any API or compatibility promises,
and may change frequently.
`
	cmd.Flags().IntVar(&webPort, "port", DefaultWebPort, "Port for the Tilt HTTP server")
	return cmd
}
// cmdFail prints err to stderr and exits with status 1.
func cmdFail(err error) {
	fmt.Fprintln(os.Stderr, err)
	os.Exit(1)
}
// dumpWebview fetches the webview JSON from the local Tilt server and
// pretty-prints it to stdout.
func dumpWebview(cmd *cobra.Command, args []string) {
	url := fmt.Sprintf("http://localhost:%d/api/view", webPort)
	res, err := http.Get(url)
	if err != nil {
		cmdFail(fmt.Errorf("Could not connect to Tilt at %s: %v", url, err))
	}
	defer func() { _ = res.Body.Close() }()
	if res.StatusCode != http.StatusOK {
		cmdFail(fmt.Errorf("Error connecting to Tilt at %s: %d", url, res.StatusCode))
	}
	if err := dumpJSON(res.Body); err != nil {
		cmdFail(fmt.Errorf("dump webview: %v", err))
	}
}
// dumpEngine fetches the engine state JSON, strips the (huge) LogStore
// field, and pretty-prints the remainder to stdout.
func dumpEngine(cmd *cobra.Command, args []string) {
	url := fmt.Sprintf("http://localhost:%d/api/dump/engine", webPort)
	res, err := http.Get(url)
	if err != nil {
		cmdFail(fmt.Errorf("Could not connect to Tilt at %s: %v", url, err))
	}
	defer func() {
		_ = res.Body.Close()
	}()
	if res.StatusCode != http.StatusOK {
		cmdFail(fmt.Errorf("Error connecting to Tilt at %s: %d", url, res.StatusCode))
	}
	result, err := decodeJSON(res.Body)
	if err != nil {
		cmdFail(fmt.Errorf("dump engine: %v", err))
	}
	// Bug fix: the original always encoded the map-asserted value, so a
	// non-object response was printed as "null" and the payload was lost.
	if obj, ok := result.(map[string]interface{}); ok {
		delete(obj, "LogStore")
		result = obj
	}
	if err := encodeJSON(result); err != nil {
		cmdFail(fmt.Errorf("dump engine: %v", err))
	}
}
// dumpLogStore fetches the engine state and pretty-prints only its LogStore
// section to stdout.
func dumpLogStore(cmd *cobra.Command, args []string) {
	url := fmt.Sprintf("http://localhost:%d/api/dump/engine", webPort)
	res, err := http.Get(url)
	if err != nil {
		cmdFail(fmt.Errorf("Could not connect to Tilt at %s: %v", url, err))
	}
	defer func() {
		_ = res.Body.Close()
	}()
	if res.StatusCode != http.StatusOK {
		cmdFail(fmt.Errorf("Error connecting to Tilt at %s: %d", url, res.StatusCode))
	}
	result, err := decodeJSON(res.Body)
	if err != nil {
		cmdFail(fmt.Errorf("dump LogStore: %v", err))
	}
	var logStore interface{}
	obj, ok := result.(map[string]interface{})
	if ok {
		logStore, ok = obj["LogStore"]
	}
	if !ok {
		// Bug fix: the previous message formatted err here, which is always
		// nil on this path and printed a useless "<nil>".
		cmdFail(fmt.Errorf("no LogStore key in engine dump from %s", url))
	}
	if err := encodeJSON(logStore); err != nil {
		cmdFail(fmt.Errorf("dump LogStore: %v", err))
	}
}
// dumpJSON decodes one JSON value from reader and pretty-prints it to stdout.
func dumpJSON(reader io.Reader) error {
	v, err := decodeJSON(reader)
	if err != nil {
		return err
	}
	return encodeJSON(v)
}
// decodeJSON reads a single JSON value from reader into a generic interface{}.
func decodeJSON(reader io.Reader) (interface{}, error) {
	var v interface{}
	if err := json.NewDecoder(reader).Decode(&v); err != nil {
		return nil, errors.Wrap(err, "Could not decode")
	}
	return v, nil
}
// encodeJSON pretty-prints result as indented JSON on stdout.
func encodeJSON(result interface{}) error {
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", " ")
	if err := enc.Encode(result); err != nil {
		return errors.Wrap(err, "Could not print")
	}
	return nil
}
|
package nullable
import (
"database/sql"
"testing"
)
// TestNullableString verifies String.MarshalJSON renders valid values quoted
// and invalid (NULL) values as JSON null.
// Fix: the sql.NullString literals used unkeyed fields, which `go vet`
// rejects for structs from another package.
func TestNullableString(t *testing.T) {
	for _, unit := range []struct {
		value    *String
		expected string
	}{
		{&String{sql.NullString{String: "aquas", Valid: true}}, "\"aquas\""},
		{&String{sql.NullString{String: "aquas", Valid: false}}, "null"},
		{&String{sql.NullString{String: "", Valid: false}}, "null"},
	} {
		if actually, _ := unit.value.MarshalJSON(); string(actually) != unit.expected {
			t.Errorf("combination: [%v], type:%T, actually: [%v], type: %T", unit.expected, unit.expected, string(actually), string(actually))
		}
	}
}
// TestNullableInt64 verifies Int64.MarshalJSON renders valid values as bare
// numbers and invalid (NULL) values as JSON null.
// Fix: the sql.NullInt64 literals used unkeyed fields, which `go vet`
// rejects for structs from another package.
func TestNullableInt64(t *testing.T) {
	for _, unit := range []struct {
		value    *Int64
		expected string
	}{
		{&Int64{sql.NullInt64{Int64: 0, Valid: false}}, "null"},
		{&Int64{sql.NullInt64{Int64: 10, Valid: false}}, "null"},
		{&Int64{sql.NullInt64{Int64: 10, Valid: true}}, "10"},
	} {
		if actually, _ := unit.value.MarshalJSON(); string(actually) != unit.expected {
			t.Errorf("combination: [%v], type:%T, actually: [%v], type: %T", unit.expected, unit.expected, string(actually), string(actually))
		}
	}
}
|
package types
import "time"
// Common named durations used throughout the package.
const (
	TwentySeconds = 20 * time.Second
	SixtyHours    = 60 * time.Hour
	Day           = 24 * time.Hour
	TwoDays       = 2 * Day
	ThreeDays     = 3 * Day
	FiveDays      = 5 * Day
	Week          = 7 * Day
)
|
package knot
import "testing"
// TestSimpleKnot walks a 5-element knot through a fixed sequence of twists
// and checks the element order after each step, plus the final skip size and
// position.
func TestSimpleKnot(t *testing.T) {
	knot := New(5)
	initial := []int{0, 1, 2, 3, 4}
	if !IntArrayEquals(knot.numbers, initial) {
		t.Errorf("knot got messed up %v should have been %v", knot.numbers, initial)
	}
	steps := []struct {
		length   int
		expected []int
	}{
		{3, []int{2, 1, 0, 3, 4}},
		{4, []int{4, 3, 0, 1, 2}},
		{1, []int{4, 3, 0, 1, 2}},
		{5, []int{3, 4, 2, 1, 0}},
	}
	for _, step := range steps {
		knot = Twist(knot, step.length)
		if !IntArrayEquals(knot.numbers, step.expected) {
			t.Errorf("knot got messed up %v should have been %v", knot.numbers, step.expected)
		}
	}
	if knot.skip_size != 4 {
		t.Errorf("messed up skip_size")
	}
	if knot.current_position != 19 {
		t.Errorf("messed up current_position")
	}
}
// TestGeneratingHashes checks Hash against the published knot-hash
// reference values (AoC 2017, day 10).
func TestGeneratingHashes(t *testing.T) {
	cases := []struct {
		input    string
		expected string
	}{
		{"", "a2582a3a0e66e6e86e3812dcb672a272"},
		{"AoC 2017", "33efeb34ea91902bb2f59c9920caa6cd"},
		{"1,2,3", "3efbe78a8d82f29979031a4aa0b16a9d"},
		{"1,2,4", "63960835bcdc130f0b66d7ff4f6a5a8e"},
	}
	for _, c := range cases {
		if hash := Hash(c.input); hash != c.expected {
			t.Errorf("got %v but expected %v", hash, c.expected)
		}
	}
}
// IntArrayEquals reports whether a and b have the same length and identical
// elements in the same order.
func IntArrayEquals(a []int, b []int) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
|
package dao
import (
"errors"
"github.com/golang/glog"
"qipai/model"
)
// Game is the package-level accessor for game persistence operations.
var Game gameDao

// gameDao groups database queries for games and players.
type gameDao struct {
}
// GetGames returns all game rows for the given room and round number.
func (d *gameDao) GetGames(roomId uint, current int) (games []model.Game, err error) {
	res := Db().Where(&model.Game{RoomId: roomId, Current: current}).Find(&games)
	if res.Error != nil {
		err = errors.New("获取游戏信息失败")
	}
	return
}
// Players returns every player associated with the room.
// Query errors are ignored; an empty slice is returned in that case.
func (d *gameDao) Players(roomId uint) (players []model.Player) {
	Db().Where(&model.Player{RoomId: roomId}).Find(&players)
	return players
}
// GetCurrentGames returns the game rows of the room's current round.
func (d *gameDao) GetCurrentGames(roomId uint) (game []model.Game, err error) {
	room, err := Room.Get(roomId)
	if err != nil {
		return
	}
	return d.GetGames(roomId, room.Current)
}
// GetLastGames returns the game rows of the round before the current one.
// It fails when the room is still on its first round.
func (d *gameDao) GetLastGames(roomId uint) (game []model.Game, err error) {
	room, err := Room.Get(roomId)
	if err != nil {
		return
	}
	if room.Current == 1 {
		err = errors.New("这是第一局")
		return
	}
	return d.GetGames(roomId, room.Current-1)
}
// GetGame returns the game row for one player in one round of a room.
func (gameDao) GetGame(roomId, uid uint, current int) (game model.Game, err error) {
	query := Db().Where(&model.Game{RoomId: roomId, PlayerId: uid, Current: current})
	if query.First(&game).RecordNotFound() {
		err = errors.New("获取游戏数据失败")
	}
	return
}
// GetCurrentGame returns the given player's game row for the room's current round.
func (d *gameDao) GetCurrentGame(roomId, uid uint) (game model.Game, err error) {
	room, err := Room.Get(roomId)
	if err != nil {
		return
	}
	return d.GetGame(roomId, uid, room.Current)
}
// Player returns the player record for uid inside room rid.
func (gameDao) Player(rid, uid uint) (player model.Player, err error) {
	res := Db().Where("uid=? and room_id=?", uid, rid).First(&player)
	if res.Error != nil {
		err = errors.New("查找数据库失败")
		glog.Errorln(res.Error)
		return
	}
	if res.RecordNotFound() {
		err = errors.New("用户未进入当前房间,如果已进入,可以尝试退出房间重新进入")
	}
	return
}
// FirstPlayer returns the earliest-joined seated player (desk_id > 0) in the room.
func (gameDao) FirstPlayer(roomId uint) (player model.Player, err error) {
	res := Db().Where("room_id=? and desk_id > 0", roomId).Order("joined_at asc").First(&player)
	if res.Error != nil || res.RecordNotFound() {
		glog.Error(res.Error)
		err = errors.New("该房间还没有玩家")
	}
	return
}
|
package tokenizer
import (
	"fmt"
	"strings"
	"unicode/utf8"

	"github.com/soundTricker/kagome/dic"
)
const (
	_MAX_INT32               = 1<<31 - 1 // sentinel/saturation cost for unreachable or overflowing paths
	_MAX_UNKNOWN_WORD_LENGTH = 1024      // cap on the rune length of an unknown word run
	_INIT_NODE_BUFFER_SIZE   = 512       // initial capacity of the node pool
)
// lattice is the Viterbi lattice built over an input sentence:
// list[i] holds the candidate nodes whose surface ends at rune position i.
type lattice struct {
	input  []byte       // raw input bytes
	list   [][]*Node    // per-rune-position node lists (BOS at 0, EOS at the end)
	output []*Node      // best path, filled by backward() in reverse order
	pool   *NodePool    // recycled node storage
	udic   *dic.UserDic // optional user dictionary, consulted before the system one
}
// NewLattice returns an empty lattice with a pre-allocated node pool.
func NewLattice() *lattice {
	return &lattice{pool: NewNodePool(_INIT_NODE_BUFFER_SIZE)}
}
// setUserDic installs the user dictionary consulted before the system one.
func (this *lattice) setUserDic(userDic *dic.UserDic) {
	this.udic = userDic
}
// addNode allocates a pooled node for the token with dictionary id a_id,
// looks up its connection costs by node class, and appends it to the list
// slot for the rune position where the token ends.
func (this *lattice) addNode(a_id, a_pos, a_start int, a_surface []byte, a_class NodeClass) {
	var cost dic.Cost
	switch a_class {
	case DUMMY:
		// use default (zero) cost for the BOS/EOS sentinels
	case KNOWN:
		cost = dic.Costs[a_id]
	case UNKNOWN:
		cost = dic.UnkCosts[a_id]
	case USER:
		// user-dictionary entries use the default (zero) cost
	}
	node := this.pool.get()
	node.id = a_id
	node.start = a_start
	node.class = a_class
	node.left, node.right, node.weight = int32(cost.Left), int32(cost.Right), int32(cost.Weight)
	node.surface = a_surface
	node.prev = nil
	// Index by end position: a_pos is the token's starting rune index.
	p := a_pos + utf8.RuneCount(node.surface)
	this.list[p] = append(this.list[p], node)
}
// build constructs the lattice for the given input: it adds BOS/EOS sentinel
// nodes, then for every rune position registers candidate tokens from the
// user dictionary, the system dictionary, and the unknown-word rules.
func (this *lattice) build(a_input *string) (err error) {
	this.pool.clear()
	this.input = []byte(*a_input)
	runeCount := utf8.RuneCount(this.input)
	// list[0] holds BOS, list[runeCount+1] holds EOS.
	this.list = make([][]*Node, runeCount+2)
	this.addNode(BOSEOS, 0, 0, this.input[0:0], DUMMY)
	this.addNode(BOSEOS, runeCount+1, runeCount, this.input[runeCount:runeCount], DUMMY)
	chPos := -1 // rune index; bufPos below is the byte index
	for bufPos, ch := range *a_input {
		chPos++
		// (1) user dictionary entries take precedence; when any match,
		// system/unknown candidates for this position are skipped entirely.
		anyMatches := false
		if this.udic != nil {
			prefixs, ids := this.udic.Index.CommonPrefixSearchBytes(this.input[bufPos:])
			anyMatches = len(prefixs) > 0
			for key, substr := range prefixs {
				id := ids[key]
				this.addNode(id, chPos, chPos, this.input[bufPos:bufPos+len(substr)], USER)
			}
		}
		if anyMatches {
			continue
		}
		// (2) system dictionary: every common-prefix match becomes a node;
		// ids with multiple dictionary entries (dic.Counts) expand to one
		// node per entry.
		prefixs, ids := dic.Index.CommonPrefixSearchBytes(this.input[bufPos:])
		anyMatches = len(prefixs) > 0
		for key, substr := range prefixs {
			id := ids[key]
			c, ok := dic.Counts[id]
			if !ok {
				c = 1
			}
			for x := 0; x < c; x++ {
				this.addNode(id+x, chPos, chPos, this.input[bufPos:bufPos+len(substr)], KNOWN)
			}
		}
		// (3) unknown words: when nothing matched (or this character class is
		// flagged "always invoke"), extend a run of same-class runes up to
		// _MAX_UNKNOWN_WORD_LENGTH and add nodes for each prefix of the run.
		if !anyMatches || dic.InvokeList[dic.CharacterCategoryList[ch]] {
			class := dic.CharacterCategoryList[ch]
			endPos := bufPos + utf8.RuneLen(ch)
			unkWordLen := 1
			for i, w, size := endPos, 1, len(this.input); i < size; i += w {
				var c rune
				c, w = utf8.DecodeRune(this.input[i:])
				if dic.CharacterCategoryList[c] != class {
					break
				}
				endPos += w
				unkWordLen++
				if unkWordLen >= _MAX_UNKNOWN_WORD_LENGTH {
					break
				}
			}
			// pair = [first unknown-entry id, number of entries] for the class.
			pair := dic.UnkIndex[class]
			for i, w := bufPos, 0; i < endPos; i += w {
				_, w = utf8.DecodeRune(this.input[i:])
				end := i + w
				for x := 0; x < pair[1]; x++ {
					this.addNode(pair[0]+x, chPos, chPos, this.input[bufPos:end], UNKNOWN)
				}
			}
		}
	}
	return
}
// String renders every lattice position and its candidate nodes, one
// section per position, for debugging.
// Fix: the original built the result with `str +=` in a nested loop, which
// is quadratic; strings.Builder makes it linear.
func (this *lattice) String() string {
	var b strings.Builder
	for i, nodes := range this.list {
		fmt.Fprintf(&b, "[%v] :\n", i)
		for _, node := range nodes {
			fmt.Fprintf(&b, "%v\n", node)
		}
		b.WriteString("\n")
	}
	return b.String()
}
// forward runs the Viterbi forward pass: for each node, left to right, it
// selects the predecessor that minimizes connection cost + node weight +
// accumulated cost, saturating at _MAX_INT32.
func (this *lattice) forward() (err error) {
	for i, size := 1, len(this.list); i < size; i++ {
		currentList := this.list[i]
		for index, target := range currentList {
			// Predecessors are the nodes ending where this node starts.
			prevList := this.list[target.start]
			if len(prevList) == 0 {
				// No predecessor: mark unreachable with the sentinel cost.
				this.list[i][index].cost = _MAX_INT32
				continue
			}
			for j, n := range prevList {
				var c int16
				// User-dictionary nodes have no connection-matrix entry; a
				// zero connection cost is used for them.
				if n.class != USER && target.class != USER {
					c, err = dic.Connection.At(int(n.right), int(target.left))
				}
				if err != nil {
					err = fmt.Errorf("lattice.forward(): dic.Connection.At(%d, %d), %v", n.right, target.left, err)
					return
				}
				// Accumulate in int64 so overflow can be detected, then clamp.
				totalCost := int64(c) + int64(target.weight) + int64(n.cost)
				if totalCost > _MAX_INT32 {
					totalCost = _MAX_INT32
				}
				// Keep the cheapest predecessor (j == 0 seeds the minimum).
				if j == 0 || int32(totalCost) < this.list[i][index].cost {
					this.list[i][index].cost = int32(totalCost)
					this.list[i][index].prev = this.list[target.start][j]
				}
			}
		}
	}
	return
}
// backward walks prev pointers from the EOS node back to BOS, collecting the
// best path into output (stored in reverse order).
func (this *lattice) backward() {
	size := len(this.list)
	this.output = make([]*Node, 0, size)
	for node := this.list[size-1][0]; node != nil; node = node.prev {
		this.output = append(this.output, node)
	}
}
|
// Copyright 2021 Clivern. All rights reserved.
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.
package driver
import (
"context"
"fmt"
"strings"
"time"
"github.com/clivern/peanut/core/util"
"github.com/spf13/viper"
"go.etcd.io/etcd/clientv3"
)
// Etcd driver: a Database implementation backed by an etcd v3 cluster.
type Etcd struct {
	client *clientv3.Client // nil until Connect succeeds
}
// NewEtcdDriver create a new instance of the etcd-backed Database.
func NewEtcdDriver() Database {
	return &Etcd{}
}
// Connect connect to etcd server using the app.database.etcd.* settings
// (comma-separated endpoints, timeout in seconds, optional credentials).
func (e *Etcd) Connect() error {
	var err error
	e.client, err = clientv3.New(clientv3.Config{
		Endpoints:   strings.Split(viper.GetString("app.database.etcd.endpoints"), ","),
		DialTimeout: time.Duration(viper.GetInt("app.database.etcd.timeout")) * time.Second,
		Username:    viper.GetString("app.database.etcd.username"),
		Password:    viper.GetString("app.database.etcd.password"),
	})
	return err
}
// IsConnected checks if there is an etcd connection, i.e. whether Connect
// has been called successfully at least once.
func (e *Etcd) IsConnected() bool {
	return e.client != nil
}
// Put sets a record, bounded by the configured request timeout.
func (e *Etcd) Put(key, value string) error {
	ctx, cancel := context.WithTimeout(
		context.Background(),
		time.Duration(viper.GetInt("app.database.etcd.timeout"))*time.Second,
	)
	// Idiom fix: defer cancel immediately after creating the context, not
	// after the RPC has already run.
	defer cancel()
	_, err := e.client.Put(ctx, key, value)
	return err
}
// PutWithLease sets a record attached to the given lease, so the key expires
// with the lease.
func (e *Etcd) PutWithLease(key, value string, leaseID clientv3.LeaseID) error {
	ctx, cancel := context.WithTimeout(
		context.Background(),
		time.Duration(viper.GetInt("app.database.etcd.timeout"))*time.Second,
	)
	// Idiom fix: defer cancel immediately after creating the context.
	defer cancel()
	_, err := e.client.Put(ctx, key, value, clientv3.WithLease(leaseID))
	return err
}
// Get gets a record value; the lookup is a prefix search, so all keys under
// the given prefix are returned as a key→value map.
func (e *Etcd) Get(key string) (map[string]string, error) {
	result := make(map[string]string)
	ctx, cancel := context.WithTimeout(
		context.Background(),
		time.Duration(viper.GetInt("app.database.etcd.timeout"))*time.Second,
	)
	// Idiom fix: defer cancel immediately after creating the context.
	defer cancel()
	resp, err := e.client.Get(ctx, key, clientv3.WithPrefix())
	if err != nil {
		return result, err
	}
	for _, ev := range resp.Kvs {
		result[string(ev.Key)] = string(ev.Value)
	}
	return result, nil
}
// Delete deletes every record under the key prefix and returns the number of
// deleted keys.
func (e *Etcd) Delete(key string) (int64, error) {
	ctx, cancel := context.WithTimeout(
		context.Background(),
		time.Duration(viper.GetInt("app.database.etcd.timeout"))*time.Second,
	)
	// Idiom fix: defer cancel immediately after creating the context.
	defer cancel()
	dresp, err := e.client.Delete(ctx, key, clientv3.WithPrefix())
	if err != nil {
		return 0, err
	}
	return dresp.Deleted, nil
}
// CreateLease creates a lease with the given TTL in seconds and returns its ID.
func (e *Etcd) CreateLease(seconds int64) (clientv3.LeaseID, error) {
	ctx, cancel := context.WithTimeout(
		context.Background(),
		time.Duration(viper.GetInt("app.database.etcd.timeout"))*time.Second,
	)
	// Idiom fix: defer cancel immediately after creating the context; the
	// unused `result` variable was removed (zero LeaseID on failure).
	defer cancel()
	resp, err := e.client.Grant(ctx, seconds)
	if err != nil {
		return 0, err
	}
	return resp.ID, nil
}
// RenewLease renews a lease once (keep-alive-once semantics).
func (e *Etcd) RenewLease(leaseID clientv3.LeaseID) error {
	ctx, cancel := context.WithTimeout(
		context.Background(),
		time.Duration(viper.GetInt("app.database.etcd.timeout"))*time.Second,
	)
	// Idiom fix: defer cancel immediately after creating the context.
	defer cancel()
	_, err := e.client.KeepAliveOnce(ctx, leaseID)
	return err
}
// GetKeys gets a record sub keys.
// This method will return only the first-level keys under the given key,
// deduplicated (deeper path segments are truncated).
func (e *Etcd) GetKeys(key string) ([]string, error) {
	result := []string{}
	ctx, cancel := context.WithTimeout(
		context.Background(),
		time.Duration(viper.GetInt("app.database.etcd.timeout"))*time.Second,
	)
	// Idiom fix: defer cancel immediately after creating the context; the
	// loop-invariant prefix computation is hoisted out of the loop.
	defer cancel()
	resp, err := e.client.Get(ctx, key, clientv3.WithPrefix())
	if err != nil {
		return result, err
	}
	prefix := util.EnsureTrailingSlash(key)
	for _, ev := range resp.Kvs {
		// Strip the parent prefix, keep only the first path segment.
		sub := strings.Replace(string(ev.Key), prefix, "", -1)
		subKeys := strings.Split(sub, "/")
		newKey := fmt.Sprintf("%s%s", prefix, subKeys[0])
		if !util.InArray(newKey, result) {
			result = append(result, newKey)
		}
	}
	return result, nil
}
// Exists checks if a record exists — true when at least one key under the
// given prefix is present.
func (e *Etcd) Exists(key string) (bool, error) {
	result, err := e.Get(key)
	return len(result) > 0, err
}
// Close closes the etcd connection.
// NOTE(review): the error from client.Close is discarded; the Database
// interface apparently allows no error return here — confirm.
func (e *Etcd) Close() {
	e.client.Close()
}
|
package leetcode
/*Write code to remove duplicates from an unsorted linked list.*/
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// removeDuplicateNodes drops every later occurrence of a value already seen,
// keeping the first node for each value. O(n) time, O(n) extra space.
func removeDuplicateNodes(head *ListNode) *ListNode {
	seen := map[int]bool{}
	var prev *ListNode
	for node := head; node != nil; node = node.Next {
		if seen[node.Val] {
			// Duplicate: splice it out of the list.
			prev.Next = node.Next
		} else {
			seen[node.Val] = true
			prev = node
		}
	}
	return head
}
|
package hashtable
import (
	"fmt"
	"hash/fnv"
	"strings"
	"sync"
	"sync/atomic"
	"unsafe"

	"ustr"
)
// A hashtable with a lock-free Get()
type hashtable_i interface {
	Get(key interface{}) (interface{}, bool)
	Set(key interface{}, val interface{}) (interface{}, bool)
	Del(key interface{})
}

// elem_t is a singly-linked chain entry; each bucket's chain is kept sorted
// by keyHash (see Set/Del).
type elem_t struct {
	key     interface{}
	value   interface{}
	keyHash uint32 // cached scrambled hash of key (see khash)
	next    *elem_t
}

// bucket_t is one hash bucket: writers serialize on the embedded mutex,
// while lock-free readers traverse first/next via atomic pointer loads.
type bucket_t struct {
	sync.RWMutex
	first *elem_t
	//_ [64-2*8]uint8
}
// len counts the elements in this bucket under the read lock.
func (b *bucket_t) len() int {
	b.RLock()
	defer b.RUnlock()
	count := 0
	for cur := b.first; cur != nil; cur = cur.next {
		count++
	}
	return count
}
// elems snapshots this bucket's key/value pairs under the read lock.
func (b *bucket_t) elems() []Pair_t {
	b.RLock()
	defer b.RUnlock()
	pairs := make([]Pair_t, 0)
	for cur := b.first; cur != nil; cur = cur.next {
		pairs = append(pairs, Pair_t{Key: cur.key, Value: cur.value})
	}
	return pairs
}
// iter applies f to each element lock-free (atomic next-pointer loads),
// stopping and returning true on the first call that returns true.
// NOTE(review): b.first is read without loadptr here, unlike Get — likely
// an oversight given the lock-free design; confirm.
func (b *bucket_t) iter(f func(interface{}, interface{}) bool) bool {
	for e := b.first; e != nil; e = loadptr(&e.next) {
		if f(e.key, e.value) {
			return true
		}
	}
	return false
}
// Hashtable_t is a fixed-capacity chained hashtable with lock-free reads.
type Hashtable_t struct {
	table    []*bucket_t
	capacity int // bucket count, fixed at creation
	maxchain int // longest chain observed by Get/GetRLock (debug statistic)
}
// MkHash returns a hashtable with size buckets; the bucket count never grows.
func MkHash(size int) *Hashtable_t {
	ht := &Hashtable_t{
		capacity: size,
		table:    make([]*bucket_t, size),
		maxchain: 1,
	}
	for i := range ht.table {
		ht.table[i] = &bucket_t{}
	}
	return ht
}
// String renders non-empty buckets and their (hash, key) chains for debugging.
// Fix: the original built the result with `s +=` in nested loops, which is
// quadratic; strings.Builder makes it linear.
func (ht *Hashtable_t) String() string {
	var sb strings.Builder
	for i, b := range ht.table {
		if b.first == nil {
			continue
		}
		fmt.Fprintf(&sb, "b %d:\n", i)
		for e := b.first; e != nil; e = loadptr(&e.next) {
			fmt.Fprintf(&sb, "(%v, %v), ", e.keyHash, e.key)
		}
		sb.WriteString("\n")
	}
	return sb.String()
}
// Size returns the total element count by summing per-bucket lengths.
func (ht *Hashtable_t) Size() int {
	total := 0
	for _, b := range ht.table {
		total += b.len()
	}
	return total
}
// Pair_t is a key/value snapshot returned by Elems.
type Pair_t struct {
	Key   interface{}
	Value interface{}
}
// Elems snapshots all key/value pairs across every bucket.
func (ht *Hashtable_t) Elems() []Pair_t {
	pairs := make([]Pair_t, 0)
	for _, b := range ht.table {
		pairs = append(pairs, b.elems()...)
	}
	return pairs
}
// Get looks up key without taking any lock: the chain is traversed via
// atomic pointer loads, relying on Set/Del publishing nodes with atomic
// stores. Returns (value, true) on a hit, (nil, false) otherwise.
func (ht *Hashtable_t) Get(key interface{}) (interface{}, bool) {
	kh := khash(key)
	b := ht.table[ht.hash(kh)]
	n := 0
	for e := loadptr(&b.first); e != nil; e = loadptr(&e.next) {
		if e.keyHash == kh && equal(e.key, key) {
			return e.value, true
		}
		n += 1
		if n > ht.maxchain {
			// NOTE(review): maxchain is written here without synchronization
			// from a lock-free reader — a data race the race detector will
			// flag; it is only a debug statistic, but confirm it's acceptable.
			ht.maxchain = n
			//if n >= 3 {
			//	fmt.Printf("maxchain: %d\n", ht.maxchain)
			//	fmt.Printf("key %s collides with %s\n", key, e.key)
			//}
		}
	}
	return nil, false
}
// For performance comparisons
// against the lock-free Get: identical lookup but under the bucket read lock.
func (ht *Hashtable_t) GetRLock(key interface{}) (interface{}, bool) {
	kh := khash(key)
	b := ht.table[ht.hash(kh)]
	b.RLock()
	defer b.RUnlock()
	n := 0
	for e := b.first; e != nil; e = e.next {
		if e.keyHash == kh && equal(e.key, key) {
			return e.value, true
		}
		n += 1
		// Track the longest chain seen (debug statistic).
		if n > ht.maxchain {
			ht.maxchain = n
			//if n >= 3 {
			//	fmt.Printf("maxchain: %d\n", ht.maxchain)
			//	fmt.Printf("key %s collides with %s\n", key, e.key)
			//}
		}
	}
	return nil, false
}
// Set returns false if key already exists
// (leaving the stored value unchanged); otherwise it inserts the pair and
// returns (value, true). Each bucket's chain is kept sorted by keyHash, and
// new nodes are published with atomic stores so lock-free readers always see
// a fully-initialized node.
func (ht *Hashtable_t) Set(key interface{}, value interface{}) (interface{}, bool) {
	kh := khash(key)
	b := ht.table[ht.hash(kh)]
	b.Lock()
	defer b.Unlock()
	// add links a fresh node after last, or at the head when last is nil.
	add := func(last *elem_t, b *bucket_t) {
		if last == nil {
			n := &elem_t{key: key, value: value, keyHash: kh, next: b.first}
			storeptr(&b.first, n)
		} else {
			n := &elem_t{key: key, value: value, keyHash: kh, next: last.next}
			storeptr(&last.next, n)
		}
	}
	var last *elem_t
	for e := b.first; e != nil; e = e.next {
		if e.keyHash == kh && equal(e.key, key) {
			return e.value, false
		}
		// Sorted chain: once we pass kh the key cannot exist further on.
		if kh < e.keyHash {
			add(last, b)
			return value, true
		}
		last = e
	}
	add(last, b)
	return value, true
}
// returns true if the key was removed
// NOTE(review): despite the comment above, Del returns nothing and panics
// when the key is absent — confirm callers always guarantee existence.
// Unlinking uses atomic stores so lock-free readers never see a torn chain.
func (ht *Hashtable_t) Del(key interface{}) {
	kh := khash(key)
	b := ht.table[ht.hash(kh)]
	b.Lock()
	defer b.Unlock()
	// rem unlinks n, whose predecessor is last (nil means n is the head).
	rem := func(last *elem_t, b *bucket_t, n *elem_t) {
		if last == nil {
			// b.first = n.next
			storeptr(&b.first, n.next)
		} else {
			// last.next = n.next
			storeptr(&last.next, n.next)
		}
	}
	var last *elem_t
	for e := b.first; e != nil; e = e.next {
		if e.keyHash == kh && equal(e.key, key) {
			rem(last, b, e)
			return
		}
		// Sorted chain: once kh is passed, the key cannot exist.
		if kh < e.keyHash {
			panic("del of non-existing key")
		}
		last = e
	}
	panic("del of non-existing key")
}
// Returns true if at least one call to f returned true. stops iterating once f
// returns true. Iteration is lock-free per bucket (see bucket_t.iter).
func (ht *Hashtable_t) Iter(f func(interface{}, interface{}) bool) bool {
	for _, b := range ht.table {
		if b.iter(f) {
			return true
		}
	}
	return false
}
// hash maps a key hash onto a bucket index.
func (ht *Hashtable_t) hash(keyHash uint32) int {
	return int(keyHash % uint32(len(ht.table)))
}
// Without an explicit memory model, it is hard to know if this code is
// correct. LoadPointer/StorePointer don't issue a memory fence, but for
// traversing pointers in Get() and updating them in Set()/Del(), this might be
// ok on x86. The Go compiler also hopefully doesn't reorder loads
// wrt. LoadPointer.
//
// loadptr atomically reads *e.
func loadptr(e **elem_t) *elem_t {
	p := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(e)))
	return (*elem_t)(p)
}
// storeptr atomically publishes n into *p.
func storeptr(p **elem_t, n *elem_t) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(n))
}
// hashUstr returns the 32-bit FNV-1a hash of the byte string s.
func hashUstr(s ustr.Ustr) uint32 {
	h := fnv.New32a()
	h.Write(s)
	return h.Sum32()
}
// hashString returns the 32-bit FNV-1a hash of s.
func hashString(s string) uint32 {
	hasher := fnv.New32a()
	_, _ = hasher.Write([]byte(s)) // fnv's Write never fails
	return hasher.Sum32()
}
// khash scrambles hash(key) with the Knuth multiplicative constant so keys
// with sequential raw hashes still spread across buckets.
func khash(key interface{}) uint32 {
	const knuth uint32 = 2654435761
	return knuth * hash(key)
}
// hash derives a raw 32-bit hash for the supported key types
// (ustr.Ustr, string, int, int32); any other type panics.
func hash(key interface{}) uint32 {
	switch k := key.(type) {
	case ustr.Ustr:
		return hashUstr(k)
	case string:
		return hashString(k)
	case int:
		return uint32(k)
	case int32:
		return uint32(k)
	}
	panic(fmt.Errorf("unsupported key type %T", key))
}
// equal reports whether two keys of the same supported type are equal.
// key2 must have key1's dynamic type; unsupported key1 types panic.
// Fix: the original re-asserted key1 inside each case instead of using the
// type-switch variable, and applied redundant int/int32 conversions.
func equal(key1 interface{}, key2 interface{}) bool {
	switch k1 := key1.(type) {
	case ustr.Ustr:
		return k1.Eq(key2.(ustr.Ustr))
	case int32:
		return k1 == key2.(int32)
	case int:
		return k1 == key2.(int)
	case string:
		return k1 == key2.(string)
	}
	panic(fmt.Errorf("unsupported key type %T", key1))
}
|
package controller
import (
"gopetstore/src/config"
"gopetstore/src/domain"
"gopetstore/src/service"
"gopetstore/src/util"
"log"
"net/http"
"path/filepath"
)
// Template file names for the account pages.
const (
	signInFormFile      = "signInForm.html"
	registerFormFile    = "registerForm.html"
	accountFieldFile    = "accountField.html"
	editAccountFormFile = "editAccountForm.html"
)
// Resolved template paths for the account pages, plus the fixed option
// lists rendered in the registration/edit forms.
var (
	signInFormPath      = filepath.Join(config.Front, config.Web, config.Account, signInFormFile)
	registerFormPath    = filepath.Join(config.Front, config.Web, config.Account, registerFormFile)
	accountFieldPath    = filepath.Join(config.Front, config.Web, config.Account, accountFieldFile)
	editAccountFormPath = filepath.Join(config.Front, config.Web, config.Account, editAccountFormFile)
	// Selectable UI languages.
	languages = []string{
		"english",
		"japanese",
	}
	// Pet categories offered as favourites.
	categories = []string{
		"FISH",
		"DOGS",
		"REPTILES",
		"CATS",
		"BIRDS",
	}
)
// ViewLoginOrPostLogin renders the sign-in page on GET and performs the
// login on POST. On success the account is stored in the session and the
// user is redirected to the main page; on failure the sign-in page is
// re-rendered with an error message.
func ViewLoginOrPostLogin(w http.ResponseWriter, r *http.Request) {
	if r.Method == "GET" {
		if err := util.RenderWithCommon(w, nil, signInFormPath); err != nil {
			log.Printf("view signInForm error: %v", err.Error())
		}
		return
	}
	if err := r.ParseForm(); err != nil {
		log.Printf("parse login form error: %v", err.Error())
	}
	userName := r.FormValue("username")
	password := r.FormValue("password")
	account, err := service.GetAccountByUserNameAndPassword(userName, password)
	if err != nil {
		// Security fix: the plaintext password was previously written to the log.
		log.Printf("do login error with %s: %v", userName, err.Error())
		m := map[string]interface{}{
			"Message": "登录失败,用户名或密码错误!",
		}
		if err := util.RenderWithCommon(w, m, signInFormPath); err != nil {
			log.Printf("view signInForm error: %v", err.Error())
		}
		// Bug fix: return after rendering the failure page so the success
		// path below can never also run for the same request.
		return
	}
	if account == nil {
		return
	}
	// Persist the account in the session, then go to the main page.
	s, err := util.GetSession(r)
	if err != nil {
		log.Printf("get session error: %v", err.Error())
	}
	if s != nil {
		if err = s.Save(config.AccountKey, account, w, r); err != nil {
			log.Printf("session save account error: %v", err.Error())
		}
		ViewMain(w, r)
	}
}
// ViewRegister renders the registration page with the language and category
// option lists.
func ViewRegister(w http.ResponseWriter, r *http.Request) {
	data := map[string]interface{}{
		"Languages":  languages,
		"Categories": categories,
	}
	if err := util.RenderWithAccount(w, r, data, registerFormPath, config.CommonPath, accountFieldPath); err != nil {
		log.Printf("view registerForm error: %v", err.Error())
	}
}
// SignOut removes the account, cart, and order from the session, then
// redirects to the main page.
func SignOut(w http.ResponseWriter, r *http.Request) {
	s, err := util.GetSession(r)
	if err != nil {
		log.Printf("get session error: %v", err.Error())
	}
	if s != nil {
		// Bug fix: previously err was overwritten by each Del call, so only
		// the last error was ever checked; now each is handled.
		if err := s.Del(config.AccountKey, w, r); err != nil {
			log.Printf("session delete error: %v", err.Error())
		}
		if err := s.Del(config.CartKey, w, r); err != nil {
			log.Printf("session delete error: %v", err.Error())
		}
		if err := s.Del(config.OrderKey, w, r); err != nil {
			log.Printf("session delete error: %v", err.Error())
		}
	}
	// Redirect to the main page.
	ViewMain(w, r)
}
// ViewEditAccount renders the account-editing page, supplying the
// selectable language and category lists to the template.
func ViewEditAccount(w http.ResponseWriter, r *http.Request) {
	data := map[string]interface{}{
		"Languages":  languages,
		"Categories": categories,
	}
	if err := util.RenderWithAccount(w, r, data, editAccountFormPath, config.CommonPath, accountFieldPath); err != nil {
		log.Printf("view editAccountForm error: %v", err)
	}
}
// NewAccount registers a new user. It validates the submitted form
// (non-empty password, matching password confirmation, unique user name),
// inserts the account, and on success renders the sign-in page with a
// confirmation message. Any validation or persistence failure re-renders
// the registration form with an explanatory message.
func NewAccount(w http.ResponseWriter, r *http.Request) {
	a := getAccountFromInfoForm(r)
	repeatedPassword := r.FormValue("repeatedPassword")
	m := make(map[string]interface{})
	m["Languages"] = languages
	m["Categories"] = categories
	// showRegisterForm re-renders the registration page with the submitted
	// values and a message explaining why registration did not complete.
	showRegisterForm := func(msg string) {
		m["Account"] = a
		m["Message"] = msg
		if err := util.Render(w, m, registerFormPath, config.CommonPath, accountFieldPath); err != nil {
			log.Printf("NewAccount Render error: %v", err)
		}
	}
	if len(a.Password) == 0 {
		showRegisterForm("密码不能为空")
		return
	}
	if repeatedPassword != a.Password {
		showRegisterForm("密码和重复密码不符")
		return
	}
	// Reject duplicate user names.
	oldAccount, err := service.GetAccountByUserName(a.UserName)
	if err != nil {
		// Fix: the original never inspected this error.
		log.Printf("NewAccount GetAccountByUserName error: %v", err)
	}
	if oldAccount != nil {
		showRegisterForm("用户名已存在")
		return
	}
	if err := service.InsertAccount(a); err != nil {
		// Fix: the original fell through and reported success even when the
		// insert failed; surface the failure to the user instead.
		log.Printf("NewAccount InsertAccount error: %v", err)
		showRegisterForm("注册失败,请稍后重试")
		return
	}
	// Registration succeeded: go to the sign-in page with a success note.
	m["Message"] = "注册成功!"
	if err := util.RenderWithCommon(w, m, signInFormPath); err != nil {
		log.Printf("NewAccount RenderWithCommon error: %v", err)
	}
}
// ConfirmEdit applies the submitted account changes. It validates the form
// (non-empty password, matching confirmation), updates the account, reloads
// it, refreshes the session copy, and re-renders the edit page with a
// status message.
func ConfirmEdit(w http.ResponseWriter, r *http.Request) {
	a := getAccountFromInfoForm(r)
	repeatedPassword := r.FormValue("repeatedPassword")
	m := make(map[string]interface{})
	m["Languages"] = languages
	m["Categories"] = categories
	// showEditForm re-renders the edit page with the given status message.
	showEditForm := func(msg string) {
		m["Message"] = msg
		if err := util.RenderWithAccount(w, r, m, editAccountFormPath, config.CommonPath, accountFieldPath); err != nil {
			log.Printf("ConfirmEdit RenderWithAccount error: %v", err)
		}
	}
	if len(a.Password) == 0 {
		showEditForm("密码不能为空")
		return
	}
	if repeatedPassword != a.Password {
		showEditForm("密码和重复密码不符")
		return
	}
	if err := service.UpdateAccount(a); err != nil {
		log.Printf("ConfirmEdit UpdateAccount error: %v", err)
		showEditForm(err.Error())
		return
	}
	newAccount, err := service.GetAccountByUserName(a.UserName)
	if err != nil {
		log.Printf("ConfirmEdit GetAccountByUserName error: %v", err)
	}
	// Fix: refresh the session only when the account was actually reloaded;
	// the original would overwrite the session entry with nil on a lookup
	// failure, effectively logging the user out.
	if newAccount != nil {
		s, err := util.GetSession(r)
		if err != nil {
			log.Printf("ConfirmEdit GetSession error: %v", err)
		}
		if s != nil {
			if err = s.Save(config.AccountKey, newAccount, w, r); err != nil {
				log.Printf("ConfirmEdit Save error: %v", err)
			}
		}
	}
	showEditForm("修改成功")
}
// getAccountFromInfoForm builds a domain.Account from the fields of the
// account-details form carried by r. The checkbox-style list and banner
// options are treated as enabled whenever a non-empty value was submitted.
func getAccountFromInfoForm(r *http.Request) *domain.Account {
	if err := r.ParseForm(); err != nil {
		log.Printf("parse register form error: %v", err)
	}
	return &domain.Account{
		UserName:            r.FormValue("username"),
		Password:            r.FormValue("password"),
		Email:               r.FormValue("email"),
		FirstName:           r.FormValue("firstName"),
		LastName:            r.FormValue("lastName"),
		Address1:            r.FormValue("address1"),
		Address2:            r.FormValue("address2"),
		City:                r.FormValue("city"),
		State:               r.FormValue("state"),
		Zip:                 r.FormValue("zip"),
		Country:             r.FormValue("country"),
		Phone:               r.FormValue("phone"),
		FavouriteCategoryId: r.FormValue("favouriteCategoryId"),
		LanguagePreference:  r.FormValue("languagePreference"),
		ListOption:          len(r.FormValue("listOption")) > 0,
		BannerOption:        len(r.FormValue("bannerOption")) > 0,
	}
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
alphapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/dataproc/alpha/dataproc_alpha_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc/alpha"
)
// ClusterServer implements the gRPC interface for Cluster.
// The struct is intentionally empty: it carries no per-server state.
type ClusterServer struct{}
// ProtoToDataprocAlphaClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum converts a
// ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum from its proto form;
// the proto zero value ("unspecified") and unknown numbers map to nil.
func ProtoToDataprocAlphaClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(e alphapb.DataprocAlphaClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum) *alpha.ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum {
	if e == 0 {
		return nil
	}
	name, ok := alphapb.DataprocAlphaClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Drop the proto type prefix to recover the bare enum value name.
	v := alpha.ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(name[len("DataprocAlphaClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum"):])
	return &v
}
// ProtoToDataprocAlphaClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum converts a
// ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum from its proto form;
// the proto zero value ("unspecified") and unknown numbers map to nil.
func ProtoToDataprocAlphaClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(e alphapb.DataprocAlphaClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum) *alpha.ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum {
	if e == 0 {
		return nil
	}
	name, ok := alphapb.DataprocAlphaClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Drop the proto type prefix to recover the bare enum value name.
	v := alpha.ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(name[len("DataprocAlphaClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum"):])
	return &v
}
// ProtoToDataprocAlphaClusterConfigMasterConfigPreemptibilityEnum converts a
// ClusterConfigMasterConfigPreemptibilityEnum from its proto form;
// the proto zero value ("unspecified") and unknown numbers map to nil.
func ProtoToDataprocAlphaClusterConfigMasterConfigPreemptibilityEnum(e alphapb.DataprocAlphaClusterConfigMasterConfigPreemptibilityEnum) *alpha.ClusterConfigMasterConfigPreemptibilityEnum {
	if e == 0 {
		return nil
	}
	name, ok := alphapb.DataprocAlphaClusterConfigMasterConfigPreemptibilityEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Drop the proto type prefix to recover the bare enum value name.
	v := alpha.ClusterConfigMasterConfigPreemptibilityEnum(name[len("DataprocAlphaClusterConfigMasterConfigPreemptibilityEnum"):])
	return &v
}
// ProtoToDataprocAlphaClusterConfigWorkerConfigPreemptibilityEnum converts a
// ClusterConfigWorkerConfigPreemptibilityEnum from its proto form;
// the proto zero value ("unspecified") and unknown numbers map to nil.
func ProtoToDataprocAlphaClusterConfigWorkerConfigPreemptibilityEnum(e alphapb.DataprocAlphaClusterConfigWorkerConfigPreemptibilityEnum) *alpha.ClusterConfigWorkerConfigPreemptibilityEnum {
	if e == 0 {
		return nil
	}
	name, ok := alphapb.DataprocAlphaClusterConfigWorkerConfigPreemptibilityEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Drop the proto type prefix to recover the bare enum value name.
	v := alpha.ClusterConfigWorkerConfigPreemptibilityEnum(name[len("DataprocAlphaClusterConfigWorkerConfigPreemptibilityEnum"):])
	return &v
}
// ProtoToDataprocAlphaClusterConfigSecondaryWorkerConfigPreemptibilityEnum converts a
// ClusterConfigSecondaryWorkerConfigPreemptibilityEnum from its proto form;
// the proto zero value ("unspecified") and unknown numbers map to nil.
func ProtoToDataprocAlphaClusterConfigSecondaryWorkerConfigPreemptibilityEnum(e alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigPreemptibilityEnum) *alpha.ClusterConfigSecondaryWorkerConfigPreemptibilityEnum {
	if e == 0 {
		return nil
	}
	name, ok := alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigPreemptibilityEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Drop the proto type prefix to recover the bare enum value name.
	v := alpha.ClusterConfigSecondaryWorkerConfigPreemptibilityEnum(name[len("DataprocAlphaClusterConfigSecondaryWorkerConfigPreemptibilityEnum"):])
	return &v
}
// ProtoToDataprocAlphaClusterConfigSoftwareConfigOptionalComponentsEnum converts a
// ClusterConfigSoftwareConfigOptionalComponentsEnum from its proto form;
// the proto zero value ("unspecified") and unknown numbers map to nil.
func ProtoToDataprocAlphaClusterConfigSoftwareConfigOptionalComponentsEnum(e alphapb.DataprocAlphaClusterConfigSoftwareConfigOptionalComponentsEnum) *alpha.ClusterConfigSoftwareConfigOptionalComponentsEnum {
	if e == 0 {
		return nil
	}
	name, ok := alphapb.DataprocAlphaClusterConfigSoftwareConfigOptionalComponentsEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Drop the proto type prefix to recover the bare enum value name.
	v := alpha.ClusterConfigSoftwareConfigOptionalComponentsEnum(name[len("DataprocAlphaClusterConfigSoftwareConfigOptionalComponentsEnum"):])
	return &v
}
// ProtoToDataprocAlphaClusterConfigDataprocMetricConfigMetricsMetricSourceEnum converts a
// ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum from its proto form;
// the proto zero value ("unspecified") and unknown numbers map to nil.
func ProtoToDataprocAlphaClusterConfigDataprocMetricConfigMetricsMetricSourceEnum(e alphapb.DataprocAlphaClusterConfigDataprocMetricConfigMetricsMetricSourceEnum) *alpha.ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum {
	if e == 0 {
		return nil
	}
	name, ok := alphapb.DataprocAlphaClusterConfigDataprocMetricConfigMetricsMetricSourceEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Drop the proto type prefix to recover the bare enum value name.
	v := alpha.ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum(name[len("DataprocAlphaClusterConfigDataprocMetricConfigMetricsMetricSourceEnum"):])
	return &v
}
// ProtoToDataprocAlphaClusterStatusStateEnum converts a ClusterStatusStateEnum
// from its proto form; the proto zero value ("unspecified") and unknown
// numbers map to nil.
func ProtoToDataprocAlphaClusterStatusStateEnum(e alphapb.DataprocAlphaClusterStatusStateEnum) *alpha.ClusterStatusStateEnum {
	if e == 0 {
		return nil
	}
	name, ok := alphapb.DataprocAlphaClusterStatusStateEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Drop the proto type prefix to recover the bare enum value name.
	v := alpha.ClusterStatusStateEnum(name[len("DataprocAlphaClusterStatusStateEnum"):])
	return &v
}
// ProtoToDataprocAlphaClusterStatusSubstateEnum converts a
// ClusterStatusSubstateEnum from its proto form; the proto zero value
// ("unspecified") and unknown numbers map to nil.
func ProtoToDataprocAlphaClusterStatusSubstateEnum(e alphapb.DataprocAlphaClusterStatusSubstateEnum) *alpha.ClusterStatusSubstateEnum {
	if e == 0 {
		return nil
	}
	name, ok := alphapb.DataprocAlphaClusterStatusSubstateEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Drop the proto type prefix to recover the bare enum value name.
	v := alpha.ClusterStatusSubstateEnum(name[len("DataprocAlphaClusterStatusSubstateEnum"):])
	return &v
}
// ProtoToDataprocAlphaClusterStatusHistoryStateEnum converts a
// ClusterStatusHistoryStateEnum from its proto form; the proto zero value
// ("unspecified") and unknown numbers map to nil.
func ProtoToDataprocAlphaClusterStatusHistoryStateEnum(e alphapb.DataprocAlphaClusterStatusHistoryStateEnum) *alpha.ClusterStatusHistoryStateEnum {
	if e == 0 {
		return nil
	}
	name, ok := alphapb.DataprocAlphaClusterStatusHistoryStateEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Drop the proto type prefix to recover the bare enum value name.
	v := alpha.ClusterStatusHistoryStateEnum(name[len("DataprocAlphaClusterStatusHistoryStateEnum"):])
	return &v
}
// ProtoToDataprocAlphaClusterStatusHistorySubstateEnum converts a
// ClusterStatusHistorySubstateEnum from its proto form; the proto zero value
// ("unspecified") and unknown numbers map to nil.
func ProtoToDataprocAlphaClusterStatusHistorySubstateEnum(e alphapb.DataprocAlphaClusterStatusHistorySubstateEnum) *alpha.ClusterStatusHistorySubstateEnum {
	if e == 0 {
		return nil
	}
	name, ok := alphapb.DataprocAlphaClusterStatusHistorySubstateEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Drop the proto type prefix to recover the bare enum value name.
	v := alpha.ClusterStatusHistorySubstateEnum(name[len("DataprocAlphaClusterStatusHistorySubstateEnum"):])
	return &v
}
// ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum converts a
// ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum from its proto form;
// the proto zero value ("unspecified") and unknown numbers map to nil.
func ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum(e alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum) *alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum {
	if e == 0 {
		return nil
	}
	name, ok := alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Drop the proto type prefix to recover the bare enum value name.
	v := alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum(name[len("DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum"):])
	return &v
}
// ProtoToDataprocAlphaClusterConfig converts a ClusterConfig object from its proto representation.
func ProtoToDataprocAlphaClusterConfig(p *alphapb.DataprocAlphaClusterConfig) *alpha.ClusterConfig {
	// A nil proto message maps to a nil DCL object.
	if p == nil {
		return nil
	}
	// Scalar fields and singular nested messages map field-for-field; each
	// nested converter itself returns nil when its sub-message is absent.
	obj := &alpha.ClusterConfig{
		StagingBucket:         dcl.StringOrNil(p.GetStagingBucket()),
		TempBucket:            dcl.StringOrNil(p.GetTempBucket()),
		GceClusterConfig:      ProtoToDataprocAlphaClusterConfigGceClusterConfig(p.GetGceClusterConfig()),
		MasterConfig:          ProtoToDataprocAlphaClusterConfigMasterConfig(p.GetMasterConfig()),
		WorkerConfig:          ProtoToDataprocAlphaClusterConfigWorkerConfig(p.GetWorkerConfig()),
		SecondaryWorkerConfig: ProtoToDataprocAlphaClusterConfigSecondaryWorkerConfig(p.GetSecondaryWorkerConfig()),
		SoftwareConfig:        ProtoToDataprocAlphaClusterConfigSoftwareConfig(p.GetSoftwareConfig()),
		EncryptionConfig:      ProtoToDataprocAlphaClusterConfigEncryptionConfig(p.GetEncryptionConfig()),
		AutoscalingConfig:     ProtoToDataprocAlphaClusterConfigAutoscalingConfig(p.GetAutoscalingConfig()),
		SecurityConfig:        ProtoToDataprocAlphaClusterConfigSecurityConfig(p.GetSecurityConfig()),
		LifecycleConfig:       ProtoToDataprocAlphaClusterConfigLifecycleConfig(p.GetLifecycleConfig()),
		EndpointConfig:        ProtoToDataprocAlphaClusterConfigEndpointConfig(p.GetEndpointConfig()),
		GkeClusterConfig:      ProtoToDataprocAlphaClusterConfigGkeClusterConfig(p.GetGkeClusterConfig()),
		MetastoreConfig:       ProtoToDataprocAlphaClusterConfigMetastoreConfig(p.GetMetastoreConfig()),
		DataprocMetricConfig:  ProtoToDataprocAlphaClusterConfigDataprocMetricConfig(p.GetDataprocMetricConfig()),
	}
	// Repeated nested messages are converted element by element.
	for _, r := range p.GetInitializationActions() {
		obj.InitializationActions = append(obj.InitializationActions, *ProtoToDataprocAlphaClusterConfigInitializationActions(r))
	}
	return obj
}
// ProtoToDataprocAlphaClusterConfigGceClusterConfig converts a ClusterConfigGceClusterConfig object from its proto representation.
func ProtoToDataprocAlphaClusterConfigGceClusterConfig(p *alphapb.DataprocAlphaClusterConfigGceClusterConfig) *alpha.ClusterConfigGceClusterConfig {
	// A nil proto message maps to a nil DCL object.
	if p == nil {
		return nil
	}
	obj := &alpha.ClusterConfigGceClusterConfig{
		Zone:                       dcl.StringOrNil(p.GetZone()),
		Network:                    dcl.StringOrNil(p.GetNetwork()),
		Subnetwork:                 dcl.StringOrNil(p.GetSubnetwork()),
		InternalIPOnly:             dcl.Bool(p.GetInternalIpOnly()),
		PrivateIPv6GoogleAccess:    ProtoToDataprocAlphaClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(p.GetPrivateIpv6GoogleAccess()),
		ServiceAccount:             dcl.StringOrNil(p.GetServiceAccount()),
		ReservationAffinity:        ProtoToDataprocAlphaClusterConfigGceClusterConfigReservationAffinity(p.GetReservationAffinity()),
		NodeGroupAffinity:          ProtoToDataprocAlphaClusterConfigGceClusterConfigNodeGroupAffinity(p.GetNodeGroupAffinity()),
		ShieldedInstanceConfig:     ProtoToDataprocAlphaClusterConfigGceClusterConfigShieldedInstanceConfig(p.GetShieldedInstanceConfig()),
		ConfidentialInstanceConfig: ProtoToDataprocAlphaClusterConfigGceClusterConfigConfidentialInstanceConfig(p.GetConfidentialInstanceConfig()),
	}
	// Repeated string fields are copied element by element.
	for _, r := range p.GetServiceAccountScopes() {
		obj.ServiceAccountScopes = append(obj.ServiceAccountScopes, r)
	}
	for _, r := range p.GetTags() {
		obj.Tags = append(obj.Tags, r)
	}
	return obj
}
// ProtoToDataprocAlphaClusterConfigGceClusterConfigReservationAffinity maps the
// proto form of ClusterConfigGceClusterConfigReservationAffinity to its DCL
// form; a nil input yields nil.
func ProtoToDataprocAlphaClusterConfigGceClusterConfigReservationAffinity(p *alphapb.DataprocAlphaClusterConfigGceClusterConfigReservationAffinity) *alpha.ClusterConfigGceClusterConfigReservationAffinity {
	if p == nil {
		return nil
	}
	out := &alpha.ClusterConfigGceClusterConfigReservationAffinity{}
	out.ConsumeReservationType = ProtoToDataprocAlphaClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(p.GetConsumeReservationType())
	out.Key = dcl.StringOrNil(p.GetKey())
	out.Values = append(out.Values, p.GetValues()...)
	return out
}
// ProtoToDataprocAlphaClusterConfigGceClusterConfigNodeGroupAffinity maps the
// proto form of ClusterConfigGceClusterConfigNodeGroupAffinity to its DCL
// form; a nil input yields nil.
func ProtoToDataprocAlphaClusterConfigGceClusterConfigNodeGroupAffinity(p *alphapb.DataprocAlphaClusterConfigGceClusterConfigNodeGroupAffinity) *alpha.ClusterConfigGceClusterConfigNodeGroupAffinity {
	if p == nil {
		return nil
	}
	out := &alpha.ClusterConfigGceClusterConfigNodeGroupAffinity{}
	out.NodeGroup = dcl.StringOrNil(p.GetNodeGroup())
	return out
}
// ProtoToDataprocAlphaClusterConfigGceClusterConfigShieldedInstanceConfig maps
// the proto form of ClusterConfigGceClusterConfigShieldedInstanceConfig to its
// DCL form; a nil input yields nil.
func ProtoToDataprocAlphaClusterConfigGceClusterConfigShieldedInstanceConfig(p *alphapb.DataprocAlphaClusterConfigGceClusterConfigShieldedInstanceConfig) *alpha.ClusterConfigGceClusterConfigShieldedInstanceConfig {
	if p == nil {
		return nil
	}
	out := &alpha.ClusterConfigGceClusterConfigShieldedInstanceConfig{}
	out.EnableSecureBoot = dcl.Bool(p.GetEnableSecureBoot())
	out.EnableVtpm = dcl.Bool(p.GetEnableVtpm())
	out.EnableIntegrityMonitoring = dcl.Bool(p.GetEnableIntegrityMonitoring())
	return out
}
// ProtoToDataprocAlphaClusterConfigGceClusterConfigConfidentialInstanceConfig
// maps the proto form of ClusterConfigGceClusterConfigConfidentialInstanceConfig
// to its DCL form; a nil input yields nil.
func ProtoToDataprocAlphaClusterConfigGceClusterConfigConfidentialInstanceConfig(p *alphapb.DataprocAlphaClusterConfigGceClusterConfigConfidentialInstanceConfig) *alpha.ClusterConfigGceClusterConfigConfidentialInstanceConfig {
	if p == nil {
		return nil
	}
	out := &alpha.ClusterConfigGceClusterConfigConfidentialInstanceConfig{}
	out.EnableConfidentialCompute = dcl.Bool(p.GetEnableConfidentialCompute())
	return out
}
// ProtoToDataprocAlphaClusterConfigMasterConfig converts a ClusterConfigMasterConfig object from its proto representation.
func ProtoToDataprocAlphaClusterConfigMasterConfig(p *alphapb.DataprocAlphaClusterConfigMasterConfig) *alpha.ClusterConfigMasterConfig {
	// A nil proto message maps to a nil DCL object.
	if p == nil {
		return nil
	}
	obj := &alpha.ClusterConfigMasterConfig{
		NumInstances:       dcl.Int64OrNil(p.GetNumInstances()),
		Image:              dcl.StringOrNil(p.GetImage()),
		MachineType:        dcl.StringOrNil(p.GetMachineType()),
		DiskConfig:         ProtoToDataprocAlphaClusterConfigMasterConfigDiskConfig(p.GetDiskConfig()),
		IsPreemptible:      dcl.Bool(p.GetIsPreemptible()),
		Preemptibility:     ProtoToDataprocAlphaClusterConfigMasterConfigPreemptibilityEnum(p.GetPreemptibility()),
		ManagedGroupConfig: ProtoToDataprocAlphaClusterConfigMasterConfigManagedGroupConfig(p.GetManagedGroupConfig()),
		MinCpuPlatform:     dcl.StringOrNil(p.GetMinCpuPlatform()),
	}
	// Repeated fields are converted element by element.
	for _, r := range p.GetInstanceNames() {
		obj.InstanceNames = append(obj.InstanceNames, r)
	}
	for _, r := range p.GetAccelerators() {
		obj.Accelerators = append(obj.Accelerators, *ProtoToDataprocAlphaClusterConfigMasterConfigAccelerators(r))
	}
	for _, r := range p.GetInstanceReferences() {
		obj.InstanceReferences = append(obj.InstanceReferences, *ProtoToDataprocAlphaClusterConfigMasterConfigInstanceReferences(r))
	}
	return obj
}
// ProtoToDataprocAlphaClusterConfigMasterConfigDiskConfig maps the proto form
// of ClusterConfigMasterConfigDiskConfig to its DCL form; a nil input yields nil.
func ProtoToDataprocAlphaClusterConfigMasterConfigDiskConfig(p *alphapb.DataprocAlphaClusterConfigMasterConfigDiskConfig) *alpha.ClusterConfigMasterConfigDiskConfig {
	if p == nil {
		return nil
	}
	out := &alpha.ClusterConfigMasterConfigDiskConfig{}
	out.BootDiskType = dcl.StringOrNil(p.GetBootDiskType())
	out.BootDiskSizeGb = dcl.Int64OrNil(p.GetBootDiskSizeGb())
	out.NumLocalSsds = dcl.Int64OrNil(p.GetNumLocalSsds())
	out.LocalSsdInterface = dcl.StringOrNil(p.GetLocalSsdInterface())
	return out
}
// ProtoToDataprocAlphaClusterConfigMasterConfigManagedGroupConfig maps the
// proto form of ClusterConfigMasterConfigManagedGroupConfig to its DCL form;
// a nil input yields nil.
func ProtoToDataprocAlphaClusterConfigMasterConfigManagedGroupConfig(p *alphapb.DataprocAlphaClusterConfigMasterConfigManagedGroupConfig) *alpha.ClusterConfigMasterConfigManagedGroupConfig {
	if p == nil {
		return nil
	}
	out := &alpha.ClusterConfigMasterConfigManagedGroupConfig{}
	out.InstanceTemplateName = dcl.StringOrNil(p.GetInstanceTemplateName())
	out.InstanceGroupManagerName = dcl.StringOrNil(p.GetInstanceGroupManagerName())
	return out
}
// ProtoToDataprocAlphaClusterConfigMasterConfigAccelerators maps the proto
// form of ClusterConfigMasterConfigAccelerators to its DCL form; a nil input
// yields nil.
func ProtoToDataprocAlphaClusterConfigMasterConfigAccelerators(p *alphapb.DataprocAlphaClusterConfigMasterConfigAccelerators) *alpha.ClusterConfigMasterConfigAccelerators {
	if p == nil {
		return nil
	}
	out := &alpha.ClusterConfigMasterConfigAccelerators{}
	out.AcceleratorType = dcl.StringOrNil(p.GetAcceleratorType())
	out.AcceleratorCount = dcl.Int64OrNil(p.GetAcceleratorCount())
	return out
}
// ProtoToDataprocAlphaClusterConfigMasterConfigInstanceReferences maps the
// proto form of ClusterConfigMasterConfigInstanceReferences to its DCL form;
// a nil input yields nil.
func ProtoToDataprocAlphaClusterConfigMasterConfigInstanceReferences(p *alphapb.DataprocAlphaClusterConfigMasterConfigInstanceReferences) *alpha.ClusterConfigMasterConfigInstanceReferences {
	if p == nil {
		return nil
	}
	out := &alpha.ClusterConfigMasterConfigInstanceReferences{}
	out.InstanceName = dcl.StringOrNil(p.GetInstanceName())
	out.InstanceId = dcl.StringOrNil(p.GetInstanceId())
	out.PublicKey = dcl.StringOrNil(p.GetPublicKey())
	out.PublicEciesKey = dcl.StringOrNil(p.GetPublicEciesKey())
	return out
}
// ProtoToDataprocAlphaClusterConfigWorkerConfig converts a ClusterConfigWorkerConfig object from its proto representation.
func ProtoToDataprocAlphaClusterConfigWorkerConfig(p *alphapb.DataprocAlphaClusterConfigWorkerConfig) *alpha.ClusterConfigWorkerConfig {
	// A nil proto message maps to a nil DCL object.
	if p == nil {
		return nil
	}
	obj := &alpha.ClusterConfigWorkerConfig{
		NumInstances:       dcl.Int64OrNil(p.GetNumInstances()),
		Image:              dcl.StringOrNil(p.GetImage()),
		MachineType:        dcl.StringOrNil(p.GetMachineType()),
		DiskConfig:         ProtoToDataprocAlphaClusterConfigWorkerConfigDiskConfig(p.GetDiskConfig()),
		IsPreemptible:      dcl.Bool(p.GetIsPreemptible()),
		Preemptibility:     ProtoToDataprocAlphaClusterConfigWorkerConfigPreemptibilityEnum(p.GetPreemptibility()),
		ManagedGroupConfig: ProtoToDataprocAlphaClusterConfigWorkerConfigManagedGroupConfig(p.GetManagedGroupConfig()),
		MinCpuPlatform:     dcl.StringOrNil(p.GetMinCpuPlatform()),
	}
	// Repeated fields are converted element by element.
	for _, r := range p.GetInstanceNames() {
		obj.InstanceNames = append(obj.InstanceNames, r)
	}
	for _, r := range p.GetAccelerators() {
		obj.Accelerators = append(obj.Accelerators, *ProtoToDataprocAlphaClusterConfigWorkerConfigAccelerators(r))
	}
	for _, r := range p.GetInstanceReferences() {
		obj.InstanceReferences = append(obj.InstanceReferences, *ProtoToDataprocAlphaClusterConfigWorkerConfigInstanceReferences(r))
	}
	return obj
}
// ProtoToDataprocAlphaClusterConfigWorkerConfigDiskConfig maps the proto form
// of ClusterConfigWorkerConfigDiskConfig to its DCL form; a nil input yields nil.
func ProtoToDataprocAlphaClusterConfigWorkerConfigDiskConfig(p *alphapb.DataprocAlphaClusterConfigWorkerConfigDiskConfig) *alpha.ClusterConfigWorkerConfigDiskConfig {
	if p == nil {
		return nil
	}
	out := &alpha.ClusterConfigWorkerConfigDiskConfig{}
	out.BootDiskType = dcl.StringOrNil(p.GetBootDiskType())
	out.BootDiskSizeGb = dcl.Int64OrNil(p.GetBootDiskSizeGb())
	out.NumLocalSsds = dcl.Int64OrNil(p.GetNumLocalSsds())
	out.LocalSsdInterface = dcl.StringOrNil(p.GetLocalSsdInterface())
	return out
}
// ProtoToDataprocAlphaClusterConfigWorkerConfigManagedGroupConfig maps the
// proto form of ClusterConfigWorkerConfigManagedGroupConfig to its DCL form;
// a nil input yields nil.
func ProtoToDataprocAlphaClusterConfigWorkerConfigManagedGroupConfig(p *alphapb.DataprocAlphaClusterConfigWorkerConfigManagedGroupConfig) *alpha.ClusterConfigWorkerConfigManagedGroupConfig {
	if p == nil {
		return nil
	}
	out := &alpha.ClusterConfigWorkerConfigManagedGroupConfig{}
	out.InstanceTemplateName = dcl.StringOrNil(p.GetInstanceTemplateName())
	out.InstanceGroupManagerName = dcl.StringOrNil(p.GetInstanceGroupManagerName())
	return out
}
// ProtoToDataprocAlphaClusterConfigWorkerConfigAccelerators maps the proto
// form of ClusterConfigWorkerConfigAccelerators to its DCL form; a nil input
// yields nil.
func ProtoToDataprocAlphaClusterConfigWorkerConfigAccelerators(p *alphapb.DataprocAlphaClusterConfigWorkerConfigAccelerators) *alpha.ClusterConfigWorkerConfigAccelerators {
	if p == nil {
		return nil
	}
	out := &alpha.ClusterConfigWorkerConfigAccelerators{}
	out.AcceleratorType = dcl.StringOrNil(p.GetAcceleratorType())
	out.AcceleratorCount = dcl.Int64OrNil(p.GetAcceleratorCount())
	return out
}
// ProtoToDataprocAlphaClusterConfigWorkerConfigInstanceReferences maps the
// proto form of ClusterConfigWorkerConfigInstanceReferences to its DCL form;
// a nil input yields nil.
func ProtoToDataprocAlphaClusterConfigWorkerConfigInstanceReferences(p *alphapb.DataprocAlphaClusterConfigWorkerConfigInstanceReferences) *alpha.ClusterConfigWorkerConfigInstanceReferences {
	if p == nil {
		return nil
	}
	out := &alpha.ClusterConfigWorkerConfigInstanceReferences{}
	out.InstanceName = dcl.StringOrNil(p.GetInstanceName())
	out.InstanceId = dcl.StringOrNil(p.GetInstanceId())
	out.PublicKey = dcl.StringOrNil(p.GetPublicKey())
	out.PublicEciesKey = dcl.StringOrNil(p.GetPublicEciesKey())
	return out
}
// ProtoToDataprocAlphaClusterConfigSecondaryWorkerConfig converts a ClusterConfigSecondaryWorkerConfig object from its proto representation.
func ProtoToDataprocAlphaClusterConfigSecondaryWorkerConfig(p *alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfig) *alpha.ClusterConfigSecondaryWorkerConfig {
	// A nil proto message maps to a nil DCL object.
	if p == nil {
		return nil
	}
	obj := &alpha.ClusterConfigSecondaryWorkerConfig{
		NumInstances:       dcl.Int64OrNil(p.GetNumInstances()),
		Image:              dcl.StringOrNil(p.GetImage()),
		MachineType:        dcl.StringOrNil(p.GetMachineType()),
		DiskConfig:         ProtoToDataprocAlphaClusterConfigSecondaryWorkerConfigDiskConfig(p.GetDiskConfig()),
		IsPreemptible:      dcl.Bool(p.GetIsPreemptible()),
		Preemptibility:     ProtoToDataprocAlphaClusterConfigSecondaryWorkerConfigPreemptibilityEnum(p.GetPreemptibility()),
		ManagedGroupConfig: ProtoToDataprocAlphaClusterConfigSecondaryWorkerConfigManagedGroupConfig(p.GetManagedGroupConfig()),
		MinCpuPlatform:     dcl.StringOrNil(p.GetMinCpuPlatform()),
	}
	// Repeated fields are converted element by element.
	for _, r := range p.GetInstanceNames() {
		obj.InstanceNames = append(obj.InstanceNames, r)
	}
	for _, r := range p.GetAccelerators() {
		obj.Accelerators = append(obj.Accelerators, *ProtoToDataprocAlphaClusterConfigSecondaryWorkerConfigAccelerators(r))
	}
	for _, r := range p.GetInstanceReferences() {
		obj.InstanceReferences = append(obj.InstanceReferences, *ProtoToDataprocAlphaClusterConfigSecondaryWorkerConfigInstanceReferences(r))
	}
	return obj
}
// ProtoToDataprocAlphaClusterConfigSecondaryWorkerConfigDiskConfig maps the
// proto form of ClusterConfigSecondaryWorkerConfigDiskConfig to its DCL form;
// a nil input yields nil.
func ProtoToDataprocAlphaClusterConfigSecondaryWorkerConfigDiskConfig(p *alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigDiskConfig) *alpha.ClusterConfigSecondaryWorkerConfigDiskConfig {
	if p == nil {
		return nil
	}
	out := &alpha.ClusterConfigSecondaryWorkerConfigDiskConfig{}
	out.BootDiskType = dcl.StringOrNil(p.GetBootDiskType())
	out.BootDiskSizeGb = dcl.Int64OrNil(p.GetBootDiskSizeGb())
	out.NumLocalSsds = dcl.Int64OrNil(p.GetNumLocalSsds())
	out.LocalSsdInterface = dcl.StringOrNil(p.GetLocalSsdInterface())
	return out
}
// ProtoToDataprocAlphaClusterConfigSecondaryWorkerConfigManagedGroupConfig maps
// the proto form of ClusterConfigSecondaryWorkerConfigManagedGroupConfig to its
// DCL form; a nil input yields nil.
func ProtoToDataprocAlphaClusterConfigSecondaryWorkerConfigManagedGroupConfig(p *alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigManagedGroupConfig) *alpha.ClusterConfigSecondaryWorkerConfigManagedGroupConfig {
	if p == nil {
		return nil
	}
	out := &alpha.ClusterConfigSecondaryWorkerConfigManagedGroupConfig{}
	out.InstanceTemplateName = dcl.StringOrNil(p.GetInstanceTemplateName())
	out.InstanceGroupManagerName = dcl.StringOrNil(p.GetInstanceGroupManagerName())
	return out
}
// ProtoToDataprocAlphaClusterConfigSecondaryWorkerConfigAccelerators maps the
// proto form of ClusterConfigSecondaryWorkerConfigAccelerators to its DCL
// form; a nil input yields nil.
func ProtoToDataprocAlphaClusterConfigSecondaryWorkerConfigAccelerators(p *alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigAccelerators) *alpha.ClusterConfigSecondaryWorkerConfigAccelerators {
	if p == nil {
		return nil
	}
	out := &alpha.ClusterConfigSecondaryWorkerConfigAccelerators{}
	out.AcceleratorType = dcl.StringOrNil(p.GetAcceleratorType())
	out.AcceleratorCount = dcl.Int64OrNil(p.GetAcceleratorCount())
	return out
}
// ProtoToDataprocAlphaClusterConfigSecondaryWorkerConfigInstanceReferences maps
// the proto form of ClusterConfigSecondaryWorkerConfigInstanceReferences to its
// DCL form; a nil input yields nil.
func ProtoToDataprocAlphaClusterConfigSecondaryWorkerConfigInstanceReferences(p *alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigInstanceReferences) *alpha.ClusterConfigSecondaryWorkerConfigInstanceReferences {
	if p == nil {
		return nil
	}
	out := &alpha.ClusterConfigSecondaryWorkerConfigInstanceReferences{}
	out.InstanceName = dcl.StringOrNil(p.GetInstanceName())
	out.InstanceId = dcl.StringOrNil(p.GetInstanceId())
	out.PublicKey = dcl.StringOrNil(p.GetPublicKey())
	out.PublicEciesKey = dcl.StringOrNil(p.GetPublicEciesKey())
	return out
}
// ProtoToDataprocAlphaClusterConfigSoftwareConfig maps the proto form of
// ClusterConfigSoftwareConfig to its DCL form; a nil input yields nil.
func ProtoToDataprocAlphaClusterConfigSoftwareConfig(p *alphapb.DataprocAlphaClusterConfigSoftwareConfig) *alpha.ClusterConfigSoftwareConfig {
	if p == nil {
		return nil
	}
	out := &alpha.ClusterConfigSoftwareConfig{}
	out.ImageVersion = dcl.StringOrNil(p.GetImageVersion())
	// Convert each repeated enum value individually.
	for _, c := range p.GetOptionalComponents() {
		out.OptionalComponents = append(out.OptionalComponents, *ProtoToDataprocAlphaClusterConfigSoftwareConfigOptionalComponentsEnum(c))
	}
	return out
}
// ProtoToDataprocAlphaClusterConfigInitializationActions maps the proto form
// of ClusterConfigInitializationActions to its DCL form; a nil input yields nil.
func ProtoToDataprocAlphaClusterConfigInitializationActions(p *alphapb.DataprocAlphaClusterConfigInitializationActions) *alpha.ClusterConfigInitializationActions {
	if p == nil {
		return nil
	}
	out := &alpha.ClusterConfigInitializationActions{}
	out.ExecutableFile = dcl.StringOrNil(p.GetExecutableFile())
	out.ExecutionTimeout = dcl.StringOrNil(p.GetExecutionTimeout())
	return out
}
// ProtoToDataprocAlphaClusterConfigEncryptionConfig maps the proto form of
// ClusterConfigEncryptionConfig to its DCL form; a nil input yields nil.
func ProtoToDataprocAlphaClusterConfigEncryptionConfig(p *alphapb.DataprocAlphaClusterConfigEncryptionConfig) *alpha.ClusterConfigEncryptionConfig {
	if p == nil {
		return nil
	}
	out := &alpha.ClusterConfigEncryptionConfig{}
	out.GcePdKmsKeyName = dcl.StringOrNil(p.GetGcePdKmsKeyName())
	return out
}
// ProtoToDataprocAlphaClusterConfigAutoscalingConfig converts a ClusterConfigAutoscalingConfig object from its proto representation.
func ProtoToDataprocAlphaClusterConfigAutoscalingConfig(p *alphapb.DataprocAlphaClusterConfigAutoscalingConfig) *alpha.ClusterConfigAutoscalingConfig {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterConfigAutoscalingConfig{}
	res.Policy = dcl.StringOrNil(p.GetPolicy())
	return res
}
// ProtoToDataprocAlphaClusterConfigSecurityConfig converts a ClusterConfigSecurityConfig object from its proto representation.
func ProtoToDataprocAlphaClusterConfigSecurityConfig(p *alphapb.DataprocAlphaClusterConfigSecurityConfig) *alpha.ClusterConfigSecurityConfig {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterConfigSecurityConfig{}
	res.KerberosConfig = ProtoToDataprocAlphaClusterConfigSecurityConfigKerberosConfig(p.GetKerberosConfig())
	res.IdentityConfig = ProtoToDataprocAlphaClusterConfigSecurityConfigIdentityConfig(p.GetIdentityConfig())
	return res
}
// ProtoToDataprocAlphaClusterConfigSecurityConfigKerberosConfig converts a ClusterConfigSecurityConfigKerberosConfig object from its proto representation.
func ProtoToDataprocAlphaClusterConfigSecurityConfigKerberosConfig(p *alphapb.DataprocAlphaClusterConfigSecurityConfigKerberosConfig) *alpha.ClusterConfigSecurityConfigKerberosConfig {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterConfigSecurityConfigKerberosConfig{}
	res.EnableKerberos = dcl.Bool(p.GetEnableKerberos())
	res.RootPrincipalPassword = dcl.StringOrNil(p.GetRootPrincipalPassword())
	res.KmsKey = dcl.StringOrNil(p.GetKmsKey())
	res.Keystore = dcl.StringOrNil(p.GetKeystore())
	res.Truststore = dcl.StringOrNil(p.GetTruststore())
	res.KeystorePassword = dcl.StringOrNil(p.GetKeystorePassword())
	res.KeyPassword = dcl.StringOrNil(p.GetKeyPassword())
	res.TruststorePassword = dcl.StringOrNil(p.GetTruststorePassword())
	res.CrossRealmTrustRealm = dcl.StringOrNil(p.GetCrossRealmTrustRealm())
	res.CrossRealmTrustKdc = dcl.StringOrNil(p.GetCrossRealmTrustKdc())
	res.CrossRealmTrustAdminServer = dcl.StringOrNil(p.GetCrossRealmTrustAdminServer())
	res.CrossRealmTrustSharedPassword = dcl.StringOrNil(p.GetCrossRealmTrustSharedPassword())
	res.KdcDbKey = dcl.StringOrNil(p.GetKdcDbKey())
	res.TgtLifetimeHours = dcl.Int64OrNil(p.GetTgtLifetimeHours())
	res.Realm = dcl.StringOrNil(p.GetRealm())
	return res
}
// ProtoToDataprocAlphaClusterConfigSecurityConfigIdentityConfig converts a ClusterConfigSecurityConfigIdentityConfig object from its proto representation.
func ProtoToDataprocAlphaClusterConfigSecurityConfigIdentityConfig(p *alphapb.DataprocAlphaClusterConfigSecurityConfigIdentityConfig) *alpha.ClusterConfigSecurityConfigIdentityConfig {
	if p == nil {
		return nil
	}
	// No scalar fields to copy; return an empty object.
	return &alpha.ClusterConfigSecurityConfigIdentityConfig{}
}
// ProtoToDataprocAlphaClusterConfigLifecycleConfig converts a ClusterConfigLifecycleConfig object from its proto representation.
func ProtoToDataprocAlphaClusterConfigLifecycleConfig(p *alphapb.DataprocAlphaClusterConfigLifecycleConfig) *alpha.ClusterConfigLifecycleConfig {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterConfigLifecycleConfig{}
	res.IdleDeleteTtl = dcl.StringOrNil(p.GetIdleDeleteTtl())
	res.AutoDeleteTime = dcl.StringOrNil(p.GetAutoDeleteTime())
	res.AutoDeleteTtl = dcl.StringOrNil(p.GetAutoDeleteTtl())
	res.IdleStartTime = dcl.StringOrNil(p.GetIdleStartTime())
	return res
}
// ProtoToDataprocAlphaClusterConfigEndpointConfig converts a ClusterConfigEndpointConfig object from its proto representation.
func ProtoToDataprocAlphaClusterConfigEndpointConfig(p *alphapb.DataprocAlphaClusterConfigEndpointConfig) *alpha.ClusterConfigEndpointConfig {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterConfigEndpointConfig{}
	res.EnableHttpPortAccess = dcl.Bool(p.GetEnableHttpPortAccess())
	return res
}
// ProtoToDataprocAlphaClusterConfigGkeClusterConfig converts a ClusterConfigGkeClusterConfig object from its proto representation.
func ProtoToDataprocAlphaClusterConfigGkeClusterConfig(p *alphapb.DataprocAlphaClusterConfigGkeClusterConfig) *alpha.ClusterConfigGkeClusterConfig {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterConfigGkeClusterConfig{}
	res.NamespacedGkeDeploymentTarget = ProtoToDataprocAlphaClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(p.GetNamespacedGkeDeploymentTarget())
	return res
}
// ProtoToDataprocAlphaClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget converts a ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget object from its proto representation.
func ProtoToDataprocAlphaClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(p *alphapb.DataprocAlphaClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) *alpha.ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{}
	res.TargetGkeCluster = dcl.StringOrNil(p.GetTargetGkeCluster())
	res.ClusterNamespace = dcl.StringOrNil(p.GetClusterNamespace())
	return res
}
// ProtoToDataprocAlphaClusterConfigMetastoreConfig converts a ClusterConfigMetastoreConfig object from its proto representation.
func ProtoToDataprocAlphaClusterConfigMetastoreConfig(p *alphapb.DataprocAlphaClusterConfigMetastoreConfig) *alpha.ClusterConfigMetastoreConfig {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterConfigMetastoreConfig{}
	res.DataprocMetastoreService = dcl.StringOrNil(p.GetDataprocMetastoreService())
	return res
}
// ProtoToDataprocAlphaClusterConfigDataprocMetricConfig converts a ClusterConfigDataprocMetricConfig object from its proto representation.
func ProtoToDataprocAlphaClusterConfigDataprocMetricConfig(p *alphapb.DataprocAlphaClusterConfigDataprocMetricConfig) *alpha.ClusterConfigDataprocMetricConfig {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterConfigDataprocMetricConfig{}
	// Convert each metric message individually.
	for _, m := range p.GetMetrics() {
		res.Metrics = append(res.Metrics, *ProtoToDataprocAlphaClusterConfigDataprocMetricConfigMetrics(m))
	}
	return res
}
// ProtoToDataprocAlphaClusterConfigDataprocMetricConfigMetrics converts a ClusterConfigDataprocMetricConfigMetrics object from its proto representation.
func ProtoToDataprocAlphaClusterConfigDataprocMetricConfigMetrics(p *alphapb.DataprocAlphaClusterConfigDataprocMetricConfigMetrics) *alpha.ClusterConfigDataprocMetricConfigMetrics {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterConfigDataprocMetricConfigMetrics{}
	res.MetricSource = ProtoToDataprocAlphaClusterConfigDataprocMetricConfigMetricsMetricSourceEnum(p.GetMetricSource())
	// Strings need no per-element conversion; copy via variadic append
	// (leaves the field nil when the proto list is empty, as before).
	res.MetricOverrides = append(res.MetricOverrides, p.GetMetricOverrides()...)
	return res
}
// ProtoToDataprocAlphaClusterStatus converts a ClusterStatus object from its proto representation.
func ProtoToDataprocAlphaClusterStatus(p *alphapb.DataprocAlphaClusterStatus) *alpha.ClusterStatus {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterStatus{}
	res.State = ProtoToDataprocAlphaClusterStatusStateEnum(p.GetState())
	res.Detail = dcl.StringOrNil(p.GetDetail())
	res.StateStartTime = dcl.StringOrNil(p.GetStateStartTime())
	res.Substate = ProtoToDataprocAlphaClusterStatusSubstateEnum(p.GetSubstate())
	return res
}
// ProtoToDataprocAlphaClusterStatusHistory converts a ClusterStatusHistory object from its proto representation.
func ProtoToDataprocAlphaClusterStatusHistory(p *alphapb.DataprocAlphaClusterStatusHistory) *alpha.ClusterStatusHistory {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterStatusHistory{}
	res.State = ProtoToDataprocAlphaClusterStatusHistoryStateEnum(p.GetState())
	res.Detail = dcl.StringOrNil(p.GetDetail())
	res.StateStartTime = dcl.StringOrNil(p.GetStateStartTime())
	res.Substate = ProtoToDataprocAlphaClusterStatusHistorySubstateEnum(p.GetSubstate())
	return res
}
// ProtoToDataprocAlphaClusterMetrics converts a ClusterMetrics object from its proto representation.
func ProtoToDataprocAlphaClusterMetrics(p *alphapb.DataprocAlphaClusterMetrics) *alpha.ClusterMetrics {
	if p == nil {
		return nil
	}
	// No scalar fields to copy; return an empty object.
	return &alpha.ClusterMetrics{}
}
// ProtoToDataprocAlphaClusterVirtualClusterConfig converts a ClusterVirtualClusterConfig object from its proto representation.
func ProtoToDataprocAlphaClusterVirtualClusterConfig(p *alphapb.DataprocAlphaClusterVirtualClusterConfig) *alpha.ClusterVirtualClusterConfig {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterVirtualClusterConfig{}
	res.StagingBucket = dcl.StringOrNil(p.GetStagingBucket())
	res.KubernetesClusterConfig = ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfig(p.GetKubernetesClusterConfig())
	res.AuxiliaryServicesConfig = ProtoToDataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfig(p.GetAuxiliaryServicesConfig())
	return res
}
// ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfig converts a ClusterVirtualClusterConfigKubernetesClusterConfig object from its proto representation.
func ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfig(p *alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfig) *alpha.ClusterVirtualClusterConfigKubernetesClusterConfig {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterVirtualClusterConfigKubernetesClusterConfig{}
	res.KubernetesNamespace = dcl.StringOrNil(p.GetKubernetesNamespace())
	res.GkeClusterConfig = ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(p.GetGkeClusterConfig())
	res.KubernetesSoftwareConfig = ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(p.GetKubernetesSoftwareConfig())
	return res
}
// ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig converts a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig object from its proto representation.
func ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(p *alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) *alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig{}
	res.GkeClusterTarget = dcl.StringOrNil(p.GetGkeClusterTarget())
	// Convert each node-pool-target message individually.
	for _, t := range p.GetNodePoolTarget() {
		res.NodePoolTarget = append(res.NodePoolTarget, *ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(t))
	}
	return res
}
// ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget converts a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget object from its proto representation.
func ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(p *alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) *alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget{}
	res.NodePool = dcl.StringOrNil(p.GetNodePool())
	res.NodePoolConfig = ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(p.GetNodePoolConfig())
	// Convert each role enum value individually.
	for _, role := range p.GetRoles() {
		res.Roles = append(res.Roles, *ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum(role))
	}
	return res
}
// ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig converts a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig object from its proto representation.
func ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(p *alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) *alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig{}
	res.Config = ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(p.GetConfig())
	res.Autoscaling = ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(p.GetAutoscaling())
	// Strings need no per-element conversion; copy via variadic append
	// (leaves the field nil when the proto list is empty, as before).
	res.Locations = append(res.Locations, p.GetLocations()...)
	return res
}
// ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig converts a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig object from its proto representation.
func ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(p *alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) *alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig{}
	res.MachineType = dcl.StringOrNil(p.GetMachineType())
	res.LocalSsdCount = dcl.Int64OrNil(p.GetLocalSsdCount())
	res.Preemptible = dcl.Bool(p.GetPreemptible())
	res.MinCpuPlatform = dcl.StringOrNil(p.GetMinCpuPlatform())
	res.BootDiskKmsKey = dcl.StringOrNil(p.GetBootDiskKmsKey())
	res.EphemeralStorageConfig = ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(p.GetEphemeralStorageConfig())
	res.Spot = dcl.Bool(p.GetSpot())
	// Convert each accelerator message individually.
	for _, a := range p.GetAccelerators() {
		res.Accelerators = append(res.Accelerators, *ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(a))
	}
	return res
}
// ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators converts a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators object from its proto representation.
func ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(p *alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) *alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators{}
	res.AcceleratorCount = dcl.Int64OrNil(p.GetAcceleratorCount())
	res.AcceleratorType = dcl.StringOrNil(p.GetAcceleratorType())
	res.GpuPartitionSize = dcl.StringOrNil(p.GetGpuPartitionSize())
	return res
}
// ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig converts a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig object from its proto representation.
func ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(p *alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) *alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig{}
	res.LocalSsdCount = dcl.Int64OrNil(p.GetLocalSsdCount())
	return res
}
// ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling converts a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling object from its proto representation.
func ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(p *alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) *alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling{}
	res.MinNodeCount = dcl.Int64OrNil(p.GetMinNodeCount())
	res.MaxNodeCount = dcl.Int64OrNil(p.GetMaxNodeCount())
	return res
}
// ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig converts a ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig object from its proto representation.
func ProtoToDataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(p *alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) *alpha.ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig {
	if p == nil {
		return nil
	}
	// No scalar fields to copy; return an empty object.
	return &alpha.ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig{}
}
// ProtoToDataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfig converts a ClusterVirtualClusterConfigAuxiliaryServicesConfig object from its proto representation.
func ProtoToDataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfig(p *alphapb.DataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfig) *alpha.ClusterVirtualClusterConfigAuxiliaryServicesConfig {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterVirtualClusterConfigAuxiliaryServicesConfig{}
	res.MetastoreConfig = ProtoToDataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(p.GetMetastoreConfig())
	res.SparkHistoryServerConfig = ProtoToDataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(p.GetSparkHistoryServerConfig())
	return res
}
// ProtoToDataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig converts a ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig object from its proto representation.
func ProtoToDataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(p *alphapb.DataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) *alpha.ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig{}
	res.DataprocMetastoreService = dcl.StringOrNil(p.GetDataprocMetastoreService())
	return res
}
// ProtoToDataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig converts a ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig object from its proto representation.
func ProtoToDataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(p *alphapb.DataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) *alpha.ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig {
	if p == nil {
		return nil
	}
	res := &alpha.ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig{}
	res.DataprocCluster = dcl.StringOrNil(p.GetDataprocCluster())
	return res
}
// ProtoToCluster converts a Cluster resource from its proto representation.
// Unlike the field converters above, the original omitted the nil guard:
// protobuf getters are nil-receiver-safe, so a nil proto would silently
// produce a non-nil, all-empty Cluster. Return nil instead, matching the
// convention of every other converter in this file.
func ProtoToCluster(p *alphapb.DataprocAlphaCluster) *alpha.Cluster {
	if p == nil {
		return nil
	}
	obj := &alpha.Cluster{
		Project:              dcl.StringOrNil(p.GetProject()),
		Name:                 dcl.StringOrNil(p.GetName()),
		Config:               ProtoToDataprocAlphaClusterConfig(p.GetConfig()),
		Status:               ProtoToDataprocAlphaClusterStatus(p.GetStatus()),
		ClusterUuid:          dcl.StringOrNil(p.GetClusterUuid()),
		Metrics:              ProtoToDataprocAlphaClusterMetrics(p.GetMetrics()),
		Location:             dcl.StringOrNil(p.GetLocation()),
		VirtualClusterConfig: ProtoToDataprocAlphaClusterVirtualClusterConfig(p.GetVirtualClusterConfig()),
	}
	// Each history entry is converted and stored by value.
	for _, r := range p.GetStatusHistory() {
		obj.StatusHistory = append(obj.StatusHistory, *ProtoToDataprocAlphaClusterStatusHistory(r))
	}
	return obj
}
// DataprocAlphaClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumToProto converts a ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum enum to its proto representation.
func DataprocAlphaClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumToProto(e *alpha.ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum) alphapb.DataprocAlphaClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum {
	// Unset or unrecognized values map to the zero enum.
	if e == nil {
		return alphapb.DataprocAlphaClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(0)
	}
	v, ok := alphapb.DataprocAlphaClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum_value["ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum"+string(*e)]
	if !ok {
		return alphapb.DataprocAlphaClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(0)
	}
	return alphapb.DataprocAlphaClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(v)
}
// DataprocAlphaClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumToProto converts a ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum enum to its proto representation.
func DataprocAlphaClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumToProto(e *alpha.ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum) alphapb.DataprocAlphaClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum {
	// Unset or unrecognized values map to the zero enum.
	if e == nil {
		return alphapb.DataprocAlphaClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(0)
	}
	v, ok := alphapb.DataprocAlphaClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum_value["ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum"+string(*e)]
	if !ok {
		return alphapb.DataprocAlphaClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(0)
	}
	return alphapb.DataprocAlphaClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(v)
}
// DataprocAlphaClusterConfigMasterConfigPreemptibilityEnumToProto converts a ClusterConfigMasterConfigPreemptibilityEnum enum to its proto representation.
func DataprocAlphaClusterConfigMasterConfigPreemptibilityEnumToProto(e *alpha.ClusterConfigMasterConfigPreemptibilityEnum) alphapb.DataprocAlphaClusterConfigMasterConfigPreemptibilityEnum {
	// Unset or unrecognized values map to the zero enum.
	if e == nil {
		return alphapb.DataprocAlphaClusterConfigMasterConfigPreemptibilityEnum(0)
	}
	v, ok := alphapb.DataprocAlphaClusterConfigMasterConfigPreemptibilityEnum_value["ClusterConfigMasterConfigPreemptibilityEnum"+string(*e)]
	if !ok {
		return alphapb.DataprocAlphaClusterConfigMasterConfigPreemptibilityEnum(0)
	}
	return alphapb.DataprocAlphaClusterConfigMasterConfigPreemptibilityEnum(v)
}
// DataprocAlphaClusterConfigWorkerConfigPreemptibilityEnumToProto converts a ClusterConfigWorkerConfigPreemptibilityEnum enum to its proto representation.
func DataprocAlphaClusterConfigWorkerConfigPreemptibilityEnumToProto(e *alpha.ClusterConfigWorkerConfigPreemptibilityEnum) alphapb.DataprocAlphaClusterConfigWorkerConfigPreemptibilityEnum {
	// Unset or unrecognized values map to the zero enum.
	if e == nil {
		return alphapb.DataprocAlphaClusterConfigWorkerConfigPreemptibilityEnum(0)
	}
	v, ok := alphapb.DataprocAlphaClusterConfigWorkerConfigPreemptibilityEnum_value["ClusterConfigWorkerConfigPreemptibilityEnum"+string(*e)]
	if !ok {
		return alphapb.DataprocAlphaClusterConfigWorkerConfigPreemptibilityEnum(0)
	}
	return alphapb.DataprocAlphaClusterConfigWorkerConfigPreemptibilityEnum(v)
}
// DataprocAlphaClusterConfigSecondaryWorkerConfigPreemptibilityEnumToProto converts a ClusterConfigSecondaryWorkerConfigPreemptibilityEnum enum to its proto representation.
func DataprocAlphaClusterConfigSecondaryWorkerConfigPreemptibilityEnumToProto(e *alpha.ClusterConfigSecondaryWorkerConfigPreemptibilityEnum) alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigPreemptibilityEnum {
	// Unset or unrecognized values map to the zero enum.
	if e == nil {
		return alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigPreemptibilityEnum(0)
	}
	v, ok := alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigPreemptibilityEnum_value["ClusterConfigSecondaryWorkerConfigPreemptibilityEnum"+string(*e)]
	if !ok {
		return alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigPreemptibilityEnum(0)
	}
	return alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigPreemptibilityEnum(v)
}
// DataprocAlphaClusterConfigSoftwareConfigOptionalComponentsEnumToProto converts a ClusterConfigSoftwareConfigOptionalComponentsEnum enum to its proto representation.
func DataprocAlphaClusterConfigSoftwareConfigOptionalComponentsEnumToProto(e *alpha.ClusterConfigSoftwareConfigOptionalComponentsEnum) alphapb.DataprocAlphaClusterConfigSoftwareConfigOptionalComponentsEnum {
	// Unset or unrecognized values map to the zero enum.
	if e == nil {
		return alphapb.DataprocAlphaClusterConfigSoftwareConfigOptionalComponentsEnum(0)
	}
	v, ok := alphapb.DataprocAlphaClusterConfigSoftwareConfigOptionalComponentsEnum_value["ClusterConfigSoftwareConfigOptionalComponentsEnum"+string(*e)]
	if !ok {
		return alphapb.DataprocAlphaClusterConfigSoftwareConfigOptionalComponentsEnum(0)
	}
	return alphapb.DataprocAlphaClusterConfigSoftwareConfigOptionalComponentsEnum(v)
}
// DataprocAlphaClusterConfigDataprocMetricConfigMetricsMetricSourceEnumToProto converts a ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum enum to its proto representation.
func DataprocAlphaClusterConfigDataprocMetricConfigMetricsMetricSourceEnumToProto(e *alpha.ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum) alphapb.DataprocAlphaClusterConfigDataprocMetricConfigMetricsMetricSourceEnum {
	// Unset or unrecognized values map to the zero enum.
	if e == nil {
		return alphapb.DataprocAlphaClusterConfigDataprocMetricConfigMetricsMetricSourceEnum(0)
	}
	v, ok := alphapb.DataprocAlphaClusterConfigDataprocMetricConfigMetricsMetricSourceEnum_value["ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum"+string(*e)]
	if !ok {
		return alphapb.DataprocAlphaClusterConfigDataprocMetricConfigMetricsMetricSourceEnum(0)
	}
	return alphapb.DataprocAlphaClusterConfigDataprocMetricConfigMetricsMetricSourceEnum(v)
}
// DataprocAlphaClusterStatusStateEnumToProto converts a ClusterStatusStateEnum enum to its proto representation.
func DataprocAlphaClusterStatusStateEnumToProto(e *alpha.ClusterStatusStateEnum) alphapb.DataprocAlphaClusterStatusStateEnum {
	// Unset or unrecognized values map to the zero enum.
	if e == nil {
		return alphapb.DataprocAlphaClusterStatusStateEnum(0)
	}
	v, ok := alphapb.DataprocAlphaClusterStatusStateEnum_value["ClusterStatusStateEnum"+string(*e)]
	if !ok {
		return alphapb.DataprocAlphaClusterStatusStateEnum(0)
	}
	return alphapb.DataprocAlphaClusterStatusStateEnum(v)
}
// DataprocAlphaClusterStatusSubstateEnumToProto converts a ClusterStatusSubstateEnum enum to its proto representation.
func DataprocAlphaClusterStatusSubstateEnumToProto(e *alpha.ClusterStatusSubstateEnum) alphapb.DataprocAlphaClusterStatusSubstateEnum {
	// Unset or unrecognized values map to the zero enum.
	if e == nil {
		return alphapb.DataprocAlphaClusterStatusSubstateEnum(0)
	}
	v, ok := alphapb.DataprocAlphaClusterStatusSubstateEnum_value["ClusterStatusSubstateEnum"+string(*e)]
	if !ok {
		return alphapb.DataprocAlphaClusterStatusSubstateEnum(0)
	}
	return alphapb.DataprocAlphaClusterStatusSubstateEnum(v)
}
// DataprocAlphaClusterStatusHistoryStateEnumToProto converts a ClusterStatusHistoryStateEnum enum to its proto representation.
func DataprocAlphaClusterStatusHistoryStateEnumToProto(e *alpha.ClusterStatusHistoryStateEnum) alphapb.DataprocAlphaClusterStatusHistoryStateEnum {
	// Unset or unrecognized values map to the zero enum.
	if e == nil {
		return alphapb.DataprocAlphaClusterStatusHistoryStateEnum(0)
	}
	v, ok := alphapb.DataprocAlphaClusterStatusHistoryStateEnum_value["ClusterStatusHistoryStateEnum"+string(*e)]
	if !ok {
		return alphapb.DataprocAlphaClusterStatusHistoryStateEnum(0)
	}
	return alphapb.DataprocAlphaClusterStatusHistoryStateEnum(v)
}
// DataprocAlphaClusterStatusHistorySubstateEnumToProto converts a ClusterStatusHistorySubstateEnum enum to its proto representation.
func DataprocAlphaClusterStatusHistorySubstateEnumToProto(e *alpha.ClusterStatusHistorySubstateEnum) alphapb.DataprocAlphaClusterStatusHistorySubstateEnum {
	// Unset or unrecognized values map to the zero enum.
	if e == nil {
		return alphapb.DataprocAlphaClusterStatusHistorySubstateEnum(0)
	}
	v, ok := alphapb.DataprocAlphaClusterStatusHistorySubstateEnum_value["ClusterStatusHistorySubstateEnum"+string(*e)]
	if !ok {
		return alphapb.DataprocAlphaClusterStatusHistorySubstateEnum(0)
	}
	return alphapb.DataprocAlphaClusterStatusHistorySubstateEnum(v)
}
// DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnumToProto converts a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum enum to its proto representation.
func DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnumToProto(e *alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum) alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum {
	// Unset or unrecognized values map to the zero enum.
	if e == nil {
		return alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum(0)
	}
	v, ok := alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum_value["ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum"+string(*e)]
	if !ok {
		return alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum(0)
	}
	return alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum(v)
}
// DataprocAlphaClusterConfigToProto converts a ClusterConfig object to its proto representation.
func DataprocAlphaClusterConfigToProto(o *alpha.ClusterConfig) *alphapb.DataprocAlphaClusterConfig {
	if o == nil {
		return nil
	}
	out := &alphapb.DataprocAlphaClusterConfig{}
	out.SetStagingBucket(dcl.ValueOrEmptyString(o.StagingBucket))
	out.SetTempBucket(dcl.ValueOrEmptyString(o.TempBucket))
	out.SetGceClusterConfig(DataprocAlphaClusterConfigGceClusterConfigToProto(o.GceClusterConfig))
	out.SetMasterConfig(DataprocAlphaClusterConfigMasterConfigToProto(o.MasterConfig))
	out.SetWorkerConfig(DataprocAlphaClusterConfigWorkerConfigToProto(o.WorkerConfig))
	out.SetSecondaryWorkerConfig(DataprocAlphaClusterConfigSecondaryWorkerConfigToProto(o.SecondaryWorkerConfig))
	out.SetSoftwareConfig(DataprocAlphaClusterConfigSoftwareConfigToProto(o.SoftwareConfig))
	out.SetEncryptionConfig(DataprocAlphaClusterConfigEncryptionConfigToProto(o.EncryptionConfig))
	out.SetAutoscalingConfig(DataprocAlphaClusterConfigAutoscalingConfigToProto(o.AutoscalingConfig))
	out.SetSecurityConfig(DataprocAlphaClusterConfigSecurityConfigToProto(o.SecurityConfig))
	out.SetLifecycleConfig(DataprocAlphaClusterConfigLifecycleConfigToProto(o.LifecycleConfig))
	out.SetEndpointConfig(DataprocAlphaClusterConfigEndpointConfigToProto(o.EndpointConfig))
	out.SetGkeClusterConfig(DataprocAlphaClusterConfigGkeClusterConfigToProto(o.GkeClusterConfig))
	out.SetMetastoreConfig(DataprocAlphaClusterConfigMetastoreConfigToProto(o.MetastoreConfig))
	out.SetDataprocMetricConfig(DataprocAlphaClusterConfigDataprocMetricConfigToProto(o.DataprocMetricConfig))
	// Convert the repeated initialization actions by index so each element
	// is passed to the converter without copying the struct first.
	actions := make([]*alphapb.DataprocAlphaClusterConfigInitializationActions, len(o.InitializationActions))
	for i := range o.InitializationActions {
		actions[i] = DataprocAlphaClusterConfigInitializationActionsToProto(&o.InitializationActions[i])
	}
	out.SetInitializationActions(actions)
	return out
}
// DataprocAlphaClusterConfigGceClusterConfigToProto converts a ClusterConfigGceClusterConfig object to its proto representation.
func DataprocAlphaClusterConfigGceClusterConfigToProto(o *alpha.ClusterConfigGceClusterConfig) *alphapb.DataprocAlphaClusterConfigGceClusterConfig {
	if o == nil {
		return nil
	}
	out := &alphapb.DataprocAlphaClusterConfigGceClusterConfig{}
	out.SetZone(dcl.ValueOrEmptyString(o.Zone))
	out.SetNetwork(dcl.ValueOrEmptyString(o.Network))
	out.SetSubnetwork(dcl.ValueOrEmptyString(o.Subnetwork))
	out.SetInternalIpOnly(dcl.ValueOrEmptyBool(o.InternalIPOnly))
	out.SetPrivateIpv6GoogleAccess(DataprocAlphaClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumToProto(o.PrivateIPv6GoogleAccess))
	out.SetServiceAccount(dcl.ValueOrEmptyString(o.ServiceAccount))
	out.SetReservationAffinity(DataprocAlphaClusterConfigGceClusterConfigReservationAffinityToProto(o.ReservationAffinity))
	out.SetNodeGroupAffinity(DataprocAlphaClusterConfigGceClusterConfigNodeGroupAffinityToProto(o.NodeGroupAffinity))
	out.SetShieldedInstanceConfig(DataprocAlphaClusterConfigGceClusterConfigShieldedInstanceConfigToProto(o.ShieldedInstanceConfig))
	out.SetConfidentialInstanceConfig(DataprocAlphaClusterConfigGceClusterConfigConfidentialInstanceConfigToProto(o.ConfidentialInstanceConfig))
	// Duplicate the string slices with make+copy so the proto owns its data
	// (an empty source still yields a non-nil empty slice, as before).
	scopes := make([]string, len(o.ServiceAccountScopes))
	copy(scopes, o.ServiceAccountScopes)
	out.SetServiceAccountScopes(scopes)
	tags := make([]string, len(o.Tags))
	copy(tags, o.Tags)
	out.SetTags(tags)
	// Shallow-copy the metadata map entry by entry.
	metadata := make(map[string]string, len(o.Metadata))
	for k, v := range o.Metadata {
		metadata[k] = v
	}
	out.SetMetadata(metadata)
	return out
}
// DataprocAlphaClusterConfigGceClusterConfigReservationAffinityToProto converts a ClusterConfigGceClusterConfigReservationAffinity object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigGceClusterConfigReservationAffinityToProto(o *alpha.ClusterConfigGceClusterConfigReservationAffinity) *alphapb.DataprocAlphaClusterConfigGceClusterConfigReservationAffinity {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigGceClusterConfigReservationAffinity{}
p.SetConsumeReservationType(DataprocAlphaClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumToProto(o.ConsumeReservationType))
p.SetKey(dcl.ValueOrEmptyString(o.Key))
sValues := make([]string, len(o.Values))
for i, r := range o.Values {
sValues[i] = r
}
p.SetValues(sValues)
return p
}
// DataprocAlphaClusterConfigGceClusterConfigNodeGroupAffinityToProto converts a ClusterConfigGceClusterConfigNodeGroupAffinity object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigGceClusterConfigNodeGroupAffinityToProto(o *alpha.ClusterConfigGceClusterConfigNodeGroupAffinity) *alphapb.DataprocAlphaClusterConfigGceClusterConfigNodeGroupAffinity {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigGceClusterConfigNodeGroupAffinity{}
p.SetNodeGroup(dcl.ValueOrEmptyString(o.NodeGroup))
return p
}
// DataprocAlphaClusterConfigGceClusterConfigShieldedInstanceConfigToProto converts a ClusterConfigGceClusterConfigShieldedInstanceConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigGceClusterConfigShieldedInstanceConfigToProto(o *alpha.ClusterConfigGceClusterConfigShieldedInstanceConfig) *alphapb.DataprocAlphaClusterConfigGceClusterConfigShieldedInstanceConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigGceClusterConfigShieldedInstanceConfig{}
p.SetEnableSecureBoot(dcl.ValueOrEmptyBool(o.EnableSecureBoot))
p.SetEnableVtpm(dcl.ValueOrEmptyBool(o.EnableVtpm))
p.SetEnableIntegrityMonitoring(dcl.ValueOrEmptyBool(o.EnableIntegrityMonitoring))
return p
}
// DataprocAlphaClusterConfigGceClusterConfigConfidentialInstanceConfigToProto converts a ClusterConfigGceClusterConfigConfidentialInstanceConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigGceClusterConfigConfidentialInstanceConfigToProto(o *alpha.ClusterConfigGceClusterConfigConfidentialInstanceConfig) *alphapb.DataprocAlphaClusterConfigGceClusterConfigConfidentialInstanceConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigGceClusterConfigConfidentialInstanceConfig{}
p.SetEnableConfidentialCompute(dcl.ValueOrEmptyBool(o.EnableConfidentialCompute))
return p
}
// DataprocAlphaClusterConfigMasterConfigToProto converts a ClusterConfigMasterConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigMasterConfigToProto(o *alpha.ClusterConfigMasterConfig) *alphapb.DataprocAlphaClusterConfigMasterConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigMasterConfig{}
p.SetNumInstances(dcl.ValueOrEmptyInt64(o.NumInstances))
p.SetImage(dcl.ValueOrEmptyString(o.Image))
p.SetMachineType(dcl.ValueOrEmptyString(o.MachineType))
p.SetDiskConfig(DataprocAlphaClusterConfigMasterConfigDiskConfigToProto(o.DiskConfig))
p.SetIsPreemptible(dcl.ValueOrEmptyBool(o.IsPreemptible))
p.SetPreemptibility(DataprocAlphaClusterConfigMasterConfigPreemptibilityEnumToProto(o.Preemptibility))
p.SetManagedGroupConfig(DataprocAlphaClusterConfigMasterConfigManagedGroupConfigToProto(o.ManagedGroupConfig))
p.SetMinCpuPlatform(dcl.ValueOrEmptyString(o.MinCpuPlatform))
// Repeated fields are copied element-by-element into fresh slices.
sInstanceNames := make([]string, len(o.InstanceNames))
for i, r := range o.InstanceNames {
sInstanceNames[i] = r
}
p.SetInstanceNames(sInstanceNames)
sAccelerators := make([]*alphapb.DataprocAlphaClusterConfigMasterConfigAccelerators, len(o.Accelerators))
for i, r := range o.Accelerators {
sAccelerators[i] = DataprocAlphaClusterConfigMasterConfigAcceleratorsToProto(&r)
}
p.SetAccelerators(sAccelerators)
sInstanceReferences := make([]*alphapb.DataprocAlphaClusterConfigMasterConfigInstanceReferences, len(o.InstanceReferences))
for i, r := range o.InstanceReferences {
sInstanceReferences[i] = DataprocAlphaClusterConfigMasterConfigInstanceReferencesToProto(&r)
}
p.SetInstanceReferences(sInstanceReferences)
return p
}
// DataprocAlphaClusterConfigMasterConfigDiskConfigToProto converts a ClusterConfigMasterConfigDiskConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigMasterConfigDiskConfigToProto(o *alpha.ClusterConfigMasterConfigDiskConfig) *alphapb.DataprocAlphaClusterConfigMasterConfigDiskConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigMasterConfigDiskConfig{}
p.SetBootDiskType(dcl.ValueOrEmptyString(o.BootDiskType))
p.SetBootDiskSizeGb(dcl.ValueOrEmptyInt64(o.BootDiskSizeGb))
p.SetNumLocalSsds(dcl.ValueOrEmptyInt64(o.NumLocalSsds))
p.SetLocalSsdInterface(dcl.ValueOrEmptyString(o.LocalSsdInterface))
return p
}
// DataprocAlphaClusterConfigMasterConfigManagedGroupConfigToProto converts a ClusterConfigMasterConfigManagedGroupConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigMasterConfigManagedGroupConfigToProto(o *alpha.ClusterConfigMasterConfigManagedGroupConfig) *alphapb.DataprocAlphaClusterConfigMasterConfigManagedGroupConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigMasterConfigManagedGroupConfig{}
p.SetInstanceTemplateName(dcl.ValueOrEmptyString(o.InstanceTemplateName))
p.SetInstanceGroupManagerName(dcl.ValueOrEmptyString(o.InstanceGroupManagerName))
return p
}
// DataprocAlphaClusterConfigMasterConfigAcceleratorsToProto converts a ClusterConfigMasterConfigAccelerators object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigMasterConfigAcceleratorsToProto(o *alpha.ClusterConfigMasterConfigAccelerators) *alphapb.DataprocAlphaClusterConfigMasterConfigAccelerators {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigMasterConfigAccelerators{}
p.SetAcceleratorType(dcl.ValueOrEmptyString(o.AcceleratorType))
p.SetAcceleratorCount(dcl.ValueOrEmptyInt64(o.AcceleratorCount))
return p
}
// DataprocAlphaClusterConfigMasterConfigInstanceReferencesToProto converts a ClusterConfigMasterConfigInstanceReferences object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigMasterConfigInstanceReferencesToProto(o *alpha.ClusterConfigMasterConfigInstanceReferences) *alphapb.DataprocAlphaClusterConfigMasterConfigInstanceReferences {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigMasterConfigInstanceReferences{}
p.SetInstanceName(dcl.ValueOrEmptyString(o.InstanceName))
p.SetInstanceId(dcl.ValueOrEmptyString(o.InstanceId))
p.SetPublicKey(dcl.ValueOrEmptyString(o.PublicKey))
p.SetPublicEciesKey(dcl.ValueOrEmptyString(o.PublicEciesKey))
return p
}
// DataprocAlphaClusterConfigWorkerConfigToProto converts a ClusterConfigWorkerConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigWorkerConfigToProto(o *alpha.ClusterConfigWorkerConfig) *alphapb.DataprocAlphaClusterConfigWorkerConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigWorkerConfig{}
p.SetNumInstances(dcl.ValueOrEmptyInt64(o.NumInstances))
p.SetImage(dcl.ValueOrEmptyString(o.Image))
p.SetMachineType(dcl.ValueOrEmptyString(o.MachineType))
p.SetDiskConfig(DataprocAlphaClusterConfigWorkerConfigDiskConfigToProto(o.DiskConfig))
p.SetIsPreemptible(dcl.ValueOrEmptyBool(o.IsPreemptible))
p.SetPreemptibility(DataprocAlphaClusterConfigWorkerConfigPreemptibilityEnumToProto(o.Preemptibility))
p.SetManagedGroupConfig(DataprocAlphaClusterConfigWorkerConfigManagedGroupConfigToProto(o.ManagedGroupConfig))
p.SetMinCpuPlatform(dcl.ValueOrEmptyString(o.MinCpuPlatform))
// Repeated fields are copied element-by-element into fresh slices.
sInstanceNames := make([]string, len(o.InstanceNames))
for i, r := range o.InstanceNames {
sInstanceNames[i] = r
}
p.SetInstanceNames(sInstanceNames)
sAccelerators := make([]*alphapb.DataprocAlphaClusterConfigWorkerConfigAccelerators, len(o.Accelerators))
for i, r := range o.Accelerators {
sAccelerators[i] = DataprocAlphaClusterConfigWorkerConfigAcceleratorsToProto(&r)
}
p.SetAccelerators(sAccelerators)
sInstanceReferences := make([]*alphapb.DataprocAlphaClusterConfigWorkerConfigInstanceReferences, len(o.InstanceReferences))
for i, r := range o.InstanceReferences {
sInstanceReferences[i] = DataprocAlphaClusterConfigWorkerConfigInstanceReferencesToProto(&r)
}
p.SetInstanceReferences(sInstanceReferences)
return p
}
// DataprocAlphaClusterConfigWorkerConfigDiskConfigToProto converts a ClusterConfigWorkerConfigDiskConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigWorkerConfigDiskConfigToProto(o *alpha.ClusterConfigWorkerConfigDiskConfig) *alphapb.DataprocAlphaClusterConfigWorkerConfigDiskConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigWorkerConfigDiskConfig{}
p.SetBootDiskType(dcl.ValueOrEmptyString(o.BootDiskType))
p.SetBootDiskSizeGb(dcl.ValueOrEmptyInt64(o.BootDiskSizeGb))
p.SetNumLocalSsds(dcl.ValueOrEmptyInt64(o.NumLocalSsds))
p.SetLocalSsdInterface(dcl.ValueOrEmptyString(o.LocalSsdInterface))
return p
}
// DataprocAlphaClusterConfigWorkerConfigManagedGroupConfigToProto converts a ClusterConfigWorkerConfigManagedGroupConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigWorkerConfigManagedGroupConfigToProto(o *alpha.ClusterConfigWorkerConfigManagedGroupConfig) *alphapb.DataprocAlphaClusterConfigWorkerConfigManagedGroupConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigWorkerConfigManagedGroupConfig{}
p.SetInstanceTemplateName(dcl.ValueOrEmptyString(o.InstanceTemplateName))
p.SetInstanceGroupManagerName(dcl.ValueOrEmptyString(o.InstanceGroupManagerName))
return p
}
// DataprocAlphaClusterConfigWorkerConfigAcceleratorsToProto converts a ClusterConfigWorkerConfigAccelerators object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigWorkerConfigAcceleratorsToProto(o *alpha.ClusterConfigWorkerConfigAccelerators) *alphapb.DataprocAlphaClusterConfigWorkerConfigAccelerators {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigWorkerConfigAccelerators{}
p.SetAcceleratorType(dcl.ValueOrEmptyString(o.AcceleratorType))
p.SetAcceleratorCount(dcl.ValueOrEmptyInt64(o.AcceleratorCount))
return p
}
// DataprocAlphaClusterConfigWorkerConfigInstanceReferencesToProto converts a ClusterConfigWorkerConfigInstanceReferences object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigWorkerConfigInstanceReferencesToProto(o *alpha.ClusterConfigWorkerConfigInstanceReferences) *alphapb.DataprocAlphaClusterConfigWorkerConfigInstanceReferences {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigWorkerConfigInstanceReferences{}
p.SetInstanceName(dcl.ValueOrEmptyString(o.InstanceName))
p.SetInstanceId(dcl.ValueOrEmptyString(o.InstanceId))
p.SetPublicKey(dcl.ValueOrEmptyString(o.PublicKey))
p.SetPublicEciesKey(dcl.ValueOrEmptyString(o.PublicEciesKey))
return p
}
// DataprocAlphaClusterConfigSecondaryWorkerConfigToProto converts a ClusterConfigSecondaryWorkerConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigSecondaryWorkerConfigToProto(o *alpha.ClusterConfigSecondaryWorkerConfig) *alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfig{}
p.SetNumInstances(dcl.ValueOrEmptyInt64(o.NumInstances))
p.SetImage(dcl.ValueOrEmptyString(o.Image))
p.SetMachineType(dcl.ValueOrEmptyString(o.MachineType))
p.SetDiskConfig(DataprocAlphaClusterConfigSecondaryWorkerConfigDiskConfigToProto(o.DiskConfig))
p.SetIsPreemptible(dcl.ValueOrEmptyBool(o.IsPreemptible))
p.SetPreemptibility(DataprocAlphaClusterConfigSecondaryWorkerConfigPreemptibilityEnumToProto(o.Preemptibility))
p.SetManagedGroupConfig(DataprocAlphaClusterConfigSecondaryWorkerConfigManagedGroupConfigToProto(o.ManagedGroupConfig))
p.SetMinCpuPlatform(dcl.ValueOrEmptyString(o.MinCpuPlatform))
// Repeated fields are copied element-by-element into fresh slices.
sInstanceNames := make([]string, len(o.InstanceNames))
for i, r := range o.InstanceNames {
sInstanceNames[i] = r
}
p.SetInstanceNames(sInstanceNames)
sAccelerators := make([]*alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigAccelerators, len(o.Accelerators))
for i, r := range o.Accelerators {
sAccelerators[i] = DataprocAlphaClusterConfigSecondaryWorkerConfigAcceleratorsToProto(&r)
}
p.SetAccelerators(sAccelerators)
sInstanceReferences := make([]*alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigInstanceReferences, len(o.InstanceReferences))
for i, r := range o.InstanceReferences {
sInstanceReferences[i] = DataprocAlphaClusterConfigSecondaryWorkerConfigInstanceReferencesToProto(&r)
}
p.SetInstanceReferences(sInstanceReferences)
return p
}
// DataprocAlphaClusterConfigSecondaryWorkerConfigDiskConfigToProto converts a ClusterConfigSecondaryWorkerConfigDiskConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigSecondaryWorkerConfigDiskConfigToProto(o *alpha.ClusterConfigSecondaryWorkerConfigDiskConfig) *alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigDiskConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigDiskConfig{}
p.SetBootDiskType(dcl.ValueOrEmptyString(o.BootDiskType))
p.SetBootDiskSizeGb(dcl.ValueOrEmptyInt64(o.BootDiskSizeGb))
p.SetNumLocalSsds(dcl.ValueOrEmptyInt64(o.NumLocalSsds))
p.SetLocalSsdInterface(dcl.ValueOrEmptyString(o.LocalSsdInterface))
return p
}
// DataprocAlphaClusterConfigSecondaryWorkerConfigManagedGroupConfigToProto converts a ClusterConfigSecondaryWorkerConfigManagedGroupConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigSecondaryWorkerConfigManagedGroupConfigToProto(o *alpha.ClusterConfigSecondaryWorkerConfigManagedGroupConfig) *alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigManagedGroupConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigManagedGroupConfig{}
p.SetInstanceTemplateName(dcl.ValueOrEmptyString(o.InstanceTemplateName))
p.SetInstanceGroupManagerName(dcl.ValueOrEmptyString(o.InstanceGroupManagerName))
return p
}
// DataprocAlphaClusterConfigSecondaryWorkerConfigAcceleratorsToProto converts a ClusterConfigSecondaryWorkerConfigAccelerators object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigSecondaryWorkerConfigAcceleratorsToProto(o *alpha.ClusterConfigSecondaryWorkerConfigAccelerators) *alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigAccelerators {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigAccelerators{}
p.SetAcceleratorType(dcl.ValueOrEmptyString(o.AcceleratorType))
p.SetAcceleratorCount(dcl.ValueOrEmptyInt64(o.AcceleratorCount))
return p
}
// DataprocAlphaClusterConfigSecondaryWorkerConfigInstanceReferencesToProto converts a ClusterConfigSecondaryWorkerConfigInstanceReferences object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigSecondaryWorkerConfigInstanceReferencesToProto(o *alpha.ClusterConfigSecondaryWorkerConfigInstanceReferences) *alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigInstanceReferences {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigSecondaryWorkerConfigInstanceReferences{}
p.SetInstanceName(dcl.ValueOrEmptyString(o.InstanceName))
p.SetInstanceId(dcl.ValueOrEmptyString(o.InstanceId))
p.SetPublicKey(dcl.ValueOrEmptyString(o.PublicKey))
p.SetPublicEciesKey(dcl.ValueOrEmptyString(o.PublicEciesKey))
return p
}
// DataprocAlphaClusterConfigSoftwareConfigToProto converts a ClusterConfigSoftwareConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigSoftwareConfigToProto(o *alpha.ClusterConfigSoftwareConfig) *alphapb.DataprocAlphaClusterConfigSoftwareConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigSoftwareConfig{}
p.SetImageVersion(dcl.ValueOrEmptyString(o.ImageVersion))
mProperties := make(map[string]string, len(o.Properties))
for k, r := range o.Properties {
mProperties[k] = r
}
p.SetProperties(mProperties)
// Enum values are mapped by name via the generated _value table; unknown
// names map to 0 (the proto enum's unspecified value).
sOptionalComponents := make([]alphapb.DataprocAlphaClusterConfigSoftwareConfigOptionalComponentsEnum, len(o.OptionalComponents))
for i, r := range o.OptionalComponents {
sOptionalComponents[i] = alphapb.DataprocAlphaClusterConfigSoftwareConfigOptionalComponentsEnum(alphapb.DataprocAlphaClusterConfigSoftwareConfigOptionalComponentsEnum_value[string(r)])
}
p.SetOptionalComponents(sOptionalComponents)
return p
}
// DataprocAlphaClusterConfigInitializationActionsToProto converts a ClusterConfigInitializationActions object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigInitializationActionsToProto(o *alpha.ClusterConfigInitializationActions) *alphapb.DataprocAlphaClusterConfigInitializationActions {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigInitializationActions{}
p.SetExecutableFile(dcl.ValueOrEmptyString(o.ExecutableFile))
p.SetExecutionTimeout(dcl.ValueOrEmptyString(o.ExecutionTimeout))
return p
}
// DataprocAlphaClusterConfigEncryptionConfigToProto converts a ClusterConfigEncryptionConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigEncryptionConfigToProto(o *alpha.ClusterConfigEncryptionConfig) *alphapb.DataprocAlphaClusterConfigEncryptionConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigEncryptionConfig{}
p.SetGcePdKmsKeyName(dcl.ValueOrEmptyString(o.GcePdKmsKeyName))
return p
}
// DataprocAlphaClusterConfigAutoscalingConfigToProto converts a ClusterConfigAutoscalingConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigAutoscalingConfigToProto(o *alpha.ClusterConfigAutoscalingConfig) *alphapb.DataprocAlphaClusterConfigAutoscalingConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigAutoscalingConfig{}
p.SetPolicy(dcl.ValueOrEmptyString(o.Policy))
return p
}
// DataprocAlphaClusterConfigSecurityConfigToProto converts a ClusterConfigSecurityConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigSecurityConfigToProto(o *alpha.ClusterConfigSecurityConfig) *alphapb.DataprocAlphaClusterConfigSecurityConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigSecurityConfig{}
p.SetKerberosConfig(DataprocAlphaClusterConfigSecurityConfigKerberosConfigToProto(o.KerberosConfig))
p.SetIdentityConfig(DataprocAlphaClusterConfigSecurityConfigIdentityConfigToProto(o.IdentityConfig))
return p
}
// DataprocAlphaClusterConfigSecurityConfigKerberosConfigToProto converts a ClusterConfigSecurityConfigKerberosConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigSecurityConfigKerberosConfigToProto(o *alpha.ClusterConfigSecurityConfigKerberosConfig) *alphapb.DataprocAlphaClusterConfigSecurityConfigKerberosConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigSecurityConfigKerberosConfig{}
p.SetEnableKerberos(dcl.ValueOrEmptyBool(o.EnableKerberos))
p.SetRootPrincipalPassword(dcl.ValueOrEmptyString(o.RootPrincipalPassword))
p.SetKmsKey(dcl.ValueOrEmptyString(o.KmsKey))
p.SetKeystore(dcl.ValueOrEmptyString(o.Keystore))
p.SetTruststore(dcl.ValueOrEmptyString(o.Truststore))
p.SetKeystorePassword(dcl.ValueOrEmptyString(o.KeystorePassword))
p.SetKeyPassword(dcl.ValueOrEmptyString(o.KeyPassword))
p.SetTruststorePassword(dcl.ValueOrEmptyString(o.TruststorePassword))
p.SetCrossRealmTrustRealm(dcl.ValueOrEmptyString(o.CrossRealmTrustRealm))
p.SetCrossRealmTrustKdc(dcl.ValueOrEmptyString(o.CrossRealmTrustKdc))
p.SetCrossRealmTrustAdminServer(dcl.ValueOrEmptyString(o.CrossRealmTrustAdminServer))
p.SetCrossRealmTrustSharedPassword(dcl.ValueOrEmptyString(o.CrossRealmTrustSharedPassword))
p.SetKdcDbKey(dcl.ValueOrEmptyString(o.KdcDbKey))
p.SetTgtLifetimeHours(dcl.ValueOrEmptyInt64(o.TgtLifetimeHours))
p.SetRealm(dcl.ValueOrEmptyString(o.Realm))
return p
}
// DataprocAlphaClusterConfigSecurityConfigIdentityConfigToProto converts a ClusterConfigSecurityConfigIdentityConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigSecurityConfigIdentityConfigToProto(o *alpha.ClusterConfigSecurityConfigIdentityConfig) *alphapb.DataprocAlphaClusterConfigSecurityConfigIdentityConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigSecurityConfigIdentityConfig{}
mUserServiceAccountMapping := make(map[string]string, len(o.UserServiceAccountMapping))
for k, r := range o.UserServiceAccountMapping {
mUserServiceAccountMapping[k] = r
}
p.SetUserServiceAccountMapping(mUserServiceAccountMapping)
return p
}
// DataprocAlphaClusterConfigLifecycleConfigToProto converts a ClusterConfigLifecycleConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigLifecycleConfigToProto(o *alpha.ClusterConfigLifecycleConfig) *alphapb.DataprocAlphaClusterConfigLifecycleConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigLifecycleConfig{}
p.SetIdleDeleteTtl(dcl.ValueOrEmptyString(o.IdleDeleteTtl))
p.SetAutoDeleteTime(dcl.ValueOrEmptyString(o.AutoDeleteTime))
p.SetAutoDeleteTtl(dcl.ValueOrEmptyString(o.AutoDeleteTtl))
p.SetIdleStartTime(dcl.ValueOrEmptyString(o.IdleStartTime))
return p
}
// DataprocAlphaClusterConfigEndpointConfigToProto converts a ClusterConfigEndpointConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigEndpointConfigToProto(o *alpha.ClusterConfigEndpointConfig) *alphapb.DataprocAlphaClusterConfigEndpointConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigEndpointConfig{}
p.SetEnableHttpPortAccess(dcl.ValueOrEmptyBool(o.EnableHttpPortAccess))
mHttpPorts := make(map[string]string, len(o.HttpPorts))
for k, r := range o.HttpPorts {
mHttpPorts[k] = r
}
p.SetHttpPorts(mHttpPorts)
return p
}
// DataprocAlphaClusterConfigGkeClusterConfigToProto converts a ClusterConfigGkeClusterConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigGkeClusterConfigToProto(o *alpha.ClusterConfigGkeClusterConfig) *alphapb.DataprocAlphaClusterConfigGkeClusterConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigGkeClusterConfig{}
p.SetNamespacedGkeDeploymentTarget(DataprocAlphaClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetToProto(o.NamespacedGkeDeploymentTarget))
return p
}
// DataprocAlphaClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetToProto converts a ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetToProto(o *alpha.ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) *alphapb.DataprocAlphaClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{}
p.SetTargetGkeCluster(dcl.ValueOrEmptyString(o.TargetGkeCluster))
p.SetClusterNamespace(dcl.ValueOrEmptyString(o.ClusterNamespace))
return p
}
// DataprocAlphaClusterConfigMetastoreConfigToProto converts a ClusterConfigMetastoreConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigMetastoreConfigToProto(o *alpha.ClusterConfigMetastoreConfig) *alphapb.DataprocAlphaClusterConfigMetastoreConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigMetastoreConfig{}
p.SetDataprocMetastoreService(dcl.ValueOrEmptyString(o.DataprocMetastoreService))
return p
}
// DataprocAlphaClusterConfigDataprocMetricConfigToProto converts a ClusterConfigDataprocMetricConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigDataprocMetricConfigToProto(o *alpha.ClusterConfigDataprocMetricConfig) *alphapb.DataprocAlphaClusterConfigDataprocMetricConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigDataprocMetricConfig{}
sMetrics := make([]*alphapb.DataprocAlphaClusterConfigDataprocMetricConfigMetrics, len(o.Metrics))
for i, r := range o.Metrics {
sMetrics[i] = DataprocAlphaClusterConfigDataprocMetricConfigMetricsToProto(&r)
}
p.SetMetrics(sMetrics)
return p
}
// DataprocAlphaClusterConfigDataprocMetricConfigMetricsToProto converts a ClusterConfigDataprocMetricConfigMetrics object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterConfigDataprocMetricConfigMetricsToProto(o *alpha.ClusterConfigDataprocMetricConfigMetrics) *alphapb.DataprocAlphaClusterConfigDataprocMetricConfigMetrics {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterConfigDataprocMetricConfigMetrics{}
p.SetMetricSource(DataprocAlphaClusterConfigDataprocMetricConfigMetricsMetricSourceEnumToProto(o.MetricSource))
sMetricOverrides := make([]string, len(o.MetricOverrides))
for i, r := range o.MetricOverrides {
sMetricOverrides[i] = r
}
p.SetMetricOverrides(sMetricOverrides)
return p
}
// DataprocAlphaClusterStatusToProto converts a ClusterStatus object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterStatusToProto(o *alpha.ClusterStatus) *alphapb.DataprocAlphaClusterStatus {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterStatus{}
p.SetState(DataprocAlphaClusterStatusStateEnumToProto(o.State))
p.SetDetail(dcl.ValueOrEmptyString(o.Detail))
p.SetStateStartTime(dcl.ValueOrEmptyString(o.StateStartTime))
p.SetSubstate(DataprocAlphaClusterStatusSubstateEnumToProto(o.Substate))
return p
}
// DataprocAlphaClusterStatusHistoryToProto converts a ClusterStatusHistory object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterStatusHistoryToProto(o *alpha.ClusterStatusHistory) *alphapb.DataprocAlphaClusterStatusHistory {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterStatusHistory{}
p.SetState(DataprocAlphaClusterStatusHistoryStateEnumToProto(o.State))
p.SetDetail(dcl.ValueOrEmptyString(o.Detail))
p.SetStateStartTime(dcl.ValueOrEmptyString(o.StateStartTime))
p.SetSubstate(DataprocAlphaClusterStatusHistorySubstateEnumToProto(o.Substate))
return p
}
// DataprocAlphaClusterMetricsToProto converts a ClusterMetrics object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterMetricsToProto(o *alpha.ClusterMetrics) *alphapb.DataprocAlphaClusterMetrics {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterMetrics{}
// Map fields are copied into fresh maps so the proto does not alias the
// DCL object's backing storage.
mHdfsMetrics := make(map[string]string, len(o.HdfsMetrics))
for k, r := range o.HdfsMetrics {
mHdfsMetrics[k] = r
}
p.SetHdfsMetrics(mHdfsMetrics)
mYarnMetrics := make(map[string]string, len(o.YarnMetrics))
for k, r := range o.YarnMetrics {
mYarnMetrics[k] = r
}
p.SetYarnMetrics(mYarnMetrics)
return p
}
// DataprocAlphaClusterVirtualClusterConfigToProto converts a ClusterVirtualClusterConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterVirtualClusterConfigToProto(o *alpha.ClusterVirtualClusterConfig) *alphapb.DataprocAlphaClusterVirtualClusterConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterVirtualClusterConfig{}
p.SetStagingBucket(dcl.ValueOrEmptyString(o.StagingBucket))
p.SetKubernetesClusterConfig(DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigToProto(o.KubernetesClusterConfig))
p.SetAuxiliaryServicesConfig(DataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigToProto(o.AuxiliaryServicesConfig))
return p
}
// DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigToProto converts a ClusterVirtualClusterConfigKubernetesClusterConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigToProto(o *alpha.ClusterVirtualClusterConfigKubernetesClusterConfig) *alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfig{}
p.SetKubernetesNamespace(dcl.ValueOrEmptyString(o.KubernetesNamespace))
p.SetGkeClusterConfig(DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigToProto(o.GkeClusterConfig))
p.SetKubernetesSoftwareConfig(DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigToProto(o.KubernetesSoftwareConfig))
return p
}
// DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigToProto converts a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigToProto(o *alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) *alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig{}
p.SetGkeClusterTarget(dcl.ValueOrEmptyString(o.GkeClusterTarget))
sNodePoolTarget := make([]*alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, len(o.NodePoolTarget))
for i, r := range o.NodePoolTarget {
sNodePoolTarget[i] = DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetToProto(&r)
}
p.SetNodePoolTarget(sNodePoolTarget)
return p
}
// DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetToProto converts a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetToProto(o *alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) *alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget{}
p.SetNodePool(dcl.ValueOrEmptyString(o.NodePool))
p.SetNodePoolConfig(DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigToProto(o.NodePoolConfig))
// Enum values are mapped by name via the generated _value table; unknown
// names map to 0 (the proto enum's unspecified value).
sRoles := make([]alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum, len(o.Roles))
for i, r := range o.Roles {
sRoles[i] = alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum(alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum_value[string(r)])
}
p.SetRoles(sRoles)
return p
}
// DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigToProto converts a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig object to its proto representation.
// It returns nil when given a nil input.
func DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigToProto(o *alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) *alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig {
if o == nil {
return nil
}
p := &alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig{}
p.SetConfig(DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigToProto(o.Config))
p.SetAutoscaling(DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingToProto(o.Autoscaling))
sLocations := make([]string, len(o.Locations))
for i, r := range o.Locations {
sLocations[i] = r
}
p.SetLocations(sLocations)
return p
}
// DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigToProto converts a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig object to its proto representation.
func DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigToProto(o *alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) *alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig {
	if o == nil {
		return nil
	}
	out := &alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig{}
	out.SetMachineType(dcl.ValueOrEmptyString(o.MachineType))
	out.SetLocalSsdCount(dcl.ValueOrEmptyInt64(o.LocalSsdCount))
	out.SetPreemptible(dcl.ValueOrEmptyBool(o.Preemptible))
	out.SetMinCpuPlatform(dcl.ValueOrEmptyString(o.MinCpuPlatform))
	out.SetBootDiskKmsKey(dcl.ValueOrEmptyString(o.BootDiskKmsKey))
	out.SetEphemeralStorageConfig(DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigToProto(o.EphemeralStorageConfig))
	out.SetSpot(dcl.ValueOrEmptyBool(o.Spot))
	// Convert each accelerator entry in place.
	accelerators := make([]*alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators, len(o.Accelerators))
	for i := range o.Accelerators {
		accelerators[i] = DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsToProto(&o.Accelerators[i])
	}
	out.SetAccelerators(accelerators)
	return out
}
// DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsToProto converts a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators object to its proto representation.
func DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsToProto(o *alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) *alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators {
	if o == nil {
		return nil
	}
	out := &alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators{}
	out.SetAcceleratorCount(dcl.ValueOrEmptyInt64(o.AcceleratorCount))
	out.SetAcceleratorType(dcl.ValueOrEmptyString(o.AcceleratorType))
	out.SetGpuPartitionSize(dcl.ValueOrEmptyString(o.GpuPartitionSize))
	return out
}
// DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigToProto converts a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig object to its proto representation.
func DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigToProto(o *alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) *alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig {
	if o == nil {
		return nil
	}
	out := &alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig{}
	out.SetLocalSsdCount(dcl.ValueOrEmptyInt64(o.LocalSsdCount))
	return out
}
// DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingToProto converts a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling object to its proto representation.
func DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingToProto(o *alpha.ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) *alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling {
	if o == nil {
		return nil
	}
	out := &alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling{}
	out.SetMinNodeCount(dcl.ValueOrEmptyInt64(o.MinNodeCount))
	out.SetMaxNodeCount(dcl.ValueOrEmptyInt64(o.MaxNodeCount))
	return out
}
// DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigToProto converts a ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig object to its proto representation.
func DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigToProto(o *alpha.ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) *alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig {
	if o == nil {
		return nil
	}
	out := &alphapb.DataprocAlphaClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig{}
	// Copy both string maps into fresh maps for the proto.
	componentVersion := make(map[string]string, len(o.ComponentVersion))
	for key, value := range o.ComponentVersion {
		componentVersion[key] = value
	}
	out.SetComponentVersion(componentVersion)
	properties := make(map[string]string, len(o.Properties))
	for key, value := range o.Properties {
		properties[key] = value
	}
	out.SetProperties(properties)
	return out
}
// DataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigToProto converts a ClusterVirtualClusterConfigAuxiliaryServicesConfig object to its proto representation.
func DataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigToProto(o *alpha.ClusterVirtualClusterConfigAuxiliaryServicesConfig) *alphapb.DataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfig {
	if o == nil {
		return nil
	}
	out := &alphapb.DataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfig{}
	out.SetMetastoreConfig(DataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigToProto(o.MetastoreConfig))
	out.SetSparkHistoryServerConfig(DataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigToProto(o.SparkHistoryServerConfig))
	return out
}
// DataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigToProto converts a ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig object to its proto representation.
func DataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigToProto(o *alpha.ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) *alphapb.DataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig {
	if o == nil {
		return nil
	}
	out := &alphapb.DataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig{}
	out.SetDataprocMetastoreService(dcl.ValueOrEmptyString(o.DataprocMetastoreService))
	return out
}
// DataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigToProto converts a ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig object to its proto representation.
func DataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigToProto(o *alpha.ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) *alphapb.DataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig {
	if o == nil {
		return nil
	}
	out := &alphapb.DataprocAlphaClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig{}
	out.SetDataprocCluster(dcl.ValueOrEmptyString(o.DataprocCluster))
	return out
}
// ClusterToProto converts a Cluster resource to its proto representation.
func ClusterToProto(resource *alpha.Cluster) *alphapb.DataprocAlphaCluster {
	out := &alphapb.DataprocAlphaCluster{}
	out.SetProject(dcl.ValueOrEmptyString(resource.Project))
	out.SetName(dcl.ValueOrEmptyString(resource.Name))
	out.SetConfig(DataprocAlphaClusterConfigToProto(resource.Config))
	out.SetStatus(DataprocAlphaClusterStatusToProto(resource.Status))
	out.SetClusterUuid(dcl.ValueOrEmptyString(resource.ClusterUuid))
	out.SetMetrics(DataprocAlphaClusterMetricsToProto(resource.Metrics))
	out.SetLocation(dcl.ValueOrEmptyString(resource.Location))
	out.SetVirtualClusterConfig(DataprocAlphaClusterVirtualClusterConfigToProto(resource.VirtualClusterConfig))
	// Labels are copied into a fresh map for the proto.
	labels := make(map[string]string, len(resource.Labels))
	for key, value := range resource.Labels {
		labels[key] = value
	}
	out.SetLabels(labels)
	// Convert each status history entry in place.
	statusHistory := make([]*alphapb.DataprocAlphaClusterStatusHistory, len(resource.StatusHistory))
	for i := range resource.StatusHistory {
		statusHistory[i] = DataprocAlphaClusterStatusHistoryToProto(&resource.StatusHistory[i])
	}
	out.SetStatusHistory(statusHistory)
	return out
}
// applyCluster handles the gRPC request by passing it to the underlying Cluster Apply() method.
func (s *ClusterServer) applyCluster(ctx context.Context, c *alpha.Client, request *alphapb.ApplyDataprocAlphaClusterRequest) (*alphapb.DataprocAlphaCluster, error) {
	res, err := c.ApplyCluster(ctx, ProtoToCluster(request.GetResource()))
	if err != nil {
		return nil, err
	}
	return ClusterToProto(res), nil
}
// ApplyDataprocAlphaCluster handles the gRPC request by passing it to the underlying Cluster Apply() method.
func (s *ClusterServer) ApplyDataprocAlphaCluster(ctx context.Context, request *alphapb.ApplyDataprocAlphaClusterRequest) (*alphapb.DataprocAlphaCluster, error) {
	// Build a client from the request's service account file, then delegate.
	cl, err := createConfigCluster(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyCluster(ctx, cl, request)
}
// DeleteDataprocAlphaCluster handles the gRPC request by passing it to the underlying Cluster Delete() method.
func (s *ClusterServer) DeleteDataprocAlphaCluster(ctx context.Context, request *alphapb.DeleteDataprocAlphaClusterRequest) (*emptypb.Empty, error) {
	cl, err := createConfigCluster(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	// Delete has no payload; return an empty proto plus the delete error, if any.
	return &emptypb.Empty{}, cl.DeleteCluster(ctx, ProtoToCluster(request.GetResource()))
}
// ListDataprocAlphaCluster handles the gRPC request by passing it to the underlying ClusterList() method.
func (s *ClusterServer) ListDataprocAlphaCluster(ctx context.Context, request *alphapb.ListDataprocAlphaClusterRequest) (*alphapb.ListDataprocAlphaClusterResponse, error) {
	cl, err := createConfigCluster(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListCluster(ctx, request.GetProject(), request.GetLocation())
	if err != nil {
		return nil, err
	}
	// protos stays nil for an empty result, matching the original behavior.
	var protos []*alphapb.DataprocAlphaCluster
	for _, item := range resources.Items {
		protos = append(protos, ClusterToProto(item))
	}
	resp := &alphapb.ListDataprocAlphaClusterResponse{}
	resp.SetItems(protos)
	return resp, nil
}
// createConfigCluster returns a DCL client configured with the given
// service account credentials file.
func createConfigCluster(ctx context.Context, serviceAccountFile string) (*alpha.Client, error) {
	// Renamed parameter: Go naming uses mixedCaps, not snake_case.
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return alpha.NewClient(conf), nil
}
|
package session
import (
"golang.org/x/net/context"
)
// HandlerFunc adapts an ordinary function to the Handler interface.
//
// NOTE(review): the (error, interface{}) return order is unconventional in Go
// (errors usually come last); changing it would break every implementer and
// caller, so it is only flagged here.
type HandlerFunc func(ctx context.Context, session Session, msgByte interface{}) (error, interface{})

// Handle invokes the wrapped function itself.
func (hf HandlerFunc) Handle(ctx context.Context, session Session, msgByte interface{}) (error, interface{}) {
	return hf(ctx, session, msgByte)
}

// Handler processes a message received on a session.
type Handler interface {
	Handle(ctx context.Context, session Session, msgByte interface{}) (error, interface{})
}
|
package ziface
// IRequest wraps a client data packet together with the connection it
// arrived on.
type IRequest interface {
	// GetConnection returns the connection this request belongs to.
	GetConnection() IConnection
	// GetData returns the request payload.
	GetData() string
}
|
package main
import (
"flag"
"log"
"time"
"github.com/bpostlethwaite/ahipbot/asana"
)
var bot *Hipbot
var web *Webapp

// main wires up the bot: it parses flags, starts the web app, loads base
// configuration and plugins, and then keeps reconnecting the chat client
// forever, retrying after every failure or disconnect.
func main() {
	flag.Parse()
	bot = NewHipbot(*configFile)
	// Run the web application alongside the bot.
	go launchWebapp()
	bot.loadBaseConfig()
	bot.registerPlugins()
	asanaClient, err := asana.NewClient("", "")
	if err != nil {
		log.Println("ASANA - Failed: ", err)
	} else {
		// BUG FIX: only start the storm watcher when the Asana client was
		// actually constructed; the original launched it even after failure.
		go StormWatch(asanaClient)
	}
	for {
		log.Println("Connecting client...")
		if err := bot.connectClient(); err != nil {
			log.Println(" `- Failed: ", err)
			time.Sleep(3 * time.Second)
			continue
		}
		disconnect := bot.setupHandlers()
		// A select with a single case is just a blocking receive.
		<-disconnect
		log.Println("Disconnected...")
		time.Sleep(1 * time.Second)
	}
}
|
//Package deployment provides a top-level API to control Kyma deployment and uninstallation.
package deployment
import (
"context"
"fmt"
"strings"
"time"
"github.com/kyma-incubator/hydroform/parallel-install/pkg/components"
"github.com/kyma-incubator/hydroform/parallel-install/pkg/config"
"github.com/kyma-incubator/hydroform/parallel-install/pkg/engine"
"github.com/kyma-incubator/hydroform/parallel-install/pkg/helm"
"github.com/kyma-incubator/hydroform/parallel-install/pkg/logger"
"github.com/kyma-incubator/hydroform/parallel-install/pkg/overrides"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
//these components will be removed from component list if running on a local cluster
var incompatibleLocalComponents = []string{"apiserver-proxy", "iam-kubeconfig-service"}

// core holds the state shared by an install/uninstall operation.
type core struct {
	// Contains list of components to install (inclusive pre-requisites)
	cfg       *config.Config
	overrides *Overrides
	// Used to send progress events of a running install/uninstall process
	processUpdates chan<- ProcessUpdate
	// kubeClient talks to the target cluster.
	kubeClient kubernetes.Interface
}
// newCore creates a new core instance.
//
// cfg includes configuration parameters for the installer lib;
// ob bundles all overrides which have to be considered by Helm;
// kubeClient is the kubernetes client;
// processUpdates is an optional feedback channel provided by the caller.
func newCore(cfg *config.Config, ob *OverridesBuilder, kubeClient kubernetes.Interface, processUpdates chan<- ProcessUpdate) (*core, error) {
	// Local (K3d) clusters cannot run certain components; drop them up front.
	if isK3dCluster(kubeClient) {
		cfg.Log.Infof("Running in K3d cluster: removing incompatible components '%s'", strings.Join(incompatibleLocalComponents, "', '"))
		removeFromComponentList(cfg.ComponentList, incompatibleLocalComponents)
	}
	registerOverridesInterceptors(kubeClient, ob, cfg.Log)
	builtOverrides, err := ob.Build()
	if err != nil {
		return nil, err
	}
	return &core{
		cfg:            cfg,
		overrides:      &builtOverrides,
		processUpdates: processUpdates,
		kubeClient:     kubeClient,
	}, nil
}
// logStatuses prints the processing status of every component seen so far.
func (i *core) logStatuses(statusMap map[string]string) {
	i.cfg.Log.Infof("Components processed so far:")
	for component, status := range statusMap {
		i.cfg.Log.Infof("Component: %s, Status: %s", component, status)
	}
}
// getConfig builds the overrides provider plus the two installation engines:
// one sequential engine for prerequisites and one parallel engine for the
// remaining components.
func (i *core) getConfig() (overrides.Provider, *engine.Engine, *engine.Engine, error) {
	overridesProvider, err := overrides.New(i.kubeClient, i.overrides.Map(), i.cfg.Log)
	if err != nil {
		// BUG FIX: the original discarded the underlying error; wrap it so
		// callers can see (and unwrap) the actual cause.
		return nil, nil, nil, fmt.Errorf("failed to create overrides provider: %w", err)
	}
	//create KymaComponentMetadataTemplate and set prerequisites flag
	kymaMetadataTpl := helm.NewKymaComponentMetadataTemplate(i.cfg.Version, i.cfg.Profile)
	prerequisitesProvider := components.NewComponentsProvider(overridesProvider, i.cfg, i.cfg.ComponentList.Prerequisites, kymaMetadataTpl.ForPrerequisites())
	componentsProvider := components.NewComponentsProvider(overridesProvider, i.cfg, i.cfg.ComponentList.Components, kymaMetadataTpl.ForComponents())
	prerequisitesEngineCfg := engine.Config{
		// prerequisite components need to be installed sequentially, so only 1 worker should be used
		WorkersCount: 1,
		Log:          i.cfg.Log,
	}
	componentsEngineCfg := engine.Config{
		WorkersCount: i.cfg.WorkersCount,
		Log:          i.cfg.Log,
	}
	prerequisitesEng := engine.NewEngine(overridesProvider, prerequisitesProvider, prerequisitesEngineCfg)
	componentsEng := engine.NewEngine(overridesProvider, componentsProvider, componentsEngineCfg)
	return overridesProvider, prerequisitesEng, componentsEng, nil
}
func calculateDuration(start time.Time, end time.Time, duration time.Duration) time.Duration {
elapsedTime := end.Sub(start)
return duration - elapsedTime
}
// processUpdate sends a general process update event, if a feedback channel
// was provided to the core; otherwise it is a no-op.
func (i *core) processUpdate(phase InstallationPhase, event ProcessEvent, err error) {
	if i.processUpdates == nil {
		return // caller did not ask for feedback
	}
	update := ProcessUpdate{
		Event:     event,
		Phase:     phase,
		Component: components.KymaComponent{},
		Error:     err,
	}
	i.processUpdates <- update
}
// processUpdateComponent sends a process update event for a single component,
// if a feedback channel was provided to the core; otherwise it is a no-op.
func (i *core) processUpdateComponent(phase InstallationPhase, comp components.KymaComponent) {
	if i.processUpdates == nil {
		return
	}
	// Failed components are reported as execution failures; everything else
	// is reported as still running.
	event := ProcessRunning
	if comp.Status == components.StatusError {
		event = ProcessExecutionFailure
	}
	i.processUpdates <- ProcessUpdate{
		Event:     event,
		Phase:     phase,
		Component: comp,
	}
}
// isK3dCluster reports whether any cluster node name carries the "k3d-"
// prefix, which identifies a local K3d cluster. Node-listing errors are
// treated as "not K3d".
func isK3dCluster(kubeClient kubernetes.Interface) bool {
	nodes, err := kubeClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		return false
	}
	for i := range nodes.Items {
		if strings.HasPrefix(nodes.Items[i].GetName(), "k3d-") {
			return true
		}
	}
	return false
}
// removeFromComponentList drops every named component from the component list.
func removeFromComponentList(cl *config.ComponentList, componentNames []string) {
	for _, name := range componentNames {
		cl.Remove(name)
	}
}
// registerOverridesInterceptors wires the interceptors that post-process
// override values before they are handed to Helm.
func registerOverridesInterceptors(kubeClient kubernetes.Interface, o *OverridesBuilder, log logger.Interface) {
	// Resolve/normalize the cluster domain name overrides.
	o.AddInterceptor([]string{"global.domainName", "global.ingress.domainName"}, NewDomainNameOverrideInterceptor(kubeClient, log))
	//hide certificate data
	o.AddInterceptor([]string{"global.tlsCrt", "global.tlsKey"}, NewCertificateOverrideInterceptor("global.tlsCrt", "global.tlsKey"))
}
|
// +build integration
package integration
import (
"fmt"
"io/ioutil"
"os"
"strings"
"time"
)
// Paths of the temporary log files produced and consumed by the integration
// tests.
const (
	tmpTestFilePathDefault = "/tmp/gollum_test.log"
	tmpTestFilePathFoo     = "/tmp/gollum_test_foo.log"
	tmpTestFilePathBar     = "/tmp/gollum_test_bar.log"
	tmpTestFilePathGlob0   = "/tmp/gollum_test_glob0.log"
	tmpTestFilePathGlob1   = "/tmp/gollum_test_glob1.log"
	tmpTestFilePathGlob2   = "/tmp/gollum_test_glob2.log"
)

var (
	// tmpTestFiles lists every temp file so cleanup can remove them all.
	tmpTestFiles = []string{
		tmpTestFilePathDefault,
		tmpTestFilePathFoo,
		tmpTestFilePathBar,
		tmpTestFilePathGlob0,
		tmpTestFilePathGlob1,
		tmpTestFilePathGlob2,
	}
)
// readResultFile polls for filepath until it can be opened (or until
// maxFetchResultTime elapses) and returns the file's content together with
// its line count (number of '\n' separators plus one).
func readResultFile(filepath string) (string, int, error) {
	start := time.Now()
	for time.Since(start) < maxFetchResultTime {
		file, err := os.Open(filepath)
		if err != nil {
			// File not there yet; retry shortly.
			time.Sleep(200 * time.Millisecond)
			continue
		}
		buffer, err := ioutil.ReadAll(file)
		// Close explicitly rather than via defer-in-loop, which linters flag
		// because deferred calls only run at function return.
		file.Close()
		if err != nil {
			return "", 0, err
		}
		content := string(buffer)
		lines := strings.Count(content, "\n") + 1
		return content, lines, nil
	}
	return "", 0, fmt.Errorf("timed out while reading from %s", filepath)
}
// removeTestResultFiles deletes any temp result files left over from tests.
func removeTestResultFiles() {
	for _, path := range tmpTestFiles {
		_, err := os.Stat(path)
		if err != nil {
			continue // file absent; nothing to remove
		}
		os.Remove(path)
	}
}
|
package handlers
import (
"net/http"
"github.com/saravase/golang_mux_swagger/plant-api/data"
)
// swagger:route POST /plant plants addPlant
// responses:
//  200: successContent
//  422: errorValidation
//  400: errorResponse

// AddPlant used to insert the new plant data into the datastore
func (plant *Plant) AddPlant(response http.ResponseWriter, request *http.Request) {
	plant.logger.Printf("[DEBUG] Add the plant data")
	// The decoded plant is taken from the request context under KeyPlant;
	// presumably a validation middleware stored it there — the type assertion
	// panics if that middleware did not run. TODO confirm.
	plantData := request.Context().Value(KeyPlant{}).(*data.Plant)
	plant.logger.Printf("[DEBUG] Added plant data: %#v\n", plantData)
	data.AddPlant(plantData)
}
|
package utility
import "Perekoter/models"
// NewError stores a new active error record with the given text.
func NewError(text string) {
	db := models.DB()
	defer db.Close()
	record := models.Error{
		Text:   text,
		Active: true,
	}
	db.Create(&record)
}
// ConfirmError marks the error with the given id as inactive (confirmed).
func ConfirmError(num int) {
	db := models.DB()
	defer db.Close()
	var target models.Error
	db.First(&target, num)
	target.Active = false
	db.Save(&target)
}
// CoonfirmAllErrors marks every error record as confirmed (inactive).
// (The misspelled name is kept for backward compatibility with callers.)
func CoonfirmAllErrors() {
	db := models.DB()
	defer db.Close()
	var allErrors []models.Error
	// BUG FIX: Find requires a pointer destination; the original passed the
	// slice by value, so no records were ever loaded.
	db.Find(&allErrors)
	// BUG FIX: confirming means deactivating (see ConfirmError, which sets
	// Active=false); the original set Active to true, re-activating instead.
	db.Model(&allErrors).Update("Active", false)
}
// GetAllErrors returns every error record whose Active flag matches active.
func GetAllErrors(active bool) []models.Error {
	db := models.DB()
	defer db.Close()
	filter := models.Error{Active: active}
	var result []models.Error
	db.Find(&result, filter)
	return result
}
// GetActiveErrorsCount returns the number of error records still marked active.
func GetActiveErrorsCount() int {
	db := models.DB()
	defer db.Close()
	var count int
	// BUG FIX: the original called db.Find(models.Error{...}) with a
	// non-pointer value, which GORM rejects; count on the model instead.
	db.Model(&models.Error{}).Where(models.Error{Active: true}).Count(&count)
	return count
}
// NewHistoryPoint stores a new history record with the given text.
func NewHistoryPoint(text string) {
	db := models.DB()
	defer db.Close()
	point := models.HistoryPoint{Text: text}
	db.Create(&point)
}
// GetAllHistoryPoints returns every stored history record.
func GetAllHistoryPoints() []models.HistoryPoint {
	db := models.DB()
	defer db.Close()
	var points []models.HistoryPoint
	db.Find(&points)
	return points
}
|
package rpc_01
// HelloService exposes a single method following Go's net/rpc conventions.
type HelloService struct{}

// Hello satisfies the Go RPC rules:
//  1. the method takes exactly two serializable arguments, the second of
//     which is a pointer used for the reply, and
//  2. it is exported and returns an error.
func (p *HelloService) Hello(request string, reply *string) error {
	*reply = "hello:" + request
	return nil
}
|
package main
import (
"database/sql"
"fmt"
//_ "github.com/mattn/go-sqlite3"
_ "github.com/go-sql-driver/mysql"
)
// SQL statements used by the experiments in main(): parameterized and
// hard-coded variants of the same insert/select.
const (
	//insertItemQuery = "insert into items ('name', 'price', 'description') values (?,?,?)"
	insertItemQuery  = "insert into items (name, price, description) values (?,?,?)"
	insertItemQuery2 = "insert into items (name, price, description) values ('brownie',240,'sizzling')"
	selectItemQuery  = "select * from items where id=?"
	selectItemQuery2 = "select * from items where id=5"
)

// NOTE(review): this package-level db is never assigned; main() declares its
// own *sql.DB that shadows it — confirm whether this variable can be removed.
var db sql.DB

/*
func init() {
}
*/
// useQuery runs query through db.Query. With params it executes the
// parameterized form and discards the result set; without params it executes
// the query directly and iterates the rows. Rows are always closed so the
// underlying connection returns to the pool.
func useQuery(db *sql.DB, query string, params ...interface{}) error {
	if len(params) > 0 {
		rs, err := db.Query(query, params...)
		if err != nil {
			return err
		}
		// The rows are not consumed in this branch; close immediately.
		rs.Close()
		return nil
	}
	rs, err := db.Query(query)
	if err != nil {
		return err
	}
	defer rs.Close()
	for rs.Next() {
		fmt.Println("just iterating")
	}
	// BUG FIX: surface errors encountered during iteration; the original
	// ignored rs.Err() entirely.
	return rs.Err()
}
// useExec runs query via db.Exec, expanding params when given.
func useExec(db *sql.DB, query string, params ...interface{}) error {
	if len(params) > 0 {
		// BUG FIX: the original passed the params slice as a single argument
		// (db.Exec(query, params)) instead of expanding it, and then fell
		// through and executed the query a SECOND time without parameters.
		_, err := db.Exec(query, params...)
		return err
	}
	_, err := db.Exec(query)
	return err
}
// usePrepared prepares the insert statement, executes it once, and reports
// execution errors on ch as well as returning them.
func usePrepared(db *sql.DB, ch chan error) error {
	stmt, err := db.Prepare(insertItemQuery)
	if err != nil {
		// BUG FIX: the original deferred stmt.Close() BEFORE this check, so a
		// failed Prepare left stmt nil and the deferred Close panicked.
		fmt.Println(err.Error())
		return err
	}
	defer stmt.Close()
	_, err = stmt.Exec("brownie", 240, "sizzling")
	if err != nil {
		ch <- err
		fmt.Println(err.Error())
		return err
	}
	return nil
}
// main opens a MySQL connection pool and launches 300 concurrent prepared
// inserts, printing pool statistics before and after, then waits for a single
// error to arrive on ch.
func main() {
	//db, err := sql.Open("sqlite3", "file:/media/sf_alok/swiggy.db?cache=shared")
	// NOTE(review): this db shadows the unused package-level `var db sql.DB`.
	db, err := sql.Open("mysql", "vm:vm@tcp(192.168.56.101:3306)/items")
	ch := make(chan error)
	// NOTE(review): db.Stats() is called before the Open error is checked;
	// sql.Open returns a usable handle even before connecting, so this works,
	// but the ordering is surprising.
	fmt.Println(db.Stats())
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	for i := 0; i < 300; i++ {
		/*
			err := useQuery(db, selectItemQuery2)
			if err != nil {
				fmt.Println(err.Error())
			}
		*/
		/*
			err := useQuery(db, selectItemQuery, 5)
			if err != nil {
				fmt.Println(err.Error())
			}
		*/
		/*
			err = useExec(db, insertItemQuery2)
			if err != nil {
				fmt.Println(err.Error())
			}
		*/
		/*
			err = useExec(db, insertItemQuery, "brownie", 240, "sizzling")
			if err != nil {
				fmt.Println(err.Error())
			}
		*/
		go usePrepared(db, ch)
		/*
			if err != nil {
				fmt.Println(err.Error())
			}
		*/
	}
	fmt.Println(db.Stats())
	fmt.Println("Test End")
	// NOTE(review): ch is unbuffered and usePrepared only sends on failure;
	// if every goroutine succeeds this receive blocks forever, and if several
	// fail the extra senders block until process exit — confirm intended.
	fmt.Println((<-ch).Error())
}
|
package local
import (
"encoding/json"
"errors"
"io/ioutil"
"os"
"path/filepath"
"github.com/10gen/realm-cli/internal/cloud/realm"
)
// AppData is the Realm app data. It abstracts over the different on-disk
// config formats (stitch.json, config.json, realm_config.json).
type AppData interface {
	// ConfigData returns the serialized app config contents.
	ConfigData() ([]byte, error)
	ConfigVersion() realm.AppConfigVersion
	ID() string
	Name() string
	Location() realm.Location
	DeploymentModel() realm.DeploymentModel
	Environment() realm.Environment
	// LoadData reads the app data from rootDir.
	LoadData(rootDir string) error
	// WriteData persists the app data under rootDir.
	WriteData(rootDir string) error
}
// set of supported local names: the directory and file base names that make
// up a Realm app's on-disk layout
const (
	extJS   = ".js"
	extJSON = ".json"
	// app configs
	NameRealmConfig = "realm_config"
	NameConfig      = "config"
	NameStitch      = "stitch"
	// environments
	NameEnvironments = "environments"
	// auth
	NameAuth           = "auth"
	NameAuthProviders  = "auth_providers"
	NameCustomUserData = "custom_user_data"
	NameProviders      = "providers"
	// functions
	NameFunctions = "functions"
	// nameNodeModules is unexported: node_modules is ignored, never written.
	nameNodeModules = "node_modules"
	NameSource      = "source"
	NamePackageJSON = "package.json"
	// graphql
	NameGraphQL         = "graphql"
	NameCustomResolvers = "custom_resolvers"
	// hosting
	NameHosting  = "hosting"
	NameFiles    = "files"
	NameMetadata = "metadata"
	// services
	NameDataSources      = "data_sources"
	NameHTTPEndpoints    = "http_endpoints"
	NameIncomingWebhooks = "incoming_webhooks"
	NameRules            = "rules"
	NameSchema           = "schema"
	NameServices         = "services"
	NameRelationships    = "relationships"
	// triggers
	NameTriggers = "triggers"
	// sync
	NameSync = "sync"
	// values
	NameSecrets = "secrets"
	NameValues  = "values"
)
// set of supported local files: well-known (name, extension) pairs that the
// CLI reads and writes inside an app directory
var (
	// app configs
	FileRealmConfig = File{NameRealmConfig, extJSON}
	FileConfig      = File{NameConfig, extJSON}
	FileStitch      = File{NameStitch, extJSON}
	// auth
	FileCustomUserData = File{NameCustomUserData, extJSON}
	FileProviders      = File{NameProviders, extJSON}
	// data sources
	FileRules         = File{NameRules, extJSON}
	FileSchema        = File{NameSchema, extJSON}
	FileRelationships = File{NameRelationships, extJSON}
	// functions
	FileSource = File{NameSource, extJS}
	// values
	FileSecrets = File{NameSecrets, extJSON}
)
// File is a local Realm app file, identified by a base name plus extension.
type File struct {
	Name string
	Ext  string
}

// String returns the complete file name, e.g. "config.json".
func (f File) String() string {
	return f.Name + f.Ext
}
// walk recursively visits every file under rootDir, skipping any entry whose
// name appears in ignorePaths, and invokes fn for each regular file found.
func walk(rootDir string, ignorePaths map[string]struct{}, fn func(file os.FileInfo, path string) error) error {
	if ignorePaths == nil {
		ignorePaths = map[string]struct{}{}
	}
	dw := directoryWalker{path: rootDir}
	return dw.walk(func(f os.FileInfo, p string) error {
		if _, skip := ignorePaths[f.Name()]; skip {
			return nil
		}
		if f.IsDir() {
			// Recurse into sub-directories with the same ignore set.
			return walk(p, ignorePaths, fn)
		}
		return fn(f, p)
	})
}
type directoryWalker struct {
path string
continueOnError bool
failOnNotExist bool
onlyDirs bool
onlyFiles bool
}
func (dw directoryWalker) walk(fn func(file os.FileInfo, path string) error) error {
if _, err := os.Stat(dw.path); err != nil {
if os.IsNotExist(err) && !dw.failOnNotExist {
return nil
}
return err
}
files, filesErr := ioutil.ReadDir(dw.path)
if filesErr != nil {
return filesErr
}
for _, file := range files {
if dw.onlyDirs && !file.IsDir() || dw.onlyFiles && file.IsDir() {
continue
}
err := fn(file, filepath.Join(dw.path, file.Name()))
if err != nil {
if dw.continueOnError {
continue
}
return err
}
}
return nil
}
func readFile(path string) ([]byte, error) {
return readFileWithOptions(path, false)
}
func readFileWithOptions(path string, failOnMissing bool) ([]byte, error) {
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) && !failOnMissing {
return nil, nil
}
return nil, err
}
return ioutil.ReadFile(path)
}
// unmarshalJSON decodes data into out, treating empty input as a no-op.
func unmarshalJSON(data []byte, out interface{}) error {
	return unmarshalJSONWithOptions(data, out, false)
}

// unmarshalJSONWithOptions decodes data into out. Empty input is a no-op
// unless failOnEmpty is set, in which case an error is returned.
func unmarshalJSONWithOptions(data []byte, out interface{}, failOnEmpty bool) error {
	if len(data) > 0 {
		return json.Unmarshal(data, out)
	}
	if failOnEmpty {
		return errors.New("no file contents")
	}
	return nil
}
// AddAuthProvider adds an auth provider to the provided app data. Its
// placement depends on the concrete config format: the stitch/config formats
// append to a flat AuthProviders list, while the realm_config format keys
// providers by name.
func AddAuthProvider(appData AppData, name string, config map[string]interface{}) {
	switch data := appData.(type) {
	case *AppStitchJSON:
		data.AuthProviders = append(data.AuthProviders, config)
	case *AppConfigJSON:
		data.AuthProviders = append(data.AuthProviders, config)
	case *AppRealmConfigJSON:
		// Lazily initialize the providers map before keying into it.
		if data.Auth.Providers == nil {
			data.Auth.Providers = map[string]interface{}{}
		}
		data.Auth.Providers[name] = config
	}
}
// AddDataSource adds a data source to the app data. The stitch/config
// formats record it under Services, while the realm_config format records it
// under DataSources.
func AddDataSource(appData AppData, config map[string]interface{}) {
	switch data := appData.(type) {
	case *AppStitchJSON:
		data.Services = append(data.Services, ServiceStructure{Config: config})
	case *AppConfigJSON:
		data.Services = append(data.Services, ServiceStructure{Config: config})
	case *AppRealmConfigJSON:
		data.DataSources = append(data.DataSources, DataSourceStructure{Config: config})
	}
}
|
package ebook
import (
"bytes"
"crypto/rand"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net/url"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"text/template"
"github.com/lwllvyb/gktime2book/util"
"github.com/mattn/godown"
)
// newUUID generates a random version-4, variant-1 UUID string using
// crypto/rand, per RFC 4122.
func newUUID() (string, error) {
	var uuid [16]byte
	if _, err := io.ReadFull(rand.Reader, uuid[:]); err != nil {
		return "", err
	}
	// variant bits; see RFC 4122 section 4.1.1
	uuid[8] = uuid[8]&^0xc0 | 0x80
	// version 4 (pseudo-random); see RFC 4122 section 4.1.3
	uuid[6] = uuid[6]&^0xf0 | 0x40
	return fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:]), nil
}
// ParseImage rewrites <img> tags in content: it strips extra attributes,
// downloads each referenced image into outDir under a random name, and
// replaces the remote URL with a path under `relative`.
func ParseImage(content string, outDir string, relative string) (result string) {
	// Drop any additional attributes (style, width, ...) carried by img tags.
	attrRe := regexp.MustCompile("img (.{1,15}=\".*?\") src=\".*?\"")
	if attrs := attrRe.FindAllStringSubmatch(content, -1); attrs != nil {
		for _, attr := range attrs {
			content = strings.Replace(content, attr[1], "", -1)
		}
	}
	srcRe := regexp.MustCompile("img\\s+src=\"(.*?)\"")
	imgURLList := srcRe.FindAllStringSubmatch(content, -1)
	if imgURLList != nil {
		// Loop variable renamed from "url": the original shadowed the
		// imported net/url package.
		for _, match := range imgURLList {
			uuid, _ := newUUID()
			urlLocal := uuid + ".jpg"
			if err := util.DownloadFile(filepath.Join(outDir, urlLocal), match[1]); err != nil {
				// BUG FIX: the format string had three verbs but only two
				// arguments; the error value was missing.
				log.Printf("DownloadFile [%s] -> %s error: %v\n", match[1], filepath.Join(outDir, urlLocal), err)
			}
			content = strings.Replace(content, match[1], filepath.Join(relative, urlLocal), -1)
		}
	}
	return content
}
// ParseAudio downloads the audio file referenced by audioHTTP into outDir and
// returns the file's base name.
func ParseAudio(audioHTTP, outDir string) string {
	u, err := url.Parse(audioHTTP)
	if err != nil {
		// NOTE(review): log.Fatalln exits the process, so the return below is
		// unreachable — confirm whether a non-fatal log was intended here.
		log.Fatalln(err)
		return ""
	}
	audioName := path.Base(u.Path)
	downloadPath := filepath.Join(outDir, audioName)
	// Download errors are logged; the name is returned regardless, so the
	// caller may reference a file that was never written.
	if err := util.DownloadFile(downloadPath, audioHTTP); err != nil {
		log.Printf("DownloadFile [%s] -> %s error:%v", downloadPath, audioHTTP, err)
	}
	return audioName
}
// htmlTemplate carries the values injected into template.html.
type htmlTemplate struct {
	// Title is the chapter/page title.
	Title string
	// Content is the HTML body content.
	Content string
}
// renderHTML renders ebook/template.html (resolved relative to the current
// working directory) with the given title and content and returns the
// rendered buffer.
func renderHTML(title string, content string) (bytes.Buffer, error) {
	pwd, _ := os.Getwd()
	tmplPath := filepath.Join(pwd, "ebook", "template.html")
	tmpl, err := template.New("template.html").ParseFiles(tmplPath)
	if err != nil {
		return bytes.Buffer{}, fmt.Errorf("New error: %v", err)
	}
	data := htmlTemplate{Title: title, Content: content}
	var rendered bytes.Buffer
	if err := tmpl.Execute(&rendered, data); err != nil {
		return bytes.Buffer{}, fmt.Errorf("Execute error: %v", err)
	}
	return rendered, nil
}
func writeHTMLFile(dstFile, title string, content []byte) {
out, err := os.Create(dstFile)
if err != nil {
log.Fatalf("create %v\n", err)
return
}
defer out.Close()
out.Write(content)
}
// genHTMLFile renders the HTML template for (title, content) and writes the
// result to dstFile. Rendering failures abort the process via log.Fatalf.
func genHTMLFile(dstFile, title, content string) {
	rendered, err := renderHTML(title, content)
	if err != nil {
		log.Fatalf("renderHTML error:%v", err)
		return
	}
	writeHTMLFile(dstFile, title, rendered.Bytes())
}
// renderH prepends an <h1> heading carrying the title to the content.
func renderH(title, content string) string {
	return "<h1>" + title + "</h1>\n" + content
}
// genMardown appends a chapter to dstFile as Markdown: an H1 title, an
// optional audio-player snippet, then the HTML content converted to Markdown
// via godown. Errors panic, matching the rest of this function's error style.
// (The misspelled name is kept for backward compatibility with callers.)
func genMardown(dstFile, title, content, audioFile string) {
	f, err := os.OpenFile(dstFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	headerData := fmt.Sprintf("# %s\n", title)
	headerData += genAudioData(audioFile)
	if _, err = f.WriteString(headerData); err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	rBuf := bytes.NewReader([]byte(content))
	// BUG FIX: the conversion error was silently discarded; fail loudly like
	// every other error path in this function.
	if err := godown.Convert(&buf, rBuf, nil); err != nil {
		panic(err)
	}
	if _, err = f.WriteString(buf.String()); err != nil {
		panic(err)
	}
}
// genBookJSONFile writes the gitbook book.json metadata (title + language)
// to dstFile.
func genBookJSONFile(dstFile, columnTitle, language string) {
	data := map[string]string{
		"title":    columnTitle,
		"language": language,
	}
	jsonData, err := json.Marshal(data)
	if err != nil {
		log.Fatalf("json.Marshal fail, error:%v", err)
		return
	}
	// fix: the write error was silently dropped.
	if err := ioutil.WriteFile(dstFile, jsonData, os.ModePerm); err != nil {
		log.Printf("WriteFile [%s] error:%v", dstFile, err)
	}
}
// genSummaryData returns one SUMMARY.md bullet entry linking name to dstFile.
func genSummaryData(name, dstFile string) string {
	return "* [" + name + "](" + dstFile + ")\n"
}
// genAudioData returns an HTML <audio> tag for audioFile followed by a
// blank line, or "" when no audio file is given.
func genAudioData(audioFile string) string {
	if audioFile == "" {
		return ""
	}
	tag := fmt.Sprintf(`<audio id="audio" controls="" preload="none"><source id="mp3" src="%s"></audio>`, audioFile)
	return tag + "\n\n"
}
|
package repositories
import (
"database/sql"
"errors"
"fmt"
"log"
"ocg-be/database"
"ocg-be/models"
)
// CollectionRepo provides CRUD access to the collections table and the
// collection_product join table. Stateless; all methods use database.DB.
type CollectionRepo struct {
}
// Count returns the number of collections whose name matches the LIKE
// pattern in search. On query failure it logs and returns 0.
func (*CollectionRepo) Count(search string) int64 {
	rows, err := database.DB.Query("SELECT COUNT(*) FROM collections WHERE name LIKE ?", search)
	if err != nil {
		log.Println(err)
		// fix: previously fell through and called rows.Next() on nil rows,
		// which panics.
		return 0
	}
	defer rows.Close() // fix: the result set was never closed
	var total int64
	if rows.Next() {
		if err := rows.Scan(&total); err != nil {
			log.Println(err)
		}
	}
	return total
}
// Take returns up to limit collections matching search, ordered by
// orderBy/sort, skipping offset rows. Returns nil on any error.
// NOTE(review): orderBy and sort are interpolated into the SQL text and
// cannot be parameterized — they MUST come from a trusted whitelist at the
// call site or this is an SQL-injection vector.
func (*CollectionRepo) Take(limit int, offset int, orderBy string, sort string, search string) interface{} {
	qtext := fmt.Sprintf("SELECT * FROM collections WHERE name LIKE ? ORDER BY %s %s LIMIT ? OFFSET ? ", orderBy, sort)
	rows, err := database.DB.Query(qtext, search, limit, offset)
	if err != nil {
		log.Println(err)
		return nil
	}
	defer rows.Close() // fix: the result set was never closed
	var collections []models.Collection
	for rows.Next() {
		var collection models.Collection
		if err := rows.Scan(&collection.Id, &collection.Name, &collection.Handle, &collection.Thumbnail); err != nil {
			log.Println(err)
			return nil
		}
		collections = append(collections, collection)
	}
	// fix: iteration errors were previously unchecked.
	if err := rows.Err(); err != nil {
		log.Println(err)
	}
	return collections
}
// Add inserts a collection, then reads back the highest id as the new id.
// Returns the id, or 0 and an error on failure.
// NOTE(review): fetching MAX(id) in a second query is racy under concurrent
// inserts; prefer the driver's LastInsertId when available.
func (*CollectionRepo) Add(collection *models.Collection) (int, error) {
	// fix: Query was used for the INSERT, leaking the returned *Rows and
	// keeping a connection checked out; Exec is correct for statements.
	_, err := database.DB.Exec("INSERT INTO collections VALUES (?,?, ?,?)", collection.Id, collection.Name, collection.Handle, collection.Thumbnail)
	if err != nil {
		fmt.Println(err)
		return 0, errors.New("collection bi trung ten anh oi")
	}
	rows, err := database.DB.Query("SELECT id FROM collections ORDER BY id DESC LIMIT 1")
	if err != nil {
		// fix: the query error was previously discarded with `_`.
		fmt.Println(err)
		return 0, err
	}
	defer rows.Close() // fix: the result set was never closed
	if rows.Next() {
		if err = rows.Scan(&collection.Id); err != nil {
			fmt.Println(err)
			return 0, err
		}
	}
	return int(collection.Id), nil
}
// Delete removes a collection and its product links.
func (*CollectionRepo) Delete(id int) error {
	// fix: Query was used for DELETE statements, leaking the *Rows handle;
	// Exec is correct for statements that return no rows.
	if _, err := database.DB.Exec("DELETE FROM collection_product WHERE collection_product.collection_id=?", id); err != nil {
		return err
	}
	if _, err := database.DB.Exec("DELETE FROM collections WHERE collections.id=?", id); err != nil {
		return err
	}
	return nil
}
// GetCollectionByHandle returns the collection with the given URL handle,
// or an error when the query fails or no row matches.
func (*CollectionRepo) GetCollectionByHandle(handle string) (models.Collection, error) {
	var collection models.Collection
	rows, err := database.DB.Query("SELECT * from collections WHERE handle = ?", handle)
	if err != nil {
		fmt.Println(err)
		return collection, err
	}
	defer rows.Close() // fix: the result set was never closed
	if !rows.Next() {
		return collection, errors.New("khong tim thay collection")
	}
	if err = rows.Scan(&collection.Id, &collection.Name, &collection.Handle, &collection.Thumbnail); err != nil {
		fmt.Println(err)
		return collection, err
	}
	return collection, nil
}
// AddProductToCollectionDb links a product to a collection in the
// collection_product join table.
func (*CollectionRepo) AddProductToCollectionDb(collection_id int, productId int) error {
	stmt, err := database.DB.Prepare(" INSERT INTO collection_product " +
		"(product_id,collection_id) " +
		"VALUES (?,?) ")
	if err != nil {
		return err
	}
	defer stmt.Close() // fix: the prepared statement was never closed
	if _, err = stmt.Exec(productId, collection_id); err != nil {
		return err
	}
	return nil
}
// RemoveProductFromCollection deletes every collection link for productId.
func (*CollectionRepo) RemoveProductFromCollection(productId int) error {
	// fix: Query was used for a DELETE, leaking the *Rows handle.
	if _, err := database.DB.Exec("DELETE FROM collection_product WHERE collection_product.product_id=?", productId); err != nil {
		return err
	}
	return nil
}
|
package memcache
import (
"bytes"
"github.com/valyala/ybc/bindings/go/ybc"
"testing"
"time"
)
// newCachingClientServerCache builds a CachingClient on top of a started
// client/server pair. Note the named results: in the := below, `s` and
// `cache` redeclare the named return values (short declarations in the
// same block reuse existing names), so they are returned as-is, while `c`
// is a new local that gets wrapped together with a separate local cache
// from newCache(t).
func newCachingClientServerCache(t *testing.T) (cc *CachingClient, s *Server, cache ybc.Cacher) {
	c, s, cache := newClientServerCache(t)
	c.Start()
	cc = &CachingClient{
		Client: c,
		Cache: newCache(t),
	}
	return
}
// verifyItem fails the test unless item carries the expected value and
// flags; location tags the failure message with the call site.
func verifyItem(item *Item, value []byte, flags uint32, location string, t *testing.T) {
	if got, want := item.Value, value; !bytes.Equal(got, want) {
		t.Fatalf("Unexpected item.Value=[%s]. Expected [%s]. location=[%s]", got, want, location)
	}
	if got, want := item.Flags, flags; got != want {
		t.Fatalf("Unexpected item.Flags=%d. Expected %d. location=[%s]", got, want, location)
	}
}
// TestCachingClient_SetGetDelete exercises the full CachingClient
// lifecycle: miss, set, get-with-revalidation, set with a validate TTL,
// get without revalidation, revalidation after TTL expiry, and delete.
func TestCachingClient_SetGetDelete(t *testing.T) {
	c, s, cache := newCachingClientServerCache(t)
	defer cache.Close()
	defer s.Stop()
	defer c.Cache.Close()
	defer c.Client.(Cacher).Stop()
	key := []byte("key")
	value := []byte("value")
	flags := uint32(23899)
	item := Item{
		Key:   key,
		Value: value,
		Flags: flags,
	}
	// Both local cache and server don't contain the item now.
	if err := c.Get(&item); err != ErrCacheMiss {
		t.Fatalf("Unexpected error returned from CachingClient.Get(): [%s]. Expected ErrCacheMiss", err)
	}
	// Server should contain the item after this call.
	if err := c.Set(&item); err != nil {
		t.Fatalf("Error in CachingClient.Set(): [%s]", err)
	}
	// Local cache should populate the item after this call.
	item.Value = nil
	item.Flags = 0
	if err := c.Get(&item); err != nil {
		t.Fatalf("Error in CachingClient.Get() when obtaining item from the server: [%s]", err)
	}
	verifyItem(&item, value, flags, "1", t)
	// The item should be returned from the local cache now
	// (with revalidation on the server).
	for i := 0; i < 10; i++ {
		item.Value = nil
		item.Flags = 0
		if err := c.Get(&item); err != nil {
			t.Fatalf("Error in CachingClient.Get() when revalidating locally cached item: [%s]", err)
		}
		verifyItem(&item, value, flags, "2", t)
	}
	// Store the item, which may be locally cached for extended period of time
	key = []byte("new_key")
	value = []byte("new_value")
	item.Key = key
	item.Value = value
	validateTtl := time.Millisecond * 100
	if err := c.SetWithValidateTtl(&item, validateTtl); err != nil {
		t.Fatalf("Error in CachingClient.SetWithValidateTtl(): [%s]", err)
	}
	// Local cache should populate the item after this call.
	item.Value = nil
	item.Flags = 0
	if err := c.Get(&item); err != nil {
		t.Fatalf("Error in CachingClient.Get() when obtaining item from the server: [%s]", err)
	}
	verifyItem(&item, value, flags, "3", t)
	// The item should be returned from the local cache now
	// (without revalidation on the server).
	for i := 0; i < 10; i++ {
		item.Value = nil
		item.Flags = 0
		if err := c.Get(&item); err != nil {
			t.Fatalf("Error in CachingClient.Get() when obtaining locally cached item without validation: [%s]", err)
		}
		verifyItem(&item, value, flags, "4", t)
	}
	// Sleep for short period of time and make sure item revalidation works
	time.Sleep(validateTtl + time.Millisecond*10)
	// The item should be returned from the local cache now
	// (with revalidation on the server during the first iteration).
	for i := 0; i < 10; i++ {
		item.Value = nil
		item.Flags = 0
		if err := c.Get(&item); err != nil {
			t.Fatalf("Error in CachingClient.Get() when obtaining locally cached item on iteration %d (iteration 0 incurs item revalidation): [%s]", i, err)
		}
		verifyItem(&item, value, flags, "5", t)
	}
	// Delete the item and make sure it is really deleted.
	if err := c.Delete(item.Key); err != nil {
		t.Fatalf("Error in CachingClient.Delete(): [%s]", err)
	}
	// Repeated gets must keep missing: neither the local cache nor the
	// server may resurrect a deleted item.
	for i := 0; i < 10; i++ {
		item.Value = nil
		item.Flags = 0
		if err := c.Get(&item); err != ErrCacheMiss {
			t.Fatalf("Unexpected error in CachingClient.Get() when trying to obtain deleted item: [%s]", err)
		}
	}
	if err := c.Delete(item.Key); err != ErrCacheMiss {
		t.Fatalf("Unexpected error in CachingClient.Delete() on already deleted item: [%s]", err)
	}
}
|
package models
import (
"github.com/jinzhu/gorm"
)
// GALLERY - ERRORS
// Validation errors returned by the gallery validator layer.
// NOTE(review): modelError is declared elsewhere in this package.
const (
	ErrAccountIDRequired modelError = "models: account ID is required"
	ErrTitleRequired     modelError = "models: title is required"
)

// Compile-time assertion that galleryGorm satisfies GalleryDB.
var _ GalleryDB = &galleryGorm{}
// Gallery is an account-owned collection of images. Images carries the
// gorm "-" tag, so it is not persisted by gorm and must be loaded
// separately.
type Gallery struct {
	gorm.Model
	AccountID uint    `gorm:"not_null;index"`
	Title     string  `gorm:"not_null"`
	Images    []Image `gorm:"-"`
}
// GalleryService is the public interface for working with galleries; it is
// currently just GalleryDB.
type GalleryService interface {
	GalleryDB
}

// GalleryDB describes the persistence operations for galleries.
type GalleryDB interface {
	ByID(id uint) (*Gallery, error)
	ByAccountID(accountID uint) ([]Gallery, error)
	Create(gallery *Gallery) error
	Update(gallery *Gallery) error
	Delete(id uint) error
}
// GALLERY - SERVICE
// galleryService exposes the layered GalleryDB chain as the service type.
type galleryService struct {
	GalleryDB
}

// GALLERY - VALIDATION
// galleryValidator validates inputs before delegating to the wrapped
// GalleryDB.
type galleryValidator struct {
	GalleryDB
}

// GALLERY - GORM
// galleryGorm is the gorm-backed GalleryDB implementation.
type galleryGorm struct {
	db *gorm.DB
}

// galleryValFn is a single validation step applied to a Gallery.
type galleryValFn func(*Gallery) error
// GALLERY - VALIDATION
// runGalleryValFns applies each validation function to gallery in order,
// stopping at (and returning) the first error.
func runGalleryValFns(gallery *Gallery, fns ...galleryValFn) error {
	for _, validate := range fns {
		if err := validate(gallery); err != nil {
			return err
		}
	}
	return nil
}
// GALLERY - VALIDATION
// accountIDRequired ensures the gallery is attached to an account.
func (mv *galleryValidator) accountIDRequired(m *Gallery) error {
	// AccountID is uint, so the original `<= 0` could only ever match 0;
	// compare against zero explicitly (staticcheck SA4003).
	if m.AccountID == 0 {
		return ErrAccountIDRequired
	}
	return nil
}
// GALLERY - VALIDATION
// titleRequired ensures the gallery has a non-empty title.
func (mv *galleryValidator) titleRequired(m *Gallery) error {
	if len(m.Title) == 0 {
		return ErrTitleRequired
	}
	return nil
}
// GALLERY - VALIDATION
// // categoryRequired
// // imageRequired
// GALLERY - VALIDATION - Create
// Create validates the gallery and then delegates to the wrapped GalleryDB.
func (mv *galleryValidator) Create(gallery *Gallery) error {
	if err := runGalleryValFns(gallery, mv.accountIDRequired, mv.titleRequired); err != nil {
		return err
	}
	return mv.GalleryDB.Create(gallery)
}
// GALLERY - VALIDATION - Update
// Update validates the gallery and then delegates to the wrapped GalleryDB.
func (mv *galleryValidator) Update(gallery *Gallery) error {
	if err := runGalleryValFns(gallery, mv.accountIDRequired, mv.titleRequired); err != nil {
		return err
	}
	return mv.GalleryDB.Update(gallery)
}
// GALLERY - VALIDATION - nonZeroID
// nonZeroID rejects galleries without a primary key.
func (mv *galleryValidator) nonZeroID(gallery *Gallery) error {
	// ID (from gorm.Model) is uint, so the original `<= 0` could only ever
	// match 0; compare against zero explicitly (staticcheck SA4003).
	if gallery.ID == 0 {
		return ErrIDInvalid
	}
	return nil
}
// GALLERY - VALIDATION - Delete
// Delete validates that id is non-zero, then delegates to the wrapped
// GalleryDB.
func (mv *galleryValidator) Delete(id uint) error {
	var gallery Gallery
	gallery.ID = id
	if err := runGalleryValFns(&gallery, mv.nonZeroID); err != nil {
		return err
	}
	return mv.GalleryDB.Delete(gallery.ID)
}
// // GALLERY - VALIDATION - categoryTattoo
// func (mv *galleryValidator) categoryTattoo(gallery *Gallery) error {
// if gallery.Category == "tattoo" {
// return ErrIDInvalid
// }
// return nil
// }
// GALLERY - GORM
// ByID looks up a single gallery by primary key via the shared first()
// helper.
func (mg *galleryGorm) ByID(id uint) (*Gallery, error) {
	var gallery Gallery
	if err := first(mg.db.Where("id = ?", id), &gallery); err != nil {
		return nil, err
	}
	return &gallery, nil
}
// GALLERY - GORM
// ByAccountID returns every gallery owned by the given account.
func (mg *galleryGorm) ByAccountID(accountID uint) ([]Gallery, error) {
	var galleries []Gallery
	err := mg.db.Where("account_id = ?", accountID).Find(&galleries).Error
	if err != nil {
		return nil, err
	}
	return galleries, nil
}
// GALLERY - GORM
// // ByCategory
// // ByTag
// // ByDateCreated
// GALLERY - GORM
// Create inserts the gallery row via gorm.
func (mg *galleryGorm) Create(gallery *Gallery) error {
	return mg.db.Create(gallery).Error
}
// GALLERY - GORM
// Update saves all gallery fields via gorm.
func (mg *galleryGorm) Update(gallery *Gallery) error {
	return mg.db.Save(gallery).Error
}
// GALLERY - GORM
// Delete removes the gallery row with the given primary key via gorm.
func (mg *galleryGorm) Delete(id uint) error {
	return mg.db.Delete(&Gallery{Model: gorm.Model{ID: id}}).Error
}
// GALLERY - SERVICE
// NewGalleryService wires the gorm-backed store behind the validation
// layer and returns it as a GalleryService.
func NewGalleryService(db *gorm.DB) GalleryService {
	store := &galleryGorm{db: db}
	validated := &galleryValidator{GalleryDB: store}
	return &galleryService{GalleryDB: validated}
}
// Render Image columns easier
func (m *Gallery) ImagesSplitN(n int) [][]Image {
// Create out 2D slice
ret := make([][]Image, n)
// Create the inner slices - we need N of them,
// and we will start them with a size of 0.
for i := 0; i < n; i++ {
ret[i] = make([]Image, 0)
}
// Iterate over our images, using the index % n
// to determine which of the slices in ret to add the image to.
for i, img := range m.Images {
// % is the remainder operator in Go
// eg:
// 0%3 = 0
// 1%3 = 1
// 2%3 = 2
// 3%3 = 0
// 4%3 = 1
// 5%3 = 2
bucket := i % n
ret[bucket] = append(ret[bucket], img)
}
return ret
}
|
package test_matchers
import (
"fmt"
"reflect"
"github.com/onsi/gomega/format"
"github.com/onsi/gomega/types"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
)
// PrometheusMetric returns a Gomega matcher that compares a
// prometheus.Metric by its Desc and its serialized dto.Metric protobuf.
func PrometheusMetric(expected prometheus.Metric) types.GomegaMatcher {
	expectedMetric := &dto.Metric{}
	// NOTE(review): the Write error is ignored; a failing Write leaves
	// expectedMetric zero-valued and the matcher compares against that.
	expected.Write(expectedMetric)
	return &PrometheusMetricMatcher{
		Desc: expected.Desc(),
		Metric: expectedMetric,
	}
}
// PrometheusMetricMatcher holds the expected metric description and its
// serialized protobuf form for comparison in Match.
type PrometheusMetricMatcher struct {
	Desc   *prometheus.Desc
	Metric *dto.Metric
}
// Match compares the actual prometheus.Metric against the expected Desc
// and serialized Metric protobuf.
func (matcher *PrometheusMetricMatcher) Match(actual interface{}) (success bool, err error) {
	metric, ok := actual.(prometheus.Metric)
	if !ok {
		return false, fmt.Errorf("PrometheusMetric matcher expects a prometheus.Metric")
	}
	actualMetric := &dto.Metric{}
	// fix: surface serialization failures instead of silently comparing an
	// empty protobuf.
	if err := metric.Write(actualMetric); err != nil {
		return false, err
	}
	// fix: reflect.DeepEqual on two strings is just ==; compare directly.
	if metric.Desc().String() != matcher.Desc.String() {
		return false, nil
	}
	return actualMetric.String() == matcher.Metric.String(), nil
}
// FailureMessage renders a diff-style message; when actual is not a
// prometheus.Metric it falls back to the generic Gomega format.
func (matcher *PrometheusMetricMatcher) FailureMessage(actual interface{}) (message string) {
	metric, ok := actual.(prometheus.Metric)
	if !ok {
		return format.Message(actual, "to equal", matcher)
	}
	actualMetric := &dto.Metric{}
	metric.Write(actualMetric)
	return format.Message(
		fmt.Sprintf("\n%s\nMetric{%s}", metric.Desc().String(), actualMetric.String()),
		"to equal",
		fmt.Sprintf("\n%s\nMetric{%s}", matcher.Desc.String(), matcher.Metric.String()),
	)
}
// NegatedFailureMessage renders the generic "not to equal" Gomega message.
func (matcher *PrometheusMetricMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "not to equal", matcher)
}
|
package main
import (
"fmt"
"encoding/json"
"net"
)
// Msgdata is the JSON payload received over UDP.
type Msgdata struct{
	MsgType uint32 `json:"msg_type"`
	// Data has no json tag, so it unmarshals from the key "Data"
	// (case-insensitive match).
	Data uint32
}
// main runs a simple UDP echo-to-stdout server on 127.0.0.1:8889,
// decoding each datagram as a Msgdata JSON payload.
func main() {
	fmt.Println("server running")
	localAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:8889")
	if err != nil {
		fmt.Println(err)
		// fix: previously fell through and passed a nil address to ListenUDP.
		return
	}
	l, err := net.ListenUDP("udp", localAddr)
	if err != nil {
		fmt.Println(err)
		// fix: previously fell through; the deferred l.Close() would have
		// panicked on a nil conn.
		return
	}
	defer l.Close()
	fmt.Println("listen udp come in")
	readMsg := make([]byte, 2048)
	for {
		fmt.Println("begin read")
		n, remoteAddr, err := l.ReadFromUDP(readMsg)
		if err != nil {
			fmt.Println(err, n)
			continue // fix: don't decode a datagram that was never read
		}
		fmt.Println(readMsg[:n])
		fmt.Println(remoteAddr)
		var msg Msgdata
		// fix: the Unmarshal error was silently ignored.
		if err := json.Unmarshal(readMsg[:n], &msg); err != nil {
			fmt.Println(err)
		}
		fmt.Println(msg)
	}
}
|
package middleware
import (
"net/http"
"net/http/httputil"
"strings"
"github.com/gin-gonic/gin"
)
// Context keys under which database connection pools are stored.
const (
	DBMongoPool = "db_mongo_pool"
	DBRedisPool = "redis_pool"
)

// Middleware bundles the gin middleware handlers of this package.
type Middleware struct{}
// HandleAuthLevel builds the handler chain for an endpoint based on its
// required auth level: 0 grants access to everyone, 1 requires a session.
func (m Middleware) HandleAuthLevel(auth int, endpoint gin.HandlerFunc) []gin.HandlerFunc {
	var chain []gin.HandlerFunc
	if auth == 1 { // session required
		chain = append(chain, m.VerifySession)
	}
	return append(chain, endpoint)
}
// Request validates that the incoming request can be dumped; intended as a
// request-logging middleware (the actual logging is not implemented yet).
func (m Middleware) Request(c *gin.Context) {
	if _, err := httputil.DumpRequest(c.Request, true); err != nil {
		// fix: c.JSON + return does not stop the chain in gin middleware;
		// Abort is required so later handlers do not run on a bad request.
		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "unable to parse request"})
		return
	}
	// prevent log binary file, eg. upload image file
	ct := c.Request.Header.Get("Content-Type")
	if strings.HasPrefix(ct, "application/json") {
		// TODO(review): empty placeholder — log the dumped JSON body here.
	}
	c.Next()
}
// VerifySession aborts the request with 403 unless a valid session exists.
// NOTE(review): the session check is a hard-coded placeholder (always
// false), so every request through this middleware is currently rejected —
// wire up the real redis-backed session lookup.
func (m Middleware) VerifySession(c *gin.Context) {
	// assume check session from redis
	session := false
	if !session {
		c.AbortWithStatusJSON(http.StatusForbidden, gin.H{"message": "you don't have authorize to call this method."})
		return
	}
	c.Next()
}
|
package 结构调整
// ----------------- 先标记,再删除 -----------------
// removeFlag is a sentinel value outside the problem's node-value range,
// used to mark a node for deletion during the first pass.
const removeFlag = 1000000000000
// removeLeafNodes (approach 1): first mark target leaves bottom-up, then
// prune all marked nodes in a second pass.
// NOTE(review): this file declares removeLeafNodes / isNeedToDelete /
// isLeaf twice (two solution variants); only one set can compile in a
// single package — confirm how these variants are meant to be built.
func removeLeafNodes(root *TreeNode, target int) *TreeNode {
	markRemovedLeafNodes(root, target)
	return removeMarkNode(root)
}
// removeMarkNode prunes every node whose value was set to removeFlag,
// returning the (possibly nil) remaining subtree.
func removeMarkNode(node *TreeNode) *TreeNode {
	if node == nil || node.Val == removeFlag {
		return nil
	}
	node.Left, node.Right = removeMarkNode(node.Left), removeMarkNode(node.Right)
	return node
}
// markRemovedLeafNodes walks the tree post-order and overwrites the value
// of every to-be-deleted leaf with removeFlag. Post-order matters: children
// are marked first, so a parent whose children were all marked counts as a
// leaf in isLeaf and can be marked in the same pass.
func markRemovedLeafNodes(root *TreeNode, target int) {
	if root == nil {
		return
	}
	markRemovedLeafNodes(root.Left, target)
	markRemovedLeafNodes(root.Right, target)
	if isNeedToDelete(root, target) {
		root.Val = removeFlag
	}
}
// isNeedToDelete reports whether root is an (effective) leaf carrying the
// target value and should therefore be removed.
func isNeedToDelete(root *TreeNode, target int) bool {
	return isLeaf(root) && root.Val == target
}
// isLeaf treats children that are nil OR already marked with removeFlag as
// absent, so parents of fully-marked subtrees count as leaves.
func isLeaf(root *TreeNode) bool {
	return (root.Left == nil || root.Left.Val == removeFlag) && (root.Right == nil || root.Right.Val == removeFlag)
}
// ----------------- 直接删除 -----------------
// removeLeafNodes (approach 2): prune directly during the post-order
// unwind — children are pruned first, so a parent that becomes a target
// leaf is removed on the way back up.
func removeLeafNodes(root *TreeNode, target int) *TreeNode {
	if root == nil {
		return nil
	}
	root.Left = removeLeafNodes(root.Left, target)
	root.Right = removeLeafNodes(root.Right, target)
	if isNeedToDelete(root, target) {
		return nil
	}
	return root
}
// isNeedToDelete reports whether root is a leaf carrying the target value.
func isNeedToDelete(root *TreeNode, target int) bool {
	return isLeaf(root) && root.Val == target
}
// isLeaf reports whether root has no children. (No sentinel handling is
// needed here because approach 2 prunes children before checking.)
func isLeaf(root *TreeNode) bool {
	return root.Left == nil && root.Right == nil
}
/*
题目链接: https://leetcode-cn.com/problems/delete-leaves-with-a-given-value/
*/
|
package command
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/jixwanwang/jixbot/channel"
)
// uptime implements the !uptime chat command, reporting how long the
// broadcaster has been live.
type uptime struct {
	cp        *CommandPool
	lastCheck time.Time
	upComm    *subCommand // parsed !uptime command definition
}
// Init configures the !uptime subcommand: no arguments, viewer-level
// access, 30s cooldown, with lastCalled primed 15s in the past so the
// command becomes usable shortly after startup.
func (T *uptime) Init() {
	T.upComm = &subCommand{
		command:    "!uptime",
		numArgs:    0,
		cooldown:   30 * time.Second,
		lastCalled: time.Now().Add(-15 * time.Second),
		clearance:  channel.VIEWER,
	}
}
// ID returns the unique identifier of this command.
func (T *uptime) ID() string {
	return "uptime"
}
// Response answers "!uptime" in chat with the broadcaster's live duration,
// or says they are offline. Whispers are ignored.
func (T *uptime) Response(username, message string, whisper bool) {
	if whisper {
		return
	}
	message = strings.TrimSpace(strings.ToLower(message))
	clearance := T.cp.channel.GetLevel(username)
	if _, err := T.upComm.parse(message, clearance); err != nil {
		// Not a valid !uptime invocation (or on cooldown / no clearance).
		return
	}
	if !T.cp.channel.Broadcaster.Online {
		T.cp.Say(fmt.Sprintf("%s isn't online.", T.cp.channel.GetChannelName()))
		return
	}
	liveFor := time.Now().UTC().Sub(T.cp.channel.Broadcaster.OnlineSince)
	minutes := int(liveFor.Minutes())
	T.cp.Say(fmt.Sprintf("%s hours, %s minutes", strconv.Itoa(minutes/60), strconv.Itoa(minutes%60)))
}
|
package data
import (
"errors"
"log"
"os"
"path"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/bgokden/go-cache"
"github.com/bgokden/veri/annoyindex"
pb "github.com/bgokden/veri/veriservice"
)
// DataSource abstracts a remote or local dataset that can be searched,
// inserted into, and described.
type DataSource interface {
	StreamSearch(datumList *pb.Datum, scoredDatumStream chan<- *pb.ScoredDatum, queryWaitGroup *sync.WaitGroup, config *pb.SearchConfig) error
	Insert(datum *pb.Datum, config *pb.InsertConfig) error
	GetDataInfo() *pb.DataInfo
	GetID() string
}
// Annoyer guards a snapshot of data used to (re)build an annoy index,
// together with the on-disk build file name.
type Annoyer struct {
	sync.RWMutex
	DataIndex     *[]*pb.Datum
	AnnoyIndex    annoyindex.AnnoyIndexAngular
	BuildFileName string
}
// Data represents a dataset with similar structure.
type Data struct {
	sync.RWMutex
	Config            *pb.DataConfig
	Path              string
	Avg               []float32 // running average vector of stored data
	N                 uint64    // number of items currently held locally
	MaxDistance       float64
	Hist              []float32 // distance histogram
	Timestamp         uint64
	LastRunTimestamp  uint64
	DBPath            string // on-disk location, derived from Config.Name
	Dirty             bool
	Sources           *cache.Cache // registered DataSource peers, keyed by ID
	QueryCache        *cache.Cache
	Initialized       bool
	Alive             bool // set false to stop the background Run loop
	Annoyer           Annoyer
	Runs              int32 // guard counter: only one Run loop at a time
	DBMap             sync.Map
	RecentInsertCount uint64
}
// GetConfig returns the dataset's configuration.
func (d *Data) GetConfig() *pb.DataConfig {
	return d.Config
}
// NewData creates a data struct rooted under dataPath/config.Name and
// immediately initializes it (starting the background Run loop).
func NewData(config *pb.DataConfig, dataPath string) (*Data, error) {
	dt := &Data{
		Config: config,
	}
	// log.Printf("Create Data\n")
	dt.DBPath = path.Join(dataPath, config.Name)
	dt.InitData()
	return dt, nil
}
// NewPreData creates a data struct without initializing it — no caches are
// built and no background loop is started until InitData is called.
func NewPreData(config *pb.DataConfig, dataPath string) *Data {
	dt := &Data{
		Config: config,
	}
	atomic.StoreUint64(&(dt.RecentInsertCount), 0)
	// log.Printf("Pre Create Data %v\n", dt.Config)
	dt.DBPath = path.Join(dataPath, config.Name)
	return dt
}
// InitData lazily initializes the source and query caches and starts the
// background Run loop. Safe to call repeatedly: only the first call under
// the lock performs initialization. Always returns nil.
func (dt *Data) InitData() error {
	log.Printf("Init Data %v\n", dt.Config)
	dt.Lock()
	defer dt.Unlock()
	// idiom fix: was `dt.Initialized == false` (staticcheck S1002).
	if !dt.Initialized {
		dt.Sources = cache.New(10*time.Minute, 1*time.Minute)
		dt.QueryCache = cache.New(5*time.Minute, 1*time.Minute)
		dt.Alive = true
		go dt.Run()
		dt.Initialized = true
	}
	return nil
}
// NewTempData returns an uninitialized in-memory Data instance configured
// with no target.
func NewTempData() (*Data, error) {
	cfg := &pb.DataConfig{NoTarget: true}
	return &Data{Config: cfg}, nil
}
// Close currently closes underlying kv store: it marks the dataset dead
// (stopping the Run loop), drains remaining local entries to registered
// sources by calling Process(true) until N reaches 0, and finally removes
// the on-disk path.
func (dt *Data) Close() error {
	dt.Alive = false
	if dt.Sources != nil && len(dt.Sources.Items()) > 0 {
		for dt.N > 0 {
			dt.Process(true)
		}
	}
	// return dt.DB.Close()
	dt.DeletePath()
	return nil
}
// DeletePath currently deletes the underlying data folder, deliberately
// ignoring errors. Always returns nil.
func (dt *Data) DeletePath() error {
	// dt.DB.Close()
	os.RemoveAll(dt.DBPath)
	return nil
}
// CheckIfActive reports whether this dataset holds data locally or any of
// its registered sources does.
func (dt *Data) CheckIfActive() bool {
	if dt.N > 0 {
		return true
	}
	if dt.Sources == nil || len(dt.Sources.Items()) == 0 {
		return false
	}
	// Assumption is that random map runs are random enough.
	for _, item := range dt.Sources.Items() {
		if item.Object.(DataSource).GetDataInfo().N > 0 {
			return true
		}
	}
	return false
}
// Run runs statistical calculation regularly: it calls Process roughly
// every 10 seconds while the dataset is alive, polling once per second so
// Alive=false is noticed promptly. Guarded by the Runs counter so only one
// loop executes per Data.
func (dt *Data) Run() error {
	if atomic.LoadInt32(&dt.Runs) >= 1 {
		log.Printf("Multiple Run calls detected.")
		return errors.New("Another instance of processor is running for data")
	}
	atomic.AddInt32(&dt.Runs, 1)
	defer atomic.AddInt32(&dt.Runs, -1)
	nextTime := getCurrentTime()
	gcCounter := 10 // NOTE(review): decremented below but never read or reset — dead state?
	for {
		if !dt.Alive {
			break
		}
		if nextTime <= getCurrentTime() {
			secondsToSleep := uint64(10) // increment this based on load
			dt.Process(false)
			nextTime = getCurrentTime() + secondsToSleep
			gcCounter--
		}
		time.Sleep(time.Duration(1000) * time.Millisecond)
	}
	return nil
}
// DataSourceDiffMap estimates, for up to 5 random sources, how many items
// should be pushed to each (keyed by source ID) and returns the map with
// the total. Sources that are statistically close to the local data get a
// diff of 1 (or 0 when the fraction is tiny).
// NOTE(review): localN-info.N is uint64 arithmetic, so when info.N > localN
// it wraps to a huge value rather than going negative (the comment below
// is misleading). diff is later clamped to 1 for that case, but `fraction`
// is computed from the wrapped value first — verify this ordering is
// intended.
func (dt *Data) DataSourceDiffMap() (map[string]uint64, uint64) {
	localInfo := dt.GetDataInfo()
	localN := localInfo.N
	diffMap := map[string]uint64{}
	sum := uint64(0)
	dt.RunOnRandomSources(5, func(source DataSource) error {
		info := source.GetDataInfo()
		if info != nil {
			diff := minUint64(((localN-info.N)/2)+1, 1000) // diff may be negative
			fraction := float64(diff) / float64(localN+1)
			if info.N > localN {
				diff = 1
			}
			if VectorDistance(localInfo.Avg, info.Avg)+VectorDistance(localInfo.Hist, info.Hist) <= 0.01*localInfo.GetMaxDistance() { // This is arbitary
				diff = 1 // close enough
				if fraction < 0.01 { // small or negative
					diff = 0
				}
			}
			diffMap[source.GetID()] = diff
			sum += diff
		}
		return nil
	})
	return diffMap, sum
}
func CheckIfUnkownError(err error) bool {
if strings.Contains(err.Error(), "Number of elements is over the target") || strings.Contains(err.Error(), "Node is in drain mode") {
return false
}
return true
}
// GetDataInfo snapshots the dataset's statistics and config into a
// pb.DataInfo for exchange with peers.
func (dt *Data) GetDataInfo() *pb.DataInfo {
	// log.Printf("Data: %v\n", dt)
	return &pb.DataInfo{
		Avg:               dt.Avg,
		N:                 dt.N,
		MaxDistance:       dt.MaxDistance,
		Hist:              dt.Hist,
		Timestamp:         dt.Timestamp,
		Version:           dt.Config.Version,
		Name:              dt.Config.Name,
		TargetN:           dt.Config.TargetN,
		TargetUtilization: dt.Config.TargetUtilization,
		NoTarget:          dt.Config.NoTarget,
	}
}
// AddSource registers a data source in the source cache under its ID,
// lazily initializing the cache if this Data was created via NewPreData.
func (dt *Data) AddSource(dataSource DataSource) error {
	if dataSource == nil {
		return errors.New("DataSource is nil")
	}
	if dt.Sources == nil {
		dt.InitData()
	}
	// InitData should have created the cache; bail out if it somehow didn't.
	if dt.Sources == nil {
		return errors.New("Sources is still nil")
	}
	return dt.Sources.Add(dataSource.GetID(), dataSource, cache.DefaultExpiration)
}
// GetID returns the dataset's name, which serves as its source ID.
func (dt *Data) GetID() string {
	return dt.Config.Name
}
// RunOnRandomSources runs sourceFunction on at most sourceLimit sources,
// relying on Go's random map iteration order for the random selection.
// Stops and returns the first error from sourceFunction.
func (dt *Data) RunOnRandomSources(sourceLimit int, sourceFunction func(dataSource DataSource) error) error {
	sourceList := dt.Sources.Items()
	for _, sourceItem := range sourceList { // Assumption is that random map runs are random enough
		// fix: the original `sourceLimit < 0` check broke one iteration too
		// late, processing sourceLimit+1 sources.
		if sourceLimit <= 0 {
			break
		}
		sourceLimit--
		source := sourceItem.Object.(DataSource)
		if err := sourceFunction(source); err != nil {
			return err
		}
	}
	return nil
}
|
package main
import "fmt"
// Demonstrates the ways a variadic function can be called: individual
// arguments, a spread slice, and no arguments at all.
func main() {
	fmt.Println(foo(1, 2))
	fmt.Println(foo(1, 2, 3))
	nums := []int{1, 2, 3, 4}
	fmt.Println(foo(nums...))
	fmt.Println(foo())
}
// foo returns the minimum of its arguments, or 0 when called with none.
func foo(n ...int) int {
	if len(n) == 0 {
		return 0
	}
	// fix: the minimum was seeded with the constant 1, so any call whose
	// values are all greater than 1 (e.g. foo(5, 7)) wrongly returned 1.
	min := n[0]
	for _, v := range n[1:] {
		if v < min {
			min = v
		}
	}
	return min
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package local
import (
"context"
"fmt"
"math"
"os"
"path"
"path/filepath"
"sync"
"testing"
"github.com/cockroachdb/pebble"
"github.com/cockroachdb/pebble/sstable"
"github.com/google/uuid"
"github.com/pingcap/tidb/br/pkg/lightning/backend"
"github.com/pingcap/tidb/br/pkg/lightning/common"
"github.com/pingcap/tidb/br/pkg/lightning/log"
"github.com/stretchr/testify/require"
)
// makePebbleDB opens a throwaway pebble DB under a per-test temp dir and
// returns it together with a directory created for SST files.
func makePebbleDB(t *testing.T, opt *pebble.Options) (*pebble.DB, string) {
	dir := t.TempDir()
	db, err := pebble.Open(path.Join(dir, "test"), opt)
	require.NoError(t, err)
	sstDir := filepath.Join(dir, "test.sst")
	require.NoError(t, os.Mkdir(sstDir, 0o755))
	return db, sstDir
}
// TestGetEngineSizeWhenImport verifies that getEngineFileSize reports
// IsImporting while the engine holds the import-state lock.
func TestGetEngineSizeWhenImport(t *testing.T) {
	opt := &pebble.Options{
		MemTableSize:             1024 * 1024,
		MaxConcurrentCompactions: 16,
		L0CompactionThreshold:    math.MaxInt32, // set to max try to disable compaction
		L0StopWritesThreshold:    math.MaxInt32, // set to max try to disable compaction
		DisableWAL:               true,
		ReadOnly:                 false,
	}
	db, tmpPath := makePebbleDB(t, opt)
	_, engineUUID := backend.MakeUUID("ww", 0)
	engineCtx, cancel := context.WithCancel(context.Background())
	f := &Engine{
		UUID:         engineUUID,
		sstDir:       tmpPath,
		ctx:          engineCtx,
		cancel:       cancel,
		sstMetasChan: make(chan metaOrFlush, 64),
		keyAdapter:   common.NoopKeyAdapter{},
		logger:       log.L(),
	}
	f.db.Store(db)
	// simulate import
	f.lock(importMutexStateImport)
	wg := sync.WaitGroup{}
	wg.Add(1)
	// Query the size from another goroutine while the import lock is held.
	go func() {
		defer wg.Done()
		engineFileSize := f.getEngineFileSize()
		require.Equal(t, f.UUID, engineFileSize.UUID)
		require.True(t, engineFileSize.IsImporting)
	}()
	wg.Wait()
	f.unlock()
	require.NoError(t, f.Close())
}
// TestIngestSSTWithClosedEngine verifies that ingesting SSTs succeeds on
// an open engine and fails with errorEngineClosed after Close.
func TestIngestSSTWithClosedEngine(t *testing.T) {
	opt := &pebble.Options{
		MemTableSize:             1024 * 1024,
		MaxConcurrentCompactions: 16,
		L0CompactionThreshold:    math.MaxInt32, // set to max try to disable compaction
		L0StopWritesThreshold:    math.MaxInt32, // set to max try to disable compaction
		DisableWAL:               true,
		ReadOnly:                 false,
	}
	db, tmpPath := makePebbleDB(t, opt)
	_, engineUUID := backend.MakeUUID("ww", 0)
	engineCtx, cancel := context.WithCancel(context.Background())
	f := &Engine{
		UUID:         engineUUID,
		sstDir:       tmpPath,
		ctx:          engineCtx,
		cancel:       cancel,
		sstMetasChan: make(chan metaOrFlush, 64),
		keyAdapter:   common.NoopKeyAdapter{},
		logger:       log.L(),
	}
	f.db.Store(db)
	f.sstIngester = dbSSTIngester{e: f}
	// Build a small SST file with 10 set-keys to ingest.
	sstPath := path.Join(tmpPath, uuid.New().String()+".sst")
	file, err := os.Create(sstPath)
	require.NoError(t, err)
	w := sstable.NewWriter(file, sstable.WriterOptions{})
	for i := 0; i < 10; i++ {
		require.NoError(t, w.Add(sstable.InternalKey{
			Trailer: uint64(sstable.InternalKeyKindSet),
			UserKey: []byte(fmt.Sprintf("key%d", i)),
		}, nil))
	}
	require.NoError(t, w.Close())
	// First ingest succeeds while the engine is open.
	require.NoError(t, f.ingestSSTs([]*sstMeta{
		{
			path: sstPath,
		},
	}))
	require.NoError(t, f.Close())
	// Second ingest must be rejected once the engine is closed.
	require.ErrorIs(t, f.ingestSSTs([]*sstMeta{
		{
			path: sstPath,
		},
	}), errorEngineClosed)
}
// TestGetFirstAndLastKey checks GetFirstAndLastKey over keys {a, c, e}
// for full-range, interior, overlapping, empty, and open-ended bounds.
func TestGetFirstAndLastKey(t *testing.T) {
	db, tmpPath := makePebbleDB(t, nil)
	f := &Engine{
		sstDir: tmpPath,
	}
	f.db.Store(db)
	err := db.Set([]byte("a"), []byte("a"), nil)
	require.NoError(t, err)
	err = db.Set([]byte("c"), []byte("c"), nil)
	require.NoError(t, err)
	err = db.Set([]byte("e"), []byte("e"), nil)
	require.NoError(t, err)
	// nil bounds mean the whole key space.
	first, last, err := f.GetFirstAndLastKey(nil, nil)
	require.NoError(t, err)
	require.Equal(t, []byte("a"), first)
	require.Equal(t, []byte("e"), last)
	first, last, err = f.GetFirstAndLastKey([]byte("b"), []byte("d"))
	require.NoError(t, err)
	require.Equal(t, []byte("c"), first)
	require.Equal(t, []byte("c"), last)
	first, last, err = f.GetFirstAndLastKey([]byte("b"), []byte("f"))
	require.NoError(t, err)
	require.Equal(t, []byte("c"), first)
	require.Equal(t, []byte("e"), last)
	// A range past all keys yields nil/nil without error.
	first, last, err = f.GetFirstAndLastKey([]byte("y"), []byte("z"))
	require.NoError(t, err)
	require.Nil(t, first)
	require.Nil(t, last)
	// An empty upper bound means "no upper bound".
	first, last, err = f.GetFirstAndLastKey([]byte("e"), []byte(""))
	require.NoError(t, err)
	require.Equal(t, []byte("e"), first)
	require.Equal(t, []byte("e"), last)
}
|
// See the accompanying README.
// The Go compiler is pretty cool/smart!
package main
import (
"fmt"
)
// Adds two constants and prints the result.
func main() {
	x, y := 20, 7
	fmt.Println(add(x, y))
}
// add returns the sum of x and y.
func add(x, y int) int {
	sum := x + y
	return sum
}
|
package migrations
import (
"github.com/go-pg/migrations"
log "github.com/sirupsen/logrus"
)
// Registers the 20171010114409_create-nut-plugin migration: the up step
// creates the core schema (users, logs, roles, policies, votes,
// attachments, leave_words, links, cards, friend_links) with their
// indexes; the down step drops the tables in reverse dependency order.
func init() {
	migrations.Register(func(db migrations.DB) error {
		log.Info("migrate 20171010114409_create-nut-plugin")
		_, err := db.Exec(`
CREATE TABLE users (
id BIGSERIAL PRIMARY KEY,
name VARCHAR(32) NOT NULL,
email VARCHAR(255) NOT NULL,
uid VARCHAR(36) NOT NULL,
password bytea,
provider_id VARCHAR(255) NOT NULL,
provider_type VARCHAR(32) NOT NULL,
logo VARCHAR(255),
sign_in_count INT NOT NULL DEFAULT 0,
current_sign_in_at TIMESTAMP WITHOUT TIME ZONE,
current_sign_in_ip INET,
last_sign_in_at TIMESTAMP WITHOUT TIME ZONE,
last_sign_in_ip INET,
confirmed_at TIMESTAMP WITHOUT TIME ZONE,
locked_at TIMESTAMP WITHOUT TIME ZONE,
created_at TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT now(),
updated_at TIMESTAMP WITHOUT TIME ZONE NOT NULL
);
CREATE UNIQUE INDEX idx_users_uid
ON users (uid);
CREATE UNIQUE INDEX idx_users_email
ON users (email);
CREATE UNIQUE INDEX idx_users_provider_id_type
ON users (provider_id, provider_type);
CREATE INDEX idx_users_name
ON users (name);
CREATE INDEX idx_users_provider_id
ON users (provider_id);
CREATE INDEX idx_users_provider_type
ON users (provider_type);
CREATE TABLE logs (
id BIGSERIAL PRIMARY KEY,
user_id BIGINT REFERENCES users NOT NULL,
ip INET NOT NULL,
message VARCHAR(255) NOT NULL,
created_at TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT now()
);
CREATE INDEX idx_logs_ip
ON logs (ip);
CREATE TABLE roles (
id BIGSERIAL PRIMARY KEY,
name VARCHAR(32) NOT NULL,
resource_id BIGINT NOT NULL,
resource_type VARCHAR(255) NOT NULL,
created_at TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT now(),
updated_at TIMESTAMP WITHOUT TIME ZONE NOT NULL
);
CREATE UNIQUE INDEX idx_roles_name_resource_type_id
ON roles (name, resource_type, resource_id);
CREATE INDEX idx_roles_name
ON roles (name);
CREATE INDEX idx_roles_resource_type
ON roles (resource_type);
CREATE TABLE policies (
id BIGSERIAL PRIMARY KEY,
user_id BIGINT REFERENCES users NOT NULL,
role_id BIGINT REFERENCES roles NOT NULL,
_begin DATE NOT NULL DEFAULT current_date,
_end DATE NOT NULL DEFAULT '2016-12-13',
created_at TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT now(),
updated_at TIMESTAMP WITHOUT TIME ZONE NOT NULL
);
CREATE UNIQUE INDEX idx_policies
ON policies (user_id, role_id);
CREATE TABLE votes (
id BIGSERIAL PRIMARY KEY,
resource_type VARCHAR(255) NOT NULL,
resource_id BIGINT NOT NULL,
point INT NOT NULL DEFAULT 0,
created_at TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT now(),
updated_at TIMESTAMP WITHOUT TIME ZONE NOT NULL
);
CREATE UNIQUE INDEX idx_votes_resources
ON votes (resource_type, resource_id);
CREATE INDEX idx_votes_resource_type
ON votes (resource_type);
CREATE TABLE attachments (
id BIGSERIAL PRIMARY KEY,
title VARCHAR(255) NOT NULL,
url VARCHAR(255) NOT NULL,
length INT NOT NULL,
media_type VARCHAR(32) NOT NULL,
resource_type VARCHAR(255) NOT NULL,
resource_id BIGINT NOT NULL,
user_id BIGINT REFERENCES users NOT NULL,
created_at TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT now(),
updated_at TIMESTAMP WITHOUT TIME ZONE NOT NULL
);
CREATE UNIQUE INDEX idx_attachments_url
ON attachments (url);
CREATE INDEX idx_attachments_title
ON attachments (title);
CREATE INDEX idx_attachments_resource_type
ON attachments (resource_type);
CREATE INDEX idx_attachments_media_type
ON attachments (media_type);
CREATE TABLE leave_words (
id BIGSERIAL PRIMARY KEY,
body TEXT NOT NULL,
type VARCHAR(8) NOT NULL DEFAULT 'markdown',
created_at TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT now()
);
CREATE TABLE links (
id BIGSERIAL PRIMARY KEY,
lang VARCHAR(8) NOT NULL,
href VARCHAR(255) NOT NULL,
label VARCHAR(255) NOT NULL,
loc VARCHAR(16) NOT NULL,
sort_order INT NOT NULL DEFAULT 0,
created_at TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT now(),
updated_at TIMESTAMP WITHOUT TIME ZONE NOT NULL
);
CREATE INDEX idx_links_loc_lang ON links (loc, lang);
CREATE INDEX idx_links_lang ON links (lang);
CREATE TABLE cards (
id BIGSERIAL PRIMARY KEY,
lang VARCHAR(8) NOT NULL,
title VARCHAR(255) NOT NULL,
summary VARCHAR(2048) NOT NULL,
type VARCHAR(8) NOT NULL DEFAULT 'markdown',
action VARCHAR(32) NOT NULL,
href VARCHAR(255) NOT NULL,
logo VARCHAR(255) NOT NULL,
loc VARCHAR(16) NOT NULL,
sort_order INT NOT NULL DEFAULT 0,
created_at TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT now(),
updated_at TIMESTAMP WITHOUT TIME ZONE NOT NULL
);
CREATE INDEX idx_cards_loc_lang ON cards (loc, lang);
CREATE INDEX idx_cards_lang ON cards (lang);
CREATE TABLE friend_links (
id BIGSERIAL PRIMARY KEY,
title VARCHAR(255) NOT NULL,
home VARCHAR(255) NOT NULL,
logo VARCHAR(255) NOT NULL,
sort_order INT NOT NULL DEFAULT 0,
created_at TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT now(),
updated_at TIMESTAMP WITHOUT TIME ZONE NOT NULL
);
`)
		return err
	}, func(db migrations.DB) error {
		log.Info("rollback 20171010114409_create-nut-plugin")
		// Drop order is the reverse of creation so foreign-key references
		// (policies -> roles/users, logs/attachments -> users) resolve.
		_, err := db.Exec(`
DROP TABLE friend_links;
DROP TABLE cards;
DROP TABLE links;
DROP TABLE leave_words;
DROP TABLE attachments;
DROP TABLE votes;
DROP TABLE policies;
DROP TABLE roles;
DROP TABLE logs;
DROP TABLE users;
`)
		return err
	})
}
|
// This file is subject to a 1-clause BSD license.
// Its contents can be found in the enclosed LICENSE file.
package evdev
import "unsafe"
// Relative events describe relative changes in a property.
// For example, a mouse may move to the left by a certain
// number of units, but its absolute position in space is unknown.
// If the absolute position is known, EvAbsolute codes should be used
// instead of EvRelative codes.
//
// RelWheel and RelHWheel are used for vertical and horizontal scroll
// wheels, respectively.
//
// The numeric values mirror the kernel's REL_* input event codes.
const (
	RelX      = 0x00
	RelY      = 0x01
	RelZ      = 0x02
	RelRX     = 0x03
	RelRY     = 0x04
	RelRZ     = 0x05
	RelHWheel = 0x06 // horizontal scroll wheel
	RelDial   = 0x07
	RelWheel  = 0x08 // vertical scroll wheel
	RelMisc   = 0x09
	RelMax    = 0x0f           // highest valid relative axis code
	RelCount  = RelMax + 1     // number of possible relative axis codes
)
// RelativeAxes returns a bitfield indicating which relative axes are
// supported by the device. Bit N is set when axis code N (RelX..RelMax)
// is supported.
//
// This is only applicable to devices with EvRelative event support.
func (d *Device) RelativeAxes() Bitset {
	bs := NewBitset(RelMax)
	buf := bs.Bytes()
	// NOTE(review): the ioctl result is ignored; if the query fails the
	// returned Bitset is simply all zeroes. Confirm callers accept that.
	ioctl(d.fd.Fd(), _EVIOCGBIT(EvRelative, len(buf)), unsafe.Pointer(&buf[0]))
	return bs
}
|
package types
import (
"fmt"
"strings"
blk "github.com/DynamoGraph/block"
param "github.com/DynamoGraph/dygparam"
slog "github.com/DynamoGraph/syslog"
"github.com/DynamoGraph/types/internal/db"
)
// logid prefixes every message this package writes to the system log.
const (
	logid = "types: "
)

// String aliases documenting what each cache key represents.
type Ty = string     // type name
type TyAttr = string // "<type>:<attr>" composite key (see genTyAttr)
type AttrTy = string // "<attr>#<type>" composite key

//type FacetIdent string // type:attr:facet

//
// Derived Type Attributes cache: maps a type name to the block of its
// attribute definitions.
//
type TyCache map[Ty]blk.TyAttrBlock

//
// Cache for a single type-attribute definition, keyed by
// "<type>:<attr>" (entries exist under both the long and short type name).
//
type TyAttrCache map[TyAttr]blk.TyAttrD // map[TyAttr]blk.TyItem

//TODO: create a cache for lookup via attribute long name to get type, type-short-name, attribute-short-name. This map will be used to support the Has function.
// AttrTyCache maps "<attr>#<type>" to the attribute's compressed
// identifier (populated from v.C in populateTyCaches).
type AttrTyCache map[AttrTy]string

//var TyAttrC TyAttrCache

//
// TypeCache bundles the package's caches. All entries are written once
// during SetGraph and are read-only afterwards.
//
type TypeCache struct {
	//sync.RWMutex // as all types are loaded at startup - no concurrency control required
	TyAttrC TyAttrCache
	TyC     TyCache
	AttrTy  AttrTyCache
}

var (
	// NOTE(review): package-level err is shadowed by := locals below;
	// it appears unused — confirm before removing.
	err error
	//
	graph     string            // active graph name, set by SetGraph
	TypeC     TypeCache         // global caches, populated by SetGraph
	tyShortNm map[string]string // type long name -> short name
)
// logerr records e in the system log. Passing true as the optional
// second argument logs the entry as fatal and then panics with e.
func logerr(e error, panic_ ...bool) {
	fatal := len(panic_) > 0 && panic_[0]
	if !fatal {
		slog.Log(logid, e.Error())
		return
	}
	slog.Log(logid, e.Error(), true)
	panic(e)
}
// GetTyShortNm looks up the short name registered for the given type
// long name. The boolean result reports whether an entry exists.
func GetTyShortNm(longNm string) (string, bool) {
	shortNm, found := tyShortNm[longNm]
	return shortNm, found
}
// GetTyLongNm performs the reverse lookup of GetTyShortNm: given a type
// short name it scans the long-name -> short-name map and returns the
// matching long name, or ("", false) when no entry matches.
func GetTyLongNm(tyNm string) (string, bool) {
	for longNm, shortNm := range tyShortNm {
		if shortNm == tyNm {
			return longNm, true
		}
	}
	return "", false
}
// syslog writes an informational message to the system log under this
// package's log id.
func syslog(s string) {
	slog.Log(logid, s)
}
// SetGraph selects the active graph and (re)initialises every package
// cache from the database: TypeC.TyC, TypeC.TyAttrC, TypeC.AttrTy and
// the long-name -> short-name map tyShortNm.
//
// It panics on any load failure, so it must be called once at startup
// before any other function in this package is used. After it returns,
// all caches are read-only and therefore safe for concurrent reads.
func SetGraph(graph_ string) {
	graph = graph_
	db.SetGraph(graph)
	//
	// cache holding the attributes belonging to a type
	///
	TypeC.TyC = make(TyCache)
	//
	// caches for type-attribute and type-attribute-facet
	//
	TypeC.TyAttrC = make(TyAttrCache)
	//
	TypeC.AttrTy = make(AttrTyCache)
	//
	tynames, err := db.GetTypeShortNames()
	if err != nil {
		panic(err)
	}
	if len(tynames) == 0 {
		panic(fmt.Errorf("No short name type data loaded"))
	}
	//
	// populate type short name cache. Concurrent-safe as it is read-only from now on.
	//
	tyShortNm = make(map[string]string)
	for _, v := range tynames {
		tyShortNm[v.LongNm] = v.ShortNm
	}
	//
	// Load data dictionary (i.e. ALL type info) - makes for concurrent safe FetchType()
	//
	{
		dd, err := db.LoadDataDictionary() // type TyIBlock []TyItem
		if err != nil {
			panic(err)
		}
		populateTyCaches(dd)
	}
}
// populateTyCaches fills TypeC.TyC, TypeC.TyAttrC and TypeC.AttrTy from
// the raw data-dictionary items in allTypes. It panics when an item has
// no DT (v.Ty) defined, when a type's short name is missing, or when any
// cache ends up empty.
func populateTyCaches(allTypes blk.TyIBlock) {
	var (
		tyNm  string
		a     blk.TyAttrD
		tc    blk.TyAttrBlock
		tyMap map[string]bool // set of distinct type names seen in allTypes
	)
	tyMap = make(map[string]bool)
	// genTyAttr builds the TyAttrC key: "<typeName>:<attrName>" e.g. Person:Age
	genTyAttr := func(ty string, attr string) TyAttr {
		var s strings.Builder
		s.WriteString(ty)
		s.WriteByte(':')
		s.WriteString(attr)
		return s.String()
	}
	// Strip everything up to and including the first "." from each type
	// name (writes the shortened name back into allTypes) and collect
	// the distinct type names.
	for k, v := range allTypes {
		tyNm = v.Nm[strings.Index(v.Nm, ".")+1:]
		v.Nm = tyNm
		if _, ok := tyMap[tyNm]; !ok {
			tyMap[tyNm] = true
		}
		allTypes[k] = v
	}
	// NOTE(review): the next two loops print unconditionally — they look
	// like debug leftovers; consider gating them behind param.DebugOn.
	for k, v := range tyMap {
		fmt.Println("tyMap: ", k, v)
	}
	for k, v := range allTypes {
		fmt.Printf("allTypes: %d %#v\n", k, v)
	}
	// Build the attribute block (tc) for each distinct type.
	for ty, _ := range tyMap {
		fmt.Println("load type data for ", ty)
		for _, v := range allTypes {
			// if not current ty then
			if v.Nm != ty {
				continue
			}
			//
			// key is "<attr>#<type>"; value is the compressed identifier.
			TypeC.AttrTy[v.Atr+"#"+v.Nm] = v.C // support attribute lookup for Has(<attribute>) function
			//
			// check DT is defined; for a UID attribute it carries the target type
			// fmt.Printf("DT:%#v \n", v)
			if len(v.Ty) == 0 {
				panic(fmt.Errorf("DT not defined for %#v", v))
			}
			//
			// "[X]" marks a 1:N uid-predicate; a bare known type name marks
			// a 1:1 uid-predicate; anything else is a scalar.
			//
			if v.Ty[0] == '[' {
				a = blk.TyAttrD{Name: v.Atr, DT: "Nd", C: v.C, Ty: v.Ty[1 : len(v.Ty)-1], P: v.P, Pg: v.Pg, N: v.N, IncP: v.IncP, Ix: v.Ix, Card: "1:N"}
			} else {
				// check if Ty is a known Type
				if _, ok := tyMap[v.Ty]; ok {
					a = blk.TyAttrD{Name: v.Atr, DT: "Nd", C: v.C, Ty: v.Ty, P: v.P, Pg: v.Pg, N: v.N, IncP: v.IncP, Ix: v.Ix, Card: "1:1"}
				} else {
					// scalar
					a = blk.TyAttrD{Name: v.Atr, DT: v.Ty, C: v.C, P: v.P, N: v.N, Pg: v.Pg, IncP: v.IncP, Ix: v.Ix}
				}
			}
			tc = append(tc, a)
			//
			// register the attribute under the long type name ...
			TypeC.TyAttrC[genTyAttr(ty, v.Atr)] = a
			// ... and under the short type name. Note: this local
			// tyShortNm intentionally shadows the package-level map.
			tyShortNm, ok := GetTyShortNm(ty)
			if !ok {
				panic(fmt.Errorf("Error in populateTyCaches: Type short name not found"))
			}
			TypeC.TyAttrC[genTyAttr(tyShortNm, v.Atr)] = a
			// fc, _ := FacetCache[tyAttr]
			// for _, vf := range v.F {
			// 	vfs := strings.Split(vf, "#")
			// 	if len(vfs) == 3 {
			// 		f := FacetTy{Name: vfs[0], DT: vfs[1], C: vfs[2]}
			// 		fc = append(fc, f)
			// 	} else {
			// 		panic(fmt.Errorf("%s", "Facet type information must contain 3 elements: <facetName>#<datatype>#<compressedIdentifer>"))
			// 	}
			// }
			// FacetCache[tyAttr] = fc
		}
		//
		TypeC.TyC[ty] = tc
		tc = nil // start a fresh block for the next type
	}
	if param.DebugOn {
		fmt.Println("==== TypeC.AttrTy")
		for k, v := range TypeC.AttrTy {
			fmt.Printf("%s shortName: %s\n", k, v)
		}
		fmt.Println("\n==== TypeC.TyC")
		for k, v := range TypeC.TyC {
			for _, v2 := range v {
				fmt.Printf("%s %#v\n", k, v2)
			}
		}
		fmt.Println("\n===== TypeC.TyAttrC")
		for k, v := range TypeC.TyAttrC {
			fmt.Printf("%s %#v\n", k, v)
		}
	}
	// confirm caches are populated
	if len(TypeC.TyC) == 0 {
		panic(fmt.Errorf("typeC.TyC is empty"))
	}
	if len(TypeC.AttrTy) == 0 {
		panic(fmt.Errorf("typeC.AttrTy is empty"))
	}
	if len(TypeC.TyAttrC) == 0 {
		panic(fmt.Errorf("typeC.TyAttrC is empty"))
	}
	//panic(fmt.Errorf("Testing load of DD"))
	fmt.Println("End populateTyCaches...")
}
// FetchType returns the attribute block cached for type ty. The input
// may be either the type's long name or its short name; short names are
// normalized to long names before the cache lookup. An error is
// returned when the name is unknown in either form or has no cache entry.
func FetchType(ty Ty) (blk.TyAttrBlock, error) {
	// GetTyShortNm succeeds only for long names; a miss means ty may be
	// a short name, which GetTyLongNm resolves back to the long form.
	if _, isLong := GetTyShortNm(ty); !isLong {
		longTy, known := GetTyLongNm(ty)
		if !known {
			return nil, fmt.Errorf("FetchType: error %q type not found or short name not defined", ty)
		}
		ty = longTy
	}
	if attrs, cached := TypeC.TyC[ty]; cached { // e.g. ty = Person
		return attrs, nil
	}
	return nil, fmt.Errorf("No type %q found", ty)
}
// IsScalarPred reports whether pred is defined as a scalar attribute
// (one with no target type) in at least one cached type.
//TODO: pass in Type so uid-pred is checked against type not whole data dictionary
func IsScalarPred(pred string) bool {
	for _, attrs := range TypeC.TyC {
		for _, attr := range attrs {
			if attr.Name != pred {
				continue
			}
			if len(attr.Ty) == 0 {
				// is a scalar in one type so presume its ok
				return true
			}
		}
	}
	return false
}
// IsUidPred reports whether pred is defined as a uid-predicate (an
// attribute with DT "Nd" and a non-empty target type) in at least one
// cached type.
//TODO: pass in Type so uid-pred is checked against type not whole data dictionary
func IsUidPred(pred string) bool {
	for _, attrs := range TypeC.TyC {
		for _, attr := range attrs {
			if attr.Name != pred || attr.DT != "Nd" {
				continue
			}
			if len(attr.Ty) > 0 {
				// is a uid-pred in one type so presume its ok
				return true
			}
		}
	}
	return false
}
// IsScalarInTy reports whether pred exists on type ty as a scalar
// attribute, i.e. the "<ty>:<pred>" cache entry exists and carries no
// target type.
//TODO: pass in Type so uid-pred is checked against type not whole data dictionary
func IsScalarInTy(ty string, pred string) bool {
	attr, found := TypeC.TyAttrC[ty+":"+pred]
	return found && len(attr.Ty) == 0
}
// IsUidPredInTy reports whether pred exists on type ty as a
// uid-predicate, i.e. the "<ty>:<pred>" cache entry exists and carries
// a non-empty target type.
//TODO: pass in Type so uid-pred is checked against type not whole data dictionary
func IsUidPredInTy(ty string, pred string) bool {
	attr, found := TypeC.TyAttrC[ty+":"+pred]
	return found && len(attr.Ty) != 0
}
|
package pathfileops
import (
"strings"
"testing"
)
// Relative path ending in a file name -> parent directory, isEmpty false.
func TestFileHelper_GetPathFromPathFileName_01(t *testing.T) {
	fh := FileHelper{}
	commonDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common\\xt_dirmgr_01_test.go")
	expectedDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common")
	result, isEmpty, err := fh.GetPathFromPathFileName(commonDir)
	if err != nil {
		t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir). commonDir='%v' Error='%v'", commonDir, err.Error())
	}
	if isEmpty != false {
		t.Errorf("Expected isEmpty GetPathFromPathFileName for valid file extension to return 'false', instead isEmpty='%v' ", isEmpty)
	}
	if result != expectedDir {
		t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid pathn/file name, instead got: %v", expectedDir, result)
	}
}

// Multi-level "..\\.." relative path with file name -> parent directory.
func TestFileHelper_GetPathFromPathFileName_02(t *testing.T) {
	fh := FileHelper{}
	commonDir := fh.AdjustPathSlash("..\\..\\pathfilego\\003_filehelper\\common\\xt_dirmgr_01_test.go")
	expectedDir := fh.AdjustPathSlash("..\\..\\pathfilego\\003_filehelper\\common")
	result, isEmpty, err := fh.GetPathFromPathFileName(commonDir)
	if err != nil {
		t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir). commonDir='%v' Error='%v'", commonDir, err.Error())
	}
	if isEmpty != false {
		t.Errorf("Expected isEmpty GetPathFromPathFileName for valid file extension to return 'false', instead isEmpty='%v' ", isEmpty)
	}
	if result != expectedDir {
		t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid path/file name, instead got: %v", expectedDir, result)
	}
}

// Absolute Windows path with file name -> parent directory.
func TestFileHelper_GetPathFromPathFileName_03(t *testing.T) {
	fh := FileHelper{}
	commonDir := fh.AdjustPathSlash("D:\\go\\work\\src\\MikeAustin71\\pathfilego\\" +
		"003_filehelper\\common\\xt_dirmgr_01_test.go")
	expectedDir := fh.AdjustPathSlash("D:\\go\\work\\src\\MikeAustin71\\pathfilego\\" +
		"003_filehelper\\common")
	result, isEmpty, err := fh.GetPathFromPathFileName(commonDir)
	if err != nil {
		t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir). commonDir='%v' Error='%v'", commonDir, err.Error())
	}
	if isEmpty != false {
		t.Errorf("Expected isEmpty GetPathFromPathFileName for valid file extension to return 'false', instead isEmpty='%v' ", isEmpty)
	}
	if result != expectedDir {
		t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid file name. Instead path='%v'", expectedDir, result)
	}
}

// Same input as _03; differs only in error-message formatting.
func TestFileHelper_GetPathFromPathFileName_04(t *testing.T) {
	fh := FileHelper{}
	commonDir := fh.AdjustPathSlash("D:\\go\\work\\src\\MikeAustin71\\pathfilego\\" +
		"003_filehelper\\common\\xt_dirmgr_01_test.go")
	expectedDir := fh.AdjustPathSlash("D:\\go\\work\\src\\MikeAustin71\\pathfilego\\" +
		"003_filehelper\\common")
	result, isEmpty, err := fh.GetPathFromPathFileName(commonDir)
	if err != nil {
		t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir). commonDir='%v' Error='%v'",
			commonDir, err.Error())
	}
	if isEmpty != false {
		t.Errorf("Expected isEmpty='%v', instead isEmpty='%v' ", false, isEmpty)
	}
	if result != expectedDir {
		t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid path/file name. "+
			"Instead path=='%v' ", expectedDir, result)
	}
}

// Bare file name with no directory -> empty path, isEmpty true, no error.
func TestFileHelper_GetPathFromPathFileName_05(t *testing.T) {
	fh := FileHelper{}
	commonDir := fh.AdjustPathSlash("xt_dirmgr_01_test.go")
	expectedDir := fh.AdjustPathSlash("")
	result, isEmpty, err := fh.GetPathFromPathFileName(commonDir)
	if err != nil {
		t.Errorf("Expected no error returned from fh.GetPathFromPathFileName(commonDir). "+
			"Instead an error WAS Returned. commonDir='%v' Error='%v'", commonDir, err.Error())
	}
	if isEmpty != true {
		t.Errorf("Expected isEmpty='%v', instead isEmpty='%v' ", true, isEmpty)
	}
	if result != expectedDir {
		t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid path/file name. "+
			"Instead path=='%v' ", expectedDir, result)
	}
}

// Directory path with trailing separator -> same path without the separator.
func TestFileHelper_GetPathFromPathFileName_06(t *testing.T) {
	fh := FileHelper{}
	commonDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common\\")
	expectedDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common")
	result, isEmpty, err := fh.GetPathFromPathFileName(commonDir)
	if err != nil {
		t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir). "+
			"commonDir='%v' Error='%v'", commonDir, err.Error())
	}
	if isEmpty != false {
		t.Errorf("Expected isEmpty GetPathFromPathFileName for valid file extension to return "+
			"'false', instead isEmpty='%v' ", isEmpty)
	}
	if result != expectedDir {
		t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid pathn/file "+
			"name, instead got: %v", expectedDir, result)
	}
}

// "./" -> "./" unchanged, isEmpty false.
func TestFileHelper_GetPathFromPathFileName_07(t *testing.T) {
	fh := FileHelper{}
	commonDir := fh.AdjustPathSlash("./")
	expectedDir := fh.AdjustPathSlash("./")
	result, isEmpty, err := fh.GetPathFromPathFileName(commonDir)
	if err != nil {
		t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir). "+
			"commonDir='%v' Error='%v'", commonDir, err.Error())
		return
	}
	if false != isEmpty {
		t.Errorf("Expected GetPathFromPathFileName isEmpty=='%v'. Instead, isEmpty='%v' ",
			false, isEmpty)
	}
	if result != expectedDir {
		t.Errorf("Expected GetPathFromPathFileName to return path == '%v'\n"+
			"for valid path/file name.\nInstead return path == '%v'\n",
			expectedDir, result)
	}
}

// "." -> "." unchanged, isEmpty false.
func TestFileHelper_GetPathFromPathFileName_08(t *testing.T) {
	fh := FileHelper{}
	commonDir := fh.AdjustPathSlash(".")
	expectedDir := fh.AdjustPathSlash(".")
	result, isEmpty, err := fh.GetPathFromPathFileName(commonDir)
	if err != nil {
		t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir). commonDir='%v' "+
			"Error='%v'", commonDir, err.Error())
	}
	if false != isEmpty {
		t.Errorf("Expected GetPathFromPathFileName isEmpty=='%v'. Instead, isEmpty='%v' ",
			false, isEmpty)
	}
	if result != expectedDir {
		t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid "+
			"path/file name, instead got: %v", expectedDir, result)
	}
}

// ".." -> ".." unchanged, isEmpty false.
func TestFileHelper_GetPathFromPathFileName_09(t *testing.T) {
	fh := FileHelper{}
	commonDir := fh.AdjustPathSlash("..")
	expectedDir := fh.AdjustPathSlash("..")
	result, isEmpty, err := fh.GetPathFromPathFileName(commonDir)
	if err != nil {
		t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir). commonDir='%v' Error='%v'",
			commonDir, err.Error())
	}
	if false != isEmpty {
		t.Errorf("Expected GetPathFromPathFileName isEmpty=='%v'. Instead, isEmpty='%v' ",
			false, isEmpty)
	}
	if result != expectedDir {
		t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid path/file "+
			"name, instead got: %v", expectedDir, result)
	}
}

// Empty string input -> error, empty result, isEmpty true.
func TestFileHelper_GetPathFromPathFileName_10(t *testing.T) {
	fh := FileHelper{}
	commonDir := fh.AdjustPathSlash("")
	expectedDir := fh.AdjustPathSlash("")
	result, isEmpty, err := fh.GetPathFromPathFileName(commonDir)
	if err == nil {
		t.Errorf("Expected error to be returned from fh.GetPathFromPathFileName(commonDir). "+
			"commonDir='%v' No Error Returned!", commonDir)
	}
	if true != isEmpty {
		t.Errorf("Expected GetPathFromPathFileName isEmpty=='%v'. Instead, isEmpty='%v' ",
			true, isEmpty)
	}
	if result != expectedDir {
		t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid pathn/file"+
			"name, instead got: %v", expectedDir, result)
	}
}

// "../../../" -> unchanged, isEmpty false.
func TestFileHelper_GetPathFromPathFileName_11(t *testing.T) {
	fh := FileHelper{}
	commonDir := fh.AdjustPathSlash("../../../")
	expectedDir := fh.AdjustPathSlash("../../../")
	result, isEmpty, err := fh.GetPathFromPathFileName(commonDir)
	if err != nil {
		t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir).\n"+
			"commonDir='%v'\nError='%v'\n",
			commonDir, err.Error())
		return
	}
	if false != isEmpty {
		t.Errorf("Expected GetPathFromPathFileName isEmpty=='%v'.\n"+
			"Instead, isEmpty='%v'\n",
			false, isEmpty)
	}
	if result != expectedDir {
		t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid path/file "+
			"name\n"+
			"Instead return path == '%v'\n",
			expectedDir, result)
	}
}

// "./<file>" -> "./", isEmpty false.
func TestFileHelper_GetPathFromPathFileName_12(t *testing.T) {
	fh := FileHelper{}
	commonDir := fh.AdjustPathSlash("./xt_dirmgr_01_test.go")
	expectedDir := fh.AdjustPathSlash("./")
	result, isEmpty, err := fh.GetPathFromPathFileName(commonDir)
	if err != nil {
		t.Errorf("Expected no error returned from fh.GetPathFromPathFileName(commonDir). "+
			"Instead an error WAS Returned. commonDir='%v' Error='%v'", commonDir, err.Error())
	}
	if isEmpty != false {
		t.Errorf("Expected isEmpty='%v', instead isEmpty='%v' ", false, isEmpty)
	}
	if result != expectedDir {
		t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid path/file"+
			"name. Instead path=='%v' ", expectedDir, result)
	}
}

// All-blank input -> error, empty result, isEmpty true.
func TestFileHelper_GetPathFromPathFileName_13(t *testing.T) {
	fh := FileHelper{}
	result, isEmpty, err := fh.GetPathFromPathFileName(" ")
	if err == nil {
		t.Error("Expected an error return from fh.GetPathFromPathFileName(\" \") " +
			"because the input parameter consists entirely of spaces. " +
			"However, NO ERROR WAS RETURNED!")
	}
	if isEmpty == false {
		t.Error("Expected isEmpty='true', instead isEmpty='false' ")
	}
	if result != "" {
		t.Errorf("Expected GetPathFromPathFileName to return path == 'empty string'. "+
			"Instead path=='%v' ", result)
	}
}

// Dot-file (".git") entry -> parent directory, isEmpty false.
func TestFileHelper_GetPathFromPathFileName_14(t *testing.T) {
	fh := FileHelper{}
	commonDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common\\.git")
	expectedDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common")
	result, isEmpty, err := fh.GetPathFromPathFileName(commonDir)
	if err != nil {
		t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir).\n"+
			"commonDir='%v'\nError='%v'", commonDir, err.Error())
	}
	if isEmpty != false {
		t.Errorf("Expected isEmpty GetPathFromPathFileName for valid file extension\n"+
			"to return 'false'. Instead isEmpty='%v'\n", isEmpty)
	}
	if result != expectedDir {
		t.Errorf("ERROR: Expected GetPathFromPathFileName to return "+
			"path == '%v' for valid path/file name.\n"+
			"Instead path == %v\n", expectedDir, result)
	}
}
// Path + file name -> both components returned, bothAreEmpty false.
func TestFileHelper_GetPathAndFileNameExt_01(t *testing.T) {
	fh := FileHelper{}
	commonDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common\\xt_dirmgr_01_test.go")
	expectedDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common")
	expectedFileNameExt := "xt_dirmgr_01_test.go"
	pathDir, fileNameExt, bothAreEmpty, err := fh.GetPathAndFileNameExt(commonDir)
	if err != nil {
		t.Errorf("Error returned from fh.GetPathAndFileNameExt(commonDir). commonDir='%v' "+
			"Error='%v'", commonDir, err.Error())
	}
	if false != bothAreEmpty {
		t.Errorf("Expected GetPathAndFileNameExt bothAreEmpty='%v'. Instead, bothAreEmpty='%v' ",
			false, bothAreEmpty)
	}
	if pathDir != expectedDir {
		t.Errorf("Expected GetPathAndFileNameExt to return path == '%v'. "+
			"Instead, path== '%v' ", expectedDir, pathDir)
	}
	if fileNameExt != expectedFileNameExt {
		t.Errorf("Expected GetPathAndFileNameExt to return fileNameExt == '%v'. Instead, "+
			"fileNameExt == '%v' ", expectedFileNameExt, fileNameExt)
	}
}

// Trailing-separator directory -> path only, empty file name component.
func TestFileHelper_GetPathAndFileNameExt_02(t *testing.T) {
	fh := FileHelper{}
	commonDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common\\")
	expectedDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common")
	expectedFileNameExt := ""
	pathDir, fileNameExt, bothAreEmpty, err := fh.GetPathAndFileNameExt(commonDir)
	if err != nil {
		t.Errorf("Error returned from fh.GetPathAndFileNameExt(commonDir). commonDir='%v' Error='%v'",
			commonDir, err.Error())
	}
	if false != bothAreEmpty {
		t.Errorf("Expected GetPathAndFileNameExt bothAreEmpty='%v'. Instead, bothAreEmpty='%v' ",
			false, bothAreEmpty)
	}
	if pathDir != expectedDir {
		t.Errorf("Expected GetPathAndFileNameExt to return path == '%v'. Instead, path== '%v' ",
			expectedDir, pathDir)
	}
	if fileNameExt != expectedFileNameExt {
		t.Errorf("Expected GetPathAndFileNameExt to return fileNameExt == '%v'. Instead, "+
			"fileNameExt == '%v' ", expectedFileNameExt, fileNameExt)
	}
}

// Final element without extension -> treated as the file name component.
func TestFileHelper_GetPathAndFileNameExt_03(t *testing.T) {
	fh := FileHelper{}
	commonDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common\\dirmgr_test")
	expectedDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common")
	expectedFileNameExt := "dirmgr_test"
	pathDir, fileNameExt, bothAreEmpty, err := fh.GetPathAndFileNameExt(commonDir)
	if err != nil {
		t.Errorf("Error returned from fh.GetPathAndFileNameExt(commonDir). commonDir='%v' "+
			"Error='%v'", commonDir, err.Error())
	}
	if false != bothAreEmpty {
		t.Errorf("Expected GetPathAndFileNameExt bothAreEmpty='%v'. Instead, bothAreEmpty='%v' ",
			false, bothAreEmpty)
	}
	if pathDir != expectedDir {
		t.Errorf("Expected GetPathAndFileNameExt to return path == '%v'. Instead, path== '%v' ",
			expectedDir, pathDir)
	}
	if fileNameExt != expectedFileNameExt {
		t.Errorf("Expected GetPathAndFileNameExt to return fileNameExt == '%v'. Instead, "+
			"fileNameExt == '%v' ", expectedFileNameExt, fileNameExt)
	}
}

// Bare file name -> empty path component, file name returned.
func TestFileHelper_GetPathAndFileNameExt_04(t *testing.T) {
	fh := FileHelper{}
	commonDir := fh.AdjustPathSlash("xt_dirmgr_01_test.go")
	expectedDir := fh.AdjustPathSlash("")
	expectedFileNameExt := "xt_dirmgr_01_test.go"
	pathDir, fileNameExt, bothAreEmpty, err := fh.GetPathAndFileNameExt(commonDir)
	if err != nil {
		t.Errorf("Error returned from fh.GetPathAndFileNameExt(commonDir). commonDir='%v' Error='%v'", commonDir, err.Error())
	}
	if false != bothAreEmpty {
		t.Errorf("Expected GetPathAndFileNameExt bothAreEmpty='%v'. Instead, bothAreEmpty='%v' ", false, bothAreEmpty)
	}
	if pathDir != expectedDir {
		t.Errorf("Expected GetPathAndFileNameExt to return path == '%v'. Instead, path== '%v' ", expectedDir, pathDir)
	}
	if fileNameExt != expectedFileNameExt {
		t.Errorf("Expected GetPathAndFileNameExt to return fileNameExt == '%v'. Instead, fileNameExt == '%v' ", expectedFileNameExt, fileNameExt)
	}
}

// Empty input -> error, all result components empty, bothAreEmpty true.
// NOTE(review): the fileNameExt message below says "pathDir='%v'" —
// stale copy/paste text, the value printed is fileNameExt.
func TestFileHelper_GetPathAndFileNameExt_05(t *testing.T) {
	fh := FileHelper{}
	pathDir, fileNameExt, bothAreEmpty, err := fh.GetPathAndFileNameExt("")
	if err == nil {
		t.Error("Expected error return from fh.GetPathAndFileNameExt(\"\") because " +
			"the input parameter is an empty string. " +
			"However, NO ERROR WAS RETURNED!")
	}
	if pathDir != "" {
		t.Errorf("Expected pathDir would be an empty string. Instead, pathDir='%v'",
			pathDir)
	}
	if fileNameExt != "" {
		t.Errorf("Expected fileNameExt would be an empty string. Instead, pathDir='%v'",
			fileNameExt)
	}
	if bothAreEmpty == false {
		t.Error("Expected bothAreEmpty='true'. Instead, bothArEmpty='false'. ")
	}
}

// All-blank input -> error, all result components empty, bothAreEmpty true.
func TestFileHelper_GetPathAndFileNameExt_06(t *testing.T) {
	fh := FileHelper{}
	pathDir, fileNameExt, bothAreEmpty, err := fh.GetPathAndFileNameExt(" ")
	if err == nil {
		t.Error("Expected error return from fh.GetPathAndFileNameExt(\" \") because " +
			"the input parameter consists entirely of blank spaces. " +
			"However, NO ERROR WAS RETURNED!")
	}
	if pathDir != "" {
		t.Errorf("Expected pathDir would be an empty string. Instead, pathDir='%v'",
			pathDir)
	}
	if fileNameExt != "" {
		t.Errorf("Expected fileNameExt would be an empty string. Instead, pathDir='%v'",
			fileNameExt)
	}
	if bothAreEmpty == false {
		t.Error("Expected bothAreEmpty='true'. Instead, bothArEmpty='false'. ")
	}
}
// Empty path string -> error and zero separator indexes.
func TestFileHelper_GetPathSeparatorIndexesInPathStr_01(t *testing.T) {
	fh := FileHelper{}
	idxs, err := fh.GetPathSeparatorIndexesInPathStr("")
	if err == nil {
		t.Error("Expected error return from fh.GetPathSeparatorIndexesInPathStr(\"\") " +
			"because the input parameter is an empty string. " +
			"However, NO ERROR WAS RETURNED!")
	}
	if len(idxs) != 0 {
		t.Errorf("Expected length of indexes='0'. Instead length of indexes='%v' ",
			len(idxs))
	}
}

// All-blank path string -> error and zero separator indexes.
func TestFileHelper_GetPathSeparatorIndexesInPathStr_02(t *testing.T) {
	fh := FileHelper{}
	idxs, err := fh.GetPathSeparatorIndexesInPathStr(" ")
	if err == nil {
		t.Error("Expected error return from fh.GetPathSeparatorIndexesInPathStr(\" \") " +
			"because the input parameter consists entirely of blank spaces. " +
			"However, NO ERROR WAS RETURNED!")
	}
	if len(idxs) != 0 {
		t.Errorf("Expected length of indexes='0'. Instead length of indexes='%v' ",
			len(idxs))
	}
}
// Example path forms exercised by the GetVolumeName tests:
// /d/gowork/src/MikeAustin71/pathfileopsgo/pathfileops
// D:\gowork\src\MikeAustin71\pathfileopsgo\pathfileops

// Empty input -> empty volume name.
func TestFileHelper_GetVolumeName_01(t *testing.T) {
	fh := FileHelper{}
	volumeName := fh.GetVolumeName("")
	if volumeName != "" {
		t.Errorf("Expected an empty string return from fh.GetVolumeName(\"\") because "+
			"the input parameter is an empty string. Instead, the return value='%v' ", volumeName)
	}
}

// All-blank input -> empty volume name.
func TestFileHelper_GetVolumeName_02(t *testing.T) {
	fh := FileHelper{}
	volumeName := fh.GetVolumeName(" ")
	if volumeName != "" {
		t.Errorf("Expected an empty string return from fh.GetVolumeName(\"\") because "+
			"the input parameter consists of blank spaces. Instead, the return value='%v' ", volumeName)
	}
}

// Full Windows path -> "D:" (compared case-insensitively via ToLower).
func TestFileHelper_GetVolumeName_03(t *testing.T) {
	fh := FileHelper{}
	testVolStr := "D:\\gowork\\src\\MikeAustin71\\pathfileopsgo\\pathfileops"
	expectedVolName := strings.ToLower("D:")
	volumeName := fh.GetVolumeName(testVolStr)
	if expectedVolName != strings.ToLower(volumeName) {
		t.Errorf("Expected volumeName='%v'. Instead, volName='%v' ",
			expectedVolName, strings.ToLower(volumeName))
	}
}

// Drive root "D:\" -> "D:".
func TestFileHelper_GetVolumeName_04(t *testing.T) {
	fh := FileHelper{}
	testVolStr := "D:\\"
	expectedVolName := strings.ToLower("D:")
	volumeName := fh.GetVolumeName(testVolStr)
	if expectedVolName != strings.ToLower(volumeName) {
		t.Errorf("Expected volumeName='%v'. Instead, volName='%v' ",
			expectedVolName, strings.ToLower(volumeName))
	}
}

// Bare drive designator "D:" -> "D:".
func TestFileHelper_GetVolumeName_05(t *testing.T) {
	fh := FileHelper{}
	testVolStr := "D:"
	expectedVolName := strings.ToLower("D:")
	volumeName := fh.GetVolumeName(testVolStr)
	if expectedVolName != strings.ToLower(volumeName) {
		t.Errorf("Expected volumeName='%v'. Instead, volName='%v' ",
			expectedVolName, strings.ToLower(volumeName))
	}
}
// Relative path -> not absolute.
func TestFileHelper_IsAbsolutePath_01(t *testing.T) {
	fh := FileHelper{}
	commonDir := fh.AdjustPathSlash("../filesfortest/levelfilesfortest/level_01_dir/level_02_dir/" +
		"level_03_dir/level_3_1_test.txt")
	result := fh.IsAbsolutePath(commonDir)
	if result == true {
		t.Error("IsAbsolutePath result is INVALID. Relative path classified as Absolute path!")
	}
}

// Fully-qualified drive path -> absolute.
func TestFileHelper_IsAbsolutePath_02(t *testing.T) {
	fh := FileHelper{}
	absPathDir := fh.AdjustPathSlash("D:/gowork/src/MikeAustin71/pathfileopsgo/filesfortest/" +
		"levelfilesfortest/level_01_dir/level_02_dir/level_03_dir/level_3_1_test.txt")
	result := fh.IsAbsolutePath(absPathDir)
	if result == false {
		t.Error("IsAbsolutePath result is INVALID. Absolute path classified as Relative Path!")
	}
}

// Empty string -> not absolute.
func TestFileHelper_IsAbsolutePath_03(t *testing.T) {
	fh := FileHelper{}
	absPathDir := ""
	result := fh.IsAbsolutePath(absPathDir)
	if result == true {
		t.Error("Expected a return value of 'false' from fh.IsAbsolutePath(absPathDir) because\n" +
			"'absPathDir' is an empty string. However, the returned value was 'true'. ERROR!\n")
	}
}
func TestFileHelper_IsPathFileString_01(t *testing.T) {
fh := FileHelper{}
pathFile := fh.AdjustPathSlash("../filesfortest/levelfilesfortest/level_01_dir/" +
"level_02_dir/level_03_dir/level_3_1_test.txt")
expectedPathFile := fh.AdjustPathSlash("..\\filesfortest\\levelfilesfortest\\level_01_dir\\" +
"level_02_dir\\level_03_dir\\level_3_1_test.txt")
pathFileType, absolutePath, err := fh.IsPathFileString(pathFile)
if err != nil {
t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+
"pathFile='%v' Error='%v' ", pathFile, err.Error())
return
}
if pathFileType != PathFileType.PathFile() {
t.Errorf("Expected PathFileTypeCode='PathFile'. Instead, PathFileTypeCode='%v' ",
pathFileType.String())
}
absExpectedPathFile, err := fh.MakeAbsolutePath(expectedPathFile)
if err != nil {
t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPathFile). "+
"expectedPathFile='%v' Error='%v' ", expectedPathFile, err.Error())
}
if absExpectedPathFile != absolutePath {
t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.",
absExpectedPathFile, absolutePath)
}
}
func TestFileHelper_IsPathFileString_02(t *testing.T) {
fh := FileHelper{}
pathFile := fh.AdjustPathSlash("../filesfortest/levelfilesfortest/level_01_dir/" +
"level_02_dir/level_03_dir/iDoNotExist.txt")
expectedPathFile := fh.AdjustPathSlash("..\\filesfortest\\levelfilesfortest\\level_01_dir\\" +
"level_02_dir\\level_03_dir\\iDoNotExist.txt")
pathFileType, absolutePath, err := fh.IsPathFileString(pathFile)
if err != nil {
t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+
"pathFile='%v' Error='%v' ", pathFile, err.Error())
return
}
if pathFileType != PathFileType.PathFile() {
t.Errorf("Expected PathFileTypeCode='PathFile'. Instead, PathFileTypeCode='%v' ",
pathFileType.String())
}
absExpectedPathFile, err := fh.MakeAbsolutePath(expectedPathFile)
if err != nil {
t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPathFile). "+
"expectedPathFile='%v' Error='%v' ", expectedPathFile, err.Error())
}
if absExpectedPathFile != absolutePath {
t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.",
absExpectedPathFile, absolutePath)
}
}
func TestFileHelper_IsPathFileString_03(t *testing.T) {
fh := FileHelper{}
pathFile := fh.AdjustPathSlash("../filesfortest/levelfilesfortest/level_01_dir/" +
"level_02_dir/level_03_dir")
expectedPathFile := fh.AdjustPathSlash("..\\filesfortest\\levelfilesfortest\\level_01_dir\\" +
"level_02_dir\\level_03_dir")
pathFileType, absolutePath, err := fh.IsPathFileString(pathFile)
if err != nil {
t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+
"pathFile='%v' Error='%v' ", pathFile, err.Error())
return
}
if pathFileType != PathFileType.Path() {
t.Errorf("Expected PathFileTypeCode='PathFile'. Instead, PathFileTypeCode='%v' ",
pathFileType.String())
}
absExpectedPathFile, err := fh.MakeAbsolutePath(expectedPathFile)
if err != nil {
t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPathFile). "+
"expectedPathFile='%v' Error='%v' ", expectedPathFile, err.Error())
}
if absExpectedPathFile != absolutePath {
t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.",
absExpectedPathFile, absolutePath)
}
}
func TestFileHelper_IsPathFileString_04(t *testing.T) {
fh := FileHelper{}
pathFile := fh.AdjustPathSlash("../filesfortest/levelfilesfortest/level_01_dir/" +
"level_02_dir/iDoNotExist")
expectedPathFile := fh.AdjustPathSlash("..\\filesfortest\\levelfilesfortest\\level_01_dir\\" +
"level_02_dir\\iDoNotExist")
pathFileType, absolutePath, err := fh.IsPathFileString(pathFile)
if err != nil {
t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+
"pathFile='%v' Error='%v' ", pathFile, err.Error())
return
}
expectedFileType := PathFileType.Indeterminate()
if expectedFileType != pathFileType {
t.Errorf("Expected PathFileTypeCode='%v'. Instead, PathFileTypeCode='%v' "+
"testFilePathStr='%v' ", expectedFileType.String(), pathFileType.String(), absolutePath)
}
absExpectedPathFile, err := fh.MakeAbsolutePath(expectedPathFile)
if err != nil {
t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPathFile). "+
"expectedPathFile='%v' Error='%v' ", expectedPathFile, err.Error())
}
if absExpectedPathFile != absolutePath {
t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.",
absExpectedPathFile, absolutePath)
}
}
// TestFileHelper_IsPathFileString_05 checks that a bare Windows drive letter
// ("D:") is classified as a Volume; path comparison is case-insensitive.
func TestFileHelper_IsPathFileString_05(t *testing.T) {
	fh := FileHelper{}
	pathFile := fh.AdjustPathSlash("D:")
	expectedPathFile := fh.AdjustPathSlash("D:")
	pathFileType, absolutePath, err := fh.IsPathFileString(pathFile)
	if err != nil {
		t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+
			"pathFile='%v' Error='%v' ", pathFile, err.Error())
		return
	}
	expectedFileType := PathFileType.Volume()
	if expectedFileType != pathFileType {
		t.Errorf("Expected PathFileTypeCode='%v'. Instead, PathFileTypeCode='%v' "+
			"testFilePathStr='%v' ", expectedFileType.String(), pathFileType.String(), absolutePath)
	}
	absExpectedPathFile := strings.ToLower(expectedPathFile)
	absolutePath = strings.ToLower(absolutePath)
	if absExpectedPathFile != absolutePath {
		t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.",
			absExpectedPathFile, absolutePath)
	}
}

// TestFileHelper_IsPathFileString_06 checks that a drive root ("D:\") is
// classified as a Path; path comparison is case-insensitive.
func TestFileHelper_IsPathFileString_06(t *testing.T) {
	fh := FileHelper{}
	pathFile := fh.AdjustPathSlash("D:\\")
	expectedPathFile := fh.AdjustPathSlash("D:\\")
	pathFileType, absolutePath, err := fh.IsPathFileString(pathFile)
	if err != nil {
		t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+
			"pathFile='%v' Error='%v' ", pathFile, err.Error())
		return
	}
	expectedFileType := PathFileType.Path()
	if expectedFileType != pathFileType {
		t.Errorf("Expected PathFileTypeCode='%v'. Instead, PathFileTypeCode='%v' "+
			"testFilePathStr='%v' ", expectedFileType.String(), pathFileType.String(), absolutePath)
	}
	absExpectedPathFile := strings.ToLower(expectedPathFile)
	absolutePath = strings.ToLower(absolutePath)
	if absExpectedPathFile != absolutePath {
		t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.",
			absExpectedPathFile, absolutePath)
	}
}
// TestFileHelper_IsPathFileString_07 checks that a non-existent name with a
// file extension is classified as a File.
func TestFileHelper_IsPathFileString_07(t *testing.T) {
	fh := FileHelper{}
	pathFile := fh.AdjustPathSlash("fileIDoNotExist.txt")
	expectedPathFile := fh.AdjustPathSlash("fileIDoNotExist.txt")
	pathFileType, absolutePath, err := fh.IsPathFileString(pathFile)
	if err != nil {
		t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+
			"pathFile='%v' Error='%v' ", pathFile, err.Error())
		return
	}
	expectedFileType := PathFileType.File()
	if expectedFileType != pathFileType {
		t.Errorf("Expected PathFileTypeCode='%v'. Instead, PathFileTypeCode='%v' "+
			"testFilePathStr='%v' ", expectedFileType.String(), pathFileType.String(), absolutePath)
	}
	absExpectedPathFile, err := fh.MakeAbsolutePath(expectedPathFile)
	if err != nil {
		t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPathFile). "+
			"expectedPathFile='%v' Error='%v' ", expectedPathFile, err.Error())
	}
	if absExpectedPathFile != absolutePath {
		t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.",
			absExpectedPathFile, absolutePath)
	}
}

// TestFileHelper_IsPathFileString_08 checks that a non-existent name without
// an extension is still classified as a File.
func TestFileHelper_IsPathFileString_08(t *testing.T) {
	fh := FileHelper{}
	pathFile := fh.AdjustPathSlash("fileIDoNotExist")
	expectedPathFile := fh.AdjustPathSlash("fileIDoNotExist")
	pathFileType, absolutePath, err := fh.IsPathFileString(pathFile)
	if err != nil {
		t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+
			"pathFile='%v' Error='%v' ", pathFile, err.Error())
		return
	}
	expectedFileType := PathFileType.File()
	if expectedFileType != pathFileType {
		t.Errorf("Expected PathFileTypeCode='%v'. Instead, PathFileTypeCode='%v' "+
			"testFilePathStr='%v' ", expectedFileType.String(), pathFileType.String(), absolutePath)
	}
	absExpectedPathFile, err := fh.MakeAbsolutePath(expectedPathFile)
	if err != nil {
		t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPathFile). "+
			"expectedPathFile='%v' Error='%v' ", expectedPathFile, err.Error())
	}
	if absExpectedPathFile != absolutePath {
		t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.",
			absExpectedPathFile, absolutePath)
	}
}
// TestFileHelper_IsPathFileString_09 checks that ".." is classified as a Path.
func TestFileHelper_IsPathFileString_09(t *testing.T) {
	fh := FileHelper{}
	pathFile := fh.AdjustPathSlash("..")
	expectedPathFile := fh.AdjustPathSlash("..")
	pathFileType, absolutePath, err := fh.IsPathFileString(pathFile)
	if err != nil {
		t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+
			"pathFile='%v' Error='%v' ", pathFile, err.Error())
		return
	}
	expectedFileType := PathFileType.Path()
	if expectedFileType != pathFileType {
		t.Errorf("Expected PathFileTypeCode='%v'. Instead, PathFileTypeCode='%v' "+
			"testFilePathStr='%v' ", expectedFileType.String(), pathFileType.String(), absolutePath)
	}
	absExpectedPathFile, err := fh.MakeAbsolutePath(expectedPathFile)
	if err != nil {
		t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPathFile). "+
			"expectedPathFile='%v' Error='%v' ", expectedPathFile, err.Error())
	}
	if absExpectedPathFile != absolutePath {
		t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.",
			absExpectedPathFile, absolutePath)
	}
}

// TestFileHelper_IsPathFileString_10 checks that "." is classified as a Path.
func TestFileHelper_IsPathFileString_10(t *testing.T) {
	fh := FileHelper{}
	pathFile := fh.AdjustPathSlash(".")
	expectedPathFile := fh.AdjustPathSlash(".")
	pathFileType, absolutePath, err := fh.IsPathFileString(pathFile)
	if err != nil {
		t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+
			"pathFile='%v' Error='%v' ", pathFile, err.Error())
		return
	}
	expectedFileType := PathFileType.Path()
	if expectedFileType != pathFileType {
		t.Errorf("Expected PathFileTypeCode='%v'. Instead, PathFileTypeCode='%v' "+
			"testFilePathStr='%v' ", expectedFileType.String(), pathFileType.String(), absolutePath)
	}
	absExpectedPathFile, err := fh.MakeAbsolutePath(expectedPathFile)
	if err != nil {
		t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPathFile). "+
			"expectedPathFile='%v' Error='%v' ", expectedPathFile, err.Error())
	}
	if absExpectedPathFile != absolutePath {
		t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.",
			absExpectedPathFile, absolutePath)
	}
}
// TestFileHelper_IsPathFileString_11 expects an error for an empty string.
func TestFileHelper_IsPathFileString_11(t *testing.T) {
	fh := FileHelper{}
	pathFile := ""
	_, _, err := fh.IsPathFileString(pathFile)
	if err == nil {
		t.Error("Expected an error return from fh.IsPathFileString(pathFile) " +
			"because 'pathFile' is an empty string. However, NO ERROR WAS RETURNED! ")
	}
}

// TestFileHelper_IsPathFileString_12 expects an error for a blank string.
func TestFileHelper_IsPathFileString_12(t *testing.T) {
	fh := FileHelper{}
	pathFile := "   "
	_, _, err := fh.IsPathFileString(pathFile)
	if err == nil {
		t.Error("Expected an error return from fh.IsPathFileString(pathFile) " +
			"because 'pathFile' consists of blank spaces. However, NO ERROR WAS RETURNED! ")
	}
}

// TestFileHelper_IsPathFileString_13 expects an error for a path containing
// a 3-dot ("...") segment.
func TestFileHelper_IsPathFileString_13(t *testing.T) {
	fh := FileHelper{}
	pathFile := "..\\...\\"
	_, _, err := fh.IsPathFileString(pathFile)
	if err == nil {
		t.Error("Expected an error return from fh.IsPathFileString(pathFile) " +
			"because 'pathFile' 3-dots ('...'). However, NO ERROR WAS RETURNED! ")
	}
}

// TestFileHelper_IsPathFileString_14 expects an error for a path containing
// a 4-dot ("....") segment.
func TestFileHelper_IsPathFileString_14(t *testing.T) {
	fh := FileHelper{}
	pathFile := "....\\"
	_, _, err := fh.IsPathFileString(pathFile)
	if err == nil {
		t.Error("Expected an error return from fh.IsPathFileString(pathFile) " +
			"because 'pathFile' 4-dots ('....'). However, NO ERROR WAS RETURNED! ")
	}
}
// TestFileHelper_IsPathFileString_15 checks that ".\" is classified as a Path.
func TestFileHelper_IsPathFileString_15(t *testing.T) {
	fh := FileHelper{}
	pathFile := fh.AdjustPathSlash(".\\")
	expectedPathFile := fh.AdjustPathSlash(".\\")
	pathFileType, absolutePath, err := fh.IsPathFileString(pathFile)
	if err != nil {
		t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+
			"pathFile='%v' Error='%v' ", pathFile, err.Error())
		return
	}
	expectedFileType := PathFileType.Path()
	if expectedFileType != pathFileType {
		t.Errorf("Expected PathFileTypeCode='%v'. Instead, PathFileTypeCode='%v' "+
			"testFilePathStr='%v' ", expectedFileType.String(), pathFileType.String(), absolutePath)
	}
	absExpectedPathFile, err := fh.MakeAbsolutePath(expectedPathFile)
	if err != nil {
		t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPathFile). "+
			"expectedPathFile='%v' Error='%v' ", expectedPathFile, err.Error())
	}
	if absExpectedPathFile != absolutePath {
		t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.",
			absExpectedPathFile, absolutePath)
	}
}

// TestFileHelper_IsPathFileString_16 checks that "..\..\..\" is classified as
// a Path.
func TestFileHelper_IsPathFileString_16(t *testing.T) {
	fh := FileHelper{}
	pathFile := fh.AdjustPathSlash("..\\..\\..\\")
	expectedPathFile := fh.AdjustPathSlash("..\\..\\..\\")
	pathFileType, absolutePath, err := fh.IsPathFileString(pathFile)
	if err != nil {
		t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+
			"pathFile='%v' Error='%v' ", pathFile, err.Error())
		return
	}
	expectedFileType := PathFileType.Path()
	if expectedFileType != pathFileType {
		t.Errorf("Expected PathFileTypeCode='%v'. Instead, PathFileTypeCode='%v' "+
			"testFilePathStr='%v' ", expectedFileType.String(), pathFileType.String(), absolutePath)
	}
	absExpectedPathFile, err := fh.MakeAbsolutePath(expectedPathFile)
	if err != nil {
		t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPathFile). "+
			"expectedPathFile='%v' Error='%v' ", expectedPathFile, err.Error())
	}
	if absExpectedPathFile != absolutePath {
		t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.",
			absExpectedPathFile, absolutePath)
	}
}
// TestFileHelper_IsPathString_01 checks that "..\..\..\" is recognized as a
// path with no ambiguity (cannotDetermine == false).
func TestFileHelper_IsPathString_01(t *testing.T) {
	fh := FileHelper{}
	pathFile := fh.AdjustPathSlash("..\\..\\..\\")
	expectedPathStr := fh.AdjustPathSlash("..\\..\\..\\")
	isPath, cannotDetermine, testPathStr, err := fh.IsPathString(pathFile)
	if err != nil {
		t.Errorf("Error returned from fh.IsPathString(pathFile). "+
			"pathFile='%v' Error='%v' ", pathFile, err.Error())
	}
	if true != isPath {
		t.Errorf("Expected isPath='%v'. Instead, isPath='%v' "+
			"testPathStr='%v' ", true, isPath, testPathStr)
	}
	if expectedPathStr != testPathStr {
		t.Errorf("Error: Expected 'expectedPathStr'='%v'. Instead, 'expectedPathStr='%v'.",
			expectedPathStr, testPathStr)
	}
	if false != cannotDetermine {
		t.Errorf("Error: Expected 'cannotDetermine'='%v'. Instead, 'cannotDetermine'='%v' ",
			false, cannotDetermine)
	}
}

// TestFileHelper_IsPathString_02 checks that an existing directory path is
// recognized as a path with no ambiguity.
func TestFileHelper_IsPathString_02(t *testing.T) {
	fh := FileHelper{}
	pathFile := fh.AdjustPathSlash("../filesfortest/levelfilesfortest/level_01_dir/" +
		"level_02_dir/level_03_dir")
	expectedPathStr := fh.AdjustPathSlash("..\\filesfortest\\levelfilesfortest\\level_01_dir\\" +
		"level_02_dir\\level_03_dir")
	isPath, cannotDetermine, testPathStr, err := fh.IsPathString(pathFile)
	if err != nil {
		t.Errorf("Error returned from fh.IsPathString(pathFile). "+
			"pathFile='%v' Error='%v' ", pathFile, err.Error())
	}
	if true != isPath {
		t.Errorf("Expected isPath='%v'. Instead, isPath='%v' "+
			"testPathStr='%v' ", true, isPath, testPathStr)
	}
	if expectedPathStr != testPathStr {
		t.Errorf("Error: Expected 'expectedPathStr'='%v'. Instead, 'expectedPathStr='%v'.",
			expectedPathStr, testPathStr)
	}
	if false != cannotDetermine {
		t.Errorf("Error: Expected 'cannotDetermine'='%v'. Instead, 'cannotDetermine'='%v' ",
			false, cannotDetermine)
	}
}
// TestFileHelper_IsPathString_03 checks that a non-existent final segment
// makes the result ambiguous: isPath == false, cannotDetermine == true.
func TestFileHelper_IsPathString_03(t *testing.T) {
	fh := FileHelper{}
	pathFile := fh.AdjustPathSlash("../filesfortest/levelfilesfortest/level_01_dir/" +
		"level_02_dir/iDoNotExist")
	expectedPathStr := fh.AdjustPathSlash("..\\filesfortest\\levelfilesfortest\\level_01_dir\\" +
		"level_02_dir\\iDoNotExist")
	isPath, cannotDetermine, testPathStr, err := fh.IsPathString(pathFile)
	if err != nil {
		t.Errorf("Error returned from fh.IsPathString(pathFile). "+
			"pathFile='%v' Error='%v' ", pathFile, err.Error())
	}
	if false != isPath {
		t.Errorf("Expected isPath='%v'. Instead, isPath='%v' "+
			"testPathStr='%v' ", false, isPath, testPathStr)
	}
	if expectedPathStr != testPathStr {
		t.Errorf("Error: Expected 'expectedPathStr'='%v'. Instead, 'expectedPathStr='%v'.",
			expectedPathStr, testPathStr)
	}
	if true != cannotDetermine {
		t.Errorf("Error: Expected 'cannotDetermine'='%v'. Instead, 'cannotDetermine'='%v' ",
			true, cannotDetermine)
	}
}

// TestFileHelper_IsPathString_04 expects an error for an empty string.
func TestFileHelper_IsPathString_04(t *testing.T) {
	fh := FileHelper{}
	pathFile := ""
	_, _, _, err := fh.IsPathString(pathFile)
	if err == nil {
		t.Errorf("Expected an error return from fh.IsPathString(pathFile) " +
			"because 'pathFile' is an empty string. " +
			"However, NO ERROR WAS RETURNED!")
	}
}

// TestFileHelper_IsPathString_05 expects an error for a blank string.
func TestFileHelper_IsPathString_05(t *testing.T) {
	fh := FileHelper{}
	pathFile := "     "
	_, _, _, err := fh.IsPathString(pathFile)
	if err == nil {
		t.Errorf("Expected an error return from fh.IsPathString(pathFile) " +
			"because 'pathFile' consists entirely of blank spaces. " +
			"However, NO ERROR WAS RETURNED!")
	}
}

// TestFileHelper_IsPathString_06 expects an error when prefixing "." turns
// the leading ".." into a forbidden "..." sequence.
func TestFileHelper_IsPathString_06(t *testing.T) {
	fh := FileHelper{}
	pathFile := fh.AdjustPathSlash("../filesfortest/levelfilesfortest/level_01_dir/" +
		"level_02_dir/level_03_dir")
	pathFile = "." + pathFile
	_, _, _, err := fh.IsPathString(pathFile)
	if err == nil {
		t.Errorf("Expected an error return from fh.IsPathString(pathFile) " +
			"because 'pathFile' includes the text '...' . " +
			"However, NO ERROR WAS RETURNED!")
	}
}
|
package hasp
import (
"fmt"
"strings"
"github.com/rmcsoft/chanim"
"github.com/sirupsen/logrus"
)
// isTransitFrameSeries reports whether the series holds transition frames.
// Transition series follow the naming scheme "<AnimationName>_entry" (frames
// played when entering the animation) or "<AnimationName>_exit" (frames
// played when leaving it).
func isTransitFrameSeries(frameSeries chanim.FrameSeries) bool {
	name := frameSeries.Name
	return strings.HasSuffix(name, "_entry") || strings.HasSuffix(name, "_exit")
}
// isAnimationFrameSeries reports whether frameSeries holds regular animation
// frames, i.e. it is not an "_entry"/"_exit" transition series.
func isAnimationFrameSeries(frameSeries chanim.FrameSeries) bool {
	return !isTransitFrameSeries(frameSeries)
}
// getExitFrameSeries returns the "<animation>_exit" transition series for the
// given animation, or nil when no such series exists.
func getExitFrameSeries(animation chanim.Animation, allFrameSeries []chanim.FrameSeries) *chanim.FrameSeries {
	wanted := animation.Name + "_exit"
	for i := range allFrameSeries {
		if allFrameSeries[i].Name == wanted {
			return &allFrameSeries[i]
		}
	}
	return nil
}
// getEntryFrameSeries returns the "<animation>_entry" transition series for
// the given animation, or nil when no such series exists.
func getEntryFrameSeries(animation chanim.Animation, allFrameSeries []chanim.FrameSeries) *chanim.FrameSeries {
	wanted := animation.Name + "_entry"
	for i := range allFrameSeries {
		if allFrameSeries[i].Name == wanted {
			return &allFrameSeries[i]
		}
	}
	return nil
}
// getAnimationFrames returns the frames of the series the animation refers to
// (the Frames slice itself, not a copy), or nil when the series is not found.
func getAnimationFrames(animation chanim.Animation, allFrameSeries []chanim.FrameSeries) []chanim.Frame {
	for i := range allFrameSeries {
		if allFrameSeries[i].Name == animation.FrameSeriesName {
			return allFrameSeries[i].Frames
		}
	}
	return nil
}
// createAnimations builds one animation per non-transition frame series. Each
// animation is named after its frame series. A series without frames is an
// error.
func createAnimations(allFrameSeries []chanim.FrameSeries) (chanim.Animations, error) {
	animations := make(chanim.Animations, 0)
	for _, series := range allFrameSeries {
		if !isAnimationFrameSeries(series) {
			continue
		}
		if len(series.Frames) == 0 {
			return nil, fmt.Errorf("Animation '%s' has no frame", series.Name)
		}
		// The name of the animation matches the name of its frame series.
		animations = append(animations, chanim.Animation{
			Name:            series.Name,
			FrameSeriesName: series.Name,
		})
	}
	return animations, nil
}
// createTransitionBetween builds the transition from animation 'from' to
// animation 'to'. When only an exit or only an entry series exists, that
// series drives the transition; when both exist, a combined series (exit
// frames followed by entry frames) is created and appended to *allFrameSeries.
//
// Bug fix: the combined frame list was previously built with
// append(exitFrameSeries.Frames, entryFrameSeries.Frames...), which writes the
// entry frames into the exit series' backing array whenever that slice has
// spare capacity — silently corrupting the exit series. A fresh slice is now
// allocated and both frame lists are copied into it.
func createTransitionBetween(from chanim.Animation, to chanim.Animation, allFrameSeries *[]chanim.FrameSeries) chanim.Transition {
	exitFrameSeries := getExitFrameSeries(from, *allFrameSeries)
	entryFrameSeries := getEntryFrameSeries(to, *allFrameSeries)
	if exitFrameSeries == nil && entryFrameSeries == nil {
		// No transition frames at all: jump straight to the destination.
		return chanim.Transition{
			DestAnimationName: to.Name,
		}
	}
	if exitFrameSeries != nil && entryFrameSeries == nil {
		return chanim.Transition{
			DestAnimationName: to.Name,
			FrameSeriesName:   exitFrameSeries.Name,
		}
	}
	if exitFrameSeries == nil && entryFrameSeries != nil {
		return chanim.Transition{
			DestAnimationName: to.Name,
			FrameSeriesName:   entryFrameSeries.Name,
		}
	}
	// exitFrameSeries != nil && entryFrameSeries != nil:
	// the transition requires a new series. Copy into a fresh slice so the
	// exit series' backing array is never mutated.
	frames := make([]chanim.Frame, 0, len(exitFrameSeries.Frames)+len(entryFrameSeries.Frames))
	frames = append(frames, exitFrameSeries.Frames...)
	frames = append(frames, entryFrameSeries.Frames...)
	newFrameSeries := chanim.FrameSeries{
		Name:   fmt.Sprintf("%s -> %s", from.Name, to.Name),
		Frames: frames,
	}
	*allFrameSeries = append(*allFrameSeries, newFrameSeries)
	return chanim.Transition{
		DestAnimationName: to.Name,
		FrameSeriesName:   newFrameSeries.Name,
	}
}
// makeTransitionsFrom builds one transition from 'from' to every other
// animation in the list (never to itself).
func makeTransitionsFrom(from chanim.Animation, animations chanim.Animations, allFrameSeries *[]chanim.FrameSeries) []chanim.Transition {
	transitions := make([]chanim.Transition, 0, len(animations)-1)
	for _, dest := range animations {
		if dest.Name == from.Name {
			continue
		}
		transitions = append(transitions, createTransitionBetween(from, dest, allFrameSeries))
	}
	return transitions
}
// initTransitionFrames attaches each animation's outgoing transitions to that
// animation's first and last frame. It relies on getAnimationFrames returning
// the series' Frames slice itself, so writes through the frame pointers below
// are visible in allFrameSeries. The (possibly grown) slice is returned
// because makeTransitionsFrom may append new combined transition series.
// NOTE(review): assumes every animation has at least one frame — guaranteed
// for series built by createAnimations; a missing series would make
// animationFrames nil and panic at index 0.
func initTransitionFrames(animations chanim.Animations, allFrameSeries []chanim.FrameSeries) []chanim.FrameSeries {
	for _, animation := range animations {
		animationFrames := getAnimationFrames(animation, allFrameSeries)
		transitions := makeTransitionsFrom(animation, animations, &allFrameSeries)
		// All animations have two transitional frames - the first and the last
		firstTransitFrame := &animationFrames[0]
		firstTransitFrame.Transitions = transitions
		secondTransitFrame := &animationFrames[len(animationFrames)-1]
		secondTransitFrame.Transitions = transitions
	}
	return allFrameSeries
}
// CreateAnimator creates an animator: it loads every frame series from
// frameSeriesPath, builds one animation per regular (non-transition) series,
// wires the transition frames between animations, and constructs the
// chanim.Animator from the result.
func CreateAnimator(paintEngine chanim.PaintEngine, frameSeriesPath string) (*chanim.Animator, error) {
	logrus.Debug("Loading frames")
	allFrameSeries, err := LoadFrameSeries(frameSeriesPath)
	if err != nil {
		return nil, err
	}
	logrus.Debug("Creating animations")
	animations, err := createAnimations(allFrameSeries)
	if err != nil {
		return nil, err
	}
	logrus.Debug("Initializing transition frames")
	// initTransitionFrames may append combined transition series, so the
	// returned slice must replace the original.
	allFrameSeries = initTransitionFrames(animations, allFrameSeries)
	logrus.Debug("Making animator")
	return chanim.NewAnimator(paintEngine, animations, allFrameSeries)
}
|
package build
import (
"bytes"
"io"
"time"
)
func send(w io.Writer, msg []byte) error {
_, err := w.Write(msg)
return err
}
func respond(w io.Writer, msg string) error {
buff := &bytes.Buffer{}
buff.WriteString(time.Now().Format("2006/01/02 15:04:05.000000000 MST"))
buff.WriteString(": ")
buff.WriteString(msg)
buff.WriteString("\n")
return send(w, buff.Bytes())
}
|
package main
import "fmt"
// Person holds a name and an age in years. Both fields are unexported, so the
// type can only be populated from within this package.
type Person struct {
	name string
	age  int
}

// created: 2019/7/15 13:06
// By Will Fan
// main demonstrates the ways of creating Person values: zero value plus field
// assignment, new(), a positional literal, a named-field literal, and the
// address of a literal.
func main() {
	var x Person
	x.age = 12
	p := new(Person)
	p.age = 12
	y := Person{"bob", 13}
	jim := Person{name: "Jim"}
	pjohn := &Person{name: "John"}
	fmt.Println(y, jim, pjohn)
}
|
package security
import (
"testing"
)
// TestStringCamelCase verifies ToCamelCaseSpaced against a table of
// input/expected pairs covering punctuation, digits, and initialisms.
//
// Fixes: the failure message named a non-existent function ("ToCamelSpaced"),
// and the function under test was called twice per case; the result is now
// computed once. Redundant element types in the slice literal are dropped
// (go vet composite-literal simplification).
func TestStringCamelCase(t *testing.T) {
	for _, v := range [][]string{
		{"AppleFish", "Apple Fish"},
		{"A fish head", "A Fish Head"},
		{"A.fish-head_cat", "A Fish Head Cat"},
		{"AteFishToday...Fun", "Ate Fish Today Fun"},
		{"Address[3].Suburb", "Address 3 Suburb"},
		{"StudentUUID", "Student UUID"},
		{"eat3cat4", "Eat 3 Cat 4"},
		{"myURL", "My URL"},
		{"go2House", "Go 2 House"},
		{"uuid to int", "UUID To Int"},
		{"end 22 end", "End 22 End"},
	} {
		got := ToCamelCaseSpaced(v[0], " ")
		if got != v[1] {
			t.Fatalf("ToCamelCaseSpaced(%q) failed. Returned: %s Expected: %s", v[0], got, v[1])
		}
	}
}
|
package main
import "fmt"
// main prints the dynamic values and dynamic types of two interface variables
// and then demonstrates a variadic interface{} call.
func main() {
	i, v := interface{}("Hello"), interface{}(34)
	fmt.Printf("%#v %#v\n", i, v)
	fmt.Printf("%T %T\n", i, v)
	disp(4, "Hye", true)
}
// disp prints all of its arguments on one line as a slice, e.g. "[4 Hye true]".
func disp(i ...interface{}) {
	fmt.Println(i)
}
|
package main
import "github.com/sashko/go-uinput"
// keyboardExample demonstrates the go-uinput keyboard API: it types a capital
// "G" (Shift held around the key press) followed by an "o".
// NOTE(review): errors from KeyDown/KeyPress/KeyUp are ignored and the error
// from CreateKeyboard is silently swallowed — acceptable for an example only.
func keyboardExample() {
	keyboard, err := uinput.CreateKeyboard()
	if err != nil {
		return
	}
	defer keyboard.Close()
	// Press left Shift key, press G, release Shift key
	keyboard.KeyDown(uinput.KeyLeftShift)
	keyboard.KeyPress(uinput.KeyG)
	keyboard.KeyUp(uinput.KeyLeftShift)
	// Press O key
	keyboard.KeyPress(uinput.KeyO)
}
|
package command
import (
"github.com/codegangsta/cli"
)
// Flags defines the command-line flags accepted by the CLI: the secrets
// provider, the environment/section to parse, the secrets file (or a literal
// YAML string), -D variable substitutions, and keys to ignore.
var Flags = []cli.Flag{
	cli.StringFlag{
		Name:  "p, provider",
		Usage: "Path to provider for fetching secrets",
	},
	cli.StringFlag{
		Name:  "e, environment",
		Usage: "Specify section/environment to parse from secrets.yaml",
	},
	cli.StringFlag{
		Name:  "f",
		Value: "secrets.yml",
		Usage: "Path to secrets.yml",
	},
	cli.StringSliceFlag{
		Name:  "D",
		Value: &cli.StringSlice{},
		Usage: "var=value causes substitution of value to $var",
	},
	cli.StringFlag{
		Name:  "yaml",
		Usage: "secrets.yml as a literal string",
	},
	cli.StringSliceFlag{
		Name:  "ignore, i",
		Value: &cli.StringSlice{},
		// Fix: help text previously read "if is isn't ... doesn’t" — a typo
		// plus a non-ASCII apostrophe.
		Usage: "Ignore the specified key if it isn't accessible or doesn't exist",
	},
}
|
package main
// User-visible responses produced by the unlock command.
const (
	UNLOCK_CODE_SHOULD_BE_SPECIFIED = "Unlock code is required"
	UNLOCK_CODE_IS_INVALID          = "Sorry, that code is invalid"
	UNLOCK_CODE_IS_ALREADY_USED     = "Sorry, that code has already been used"
	UNLOCK_SUCCESSFUL               = "Great! Your account is unlocked."
)

// UnlockCommand implements the "unlock" chat command. It is stateless; all
// dependencies arrive through the CommandExecutor passed to Execute.
type UnlockCommand struct {
}

// Name returns the command keyword users type to invoke this command.
func (cmd *UnlockCommand) Name() string {
	return "unlock"
}
// Execute validates the supplied unlock code for the requesting user, marks
// the code as used, and unlocks the user's Strava account. The string return
// is the user-facing reply; a non-nil error indicates an infrastructure
// failure rather than a user mistake.
func (cmd *UnlockCommand) Execute(params []string, message *IncomingSlackMessage, executor *CommandExecutor) (string, error) {
	if len(params) != 1 {
		return UNLOCK_CODE_SHOULD_BE_SPECIFIED, nil
	}
	unlockCode := params[0]
	details, err := executor.repo.AccessDetails.GetForUser(message.TeamId, message.UserId)
	if err != nil {
		return "", err
	}
	if details == nil || details.StravaUserId == 0 {
		return COMMAND_STRAVA_NOT_CONNECTED, nil
	}
	sentCode, err := executor.repo.SentCodes.Get(unlockCode)
	if err != nil {
		return "", err
	}
	if sentCode == nil {
		return UNLOCK_CODE_IS_INVALID, nil
	}
	if sentCode.Used {
		return UNLOCK_CODE_IS_ALREADY_USED, nil
	}
	if err := executor.repo.SentCodes.Update(sentCode, map[string]interface{}{"Used": true}); err != nil {
		return "", err
	}
	if err := executor.unlockUser(details.StravaUserId, unlockCode, message.TeamId); err != nil {
		return "", err
	}
	return UNLOCK_SUCCESSFUL, nil
}
|
package handler
import (
	"context"
	"encoding/json"
	"fmt"

	"cloud.google.com/go/pubsub"
	"github.com/line/line-bot-sdk-go/linebot"
	"github.com/sh0e1/translation-konjac/internal/message"
	"github.com/sh0e1/translation-konjac/pkg/datastore/resources"
	"github.com/sh0e1/translation-konjac/pkg/language"
	"github.com/sh0e1/translation-konjac/pkg/line/postback"
	ps "github.com/sh0e1/translation-konjac/pkg/service/pubsub"
)
// PostbackHandler handles LINE postback events by dispatching on the action
// encoded in the event's postback data.
type PostbackHandler struct {
	*BaseEventHandler
}
// Handle decodes the JSON postback payload and dispatches it to the handler
// that matches the encoded action.
//
// Bug fix: an unrecognized action previously left fn nil, so the trailing
// fn(ctx, &data) call panicked with a nil dereference. Unknown actions now
// return an explicit error.
func (h *PostbackHandler) Handle(ctx context.Context) error {
	var data postback.Data
	if err := json.Unmarshal([]byte(h.Event.Postback.Data), &data); err != nil {
		return err
	}
	var fn postbackHandleFunc
	switch data.Action {
	case postback.SelectLanguageAction:
		fn = h.handleSelectLanguagePostBack
	case postback.SelectAudioLanguageAction:
		fn = h.handleSelectAudioLanguagePostBack
	default:
		return fmt.Errorf("unknown postback action: %v", data.Action)
	}
	return fn(ctx, &data)
}

// postbackHandleFunc is the signature shared by all postback action handlers.
type postbackHandleFunc func(ctx context.Context, data *postback.Data) error
// handleSelectLanguagePostBack stores the language the user picked on their
// user record and replies with a confirmation message.
func (h *PostbackHandler) handleSelectLanguagePostBack(ctx context.Context, data *postback.Data) error {
	u := &resources.User{ID: h.Event.Source.UserID}
	if err := u.Load(ctx); err != nil {
		return err
	}
	u.SelectLanguage = data.Language
	if err := u.Save(ctx); err != nil {
		return err
	}
	reply := linebot.NewTextMessage(message.SelectedLanguage.Format(u.SelectLanguage))
	_, err := h.Bot.ReplyMessage(h.Event.ReplyToken, reply).WithContext(ctx).Do()
	return err
}
// handleSelectAudioLanguagePostBack records the source language chosen for an
// audio message and publishes the message to the Pub/Sub topic for
// transcription/translation.
//
// Fix: removed a dead, empty "if language.IsMultipleSpeechCode(data.Language)
// {}" statement that had no effect.
// TODO(review): if multi-speech-code languages need special handling, restore
// that branch with a real body.
func (h *PostbackHandler) handleSelectAudioLanguagePostBack(ctx context.Context, data *postback.Data) error {
	audio := &resources.Audio{ID: data.MessageID}
	if err := audio.Load(ctx); err != nil {
		return err
	}
	audio.SourceLanguage = language.GetSpeechCode(data.Language)
	if err := audio.Save(ctx); err != nil {
		return err
	}
	msgData := &ps.Data{
		MessageID:      data.MessageID,
		ReplayToken:    h.Event.ReplyToken,
		AudioFilePath:  audio.Path,
		SourceLanguage: audio.SourceLanguage,
	}
	msg := &pubsub.Message{
		Data: msgData.Marshal(),
	}
	// Publish synchronously so errors surface to the caller.
	if _, err := h.Topic.Publish(ctx, msg).Get(ctx); err != nil {
		return err
	}
	return nil
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"testing"
"github.com/eonpatapon/contrail-gremlin/neutron"
"github.com/stretchr/testify/assert"
)
// makePortRequest issues a neutron "port" list request through the shared test
// helper makeRequest.
func makePortRequest(tenantID string, isAdmin bool, data RequestData) *http.Response {
	return makeRequest("port", ListRequest, tenantID, isAdmin, data)
}
// parsePorts decodes the response body into a slice of neutron ports. It
// panics (with the raw body in the message) when the body is not valid JSON
// for []neutron.Port.
func parsePorts(resp *http.Response) (ports []neutron.Port) {
	body, _ := ioutil.ReadAll(resp.Body)
	if err := json.Unmarshal(body, &ports); err != nil {
		panic(fmt.Sprintf("%s: %s", string(body), err))
	}
	return ports
}
// TestListUser: a non-admin list returns only the tenant's 6 ports.
func TestListUser(t *testing.T) {
	resp := makePortRequest(tenantID, false, RequestData{})
	assert.Equal(t, 200, resp.StatusCode, "")
	ports := parsePorts(resp)
	assert.Equal(t, 6, len(ports))
}

// TestUserAAP checks the allowed-address-pair IP/MAC of a known port.
func TestUserAAP(t *testing.T) {
	resp := makePortRequest(tenantID, false, RequestData{
		Filters: RequestFilters{
			"name": []interface{}{"aap_vm1_port"},
		},
	})
	assert.Equal(t, 200, resp.StatusCode, "")
	ports := parsePorts(resp)
	assert.Equal(t, 1, len(ports))
	assert.Equal(t, "15.15.15.15", ports[0].AAPs[0].IP)
	assert.Equal(t, "00:00:5e:00:01:33", ports[0].AAPs[0].MAC)
}

// TestListUserFilterID filters the list by a single port ID.
func TestListUserFilterID(t *testing.T) {
	resp := makePortRequest(tenantID, false, RequestData{
		Filters: RequestFilters{
			"id": []interface{}{"ec12373a-7452-4a51-af9c-5cd9cfb48513"},
		},
	})
	assert.Equal(t, 200, resp.StatusCode, "")
	ports := parsePorts(resp)
	assert.Equal(t, 1, len(ports))
	assert.Equal(t, "ec12373a-7452-4a51-af9c-5cd9cfb48513", ports[0].ID.String())
}

// TestListUserFilterName filters the list by a single port name.
func TestListUserFilterName(t *testing.T) {
	resp := makePortRequest(tenantID, false, RequestData{
		Filters: RequestFilters{
			"name": []interface{}{"aap_vm2_port"},
		},
	})
	assert.Equal(t, 200, resp.StatusCode, "")
	ports := parsePorts(resp)
	assert.Equal(t, 1, len(ports))
	assert.Equal(t, "aap_vm2_port", ports[0].Name)
}

// TestListUserFilterNames filters by multiple names (OR semantics).
func TestListUserFilterNames(t *testing.T) {
	resp := makePortRequest(tenantID, false, RequestData{
		Filters: RequestFilters{
			"name": []interface{}{"aap_vm1_port", "aap_vm2_port"},
		},
	})
	assert.Equal(t, 200, resp.StatusCode, "")
	ports := parsePorts(resp)
	assert.Equal(t, 2, len(ports))
}

// TestListUserFilterVMs filters by two device (VM) IDs.
func TestListUserFilterVMs(t *testing.T) {
	resp := makePortRequest(tenantID, false, RequestData{
		Filters: RequestFilters{
			"device_id": []interface{}{"bb68ae24-8b17-42b8-86a3-74c99f937b30", "31ca7629-5b57-42b7-978b-5c767b24b4b2"},
		},
	})
	assert.Equal(t, 200, resp.StatusCode, "")
	ports := parsePorts(resp)
	assert.Equal(t, 2, len(ports))
}

// TestListUserFilterNetwork filters by network ID.
func TestListUserFilterNetwork(t *testing.T) {
	resp := makePortRequest(tenantID, false, RequestData{
		Filters: RequestFilters{
			"network_id": []interface{}{"e863c27f-ae81-4c0c-926d-28a95ef8b21f"},
		},
	})
	assert.Equal(t, 200, resp.StatusCode, "")
	ports := parsePorts(resp)
	assert.Equal(t, 4, len(ports))
}

// TestListUserFilterIPAddress filters by fixed IP address.
func TestListUserFilterIPAddress(t *testing.T) {
	resp := makePortRequest(tenantID, false, RequestData{
		Filters: RequestFilters{
			"ip_address": []interface{}{"15.15.15.5"},
		},
	})
	assert.Equal(t, 200, resp.StatusCode, "")
	ports := parsePorts(resp)
	assert.Equal(t, 1, len(ports))
}

// TestListUserFilterSubnetID filters by subnet ID.
func TestListUserFilterSubnetID(t *testing.T) {
	resp := makePortRequest(tenantID, false, RequestData{
		Filters: RequestFilters{
			"subnet_id": []interface{}{"04613d72-cae0-4cf1-83c6-327d163e238d"},
		},
	})
	assert.Equal(t, 200, resp.StatusCode, "")
	ports := parsePorts(resp)
	assert.Equal(t, 2, len(ports))
}

// TestListAdmin: an admin list returns ports across all tenants (107).
func TestListAdmin(t *testing.T) {
	resp := makePortRequest(tenantID, true, RequestData{})
	assert.Equal(t, 200, resp.StatusCode, "")
	ports := parsePorts(resp)
	assert.Equal(t, 107, len(ports))
}

// TestListUserFields: requesting specific fields leaves others zero-valued.
func TestListUserFields(t *testing.T) {
	resp := makePortRequest(tenantID, false, RequestData{
		Fields: []string{"id", "mac_address"},
	})
	assert.Equal(t, 200, resp.StatusCode, "")
	ports := parsePorts(resp)
	assert.Equal(t, "", ports[0].Status)
	assert.Equal(t, "", ports[0].DeviceID)
}
|
/**
* @Time : 2020/9/16 4:34 PM
* @Author : solacowa@gmail.com
* @File : config
* @Software: GoLand
*/
package foo
import (
"strings"
"github.com/Unknwon/goconfig"
)
// Configuration file section names.
const (
	SectionServer  = "server"
	SectionRedis   = "redis"
	SectionMysql   = "mysql"
	SectionService = "service"
	SectionCors    = "cors"
)

// Config wraps a goconfig.ConfigFile and remembers the current environment
// name, which Section uses to prefix lookups (e.g. "<env>.redis").
type Config struct {
	*goconfig.ConfigFile
	env string
}
// NewConfig loads and parses the configuration file at path. The environment
// name starts empty; set it with SetEnv.
func NewConfig(path string) (*Config, error) {
	// Load and parse the configuration file.
	cfg, err := goconfig.LoadConfigFile(path)
	if err != nil {
		return nil, err
	}
	return &Config{ConfigFile: cfg}, nil
}
// Section prefixes key with the current environment, e.g. "dev.redis".
func (c *Config) Section(key string) string {
	return c.env + "." + key
}

// SetEnv sets the active environment name.
func (c *Config) SetEnv(env string) {
	c.env = env
}

// GetEnv reports the active environment name.
func (c *Config) GetEnv() string {
	return c.env
}

// GetString returns the value at section/key, or "" when missing.
func (c *Config) GetString(section, key string) string {
	v, _ := c.GetValue(section, key)
	return v
}

// GetStrings returns the comma-separated value at section/key as a slice.
func (c *Config) GetStrings(section, key string) []string {
	return strings.Split(c.GetString(section, key), ",")
}

// GetInt returns the value at section/key as an int, or 0 on error.
func (c *Config) GetInt(section, key string) int {
	n, _ := c.Int(section, key)
	return n
}

// GetBool returns the value at section/key as a bool, or false on error.
func (c *Config) GetBool(section, key string) bool {
	b, _ := c.Bool(section, key)
	return b
}
|
/*
package game
модуль start_level
отвечает за отрисовку стартового меню.
*/
package game
import (
"github.com/JoelOtter/termloop"
)
// startLevel is the game's starting level.
type startLevel struct {
	termloop.Level
	startMenu *startMenu
}

// startMenu is the start-screen object shown before the game begins.
type startMenu struct {
	*termloop.Text
}
// createStartMenu builds the initial game state: a white-on-default text
// prompt telling the player to press Enter.
//
// Fix: the prompt previously read "srart" instead of "start".
func createStartMenu() *startMenu {
	startObj := new(startMenu)
	startObj.Text = termloop.NewText(0, 0, "press Enter to start game",
		termloop.ColorWhite,
		termloop.ColorDefault)
	return startObj
}
// Tick watches input events; pressing Enter begins preparing all players for
// the game.
func (s *startMenu) Tick(event termloop.Event) {
	if event.Type == termloop.EventKey {
		// On Enter, start the pre-game preparation phase.
		if event.Key == termloop.KeyEnter {
			// Install the base snake level first; its own Tick will then
			// poll the server while waiting for the game to start.
			level := startBaseSnakeLevel()
			termloopGame.Screen().SetLevel(level)
		}
	}
}
|
// FindFileString
package DaeseongLib
import (
"bufio"
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
)
// FileItem maps a file name to its full contents; populated by SearchFiles.
// NOTE(review): keyed by bare file name, so files with the same name in
// different directories overwrite each other.
var FileItem map[string]string

// flags parses the command line and returns the search path and search word.
func flags() (string, string) {
	spath := flag.String("path", "C:\\Go\\src\\DaeseongLib\\lib", "Search Path")
	skey := flag.String("key", "Split", "Search word")
	flag.Parse()
	return *spath, *skey
}
// Right returns the last nCount bytes of sString. The whole string is
// returned when nCount is negative or not smaller than the string length.
func Right(sString string, nCount int) string {
	if nCount < 0 {
		return sString
	}
	if n := len(sString); n > nCount {
		return sString[n-nCount:]
	}
	return sString
}
// Left returns the first nCount bytes of sString. The whole string is
// returned when nCount is negative or not smaller than the string length.
func Left(sString string, nCount int) string {
	if nCount < 0 {
		return sString
	}
	if len(sString) > nCount {
		return sString[:nCount]
	}
	return sString
}
func ReadLines(spath string) ([]string, error) {
file, err := os.Open(spath)
if err != nil {
return nil, err
}
defer file.Close()
var lines []string
scanner := bufio.NewScanner(file)
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
return lines, scanner.Err()
}
// SearchFiles resets FileItem and fills it with the contents of every file
// under spath; when bRecursive is true subdirectories are descended as well.
// Keys are bare file names, so duplicate names across directories overwrite.
//
// Bug fix: the recursive call previously re-entered SearchFiles itself, whose
// first statement re-created the FileItem map — wiping every file collected
// before the recursion. Collection now happens in a helper that never resets
// the map.
func SearchFiles(spath string, bRecursive bool) {
	FileItem = make(map[string]string)
	collectFiles(spath, bRecursive)
}

// collectFiles walks spath and records file contents into FileItem without
// resetting it. Unreadable directories are reported; unreadable files are
// silently skipped (best effort, matching the original behavior).
func collectFiles(spath string, bRecursive bool) {
	files, err := ioutil.ReadDir(spath)
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, file := range files {
		if file.IsDir() {
			if bRecursive {
				collectFiles(spath+file.Name()+string(os.PathSeparator), true)
			}
		} else {
			content, err := ioutil.ReadFile(spath + string(os.PathSeparator) + file.Name())
			if err == nil {
				FileItem[file.Name()] = string(content)
			}
		}
	}
}
// SearchSourceFile reads the -path and -key flags, loads every file under the
// path (recursively) into FileItem, and prints the names of files whose
// contents contain the key.
func SearchSourceFile() {
	path, key := flags()
	// NOTE(review): checks for a trailing "/" but appends os.PathSeparator,
	// so on Windows a path already ending in "\" would get a second separator.
	if Right(path, 1) != "/" {
		path += string(os.PathSeparator)
	}
	SearchFiles(path, true)
	for filename, content := range FileItem {
		if strings.Contains(content, key) {
			fmt.Println("Contains:", filename)
		}
	}
}
// FindFileWords recursively searches every file under sPath and prints each
// non-empty line matching the regular expression sKey together with the file
// path. Files within one directory are scanned concurrently; the call returns
// after all goroutines finish.
//
// Improvement: the pattern was previously recompiled inside every file's
// goroutine (and MustCompile would panic there on a bad key); it is now
// compiled exactly once per call and shared.
func FindFileWords(sPath, sKey string) {
	pattern := regexp.MustCompile(sKey)
	findFileWords(sPath, pattern)
}

// findFileWords performs the directory walk with an already-compiled pattern.
func findFileWords(sPath string, pattern *regexp.Regexp) {
	var wg sync.WaitGroup
	files, _ := ioutil.ReadDir(sPath)
	for _, file := range files {
		if file.IsDir() {
			findFileWords(filepath.Join(sPath, file.Name()), pattern)
			continue
		}
		sFullPath := filepath.Join(sPath, file.Name())
		wg.Add(1)
		go func() {
			defer wg.Done()
			lines, err := ReadLines(sFullPath)
			if err != nil {
				return
			}
			for _, line := range lines {
				if line != "" && pattern.MatchString(line) {
					fmt.Println(sFullPath, line)
				}
			}
		}()
	}
	wg.Wait()
}
/*
func f1() {
//test1
//if len(os.Args) < 2 {
// panic("panic")
//}
//ProgramName := os.Args[0:1]
//Arg1 := os.Args[1:2]
//Arg2 := os.Args[2:3]
//allArgs := os.Args[1:]
//fmt.Println(ProgramName, Arg1, Arg2, allArgs)
//test2
//flagstring := flag.String("sValue", "", "command line string")
//flagint := flag.Int("nValue", 0, "command line int")
//flagbool := flag.Bool("Ok", false, "command line bool")
//flag.Parse()
//fmt.Println(*flagstring, *flagint, *flagbool)
//test3
//lines, err := ReadLines("C:\\Go\\src\\DaeseongLib\\lib\\YoutubeInfo.go")
//if err == nil {
// for _, line := range lines {
// index := strings.Index(line, "Split")
// if index > 0 {
// fmt.Println(line)
// }
// }
//}
//test4
//lines, err = ReadLines("C:\\Go\\src\\DaeseongLib\\lib\\YoutubeInfo.go")
//if err == nil {
//
// pattern := regexp.MustCompile("Split")
// for _, line := range lines {
//
// data := pattern.FindAllStringSubmatch(line, -1)
// if len(data) != 0 {
// fmt.Println(line)
// }
// }
//}
}
func f2() {
SearchSourceFile()
}
func f3() {
sPath, sKey := flags()
FindFileWords(sPath, sKey)
}
func main() {
f3()
}
*/
|
package main
import (
"fmt"
"sync/atomic"
)
// n is the shared counter that main increments atomically.
var n uint64

// main prints a fixed greeting, atomically adds 10 to the counter one
// hundred times, and prints the final value (1000).
func main() {
	fmt.Println("vim-go")
	const rounds = 100
	for i := 0; i < rounds; i++ {
		atomic.AddUint64(&n, 10)
	}
	fmt.Println(n)
}
|
package hook
/*
// #include "event/hook_async.h"
*/
import "C"
import (
"log"
"time"
"encoding/json"
)
// go_send is called from the C event hook with a JSON-encoded event. It
// decodes the event, records the rawcode→character mapping for keys that
// have a printable character, stamps the receive time, and forwards the
// event on the ev channel. A malformed payload is fatal.
//
//export go_send
func go_send(s *C.char) {
	str := []byte(C.GoString(s))
	out := Event{}
	err := json.Unmarshal(str, &out)
	if err != nil {
		log.Fatal("json.Unmarshal error is: ", err)
	}
	if out.Keychar != CharUndefined {
		// raw2key is shared with readers elsewhere; guard the write.
		lck.Lock()
		raw2key[out.Rawcode] = string([]rune{out.Keychar})
		lck.Unlock()
	}
	// todo bury this deep into the C lib so that the time is correct
	out.When = time.Now() // at least it's consistent
	// (The original repeated the json.Unmarshal error check here; err is
	// necessarily nil at this point, so that duplicate was dead code.)
	// todo: maybe make non-blocking
	ev <- out
}
|
package test
import (
"fmt"
"sharemusic/models/tool"
"testing"
)
// TestTime prints the formatted current time returned by tool.GetTime.
func TestTime(t *testing.T) {
	now := tool.GetTime(true)
	fmt.Println(now)
}
// TestHashCode prints the hash of a fixed sample string.
func TestHashCode(t *testing.T) {
	hashed := tool.HashCode("123")
	fmt.Println(hashed)
}
// TestConvert2 prints a sample map and the result of converting it with
// tool.Convert2.
func TestConvert2(t *testing.T) {
	sample := map[string]string{"id": "123"}
	fmt.Println(sample)
	fmt.Println(tool.Convert2(sample))
}
|
package member
// Member is a basic record describing a registered member.
type Member struct {
	Id     int    // unique identifier
	Name   string // display name
	Phone  string // contact phone number
	Age    int    // age in years
	Gender string // gender label
}

// NewMember builds a Member from the given field values and returns a
// pointer to it.
func NewMember(id int, name string, phone string, age int, gender string) *Member {
	m := Member{
		Id:     id,
		Name:   name,
		Phone:  phone,
		Age:    age,
		Gender: gender,
	}
	return &m
}
|
// Copyright 2015 The Go Circuit Project
// Use of this source code is governed by the license for
// The Go Circuit Project, found in the LICENSE file.
//
// Authors:
// 2015 Petar Maymounkov <p@gocircuit.org>
package io
import (
"io"
"runtime"
"github.com/gocircuit/runtime/circuit"
"github.com/gocircuit/runtime/errors"
)
// Server-side types
// XReader is a cross-worker exportable object that exposes an underlying local io.Reader.
type XReader struct {
	io.Reader
}

// Read reads up to n bytes from the underlying reader and returns only the
// bytes actually read. The error is wrapped with errors.Pack — presumably so
// it can be serialized across worker boundaries (see the errors package).
func (x XReader) Read(n int) ([]byte, error) {
	p := make([]byte, n)
	m, err := x.Reader.Read(p)
	return p[:m], errors.Pack(err)
}
// XWriter is a cross-worker exportable object that exposes an underlying local io.Writer.
type XWriter struct {
	io.Writer
}

// Write writes p to the underlying writer, packing the error for transport
// across workers.
func (x XWriter) Write(p []byte) (int, error) {
	n, err := x.Writer.Write(p)
	return n, errors.Pack(err)
}
// XCloser is a cross-worker exportable object that exposes an underlying
// local io.Closer. (The original comment said io.Writer — a copy-paste slip.)
type XCloser struct {
	io.Closer
}

// NewXCloser attaches a finalizer to the object which calls Close.
// In cases when a cross-interface to this object is lost because of a failed remote worker,
// the attached finalizer will ensure that before we forget this object the channel it
// encloses will be closed.
func NewXCloser(u io.Closer) *XCloser {
	x := &XCloser{u}
	// The finalizer ignores Close's error: there is no caller left to
	// report it to when the GC reclaims the object.
	runtime.SetFinalizer(x, func(x *XCloser) {
		x.Closer.Close()
	})
	return x
}

// Close closes the underlying resource, packing the error for transport
// across workers.
func (x XCloser) Close() error {
	return errors.Pack(x.Closer.Close())
}
// XReadWriteCloser is the exportable wrapper for a local io.ReadWriteCloser,
// composed from the read/write/close wrappers above.
type XReadWriteCloser struct {
	XReader
	XWriter
	*XCloser
}

// NewXReadWriteCloser wraps u and returns a cross-worker reference to it.
func NewXReadWriteCloser(u io.ReadWriteCloser) circuit.X {
	return circuit.Ref(&XReadWriteCloser{XReader{u}, XWriter{u}, NewXCloser(u)})
}
// XReadCloser is the exportable wrapper for a local io.ReadCloser.
type XReadCloser struct {
	XReader
	*XCloser
}

// NewXReader wraps a plain reader (no Close) and returns a cross-worker
// reference to it.
func NewXReader(u io.Reader) circuit.X {
	return circuit.Ref(XReader{u})
}

// NewXReadCloser wraps u and returns a cross-worker reference to it.
func NewXReadCloser(u io.ReadCloser) circuit.X {
	return circuit.Ref(&XReadCloser{XReader{u}, NewXCloser(u)})
}
// XWriteCloser is the exportable wrapper for a local io.WriteCloser.
type XWriteCloser struct {
	XWriter
	*XCloser
}

// NewXWriteCloser wraps u and returns a cross-worker reference to it.
func NewXWriteCloser(u io.WriteCloser) circuit.X {
	return circuit.Ref(&XWriteCloser{XWriter{u}, NewXCloser(u)})
}
// XReadWriter is the exportable wrapper for a local io.ReadWriter
// (no Close, so no finalizer is needed).
type XReadWriter struct {
	XReader
	XWriter
}

// NewXReadWriter wraps u and returns a cross-worker reference to it.
func NewXReadWriter(u io.ReadWriter) circuit.X {
	return circuit.Ref(&XReadWriter{XReader{u}, XWriter{u}})
}
|
package constants
// Ingredient describes a cookable item: its display name, the category it
// belongs to (one of the category constants below), and its position in the
// inventory UI. The Ingredients table below leaves the Inv* fields at zero —
// presumably they are populated elsewhere; confirm before relying on them.
type Ingredient struct {
	Name     string
	Category string
	InvX     int // inventory slot x position (TODO confirm axis orientation)
	InvY     int // inventory slot y position (TODO confirm axis orientation)
	InvPage  int // inventory page index
}
// Ingredient categories, used as the Category field of Ingredient.
const (
	Fruit    = "Fruit"
	Mushroom = "Mushroom"
	Plant    = "Plant"
	Meat     = "Meat"
	Other    = "Other"
	Dragon   = "Dragon"
	Nut      = "Nut"
	Fish     = "Fish"
	Insect   = "Insect"
	Monster  = "Monster"
	Ore      = "Ore"
)
// Ingredients is the master list of all known ingredients, each tagged with
// its category. The Name values are string constants declared elsewhere in
// this package; inventory coordinates are left at their zero values here.
var Ingredients = []*Ingredient{
	{Name: HeartyDurian, Category: Fruit},
	{Name: PalmFruit, Category: Fruit},
	{Name: Apple, Category: Fruit},
	{Name: Wildberry, Category: Fruit},
	{Name: Hydromelon, Category: Fruit},
	{Name: SpicyPepper, Category: Fruit},
	{Name: Voltfruit, Category: Fruit},
	{Name: FleetLotusSeeds, Category: Fruit},
	{Name: MightyBananas, Category: Fruit},
	{Name: BigHeartyTruffle, Category: Mushroom},
	{Name: HeartyTruffle, Category: Mushroom},
	{Name: EnduraShroom, Category: Mushroom},
	{Name: HylianMushroom, Category: Mushroom},
	{Name: StamellaMushroom, Category: Mushroom},
	{Name: Chillshroom, Category: Mushroom},
	{Name: Sunshroom, Category: Mushroom},
	{Name: Zapshroom, Category: Mushroom},
	{Name: Rushroom, Category: Mushroom},
	{Name: Razorshroom, Category: Mushroom},
	{Name: Ironshroom, Category: Mushroom},
	{Name: SilentShroom, Category: Mushroom},
	{Name: BigHeartyRadish, Category: Plant},
	{Name: HeartyRadish, Category: Plant},
	{Name: EnduraCarrot, Category: Plant},
	{Name: HyruleHerb, Category: Plant},
	{Name: SwiftCarrot, Category: Plant},
	{Name: FortifiedPumpkin, Category: Plant},
	{Name: CoolSafflina, Category: Plant},
	{Name: WarmSafflina, Category: Plant},
	{Name: ElectricSafflina, Category: Plant},
	{Name: SwiftViolet, Category: Plant},
	{Name: MightyThistle, Category: Plant},
	{Name: Armoranth, Category: Plant},
	{Name: BlueNightshade, Category: Plant},
	{Name: SilentPrincess, Category: Plant},
	{Name: RawGourmetMeat, Category: Meat},
	{Name: RawWholeBird, Category: Meat},
	{Name: RawPrimeMeat, Category: Meat},
	{Name: RawBirdThigh, Category: Meat},
	{Name: RawMeat, Category: Meat},
	{Name: RawBirdDrumstick, Category: Meat},
	{Name: CourserBeeHoney, Category: Other},
	{Name: HylianRice, Category: Other},
	{Name: BirdEgg, Category: Other},
	{Name: TabanthaWheat, Category: Other},
	{Name: FreshMilk, Category: Other},
	{Name: Acorn, Category: Nut},
	{Name: ChickalooTreeNut, Category: Nut},
	{Name: CaneSugar, Category: Other},
	{Name: GoatButter, Category: Other},
	{Name: GoronSpice, Category: Other},
	{Name: RockSalt, Category: Other},
	{Name: MonsterExtract, Category: Other},
	{Name: StarFragment, Category: Other},
	{Name: DinraalsScale, Category: Dragon},
	{Name: DinraalsClaw, Category: Dragon},
	{Name: ShardofDinraalsFang, Category: Dragon},
	{Name: ShardofDinraalsHorn, Category: Dragon},
	{Name: NyadrasScale, Category: Dragon},
	{Name: NyadrasClaw, Category: Dragon},
	{Name: ShardofNyadrasFang, Category: Dragon},
	{Name: ShardofNyadrasHorn, Category: Dragon},
	{Name: FaroshsScale, Category: Dragon},
	{Name: FaroshsClaw, Category: Dragon},
	{Name: ShardofFaroshsFang, Category: Dragon},
	{Name: ShardofFaroshsHorn, Category: Dragon},
	{Name: HeartySalmon, Category: Fish},
	{Name: HeartyBlueshellSnail, Category: Fish},
	{Name: HeartyBass, Category: Fish},
	{Name: HylianBass, Category: Fish},
	{Name: StaminokaBass, Category: Fish},
	{Name: ChillfinTrout, Category: Fish},
	{Name: SizzlefinTrout, Category: Fish},
	{Name: VoltfinTrout, Category: Fish},
	{Name: StealthfinTrout, Category: Fish},
	{Name: MightyCarp, Category: Fish},
	{Name: ArmoredCarp, Category: Fish},
	{Name: SankeCarp, Category: Fish},
	{Name: MightyPorgy, Category: Fish},
	{Name: ArmoredPorgy, Category: Fish},
	{Name: SneakyRiverSnail, Category: Fish},
	{Name: RazorclawCrab, Category: Fish},
	{Name: IronshellCrab, Category: Fish},
	{Name: BrightEyedCrab, Category: Fish},
	{Name: Fairy, Category: Other},
	{Name: WinterwingButterfly, Category: Insect},
	{Name: SummerwingButterfly, Category: Insect},
	{Name: ThunderwingButterfly, Category: Insect},
	{Name: SmotherwingButterfly, Category: Insect},
	{Name: ColdDarner, Category: Insect},
	{Name: WarmDarner, Category: Insect},
	{Name: ElectricDarner, Category: Insect},
	{Name: RestlessCricket, Category: Insect},
	{Name: BladedRhinoBeetle, Category: Insect},
	{Name: RuggedRhinoBeetle, Category: Insect},
	{Name: EnergeticRhinoBeetle, Category: Insect},
	{Name: SunsetFirefly, Category: Insect},
	{Name: HotFootedFrog, Category: Insect},
	{Name: TirelessFrog, Category: Insect},
	{Name: HightailLizard, Category: Insect},
	{Name: HeartyLizard, Category: Insect},
	{Name: FireproofLizard, Category: Insect},
	{Name: Flint, Category: Ore},
	{Name: Amber, Category: Ore},
	{Name: Opal, Category: Ore},
	{Name: LuminousStone, Category: Ore},
	{Name: Topaz, Category: Ore},
	{Name: Ruby, Category: Ore},
	{Name: Sapphire, Category: Ore},
	{Name: Diamond, Category: Ore},
	{Name: BokoblinHorn, Category: Monster},
	{Name: BokoblinFang, Category: Monster},
	{Name: BokoblinGuts, Category: Monster},
	{Name: MoblinHorn, Category: Monster},
	{Name: MoblinFang, Category: Monster},
	{Name: MoblinGuts, Category: Monster},
	{Name: LizalfosHorn, Category: Monster},
	{Name: LizalfosTalon, Category: Monster},
	{Name: LizalfosTail, Category: Monster},
	{Name: IcyLizalfosTail, Category: Monster},
	{Name: RedLizalfosTail, Category: Monster},
	{Name: YellowLizalfosTail, Category: Monster},
	{Name: LynelHorn, Category: Monster},
	{Name: LynelHoof, Category: Monster},
	{Name: LynelGuts, Category: Monster},
	{Name: ChuchuJelly, Category: Monster},
	{Name: WhiteChuchuJelly, Category: Monster},
	{Name: RedChuchuJelly, Category: Monster},
	{Name: YellowChuchuJelly, Category: Monster},
	{Name: KeeseWing, Category: Monster},
	{Name: IceKeeseWing, Category: Monster},
	{Name: FireKeeseWing, Category: Monster},
	{Name: ElectricKeeseWing, Category: Monster},
	{Name: KeeseEyeball, Category: Monster},
	{Name: OctorokTentacle, Category: Monster},
	{Name: OctorokEyeball, Category: Monster},
	{Name: OctoBalloon, Category: Monster},
	{Name: MoldugaFin, Category: Monster},
	{Name: MoldugaGuts, Category: Monster},
	{Name: HinoxToenail, Category: Monster},
	{Name: HinoxTooth, Category: Monster},
	{Name: HinoxGuts, Category: Monster},
	{Name: AncientScrew, Category: Monster},
	{Name: AncientSpring, Category: Monster},
	{Name: AncientGear, Category: Monster},
	{Name: AncientShaft, Category: Monster},
	{Name: AncientCore, Category: Monster},
	{Name: GiantAncientCore, Category: Monster},
	{Name: Wood, Category: Other},
}
|
package main
import (
"bytes"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"time"
)
// usageStr is the top-level help text printed by usage(); individual flag
// descriptions are appended via flag.PrintDefaults.
const usageStr = `The Lucifer binary makes requests to the Lucifer server.
Usage:
lucifer command [arguments]
The commands are:
invalidate Invalidate the cache for a given file
run Run tests for a given file
Use "lucifer help [command]" for more information about a command.
`

// version identifies this client release; it is embedded in the User-Agent
// header of every request.
const version = "0.1"
const userAgent = "lucifer-client/" + version
// usage prints the top-level help text plus registered flag defaults to
// stderr and exits with status 2 (the conventional "bad usage" exit code).
func usage() {
	// Fprint, not Fprintf: usageStr is output, not a format string.
	// Passing it as a format would mangle any future '%' characters
	// (and go vet flags non-constant/verb-less format strings).
	fmt.Fprint(os.Stderr, usageStr)
	flag.PrintDefaults()
	os.Exit(2)
}
const baseUri = "http://127.0.0.1:11666"
const timeout = time.Duration(5) * time.Second
// Filename is the path of a file as understood by the Lucifer server.
type Filename string

// LuciferInvalidateRequest is the JSON body for POST /v1/cache/invalidate.
type LuciferInvalidateRequest struct {
	Files []Filename `json:"files"`
}

// LuciferRunRequest is the JSON body for POST /v1/test_runs.
type LuciferRunRequest struct {
	Bail  bool       `json:"bail"`
	Files []Filename `json:"files"`
	Grep  string     `json:"grep"`
}
func makeRequest(method string, uri string, body *bytes.Buffer) (*http.Response, error) {
req, err := http.NewRequest(method, uri, body)
if err != nil {
return nil, err
}
req.Header.Add("User-Agent", userAgent)
req.Header.Add("Content-Type", "application/json")
req.Header.Add("Accept", "application/json, q=0.8; application/problem+json, q=0.6; */*, q=0.3")
client := http.Client{
Timeout: timeout,
}
return client.Do(req)
}
// makeRunRequest asks the server to run tests for the given files and
// returns the raw response body. bail stops the run at the first failure;
// a non-empty grep restricts the run to matching tests.
func makeRunRequest(fnames []Filename, bail bool, grep string) (string, error) {
	body := LuciferRunRequest{
		Bail:  bail,
		Files: fnames,
	}
	if grep != "" {
		body.Grep = grep
	}
	var buf bytes.Buffer
	// The original discarded this error; a marshaling failure would have
	// silently sent an empty request body.
	if err := json.NewEncoder(&buf).Encode(body); err != nil {
		return "", err
	}
	resp, err := makeRequest("POST", fmt.Sprintf("%s/v1/test_runs", baseUri), &buf)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	rbody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	if resp.StatusCode >= 300 {
		// XXX the raw body is used as the error message for now.
		return "", errors.New(string(rbody))
	}
	return string(rbody), nil
}
// makeInvalidateRequest asks the server to invalidate its cache for the
// given files and returns the raw response body.
func makeInvalidateRequest(fnames []Filename) (string, error) {
	body := LuciferInvalidateRequest{
		Files: fnames,
	}
	var buf bytes.Buffer
	// The original discarded this error; a marshaling failure would have
	// silently sent an empty request body.
	if err := json.NewEncoder(&buf).Encode(body); err != nil {
		return "", err
	}
	resp, err := makeRequest("POST", fmt.Sprintf("%s/v1/cache/invalidate", baseUri), &buf)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	rbody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	if resp.StatusCode >= 300 {
		// XXX the raw body is used as the error message for now.
		return "", errors.New(string(rbody))
	}
	return string(rbody), nil
}
// handleError reports err and terminates the process. With verbose set it
// logs the error and exits non-zero (via log.Fatal); otherwise it exits
// silently with status 0.
//
// NOTE(review): exiting 0 in the quiet path despite an error looks
// intentional (editor integrations may treat non-zero exits as noise) —
// confirm before "fixing" the exit code.
func handleError(err error, verbose bool) {
	if verbose {
		log.Fatal(err)
	} else {
		os.Exit(0)
	}
}
// init wires the custom usage text into the flag package before main runs.
func init() {
	flag.Usage = usage
}
func doInvalidate(flags *flag.FlagSet, sync bool, verbose bool) {
err := flags.Parse(os.Args[2:])
if err != nil {
handleError(err, verbose)
}
args := flags.Args()
var fnames []Filename
for i := 0; i < len(args); i++ {
fnames = append(fnames, Filename(args[i]))
}
body, err := makeInvalidateRequest(fnames)
if err != nil {
handleError(err, verbose)
}
if verbose {
fmt.Println(body)
}
}
// doRun requests a test run for the files given as positional arguments on
// the already-parsed flag set and, when verbose, prints the server's reply.
func doRun(flags *flag.FlagSet, bail bool, verbose bool, grep string) {
	// fnames stays nil when there are no arguments, preserving the
	// original JSON encoding (null rather than []).
	var fnames []Filename
	for _, arg := range flags.Args() {
		fnames = append(fnames, Filename(arg))
	}
	body, err := makeRunRequest(fnames, bail, grep)
	if err != nil {
		handleError(err, verbose)
	}
	if verbose {
		fmt.Println(body)
	}
}
// main dispatches on the first argument ("invalidate" or "run"), parses that
// subcommand's flag set, and invokes the matching handler. Anything else
// prints usage and exits 2.
func main() {
	// Flags for the "invalidate" subcommand.
	invalidateflags := flag.NewFlagSet("invalidate", flag.ExitOnError)
	sync := invalidateflags.Bool("sync", true, "Make request synchronously")
	verbose := invalidateflags.Bool("verbose", false, "Verbose output")
	// Flags for the "run" subcommand; -g is shorthand for -grep.
	runflags := flag.NewFlagSet("run", flag.ExitOnError)
	bail := runflags.Bool("bail", false, "Bail after a single test failure")
	runverbose := runflags.Bool("verbose", false, "Verbose response output")
	grepHelp := "Grep for the given pattern"
	grep := runflags.String("grep", "", grepHelp)
	runflags.StringVar(grep, "g", "", grepHelp+" (shorthand)")
	if len(os.Args) < 2 {
		usage()
	}
	// Parse happens here, before dereferencing the flag pointers below.
	// NOTE(review): *sync is passed through but unused downstream —
	// presumably reserved for future async requests; confirm.
	switch os.Args[1] {
	case "invalidate":
		err := invalidateflags.Parse(os.Args[2:])
		if err != nil {
			handleError(err, true)
		}
		doInvalidate(invalidateflags, *sync, *verbose)
	case "run":
		err := runflags.Parse(os.Args[2:])
		if err != nil {
			handleError(err, true)
		}
		doRun(runflags, *bail, *runverbose, *grep)
	default:
		usage()
	}
}
|
package goSolution
// canSwimToTheEnd reports whether cell (n-1, m-1) is reachable from (0, 0)
// by a BFS that only enters cells with elevation grid[x][y] <= t. It uses
// package helpers: Initialize2DIntSlice (builds the visited matrix filled
// with -1) and DX/DY (presumably the four cardinal direction offsets —
// defined elsewhere in this package).
//
// NOTE(review): the start cell is marked reachable without checking
// grid[0][0] <= t. That is fine for swimInWater, whose binary search never
// probes below grid[0][0], but confirm before reusing this standalone.
func canSwimToTheEnd(grid [][]int, t int) bool {
	n := len(grid)
	m := len(grid[0])
	q := [][]int{{0, 0}} // BFS queue of {x, y} coordinates
	v := Initialize2DIntSlice(n, m, -1) // v[x][y]: BFS depth, -1 = unvisited
	v[0][0] = 0
	for h := 0; h < len(q); h++ { // h is the queue head index
		x, y := q[h][0], q[h][1]
		for d := 0; d < 4; d++ {
			tx, ty := x + DX[d], y + DY[d]
			if tx >= 0 && ty >= 0 && tx < n && ty < m && v[tx][ty] == -1 && grid[tx][ty] <= t {
				q = append(q, []int{tx, ty})
				v[tx][ty] = v[x][y] + 1
			}
		}
		// Early exit as soon as the target corner has been reached.
		if v[n - 1][m - 1] != -1 {
			return true
		}
	}
	return false
}
// swimInWater returns the minimum time t at which one can swim from (0, 0)
// to the bottom-right corner, where a cell becomes passable once t reaches
// its elevation (LeetCode 778). It binary-searches t over
// [grid[0][0], max elevation] using canSwimToTheEnd as the predicate.
//
// NOTE: max(row...) spreads a slice, which the builtin max cannot do, so
// max here must be a package-level helper defined elsewhere.
func swimInWater(grid [][]int) int {
	maxTime := 0
	for _, row := range grid {
		maxTime = max(maxTime, max(row...))
	}
	var l, r int
	// Invariant: all feasible times are > r's left side; search shrinks
	// [l, r] until l == r+1, the smallest feasible time.
	for l, r = grid[0][0], maxTime; l <= r; {
		mid := (l + r) >> 1
		if canSwimToTheEnd(grid, mid) {
			r = mid - 1
		} else {
			l = mid + 1
		}
	}
	return r + 1
}
|
/*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package commitment
import (
"crypto"
"testing"
"github.com/stretchr/testify/require"
"github.com/trustbloc/sidetree-core-go/pkg/canonicalizer"
"github.com/trustbloc/sidetree-core-go/pkg/jws"
)
const (
	sha2_256 uint = 18 // multihash code for SHA2-256
)
// TestCalculate exercises Calculate across the supported and failing paths:
// a successful commitment, unsupported multihash and hash codes, a nil key
// (canonicalization failure), and an interoperability fixture.
func TestCalculate(t *testing.T) {
	jwk := &jws.JWK{
		Crv: "crv",
		Kty: "kty",
		X:   "x",
		Y:   "y",
	}
	t.Run("success", func(t *testing.T) {
		commitment, err := Calculate(jwk, sha2_256, crypto.SHA256)
		require.NoError(t, err)
		require.NotEmpty(t, commitment)
	})
	// The two error subtests below had stray leading spaces in their names
	// (" error - ..."), which made -run filtering awkward; trimmed.
	t.Run("error - multihash not supported", func(t *testing.T) {
		commitment, err := Calculate(jwk, 55, crypto.SHA256)
		require.Error(t, err)
		require.Empty(t, commitment)
		require.Contains(t, err.Error(), "algorithm not supported, unable to compute hash")
	})
	t.Run("error - hash not supported", func(t *testing.T) {
		commitment, err := Calculate(jwk, sha2_256, 55)
		require.Error(t, err)
		require.Empty(t, commitment)
		require.Contains(t, err.Error(), "hash function not available for: 55")
	})
	t.Run("error - canonicalization failed", func(t *testing.T) {
		commitment, err := Calculate(nil, sha2_256, crypto.SHA256)
		require.Error(t, err)
		require.Empty(t, commitment)
		require.Contains(t, err.Error(), "Expected '{' but got 'n'")
	})
	t.Run("interop test", func(t *testing.T) {
		jwk := &jws.JWK{
			Kty: "EC",
			Crv: "secp256k1",
			X:   "5s3-bKjD1Eu_3NJu8pk7qIdOPl1GBzU_V8aR3xiacoM",
			Y:   "v0-Q5H3vcfAfQ4zsebJQvMrIg3pcsaJzRvuIYZ3_UOY",
		}
		canonicalized, err := canonicalizer.MarshalCanonical(jwk)
		require.NoError(t, err)
		expected := `{"crv":"secp256k1","kty":"EC","x":"5s3-bKjD1Eu_3NJu8pk7qIdOPl1GBzU_V8aR3xiacoM","y":"v0-Q5H3vcfAfQ4zsebJQvMrIg3pcsaJzRvuIYZ3_UOY"}`
		require.Equal(t, string(canonicalized), expected)
	})
}
|
package palendrome
// Alg reports whether word reads the same forwards and backwards, i.e.
// whether it is a palindrome. The empty string and single characters count
// as palindromes. Comparison is byte-wise, so the result is only meaningful
// for single-byte (ASCII) text — multi-byte runes compare by their UTF-8
// bytes.
func Alg(word string) bool {
	last := len(word) - 1
	// Compare mirrored byte pairs inward from both ends. Using
	// i < len(word)/2 (the original used <=) fixes the out-of-range panic
	// on the empty string (word[0] with len 0) and skips the redundant
	// middle/crossed comparisons.
	for i := 0; i < len(word)/2; i++ {
		if word[i] != word[last-i] {
			return false
		}
	}
	return true
}
|
package cosmos
import "context"
// Collection performs operations on a given collection. It carries a copy
// of the database's client whose request path/link has been extended to
// address this collection.
type Collection struct {
	client Client
	db     Database
	collID string
}

// Collections struct handles all operations involving mutiple collections.
type Collections struct {
	client Client
	db     Database
}
// The accessors below are thin factories: each scopes this collection's
// client to a child resource via the corresponding constructor.

// Document defines possible operations on a single document. e.g. Read, Delete, Replace
func (c Collection) Document(docID string) *Document {
	return newDocument(c, docID)
}

// Documents defines possible operations on multiple documents. e.g. ReadAll, Query
func (c Collection) Documents() *Documents {
	return newDocuments(c)
}

// UDF defines operations on a user defined function
func (c Collection) UDF(id string) *UDF {
	return newUDF(c, id)
}

// UDFs defines operations on multiple user defined functions
func (c Collection) UDFs() *UDFs {
	return newUDFs(c)
}

// StoredProcedure defines operations on a single stored procedure
func (c Collection) StoredProcedure(id string) *StoredProcedure {
	return newStoredProcedure(c, id)
}

// StoredProcedures defines operations on multiple stored procedures
func (c Collection) StoredProcedures() *StoredProcedures {
	return newStoredProcedures(c)
}

// Trigger defines operations on a single trigger
func (c Collection) Trigger(id string) *Trigger {
	return newTrigger(c, id)
}

// Triggers defines operations on multiple triggers
func (c Collection) Triggers() *Triggers {
	return newTriggers(c)
}
func newCollection(db Database, collID string) *Collection {
db.client.path += "/colls/" + collID
db.client.rType = "colls"
db.client.rLink = db.client.path
coll := &Collection{
client: db.client,
db: db,
collID: collID,
}
return coll
}
// newCollections scopes db's client to the collections feed of the database
// (no rLink: the link of the parent resource applies for feed operations —
// presumably; confirm against the client implementation).
func newCollections(db Database) *Collections {
	db.client.path += "/colls"
	db.client.rType = "colls"
	coll := &Collections{
		client: db.client,
		db:     db,
	}
	return coll
}
// Create creates a new collection from the given definition and returns the
// definition the server responds with.
func (c *Collections) Create(ctx context.Context, newColl *CollectionDefinition) (*CollectionDefinition, error) {
	created := &CollectionDefinition{}
	if _, err := c.client.create(ctx, newColl, created); err != nil {
		return nil, err
	}
	return created, nil
}
// ReadAll returns all collections in a database. The response envelope is
// unwrapped via an anonymous struct matching the server's
// "DocumentCollections"/"_count" JSON shape.
func (c *Collections) ReadAll(ctx context.Context) (*CollectionDefinitions, error) {
	data := struct {
		Collections CollectionDefinitions `json:"DocumentCollections,omitempty"`
		Count       int                   `json:"_count,omitempty"`
	}{}
	_, err := c.client.read(ctx, &data)
	return &data.Collections, err
}
// Read returns one collection's definition. On error the returned
// definition is the zero value alongside the error (pre-existing contract).
func (c *Collection) Read(ctx context.Context) (*CollectionDefinition, error) {
	coll := &CollectionDefinition{}
	_, err := c.client.read(ctx, coll)
	return coll, err
}
// Delete removes this collection on the server.
func (c *Collection) Delete(ctx context.Context) (*Response, error) {
	return c.client.delete(ctx)
}
// Replace updates this collection's indexing policy (the id is resent
// unchanged, as the API requires the full resource body). The server's
// response is decoded into ret.
func (c *Collection) Replace(ctx context.Context, i *IndexingPolicy, ret interface{}, opts ...CallOption) (*Response, error) {
	body := struct {
		ID             string          `json:"id"`
		IndexingPolicy *IndexingPolicy `json:"indexingPolicy"`
	}{c.collID, i}
	return c.client.replace(ctx, &body, ret, opts...)
}
|
package backend_controller
import (
"2021/yunsongcailu/yunsong_server/backend/backend_service"
"2021/yunsongcailu/yunsong_server/common"
"2021/yunsongcailu/yunsong_server/param/backend_param"
"2021/yunsongcailu/yunsong_server/web/web_model"
"fmt"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"math/rand"
"os"
"path/filepath"
"strconv"
"time"
)
// bls is the shared backend link service used by all link handlers below.
var bls = backend_service.NewBackendLinkServer()
// PostLinkAll handles the "fetch all links" endpoint: it returns every
// stored link, or a failure message when the lookup fails.
func PostLinkAll(ctx *gin.Context) {
	links, err := bls.GetLinkAll()
	if err != nil {
		common.Failed(ctx, "获取链接数据失败")
		return
	}
	common.Success(ctx, links)
}
// PostLinkEdit handles the "edit link" endpoint: it updates an existing
// link's icon, title, URL and sort order from the JSON request body.
func PostLinkEdit(ctx *gin.Context) {
	var param backend_param.LinkTextParam
	if err := ctx.ShouldBindBodyWith(&param, binding.JSON); err != nil {
		common.Failed(ctx, "获取链接参数失败")
		return
	}
	updated := web_model.LinksModel{
		Id:        param.Id,
		LinkIcon:  param.Icon,
		LinkTitle: param.Title,
		LinkUrl:   param.LinkUrl,
		Sort:      param.Sort,
	}
	if err := bls.EditLinkById(updated); err != nil {
		common.Failed(ctx, "更新链接失败")
		return
	}
	common.Success(ctx, "OK")
}
// PostLinkIconEdit handles a link-icon upload: it stores the file under
// public/webUpload/links/ with a timestamped, random-suffixed name, removes
// the previous icon file if one was supplied, and — when a link id is
// present — records the new icon path in the database. On success the
// stored relative path is returned to the client.
func PostLinkIconEdit(ctx *gin.Context) {
	linkParamData, _ := ctx.Get("linkIconParam")
	if linkParamData == nil {
		common.Failed(ctx, "获取上传参数失败")
		return
	}
	linkParam := linkParamData.(backend_param.LinkIconUploadParam)
	iconFileHeader := linkParam.File
	// Random 6-digit suffix to avoid name collisions within one second.
	code := fmt.Sprintf("%06v", rand.New(rand.NewSource(time.Now().UnixNano())).Int31n(1000000))
	extString := filepath.Ext(iconFileHeader.Filename)
	fileName := "webUpload/links/link" + strconv.FormatInt(time.Now().Unix(), 10) + code + extString
	filePath := "public/" + fileName
	if err := ctx.SaveUploadedFile(iconFileHeader, filePath); err != nil {
		common.Failed(ctx, "保存链接图片失败")
		return
	}
	// Best-effort removal of the previous icon file, if any.
	if oldIcon := linkParam.OldIcon; oldIcon != "" {
		_ = os.Remove("public/" + oldIcon)
	}
	// Update the database only when an existing link id was provided.
	// (The original duplicated the identical success response in both
	// branches of an if/else; collapsed here.)
	if linkParam.LinkId > 0 {
		if err := bls.EditLinkIcon(linkParam.LinkId, fileName); err != nil {
			common.Failed(ctx, "上传成功,但更新数据库失败")
			return
		}
	}
	common.Success(ctx, fileName)
}
// PostLinkRemoveById handles the "delete link" endpoint: it removes the
// link whose id is supplied in the JSON request body.
func PostLinkRemoveById(ctx *gin.Context) {
	var param backend_param.LinkTextParam
	if err := ctx.ShouldBindBodyWith(&param, binding.JSON); err != nil {
		common.Failed(ctx, "获取链接参数失败")
		return
	}
	if err := bls.RemoveLinkById(param.Id); err != nil {
		common.Failed(ctx, "删除链接失败")
		return
	}
	common.Success(ctx, "删除成功")
}
// 添加链接
func PostLinkInsertOne(ctx *gin.Context) {
var linkParam backend_param.LinkTextParam
err := ctx.ShouldBindBodyWith(&linkParam,binding.JSON)
if err != nil {
common.Failed(ctx,"获取链接参数失败")
return
}
var link web_model.LinksModel
link.Id = linkParam.Id
link.LinkIcon = linkParam.Icon
link.LinkUrl = linkParam.LinkUrl
link.LinkTitle = linkParam.Title
link.Sort = linkParam.Sort
err = bls.AddLink(link)
if err != nil {
common.Failed(ctx,"插入数据库失败")
return
}
common.Success(ctx,"添加链接成功")
return
} |
package main
import (
"encoding/json"
"log"
"net/http"
"time"
"go-cqrs/db"
"go-cqrs/messaging"
"go-cqrs/model"
"go-cqrs/util"
uuid "github.com/satori/go.uuid"
)
// woofsHandler handles woof creation: it decodes a WoofRequest, persists a
// new Woof with a fresh UUID and UTC timestamp, publishes it on the message
// bus (best-effort — publish failures are only logged), and responds with
// the new woof's id.
func woofsHandler(w http.ResponseWriter, r *http.Request) {
	req := model.WoofRequest{}
	err := json.NewDecoder(r.Body).Decode(&req)
	if err != nil {
		util.ResponseError(w, http.StatusBadRequest, "Invalid body")
		return
	}
	defer r.Body.Close()
	id, err := uuid.NewV4()
	if err != nil {
		// ID generation failing is a server-side fault, not a client
		// error; the original incorrectly reported 400 here.
		util.ResponseError(w, http.StatusInternalServerError, "Failed to generate woof ID")
		return
	}
	woof := model.Woof{
		ID:        id.String(),
		Body:      req.Message,
		CreatedAt: time.Now().UTC(),
	}
	// Create woof
	err = db.InsertWoof(r.Context(), woof)
	if err != nil {
		util.ResponseError(w, http.StatusInternalServerError, "Failed to create woof")
		return
	}
	// Publish woof; a failed publish does not fail the request.
	err = messaging.PublishWoofMessage(woof)
	if err != nil {
		log.Println(err)
	}
	util.ResponseOk(w, model.WoofResponse{
		ID: woof.ID,
	})
}
|
// 38. Offline dictionary attack on simplified SRP
package main
import (
"bufio"
"bytes"
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"log"
"math/big"
"net"
"os"
"strings"
"sync"
)
// Protocol parameters for the simplified-SRP attack demo.
const (
	file = "passwords.txt" // dictionary of candidate passwords
	addr = "localhost:4000" // address the breaker listens on
	// dhPrime is a large safe prime in hex; the embedded newlines are
	// presumably stripped by ParseBigInt (defined below) — confirm.
	dhPrime = `ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024
e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd
3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec
6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f
24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361
c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552
bb9ed529077096966d670c354e4abc9804f1746c08ca237327fff
fffffffffffff`
	dhGenerator = "2"
)
// main parses the Diffie-Hellman group parameters and runs the offline
// dictionary attack demo over TCP on localhost. Parameter parse failures
// are programmer errors (the inputs are constants), hence panic.
func main() {
	p, err := ParseBigInt(dhPrime, 16)
	if err != nil {
		panic(err)
	}
	g, err := ParseBigInt(dhGenerator, 16)
	if err != nil {
		panic(err)
	}
	if err := breakPassword("tcp", addr, p, g, file); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
// breakPassword runs the remote password protocol and attempts to crack the
// user's password: it starts the man-in-the-middle "server" in a goroutine,
// prompts for the victim's email and password on stdin, connects as the
// client (triggering the handshake that leaks the HMAC), then waits for the
// dictionary attack to finish.
//
// NOTE(review): the goroutine uses log.Fatal on accept/read errors, which
// exits the whole process without cleanup; the listener l is never closed.
// Acceptable for a demo, but worth confirming.
func breakPassword(network, addr string, p, g *big.Int, file string) error {
	server := NewPWBreaker(p, g)
	l, err := server.Listen(network, addr)
	if err != nil {
		return err
	}
	done := make(chan struct{})
	go func() {
		c, err := l.Accept()
		if err != nil {
			log.Fatal(err)
		}
		// A zero-length Read forces the server-side handshake to run.
		if _, err := c.Read([]byte{}); err != nil {
			log.Fatal(err)
		}
		fmt.Print("cracking password...")
		password, err := server.Password(file)
		if err != nil {
			fmt.Println("failure")
		} else {
			fmt.Println(password)
		}
		close(done)
	}()
	var userEmail, userPassword string
	fmt.Print("user email: ")
	if _, err := fmt.Scanln(&userEmail); err != nil {
		return err
	}
	fmt.Print("user password: ")
	if _, err := fmt.Scanln(&userPassword); err != nil {
		return err
	}
	client := NewPWClient(p, g, userEmail, userPassword)
	c, err := client.Dial(network, addr)
	if err != nil {
		return err
	}
	// As above: a zero-length Read triggers the client-side handshake.
	if _, err := c.Read([]byte{}); err != nil {
		return err
	}
	c.Close()
	<-done
	return nil
}
// PWBreaker represents a man-in-the-middle attacking a remote password
// protocol. It stores whatever the handshake reveals about the client:
// email, public key, and the HMAC proof, which together allow an offline
// dictionary attack.
type PWBreaker struct {
	*DHPrivateKey
	clientEmail string
	clientPub   *big.Int
	clientHMAC  []byte
}
// NewPWBreaker returns a new remote password breaker. The key pair is fixed
// rather than random: private exponent 1 with public value y = g, which
// appears to simplify the candidate-secret computation in Password to
// A * g^H(salt||password) mod p — confirm against the protocol write-up.
func NewPWBreaker(p, g *big.Int) *PWBreaker {
	return &PWBreaker{
		DHPrivateKey: &DHPrivateKey{
			DHPublicKey{
				p: p,
				g: g,
				y: g,
			},
			big.NewInt(1),
		},
	}
}
// Password takes a file containing candidate passwords, one per line, and
// returns the line, if any, whose derived proof matches the HMAC captured
// from the client. The handshake must already have recorded the client's
// email, public key and HMAC.
func (server *PWBreaker) Password(file string) (string, error) {
	if server.clientEmail == "" || server.clientPub == nil || server.clientHMAC == nil {
		return "", errors.New("Password: not enough information")
	}
	f, err := os.Open(file)
	if err != nil {
		return "", err
	}
	// The original leaked this file handle.
	defer f.Close()
	// The breaker sent the fixed salt 0x00 during the handshake and uses
	// private exponent 1, so each candidate shared secret is
	// A * g^H(salt||password) mod p.
	salt := []byte{0}
	h1 := sha256.New()
	h2 := hmac.New(sha256.New, salt)
	input := bufio.NewScanner(f)
	for input.Scan() {
		password := input.Text()
		h1.Reset()
		h1.Write(salt)
		h1.Write([]byte(password))
		secret := new(big.Int).SetBytes(h1.Sum([]byte{}))
		secret.Exp(server.g, secret, server.p)
		secret.Mul(server.clientPub, secret)
		secret.Mod(secret, server.p)
		h1.Reset()
		h1.Write(secret.Bytes())
		h2.Reset()
		h2.Write(h1.Sum([]byte{}))
		if bytes.Equal(h2.Sum([]byte{}), server.clientHMAC) {
			return password, nil
		}
	}
	// Distinguish an unreadable dictionary from an exhausted one; the
	// original never checked the scanner's error.
	if err := input.Err(); err != nil {
		return "", err
	}
	return "", errors.New("Password: not found")
}
// Listen prepares the breaker to accept remote password connections by
// wrapping a plain listener in pwListener, whose Accept installs the
// handshake logic on each connection.
func (server *PWBreaker) Listen(network, addr string) (net.Listener, error) {
	l, err := net.Listen(network, addr)
	if err != nil {
		return nil, err
	}
	return pwListener{l, server}, nil
}
// pwBreakerState contains state stored by the breaker in order to execute
// the authentication protocol. (Currently empty; it exists for symmetry
// with pwClientState.)
type pwBreakerState struct{}

// pwBreakerHandshake executes the authentication protocol for the breaker:
// receive the client's login, answer with the fixed salt and public key,
// then record the client's HMAC.
func pwBreakerHandshake(c net.Conn, server *PWBreaker) error {
	state := new(pwBreakerState)
	if err := state.receiveLoginSendResponse(c, server); err != nil {
		return err
	}
	return state.receiveHMACSendOK(c, server)
}
// receiveLoginSendResponse receives login information and sends a salt and
// the server's public key. The salt is the fixed value "00" — Password
// depends on this when rebuilding candidate secrets.
func (x *pwBreakerState) receiveLoginSendResponse(c net.Conn, server *PWBreaker) error {
	var clientEmail, clientPub string
	if _, err := fmt.Fscanf(c, "email: %s\npublic key: %s\n", &clientEmail, &clientPub); err != nil {
		return err
	}
	// Record the client's email address.
	server.clientEmail = clientEmail
	// Record the client's public key (hex-encoded big integer).
	var ok bool
	if server.clientPub, ok = new(big.Int).SetString(clientPub, 16); !ok {
		return errors.New("receiveLoginSendResponse: invalid public key")
	}
	if _, err := fmt.Fprintf(c, "salt: 00\npublic key: %s\n",
		hex.EncodeToString(server.y.Bytes())); err != nil {
		return err
	}
	return nil
}
// receiveHMACSendOK receives the client's HMAC proof, records it for the
// offline attack, and sends an unconditional OK message — the breaker
// never verifies the proof, it only needs to capture it.
func (x *pwBreakerState) receiveHMACSendOK(c net.Conn, server *PWBreaker) error {
	var s string
	var err error
	if _, err = fmt.Fscanf(c, "hmac: %s\n", &s); err != nil {
		return err
	}
	// Record the client's HMAC (hex-decoded).
	if server.clientHMAC, err = hex.DecodeString(s); err != nil {
		return err
	}
	fmt.Fprintln(c, "ok")
	return nil
}
// pwListener represents a socket ready to accept remote password
// connections; it pairs the raw listener with the breaker configuration.
type pwListener struct {
	net.Listener
	server *PWBreaker
}

// Accept accepts a remote password connection on a listening socket,
// wrapping it so the handshake runs lazily on first Read/Write.
func (x pwListener) Accept() (net.Conn, error) {
	c, err := x.Listener.Accept()
	if err != nil {
		return nil, err
	}
	return &pwConn{c, x.server, false, new(sync.Mutex)}, nil
}
// PWClient represents a client implementing a remote password protocol,
// holding its DH key pair and login credentials.
type PWClient struct {
	*DHPrivateKey
	email    string
	password string
}

// NewPWClient returns a new remote password protocol client with a freshly
// generated DH key pair.
func NewPWClient(p, g *big.Int, email, password string) *PWClient {
	return &PWClient{DHGenerateKey(p, g), email, password}
}
// Dial connects the remote password client to a server (here, the breaker),
// wrapping the connection so the handshake runs lazily on first Read/Write.
func (client *PWClient) Dial(network, addr string) (net.Conn, error) {
	c, err := net.Dial(network, addr)
	if err != nil {
		return nil, err
	}
	return &pwConn{c, client, false, new(sync.Mutex)}, nil
}
// pwClientState contains state stored by the client in order to execute the
// authentication protocol: the server-supplied salt and public key.
type pwClientState struct {
	salt      []byte
	serverPub *big.Int
}
// pwClientHandshake executes the authentication protocol for the client:
// send login, receive salt and server public key, then send the HMAC proof
// and await the OK.
func pwClientHandshake(c net.Conn, client *PWClient) error {
	state := new(pwClientState)
	if err := state.sendLoginReceiveResponse(c, client); err != nil {
		return err
	}
	return state.sendHMACReceiveOK(c, client)
}
// sendLoginReceiveResponse sends login information (email and hex-encoded
// public key) and receives a salt and the server's public key.
func (x *pwClientState) sendLoginReceiveResponse(c net.Conn, client *PWClient) error {
	var err error
	if _, err = fmt.Fprintf(c, "email: %s\npublic key: %s\n",
		client.email, hex.EncodeToString(client.y.Bytes())); err != nil {
		return err
	}
	var salt, serverPub string
	if _, err = fmt.Fscanf(c, "salt: %s\npublic key: %s\n", &salt, &serverPub); err != nil {
		return err
	}
	if x.salt, err = hex.DecodeString(salt); err != nil {
		return err
	}
	var ok bool
	if x.serverPub, ok = new(big.Int).SetString(serverPub, 16); !ok {
		return errors.New("ReceiveResponse: invalid public key")
	}
	return nil
}
// sendHMACReceiveOK derives the shared secret as B^(x + H(salt||password))
// mod p, sends HMAC-SHA256(salt, SHA256(secret)) as the proof, and expects
// the server's "ok" acknowledgement.
func (x *pwClientState) sendHMACReceiveOK(c net.Conn, client *PWClient) error {
	h := sha256.New()
	h.Write(x.salt)
	h.Write([]byte(client.password))
	sum := new(big.Int).SetBytes(h.Sum([]byte{}))
	// secret = serverPub^(clientPriv + H(salt||password)) mod p
	secret := new(big.Int).Add(client.x, sum)
	secret.Exp(x.serverPub, secret, client.p)
	h.Reset()
	h.Write(secret.Bytes())
	k := h.Sum([]byte{})
	h = hmac.New(sha256.New, x.salt)
	h.Write(k)
	fmt.Fprintf(c, "hmac: %x\n", h.Sum([]byte{}))
	var s string
	if _, err := fmt.Fscanln(c, &s); err != nil {
		return err
	} else if s != "ok" {
		return errors.New("ReceiveOK: invalid response")
	}
	return nil
}
// pwConn represents the state of a remote password connection.
// The embedded mutex serializes the lazy handshake that runs on the
// first Read or Write.
type pwConn struct {
	net.Conn // underlying transport connection
	config interface{} // *PWClient or *PWBreaker; selects the handshake role
	auth bool // set to true once the handshake has succeeded
	*sync.Mutex // guards auth and the handshake itself
}
// Read reads data from an SRP connection, running the authentication
// handshake first if it has not completed yet.
func (x *pwConn) Read(buf []byte) (int, error) {
	err := x.handshake()
	if err != nil {
		return 0, err
	}
	return x.Conn.Read(buf)
}
// Write writes data to an SRP connection, running the authentication
// handshake first if it has not completed yet.
func (x *pwConn) Write(buf []byte) (int, error) {
	err := x.handshake()
	if err != nil {
		return 0, err
	}
	return x.Conn.Write(buf)
}
// handshake checks if the current remote password connection is
// authenticated. If not, it runs the protocol appropriate to the stored
// configuration (*PWBreaker or *PWClient). On any failure the underlying
// connection is closed before the error is returned.
func (x *pwConn) handshake() error {
	x.Lock()
	defer x.Unlock()
	if x.auth {
		return nil
	}
	var err error
	switch cfg := x.config.(type) {
	case *PWBreaker:
		err = pwBreakerHandshake(x.Conn, cfg)
	case *PWClient:
		err = pwClientHandshake(x.Conn, cfg)
	default:
		err = errors.New("handshake: invalid configuration")
	}
	if err != nil {
		x.Close()
		return err
	}
	x.auth = true
	return nil
}
// DHPublicKey represents the public part of a Diffie-Hellman key pair.
type DHPublicKey struct {
	p *big.Int // prime modulus of the group
	g *big.Int // group generator
	y *big.Int // public value, g^x mod p (see DHGenerateKey)
}
// DHPrivateKey represents a Diffie-Hellman key pair.
type DHPrivateKey struct {
	DHPublicKey // embedded public half (p, g, y)
	x *big.Int // secret exponent
}
// DHGenerateKey generates a private key for the group defined by prime
// modulus p and generator g. It panics if the system random source fails.
func DHGenerateKey(p, g *big.Int) *DHPrivateKey {
	exp, err := rand.Int(rand.Reader, p)
	if err != nil {
		panic(err)
	}
	pub := DHPublicKey{p: p, g: g, y: new(big.Int).Exp(g, exp, p)}
	return &DHPrivateKey{DHPublicKey: pub, x: exp}
}
// Secret takes a public key and returns the shared Diffie-Hellman
// secret, pub.y^x mod p, as a byte slice.
func (priv *DHPrivateKey) Secret(pub *DHPublicKey) []byte {
	shared := new(big.Int).Exp(pub.y, priv.x, priv.p)
	return shared.Bytes()
}
// Public returns the public half of the key pair.
func (priv *DHPrivateKey) Public() *DHPublicKey {
	pub := &priv.DHPublicKey
	return pub
}
// ParseBigInt converts a string to an arbitrary-precision integer.
// Newlines are stripped first, so multi-line number dumps are accepted.
// base must be 0 (auto-detect from prefix) or between 2 and 16; base 1
// is rejected here because big.Int.SetString panics on it, which the
// original guard (base < 0 || base > 16) let through.
func ParseBigInt(s string, base int) (*big.Int, error) {
	if base < 0 || base == 1 || base > 16 {
		return nil, errors.New("ParseBigInt: invalid base")
	}
	s = strings.ReplaceAll(s, "\n", "")
	z, ok := new(big.Int).SetString(s, base)
	if !ok {
		return nil, errors.New("ParseBigInt: invalid string")
	}
	return z, nil
}
|
package models
import (
"time"
"github.com/rabierre/scrooge/db"
)
// Record is a single timestamped amount tagged with a label.
type Record struct {
	Id uint64 // primary key
	Time time.Time // when the record was made
	Amount string // amount, stored as a string
	LabelId uint64 // id of the associated Label (resolved by LabelName)
}
// LabelName resolves the record's LabelId to the label's display name.
// It panics if the database lookup fails.
func (r *Record) LabelName() string {
	row, err := db.Dbm.Get(Label{}, r.LabelId)
	if err != nil {
		panic(err)
	}
	label := row.(*Label)
	return label.Name
}
|
package main
import (
"flag"
"fmt"
"github.com/Yafimk/go-microservices/document-service/service"
"log"
"net/url"
)
const appName = "DOCUMENT_SERVICE"
// main parses and validates the bind-address flag, then starts the
// document web service on the configured host.
func main() {
	host := flag.String("host", "http://localhost:8083", "bind address <protocol://host:port>")
	flag.Parse()
	fmt.Printf("Starting %v on %v\n", appName, *host)
	bindAddress, err := url.Parse(*host)
	if err != nil {
		// log.Fatal(err) instead of log.Fatalf(err.Error()): passing the
		// error text as a format string breaks if it contains '%' verbs.
		log.Fatal(err)
	}
	if bindAddress.Scheme == "" {
		log.Fatalln("Missing protocol in bind address. host address should be in the following format <protocol://host:port>")
	}
	webService := service.NewService(bindAddress.Host)
	webService.Start()
}
|
package field_test
import (
"bytes"
"encoding/hex"
"io"
"testing"
"github.com/tombell/go-serato/serato/field"
)
// TestNewYearField verifies that a well-formed year field decodes
// without error and yields a non-nil field.
func TestNewYearField(t *testing.T) {
	raw, _ := hex.DecodeString("000000170000000A00320030003100380000")
	r := bytes.NewBuffer(raw)
	hdr, err := field.NewHeader(r)
	if err != nil {
		t.Fatalf("expected NewHeader err to be nil, got %v", err)
	}
	year, err := field.NewYearField(hdr, r)
	if err != nil {
		t.Fatalf("expected NewYearField err to be nil, got %v", err)
	}
	if year == nil {
		t.Fatal("expected year to not be nil")
	}
}
// TestNewYearFieldUnexpectedEOF verifies that a truncated payload
// produces io.ErrUnexpectedEOF.
func TestNewYearFieldUnexpectedEOF(t *testing.T) {
	raw, _ := hex.DecodeString("000000170000000A0032003000000")
	r := bytes.NewBuffer(raw)
	hdr, err := field.NewHeader(r)
	if err != nil {
		t.Fatalf("expected NewHeader err to be nil, got %v", err)
	}
	if _, err = field.NewYearField(hdr, r); err != io.ErrUnexpectedEOF {
		t.Fatalf("expected NewYearField err to be io.ErrUnexpectedEOF, got %v", err)
	}
}
// TestNewYearFieldUnexpectedIdentifier verifies that a header carrying
// the wrong field identifier is rejected.
func TestNewYearFieldUnexpectedIdentifier(t *testing.T) {
	raw, _ := hex.DecodeString("0000001E0000000A00320030003100380000")
	r := bytes.NewBuffer(raw)
	hdr, err := field.NewHeader(r)
	if err != nil {
		t.Fatalf("expected NewHeader err to be nil, got %v", err)
	}
	if _, err = field.NewYearField(hdr, r); err != field.ErrUnexpectedIdentifier {
		t.Fatalf("expected NewYearField err to be field.ErrUnexpectedIdentifier, got %v", err)
	}
}
// TestYearValue verifies that Value returns the decoded year string.
func TestYearValue(t *testing.T) {
	raw, _ := hex.DecodeString("000000170000000A00320030003100380000")
	r := bytes.NewBuffer(raw)
	hdr, err := field.NewHeader(r)
	if err != nil {
		t.Fatalf("expected NewHeader err to be nil, got %v", err)
	}
	year, err := field.NewYearField(hdr, r)
	if err != nil {
		t.Fatalf("expected NewYearField err to be nil, got %v", err)
	}
	const expected = "2018"
	if actual := year.Value(); actual != expected {
		t.Fatalf("expected value to be %v, got %v", expected, actual)
	}
}
// TestYearString verifies that String returns the decoded year string.
func TestYearString(t *testing.T) {
	raw, _ := hex.DecodeString("000000170000000A00320030003100380000")
	r := bytes.NewBuffer(raw)
	hdr, err := field.NewHeader(r)
	if err != nil {
		t.Fatalf("expected NewHeader err to be nil, got %v", err)
	}
	year, err := field.NewYearField(hdr, r)
	if err != nil {
		t.Fatalf("expected NewYearField err to be nil, got %v", err)
	}
	const expected = "2018"
	if actual := year.String(); actual != expected {
		t.Fatalf("expected value to be %v, got %v", expected, actual)
	}
}
|
package leetcode
//
//func smallerNumbersThanCurrent(nums []int) []int {
// ret := [100]int{}
//
// l := len(nums)
// for i :=0; i<l ; i++ {
// ret[i] = nums[i]
// }
//
// r := make([]int,l)
// for i := 0; i < 100; i++ {
//
// }i
//
//}
|
package stage
import (
"context"
"github.com/werf/werf/pkg/build/builder"
"github.com/werf/werf/pkg/config"
"github.com/werf/werf/pkg/container_runtime"
"github.com/werf/werf/pkg/util"
)
// GenerateBeforeSetupStage returns a BeforeSetupStage when the image
// configuration yields a builder with non-empty beforeSetup
// instructions; otherwise it returns nil.
func GenerateBeforeSetupStage(ctx context.Context, imageBaseConfig *config.StapelImageBase, gitPatchStageOptions *NewGitPatchStageOptions, baseStageOptions *NewBaseStageOptions) *BeforeSetupStage {
	b := getBuilder(imageBaseConfig, baseStageOptions)
	if b == nil || b.IsBeforeSetupEmpty(ctx) {
		return nil
	}
	return newBeforeSetupStage(b, gitPatchStageOptions, baseStageOptions)
}
// newBeforeSetupStage wires a builder into a BeforeSetupStage backed by
// the shared user-with-git-patch stage machinery.
func newBeforeSetupStage(builder builder.Builder, gitPatchStageOptions *NewGitPatchStageOptions, baseStageOptions *NewBaseStageOptions) *BeforeSetupStage {
	return &BeforeSetupStage{
		UserWithGitPatchStage: newUserWithGitPatchStage(builder, BeforeSetup, gitPatchStageOptions, baseStageOptions),
	}
}
// BeforeSetupStage applies the user's beforeSetup build instructions on
// top of the embedded git-patch stage behavior.
type BeforeSetupStage struct {
	*UserWithGitPatchStage
}
// GetDependencies returns the cache signature for this stage: a hash of
// the builder's beforeSetup checksum and the stage-dependency checksum.
func (s *BeforeSetupStage) GetDependencies(ctx context.Context, c Conveyor, _, _ container_runtime.LegacyImageInterface) (string, error) {
	checksum, err := s.getStageDependenciesChecksum(ctx, c, BeforeSetup)
	if err != nil {
		return "", err
	}
	return util.Sha256Hash(s.builder.BeforeSetupChecksum(ctx), checksum), nil
}
// PrepareImage runs the embedded stage's preparation first, then applies
// the builder's beforeSetup instructions to the image's build container.
func (s *BeforeSetupStage) PrepareImage(ctx context.Context, c Conveyor, prevBuiltImage, image container_runtime.LegacyImageInterface) error {
	err := s.UserWithGitPatchStage.PrepareImage(ctx, c, prevBuiltImage, image)
	if err != nil {
		return err
	}
	return s.builder.BeforeSetup(ctx, image.BuilderContainer())
}
|
package caaa
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00400103 is the XML document wrapper for the
// caaa.004.001.03 AcceptorCompletionAdviceResponse message.
type Document00400103 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:caaa.004.001.03 Document"`
	Message *AcceptorCompletionAdviceResponseV03 `xml:"AccptrCmpltnAdvcRspn"`
}
// AddMessage allocates the message payload, attaches it to the document,
// and returns it for further population.
func (d *Document00400103) AddMessage() *AcceptorCompletionAdviceResponseV03 {
	msg := &AcceptorCompletionAdviceResponseV03{}
	d.Message = msg
	return msg
}
// The AcceptorCompletionAdviceResponse message is sent by the acquirer (or
// its agent) to acknowledge to the acceptor (or its agent) the outcome of
// the payment transaction, and to transfer the financial data of the
// transaction contained in the completion advice.
type AcceptorCompletionAdviceResponseV03 struct {
	// Completion advice response message management information.
	Header *iso20022.Header8 `xml:"Hdr"`
	// Information related to the completion advice response.
	CompletionAdviceResponse *iso20022.AcceptorCompletionAdviceResponse3 `xml:"CmpltnAdvcRspn"`
	// Trailer of the message containing a MAC (message authentication code).
	SecurityTrailer *iso20022.ContentInformationType8 `xml:"SctyTrlr"`
}
// AddHeader allocates the message-management header, attaches it, and
// returns it for further population.
func (a *AcceptorCompletionAdviceResponseV03) AddHeader() *iso20022.Header8 {
	hdr := &iso20022.Header8{}
	a.Header = hdr
	return hdr
}
// AddCompletionAdviceResponse allocates the advice-response body,
// attaches it, and returns it for further population.
func (a *AcceptorCompletionAdviceResponseV03) AddCompletionAdviceResponse() *iso20022.AcceptorCompletionAdviceResponse3 {
	rsp := &iso20022.AcceptorCompletionAdviceResponse3{}
	a.CompletionAdviceResponse = rsp
	return rsp
}
// AddSecurityTrailer allocates the security trailer, attaches it, and
// returns it for further population.
func (a *AcceptorCompletionAdviceResponseV03) AddSecurityTrailer() *iso20022.ContentInformationType8 {
	trl := &iso20022.ContentInformationType8{}
	a.SecurityTrailer = trl
	return trl
}
|
package main
import (
"fmt"
"testing"
)
// Test_BoardingPass_SeatId checks row/column/seat-id decoding for two
// sample passes and pins the final part-1 answer. The repeated
// expected/actual boilerplate is folded into one check closure; the
// emitted failure messages are unchanged.
func Test_BoardingPass_SeatId(t *testing.T) {
	check := func(name string, expected, actual int) {
		if expected != actual {
			t.Errorf("%s expected %s, got %s", name, fmt.Sprint(expected), fmt.Sprint(actual))
		}
	}
	check("Row", 70, NewBoardingPass("BFFFBBFRRR").Row())
	check("Column", 7, NewBoardingPass("BFFFBBFRRR").Column())
	check("SeatId", 567, NewBoardingPass("BFFFBBFRRR").SeatId())
	check("Row", 102, NewBoardingPass("BBFFBBFRLL").Row())
	check("Column", 4, NewBoardingPass("BBFFBBFRLL").Column())
	check("SeatId", 820, NewBoardingPass("BBFFBBFRLL").SeatId())
	check("SeatId", 955, part1())
}
|
package main
import (
"fmt"
)
// main demonstrates multiply on a sample pair of decimal strings.
func main() {
	product := multiply("123", "100")
	fmt.Println(product)
}
// multiply returns the product of two non-negative decimal integers
// supplied as strings, using schoolbook digit-by-digit multiplication
// (no conversion to built-in integer types, so arbitrarily long inputs
// work). Fixes over the original: no shadowing of num1/num2 inside the
// loops, a []byte result instead of the vet-flagged string(int)
// conversion, and no quadratic string concatenation.
func multiply(num1 string, num2 string) string {
	if num1 == "0" || num2 == "0" {
		return "0"
	}
	n1, n2 := len(num1), len(num2)
	// An n1-digit times an n2-digit number has at most n1+n2 digits.
	res := make([]int, n1+n2)
	for i := n1 - 1; i >= 0; i-- {
		d1 := int(num1[i] - '0')
		for j := n2 - 1; j >= 0; j-- {
			d2 := int(num2[j] - '0')
			sum := res[i+j+1] + d1*d2
			res[i+j] += sum / 10
			res[i+j+1] = sum % 10
		}
	}
	out := make([]byte, 0, len(res))
	for k, v := range res {
		if k == 0 && v == 0 {
			// At most one leading zero can occur, and only at index 0.
			continue
		}
		out = append(out, byte(v)+'0')
	}
	return string(out)
}
|
package main
import "flag"
// config holds settings that are set by the sysadmin running your application.
type config struct {
	address string // address the server listens on, host:port
}
// getConfig parses command-line flags into a config value.
// It calls flag.Parse, so it should run once, early in main.
func getConfig() config {
	var c config
	flag.StringVar(&c.address, "address", "localhost:8085", "The address that the server will listen on.")
	flag.Parse()
	return c
}
|
package models
import (
u "businessense/utils"
"fmt"
"github.com/jinzhu/gorm"
)
// PainPoint is a named pain-point entity persisted via gorm.
type PainPoint struct {
	gorm.Model
	Name string `json:"name"`
}
// Create persists the pain point and returns a response map containing
// a success message and the stored entity under "painpoint".
func (painpoint *PainPoint) Create() map[string]interface{} {
	GetDB().Create(painpoint)
	resp := u.Message(true, "Pain Point has been created")
	resp["painpoint"] = painpoint
	return resp
}
// GetPainpoints returns pain points whose name contains the search
// string (case-insensitive). On a query error it logs the error and
// returns nil; with no matches it returns an empty, non-nil slice.
func GetPainpoints(search string) []*PainPoint {
	painpoints := make([]*PainPoint, 0)
	pattern := "%" + search + "%"
	err := GetDB().Where("UPPER(name) LIKE UPPER(?)", pattern).Find(&painpoints).Error
	if err != nil {
		fmt.Println(err)
		return nil
	}
	return painpoints
}
|
package cloud
import (
"encoding/json"
"fmt"
"net/http"
"strings"
)
const discoveryURI = "https://wap.tplinkcloud.com?token=%s"
// tpLinkDeviceList mirrors the "result" object of a getDeviceList response.
type tpLinkDeviceList struct {
	DeviceList []TPLinkDevice `json:"deviceList"`
}
// tpLinkGetDeviceListResponse is the top-level JSON envelope returned
// by the cloud getDeviceList call.
type tpLinkGetDeviceListResponse struct {
	Result tpLinkDeviceList `json:"result"`
	ErrorCode int `json:"error_code"` // non-zero indicates a cloud-side failure
	Message string `json:"msg"` // human-readable error text accompanying ErrorCode
}
// GetDeviceList returns the list of devices registered to the account
// identified by the cloud client's auth token. It returns an error when
// the request fails, the response cannot be decoded, or the cloud
// reports a non-zero error code.
func GetDeviceList() (devices []TPLinkDevice, err error) {
	var (
		req     *http.Request
		resp    *http.Response
		payload tpLinkGetDeviceListResponse
	)
	uri := fmt.Sprintf(discoveryURI, theCloud.token)
	if req, err = http.NewRequest("POST", uri, strings.NewReader(`{"method":"getDeviceList"}`)); err != nil {
		return
	}
	req.Header.Add("Content-Type", "application/json")
	if resp, err = theCloud.client.Do(req); err != nil {
		return
	}
	// Close the *response* body. The original closed req.Body, which leaked
	// the response body and prevented the transport from reusing the connection.
	defer closer(resp.Body)
	if err = json.NewDecoder(resp.Body).Decode(&payload); err != nil {
		return
	}
	// Surface cloud-side failures instead of silently returning an empty list.
	if payload.ErrorCode != 0 {
		err = fmt.Errorf("getDeviceList: cloud error %d: %s", payload.ErrorCode, payload.Message)
		return
	}
	devices = payload.Result.DeviceList
	return
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.