text stringlengths 11 4.05M |
|---|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Package clusterstatus demonstrates using the Elasticsearch client for Go with Google Cloud Functions.
//
// Deploy the function with the gcloud command:
//
// $ go mod vendor
// $ gcloud functions deploy clusterstatus \
// --entry-point Health \
// --runtime go111 \
// --trigger-http \
// --memory 128MB \
// --set-env-vars ELASTICSEARCH_URL=https://...cloud.es.io:9243
//
// Invoke your function over HTTP:
//
// $ curl https://...cloudfunctions.net/clusterstatus
//
package clusterstatus
import (
"encoding/json"
"fmt"
"log"
"net/http"
"github.com/elastic/go-elasticsearch/v8"
)
// ES holds a reference to the Elasticsearch client. It is initialized once
// per function instance (in init) so the connection is reused across
// invocations instead of being re-created per request.
//
// See: https://cloud.google.com/functions/docs/concepts/go-runtime#one-time_initialization
//
var ES *elasticsearch.Client
// init configures logging and creates the shared Elasticsearch client.
// A failure here aborts the instance: without a client no request can be served.
func init() {
	// Drop timestamp/file prefixes; the hosting platform adds its own.
	log.SetFlags(0)

	client, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatalf("Error: %s", err)
	}
	ES = client
}
// Health returns the status of the cluster (red, yellow, green) as a small
// JSON document. Upstream (Elasticsearch) failures map to 502; malformed
// responses map to 500.
func Health(w http.ResponseWriter, r *http.Request) {
	res, err := ES.Cluster.Health()
	if err != nil {
		log.Printf("Error getting response from Elasticsearch: %s", err)
		http.Error(w, `{"status" : "error"}`, http.StatusBadGateway)
		return
	}
	defer res.Body.Close()

	if res.IsError() {
		log.Printf("HTTP response error: %s", res.Status())
		http.Error(w, `{"status" : "error"}`, http.StatusBadGateway)
		return
	}

	var j map[string]interface{}
	if err := json.NewDecoder(res.Body).Decode(&j); err != nil {
		log.Printf("Error parsing the HTTP response body: %s", err)
		http.Error(w, `{"status" : "error"}`, http.StatusInternalServerError)
		return
	}

	// Guard the type assertion: a missing or non-string "status" field
	// previously panicked the whole function invocation.
	status, ok := j["status"].(string)
	if !ok {
		log.Printf("Unexpected response body: %v", j)
		http.Error(w, `{"status" : "error"}`, http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	fmt.Fprintf(w, `{"status":%q}`, status)
}
|
package main
import (
"MyCart/cmd/Services"
"errors"
"fmt"
"testing"
)
// TestAddProduct exercises Services.AddProduct with valid, unknown, and
// empty inputs. Expected errors are compared by message: two values from
// errors.New are never equal under == even when their text matches, so the
// original comparison could only ever pass for the nil cases.
func TestAddProduct(t *testing.T) {
	var tests = []struct {
		userValue string
		userName  string
		expected  error
	}{
		{"Shirt", "Linu", nil}, // already present user
		{"qwert", "Linu", errors.New("Sorry!! Entered Product is not present")}, // product not in catalog
		{"", "Tina", errors.New("Data Not Found")},                              // product empty
		{"LG", "", errors.New("Data Not Found")},                                // user empty
		{"Sony", "Rahul", nil},                                                  // new user
	}
	for _, test := range tests {
		testname := fmt.Sprintf("%s", test.userValue)
		t.Run(testname, func(t *testing.T) {
			ans := Services.AddProduct(test.userValue, test.userName)
			// Equal when both nil, or both non-nil with identical messages.
			match := ans == test.expected
			if !match && ans != nil && test.expected != nil {
				match = ans.Error() == test.expected.Error()
			}
			if !match {
				// t.Error already marks the test failed; no separate t.Fail needed.
				t.Error("Test Failed: received:", ans)
			} else {
				t.Log("Test Pass for ", test)
			}
		})
	}
}
// TestRemoveProduct exercises Services.RemoveProduct. Two fixes over the
// original: it previously called Services.AddProduct (copy-paste bug), and
// it compared errors.New values with ==, which is always false for two
// distinct non-nil errors — messages are compared instead.
func TestRemoveProduct(t *testing.T) {
	var tests = []struct {
		userValue string
		userName  string
		expected  error
	}{
		{"Shirt", "Linu", nil}, // already present user
		{"qwert", "Linu", errors.New("This Product was not present in cart")}, // product not in cart
		{"", "Tina", errors.New("Data Not Found")},                            // product empty
		{"LG", "", errors.New("Data Not Found")},                              // user empty
		{"Sony", "Rahul", nil},                                                // new user
	}
	for _, test := range tests {
		testname := fmt.Sprintf("%s", test.userValue)
		t.Run(testname, func(t *testing.T) {
			// Was Services.AddProduct — this test must exercise removal.
			ans := Services.RemoveProduct(test.userValue, test.userName)
			match := ans == test.expected
			if !match && ans != nil && test.expected != nil {
				match = ans.Error() == test.expected.Error()
			}
			if !match {
				t.Error("Test Failed: received:", ans)
			} else {
				t.Log("Test Pass for ", test)
			}
		})
	}
}
|
package main
import (
"log"
"net/http"
"github.com/gorilla/mux"
)
// buildCityController returns an HTTP handler that fetches a city through
// the given use case and renders the result with cityPresenter. Any panic
// raised while handling the request is logged and rendered through
// errorPresenter instead of crashing the server.
func buildCityController(usecase *cityUseCase, cityPresenter *cityPresenter, errorPresenter *errorPresenter) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)

		defer func() {
			if cause := recover(); cause != nil {
				log.Print("Error occurred in city controller. params: ", vars, ", error: ", cause)
				errorPresenter.write(w, cause)
			}
		}()

		input := newFetchCityInput(vars)
		uc := *usecase
		cityPresenter.write(w, uc.fetch(input))
	}
}
|
package service
import (
entity "github.com/Surafeljava/Court-Case-Management-System/Entity"
"github.com/Surafeljava/Court-Case-Management-System/caseUse"
)
// AdminCourtServiceImpl implements admin and court management on top of a
// caseUse.CourtRepository; every method delegates to the repository.
type AdminCourtServiceImpl struct {
	courtRepo caseUse.CourtRepository
}

// NewAdminCourtServiceImpl wraps the given repository in a service.
func NewAdminCourtServiceImpl(csRepo caseUse.CourtRepository) *AdminCourtServiceImpl {
	return &AdminCourtServiceImpl{courtRepo: csRepo}
}

// Court returns the stored court record.
func (acs *AdminCourtServiceImpl) Court() (*entity.Court, []error) {
	return acs.courtRepo.Court()
}

// Admin returns the stored admin record.
func (acs *AdminCourtServiceImpl) Admin() (*entity.Admin, []error) {
	return acs.courtRepo.Admin()
}

// CreateCourt persists a new court via the repository.
func (acs *AdminCourtServiceImpl) CreateCourt(court *entity.Court) (*entity.Court, []error) {
	return acs.courtRepo.CreateCourt(court)
}

// UpdateCourt is a stub: it always returns (nil, nil).
// NOTE(review): callers get no error signal from this — confirm the stub
// is intentional before relying on it.
func (acs *AdminCourtServiceImpl) UpdateCourt(court *entity.Court) (*entity.Court, []error) {
	return nil, nil
}

// DeleteCourt(id int) error

// CreateAdmin persists a new admin via the repository.
func (acs *AdminCourtServiceImpl) CreateAdmin(admin *entity.Admin) (*entity.Admin, []error) {
	return acs.courtRepo.CreateAdmin(admin)
}
|
package servers
import (
"time"
)
// CreateServerReq describes the payload for a server-creation request.
type CreateServerReq struct {
	// NOTE(review): "Nane" is almost certainly a typo for "Name", but the
	// field is exported — renaming would break existing callers, so it is
	// flagged here rather than changed. TODO: confirm and migrate.
	Nane                 string
	Description          string
	GroupId              string
	SourceServerId       string
	IsManagedOS          bool
	PrimaryDns           string
	SecondaryDns         string
	IpAddress            string
	Password             string
	SourceServerPassword string
	Cpu                  int
	CpuAutoscalePolicyId string
	MemoryGB             int
	Type                 string
	StorageType          string
	AntiAffinityPolicyId string
	CustomFields         []CustomFieldDef
	AdditionalDisks      []AdditionalDiskDef
	// Ttl: presumably the absolute expiry time for a temporary server —
	// TODO confirm against the API this struct is sent to.
	Ttl      time.Time
	Packages []PackageDef
}
// CustomFieldDef is a key/value custom metadata field attached to a server.
type CustomFieldDef struct {
	Id    string
	Value string
}

// AdditionalDiskDef describes an extra disk to attach at creation time.
type AdditionalDiskDef struct {
	Path   string
	SizeGB int
	Type   string
}

// PackageDef identifies a package to run on the new server together with
// its parameters.
type PackageDef struct {
	PackageId  string
	Parameters map[string]string
}
|
package base
import (
"context"
"net"
"strings"
"testing"
"time"
)
// dialer adapts net.Dial to the Dial interface that OutputMinerRPC expects.
type dialer struct {
}

// Dial opens a plain network connection; it adds no timeout of its own
// (deadlines are expected to come from the caller's context).
func (d dialer) Dial(network, address string) (net.Conn, error) {
	return net.Dial(network, address)
}
// TestOutputMinerRPC spins up a local TCP server that mimics a miner RPC
// endpoint (responding "OK\x00", optionally after a delay) and checks that
// OutputMinerRPC succeeds normally and fails when the context deadline is
// shorter than the server's artificial delay.
func TestOutputMinerRPC(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Listen from the test goroutine: t.Fatal must not be called from a
	// spawned goroutine (the original did), and doing it here also removes
	// the hand-rolled "connected" signaling channel.
	l, err := net.Listen("tcp", ":4028")
	if err != nil {
		t.Fatal(err)
	}
	defer l.Close()

	go func() {
		for {
			c, err := l.Accept()
			if err != nil {
				return // listener closed; the test is finishing
			}
			go func() {
				defer c.Close()
				buf := make([]byte, 1024)
				// Use the read count: string(buf) previously included a KiB
				// of NUL padding in the logged request.
				n, _ := c.Read(buf)
				input := string(buf[:n])
				t.Log("req: ", input)
				if strings.Contains(input, "timeout") {
					t.Log("Make timeout")
					time.Sleep(time.Millisecond * 500)
				}
				t.Log("Write response")
				c.Write([]byte("OK\x00"))
				t.Log("Written response")
			}()
		}
	}()

	t.Run("requests", func(t *testing.T) {
		t.Run("normal", func(t *testing.T) {
			ctx, cancel := context.WithTimeout(ctx, time.Millisecond*10000)
			defer cancel()
			ret, err := OutputMinerRPC(ctx, dialer{}, "test", "")
			t.Log(string(ret))
			if err != nil {
				t.Fatal(err)
			}
		})
		t.Run("timeout", func(t *testing.T) {
			ctx, cancel := context.WithTimeout(ctx, time.Millisecond*100)
			defer cancel()
			_, err := OutputMinerRPC(ctx, dialer{}, "timeout", "")
			t.Log(err)
			if err == nil {
				t.Fatal("Expected to timeout error")
			}
		})
	})
}
|
package client
import (
"fmt"
"sync"
"github.com/Mvilstrup/mosquito/communication/errors"
"github.com/Mvilstrup/mosquito/communication/messages"
zmq "github.com/alecthomas/gozmq"
)
// Client is a ZeroMQ REQ client. com_lock serializes each send/receive
// pair on the shared socket, because REQ sockets require strict
// request/reply alternation.
type Client struct {
	// ZeroMQ specific variables
	id      string
	context *zmq.Context // owning ZeroMQ context, closed last
	socket  *zmq.Socket  // REQ socket connected to the coordinator endpoint
	com_lock sync.Mutex
}
// New returns a Client connected to endpoint with the given identity.
// Partially-created ZeroMQ resources are released on every failure path,
// and the Connect error — previously ignored, which could hand back a
// client with an unusable socket — is now checked.
func New(endpoint, id string) (*Client, error) {
	context, err := zmq.NewContext()
	if err != nil {
		return nil, com_errors.ErrZMQContext
	}
	socket, err := context.NewSocket(zmq.REQ)
	if err != nil {
		context.Close() // don't leak the context when socket creation fails
		return nil, com_errors.ErrZMQConnect
	}
	if err := socket.Connect(endpoint); err != nil {
		socket.Close()
		context.Close()
		return nil, com_errors.ErrZMQConnect
	}
	return &Client{
		context: context,
		socket:  socket,
		id:      id,
	}, nil
}
// Start is a placeholder for a future receive loop; it currently does
// nothing and always returns nil.
func (client *Client) Start() (err error) {
	// Wait for messages
	return nil
}
// Send encodes message, sends it over the REQ socket and waits for the
// reply, logging (but not returning) any failure — the method's signature
// has no error return, so callers are unaffected. The mutex guarantees the
// strict send/recv alternation that REQ sockets require.
func (client *Client) Send(message *messages.Message) {
	client.com_lock.Lock()
	defer client.com_lock.Unlock()

	msg, err := messages.EncodeMessage(message)
	if err != nil {
		// Previously this error was silently discarded and a garbage
		// payload could be sent down the socket.
		fmt.Printf(err.Error())
		return
	}
	if err := client.socket.Send(msg, 0); err != nil {
		fmt.Printf(err.Error())
		return
	}
	// Wait for reply:
	response, err := client.socket.Recv(0)
	if err != nil {
		fmt.Printf(com_errors.ErrZMQRecieve.Error())
		return
	}
	status, err := messages.DecodeMessage(response)
	if err != nil {
		fmt.Printf(err.Error())
	}
	fmt.Printf("%#v", status)
}
// Request sends notification over the REQ socket and returns the decoded
// reply. Encode, send and receive failures are now returned to the caller
// instead of being printed and followed by decoding a garbage response.
func (client *Client) Request(notification *messages.Message) (message *messages.Message, err error) {
	client.com_lock.Lock()
	defer client.com_lock.Unlock()

	msg, err := messages.EncodeMessage(notification)
	if err != nil {
		return nil, err
	}
	if err = client.socket.Send(msg, 0); err != nil {
		return nil, err
	}
	// Wait for reply:
	response, err := client.socket.Recv(0)
	if err != nil {
		return nil, com_errors.ErrZMQRecieve
	}
	message, err = messages.DecodeMessage(response)
	return
}
// Close releases the socket first and then the owning context (the defers
// run in LIFO order). Receiver renamed for consistency with the other
// Client methods.
func (client *Client) Close() {
	defer client.context.Close()
	defer client.socket.Close()
}
|
package goosedb
import (
"database/sql"
"errors"
"fmt"
"path/filepath"
"sort"
"github.com/kevinburke/goose/lib/goose"
_ "github.com/go-sql-driver/mysql"
_ "github.com/lib/pq"
_ "github.com/mattn/go-sqlite3"
_ "github.com/ziutek/mymysql/godrv"
)
// ErrTableDoesNotExist is returned by dialect version queries when the
// goose_db_version table has not been created yet.
var ErrTableDoesNotExist = errors.New("goosedb: table does not exist")

// migrationSorter sorts []*goose.Migration by version via pkg sort.
type migrationSorter []*goose.Migration

// sort.Interface implementation.
func (ms migrationSorter) Len() int           { return len(ms) }
func (ms migrationSorter) Swap(i, j int)      { ms[i], ms[j] = ms[j], ms[i] }
func (ms migrationSorter) Less(i, j int) bool { return ms[i].Version < ms[j].Version }

// Sort orders the migrations by version — ascending when direction is
// true, descending otherwise — and then threads the Next/Previous version
// chain through the slice in that order.
func (ms migrationSorter) Sort(direction bool) {
	if direction {
		sort.Sort(ms)
	} else {
		sort.Sort(sort.Reverse(ms))
	}

	// Link each migration to its neighbours in the sorted order.
	for i := range ms {
		if i == 0 {
			ms[i].Previous = -1
			continue
		}
		ms[i].Previous = ms[i-1].Version
		ms[i-1].Next = ms[i].Version
	}
}
// RunMigrationsOnDb runs migrations on a specific database instance,
// moving the schema from its current version to target (up or down,
// depending on which is larger).
func RunMigrationsOnDb(conf *DBConf, migrationsDir string, target int64, db *sql.DB) (err error) {
	current, err := EnsureDBVersion(conf, db)
	if err != nil {
		return err
	}
	migrations, err := goose.CollectMigrations(migrationsDir, current, target)
	if err != nil {
		return err
	}
	if len(migrations) == 0 {
		fmt.Printf("goose: no migrations to run. current version: %d\n", current)
		return nil
	}
	// Order the migrations in the direction of travel and link the
	// Next/Previous version chain through them.
	ms := migrationSorter(migrations)
	direction := current < target
	ms.Sort(direction)
	fmt.Printf("goose: migrating db environment '%v', current version: %d, target: %d\n",
		conf.Env, current, target)
	for _, m := range ms {
		switch filepath.Ext(m.Source) {
		case ".sql":
			err = runSQLMigration(conf, db, m.Source, m.Version, direction)
		}
		// NOTE(review): sources with other extensions are silently skipped
		// yet still reported "OK" below — confirm that is intended.
		if err != nil {
			return fmt.Errorf("FAIL %w, quitting migration", err)
		}
		fmt.Println("OK ", filepath.Base(m.Source))
	}
	return nil
}
// RunMigrations opens a database connection from conf, migrates it to
// target, and closes the connection when done.
func RunMigrations(conf *DBConf, migrationsDir string, target int64) error {
	db, err := OpenDBFromDBConf(conf)
	if err != nil {
		return err
	}
	defer db.Close()
	return RunMigrationsOnDb(conf, migrationsDir, target, db)
}
// GetDBVersion is a wrapper for EnsureDBVersion for callers that don't
// already have their own DB instance. It returns -1 on any failure.
func GetDBVersion(conf *DBConf) (int64, error) {
	db, err := OpenDBFromDBConf(conf)
	if err != nil {
		return -1, err
	}
	defer db.Close()
	version, err := EnsureDBVersion(conf, db)
	if err != nil {
		return -1, err
	}
	return version, nil
}
// createVersionTable creates the goose_db_version table inside a single
// transaction and seeds it with an applied version-0 row so that
// EnsureDBVersion always finds at least one record.
func createVersionTable(conf *DBConf, db *sql.DB) error {
	txn, err := db.Begin()
	if err != nil {
		return err
	}

	dialect := conf.Driver.Dialect
	if _, err := txn.Exec(dialect.createVersionTableSql()); err != nil {
		txn.Rollback()
		return err
	}

	// Initial row: version 0, marked applied.
	if _, err := txn.Exec(dialect.insertVersionSql(), 0, true); err != nil {
		txn.Rollback()
		return err
	}
	return txn.Commit()
}
// EnsureDBVersion retrieves the current version for this DB, creating and
// initializing the DB version table if it doesn't exist.
//
// The version table is an append-only history: the most recent record for
// each migration says whether it is applied or rolled back, and the first
// version found in an applied state is the current version.
func EnsureDBVersion(conf *DBConf, db *sql.DB) (int64, error) {
	rows, err := conf.Driver.Dialect.dbVersionQuery(db)
	if err != nil {
		if err == ErrTableDoesNotExist {
			return 0, createVersionTable(conf, db)
		}
		return 0, err
	}
	defer rows.Close()

	// Versions whose latest record is a rollback; older records for these
	// versions are ignored. A map replaces the original O(n^2) slice scan.
	skipped := make(map[int64]bool)
	for rows.Next() {
		var row goose.MigrationRecord
		if err = rows.Scan(&row.VersionId, &row.IsApplied); err != nil {
			return 0, fmt.Errorf("error scanning rows: %w", err)
		}
		if skipped[row.VersionId] {
			continue
		}
		// The first version seen in an applied state is current.
		if row.IsApplied {
			return row.VersionId, nil
		}
		// Latest record for this version is a rollback; skip older ones.
		skipped[row.VersionId] = true
	}
	// Was: panic("failure in EnsureDBVersion()"). Surface iteration errors
	// and the no-applied-version case to the caller instead of crashing.
	if err := rows.Err(); err != nil {
		return 0, fmt.Errorf("error reading rows: %w", err)
	}
	return 0, errors.New("goosedb: no applied migration version found")
}
|
package resource
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
)
// TestDurationForPod checks resource-duration accounting for terminated
// pod containers. The second case was renamed: both cases previously
// shared the name "ContainerWithCPURequest", so `go test -run` could not
// target them individually and the runner auto-suffixed the duplicate.
func TestDurationForPod(t *testing.T) {
	now := time.Now()
	tests := []struct {
		name string
		pod  *corev1.Pod
		want wfv1.ResourcesDuration
	}{
		{"Empty", &corev1.Pod{}, wfv1.ResourcesDuration{}},
		// 2 CPUs for 1 minute -> 2 CPU-minutes; memory is accounted for the
		// same 1 minute even without an explicit request (presumably a
		// default in DurationForPod — TODO confirm).
		{"ContainerWithCPURequest", &corev1.Pod{
			Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "main", Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceCPU: resource.MustParse("2000m"),
				},
			}}}},
			Status: corev1.PodStatus{
				ContainerStatuses: []corev1.ContainerStatus{
					{
						Name: "main",
						State: corev1.ContainerState{
							Terminated: &corev1.ContainerStateTerminated{
								StartedAt:  metav1.Time{Time: now.Add(-1 * time.Minute)},
								FinishedAt: metav1.Time{Time: now},
							},
						},
					},
				},
			},
		}, wfv1.ResourcesDuration{
			corev1.ResourceCPU:    wfv1.NewResourceDuration(2 * time.Minute),
			corev1.ResourceMemory: wfv1.NewResourceDuration(1 * time.Minute),
		}},
		// 2 CPUs and 1 GPU for 3 minutes.
		{"ContainerWithCPURequestAndGPULimit", &corev1.Pod{
			Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "main", Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceCPU: resource.MustParse("2000m"),
				},
				Limits: corev1.ResourceList{
					corev1.ResourceName("nvidia.com/gpu"): resource.MustParse("1"),
				},
			}}}},
			Status: corev1.PodStatus{
				ContainerStatuses: []corev1.ContainerStatus{
					{
						Name: "main",
						State: corev1.ContainerState{
							Terminated: &corev1.ContainerStateTerminated{
								StartedAt:  metav1.Time{Time: now.Add(-3 * time.Minute)},
								FinishedAt: metav1.Time{Time: now},
							},
						},
					},
				},
			},
		}, wfv1.ResourcesDuration{
			corev1.ResourceCPU:                    wfv1.NewResourceDuration(6 * time.Minute),
			corev1.ResourceMemory:                 wfv1.NewResourceDuration(3 * time.Minute),
			corev1.ResourceName("nvidia.com/gpu"): wfv1.NewResourceDuration(3 * time.Minute),
		}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := DurationForPod(tt.pod)
			assert.Equal(t, tt.want, got)
		})
	}
}
|
package common
// PageInfo carries pagination metadata for paged list responses.
type PageInfo struct {
	TotalPages int32 // total number of pages available
	TotalCount int32 // total number of items across all pages
}
|
package projection
import (
"bytes"
"github.com/satori/go.uuid"
"github.com/tobyjsullivan/ues-command-api/passwords"
)
// EmailIdentity is a projection of an email/password identity, storing the
// salted password hash together with the algorithm used to produce it.
type EmailIdentity struct {
	ID                    uuid.UUID
	Email                 string
	PasswordHash          []byte
	PasswordHashAlgorithm string
	PasswordSalt          []byte
}

// PasswordMatches hashes the candidate password with the identity's salt
// and algorithm and reports whether it equals the stored hash.
func (id *EmailIdentity) PasswordMatches(password string) (bool, error) {
	hash, err := passwords.Hash(id.PasswordHashAlgorithm, password, id.PasswordSalt)
	if err != nil {
		return false, err
	}
	// bytes.Equal is the idiomatic form of bytes.Compare(...) == 0.
	// NOTE(review): this is not a constant-time comparison; consider
	// crypto/subtle if hash-comparison timing matters here.
	return bytes.Equal(hash, id.PasswordHash), nil
}
|
package di
import (
"fmt"
"reflect"
"github.com/teamlint/container/di/internal/dag"
"github.com/teamlint/container/di/internal/reflection"
)
// New creates a new, empty dependency injection container.
func New() *Container {
	c := &Container{
		graph:     dag.NewDirectedGraph(),
		providers: map[key]provider{},
		history:   &provideHistory{},
	}
	return c
}
// Container is a dependency injection container.
type Container struct {
	graph     *dag.DirectedGraph // dependency graph of provider keys
	providers map[key]provider   // provider lookup by key
	history   *provideHistory    // instantiation order, used by Cleanup
	compiled  bool               // set by Compile; required before Extract/Invoke
}
// ProvideParams holds the options for the `Provide()` method. Name is a
// unique identifier for the type instance, Provider is a constructor
// function, Interfaces lists interfaces implemented by the provider's
// result type, Parameters is a bag of named values passed to the
// constructor, and IsPrototype disables singleton caching.
type ProvideParams struct {
	Name       string
	Provider   interface{}
	Interfaces []interface{}
	Parameters ParameterBag
	IsPrototype bool
}
// Provide adds a constructor into the container with parameters. It panics
// if a provider with the same key is already registered.
func (c *Container) Provide(params ProvideParams) {
	p := provider(newProviderConstructor(params.Name, params.Provider, c.history))
	if c.exists(p.Key()) {
		panicf("The `%s` type already exists in container", p.Key())
	}
	// Singleton by default; prototypes get a fresh instance per resolve.
	if !params.IsPrototype {
		p = asSingleton(p)
	}
	// add provider to graph
	c.add(p)
	// parse embed parameters
	for _, param := range p.ParameterList() {
		if param.embed {
			c.add(newProviderEmbed(param))
		}
	}
	// provide parameter bag
	// NOTE(review): "createParameterBugProvider" looks like a typo for
	// "...BagProvider"; it is defined elsewhere, so only flagged here.
	if len(params.Parameters) != 0 {
		c.add(createParameterBugProvider(p.Key(), params.Parameters))
	}
	// process interfaces
	for _, iface := range params.Interfaces {
		c.processProviderInterface(p, iface)
	}
}
// Compile compiles the container: it registers the built-in providers,
// wires every provider's parameters into the dependency graph, and
// verifies the graph is acyclic. It must be called before Extract/Invoke.
func (c *Container) Compile() {
	// provide extractor so user code can depend on the container itself
	c.Provide(ProvideParams{
		Provider: func() Extractor {
			return c
		},
	})
	// expose the dependency graph in DOT form for debugging
	c.Provide(ProvideParams{
		Provider: func() *Graph {
			return &Graph{graph: c.graph.DOTGraph()}
		},
	})
	for _, p := range c.all() {
		c.registerProviderParameters(p)
	}
	// A failing DFS sort with ErrCyclicGraph means a dependency cycle.
	_, err := c.graph.DFSSort()
	if err != nil {
		switch err {
		case dag.ErrCyclicGraph:
			panicf("Cycle detected") // todo: add nodes to message
		default:
			panic(err.Error())
		}
	}
	c.compiled = true
}
// ExtractParams holds the options for the `Extract()` method: Name is the
// optional instance name, Target must be a non-nil pointer that receives
// the resolved value.
type ExtractParams struct {
	Name   string
	Target interface{}
}
// Extract resolves the value identified by params.Name and the pointed-to
// type of params.Target, then stores it into params.Target. The container
// must be compiled first and Target must be a non-nil pointer.
func (c *Container) Extract(params ExtractParams) error {
	switch {
	case !c.compiled:
		return fmt.Errorf("container not compiled")
	case params.Target == nil:
		return fmt.Errorf("extract target must be a pointer, got `nil`")
	case !reflection.IsPtr(params.Target):
		return fmt.Errorf("extract target must be a pointer, got `%s`", reflect.TypeOf(params.Target))
	}

	targetType := reflect.TypeOf(params.Target)
	p := parameter{
		name:  params.Name,
		res:   targetType.Elem(),
		embed: isEmbedParameter(targetType),
	}
	value, err := p.ResolveValue(c)
	if err != nil {
		return err
	}
	reflect.ValueOf(params.Target).Elem().Set(value)
	return nil
}
// InvokeParams holds the options for the `Invoke()` method.
type InvokeParams struct {
	// Fn is the function to call; its arguments are resolved from the container.
	Fn interface{}
}
// Invoke calls the provided function, resolving its arguments from the
// container. The container must be compiled first.
func (c *Container) Invoke(params InvokeParams) error {
	if !c.compiled {
		return fmt.Errorf("container not compiled")
	}
	inv, err := newInvoker(params.Fn)
	if err != nil {
		return err
	}
	return inv.Invoke(c)
}
// Cleanup runs the Cleanup hook of every provider that was instantiated,
// in the order recorded by the provide history.
func (c *Container) Cleanup() {
	for _, k := range c.history.items {
		p, _ := c.provider(k)
		cl, ok := p.(cleanup)
		if ok {
			cl.Cleanup()
		}
	}
}
// add registers p both as a node in the dependency graph and in the
// provider lookup map.
func (c *Container) add(p provider) {
	c.graph.AddNode(p.Key())
	c.providers[p.Key()] = p
}
// exists reports whether key k is registered in the container graph.
func (c *Container) exists(k key) bool {
	return c.graph.NodeExists(k)
}
// provider returns the provider registered under k and whether it exists
// in the container graph.
func (c *Container) provider(k key) (provider, bool) {
	if c.exists(k) {
		return c.providers[k], true
	}
	return nil, false
}
// all returns every registered provider, in graph-node order.
func (c *Container) all() []provider {
	var providers []provider
	for _, node := range c.graph.Nodes() {
		p, _ := c.provider(node.(key))
		providers = append(providers, p)
	}
	return providers
}
// processProviderInterface registers provider under the interface `as`,
// and also adds it to the group of all providers implementing `as`.
func (c *Container) processProviderInterface(provider provider, as interface{}) {
	// create interface from provider
	iface := newProviderInterface(provider, as)
	if c.exists(iface.Key()) {
		// A second implementation makes direct interface resolution
		// ambiguous, so the node is replaced by a stub that errors out;
		// group resolution below still works.
		// c.replace(newProviderStub(iface.Key(), "have several implementations"))
		c.providers[iface.Key()] = newProviderStub(iface.Key(), "have several implementations")
	} else {
		// add interface node
		c.add(iface)
	}
	// create group
	group := newProviderGroup(iface.Key())
	// check exists
	if c.exists(group.Key()) {
		// if exists use existing group
		group = c.providers[group.Key()].(*providerGroup)
	} else {
		// else add new group to graph
		c.add(group)
	}
	// add provider reference into group
	group.Add(provider.Key())
}
// registerProviderParameters adds an edge to the dependency graph for each
// of p's parameters, panicking when a required dependency is missing.
func (c *Container) registerProviderParameters(p provider) {
	for _, param := range p.ParameterList() {
		dep, ok := param.ResolveProvider(c)
		if !ok {
			if !param.optional {
				panicf("%s: dependency %s not exists in container", p.Key(), param)
			}
			continue
		}
		c.graph.AddEdge(dep.Key(), p.Key())
	}
}
|
package gen
import (
"bufio"
"bytes"
"fmt"
"go/ast"
"go/parser"
"go/printer"
"go/token"
"io"
"log"
"os"
"path/filepath"
"sort"
"strings"
)
// I tried to do this the "right" way using go/parser but ran into various strange behavior;
// I probably just am missing something with how to use it properly. Regardless, doing this
// the hacky way should serve us just as well for now.

// mergeGoFiles concatenates the Go files named by `in` (all inside dir)
// into a single file `out` in the same directory: the package clause comes
// from the first file, import blocks are concatenated, and the result is
// re-parsed so imports can be sorted and de-duplicated before printing.
func mergeGoFiles(dir, out string, in ...string) error {
	var pkgClause string
	var importBlocks []string
	var otherBlocks []string
	sort.Strings(in) // try to get deterministic output
	// read and split each go file into package / import / rest sections
	for _, fname := range in {
		fpath := filepath.Join(dir, fname)
		pkgPart, importPart, rest, err := readAndSplitGoFile(fpath)
		if err != nil {
			return fmt.Errorf("error trying to read and split Go file: %w", err)
		}
		// use the package clause from the first file only
		if pkgClause == "" {
			pkgClause = pkgPart
		}
		importBlocks = append(importBlocks, importPart)
		otherBlocks = append(otherBlocks, rest)
	}
	var newPgm bytes.Buffer
	// use the package part from the first one
	newPgm.WriteString(pkgClause)
	newPgm.WriteString("\n\n")
	// concat the imports
	for _, bl := range importBlocks {
		newPgm.WriteString(bl)
		newPgm.WriteString("\n\n")
	}
	// concat the rest
	for _, bl := range otherBlocks {
		newPgm.WriteString(bl)
		newPgm.WriteString("\n\n")
	}
	// now read it back in using the parser and see if it will help us clean up the imports
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, out, newPgm.String(), parser.ParseComments)
	if err != nil {
		log.Printf("DEBUG: full merged file contents:\n%s", newPgm.String())
		return fmt.Errorf("error trying to parse merged file: %w", err)
	}
	ast.SortImports(fset, f)
	dedupAstFileImports(f)
	fileout, err := os.Create(filepath.Join(dir, out))
	if err != nil {
		return fmt.Errorf("error trying to open output file: %w", err)
	}
	defer fileout.Close()
	err = printer.Fprint(fileout, fset, f)
	if err != nil {
		return err
	}
	return nil
}
// readAndSplitGoFile reads the Go file at fpath and splits its text into
// three sections: the package clause (everything up to and including the
// `package` line), the import block, and the rest of the file. It is a
// line-oriented state machine, not a real parser.
func readAndSplitGoFile(fpath string) (pkgPart, importPart, rest string, reterr error) {
	// NOTE: this is not perfect, it's only meant to be good enough to correctly parse the files
	// we generate, not any general .go file
	// (it does not understand multi-line comments, for example)
	var fullInput bytes.Buffer
	// defer func() {
	// 	log.Printf("readAndSplitGoFile(%q) full input:\n%s\n\nPKG:\n%s\n\nIMPORT:\n%s\n\nREST:\n%s\n\nErr:%v",
	// 		fpath,
	// 		fullInput.Bytes(),
	// 		pkgPart,
	// 		importPart,
	// 		rest,
	// 		reterr)
	// }()
	var pkgBuf, importBuf, restBuf bytes.Buffer
	// commentBuf holds line comments whose section isn't known yet; they are
	// flushed into importBuf or restBuf once the next real line decides it.
	var commentBuf bytes.Buffer
	const (
		inPkg = iota
		inImport
		inRest
	)
	state := inPkg
	f, err := os.Open(fpath)
	if err != nil {
		reterr = err
		return
	}
	defer f.Close()
	br := bufio.NewReader(f)
	i := 0
loop:
	for {
		i++
		line, err := br.ReadString('\n')
		if err == io.EOF {
			// A final partial line (no trailing newline) is still processed;
			// the next iteration returns EOF with an empty line and breaks.
			if len(line) == 0 {
				break
			}
		} else if err != nil {
			reterr = err
			return
		}
		fullInput.WriteString(line)
		lineFields := strings.Fields(line)
		var first string
		if len(lineFields) > 0 {
			first = lineFields[0]
		}
		_ = i
		// log.Printf("%s: iteration %d; lineFields=%#v", fpath, i, lineFields)
		switch state {
		case inPkg: // in package block, haven't see the package line yet
			pkgBuf.WriteString(line)
			if first == "package" {
				state = inImport
			}
			continue loop
		case inImport: // after package and are still getting what look like imports
			// hack to move line comments below the import area into the rest section - since
			// while we're going through there we can't know if there will be more imports or not
			if strings.HasPrefix(first, "//") {
				commentBuf.WriteString(line)
				continue loop
			}
			// first declaration keyword ends the import section
			switch first {
			case "type", "func", "var":
				state = inRest
				restBuf.Write(commentBuf.Bytes())
				commentBuf.Reset()
				restBuf.WriteString(line)
				continue loop
			}
			importBuf.Write(commentBuf.Bytes())
			commentBuf.Reset()
			importBuf.WriteString(line)
			continue loop
			// // things we assume are part of the import block:
			// switch {
			// case strings.TrimSpace(first) == "": // blank line
			// case strings.HasPrefix(first, "//"): // line comment
			// case strings.HasPrefix(first, "import"): // import statement
			// case strings.HasPrefix(first, `"`): // should be a multi-line import package name
			// }
		case inRest:
			restBuf.WriteString(line)
			continue loop
		default:
		}
		// every valid state continues the loop above; reaching here means
		// state held an unknown value
		panic("unreachable")
	}
	pkgPart = pkgBuf.String()
	importPart = importBuf.String()
	rest = restBuf.String()
	return
}
// // mergeGoFiles combines go source files into one.
// // dir is the package path, out and in are file names (no slashes, same directory).
// func mergeGoFiles(dir, out string, in ...string) error {
// pkgName := goGuessPkgName(dir)
// fset := token.NewFileSet()
// files := make(map[string]*ast.File)
// // parse all the files
// for _, name := range in {
// f, err := parser.ParseFile(fset, filepath.Join(dir, name), nil, parser.ParseComments)
// if err != nil {
// return fmt.Errorf("error reading file %q: %w", name, err)
// }
// files[name] = f
// }
// pkg := &ast.Package{Name: pkgName, Files: files}
// fout := ast.MergePackageFiles(pkg,
// ast.FilterImportDuplicates, // this doesn't seem to be doing anything... sigh
// )
// // ast.SortImports(fset, fout)
// // ast.Print(fset, fout.Decls)
// moveImportsToTop(fout)
// dedupAstFileImports(fout)
// var buf bytes.Buffer
// printer.Fprint(&buf, fset, fout)
// return ioutil.WriteFile(filepath.Join(dir, out), buf.Bytes(), 0644)
// }
// func moveImportsToTop(f *ast.File) {
// var idecl []ast.Decl // import decls
// var odecl []ast.Decl // other decls
// // go through every declaration and move any imports into a separate list
// for _, decl := range f.Decls {
// {
// // import must be genDecl
// genDecl, ok := decl.(*ast.GenDecl)
// if !ok {
// goto notImport
// }
// // with token "import"
// if genDecl.Tok != token.IMPORT {
// goto notImport
// }
// idecl = append(idecl, decl)
// continue
// }
// notImport:
// odecl = append(odecl, decl)
// continue
// }
// // new decl list imports plus everything else
// f.Decls = append(idecl, odecl...)
// }
|
// Copyright (c) 2016, Gerasimos Maropoulos
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER AND CONTRIBUTOR, GERASIMOS MAROPOULOS
// BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package standar
import (
"compress/gzip"
"html/template"
"github.com/kataras/iris/context"
"github.com/kataras/iris/utils"
)
var (
	// buffer is a pool of byte buffers shared by all Engine instances,
	// lazily initialized in New.
	buffer *utils.BufferPool
)
type (
	// Config holds the template engine configuration.
	Config struct {
		// Directory is the root folder templates are loaded from.
		Directory string
		// Funcs for Standar
		Funcs template.FuncMap
	}
	// Engine renders html/template templates.
	Engine struct {
		Config    *Config
		Templates *template.Template
	}
)
// New returns an Engine with the default configuration (templates loaded
// from the "templates" directory, empty FuncMap), lazily creating the
// package-wide buffer pool on first use.
func New() *Engine {
	if buffer == nil {
		buffer = utils.NewBufferPool(64)
	}
	cfg := &Config{Directory: "templates", Funcs: template.FuncMap{}}
	return &Engine{Config: cfg}
}
// BuildTemplates is expected to parse the templates under Config.Directory.
// It is currently a stub: it short-circuits on an empty directory and
// otherwise does nothing.
// NOTE(review): s.Templates is never populated here — confirm whether the
// implementation is pending or lives elsewhere.
func (s *Engine) BuildTemplates() error {
	if s.Config.Directory == "" {
		return nil
	}
	return nil
}
// Execute renders the named template with binding into the response body,
// staging the output in a pooled buffer so a failed render writes nothing.
func (s *Engine) Execute(ctx context.IContext, name string, binding interface{}) error {
	// Retrieve a buffer from the pool; defer guarantees it is returned on
	// every exit path (the original repeated Put on each path by hand).
	out := buffer.Get()
	defer buffer.Put(out)

	if err := s.Templates.ExecuteTemplate(out, name, binding); err != nil {
		return err
	}
	// Propagate the write error, which was previously discarded.
	_, err := out.WriteTo(ctx.GetRequestCtx().Response.BodyWriter())
	return err
}
// ExecuteGzip renders the named template gzip-compressed directly into the
// response body and, on success, marks the response with the matching
// Content-Encoding header.
func (s *Engine) ExecuteGzip(ctx context.IContext, name string, binding interface{}) error {
	out := gzip.NewWriter(ctx.GetRequestCtx().Response.BodyWriter())
	err := s.Templates.ExecuteTemplate(out, name, binding)
	// Close on every path: the error path previously leaked the writer and
	// never flushed the gzip trailer.
	cerr := out.Close()
	if err != nil {
		return err
	}
	if cerr != nil {
		return cerr
	}
	ctx.GetRequestCtx().Response.Header.Add("Content-Encoding", "gzip")
	return nil
}
|
package pgsql
import (
"database/sql"
"database/sql/driver"
"strconv"
)
// The constructors below pair each supported Go integer slice type with
// the PostgreSQL int2vector representation: the FromX functions return
// driver.Valuer adapters for writing, and the ToX functions return
// sql.Scanner adapters for reading into a caller-supplied slice.

// Int2VectorFromIntSlice returns a driver.Valuer that produces a PostgreSQL int2vector from the given Go []int.
func Int2VectorFromIntSlice(val []int) driver.Valuer {
	return int2VectorFromIntSlice{val: val}
}

// Int2VectorToIntSlice returns an sql.Scanner that converts a PostgreSQL int2vector into a Go []int and sets it to val.
func Int2VectorToIntSlice(val *[]int) sql.Scanner {
	return int2VectorToIntSlice{val: val}
}

// Int2VectorFromInt8Slice returns a driver.Valuer that produces a PostgreSQL int2vector from the given Go []int8.
func Int2VectorFromInt8Slice(val []int8) driver.Valuer {
	return int2VectorFromInt8Slice{val: val}
}

// Int2VectorToInt8Slice returns an sql.Scanner that converts a PostgreSQL int2vector into a Go []int8 and sets it to val.
func Int2VectorToInt8Slice(val *[]int8) sql.Scanner {
	return int2VectorToInt8Slice{val: val}
}

// Int2VectorFromInt16Slice returns a driver.Valuer that produces a PostgreSQL int2vector from the given Go []int16.
func Int2VectorFromInt16Slice(val []int16) driver.Valuer {
	return int2VectorFromInt16Slice{val: val}
}

// Int2VectorToInt16Slice returns an sql.Scanner that converts a PostgreSQL int2vector into a Go []int16 and sets it to val.
func Int2VectorToInt16Slice(val *[]int16) sql.Scanner {
	return int2VectorToInt16Slice{val: val}
}

// Int2VectorFromInt32Slice returns a driver.Valuer that produces a PostgreSQL int2vector from the given Go []int32.
func Int2VectorFromInt32Slice(val []int32) driver.Valuer {
	return int2VectorFromInt32Slice{val: val}
}

// Int2VectorToInt32Slice returns an sql.Scanner that converts a PostgreSQL int2vector into a Go []int32 and sets it to val.
func Int2VectorToInt32Slice(val *[]int32) sql.Scanner {
	return int2VectorToInt32Slice{val: val}
}

// Int2VectorFromInt64Slice returns a driver.Valuer that produces a PostgreSQL int2vector from the given Go []int64.
func Int2VectorFromInt64Slice(val []int64) driver.Valuer {
	return int2VectorFromInt64Slice{val: val}
}

// Int2VectorToInt64Slice returns an sql.Scanner that converts a PostgreSQL int2vector into a Go []int64 and sets it to val.
func Int2VectorToInt64Slice(val *[]int64) sql.Scanner {
	return int2VectorToInt64Slice{val: val}
}

// Int2VectorFromUintSlice returns a driver.Valuer that produces a PostgreSQL int2vector from the given Go []uint.
func Int2VectorFromUintSlice(val []uint) driver.Valuer {
	return int2VectorFromUintSlice{val: val}
}

// Int2VectorToUintSlice returns an sql.Scanner that converts a PostgreSQL int2vector into a Go []uint and sets it to val.
func Int2VectorToUintSlice(val *[]uint) sql.Scanner {
	return int2VectorToUintSlice{val: val}
}

// Int2VectorFromUint8Slice returns a driver.Valuer that produces a PostgreSQL int2vector from the given Go []uint8.
func Int2VectorFromUint8Slice(val []uint8) driver.Valuer {
	return int2VectorFromUint8Slice{val: val}
}

// Int2VectorToUint8Slice returns an sql.Scanner that converts a PostgreSQL int2vector into a Go []uint8 and sets it to val.
func Int2VectorToUint8Slice(val *[]uint8) sql.Scanner {
	return int2VectorToUint8Slice{val: val}
}

// Int2VectorFromUint16Slice returns a driver.Valuer that produces a PostgreSQL int2vector from the given Go []uint16.
func Int2VectorFromUint16Slice(val []uint16) driver.Valuer {
	return int2VectorFromUint16Slice{val: val}
}

// Int2VectorToUint16Slice returns an sql.Scanner that converts a PostgreSQL int2vector into a Go []uint16 and sets it to val.
func Int2VectorToUint16Slice(val *[]uint16) sql.Scanner {
	return int2VectorToUint16Slice{val: val}
}

// Int2VectorFromUint32Slice returns a driver.Valuer that produces a PostgreSQL int2vector from the given Go []uint32.
func Int2VectorFromUint32Slice(val []uint32) driver.Valuer {
	return int2VectorFromUint32Slice{val: val}
}
// Int2VectorToUint32Slice returns an sql.Scanner that converts a PostgreSQL int2vector into a Go []uint32 and sets it to val.
func Int2VectorToUint32Slice(val *[]uint32) sql.Scanner {
return int2VectorToUint32Slice{val: val}
}
// Int2VectorFromUint64Slice returns a driver.Valuer that produces a PostgreSQL int2vector from the given Go []uint64.
func Int2VectorFromUint64Slice(val []uint64) driver.Valuer {
return int2VectorFromUint64Slice{val: val}
}
// Int2VectorToUint64Slice returns an sql.Scanner that converts a PostgreSQL int2vector into a Go []uint64 and sets it to val.
func Int2VectorToUint64Slice(val *[]uint64) sql.Scanner {
return int2VectorToUint64Slice{val: val}
}
// Int2VectorFromFloat32Slice returns a driver.Valuer that produces a PostgreSQL int2vector from the given Go []float32.
func Int2VectorFromFloat32Slice(val []float32) driver.Valuer {
return int2VectorFromFloat32Slice{val: val}
}
// Int2VectorToFloat32Slice returns an sql.Scanner that converts a PostgreSQL int2vector into a Go []float32 and sets it to val.
func Int2VectorToFloat32Slice(val *[]float32) sql.Scanner {
return int2VectorToFloat32Slice{val: val}
}
// Int2VectorFromFloat64Slice returns a driver.Valuer that produces a PostgreSQL int2vector from the given Go []float64.
func Int2VectorFromFloat64Slice(val []float64) driver.Valuer {
return int2VectorFromFloat64Slice{val: val}
}
// Int2VectorToFloat64Slice returns an sql.Scanner that converts a PostgreSQL int2vector into a Go []float64 and sets it to val.
func Int2VectorToFloat64Slice(val *[]float64) sql.Scanner {
return int2VectorToFloat64Slice{val: val}
}
// int2VectorFromIntSlice wraps a Go []int destined for an int2vector column.
type int2VectorFromIntSlice struct {
	val []int
}
// Value implements driver.Valuer. It renders the slice in the int2vector
// text form: space-separated decimal integers (e.g. "1 2 3"). A nil slice
// maps to SQL NULL and an empty slice to an empty vector.
func (v int2VectorFromIntSlice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	} else if len(v.val) == 0 {
		return []byte{}, nil
	}
	// Pre-size the buffer: an int2-range element needs at most 6 digit
	// bytes plus one separator (capacity is only a hint for wider values).
	out := make([]byte, 0, 7*len(v.val))
	for _, i := range v.val {
		out = strconv.AppendInt(out, int64(i), 10)
		out = append(out, ' ')
	}
	return out[:len(out)-1], nil // drop trailing space
}
// int2VectorToIntSlice wraps the *[]int destination of an int2vector scan.
type int2VectorToIntSlice struct {
	val *[]int
}
// Scan implements sql.Scanner. It parses the int2vector wire text
// (space-separated integers) into the wrapped slice; SQL NULL sets the
// destination to nil.
func (v int2VectorToIntSlice) Scan(src interface{}) error {
	arr, err := srcbytes(src)
	if err != nil {
		return err
	} else if arr == nil {
		*v.val = nil
		return nil
	}
	elems := pgParseVector(arr)
	ints := make([]int, len(elems))
	for i := 0; i < len(elems); i++ {
		// bitSize 16: int2vector elements are PostgreSQL int2 (smallint).
		i64, err := strconv.ParseInt(string(elems[i]), 10, 16)
		if err != nil {
			return err
		}
		ints[i] = int(i64)
	}
	*v.val = ints
	return nil
}
// int2VectorFromInt8Slice wraps a Go []int8 destined for an int2vector column.
type int2VectorFromInt8Slice struct {
	val []int8
}
// Value implements driver.Valuer. It renders the slice as space-separated
// decimal integers. A nil slice maps to SQL NULL and an empty slice to an
// empty vector.
func (v int2VectorFromInt8Slice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	} else if len(v.val) == 0 {
		return []byte{}, nil
	}
	// Pre-size: an int8 needs at most 4 digit bytes plus one separator.
	out := make([]byte, 0, 5*len(v.val))
	for _, i8 := range v.val {
		out = strconv.AppendInt(out, int64(i8), 10)
		out = append(out, ' ')
	}
	return out[:len(out)-1], nil // drop trailing space
}
// int2VectorToInt8Slice wraps the *[]int8 destination of an int2vector scan.
type int2VectorToInt8Slice struct {
	val *[]int8
}
// Scan implements sql.Scanner. It parses the int2vector wire text into
// the wrapped slice; SQL NULL sets the destination to nil.
func (v int2VectorToInt8Slice) Scan(src interface{}) error {
	arr, err := srcbytes(src)
	if err != nil {
		return err
	} else if arr == nil {
		*v.val = nil
		return nil
	}
	elems := pgParseVector(arr)
	int8s := make([]int8, len(elems))
	for i := 0; i < len(elems); i++ {
		// bitSize 8: the destination element type is only 8 bits wide.
		i64, err := strconv.ParseInt(string(elems[i]), 10, 8)
		if err != nil {
			return err
		}
		int8s[i] = int8(i64)
	}
	*v.val = int8s
	return nil
}
// int2VectorFromInt16Slice wraps a Go []int16 destined for an int2vector column.
type int2VectorFromInt16Slice struct {
	val []int16
}
// Value implements driver.Valuer. It renders the slice as space-separated
// decimal integers. A nil slice maps to SQL NULL and an empty slice to an
// empty vector.
func (v int2VectorFromInt16Slice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	} else if len(v.val) == 0 {
		return []byte{}, nil
	}
	// Pre-size: an int16 needs at most 6 digit bytes plus one separator.
	out := make([]byte, 0, 7*len(v.val))
	for _, i16 := range v.val {
		out = strconv.AppendInt(out, int64(i16), 10)
		out = append(out, ' ')
	}
	return out[:len(out)-1], nil // drop trailing space
}
// int2VectorToInt16Slice wraps the *[]int16 destination of an int2vector scan.
type int2VectorToInt16Slice struct {
	val *[]int16
}
// Scan implements sql.Scanner. It parses the int2vector wire text into
// the wrapped slice; SQL NULL sets the destination to nil.
func (v int2VectorToInt16Slice) Scan(src interface{}) error {
	arr, err := srcbytes(src)
	if err != nil {
		return err
	} else if arr == nil {
		*v.val = nil
		return nil
	}
	elems := pgParseVector(arr)
	int16s := make([]int16, len(elems))
	for i := 0; i < len(elems); i++ {
		// bitSize 16 matches both int2 and the destination element type.
		i64, err := strconv.ParseInt(string(elems[i]), 10, 16)
		if err != nil {
			return err
		}
		int16s[i] = int16(i64)
	}
	*v.val = int16s
	return nil
}
// int2VectorFromInt32Slice wraps a Go []int32 destined for an int2vector column.
type int2VectorFromInt32Slice struct {
	val []int32
}
// Value implements driver.Valuer. It renders the slice as space-separated
// decimal integers. A nil slice maps to SQL NULL and an empty slice to an
// empty vector.
func (v int2VectorFromInt32Slice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	} else if len(v.val) == 0 {
		return []byte{}, nil
	}
	// Pre-size: an int2-range element needs at most 6 digit bytes plus one
	// separator (capacity is only a hint for wider values).
	out := make([]byte, 0, 7*len(v.val))
	for _, i32 := range v.val {
		out = strconv.AppendInt(out, int64(i32), 10)
		out = append(out, ' ')
	}
	return out[:len(out)-1], nil // drop trailing space
}
// int2VectorToInt32Slice wraps the *[]int32 destination of an int2vector scan.
type int2VectorToInt32Slice struct {
	val *[]int32
}
// Scan implements sql.Scanner. It parses the int2vector wire text into
// the wrapped slice; SQL NULL sets the destination to nil.
func (v int2VectorToInt32Slice) Scan(src interface{}) error {
	arr, err := srcbytes(src)
	if err != nil {
		return err
	} else if arr == nil {
		*v.val = nil
		return nil
	}
	elems := pgParseVector(arr)
	int32s := make([]int32, len(elems))
	for i := 0; i < len(elems); i++ {
		// bitSize 16: int2vector elements are PostgreSQL int2, so values
		// always fit the wider int32 destination.
		i64, err := strconv.ParseInt(string(elems[i]), 10, 16)
		if err != nil {
			return err
		}
		int32s[i] = int32(i64)
	}
	*v.val = int32s
	return nil
}
// int2VectorFromInt64Slice wraps a Go []int64 destined for an int2vector column.
type int2VectorFromInt64Slice struct {
	val []int64
}
// Value implements driver.Valuer. It renders the slice as space-separated
// decimal integers. A nil slice maps to SQL NULL and an empty slice to an
// empty vector.
func (v int2VectorFromInt64Slice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	} else if len(v.val) == 0 {
		return []byte{}, nil
	}
	// Pre-size: an int2-range element needs at most 6 digit bytes plus one
	// separator (capacity is only a hint for wider values).
	out := make([]byte, 0, 7*len(v.val))
	for _, i64 := range v.val {
		out = strconv.AppendInt(out, i64, 10)
		out = append(out, ' ')
	}
	return out[:len(out)-1], nil // drop trailing space
}
// int2VectorToInt64Slice wraps the *[]int64 destination of an int2vector scan.
type int2VectorToInt64Slice struct {
	val *[]int64
}
// Scan implements sql.Scanner. It parses the int2vector wire text into
// the wrapped slice; SQL NULL sets the destination to nil.
func (v int2VectorToInt64Slice) Scan(src interface{}) error {
	arr, err := srcbytes(src)
	if err != nil {
		return err
	} else if arr == nil {
		*v.val = nil
		return nil
	}
	elems := pgParseVector(arr)
	int64s := make([]int64, len(elems))
	for i := 0; i < len(elems); i++ {
		// bitSize 16: int2vector elements are PostgreSQL int2, so values
		// always fit the wider int64 destination.
		i64, err := strconv.ParseInt(string(elems[i]), 10, 16)
		if err != nil {
			return err
		}
		int64s[i] = i64
	}
	*v.val = int64s
	return nil
}
// int2VectorFromUintSlice wraps a Go []uint destined for an int2vector column.
type int2VectorFromUintSlice struct {
	val []uint
}
// Value implements driver.Valuer. It renders the slice as space-separated
// decimal integers. A nil slice maps to SQL NULL and an empty slice to an
// empty vector.
func (v int2VectorFromUintSlice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	} else if len(v.val) == 0 {
		return []byte{}, nil
	}
	// Pre-size: an int2-range element needs at most 5 digit bytes plus one
	// separator (capacity is only a hint for wider values).
	out := make([]byte, 0, 6*len(v.val))
	for _, u := range v.val {
		out = strconv.AppendUint(out, uint64(u), 10)
		out = append(out, ' ')
	}
	return out[:len(out)-1], nil // drop trailing space
}
// int2VectorToUintSlice wraps the *[]uint destination of an int2vector scan.
type int2VectorToUintSlice struct {
	val *[]uint
}
// Scan implements sql.Scanner. It parses the int2vector wire text into
// the wrapped slice; SQL NULL sets the destination to nil.
func (v int2VectorToUintSlice) Scan(src interface{}) error {
	arr, err := srcbytes(src)
	if err != nil {
		return err
	} else if arr == nil {
		*v.val = nil
		return nil
	}
	elems := pgParseVector(arr)
	uints := make([]uint, len(elems))
	for i := 0; i < len(elems); i++ {
		// bitSize 16: int2vector elements are PostgreSQL int2; negative
		// values fail to parse as unsigned and surface as an error.
		u64, err := strconv.ParseUint(string(elems[i]), 10, 16)
		if err != nil {
			return err
		}
		uints[i] = uint(u64)
	}
	*v.val = uints
	return nil
}
// int2VectorFromUint8Slice wraps a Go []uint8 destined for an int2vector column.
type int2VectorFromUint8Slice struct {
	val []uint8
}
// Value implements driver.Valuer. It renders the slice as space-separated
// decimal integers. A nil slice maps to SQL NULL and an empty slice to an
// empty vector.
func (v int2VectorFromUint8Slice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	} else if len(v.val) == 0 {
		return []byte{}, nil
	}
	// Pre-size: a uint8 needs at most 3 digit bytes plus one separator.
	out := make([]byte, 0, 4*len(v.val))
	for _, u8 := range v.val {
		out = strconv.AppendUint(out, uint64(u8), 10)
		out = append(out, ' ')
	}
	return out[:len(out)-1], nil // drop trailing space
}
// int2VectorToUint8Slice wraps the *[]uint8 destination of an int2vector scan.
type int2VectorToUint8Slice struct {
	val *[]uint8
}
// Scan implements sql.Scanner. It parses the int2vector wire text into
// the wrapped slice; SQL NULL sets the destination to nil.
func (v int2VectorToUint8Slice) Scan(src interface{}) error {
	arr, err := srcbytes(src)
	if err != nil {
		return err
	} else if arr == nil {
		*v.val = nil
		return nil
	}
	elems := pgParseVector(arr)
	uint8s := make([]uint8, len(elems))
	for i := 0; i < len(elems); i++ {
		// bitSize 8: the destination element type is only 8 bits wide.
		u64, err := strconv.ParseUint(string(elems[i]), 10, 8)
		if err != nil {
			return err
		}
		uint8s[i] = uint8(u64)
	}
	*v.val = uint8s
	return nil
}
// int2VectorFromUint16Slice wraps a Go []uint16 destined for an int2vector column.
type int2VectorFromUint16Slice struct {
	val []uint16
}
// Value implements driver.Valuer. It renders the slice as space-separated
// decimal integers. A nil slice maps to SQL NULL and an empty slice to an
// empty vector.
func (v int2VectorFromUint16Slice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	} else if len(v.val) == 0 {
		return []byte{}, nil
	}
	// Pre-size: a uint16 needs at most 5 digit bytes plus one separator.
	out := make([]byte, 0, 6*len(v.val))
	for _, u16 := range v.val {
		out = strconv.AppendUint(out, uint64(u16), 10)
		out = append(out, ' ')
	}
	return out[:len(out)-1], nil // drop trailing space
}
// int2VectorToUint16Slice wraps the *[]uint16 destination of an int2vector scan.
type int2VectorToUint16Slice struct {
	val *[]uint16
}
// Scan implements sql.Scanner. It parses the int2vector wire text into
// the wrapped slice; SQL NULL sets the destination to nil.
func (v int2VectorToUint16Slice) Scan(src interface{}) error {
	arr, err := srcbytes(src)
	if err != nil {
		return err
	} else if arr == nil {
		*v.val = nil
		return nil
	}
	elems := pgParseVector(arr)
	uint16s := make([]uint16, len(elems))
	for i := 0; i < len(elems); i++ {
		// bitSize 16 matches the destination element type.
		u64, err := strconv.ParseUint(string(elems[i]), 10, 16)
		if err != nil {
			return err
		}
		uint16s[i] = uint16(u64)
	}
	*v.val = uint16s
	return nil
}
// int2VectorFromUint32Slice wraps a Go []uint32 destined for an int2vector column.
type int2VectorFromUint32Slice struct {
	val []uint32
}
// Value implements driver.Valuer. It renders the slice as space-separated
// decimal integers. A nil slice maps to SQL NULL and an empty slice to an
// empty vector.
func (v int2VectorFromUint32Slice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	} else if len(v.val) == 0 {
		return []byte{}, nil
	}
	// Pre-size: an int2-range element needs at most 5 digit bytes plus one
	// separator (capacity is only a hint for wider values).
	out := make([]byte, 0, 6*len(v.val))
	for _, u32 := range v.val {
		out = strconv.AppendUint(out, uint64(u32), 10)
		out = append(out, ' ')
	}
	return out[:len(out)-1], nil // drop trailing space
}
// int2VectorToUint32Slice wraps the *[]uint32 destination of an int2vector scan.
type int2VectorToUint32Slice struct {
	val *[]uint32
}
// Scan implements sql.Scanner. It parses the int2vector wire text into
// the wrapped slice; SQL NULL sets the destination to nil.
func (v int2VectorToUint32Slice) Scan(src interface{}) error {
	arr, err := srcbytes(src)
	if err != nil {
		return err
	} else if arr == nil {
		*v.val = nil
		return nil
	}
	elems := pgParseVector(arr)
	uint32s := make([]uint32, len(elems))
	for i := 0; i < len(elems); i++ {
		// bitSize 16: int2vector elements are PostgreSQL int2, so values
		// always fit the wider uint32 destination.
		u64, err := strconv.ParseUint(string(elems[i]), 10, 16)
		if err != nil {
			return err
		}
		uint32s[i] = uint32(u64)
	}
	*v.val = uint32s
	return nil
}
// int2VectorFromUint64Slice wraps a Go []uint64 destined for an int2vector column.
type int2VectorFromUint64Slice struct {
	val []uint64
}
// Value implements driver.Valuer. It renders the slice as space-separated
// decimal integers. A nil slice maps to SQL NULL and an empty slice to an
// empty vector.
func (v int2VectorFromUint64Slice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	} else if len(v.val) == 0 {
		return []byte{}, nil
	}
	// Pre-size: an int2-range element needs at most 5 digit bytes plus one
	// separator (capacity is only a hint for wider values).
	out := make([]byte, 0, 6*len(v.val))
	for _, u64 := range v.val {
		out = strconv.AppendUint(out, u64, 10)
		out = append(out, ' ')
	}
	return out[:len(out)-1], nil // drop trailing space
}
// int2VectorToUint64Slice wraps the *[]uint64 destination of an int2vector scan.
type int2VectorToUint64Slice struct {
	val *[]uint64
}
// Scan implements sql.Scanner. It parses the int2vector wire text into
// the wrapped slice; SQL NULL sets the destination to nil.
func (v int2VectorToUint64Slice) Scan(src interface{}) error {
	arr, err := srcbytes(src)
	if err != nil {
		return err
	} else if arr == nil {
		*v.val = nil
		return nil
	}
	elems := pgParseVector(arr)
	uint64s := make([]uint64, len(elems))
	for i := 0; i < len(elems); i++ {
		// bitSize 16: int2vector elements are PostgreSQL int2, so values
		// always fit the wider uint64 destination.
		u64, err := strconv.ParseUint(string(elems[i]), 10, 16)
		if err != nil {
			return err
		}
		uint64s[i] = u64
	}
	*v.val = uint64s
	return nil
}
// int2VectorFromFloat32Slice wraps a Go []float32 destined for an int2vector column.
type int2VectorFromFloat32Slice struct {
	val []float32
}
// Value implements driver.Valuer. Each element is truncated toward zero
// to an integer (int2vector holds integers) and rendered space-separated.
// A nil slice maps to SQL NULL and an empty slice to an empty vector.
func (v int2VectorFromFloat32Slice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	} else if len(v.val) == 0 {
		return []byte{}, nil
	}
	// Pre-size: an int2-range element needs at most 6 digit bytes plus one
	// separator (capacity is only a hint for wider values).
	out := make([]byte, 0, 7*len(v.val))
	for _, f32 := range v.val {
		// int64(f32) discards the fractional part (truncation toward zero).
		out = strconv.AppendInt(out, int64(f32), 10)
		out = append(out, ' ')
	}
	return out[:len(out)-1], nil // drop trailing space
}
// int2VectorToFloat32Slice wraps the *[]float32 destination of an int2vector scan.
type int2VectorToFloat32Slice struct {
	val *[]float32
}
// Scan implements sql.Scanner. int2vector elements are integers on the
// wire; each is parsed as an int2 and then converted to float32. SQL NULL
// sets the destination to nil.
func (v int2VectorToFloat32Slice) Scan(src interface{}) error {
	arr, err := srcbytes(src)
	if err != nil {
		return err
	} else if arr == nil {
		*v.val = nil
		return nil
	}
	elems := pgParseVector(arr)
	float32s := make([]float32, len(elems))
	for i := 0; i < len(elems); i++ {
		// bitSize 16: int2vector elements are PostgreSQL int2 (smallint).
		i64, err := strconv.ParseInt(string(elems[i]), 10, 16)
		if err != nil {
			return err
		}
		float32s[i] = float32(i64)
	}
	*v.val = float32s
	return nil
}
// int2VectorFromFloat64Slice wraps a Go []float64 destined for an int2vector column.
type int2VectorFromFloat64Slice struct {
	val []float64
}
// Value implements driver.Valuer. Each element is truncated toward zero
// to an integer (int2vector holds integers) and rendered space-separated.
// A nil slice maps to SQL NULL and an empty slice to an empty vector.
func (v int2VectorFromFloat64Slice) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	} else if len(v.val) == 0 {
		return []byte{}, nil
	}
	// Pre-size: an int2-range element needs at most 6 digit bytes plus one
	// separator (capacity is only a hint for wider values).
	out := make([]byte, 0, 7*len(v.val))
	for _, f64 := range v.val {
		// int64(f64) discards the fractional part (truncation toward zero).
		out = strconv.AppendInt(out, int64(f64), 10)
		out = append(out, ' ')
	}
	return out[:len(out)-1], nil // drop trailing space
}
// int2VectorToFloat64Slice wraps the *[]float64 destination of an int2vector scan.
type int2VectorToFloat64Slice struct {
	val *[]float64
}
// Scan implements sql.Scanner. int2vector elements are integers on the
// wire; each is parsed as an int2 and then converted to float64. SQL NULL
// sets the destination to nil.
func (v int2VectorToFloat64Slice) Scan(src interface{}) error {
	arr, err := srcbytes(src)
	if err != nil {
		return err
	} else if arr == nil {
		*v.val = nil
		return nil
	}
	elems := pgParseVector(arr)
	float64s := make([]float64, len(elems))
	for i := 0; i < len(elems); i++ {
		// bitSize 16: int2vector elements are PostgreSQL int2 (smallint).
		i64, err := strconv.ParseInt(string(elems[i]), 10, 16)
		if err != nil {
			return err
		}
		float64s[i] = float64(i64)
	}
	*v.val = float64s
	return nil
}
|
package object
import (
dstore "cloud.google.com/go/datastore"
"github.com/golang/protobuf/proto"
pb "github.com/pizzahutdigital/storage/protobufs"
"github.com/pizzahutdigital/storage/storage"
)
// Object implements Item
type Object struct {
	id        string                 // unique identifier
	value     []byte                 // opaque payload bytes
	timestamp int64                  // set from storage.GenTimestamp by New, or carried over
	keys      map[string]interface{} // extra indexed properties (see FromProps)
	// disable bool
}
// New builds an Object for the given id, payload and keys, stamping it
// with the current storage timestamp.
func New(id string, value []byte, keys map[string]interface{}) *Object {
	obj := &Object{
		id:    id,
		value: value,
		keys:  keys,
	}
	obj.timestamp = storage.GenTimestamp()
	return obj
}
// FromProto builds an Object from its protobuf representation.
// Note: keys are not populated — pb.Item does not carry them yet.
func FromProto(i *pb.Item) *Object {
	return &Object{
		id:        i.GetId(),
		value:     i.GetValue(),
		timestamp: i.GetTimestamp(),
		// keys: i.GetKeys(),
	}
}
// FromResult copies the item fields of a storage result into a new Object.
// Note: keys are not populated, mirroring FromProto.
func FromResult(res *storage.Result) *Object {
	return &Object{
		id:        res.Item.ID(),
		value:     res.Item.Value(),
		timestamp: res.Item.Timestamp(),
		// keys: res.Item.Keys(),
	}
}
// FromProps reconstructs an Object from a datastore property list. The
// "id", "value" and "timestamp" properties populate the corresponding
// fields; every remaining property is kept in the keys map.
//
// It returns nil when props is nil, or when any required property is
// missing or has an unexpected type. The previous implementation used
// unchecked type assertions and panicked on malformed input; the
// comma-ok handling below implements the validation the original left
// sketched in comments.
func FromProps(props dstore.PropertyList) *Object {
	if props == nil {
		return nil
	}
	propMap := make(map[string]interface{}, len(props))
	for _, prop := range props {
		propMap[prop.Name] = prop.Value
	}
	id, ok := propMap["id"].(string)
	if !ok {
		return nil
	}
	value, ok := propMap["value"].([]byte)
	if !ok {
		return nil
	}
	ts, ok := propMap["timestamp"].(int64)
	if !ok {
		return nil
	}
	// The remaining entries are the object's secondary keys.
	delete(propMap, "id")
	delete(propMap, "value")
	delete(propMap, "timestamp")
	return &Object{
		id:        id,
		value:     value,
		timestamp: ts,
		keys:      propMap,
	}
}
// SetTimestamp overrides the object's timestamp.
func (o *Object) SetTimestamp(ts int64) {
	o.timestamp = ts
}
// ID returns the object's identifier.
func (o *Object) ID() string {
	return o.id
}
// Value returns the object's payload bytes.
func (o *Object) Value() []byte {
	return o.value
}
// Timestamp returns the object's timestamp.
func (o *Object) Timestamp() int64 {
	return o.timestamp
}
// Keys returns the object's extra indexed properties (may be nil).
func (o *Object) Keys() map[string]interface{} {
	return o.keys
}
// MarshalBinary implements encoding.BinaryMarshaler
// Note: keys are not serialized — pb.Item has no field for them yet.
func (o *Object) MarshalBinary() (data []byte, err error) {
	return proto.Marshal(&pb.Item{
		Id:        o.ID(),
		Value:     o.Value(),
		Timestamp: o.Timestamp(),
		// Keys: // HOW TO DO THIS
	})
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler
// Note: keys are not restored — pb.Item has no field for them yet.
func (o *Object) UnmarshalBinary(data []byte) error {
	var item pb.Item
	if err := proto.Unmarshal(data, &item); err != nil {
		return err
	}
	o.id = item.GetId()
	o.value = item.GetValue()
	o.timestamp = item.GetTimestamp()
	// o.keys = item.GetKeys()
	return nil
}
|
package main
import (
"odbcstream/odbcstream"
// "fmt"
)
// main initialises the database connection through the odbcstream
// package, using the "psql" driver against the "france" target.
// NOTE(review): the call's result (if any) is not captured here —
// presumably odbcstream keeps the connection internally; confirm.
func main() {
	odbcstream.InitialiseDBConnection("psql", "france")
	// fmt.Println("Hello Go")
}
|
package repo
import (
"errors"
"fmt"
"strings"
)
// Parse parses a repository name in the format 'owner/repo'. It rejects
// values with a missing or extra slash and values whose owner or name
// part is empty.
func Parse(value string) (*Repo, error) {
	parts := strings.Split(value, "/")
	if len(parts) != 2 {
		return nil, errors.New("repository must be in the form owner/repo")
	}
	owner, name := parts[0], parts[1]
	switch {
	case owner == "":
		return nil, fmt.Errorf("owner in repository %q cannot be empty", value)
	case name == "":
		return nil, fmt.Errorf("name in repository %q cannot be empty", value)
	}
	return &Repo{Owner: owner, Name: name}, nil
}
|
package service
import (
"../model"
"../repository"
"errors"
"golang.org/x/crypto/bcrypt"
)
// IUserDataService defines the user-management operations exposed by the
// service layer.
type IUserDataService interface {
	AddUser(*model.User) (int64, error)
	DeleteUser(int64) error
	UpdateUser(user *model.User, isChangePwd bool) (err error)
	FindUserByName(string) (*model.User, error)
	CheckPwd(userName string, pwd string) (isOk bool, err error)
}
// UserDataService implements IUserDataService on top of a user repository.
type UserDataService struct {
	UserRepository repository.IUserRepository
}
// NewUserDataService wraps the given repository in a UserDataService.
func NewUserDataService(userRepository repository.IUserRepository) IUserDataService {
	return &UserDataService{UserRepository: userRepository}
}
// GeneratePassword bcrypt-hashes a plaintext password at the default cost.
func GeneratePassword(userPassword string) ([]byte, error) {
	return bcrypt.GenerateFromPassword([]byte(userPassword), bcrypt.DefaultCost)
}
// ValidatePassword compares a plaintext password against a bcrypt hash.
// Note: the underlying bcrypt error is replaced with a generic message,
// so callers cannot distinguish a mismatch from a malformed hash.
func ValidatePassword(userPassword string, hashed string) (isOk bool, err error) {
	if err = bcrypt.CompareHashAndPassword([]byte(hashed), []byte(userPassword)); err != nil {
		return false, errors.New("invalid password and user")
	}
	return true, nil
}
// AddUser hashes the user's password and persists the user, returning
// the new user ID from the repository.
// NOTE(review): on hashing failure the (likely zero) user.ID is returned
// alongside the error; callers must check err first.
func (u *UserDataService) AddUser(user *model.User) (userID int64, err error) {
	pwdByte, err := GeneratePassword(user.HashPassword)
	if err != nil {
		return user.ID, err
	}
	// Replace the plaintext carried in HashPassword with its bcrypt hash.
	user.HashPassword = string(pwdByte)
	return u.UserRepository.CreatUser(user)
}
// DeleteUser removes the user with the given ID via the repository.
func (u *UserDataService) DeleteUser(userID int64) error {
	return u.UserRepository.DeleteUserByID(userID)
}
// UpdateUser persists the user; when isChangedPwd is true the plaintext
// in HashPassword is re-hashed with bcrypt before saving.
func (u *UserDataService) UpdateUser(user *model.User, isChangedPwd bool) (err error) {
	if isChangedPwd {
		pwdByte, err := GeneratePassword(user.HashPassword)
		if err != nil {
			return err
		}
		user.HashPassword = string(pwdByte)
	}
	//log
	return u.UserRepository.UpdateUser(user)
}
// FindUserByName looks a user up by name via the repository.
func (u *UserDataService) FindUserByName(userName string) (user *model.User, err error) {
	return u.UserRepository.FindUserByName(userName)
}
// CheckPwd verifies pwd against the stored bcrypt hash of userName.
func (u *UserDataService) CheckPwd(userName string, pwd string) (isOk bool, err error) {
	user, err := u.UserRepository.FindUserByName(userName)
	if err != nil {
		return false, err
	}
	return ValidatePassword(pwd, user.HashPassword)
}
|
package pipelineexecution
import (
"github.com/rancher/rancher/pkg/pipeline/utils"
"github.com/pkg/errors"
"github.com/rancher/rancher/pkg/controllers/user/nslabels"
images "github.com/rancher/rancher/pkg/image"
"github.com/rancher/rancher/pkg/randomtoken"
"github.com/rancher/rancher/pkg/ref"
mv3 "github.com/rancher/types/apis/management.cattle.io/v3"
"github.com/rancher/types/apis/project.cattle.io/v3"
"github.com/sirupsen/logrus"
appsv1beta2 "k8s.io/api/apps/v1beta2"
corev1 "k8s.io/api/core/v1"
"k8s.io/api/networking/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
)
// deploy creates the dedicated namespace for a pipeline execution and the
// supporting infrastructure inside it: a shared secret, a service
// account, a network policy, and Jenkins / registry / Minio deployments
// with their services. Every create tolerates IsAlreadyExists, so the
// call is idempotent and safe to repeat. Finally the role bindings are
// reconciled.
//
// errors.Wrapf calls carrying no format arguments were changed to
// errors.Wrap (identical behavior; the *f variant without verbs is a
// linter-flagged misuse).
func (l *Lifecycle) deploy(obj *v3.PipelineExecution) error {
	logrus.Debug("deploy pipeline workloads and services")
	nsName := utils.GetPipelineCommonName(obj)
	_, pname := ref.Parse(obj.Spec.ProjectName)
	// The namespace is labeled and annotated with the owning project ID.
	ns := &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name:        nsName,
			Labels:      labels.Set(map[string]string{nslabels.ProjectIDFieldLabel: pname}),
			Annotations: map[string]string{nslabels.ProjectIDFieldLabel: obj.Spec.ProjectName},
		},
	}
	if _, err := l.namespaces.Create(ns); err != nil && !apierrors.IsAlreadyExists(err) {
		return errors.Wrap(err, "Error create ns")
	}
	secret := getSecret(nsName)
	if _, err := l.secrets.Create(secret); err != nil && !apierrors.IsAlreadyExists(err) {
		return errors.Wrap(err, "Error create secret")
	}
	sa := getServiceAccount(nsName)
	if _, err := l.serviceAccounts.Create(sa); err != nil && !apierrors.IsAlreadyExists(err) {
		return errors.Wrap(err, "Error create service account")
	}
	np := getNetworkPolicy(nsName)
	if _, err := l.networkPolicies.Create(np); err != nil && !apierrors.IsAlreadyExists(err) {
		return errors.Wrap(err, "Error create networkpolicy")
	}
	jenkinsService := getJenkinsService(nsName)
	if _, err := l.services.Create(jenkinsService); err != nil && !apierrors.IsAlreadyExists(err) {
		return errors.Wrap(err, "Error create jenkins service")
	}
	jenkinsDeployment := getJenkinsDeployment(nsName)
	if _, err := l.deployments.Create(jenkinsDeployment); err != nil && !apierrors.IsAlreadyExists(err) {
		return errors.Wrap(err, "Error create jenkins deployment")
	}
	registryService := getRegistryService(nsName)
	if _, err := l.services.Create(registryService); err != nil && !apierrors.IsAlreadyExists(err) {
		return errors.Wrap(err, "Error create registry service")
	}
	registryDeployment := getRegistryDeployment(nsName)
	if _, err := l.deployments.Create(registryDeployment); err != nil && !apierrors.IsAlreadyExists(err) {
		return errors.Wrap(err, "Error create registry deployment")
	}
	minioService := getMinioService(nsName)
	if _, err := l.services.Create(minioService); err != nil && !apierrors.IsAlreadyExists(err) {
		return errors.Wrap(err, "Error create minio service")
	}
	minioDeployment := getMinioDeployment(nsName)
	if _, err := l.deployments.Create(minioDeployment); err != nil && !apierrors.IsAlreadyExists(err) {
		return errors.Wrap(err, "Error create minio deployment")
	}
	return l.reconcileRb(obj)
}
// getSecret builds the pipeline secret for namespace ns, holding the
// admin user and token that Jenkins and Minio read via env vars. If
// random-token generation fails it falls back to the well-known default
// token (logged as a warning).
func getSecret(ns string) *corev1.Secret {
	token, err := randomtoken.Generate()
	if err != nil {
		logrus.Warningf("warning generate random token got - %v, use default instead", err)
		token = utils.PipelineSecretDefaultToken
	}
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name:      utils.PipelineSecretName,
		},
		Data: map[string][]byte{
			utils.PipelineSecretTokenKey: []byte(token),
			utils.PipelineSecretUserKey:  []byte(utils.PipelineSecretDefaultUser),
		},
	}
}
// getServiceAccount builds the Jenkins service account for namespace ns.
func getServiceAccount(ns string) *corev1.ServiceAccount {
	return &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name:      utils.JenkinsName,
		},
	}
}
// getRoleBindings builds a RoleBinding in namespace rbNs that grants the
// roleAdmin ClusterRole to the Jenkins service account living in the
// pipeline namespace (commonName).
func getRoleBindings(rbNs string, commonName string) *rbacv1.RoleBinding {
	return &rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name:      commonName,
			Namespace: rbNs,
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     roleAdmin,
		},
		Subjects: []rbacv1.Subject{{
			Kind: rbacv1.ServiceAccountKind,
			// The subject's namespace is the pipeline namespace, not rbNs.
			Namespace: commonName,
			Name:      utils.JenkinsName,
		}},
	}
}
// getClusterRoleBindings builds a ClusterRoleBinding named "<ns>-<roleName>"
// that grants roleName cluster-wide to the Jenkins service account in ns.
func getClusterRoleBindings(ns string, roleName string) *rbacv1.ClusterRoleBinding {
	return &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: ns + "-" + roleName,
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     roleName,
		},
		Subjects: []rbacv1.Subject{{
			Kind:      rbacv1.ServiceAccountKind,
			Namespace: ns,
			Name:      utils.JenkinsName,
		}},
	}
}
// getJenkinsService builds the service exposing the Jenkins master pods
// (selected via the app/jenkins labels) on the HTTP and JNLP agent ports.
func getJenkinsService(ns string) *corev1.Service {
	return &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name:      utils.JenkinsName,
		},
		Spec: corev1.ServiceSpec{
			Selector: map[string]string{
				utils.LabelKeyApp:     utils.JenkinsName,
				utils.LabelKeyJenkins: utils.JenkinsMaster,
			},
			Ports: []corev1.ServicePort{
				{
					Name: "http",
					Port: utils.JenkinsPort,
				},
				{
					Name: "agent",
					Port: utils.JenkinsJNLPPort,
				},
			},
		},
	}
}
// getJenkinsDeployment builds the single-replica Jenkins master
// deployment for namespace ns. Admin credentials are injected from the
// pipeline secret, and readiness is probed via the /login page.
func getJenkinsDeployment(ns string) *appsv1beta2.Deployment {
	replicas := int32(1)
	return &appsv1beta2.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name:      utils.JenkinsName,
		},
		Spec: appsv1beta2.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{utils.LabelKeyApp: utils.JenkinsName},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						utils.LabelKeyApp:     utils.JenkinsName,
						utils.LabelKeyJenkins: utils.JenkinsMaster,
					},
					Name: utils.JenkinsName,
				},
				Spec: corev1.PodSpec{
					ServiceAccountName: utils.JenkinsName,
					Containers: []corev1.Container{
						{
							Name:  utils.JenkinsName,
							Image: images.Resolve(mv3.ToolsSystemImages.PipelineSystemImages.Jenkins),
							Env: []corev1.EnvVar{
								{
									// Admin password comes from the shared pipeline secret.
									Name: "ADMIN_PASSWORD",
									ValueFrom: &corev1.EnvVarSource{
										SecretKeyRef: &corev1.SecretKeySelector{
											LocalObjectReference: corev1.LocalObjectReference{
												Name: utils.PipelineSecretName,
											},
											Key: utils.PipelineSecretTokenKey,
										}},
								}, {
									// Admin user name, from the same secret.
									Name: "ADMIN_USER",
									ValueFrom: &corev1.EnvVarSource{
										SecretKeyRef: &corev1.SecretKeySelector{
											LocalObjectReference: corev1.LocalObjectReference{
												Name: utils.PipelineSecretName,
											},
											Key: utils.PipelineSecretUserKey,
										}},
								}, {
									// JVM heap cap plus NodeProvisioner tuning flags.
									Name:  "JAVA_OPTS",
									Value: "-Xmx300m -Dhudson.slaves.NodeProvisioner.initialDelay=0 -Dhudson.slaves.NodeProvisioner.MARGIN=50 -Dhudson.slaves.NodeProvisioner.MARGIN0=0.85 -Dhudson.model.LoadStatistics.clock=2000 -Dhudson.slaves.NodeProvisioner.recurrencePeriod=2000",
								}, {
									Name:  "NAMESPACE",
									Value: ns,
								}, {
									// Pod IP injected via the downward API.
									Name: "JENKINS_POD_IP",
									ValueFrom: &corev1.EnvVarSource{
										FieldRef: &corev1.ObjectFieldSelector{
											FieldPath: "status.podIP",
										},
									},
								},
							},
							Ports: []corev1.ContainerPort{
								{
									Name:          "http",
									ContainerPort: utils.JenkinsPort,
								},
								{
									Name:          "agent",
									ContainerPort: utils.JenkinsJNLPPort,
								},
							},
							ReadinessProbe: &corev1.Probe{
								Handler: corev1.Handler{
									HTTPGet: &corev1.HTTPGetAction{
										Path: "/login",
										Port: intstr.FromInt(utils.JenkinsPort),
									},
								},
							},
						},
					},
				},
			},
		},
	}
}
// getNetworkPolicy builds the network policy for namespace ns. It selects
// the Jenkins and Minio pods and attaches a single empty ingress rule,
// which allows all inbound traffic to them.
func getNetworkPolicy(ns string) *v1.NetworkPolicy {
	return &v1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name:      utils.NetWorkPolicyName,
		},
		Spec: v1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{
				MatchExpressions: []metav1.LabelSelectorRequirement{
					{
						Key:      utils.LabelKeyApp,
						Operator: metav1.LabelSelectorOpIn,
						Values:   []string{utils.JenkinsName, utils.MinioName},
					},
				},
			},
			// An empty rule matches everything: allow all ingress.
			Ingress: []v1.NetworkPolicyIngressRule{{}},
		},
	}
}
// getRegistryService builds a NodePort service exposing the registry pods
// in namespace ns on the registry port.
func getRegistryService(ns string) *corev1.Service {
	return &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name:      utils.RegistryName,
		},
		Spec: corev1.ServiceSpec{
			Selector: map[string]string{
				utils.LabelKeyApp: utils.RegistryName,
			},
			Ports: []corev1.ServicePort{
				{
					Name: utils.RegistryName,
					Port: utils.RegistryPort,
				},
			},
			Type: corev1.ServiceTypeNodePort,
		},
	}
}
// getRegistryDeployment builds the single-replica image registry
// deployment for namespace ns.
func getRegistryDeployment(ns string) *appsv1beta2.Deployment {
	replicas := int32(1)
	return &appsv1beta2.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name:      utils.RegistryName,
		},
		Spec: appsv1beta2.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{utils.LabelKeyApp: utils.RegistryName},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{utils.LabelKeyApp: utils.RegistryName},
					Name:   utils.RegistryName,
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{
							Name:            utils.RegistryName,
							Image:           images.Resolve(mv3.ToolsSystemImages.PipelineSystemImages.Registry),
							ImagePullPolicy: corev1.PullAlways,
							Ports: []corev1.ContainerPort{
								{
									Name:          utils.RegistryName,
									ContainerPort: utils.RegistryPort,
								},
							},
						},
					},
				},
			},
		},
	}
}
// getMinioService builds the (cluster-internal) service exposing the
// Minio pods in namespace ns on the Minio port.
func getMinioService(ns string) *corev1.Service {
	return &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name:      utils.MinioName,
		},
		Spec: corev1.ServiceSpec{
			Selector: map[string]string{
				utils.LabelKeyApp: utils.MinioName,
			},
			Ports: []corev1.ServicePort{
				{
					Name: utils.MinioName,
					Port: utils.MinioPort,
				},
			},
		},
	}
}
// getMinioDeployment builds the single-replica Minio deployment for
// namespace ns, serving the /data directory with credentials taken from
// the pipeline secret (the same user/token pair Jenkins uses).
func getMinioDeployment(ns string) *appsv1beta2.Deployment {
	replicas := int32(1)
	return &appsv1beta2.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name:      utils.MinioName,
		},
		Spec: appsv1beta2.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{utils.LabelKeyApp: utils.MinioName},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{utils.LabelKeyApp: utils.MinioName},
					Name:   utils.MinioName,
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{
							Name:            utils.MinioName,
							Image:           images.Resolve(mv3.ToolsSystemImages.PipelineSystemImages.Minio),
							ImagePullPolicy: corev1.PullAlways,
							Args:            []string{"server", "/data"},
							Env: []corev1.EnvVar{
								{
									// Secret key comes from the shared pipeline secret token.
									Name: "MINIO_SECRET_KEY",
									ValueFrom: &corev1.EnvVarSource{
										SecretKeyRef: &corev1.SecretKeySelector{
											LocalObjectReference: corev1.LocalObjectReference{
												Name: utils.PipelineSecretName,
											},
											Key: utils.PipelineSecretTokenKey,
										}},
								}, {
									// Access key is the pipeline secret user name.
									Name: "MINIO_ACCESS_KEY",
									ValueFrom: &corev1.EnvVarSource{
										SecretKeyRef: &corev1.SecretKeySelector{
											LocalObjectReference: corev1.LocalObjectReference{
												Name: utils.PipelineSecretName,
											},
											Key: utils.PipelineSecretUserKey,
										}},
								},
							},
							Ports: []corev1.ContainerPort{
								{
									Name:          utils.MinioName,
									ContainerPort: utils.MinioPort,
								},
							},
						},
					},
				},
			},
		},
	}
}
|
/*****************************************************************
* Copyright©,2020-2022, email: 279197148@qq.com
* Version: 1.0.0
* @Author: yangtxiang
* @Date: 2020-08-17 15:52
* Description:
*****************************************************************/
package xhttpServer
import (
	"fmt"
	"io"
	"mime/multipart"
	"os"

	"github.com/go-xe2/x/os/xfile"
	"github.com/go-xe2/x/type/xtime"
	"github.com/go-xe2/x/utils/xrand"
)
// File abstracts an uploaded multipart file: metadata accessors plus
// persistence to disk.
type File interface {
	Header() *multipart.FileHeader // raw multipart header
	Size() int64                   // upload size in bytes
	Ext() string                   // extension of the original file name
	FileName() string              // original client-side file name
	Save(fileName string) error    // write the content to fileName
	MakeFileName() string          // generate a server-side file name
}
// THttpFile is the multipart-backed implementation of File.
type THttpFile struct {
	header *multipart.FileHeader
}
// Compile-time check that *THttpFile satisfies File.
var _ File = (*THttpFile)(nil)
// NewHttpFile wraps a multipart file header in a THttpFile.
func NewHttpFile(header *multipart.FileHeader) *THttpFile {
	return &THttpFile{
		header: header,
	}
}
// MakeFileName builds a server-side name from the current time, a
// 5-character random string and the original file extension.
// NOTE(review): xtime.Now().String() may contain spaces/colons that are
// awkward in file names — confirm its format.
func (p *THttpFile) MakeFileName() string {
	return fmt.Sprintf("%s_%s.%s", xtime.Now().String(), xrand.Str(5), p.Ext())
}
// Header returns the underlying multipart file header.
func (p *THttpFile) Header() *multipart.FileHeader {
	return p.header
}
// Size returns the upload size in bytes as reported by the header.
func (p *THttpFile) Size() int64 {
	return p.header.Size
}
// Ext returns the extension of the original file name.
func (p *THttpFile) Ext() string {
	return xfile.Ext(p.FileName())
}
// FileName returns the original client-provided file name.
func (p *THttpFile) FileName() string {
	return p.header.Filename
}
// Save writes the uploaded content to fileName, creating the target
// directory if it does not exist. Any existing file at fileName is
// truncated.
func (p *THttpFile) Save(fileName string) error {
	dir := xfile.Dir(fileName)
	if !xfile.Exists(dir) {
		if err := xfile.Mkdir(dir); err != nil {
			return err
		}
	}
	f, err := p.header.Open()
	if err != nil {
		return err
	}
	defer f.Close()
	// Create (or truncate) the output file.
	out, err := xfile.OpenWithFlag(fileName, os.O_CREATE|os.O_TRUNC|os.O_RDWR)
	if err != nil {
		return err
	}
	defer out.Close()
	// io.Copy streams until EOF. The previous manual loop, driven by
	// p.header.Size, dropped data when a reader returned n > 0 together
	// with io.EOF and returned a spurious io.EOF when the actual content
	// was shorter than the header-declared size.
	_, err = io.Copy(out, f)
	return err
}
|
package Problem0726
import (
"sort"
"strconv"
)
func countOfAtoms(formula string) string {
return parse(count(formula))
}
// count tallies the atoms in formula, recursing into parenthesised
// groups. The returned map is atom name -> total count.
func count(formula string) map[string]int {
	rec := make(map[string]int, len(formula)/2)
	// update folds a sub-formula's counts into rec, multiplied by times
	// (the group's trailing multiplier).
	var update = func(newRec map[string]int, times int) {
		for atom, c := range newRec {
			rec[atom] += c * times
		}
	}
	atoms := ""
	for len(formula) > 0 {
		// Take the next token: a parenthesised group with its multiplier,
		// or a single atom with its count digits.
		atoms, formula = cut(formula)
		if atoms[0] == '(' {
			newFormula, times := dealParenthese(atoms)
			newRec := count(newFormula)
			update(newRec, times)
		} else {
			atom, num := getAtomAndNum(atoms)
			rec[atom] += num
		}
	}
	return rec
}
// cut splits the first token off formula and returns (token, rest). A
// token is either a complete parenthesised group or a single atom, in
// both cases including any trailing count digits.
func cut(formula string) (string, string) {
	i := 1
	if formula[0] == '(' {
		i = jump(formula)
	}
	// Extend over lowercase letters and digits until the next uppercase
	// letter (a new atom) or an opening parenthesis.
	for i < len(formula) &&
		!isUpper(formula[i]) &&
		formula[i] != '(' {
		i++
	}
	return formula[:i], formula[i:]
}
// dealParenthese strips the surrounding parentheses from a group token
// and returns the inner formula plus the trailing multiplier (1 if none).
func dealParenthese(s string) (string, int) {
	num, i := getNum(s)
	return s[1 : i-1], num
}
// getAtomAndNum splits an atom token into its name and trailing count
// (1 when no digits are present).
func getAtomAndNum(s string) (string, int) {
	num, i := getNum(s)
	return s[:i], num
}
// getNum extracts the trailing decimal number of s and the index where it
// starts; e.g. for "Ab321" it returns 321 and index 2. When s has no
// trailing digits, num is 1 and i == len(s).
func getNum(s string) (num, i int) {
	i = len(s)
	for i > 0 && isNum(s[i-1]) {
		i--
	}
	if i == len(s) {
		return 1, i
	}
	num, _ = strconv.Atoi(s[i:])
	return num, i
}
// isNum reports whether b is an ASCII decimal digit.
func isNum(b byte) bool {
	return b >= '0' && b <= '9'
}
// isUpper reports whether b is an ASCII uppercase letter (the start of
// an atom name).
func isUpper(b byte) bool {
	return b >= 'A' && b <= 'Z'
}
// jump returns the index just past the parenthesized group starting at
// s[0], matching nested parentheses with a depth counter.
func jump(s string) int {
	depth := 1
	pos := 1
	for pos < len(s) && depth > 0 {
		switch s[pos] {
		case '(':
			depth++
		case ')':
			depth--
		}
		pos++
	}
	return pos
}
// parse renders atom counts as the canonical answer string: atoms in
// sorted order, each followed by its count when greater than one.
func parse(r map[string]int) string {
	atoms := make([]string, 0, len(r))
	for a := range r {
		atoms = append(atoms, a)
	}
	sort.Strings(atoms)
	// Build into a byte slice instead of repeated string concatenation,
	// which is quadratic in the worst case.
	buf := make([]byte, 0, len(r)*4)
	for _, a := range atoms {
		buf = append(buf, a...)
		if c := r[a]; c > 1 {
			buf = strconv.AppendInt(buf, int64(c), 10)
		}
	}
	return string(buf)
}
|
package cpu
import (
"encoding/hex"
"fmt"
logger2 "github.com/vfreex/gones/pkg/emulator/common/logger"
"github.com/vfreex/gones/pkg/emulator/memory"
)
const (
	// SP_BASE is the base address of the 6502 hardware stack page; the
	// 8-bit stack pointer is an offset into $0100-$01FF.
	SP_BASE uint16 = 0x100
)
// Cpu models the NES 6502-family CPU core: register file, the attached
// memory map, pending interrupt lines, and a counter of extra stall
// cycles to charge to the next instruction.
type Cpu struct {
	// registers
	PC ProgramCounter
	P ProcessorStatus
	SP StackPointer
	A Accumulator
	X, Y IndexRegister
	// memory
	Memory memory.Memory
	// interrupts
	NMI bool // non-maskable interrupt pending
	IRQ bool // maskable interrupt pending; honored only when P's I flag is clear
	// waitCycles
	Wait int // extra cycles (e.g. stalls) added to the next instruction, then reset
}
// logger is the shared package logger used for CPU tracing output.
var logger = logger2.GetLogger()
// NewCpu returns a Cpu wired to the given memory map; registers stay
// zeroed until PowerUp is called.
func NewCpu(memory memory.Memory) *Cpu {
	return &Cpu{Memory: memory}
}
// PowerUp puts the CPU into the documented NES power-on state and
// loads PC from the RESET interrupt vector.
func (cpu *Cpu) PowerUp() {
	/*
		P = $34[1] (IRQ disabled)[2]
		A, X, Y = 0
		S = $FD
		$4017 = $00 (frame irq enabled)
		$4015 = $00 (all channels disabled)
		$4000-$400F = $00 (not sure about $4010-$4013)
		All 15 bits of noise channel LFSR = $0000[3]. The first time the LFSR is clocked from the all-0s state, it will shift in a 1.
		Internal memory ($0000-$07FF) has unreliable startup state. Some machines may have consistent RAM contents at power-on, but others do not.
		Emulators often implement a consistent RAM startup state (e.g. all $00 or $FF, or a particular pattern), and flash carts like the PowerPak may partially or fully initialize RAM before starting a program, so an NES programmer must be careful not to rely on the startup contents of RAM.
	*/
	cpu.P = 0x34
	cpu.A, cpu.X, cpu.Y = 0, 0, 0
	cpu.SP = 0xfd
	// Entry point comes from the RESET vector.
	cpu.PC = cpu.ReadInterruptVector(IV_RESET)
	logger.Debugf("entrypoint: PC=$%4x", cpu.PC)
}
// Test executes instructions until PC reaches $810f.
// NOTE(review): the end address looks hard-coded for one particular
// test ROM — confirm before reusing this as a general smoke test.
func (cpu *Cpu) Test() {
	for cpu.PC < 0x810f {
		cycles := cpu.ExecOneInstruction()
		logger.Infof("spent %d cycles", cycles)
	}
}
// Push writes b to the hardware stack page and post-decrements SP.
// CONSISTENCY: uses the declared SP_BASE constant instead of repeating
// the magic number 0x100 (SP_BASE was previously unused).
func (cpu *Cpu) Push(b byte) {
	cpu.Memory.Poke(memory.Ptr(SP_BASE)|memory.Ptr(cpu.SP), b)
	cpu.SP--
}
// PushW pushes a 16-bit word, high byte first, so that PopW restores
// it in little-endian order.
func (cpu *Cpu) PushW(w uint16) {
	cpu.Push(byte(w >> 8))
	cpu.Push(byte(w)) // byte() conversion keeps the low 8 bits
}
// Pop pre-increments SP and reads the byte at the top of the hardware
// stack page.
// CONSISTENCY: uses the declared SP_BASE constant instead of repeating
// the magic number 0x100 (SP_BASE was previously unused).
func (cpu *Cpu) Pop() byte {
	cpu.SP++
	return cpu.Memory.Peek(memory.Ptr(SP_BASE) | memory.Ptr(cpu.SP))
}
// PopW pops a 16-bit word pushed by PushW (low byte is on top).
func (cpu *Cpu) PopW() uint16 {
	lo := uint16(cpu.Pop())
	hi := uint16(cpu.Pop())
	return hi<<8 | lo
}
// ExecOneInstruction services any pending interrupt, then fetches,
// decodes and executes a single instruction, returning the total cycle
// count (1 base + addressing + execution + queued wait cycles).
func (cpu *Cpu) ExecOneInstruction() (cycles int) {
	// NMI takes priority; IRQ is honored only while the I flag is clear.
	// NOTE(review): cpu.NMI is presumably cleared inside ExecNMI — confirm.
	if cpu.NMI {
		cpu.ExecNMI()
	} else if cpu.IRQ && cpu.P&PFLAG_I == 0 {
		cpu.ExecIRQ()
	}
	cpu.logInstruction()
	opcode := cpu.Memory.Peek(cpu.PC)
	handler := opcodeHandlers[opcode]
	if handler == nil {
		logger.Fatalf("opcode %02x is not supported", opcode)
	}
	cpu.PC++
	operandAddr, cycles1 := cpu.AddressOperand(handler.AddressingMode)
	cpu.logRegisters()
	cycles2 := handler.Executor(cpu, operandAddr)
	cpu.logRegisters()
	// Consume externally requested stall cycles exactly once.
	wait := cpu.Wait
	cpu.Wait = 0
	return 1 + cycles1 + cycles2 + wait
}
// logInstruction traces the instruction at PC before execution:
// mnemonic, formatted operand, raw opcode and argument bytes.
func (cpu *Cpu) logInstruction() {
	opcode := cpu.Memory.Peek(cpu.PC)
	info := &InstructionInfos[opcode]
	arguments := make([]byte, info.AddressingMode.GetArgumentCount())
	// Peek 0-2 argument bytes after the opcode; fallthrough fills
	// arguments[0] for both one- and two-byte operands.
	switch info.AddressingMode.GetArgumentCount() {
	case 2:
		arguments[1] = cpu.Memory.Peek(cpu.PC + 2)
		fallthrough
	case 1:
		arguments[0] = cpu.Memory.Peek(cpu.PC + 1)
	}
	logger.Debugf("L%04x: %s %s ; %02x (%s-%s) %s",
		cpu.PC, info.Nemonics, formatInstructionArgument(info.AddressingMode, arguments),
		opcode, info.Nemonics, info.AddressingMode.String(), hex.EncodeToString(arguments))
}
// formatInstructionArgument renders an instruction's operand bytes in
// conventional 6502 assembly notation for the given addressing mode.
// Modes without an argument (or unknown modes) yield an empty string.
func formatInstructionArgument(am AddressingMode, args []byte) string {
	switch am {
	case IMM:
		return fmt.Sprintf("#$%x", args[0])
	case ZP:
		return fmt.Sprintf("$%02x", args[0])
	case ZPX:
		return fmt.Sprintf("$%02x,X", args[0])
	case ZPY:
		return fmt.Sprintf("$%02x,Y", args[0])
	case REL:
		// Relative offset is signed.
		return fmt.Sprintf("*$%+x", int8(args[0]))
	case ABS:
		return fmt.Sprintf("$%02x%02x", args[1], args[0])
	case ABX:
		return fmt.Sprintf("$%02x%02x,X", args[1], args[0])
	case ABY:
		return fmt.Sprintf("$%02x%02x,Y", args[1], args[0])
	case IND:
		return fmt.Sprintf("($%02x%02x)", args[1], args[0])
	case IZX:
		return fmt.Sprintf("($%02x,X)", args[0])
	case IZY:
		return fmt.Sprintf("($%02x),Y", args[0])
	}
	return ""
}
// logRegisters traces the full register file; called before and after
// each instruction's executor.
func (cpu *Cpu) logRegisters() {
	logger.Debugf(";; PC=%04x, P=%s, SP=%02x, A=%02x, X=%02x, Y=%02x", cpu.PC, cpu.P, cpu.SP, cpu.A, cpu.X, cpu.Y)
}
|
package example
import (
"github.com/catmorte/go-inversion_of_control/pkg/context"
)
// init installs an in-memory IoC context as the process-wide default,
// so merely importing this package configures the container.
func init() {
	context.SetContext(context.NewMemoryContext())
}
|
package boilerplateapi
import (
"os"
"testing"
)
// testRecord is a minimal JSON-serializable fixture for bolt-store
// round-trip tests.
type testRecord struct {
	Name string `json:"name"`
	Comment string `json:"comment"`
}
// TestLoadBoltstore kicks the tires on the bolt store: save one record
// and read it back, removing any stale database file first.
func TestLoadBoltstore(t *testing.T) {
	boltPath := "./test.boltdb"
	// BUG FIX: os.Stat returns a nil error when the file exists, and
	// os.IsExist(nil) is false — so the original cleanup branch never
	// ran. Check err == nil instead.
	if _, err := os.Stat(boltPath); err == nil {
		t.Logf("Deleting %v before test", boltPath)
		os.Remove(boltPath)
	}
	boltConnection, err := GetBoltStore(boltPath)
	if err != nil {
		t.Fatalf("Error connectiong to bolt store: %v", err)
	}
	defer boltConnection.Close()
	defer os.Remove(boltPath)
	myRecord := testRecord{Name: "myName", Comment: "my comment"}
	if err := boltConnection.SaveRecord("testRecord", &myRecord, myRecord.Name); err != nil {
		t.Fatalf("Failed to save record: %v", err)
	}
	var loadedRecord testRecord
	if err := boltConnection.LoadRecord("testRecord", &loadedRecord, myRecord.Name); err != nil {
		t.Fatalf("Failed to load record: %v", err)
	}
	if loadedRecord.Name != myRecord.Name || loadedRecord.Comment != myRecord.Comment {
		t.Fatalf("Records don't match")
	}
}
|
package util
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/hex"
"fmt"
"io"
mrand "math/rand"
"time"
p "github.com/halivor/common/golang/packet"
_ "github.com/halivor/goutil/bufferpool"
)
var KEY = ""
func InitCrypto(key string) {
KEY = key
}
// GeneToken builds a token for uid with random 16-byte key and
// signature (hex encoded), an expiry roughly 30 days out with up to an
// hour of jitter, and a random nonce.
func GeneToken(uid int64) *p.Token {
	key := make([]byte, 16) // TODO: consider a fixed key, or a fixed per-user key
	sign := make([]byte, 16)
	// BUG FIX: the original ignored these errors; a failed read would
	// silently produce an all-zero (guessable) key, so fail loudly.
	if _, err := io.ReadFull(rand.Reader, key); err != nil {
		panic(err)
	}
	if _, err := io.ReadFull(rand.Reader, sign); err != nil {
		panic(err)
	}
	exp := time.Hour*24*30 + time.Duration(mrand.Int63n(3600))*time.Second
	return &p.Token{
		Uid:  uid,
		Key:  hex.EncodeToString(key),
		Sign: hex.EncodeToString(sign),
		Exp:  time.Now().Add(exp).Unix(),
		Rand: mrand.Int63(),
	}
}
// EncData AES-CBC encrypts data with a 16-byte key and returns the
// result hex encoded. data is right-padded with spaces to a whole
// number of blocks; a bad key length panics, empty data yields "".
//
// NOTE(review/security): the first plaintext block is copied verbatim
// into the output and used as the IV — it is NOT encrypted, so the
// first 16 bytes of every message are visible in the clear, and the IV
// is not random. DecData mirrors this scheme, so changing it would
// break existing ciphertexts; flagged here rather than fixed.
func EncData(key, data []byte) string {
	switch {
	case len(key) != 16:
		panic(fmt.Sprintf("加密密钥[%s]长度[%d]错误",
			string(key), len(key)))
	case len(data) == 0:
		return ""
	case len(data)%aes.BlockSize != 0:
		// Pad with spaces up to the next block boundary.
		data = append(data, bytes.Repeat([]byte(" "), aes.BlockSize-len(data)%aes.BlockSize)...)
	}
	c, _ := aes.NewCipher(key) // key length already validated above
	enc_data := make([]byte, len(data))
	copy(enc_data, data[:aes.BlockSize])
	mode := cipher.NewCBCEncrypter(c, enc_data[:aes.BlockSize])
	mode.CryptBlocks(enc_data[aes.BlockSize:], data[aes.BlockSize:])
	return fmt.Sprintf("%x", enc_data)
}
// DecData reverses EncData: data's first block is the IV (which is
// also the unencrypted first plaintext block — see EncData's note);
// the remaining blocks are AES-CBC decrypted. The returned string
// excludes that first block and may carry EncData's trailing space
// padding. Bad key or data lengths panic; empty data yields "".
func DecData(key, data []byte) string {
	switch {
	case len(key) != 16:
		panic(fmt.Sprintf("解密密钥[%s]长度[%d]错误",
			string(key), len(key)))
	case len(data) == 0:
		return ""
	case len(data) < aes.BlockSize, len(data)%aes.BlockSize != 0:
		// BUG FIX: this branch previously reused the key-length panic
		// message verbatim; report the actual problem (bad data length).
		panic(fmt.Sprintf("解密数据长度[%d]错误", len(data)))
	}
	c, _ := aes.NewCipher(key) // key length already validated above
	mode := cipher.NewCBCDecrypter(c, data[:aes.BlockSize])
	dec_data := make([]byte, len(data[aes.BlockSize:]))
	mode.CryptBlocks(dec_data, data[aes.BlockSize:])
	return string(dec_data)
}
|
package main
import (
"flag"
notify "tpay_backend/payapi/internal/order_notify"
"tpay_backend/payapi/internal/common"
"tpay_backend/payapi/internal/config"
"tpay_backend/payapi/internal/crontab"
"tpay_backend/payapi/internal/handler"
"tpay_backend/payapi/internal/svc"
"tpay_backend/utils"
"github.com/tal-tech/go-zero/core/conf"
"github.com/tal-tech/go-zero/core/logx"
"github.com/tal-tech/go-zero/rest"
"github.com/tal-tech/go-zero/rest/httpx"
)
// configFile is the -f flag: path to the API server's YAML config file.
var configFile = flag.String("f", "payapi/etc/payapi-api.yaml", "the config file")

// main wires up and runs the pay API server: config, service context,
// HTTP routes, cron jobs, and the Redis key-expiry listener.
func main() {
	flag.Parse()
	var c config.Config
	// Load the configuration (fatal on failure).
	conf.MustLoad(*configFile, &c)
	// Force the Redis database number reserved for payapi.
	c.Redis.DB = utils.RedisDbPayapi
	// Set the process timezone.
	utils.SetTimezone(c.Timezone)
	ctx := svc.NewServiceContext(c)
	server := rest.MustNewServer(c.RestConf)
	defer server.Stop()
	// Install the shared error-to-HTTP-response translator.
	httpx.SetErrorHandler(common.ErrorHandler)
	// Register all HTTP route handlers.
	handler.RegisterHandlers(server, ctx)
	// Start scheduled background jobs.
	crontab.Start(ctx)
	defer crontab.Stop()
	// Listen for Redis key-expiration events.
	// NOTE(review): presumably non-blocking/spawns its own goroutine —
	// confirm, otherwise server.Start below is never reached.
	notify.NewListenExpKeyHandler(ctx).ListenRedisExpKey()
	logx.Info("test-2020-04-24 16:12...")
	logx.Infof("Starting server at %s:%d...", c.Host, c.Port)
	server.Start()
}
|
package authapi
// Success response
// swagger:response ok
type swaggOKResp struct{}

// Error response
// swagger:response err
type swaggErrResp struct{}

// Error response with message
// swagger:response errMsg
type swaggErrMsgResp struct {
	// Message carries the human-readable error description.
	Message string `json:"message"`
}
|
// Copyright 2019 Kuei-chun Chen. All rights reserved.
package analytics
import (
	"bufio"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/simagix/gox"
	"github.com/simagix/keyhole/ftdc"
	"github.com/simagix/keyhole/mdb"
	"go.mongodb.org/mongo-driver/bson"
)
// OSDoc describes the host operating system as reported by hostInfo.
type OSDoc struct {
	Name string `json:"name" bson:"name"`
	Type string `json:"type" bson:"type"`
	Version string `json:"version" bson:"version"`
}

// SystemDoc describes the host hardware as reported by hostInfo.
type SystemDoc struct {
	CPUArch string `json:"cpuArch" bson:"cpuArch"`
	Hostname string `json:"hostname" bson:"hostname"`
	NumCores int `json:"numCores" bson:"numCores"`
	MemSizeMB int `json:"memSizeMB" bson:"memSizeMB"`
}

// HostInfo mirrors MongoDB's hostInfo command output (os + system).
type HostInfo struct {
	OS OSDoc `json:"os" bson:"os"`
	System SystemDoc `json:"system" bson:"system"`
}

// BuildInfo carries the server version from the buildInfo command.
type BuildInfo struct {
	Version string `json:"version" bson:"version"`
}

// ServerInfoDoc pairs hostInfo with buildInfo for one server.
type ServerInfoDoc struct {
	HostInfo HostInfo `json:"hostInfo" bson:"hostInfo"`
	BuildInfo BuildInfo `json:"buildInfo" bson:"buildInfo"`
}

// DiagnosticData aggregates everything decoded from FTDC files:
// server info plus per-sample status/replset/system-metrics lists.
type DiagnosticData struct {
	ServerInfo interface{}
	ServerStatusList []ServerStatusDoc
	ReplSetStatusList []ReplSetStatusDoc
	SystemMetricsList []SystemMetricsDoc
	endpoints []string // grafana URI endpoints, see GetEndPoints
}

// DiagnosticDoc is the first (reference) document of an FTDC block.
type DiagnosticDoc struct {
	Start time.Time `json:"start" bson:"start"`
	ServerStatus ServerStatusDoc `json:"serverStatus" bson:"serverStatus"`
	ReplSetGetStatus ReplSetStatusDoc `json:"replSetGetStatus" bson:"replSetGetStatus"`
	SystemMetrics SystemMetricsDoc `json:"systemMetrics" bson:"systemMetrics"`
	End time.Time `json:"end" bson:"end"`
}
// NewDiagnosticData returns a DiagnosticData whose server-status and
// replset-status lists are initialized to empty (non-nil) slices.
func NewDiagnosticData() *DiagnosticData {
	d := DiagnosticData{
		ServerStatusList:  []ServerStatusDoc{},
		ReplSetStatusList: []ReplSetStatusDoc{},
	}
	return &d
}
// GetEndPoints returns the grafana URI endpoints collected while
// decoding FTDC data (see DecodeDiagnosticData).
func (d *DiagnosticData) GetEndPoints() []string {
	return d.endpoints
}
// DecodeDiagnosticData decodes FTDC data files and records the grafana
// time-range endpoint covering the decoded samples.
func (d *DiagnosticData) DecodeDiagnosticData(filenames []string) error {
	var err error
	fnames := GetMetricsFilenames(filenames)
	if err = d.readDiagnosticFiles(fnames); err != nil {
		return err
	}
	if len(d.ServerStatusList) == 0 {
		log.Println("no server status found")
		// No samples: fall back to a 10-minute window starting now
		// (timestamps in milliseconds).
		t := time.Now().Unix() * 1000
		minute := int64(60) * 1000
		d.endpoints = append(d.endpoints, fmt.Sprintf(analyticsEndpoint, t, t+(10*minute)))
	} else {
		fmt.Printf("\nStats from %v to %v\n", d.ServerStatusList[0].LocalTime.Format("2006-01-02T15:04:05Z"),
			d.ServerStatusList[len(d.ServerStatusList)-1].LocalTime.Format("2006-01-02T15:04:05Z"))
		d.endpoints = append(d.endpoints, fmt.Sprintf(analyticsEndpoint,
			d.ServerStatusList[0].LocalTime.Unix()*1000,
			d.ServerStatusList[len(d.ServerStatusList)-1].LocalTime.Unix()*1000))
	}
	return nil
}
// PrintDiagnosticData decodes the given FTDC files and returns a
// human-readable summary: one server-info line (when available)
// followed by all server-status stats.
func (d *DiagnosticData) PrintDiagnosticData(filenames []string) (string, error) {
	if err := d.DecodeDiagnosticData(filenames); err != nil {
		return "", err
	}
	strs := []string{}
	if d.ServerInfo != nil {
		var p mdb.ClusterStats
		// Round-trip through JSON to coerce the loosely-typed ServerInfo
		// into ClusterStats. NOTE(review): both errors are ignored — a
		// marshaling failure would silently print zero values here.
		b, _ := json.Marshal(d.ServerInfo)
		json.Unmarshal(b, &p)
		result := fmt.Sprintf(`MongoDB v%v %v (%v) %v %v %v cores %v mem`,
			p.BuildInfo.Version, p.HostInfo.System.Hostname, p.HostInfo.OS.Name,
			p.ServerStatus.Process, p.Cluster, p.HostInfo.System.NumCores, p.HostInfo.System.MemSizeMB)
		strs = append(strs, result)
	}
	strs = append(strs, PrintAllStats(d.ServerStatusList, -1))
	return strings.Join(strs, "\n"), nil
}
// readDiagnosticDir reads diagnostic.data metric files from a directory.
func (d *DiagnosticData) readDiagnosticDir(dirname string) error {
	entries, err := ioutil.ReadDir(dirname)
	if err != nil {
		return err
	}
	var filenames []string
	for _, entry := range entries {
		name := entry.Name()
		// Only FTDC metric dumps and keyhole's own stats files count.
		if !strings.HasPrefix(name, "metrics.") && !strings.HasPrefix(name, "keyhole_stats.") {
			continue
		}
		filenames = append(filenames, dirname+"/"+name)
	}
	if len(filenames) == 0 {
		return errors.New("No metrics file found under " + dirname)
	}
	return d.readDiagnosticFiles(filenames)
}
// readDiagnosticFiles reads multiple metric files — keyhole stats
// files sequentially, FTDC metric files concurrently — and merges the
// decoded contents into d in sorted filename order.
func (d *DiagnosticData) readDiagnosticFiles(filenames []string) error {
	var err error
	if len(filenames) == 0 {
		return errors.New("no valid data file found")
	}
	sort.Strings(filenames)
	if strings.Contains(filenames[0], "keyhole_stats.") {
		for _, filename := range filenames {
			if err = d.analyzeServerStatusFromFile(filename); err != nil {
				return err
			}
		}
		return err
	}
	btime := time.Now()
	log.Printf("reading %d files with %d second(s) interval\n", len(filenames), 1)
	var diagDataMap = map[string]DiagnosticData{}
	nThreads := runtime.NumCPU() - 1
	if nThreads < 1 {
		nThreads = 1
	}
	// BUG FIX: diagDataMap and the shared err were written from
	// multiple goroutines without synchronization — a data race
	// (map writes can corrupt the map outright). Guard both with a
	// mutex; only failures overwrite err.
	var mu sync.Mutex
	var wg = gox.NewWaitGroup(nThreads) // bounded concurrency
	for threadNum := 0; threadNum < len(filenames); threadNum++ {
		filename := filenames[threadNum]
		if !strings.Contains(filename, "metrics.") {
			continue
		}
		wg.Add(1)
		go func(filename string) {
			defer wg.Done()
			diagData, ferr := d.readDiagnosticFile(filename)
			mu.Lock()
			defer mu.Unlock()
			if ferr != nil {
				err = ferr
				return
			}
			diagDataMap[filename] = diagData
		}(filename)
	}
	wg.Wait()
	// Merge per-file results in deterministic (sorted) order.
	keys := make([]string, 0, len(diagDataMap))
	for k := range diagDataMap {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, key := range keys {
		if diagDataMap[key].ServerInfo != nil {
			d.ServerInfo = diagDataMap[key].ServerInfo
		}
		d.ServerStatusList = append(d.ServerStatusList, diagDataMap[key].ServerStatusList...)
		d.SystemMetricsList = append(d.SystemMetricsList, diagDataMap[key].SystemMetricsList...)
		d.ReplSetStatusList = append(d.ReplSetStatusList, diagDataMap[key].ReplSetStatusList...)
	}
	log.Println(len(filenames), "files loaded, time spent:", time.Since(btime))
	return err
}
// readDiagnosticFile reads diagnostic.data from a single FTDC file and
// expands its compressed metric blocks into per-sample documents.
func (d *DiagnosticData) readDiagnosticFile(filename string) (DiagnosticData, error) {
	btm := time.Now()
	var diagData = DiagnosticData{}
	var buffer []byte
	var err error
	var r *bufio.Reader
	if r, err = gox.NewFileReader(filename); err != nil {
		return diagData, err
	}
	if buffer, err = ioutil.ReadAll(r); err != nil {
		return diagData, err
	}
	metrics := ftdc.NewMetrics()
	// NOTE(review): any error from ReadAllMetrics is ignored here.
	metrics.ReadAllMetrics(&buffer)
	diagData.ServerInfo = metrics.Doc
	for _, v := range metrics.Data {
		var doc DiagnosticDoc
		bson.Unmarshal(v.Block, &doc) // first document of the block
		diagData.ReplSetStatusList = append(diagData.ReplSetStatusList, doc.ReplSetGetStatus)
		attrib := NewAttribs(&v.DataPointsMap)
		// Expand each delta-encoded sample in the block.
		for i := 0; i < int(v.NumDeltas); i++ {
			ss := attrib.GetServerStatusDataPoints(i)
			diagData.ServerStatusList = append(diagData.ServerStatusList, ss)
			sm := attrib.GetSystemMetricsDataPoints(i)
			diagData.SystemMetricsList = append(diagData.SystemMetricsList, sm)
		}
	}
	filename = filepath.Base(filename)
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	mem := fmt.Sprintf("Memory Alloc = %v MiB, TotalAlloc = %v MiB", m.Alloc/(1024*1024), m.TotalAlloc/(1024*1024))
	log.Println(filename, "blocks:", len(metrics.Data), ", time:", time.Since(btm), mem)
	return diagData, err
}
// analyzeServerStatusFromFile opens filename and feeds its contents to
// AnalyzeServerStatus.
func (d *DiagnosticData) analyzeServerStatusFromFile(filename string) error {
	reader, err := gox.NewFileReader(filename)
	if err != nil {
		return err
	}
	return d.AnalyzeServerStatus(reader)
}
// AnalyzeServerStatus parses keyhole stats output where lines cycle:
// serverStatus docs (1st of 3), replset docs (2nd of 3), and server
// info (3rd line), appending the decoded samples onto d.
func (d *DiagnosticData) AnalyzeServerStatus(reader *bufio.Reader) error {
	var err error
	var allDocs = []ServerStatusDoc{}
	var docs = []ServerStatusDoc{}
	var allRepls = []ReplSetStatusDoc{}
	var repls = []ReplSetStatusDoc{}
	cnt := 0
	for {
		line, ferr := reader.ReadBytes('\n')
		if ferr == io.EOF {
			break
		}
		cnt++
		if cnt%3 == 1 { // serverStatus
			if err = json.Unmarshal(line, &docs); err != nil {
				return err
			}
			allDocs = append(allDocs, docs...)
		} else if cnt%3 == 2 { // replica
			if err = json.Unmarshal(line, &repls); err != nil {
				return err
			}
			allRepls = append(allRepls, repls...)
		} else if cnt == 3 { // serverInfo
			// NOTE(review): this is `cnt == 3`, not `cnt%3 == 0` — only
			// the very first info line is decoded and later every-third
			// lines are skipped. Looks intentional (server info appears
			// once), but confirm.
			d.ServerInfo = bson.M{}
			if err = json.Unmarshal(line, &d.ServerInfo); err != nil {
				return err
			}
		}
	}
	if len(allDocs) == 0 && len(allRepls) == 0 {
		return errors.New("no doc found")
	}
	d.ServerStatusList = append(d.ServerStatusList, allDocs...)
	if cnt < 3 && len(d.ServerStatusList) > 0 { // shortcut hack
		// Synthesize minimal server info from the first status doc.
		d.ServerInfo = bson.M{"BuildInfo": bson.M{"Version": d.ServerStatusList[0].Version}}
	}
	d.ReplSetStatusList = append(d.ReplSetStatusList, allRepls...)
	return err
}
|
/*
Copyright IBM Corp. 2016 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package obcpbft
import (
"testing"
"github.com/hyperledger/fabric/consensus"
"github.com/spf13/viper"
)
// getPBFTCore exposes the embedded pbftCore for test inspection.
func (op *obcClassic) getPBFTCore() *pbftCore {
	return op.pbft
}
// obcClassicHelper adapts newObcClassic to the pbftConsumer factory
// signature expected by makeConsumerNetwork.
func obcClassicHelper(id uint64, config *viper.Viper, stack consensus.Stack) pbftConsumer {
	// It's not entirely obvious why the compiler likes the parent function, but not newObcClassic directly
	return newObcClassic(id, config, stack)
}
// TestClassicNetwork runs a 4-validator classic PBFT network through a
// single chain transaction and verifies every replica commits block 1
// with a matching number of transaction results.
func TestClassicNetwork(t *testing.T) {
	validatorCount := 4
	net := makeConsumerNetwork(validatorCount, obcClassicHelper)
	defer net.stop()
	broadcaster := net.endpoints[generateBroadcaster(validatorCount)].getHandle()
	net.endpoints[1].(*consumerEndpoint).consumer.RecvMsg(createOcMsgWithChainTx(1), broadcaster)
	net.process()
	for _, ep := range net.endpoints {
		ce := ep.(*consumerEndpoint)
		block, err := ce.consumer.(*obcClassic).stack.GetBlock(1)
		if nil != err {
			t.Errorf("Replica %d executed requests, expected a new block on the chain, but could not retrieve it : %s", ce.id, err)
		}
		numTrans := len(block.Transactions)
		if numTxResults := len(block.NonHashData.TransactionResults); numTxResults != numTrans {
			t.Fatalf("Replica %d has %d txResults, expected %d", ce.id, numTxResults, numTrans)
		}
	}
}
// TestClassicStateTransfer isolates replica 3 while the rest of the
// network advances beyond its log window (K=2, L=4), then verifies it
// catches up via state transfer and every replica stays in view 0.
func TestClassicStateTransfer(t *testing.T) {
	validatorCount := 4
	net := makeConsumerNetwork(validatorCount, obcClassicHelper, func(ce *consumerEndpoint) {
		// Small checkpoint/log window so replica 3 falls behind quickly.
		ce.consumer.(*obcClassic).pbft.K = 2
		ce.consumer.(*obcClassic).pbft.L = 4
	})
	defer net.stop()
	net.debug = true
	filterMsg := true
	// Drop all traffic to replica 3 while filterMsg is set.
	net.filterFn = func(src int, dst int, msg []byte) []byte {
		if filterMsg && dst == 3 { // 3 is byz
			return nil
		}
		return msg
	}
	broadcaster := net.endpoints[generateBroadcaster(validatorCount)].getHandle()
	net.endpoints[1].(*consumerEndpoint).consumer.RecvMsg(createOcMsgWithChainTx(1), broadcaster)
	net.process()
	// Reconnect replica 3, then push enough transactions that it must
	// state-transfer rather than replay.
	filterMsg = false
	for n := 2; n <= 9; n++ {
		net.endpoints[1].(*consumerEndpoint).consumer.RecvMsg(createOcMsgWithChainTx(int64(n)), broadcaster)
	}
	net.process()
	for _, ep := range net.endpoints {
		ce := ep.(*consumerEndpoint)
		obc := ce.consumer.(*obcClassic)
		_, err := obc.stack.GetBlock(9)
		if nil != err {
			t.Errorf("Replica %d executed requests, expected a new block on the chain, but could not retrieve it : %s", ce.id, err)
		}
		if !obc.pbft.activeView || obc.pbft.view != 0 {
			t.Errorf("Replica %d not active in view 0, is %v %d", ce.id, obc.pbft.activeView, obc.pbft.view)
		}
	}
}
|
package main
/*
@Time : 2020-03-27 01:06
@Author : audiRStony
@File : 13_file_buffio.go
@Software: GoLand
*/
import (
"fmt"
"bufio"
"os"
"io"
)
// main streams ./zzh.txt to stdout line by line with a buffered reader.
func main() {
	file, err := os.Open("./zzh.txt")
	if err != nil {
		fmt.Println("打开文件失败")
		return
	}
	defer file.Close()
	reader := bufio.NewReader(file)
	for {
		// ReadString reads up to and including '\n'; on error it still
		// returns whatever was read, so print BEFORE checking err —
		// the original dropped a final line lacking a trailing newline.
		bufstr, err := reader.ReadString('\n')
		fmt.Print(bufstr)
		if err != nil {
			if err != io.EOF {
				// BUG FIX: the original only printed a message and kept
				// looping, spinning forever on a persistent read error.
				fmt.Println("文件出错")
			}
			// BUG FIX: return instead of os.Exit(0) so the deferred
			// file.Close actually runs.
			return
		}
	}
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"os"
"text/template"
"github.com/getkin/kin-openapi/openapi3"
"github.com/ohatakky/openapi3markdown/template_md"
)
const (
	// dir holds the markdown template sources.
	dir = "template_md/md/"
	// outDir is where the generated docs are written.
	outDir = "Docs/"
)

var (
	// swagger is the parsed OpenAPI 3 document, loaded in init from os.Args[1].
	swagger *openapi3.Swagger
	// Template is the compiled markdown template set rooted at task.md.
	Template *template.Template
	// files lists the template files, in parse order.
	files []string = []string{
		dir + "task.md",
		dir + "header.md",
		dir + "schema.md",
		dir + "enum.md",
	}
)
// init loads the OpenAPI spec named by the first CLI argument and
// parses the markdown templates; any failure panics.
// NOTE(review): a missing argument makes os.Args[1] panic with an
// index error — confirm this is the intended UX.
func init() {
	s, err := ioutil.ReadFile(os.Args[1])
	if err != nil {
		panic(err)
	}
	swagger, err = openapi3.NewSwaggerLoader().LoadSwaggerFromData(s)
	if err != nil {
		panic(err)
	}
	Template, err = template.New("task.md").ParseFiles(files...)
	if err != nil {
		panic(err)
	}
}
// main renders one markdown document per schema in the loaded OpenAPI
// spec into outDir.
func main() {
	schemas := swagger.Components.Schemas
	tMap := make(map[string]*template_md.TaskTemplate, len(swagger.Components.Schemas))
	for task, schema := range schemas {
		h := template_md.NewHeaderTemplate(schema.Value.Description)
		tMap[task] = template_md.NewTaskTemplate()
		tMap[task].SetHeader(h)
		s := template_md.NewSchemaTemplate(task)
		// TODO: handle ref
		recrusive(schema.Value.Properties, schema.Value.Required, s, tMap[task])
	}
	err := write(tMap)
	if err != nil {
		log.Fatal(err)
	}
}
// recrusive (sic: misspelling of "recursive"; renaming would touch its
// caller, so kept) walks a schema's properties, adding one row per
// property to s. Array-of-object properties recurse into a nested
// SchemaTemplate; enum properties additionally register an
// EnumTemplate on t. The finished s is attached to t at the end.
func recrusive(properties map[string]*openapi3.SchemaRef, required []string, s *template_md.SchemaTemplate, t *template_md.TaskTemplate) {
	for key, property := range properties {
		if property.Value.Items != nil {
			// Array-typed property.
			ps := property.Value.Items.Value.Properties
			if len(ps) == 0 {
				// Array of scalars: render as "[type]".
				s.Set(key, fmt.Sprintf("[%s]", property.Value.Items.Value.Type), property.Value.Description, contains(required, key))
			} else {
				// Array of objects: link to a nested schema section.
				s.Set(key, fmt.Sprintf("[[%s]](#%s)", key, key), property.Value.Description, contains(required, key))
				ss := template_md.NewSchemaTemplate(key)
				recrusive(ps, property.Value.Items.Value.Required, ss, t)
			}
		} else {
			if property.Value.Enum != nil {
				// Enum property: emit a value table and link to it.
				e := template_md.NewEnumTemplate(key)
				for _, v := range property.Value.Enum {
					e.Set(fmt.Sprintf("%s", v), "")
				}
				t.SetEnum(e)
				s.Set(fmt.Sprintf("[%s](#%s)", key, key), fmt.Sprintf("%s", property.Value.Type), property.Value.Description, contains(required, key))
			} else {
				s.Set(key, fmt.Sprintf("%s", property.Value.Type), property.Value.Description, contains(required, key))
			}
		}
	}
	t.SetSchema(s)
}
// write renders every task template into outDir (created on demand),
// one markdown file per task.
func write(tMap map[string]*template_md.TaskTemplate) error {
	if _, err := os.Stat(outDir); os.IsNotExist(err) {
		os.Mkdir(outDir, 0777)
	}
	for k, v := range tMap {
		// BUG FIX: the original deferred file.Close() inside this loop,
		// so every handle stayed open until write returned — a leak for
		// large specs. The per-file helper scopes the defer correctly.
		if err := writeOne(k, v); err != nil {
			return err
		}
	}
	return nil
}

// writeOne renders a single task template to outDir/<name>.md.
func writeOne(name string, t *template_md.TaskTemplate) error {
	file, err := os.Create(fmt.Sprintf("%s/%s.md", outDir, name))
	if err != nil {
		return err
	}
	defer file.Close()
	return t.Exec(Template, file)
}
// contains reports whether e is an element of s.
func contains(s []string, e string) bool {
	for _, candidate := range s {
		if candidate == e {
			return true
		}
	}
	return false
}
|
package main
import (
"code.google.com/p/freetype-go/freetype"
"code.google.com/p/freetype-go/freetype/truetype"
//"database/sql"
"fmt"
"github.com/coopernurse/gorp"
"github.com/vdobler/chart"
"github.com/vdobler/chart/imgg"
"image"
"image/color"
"image/draw"
"image/png"
"io/ioutil"
"log"
"os"
"path/filepath"
"time"
)
// font is the TrueType font used for chart labels, loaded in init.
var font *truetype.Font

// init loads fonts/ipaexg.ttf relative to the working directory.
// NOTE(review): error handling is inconsistent — a missing font file
// is fatal, but a Getwd or parse failure only logs and leaves font
// nil; confirm downstream tolerates a nil font.
func init() {
	cwd, err := os.Getwd()
	if err != nil {
		log.Println(err)
		return
	}
	b, err := ioutil.ReadFile(filepath.Join(cwd, "fonts", "ipaexg.ttf"))
	if err != nil {
		log.Fatal(err)
	}
	font, err = freetype.ParseFont(b)
	if err != nil {
		log.Println(err)
	}
}
// Data is one time-series sample of a metric, mapped to the "data"
// table.
type Data struct {
	MetricsId int64 `db:"metrics_id" json:"metrics_id"`
	DateTime time.Time `db:"datetime" json:"datetime"`
	Number float64 `db:"number" json:"number"`
	UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
}
// graph renders a scatter/line chart of one metric's data points (time
// on X, value on Y) and writes it as a 400x300 PNG under outdir named
// <service>_<section>_<graph>.png.
func graph(dbmap *gorp.DbMap, metrics *Metrics, outdir string) error {
	datas := []Data{}
	// Parameterized query — metrics.Id is bound, not interpolated.
	_, err := dbmap.Select(&datas, "select * from data where metrics_id = ? order by datetime", metrics.Id)
	if err != nil {
		log.Println(err.Error())
		return err
	}
	rgba := image.NewRGBA(image.Rect(0, 0, 400, 300))
	draw.Draw(rgba, rgba.Bounds(), image.White, image.ZP, draw.Src)
	img := imgg.AddTo(rgba, 0, 0, 400, 280, color.RGBA{0xff, 0xff, 0xff, 0xff}, font, imgg.ConstructFontSizes(13))
	dt := make([]chart.EPoint, 0, 20)
	for _, data := range datas {
		dt = append(dt, chart.EPoint{
			X: float64(data.DateTime.Unix()),
			Y: float64(data.Number),
		})
	}
	c := chart.ScatterChart{Title: metrics.GraphName}
	c.XRange.TicSetting.Grid = 1
	if len(dt) > 0 {
		c.AddData("", dt, chart.PlotStyleLinesPoints, chart.Style{})
	}
	c.XRange.Time = true
	// X tics rendered as HH:MM.
	c.XRange.TicSetting.TFormat = func(t time.Time, td chart.TimeDelta) string {
		return t.Format("15:04")
	}
	c.YRange.Label = metrics.GraphName
	c.Plot(img)
	f, err := os.Create(filepath.Join(outdir, fmt.Sprintf("%s_%s_%s.png", metrics.ServiceName, metrics.SectionName, metrics.GraphName)))
	if err != nil {
		return err
	}
	defer f.Close()
	return png.Encode(f, rgba)
}
|
package main
import (
"context"
"fmt"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-lambda-go/lambdacontext"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs/cloudwatchlogsiface"
"os"
"strings"
log "github.com/sirupsen/logrus"
"github.com/flow-lab/dlog"
)
// LogGroup pairs a CloudWatch log group with the Lambda function ARN
// that should receive its events.
type LogGroup struct {
	LogGroupName *string
	FunctionArn *string
}
// Handler is the Lambda entry point: it ensures every CloudWatch log
// group has a subscription filter targeting the function named by the
// DESTINATION_FUNCTION_ARN environment variable.
func Handler(ctx context.Context, event events.CloudWatchEvent) (string, error) {
	var functionArn = os.Getenv("DESTINATION_FUNCTION_ARN")
	lambdaContext, _ := lambdacontext.FromContext(ctx)
	requestLogger := dlog.NewRequestLogger(lambdaContext.AwsRequestID,"log-group-subscriber")
	requestLogger.Info("About to update subscription filters ...")
	sess := session.Must(session.NewSession())
	client := cloudwatchlogs.New(sess, &aws.Config{})
	_, err := ProcessEvent(functionArn, client, requestLogger)
	if err != nil {
		requestLogger.Errorf("unable to complete: %v", err)
		// Panic so the invocation is marked failed (and can be retried).
		panic(fmt.Errorf("unable to complete: %v", err))
	}
	return "event processed", nil
}
// ProcessEvent subscribes functionArn to every log group that does not
// already forward to it, returning the names of the groups updated.
func ProcessEvent(functionArn string, logs cloudwatchlogsiface.CloudWatchLogsAPI, log *log.Entry) ([]string, error) {
	logGroups, err := GetLogGroups(logs)
	if err != nil {
		return nil, fmt.Errorf("get log groups: %v", err)
	}
	missingSubscription, err := getLogGroupsWithMissingSubscription(logGroups, &functionArn, logs, log)
	if err != nil {
		return nil, fmt.Errorf("get log with missing subscriptions: %v", err)
	}
	result, err := PutSubscriptionFilter(missingSubscription, logs, log)
	if err != nil {
		// BUG FIX: this message was copy-pasted from the branch above
		// and misreported which step failed.
		return nil, fmt.Errorf("put subscription filter: %v", err)
	}
	return result, nil
}
// GetLogGroups pages through DescribeLogGroups and returns every log
// group visible to the client.
func GetLogGroups(logs cloudwatchlogsiface.CloudWatchLogsAPI) ([]*cloudwatchlogs.LogGroup, error) {
	var logGroups []*cloudwatchlogs.LogGroup
	input := cloudwatchlogs.DescribeLogGroupsInput{}
	onPage := func(page *cloudwatchlogs.DescribeLogGroupsOutput, lastPage bool) bool {
		logGroups = append(logGroups, page.LogGroups...)
		return true // keep paging
	}
	if err := logs.DescribeLogGroupsPages(&input, onPage); err != nil {
		return nil, fmt.Errorf("describe log Groups: %v", err)
	}
	return logGroups, nil
}
// DescribeSubscriptionFilters returns the subscription filters
// currently attached to the named log group (a single, unpaged call).
func DescribeSubscriptionFilters(logGroupName *string, logs cloudwatchlogsiface.CloudWatchLogsAPI) (*cloudwatchlogs.DescribeSubscriptionFiltersOutput, error) {
	input := cloudwatchlogs.DescribeSubscriptionFiltersInput{
		LogGroupName: logGroupName,
	}
	return logs.DescribeSubscriptionFilters(&input)
}
// PutSubscriptionFilter installs a subscription filter (empty pattern,
// i.e. all events) on each given log group and returns the names of
// the groups that were updated. It stops at the first failure,
// returning the names processed so far along with the error.
func PutSubscriptionFilter(logGroups []LogGroup, logs cloudwatchlogsiface.CloudWatchLogsAPI, log *log.Entry) ([]string, error) {
	var updated []string
	pattern := ""
	for _, group := range logGroups {
		name := fmt.Sprintf("%s-%s", strings.Replace(*group.LogGroupName, "/", "", -1), "SubscriptionFilter")
		req := cloudwatchlogs.PutSubscriptionFilterInput{
			FilterName:     &name,
			LogGroupName:   group.LogGroupName,
			DestinationArn: group.FunctionArn,
			FilterPattern:  &pattern,
		}
		log.Printf("put subscription filter %s for %s", name, *group.LogGroupName)
		if _, err := logs.PutSubscriptionFilter(&req); err != nil {
			return updated, fmt.Errorf("putSubscriptionFilter for %s: %v", *group.LogGroupName, err)
		}
		updated = append(updated, *group.LogGroupName)
		log.Printf("PutSubscriptionFilter for %s", *group.LogGroupName)
	}
	return updated, nil
}
// getLogGroupsWithMissingSubscription filters groups down to those not
// yet forwarding to functionArn; the Datadog forwarder's own log group
// is always excluded.
func getLogGroupsWithMissingSubscription(groups []*cloudwatchlogs.LogGroup, functionArn *string, logs cloudwatchlogsiface.CloudWatchLogsAPI, log *log.Entry) ([]LogGroup, error) {
	var missing []LogGroup
	for _, group := range groups {
		subscribed, err := hasSubscriptionFilter(group, functionArn, logs)
		if err != nil {
			return nil, fmt.Errorf("getLogGroupsWithMissingSubscription: %v", err)
		}
		if subscribed || *group.LogGroupName == "/aws/lambda/DatadogLogs" {
			continue
		}
		candidate := LogGroup{
			LogGroupName: group.LogGroupName,
			FunctionArn:  functionArn,
		}
		log.Printf("%s is missing subscription for %s", *candidate.LogGroupName, *functionArn)
		missing = append(missing, candidate)
	}
	return missing, nil
}
// hasSubscriptionFilter reports whether logGroup already has a
// subscription filter whose destination is functionArn.
func hasSubscriptionFilter(logGroup *cloudwatchlogs.LogGroup, functionArn *string, logs cloudwatchlogsiface.CloudWatchLogsAPI) (bool, error) {
	out, err := DescribeSubscriptionFilters(logGroup.LogGroupName, logs)
	if err != nil {
		return false, fmt.Errorf("describe subscription filters: %s", err)
	}
	for _, filter := range out.SubscriptionFilters {
		if *filter.DestinationArn == *functionArn {
			return true, nil
		}
	}
	return false, nil
}
// main starts the Lambda runtime with Handler as the entry point.
func main() {
	lambda.Start(Handler)
}
|
package local
import (
"bufio"
"fmt"
"os"
"reflect"
"strconv"
"strings"
)
// Tags are the permitted tags within a test file. Each field's "rt"
// struct tag names its marker in the file; the optional
// ",allowmultiple" suffix lets the tag appear more than once (values
// are space-joined by ParseTags).
type Tags struct {
	Name string `rt:"NAME"`
	Summary string `rt:"SUMMARY"`
	Author string `rt:"AUTHOR,allowmultiple"`
	Labels string `rt:"LABELS"`
	Repeat int `rt:"REPEAT"`
	Issue string `rt:"ISSUE,allowmultiple"`
}
// allowMultiple is the struct-tag option marking a tag that may appear
// more than once in a test file.
const allowMultiple = "allowmultiple"

// stripOptions returns the tag name with any ",option..." suffix removed.
func stripOptions(s string) string {
	return strings.SplitN(s, ",", 2)[0]
}
// multiplesAllowed reports whether the "rt" tag carries allowmultiple
// as its first option.
func multiplesAllowed(s string) bool {
	parts := strings.Split(s, ",")
	return len(parts) >= 2 && parts[1] == allowMultiple
}
// ParseTags reads the provided file and returns all discovered tags or
// an error. Tag lines look like "# NAME: value"; names are matched to
// Tags fields via the "rt" struct tag. Duplicate single-valued tags
// are an error; allowmultiple values are space-joined.
func ParseTags(file string) (*Tags, error) {
	f, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	tags := &Tags{}
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		l := scanner.Text()
		if !strings.HasPrefix(l, "# ") {
			continue
		}
		parts := strings.SplitN(l, ":", 2)
		if len(parts) < 2 {
			// No ":" — not a tag line.
			continue
		}
		if err := applyTag(tags, parts[0][2:], strings.TrimSpace(parts[1])); err != nil {
			return nil, err
		}
	}
	// BUG FIX: the original never checked scanner.Err(), so read errors
	// were silently treated as a clean end-of-file.
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return tags, nil
}

// applyTag stores tagValue into the Tags field whose "rt" tag matches
// tagName, honoring the allowmultiple option and rejecting duplicates
// of single-valued tags.
func applyTag(tags *Tags, tagName, tagValue string) error {
	tt := reflect.TypeOf(*tags)
	for i := 0; i < tt.NumField(); i++ {
		field := tt.Field(i)
		rt, ok := field.Tag.Lookup("rt")
		if !ok || stripOptions(rt) != tagName {
			continue
		}
		v := reflect.ValueOf(tags).Elem().Field(i)
		switch v.Kind() {
		case reflect.Int:
			// Non-numeric values for an int field are ignored.
			if vi, err := strconv.Atoi(tagValue); err == nil {
				v.SetInt(int64(vi))
			}
		case reflect.String:
			if multiplesAllowed(rt) {
				if v.String() != "" {
					v.SetString(fmt.Sprintf("%s %s", v.String(), tagValue))
				} else {
					v.SetString(tagValue)
				}
			} else {
				if v.String() != "" {
					return fmt.Errorf("Field %s specified multiple times", rt)
				}
				v.SetString(tagValue)
			}
		}
	}
	return nil
}
|
package faiss
import (
"context"
"github.com/jplu/visual-search-backend/domain"
"github.com/jplu/visual-search-backend/uc"
"google.golang.org/grpc"
"log"
"strconv"
)
// Faiss wraps the gRPC client for the Faiss nearest-neighbor service.
type Faiss struct {
	client FaissServiceClient
}
// NewFaissClient creates a gRPC client for the Faiss service at
// addr:port. The connection is insecure (no TLS).
func NewFaissClient(addr string, port int) (uc.Ann, error) {
	servingAddress := addr + ":" + strconv.Itoa(port)
	conn, err := grpc.Dial(servingAddress, grpc.WithInsecure())
	if err != nil {
		// BUG FIX: log.Fatalf here killed the whole process and made
		// the declared error return unreachable; report to the caller.
		return nil, err
	}
	return Faiss{client: NewFaissServiceClient(conn)}, nil
}
// GetTopK returns the k nearest neighbors of featurizedImage.
//
// Errors from the Search RPC are returned to the caller. The original code
// called log.Fatalf (with a misleading "HeartBeat" message), terminating the
// entire process on a single failed query.
func (f Faiss) GetTopK(featurizedImage []float32, k uint64) (domain.Neighbors, error) {
	vec := Vector{FloatVal: featurizedImage}
	req := SearchRequest{Vector: &vec, TopK: k}
	resp, err := f.client.Search(context.Background(), &req)
	if err != nil {
		return nil, err
	}
	return toDomainNeighbors(resp.GetNeighbors())
}
// toDomainNeighbors converts gRPC Neighbor messages into domain.Neighbors.
// It never fails; the error return value keeps the caller's flow uniform.
func toDomainNeighbors(neighbors []*Neighbor) (domain.Neighbors, error) {
	// Pre-size the slice: the result length is known up front.
	newNeighbors := make([]domain.Neighbor, 0, len(neighbors))
	for _, neighbor := range neighbors {
		newNeighbors = append(newNeighbors, domain.Neighbor{ID: neighbor.Id, Score: neighbor.Score})
	}
	return newNeighbors, nil
}
|
package integration_test
import (
"github.com/APTrust/exchange/constants"
"github.com/APTrust/exchange/models"
"github.com/APTrust/exchange/util"
"github.com/APTrust/exchange/util/storage"
"github.com/APTrust/exchange/util/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"path/filepath"
"testing"
)
/*
These tests check the results of the integration tests for
the app apt_record. See the ingest_test.sh script in
the scripts folder, which sets up an integration context, runs
the apt_record.
*/
// TestRecordResults verifies the artifacts produced by a prior apt_record
// integration run by reading back its JSON log (see scripts/ingest_test.sh).
func TestRecordResults(t *testing.T) {
	if !testutil.ShouldRunIntegrationTests() {
		t.Skip("Skipping integration test. Set ENV var RUN_EXCHANGE_INTEGRATION=true if you want to run them.")
	}

	// Load the same config apt_record ran with.
	config, err := models.LoadConfigFile(filepath.Join("config", "integration.json"))
	require.Nil(t, err)
	config.ExpandFilePaths()

	// apt_record wrote one JSON manifest per bag into this log file while
	// running with the "config/integration.json" options.
	jsonLog := filepath.Join(config.LogDirectory, "apt_record.json")

	allBags := append(testutil.INTEGRATION_GOOD_BAGS, testutil.INTEGRATION_GLACIER_BAGS...)
	for _, bag := range allBags {
		manifest, findErr := testutil.FindIngestManifestInLog(jsonLog, bag)
		assert.Nil(t, findErr)
		if findErr != nil {
			continue
		}
		// TODO: Test WorkItem (stage, status, etc.) below.
		recordTestCommon(t, bag, manifest)
	}
}
// recordTestCommon runs the shared assertions for one ingested bag:
// manifest-level record/cleanup results, the object-level PremisEvents
// stored in the Bolt DB, and per-file checksums and events.
//
// Fixes over the original: two assertion messages used the %s verb for
// integer arguments (printing "%!s(int=2)"), one message hard-coded 6 where
// expectedEventCount may be 5, and two messages doubled the word "for".
func recordTestCommon(t *testing.T, bagName string, ingestManifest *models.IngestManifest) {
	// Test some basic object properties
	assert.NotEmpty(t, ingestManifest.WorkItemId, "WorkItemId should not be empty for %s", bagName)
	assert.NotEmpty(t, ingestManifest.S3Bucket, "S3Bucket should not be empty for %s", bagName)
	assert.NotEmpty(t, ingestManifest.S3Key, "S3Key should not be empty for %s", bagName)
	assert.NotEmpty(t, ingestManifest.ETag, "ETag should not be empty for %s", bagName)

	// Make sure the result has some basic info in RecordResult
	assert.True(t, ingestManifest.RecordResult.Attempted,
		"RecordResult.Attempted should be true for %s", bagName)
	assert.True(t, ingestManifest.RecordResult.AttemptNumber > 0,
		"RecordResult.AttemptNumber should be > 0 %s", bagName)
	assert.NotEmpty(t, ingestManifest.RecordResult.StartedAt,
		"RecordResult.StartedAt should not be empty for %s", bagName)
	assert.NotEmpty(t, ingestManifest.RecordResult.FinishedAt,
		"RecordResult.FinishedAt should not be empty for %s", bagName)
	assert.Empty(t, ingestManifest.RecordResult.Errors,
		"RecordResult.Errors should be empty for %s", bagName)
	assert.True(t, ingestManifest.RecordResult.Retry,
		"RecordResult.Retry should be true for %s", bagName)

	// Make sure the result has some basic info in CleanupResult
	assert.True(t, ingestManifest.CleanupResult.Attempted,
		"CleanupResult.Attempted should be true for %s", bagName)
	assert.True(t, ingestManifest.CleanupResult.AttemptNumber > 0,
		"CleanupResult.AttemptNumber should be > 0 %s", bagName)
	assert.NotEmpty(t, ingestManifest.CleanupResult.StartedAt,
		"CleanupResult.StartedAt should not be empty for %s", bagName)
	assert.NotEmpty(t, ingestManifest.CleanupResult.FinishedAt,
		"CleanupResult.FinishedAt should not be empty for %s", bagName)
	assert.Empty(t, ingestManifest.CleanupResult.Errors,
		"CleanupResult.Errors should be empty for %s", bagName)
	assert.True(t, ingestManifest.CleanupResult.Retry,
		"CleanupResult.Retry should be true for %s", bagName)

	// Make sure our IntellectualObject got all of its PremisEvents.
	// NOTE(review): db is never closed here; BoltDB holds a file lock —
	// confirm whether a deferred db.Close() is needed.
	db, err := storage.NewBoltDB(ingestManifest.DBPath)
	require.Nil(t, err)
	obj, err := db.GetIntellectualObject(db.ObjectIdentifier())
	require.Nil(t, err)
	require.Equal(t, 4, len(obj.PremisEvents))

	// Make sure this item was deleted from the receiving bucket
	// after ingest completed.
	assert.False(t, obj.IngestDeletedFromReceivingAt.IsZero(),
		"Object %s was not deleted from receiving bucket", bagName)
	assert.Empty(t, obj.IngestErrorMessage)

	// Check the object-level events
	creationEvents := obj.FindEventsByType(constants.EventCreation)
	idEvents := obj.FindEventsByType(constants.EventIdentifierAssignment)
	ingestEvents := obj.FindEventsByType(constants.EventIngestion)
	accessEvents := obj.FindEventsByType(constants.EventAccessAssignment)
	assert.Equal(t, 1, len(accessEvents), "Missing access event for %s", bagName)
	assert.Equal(t, 1, len(creationEvents), "Missing creation event for %s", bagName)
	assert.Equal(t, 1, len(idEvents), "Missing identifier assignment event for %s", bagName)
	assert.Equal(t, 1, len(ingestEvents), "Missing ingest event for %s", bagName)
	for _, event := range obj.PremisEvents {
		assert.True(t, event.Id > 0, "Event %s was not saved for %s", event.EventType, obj.Identifier)
		assert.True(t, event.IntellectualObjectId > 0,
			"event.IntellectualObjectId not set for %s %s", event.EventType, obj.Identifier)
		assert.False(t, event.DateTime.IsZero(),
			"event.DateTime was not set for %s %s", event.EventType, obj.Identifier)
		assert.False(t, event.CreatedAt.IsZero(),
			"event.CreatedAt was not set for %s %s", event.EventType, obj.Identifier)
		assert.False(t, event.UpdatedAt.IsZero(),
			"event.UpdatedAt was not set for %s %s", event.EventType, obj.Identifier)
		assert.True(t, util.LooksLikeUUID(event.Identifier),
			"Identifier for %s %s doesn't look like a UUID", event.EventType, obj.Identifier)
		assert.NotEmpty(t, event.EventType, "EventType missing for %s %s", obj.Identifier, event.Identifier)
		assert.NotEmpty(t, event.Detail, "Detail is empty for %s %s", event.EventType, obj.Identifier)
		assert.NotEmpty(t, event.Outcome, "Outcome is empty for %s %s", event.EventType, obj.Identifier)
		assert.NotEmpty(t, event.OutcomeDetail,
			"OutcomeDetail is empty for %s %s", event.EventType, obj.Identifier)
		assert.NotEmpty(t, event.Object, "Object is empty for %s %s", event.EventType, obj.Identifier)
		assert.NotEmpty(t, event.Agent, "Agent is empty for %s %s", event.EventType, obj.Identifier)
		assert.NotEmpty(t, event.OutcomeInformation,
			"OutcomeInformation is empty for %s %s", event.EventType, obj.Identifier)
		assert.Equal(t, obj.Identifier, event.IntellectualObjectIdentifier,
			"IntellectualObjectIdentifier is wrong for %s %s", event.EventType, obj.Identifier)
	}
	for _, gfIdentifier := range db.FileIdentifiers() {
		gf, err := db.GetGenericFile(gfIdentifier)
		require.Nil(t, err, gfIdentifier)
		// Skip these checks for files that didn't need to be saved.
		// Reasons for not needing to be saved:
		// 1. File has a non-savable name, according to util.HasSavableName
		// 2. File has not changed since last time we ingested this bag.
		if !gf.IngestNeedsSave {
			continue
		}
		// Make sure checksums are present
		require.Equal(t, 2, len(gf.Checksums),
			"Checksums should be %d, found %d for %s", 2, len(gf.Checksums), gf.Identifier)
		md5 := gf.GetChecksumByAlgorithm(constants.AlgMd5)
		sha256 := gf.GetChecksumByAlgorithm(constants.AlgSha256)
		require.NotNil(t, md5, "Missing md5 digest for %s", gf.Identifier)
		require.NotNil(t, sha256, "Missing sha256 digest for %s", gf.Identifier)
		// Make sure that these checksums were saved
		assert.True(t, md5.Id > 0, "md5 was not saved for %s", gf.Identifier)
		assert.True(t, md5.GenericFileId > 0, "md5.GenericFileId not set for %s", gf.Identifier)
		assert.False(t, md5.CreatedAt.IsZero(), "md5.CreatedAt was not set for %s", gf.Identifier)
		assert.False(t, md5.UpdatedAt.IsZero(), "md5.UpdatedAt was not set for %s", gf.Identifier)
		assert.True(t, sha256.Id > 0, "sha256 was not saved for %s", gf.Identifier)
		assert.True(t, sha256.GenericFileId > 0, "sha256.GenericFileId not set for %s", gf.Identifier)
		assert.False(t, sha256.CreatedAt.IsZero(), "sha256.CreatedAt was not set for %s", gf.Identifier)
		assert.False(t, sha256.UpdatedAt.IsZero(), "sha256.UpdatedAt was not set for %s", gf.Identifier)
		// Make sure PremisEvents are present
		expectedEventCount := 6
		if gf.StorageOption != constants.StorageStandard {
			expectedEventCount = 5 // no replication event for Glacier-only files
		}
		require.Equal(t, expectedEventCount, len(gf.PremisEvents),
			"PremisEvents count should be %d, found %d for %s", expectedEventCount, len(gf.PremisEvents), gf.Identifier)
		assert.Equal(t, 1, len(gf.FindEventsByType(constants.EventFixityCheck)),
			"Missing fixity check event for %s", gf.Identifier)
		assert.Equal(t, 1, len(gf.FindEventsByType(constants.EventDigestCalculation)),
			"Missing digest calculation event for %s", gf.Identifier)
		assert.Equal(t, 2, len(gf.FindEventsByType(constants.EventIdentifierAssignment)),
			"Missing identifier assignment event(s) for %s", gf.Identifier)
		if gf.StorageOption == constants.StorageStandard {
			assert.Equal(t, 1, len(gf.FindEventsByType(constants.EventReplication)),
				"Missing replication event for %s", gf.Identifier)
		}
		assert.Equal(t, 1, len(gf.FindEventsByType(constants.EventIngestion)),
			"Missing ingestion event for %s", gf.Identifier)
		for _, event := range gf.PremisEvents {
			assert.True(t, event.Id > 0, "Event %s was not saved for %s", event.EventType, gf.Identifier)
			assert.True(t, event.IntellectualObjectId > 0,
				"event.IntellectualObjectId not set for %s %s", event.EventType, gf.Identifier)
			assert.True(t, event.GenericFileId > 0,
				"event.GenericFileId not set for %s %s", event.EventType, gf.Identifier)
			assert.False(t, event.DateTime.IsZero(),
				"event.DateTime was not set for %s %s", event.EventType, gf.Identifier)
			assert.False(t, event.CreatedAt.IsZero(),
				"event.CreatedAt was not set for %s %s", event.EventType, gf.Identifier)
			assert.False(t, event.UpdatedAt.IsZero(),
				"event.UpdatedAt was not set for %s %s", event.EventType, gf.Identifier)
			assert.True(t, util.LooksLikeUUID(event.Identifier),
				"Identifier for %s %s doesn't look like a UUID", event.EventType, gf.Identifier)
			assert.NotEmpty(t, event.EventType, "EventType missing for %s %s", gf.Identifier, event.Identifier)
			assert.NotEmpty(t, event.Detail, "Detail is empty for %s %s", event.EventType, gf.Identifier)
			assert.NotEmpty(t, event.Outcome, "Outcome is empty for %s %s", event.EventType, gf.Identifier)
			assert.NotEmpty(t, event.OutcomeDetail,
				"OutcomeDetail is empty for %s %s", event.EventType, gf.Identifier)
			assert.NotEmpty(t, event.Object, "Object is empty for %s %s", event.EventType, gf.Identifier)
			assert.NotEmpty(t, event.Agent, "Agent is empty for %s %s", event.EventType, gf.Identifier)
			assert.NotEmpty(t, event.OutcomeInformation,
				"OutcomeInformation is empty for %s %s", event.EventType, gf.Identifier)
			assert.Equal(t, obj.Identifier, event.IntellectualObjectIdentifier,
				"IntellectualObjectIdentifier is wrong for %s %s", event.EventType, gf.Identifier)
			assert.Equal(t, gf.Identifier, event.GenericFileIdentifier,
				"GenericFileIdentifier is wrong for %s %s", event.EventType, gf.Identifier)
		}
	}
}
|
package monitors
import (
"github.com/go-ping/ping"
)
// icmpMonitor reports whether url answers a single ICMP echo request with no
// packet loss. The expectation argument is unused for ICMP checks; it is
// presumably kept for signature parity with the other monitor functions —
// confirm at the call sites.
func icmpMonitor(url string, expectation string) bool {
	pinger, err := ping.NewPinger(url)
	if err != nil {
		return false
	}
	pinger.Count = 1
	// NOTE(review): no pinger.Timeout is set, so an unreachable host may
	// block Run for a long time — confirm whether a timeout should be added.
	if err := pinger.Run(); err != nil {
		// The original discarded Run's error, reporting success for pings
		// that never actually ran (e.g. missing raw-socket privileges).
		return false
	}
	return pinger.Statistics().PacketLoss == 0.0
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package filestore
import (
"context"
"fmt"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
dclService "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/filestore/beta"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured"
)
// Backup adapts the filestore beta Backup resource to the generic
// unstructured.Resource interface; it is registered in init below.
type Backup struct{}
// BackupToUnstructured converts a typed dclService.Backup into the generic
// unstructured.Resource form, copying only the fields that are non-nil.
// Enum fields are stored as their string representation.
func BackupToUnstructured(r *dclService.Backup) *unstructured.Resource {
	u := &unstructured.Resource{
		STV: unstructured.ServiceTypeVersion{
			Service: "filestore",
			Version: "beta",
			Type:    "Backup",
		},
		Object: make(map[string]interface{}),
	}
	if r.CapacityGb != nil {
		u.Object["capacityGb"] = *r.CapacityGb
	}
	if r.CreateTime != nil {
		u.Object["createTime"] = *r.CreateTime
	}
	if r.Description != nil {
		u.Object["description"] = *r.Description
	}
	if r.DownloadBytes != nil {
		u.Object["downloadBytes"] = *r.DownloadBytes
	}
	if r.Labels != nil {
		rLabels := make(map[string]interface{})
		for k, v := range r.Labels {
			rLabels[k] = v
		}
		u.Object["labels"] = rLabels
	}
	if r.Location != nil {
		u.Object["location"] = *r.Location
	}
	if r.Name != nil {
		u.Object["name"] = *r.Name
	}
	if r.Project != nil {
		u.Object["project"] = *r.Project
	}
	if r.SourceFileShare != nil {
		u.Object["sourceFileShare"] = *r.SourceFileShare
	}
	if r.SourceInstance != nil {
		u.Object["sourceInstance"] = *r.SourceInstance
	}
	if r.SourceInstanceTier != nil {
		u.Object["sourceInstanceTier"] = string(*r.SourceInstanceTier)
	}
	if r.State != nil {
		u.Object["state"] = string(*r.State)
	}
	if r.StorageBytes != nil {
		u.Object["storageBytes"] = *r.StorageBytes
	}
	return u
}
// UnstructuredToBackup converts the generic unstructured form back into a
// typed dclService.Backup, type-checking each present field and returning an
// error on the first type mismatch. Non-string values inside the labels map
// are silently dropped.
func UnstructuredToBackup(u *unstructured.Resource) (*dclService.Backup, error) {
	r := &dclService.Backup{}
	if _, ok := u.Object["capacityGb"]; ok {
		if i, ok := u.Object["capacityGb"].(int64); ok {
			r.CapacityGb = dcl.Int64(i)
		} else {
			return nil, fmt.Errorf("r.CapacityGb: expected int64")
		}
	}
	if _, ok := u.Object["createTime"]; ok {
		if s, ok := u.Object["createTime"].(string); ok {
			r.CreateTime = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.CreateTime: expected string")
		}
	}
	if _, ok := u.Object["description"]; ok {
		if s, ok := u.Object["description"].(string); ok {
			r.Description = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Description: expected string")
		}
	}
	if _, ok := u.Object["downloadBytes"]; ok {
		if i, ok := u.Object["downloadBytes"].(int64); ok {
			r.DownloadBytes = dcl.Int64(i)
		} else {
			return nil, fmt.Errorf("r.DownloadBytes: expected int64")
		}
	}
	if _, ok := u.Object["labels"]; ok {
		if rLabels, ok := u.Object["labels"].(map[string]interface{}); ok {
			m := make(map[string]string)
			for k, v := range rLabels {
				if s, ok := v.(string); ok {
					m[k] = s
				}
			}
			r.Labels = m
		} else {
			return nil, fmt.Errorf("r.Labels: expected map[string]interface{}")
		}
	}
	if _, ok := u.Object["location"]; ok {
		if s, ok := u.Object["location"].(string); ok {
			r.Location = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Location: expected string")
		}
	}
	if _, ok := u.Object["name"]; ok {
		if s, ok := u.Object["name"].(string); ok {
			r.Name = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Name: expected string")
		}
	}
	if _, ok := u.Object["project"]; ok {
		if s, ok := u.Object["project"].(string); ok {
			r.Project = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Project: expected string")
		}
	}
	if _, ok := u.Object["sourceFileShare"]; ok {
		if s, ok := u.Object["sourceFileShare"].(string); ok {
			r.SourceFileShare = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.SourceFileShare: expected string")
		}
	}
	if _, ok := u.Object["sourceInstance"]; ok {
		if s, ok := u.Object["sourceInstance"].(string); ok {
			r.SourceInstance = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.SourceInstance: expected string")
		}
	}
	if _, ok := u.Object["sourceInstanceTier"]; ok {
		if s, ok := u.Object["sourceInstanceTier"].(string); ok {
			r.SourceInstanceTier = dclService.BackupSourceInstanceTierEnumRef(s)
		} else {
			return nil, fmt.Errorf("r.SourceInstanceTier: expected string")
		}
	}
	if _, ok := u.Object["state"]; ok {
		if s, ok := u.Object["state"].(string); ok {
			r.State = dclService.BackupStateEnumRef(s)
		} else {
			return nil, fmt.Errorf("r.State: expected string")
		}
	}
	if _, ok := u.Object["storageBytes"]; ok {
		if i, ok := u.Object["storageBytes"].(int64); ok {
			r.StorageBytes = dcl.Int64(i)
		} else {
			return nil, fmt.Errorf("r.StorageBytes: expected int64")
		}
	}
	return r, nil
}
// GetBackup fetches the backup described by u and returns its live state in
// unstructured form.
func GetBackup(ctx context.Context, config *dcl.Config, u *unstructured.Resource) (*unstructured.Resource, error) {
	typed, err := UnstructuredToBackup(u)
	if err != nil {
		return nil, err
	}
	c := dclService.NewClient(config)
	fetched, err := c.GetBackup(ctx, typed)
	if err != nil {
		return nil, err
	}
	return BackupToUnstructured(fetched), nil
}
// ListBackup pages through every backup in the given project/location and
// returns all of them in unstructured form.
func ListBackup(ctx context.Context, config *dcl.Config, project string, location string) ([]*unstructured.Resource, error) {
	c := dclService.NewClient(config)
	page, err := c.ListBackup(ctx, project, location)
	if err != nil {
		return nil, err
	}
	var resources []*unstructured.Resource
	for {
		for i := range page.Items {
			resources = append(resources, BackupToUnstructured(page.Items[i]))
		}
		if !page.HasNext() {
			return resources, nil
		}
		if nextErr := page.Next(ctx, c); nextErr != nil {
			return nil, nextErr
		}
	}
}
// ApplyBackup creates or updates the backup described by u and returns the
// resulting state in unstructured form.
func ApplyBackup(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	typed, err := UnstructuredToBackup(u)
	if err != nil {
		return nil, err
	}
	// Convert an optional caller-supplied state hint to the typed form too.
	if hint := unstructured.FetchStateHint(opts); hint != nil {
		typedHint, hintErr := UnstructuredToBackup(hint)
		if hintErr != nil {
			return nil, hintErr
		}
		opts = append(opts, dcl.WithStateHint(typedHint))
	}
	c := dclService.NewClient(config)
	applied, err := c.ApplyBackup(ctx, typed, opts...)
	if err != nil {
		return nil, err
	}
	return BackupToUnstructured(applied), nil
}
// BackupHasDiff reports whether applying u would change the live resource.
// It runs ApplyBackup with all lifecycle mutations blocked; an
// ApplyInfeasibleError then means a change would have been required.
func BackupHasDiff(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	typed, err := UnstructuredToBackup(u)
	if err != nil {
		return false, err
	}
	if hint := unstructured.FetchStateHint(opts); hint != nil {
		typedHint, hintErr := UnstructuredToBackup(hint)
		if hintErr != nil {
			return false, hintErr
		}
		opts = append(opts, dcl.WithStateHint(typedHint))
	}
	opts = append(opts,
		dcl.WithLifecycleParam(dcl.BlockDestruction),
		dcl.WithLifecycleParam(dcl.BlockCreation),
		dcl.WithLifecycleParam(dcl.BlockModification))
	c := dclService.NewClient(config)
	if _, applyErr := c.ApplyBackup(ctx, typed, opts...); applyErr != nil {
		if _, infeasible := applyErr.(dcl.ApplyInfeasibleError); infeasible {
			return true, nil
		}
		return false, applyErr
	}
	return false, nil
}
// DeleteBackup removes the backup described by u.
func DeleteBackup(ctx context.Context, config *dcl.Config, u *unstructured.Resource) error {
	typed, err := UnstructuredToBackup(u)
	if err != nil {
		return err
	}
	return dclService.NewClient(config).DeleteBackup(ctx, typed)
}
// BackupID returns the canonical identity string of the backup described by u.
func BackupID(u *unstructured.Resource) (string, error) {
	typed, err := UnstructuredToBackup(u)
	if err != nil {
		return "", err
	}
	return typed.ID()
}
// STV returns the service/type/version triple identifying this resource.
//
// Fields are named explicitly: the original positional literal
// {"filestore", "Backup", "beta"} depended on the declaration order of
// unstructured.ServiceTypeVersion and disagreed with the named literal used
// in BackupToUnstructured (Service/Version/Type).
func (r *Backup) STV() unstructured.ServiceTypeVersion {
	return unstructured.ServiceTypeVersion{
		Service: "filestore",
		Version: "beta",
		Type:    "Backup",
	}
}
// IAM policy operations are not implemented for filestore Backup resources;
// each method below reports unstructured.ErrNoSuchMethod.

func (r *Backup) SetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

func (r *Backup) GetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, role, member string) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

func (r *Backup) DeletePolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) error {
	return unstructured.ErrNoSuchMethod
}

func (r *Backup) SetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

func (r *Backup) SetPolicyWithEtag(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

func (r *Backup) GetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}
// The methods below satisfy the unstructured resource interface by plain
// delegation to the package-level functions of the same name.

func (r *Backup) Get(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return GetBackup(ctx, config, resource)
}

func (r *Backup) Apply(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	return ApplyBackup(ctx, config, resource, opts...)
}

func (r *Backup) HasDiff(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	return BackupHasDiff(ctx, config, resource, opts...)
}

func (r *Backup) Delete(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) error {
	return DeleteBackup(ctx, config, resource)
}

func (r *Backup) ID(resource *unstructured.Resource) (string, error) {
	return BackupID(resource)
}
// init registers the Backup adapter with the unstructured resource registry
// so it can be resolved by its ServiceTypeVersion.
func init() {
	unstructured.Register(&Backup{})
}
|
package ddl
import (
"reflect"
"strings"
)
// Reserved words
//
// Keywords recognized inside the `ddl:"..."` struct tag.
const (
	Tag_Ddl  = "ddl"
	Tag_Skip = "-"
	Tag_Col  = "col"
	Tag_Key  = "key"
	Tag_Auto = "auto"
	Tag_Inc  = "inc"
	Tag_Ref  = "ref"
)

// Field attributes
//
// Bit flags describing a field's role; they are OR-combined into
// FieldInfo.DdlType by ParseFieldTag.
const Field_Skip = 0
const (
	Field_Col = 1 << iota
	Field_Key
	Field_Auto
	Field_Inc
	Field_Ref
)

// Skip masks — presumably used to exclude fields from the corresponding
// generated SELECT/RETURNING/INSERT/UPDATE statements; confirm at call sites.
const Skip_Select = Field_Skip
const Skip_Return = Field_Key | Field_Auto
const Skip_Insert = Field_Auto | Field_Ref
const Skip_Update = Field_Key | Field_Auto | Field_Ref
///////////////////////////////////////////////////////////////////////
//
// Field information structure

// FieldInfo describes how a single Go struct field maps to a database column.
type FieldInfo struct {
	DdlType int          // bitmask of Field_* attribute flags
	FldKind reflect.Kind // reflected kind of the Go field
	FldName string       // Go field name
	ColName string       // database column name
}
// InitFieldInfo populates info from the reflected struct field: its kind,
// names, and the attribute bitmask parsed from the "ddl" tag. A field with
// no "ddl" tag defaults to a plain column.
func (info *FieldInfo) InitFieldInfo(field reflect.StructField) {
	if info == nil {
		return
	}
	info.DdlType = Field_Skip
	info.FldKind = field.Type.Kind()
	info.FldName = field.Name
	info.ColName = FormatColumnName(field.Name)
	if tag := field.Tag.Get(Tag_Ddl); tag != "" {
		info.ParseFieldTag(tag)
	} else {
		info.DdlType = Field_Col
	}
	ddlLog.Trace("Field name:%s, column:%s, kind:%s, type:%d", info.FldName, info.ColName, info.FldKind.String(), info.DdlType)
}
// ParseFieldTag parses a "ddl" tag value into info: an optional column name
// first, followed by comma-separated attribute keywords. A bare "-" marks
// the field as skipped; a tag that yields no attributes defaults to a plain
// column.
func (info *FieldInfo) ParseFieldTag(tag string) {
	if info == nil {
		return
	}
	if tag == Tag_Skip {
		info.DdlType = Field_Skip
		return
	}
	parts := strings.Split(tag, ",")
	if parts[0] != "" {
		info.ColName = parts[0]
	}
	for _, opt := range parts[1:] {
		switch opt {
		case Tag_Col:
			info.DdlType |= Field_Col
		case Tag_Key:
			info.DdlType |= Field_Key
		case Tag_Auto:
			info.DdlType |= Field_Auto
		case Tag_Ref:
			info.DdlType |= Field_Ref
		case Tag_Inc:
			info.DdlType |= Field_Inc
		}
	}
	if info.DdlType == Field_Skip {
		info.DdlType = Field_Col
	}
}
|
package c31_hmac_sha1_timing_leak
import (
"math/rand"
"testing"
"time"
"github.com/vodafon/cryptopals/set1/c1_hex_to_base64"
)
// TestServer checks that a request carrying a correct HMAC signature is
// accepted by the server (HTTP 200).
func TestServer(t *testing.T) {
	// Random-length secret key, at least 20 bytes.
	key := make([]byte, 20+rand.Intn(100))
	rand.Read(key)

	hs := NewHMACSystem(key)
	server := NewServer(hs, 1*time.Millisecond)

	const fn = "foo"
	sign := c1_hex_to_base64.EncodeHex(hs.HMAC([]byte(fn)))
	if respCode := server.CheckFile("file=" + fn + "&signature=" + string(sign)); respCode != 200 {
		t.Errorf("Invalid CheckFile %q. Expected code 200, got %d\n", sign, respCode)
	}
}
|
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
)
// NeedsUpdate returns true if the external resource corresponding to this
// check needs to be updated because its Spec differs from its Status.
//
// The Spec holds the requested state of the resource while Status reflects
// the state observed in Kubernetes. Optional Spec fields that were left
// unset are excluded from the comparison (the Status field may still be set
// to the default returned by the Pingdom API, which is of no consequence).
func (c *Check) NeedsUpdate() bool {
	spec := &c.Spec
	status := &c.Status

	// The paused flag is tracked via Status.Status, so compare it separately:
	// an update is needed whenever the requested flag and the observed state
	// disagree.
	if spec.Paused != nil && *spec.Paused != (status.Status == Paused) {
		return true
	}

	// Collect the optional fields left unset in the Spec; they must not
	// participate in the structural comparison below.
	optional := map[string]bool{
		"Name":              spec.Name == nil,
		"Port":              spec.Port == nil,
		"ResolutionMinutes": spec.ResolutionMinutes == nil,
		"UserIds":           spec.UserIds == nil,
		"URL":               spec.URL == nil,
		"Encryption":        spec.Encryption == nil,
	}
	ignored := make([]string, 0, len(optional))
	for field, unset := range optional {
		if unset {
			ignored = append(ignored, field)
		}
	}

	var opts cmp.Options
	if len(ignored) > 0 {
		opts = append(opts, cmpopts.IgnoreFields(CheckParameters{}, ignored...))
	}
	return !cmp.Equal(spec.CheckParameters, status.CheckParameters, opts)
}
|
package connection
// Connection is a byte-oriented transport endpoint.
type Connection interface {
	// Write sends data and returns the number of bytes written.
	Write(data []byte) (int, error)
	// Read returns the next available chunk of received bytes.
	Read() ([]byte, error)
	// Close closes the connection.
	Close() error
	// CloseWrite closes only the write side (half-close) — presumably
	// mirroring TCP's CloseWrite; confirm with implementations.
	CloseWrite() error
}

// Connector produces Connections on demand.
type Connector interface {
	// Dial establishes and returns a new Connection.
	Dial() (Connection, error)
}
|
package app
import (
"encoding/json"
"fmt"
"log"
)
var (
	// Placeholder defaults; presumably overridden at build time via
	// -ldflags "-X ..." — confirm in the build scripts.
	name    string = "-"
	version string = "-"
)

// ApplicationRecord holds the application's identifying metadata, serialized
// by JSON() using the field tags below.
type ApplicationRecord struct {
	Name    string `json:"name"`
	Version string `json:"version"`
}

// application caches the singleton record returned by Get.
var application *ApplicationRecord
// String implements fmt.Stringer, rendering "Name: <name>, Version: <version>".
func (app *ApplicationRecord) String() string {
	return "Name: " + app.Name + ", Version: " + app.Version
}
// JSON returns the record serialized as a JSON object. A marshal error
// aborts the process via log.Fatal, preserving the original behavior (for
// this struct, marshaling cannot realistically fail).
func (app *ApplicationRecord) JSON() (result string) {
	appMap, err := json.Marshal(app)
	if err != nil {
		log.Fatal(err)
	}
	// The original passed the JSON through fmt.Sprintf(string(appMap)) with a
	// non-constant format string (a go vet error): any '%' in the data would
	// be misread as a format verb. Use the bytes directly.
	result = string(appMap)
	return
}
// Get returns the process-wide ApplicationRecord, creating it on first use.
// NOTE(review): this lazy init is not goroutine-safe — confirm callers are
// single-threaded or guard with sync.Once.
func Get() *ApplicationRecord {
	if application == nil {
		application = &ApplicationRecord{Name: name, Version: version}
	}
	return application
}
|
package server
import (
	"time"

	"github.com/gin-gonic/gin"
	_ "github.com/go-sql-driver/mysql"
	"github.com/gomodule/redigo/redis"
	"github.com/spf13/viper"

	"visitor/app/server/router"
	"visitor/client/redis_client"
)
// Init wires up the application's dependencies and returns the HTTP engine:
// it initializes the redis connection pools, builds the router, and starts
// the RPC server in the background.
//
// MySQL/gorm initialization (connect, set InnoDB/utf8mb4 table options,
// auto-migrate) was commented out in the original source; see VCS history
// for the disabled gorm_client setup.
func Init() *gin.Engine {
	redisInit()

	// Build the HTTP routes.
	r := router.Init()

	// Serve RPC requests concurrently with HTTP.
	go startRpcServer()
	return r
}
func redisInit() {
//初始化redis
redis_client.MasterPool = &redis.Pool{ //实例化一个连接池
MaxIdle: 16, //最初的连接数量
// MaxActive:1000000, //最大连接数量
MaxActive: 0, //连接池最大连接数量,不确定可以用0(0表示自动定义),按需分配
IdleTimeout: 300, //连接关闭时间 300秒 (300秒不使用自动关闭)
Dial: func() (redis.Conn, error) { //要连接的redis数据库
c, err := redis.Dial("tcp", viper.GetString("redis.master.address"))
if err != nil {
return nil, err
}
_, err = c.Do("AUTH", viper.GetString("redis.master.password"))
if err != nil {
return nil, err
}
return c, nil
},
}
redis_client.SalvePool = &redis.Pool{ //实例化一个连接池
MaxIdle: 16, //最初的连接数量
// MaxActive:1000000, //最大连接数量
MaxActive: 0, //连接池最大连接数量,不确定可以用0(0表示自动定义),按需分配
IdleTimeout: 300, //连接关闭时间 300秒 (300秒不使用自动关闭)
Dial: func() (redis.Conn, error) { //要连接的redis数据库
c, err := redis.Dial("tcp", viper.GetString("redis.master.address"))
if err != nil {
return nil, err
}
_, err = c.Do("AUTH", viper.GetString("redis.master.password"))
if err != nil {
return nil, err
}
return c, nil
},
}
}
|
package test
import (
"fmt"
"testing"
)
type (
	// RedisClient is the embedded base type in this method-shadowing demo.
	RedisClient struct {
	}
	// ClassB embeds RedisClient and shadows its Exec method.
	ClassB struct {
		RedisClient
	}
)
// NewClassA returns a zero-value RedisClient. (The name is historical; it
// constructs a RedisClient, not a "ClassA".)
func NewClassA() RedisClient {
	return RedisClient{}
}
// Exec prints a marker showing the base type's method ran.
func (r *RedisClient) Exec() {
	fmt.Println("A is called")
}
// Exec shadows RedisClient.Exec, printing its own marker and then explicitly
// invoking the embedded implementation (Go embedding has no virtual dispatch).
func (r *ClassB) Exec() {
	fmt.Println("B is called")
	r.RedisClient.Exec()
}
// Test_A_B_class demonstrates that calling Exec on ClassB runs the shadowing
// method, which then delegates to the embedded RedisClient.Exec.
func Test_A_B_class(t *testing.T) {
	new(ClassB).Exec()
}
|
package main
import (
"errors"
"fmt"
"regexp"
"strconv"
)
/*
stat ::= assign | if | for | funDecl | ret | funCall
statlist ::= stat [statlist]
if ::= 'if' exp '{' [statlist] '}' [else '{' [statlist] '}']
for ::= 'for' [assign] ';' [explist] ';' [assign] '{' [statlist] '}'
type ::= 'int' | 'float' | 'bool'
typelist ::= type [',' typelist]
paramlist ::= var type [',' paramlist]
funDecl ::= 'fun' Name '(' [paramlist] ')' [typelist] '{' [statlist] '}'
ret ::= 'return' [explist]
assign ::= varlist ‘=’ explist | postIncr | postDecr
postIncr ::= varDecl '++'
postDecr ::= varDecl '--'
varlist ::= varDecl [‘,’ varlist]
explist ::= exp [‘,’ explist]
exp ::= Numeral | String | var | '(' exp ')' | exp binop exp | unop exp | funCall
funCall ::= Name '(' [explist] ')'
varDecl ::= [shadow] var
var ::= Name
binop ::= '+' | '-' | '*' | '/' | '%' | '==' | '!=' | '<=' | '>=' | '<' | '>' | '&&' | '||'
unop ::= '-' | '!'
Operator priority (Descending priority!):
0: '-', '!'
1: '*', '/', '%'
2: '+', '-'
3: '==', '!=', '<=', '>=', '<', '>'
4: '&&', '||'
*/
/////////////////////////////////////////////////////////////////////////////////////////////////
// CONST
/////////////////////////////////////////////////////////////////////////////////////////////////
// Type identifiers for values known to the compiler.
const (
	TYPE_UNKNOWN = iota
	TYPE_INT
	TYPE_STRING
	TYPE_CHAR
	TYPE_FLOAT
	TYPE_BOOL
	// TYPE_FUNCTION ?
	TYPE_ARRAY
	TYPE_STRUCT
	// This type will always be considered equal to any other type when compared!
	// Used for variadic functions.
	TYPE_WHATEVER
)

// Operator identifiers for the grammar's binary and unary operators.
const (
	OP_PLUS = iota
	OP_MINUS
	OP_MULT
	OP_DIV
	OP_MOD
	OP_NEGATIVE
	OP_NOT
	OP_EQ
	OP_NE
	OP_LE
	OP_GE
	OP_LESS
	OP_GREATER
	OP_AND
	OP_OR
	OP_UNKNOWN
)
/////////////////////////////////////////////////////////////////////////////////////////////////
// INTERFACES
/////////////////////////////////////////////////////////////////////////////////////////////////
var (
	// ErrCritical wraps errors with an empty prefix; ErrNormal prefixes
	// "error - ". (Presumed severity split between unrecoverable and
	// recoverable errors — confirm at the wrapping call sites.)
	ErrCritical = errors.New("")
	ErrNormal   = errors.New("error - ")
)

// SymbolVarEntry is the symbol-table record for one variable.
type SymbolVarEntry struct {
	sType ComplexType
	// Refers to the name used in the final assembler
	varName string
	offset  int
	// In case the variable is indexing an array (see sType), we need this second underlaying type as well!
	isIndexed bool
	//arrayType Type
	// ... more information
}

// This is needed when code for function calls is generated
// and we need to know how many and what kind of variables are
// pushed onto the stack or popped from afterwards.
type SymbolFunEntry struct {
	paramTypes               []ComplexType
	returnTypes              []ComplexType
	jumpLabel                string
	epilogueLabel            string
	returnStackPointerOffset int
	inline                   bool
	isUsed                   bool
}

// SymbolTypeEntry is the symbol-table record for a declared struct type.
type SymbolTypeEntry struct {
	members []StructMem
	offset  int
}

// SymbolTable is one lexical scope's view of variables, functions and types,
// chained to its parent scope via parent.
type SymbolTable struct {
	varTable  map[string]SymbolVarEntry
	funTable  map[string][]SymbolFunEntry
	typeTable map[string]SymbolTypeEntry
	// activeFunctionReturn references the function return types, if we are within a function, otherwise nil
	// This is required to check validity and code generation of return statements
	activeFunctionName      string
	activeFunctionParams    []ComplexType
	activeFunctionReturn    []ComplexType
	activeLoop              bool
	activeLoopBreakLabel    string
	activeLoopContinueLabel string
	parent                  *SymbolTable
}

// AST is the root of a parsed program: its top-level block plus the global
// symbol table.
type AST struct {
	block             Block
	globalSymbolTable *SymbolTable
}

// ComplexType is a possibly-nested type descriptor. subType presumably holds
// the element type for arrays — confirm at its usage sites.
type ComplexType struct {
	t Type
	// iff t is a struct, we need the qualified type name to query the symbol table!
	tName   string
	subType *ComplexType
}

// Type is a TYPE_* constant; Operator is an OP_* constant.
type Type int
type Operator int

// Node is the common interface of all AST nodes.
type Node interface {
	// Notes the start position in the actual source code!
	// (lineNr, columnNr)
	startPos() (int, int)
	generateCode(asm *ASM, s *SymbolTable)
}

//
// Interface types
//

// Statement is a Node usable in statement position.
type Statement interface {
	Node
	statement()
}

// Expression is a Node that yields one or more typed values.
type Expression interface {
	Node
	expression()
	getExpressionTypes(s *SymbolTable) []ComplexType
	getResultCount() int
	isDirectlyAccessed() bool
	getDirectAccess() []DirectAccess
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// EXPRESSIONS
/////////////////////////////////////////////////////////////////////////////////////////////////
// This can either be an index access or a struct . access
type DirectAccess struct {
	indexed bool
	// If indexed, this is the index expression
	indexExpression Expression
	// If struct access by qualified name, this is the qualified name
	accessName string
	// If struct, we also need the offset within the struct!
	structOffset int
	line, column int
}

// Variable is a named variable reference, optionally with direct accesses.
type Variable struct {
	vType ComplexType
	vName string
	// vShadow marks a "shadow"-declared variable that hides an outer one.
	vShadow bool
	directAccess []DirectAccess
	line, column int
}

// Constant is a literal value, kept as its source string.
type Constant struct {
	cType Type
	cValue string
	line, column int
}

// Array is an array literal: either "[](type, n)" or "[e1, e2, ...]".
type Array struct {
	aType ComplexType
	aCount int
	aExpressions []Expression
	directAccess []DirectAccess
	line, column int
}

// BinaryOp is a binary operation on two sub-expressions.
type BinaryOp struct {
	operator Operator
	leftExpr Expression
	rightExpr Expression
	opType ComplexType
	// fixed means, that the whole binary operation is in '(' ')' and should not be combined differently
	// independent on operator priority!
	fixed bool
	line, column int
}

// UnaryOp is a prefix operation ('-' or '!') on one expression.
type UnaryOp struct {
	operator Operator
	expr Expression
	opType ComplexType
	line, column int
}

// FunCall is a function call or a struct creation (same source syntax).
type FunCall struct {
	funName string
	// as struct creation and funCalls have the same syntax, we set this flag, whether we have a
	// function call or a struct creation. Analysis and code generation may differ.
	createStruct bool
	args []Expression
	retTypes []ComplexType
	directAccess []DirectAccess
	line, column int
}
// Marker methods: these types satisfy the Expression interface.
func (_ Variable) expression() {}
func (_ Constant) expression() {}
func (_ Array) expression() {}
func (_ BinaryOp) expression() {}
func (_ UnaryOp) expression() {}
func (_ FunCall) expression() {}

// startPos returns the (line, column) source position where each expression begins.
func (e Variable) startPos() (int, int) {
	return e.line, e.column
}
func (e Constant) startPos() (int, int) {
	return e.line, e.column
}
func (e Array) startPos() (int, int) {
	return e.line, e.column
}
func (e BinaryOp) startPos() (int, int) {
	return e.line, e.column
}
func (e UnaryOp) startPos() (int, int) {
	return e.line, e.column
}
func (e FunCall) startPos() (int, int) {
	return e.line, e.column
}
// getAccessedType resolves the type that remains after applying a chain of
// direct accesses (array indexing / struct member access) to the base type c.
// Unknown struct members leave the type unchanged; validation happens elsewhere.
func getAccessedType(c ComplexType, access []DirectAccess, s *SymbolTable) ComplexType {
	for i := range access {
		a := access[i]
		if a.indexed {
			// Indexing strips one array level.
			c = *c.subType
			continue
		}
		// Struct member access: resolve the member type via the symbol table.
		entry, _ := s.getType(c.tName)
		for _, member := range entry.members {
			if member.memName == a.accessName {
				c = member.memType
				break
			}
		}
	}
	return c
}
// getExpressionTypes implementations: each expression kind reports its result
// types, adjusted for any trailing direct accesses.

func (e Constant) getExpressionTypes(s *SymbolTable) []ComplexType {
	t := ComplexType{e.cType, "", nil}
	return []ComplexType{t}
}

func (e Array) getExpressionTypes(s *SymbolTable) []ComplexType {
	t := getAccessedType(e.aType, e.directAccess, s)
	return []ComplexType{t}
}

func (e Variable) getExpressionTypes(s *SymbolTable) []ComplexType {
	t := getAccessedType(e.vType, e.directAccess, s)
	return []ComplexType{t}
}

func (e UnaryOp) getExpressionTypes(s *SymbolTable) []ComplexType {
	return []ComplexType{e.opType}
}

func (e BinaryOp) getExpressionTypes(s *SymbolTable) []ComplexType {
	return []ComplexType{e.opType}
}

func (e FunCall) getExpressionTypes(s *SymbolTable) []ComplexType {
	// Direct access only makes sense on a single return value.
	if len(e.retTypes) == 1 {
		return []ComplexType{getAccessedType(e.retTypes[0], e.directAccess, s)}
	}
	return e.retTypes
}
// getResultCount reports how many values an expression yields; only a
// function call can yield more than one.
func (e Constant) getResultCount() int {
	return 1
}
func (e Array) getResultCount() int {
	return 1
}
func (e Variable) getResultCount() int {
	return 1
}
func (e UnaryOp) getResultCount() int {
	return 1
}
func (e BinaryOp) getResultCount() int {
	return 1
}
func (e FunCall) getResultCount() int {
	return len(e.retTypes)
}

// isDirectlyAccessed reports whether the expression carries a trailing
// index/member access chain. Constants and operator results never do.
func (e Constant) isDirectlyAccessed() bool {
	return false
}
func (e Array) isDirectlyAccessed() bool {
	return len(e.directAccess) > 0
}
func (e Variable) isDirectlyAccessed() bool {
	return len(e.directAccess) > 0
}
func (e UnaryOp) isDirectlyAccessed() bool {
	return false
}
func (e BinaryOp) isDirectlyAccessed() bool {
	return false
}
func (e FunCall) isDirectlyAccessed() bool {
	return len(e.directAccess) > 0
}

// getDirectAccess returns the access chain, or nil where none can exist.
func (e Constant) getDirectAccess() []DirectAccess {
	return nil
}
func (e Array) getDirectAccess() []DirectAccess {
	return e.directAccess
}
func (e Variable) getDirectAccess() []DirectAccess {
	return e.directAccess
}
func (e UnaryOp) getDirectAccess() []DirectAccess {
	return nil
}
func (e BinaryOp) getDirectAccess() []DirectAccess {
	return nil
}
func (e FunCall) getDirectAccess() []DirectAccess {
	return e.directAccess
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// STATEMENTS
/////////////////////////////////////////////////////////////////////////////////////////////////
// StructMem is one member of a struct definition.
type StructMem struct {
	memName string
	// offset within the struct — presumably in flattened member slots or bytes; TODO confirm.
	offset int
	memType ComplexType
}

// StructDef declares a new struct type.
type StructDef struct {
	name string
	members []StructMem
	line, column int
}

// Block is a list of statements with its own scope.
type Block struct {
	statements []Statement
	symbolTable *SymbolTable
	line, column int
}

// Assignment assigns a list of expressions to a list of variables.
type Assignment struct {
	variables []Variable
	expressions []Expression
	line, column int
}

// Condition is an if statement with an optional else block.
type Condition struct {
	expression Expression
	block Block
	elseBlock Block
	line, column int
}

// Case is one arm of a switch.
type Case struct {
	// When comparing values, a nil expressions list means: 'default'
	// In a general switch, default is just 'true'
	expressions []Expression
	block Block
}

// Switch is a value switch or (with nil expression) a general boolean switch.
type Switch struct {
	// nil for a general switch
	expression Expression
	cases []Case
	line, column int
}

// Loop is a C-style for loop: init; conditions; increment { body }.
type Loop struct {
	assignment Assignment
	expressions []Expression
	incrAssignment Assignment
	block Block
	line, column int
}

// RangedLoop iterates over a range expression with counter and element variables.
type RangedLoop struct {
	counter Variable
	elem Variable
	rangeExpression Expression
	block Block
	line, column int
}

// Function declares a named function with parameters and return types.
type Function struct {
	fName string
	parameters []Variable
	returnTypes []ComplexType
	block Block
	line, column int
}

// Return exits a function, yielding zero or more expressions.
type Return struct {
	expressions []Expression
	line, column int
}

// Break exits the innermost loop.
type Break struct {
	line, column int
}

// Continue jumps to the next iteration of the innermost loop.
type Continue struct {
	line, column int
}
// Marker methods: these types satisfy the Statement interface.
// Note that FunCall is both an Expression and a Statement.
func (_ StructDef) statement() {}
func (_ Block) statement() {}
func (_ Assignment) statement() {}
func (_ Condition) statement() {}
func (_ Switch) statement() {}
func (_ Loop) statement() {}
func (_ Function) statement() {}
func (_ Return) statement() {}
func (_ FunCall) statement() {}
func (_ RangedLoop) statement() {}
func (_ Break) statement() {}
func (_ Continue) statement() {}

// startPos returns the (line, column) source position where each statement begins.
func (s StructDef) startPos() (int, int) {
	return s.line, s.column
}
func (s Block) startPos() (int, int) {
	return s.line, s.column
}
func (s Assignment) startPos() (int, int) {
	return s.line, s.column
}
func (s Condition) startPos() (int, int) {
	return s.line, s.column
}
func (s Switch) startPos() (int, int) {
	return s.line, s.column
}
func (s Loop) startPos() (int, int) {
	return s.line, s.column
}
func (s Function) startPos() (int, int) {
	return s.line, s.column
}
func (s Return) startPos() (int, int) {
	return s.line, s.column
}
func (s RangedLoop) startPos() (int, int) {
	return s.line, s.column
}
func (s Break) startPos() (int, int) {
	return s.line, s.column
}
func (s Continue) startPos() (int, int) {
	return s.line, s.column
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// AST, OPS STRING
/////////////////////////////////////////////////////////////////////////////////////////////////
// String renders the whole AST, one top-level statement per line,
// prefixed with an "AST:" header.
func (ast AST) String() string {
	out := "AST:\n"
	for _, stmt := range ast.block.statements {
		out += fmt.Sprintf("%v\n", stmt)
	}
	return out
}
// String renders an operator as its source-code symbol.
// Note that OP_MINUS and OP_NEGATIVE both render as "-"; unknown
// operators render as "?".
func (o Operator) String() string {
	switch o {
	case OP_PLUS:
		return "+"
	case OP_MINUS:
		return "-"
	case OP_MULT:
		return "*"
	case OP_DIV:
		return "/"
	case OP_MOD:
		return "%"
	case OP_NEGATIVE:
		return "-"
	case OP_EQ:
		return "=="
	case OP_NE:
		return "!="
	case OP_LE:
		return "<="
	case OP_GE:
		return ">="
	case OP_LESS:
		return "<"
	case OP_GREATER:
		return ">"
	case OP_AND:
		return "&&"
	case OP_OR:
		return "||"
	case OP_NOT:
		return "!"
	case OP_UNKNOWN:
		return "?"
	}
	return "?"
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// EXPRESSION STRING
/////////////////////////////////////////////////////////////////////////////////////////////////
// String renders a type: arrays show their element type, structs their
// qualified name, and leaf types their base-type name.
func (c ComplexType) String() string {
	if c.t == TYPE_ARRAY {
		return fmt.Sprintf("array[%v]", c.subType)
	}
	if c.t == TYPE_STRUCT {
		return fmt.Sprintf("struct[%v]", c.tName)
	}
	return c.t.String()
}
// String renders a variable reference; accessed variables show their element
// type and access chain, plain ones show an optional "shadow " prefix.
func (v Variable) String() string {
	if v.isDirectlyAccessed() {
		return fmt.Sprintf("%v(%v[%v])", v.vType.subType, v.vName, v.directAccess)
	}
	prefix := ""
	if v.vShadow {
		prefix = "shadow "
	}
	return fmt.Sprintf("%v%v(%v)", prefix, v.vType.t, v.vName)
}
// String renders a constant as "type(value)".
func (c Constant) String() string {
	return fmt.Sprintf("%v(%v)", c.cType, c.cValue)
}
// String renders a binary operation; explicitly parenthesized (fixed)
// operations keep their surrounding parentheses.
func (b BinaryOp) String() string {
	lparen, rparen := "", ""
	if b.fixed {
		lparen, rparen = "(", ")"
	}
	return fmt.Sprintf("%v%v %v %v%v", lparen, b.leftExpr, b.operator, b.rightExpr, rparen)
}
// String renders a unary operation as "op(expr)".
func (u UnaryOp) String() string {
	return fmt.Sprintf("%v(%v)", u.operator, u.expr)
}

// String renders an array literal as "[](type, count)".
func (a Array) String() string {
	return fmt.Sprintf("[](%v, %v)", a.aType, a.aCount)
}

// String renders a base type as its keyword; unknown types render as "?".
func (v Type) String() string {
	switch v {
	case TYPE_INT:
		return "int"
	case TYPE_STRING:
		return "string"
	case TYPE_CHAR:
		return "char"
	case TYPE_FLOAT:
		return "float"
	case TYPE_BOOL:
		return "bool"
	case TYPE_ARRAY:
		return "array"
	case TYPE_STRUCT:
		return "struct"
	case TYPE_WHATEVER:
		return "anything"
	}
	return "?"
}

// stringToType maps a type keyword to its Type constant; anything else
// (including "struct") yields TYPE_UNKNOWN.
func stringToType(s string) Type {
	switch s {
	case "int":
		return TYPE_INT
	case "float":
		return TYPE_FLOAT
	case "bool":
		return TYPE_BOOL
	case "char":
		return TYPE_CHAR
	case "string":
		return TYPE_STRING
	case "array":
		return TYPE_ARRAY
	}
	return TYPE_UNKNOWN
}
// isTypeString reports whether s names one of the basic, castable types.
func isTypeString(s string) bool {
	switch stringToType(s) {
	case TYPE_INT, TYPE_FLOAT, TYPE_BOOL, TYPE_ARRAY, TYPE_CHAR, TYPE_STRING:
		return true
	}
	return false
}
// String renders a function symbol entry as
// "SymbolFunEntry: (params: [t1 t2 ...], returns: [t1 ...])".
// Fixed: the constant fragments were wrapped in fmt.Sprintf with no format
// verbs — pointless overhead that vet-style linters flag; plain string
// concatenation produces identical output.
func (s SymbolFunEntry) String() string {
	st := "SymbolFunEntry: ("
	st += "params: ["
	for i, p := range s.paramTypes {
		st += p.String()
		// Blank-separate entries, but not after the last one.
		if i < len(s.paramTypes)-1 {
			st += " "
		}
	}
	st += "], returns: ["
	for i, p := range s.returnTypes {
		st += p.String()
		if i < len(s.returnTypes)-1 {
			st += " "
		}
	}
	st += "])"
	return st
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// STATEMENT STRING
/////////////////////////////////////////////////////////////////////////////////////////////////
// String renders an assignment as "v1, v2 = e1, e2".
// Fixed: constant fragments (", ", " = ") were produced via fmt.Sprintf with
// no format verbs — replaced with plain string concatenation (identical output).
func (a Assignment) String() (s string) {
	for i, v := range a.variables {
		s += fmt.Sprintf("%v", v)
		if i != len(a.variables)-1 {
			s += ", "
		}
	}
	s += " = "
	for i, v := range a.expressions {
		s += fmt.Sprintf("%v", v)
		if i != len(a.expressions)-1 {
			s += ", "
		}
	}
	return
}
// String renders a struct definition with one member per line.
func (st StructDef) String() string {
	out := "struct " + st.name + " {\n"
	for _, member := range st.members {
		out += fmt.Sprintf(" %v %v\n", member.memName, member.memType)
	}
	return out + "}"
}
// String renders an if statement, including its else block when present.
func (c Condition) String() string {
	out := fmt.Sprintf("if %v {\n", c.expression)
	for _, stmt := range c.block.statements {
		out += fmt.Sprintf("\t%v\n", stmt)
	}
	out += "}"
	if c.elseBlock.statements == nil {
		return out
	}
	out += " else {\n"
	for _, stmt := range c.elseBlock.statements {
		out += fmt.Sprintf("\t%v\n", stmt)
	}
	return out + "}"
}
// String renders one switch case with its comma-separated expressions and block.
func (c Case) String() string {
	out := "case "
	for i, e := range c.expressions {
		out += fmt.Sprintf("%v", e)
		if i != len(c.expressions)-1 {
			out += ", "
		}
	}
	out += ":\n"
	return out + fmt.Sprintf("%v\n", c.block)
}
// String renders a switch; a general switch (nil expression) has no selector.
func (sw Switch) String() string {
	out := "switch "
	if sw.expression != nil {
		out += fmt.Sprintf("%v ", sw.expression)
	}
	out += "{\n"
	for _, c := range sw.cases {
		out += c.String()
	}
	return out + "}"
}
// String renders a C-style for loop: "for init; conds; incr { ... }".
func (l Loop) String() (s string) {
	s += fmt.Sprintf("for %v; ", l.assignment)
	for i, e := range l.expressions {
		s += fmt.Sprintf("%v", e)
		if i != len(l.expressions)-1 {
			s += ", "
		}
	}
	s += fmt.Sprintf("; %v", l.incrAssignment)
	s += " {\n"
	for _, st := range l.block.statements {
		s += fmt.Sprintf("\t%v\n", st)
	}
	s += "}"
	return
}

// String renders a ranged loop: "for counter, elem : range { ... }".
func (l RangedLoop) String() (s string) {
	s += fmt.Sprintf("for %v, %v : %v {\n", l.counter, l.elem, l.rangeExpression)
	for _, st := range l.block.statements {
		s += fmt.Sprintf("\t%v\n", st)
	}
	s += "}"
	return
}

// String renders a return statement with its expression list.
func (s Return) String() string {
	return fmt.Sprintf("return %v", s.expressions)
}

// String renders a break statement.
func (s Break) String() string {
	return "break"
}

// String renders a continue statement.
func (s Continue) String() string {
	return "continue"
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// TOKEN CHANNEL
/////////////////////////////////////////////////////////////////////////////////////////////////
// Implements a channel with one cache/lookahead, that can be pushed back in (logically)
type TokenChannel struct {
	// c delivers tokens from the lexer.
	c chan Token
	// cached holds pushed-back tokens (LIFO), consumed before reading from c.
	cached []Token
}
// next pops the most recently pushed-back token if any, otherwise reads from
// the underlying channel. Panics if the channel was closed unexpectedly.
func (tc *TokenChannel) next() Token {
	if n := len(tc.cached); n > 0 {
		t := tc.cached[n-1]
		tc.cached = tc.cached[:n-1]
		return t
	}
	t, ok := <-tc.c
	if !ok {
		panic("Error: Channel closed unexpectedly.")
	}
	return t
}
// createToken builds a Token from its parts (the receiver is unused; this is
// a convenience constructor used when re-synthesizing tokens for pushBack).
func (tc *TokenChannel) createToken(t TokenType, v string, line, column int) Token {
	return Token{t, v, line, column}
}

// pushBack returns a token to the channel's lookahead cache (LIFO order).
func (tc *TokenChannel) pushBack(t Token) {
	tc.cached = append(tc.cached, t)
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// PARSER IMPLEMENTATION
/////////////////////////////////////////////////////////////////////////////////////////////////
// equalType compares two types recursively.
// Sometimes we are OK with non-strict equality, i.e. if we only need an array
// and don't care about the actual element type: TYPE_WHATEVER matches any base
// type, and in non-strict mode a missing subType on one side is accepted.
// Fixed: the final boolean relied on '&&' binding tighter than '||', which is
// easy to misread — explicit parentheses make the intent unmistakable
// (behavior unchanged).
func equalType(c1, c2 ComplexType, strict bool) bool {
	// TYPE_WHATEVER acts as a wildcard for the base type.
	if c1.t != TYPE_WHATEVER && c2.t != TYPE_WHATEVER && c1.t != c2.t {
		return false
	}
	// Struct types must refer to the same named type.
	if c1.tName != c2.tName {
		return false
	}
	// Both sides carry a subtype: compare those recursively.
	if c1.subType != nil && c2.subType != nil {
		return equalType(*c1.subType, *c2.subType, strict)
	}
	// At most one side has a subtype left: strict mode requires both absent.
	return !strict || (c1.subType == nil && c2.subType == nil)
}
// equalTypes compares two type lists element-wise using equalType.
func equalTypes(l1, l2 []ComplexType, strict bool) bool {
	if len(l1) != len(l2) {
		return false
	}
	for i := range l1 {
		if !equalType(l1[i], l2[i], strict) {
			return false
		}
	}
	return true
}
// typeIsGeneric reports whether TYPE_WHATEVER appears at any nesting level.
// Implemented as a walk down the subType chain instead of recursion.
func (c ComplexType) typeIsGeneric() bool {
	for t := &c; t != nil; t = t.subType {
		if t.t == TYPE_WHATEVER {
			return true
		}
	}
	return false
}
// getMemTypes flattens a type into its primitive member types.
// Structs are expanded recursively via the symbol table; any other type
// flattens to itself.
func (c ComplexType) getMemTypes(symbolTable *SymbolTable) []Type {
	if c.t != TYPE_STRUCT {
		return []Type{c.t}
	}
	entry, _ := symbolTable.getType(c.tName)
	types := make([]Type, 0)
	for _, member := range entry.members {
		types = append(types, member.memType.getMemTypes(symbolTable)...)
	}
	return types
}
// getMemCount returns how many primitive members the type flattens to.
func (c ComplexType) getMemCount(symbolTable *SymbolTable) int {
	return len(c.getMemTypes(symbolTable))
}

// typesToMemCount sums the flattened member counts over a list of types.
func typesToMemCount(ct []ComplexType, symbolTable *SymbolTable) int {
	total := 0
	for _, t := range ct {
		total += t.getMemCount(symbolTable)
	}
	return total
}
// Operator priority (Descending priority!):
// 0: '-', '!'
// 1: '*', '/', '%'
// 2: '+', '-'
// 3: '==', '!=', '<=', '>=', '<', '>'
// 4: '&&'
// 5: '||'

// priority returns the binding strength of an operator (lower binds tighter).
// Unknown operators are logged and given a very low priority of 100.
func (o Operator) priority() int {
	switch o {
	case OP_NEGATIVE, OP_NOT:
		return 0
	case OP_MULT, OP_DIV, OP_MOD:
		return 1
	case OP_PLUS, OP_MINUS:
		return 2
	case OP_EQ, OP_NE, OP_LE, OP_GE, OP_LESS, OP_GREATER:
		return 3
	case OP_AND:
		return 4
	case OP_OR:
		return 5
	default:
		fmt.Printf("Unknown operator: %v\n", o)
	}
	return 100
}

// getOperatorType maps an operator's source symbol to its Operator constant.
// Note: "-" always maps to OP_MINUS; unary negation (OP_NEGATIVE) is decided
// by the parser from context, not here.
func getOperatorType(o string) Operator {
	switch o {
	case "+":
		return OP_PLUS
	case "-":
		return OP_MINUS
	case "*":
		return OP_MULT
	case "/":
		return OP_DIV
	case "%":
		return OP_MOD
	case "==":
		return OP_EQ
	case "!=":
		return OP_NE
	case "<=":
		return OP_LE
	case ">=":
		return OP_GE
	case "<":
		return OP_LESS
	case ">":
		return OP_GREATER
	case "&&":
		return OP_AND
	case "||":
		return OP_OR
	case "!":
		return OP_NOT
	}
	return OP_UNKNOWN
}
// expectType consumes the next token when its type matches ttype; otherwise
// the token is pushed back. Returns the token's value, line, column and a
// success flag either way.
func (tokens *TokenChannel) expectType(ttype TokenType) (string, int, int, bool) {
	t := tokens.next()
	ok := t.tokenType == ttype
	if !ok {
		tokens.pushBack(t)
	}
	return t.value, t.line, t.column, ok
}

// expect consumes the next token when both its type and value match; otherwise
// the token is pushed back. Returns the token's line, column and a success flag.
func (tokens *TokenChannel) expect(ttype TokenType, value string) (int, int, bool) {
	t := tokens.next()
	ok := t.tokenType == ttype && t.value == value
	if !ok {
		tokens.pushBack(t)
	}
	return t.line, t.column, ok
}
// parseDirectAccess parses an arbitrarily long chain of array index accesses
// ("[expr]") and struct member accesses (".name"), returning them in order.
// An empty chain is not an error — the result is simply empty.
func parseDirectAccess(tokens *TokenChannel) (indexExpressions []DirectAccess, err error) {
	for {
		// Array index: '[' expression ']'
		if row, col, ok := tokens.expect(TOKEN_SQUARE_OPEN, "["); ok {
			e, parseErr := parseExpression(tokens)
			if errors.Is(parseErr, ErrCritical) {
				err = parseErr
				return
			}
			if row, col, ok := tokens.expect(TOKEN_SQUARE_CLOSE, "]"); !ok {
				err = fmt.Errorf("%w[%v:%v] - Expected ']' after array index expression",
					ErrCritical, row, col,
				)
				return
			}
			// row/col here are those of the opening '[' (the inner pair is scoped
			// to the error branch above).
			indexExpressions = append(indexExpressions, DirectAccess{true, e, "", 0, row, col})
			continue
		}
		// Struct member access: '.' identifier — anything else ends the chain.
		if _, _, ok := tokens.expect(TOKEN_DOT, "."); !ok {
			break
		}
		name, row, col, ok := tokens.expectType(TOKEN_IDENTIFIER)
		if !ok {
			err = fmt.Errorf("%w[%v:%v] - Expected struct member name after '.'",
				ErrCritical, row, col,
			)
			return
		}
		indexExpressions = append(indexExpressions, DirectAccess{false, nil, name, 0, row, col})
	}
	return
}
// parseVariable parses an optional "shadow" keyword, an identifier, and an
// optional direct-access chain. Once "shadow" was consumed, a missing
// identifier becomes a critical error (the keyword cannot be un-read cleanly).
func parseVariable(tokens *TokenChannel) (variable Variable, err error) {
	severity := ErrNormal
	_, _, shadowing := tokens.expect(TOKEN_KEYWORD, "shadow")
	if shadowing {
		severity = ErrCritical
	}
	vName, startRow, startCol, ok := tokens.expectType(TOKEN_IDENTIFIER)
	if !ok {
		err = fmt.Errorf("%wExpected identifier for variable", severity)
		return
	}
	directAccess, parseErr := parseDirectAccess(tokens)
	if errors.Is(parseErr, ErrCritical) {
		err = parseErr
		return
	}
	// The actual type is resolved later during semantic analysis.
	variable.directAccess = directAccess
	variable.vType = ComplexType{TYPE_UNKNOWN, "", nil}
	variable.vName = vName
	variable.vShadow = shadowing
	variable.line = startRow
	variable.column = startCol
	return
}

// parseVarList parses a comma-separated list of variables.
// An empty list is a normal (recoverable) error; a trailing ',' is critical.
func parseVarList(tokens *TokenChannel) (variables []Variable, err error) {
	lastRow, lastCol, ok := 0, 0, false
	i := 0
	for {
		v, parseErr := parseVariable(tokens)
		if errors.Is(parseErr, ErrCritical) {
			err = parseErr
			return
		}
		if parseErr != nil {
			// If we don't find any variable, thats fine. Just don't end in ',', thats an error!
			// We throw a normal error, so the parser up the chain can handle it how it likes.
			if i == 0 {
				err = fmt.Errorf("%wVariable list is empty or invalid", ErrNormal)
				return
			}
			err = fmt.Errorf("%w[%v:%v] - Variable list ends with ','", ErrCritical, lastRow, lastCol)
			variables = nil
			return
		}
		variables = append(variables, v)
		// Expect separating ','. Otherwise, all good, we are through!
		if lastRow, lastCol, ok = tokens.expect(TOKEN_SEPARATOR, ","); !ok {
			break
		}
		i += 1
	}
	return
}
// Literal classification patterns. Fixed: these were recompiled with
// regexp.MustCompile on every getConstType call — regex compilation is
// comparatively expensive, so they are now compiled once at package scope
// (the idiomatic Go pattern).
var (
	constFloatRegex  = regexp.MustCompile(`^(-?\d+\.\d*)`)
	constIntRegex    = regexp.MustCompile(`^(-?\d+)`)
	constCharRegex   = regexp.MustCompile(`^(\'.\')`)
	constStringRegex = regexp.MustCompile(`^(".*")`)
	constBoolRegex   = regexp.MustCompile(`^(true|false)`)
)

// getConstType classifies a constant literal by its spelling.
// The order of checks matters: float is tried before int so "1.5" is not
// matched by the shorter int prefix. Unrecognized input yields TYPE_UNKNOWN.
func getConstType(c string) Type {
	switch {
	case constFloatRegex.MatchString(c):
		return TYPE_FLOAT
	case constIntRegex.MatchString(c):
		return TYPE_INT
	case constCharRegex.MatchString(c):
		return TYPE_CHAR
	case constStringRegex.MatchString(c):
		return TYPE_STRING
	case constBoolRegex.MatchString(c):
		return TYPE_BOOL
	}
	return TYPE_UNKNOWN
}
// parseConstant consumes a single constant token and classifies its literal
// type; reports false (with a zero-valued Constant) when the next token is
// not a constant.
func parseConstant(tokens *TokenChannel) (Constant, bool) {
	v, row, col, ok := tokens.expectType(TOKEN_CONSTANT)
	if !ok {
		return Constant{TYPE_UNKNOWN, "", 0, 0}, false
	}
	return Constant{getConstType(v), v, row, col}, true
}
// parseFunCall parses "name(arg, ...)". The name may be an identifier or a
// basic-type keyword (treated as a type cast with call syntax). On any normal
// (recoverable) failure, every consumed token is pushed back so the caller can
// try another production — effectively LL(2) lookahead.
func parseFunCall(tokens *TokenChannel) (funCall FunCall, err error) {
	readKeyword := false
	v, startRow, startCol, ok := tokens.expectType(TOKEN_IDENTIFIER)
	if !ok {
		sv, row, col, ok := tokens.expectType(TOKEN_KEYWORD)
		if !ok {
			err = fmt.Errorf("%w - Invalid function call statement", ErrNormal)
			return
		}
		// We only consider type castings here!
		// In this case, this is just not a function call.
		if !isTypeString(sv) {
			err = fmt.Errorf("%w[%v:%v] - Function unknown", ErrNormal, row, col)
			tokens.pushBack(tokens.createToken(TOKEN_KEYWORD, sv, row, col))
			return
		}
		readKeyword = true
		v = sv
		startRow = row
		startCol = col
	}
	if row, col, ok := tokens.expect(TOKEN_PARENTHESIS_OPEN, "("); !ok {
		// This could still be just an assignment, so just a normal error!
		err = fmt.Errorf("%w[%v:%v] - Expected '(' for function call parameters", ErrNormal, row, col)
		// LL(2)
		// Re-synthesize the already-consumed name token with its original token type.
		var t TokenType = TOKEN_IDENTIFIER
		if readKeyword {
			t = TOKEN_KEYWORD
		}
		tokens.pushBack(tokens.createToken(t, v, startRow, startCol))
		return
	}
	// A non-critical error here just means an empty argument list, which is OK.
	expressions, parseErr := parseExpressionList(tokens)
	if errors.Is(parseErr, ErrCritical) {
		err = parseErr
		return
	}
	if row, col, ok := tokens.expect(TOKEN_PARENTHESIS_CLOSE, ")"); !ok {
		err = fmt.Errorf("%w[%v:%v] - Function call expects ')' after parameter list", ErrCritical, row, col)
		return
	}
	funCall.funName = v
	funCall.args = expressions
	// Return types are filled in later during semantic analysis.
	funCall.retTypes = []ComplexType{}
	funCall.line = startRow
	funCall.column = startCol
	return
}
// parseArrayExpression parses one of two array literal forms:
//   - "[](type, n)"  — a sized, zero-initialized array declaration
//   - "[e1, e2, ...]" — an explicit element list
// A missing leading '[' is a normal (recoverable) error; everything after it
// is critical, since no other production starts the same way.
func parseArrayExpression(tokens *TokenChannel) (array Array, err error) {
	startRow, startCol, ok := tokens.expect(TOKEN_SQUARE_OPEN, "[")
	if !ok {
		err = fmt.Errorf("%wExpected '['", ErrNormal)
		return
	}
	// .. = [](int, 5)
	if _, _, ok := tokens.expect(TOKEN_SQUARE_CLOSE, "]"); ok {
		if row, col, ok := tokens.expect(TOKEN_PARENTHESIS_OPEN, "("); !ok {
			err = fmt.Errorf("%w[%v:%v] - Expected '(' after '[]'", ErrCritical, row, col)
			return
		}
		t, parseErr := parseType(tokens)
		if errors.Is(parseErr, ErrCritical) {
			err = parseErr
			return
		}
		if row, col, ok := tokens.expect(TOKEN_SEPARATOR, ","); !ok {
			err = fmt.Errorf("%w[%v:%v] - Expected ',' after array type", ErrCritical, row, col)
			return
		}
		// The size must be a non-negative integer constant. ParseInt on an
		// absent/invalid constant fails and is caught by the combined check below.
		c, ok := parseConstant(tokens)
		cValue, tmpE := strconv.ParseInt(c.cValue, 10, 64)
		if !ok || tmpE != nil || c.cType != TYPE_INT || cValue < 0 {
			err = fmt.Errorf("%w[%v:%v] - Invalid size for array literal. Must be a constant int >= 0", ErrCritical, c.line, c.column)
			return
		}
		if row, col, ok := tokens.expect(TOKEN_PARENTHESIS_CLOSE, ")"); !ok {
			err = fmt.Errorf("%w[%v:%v] - Expected ')' after array declaration", ErrCritical, row, col)
			return
		}
		array.aType = ComplexType{TYPE_ARRAY, "", &t}
		array.aCount = int(cValue)
		array.aExpressions = []Expression{}
		array.line = startRow
		array.column = startCol
		return
	} else {
		// TODO: This must be able to parse chars later on. Like: ['a', 'b', 'c']!
		// Or does it already? Maybe because we can already handle Char constants?
		// .. = [1,2,3,4,5]
		expressions, parseErr := parseExpressionList(tokens)
		if errors.Is(parseErr, ErrCritical) {
			err = parseErr
			return
		}
		if row, col, ok := tokens.expect(TOKEN_SQUARE_CLOSE, "]"); !ok {
			err = fmt.Errorf("%w[%v:%v] - Expected ']' after expression list in array declaration", ErrCritical, row, col)
			return
		}
		if len(expressions) == 0 {
			err = fmt.Errorf("%w[%v:%v] - Expression list in array declaration can not be empty", ErrCritical, startRow, startCol)
			return
		}
		// Element type is unknown here; it is inferred during semantic analysis.
		array.aType = ComplexType{TYPE_UNKNOWN, "", nil}
		// This needs to be evaluated and counted up in analysis - One expression might have multiple values!
		array.aCount = 0
		array.aExpressions = expressions
		array.line = startRow
		array.column = startCol
		return
	}
}
//func parseStructExpr(tokens *TokenChannel) (st StructExpr, err error) {
// name, startRow, startCol, ok := tokens.expectType(TOKEN_IDENTIFIER)
// if !ok {
// err = fmt.Errorf("%wExpected identifier for struct expression", ErrNormal)
// return
// }
// if _, _, ok := tokens.expect(TOKEN_CURLY_OPEN, "{"); !ok {
// err = fmt.Errorf("%wExpected '{' after struct name", ErrNormal)
// tokens.pushBack(tokens.createToken(TOKEN_IDENTIFIER, name, startRow, startCol))
// return
// }
// expressions, parseErr := parseExpressionList(tokens)
// if errors.Is(parseErr, ErrCritical) {
// err = parseErr
// return
// }
// if row, col, ok := tokens.expect(TOKEN_CURLY_CLOSE, "}"); !ok {
// err = fmt.Errorf("%w[%v:%v] - Expected '}' after struct expression", ErrCritical, row, col)
// return
// }
// st.sType = ComplexType{TYPE_STRUCT, name, nil}
// st.name = name
// st.args = expressions
// return
//}
// parseSimpleExpression just parses variables, constants and '('...')'
// Alternatives are tried in a fixed order (function call, variable, array
// literal, constant, parenthesized expression); a normal error from one
// alternative falls through to the next, while critical errors abort.
func parseSimpleExpression(tokens *TokenChannel) (expression Expression, err error) {
	// Parse function call before parsing for variables, as they are syntactically equal
	// until the '(' for a function call!
	tmpFunCall, parseErr := parseFunCall(tokens)
	switch {
	case parseErr == nil:
		expression = tmpFunCall
		return
	case errors.Is(parseErr, ErrCritical):
		err = parseErr
		return
	}
	//	tmpStructExpr, parseErr := parseStructExpr(tokens)
	//	switch {
	//	case parseErr == nil:
	//		expression = tmpStructExpr
	//		return
	//	case errors.Is(parseErr, ErrCritical):
	//		err = parseErr
	//		return
	//	}
	tmpV, parseErr := parseVariable(tokens)
	switch {
	case parseErr == nil:
		expression = tmpV
		return
	case errors.Is(parseErr, ErrCritical):
		err = parseErr
		return
	}
	tmpA, parseErr := parseArrayExpression(tokens)
	switch {
	case parseErr == nil:
		expression = tmpA
		return
	case errors.Is(parseErr, ErrCritical):
		err = parseErr
		return
	}
	if tmpC, ok := parseConstant(tokens); ok {
		expression = tmpC
		return
	}
	// Or a '(', then continue until ')'.
	if row, col, ok := tokens.expect(TOKEN_PARENTHESIS_OPEN, "("); ok {
		e, parseErr := parseExpression(tokens)
		if parseErr != nil {
			// Once the '(' is consumed, any failure inside is critical.
			err = fmt.Errorf("%w%v", ErrCritical, parseErr.Error())
			return
		}
		if tmpE, ok := e.(BinaryOp); ok {
			// Mark the operation as fixed so operator-priority rebalancing
			// never regroups across the explicit parentheses.
			tmpE.fixed = true
			// We need to reassign the variable for the 'true' to hold instead of editing the local copy
			e = tmpE
		}
		expression = e
		// Expect TOKEN_PARENTHESIS_CLOSE
		row, col, ok = tokens.expect(TOKEN_PARENTHESIS_CLOSE, ")")
		if ok {
			return
		}
		err = fmt.Errorf("%w[%v:%v] - Expected ')'", ErrCritical, row, col)
		return
	}
	err = fmt.Errorf("%wInvalid simple expression", ErrNormal)
	return
}
// parseUnaryExpression parses a '-' or '!' prefix operator followed by an
// expression. A missing prefix is a normal (recoverable) error; a prefix
// without a valid expression after it is critical.
func parseUnaryExpression(tokens *TokenChannel) (expression Expression, err error) {
	unaries := []struct {
		symbol string
		op     Operator
	}{
		{"-", OP_NEGATIVE},
		{"!", OP_NOT},
	}
	for _, u := range unaries {
		row, col, ok := tokens.expect(TOKEN_OPERATOR, u.symbol)
		if !ok {
			continue
		}
		e, parseErr := parseExpression(tokens)
		if parseErr != nil {
			err = fmt.Errorf("%w[%v:%v] - Invalid expression after unary '%v'", ErrCritical, row, col, u.symbol)
			return
		}
		expression = UnaryOp{u.op, e, ComplexType{TYPE_UNKNOWN, "", nil}, row, col}
		return
	}
	err = fmt.Errorf("%wInvalid unary expression", ErrNormal)
	return
}
// parseExpression parses a unary or simple expression, optionally followed by
// a binary operator chain and/or a direct-access chain.
// NOTE(review): binary operations are built right-associatively here without
// regard to operator priority — presumably rebalanced later using priority()
// and the 'fixed' flag; confirm against the analysis phase.
func parseExpression(tokens *TokenChannel) (expression Expression, err error) {
	unaryExpression, parseErr := parseUnaryExpression(tokens)
	if parseErr == nil {
		expression = unaryExpression
	} else {
		simpleExpression, parseErr := parseSimpleExpression(tokens)
		if parseErr != nil {
			err = fmt.Errorf("%wSimple expression expected", parseErr)
			return
		}
		expression = simpleExpression
	}
	// Or an expression followed by a binop. Here we can continue just normally and just check
	// if token.next() == binop, and just then, throw the parsed expression into a binop one.
	if t, row, col, ok := tokens.expectType(TOKEN_OPERATOR); ok {
		// Create and return binary operation expression!
		rightHandExpr, parseErr := parseExpression(tokens)
		if parseErr != nil {
			err = fmt.Errorf("%w[%v:%v] - Invalid expression on right hand side of binary operation", ErrCritical, row, col)
			return
		}
		row, col = expression.startPos()
		finalExpression := BinaryOp{getOperatorType(t), expression, rightHandExpr, ComplexType{TYPE_UNKNOWN, "", nil}, false, row, col}
		expression = finalExpression
	}
	// A trailing access chain only attaches to expression kinds that support it.
	directAccess, parseErr := parseDirectAccess(tokens)
	if errors.Is(parseErr, ErrCritical) {
		err = parseErr
		return
	}
	if parseErr == nil && len(directAccess) > 0 {
		switch e := expression.(type) {
		case Variable:
			e.directAccess = directAccess
			expression = e
		case Array:
			e.directAccess = directAccess
			expression = e
		case FunCall:
			e.directAccess = directAccess
			expression = e
		default:
			row, col := expression.startPos()
			err = fmt.Errorf("%w[%v:%v] - Expression can not be indexed", ErrCritical, row, col)
			return
		}
	}
	// We just return the simpleExpression or unaryExpression and are happy
	return
}
// parseExpressionList parses a comma-separated list of expressions.
// An empty list propagates the underlying parser error (normal or critical);
// a trailing ',' is always critical.
func parseExpressionList(tokens *TokenChannel) (expressions []Expression, err error) {
	i := 0
	lastRow, lastCol, ok := 0, 0, false
	for {
		e, parseErr := parseExpression(tokens)
		if parseErr != nil {
			// If we don't find any expression, thats fine. Just don't end in ',', thats an error!
			if i == 0 {
				// We propagate the error from the parser. This might be normal or critical.
				err = fmt.Errorf("%w - Expression list is empty or invalid", parseErr)
				return
			}
			err = fmt.Errorf("%w[%v:%v] - Expression list ends in ','", ErrCritical, lastRow, lastCol)
			expressions = nil
			return
		}
		expressions = append(expressions, e)
		// Expect separating ','. Otherwise, all good, we are through!
		if lastRow, lastCol, ok = tokens.expect(TOKEN_SEPARATOR, ","); !ok {
			break
		}
		i += 1
	}
	return
}
// parseAssignment parses "v1, v2, ... = e1, e2, ...", including the shorthand
// forms "i++" / "i--" and "i += e" (also -=, *=, /=), which are desugared into
// equivalent BinaryOp assignments. Consumed tokens are pushed back (LIFO, so
// original order is restored) whenever the input turns out not to be an
// assignment, letting the caller retry another production.
func parseAssignment(tokens *TokenChannel) (assignment Assignment, err error) {
	// A list of variables!
	variables, parseErr := parseVarList(tokens)
	// No variables will return an ErrNormal. So all good, severity is handled up stream.
	if len(variables) == 0 {
		err = fmt.Errorf("%wExpected variable in assignment", parseErr)
		return
	}
	// This is most likely a critical error, like: a, = ...
	if parseErr != nil {
		err = fmt.Errorf("%w - Parsing the variable list for an assignment resulted in an error", parseErr)
		return
	}
	// Special case: i++ as an assignment i = i+1
	if len(variables) == 1 {
		o, row, col, ok := tokens.expectType(TOKEN_OPERATOR)
		if ok {
			assignment.variables = variables
			assignment.line = row
			assignment.column = col
			// Same one again!
			_, _, ok := tokens.expect(TOKEN_OPERATOR, o)
			if ok && (o == "+" || o == "-") {
				// Desugar i++ / i-- into i = i <op> 1.
				assignment.expressions = []Expression{
					BinaryOp{
						getOperatorType(o),
						variables[0],
						Constant{TYPE_INT, "1", row, col},
						ComplexType{TYPE_INT, "", nil},
						false,
						row, col,
					},
				}
				assignment.line = row
				assignment.column = col
				return
			}
			// Check, if we have the special case of: i += 2
			_, _, ok = tokens.expect(TOKEN_ASSIGNMENT, "=")
			if ok && (o == "+" || o == "-" || o == "*" || o == "/") {
				e, parseErr := parseExpression(tokens)
				if errors.Is(parseErr, ErrCritical) {
					err = parseErr
					return
				}
				// Desugar i <op>= e into i = i <op> e.
				assignment.expressions = []Expression{
					BinaryOp{getOperatorType(o), variables[0], e, ComplexType{TYPE_UNKNOWN, "", nil}, false, row, col},
				}
				return
			}
			// push the first operator token back
			// (failed expect/expectType calls above already pushed theirs back).
			tokens.pushBack(tokens.createToken(TOKEN_OPERATOR, o, row, col))
		}
	}
	// One TOKEN_ASSIGNMENT
	// If we got this far, we have a valid variable list. So from here on out, this _needs_ to be valid!
	// Right now, it can still be a function call!
	if row, col, ok := tokens.expect(TOKEN_ASSIGNMENT, "="); !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected '=' in assignment", ErrNormal, row, col)
		// LL(2)
		tokens.pushBack(tokens.createToken(TOKEN_IDENTIFIER, variables[0].vName, variables[0].line, variables[0].column))
		return
	}
	expressions, parseErr := parseExpressionList(tokens)
	// For now we also accept an empty expression list (ErrNormal). If this is valid or not, is handled in the
	// semanticAnalyzer.
	if errors.Is(parseErr, ErrCritical) {
		err = fmt.Errorf("%w - Invalid expression list in assignment", parseErr)
		return
	}
	row, col := variables[0].startPos()
	assignment = Assignment{variables, expressions, row, col}
	return
}
// if ::= 'if' exp '{' [stat] '}' [else '{' [stat] '}']
// parseCondition parses an if statement with an optional else block.
// A missing 'if' keyword is a normal (recoverable) error; everything after
// the keyword is critical.
func parseCondition(tokens *TokenChannel) (condition Condition, err error) {
	startRow, startCol, ok := 0, 0, false
	if startRow, startCol, ok = tokens.expect(TOKEN_KEYWORD, "if"); !ok {
		err = fmt.Errorf("%wExpected 'if' keyword for condition", ErrNormal)
		return
	}
	expression, parseErr := parseExpression(tokens)
	if parseErr != nil {
		err = fmt.Errorf("%w%v - Expected expression after 'if' keyword", ErrCritical, parseErr.Error())
		return
	}
	if row, col, ok := tokens.expect(TOKEN_CURLY_OPEN, "{"); !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected '{' after condition", ErrCritical, row, col)
		return
	}
	statements, parseErr := parseBlock(tokens)
	if parseErr != nil {
		err = fmt.Errorf("%w - Invalid statement list in condition block", parseErr)
		return
	}
	if row, col, ok := tokens.expect(TOKEN_CURLY_CLOSE, "}"); !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected '}' after condition block", ErrCritical, row, col)
		return
	}
	condition.expression = expression
	condition.block = statements
	// Just in case we have an else, handle it!
	if _, _, ok := tokens.expect(TOKEN_KEYWORD, "else"); ok {
		if row, col, ok := tokens.expect(TOKEN_CURLY_OPEN, "{"); !ok {
			err = fmt.Errorf("%w[%v:%v] - Expected '{' after 'else' in condition", ErrCritical, row, col)
			return
		}
		elseStatements, parseErr := parseBlock(tokens)
		if parseErr != nil {
			err = fmt.Errorf("%w - Invalid statement list in condition else block", parseErr)
			return
		}
		if row, col, ok := tokens.expect(TOKEN_CURLY_CLOSE, "}"); !ok {
			err = fmt.Errorf("%w[%v:%v] - Expected '}' after 'else' block in condition", ErrCritical, row, col)
			return
		}
		condition.elseBlock = elseStatements
	}
	condition.line = startRow
	condition.column = startCol
	return
}
// parseCases parses the 'case'/'default' entries of a switch body until no
// further entry is found.
//
// For a value switch (valueSwitch == true) the 'default' entry is encoded as
// a nil expression list; in a general switch it becomes the single constant
// expression 'true'.
//
// The loop only exits through one of its return statements: with err set to
// ErrNormal once neither 'case' nor 'default' matches (the regular exit after
// the last entry, cases collected so far are still returned), or with a
// critical error from a malformed entry. The trailing 'return' after the
// infinite loop was unreachable (flagged by `go vet`) and has been removed.
func parseCases(tokens *TokenChannel, valueSwitch bool) (cases []Case, err error) {
	var parseErr error
	for {
		var expressions []Expression
		var block Block
		// When comparing values, a nil expressions list means: 'default'
		// In a general switch, default is just 'true'
		if _, _, ok := tokens.expect(TOKEN_KEYWORD, "case"); !ok {
			if row, col, ok := tokens.expect(TOKEN_KEYWORD, "default"); ok {
				if row, col, ok := tokens.expect(TOKEN_COLON, ":"); !ok {
					err = fmt.Errorf("%w[%v:%v] - Expected ':' after default case", ErrCritical, row, col)
					return
				}
				if valueSwitch {
					expressions = nil
				} else {
					expressions = []Expression{Constant{TYPE_BOOL, "true", row, col}}
				}
				block, parseErr = parseBlock(tokens)
				if errors.Is(parseErr, ErrCritical) {
					err = parseErr
					return
				}
			} else {
				// No more case entries; this is the normal loop exit.
				err = ErrNormal
				return
			}
		} else {
			expressions, parseErr = parseExpressionList(tokens)
			if errors.Is(parseErr, ErrCritical) {
				err = parseErr
				return
			}
			if row, col, ok := tokens.expect(TOKEN_COLON, ":"); !ok {
				err = fmt.Errorf("%w[%v:%v] - Expected ':' after case expressions", ErrCritical, row, col)
				return
			}
			block, parseErr = parseBlock(tokens)
			if errors.Is(parseErr, ErrCritical) {
				err = parseErr
				return
			}
		}
		cases = append(cases, Case{expressions, block})
	}
}
// parseSwitch parses a switch statement. The switch expression is optional:
// when it is absent (or fails with a non-critical error) the statement is
// treated as a general condition switch and the expression is nil.
func parseSwitch(tokens *TokenChannel) (switchCase Switch, err error) {
	startRow, startCol, ok := tokens.expect(TOKEN_KEYWORD, "switch")
	if !ok {
		err = fmt.Errorf("%wExpected 'switch' keyword", ErrNormal)
		return
	}
	e, parseErr := parseExpression(tokens)
	switch {
	case errors.Is(parseErr, ErrCritical):
		err = parseErr
		return
	case parseErr != nil:
		// Non-critical failure: no value expression, general switch.
		e = nil
	}
	row, col, ok := tokens.expect(TOKEN_CURLY_OPEN, "{")
	if !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected '{'", ErrCritical, row, col)
		return
	}
	// A non-nil expression makes this a value switch; parseCases encodes the
	// 'default' entry differently for the two variants.
	cases, parseErr := parseCases(tokens, e != nil)
	if errors.Is(parseErr, ErrCritical) {
		err = parseErr
		return
	}
	row, col, ok = tokens.expect(TOKEN_CURLY_CLOSE, "}")
	if !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected '}'", ErrCritical, row, col)
		return
	}
	switchCase.expression = e
	switchCase.cases = cases
	switchCase.line = startRow
	switchCase.column = startCol
	return
}
// parseRangedLoop is special in the case, that we have multiple statements that start with 'for id, id'.
// So we have to push multiple tokens back to the channel, if we fail before encountering the ':'.
// After that, we fail hard!
//
// Grammar: 'for' identifier ',' identifier ':' exp '{' [stat] '}'
// The first identifier becomes the (int-typed) counter, the second the element
// variable whose type is unknown until semantic analysis.
func parseRangedLoop(tokens *TokenChannel) (loop RangedLoop, err error) {
	startRow, startCol, ok := 0, 0, false
	if startRow, startCol, ok = tokens.expect(TOKEN_KEYWORD, "for"); !ok {
		err = fmt.Errorf("%wExpected 'for' keyword for loop", ErrNormal)
		return
	}
	// First identifier: the loop counter.
	i, iRow, iCol, ok := tokens.expectType(TOKEN_IDENTIFIER)
	if !ok {
		// Not a ranged loop; restore the consumed 'for' token so another
		// statement parser (e.g. parseLoop) can retry from the same position.
		tokens.pushBack(tokens.createToken(TOKEN_KEYWORD, "for", startRow, startCol))
		err = fmt.Errorf("%wExpected identifier", ErrNormal)
		return
	}
	sRow, sCol, ok := tokens.expect(TOKEN_SEPARATOR, ",")
	if !ok {
		// Restore every consumed token so the statement can be re-parsed
		// from scratch by the next candidate parser.
		tokens.pushBack(tokens.createToken(TOKEN_IDENTIFIER, i, iRow, iCol))
		tokens.pushBack(tokens.createToken(TOKEN_KEYWORD, "for", startRow, startCol))
		err = fmt.Errorf("%wExpected separator ','", ErrNormal)
		return
	}
	// After a ',' there must be an identifier, no matter what. Might as well fail here!
	e, eRow, eCol, ok := tokens.expectType(TOKEN_IDENTIFIER)
	if !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected identifier after ','", ErrCritical, eRow, eCol)
		return
	}
	// Last place where this might be a normal loop. Push everything back!
	if _, _, ok := tokens.expect(TOKEN_COLON, ":"); !ok {
		tokens.pushBack(tokens.createToken(TOKEN_IDENTIFIER, e, eRow, eCol))
		tokens.pushBack(tokens.createToken(TOKEN_SEPARATOR, ",", sRow, sCol))
		tokens.pushBack(tokens.createToken(TOKEN_IDENTIFIER, i, iRow, iCol))
		tokens.pushBack(tokens.createToken(TOKEN_KEYWORD, "for", startRow, startCol))
		err = fmt.Errorf("%wExpected colon ':'", ErrNormal)
		return
	}
	// From here on the statement is committed to being a ranged loop: all
	// remaining failures are critical.
	a, parseErr := parseExpression(tokens)
	if errors.Is(parseErr, ErrCritical) {
		err = fmt.Errorf("%w - Invalid expression in ranged loop", parseErr)
		return
	}
	if row, col, ok := tokens.expect(TOKEN_CURLY_OPEN, "{"); !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected '{' after loop header", ErrCritical, row, col)
		return
	}
	forBlock, parseErr := parseBlock(tokens)
	if parseErr != nil {
		err = fmt.Errorf("%w - Error while parsing loop block", parseErr)
		return
	}
	if row, col, ok := tokens.expect(TOKEN_CURLY_CLOSE, "}"); !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected '}' after loop block", ErrCritical, row, col)
		return
	}
	// The counter is always an int; the element type is resolved later from
	// the range expression.
	loop.counter = Variable{ComplexType{TYPE_INT, "", nil}, i, false, nil, iRow, iCol}
	loop.elem = Variable{ComplexType{TYPE_UNKNOWN, "", nil}, e, false, nil, eRow, eCol}
	loop.rangeExpression = a
	loop.block = forBlock
	loop.line = startRow
	loop.column = startCol
	return
}
// parseLoop parses a classic three-part loop:
//
//	'for' [assignment] ';' [exp-list] ';' [assignment] '{' [stat] '}'
//
// Both assignments and the condition expression list may be empty; whether an
// empty part is valid is decided later (per the comments below, presumably in
// the semantic analyzer — confirm there). A missing 'for' is a normal error;
// after the keyword, all failures are critical.
func parseLoop(tokens *TokenChannel) (res Statement, err error) {
	startRow, startCol, ok := 0, 0, false
	if startRow, startCol, ok = tokens.expect(TOKEN_KEYWORD, "for"); !ok {
		err = fmt.Errorf("%wExpected 'for' keyword for loop", ErrNormal)
		return
	}
	// We don't care about a valid assignment. If there is none, we are fine too :)
	assignment, parseErr := parseAssignment(tokens)
	if errors.Is(parseErr, ErrCritical) {
		err = fmt.Errorf("%w - Invalid assignment in loop", parseErr)
		return
	}
	if row, col, ok := tokens.expect(TOKEN_SEMICOLON, ";"); !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected ';' after loop assignment", ErrCritical, row, col)
		return
	}
	// The loop condition(s); may be empty (non-critical errors are ignored).
	expressions, parseErr := parseExpressionList(tokens)
	if errors.Is(parseErr, ErrCritical) {
		err = fmt.Errorf("%w - Invalid expression list in loop expression", parseErr)
		return
	}
	if row, col, ok := tokens.expect(TOKEN_SEMICOLON, ";"); !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected ';' after loop expression", ErrCritical, row, col)
		return
	}
	// We are also fine with no assignment!
	incrAssignment, parseErr := parseAssignment(tokens)
	if errors.Is(parseErr, ErrCritical) {
		err = fmt.Errorf("%w - Invalid increment assignment in loop", parseErr)
		return
	}
	if row, col, ok := tokens.expect(TOKEN_CURLY_OPEN, "{"); !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected '{' after loop header", ErrCritical, row, col)
		return
	}
	forBlock, parseErr := parseBlock(tokens)
	if parseErr != nil {
		err = fmt.Errorf("%w - Error while parsing loop block", parseErr)
		return
	}
	if row, col, ok := tokens.expect(TOKEN_CURLY_CLOSE, "}"); !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected '}' after loop block", ErrCritical, row, col)
		return
	}
	var loop Loop
	loop.assignment = assignment
	loop.expressions = expressions
	loop.incrAssignment = incrAssignment
	loop.block = forBlock
	// The loop reports the position of its 'for' keyword.
	loop.line = startRow
	loop.column = startCol
	res = loop
	return
}
// parseType parses a type expression: either a named type — a keyword for a
// built-in type or an identifier for a struct type — or an array type of the
// form '[' ']' <sub-type>, recursing for the element type.
func parseType(tokens *TokenChannel) (t ComplexType, err error) {
	// Try the two named-type forms first; keywords take precedence.
	name, haveName := "", false
	if kw, _, _, ok := tokens.expectType(TOKEN_KEYWORD); ok {
		name, haveName = kw, true
	} else if id, _, _, ok := tokens.expectType(TOKEN_IDENTIFIER); ok {
		name, haveName = id, true
	}
	if haveName {
		// A name that maps to no built-in type is treated as a struct type.
		if base := stringToType(name); base == TYPE_UNKNOWN {
			t = ComplexType{TYPE_STRUCT, name, nil}
		} else {
			t = ComplexType{base, "", nil}
		}
		return
	}
	// Otherwise this must be an array type: '[' ']' followed by the sub-type.
	if row, col, ok := tokens.expect(TOKEN_SQUARE_OPEN, "["); !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected a type or '['", ErrCritical, row, col)
		return
	}
	if row, col, ok := tokens.expect(TOKEN_SQUARE_CLOSE, "]"); !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected ']'", ErrCritical, row, col)
		return
	}
	subType, parseErr := parseType(tokens)
	if parseErr != nil {
		err = parseErr
		return
	}
	t = ComplexType{TYPE_ARRAY, "", &subType}
	return
}
// parseArgList parses a list of 'name type' pairs. With expectSeparator ==
// true the entries are comma separated (function parameters); with false
// they follow each other directly (struct members).
//
// An empty list yields a normal error ("Not a variable"); a trailing ',' in
// a separated list is critical. All parsed variables are marked as shadowing.
func parseArgList(tokens *TokenChannel, expectSeparator bool) (variables []Variable, err error) {
	i := 0
	// lastRow/lastCol remember the position of the most recent ',' so a
	// trailing separator can be reported accurately.
	lastRow, lastCol, ok := 0, 0, false
	for {
		vName, row, col, vOK := tokens.expectType(TOKEN_IDENTIFIER)
		if !vOK {
			// If we don't find any variable, thats fine. Just don't end in ',', thats an error!
			if i == 0 {
				// We propagate the error from the parser. This might be normal or critical.
				err = fmt.Errorf("%wNot a variable", ErrNormal)
				return
			}
			// If we have a separator, it means, that we can only fail there. Failing here means, we
			// do have a trailing ','
			if expectSeparator {
				err = fmt.Errorf("%w[%v:%v] - Variable list ends in ','", ErrCritical, lastRow, lastCol)
				variables = nil
			}
			return
		}
		// Start with an unknown type; the declared type follows the name.
		v := Variable{ComplexType{TYPE_UNKNOWN, "", nil}, vName, false, nil, row, col}
		t, parseErr := parseType(tokens)
		if parseErr != nil {
			// Type errors (normal or critical) propagate unchanged.
			err = parseErr
			return
		}
		v.vType = t
		// Function parameters are always shadowing!
		v.vShadow = true
		variables = append(variables, v)
		if expectSeparator {
			// Expect separating ','. Otherwise, all good, we are through!
			if lastRow, lastCol, ok = tokens.expect(TOKEN_SEPARATOR, ","); !ok {
				break
			}
		}
		i += 1
	}
	return
}
// parseTypeList parses a comma-separated list of types (e.g. function return
// types). An empty list is reported as a normal error so callers can treat it
// as optional; a trailing ',' (a separator not followed by a type) is
// critical and discards the partial result.
func parseTypeList(tokens *TokenChannel) (types []ComplexType, err error) {
	// lastRow/lastCol track the position of the most recent ',' for the
	// trailing-separator error message.
	lastRow, lastCol, ok := 0, 0, false
	i := 0
	for {
		t, parseErr := parseType(tokens)
		if parseErr != nil {
			// If we don't find any type, thats fine. Just don't end in ',', thats an error!
			// We throw a normal error, so the parser up the chain can handle it how it likes.
			if i == 0 {
				err = fmt.Errorf("%wType list is empty or invalid", ErrNormal)
				return
			}
			err = fmt.Errorf("%w[%v:%v] - Type list ends with ','", ErrCritical, lastRow, lastCol)
			types = nil
			return
		}
		types = append(types, t)
		// Expect separating ','. Otherwise, all good, we are through!
		if lastRow, lastCol, ok = tokens.expect(TOKEN_SEPARATOR, ","); !ok {
			break
		}
		i += 1
	}
	return
}
// parseFunction parses a function definition:
//
//	'fun' identifier '(' [arg-list] ')' [type-list] '{' [stat] '}'
//
// A missing 'fun' keyword is a normal error; everything after it must parse,
// so all later failures are critical. The return-type list is optional (a
// non-critical error from parseTypeList is ignored).
func parseFunction(tokens *TokenChannel) (fun Function, err error) {
	startRow, startCol, ok := tokens.expect(TOKEN_KEYWORD, "fun")
	if !ok {
		err = fmt.Errorf("%wExpected 'fun' keyword for function", ErrNormal)
		return
	}
	funName, row, col, ok := tokens.expectType(TOKEN_IDENTIFIER)
	if !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected identifier after 'fun'", ErrCritical, row, col)
		return
	}
	if row, col, ok := tokens.expect(TOKEN_PARENTHESIS_OPEN, "("); !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected '(' for function parameters", ErrCritical, row, col)
		return
	}
	variables, parseErr := parseArgList(tokens, true)
	if errors.Is(parseErr, ErrCritical) {
		err = fmt.Errorf("%w - Invalid argument list in function header", parseErr)
		return
	}
	if row, col, ok := tokens.expect(TOKEN_PARENTHESIS_CLOSE, ")"); !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected ')' after function parameters", ErrCritical, row, col)
		return
	}
	returnTypes, parseErr := parseTypeList(tokens)
	if errors.Is(parseErr, ErrCritical) {
		err = fmt.Errorf("%w - Invalid return-type list", parseErr)
		return
	}
	if row, col, ok := tokens.expect(TOKEN_CURLY_OPEN, "{"); !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected '{' after function header", ErrCritical, row, col)
		return
	}
	funBlock, parseErr := parseBlock(tokens)
	if parseErr != nil {
		// Fixed copy/paste: this message previously said "loop block".
		err = fmt.Errorf("%w - Error while parsing function block", parseErr)
		return
	}
	if row, col, ok := tokens.expect(TOKEN_CURLY_CLOSE, "}"); !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected '}' after function block", ErrCritical, row, col)
		return
	}
	fun.fName = funName
	fun.parameters = variables
	fun.returnTypes = returnTypes
	fun.block = funBlock
	// The function reports the position of its 'fun' keyword.
	fun.line = startRow
	fun.column = startCol
	return
}
// parseReturn parses a 'return' statement with an optional (possibly empty)
// expression list. A missing 'return' keyword is a normal error.
func parseReturn(tokens *TokenChannel) (ret Return, err error) {
	row, col, ok := tokens.expect(TOKEN_KEYWORD, "return")
	if !ok {
		err = fmt.Errorf("%wExpected 'return' keyword", ErrNormal)
		return
	}
	// Non-critical errors from the expression list are ignored: returning
	// nothing is allowed here.
	expressions, parseErr := parseExpressionList(tokens)
	if errors.Is(parseErr, ErrCritical) {
		err = fmt.Errorf("%w - Invalid expression list in return statement", parseErr)
		return
	}
	ret.expressions = expressions
	ret.line = row
	ret.column = col
	return
}
// parseBreak parses a 'break' statement and records the keyword's position.
func parseBreak(tokens *TokenChannel) (br Break, err error) {
	row, col, ok := tokens.expect(TOKEN_KEYWORD, "break")
	if !ok {
		err = fmt.Errorf("%wExpected 'break' keyword", ErrNormal)
		return
	}
	br.line, br.column = row, col
	return
}
// parseContinue parses a 'continue' statement and records the keyword's
// position.
func parseContinue(tokens *TokenChannel) (cont Continue, err error) {
	startRow, startCol, ok := 0, 0, false
	if startRow, startCol, ok = tokens.expect(TOKEN_KEYWORD, "continue"); !ok {
		// Fixed copy/paste: the message previously claimed 'break' was expected.
		err = fmt.Errorf("%wExpected 'continue' keyword", ErrNormal)
		return
	}
	cont.line = startRow
	cont.column = startCol
	return
}
// parseStruct parses a struct definition:
//
//	'struct' identifier '{' [member-list] '}'
//
// Members reuse the function-parameter grammar (name followed by type), with
// no separators between entries. A missing 'struct' keyword is a normal
// error; everything after it is critical.
func parseStruct(tokens *TokenChannel) (st StructDef, err error) {
	startRow, startCol, ok := 0, 0, false
	if startRow, startCol, ok = tokens.expect(TOKEN_KEYWORD, "struct"); !ok {
		err = fmt.Errorf("%wExpected 'struct' keyword", ErrNormal)
		return
	}
	name, row, col, ok := tokens.expectType(TOKEN_IDENTIFIER)
	if !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected identifier after 'struct'", ErrCritical, row, col)
		return
	}
	if row, col, ok := tokens.expect(TOKEN_CURLY_OPEN, "{"); !ok {
		// Fixed copy/paste: the message previously said "function header".
		err = fmt.Errorf("%w[%v:%v] - Expected '{' after struct name", ErrCritical, row, col)
		return
	}
	// Struct definitions are basically the same as argument lists for function headers.
	variables, parseErr := parseArgList(tokens, false)
	if errors.Is(parseErr, ErrCritical) {
		err = fmt.Errorf("%w - Invalid struct member list", parseErr)
		return
	}
	if row, col, ok := tokens.expect(TOKEN_CURLY_CLOSE, "}"); !ok {
		err = fmt.Errorf("%w[%v:%v] - Expected '}' after struct definition", ErrCritical, row, col)
		return
	}
	st.name = name
	// Member offsets start at 0 here; presumably they are filled in by a
	// later phase — confirm where StructMem offsets are computed.
	st.members = make([]StructMem, len(variables))
	for i, v := range variables {
		st.members[i] = StructMem{v.vName, 0, v.vType}
	}
	st.line = startRow
	st.column = startCol
	return
}
// parseBlock parses a sequence of statements until the enclosing block ends
// (at EOF, before a '}', or before the next 'case'/'default' label inside a
// switch body).
//
// Strategy: each statement parser is tried in a fixed order. A nil error
// means the statement matched; a normal error means "not this statement kind,
// try the next parser"; a critical error aborts the whole block. The order
// appears significant — e.g. parseRangedLoop runs before parseLoop and relies
// on token push-back to let the latter retry (see parseRangedLoop).
func parseBlock(tokens *TokenChannel) (block Block, err error) {
	for {
		switch structStatement, parseErr := parseStruct(tokens); {
		case parseErr == nil:
			block.statements = append(block.statements, structStatement)
			continue
		case errors.Is(parseErr, ErrCritical):
			err = parseErr
			return
		}
		switch ifStatement, parseErr := parseCondition(tokens); {
		case parseErr == nil:
			block.statements = append(block.statements, ifStatement)
			continue
		case errors.Is(parseErr, ErrCritical):
			err = parseErr
			return
		}
		switch switchStatement, parseErr := parseSwitch(tokens); {
		case parseErr == nil:
			block.statements = append(block.statements, switchStatement)
			continue
		case errors.Is(parseErr, ErrCritical):
			err = parseErr
			return
		}
		switch loopStatement, parseErr := parseRangedLoop(tokens); {
		case parseErr == nil:
			block.statements = append(block.statements, loopStatement)
			continue
		case errors.Is(parseErr, ErrCritical):
			err = parseErr
			return
		}
		switch loopStatement, parseErr := parseLoop(tokens); {
		case parseErr == nil:
			block.statements = append(block.statements, loopStatement)
			continue
		case errors.Is(parseErr, ErrCritical):
			err = parseErr
			return
		}
		switch assignment, parseErr := parseAssignment(tokens); {
		case parseErr == nil:
			block.statements = append(block.statements, assignment)
			continue
		case errors.Is(parseErr, ErrCritical):
			err = parseErr
			return
		}
		switch function, parseErr := parseFunction(tokens); {
		case parseErr == nil:
			block.statements = append(block.statements, function)
			continue
		case errors.Is(parseErr, ErrCritical):
			err = parseErr
			return
		}
		switch ret, parseErr := parseReturn(tokens); {
		case parseErr == nil:
			block.statements = append(block.statements, ret)
			continue
		case errors.Is(parseErr, ErrCritical):
			err = parseErr
			return
		}
		switch br, parseErr := parseBreak(tokens); {
		case parseErr == nil:
			block.statements = append(block.statements, br)
			continue
		case errors.Is(parseErr, ErrCritical):
			err = parseErr
			return
		}
		switch cont, parseErr := parseContinue(tokens); {
		case parseErr == nil:
			block.statements = append(block.statements, cont)
			continue
		case errors.Is(parseErr, ErrCritical):
			err = parseErr
			return
		}
		switch funCall, parseErr := parseFunCall(tokens); {
		case parseErr == nil:
			block.statements = append(block.statements, funCall)
			continue
		case errors.Is(parseErr, ErrCritical):
			err = parseErr
			return
		}
		// End of input ends the (top-level) block cleanly.
		if _, _, ok := tokens.expect(TOKEN_EOF, ""); ok {
			return
		}
		// A block can only be closed with }. If we don't find that, we have an error on hand.
		row, col, ok := tokens.expect(TOKEN_CURLY_CLOSE, "}")
		if !ok {
			// Inside a switch body a block may also end right before the next
			// 'case'/'default' label; push the keyword back for parseCases.
			if row, col, ok := tokens.expect(TOKEN_KEYWORD, "case"); ok {
				tokens.pushBack(tokens.createToken(TOKEN_KEYWORD, "case", row, col))
				break
			}
			if row, col, ok := tokens.expect(TOKEN_KEYWORD, "default"); ok {
				tokens.pushBack(tokens.createToken(TOKEN_KEYWORD, "default", row, col))
				break
			}
			err = fmt.Errorf("%w[%v:%v] - Unexpected symbol. Can not be parsed.", ErrCritical, row, col)
			return
		}
		// The '}' belongs to the enclosing construct (if/loop/function), so
		// push it back for the caller to consume.
		tokens.pushBack(tokens.createToken(TOKEN_CURLY_CLOSE, "}", row, col))
		// If we don't recognize the current token as part of a known statement, we break
		// This means likely, that we are at the end of a block
		break
	}
	// The block inherits its position from its first statement (if any).
	if len(block.statements) > 0 {
		row, col := block.statements[0].startPos()
		block.line = row
		block.column = col
	}
	return
}
// parse is the parser entry point: it wraps the raw token channel and parses
// the entire input as a single top-level block.
func parse(tokens chan Token) (ast AST, err error) {
	tokenChan := TokenChannel{c: tokens}
	ast.block, err = parseBlock(&tokenChan)
	return
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package meta_test
import (
"fmt"
"reflect"
"sync/atomic"
"testing"
"time"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/testkit/external"
"github.com/pingcap/tidb/types"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/stretchr/testify/require"
)
// TestInitMetaTable checks that the DDL/backfill meta tables created in the
// 'test' database from session.DDLJobTables and session.BackfillTables end up
// with the same table definition as the bootstrap-created tables in 'mysql'.
func TestInitMetaTable(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	// Re-create the meta tables in 'test' using the same DDL the bootstrap
	// process uses for 'mysql'.
	for _, sql := range session.DDLJobTables {
		tk.MustExec(sql.SQL)
	}
	for _, sql := range session.BackfillTables {
		tk.MustExec(sql.SQL)
	}
	tbls := map[string]struct{}{
		"tidb_ddl_job":                    {},
		"tidb_ddl_reorg":                  {},
		"tidb_ddl_history":                {},
		"tidb_background_subtask":         {},
		"tidb_background_subtask_history": {},
	}
	for tbl := range tbls {
		metaInMySQL := external.GetTableByName(t, tk, "mysql", tbl).Meta().Clone()
		metaInTest := external.GetTableByName(t, tk, "test", tbl).Meta().Clone()
		require.Greater(t, metaInMySQL.ID, int64(0))
		require.Greater(t, metaInMySQL.UpdateTS, uint64(0))
		// ID and UpdateTS legitimately differ between the two copies, so
		// normalize them before comparing the full table infos.
		metaInTest.ID = metaInMySQL.ID
		metaInMySQL.UpdateTS = metaInTest.UpdateTS
		require.True(t, reflect.DeepEqual(metaInMySQL, metaInTest))
	}
}
// TestMetaTableRegion checks that, with EnableSplitTableRegion turned on,
// each DDL/backfill meta table lives in its own region whose start key is
// derived from the table's fixed ID.
func TestMetaTableRegion(t *testing.T) {
	// Force region splitting on for this test and restore the old setting.
	enableSplitTableRegionVal := atomic.LoadUint32(&ddl.EnableSplitTableRegion)
	atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
	defer atomic.StoreUint32(&ddl.EnableSplitTableRegion, enableSplitTableRegionVal)
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)

	// Query each table's regions once and read the region ID and start key
	// from the same result set. (Previously every table was queried twice —
	// once per column — which was redundant and could in principle observe
	// two different region layouts.)
	reorgRows := tk.MustQuery("show table mysql.tidb_ddl_reorg regions").Rows()
	ddlReorgTableRegionID := reorgRows[0][0]
	ddlReorgTableRegionStartKey := reorgRows[0][1]
	require.Equal(t, ddlReorgTableRegionStartKey, fmt.Sprintf("%s_%d_", tablecodec.TablePrefix(), ddl.ReorgTableID))

	jobRows := tk.MustQuery("show table mysql.tidb_ddl_job regions").Rows()
	ddlJobTableRegionID := jobRows[0][0]
	ddlJobTableRegionStartKey := jobRows[0][1]
	require.Equal(t, ddlJobTableRegionStartKey, fmt.Sprintf("%s_%d_", tablecodec.TablePrefix(), ddl.JobTableID))
	require.NotEqual(t, ddlJobTableRegionID, ddlReorgTableRegionID)

	subtaskRows := tk.MustQuery("show table mysql.tidb_background_subtask regions").Rows()
	ddlBackfillTableRegionID := subtaskRows[0][0]
	ddlBackfillTableRegionStartKey := subtaskRows[0][1]
	require.Equal(t, ddlBackfillTableRegionStartKey, fmt.Sprintf("%s_%d_", tablecodec.TablePrefix(), ddl.BackgroundSubtaskTableID))

	historyRows := tk.MustQuery("show table mysql.tidb_background_subtask_history regions").Rows()
	ddlBackfillHistoryTableRegionID := historyRows[0][0]
	ddlBackfillHistoryTableRegionStartKey := historyRows[0][1]
	require.Equal(t, ddlBackfillHistoryTableRegionStartKey, fmt.Sprintf("%s_%d_", tablecodec.TablePrefix(), ddl.BackgroundSubtaskHistoryTableID))
	require.NotEqual(t, ddlBackfillTableRegionID, ddlBackfillHistoryTableRegionID)
}
// MustReadCounter extracts the current value of a prometheus counter,
// failing the test immediately if the metric cannot be serialized.
func MustReadCounter(t *testing.T, m prometheus.Counter) float64 {
	var metric dto.Metric
	require.NoError(t, m.Write(&metric))
	return metric.GetCounter().GetValue()
}
// TestRecordTTLRows verifies that metrics.TTLInsertRowsCount advances by one
// for every row inserted into a table that has a TTL attribute. The counter
// is cumulative across the whole test, so the expected values grow.
//
// NOTE(review): the counter also advances for the two rows that are rolled
// back below (4.0 -> 6.0) — presumably it is recorded when the insert
// executes rather than at commit time; confirm against the metric's
// implementation.
func TestRecordTTLRows(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table t(created_at datetime) TTL = created_at + INTERVAL 1 DAY")
	// simple insert should be recorded
	tk.MustExec("insert into t values (NOW())")
	require.Equal(t, 1.0, MustReadCounter(t, metrics.TTLInsertRowsCount))
	// insert in a explicit transaction should be recorded
	tk.MustExec("begin")
	tk.MustExec("insert into t values (NOW())")
	tk.MustExec("commit")
	require.Equal(t, 2.0, MustReadCounter(t, metrics.TTLInsertRowsCount))
	// insert multiple rows should be the same
	tk.MustExec("begin")
	tk.MustExec("insert into t values (NOW())")
	tk.MustExec("insert into t values (NOW())")
	tk.MustExec("commit")
	require.Equal(t, 4.0, MustReadCounter(t, metrics.TTLInsertRowsCount))
	// rollback will remove all recorded TTL rows
	tk.MustExec("begin")
	tk.MustExec("insert into t values (NOW())")
	tk.MustExec("insert into t values (NOW())")
	tk.MustExec("rollback")
	require.Equal(t, 6.0, MustReadCounter(t, metrics.TTLInsertRowsCount))
	// savepoint will save the recorded TTL rows
	tk.MustExec("begin")
	tk.MustExec("insert into t values (NOW())")
	tk.MustExec("savepoint insert1")
	tk.MustExec("insert into t values (NOW())")
	tk.MustExec("rollback to insert1")
	tk.MustExec("commit")
	require.Equal(t, 7.0, MustReadCounter(t, metrics.TTLInsertRowsCount))
}
// TestInformationSchemaCreateTime checks that information_schema.tables and
// 'show table status' agree on a table's create_time, that an ALTER TABLE
// bumps the reported create_time, and that the value is rendered in the
// session time zone.
func TestInformationSchemaCreateTime(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table t (c int)")
	tk.MustExec(`set @@time_zone = 'Asia/Shanghai'`)
	ret := tk.MustQuery("select create_time from information_schema.tables where table_name='t';")
	// Make sure t1 is greater than t.
	time.Sleep(time.Second)
	tk.MustExec("alter table t modify c int default 11")
	ret1 := tk.MustQuery("select create_time from information_schema.tables where table_name='t';")
	ret2 := tk.MustQuery("show table status like 't'")
	// Rows()[0][11] is the create_time column of 'show table status'; it must
	// match information_schema.tables.
	require.Equal(t, ret2.Rows()[0][11].(string), ret1.Rows()[0][0].(string))
	typ1, err := types.ParseDatetime(nil, ret.Rows()[0][0].(string))
	require.NoError(t, err)
	typ2, err := types.ParseDatetime(nil, ret1.Rows()[0][0].(string))
	require.NoError(t, err)
	// The post-ALTER create_time must be strictly later than the original.
	r := typ2.Compare(typ1)
	require.Equal(t, 1, r)
	// Check that time_zone changes makes the create_time different
	tk.MustExec(`set @@time_zone = 'Europe/Amsterdam'`)
	ret = tk.MustQuery(`select create_time from information_schema.tables where table_name='t'`)
	ret2 = tk.MustQuery(`show table status like 't'`)
	require.Equal(t, ret2.Rows()[0][11].(string), ret.Rows()[0][0].(string))
	typ3, err := types.ParseDatetime(nil, ret.Rows()[0][0].(string))
	require.NoError(t, err)
	// Asia/Shanghai 2022-02-17 17:40:05 > Europe/Amsterdam 2022-02-17 10:40:05
	r = typ2.Compare(typ3)
	require.Equal(t, 1, r)
}
// TestISColumns tests information_schema.columns.
func TestISColumns(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	// Smoke test: the ORDINAL_POSITION column must be selectable without error.
	tk.MustExec("select ORDINAL_POSITION from INFORMATION_SCHEMA.COLUMNS;")
	// The utf8mb4 character set must be listed in CHARACTER_SETS.
	tk.MustQuery("SELECT CHARACTER_SET_NAME FROM INFORMATION_SCHEMA.CHARACTER_SETS WHERE CHARACTER_SET_NAME = 'utf8mb4'").Check(testkit.Rows("utf8mb4"))
}
|
package ordercancelreplacerequest
import (
"github.com/shopspring/decimal"
"time"
"github.com/quickfixgo/quickfix"
"github.com/quickfixgo/quickfix/enum"
"github.com/quickfixgo/quickfix/field"
"github.com/quickfixgo/quickfix/fix40"
"github.com/quickfixgo/quickfix/tag"
)
// OrderCancelReplaceRequest is the fix40 OrderCancelReplaceRequest type, MsgType = G.
type OrderCancelReplaceRequest struct {
	fix40.Header
	*quickfix.Body
	fix40.Trailer
	// Message is the underlying quickfix.Message that this typed view wraps.
	Message *quickfix.Message
}
// FromMessage creates a OrderCancelReplaceRequest from a quickfix.Message instance.
// The returned value shares (does not copy) the message's header, body and trailer.
func FromMessage(m *quickfix.Message) OrderCancelReplaceRequest {
	return OrderCancelReplaceRequest{
		Header:  fix40.Header{&m.Header},
		Body:    &m.Body,
		Trailer: fix40.Trailer{&m.Trailer},
		Message: m,
	}
}
// ToMessage returns a quickfix.Message instance.
func (m OrderCancelReplaceRequest) ToMessage() *quickfix.Message {
	return m.Message
}
// New returns a OrderCancelReplaceRequest initialized with the required fields
// for OrderCancelReplaceRequest (MsgType G is set on the header automatically).
func New(origclordid field.OrigClOrdIDField, clordid field.ClOrdIDField, handlinst field.HandlInstField, symbol field.SymbolField, side field.SideField, orderqty field.OrderQtyField, ordtype field.OrdTypeField) (m OrderCancelReplaceRequest) {
	m.Message = quickfix.NewMessage()
	m.Header = fix40.NewHeader(&m.Message.Header)
	m.Body = &m.Message.Body
	m.Trailer.Trailer = &m.Message.Trailer

	m.Header.Set(field.NewMsgType("G"))
	m.Set(origclordid)
	m.Set(clordid)
	m.Set(handlinst)
	m.Set(symbol)
	m.Set(side)
	m.Set(orderqty)
	m.Set(ordtype)

	return
}
// A RouteOut is the callback type that should be implemented for routing Message.
type RouteOut func(msg OrderCancelReplaceRequest, sessionID quickfix.SessionID) quickfix.MessageRejectError
// Route returns the beginstring, message type, and MessageRoute for this Message type.
func Route(router RouteOut) (string, string, quickfix.MessageRoute) {
	r := func(msg *quickfix.Message, sessionID quickfix.SessionID) quickfix.MessageRejectError {
		return router(FromMessage(msg), sessionID)
	}
	return "FIX.4.0", "G", r
}
// SetAccount sets Account, Tag 1.
func (m OrderCancelReplaceRequest) SetAccount(v string) {
	m.Set(field.NewAccount(v))
}

// SetClOrdID sets ClOrdID, Tag 11.
func (m OrderCancelReplaceRequest) SetClOrdID(v string) {
	m.Set(field.NewClOrdID(v))
}

// SetCommission sets Commission, Tag 12.
func (m OrderCancelReplaceRequest) SetCommission(value decimal.Decimal, scale int32) {
	m.Set(field.NewCommission(value, scale))
}

// SetCommType sets CommType, Tag 13.
func (m OrderCancelReplaceRequest) SetCommType(v enum.CommType) {
	m.Set(field.NewCommType(v))
}

// SetCurrency sets Currency, Tag 15.
func (m OrderCancelReplaceRequest) SetCurrency(v string) {
	m.Set(field.NewCurrency(v))
}

// SetExecInst sets ExecInst, Tag 18.
func (m OrderCancelReplaceRequest) SetExecInst(v enum.ExecInst) {
	m.Set(field.NewExecInst(v))
}

// SetHandlInst sets HandlInst, Tag 21.
func (m OrderCancelReplaceRequest) SetHandlInst(v enum.HandlInst) {
	m.Set(field.NewHandlInst(v))
}

// SetIDSource sets IDSource, Tag 22.
func (m OrderCancelReplaceRequest) SetIDSource(v enum.IDSource) {
	m.Set(field.NewIDSource(v))
}

// SetOrderID sets OrderID, Tag 37.
func (m OrderCancelReplaceRequest) SetOrderID(v string) {
	m.Set(field.NewOrderID(v))
}

// SetOrderQty sets OrderQty, Tag 38.
func (m OrderCancelReplaceRequest) SetOrderQty(value decimal.Decimal, scale int32) {
	m.Set(field.NewOrderQty(value, scale))
}

// SetOrdType sets OrdType, Tag 40.
func (m OrderCancelReplaceRequest) SetOrdType(v enum.OrdType) {
	m.Set(field.NewOrdType(v))
}

// SetOrigClOrdID sets OrigClOrdID, Tag 41.
func (m OrderCancelReplaceRequest) SetOrigClOrdID(v string) {
	m.Set(field.NewOrigClOrdID(v))
}

// SetPrice sets Price, Tag 44.
func (m OrderCancelReplaceRequest) SetPrice(value decimal.Decimal, scale int32) {
	m.Set(field.NewPrice(value, scale))
}

// SetRule80A sets Rule80A, Tag 47.
func (m OrderCancelReplaceRequest) SetRule80A(v enum.Rule80A) {
	m.Set(field.NewRule80A(v))
}

// SetSecurityID sets SecurityID, Tag 48.
func (m OrderCancelReplaceRequest) SetSecurityID(v string) {
	m.Set(field.NewSecurityID(v))
}

// SetSide sets Side, Tag 54.
func (m OrderCancelReplaceRequest) SetSide(v enum.Side) {
	m.Set(field.NewSide(v))
}

// SetSymbol sets Symbol, Tag 55.
func (m OrderCancelReplaceRequest) SetSymbol(v string) {
	m.Set(field.NewSymbol(v))
}

// SetText sets Text, Tag 58.
func (m OrderCancelReplaceRequest) SetText(v string) {
	m.Set(field.NewText(v))
}

// SetTimeInForce sets TimeInForce, Tag 59.
func (m OrderCancelReplaceRequest) SetTimeInForce(v enum.TimeInForce) {
	m.Set(field.NewTimeInForce(v))
}

// SetSettlmntTyp sets SettlmntTyp, Tag 63.
func (m OrderCancelReplaceRequest) SetSettlmntTyp(v enum.SettlmntTyp) {
	m.Set(field.NewSettlmntTyp(v))
}

// SetFutSettDate sets FutSettDate, Tag 64.
func (m OrderCancelReplaceRequest) SetFutSettDate(v string) {
	m.Set(field.NewFutSettDate(v))
}

// SetSymbolSfx sets SymbolSfx, Tag 65.
func (m OrderCancelReplaceRequest) SetSymbolSfx(v enum.SymbolSfx) {
	m.Set(field.NewSymbolSfx(v))
}

// SetListID sets ListID, Tag 66.
func (m OrderCancelReplaceRequest) SetListID(v string) {
	m.Set(field.NewListID(v))
}

// SetExecBroker sets ExecBroker, Tag 76.
func (m OrderCancelReplaceRequest) SetExecBroker(v string) {
	m.Set(field.NewExecBroker(v))
}

// SetStopPx sets StopPx, Tag 99.
func (m OrderCancelReplaceRequest) SetStopPx(value decimal.Decimal, scale int32) {
	m.Set(field.NewStopPx(value, scale))
}

// SetExDestination sets ExDestination, Tag 100.
func (m OrderCancelReplaceRequest) SetExDestination(v enum.ExDestination) {
	m.Set(field.NewExDestination(v))
}

// SetIssuer sets Issuer, Tag 106.
func (m OrderCancelReplaceRequest) SetIssuer(v string) {
	m.Set(field.NewIssuer(v))
}

// SetSecurityDesc sets SecurityDesc, Tag 107.
func (m OrderCancelReplaceRequest) SetSecurityDesc(v string) {
	m.Set(field.NewSecurityDesc(v))
}

// SetClientID sets ClientID, Tag 109.
func (m OrderCancelReplaceRequest) SetClientID(v string) {
	m.Set(field.NewClientID(v))
}

// SetMinQty sets MinQty, Tag 110.
func (m OrderCancelReplaceRequest) SetMinQty(value decimal.Decimal, scale int32) {
	m.Set(field.NewMinQty(value, scale))
}

// SetMaxFloor sets MaxFloor, Tag 111.
func (m OrderCancelReplaceRequest) SetMaxFloor(value decimal.Decimal, scale int32) {
	m.Set(field.NewMaxFloor(value, scale))
}

// SetSettlCurrency sets SettlCurrency, Tag 120.
func (m OrderCancelReplaceRequest) SetSettlCurrency(v string) {
	m.Set(field.NewSettlCurrency(v))
}

// SetForexReq sets ForexReq, Tag 121.
func (m OrderCancelReplaceRequest) SetForexReq(v bool) {
	m.Set(field.NewForexReq(v))
}

// SetExpireTime sets ExpireTime, Tag 126.
func (m OrderCancelReplaceRequest) SetExpireTime(v time.Time) {
	m.Set(field.NewExpireTime(v))
}
// GetAccount gets Account, Tag 1. err is non-nil when the field is absent
// or cannot be decoded.
func (m OrderCancelReplaceRequest) GetAccount() (v string, err quickfix.MessageRejectError) {
    var f field.AccountField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetClOrdID gets ClOrdID, Tag 11.
func (m OrderCancelReplaceRequest) GetClOrdID() (v string, err quickfix.MessageRejectError) {
    var f field.ClOrdIDField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetCommission gets Commission, Tag 12.
func (m OrderCancelReplaceRequest) GetCommission() (v decimal.Decimal, err quickfix.MessageRejectError) {
    var f field.CommissionField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetCommType gets CommType, Tag 13.
func (m OrderCancelReplaceRequest) GetCommType() (v enum.CommType, err quickfix.MessageRejectError) {
    var f field.CommTypeField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetCurrency gets Currency, Tag 15.
func (m OrderCancelReplaceRequest) GetCurrency() (v string, err quickfix.MessageRejectError) {
    var f field.CurrencyField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetExecInst gets ExecInst, Tag 18.
func (m OrderCancelReplaceRequest) GetExecInst() (v enum.ExecInst, err quickfix.MessageRejectError) {
    var f field.ExecInstField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetHandlInst gets HandlInst, Tag 21.
func (m OrderCancelReplaceRequest) GetHandlInst() (v enum.HandlInst, err quickfix.MessageRejectError) {
    var f field.HandlInstField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetIDSource gets IDSource, Tag 22.
func (m OrderCancelReplaceRequest) GetIDSource() (v enum.IDSource, err quickfix.MessageRejectError) {
    var f field.IDSourceField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetOrderID gets OrderID, Tag 37.
func (m OrderCancelReplaceRequest) GetOrderID() (v string, err quickfix.MessageRejectError) {
    var f field.OrderIDField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetOrderQty gets OrderQty, Tag 38.
func (m OrderCancelReplaceRequest) GetOrderQty() (v decimal.Decimal, err quickfix.MessageRejectError) {
    var f field.OrderQtyField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetOrdType gets OrdType, Tag 40.
func (m OrderCancelReplaceRequest) GetOrdType() (v enum.OrdType, err quickfix.MessageRejectError) {
    var f field.OrdTypeField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetOrigClOrdID gets OrigClOrdID, Tag 41.
func (m OrderCancelReplaceRequest) GetOrigClOrdID() (v string, err quickfix.MessageRejectError) {
    var f field.OrigClOrdIDField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetPrice gets Price, Tag 44.
func (m OrderCancelReplaceRequest) GetPrice() (v decimal.Decimal, err quickfix.MessageRejectError) {
    var f field.PriceField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetRule80A gets Rule80A, Tag 47.
func (m OrderCancelReplaceRequest) GetRule80A() (v enum.Rule80A, err quickfix.MessageRejectError) {
    var f field.Rule80AField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetSecurityID gets SecurityID, Tag 48.
func (m OrderCancelReplaceRequest) GetSecurityID() (v string, err quickfix.MessageRejectError) {
    var f field.SecurityIDField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetSide gets Side, Tag 54.
func (m OrderCancelReplaceRequest) GetSide() (v enum.Side, err quickfix.MessageRejectError) {
    var f field.SideField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetSymbol gets Symbol, Tag 55.
func (m OrderCancelReplaceRequest) GetSymbol() (v string, err quickfix.MessageRejectError) {
    var f field.SymbolField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetText gets Text, Tag 58.
func (m OrderCancelReplaceRequest) GetText() (v string, err quickfix.MessageRejectError) {
    var f field.TextField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetTimeInForce gets TimeInForce, Tag 59.
func (m OrderCancelReplaceRequest) GetTimeInForce() (v enum.TimeInForce, err quickfix.MessageRejectError) {
    var f field.TimeInForceField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetSettlmntTyp gets SettlmntTyp, Tag 63.
func (m OrderCancelReplaceRequest) GetSettlmntTyp() (v enum.SettlmntTyp, err quickfix.MessageRejectError) {
    var f field.SettlmntTypField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetFutSettDate gets FutSettDate, Tag 64.
func (m OrderCancelReplaceRequest) GetFutSettDate() (v string, err quickfix.MessageRejectError) {
    var f field.FutSettDateField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetSymbolSfx gets SymbolSfx, Tag 65.
func (m OrderCancelReplaceRequest) GetSymbolSfx() (v enum.SymbolSfx, err quickfix.MessageRejectError) {
    var f field.SymbolSfxField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetListID gets ListID, Tag 66.
func (m OrderCancelReplaceRequest) GetListID() (v string, err quickfix.MessageRejectError) {
    var f field.ListIDField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetExecBroker gets ExecBroker, Tag 76.
func (m OrderCancelReplaceRequest) GetExecBroker() (v string, err quickfix.MessageRejectError) {
    var f field.ExecBrokerField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetStopPx gets StopPx, Tag 99.
func (m OrderCancelReplaceRequest) GetStopPx() (v decimal.Decimal, err quickfix.MessageRejectError) {
    var f field.StopPxField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetExDestination gets ExDestination, Tag 100.
func (m OrderCancelReplaceRequest) GetExDestination() (v enum.ExDestination, err quickfix.MessageRejectError) {
    var f field.ExDestinationField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetIssuer gets Issuer, Tag 106.
func (m OrderCancelReplaceRequest) GetIssuer() (v string, err quickfix.MessageRejectError) {
    var f field.IssuerField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetSecurityDesc gets SecurityDesc, Tag 107.
func (m OrderCancelReplaceRequest) GetSecurityDesc() (v string, err quickfix.MessageRejectError) {
    var f field.SecurityDescField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetClientID gets ClientID, Tag 109.
func (m OrderCancelReplaceRequest) GetClientID() (v string, err quickfix.MessageRejectError) {
    var f field.ClientIDField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetMinQty gets MinQty, Tag 110.
func (m OrderCancelReplaceRequest) GetMinQty() (v decimal.Decimal, err quickfix.MessageRejectError) {
    var f field.MinQtyField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetMaxFloor gets MaxFloor, Tag 111.
func (m OrderCancelReplaceRequest) GetMaxFloor() (v decimal.Decimal, err quickfix.MessageRejectError) {
    var f field.MaxFloorField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetSettlCurrency gets SettlCurrency, Tag 120.
func (m OrderCancelReplaceRequest) GetSettlCurrency() (v string, err quickfix.MessageRejectError) {
    var f field.SettlCurrencyField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetForexReq gets ForexReq, Tag 121.
func (m OrderCancelReplaceRequest) GetForexReq() (v bool, err quickfix.MessageRejectError) {
    var f field.ForexReqField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}

// GetExpireTime gets ExpireTime, Tag 126.
func (m OrderCancelReplaceRequest) GetExpireTime() (v time.Time, err quickfix.MessageRejectError) {
    var f field.ExpireTimeField
    if err = m.Get(&f); err == nil {
        v = f.Value()
    }
    return
}
// HasAccount returns true if Account is present, Tag 1.
func (m OrderCancelReplaceRequest) HasAccount() bool {
    return m.Has(tag.Account)
}

// HasClOrdID returns true if ClOrdID is present, Tag 11.
func (m OrderCancelReplaceRequest) HasClOrdID() bool {
    return m.Has(tag.ClOrdID)
}

// HasCommission returns true if Commission is present, Tag 12.
func (m OrderCancelReplaceRequest) HasCommission() bool {
    return m.Has(tag.Commission)
}

// HasCommType returns true if CommType is present, Tag 13.
func (m OrderCancelReplaceRequest) HasCommType() bool {
    return m.Has(tag.CommType)
}

// HasCurrency returns true if Currency is present, Tag 15.
func (m OrderCancelReplaceRequest) HasCurrency() bool {
    return m.Has(tag.Currency)
}

// HasExecInst returns true if ExecInst is present, Tag 18.
func (m OrderCancelReplaceRequest) HasExecInst() bool {
    return m.Has(tag.ExecInst)
}

// HasHandlInst returns true if HandlInst is present, Tag 21.
func (m OrderCancelReplaceRequest) HasHandlInst() bool {
    return m.Has(tag.HandlInst)
}

// HasIDSource returns true if IDSource is present, Tag 22.
func (m OrderCancelReplaceRequest) HasIDSource() bool {
    return m.Has(tag.IDSource)
}

// HasOrderID returns true if OrderID is present, Tag 37.
func (m OrderCancelReplaceRequest) HasOrderID() bool {
    return m.Has(tag.OrderID)
}

// HasOrderQty returns true if OrderQty is present, Tag 38.
func (m OrderCancelReplaceRequest) HasOrderQty() bool {
    return m.Has(tag.OrderQty)
}

// HasOrdType returns true if OrdType is present, Tag 40.
func (m OrderCancelReplaceRequest) HasOrdType() bool {
    return m.Has(tag.OrdType)
}

// HasOrigClOrdID returns true if OrigClOrdID is present, Tag 41.
func (m OrderCancelReplaceRequest) HasOrigClOrdID() bool {
    return m.Has(tag.OrigClOrdID)
}

// HasPrice returns true if Price is present, Tag 44.
func (m OrderCancelReplaceRequest) HasPrice() bool {
    return m.Has(tag.Price)
}

// HasRule80A returns true if Rule80A is present, Tag 47.
func (m OrderCancelReplaceRequest) HasRule80A() bool {
    return m.Has(tag.Rule80A)
}

// HasSecurityID returns true if SecurityID is present, Tag 48.
func (m OrderCancelReplaceRequest) HasSecurityID() bool {
    return m.Has(tag.SecurityID)
}

// HasSide returns true if Side is present, Tag 54.
func (m OrderCancelReplaceRequest) HasSide() bool {
    return m.Has(tag.Side)
}

// HasSymbol returns true if Symbol is present, Tag 55.
func (m OrderCancelReplaceRequest) HasSymbol() bool {
    return m.Has(tag.Symbol)
}

// HasText returns true if Text is present, Tag 58.
func (m OrderCancelReplaceRequest) HasText() bool {
    return m.Has(tag.Text)
}

// HasTimeInForce returns true if TimeInForce is present, Tag 59.
func (m OrderCancelReplaceRequest) HasTimeInForce() bool {
    return m.Has(tag.TimeInForce)
}

// HasSettlmntTyp returns true if SettlmntTyp is present, Tag 63.
func (m OrderCancelReplaceRequest) HasSettlmntTyp() bool {
    return m.Has(tag.SettlmntTyp)
}

// HasFutSettDate returns true if FutSettDate is present, Tag 64.
func (m OrderCancelReplaceRequest) HasFutSettDate() bool {
    return m.Has(tag.FutSettDate)
}

// HasSymbolSfx returns true if SymbolSfx is present, Tag 65.
func (m OrderCancelReplaceRequest) HasSymbolSfx() bool {
    return m.Has(tag.SymbolSfx)
}

// HasListID returns true if ListID is present, Tag 66.
func (m OrderCancelReplaceRequest) HasListID() bool {
    return m.Has(tag.ListID)
}

// HasExecBroker returns true if ExecBroker is present, Tag 76.
func (m OrderCancelReplaceRequest) HasExecBroker() bool {
    return m.Has(tag.ExecBroker)
}

// HasStopPx returns true if StopPx is present, Tag 99.
func (m OrderCancelReplaceRequest) HasStopPx() bool {
    return m.Has(tag.StopPx)
}

// HasExDestination returns true if ExDestination is present, Tag 100.
func (m OrderCancelReplaceRequest) HasExDestination() bool {
    return m.Has(tag.ExDestination)
}

// HasIssuer returns true if Issuer is present, Tag 106.
func (m OrderCancelReplaceRequest) HasIssuer() bool {
    return m.Has(tag.Issuer)
}

// HasSecurityDesc returns true if SecurityDesc is present, Tag 107.
func (m OrderCancelReplaceRequest) HasSecurityDesc() bool {
    return m.Has(tag.SecurityDesc)
}

// HasClientID returns true if ClientID is present, Tag 109.
func (m OrderCancelReplaceRequest) HasClientID() bool {
    return m.Has(tag.ClientID)
}

// HasMinQty returns true if MinQty is present, Tag 110.
func (m OrderCancelReplaceRequest) HasMinQty() bool {
    return m.Has(tag.MinQty)
}

// HasMaxFloor returns true if MaxFloor is present, Tag 111.
func (m OrderCancelReplaceRequest) HasMaxFloor() bool {
    return m.Has(tag.MaxFloor)
}

// HasSettlCurrency returns true if SettlCurrency is present, Tag 120.
func (m OrderCancelReplaceRequest) HasSettlCurrency() bool {
    return m.Has(tag.SettlCurrency)
}

// HasForexReq returns true if ForexReq is present, Tag 121.
func (m OrderCancelReplaceRequest) HasForexReq() bool {
    return m.Has(tag.ForexReq)
}

// HasExpireTime returns true if ExpireTime is present, Tag 126.
func (m OrderCancelReplaceRequest) HasExpireTime() bool {
    return m.Has(tag.ExpireTime)
}
|
package postgres
import (
"context"
"errors"
"github.com/jackc/pgx/v5"
"github.com/pomerium/pomerium/pkg/cryptutil"
)
// migrations is the ordered list of schema migrations, indexed by migration
// version. Index 0 is intentionally unused (nil): the bookkeeping row starts
// at migration_version 0, and migrate applies entries starting at
// migrationVersion+1. Each migration runs inside the caller's transaction.
var migrations = []func(context.Context, pgx.Tx) error{
    // 1: initial schema — records, record changes, record options and leases.
    1: func(ctx context.Context, tx pgx.Tx) error {
        _, err := tx.Exec(ctx, `
            CREATE TABLE `+schemaName+`.`+recordsTableName+` (
                type TEXT NOT NULL,
                id TEXT NOT NULL,
                version BIGINT NOT NULL,
                data JSONB NOT NULL,
                modified_at TIMESTAMPTZ NOT NULL DEFAULT(NOW()),
                index_cidr INET NULL,
                PRIMARY KEY (type, id)
            )
        `)
        if err != nil {
            return err
        }
        // GiST index to support CIDR containment queries on index_cidr.
        _, err = tx.Exec(ctx, `
            CREATE INDEX ON `+schemaName+`.`+recordsTableName+`
            USING gist (index_cidr inet_ops);
        `)
        if err != nil {
            return err
        }
        _, err = tx.Exec(ctx, `
            CREATE TABLE `+schemaName+`.`+recordChangesTableName+` (
                type TEXT NOT NULL,
                id TEXT NOT NULL,
                version BIGINT NOT NULL,
                data JSONB NOT NULL,
                modified_at TIMESTAMPTZ NOT NULL,
                deleted_at TIMESTAMPTZ NULL,
                PRIMARY KEY (version)
            )
        `)
        if err != nil {
            return err
        }
        _, err = tx.Exec(ctx, `
            CREATE TABLE `+schemaName+`.`+recordOptionsTableName+` (
                type TEXT NOT NULL,
                capacity BIGINT NULL,
                PRIMARY KEY (type)
            )
        `)
        if err != nil {
            return err
        }
        _, err = tx.Exec(ctx, `
            CREATE TABLE `+schemaName+`.`+leasesTableName+` (
                name TEXT NOT NULL,
                id TEXT NOT NULL,
                expires_at TIMESTAMPTZ NOT NULL,
                PRIMARY KEY (name)
            )
        `)
        if err != nil {
            return err
        }
        return nil
    },
    // 2: reset stored data — assign a fresh random server version, wipe all
    // records and record changes, and make the record-changes version column
    // auto-generate via an identity sequence.
    2: func(ctx context.Context, tx pgx.Tx) error {
        serverVersion := uint64(cryptutil.NewRandomUInt32())
        _, err := tx.Exec(ctx, `
            UPDATE `+schemaName+`.`+migrationInfoTableName+`
            SET server_version = $1
        `, serverVersion)
        if err != nil {
            return err
        }
        _, err = tx.Exec(ctx, `
            DELETE FROM `+schemaName+`.`+recordChangesTableName+`
        `)
        if err != nil {
            return err
        }
        _, err = tx.Exec(ctx, `
            DELETE FROM `+schemaName+`.`+recordsTableName+`
        `)
        if err != nil {
            return err
        }
        // Safe only because the table was just emptied above: identity
        // columns cannot be added while conflicting values exist.
        _, err = tx.Exec(ctx, `
            ALTER TABLE `+schemaName+`.`+recordChangesTableName+`
            ALTER COLUMN version
            ADD GENERATED BY DEFAULT AS IDENTITY
        `)
        if err != nil {
            return err
        }
        return nil
    },
    // 3: add the services registry table.
    3: func(ctx context.Context, tx pgx.Tx) error {
        _, err := tx.Exec(ctx, `
            CREATE TABLE `+schemaName+`.`+servicesTableName+` (
                kind TEXT NOT NULL,
                endpoint TEXT NOT NULL,
                expires_at TIMESTAMPTZ NOT NULL,
                PRIMARY KEY (kind, endpoint)
            )
        `)
        if err != nil {
            return err
        }
        return nil
    },
    // 4: allow NULL data in record changes (e.g. for deletion entries).
    4: func(ctx context.Context, tx pgx.Tx) error {
        _, err := tx.Exec(ctx, `
            ALTER TABLE `+schemaName+`.`+recordChangesTableName+`
            ALTER data DROP NOT NULL
        `)
        if err != nil {
            return err
        }
        return nil
    },
    // 5: add secondary indexes for common lookup patterns on the records and
    // record-changes tables.
    5: func(ctx context.Context, tx pgx.Tx) error {
        for _, q := range []string{
            `CREATE INDEX ON ` + schemaName + `.` + recordsTableName + ` (type)`,
            `CREATE INDEX ON ` + schemaName + `.` + recordsTableName + ` (type, version)`,
            `CREATE INDEX ON ` + schemaName + `.` + recordChangesTableName + ` (modified_at)`,
            `CREATE INDEX ON ` + schemaName + `.` + recordChangesTableName + ` (version)`,
            `CREATE INDEX ON ` + schemaName + `.` + recordChangesTableName + ` (type)`,
            `CREATE INDEX ON ` + schemaName + `.` + recordChangesTableName + ` (type, version)`,
        } {
            _, err := tx.Exec(ctx, q)
            if err != nil {
                return err
            }
        }
        return nil
    },
}
// migrate brings the database schema up to date inside the given transaction.
// It creates the schema and the migration bookkeeping table if needed, seeds
// the bookkeeping row on first run, then applies any not-yet-run entries from
// migrations in order, recording progress after each. It returns the stored
// server version.
func migrate(ctx context.Context, tx pgx.Tx) (serverVersion uint64, err error) {
    _, err = tx.Exec(ctx, `CREATE SCHEMA IF NOT EXISTS `+schemaName)
    if err != nil {
        return serverVersion, err
    }
    _, err = tx.Exec(ctx, `
        CREATE TABLE IF NOT EXISTS `+schemaName+`.`+migrationInfoTableName+` (
            server_version BIGINT NOT NULL,
            migration_version SMALLINT NOT NULL
        )
    `)
    if err != nil {
        return serverVersion, err
    }

    var migrationVersion uint64
    // Use the table-name constant here for consistency with every other
    // query in this file (previously the name was hardcoded).
    err = tx.QueryRow(ctx, `
        SELECT server_version, migration_version
        FROM `+schemaName+`.`+migrationInfoTableName+`
    `).Scan(&serverVersion, &migrationVersion)
    if errors.Is(err, pgx.ErrNoRows) {
        // First run: seed the bookkeeping row at migration_version 0.
        // We can't actually store a uint64, just an int64, so just generate a uint32.
        serverVersion = uint64(cryptutil.NewRandomUInt32())
        _, err = tx.Exec(ctx, `
            INSERT INTO `+schemaName+`.`+migrationInfoTableName+` (server_version, migration_version)
            VALUES ($1, $2)
        `, serverVersion, 0)
    }
    if err != nil {
        return serverVersion, err
    }

    // Apply pending migrations in order; persist progress after each one so a
    // later failure does not re-run completed migrations.
    for version := migrationVersion + 1; version < uint64(len(migrations)); version++ {
        err = migrations[version](ctx, tx)
        if err != nil {
            return serverVersion, err
        }
        _, err = tx.Exec(ctx, `
            UPDATE `+schemaName+`.`+migrationInfoTableName+`
            SET migration_version = $1
        `, version)
        if err != nil {
            return serverVersion, err
        }
    }
    return serverVersion, nil
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package alpha
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"strings"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
)
// validate checks that the required Dataset fields are set and that the
// nested default encryption configuration, when present, is itself valid.
func (r *Dataset) validate() error {
    for _, required := range []string{"name", "project"} {
        if err := dcl.Required(r, required); err != nil {
            return err
        }
    }
    if cfg := r.DefaultEncryptionConfiguration; !dcl.IsEmptyValueIndirect(cfg) {
        if err := cfg.validate(); err != nil {
            return err
        }
    }
    return nil
}
// validate checks that role is set, that at most one access-target field is
// populated, and that the nested View/Routine references, when present, are
// themselves valid.
func (r *DatasetAccess) validate() error {
    if err := dcl.Required(r, "role"); err != nil {
        return err
    }
    // Exactly zero or one of these mutually exclusive targets may be set.
    targets := []string{"UserByEmail", "GroupByEmail", "Domain", "SpecialGroup", "IamMember", "View", "Routine"}
    if err := dcl.ValidateAtMostOneOfFieldsSet(targets, r.UserByEmail, r.GroupByEmail, r.Domain, r.SpecialGroup, r.IamMember, r.View, r.Routine); err != nil {
        return err
    }
    if view := r.View; !dcl.IsEmptyValueIndirect(view) {
        if err := view.validate(); err != nil {
            return err
        }
    }
    if routine := r.Routine; !dcl.IsEmptyValueIndirect(routine) {
        if err := routine.validate(); err != nil {
            return err
        }
    }
    return nil
}
// validate checks that all fields identifying the referenced view are set.
func (r *DatasetAccessView) validate() error {
    for _, required := range []string{"projectId", "datasetId", "tableId"} {
        if err := dcl.Required(r, required); err != nil {
            return err
        }
    }
    return nil
}
// validate checks that all fields identifying the referenced routine are set.
func (r *DatasetAccessRoutine) validate() error {
    for _, required := range []string{"projectId", "datasetId", "routineId"} {
        if err := dcl.Required(r, required); err != nil {
            return err
        }
    }
    return nil
}
// validate performs no checks: DatasetDefaultEncryptionConfiguration has no
// required fields.
func (r *DatasetDefaultEncryptionConfiguration) validate() error {
    return nil
}
// basePath returns the root URL of the BigQuery v2 REST API.
func (r *Dataset) basePath() string {
    return dcl.Nprintf("https://bigquery.googleapis.com/bigquery/v2/", map[string]interface{}{})
}
// getURL returns the URL used to fetch this dataset.
func (r *Dataset) getURL(userBasePath string) (string, error) {
    nr := r.urlNormalized()
    return dcl.URL("projects/{{project}}/datasets/{{name}}", nr.basePath(), userBasePath, map[string]interface{}{
        "project": dcl.ValueOrEmptyString(nr.Project),
        "name":    dcl.ValueOrEmptyString(nr.Name),
    }), nil
}
// listURL returns the URL used to list datasets in the resource's project.
func (r *Dataset) listURL(userBasePath string) (string, error) {
    nr := r.urlNormalized()
    return dcl.URL("projects/{{project}}/datasets", nr.basePath(), userBasePath, map[string]interface{}{
        "project": dcl.ValueOrEmptyString(nr.Project),
    }), nil
}
// createURL returns the URL used to create a dataset in the resource's project.
func (r *Dataset) createURL(userBasePath string) (string, error) {
    nr := r.urlNormalized()
    return dcl.URL("projects/{{project}}/datasets", nr.basePath(), userBasePath, map[string]interface{}{
        "project": dcl.ValueOrEmptyString(nr.Project),
    }), nil
}
// deleteURL returns the URL used to delete this dataset.
func (r *Dataset) deleteURL(userBasePath string) (string, error) {
    nr := r.urlNormalized()
    return dcl.URL("projects/{{project}}/datasets/{{name}}", nr.basePath(), userBasePath, map[string]interface{}{
        "project": dcl.ValueOrEmptyString(nr.Project),
        "name":    dcl.ValueOrEmptyString(nr.Name),
    }), nil
}
// datasetApiOperation represents a mutable operation in the underlying REST
// API such as Create, Update, or Delete. Implementations hold any
// operation-specific state (e.g. diffs or the recorded response).
type datasetApiOperation interface {
    do(context.Context, *Dataset, *Client) error
}
// newUpdateDatasetPatchDatasetRequest creates a request for an
// Dataset resource's PatchDataset update type by filling in the update
// fields based on the intended state of the resource. It also reads the
// resource's current etag from the API and copies it into the request body.
func newUpdateDatasetPatchDatasetRequest(ctx context.Context, f *Dataset, c *Client) (map[string]interface{}, error) {
    req := map[string]interface{}{}
    res := f
    _ = res
    // Only non-empty fields are transcribed into the request.
    if v := f.FriendlyName; !dcl.IsEmptyValueIndirect(v) {
        req["friendlyName"] = v
    }
    if v := f.Description; !dcl.IsEmptyValueIndirect(v) {
        req["description"] = v
    }
    if v := f.DefaultTableExpirationMs; !dcl.IsEmptyValueIndirect(v) {
        req["defaultTableExpirationMs"] = v
    }
    if v := f.DefaultPartitionExpirationMs; !dcl.IsEmptyValueIndirect(v) {
        req["defaultPartitionExpirationMs"] = v
    }
    if v := f.Labels; !dcl.IsEmptyValueIndirect(v) {
        req["labels"] = v
    }
    if v, err := expandDatasetAccessSlice(c, f.Access, res); err != nil {
        return nil, fmt.Errorf("error expanding Access into access: %w", err)
    } else if v != nil {
        req["access"] = v
    }
    // Fetch the current raw resource so its etag can be attached to the PATCH.
    b, err := c.getDatasetRaw(ctx, f)
    if err != nil {
        return nil, err
    }
    var m map[string]interface{}
    if err := json.Unmarshal(b, &m); err != nil {
        return nil, err
    }
    rawEtag, err := dcl.GetMapEntry(
        m,
        []string{"etag"},
    )
    if err != nil {
        // A missing etag is logged but not fatal; the request is sent without it.
        c.Config.Logger.WarningWithContextf(ctx, "Failed to fetch from JSON Path: %v", err)
    } else {
        req["etag"] = rawEtag.(string)
    }
    return req, nil
}
// marshalUpdateDatasetPatchDatasetRequest converts the update request map
// into the final JSON request body, relocating the flat name/project entries
// under the nested datasetReference object the API expects.
func marshalUpdateDatasetPatchDatasetRequest(c *Client, m map[string]interface{}) ([]byte, error) {
    dcl.MoveMapEntry(m, []string{"name"}, []string{"datasetReference", "datasetId"})
    dcl.MoveMapEntry(m, []string{"project"}, []string{"datasetReference", "projectId"})
    return json.Marshal(m)
}
// updateDatasetPatchDatasetOperation performs the PatchDataset update.
type updateDatasetPatchDatasetOperation struct {
    // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated.
    // Usually it will be nil - this is to prevent us from accidentally depending on apply
    // options, which should usually be unnecessary.
    ApplyOptions []dcl.ApplyOption
    // FieldDiffs records which fields differ and therefore drive this update.
    FieldDiffs []*dcl.FieldDiff
}
// do creates a request and sends it to the appropriate URL. In most operations,
// do will transcribe a subset of the resource into a request object and send a
// PUT request to a single URL. This operation sends a PATCH instead.
func (op *updateDatasetPatchDatasetOperation) do(ctx context.Context, r *Dataset, c *Client) error {
    // Confirm the resource exists before attempting to update it.
    _, err := c.GetDataset(ctx, r)
    if err != nil {
        return err
    }
    u, err := r.updateURL(c.Config.BasePath, "PatchDataset")
    if err != nil {
        return err
    }
    req, err := newUpdateDatasetPatchDatasetRequest(ctx, r, c)
    if err != nil {
        return err
    }
    c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req)
    body, err := marshalUpdateDatasetPatchDatasetRequest(c, req)
    if err != nil {
        return err
    }
    _, err = dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider)
    if err != nil {
        return err
    }
    return nil
}
// listDatasetRaw fetches one page of the datasets list endpoint and returns
// the raw response body. pageToken selects the page; pageSize is added to the
// query only when it differs from DatasetMaxPage.
func (c *Client) listDatasetRaw(ctx context.Context, r *Dataset, pageToken string, pageSize int32) ([]byte, error) {
    u, err := r.urlNormalized().listURL(c.Config.BasePath)
    if err != nil {
        return nil, err
    }
    m := make(map[string]string)
    if pageToken != "" {
        m["pageToken"] = pageToken
    }
    if pageSize != DatasetMaxPage {
        m["pageSize"] = fmt.Sprintf("%v", pageSize)
    }
    u, err = dcl.AddQueryParams(u, m)
    if err != nil {
        return nil, err
    }
    resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
    if err != nil {
        return nil, err
    }
    defer resp.Response.Body.Close()
    return ioutil.ReadAll(resp.Response.Body)
}
// listDatasetOperation is the JSON shape of one page of the datasets list
// response.
type listDatasetOperation struct {
    // Datasets holds the raw JSON object for each dataset on this page.
    Datasets []map[string]interface{} `json:"datasets"`
    // Token is the pagination token for the next page; empty on the last page.
    Token string `json:"nextPageToken"`
}
// listDataset fetches one page of Dataset resources, unmarshals each entry,
// and returns the page along with the token for the next page (empty when
// there are no more pages).
func (c *Client) listDataset(ctx context.Context, r *Dataset, pageToken string, pageSize int32) ([]*Dataset, string, error) {
    raw, err := c.listDatasetRaw(ctx, r, pageToken, pageSize)
    if err != nil {
        return nil, "", err
    }

    var page listDatasetOperation
    if err := json.Unmarshal(raw, &page); err != nil {
        return nil, "", err
    }

    var results []*Dataset
    for _, item := range page.Datasets {
        ds, err := unmarshalMapDataset(item, c, r)
        if err != nil {
            return nil, page.Token, err
        }
        // Propagate the project from the request resource onto each result.
        ds.Project = r.Project
        results = append(results, ds)
    }
    return results, page.Token, nil
}
// deleteAllDataset deletes every resource in resources for which f returns
// true. Individual deletion failures are collected rather than aborting the
// loop, so one failure does not stop the remaining deletions; the combined
// error (newline-joined) is returned at the end.
func (c *Client) deleteAllDataset(ctx context.Context, f func(*Dataset) bool, resources []*Dataset) error {
    // Renamed from "errors" to avoid shadowing the conventional package name.
    var errs []string
    for _, res := range resources {
        if !f(res) {
            continue
        }
        // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources.
        if err := c.DeleteDataset(ctx, res); err != nil {
            errs = append(errs, err.Error())
        }
    }
    if len(errs) == 0 {
        return nil
    }
    return fmt.Errorf("%v", strings.Join(errs, "\n"))
}
type deleteDatasetOperation struct{}
// do deletes the Dataset. A resource that is already gone is treated as
// success; after a successful DELETE, the method polls GetDataset until the
// resource stops being returned (guarding against read-after-delete races).
func (op *deleteDatasetOperation) do(ctx context.Context, r *Dataset, c *Client) error {
    r, err := c.GetDataset(ctx, r)
    if err != nil {
        if dcl.IsNotFound(err) {
            c.Config.Logger.InfoWithContextf(ctx, "Dataset not found, returning. Original error: %v", err)
            return nil
        }
        c.Config.Logger.WarningWithContextf(ctx, "GetDataset checking for existence. error: %v", err)
        return err
    }
    u, err := r.deleteURL(c.Config.BasePath)
    if err != nil {
        return err
    }
    // Delete should never have a body
    body := &bytes.Buffer{}
    _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider)
    if err != nil {
        return fmt.Errorf("failed to delete Dataset: %w", err)
    }
    // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration.
    // This is the reason we are adding retry to handle that case.
    retriesRemaining := 10
    // NOTE(review): the result of dcl.Do is discarded, so a final
    // dcl.NotDeletedError from the retry loop is never surfaced to the
    // caller — confirm this is intentional.
    dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) {
        _, err := c.GetDataset(ctx, r)
        if dcl.IsNotFound(err) {
            return nil, nil
        }
        if retriesRemaining > 0 {
            retriesRemaining--
            return &dcl.RetryDetails{}, dcl.OperationNotDone{}
        }
        return nil, dcl.NotDeletedError{ExistingResource: r}
    }, c.Config.RetryProvider)
    return nil
}
// Create operations are similar to Update operations, although they do not have
// specific request objects. The Create request object is the json encoding of
// the resource, which is modified by res.marshal to form the base request body.
type createDatasetOperation struct {
    // response holds the raw JSON body returned by the create call.
    response map[string]interface{}
}
// FirstResponse returns the raw create response and whether one was recorded.
func (op *createDatasetOperation) FirstResponse() (map[string]interface{}, bool) {
    resp := op.response
    return resp, len(resp) > 0
}
// do marshals the resource, POSTs it to the create URL, records the raw
// response on the operation, and then reads the resource back to confirm the
// create took effect.
func (op *createDatasetOperation) do(ctx context.Context, r *Dataset, c *Client) error {
    c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r)
    u, err := r.createURL(c.Config.BasePath)
    if err != nil {
        return err
    }
    req, err := r.marshal(c)
    if err != nil {
        return err
    }
    resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider)
    if err != nil {
        return err
    }
    o, err := dcl.ResponseBodyAsJSON(resp)
    if err != nil {
        return fmt.Errorf("error decoding response body into JSON: %w", err)
    }
    // Keep the raw response so FirstResponse can expose server-assigned fields.
    op.response = o
    if _, err := c.GetDataset(ctx, r); err != nil {
        c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err)
        return err
    }
    return nil
}
// getDatasetRaw GETs the dataset and returns the raw response body bytes.
func (c *Client) getDatasetRaw(ctx context.Context, r *Dataset) ([]byte, error) {
    u, err := r.getURL(c.Config.BasePath)
    if err != nil {
        return nil, err
    }
    resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
    if err != nil {
        return nil, err
    }
    defer resp.Response.Body.Close()
    b, err := ioutil.ReadAll(resp.Response.Body)
    if err != nil {
        return nil, err
    }
    return b, nil
}
// datasetDiffsForRawDesired fetches the live resource (optionally guided by a
// state hint in opts), canonicalizes both the initial and desired states, and
// returns them along with the field diffs between the two. A nil initial with
// non-nil desired means the resource does not exist yet.
func (c *Client) datasetDiffsForRawDesired(ctx context.Context, rawDesired *Dataset, opts ...dcl.ApplyOption) (initial, desired *Dataset, diffs []*dcl.FieldDiff, err error) {
    c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...")
    // First, let us see if the user provided a state hint. If they did, we will start fetching based on that.
    var fetchState *Dataset
    if sh := dcl.FetchStateHint(opts); sh != nil {
        if r, ok := sh.(*Dataset); !ok {
            c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Dataset, got %T", sh)
        } else {
            fetchState = r
        }
    }
    if fetchState == nil {
        fetchState = rawDesired
    }
    // 1.2: Retrieval of raw initial state from API
    rawInitial, err := c.GetDataset(ctx, fetchState)
    if rawInitial == nil {
        if !dcl.IsNotFound(err) {
            c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Dataset resource already exists: %s", err)
            return nil, nil, nil, fmt.Errorf("failed to retrieve Dataset resource: %v", err)
        }
        c.Config.Logger.InfoWithContext(ctx, "Found that Dataset resource did not exist.")
        // Perform canonicalization to pick up defaults.
        desired, err = canonicalizeDatasetDesiredState(rawDesired, rawInitial)
        return nil, desired, nil, err
    }
    c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Dataset: %v", rawInitial)
    c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Dataset: %v", rawDesired)
    // The Get call applies postReadExtract and so the result may contain fields that are not part of API version.
    if err := extractDatasetFields(rawInitial); err != nil {
        return nil, nil, nil, err
    }
    // 1.3: Canonicalize raw initial state into initial state.
    initial, err = canonicalizeDatasetInitialState(rawInitial, rawDesired)
    if err != nil {
        return nil, nil, nil, err
    }
    c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Dataset: %v", initial)
    // 1.4: Canonicalize raw desired state into desired state.
    desired, err = canonicalizeDatasetDesiredState(rawDesired, rawInitial, opts...)
    if err != nil {
        return nil, nil, nil, err
    }
    c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Dataset: %v", desired)
    // 2.1: Comparison of initial and desired state.
    diffs, err = diffDataset(c, desired, initial, opts...)
    return initial, desired, diffs, err
}
// canonicalizeDatasetInitialState currently returns the raw initial state
// unchanged.
func canonicalizeDatasetInitialState(rawInitial, rawDesired *Dataset) (*Dataset, error) {
    // TODO(magic-modules-eng): write canonicalizer once relevant traits are added.
    return rawInitial, nil
}
/*
 * Canonicalizers
 *
 * These are responsible for converting either a user-specified config or a
 * GCP API response to a standard format that can be used for difference checking.
 */
// canonicalizeDatasetDesiredState merges the user-specified desired state with
// the initial (live) state. For each field, when the desired value is
// equivalent to the initial value under server-side canonicalization rules,
// the initial value is kept so that no spurious diff is reported later.
func canonicalizeDatasetDesiredState(rawDesired, rawInitial *Dataset, opts ...dcl.ApplyOption) (*Dataset, error) {
	if rawInitial == nil {
		// Since the initial state is empty, the desired state is all we have.
		// We canonicalize the remaining nested objects with nil to pick up defaults.
		rawDesired.DefaultEncryptionConfiguration = canonicalizeDatasetDefaultEncryptionConfiguration(rawDesired.DefaultEncryptionConfiguration, nil, opts...)
		return rawDesired, nil
	}
	canonicalDesired := &Dataset{}
	if dcl.StringCanonicalize(rawDesired.Name, rawInitial.Name) {
		canonicalDesired.Name = rawInitial.Name
	} else {
		canonicalDesired.Name = rawDesired.Name
	}
	if dcl.IsZeroValue(rawDesired.Project) || (dcl.IsEmptyValueIndirect(rawDesired.Project) && dcl.IsEmptyValueIndirect(rawInitial.Project)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		canonicalDesired.Project = rawInitial.Project
	} else {
		canonicalDesired.Project = rawDesired.Project
	}
	if dcl.StringCanonicalize(rawDesired.FriendlyName, rawInitial.FriendlyName) {
		canonicalDesired.FriendlyName = rawInitial.FriendlyName
	} else {
		canonicalDesired.FriendlyName = rawDesired.FriendlyName
	}
	if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) {
		canonicalDesired.Description = rawInitial.Description
	} else {
		canonicalDesired.Description = rawDesired.Description
	}
	if dcl.StringCanonicalize(rawDesired.DefaultTableExpirationMs, rawInitial.DefaultTableExpirationMs) {
		canonicalDesired.DefaultTableExpirationMs = rawInitial.DefaultTableExpirationMs
	} else {
		canonicalDesired.DefaultTableExpirationMs = rawDesired.DefaultTableExpirationMs
	}
	if dcl.StringCanonicalize(rawDesired.DefaultPartitionExpirationMs, rawInitial.DefaultPartitionExpirationMs) {
		canonicalDesired.DefaultPartitionExpirationMs = rawInitial.DefaultPartitionExpirationMs
	} else {
		canonicalDesired.DefaultPartitionExpirationMs = rawDesired.DefaultPartitionExpirationMs
	}
	if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		canonicalDesired.Labels = rawInitial.Labels
	} else {
		canonicalDesired.Labels = rawDesired.Labels
	}
	// Nested objects are canonicalized element-by-element by their own helpers.
	canonicalDesired.Access = canonicalizeDatasetAccessSlice(rawDesired.Access, rawInitial.Access, opts...)
	if dcl.StringCanonicalize(rawDesired.Location, rawInitial.Location) {
		canonicalDesired.Location = rawInitial.Location
	} else {
		canonicalDesired.Location = rawDesired.Location
	}
	if dcl.BoolCanonicalize(rawDesired.Published, rawInitial.Published) {
		canonicalDesired.Published = rawInitial.Published
	} else {
		canonicalDesired.Published = rawDesired.Published
	}
	canonicalDesired.DefaultEncryptionConfiguration = canonicalizeDatasetDefaultEncryptionConfiguration(rawDesired.DefaultEncryptionConfiguration, rawInitial.DefaultEncryptionConfiguration, opts...)
	return canonicalDesired, nil
}
// canonicalizeDatasetNewState reconciles the state returned by the API
// (rawNew) with what was requested (rawDesired), mutating and returning
// rawNew. If both sides of a field are effectively empty the desired value is
// kept; otherwise the desired value wins only when the two values are
// equivalent under canonicalization. Branches with an intentionally empty
// else (e.g. Project, Labels, CreationTime, LastModifiedTime) keep the
// server-returned value unchanged.
func canonicalizeDatasetNewState(c *Client, rawNew, rawDesired *Dataset) (*Dataset, error) {
	if dcl.IsEmptyValueIndirect(rawNew.Etag) && dcl.IsEmptyValueIndirect(rawDesired.Etag) {
		rawNew.Etag = rawDesired.Etag
	} else {
		if dcl.StringCanonicalize(rawDesired.Etag, rawNew.Etag) {
			rawNew.Etag = rawDesired.Etag
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.Id) && dcl.IsEmptyValueIndirect(rawDesired.Id) {
		rawNew.Id = rawDesired.Id
	} else {
		if dcl.StringCanonicalize(rawDesired.Id, rawNew.Id) {
			rawNew.Id = rawDesired.Id
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.SelfLink) && dcl.IsEmptyValueIndirect(rawDesired.SelfLink) {
		rawNew.SelfLink = rawDesired.SelfLink
	} else {
		if dcl.StringCanonicalize(rawDesired.SelfLink, rawNew.SelfLink) {
			rawNew.SelfLink = rawDesired.SelfLink
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) {
		rawNew.Name = rawDesired.Name
	} else {
		if dcl.StringCanonicalize(rawDesired.Name, rawNew.Name) {
			rawNew.Name = rawDesired.Name
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.Project) && dcl.IsEmptyValueIndirect(rawDesired.Project) {
		rawNew.Project = rawDesired.Project
	} else {
		// Intentionally empty: keep the server-returned Project as-is.
	}
	if dcl.IsEmptyValueIndirect(rawNew.FriendlyName) && dcl.IsEmptyValueIndirect(rawDesired.FriendlyName) {
		rawNew.FriendlyName = rawDesired.FriendlyName
	} else {
		if dcl.StringCanonicalize(rawDesired.FriendlyName, rawNew.FriendlyName) {
			rawNew.FriendlyName = rawDesired.FriendlyName
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) {
		rawNew.Description = rawDesired.Description
	} else {
		if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) {
			rawNew.Description = rawDesired.Description
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.DefaultTableExpirationMs) && dcl.IsEmptyValueIndirect(rawDesired.DefaultTableExpirationMs) {
		rawNew.DefaultTableExpirationMs = rawDesired.DefaultTableExpirationMs
	} else {
		if dcl.StringCanonicalize(rawDesired.DefaultTableExpirationMs, rawNew.DefaultTableExpirationMs) {
			rawNew.DefaultTableExpirationMs = rawDesired.DefaultTableExpirationMs
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.DefaultPartitionExpirationMs) && dcl.IsEmptyValueIndirect(rawDesired.DefaultPartitionExpirationMs) {
		rawNew.DefaultPartitionExpirationMs = rawDesired.DefaultPartitionExpirationMs
	} else {
		if dcl.StringCanonicalize(rawDesired.DefaultPartitionExpirationMs, rawNew.DefaultPartitionExpirationMs) {
			rawNew.DefaultPartitionExpirationMs = rawDesired.DefaultPartitionExpirationMs
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) {
		rawNew.Labels = rawDesired.Labels
	} else {
		// Intentionally empty: keep the server-returned Labels as-is.
	}
	if dcl.IsEmptyValueIndirect(rawNew.Access) && dcl.IsEmptyValueIndirect(rawDesired.Access) {
		rawNew.Access = rawDesired.Access
	} else {
		// Access is an unordered set; match elements by equivalence, not index.
		rawNew.Access = canonicalizeNewDatasetAccessSet(c, rawDesired.Access, rawNew.Access)
	}
	if dcl.IsEmptyValueIndirect(rawNew.CreationTime) && dcl.IsEmptyValueIndirect(rawDesired.CreationTime) {
		rawNew.CreationTime = rawDesired.CreationTime
	} else {
		// Intentionally empty: output-only field, keep the server value.
	}
	if dcl.IsEmptyValueIndirect(rawNew.LastModifiedTime) && dcl.IsEmptyValueIndirect(rawDesired.LastModifiedTime) {
		rawNew.LastModifiedTime = rawDesired.LastModifiedTime
	} else {
		// Intentionally empty: output-only field, keep the server value.
	}
	if dcl.IsEmptyValueIndirect(rawNew.Location) && dcl.IsEmptyValueIndirect(rawDesired.Location) {
		rawNew.Location = rawDesired.Location
	} else {
		if dcl.StringCanonicalize(rawDesired.Location, rawNew.Location) {
			rawNew.Location = rawDesired.Location
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.Published) && dcl.IsEmptyValueIndirect(rawDesired.Published) {
		rawNew.Published = rawDesired.Published
	} else {
		if dcl.BoolCanonicalize(rawDesired.Published, rawNew.Published) {
			rawNew.Published = rawDesired.Published
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.DefaultEncryptionConfiguration) && dcl.IsEmptyValueIndirect(rawDesired.DefaultEncryptionConfiguration) {
		rawNew.DefaultEncryptionConfiguration = rawDesired.DefaultEncryptionConfiguration
	} else {
		rawNew.DefaultEncryptionConfiguration = canonicalizeNewDatasetDefaultEncryptionConfiguration(c, rawDesired.DefaultEncryptionConfiguration, rawNew.DefaultEncryptionConfiguration)
	}
	return rawNew, nil
}
// canonicalizeDatasetAccess canonicalizes a single access entry against its
// initial counterpart. The grantee fields (UserByEmail, GroupByEmail, Domain,
// SpecialGroup, IamMember, View, Routine) behave as a oneOf: if any other
// grantee field is set on the desired object, a stale grantee field is
// cleared on BOTH des and initial (this function mutates its arguments).
// After the oneOf resolution, equivalent desired values are replaced by the
// initial values so no spurious diff is produced.
func canonicalizeDatasetAccess(des, initial *DatasetAccess, opts ...dcl.ApplyOption) *DatasetAccess {
	if des == nil {
		return initial
	}
	if des.empty {
		// An explicitly-empty object is returned unchanged.
		return des
	}
	if des.UserByEmail != nil || (initial != nil && initial.UserByEmail != nil) {
		// Check if anything else is set.
		if dcl.AnySet(des.GroupByEmail, des.Domain, des.SpecialGroup, des.IamMember, des.View, des.Routine) {
			des.UserByEmail = nil
			if initial != nil {
				initial.UserByEmail = nil
			}
		}
	}
	if des.GroupByEmail != nil || (initial != nil && initial.GroupByEmail != nil) {
		// Check if anything else is set.
		if dcl.AnySet(des.UserByEmail, des.Domain, des.SpecialGroup, des.IamMember, des.View, des.Routine) {
			des.GroupByEmail = nil
			if initial != nil {
				initial.GroupByEmail = nil
			}
		}
	}
	if des.Domain != nil || (initial != nil && initial.Domain != nil) {
		// Check if anything else is set.
		if dcl.AnySet(des.UserByEmail, des.GroupByEmail, des.SpecialGroup, des.IamMember, des.View, des.Routine) {
			des.Domain = nil
			if initial != nil {
				initial.Domain = nil
			}
		}
	}
	if des.SpecialGroup != nil || (initial != nil && initial.SpecialGroup != nil) {
		// Check if anything else is set.
		if dcl.AnySet(des.UserByEmail, des.GroupByEmail, des.Domain, des.IamMember, des.View, des.Routine) {
			des.SpecialGroup = nil
			if initial != nil {
				initial.SpecialGroup = nil
			}
		}
	}
	if des.IamMember != nil || (initial != nil && initial.IamMember != nil) {
		// Check if anything else is set.
		if dcl.AnySet(des.UserByEmail, des.GroupByEmail, des.Domain, des.SpecialGroup, des.View, des.Routine) {
			des.IamMember = nil
			if initial != nil {
				initial.IamMember = nil
			}
		}
	}
	if des.View != nil || (initial != nil && initial.View != nil) {
		// Check if anything else is set.
		if dcl.AnySet(des.UserByEmail, des.GroupByEmail, des.Domain, des.SpecialGroup, des.IamMember, des.Routine) {
			des.View = nil
			if initial != nil {
				initial.View = nil
			}
		}
	}
	if des.Routine != nil || (initial != nil && initial.Routine != nil) {
		// Check if anything else is set.
		if dcl.AnySet(des.UserByEmail, des.GroupByEmail, des.Domain, des.SpecialGroup, des.IamMember, des.View) {
			des.Routine = nil
			if initial != nil {
				initial.Routine = nil
			}
		}
	}
	if initial == nil {
		// Nothing to merge against; the desired entry stands alone.
		return des
	}
	cDes := &DatasetAccess{}
	// Role uses a custom equivalence function (e.g. alias roles compare equal).
	if canonicalizeDatasetAccessRole(des.Role, initial.Role) || dcl.IsZeroValue(des.Role) {
		cDes.Role = initial.Role
	} else {
		cDes.Role = des.Role
	}
	if dcl.StringCanonicalize(des.UserByEmail, initial.UserByEmail) || dcl.IsZeroValue(des.UserByEmail) {
		cDes.UserByEmail = initial.UserByEmail
	} else {
		cDes.UserByEmail = des.UserByEmail
	}
	if dcl.StringCanonicalize(des.GroupByEmail, initial.GroupByEmail) || dcl.IsZeroValue(des.GroupByEmail) {
		cDes.GroupByEmail = initial.GroupByEmail
	} else {
		cDes.GroupByEmail = des.GroupByEmail
	}
	if dcl.StringCanonicalize(des.Domain, initial.Domain) || dcl.IsZeroValue(des.Domain) {
		cDes.Domain = initial.Domain
	} else {
		cDes.Domain = des.Domain
	}
	if dcl.StringCanonicalize(des.SpecialGroup, initial.SpecialGroup) || dcl.IsZeroValue(des.SpecialGroup) {
		cDes.SpecialGroup = initial.SpecialGroup
	} else {
		cDes.SpecialGroup = des.SpecialGroup
	}
	if dcl.StringCanonicalize(des.IamMember, initial.IamMember) || dcl.IsZeroValue(des.IamMember) {
		cDes.IamMember = initial.IamMember
	} else {
		cDes.IamMember = des.IamMember
	}
	cDes.View = canonicalizeDatasetAccessView(des.View, initial.View, opts...)
	cDes.Routine = canonicalizeDatasetAccessRoutine(des.Routine, initial.Routine, opts...)
	return cDes
}
// canonicalizeDatasetAccessSlice canonicalizes each desired access entry.
// Elements are paired positionally with initial only when the two slices have
// the same length; otherwise every entry is canonicalized against nil.
func canonicalizeDatasetAccessSlice(des, initial []DatasetAccess, opts ...dcl.ApplyOption) []DatasetAccess {
	if des == nil {
		return initial
	}
	pairwise := len(des) == len(initial)
	out := make([]DatasetAccess, 0, len(des))
	for i := range des {
		// Work on a copy so the canonicalizer's mutations do not touch des.
		d := des[i]
		var init *DatasetAccess
		if pairwise {
			init = &initial[i]
		}
		if cd := canonicalizeDatasetAccess(&d, init, opts...); cd != nil {
			out = append(out, *cd)
		}
	}
	return out
}
// canonicalizeNewDatasetAccess reconciles an access entry returned by the API
// (nw) with the desired entry (des), mutating and returning nw. Fields whose
// API values are equivalent to the desired values are overwritten with the
// desired form so downstream diffs compare like-for-like.
func canonicalizeNewDatasetAccess(c *Client, des, nw *DatasetAccess) *DatasetAccess {
	if des == nil {
		return nw
	}
	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for DatasetAccess while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		// Desired is non-empty but the API returned nothing: report absence.
		return nil
	}
	if canonicalizeDatasetAccessRole(des.Role, nw.Role) {
		nw.Role = des.Role
	}
	if dcl.StringCanonicalize(des.UserByEmail, nw.UserByEmail) {
		nw.UserByEmail = des.UserByEmail
	}
	if dcl.StringCanonicalize(des.GroupByEmail, nw.GroupByEmail) {
		nw.GroupByEmail = des.GroupByEmail
	}
	if dcl.StringCanonicalize(des.Domain, nw.Domain) {
		nw.Domain = des.Domain
	}
	if dcl.StringCanonicalize(des.SpecialGroup, nw.SpecialGroup) {
		nw.SpecialGroup = des.SpecialGroup
	}
	if dcl.StringCanonicalize(des.IamMember, nw.IamMember) {
		nw.IamMember = des.IamMember
	}
	nw.View = canonicalizeNewDatasetAccessView(c, des.View, nw.View)
	nw.Routine = canonicalizeNewDatasetAccessRoutine(c, des.Routine, nw.Routine)
	return nw
}
// canonicalizeNewDatasetAccessSet matches desired access entries to actual
// ones by equivalence (set semantics, order-insensitive). Matched actual
// elements are canonicalized and consumed; unmatched actual elements are
// appended unchanged at the end.
func canonicalizeNewDatasetAccessSet(c *Client, des, nw []DatasetAccess) []DatasetAccess {
	if des == nil {
		return nw
	}
	var out []DatasetAccess
	for i := range des {
		d := des[i]
		for j := range nw {
			diffs, _ := compareDatasetAccessNewStyle(&d, &nw[j], dcl.FieldName{})
			if len(diffs) != 0 {
				continue
			}
			out = append(out, *canonicalizeNewDatasetAccess(c, &d, &nw[j]))
			// Consume the matched element so it cannot match twice.
			nw = append(nw[:j], nw[j+1:]...)
			break
		}
	}
	return append(out, nw...)
}
// canonicalizeNewDatasetAccessSlice canonicalizes actual access entries
// against desired ones positionally. A length mismatch will surface as a diff
// later, so in that case nw is returned untouched.
func canonicalizeNewDatasetAccessSlice(c *Client, des, nw []DatasetAccess) []DatasetAccess {
	if des == nil || len(des) != len(nw) {
		return nw
	}
	var out []DatasetAccess
	for i := range des {
		d, n := des[i], nw[i]
		out = append(out, *canonicalizeNewDatasetAccess(c, &d, &n))
	}
	return out
}
// canonicalizeDatasetAccessView merges a desired view reference with its
// initial counterpart, preferring the initial value for any field whose
// desired value is zero or where both sides are effectively empty.
func canonicalizeDatasetAccessView(des, initial *DatasetAccessView, opts ...dcl.ApplyOption) *DatasetAccessView {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}
	if initial == nil {
		return des
	}
	canonical := &DatasetAccessView{}
	canonical.ProjectId = des.ProjectId
	if dcl.IsZeroValue(des.ProjectId) || (dcl.IsEmptyValueIndirect(des.ProjectId) && dcl.IsEmptyValueIndirect(initial.ProjectId)) {
		canonical.ProjectId = initial.ProjectId
	}
	canonical.DatasetId = des.DatasetId
	if dcl.IsZeroValue(des.DatasetId) || (dcl.IsEmptyValueIndirect(des.DatasetId) && dcl.IsEmptyValueIndirect(initial.DatasetId)) {
		canonical.DatasetId = initial.DatasetId
	}
	canonical.TableId = des.TableId
	if dcl.IsZeroValue(des.TableId) || (dcl.IsEmptyValueIndirect(des.TableId) && dcl.IsEmptyValueIndirect(initial.TableId)) {
		canonical.TableId = initial.TableId
	}
	return canonical
}
// canonicalizeDatasetAccessViewSlice canonicalizes each desired view
// reference, pairing positionally with initial only on equal lengths.
func canonicalizeDatasetAccessViewSlice(des, initial []DatasetAccessView, opts ...dcl.ApplyOption) []DatasetAccessView {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}
	pairwise := len(des) == len(initial)
	out := make([]DatasetAccessView, 0, len(des))
	for i := range des {
		d := des[i]
		var init *DatasetAccessView
		if pairwise {
			init = &initial[i]
		}
		if cd := canonicalizeDatasetAccessView(&d, init, opts...); cd != nil {
			out = append(out, *cd)
		}
	}
	return out
}
// canonicalizeNewDatasetAccessView reconciles the API-returned view reference
// with the desired one; the actual value wins whenever both are present.
func canonicalizeNewDatasetAccessView(c *Client, des, nw *DatasetAccessView) *DatasetAccessView {
	switch {
	case des == nil:
		return nw
	case nw == nil && dcl.IsEmptyValueIndirect(des):
		c.Config.Logger.Info("Found explicitly empty value for DatasetAccessView while comparing non-nil desired to nil actual. Returning desired object.")
		return des
	case nw == nil:
		return nil
	}
	return nw
}
// canonicalizeNewDatasetAccessViewSet matches desired view references to
// actual ones by equivalence (set semantics). Matched actual elements are
// canonicalized and consumed; leftovers are appended unchanged.
func canonicalizeNewDatasetAccessViewSet(c *Client, des, nw []DatasetAccessView) []DatasetAccessView {
	if des == nil {
		return nw
	}
	var out []DatasetAccessView
	for i := range des {
		d := des[i]
		for j := range nw {
			diffs, _ := compareDatasetAccessViewNewStyle(&d, &nw[j], dcl.FieldName{})
			if len(diffs) != 0 {
				continue
			}
			out = append(out, *canonicalizeNewDatasetAccessView(c, &d, &nw[j]))
			// Consume the matched element so it cannot match twice.
			nw = append(nw[:j], nw[j+1:]...)
			break
		}
	}
	return append(out, nw...)
}
// canonicalizeNewDatasetAccessViewSlice canonicalizes actual view references
// against desired ones positionally; a length mismatch leaves nw untouched
// so the later diff can report it.
func canonicalizeNewDatasetAccessViewSlice(c *Client, des, nw []DatasetAccessView) []DatasetAccessView {
	if des == nil || len(des) != len(nw) {
		return nw
	}
	var out []DatasetAccessView
	for i := range des {
		d, n := des[i], nw[i]
		out = append(out, *canonicalizeNewDatasetAccessView(c, &d, &n))
	}
	return out
}
// canonicalizeDatasetAccessRoutine merges a desired routine reference with
// its initial counterpart, preferring the initial value for any field whose
// desired value is zero or where both sides are effectively empty.
func canonicalizeDatasetAccessRoutine(des, initial *DatasetAccessRoutine, opts ...dcl.ApplyOption) *DatasetAccessRoutine {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}
	if initial == nil {
		return des
	}
	canonical := &DatasetAccessRoutine{}
	canonical.ProjectId = des.ProjectId
	if dcl.IsZeroValue(des.ProjectId) || (dcl.IsEmptyValueIndirect(des.ProjectId) && dcl.IsEmptyValueIndirect(initial.ProjectId)) {
		canonical.ProjectId = initial.ProjectId
	}
	canonical.DatasetId = des.DatasetId
	if dcl.IsZeroValue(des.DatasetId) || (dcl.IsEmptyValueIndirect(des.DatasetId) && dcl.IsEmptyValueIndirect(initial.DatasetId)) {
		canonical.DatasetId = initial.DatasetId
	}
	canonical.RoutineId = des.RoutineId
	if dcl.IsZeroValue(des.RoutineId) || (dcl.IsEmptyValueIndirect(des.RoutineId) && dcl.IsEmptyValueIndirect(initial.RoutineId)) {
		canonical.RoutineId = initial.RoutineId
	}
	return canonical
}
// canonicalizeDatasetAccessRoutineSlice canonicalizes each desired routine
// reference, pairing positionally with initial only on equal lengths.
func canonicalizeDatasetAccessRoutineSlice(des, initial []DatasetAccessRoutine, opts ...dcl.ApplyOption) []DatasetAccessRoutine {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}
	pairwise := len(des) == len(initial)
	out := make([]DatasetAccessRoutine, 0, len(des))
	for i := range des {
		d := des[i]
		var init *DatasetAccessRoutine
		if pairwise {
			init = &initial[i]
		}
		if cd := canonicalizeDatasetAccessRoutine(&d, init, opts...); cd != nil {
			out = append(out, *cd)
		}
	}
	return out
}
// canonicalizeNewDatasetAccessRoutine reconciles the API-returned routine
// reference with the desired one; the actual value wins when both exist.
func canonicalizeNewDatasetAccessRoutine(c *Client, des, nw *DatasetAccessRoutine) *DatasetAccessRoutine {
	switch {
	case des == nil:
		return nw
	case nw == nil && dcl.IsEmptyValueIndirect(des):
		c.Config.Logger.Info("Found explicitly empty value for DatasetAccessRoutine while comparing non-nil desired to nil actual. Returning desired object.")
		return des
	case nw == nil:
		return nil
	}
	return nw
}
// canonicalizeNewDatasetAccessRoutineSet matches desired routine references
// to actual ones by equivalence (set semantics). Matched actual elements are
// canonicalized and consumed; leftovers are appended unchanged.
func canonicalizeNewDatasetAccessRoutineSet(c *Client, des, nw []DatasetAccessRoutine) []DatasetAccessRoutine {
	if des == nil {
		return nw
	}
	var out []DatasetAccessRoutine
	for i := range des {
		d := des[i]
		for j := range nw {
			diffs, _ := compareDatasetAccessRoutineNewStyle(&d, &nw[j], dcl.FieldName{})
			if len(diffs) != 0 {
				continue
			}
			out = append(out, *canonicalizeNewDatasetAccessRoutine(c, &d, &nw[j]))
			// Consume the matched element so it cannot match twice.
			nw = append(nw[:j], nw[j+1:]...)
			break
		}
	}
	return append(out, nw...)
}
// canonicalizeNewDatasetAccessRoutineSlice canonicalizes actual routine
// references against desired ones positionally; a length mismatch leaves nw
// untouched so the later diff can report it.
func canonicalizeNewDatasetAccessRoutineSlice(c *Client, des, nw []DatasetAccessRoutine) []DatasetAccessRoutine {
	if des == nil || len(des) != len(nw) {
		return nw
	}
	var out []DatasetAccessRoutine
	for i := range des {
		d, n := des[i], nw[i]
		out = append(out, *canonicalizeNewDatasetAccessRoutine(c, &d, &n))
	}
	return out
}
// canonicalizeDatasetDefaultEncryptionConfiguration merges the desired
// default-encryption config with its initial counterpart, preferring the
// initial KMS key when the desired one is zero or both are effectively empty.
func canonicalizeDatasetDefaultEncryptionConfiguration(des, initial *DatasetDefaultEncryptionConfiguration, opts ...dcl.ApplyOption) *DatasetDefaultEncryptionConfiguration {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}
	if initial == nil {
		return des
	}
	canonical := &DatasetDefaultEncryptionConfiguration{}
	canonical.KmsKeyName = des.KmsKeyName
	if dcl.IsZeroValue(des.KmsKeyName) || (dcl.IsEmptyValueIndirect(des.KmsKeyName) && dcl.IsEmptyValueIndirect(initial.KmsKeyName)) {
		canonical.KmsKeyName = initial.KmsKeyName
	}
	return canonical
}
// canonicalizeDatasetDefaultEncryptionConfigurationSlice canonicalizes each
// desired config, pairing positionally with initial only on equal lengths.
func canonicalizeDatasetDefaultEncryptionConfigurationSlice(des, initial []DatasetDefaultEncryptionConfiguration, opts ...dcl.ApplyOption) []DatasetDefaultEncryptionConfiguration {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}
	pairwise := len(des) == len(initial)
	out := make([]DatasetDefaultEncryptionConfiguration, 0, len(des))
	for i := range des {
		d := des[i]
		var init *DatasetDefaultEncryptionConfiguration
		if pairwise {
			init = &initial[i]
		}
		if cd := canonicalizeDatasetDefaultEncryptionConfiguration(&d, init, opts...); cd != nil {
			out = append(out, *cd)
		}
	}
	return out
}
// canonicalizeNewDatasetDefaultEncryptionConfiguration reconciles the
// API-returned config with the desired one; the actual value wins when both
// are present.
func canonicalizeNewDatasetDefaultEncryptionConfiguration(c *Client, des, nw *DatasetDefaultEncryptionConfiguration) *DatasetDefaultEncryptionConfiguration {
	switch {
	case des == nil:
		return nw
	case nw == nil && dcl.IsEmptyValueIndirect(des):
		c.Config.Logger.Info("Found explicitly empty value for DatasetDefaultEncryptionConfiguration while comparing non-nil desired to nil actual. Returning desired object.")
		return des
	case nw == nil:
		return nil
	}
	return nw
}
// canonicalizeNewDatasetDefaultEncryptionConfigurationSet matches desired
// configs to actual ones by equivalence (set semantics). Matched actual
// elements are canonicalized and consumed; leftovers are appended unchanged.
func canonicalizeNewDatasetDefaultEncryptionConfigurationSet(c *Client, des, nw []DatasetDefaultEncryptionConfiguration) []DatasetDefaultEncryptionConfiguration {
	if des == nil {
		return nw
	}
	var out []DatasetDefaultEncryptionConfiguration
	for i := range des {
		d := des[i]
		for j := range nw {
			diffs, _ := compareDatasetDefaultEncryptionConfigurationNewStyle(&d, &nw[j], dcl.FieldName{})
			if len(diffs) != 0 {
				continue
			}
			out = append(out, *canonicalizeNewDatasetDefaultEncryptionConfiguration(c, &d, &nw[j]))
			// Consume the matched element so it cannot match twice.
			nw = append(nw[:j], nw[j+1:]...)
			break
		}
	}
	return append(out, nw...)
}
// canonicalizeNewDatasetDefaultEncryptionConfigurationSlice canonicalizes
// actual configs against desired ones positionally; a length mismatch leaves
// nw untouched so the later diff can report it.
func canonicalizeNewDatasetDefaultEncryptionConfigurationSlice(c *Client, des, nw []DatasetDefaultEncryptionConfiguration) []DatasetDefaultEncryptionConfiguration {
	if des == nil || len(des) != len(nw) {
		return nw
	}
	var out []DatasetDefaultEncryptionConfiguration
	for i := range des {
		d, n := des[i], nw[i]
		out = append(out, *canonicalizeNewDatasetDefaultEncryptionConfiguration(c, &d, &n))
	}
	return out
}
// The differ returns a list of diffs, along with a list of operations that should be taken
// to remedy them. Right now, it does not attempt to consolidate operations - if several
// fields can be fixed with a patch update, it will perform the patch several times.
// Diffs on some fields will be ignored if the `desired` state has an empty (nil)
// value. This empty value indicates that the user does not care about the state for
// the field. Empty fields on the actual object will cause diffs.
// TODO(magic-modules-eng): for efficiency in some resources, add batching.
// diffDataset compares the canonicalized desired and actual states field by
// field via dcl.Diff and returns the accumulated FieldDiffs, each annotated
// with the operation (recreate or patch) that would remedy it.
func diffDataset(c *Client, desired, actual *Dataset, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) {
	if desired == nil || actual == nil {
		return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual)
	}
	c.Config.Logger.Infof("Diff function called with desired state: %v", desired)
	c.Config.Logger.Infof("Diff function called with actual state: %v", actual)
	var fn dcl.FieldName
	var newDiffs []*dcl.FieldDiff
	// New style diffs.
	// Output-only fields (Etag, Id, SelfLink, CreationTime, LastModifiedTime)
	// are diffed with OutputOnly so user config never conflicts with them.
	if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Id, actual.Id, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Id")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.SelfLink, actual.SelfLink, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SelfLink")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	// Mutable fields trigger a patch operation instead of a recreate.
	if ds, err := dcl.Diff(desired.FriendlyName, actual.FriendlyName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDatasetPatchDatasetOperation")}, fn.AddNest("FriendlyName")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDatasetPatchDatasetOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.DefaultTableExpirationMs, actual.DefaultTableExpirationMs, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDatasetPatchDatasetOperation")}, fn.AddNest("DefaultTableExpirationMs")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.DefaultPartitionExpirationMs, actual.DefaultPartitionExpirationMs, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDatasetPatchDatasetOperation")}, fn.AddNest("DefaultPartitionExpirationMs")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDatasetPatchDatasetOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	// Access is compared with set semantics: element order is ignored.
	if ds, err := dcl.Diff(desired.Access, actual.Access, dcl.DiffInfo{Type: "Set", ObjectFunction: compareDatasetAccessNewStyle, EmptyObject: EmptyDatasetAccess, OperationSelector: dcl.TriggersOperation("updateDatasetPatchDatasetOperation")}, fn.AddNest("Access")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.CreationTime, actual.CreationTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreationTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.LastModifiedTime, actual.LastModifiedTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LastModifiedTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Published, actual.Published, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Published")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.DefaultEncryptionConfiguration, actual.DefaultEncryptionConfiguration, dcl.DiffInfo{ObjectFunction: compareDatasetDefaultEncryptionConfigurationNewStyle, EmptyObject: EmptyDatasetDefaultEncryptionConfiguration, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DefaultEncryptionConfiguration")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if len(newDiffs) > 0 {
		c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs)
	}
	return newDiffs, nil
}
// compareDatasetAccessNewStyle is the dcl.Diff ObjectFunction for
// DatasetAccess. It accepts either *DatasetAccess or DatasetAccess values and
// returns the per-field diffs between the desired and actual objects.
func compareDatasetAccessNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff
	// Accept both pointer and value forms for the desired object.
	desired, ok := d.(*DatasetAccess)
	if !ok {
		desiredNotPointer, ok := d.(DatasetAccess)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DatasetAccess or *DatasetAccess", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DatasetAccess)
	if !ok {
		actualNotPointer, ok := a.(DatasetAccess)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DatasetAccess", a)
		}
		actual = &actualNotPointer
	}
	// Role uses a custom equivalence function so aliased roles compare equal.
	if ds, err := dcl.Diff(desired.Role, actual.Role, dcl.DiffInfo{CustomDiff: canonicalizeDatasetAccessRole, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Role")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	if ds, err := dcl.Diff(desired.UserByEmail, actual.UserByEmail, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UserByEmail")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	if ds, err := dcl.Diff(desired.GroupByEmail, actual.GroupByEmail, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GroupByEmail")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Domain, actual.Domain, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Domain")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	if ds, err := dcl.Diff(desired.SpecialGroup, actual.SpecialGroup, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SpecialGroup")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	if ds, err := dcl.Diff(desired.IamMember, actual.IamMember, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IamMember")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	if ds, err := dcl.Diff(desired.View, actual.View, dcl.DiffInfo{ObjectFunction: compareDatasetAccessViewNewStyle, EmptyObject: EmptyDatasetAccessView, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("View")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Routine, actual.Routine, dcl.DiffInfo{ObjectFunction: compareDatasetAccessRoutineNewStyle, EmptyObject: EmptyDatasetAccessRoutine, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Routine")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}
// compareDatasetAccessViewNewStyle is the dcl.Diff ObjectFunction for
// DatasetAccessView. It accepts either *DatasetAccessView or
// DatasetAccessView values and returns the per-field diffs.
func compareDatasetAccessViewNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff
	// Accept both pointer and value forms for the desired object.
	desired, ok := d.(*DatasetAccessView)
	if !ok {
		desiredNotPointer, ok := d.(DatasetAccessView)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DatasetAccessView or *DatasetAccessView", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DatasetAccessView)
	if !ok {
		actualNotPointer, ok := a.(DatasetAccessView)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DatasetAccessView", a)
		}
		actual = &actualNotPointer
	}
	// All fields are resource references and compared as ReferenceType.
	if ds, err := dcl.Diff(desired.ProjectId, actual.ProjectId, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ProjectId")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	if ds, err := dcl.Diff(desired.DatasetId, actual.DatasetId, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DatasetId")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	if ds, err := dcl.Diff(desired.TableId, actual.TableId, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TableId")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}
// compareDatasetAccessRoutineNewStyle is the dcl.Diff ObjectFunction for
// DatasetAccessRoutine. It accepts either *DatasetAccessRoutine or
// DatasetAccessRoutine values and returns the per-field diffs.
func compareDatasetAccessRoutineNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff
	// Accept both pointer and value forms for the desired object.
	desired, ok := d.(*DatasetAccessRoutine)
	if !ok {
		desiredNotPointer, ok := d.(DatasetAccessRoutine)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DatasetAccessRoutine or *DatasetAccessRoutine", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DatasetAccessRoutine)
	if !ok {
		actualNotPointer, ok := a.(DatasetAccessRoutine)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DatasetAccessRoutine", a)
		}
		actual = &actualNotPointer
	}
	// All fields are resource references and compared as ReferenceType.
	if ds, err := dcl.Diff(desired.ProjectId, actual.ProjectId, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ProjectId")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	if ds, err := dcl.Diff(desired.DatasetId, actual.DatasetId, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DatasetId")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	if ds, err := dcl.Diff(desired.RoutineId, actual.RoutineId, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RoutineId")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}
// compareDatasetDefaultEncryptionConfigurationNewStyle computes field-level
// diffs between a desired and an actual DatasetDefaultEncryptionConfiguration.
// Either argument may be passed as a value or a pointer; anything else is an
// error. A KmsKeyName change requires recreating the resource.
func compareDatasetDefaultEncryptionConfigurationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	// Normalize the desired object to a pointer, accepting either form.
	desired, ok := d.(*DatasetDefaultEncryptionConfiguration)
	if !ok {
		desiredNotPointer, ok := d.(DatasetDefaultEncryptionConfiguration)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DatasetDefaultEncryptionConfiguration or *DatasetDefaultEncryptionConfiguration", d)
		}
		desired = &desiredNotPointer
	}
	// Normalize the actual object the same way.
	actual, ok := a.(*DatasetDefaultEncryptionConfiguration)
	if !ok {
		actualNotPointer, ok := a.(DatasetDefaultEncryptionConfiguration)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DatasetDefaultEncryptionConfiguration", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.KmsKeyName, actual.KmsKeyName, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KmsKeyName")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}
// urlNormalized returns a copy of the resource struct with values normalized
// for URL substitutions. For instance, it converts long-form self-links to
// short-form so they can be substituted in.
func (r *Dataset) urlNormalized() *Dataset {
	// Deep-copy first so the caller's resource is never mutated.
	normalized := dcl.Copy(*r).(Dataset)
	normalized.Etag = dcl.SelfLinkToName(r.Etag)
	normalized.Id = dcl.SelfLinkToName(r.Id)
	normalized.SelfLink = dcl.SelfLinkToName(r.SelfLink)
	normalized.Name = dcl.SelfLinkToName(r.Name)
	normalized.Project = dcl.SelfLinkToName(r.Project)
	normalized.FriendlyName = dcl.SelfLinkToName(r.FriendlyName)
	normalized.Description = dcl.SelfLinkToName(r.Description)
	normalized.DefaultTableExpirationMs = dcl.SelfLinkToName(r.DefaultTableExpirationMs)
	normalized.DefaultPartitionExpirationMs = dcl.SelfLinkToName(r.DefaultPartitionExpirationMs)
	normalized.Location = dcl.SelfLinkToName(r.Location)
	return &normalized
}
// updateURL returns the request URL for the named update method of this
// Dataset, substituting the resource's normalized identity fields into the
// method's URL template. Unknown update names yield an error.
func (r *Dataset) updateURL(userBasePath, updateName string) (string, error) {
	n := r.urlNormalized()
	switch updateName {
	case "PatchDataset":
		params := map[string]interface{}{
			"project": dcl.ValueOrEmptyString(n.Project),
			"name":    dcl.ValueOrEmptyString(n.Name),
		}
		return dcl.URL("projects/{{project}}/datasets/{{name}}", n.basePath(), userBasePath, params), nil
	}
	return "", fmt.Errorf("unknown update name: %s", updateName)
}
// marshal encodes the Dataset resource into JSON for a Create request, and
// performs transformations from the resource schema to the API schema if
// necessary.
func (r *Dataset) marshal(c *Client) ([]byte, error) {
	m, err := expandDataset(c, r)
	if err != nil {
		return nil, fmt.Errorf("error marshalling Dataset: %w", err)
	}

	// The API nests identity under datasetReference: move the flat resource
	// fields "name" and "project" into datasetReference.{datasetId,projectId}.
	// unmarshalMapDataset performs the inverse mapping.
	dcl.MoveMapEntry(
		m,
		[]string{"name"},
		[]string{"datasetReference", "datasetId"},
	)
	dcl.MoveMapEntry(
		m,
		[]string{"project"},
		[]string{"datasetReference", "projectId"},
	)
	return json.Marshal(m)
}
// unmarshalDataset decodes a raw JSON response body into the Dataset resource
// schema by first decoding into a generic map and then delegating to
// unmarshalMapDataset for the schema transformation.
func unmarshalDataset(b []byte, c *Client, res *Dataset) (*Dataset, error) {
	m := make(map[string]interface{})
	err := json.Unmarshal(b, &m)
	if err != nil {
		return nil, err
	}
	return unmarshalMapDataset(m, c, res)
}
// unmarshalMapDataset converts a decoded JSON response map into the Dataset
// resource schema. It is the inverse of marshal: the API's nested
// datasetReference.{datasetId,projectId} fields are moved back to the flat
// resource fields "name" and "project" before flattening.
func unmarshalMapDataset(m map[string]interface{}, c *Client, res *Dataset) (*Dataset, error) {
	dcl.MoveMapEntry(
		m,
		[]string{"datasetReference", "datasetId"},
		[]string{"name"},
	)
	dcl.MoveMapEntry(
		m,
		[]string{"datasetReference", "projectId"},
		[]string{"project"},
	)
	flattened := flattenDataset(c, m, res)
	if flattened == nil {
		return nil, fmt.Errorf("attempted to flatten empty json object")
	}
	return flattened, nil
}
// expandDataset expands Dataset into a JSON request object. Only fields that
// should be sent (per dcl.ValueShouldBeSent) or that expand to non-empty
// objects are included, so the request omits unset values.
func expandDataset(c *Client, f *Dataset) (map[string]interface{}, error) {
	m := make(map[string]interface{})
	res := f
	_ = res
	if v := f.Name; dcl.ValueShouldBeSent(v) {
		m["name"] = v
	}
	if v := f.Project; dcl.ValueShouldBeSent(v) {
		m["project"] = v
	}
	if v := f.FriendlyName; dcl.ValueShouldBeSent(v) {
		m["friendlyName"] = v
	}
	if v := f.Description; dcl.ValueShouldBeSent(v) {
		m["description"] = v
	}
	if v := f.DefaultTableExpirationMs; dcl.ValueShouldBeSent(v) {
		m["defaultTableExpirationMs"] = v
	}
	if v := f.DefaultPartitionExpirationMs; dcl.ValueShouldBeSent(v) {
		m["defaultPartitionExpirationMs"] = v
	}
	if v := f.Labels; dcl.ValueShouldBeSent(v) {
		m["labels"] = v
	}
	// Nested objects are expanded recursively; an error aborts the expansion.
	if v, err := expandDatasetAccessSlice(c, f.Access, res); err != nil {
		return nil, fmt.Errorf("error expanding Access into access: %w", err)
	} else if v != nil {
		m["access"] = v
	}
	if v := f.Location; dcl.ValueShouldBeSent(v) {
		m["location"] = v
	}
	if v := f.Published; dcl.ValueShouldBeSent(v) {
		m["published"] = v
	}
	if v, err := expandDatasetDefaultEncryptionConfiguration(c, f.DefaultEncryptionConfiguration, res); err != nil {
		return nil, fmt.Errorf("error expanding DefaultEncryptionConfiguration into defaultEncryptionConfiguration: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["defaultEncryptionConfiguration"] = v
	}
	return m, nil
}
// flattenDataset flattens Dataset from a JSON request object into the
// Dataset type. It returns nil when the input is not a non-empty JSON object.
func flattenDataset(c *Client, i interface{}, res *Dataset) *Dataset {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}
	if len(m) == 0 {
		return nil
	}

	resultRes := &Dataset{}
	// Output-only fields (etag, id, selfLink, creationTime, lastModifiedTime)
	// are flattened here even though expandDataset never sends them.
	resultRes.Etag = dcl.FlattenString(m["etag"])
	resultRes.Id = dcl.FlattenString(m["id"])
	resultRes.SelfLink = dcl.FlattenString(m["selfLink"])
	resultRes.Name = dcl.FlattenString(m["name"])
	resultRes.Project = dcl.FlattenString(m["project"])
	resultRes.FriendlyName = dcl.FlattenString(m["friendlyName"])
	resultRes.Description = dcl.FlattenString(m["description"])
	resultRes.DefaultTableExpirationMs = dcl.FlattenString(m["defaultTableExpirationMs"])
	resultRes.DefaultPartitionExpirationMs = dcl.FlattenString(m["defaultPartitionExpirationMs"])
	resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"])
	resultRes.Access = flattenDatasetAccessSlice(c, m["access"], res)
	resultRes.CreationTime = dcl.FlattenInteger(m["creationTime"])
	resultRes.LastModifiedTime = dcl.FlattenInteger(m["lastModifiedTime"])
	resultRes.Location = dcl.FlattenString(m["location"])
	resultRes.Published = dcl.FlattenBool(m["published"])
	resultRes.DefaultEncryptionConfiguration = flattenDatasetDefaultEncryptionConfiguration(c, m["defaultEncryptionConfiguration"], res)
	return resultRes
}
// expandDatasetAccessMap expands the contents of DatasetAccess into a JSON
// request object. A nil input expands to nil; nil expansions of individual
// entries are dropped.
func expandDatasetAccessMap(c *Client, f map[string]DatasetAccess, res *Dataset) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandDatasetAccess(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandDatasetAccessSlice expands the contents of DatasetAccess into a JSON
// request object. A nil input expands to nil; element order is preserved.
func expandDatasetAccessSlice(c *Client, f []DatasetAccess, res *Dataset) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandDatasetAccess(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenDatasetAccessMap flattens the contents of DatasetAccess from a JSON
// response object. Returns an empty (non-nil) map when the input is absent,
// not a map, or empty.
func flattenDatasetAccessMap(c *Client, i interface{}, res *Dataset) map[string]DatasetAccess {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]DatasetAccess{}
	}

	if len(a) == 0 {
		return map[string]DatasetAccess{}
	}

	items := make(map[string]DatasetAccess)
	for k, item := range a {
		// NOTE(review): the type assertion panics if an entry is not a JSON
		// object — well-formed API responses are assumed.
		items[k] = *flattenDatasetAccess(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenDatasetAccessSlice flattens the contents of DatasetAccess from a JSON
// response object. Returns an empty (non-nil) slice when the input is absent,
// not a list, or empty.
func flattenDatasetAccessSlice(c *Client, i interface{}, res *Dataset) []DatasetAccess {
	a, ok := i.([]interface{})
	if !ok {
		return []DatasetAccess{}
	}

	if len(a) == 0 {
		return []DatasetAccess{}
	}

	items := make([]DatasetAccess, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenDatasetAccess(c, item.(map[string]interface{}), res))
	}

	return items
}
// expandDatasetAccess expands an instance of DatasetAccess into a JSON
// request object. A nil input expands to nil; empty fields are omitted from
// the resulting object.
func expandDatasetAccess(c *Client, f *DatasetAccess, res *Dataset) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.Role; !dcl.IsEmptyValueIndirect(v) {
		m["role"] = v
	}
	if v := f.UserByEmail; !dcl.IsEmptyValueIndirect(v) {
		m["userByEmail"] = v
	}
	if v := f.GroupByEmail; !dcl.IsEmptyValueIndirect(v) {
		m["groupByEmail"] = v
	}
	if v := f.Domain; !dcl.IsEmptyValueIndirect(v) {
		m["domain"] = v
	}
	if v := f.SpecialGroup; !dcl.IsEmptyValueIndirect(v) {
		m["specialGroup"] = v
	}
	if v := f.IamMember; !dcl.IsEmptyValueIndirect(v) {
		m["iamMember"] = v
	}
	// Nested grantee objects are expanded recursively and dropped when empty.
	if v, err := expandDatasetAccessView(c, f.View, res); err != nil {
		return nil, fmt.Errorf("error expanding View into view: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["view"] = v
	}
	if v, err := expandDatasetAccessRoutine(c, f.Routine, res); err != nil {
		return nil, fmt.Errorf("error expanding Routine into routine: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["routine"] = v
	}

	return m, nil
}
// flattenDatasetAccess flattens an instance of DatasetAccess from a JSON
// response object. Returns nil when the input is not a JSON object, and the
// shared EmptyDatasetAccess sentinel when the input is an empty value.
func flattenDatasetAccess(c *Client, i interface{}, res *Dataset) *DatasetAccess {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &DatasetAccess{}

	// Empty values collapse to the canonical sentinel rather than a fresh
	// zero-valued struct.
	if dcl.IsEmptyValueIndirect(i) {
		return EmptyDatasetAccess
	}
	r.Role = dcl.FlattenString(m["role"])
	r.UserByEmail = dcl.FlattenString(m["userByEmail"])
	r.GroupByEmail = dcl.FlattenString(m["groupByEmail"])
	r.Domain = dcl.FlattenString(m["domain"])
	r.SpecialGroup = dcl.FlattenString(m["specialGroup"])
	r.IamMember = dcl.FlattenString(m["iamMember"])
	r.View = flattenDatasetAccessView(c, m["view"], res)
	r.Routine = flattenDatasetAccessRoutine(c, m["routine"], res)

	return r
}
// expandDatasetAccessViewMap expands the contents of DatasetAccessView into a
// JSON request object. A nil input expands to nil; nil entry expansions are
// dropped.
func expandDatasetAccessViewMap(c *Client, f map[string]DatasetAccessView, res *Dataset) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandDatasetAccessView(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandDatasetAccessViewSlice expands the contents of DatasetAccessView into a
// JSON request object. A nil input expands to nil; element order is preserved.
func expandDatasetAccessViewSlice(c *Client, f []DatasetAccessView, res *Dataset) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandDatasetAccessView(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenDatasetAccessViewMap flattens the contents of DatasetAccessView from a
// JSON response object. Returns an empty (non-nil) map for absent, non-map, or
// empty input.
func flattenDatasetAccessViewMap(c *Client, i interface{}, res *Dataset) map[string]DatasetAccessView {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]DatasetAccessView{}
	}

	if len(a) == 0 {
		return map[string]DatasetAccessView{}
	}

	items := make(map[string]DatasetAccessView)
	for k, item := range a {
		items[k] = *flattenDatasetAccessView(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenDatasetAccessViewSlice flattens the contents of DatasetAccessView from
// a JSON response object. Returns an empty (non-nil) slice for absent,
// non-list, or empty input.
func flattenDatasetAccessViewSlice(c *Client, i interface{}, res *Dataset) []DatasetAccessView {
	a, ok := i.([]interface{})
	if !ok {
		return []DatasetAccessView{}
	}

	if len(a) == 0 {
		return []DatasetAccessView{}
	}

	items := make([]DatasetAccessView, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenDatasetAccessView(c, item.(map[string]interface{}), res))
	}

	return items
}
// expandDatasetAccessView expands an instance of DatasetAccessView into a JSON
// request object. An empty view expands to nil so it is omitted from the
// request; empty fields are likewise omitted.
func expandDatasetAccessView(c *Client, f *DatasetAccessView, res *Dataset) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}
	out := map[string]interface{}{}
	if projectID := f.ProjectId; !dcl.IsEmptyValueIndirect(projectID) {
		out["projectId"] = projectID
	}
	if datasetID := f.DatasetId; !dcl.IsEmptyValueIndirect(datasetID) {
		out["datasetId"] = datasetID
	}
	if tableID := f.TableId; !dcl.IsEmptyValueIndirect(tableID) {
		out["tableId"] = tableID
	}
	return out, nil
}
// flattenDatasetAccessView flattens an instance of DatasetAccessView from a
// JSON response object. Returns nil for non-object input and the shared
// EmptyDatasetAccessView sentinel for an empty value.
func flattenDatasetAccessView(c *Client, i interface{}, res *Dataset) *DatasetAccessView {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &DatasetAccessView{}

	if dcl.IsEmptyValueIndirect(i) {
		return EmptyDatasetAccessView
	}
	r.ProjectId = dcl.FlattenString(m["projectId"])
	r.DatasetId = dcl.FlattenString(m["datasetId"])
	r.TableId = dcl.FlattenString(m["tableId"])

	return r
}
// expandDatasetAccessRoutineMap expands the contents of DatasetAccessRoutine
// into a JSON request object. A nil input expands to nil; nil entry expansions
// are dropped.
func expandDatasetAccessRoutineMap(c *Client, f map[string]DatasetAccessRoutine, res *Dataset) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandDatasetAccessRoutine(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandDatasetAccessRoutineSlice expands the contents of DatasetAccessRoutine
// into a JSON request object. A nil input expands to nil; element order is
// preserved.
func expandDatasetAccessRoutineSlice(c *Client, f []DatasetAccessRoutine, res *Dataset) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandDatasetAccessRoutine(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenDatasetAccessRoutineMap flattens the contents of DatasetAccessRoutine
// from a JSON response object. Returns an empty (non-nil) map for absent,
// non-map, or empty input.
func flattenDatasetAccessRoutineMap(c *Client, i interface{}, res *Dataset) map[string]DatasetAccessRoutine {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]DatasetAccessRoutine{}
	}

	if len(a) == 0 {
		return map[string]DatasetAccessRoutine{}
	}

	items := make(map[string]DatasetAccessRoutine)
	for k, item := range a {
		items[k] = *flattenDatasetAccessRoutine(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenDatasetAccessRoutineSlice flattens the contents of
// DatasetAccessRoutine from a JSON response object. Returns an empty (non-nil)
// slice for absent, non-list, or empty input.
func flattenDatasetAccessRoutineSlice(c *Client, i interface{}, res *Dataset) []DatasetAccessRoutine {
	a, ok := i.([]interface{})
	if !ok {
		return []DatasetAccessRoutine{}
	}

	if len(a) == 0 {
		return []DatasetAccessRoutine{}
	}

	items := make([]DatasetAccessRoutine, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenDatasetAccessRoutine(c, item.(map[string]interface{}), res))
	}

	return items
}
// expandDatasetAccessRoutine expands an instance of DatasetAccessRoutine into a
// JSON request object. An empty routine expands to nil so it is omitted from
// the request; empty fields are likewise omitted.
func expandDatasetAccessRoutine(c *Client, f *DatasetAccessRoutine, res *Dataset) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}
	out := map[string]interface{}{}
	if projectID := f.ProjectId; !dcl.IsEmptyValueIndirect(projectID) {
		out["projectId"] = projectID
	}
	if datasetID := f.DatasetId; !dcl.IsEmptyValueIndirect(datasetID) {
		out["datasetId"] = datasetID
	}
	if routineID := f.RoutineId; !dcl.IsEmptyValueIndirect(routineID) {
		out["routineId"] = routineID
	}
	return out, nil
}
// flattenDatasetAccessRoutine flattens an instance of DatasetAccessRoutine from
// a JSON response object. Returns nil for non-object input and the shared
// EmptyDatasetAccessRoutine sentinel for an empty value.
func flattenDatasetAccessRoutine(c *Client, i interface{}, res *Dataset) *DatasetAccessRoutine {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &DatasetAccessRoutine{}

	if dcl.IsEmptyValueIndirect(i) {
		return EmptyDatasetAccessRoutine
	}
	r.ProjectId = dcl.FlattenString(m["projectId"])
	r.DatasetId = dcl.FlattenString(m["datasetId"])
	r.RoutineId = dcl.FlattenString(m["routineId"])

	return r
}
// expandDatasetDefaultEncryptionConfigurationMap expands the contents of
// DatasetDefaultEncryptionConfiguration into a JSON request object. A nil
// input expands to nil; nil entry expansions are dropped.
func expandDatasetDefaultEncryptionConfigurationMap(c *Client, f map[string]DatasetDefaultEncryptionConfiguration, res *Dataset) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandDatasetDefaultEncryptionConfiguration(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandDatasetDefaultEncryptionConfigurationSlice expands the contents of
// DatasetDefaultEncryptionConfiguration into a JSON request object. A nil
// input expands to nil; element order is preserved.
func expandDatasetDefaultEncryptionConfigurationSlice(c *Client, f []DatasetDefaultEncryptionConfiguration, res *Dataset) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandDatasetDefaultEncryptionConfiguration(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenDatasetDefaultEncryptionConfigurationMap flattens the contents of
// DatasetDefaultEncryptionConfiguration from a JSON response object. Returns an
// empty (non-nil) map for absent, non-map, or empty input.
func flattenDatasetDefaultEncryptionConfigurationMap(c *Client, i interface{}, res *Dataset) map[string]DatasetDefaultEncryptionConfiguration {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]DatasetDefaultEncryptionConfiguration{}
	}

	if len(a) == 0 {
		return map[string]DatasetDefaultEncryptionConfiguration{}
	}

	items := make(map[string]DatasetDefaultEncryptionConfiguration)
	for k, item := range a {
		items[k] = *flattenDatasetDefaultEncryptionConfiguration(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenDatasetDefaultEncryptionConfigurationSlice flattens the contents of
// DatasetDefaultEncryptionConfiguration from a JSON response object. Returns an
// empty (non-nil) slice for absent, non-list, or empty input.
func flattenDatasetDefaultEncryptionConfigurationSlice(c *Client, i interface{}, res *Dataset) []DatasetDefaultEncryptionConfiguration {
	a, ok := i.([]interface{})
	if !ok {
		return []DatasetDefaultEncryptionConfiguration{}
	}

	if len(a) == 0 {
		return []DatasetDefaultEncryptionConfiguration{}
	}

	items := make([]DatasetDefaultEncryptionConfiguration, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenDatasetDefaultEncryptionConfiguration(c, item.(map[string]interface{}), res))
	}

	return items
}
// expandDatasetDefaultEncryptionConfiguration expands an instance of
// DatasetDefaultEncryptionConfiguration into a JSON request object. An empty
// configuration expands to nil so it is omitted from the request.
func expandDatasetDefaultEncryptionConfiguration(c *Client, f *DatasetDefaultEncryptionConfiguration, res *Dataset) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}
	out := map[string]interface{}{}
	if key := f.KmsKeyName; !dcl.IsEmptyValueIndirect(key) {
		out["kmsKeyName"] = key
	}
	return out, nil
}
// flattenDatasetDefaultEncryptionConfiguration flattens an instance of
// DatasetDefaultEncryptionConfiguration from a JSON response object. Returns
// nil for non-object input and the shared
// EmptyDatasetDefaultEncryptionConfiguration sentinel for an empty value.
func flattenDatasetDefaultEncryptionConfiguration(c *Client, i interface{}, res *Dataset) *DatasetDefaultEncryptionConfiguration {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &DatasetDefaultEncryptionConfiguration{}

	if dcl.IsEmptyValueIndirect(i) {
		return EmptyDatasetDefaultEncryptionConfiguration
	}
	r.KmsKeyName = dcl.FlattenString(m["kmsKeyName"])

	return r
}
// This function returns a matcher that checks whether a serialized resource matches this resource
// in its parameters (as defined by the fields in a Get, which definitionally define resource
// identity). This is useful in extracting the element from a List call.
func (r *Dataset) matcher(c *Client) func([]byte) bool {
	return func(b []byte) bool {
		cr, err := unmarshalDataset(b, c, r)
		if err != nil {
			c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.")
			return false
		}
		nr := r.urlNormalized()
		ncr := cr.urlNormalized()
		c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr)

		// Project: both nil is equal; exactly one nil is unequal; otherwise
		// compare the dereferenced values.
		if nr.Project == nil && ncr.Project == nil {
			c.Config.Logger.Info("Both Project fields null - considering equal.")
		} else if nr.Project == nil || ncr.Project == nil {
			c.Config.Logger.Info("Only one Project field is null - considering unequal.")
			return false
		} else if *nr.Project != *ncr.Project {
			return false
		}
		// Name: same three-way nil comparison as Project.
		if nr.Name == nil && ncr.Name == nil {
			c.Config.Logger.Info("Both Name fields null - considering equal.")
		} else if nr.Name == nil || ncr.Name == nil {
			c.Config.Logger.Info("Only one Name field is null - considering unequal.")
			return false
		} else if *nr.Name != *ncr.Name {
			return false
		}
		return true
	}
}
// datasetDiff describes one pending change to a Dataset, either as a required
// recreation or as a concrete update operation.
type datasetDiff struct {
	// The diff should include one or the other of RequiresRecreate or UpdateOp.
	RequiresRecreate bool
	UpdateOp         datasetApiOperation
	FieldName        string // used for error logging
}
// convertFieldDiffsToDatasetDiffs groups raw field diffs by the operation that
// resolves them and converts each group into a datasetDiff — either a required
// recreate or a named update operation.
func convertFieldDiffsToDatasetDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]datasetDiff, error) {
	opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff)
	// Map each operation name to the field diffs associated with it.
	for _, fd := range fds {
		for _, ro := range fd.ResultingOperation {
			if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok {
				fieldDiffs = append(fieldDiffs, fd)
				opNamesToFieldDiffs[ro] = fieldDiffs
			} else {
				// First diff for this operation; log why the operation is needed.
				config.Logger.Infof("%s required due to diff: %v", ro, fd)
				opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd}
			}
		}
	}
	var diffs []datasetDiff
	// For each operation name, create a datasetDiff which contains the operation.
	for opName, fieldDiffs := range opNamesToFieldDiffs {
		// Use the first field diff's field name for logging required recreate error.
		diff := datasetDiff{FieldName: fieldDiffs[0].FieldName}
		if opName == "Recreate" {
			diff.RequiresRecreate = true
		} else {
			apiOp, err := convertOpNameToDatasetApiOperation(opName, fieldDiffs, opts...)
			if err != nil {
				return diffs, err
			}
			diff.UpdateOp = apiOp
		}
		diffs = append(diffs, diff)
	}
	return diffs, nil
}
// convertOpNameToDatasetApiOperation maps an operation name produced by the
// differ to the concrete API operation that performs it, carrying along the
// field diffs that motivated the operation.
func convertOpNameToDatasetApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (datasetApiOperation, error) {
	if opName == "updateDatasetPatchDatasetOperation" {
		return &updateDatasetPatchDatasetOperation{FieldDiffs: fieldDiffs}, nil
	}
	return nil, fmt.Errorf("no such operation with name: %v", opName)
}
// extractDatasetFields runs field-extraction hooks over the Dataset's nested
// objects, materializing a temporary empty object where the field is nil and
// keeping the result only if extraction made it non-empty.
func extractDatasetFields(r *Dataset) error {
	vDefaultEncryptionConfiguration := r.DefaultEncryptionConfiguration
	if vDefaultEncryptionConfiguration == nil {
		// note: explicitly not the empty object.
		vDefaultEncryptionConfiguration = &DatasetDefaultEncryptionConfiguration{}
	}
	if err := extractDatasetDefaultEncryptionConfigurationFields(r, vDefaultEncryptionConfiguration); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vDefaultEncryptionConfiguration) {
		r.DefaultEncryptionConfiguration = vDefaultEncryptionConfiguration
	}
	return nil
}
// extractDatasetAccessFields runs field-extraction hooks over a DatasetAccess
// entry's nested View and Routine objects, assigning back only non-empty
// results.
func extractDatasetAccessFields(r *Dataset, o *DatasetAccess) error {
	vView := o.View
	if vView == nil {
		// note: explicitly not the empty object.
		vView = &DatasetAccessView{}
	}
	if err := extractDatasetAccessViewFields(r, vView); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vView) {
		o.View = vView
	}
	vRoutine := o.Routine
	if vRoutine == nil {
		// note: explicitly not the empty object.
		vRoutine = &DatasetAccessRoutine{}
	}
	if err := extractDatasetAccessRoutineFields(r, vRoutine); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vRoutine) {
		o.Routine = vRoutine
	}
	return nil
}
// extractDatasetAccessViewFields is a no-op extraction hook for
// DatasetAccessView; the generator emits it so callers have a uniform hook.
func extractDatasetAccessViewFields(r *Dataset, o *DatasetAccessView) error {
	return nil
}

// extractDatasetAccessRoutineFields is a no-op extraction hook for
// DatasetAccessRoutine.
func extractDatasetAccessRoutineFields(r *Dataset, o *DatasetAccessRoutine) error {
	return nil
}

// extractDatasetDefaultEncryptionConfigurationFields is a no-op extraction hook
// for DatasetDefaultEncryptionConfiguration.
func extractDatasetDefaultEncryptionConfigurationFields(r *Dataset, o *DatasetDefaultEncryptionConfiguration) error {
	return nil
}
// postReadExtractDatasetFields runs post-read extraction hooks over the
// Dataset's nested objects, mirroring extractDatasetFields but using the
// post-read hook for the encryption configuration.
func postReadExtractDatasetFields(r *Dataset) error {
	vDefaultEncryptionConfiguration := r.DefaultEncryptionConfiguration
	if vDefaultEncryptionConfiguration == nil {
		// note: explicitly not the empty object.
		vDefaultEncryptionConfiguration = &DatasetDefaultEncryptionConfiguration{}
	}
	if err := postReadExtractDatasetDefaultEncryptionConfigurationFields(r, vDefaultEncryptionConfiguration); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vDefaultEncryptionConfiguration) {
		r.DefaultEncryptionConfiguration = vDefaultEncryptionConfiguration
	}
	return nil
}
// postReadExtractDatasetAccessFields runs post-read extraction over a
// DatasetAccess entry's nested View and Routine objects.
//
// NOTE(review): this calls the plain extract* helpers rather than the
// postReadExtract* variants; both are no-ops today, so behavior is unchanged —
// confirm against the code generator before relying on the distinction.
func postReadExtractDatasetAccessFields(r *Dataset, o *DatasetAccess) error {
	vView := o.View
	if vView == nil {
		// note: explicitly not the empty object.
		vView = &DatasetAccessView{}
	}
	if err := extractDatasetAccessViewFields(r, vView); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vView) {
		o.View = vView
	}
	vRoutine := o.Routine
	if vRoutine == nil {
		// note: explicitly not the empty object.
		vRoutine = &DatasetAccessRoutine{}
	}
	if err := extractDatasetAccessRoutineFields(r, vRoutine); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vRoutine) {
		o.Routine = vRoutine
	}
	return nil
}
// postReadExtractDatasetAccessViewFields is a no-op post-read hook for
// DatasetAccessView.
func postReadExtractDatasetAccessViewFields(r *Dataset, o *DatasetAccessView) error {
	return nil
}

// postReadExtractDatasetAccessRoutineFields is a no-op post-read hook for
// DatasetAccessRoutine.
func postReadExtractDatasetAccessRoutineFields(r *Dataset, o *DatasetAccessRoutine) error {
	return nil
}

// postReadExtractDatasetDefaultEncryptionConfigurationFields is a no-op
// post-read hook for DatasetDefaultEncryptionConfiguration.
func postReadExtractDatasetDefaultEncryptionConfigurationFields(r *Dataset, o *DatasetDefaultEncryptionConfiguration) error {
	return nil
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
eventarcpb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/eventarc/eventarc_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc"
)
// TriggerServer implements the gRPC interface for Trigger. It is stateless;
// all methods operate on the arguments they receive.
type TriggerServer struct{}
// ProtoToEventarcTriggerMatchingCriteria converts a TriggerMatchingCriteria
// object from its proto representation. A nil proto converts to nil.
func ProtoToEventarcTriggerMatchingCriteria(p *eventarcpb.EventarcTriggerMatchingCriteria) *eventarc.TriggerMatchingCriteria {
	if p == nil {
		return nil
	}
	obj := &eventarc.TriggerMatchingCriteria{
		Attribute: dcl.StringOrNil(p.GetAttribute()),
		Value:     dcl.StringOrNil(p.GetValue()),
		Operator:  dcl.StringOrNil(p.GetOperator()),
	}
	return obj
}
// ProtoToEventarcTriggerDestination converts a TriggerDestination object from
// its proto representation. A nil proto converts to nil; nested messages are
// converted recursively.
func ProtoToEventarcTriggerDestination(p *eventarcpb.EventarcTriggerDestination) *eventarc.TriggerDestination {
	if p == nil {
		return nil
	}
	obj := &eventarc.TriggerDestination{
		CloudRunService: ProtoToEventarcTriggerDestinationCloudRunService(p.GetCloudRunService()),
		CloudFunction:   dcl.StringOrNil(p.GetCloudFunction()),
		Gke:             ProtoToEventarcTriggerDestinationGke(p.GetGke()),
		Workflow:        dcl.StringOrNil(p.GetWorkflow()),
	}
	return obj
}
// ProtoToEventarcTriggerDestinationCloudRunService converts a
// TriggerDestinationCloudRunService object from its proto representation. A
// nil proto converts to nil.
func ProtoToEventarcTriggerDestinationCloudRunService(p *eventarcpb.EventarcTriggerDestinationCloudRunService) *eventarc.TriggerDestinationCloudRunService {
	if p == nil {
		return nil
	}
	obj := &eventarc.TriggerDestinationCloudRunService{
		Service: dcl.StringOrNil(p.GetService()),
		Path:    dcl.StringOrNil(p.GetPath()),
		Region:  dcl.StringOrNil(p.GetRegion()),
	}
	return obj
}
// ProtoToEventarcTriggerDestinationGke converts a TriggerDestinationGke object
// from its proto representation. A nil proto converts to nil.
func ProtoToEventarcTriggerDestinationGke(p *eventarcpb.EventarcTriggerDestinationGke) *eventarc.TriggerDestinationGke {
	if p == nil {
		return nil
	}
	obj := &eventarc.TriggerDestinationGke{
		Cluster:   dcl.StringOrNil(p.GetCluster()),
		Location:  dcl.StringOrNil(p.GetLocation()),
		Namespace: dcl.StringOrNil(p.GetNamespace()),
		Service:   dcl.StringOrNil(p.GetService()),
		Path:      dcl.StringOrNil(p.GetPath()),
	}
	return obj
}
// ProtoToEventarcTriggerTransport converts a TriggerTransport object from its
// proto representation. A nil proto converts to nil; the nested Pubsub message
// is converted recursively.
func ProtoToEventarcTriggerTransport(p *eventarcpb.EventarcTriggerTransport) *eventarc.TriggerTransport {
	if p == nil {
		return nil
	}
	obj := &eventarc.TriggerTransport{
		Pubsub: ProtoToEventarcTriggerTransportPubsub(p.GetPubsub()),
	}
	return obj
}
// ProtoToEventarcTriggerTransportPubsub converts a TriggerTransportPubsub
// object from its proto representation. A nil proto converts to nil.
func ProtoToEventarcTriggerTransportPubsub(p *eventarcpb.EventarcTriggerTransportPubsub) *eventarc.TriggerTransportPubsub {
	if p == nil {
		return nil
	}
	obj := &eventarc.TriggerTransportPubsub{}
	obj.Topic = dcl.StringOrNil(p.GetTopic())
	obj.Subscription = dcl.StringOrNil(p.GetSubscription())
	return obj
}
// ProtoToTrigger converts a Trigger resource from its proto representation.
// Scalar fields map through dcl.StringOrNil; nested messages convert through
// their dedicated helpers, and repeated matching criteria are appended by value.
func ProtoToTrigger(p *eventarcpb.EventarcTrigger) *eventarc.Trigger {
	obj := &eventarc.Trigger{
		Name:                 dcl.StringOrNil(p.GetName()),
		Uid:                  dcl.StringOrNil(p.GetUid()),
		CreateTime:           dcl.StringOrNil(p.GetCreateTime()),
		UpdateTime:           dcl.StringOrNil(p.GetUpdateTime()),
		ServiceAccount:       dcl.StringOrNil(p.GetServiceAccount()),
		Destination:          ProtoToEventarcTriggerDestination(p.GetDestination()),
		Transport:            ProtoToEventarcTriggerTransport(p.GetTransport()),
		Etag:                 dcl.StringOrNil(p.GetEtag()),
		Project:              dcl.StringOrNil(p.GetProject()),
		Location:             dcl.StringOrNil(p.GetLocation()),
		Channel:              dcl.StringOrNil(p.GetChannel()),
		EventDataContentType: dcl.StringOrNil(p.GetEventDataContentType()),
	}
	for _, r := range p.GetMatchingCriteria() {
		obj.MatchingCriteria = append(obj.MatchingCriteria, *ProtoToEventarcTriggerMatchingCriteria(r))
	}
	return obj
}
// EventarcTriggerMatchingCriteriaToProto converts a TriggerMatchingCriteria
// object to its proto representation. A nil object converts to nil.
func EventarcTriggerMatchingCriteriaToProto(o *eventarc.TriggerMatchingCriteria) *eventarcpb.EventarcTriggerMatchingCriteria {
	if o == nil {
		return nil
	}
	p := &eventarcpb.EventarcTriggerMatchingCriteria{}
	p.SetAttribute(dcl.ValueOrEmptyString(o.Attribute))
	p.SetValue(dcl.ValueOrEmptyString(o.Value))
	p.SetOperator(dcl.ValueOrEmptyString(o.Operator))
	return p
}
// EventarcTriggerDestinationToProto converts a TriggerDestination object to its
// proto representation. A nil object converts to nil; nested objects convert
// through their dedicated helpers.
func EventarcTriggerDestinationToProto(o *eventarc.TriggerDestination) *eventarcpb.EventarcTriggerDestination {
	if o == nil {
		return nil
	}
	p := &eventarcpb.EventarcTriggerDestination{}
	p.SetCloudRunService(EventarcTriggerDestinationCloudRunServiceToProto(o.CloudRunService))
	p.SetCloudFunction(dcl.ValueOrEmptyString(o.CloudFunction))
	p.SetGke(EventarcTriggerDestinationGkeToProto(o.Gke))
	p.SetWorkflow(dcl.ValueOrEmptyString(o.Workflow))
	return p
}
// EventarcTriggerDestinationCloudRunServiceToProto converts a TriggerDestinationCloudRunService object to its proto representation.
func EventarcTriggerDestinationCloudRunServiceToProto(o *eventarc.TriggerDestinationCloudRunService) *eventarcpb.EventarcTriggerDestinationCloudRunService {
	if o == nil {
		return nil
	}
	p := &eventarcpb.EventarcTriggerDestinationCloudRunService{}
	p.SetService(dcl.ValueOrEmptyString(o.Service))
	p.SetPath(dcl.ValueOrEmptyString(o.Path))
	p.SetRegion(dcl.ValueOrEmptyString(o.Region))
	return p
}
// EventarcTriggerDestinationGkeToProto converts a TriggerDestinationGke object to its proto representation.
func EventarcTriggerDestinationGkeToProto(o *eventarc.TriggerDestinationGke) *eventarcpb.EventarcTriggerDestinationGke {
	if o == nil {
		return nil
	}
	p := &eventarcpb.EventarcTriggerDestinationGke{}
	p.SetCluster(dcl.ValueOrEmptyString(o.Cluster))
	p.SetLocation(dcl.ValueOrEmptyString(o.Location))
	p.SetNamespace(dcl.ValueOrEmptyString(o.Namespace))
	p.SetService(dcl.ValueOrEmptyString(o.Service))
	p.SetPath(dcl.ValueOrEmptyString(o.Path))
	return p
}
// EventarcTriggerTransportToProto converts a TriggerTransport object to its proto representation.
func EventarcTriggerTransportToProto(o *eventarc.TriggerTransport) *eventarcpb.EventarcTriggerTransport {
	if o == nil {
		return nil
	}
	p := &eventarcpb.EventarcTriggerTransport{}
	p.SetPubsub(EventarcTriggerTransportPubsubToProto(o.Pubsub))
	return p
}
// EventarcTriggerTransportPubsubToProto converts a TriggerTransportPubsub object to its proto representation.
func EventarcTriggerTransportPubsubToProto(o *eventarc.TriggerTransportPubsub) *eventarcpb.EventarcTriggerTransportPubsub {
	if o == nil {
		return nil
	}
	p := &eventarcpb.EventarcTriggerTransportPubsub{}
	p.SetTopic(dcl.ValueOrEmptyString(o.Topic))
	p.SetSubscription(dcl.ValueOrEmptyString(o.Subscription))
	return p
}
// TriggerToProto converts a Trigger resource to its proto representation.
func TriggerToProto(resource *eventarc.Trigger) *eventarcpb.EventarcTrigger {
	out := &eventarcpb.EventarcTrigger{}
	out.SetName(dcl.ValueOrEmptyString(resource.Name))
	out.SetUid(dcl.ValueOrEmptyString(resource.Uid))
	out.SetCreateTime(dcl.ValueOrEmptyString(resource.CreateTime))
	out.SetUpdateTime(dcl.ValueOrEmptyString(resource.UpdateTime))
	out.SetServiceAccount(dcl.ValueOrEmptyString(resource.ServiceAccount))
	out.SetDestination(EventarcTriggerDestinationToProto(resource.Destination))
	out.SetTransport(EventarcTriggerTransportToProto(resource.Transport))
	out.SetEtag(dcl.ValueOrEmptyString(resource.Etag))
	out.SetProject(dcl.ValueOrEmptyString(resource.Project))
	out.SetLocation(dcl.ValueOrEmptyString(resource.Location))
	out.SetChannel(dcl.ValueOrEmptyString(resource.Channel))
	out.SetEventDataContentType(dcl.ValueOrEmptyString(resource.EventDataContentType))
	// Convert the repeated matching criteria in place, one proto per entry.
	criteria := make([]*eventarcpb.EventarcTriggerMatchingCriteria, len(resource.MatchingCriteria))
	for i := range resource.MatchingCriteria {
		criteria[i] = EventarcTriggerMatchingCriteriaToProto(&resource.MatchingCriteria[i])
	}
	out.SetMatchingCriteria(criteria)
	// Copy the label map so the proto does not alias the resource's map.
	labels := make(map[string]string, len(resource.Labels))
	for key, value := range resource.Labels {
		labels[key] = value
	}
	out.SetLabels(labels)
	// Same for conditions.
	conditions := make(map[string]string, len(resource.Conditions))
	for key, value := range resource.Conditions {
		conditions[key] = value
	}
	out.SetConditions(conditions)
	return out
}
// applyTrigger handles the gRPC request by passing it to the underlying Trigger Apply() method.
func (s *TriggerServer) applyTrigger(ctx context.Context, c *eventarc.Client, request *eventarcpb.ApplyEventarcTriggerRequest) (*eventarcpb.EventarcTrigger, error) {
	applied, err := c.ApplyTrigger(ctx, ProtoToTrigger(request.GetResource()))
	if err != nil {
		return nil, err
	}
	return TriggerToProto(applied), nil
}
// ApplyEventarcTrigger handles the gRPC request by passing it to the underlying Trigger Apply() method.
func (s *TriggerServer) ApplyEventarcTrigger(ctx context.Context, request *eventarcpb.ApplyEventarcTriggerRequest) (*eventarcpb.EventarcTrigger, error) {
	cl, err := createConfigTrigger(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyTrigger(ctx, cl, request)
}
// DeleteEventarcTrigger handles the gRPC request by passing it to the underlying Trigger Delete() method.
func (s *TriggerServer) DeleteEventarcTrigger(ctx context.Context, request *eventarcpb.DeleteEventarcTriggerRequest) (*emptypb.Empty, error) {
	cl, err := createConfigTrigger(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteTrigger(ctx, ProtoToTrigger(request.GetResource()))
}
// ListEventarcTrigger handles the gRPC request by passing it to the underlying TriggerList() method.
func (s *TriggerServer) ListEventarcTrigger(ctx context.Context, request *eventarcpb.ListEventarcTriggerRequest) (*eventarcpb.ListEventarcTriggerResponse, error) {
	cl, err := createConfigTrigger(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	list, err := cl.ListTrigger(ctx, request.GetProject(), request.GetLocation())
	if err != nil {
		return nil, err
	}
	var protos []*eventarcpb.EventarcTrigger
	for _, item := range list.Items {
		protos = append(protos, TriggerToProto(item))
	}
	response := &eventarcpb.ListEventarcTriggerResponse{}
	response.SetItems(protos)
	return response, nil
}
// createConfigTrigger builds an eventarc client configured with the given
// service account credentials file and a test user agent.
func createConfigTrigger(ctx context.Context, serviceAccountFile string) (*eventarc.Client, error) {
	// Parameter renamed from service_account_file: Go uses MixedCaps, not underscores.
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return eventarc.NewClient(conf), nil
}
|
// +build testnet
package common
import (
"math/big"
"github.com/qlcchain/go-qlc/common/types"
)
var (
	// PoV Block Chain Params

	// PovChainGenesisBlockHeight is the height of the PoV genesis block.
	PovChainGenesisBlockHeight = uint64(0)
	// PovChainBlockInterval is the target seconds between PoV blocks.
	PovChainBlockInterval = 60
	// PovChainTargetCycle is the number of blocks per difficulty retarget cycle.
	PovChainTargetCycle = 20
	// PovChainBlockSize is the maximum block size in bytes (2 MiB).
	PovChainBlockSize = 2 * 1024 * 1024
	// Expected seconds per retarget window, clamped to [1/4x, 4x] below.
	PovChainRetargetTimespan    = PovChainBlockInterval * PovChainTargetCycle
	PovChainMinRetargetTimespan = PovChainRetargetTimespan / 4
	PovChainMaxRetargetTimespan = PovChainRetargetTimespan * 4
	// Derived block counts per hour/day at the target interval.
	POVChainBlocksPerHour = 3600 / PovChainBlockInterval
	POVChainBlocksPerDay  = POVChainBlocksPerHour * 24
	// PovMinerPledgeAmountMin is the minimum miner pledge balance.
	PovMinerPledgeAmountMin = types.NewBalance(100000000000000)
	// Miner verification/reward thresholds, all expressed in block heights
	// (one day's worth of blocks each).
	PovMinerVerifyHeightStart       = uint64(POVChainBlocksPerDay * 1)
	PovMinerRewardHeightStart       = uint64(POVChainBlocksPerDay * 1)
	PovMinerRewardHeightGapToLatest = uint64(POVChainBlocksPerDay * 1)
	PovMinerMaxRewardBlocksPerCall  = uint64(POVChainBlocksPerDay * 1)
	PovMinerRewardHeightRound       = uint64(POVChainBlocksPerDay * 1)
	// PovMinerMaxFindNonceTimeSec bounds how long a miner searches for a nonce.
	PovMinerMaxFindNonceTimeSec = PovChainBlockInterval * PovChainTargetCycle
	// Reward per block, rewardPerBlock * blockNumPerYear / gasTotalSupply = 3%
	// 10000000000000000 * 0.03 / (3600 * 24 * 365 / 30)
	PovMinerRewardPerBlockInt     = big.NewInt(285388127)
	PovMinerRewardPerBlockBalance = types.NewBalance(285388127)
	// PoVMaxForkHeight is the deepest fork accepted (12 hours of blocks).
	PoVMaxForkHeight = uint64(POVChainBlocksPerHour * 12)
	// Genesis difficulty target, as a 512-bit hex string parsed below.
	PovGenesisTargetHex = "0000007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
	//PovMinimumTargetHex = "0000000fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
	//PovMaximumTargetHex = "000003ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
	PovGenesisTargetInt, _ = new(big.Int).SetString(PovGenesisTargetHex, 16)
	//PovMinimumTargetInt, _ = new(big.Int).SetString(PovMinimumTargetHex, 16)
	//PovMaximumTargetInt, _ = new(big.Int).SetString(PovMaximumTargetHex, 16)
	// maximum number of seconds a block time is allowed to be ahead of the now time.
	PovMaxAllowedFutureTimeSec = 300
	// PovMaxNonce is the largest possible nonce value.
	PovMaxNonce = ^uint64(0) // 2^64 - 1
)
|
package handlers
import (
"encoding/json"
"fmt"
"log"
"net/http"
"strconv"
"gopkg.in/go-playground/validator.v9"
"github.com/egamorim/star-wars-planets/cmd/api/request"
"github.com/egamorim/star-wars-planets/cmd/api/response"
"github.com/egamorim/star-wars-planets/pkg/integration"
"github.com/gorilla/mux"
)
// HandleInsertNewPlanet decodes a planet from the request body, validates it,
// enriches it with the number of film appearances fetched from SWAPI, stores
// it, and responds 201 with the stored planet plus a self link.
func (h *Handler) HandleInsertNewPlanet(w http.ResponseWriter, r *http.Request) {
	v := validator.New()
	req := new(request.PlanetRequest)
	decoder := json.NewDecoder(r.Body)
	// req is already a pointer; no need for a double pointer here.
	if err := decoder.Decode(req); err != nil {
		log.Println(err.Error())
		// A malformed body is a client error, not a server failure.
		h.RespondWithError(w, http.StatusBadRequest, err.Error())
		return
	}
	if err := v.Struct(req); err != nil {
		// Validation failures are client errors as well.
		h.RespondWithError(w, http.StatusBadRequest, err.Error())
		return
	}
	swapi := integration.Swapi{}
	swapiPlanet, err := swapi.GetPlanet(req.Name)
	if err != nil {
		h.RespondWithError(w, http.StatusNotFound, err.Error())
		return
	}
	planet := req.ToPlanet()
	planet.AmountOfMovies = len(swapiPlanet.Films)
	p, err := h.PlanetRepository.Insert(&planet)
	if err != nil {
		log.Println("Error:", err.Error())
		h.RespondWithError(w, http.StatusInternalServerError, err.Error())
		return
	}
	// Local renamed from "response" so it no longer shadows the response package.
	resp := response.PlanetResponse{Planet: p, Links: make(map[string]string)}
	// Hex() already returns a string; the string() conversion was redundant.
	resp.Links["self"] = fmt.Sprintf("http://%s%s/%s", r.Host, r.URL.Path, p.ID.Hex())
	h.RespondWithJSON(w, http.StatusCreated, resp)
}
// HandleGetAll returns a page of planets. Optional "limit" and "offset" query
// parameters control paging (defaults: limit=5, offset=0); next/prev links
// are included in the response.
func (h *Handler) HandleGetAll(w http.ResponseWriter, r *http.Request) {
	limit := 5
	offset := 0
	// Invalid or absent parameters keep the defaults; previously a
	// non-numeric value silently became 0 via an ignored Atoi error.
	if l := r.URL.Query().Get("limit"); l != "" {
		if parsed, err := strconv.Atoi(l); err == nil {
			limit = parsed
		}
	}
	if o := r.URL.Query().Get("offset"); o != "" {
		if parsed, err := strconv.Atoi(o); err == nil {
			offset = parsed
		}
	}
	planets, err := h.PlanetRepository.GetAll(offset, limit)
	if err != nil {
		log.Println("Error:", err.Error())
		h.RespondWithError(w, http.StatusInternalServerError, err.Error())
		return
	}
	// Local renamed from "response" so it no longer shadows the response package.
	resp := response.ListPlanetResponse{Planets: planets, Links: make(map[string]string)}
	resp.Links["next"] = fmt.Sprintf("http://%s%s?offset=%d&limit=%d", r.Host, r.URL.Path, offset+limit, limit)
	if prev := offset - limit; prev >= 0 {
		resp.Links["prev"] = fmt.Sprintf("http://%s%s?offset=%d&limit=%d", r.Host, r.URL.Path, prev, limit)
	}
	h.RespondWithJSON(w, http.StatusOK, resp)
}
// HandleFindByName looks up a single planet by the {name} path variable.
func (h *Handler) HandleFindByName(w http.ResponseWriter, r *http.Request) {
	name := mux.Vars(r)["name"]
	planet, err := h.PlanetRepository.FindByName(name)
	if err != nil {
		log.Println("Error:", err.Error())
		h.RespondWithError(w, http.StatusNotFound, err.Error())
		return
	}
	h.RespondWithJSON(w, http.StatusOK, planet)
}
// HandleGetByID looks up a single planet by the {id} path variable.
func (h *Handler) HandleGetByID(w http.ResponseWriter, r *http.Request) {
	id := mux.Vars(r)["id"]
	planet, err := h.PlanetRepository.GetByID(id)
	if err != nil {
		log.Println("Error:", err.Error())
		h.RespondWithError(w, http.StatusNotFound, err.Error())
		return
	}
	h.RespondWithJSON(w, http.StatusOK, planet)
}
// HandleDelete removes the planet identified by the {id} path variable.
func (h *Handler) HandleDelete(w http.ResponseWriter, r *http.Request) {
	id := mux.Vars(r)["id"]
	if err := h.PlanetRepository.Delete(id); err != nil {
		log.Println("Error:", err.Error())
		h.RespondWithError(w, http.StatusNotFound, err.Error())
		return
	}
	h.RespondWithJSON(w, http.StatusOK, nil)
}
|
package repair
import (
"strconv"
"testing"
)
// The md5 of a number between 10m and 100m is given; CheckRepair must report
// whether the number matches the digest. (Translated from Turkish.)
func TestCheckRepair(t *testing.T) {
	results := []struct {
		Number    int
		Md5Result string
		Result    bool
	}{
		{15765233, "2f0899f61805126a241830dc8f0b3d70", true},
		{64176233, "d723f8c7e136c804c32aa19236dba4d3", true},
		{32165233, "4407ddc7463403b98f25ae796bba07f8", true},
		{12, "4407ddc7463403b98f25ae796bba07f8", false},
		{0, "4407ddc7463403b98f25ae796bba07f8", false},
		{123456758, "4407ddc7463403b98f25ae796bba07f8", false},
	}
	for _, v := range results {
		// Report expected vs. actual; the old message claimed the result
		// "shouldn't be" the digest even when a match was expected.
		if got := CheckRepair(v.Number, v.Md5Result); got != v.Result {
			t.Errorf("CheckRepair(%s, %q) = %v, want %v", strconv.Itoa(v.Number), v.Md5Result, got, v.Result)
		}
	}
}
|
package main
import (
"fmt"
)
// main prints a tongue-in-cheek destination for the favorite sport.
func main() {
	const esporte = "starcraft"
	switch esporte {
	case "futebol":
		fmt.Println("quer jogar futebol, vai pro brasil")
	case "starcraft":
		fmt.Println("quer jogar starcraft, vai pra coréia")
	case "espeleísmo":
		fmt.Println("quer fazer essa coisa estranha, vai pro psiquiatra")
	}
}
|
package main
import (
"fmt"
"errors"
)
// add returns 2*a + 3*b, or an error when either argument is negative.
func add(a, b int) (c int, err error) {
	if a < 0 || b < 0 {
		err = errors.New("XXXXX")
		return
	}
	c = 2*a + 3*b
	return
}
func main() {
a, b := 1, 2
c, err := add(a,b)
if err != nil {
fmt.Println(err)
} else {
fmt.Printf("add(%d,%d)=%d\n",a,b,c)
}
} |
package philifence
// Properties is a free-form property bag attached to a GeoJSON feature.
type Properties map[string]interface{}

// PointMessage is a GeoJSON Feature wrapping a point geometry.
type PointMessage struct {
	Type       string        `json:"type"`
	Properties Properties    `json:"properties"`
	Geometry   PointGeometry `json:"geometry"`
}

// PointGeometry is a GeoJSON geometry holding [lon, lat] coordinates.
type PointGeometry struct {
	Type        string    `json:"type"`
	Coordinates []float64 `json:"coordinates"`
}

// ResponseMessage pairs the queried point with the properties of the
// matching fences (see newResponseMessage).
type ResponseMessage struct {
	Query  PointMessage `json:"query"`
	Result []Properties `json:"result"`
}
// newPointMessage wraps a coordinate and its properties as a GeoJSON Feature.
// Note GeoJSON coordinate order is [longitude, latitude].
func newPointMessage(c Coordinate, props Properties) *PointMessage {
	geometry := PointGeometry{
		Type:        "Point",
		Coordinates: []float64{c.lon, c.lat},
	}
	return &PointMessage{
		Type:       "Feature",
		Properties: props,
		Geometry:   geometry,
	}
}
// newResponseMessage bundles the queried point (as a GeoJSON Feature) with
// the properties of the fences it matched.
func newResponseMessage(c Coordinate, props map[string]interface{}, fences []Properties) *ResponseMessage {
	query := newPointMessage(c, Properties(props))
	return &ResponseMessage{Query: *query, Result: fences}
}
|
package gorilla
import (
"github.com/Highway-Project/highway/pkg/router"
"github.com/Highway-Project/highway/pkg/rules"
"github.com/gorilla/mux"
"net/http"
)
// GorillaRouter adapts a gorilla/mux router to the highway router interface.
type GorillaRouter struct {
	router *mux.Router
}
// AddRule registers one routing rule on the underlying mux router, applying
// the rule's scheme, path prefix and optional host/method/header/query
// matchers, and installs the rule itself as the handler.
//
// NOTE(review): gorilla/mux ANDs all matchers on a route, so calling
// route.Host once per entry means a rule with more than one host can never
// match — confirm whether multiple hosts are intended to be OR'd.
func (r *GorillaRouter) AddRule(rule rules.Rule) error {
	route := r.router.Schemes(rule.Schema).PathPrefix(rule.PathPrefix)
	if rule.Hosts != nil {
		for _, host := range rule.Hosts {
			route.Host(host)
		}
	}
	if rule.Methods != nil {
		route.Methods(rule.Methods...)
	}
	if rule.Headers != nil {
		for k, v := range rule.Headers {
			route.Headers(k, v)
		}
	}
	if rule.Queries != nil {
		for k, v := range rule.Queries {
			route.Queries(k, v)
		}
	}
	// The address of the parameter copy is installed as the handler;
	// rules.Rule presumably implements http.Handler.
	route.Handler(&rule)
	return nil
}
// ServeHTTP implements http.Handler by delegating to the wrapped mux router.
func (r *GorillaRouter) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	r.router.ServeHTTP(w, req)
}
// New builds a GorillaRouter backed by a fresh mux.Router.
// The options argument is currently unused.
func New(options router.RouterOptions) (router.Router, error) {
	gr := &GorillaRouter{router: mux.NewRouter()}
	return gr, nil
}
|
package main
import (
"fmt"
)
// ErrNegativeSqrt reports that Sqrt was called with a negative value.
type ErrNegativeSqrt float64

// Error implements the error interface, including the offending value.
// The conversion to float64 is essential: formatting an ErrNegativeSqrt
// with %v would recursively call Error again.
func (e ErrNegativeSqrt) Error() string {
	return fmt.Sprintf("cannot Sqrt negative number: %v", float64(e))
}

// Sqrt approximates the square root of x with ten Newton iterations
// starting from z = 1. It returns ErrNegativeSqrt for negative input;
// by convention the value result is 0 (not x) when the error is non-nil.
func Sqrt(x float64) (float64, error) {
	if x < 0 {
		return 0, ErrNegativeSqrt(x)
	}
	z := 1.0
	for i := 0; i < 10; i++ {
		// Newton's method: z' = z - (z² - x) / (2z)
		z -= (z*z - x) / (2 * z)
	}
	return z, nil
}
// main demonstrates Sqrt on one valid and one invalid input.
func main() {
	for _, x := range []float64{2, -2} {
		fmt.Println(Sqrt(x))
	}
}
|
// Package windowmean provides a moderately efficient implementation of a movingmean.
package windowmean
// WindowMean gives the window-based mean of a slice of float64's
// using a window of the given radius.
//
// Each output b[i] is the mean of a[i-radius .. i+radius], with the window
// truncated at both slice edges. A running sum keeps the whole computation
// O(len(a)).
//
// NOTE(review): there is no guard for short inputs; len(a) < 2*radius+1
// indexes out of range — confirm callers guarantee this.
func WindowMean(a []float64, radius int) []float64 {
	var s float64
	b := make([]float64, len(a))
	// Seed the running sum with the first radius elements.
	for i := 0; i < radius; i++ {
		s += a[i]
	}
	// w is the full window width (2*radius+1); iw tracks the growing width
	// while the left edge still truncates the window.
	w := float64(2*(radius+1) - 1)
	iw := float64(radius)
	// Left edge: the window grows one element per step up to full width.
	for i := radius; i < int(w); i++ {
		s += a[i]
		iw++
		b[i-radius] = s / iw
	}
	// Middle: emit the current full-window mean, then slide the window by
	// dropping the leftmost element and adding the next one on the right.
	for i := radius + 1; i < len(a)-radius; i++ {
		b[i-radius] = s / w
		s -= a[i-radius-1]
		s += a[i+radius]
	}
	// Right edge: finish the remaining outputs; w is reused as the shrinking
	// effective width as the window runs off the end of the slice.
	for i := len(a) - int(radius); i <= len(a); i++ {
		b[i-1] = s / w
		s -= a[i-radius-1]
		w--
	}
	return b
}
// WindowMeanUint16 gives the window-based mean of a slice of uint16's
// using a window of the given radius.
//
// Same incremental algorithm as WindowMean, accumulating in float64 and
// rounding each mean to the nearest integer by adding 0.5 before truncation.
//
// NOTE(review): as with WindowMean, len(a) < 2*radius+1 indexes out of
// range — confirm callers guarantee this.
func WindowMeanUint16(a []uint16, radius int) []uint16 {
	var s float64
	b := make([]uint16, len(a))
	// Seed the running sum with the first radius elements.
	for i := 0; i < radius; i++ {
		s += float64(a[i])
	}
	// w is the full window width (2*radius+1); iw grows while the window is
	// still truncated on the left.
	w := float64(2*(radius+1) - 1)
	iw := float64(radius)
	// Left edge: growing window.
	for i := radius; i < int(w); i++ {
		s += float64(a[i])
		iw++
		b[i-radius] = uint16(0.5 + s/float64(iw))
	}
	// Middle: full window, slid one element per iteration.
	for i := radius + 1; i < len(a)-radius; i++ {
		b[i-radius] = uint16(0.5 + s/w)
		s -= float64(a[i-radius-1])
		s += float64(a[i+radius])
	}
	// Right edge: w shrinks as the window runs off the end.
	for i := len(a) - int(radius); i <= len(a); i++ {
		b[i-1] = uint16(0.5 + s/w)
		s -= float64(a[i-radius-1])
		w--
	}
	return b
}
|
package crypto
import (
"crypto-performance-compare/utils"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
)
// Client calls the coin-price API. Cache is exported for callers; GetInfo
// itself does not consult it.
type Client struct {
	baseURL      string
	apiKey       string
	baseCurrency string
	Cache        *Cache
}

// Response is the API payload for a single coin, plus the local Time at
// which it was fetched (set by GetInfo, not by the API).
type Response struct {
	Symbol string `json:"symbol"`
	Name   string `json:"name"`
	Price  string `json:"price"`
	Delta  string `json:"delta_1h"`
	Time   string
}
// NewClient returns a *Client for the given API base URL, reading the API
// key and base currency from the environment (with defaults), and attaching
// the provided cache.
func NewClient(baseURL string, cache *Cache) *Client {
	client := Client{
		baseURL:      baseURL,
		apiKey:       utils.GetEnv("API_KEY", "default"),
		baseCurrency: utils.GetEnv("BASE_CURRENCY", "USD"),
		Cache:        cache,
	}
	return &client
}
// GetInfo returns current stats for a coin from the API, stamping the
// result with the local fetch time. An empty payload is treated as an error.
func (c *Client) GetInfo(symbol string) (Response, error) {
	var response Response
	resp, err := http.Get(fmt.Sprintf("%s/api/v1/coin?key=%s&pref=%s&symbol=%s", c.baseURL, c.apiKey, c.baseCurrency, symbol))
	if err != nil {
		return response, fmt.Errorf("getting response from api: %w", err)
	}
	// BUG FIX: the body was never closed, leaking the connection on every call.
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return response, fmt.Errorf("bad response, status code: %d", resp.StatusCode)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return response, fmt.Errorf("reading response body: %w", err)
	}
	err = json.Unmarshal(body, &response)
	if err != nil {
		return response, fmt.Errorf("unmarshalling response body: %w", err)
	}
	if response == (Response{}) {
		return response, fmt.Errorf("empty response returned")
	}
	response.Time = time.Now().Format("2006-01-02T15:04:05")
	return response, nil
}
|
package common
import (
"bytes"
"crypto/md5"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"time"
"tokensky_bg_admin/conf"
)
// balanceChange describes one mutation of a user's asset balances.
type balanceChange struct {
	Uid int `json:"uid"`
	// Currency symbol.
	Symbol string `json:"symbol"`
	// Operation on the available balance: add, sub, mul (multiply) or qup (divide).
	MethodBalance string `json:"methodBalance"`
	Balance       string  `json:"balance"`
	balance       float64 `json:"-"`
	// Operation on the frozen balance; same verbs as MethodBalance.
	MethodFrozenBalance string  `json:"methodFrozenBalance"`
	FrozenBalance       string  `json:"frozenBalance"`
	frozenBalance       float64 `json:"-"`
	SignId              string  `json:"signId"`
}
// requestOne is the wire payload for a single balance change.
type requestOne struct {
	// Source of the change: 1 backend, 2 admin console.
	Source int `json:"source"`
	// The single change.
	Change *balanceChange `json:"change"`
	// Description of the change.
	Cont string `json:"cont"`
	// Operation template.
	Mold string `json:"mold"`
	// Timestamp in milliseconds.
	PushTime int64 `json:"pushTime"`
	// Unique hash. Must remain the LAST field: Send rewrites the trailing
	// `,"hashId":""}` bytes of the marshalled JSON.
	HashId string `json:"hashId"`
}

// requestMulti is the wire payload for a batch of balance changes.
type requestMulti struct {
	// Source of the change: 1 backend, 2 admin console, 3 cron job.
	Source int `json:"source"`
	// The batch of changes.
	Changes []*balanceChange `json:"changes"`
	// Description of the change.
	Cont string `json:"cont"`
	// Operation template.
	Mold string `json:"mold"`
	// Timestamp in milliseconds.
	PushTime int64 `json:"pushTime"`
	// Unique hash. Must remain the LAST field: Send rewrites the trailing
	// `,"hashId":""}` bytes of the marshalled JSON.
	HashId string `json:"hashId"`
}
// userBalance is a user's asset snapshot as returned by the balance service.
type userBalance struct {
	Uid           int    `json:"uid"`
	Symbol        string `json:"symbol"`
	Balance       string `json:"balance"`
	FrozenBalance string `json:"frozenBalance"`
}

// responseOne is the service response for a single balance change.
type responseOne struct {
	// 0 means success.
	Code int `json:"code"`
	// The latest balance after the change.
	Balance *userBalance `json:"balance"`
	// Human-readable message.
	Msg string `json:"msg"`
	// Hash echoed back by the service.
	HashId string `json:"hashId"`
}

// responseMulti is the service response for a batch of balance changes.
type responseMulti struct {
	// 0 means success.
	Code int `json:"code"`
	// The latest balances after the changes.
	Balances []*userBalance `json:"balances"`
	// Human-readable message.
	Msg string `json:"msg"`
	// Hash echoed back by the service.
	HashId string `json:"hashId"`
}
// tokenskyUserBalanceChange batches user balance changes to be pushed to the
// balance service via Send.
type tokenskyUserBalanceChange struct {
	cont     string
	source   int
	hashId   string
	mold     string
	pushTime int64
	data     []*balanceChange
}
// Add queues one balance change for the given user and symbol. Zero amounts
// are serialized as empty strings so the remote side sees "no change";
// non-zero amounts are formatted with 8 decimal places.
func (this *tokenskyUserBalanceChange) Add(uid int, symbol string, signId string,
	methodBalance string, balance float64, methodFrozenBalance string, frozenBalance float64) {
	// Locals renamed from Balance/FrozenBalance: Go locals use lowerCamelCase.
	var balanceStr, frozenBalanceStr string
	if balance != 0 {
		balanceStr = strconv.FormatFloat(balance, 'f', 8, 64)
	}
	if frozenBalance != 0 {
		frozenBalanceStr = strconv.FormatFloat(frozenBalance, 'f', 8, 64)
	}
	this.data = append(this.data, &balanceChange{
		Uid:                 uid,
		Symbol:              symbol,
		SignId:              signId,
		MethodBalance:       methodBalance,
		MethodFrozenBalance: methodFrozenBalance,
		Balance:             balanceStr,
		FrozenBalance:       frozenBalanceStr,
	})
}
// Count reports how many balance changes are currently queued.
func (this *tokenskyUserBalanceChange) Count() int {
	return len(this.data)
}
// Send pushes the queued balance changes to the balance service and returns
// (ok, message, hashId). One queued change posts to /balance/one, several
// post to /balance/multi. The messages are kept in Chinese as callers may
// surface them verbatim.
func (this *tokenskyUserBalanceChange) Send() (bool, string, string) {
	var bys []byte
	var err error
	num := len(this.data)
	var url string
	// Push time in milliseconds.
	this.pushTime = time.Now().UnixNano() / int64(time.Millisecond)
	switch num {
	case 0:
		return false, "没有待处理数据", ""
	case 1:
		obj := this.data[0]
		res := requestOne{
			Source:   this.source,
			Cont:     this.cont,
			Mold:     this.mold,
			PushTime: this.pushTime,
			Change:   obj,
		}
		bys, err = json.Marshal(res)
		if err != nil {
			return false, "序列化异常", ""
		}
		url = conf.TOKENSKY_BALANCE_CHANGE_URL + "/balance/one"
	default:
		res := requestMulti{
			Source: this.source,
			Cont:   this.cont,
			// BUG FIX: Mold and PushTime were omitted for multi-change
			// requests, unlike the single-change branch above.
			Mold:     this.mold,
			PushTime: this.pushTime,
			Changes:  this.data,
		}
		bys, err = json.Marshal(res)
		if err != nil {
			return false, "序列化异常", ""
		}
		url = conf.TOKENSKY_BALANCE_CHANGE_URL + "/balance/multi"
	}
	// Replace the trailing `,"hashId":""}` (exactly 13 bytes — HashId is the
	// last struct field, so it marshals last) with the md5 of the payload.
	tx := ",\"hashId\":\"" + fmt.Sprintf("%x", md5.Sum(bys)) + "\"}"
	bys = append(bys[:len(bys)-13], []byte(tx)...)
	client := &http.Client{}
	reader := bytes.NewReader(bys)
	request, err := http.NewRequest("POST", url, reader)
	if err != nil {
		return false, "创建请求异常", ""
	}
	response, err := client.Do(request)
	if err != nil {
		return false, "请求异常", ""
	}
	// Close the body as soon as we know it exists, before (not after) reading.
	defer response.Body.Close()
	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return false, "获取数据异常", ""
	}
	hashId := ""
	switch num {
	case 1:
		obj := &responseOne{}
		if err = json.Unmarshal(body, obj); err != nil {
			return false, "解析异常", ""
		}
		if obj.Code != 0 {
			return false, obj.Msg, ""
		}
		hashId = obj.HashId
	default:
		obj := &responseMulti{}
		if err = json.Unmarshal(body, obj); err != nil {
			return false, "解析异常", ""
		}
		if obj.Code != 0 {
			return false, obj.Msg, ""
		}
		hashId = obj.HashId
	}
	return true, "", hashId
}
// NewTokenskyUserBalanceChange builds an empty change batch with the given
// source, operation template (mold) and description (cont).
func NewTokenskyUserBalanceChange(source int, mold string, cont string) *tokenskyUserBalanceChange {
	batch := &tokenskyUserBalanceChange{}
	batch.source = source
	batch.mold = mold
	batch.cont = cont
	return batch
}
|
package main
import "testing"
// TestFixUrl verifies that fixURL adds an https scheme when one is missing
// and strips a trailing slash.
func TestFixUrl(t *testing.T) {
	cases := map[string]string{
		"example.com":          "https://example.com",
		"http://example.com":   "http://example.com",
		"http://example.com/":  "http://example.com",
		"https://example.com/": "https://example.com",
	}
	for given, expected := range cases {
		if output := fixURL(given); output != expected {
			t.Errorf("%s != %s\n", output, expected)
		}
	}
}
|
/*
Copyright 2021 apstndb.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// ImagePullSecretSpec defines the desired state of ImagePullSecret
type ImagePullSecretSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// SecretName is the name of the Secret to maintain.
	SecretName string `json:"secretName"`
	// ServiceAccountName names a Kubernetes ServiceAccount — presumably the
	// one the secret is attached to; confirm with the controller logic.
	ServiceAccountName string `json:"serviceAccountName"`
	// GsaEmail must be email of the GCP Service Account.
	GsaEmail string `json:"gsaEmail"`
	// WorkloadIdentityPoolProvider must be `projects/${PROJECT_NUMBER}/locations/global/workloadIdentityPools/${POOL}/providers/${PROVIDER}`
	WorkloadIdentityPoolProvider string `json:"workloadIdentityPoolProvider"`
}
// ImagePullSecretStatus defines the observed state of ImagePullSecret
type ImagePullSecretStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// ExpiresAt records the observed expiry time of the current credentials.
	ExpiresAt metav1.Time `json:"expiresAt,omitempty"`
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
//+kubebuilder:printcolumn:name="SECRET",type=string,JSONPath=`.spec.secretName`
//+kubebuilder:printcolumn:name="KSA_NAME",type=string,JSONPath=`.spec.serviceAccountName`
//+kubebuilder:printcolumn:name="GSA_EMAIL",type=string,JSONPath=`.spec.gsaEmail`
//+kubebuilder:printcolumn:name="PROVIDER",type=string,JSONPath=`.spec.workloadIdentityPoolProvider`
//+kubebuilder:printcolumn:name="CURRENT_EXPIRES_AT",type=string,JSONPath=`.status.expiresAt`
// ImagePullSecret is the Schema for the imagepullsecrets API.
// Status.ExpiresAt surfaces the observed credential expiry as a print column.
type ImagePullSecret struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   ImagePullSecretSpec   `json:"spec,omitempty"`
	Status ImagePullSecretStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
// ImagePullSecretList contains a list of ImagePullSecret.
type ImagePullSecretList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []ImagePullSecret `json:"items"`
}
// init registers both resource types with the scheme builder so the manager
// can serve them.
func init() {
	SchemeBuilder.Register(&ImagePullSecret{}, &ImagePullSecretList{})
}
|
package asyncexec
import (
"fmt"
"os"
"os/exec"
)
// StreamCommand executes the given command in workDir (when non-empty) and
// streams its output to this process's stdout/stderr. It returns an error
// when the command cannot start, fails while waiting, or exits non-zero.
func StreamCommand(workDir string, command string, args []string) error {
	cmd := exec.Command(command, args...)
	if workDir != "" {
		cmd.Dir = workDir
	}
	// New wraps the command with streamed stdout/stderr channels; the 8 is
	// presumably the stream buffer size — confirm in New's definition.
	acmd := New(cmd, 8)
	err := acmd.Run()
	if err != nil {
		return err
	}
	// done is used to wait for stream readers to finish before exiting the function.
	done := make(chan struct{})
	go func() {
		for b := range acmd.StdoutStream() {
			_, err := os.Stdout.Write(b)
			if err != nil {
				msg := fmt.Sprintf("failed to write to Stdout: %v", err)
				// should never happen, just panic.
				panic(msg)
			}
		}
		done <- struct{}{}
	}()
	go func() {
		for b := range acmd.StderrStream() {
			_, err := os.Stderr.Write(b)
			if err != nil {
				msg := fmt.Sprintf("failed to write to Stderr: %v", err)
				// should never happen, just panic.
				panic(msg)
			}
		}
		done <- struct{}{}
	}()
	// wait for printers to finish; as a defer this runs on every return path
	// below, including the error returns.
	defer func() { _, _ = <-done, <-done }()
	// wait for command to finish
	err = acmd.Wait()
	if err != nil {
		return err
	}
	if acmd.ExitCode() != 0 {
		return fmt.Errorf("exited with code %d", acmd.ExitCode())
	}
	return nil
}
|
package handler
import (
"context"
proto "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/core/v1"
)
// WechatBuildOAuthURL builds the WeChat web OAuth authorization URL for the
// requested redirect target and state, storing it in the response.
func (j *JinmuHealth) WechatBuildOAuthURL(ctx context.Context, req *proto.WechatBuildOAuthURLRequest, resp *proto.WechatBuildOAuthURLResponse) error {
	w := j.wechat
	resp.AuthCodeUrl = w.BuildOAuthURL(req.AuthRedirectUrl, req.State)
	return nil
}
|
package dao
import (
"github.com/jinzhu/gorm"
)
// IDAware is implemented by persistable objects exposing their identifier.
type IDAware interface {
	GetID() interface{}
	SetID(id interface{})
}
// BaseDAO provides common persistence operations for Model using
// connections obtained from ConnectionManager.
type BaseDAO struct {
	ConnectionManager ConnectionProvider
	Model             interface{}
}
// DAO is the common persistence contract implemented by BaseDAO.
type DAO interface {
	Delete(object IDAware, tx ...*gorm.DB) error
	// DeleteByID returns the database error. BUG FIX: without the error
	// return, BaseDAO's DeleteByID (which returns error) did not satisfy
	// this interface.
	DeleteByID(id interface{}) error
	Insert(object IDAware) error
	Update(object IDAware) error
	GetConnectionManager() ConnectionProvider
}
// Db returns the current database handle from the connection manager.
func (d *BaseDAO) Db() *gorm.DB {
	return d.ConnectionManager.GetDb()
}
// GetConnectionManager exposes the DAO's connection provider.
func (d *BaseDAO) GetConnectionManager() ConnectionProvider {
	return d.ConnectionManager
}
// Delete removes the object, using the optional transaction when supplied
// and the default connection otherwise.
func (d *BaseDAO) Delete(object IDAware, tx ...*gorm.DB) error {
	db := d.ConnectionManager.GetDb()
	if len(tx) > 0 {
		db = tx[0]
	}
	// Return the delete's own error. The previous trailing `return db.Error`
	// consulted the pre-delete handle and was effectively dead code.
	return db.Delete(object).Error
}
// DeleteByID removes the row of d.Model whose primary key equals id,
// returning the database error, if any.
func (d *BaseDAO) DeleteByID(id interface{}) error {
	result := d.ConnectionManager.GetDb().Where("id = ?", id).Delete(d.Model)
	return result.Error
}
|
package main
import (
"fmt"
"log"
"net/http"
"strconv"
)
// main wires the cookie-counting handler and serves on :8080.
func main() {
	http.HandleFunc("/", set)
	http.Handle("/favicon.ico", http.NotFoundHandler())
	// ListenAndServe always returns a non-nil error; surface it instead of
	// silently exiting (the error was previously discarded).
	log.Fatal(http.ListenAndServe(":8080", nil))
}
// set increments a per-client visit counter stored in the "stephens-cookie"
// cookie and reports the new count.
func set(w http.ResponseWriter, r *http.Request) {
	// Look for the cookie first; any retrieval failure (r.Cookie only
	// returns ErrNoCookie) just means a fresh counter. The previous
	// `== http.ErrNoCookie` check left c nil on other errors.
	c, err := r.Cookie("stephens-cookie")
	if err != nil {
		c = &http.Cookie{
			Name:  "stephens-cookie",
			Value: "0",
		}
	}
	vc, err := strconv.Atoi(c.Value)
	if err != nil {
		// BUG FIX: a tampered cookie used to log.Fatalln, killing the whole
		// server; reject just this request instead.
		log.Println("invalid cookie value:", err)
		http.Error(w, "invalid cookie value", http.StatusBadRequest)
		return
	}
	vc++
	c.Value = strconv.Itoa(vc)
	// SetCookie must run before any body write: it adds a header, and
	// headers are flushed on the first write to the body.
	http.SetCookie(w, c)
	fmt.Fprintf(w, "Updated cookie to latest visit count of %s\n", c.Value)
}
|
package session
import (
"bytes"
"encoding/gob"
"errors"
"time"
"github.com/boltdb/bolt"
)
/*
BoltStore is a session storage using bolt. Sessions live in the sessionsName
bucket; their last-used timestamps live in the lastUsedName bucket, and
entries older than maxAge are treated as expired.
*/
type BoltStore struct {
	store        *bolt.DB
	lastUsedName []byte
	sessionsName []byte
	maxAge       time.Duration
}

// TimeStampFormat is the reference layout for timestamp strings.
const TimeStampFormat = "2006-01-02 15:04:05.000"
/*
NewBoltStore returns a BoltStore SessionStorage backed by db. Sessions expire
after maxAge, which must be at least five minutes.
*/
func NewBoltStore(db *bolt.DB, maxAge time.Duration) (*BoltStore, error) {
	if maxAge < 5*time.Minute {
		return nil, errors.New("maxAge duration too short")
	}
	store := &BoltStore{
		store:        db,
		maxAge:       maxAge,
		lastUsedName: []byte("sessionsLastUsed"),
		sessionsName: []byte("sessions"),
	}
	// Make sure both buckets exist up front so the accessors can assume them.
	err := db.Update(func(tx *bolt.Tx) error {
		for _, name := range [][]byte{store.lastUsedName, store.sessionsName} {
			if _, err := tx.CreateBucketIfNotExists(name); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return store, nil
}
/* Interface Functions */
// Close the Store, will also close the bolt db.
func (s *BoltStore) Close() error {
	return s.store.Close()
}
// GC one pass over the BoltStore: drops every session whose last-used
// timestamp is missing, undecodable, or older than maxAge.
//
// NOTE(review): the Delete errors and ForEach's return value are discarded,
// and bolt documents that mutating a bucket during ForEach is not safe —
// confirm this is acceptable here.
func (s *BoltStore) GC() error {
	err := s.store.Update(func(tx *bolt.Tx) error {
		lastUsedBucket := tx.Bucket(s.lastUsedName)
		sessionsBucket := tx.Bucket(s.sessionsName)
		lastUsedBucket.ForEach(func(k, v []byte) error {
			var t time.Time
			err := t.GobDecode(v)
			if err != nil || time.Since(t) > s.maxAge {
				lastUsedBucket.Delete(k)
				sessionsBucket.Delete(k)
			}
			return nil
		})
		return nil
	})
	return err
}
// Get session associated with sid.
//
// Missing, expired, or undecodable sessions all surface as ErrNotFound;
// the error return is reserved for storage failures.
func (s *BoltStore) Get(sid string) (*Session, error) {
	var ses Session
	err := s.store.View(func(tx *bolt.Tx) error {
		lastUsedBucket := tx.Bucket(s.lastUsedName)
		sessionsBucket := tx.Bucket(s.sessionsName)
		bsid := []byte(sid)
		lastUsed := lastUsedBucket.Get(bsid)
		if lastUsed == nil {
			// Unknown sid: leave ses.Values nil so the caller maps it to ErrNotFound.
			return nil
		}
		var t time.Time
		err := t.GobDecode(lastUsed)
		if err != nil || time.Since(t) > s.maxAge {
			// Expired or corrupt timestamp: treat as not found.
			return nil
		}
		sesGob := sessionsBucket.Get(bsid)
		if sesGob == nil {
			return nil
		}
		// Decode errors are deliberately swallowed; Values stays nil on failure.
		ses.Values, _ = ungobValues(sesGob)
		return nil
	})
	if err != nil {
		return nil, err
	}
	if ses.Values == nil {
		return nil, ErrNotFound
	}
	return &ses, nil
}
// Commit session back to storage, refreshing its last-used timestamp and
// persisting its values in a single write transaction.
func (s *BoltStore) Commit(ses *Session) error {
	return s.store.Update(func(tx *bolt.Tx) error {
		bsid := []byte(ses.sid)
		now, err := time.Now().GobEncode()
		if err != nil {
			return err
		}
		if err := tx.Bucket(s.lastUsedName).Put(bsid, now); err != nil {
			return err
		}
		encoded, err := gobValues(ses.Values)
		if err != nil {
			return err
		}
		return tx.Bucket(s.sessionsName).Put(bsid, encoded)
	})
}
// gobValues encodes a map[string]string to its gob representation.
func gobValues(v map[string]string) ([]byte, error) {
	var buf bytes.Buffer
	err := gob.NewEncoder(&buf).Encode(v)
	return buf.Bytes(), err
}
// ungobValues decodes a gobbed map[string]string back into a map.
func ungobValues(v []byte) (map[string]string, error) {
	var values map[string]string
	err := gob.NewDecoder(bytes.NewReader(v)).Decode(&values)
	return values, err
}
// Delete session from storage, removing both its timestamp and its values
// in one write transaction.
func (s *BoltStore) Delete(ses *Session) error {
	return s.store.Update(func(tx *bolt.Tx) error {
		bsid := []byte(ses.sid)
		if err := tx.Bucket(s.lastUsedName).Delete(bsid); err != nil {
			return err
		}
		return tx.Bucket(s.sessionsName).Delete(bsid)
	})
}
|
package bootstrap
import (
"database/sql"
_ "github.com/mattn/go-sqlite3"
"github.com/superbet-group/code-cadets-2021/homework_4/02_bets_api/cmd/config"
)
// Sqlite bootstraps the SQLite database connection from the configured
// database path. It panics when the connection cannot be opened.
// (The previous comment incorrectly described a RabbitMQ connection.)
func Sqlite() *sql.DB {
	db, err := sql.Open("sqlite3", config.Cfg.SqliteDatabase)
	if err != nil {
		panic(err)
	}
	return db
}
|
package main
import (
"crypto/md5"
"encoding/json"
"errors"
"os"
"sort"
)
// combine merges the exercises from all input files into a single sheet,
// de-duplicating tasks by the MD5 hash of their LaTeX source, and writes
// the result to stdout in the configured output format.
func combine() error {
	// Decode every input file into its own map of exercises.
	inputMaps := make([]map[int]exercice, len(inputFiles))
	for i, f := range inputFiles {
		m := map[int]exercice{}
		file, err := os.Open(f)
		if err != nil {
			return err
		}
		err = json.NewDecoder(file).Decode(&m)
		// Close promptly: previously the files were never closed, and a
		// defer here would hold every file open until combine returns.
		file.Close()
		if err != nil {
			return err
		}
		inputMaps[i] = m
	}
	targetMap := make(map[int]*exercice)
	hashMap := make(map[int]map[string]bool) // exercise id -> set of task hashes seen
	count := make([]int, len(inputMaps))     // tasks contributed per input file
	for mi, m := range inputMaps {
		for ei, e := range m {
			target := targetMap[ei]
			if target == nil {
				target = &exercice{
					LaTeX:  e.LaTeX,
					Number: e.Number,
					Tasks:  []task{},
				}
			}
			// All input files must agree on the exercise text.
			if target.LaTeX != e.LaTeX {
				return errors.New("Exercise mismatch")
			}
			for _, t := range e.Tasks {
				h := md5.Sum([]byte(t.LaTeX))
				hStr := string(h[:])
				if hashMap[ei] == nil {
					hashMap[ei] = make(map[string]bool)
				}
				// Only keep tasks whose LaTeX has not been seen yet for
				// this exercise.
				if !hashMap[ei][hStr] {
					count[mi]++
					target.Tasks = append(target.Tasks, t)
					hashMap[ei][hStr] = true
				}
			}
			targetMap[ei] = target
		}
	}
	targetArr := []exercice{}
	for _, e := range targetMap {
		if e == nil {
			continue
		}
		targetArr = append(targetArr, *e)
	}
	sort.Sort(exerciceArray(targetArr))
	sheets := []sheet{
		{Exercices: targetArr},
	}
	var makeFunc outputFunction
	switch outputFormat {
	case "tex":
		makeFunc = outputFunction(makeLaTeX)
	case "json":
		makeFunc = outputFunction(makeJSON)
	default:
		// Previously any other format left makeFunc nil and the call
		// below panicked with a nil-function dereference.
		return errors.New("unknown output format: " + outputFormat)
	}
	return makeFunc(sheets, os.Stdout)
}
// exerciceArray implements sort.Interface, ordering exercises by Number.
type exerciceArray []exercice

// Len reports the number of exercises.
func (a exerciceArray) Len() int { return len(a) }

// Swap exchanges the exercises at positions i and j.
func (a exerciceArray) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// Less orders exercises by ascending exercise number.
func (a exerciceArray) Less(i, j int) bool { return a[i].Number < a[j].Number }
|
package pgsql
import (
"testing"
)
// TestTime round-trips PostgreSQL "time" values through three scanner
// configurations: the package default, TimeToString and TimeToByteSlice.
// Each testdata entry pairs the value sent with the value expected back.
func TestTime(t *testing.T) {
	testlist2{{
		// Default scanner: native time values, with and without
		// fractional seconds.
		data: []testdata{
			{
				input: timeval(21, 5, 33, 0),
				output: timeval(21, 5, 33, 0)},
			{
				input: timeval(4, 5, 6, 789),
				output: timeval(4, 5, 6, 789)},
		},
	}, {
		// TimeToString scanner: the same values as "HH:MM:SS[.fff]" strings.
		scanner: TimeToString,
		data: []testdata{
			{
				input: string("21:05:33"),
				output: string("21:05:33")},
			{
				input: string("04:05:06.789"),
				output: string("04:05:06.789")},
		},
	}, {
		// TimeToByteSlice scanner: the same values as byte slices.
		scanner: TimeToByteSlice,
		data: []testdata{
			{
				input: []byte("21:05:33"),
				output: []byte("21:05:33")},
			{
				input: []byte("04:05:06.789"),
				output: []byte("04:05:06.789")},
		},
	}}.execute(t, "time")
}
|
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsubwrapper
import (
"context"
"time"
)
// Interface wrappers over a pub/sub client. Each interface includes an
// unexported embedToIncludeNewMethods method; its name indicates that
// implementations are expected to embed the interface so methods can be
// added later without breaking implementors.
type (
	// Client creates, looks up and lists topics and subscriptions.
	Client interface {
		CreateTopic(ctx context.Context, topicID string) (Topic, error)
		Topic(id string) Topic
		Topics(ctx context.Context) ([]Topic, error)
		CreateSubscription(ctx context.Context, id string, cfg SubscriptionConfig) (Subscription, error)
		Subscription(id string) Subscription
		Subscriptions(ctx context.Context) ([]Subscription, error)
		embedToIncludeNewMethods()
	}
	// Topic publishes messages and manages the topic's lifecycle.
	Topic interface {
		String() string
		Publish(ctx context.Context, msg Message) PublishResult
		Exists(ctx context.Context) (bool, error)
		Delete(ctx context.Context) error
		embedToIncludeNewMethods()
	}
	// Subscription delivers messages to a callback via Receive.
	Subscription interface {
		String() string
		Exists(ctx context.Context) (bool, error)
		Receive(ctx context.Context, f func(context.Context, Message)) error
		Delete(ctx context.Context) error
		embedToIncludeNewMethods()
	}
	// Message is a received pub/sub message with ack/nack controls.
	Message interface {
		ID() string
		Data() []byte
		Attributes() map[string]string
		PublishTime() time.Time
		Ack()
		Nack()
		embedToIncludeNewMethods()
	}
	// PublishResult reports the server-assigned ID of a published message.
	PublishResult interface {
		Get(ctx context.Context) (serverID string, err error)
		embedToIncludeNewMethods()
	}
)
|
package game
import (
"errors"
"fmt"
"github.com/smallgamefish/BreakBricks/protoc/github.com/smallgamefish/BreakBricks/protoc"
"net"
"sync"
)
const (
	// MaxRoomNumber caps how many rooms the server will host at once.
	MaxRoomNumber = 500
)
// RoomManage is the package-wide room manager singleton.
//
// It is fully initialized at package load time. The previous init()
// nil-check was redundant: package initialization runs exactly once, and
// RoomManage was always nil at that point. conn stays nil until SetConn.
var RoomManage = &roomManage{roomMap: make(map[string]*Room)}
// roomManage owns every active game room plus the shared UDP server
// connection. The embedded RWMutex guards roomMap.
type roomManage struct {
	conn *net.UDPConn //the single UDP server connection
	roomMap map[string]*Room //maps room id to its Room
	sync.RWMutex
}
// SetConn stores the UDP server connection handed to newly created rooms.
//
// The write is taken under the manager's lock because AddRoom reads
// m.conn while holding the same lock; the previous unguarded write was a
// data race.
func (m *roomManage) SetConn(conn *net.UDPConn) {
	m.Lock()
	defer m.Unlock()
	m.conn = conn
}
// AddRoom creates a room with the given id and starts its event loop.
// It returns an error when the room already exists or the server is at
// capacity.
func (m *roomManage) AddRoom(roomId string) error {
	m.Lock()
	defer m.Unlock()
	if _, ok := m.roomMap[roomId]; ok {
		return errors.New("房间已经存在")
	}
	// Use >= (was ==) so the cap holds even if the map ever exceeds the
	// limit; fmt.Errorf replaces errors.New(fmt.Sprintf(...)).
	if len(m.roomMap) >= MaxRoomNumber {
		return fmt.Errorf("服务器目前最多只支持%d个房间", MaxRoomNumber)
	}
	newRoom := NewRoom(roomId, m.conn)
	m.roomMap[roomId] = newRoom
	// Run the room's event loop on its own goroutine.
	go newRoom.Run()
	return nil
}
// JoinRoom adds a player to the room identified by roomId.
func (m *roomManage) JoinRoom(roomId string, player *net.UDPAddr) error {
	r, err := m.GetRoom(roomId)
	if err == nil {
		// Hand the new player to the room's event loop.
		r.getJoinChan() <- NewPlayer(player)
	}
	return err
}
// LeaveRoom removes a player from the room identified by roomId.
func (m *roomManage) LeaveRoom(roomId string, player *net.UDPAddr) error {
	r, err := m.GetRoom(roomId)
	if err == nil {
		r.getLeaveChan() <- NewPlayer(player)
	}
	return err
}
// BroadcastFrameData forwards a frame-data event to every player in the
// room named by the event.
func (m *roomManage) BroadcastFrameData(event *protoc.ClientSendMsg_FrameDataEvent) error {
	roomId := event.FrameDataEvent.GetRoomId()
	room, err := m.GetRoom(roomId)
	if err != nil {
		return err
	}
	msg := &protoc.ClientAcceptMsg{
		Code: protoc.ClientAcceptMsg_Success,
		Event: &protoc.ClientAcceptMsg_FrameDataEvent{
			FrameDataEvent: &protoc.FrameDataEvent{
				FrameData: event.FrameDataEvent.GetFrameData(),
				RoomId:    roomId,
			},
		},
	}
	room.getBroadcastChan() <- msg
	return nil
}
// ReadyRoom records a player's ready state in the given room.
func (m *roomManage) ReadyRoom(roomId string, player *net.UDPAddr, ready bool) error {
	room, err := m.GetRoom(roomId)
	if err != nil {
		return err
	}
	p := NewPlayer(player)
	p.ready = ready
	room.getReadyChan() <- p
	return nil
}
// UpdatePlayerLastAcceptPingTime records that a ping was received from
// the player so the room keeps treating the connection as alive.
func (m *roomManage) UpdatePlayerLastAcceptPingTime(roomId string, player *net.UDPAddr) error {
	r, err := m.GetRoom(roomId)
	if err == nil {
		r.getPingActivePlayerChan() <- NewPlayer(player)
	}
	return err
}
// deleteRoom removes a room from the manager's map.
func (m *roomManage) deleteRoom(roomId string) {
	m.Lock()
	defer m.Unlock()
	// delete is a no-op for absent keys, so the previous existence check
	// was redundant.
	delete(m.roomMap, roomId)
}
// GetRoom looks up a room by id under the read lock, returning an error
// when no such room exists.
func (m *roomManage) GetRoom(roomId string) (*Room, error) {
	m.RLock()
	defer m.RUnlock()
	room, ok := m.roomMap[roomId]
	if !ok {
		return nil, errors.New("房间找不到")
	}
	return room, nil
}
|
package core
const (
	// IndexType marks a regular (non-unique) index.
	IndexType = iota + 1
	// UniqueType marks a unique index.
	UniqueType
)
// Index describes a database index: its name, kind (IndexType or
// UniqueType) and the columns it covers.
type Index struct {
	Name string
	Type int
	Cols []string
}

// AddColumn appends columns to the index, enabling composite indexes.
func (index *Index) AddColumn(cols ...string) {
	// Append the whole variadic slice at once instead of the previous
	// element-by-element loop.
	index.Cols = append(index.Cols, cols...)
}

// NewIndex creates an index with the given name and type and no columns.
func NewIndex(name string, indexType int) *Index {
	return &Index{Name: name, Type: indexType, Cols: make([]string, 0)}
}
|
package main
import (
"database/sql"
"errors"
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/jessevdk/go-flags"
_ "github.com/lib/pq"
_ "github.com/mattn/go-sqlite3"
"github.com/alokmenghrajani/sqlc/sqlc"
"log"
"os"
)
// VERSION is the release version reported by the --version flag.
var VERSION string = "0.1.5"

// opts receives the command-line options parsed by go-flags.
var opts sqlc.Options
var parser = flags.NewParser(&opts, flags.Default)

func init() {
	// Route the --version flag to our print-and-exit handler.
	opts.Version = printVersionAndExit
}
// main parses the command line, validates the options, opens the
// configured database and runs the sqlc code generator.
func main() {
	if _, err := parser.Parse(); err != nil {
		os.Exit(1)
	}
	// log.Fatal terminates the process itself, so the os.Exit(1) calls
	// that previously followed each log.Fatal were unreachable.
	if err := opts.Validate(); err != nil {
		log.Fatal(err)
	}
	db, dialect, err := dataSource()
	if err != nil {
		log.Fatal(err)
	}
	opts.Dialect = dialect
	if err := sqlc.Generate(db, VERSION, &opts); err != nil {
		log.Fatal(err)
	}
}
// dataSource opens the database selected by the parsed options and
// returns it together with the detected SQL dialect.
func dataSource() (*sql.DB, sqlc.Dialect, error) {
	dialect, err := opts.DbType()
	if err != nil {
		return nil, sqlc.Sqlite, err
	}
	var driver, dsn string
	switch dialect {
	case sqlc.Sqlite:
		driver, dsn = "sqlite3", opts.File
	case sqlc.MySQL:
		driver, dsn = "mysql", opts.Url
	case sqlc.Postgres:
		driver, dsn = "postgres", opts.Url
	default:
		return nil, sqlc.Sqlite, errors.New("Invalid Db type")
	}
	db, err := sql.Open(driver, dsn)
	return db, dialect, err
}
// printVersionAndExit writes the program name and version to stderr and
// terminates with exit status 0. It is installed as the go-flags
// --version handler in init.
func printVersionAndExit() {
	fmt.Fprintf(os.Stderr, "%s %s\n", "sqlc", VERSION)
	os.Exit(0)
}
|
package testdata
import (
"github.com/taktakty/netlabi/models"
)
// HostOSTestData seeds eight host OS fixtures. Entries 6-8 include
// "for search" in their names so search tests can match just that subset.
var HostOSTestData = []models.HostOS{
	{
		Name: "test name 1",
		Note: "test note 1",
	},
	{
		Name: "test name 2",
		Note: "test note 2",
	},
	{
		Name: "test name 3",
		Note: "test note 3",
	},
	{
		Name: "test name 4",
		Note: "test note 4",
	},
	{
		Name: "test name 5",
		Note: "test note 5",
	},
	{
		Name: "test name 6 for search",
		Note: "test note 6",
	},
	{
		Name: "test name 7 for search",
		Note: "test note 7",
	},
	{
		Name: "test name 8 for search",
		Note: "test note 8",
	},
}
// HostOSParamStruct is the input shape used by host OS tests.
type HostOSParamStruct struct {
	Name string
	Status int
	Protocol int
	Note string
}

// HostOSRespStruct mirrors the fields of a host OS response object.
type HostOSRespStruct struct {
	ID string
	CreatedAt string
	UpdatedAt string
	DeletedAt *string
	Name string
	Note string
}

// HostOSResp is the field selection string used when querying host OS.
var HostOSResp = `{ id createdAt updatedAt deletedAt name note }`
|
package examples
import (
"fmt"
"github.com/corbym/gocrest/then"
"testing"
"github.com/corbym/gocrest/is"
"github.com/cybernostics/cntest"
)
// TestContainer starts the hello-world image, waits up to 10 (units per
// cntest's AwaitExit) for it to exit, and verifies it produced log output.
func TestContainer(t *testing.T) {
	cnt := cntest.NewContainer().WithImage("hello-world:latest")
	name, err := cnt.Start()
	// Remove the container even if an assertion below fails.
	defer cnt.Remove()
	then.AssertThat(t, err, is.Nil())
	then.AssertThat(t, len(name), is.GreaterThan(0))
	ok, err := cnt.AwaitExit(10)
	then.AssertThat(t, err, is.Nil())
	then.AssertThat(t, ok, is.True())
	logs, err := cnt.Logs()
	then.AssertThat(t, err, is.Nil())
	then.AssertThat(t, len(logs), is.GreaterThan(0))
	fmt.Printf("Logs %s\n", logs)
}
|
/*
Imagine a very simple language. It has just 2 syntax features: () indicates a block scope, and any word consisting only of 1 or more lower case ASCII letters, which indicates an identifier. There are no keywords.
In this language, the value of identifiers is not important except when they appear multiple times. Thus for golfing purposes it makes sense to give them names that are as short as possible. A variable is "declared" when it is first used.
The goal of this challenge is to take a program, either as a string or as a ragged list, and make the identifiers as short as possible. The first identifier (and all its references) should be re-named to a, the next to b, and so on. There will never be more than 26 identifiers.
Each set of () encloses a scope. A scope can access variables that were defined earlier in an enclosing scope, but not those created in child or sibling scopes. Thus if we have the program (bad (cab) (face)) the minimized form is (a (b) (b)). A variable belongs to the scope in which it is first used. When that scope ends the variable is deleted.
In summary:
If a variable name has appeared in the scope or enclosing scopes before, re-use the letter
Else create a new letter inside the current scope
At the end of a scope delete all variables created inside the scope.
Test cases
{
"(rudolf)": "(a)",
"(mousetail mousetail)": "(a a)",
"(cart fish)": "(a b)",
"(no and no)": "(a b a)",
"(burger (and fries))": "(a (b c))",
"(burger (or burger))": "(a (b a))",
"(let (bob and) (bob let))": "(a (b c) (b a))",
"(let (a (fish (let))))": "(a (b (c (a))))",
"(kor (kor kor) (kor kor))": "(a (a a) (a a))",
"((kor) kor)": "((a) a)",
"(aa (ab ac ad) (ad ad) ad)": "(a (b c d) (b b) b)",
"(aa not (ab ac ad) (ad ad))":"(a b (c d e) (c c))",
"(((((do) re) mi) fa) so)": "(((((a) a) a) a) a)",
"(do (re (mi (fa (so)))))": "(a (b (c (d (e)))))",
"((mark sam) sam)": "((a b) a)",
}
IO
You can take input as either a string or ragged array.
You can give output either as a string or ragged array.
However, you must use the same format for input and output. Specifically, you need to produce output in such a way that it would also be a valid input. Applying the function or program more than once always has the same result as applying it once.
Neither scopes nor variable names may be empty. Applying your program to its result again should be a no-op.
*/
package main
import (
"bytes"
"fmt"
"unicode"
)
// main exercises minify against every test case from the challenge
// specification; assert panics on the first failure.
//
// The previous version repeated twelve of these assertions verbatim a
// second time; the duplicates have been removed.
func main() {
	assert(minify("(rudolf)") == "(a)")
	assert(minify("(mousetail mousetail)") == "(a a)")
	assert(minify("(cart fish)") == "(a b)")
	assert(minify("(no and no)") == "(a b a)")
	assert(minify("(burger (and fries))") == "(a (b c))")
	assert(minify("(burger (or burger))") == "(a (b a))")
	assert(minify("(let (bob and) (bob let))") == "(a (b c) (b a))")
	assert(minify("(let (a (fish (let))))") == "(a (b (c (a))))")
	assert(minify("(kor (kor kor) (kor kor))") == "(a (a a) (a a))")
	assert(minify("((kor) kor)") == "((a) a)")
	assert(minify("(aa (ab ac ad) (ad ad) ad)") == "(a (b c d) (b b) b)")
	assert(minify("(aa not (ab ac ad) (ad ad))") == "(a b (c d e) (c c))")
	assert(minify("(((((do) re) mi) fa) so)") == "(((((a) a) a) a) a)")
	assert(minify("(do (re (mi (fa (so)))))") == "(a (b (c (d (e)))))")
	assert(minify("((mark sam) sam)") == "((a b) a)")
}
// assert panics when its condition is false.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// minify rewrites every identifier in the program s to the shortest
// available letter while respecting lexical scoping: a name already bound
// in a visible scope reuses that binding's letter; otherwise a new letter
// is allocated in the current scope (see mapto).
//
// The input is streamed rune by rune: identifier characters accumulate in
// b, and whenever a delimiter (whitespace or parenthesis) is reached the
// pending identifier is renamed via mapto and written to w. p is the
// stack of open scopes, each mapping original names to assigned letters.
func minify(s string) string {
	w := new(bytes.Buffer)
	b := new(bytes.Buffer)
	p := []map[string]rune{}
	for _, r := range s {
		if !(unicode.IsSpace(r) || r == '(' || r == ')') {
			// Part of an identifier: keep accumulating.
			b.WriteRune(r)
			continue
		}
		// Flush the pending identifier (if any), then the delimiter.
		fmt.Fprintf(w, "%s%c", mapto(b, p), r)
		switch r {
		case '(':
			// Open a fresh innermost scope.
			p = append(p, make(map[string]rune))
		case ')':
			// Close the innermost scope, discarding its bindings.
			if n := len(p); n > 0 {
				p = p[:n-1]
			}
		}
	}
	// Flush an identifier that ends the input without a trailing delimiter.
	fmt.Fprint(w, mapto(b, p))
	return w.String()
}
// mapto consumes the identifier accumulated in b and returns its minified
// name, updating the scope stack p.
//
// Resolution walks the scopes innermost-first: if the name is already
// bound in any visible scope its letter is reused. Otherwise a new letter
// is bound in the innermost scope; the letter's offset from 'a' is the
// total number of bindings currently visible, which is the next unused
// letter. Empty input, or input outside any scope, is returned unchanged.
func mapto(b *bytes.Buffer, p []map[string]rune) string {
	s := b.String()
	d := len(p) - 1
	b.Reset() // the caller reuses b for the next identifier
	if s == "" || d < 0 {
		return s
	}
	v := 0
	for n := d; n >= 0; n-- {
		if p[n][s] != 0 {
			return string(p[n][s])
		}
		// Count this scope's bindings toward the next free letter.
		v += len(p[n])
	}
	p[d][s] = rune('a' + v)
	return string(p[d][s])
}
|
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package handle
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestInsertAndDelete checks that sweepList unlinks deleted collectors
// from the Handle's linked list in every position (head, middle, tail)
// and that the list becomes empty once all collectors are deleted.
// Collectors are prepended, so items[4] is the list head.
func TestInsertAndDelete(t *testing.T) {
	h := Handle{
		listHead: &SessionStatsCollector{mapper: make(tableDeltaMap)},
	}
	var items []*SessionStatsCollector
	for i := 0; i < 5; i++ {
		items = append(items, h.NewSessionStatsCollector())
	}
	items[0].Delete() // delete tail
	items[2].Delete() // delete middle
	items[4].Delete() // delete head
	h.sweepList()
	require.Equal(t, items[3], h.listHead.next)
	require.Equal(t, items[1], items[3].next)
	require.Nil(t, items[1].next)
	// delete rest
	items[1].Delete()
	items[3].Delete()
	h.sweepList()
	require.Nil(t, h.listHead.next)
}
|
package transform
import (
"bytes"
"errors"
"image"
_ "image/draw"
_ "image/gif"
"image/jpeg"
_ "image/png"
)
// JpegQuality is the quality setting used for all JPEG encoding.
var JpegQuality = 100

// ErrUnknownFormat is returned when image data cannot be decoded.
var ErrUnknownFormat = errors.New("can't decode: unknown image format")

// Img converts between raw bytes and decoded images. Decoding supports
// the formats registered via the blank imports; encoding always emits JPEG.
type Img struct {
}

// Encode renders img as JPEG bytes at JpegQuality.
func (Img) Encode(img image.Image) ([]byte, error) {
	var out bytes.Buffer
	if err := jpeg.Encode(&out, img, &jpeg.Options{Quality: JpegQuality}); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}

// Decode parses data into an image, returning ErrUnknownFormat when the
// format cannot be determined.
func (Img) Decode(data []byte) (image.Image, error) {
	img, format, err := image.Decode(bytes.NewReader(data))
	if err != nil {
		return nil, err
	}
	if format == "" || img == nil {
		return nil, ErrUnknownFormat
	}
	return img, nil
}
|
package destroy
import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/openshift/installer/pkg/asset/cluster"
"github.com/openshift/installer/pkg/destroy/providers"
)
// New returns a Destroyer based on `metadata.json` in `rootDir`.
//
// It loads the cluster metadata, reads the platform the cluster was
// installed on, and dispatches to the destroyer factory registered for
// that platform in providers.Registry.
func New(logger logrus.FieldLogger, rootDir string) (providers.Destroyer, error) {
	metadata, err := cluster.LoadMetadata(rootDir)
	if err != nil {
		return nil, err
	}
	platform := metadata.Platform()
	if platform == "" {
		return nil, errors.New("no platform configured in metadata")
	}
	creator, ok := providers.Registry[platform]
	if !ok {
		return nil, errors.Errorf("no destroyers registered for %q", platform)
	}
	return creator(logger, metadata)
}
|
package models
import "github.com/dancewing/revel/orm"
// YysAccount is a player account with contact details and a level.
type YysAccount struct {
	ID int `orm:"pk;auto"`
	Email, Name, Comment string
	Level int
}

// YysCards records how many of a given card (at a given level) an
// account owns.
type YysCards struct {
	ID int `orm:"pk;auto"`
	Card string
	Level string
	Quantity int
	Account *YysAccount `orm:"rel(fk);column(yys_account_id)"`
}

// YysRole is a named role.
type YysRole struct {
	ID int `orm:"pk;auto"`
	RoleName string
}

// YysSupian links an account and a role with a quantity.
type YysSupian struct {
	ID int `orm:"pk;auto"`
	Quantity int
	Account *YysAccount `orm:"rel(fk);column(yys_account_id)"`
	Role *YysRole `orm:"rel(fk)"`
}
// init registers every model with the ORM so it can map them to tables.
func init() {
	orm.RegisterModel(new(YysAccount))
	orm.RegisterModel(new(YysCards))
	orm.RegisterModel(new(YysRole))
	orm.RegisterModel(new(YysSupian))
}
|
/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package primitive
import (
"fmt"
"io/ioutil"
"net/http"
"os"
"os/user"
"path"
"strings"
"time"
"github.com/apache/rocketmq-client-go/v2/rlog"
)
// NsResolver resolves the list of name server addresses and can describe
// itself for logging. Implementations monitor name server changes and
// notify the client; consul- or domain-based resolution is typical.
type NsResolver interface {
	Resolve() []string
	Description() string
}
// StaticResolver is a placeholder resolver type.
// NOTE(review): it has no methods and is not referenced in this file —
// confirm whether it can be removed.
type StaticResolver struct {
}

// Compile-time check that *EnvResolver implements NsResolver.
var _ NsResolver = (*EnvResolver)(nil)

// NewEnvResolver returns a resolver backed by the NAMESRV_ADDR
// environment variable.
func NewEnvResolver() *EnvResolver {
	return &EnvResolver{}
}
// EnvResolver reads name server addresses from the NAMESRV_ADDR
// environment variable.
type EnvResolver struct {
}

// Resolve splits NAMESRV_ADDR on ';'. It returns nil when the variable
// is unset or empty.
func (e *EnvResolver) Resolve() []string {
	v, ok := os.LookupEnv("NAMESRV_ADDR")
	if !ok || v == "" {
		return nil
	}
	return strings.Split(v, ";")
}

// Description identifies this resolver for logging.
func (e *EnvResolver) Description() string {
	return "env resolver of var NAMESRV_ADDR"
}
// passthroughResolver returns a fixed address list, deferring to the
// environment resolver when no addresses were supplied.
type passthroughResolver struct {
	addr     []string
	failback NsResolver
}

// NewPassthroughResolver wraps a static address list with an env fallback.
func NewPassthroughResolver(addr []string) *passthroughResolver {
	return &passthroughResolver{addr: addr, failback: NewEnvResolver()}
}

// Resolve returns the configured addresses, or the fallback's result when
// none were configured.
func (p *passthroughResolver) Resolve() []string {
	if p.addr == nil {
		return p.failback.Resolve()
	}
	return p.addr
}

// Description identifies this resolver for logging.
func (p *passthroughResolver) Description() string {
	return fmt.Sprintf("passthrough resolver of %v", p.addr)
}
const (
	// DEFAULT_NAMESRV_ADDR is the default HTTP endpoint used to discover
	// name server addresses.
	// NOTE(review): ALL_CAPS violates Go naming (DefaultNamesrvAddr would
	// be conventional), but renaming this exported constant would break
	// external callers.
	DEFAULT_NAMESRV_ADDR = "http://jmenv.tbsite.net:8080/rocketmq/nsaddr"
)
// Compile-time check that *HttpResolver implements NsResolver.
var _ NsResolver = (*HttpResolver)(nil)

// HttpResolver fetches name server addresses from an HTTP endpoint,
// caches the last successful response in a local snapshot file, and
// falls back to the environment resolver when both are unavailable.
type HttpResolver struct {
	domain string
	instance string
	cli http.Client
	failback NsResolver
}
// NewHttpResolver builds an HTTP-based resolver for the given instance.
// An optional domain argument overrides DEFAULT_NAMESRV_ADDR.
func NewHttpResolver(instance string, domain ...string) *HttpResolver {
	endpoint := DEFAULT_NAMESRV_ADDR
	if len(domain) > 0 {
		endpoint = domain[0]
	}
	return &HttpResolver{
		domain:   endpoint,
		instance: instance,
		cli:      http.Client{Timeout: 10 * time.Second},
		failback: NewEnvResolver(),
	}
}
// DomainWithUnit rewrites the domain to carry a unit name, tagging the
// URL with nofix=1 so a second call leaves it untouched.
func (h *HttpResolver) DomainWithUnit(unitName string) {
	// Nothing to do for an empty unit or an already-rewritten domain.
	if unitName == "" || strings.Contains(h.domain, "?nofix=1") {
		return
	}
	if strings.Contains(h.domain, "?") {
		// Insert the unit before the existing query string.
		h.domain = strings.Replace(h.domain, "?", fmt.Sprintf("-%s?nofix=1&", unitName), 1)
		return
	}
	h.domain = fmt.Sprintf("%s-%s?nofix=1", h.domain, unitName)
}
// Resolve tries the HTTP endpoint first, then the on-disk snapshot, and
// finally the environment fallback. Later sources are only consulted
// when earlier ones yield nothing.
func (h *HttpResolver) Resolve() []string {
	if addrs := h.get(); len(addrs) > 0 {
		return addrs
	}
	if addrs := h.loadSnapshot(); len(addrs) > 0 {
		return addrs
	}
	return h.failback.Resolve()
}
// Description identifies this resolver for logging.
//
// The previous text began "passthrough resolver", copied from
// passthroughResolver.Description; this is the HTTP resolver.
func (h *HttpResolver) Description() string {
	return fmt.Sprintf("http resolver of domain:%v instance:%v", h.domain, h.instance)
}
// get fetches the name server list from the HTTP endpoint. On success the
// body is cached via saveSnapshot and split on ';'. Any failure — network
// error, non-200 status, unreadable or empty body — is logged (where
// meaningful) and yields nil.
func (h *HttpResolver) get() []string {
	resp, err := h.cli.Get(h.domain)
	if resp != nil {
		// Always release the body, including non-200 responses: the old
		// code returned early on a bad status and leaked the connection.
		defer resp.Body.Close()
	}
	if err != nil || resp == nil || resp.StatusCode != 200 {
		data := map[string]interface{}{
			"NameServerDomain": h.domain,
			"err": err,
		}
		if resp != nil {
			data["StatusCode"] = resp.StatusCode
		}
		rlog.Error("name server http fetch failed", data)
		return nil
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		rlog.Error("name server read http response failed", map[string]interface{}{
			"NameServerDomain": h.domain,
			"err": err,
		})
		return nil
	}
	bodyStr := strings.TrimSpace(string(body))
	if bodyStr == "" {
		return nil
	}
	// Best-effort cache of the successful response for loadSnapshot.
	_ = h.saveSnapshot([]byte(bodyStr))
	return strings.Split(bodyStr, ";")
}
// saveSnapshot persists the fetched name server list to the local
// snapshot file so it can be reused when the HTTP endpoint is down.
func (h *HttpResolver) saveSnapshot(body []byte) error {
	filePath := h.getSnapshotFilePath(h.instance)
	if err := ioutil.WriteFile(filePath, body, 0644); err != nil {
		rlog.Error("name server snapshot save failed", map[string]interface{}{
			"filePath": filePath,
			"err": err,
		})
		return err
	}
	rlog.Info("name server snapshot save successfully", map[string]interface{}{
		"filePath": filePath,
	})
	return nil
}
// loadSnapshot reads the last cached name server list from the local
// snapshot file, returning nil when the file is missing or unreadable.
func (h *HttpResolver) loadSnapshot() []string {
	filePath := h.getSnapshotFilePath(h.instance)
	_, err := os.Stat(filePath)
	if os.IsNotExist(err) {
		rlog.Warning("name server snapshot local file not exists", map[string]interface{}{
			"filePath": filePath,
		})
		return nil
	}
	bs, err := ioutil.ReadFile(filePath)
	if err != nil {
		// Read failures are silently treated as "no snapshot".
		return nil
	}
	rlog.Info("load the name server snapshot local file", map[string]interface{}{
		"filePath": filePath,
	})
	return strings.Split(string(bs), ";")
}
// getSnapshotFilePath returns the per-instance snapshot file path under
// the user's home directory, creating the snapshot directory if needed.
// When the home directory cannot be determined, paths are rooted at "/".
func (h *HttpResolver) getSnapshotFilePath(instanceName string) string {
	homeDir := ""
	if usr, err := user.Current(); err == nil {
		homeDir = usr.HomeDir
	} else {
		rlog.Error("name server domain, can't get user home directory", map[string]interface{}{
			"err": err,
		})
	}
	storePath := path.Join(homeDir, "/logs/rocketmq-go/snapshot")
	if _, err := os.Stat(storePath); os.IsNotExist(err) {
		// rlog.Fatal is expected to abort if the directory can't be made.
		if err = os.MkdirAll(storePath, 0755); err != nil {
			rlog.Fatal("can't create name server snapshot directory", map[string]interface{}{
				"path": storePath,
				"err": err,
			})
		}
	}
	filePath := path.Join(storePath, fmt.Sprintf("nameserver_addr-%s", instanceName))
	return filePath
}
|
package main
import (
"fmt"
"io"
"log"
"os"
"sync"
)
// LogLevel enumerates logger verbosity levels.
type LogLevel int

// logger is the package-wide default Log, prefixed with the binary name.
var logger = NewLogger(os.Args[0] + " ")

// LogServerConf carries logging settings supplied by server configuration.
type LogServerConf struct {
	Level string
	Prefix string
	Syslog int
}

const (
	// Levels in increasing order of verbosity.
	LogError LogLevel = iota
	LogInfo
	LogDebug
)

// Log is a leveled logger that writes through one *log.Logger per level.
type Log struct {
	level LogLevel
	prefix string
	syslog bool
	ioWriter io.Writer // NOTE(review): never assigned in this file — confirm before removing
	output [LogDebug + 1]*log.Logger
	l sync.Mutex // guards reconfiguration (prefix, level, flags)
}
// NewLogger creates a Log that writes to stdout with the given prefix,
// defaulting to debug verbosity.
func NewLogger(prefix string) *Log {
	l := &Log{
		level:  LogDebug,
		prefix: prefix,
		syslog: false,
	}
	for lvl := LogError; lvl <= LogDebug; lvl++ {
		l.output[lvl] = log.New(os.Stdout, prefix, 0)
	}
	// Apply the flag configuration for the default level.
	l.SetLevel(l.level)
	return l
}
// GetDefaultLogger returns the package-wide default logger.
// NOTE(review): Go convention omits the Get prefix (DefaultLogger), but
// renaming this exported function would break existing callers.
func GetDefaultLogger() *Log {
	return logger
}
// SetPrefix changes the prefix on every level's underlying logger.
func (p *Log) SetPrefix(prefix string) {
	p.l.Lock()
	defer p.l.Unlock()
	p.prefix = prefix
	for lvl := LogError; lvl <= LogDebug; lvl++ {
		p.output[lvl].SetPrefix(prefix)
	}
}
// SetLevel sets the verbosity and recomputes output flags per level:
// date/time unless syslog supplies timestamps, plus file:line for the
// error logger (and for every logger while at debug level).
func (p *Log) SetLevel(level LogLevel) {
	p.l.Lock()
	defer p.l.Unlock()
	p.level = level
	base := 0
	if !p.syslog {
		base |= log.Ldate | log.Ltime
	}
	if p.level == LogDebug {
		base |= log.Lshortfile
	}
	for i := LogError; i <= LogDebug; i++ {
		// Compute flags fresh per level. Previously the Lshortfile bit
		// added for LogError (the first iteration) was never cleared, so
		// it leaked into the info and debug loggers at every level.
		flag := base
		if i == LogError {
			flag |= log.Lshortfile
		}
		p.output[i].SetFlags(flag)
	}
}
// SetLevelStr sets the level from its string name ("error", "info" or
// "debug"); any other name yields an error.
func (p *Log) SetLevelStr(level string) error {
	names := map[string]LogLevel{
		"error": LogError,
		"info":  LogInfo,
		"debug": LogDebug,
	}
	lvl, ok := names[level]
	if !ok {
		return fmt.Errorf("invalid level:%s", level)
	}
	p.SetLevel(lvl)
	return nil
}
// ApplyConf applies a server logging configuration to this logger and
// panics on an invalid level string.
//
// Bug fix: the previous implementation called SetPrefix/SetLevelStr on
// the package-level `logger` instead of the receiver, so applying a
// configuration to any other Log silently reconfigured the default
// logger and left the receiver untouched.
func (p *Log) ApplyConf(conf LogServerConf) {
	if conf.Prefix != "" {
		p.SetPrefix(conf.Prefix)
	}
	if err := p.SetLevelStr(conf.Level); err != nil {
		panic(err)
	}
}
// Errorf logs at error level with Printf-style formatting. Calldepth 2
// attributes the log line to the caller of Errorf.
func (p *Log) Errorf(format string, v ...interface{}) {
	if p.level >= LogError {
		p.output[LogError].Output(2, fmt.Sprintf(format, v...))
	}
}
// info logs at info level with Printf-style formatting.
// NOTE(review): unexported and missing the f suffix its siblings use;
// renaming could break other files in this package, so it is kept.
func (p *Log) info(format string, v ...interface{}) {
	if p.level >= LogInfo {
		p.output[LogInfo].Output(2, fmt.Sprintf(format, v...))
	}
}
// Debug logs at debug level with Printf-style formatting.
func (p *Log) Debug(format string, v ...interface{}) {
	if p.level >= LogDebug {
		p.output[LogDebug].Output(2, fmt.Sprintf(format, v...))
	}
}
|
/*
* EVE Swagger Interface
*
* An OpenAPI for EVE Online
*
* OpenAPI spec version: 0.2.2
*
* Generated by: https://github.com/swagger-api/swagger-codegen.git
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package goesi
import (
"time"
)
// GetCharactersCharacterIdWalletsJournal200OkObject is one wallet
// journal transaction entry as returned by the EVE Swagger Interface
// (generated model; field names mirror the API's snake_case keys,
// including the API's "reciever" spelling in tax_reciever_id).
type GetCharactersCharacterIdWalletsJournal200OkObject struct {
	// argument_name string
	ArgumentName string `json:"argument_name,omitempty"`
	// argument_value integer
	ArgumentValue int32 `json:"argument_value,omitempty"`
	// first_party_id integer
	FirstPartyId int32 `json:"first_party_id,omitempty"`
	// first_party_type string
	FirstPartyType string `json:"first_party_type,omitempty"`
	// post_transaction_balance integer
	PostTransactionBalance int64 `json:"post_transaction_balance,omitempty"`
	// reason string
	Reason string `json:"reason,omitempty"`
	// ref_id integer
	RefId int64 `json:"ref_id,omitempty"`
	// ref_type string
	RefType string `json:"ref_type,omitempty"`
	// second_party_id integer
	SecondPartyId int32 `json:"second_party_id,omitempty"`
	// second_party_type string
	SecondPartyType string `json:"second_party_type,omitempty"`
	// tax_amount integer
	TaxAmount int64 `json:"tax_amount,omitempty"`
	// tax_reciever_id integer
	TaxRecieverId int32 `json:"tax_reciever_id,omitempty"`
	// Positive if transferred to first party, negative if transferred to second party
	TransactionAmount int64 `json:"transaction_amount,omitempty"`
	// transaction_date string
	TransactionDate time.Time `json:"transaction_date,omitempty"`
}
|
/*
Copyright 2019 The Yingxi.company Authors. All rights reserved.
Go
Error
*/
package errno
// Predefined business error codes shared across the service.
var (
	// OK indicates success.
	OK = &Errno{Code: 0, Message: "OK"}
	// InternalServerError indicates an unexpected server-side failure.
	// Message typo fixed: "Internal server errno" -> "Internal server error".
	InternalServerError = &Errno{Code: 10001, Message: "Internal server error"}
)
|
// Package viztransform is the parent package of all viztransform packages.
//
// These include geometry, transform, and parse along with all commands defined
// in cmd.
package viztransform
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package staleread_test
import (
"context"
"testing"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/auth"
"github.com/pingcap/tidb/testkit"
"github.com/stretchr/testify/require"
)
// TestReadWriteExternalTimestamp verifies that the global
// tidb_external_ts system variable starts at 0 and can be updated twice.
func TestReadWriteExternalTimestamp(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustQuery("select @@tidb_external_ts").Check(testkit.Rows("0"))
	tk.MustExec("set global tidb_external_ts=19980613")
	tk.MustQuery("select @@tidb_external_ts").Check(testkit.Rows("19980613"))
	tk.MustExec("set global tidb_external_ts=20220930")
	tk.MustQuery("select @@tidb_external_ts").Check(testkit.Rows("20220930"))
}
// TestExternalTimestampRead checks that with tidb_enable_external_ts_read
// enabled, reads are served as of tidb_external_ts, hiding rows inserted
// after that timestamp; disabling the switch makes them visible again.
func TestExternalTimestampRead(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table t (id INT NOT NULL,d double,PRIMARY KEY (id))")
	tk.MustExec("insert into t values (0, 100)")
	tk.MustExec("insert into t values (1, 100)")
	tk.MustExec("insert into t values (2, 100)")
	tk.MustExec("insert into t values (3, 100)")
	tk.MustQuery("select * from t").Check(testkit.Rows("0 100", "1 100", "2 100", "3 100"))
	tk.MustQuery("select @@tidb_external_ts").Check(testkit.Rows("0"))
	// Pin tidb_external_ts to the current transaction's timestamp.
	tk.MustExec("start transaction;set global tidb_external_ts=@@tidb_current_ts;commit;")
	tk.MustExec("insert into t values (4, 100)")
	// as the `tidb_external_ts` is set an old value, the newest row (4, 100) cannot be read
	tk.MustExec("set tidb_enable_external_ts_read=ON")
	tk.MustQuery("select * from t").Check(testkit.Rows("0 100", "1 100", "2 100", "3 100"))
	tk.MustExec("set tidb_enable_external_ts_read=OFF")
	tk.MustQuery("select * from t").Check(testkit.Rows("0 100", "1 100", "2 100", "3 100", "4 100"))
}
// TestExternalTimestampReadonly checks that enabling
// tidb_enable_external_ts_read makes the session read-only for user SQL,
// while internal (restricted) SQL remains able to write.
func TestExternalTimestampReadonly(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	tk.MustExec("use test")
	tk.MustExec("create table t (id INT NOT NULL,PRIMARY KEY (id))")
	tk.MustQuery("select @@tidb_external_ts").Check(testkit.Rows("0"))
	// Pin tidb_external_ts to the current transaction's timestamp.
	tk.MustExec("start transaction;set global tidb_external_ts=@@tidb_current_ts;commit;")
	// with tidb_enable_external_ts_read enabled, this session will be readonly
	tk.MustExec("set tidb_enable_external_ts_read=ON")
	_, err := tk.Exec("insert into t values (0)")
	require.Error(t, err)
	tk.MustExec("set tidb_enable_external_ts_read=OFF")
	tk.MustExec("insert into t values (0)")
	// even when tidb_enable_external_ts_read is enabled, internal SQL will not be affected
	tk.MustExec("set tidb_enable_external_ts_read=ON")
	tk.Session().GetSessionVars().InRestrictedSQL = true
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers)
	tk.MustExecWithContext(ctx, "insert into t values (1)")
	tk.Session().GetSessionVars().InRestrictedSQL = false
}
// TestExternalTimestampReadWithTransaction checks the interaction between
// tidb_enable_external_ts_read and explicit transactions: the setting
// applies to transactions started after it changes, but toggling it does
// not affect a transaction already in progress.
func TestExternalTimestampReadWithTransaction(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	tk.MustExec("use test")
	tk.MustExec("create table t (id INT NOT NULL,PRIMARY KEY (id))")
	tk.MustQuery("select @@tidb_external_ts").Check(testkit.Rows("0"))
	// Pin tidb_external_ts to a timestamp taken before the insert below.
	tk.MustExec("start transaction;set global tidb_external_ts=@@tidb_current_ts;commit;")
	tk.MustExec("insert into t values (0)")
	tk.MustQuery("select * from t").Check(testkit.Rows("0"))
	tk.MustExec("set tidb_enable_external_ts_read=ON")
	tk.MustQuery("select * from t").Check(testkit.Rows())
	tk.MustExec("start transaction")
	tk.MustQuery("select * from t").Check(testkit.Rows())
	tk.MustExec("commit")
	tk.MustExec("set tidb_enable_external_ts_read=OFF")
	tk.MustExec("start transaction")
	tk.MustQuery("select * from t").Check(testkit.Rows("0"))
	tk.MustExec("commit")
	tk.MustExec("start transaction")
	tk.MustQuery("select * from t").Check(testkit.Rows("0"))
	tk.MustExec("set tidb_enable_external_ts_read=ON")
	// `tidb_enable_external_ts_read` doesn't affect existing transaction
	tk.MustQuery("select * from t").Check(testkit.Rows("0"))
	tk.MustExec("set tidb_enable_external_ts_read=OFF")
	tk.MustExec("commit")
}
// TestExternalTimestampNotAffectPrepare verifies that prepared statements
// honour tidb_enable_external_ts_read at execute time: the same prepared
// SELECT returns the stale snapshot while the variable is on and the
// latest rows once it is off, i.e. the read timestamp is chosen per
// execution rather than captured at prepare time.
func TestExternalTimestampNotAffectPrepare(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	tk.MustExec("use test")
	tk.MustExec("create table t (id INT NOT NULL,PRIMARY KEY (id))")
	tk.MustExec("insert into t values (0)")
	tk.MustQuery("select * from t").Check(testkit.Rows("0"))
	tk.MustQuery("select @@tidb_external_ts").Check(testkit.Rows("0"))
	// Pin tidb_external_ts to the current TS; row (1) is inserted after
	// this point and is therefore invisible to external-ts reads.
	tk.MustExec("start transaction;set global tidb_external_ts=@@tidb_current_ts;commit;")
	tk.MustExec("insert into t values (1)")
	tk.MustQuery("select * from t").Check(testkit.Rows("0", "1"))
	// Prepare while the flag is on, execute under both settings.
	tk.MustExec("set tidb_enable_external_ts_read=on")
	tk.MustExec("prepare my_select from 'select * from t'")
	tk.MustQuery("execute my_select").Check(testkit.Rows("0"))
	tk.MustExec("set tidb_enable_external_ts_read=off")
	tk.MustQuery("execute my_select").Check(testkit.Rows("0", "1"))
}
|
package main
import (
"fmt"
"math/rand"
"strings"
"time"
"github.com/bwmarrin/discordgo"
"github.com/tahkapaa/test_stuff/riot_api/riotapi"
)
// Bot is a discordBot. It owns the Discord session, the per-channel
// follow state, and the persistence layer used to restore that state.
type Bot struct {
	// Discord is the live discordgo session used to receive and send messages.
	Discord *discordgo.Session
	// Channels maps a Discord channel ID to its followed-player state.
	Channels map[string]*Channel
	// db persists the followed players per channel.
	db DB
}
// Channel is a channel with followed players. Its monitor runs in its own
// goroutine (started by Bot.addChannel) and is driven through the
// monitor's message channel.
type Channel struct {
	ChannelID string
	monitor   *PlayerMonitor
}
// newBot creates a Discord session authenticated with botToken, registers
// the message handler, opens the websocket connection, and restores the
// previously followed channels from db.
//
// The handler is registered before Open, presumably so that no messages
// arriving right after connect are missed.
func newBot(botToken string, db DB) (*Bot, error) {
	dg, err := discordgo.New("Bot " + botToken)
	if err != nil {
		// Wrap with context so callers can tell which step failed.
		return nil, fmt.Errorf("creating discord session: %w", err)
	}

	bot := Bot{
		Discord:  dg,
		Channels: make(map[string]*Channel),
		db:       db,
	}
	bot.AddMessageHandler()

	if err := dg.Open(); err != nil {
		return nil, fmt.Errorf("opening discord connection: %w", err)
	}
	bot.readChannelsFromDB()
	return &bot, nil
}
// readChannelsFromDB loads every persisted channel and starts monitoring
// the summoners recorded for it. It panics when the database read fails,
// since the bot cannot operate without its channel state.
func (b *Bot) readChannelsFromDB() {
	stored, err := b.db.Get()
	if err != nil {
		panic(fmt.Sprintf("Failed to initialize channels from db: %v", err))
	}
	for _, ch := range stored {
		b.addChannel(ch.ID, ch.Summoners)
	}
}
// addChannel registers a Discord channel, spawns its player monitor
// goroutine, and records the channel in the bot's map. A nil players map
// is treated as "no one followed yet".
func (b *Bot) addChannel(ID string, players map[string]Player) {
	followed := players
	if followed == nil {
		followed = make(map[string]Player)
	}

	monitor := &PlayerMonitor{
		FollowedPlayers: followed,
		games:           make(map[int]*riotapi.CurrentGameInfo),
		reportedGames:   make(map[int]bool),
		messageChan:     make(chan monitorMessage, 1),
		ChannelID:       ID,
		db:              b.db,
	}
	go monitor.monitorPlayers()

	b.Channels[ID] = &Channel{ChannelID: ID, monitor: monitor}
}
// AddMessageHandler adds the MessageCreate handler to the bot. The handler
// answers "Yarrr!" to ping/pong/captain mentions and dispatches the
// ?help / ?follow / ?remove / ?list / ?joke commands.
func (b *Bot) AddMessageHandler() {
	b.Discord.AddHandler(func(s *discordgo.Session, m *discordgo.MessageCreate) {
		stamp := time.Now().Format(time.ANSIC)

		// Never react to the bot's own messages.
		if m.Author.ID == s.State.User.ID {
			return
		}

		content := m.Content
		lowered := strings.ToLower(content)

		// These three checks deliberately do not return: a command such as
		// "?follow kapu..." should both get a "Yarrr!" and be handled below.
		if content == "ping" {
			s.ChannelMessageSend(m.ChannelID, "Yarrr!")
		}
		if content == "pong" {
			s.ChannelMessageSend(m.ChannelID, "Yarrr!")
		}
		if strings.Contains(lowered, "kapteeni") || strings.Contains(lowered, "kapu") {
			s.ChannelMessageSend(m.ChannelID, "Yarrr!")
		}

		switch {
		case content == "?help":
			handleHelp(m.ChannelID, m.Author.Username, stamp, s)
		case strings.HasPrefix(content, "?follow"):
			// Lazily create the channel record on first follow.
			if _, ok := b.Channels[m.ChannelID]; !ok {
				b.addChannel(m.ChannelID, nil)
			}
			b.Channels[m.ChannelID].handleStartFollowing(content, m.Author.Username, stamp, s)
		case strings.HasPrefix(content, "?remove"):
			b.Channels[m.ChannelID].handleStopFollowing(m.ChannelID, content, m.Author.Username, stamp)
		case content == "?list":
			b.Channels[m.ChannelID].handleListFollowedPlayers(m.ChannelID, m.Author.Username, stamp)
		case content == "?joke":
			s.ChannelMessageSend(m.ChannelID, jokes[rand.Intn(len(jokes))])
		}
	})
}
// handleHelp posts an embed listing the bot's supported commands to the
// given channel, with the requesting user in the footer.
func handleHelp(channelID, sender, timeStr string, s *discordgo.Session) {
	fields := []*discordgo.MessageEmbedField{
		{Name: "?follow", Value: "List o' summoner names t' be followed"},
		{Name: "?list", Value: "List o' summoners that are bein' followed"},
		{Name: "?remove", Value: "List o' summoner names that should nah be followed"},
		{Name: "?joke", Value: "Wants t' hear a joke?"},
	}
	embed := &discordgo.MessageEmbed{
		Title:  "Available commands",
		Color:  green,
		Fields: fields,
		Footer: newFooter(sender, timeStr),
	}
	s.ChannelMessageSendComplex(channelID, &discordgo.MessageSend{Embed: embed})
}
// handleStartFollowing resolves each summoner name given after the
// "?follow" command, posts a progress message, hands every found player to
// the channel's monitor goroutine, and finally edits the progress message
// into a summary of who is now followed.
func (c *Channel) handleStartFollowing(summonerNames, sender, timeStr string, s *discordgo.Session) {
	names := strings.Fields(summonerNames)
	if len(names) < 2 {
		// Fix: validate before posting the "working" placeholder. The
		// original posted the placeholder first and left it dangling
		// whenever the command had no summoner names.
		s.ChannelMessageSendComplex(c.ChannelID, newErrorMessage("Give at least one summoner name", sender, timeStr))
		return
	}

	st, err := s.ChannelMessageSendComplex(c.ChannelID, newWorkingMessage())
	if err != nil {
		fmt.Println(err)
		return
	}

	var addedSummoners []*discordgo.MessageEmbedField
	// names[0] is the "?follow" command itself; the rest are summoner names.
	for _, name := range names[1:] {
		summoner, err := RC.Summoner.SummonerByName(name)
		if err != nil || summoner == nil {
			s.ChannelMessageSendComplex(c.ChannelID, newErrorMessage(fmt.Sprintf("Unable t' find summoner: %v", name), sender, timeStr))
			continue
		}
		rank, err := findPlayerRank(RC, summoner.ID)
		if err != nil {
			// Rank lookup failure is non-fatal; follow the player anyway.
			fmt.Println(err)
		}
		addedSummoners = append(addedSummoners, &discordgo.MessageEmbedField{Name: summoner.Name, Value: rank})
		c.monitor.messageChan <- monitorMessage{
			kind:    AddPlayer,
			player:  Player{Name: summoner.Name, ID: summoner.ID, Rank: rank},
			sender:  sender,
			timeStr: timeStr,
		}
	}

	if len(addedSummoners) > 0 {
		s.ChannelMessageEditComplex(&discordgo.MessageEdit{
			Channel: c.ChannelID,
			ID:      st.ID,
			Embed:   newAddedSummonersMessage("Now followin'", sender, timeStr, addedSummoners),
		})
	}
}
// handleStopFollowing forwards a remove request to the channel's monitor
// goroutine. A nil receiver means this channel never followed anyone, in
// which case an error message is posted instead.
func (c *Channel) handleStopFollowing(channelID, summonerNames, sender, timeStr string) {
	if c == nil {
		DGBot.Discord.ChannelMessageSendComplex(channelID, newErrorMessage("No one be bein' followed", sender, timeStr))
		return
	}
	removal := monitorMessage{
		kind:          RemovePlayer,
		summonerNames: summonerNames,
		sender:        sender,
		timeStr:       timeStr,
	}
	c.monitor.messageChan <- removal
}
// handleListFollowedPlayers asks the channel's monitor goroutine to post
// the current follow list. A nil receiver means this channel never
// followed anyone, in which case an error message is posted instead.
func (c *Channel) handleListFollowedPlayers(channelID, sender, timeStr string) {
	if c == nil {
		DGBot.Discord.ChannelMessageSendComplex(channelID, newErrorMessage("No one be bein' followed", sender, timeStr))
		return
	}
	listing := monitorMessage{
		kind:    ListPlayers,
		sender:  sender,
		timeStr: timeStr,
	}
	c.monitor.messageChan <- listing
}
|
package main
import (
"fmt"
"math"
)
// Example from the problem statement (translated):
// input  nums = [2,3,1,5,4]
// output 10 — reversing the subarray [3,1,5] gives [2,5,1,3,4],
// whose array value is 10.
func main() {
	sample := []int{2, 3, 1, 5, 4}
	fmt.Println(maxValueAfterReverse(sample))
}
// maxValueAfterReverse returns the maximum possible "array value" of nums
// after reversing at most one subarray, where the value is the sum of
// |nums[i] - nums[i-1]| over all adjacent pairs (LeetCode 1330), in O(n).
//
// NOTE(review): assumes len(nums) >= 1 — nums[0] and nums[n-1] are read
// unconditionally; confirm callers never pass an empty slice.
func maxValueAfterReverse(nums []int) int {
	n := len(nums)
	a, b := nums[0], nums[n-1]
	// Running maxima over earlier pairs (a[j-1], a[j]) of the four sign
	// combinations, used when the reversal is strictly interior.
	// (Comments translated from the original Chinese.)
	premax1 := math.MinInt / 2 // max of  a[j] + a[j-1] - |a[j] - a[j-1]|
	premax2 := math.MinInt / 2 // max of  a[j] - a[j-1] - |a[j] - a[j-1]|
	premax3 := math.MinInt / 2 // max of -a[j] + a[j-1] - |a[j] - a[j-1]|
	premax4 := math.MinInt / 2 // max of -a[j] - a[j-1] - |a[j] - a[j-1]|
	sum, ans := 0, 0 // sum is the original array value; ans the best gain
	for i := 1; i < n; i++ {
		y, x := nums[i-1], nums[i] // pair (a[i-1], a[i]); doubles as (a[j], a[j+1]) in the premax terms
		d := abs(x - y)
		sum += d
		ans = max(ans, max(
			abs(y-b), // reversal reaching the right end: |a[i-1] - a[n-1]| replaces d
			abs(x-a), // reversal reaching the left end:  |a[i]   - a[0]  | replaces d
			premax1-x-y, // interior reversal: -a[i+...]-... combined with an earlier pair (sign case 1)
			premax2-x+y, // sign case 2
			premax3+x-y, // sign case 3
			premax4+x+y, // sign case 4
		)-d)
		premax1 = max(premax1, x+y-d)
		premax2 = max(premax2, x-y-d)
		premax3 = max(premax3, -x+y-d)
		premax4 = max(premax4, -x-y-d)
	}
	return sum + ans
}
// abs returns the absolute value of a.
func abs(a int) int {
	if a >= 0 {
		return a
	}
	return -a
}
// max returns the largest of the given ints. It requires at least one
// argument: an empty call panics on the arr[0] index.
func max(arr ...int) int {
	best := arr[0]
	for _, candidate := range arr[1:] {
		if candidate > best {
			best = candidate
		}
	}
	return best
}
|
package model
import (
"time"
"gopkg.in/go-playground/validator.v9"
)
// Article maps one row of the articles table. The db struct tags let sqlx
// bind SQL results to fields; the form/validate tags drive request binding
// and input validation. (Translated from the original Japanese comment.)
type Article struct {
	ID      int       `db:"id"`
	Title   string    `db:"title" form:"title" validate:"required,max=50"`
	Body    string    `db:"body" form:"body" validate:"required"`
	Created time.Time `db:"created"`
	Updated time.Time `db:"updated"`
}
// ValidationErrors converts a validator error into a slice of
// human-readable (Japanese) messages, one per failed field rule.
//
// Field "Title" distinguishes the "required" and "max" tags; "Body" only
// has a "required" rule, so no tag switch is needed there.
func (a *Article) ValidationErrors(err error) []string {
	var errMessages []string

	// Fix: guard the type assertion. The original unchecked assertion
	// panicked whenever err was not a validator.ValidationErrors (e.g. an
	// *validator.InvalidValidationError). Fall back to the raw message.
	verrs, ok := err.(validator.ValidationErrors)
	if !ok {
		if err != nil {
			errMessages = append(errMessages, err.Error())
		}
		return errMessages
	}

	// Several fields may fail at once, so walk every reported violation.
	for _, verr := range verrs {
		var message string
		switch verr.Field() {
		case "Title":
			// Pick the message by which validation tag failed.
			switch verr.Tag() {
			case "required":
				message = "タイトルは必須です。"
			case "max":
				message = "タイトルは最大50文字です。"
			}
		case "Body":
			message = "本文は必須です。"
		}
		if message != "" {
			errMessages = append(errMessages, message)
		}
	}
	return errMessages
}
|
package pn532
// Device abstracts the low-level transport to a PN532 controller so the
// higher-level protocol code is independent of the wiring (e.g. SPI).
type Device interface {
	// Low level communication methods

	// WriteCommand sends the raw bytes p to the chip.
	WriteCommand(p []byte)
	// ReadData reads response bytes from the chip into p.
	ReadData(p []byte)
	// ReadAck reports whether the chip acknowledged the last command.
	ReadAck() bool
	// Ready reports whether the chip is ready for the next transfer.
	Ready() bool
	// WaitReady waits for readiness, bounded by the int64 argument
	// (units not visible here — confirm in the implementation), and
	// reports whether the chip became ready in time.
	WaitReady(int64) bool
}
// OpenDevice opens a PN532 connected over SPI, identified by the given
// slave-select, clock, MISO and MOSI pin numbers.
//
// NOTE(review): the arguments are reordered when forwarded — OpenDevice
// takes (ss, clk, miso, mosi) but openDeviceSPI is called with
// (clk, miso, mosi, ss). Looks deliberate, but confirm against
// openDeviceSPI's parameter order.
func OpenDevice(ss, clk, miso, mosi uint8) (device Device, err error) {
	return openDeviceSPI(clk, miso, mosi, ss)
}
|
package main
import (
"flag"
"fmt"
"math/rand"
"sync"
"time"
)
// product is the unit of goods exchanged between producers and customers
// over the shared buffer channel.
type product int

// producer holds a product until it is placed on the shared buffer.
type producer struct {
	id int
	product
}

// customer receives a product from the shared buffer.
type customer struct {
	id int
	product
}
// main wires together N producers, M customers, and a bounded buffer
// channel, starts them, then parks forever while the goroutines run.
//
// Flags:
//	-p	number of producers
//	-c	number of customers
//	-b	capacity of the shared buffer channel
func main() {
	producersNumber := flag.Int("p", 1, "number of producers")
	customersNumber := flag.Int("c", 1, "number of customers")
	bufferSize := flag.Int("b", 1, "size of buffer")
	flag.Parse()

	producersList := initProducers(*producersNumber)
	customersList := initCustomers(*customersNumber)
	buffer := make(chan product, *bufferSize)

	run(producersList, customersList, buffer)

	// Fix: the original wrapped this sleep in a select with only a
	// default case, which always falls through immediately — a no-op
	// construct. Sleeping directly is equivalent and clearer.
	for {
		time.Sleep(time.Millisecond * 500)
		// fmt.Printf("Main\n")
	}
}
// initProducers builds the requested number of producers, giving producer
// i the id i and a random product value in [0, 10000).
func initProducers(number int) []producer {
	list := make([]producer, number)
	for i := range list {
		list[i] = producer{i, product(rand.Intn(10000))}
	}
	return list
}
// initCustomers builds the requested number of customers, giving customer
// i the id i and no product yet (zero value).
func initCustomers(number int) []customer {
	list := make([]customer, number)
	for i := range list {
		list[i] = customer{i, 0}
	}
	return list
}
// produce places p's product on the shared buffer (blocking while the
// buffer is full), logs the hand-off, and zeroes the producer's stock.
func produce(p *producer, buffer chan product, wg *sync.WaitGroup) {
	defer wg.Done()
	item := p.product
	buffer <- item
	fmt.Printf("Producer %d produced %d\n", p.id, item)
	p.product = 0
}
// buy blocks until a product is available on the shared buffer, takes it,
// and records it on the customer, logging both steps.
func buy(c *customer, buffer chan product, wg *sync.WaitGroup) {
	defer wg.Done()
	fmt.Printf("Customer %d is waiting\n", c.id)
	bought := <-buffer
	c.product = bought
	fmt.Printf("Customer %d bought %d\n", c.id, bought)
}
// run starts one goroutine per customer (first, so they block on the empty
// buffer) and one per producer, all sharing the bounded buffer channel.
// It returns immediately; the goroutines outlive the call.
//
// NOTE(review): wg is local and wg.Wait() is commented out, so the
// WaitGroup counts are never waited on — effectively dead code. Confirm
// whether run was meant to block until all goroutines finish.
func run(producers []producer, customers []customer, buffer chan product) {
	var wg sync.WaitGroup
	wg.Add(len(customers))
	for j := range customers {
		go buy(&customers[j], buffer, &wg)
	}
	// wg.Wait()
	wg.Add(len(producers))
	for i := range producers {
		go produce(&producers[i], buffer, &wg)
	}
}
|
package scene
import (
"github.com/eriklupander/rt/internal/pkg/config"
"github.com/eriklupander/rt/internal/pkg/mat"
"github.com/eriklupander/rt/internal/pkg/obj"
"io/ioutil"
"math"
)
// SimpleGopher builds a demo scene: the Go gopher OBJ model placed above a
// slightly reflective white plane, lit by a 4x4-sampled area light.
func SimpleGopher() *Scene {
	camera := mat.NewCamera(config.Cfg.Width, config.Cfg.Height, math.Pi/3.5)
	camera.Transform = mat.ViewTransform(mat.NewPoint(-.1, 1.2, 6), mat.NewPoint(0.05, 1.1, 0.05), mat.NewVector(0, 1, 0))
	camera.Inverse = mat.Inverse(camera.Transform)
	// Model
	// NOTE(review): the ReadFile error is discarded — a missing
	// assets/models/gopher.obj silently yields an empty model.
	bytes, _ := ioutil.ReadFile("assets/models/gopher.obj")
	model := obj.ParseObj(string(bytes)).ToGroup()
	// NOTE(review): SetTransform is called four times in sequence —
	// confirm whether the group composes transforms across calls or the
	// last call overwrites the earlier ones.
	model.SetTransform(mat.Translate(0, 1.2, 0))
	model.SetTransform(mat.RotateX(math.Pi / 2))
	model.SetTransform(mat.RotateY(-math.Pi / 2))
	model.SetTransform(mat.RotateX(-math.Pi / 8))
	// Subdivide the model (presumably for bounding-volume acceleration —
	// confirm against mat.Divide) and precompute its bounds.
	mat.Divide(model, 100)
	model.Bounds()
	// NOTE(review): w and its appended light are never used in the
	// returned Scene below — looks like dead code or a missing field.
	w := mat.NewWorld()
	w.Light = append(w.Light, mat.NewLight(mat.NewPoint(3.3, 4, 10.5), mat.NewColor(1, 1, 1)))
	// Slightly reflective floor plane.
	floor := mat.NewPlane()
	pm := mat.NewMaterial(mat.NewColor(1, 1, 1), 0.025, 0.67, 0, 200)
	pm.Reflectivity = 0.2
	floor.SetMaterial(pm)
	return &Scene{
		Camera: camera,
		// Lights: []mat.Light{{mat.NewPoint(-1, 1, 2.5), mat.NewColor(0.3, 0.3, 0.3)}},
		AreaLights: []mat.AreaLight{mat.NewAreaLight(
			mat.NewPoint(1.5, 2.5, 4.5),
			mat.NewVector(-.5, 0, .5),
			4,
			mat.NewVector(0, 1, 0),
			4,
			mat.NewColor(0.9, 0.9, 0.9))},
		Objects: []mat.Shape{
			floor, model,
		},
	}
}
|
package main
import (
"github.com/stretchr/testify/assert"
"testing"
)
// TestRoles_add_on_nil_roles_dont_throw_error checks that add is safe on a
// nil Roles value: a no-arg add leaves it nil, and adding to nil grows it.
func TestRoles_add_on_nil_roles_dont_throw_error(t *testing.T) {
	ass := assert.New(t)
	var roles Roles
	roles.add()
	ass.Nil(roles)
	roles.add("any")
	ass.Contains(roles, "any")
}
// TestRoles_add_nil_role verifies that calling add with no arguments does
// not modify an existing Roles slice.
//
// NOTE(review): despite the name, no nil role is passed — add() is called
// with zero arguments; confirm the test name matches the intent.
func TestRoles_add_nil_role(t *testing.T) {
	ass := assert.New(t)
	roles := Roles{"first"}
	roles.add()
	ass.Len(roles, 1)
	ass.Contains(roles, "first")
}
// TestRoles_add_same_role checks that adding an already-present role is a
// no-op: no duplicate entry is stored.
func TestRoles_add_same_role(t *testing.T) {
	ass := assert.New(t)
	sameRole := "same role"
	roles := Roles{sameRole}
	roles.add(sameRole)
	ass.Len(roles, 1)
	ass.Contains(roles, sameRole)
}
// TestRoles_add_many_roles checks that several roles can be added in a
// single variadic call and that all of them end up in the slice.
func TestRoles_add_many_roles(t *testing.T) {
	ass := assert.New(t)
	first := "premier role"
	second := "deuxieme role"
	third := "troisieme role"
	roles := Roles{first}
	roles.add(second, third)
	ass.Len(roles, 3)
	ass.Contains(roles, first, second, third)
}
|
package main
import (
"fmt"
"net/http"
"strings"
)
// handler redirects based on the User-Agent: GitHub's camo image proxy is
// sent the video's thumbnail, everyone else the YouTube watch page. The
// video ID is taken from the "v" query parameter.
func handler(w http.ResponseWriter, r *http.Request) {
	videoID := r.URL.Query().Get("v")
	// NOTE(review): videoID is inserted unescaped into the redirect URL;
	// confirm callers cannot abuse this to redirect to arbitrary targets.
	userAgent := r.Header.Get("User-Agent")
	if strings.Contains(userAgent, "github-camo") {
		// Named status constant instead of the magic number 301.
		http.Redirect(w, r, "https://img.youtube.com/vi/"+videoID+"/0.jpg", http.StatusMovedPermanently)
		return
	}
	http.Redirect(w, r, "https://youtube.com/watch?v="+videoID, http.StatusMovedPermanently)
}
// main starts the redirect server on port 8080.
func main() {
	fmt.Print("> Listening on http://localhost:8080\n")
	http.HandleFunc("/", handler)
	// Fix: ListenAndServe always returns a non-nil error on failure; the
	// original discarded it, so a bind failure exited silently.
	if err := http.ListenAndServe(":8080", nil); err != nil {
		fmt.Println(err)
	}
}
|
package str
import (
"crypto/md5"
"encoding/hex"
"io"
)
// MD5 returns the lowercase hex-encoded MD5 digest of str.
//
// NOTE(review): MD5 is cryptographically broken — fine for checksums or
// cache keys, but do not use it for passwords or signatures.
func MD5(str string) string {
	h := md5.New()
	io.WriteString(h, str)
	digest := h.Sum(nil)
	return hex.EncodeToString(digest)
}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
. "github.com/onsi/ginkgo"
)
// The actual test suite: verifies that every way of requesting help
// (`help` subcommand, `--help`, `-h`, and an invalid flag) surfaces the
// crictl usage banner.
//
// NOTE(review): t here is a package-level e2e helper defined elsewhere in
// this package (not the stdlib *testing.T); it wraps crictl invocations.
var _ = t.Describe("help", func() {
	// Fixed banner line identifying crictl's help output.
	const helpMessageIdentifier = "crictl - client for CRI"
	It("should succeed with `help` subcommand", func() {
		t.CrictlExpectSuccess("help", helpMessageIdentifier)
	})
	It("should succeed with `--help` flag", func() {
		t.CrictlExpectSuccess("--help", helpMessageIdentifier)
	})
	It("should succeed with `-h` flag", func() {
		t.CrictlExpectSuccess("-h", helpMessageIdentifier)
	})
	// An unknown flag must fail but still point the user at the help text.
	It("should show help on invalid flag", func() {
		t.CrictlExpectFailure("--invalid", helpMessageIdentifier,
			"flag provided but not defined")
	})
})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.