text stringlengths 11 4.05M |
|---|
package main
import "fmt"
// main demonstrates setZeroes on a sample 3x4 matrix and prints the result.
func main() {
	grid := [][]int{{0, 1, 2, 0}, {3, 4, 5, 2}, {1, 3, 1, 5}}
	setZeroes(grid)
	fmt.Println(grid)
}
// setZeroes zeroes out every row and column of matrix that contains at least
// one zero, in place, using O(1) extra space: the first row and first column
// double as marker storage.
//
// Bug fix: the original recorded whether the first row/column themselves
// contained a zero AFTER interior zeros had already written markers into
// them, so any interior zero incorrectly wiped the whole first row/column.
// The flags must be captured before the markers are written.
func setZeroes(matrix [][]int) {
	// Guard against empty input.
	if len(matrix) == 0 || len(matrix[0]) == 0 {
		return
	}
	row := len(matrix)
	col := len(matrix[0])

	// Record whether the first row / first column originally contain a zero,
	// BEFORE they are overwritten with markers below.
	firstRow := false
	for j := 0; j < col; j++ {
		if matrix[0][j] == 0 {
			firstRow = true
			break
		}
	}
	firstCol := false
	for i := 0; i < row; i++ {
		if matrix[i][0] == 0 {
			firstCol = true
			break
		}
	}

	// For each interior zero, mark its row (matrix[i][0]) and column
	// (matrix[0][j]).
	for i := 1; i < row; i++ {
		for j := 1; j < col; j++ {
			if matrix[i][j] == 0 {
				matrix[0][j] = 0
				matrix[i][0] = 0
			}
		}
	}

	// Zero interior cells whose row or column is marked.
	for i := 1; i < row; i++ {
		for j := 1; j < col; j++ {
			if matrix[0][j] == 0 || matrix[i][0] == 0 {
				matrix[i][j] = 0
			}
		}
	}

	// Finally zero the first row/column if they originally contained zeros.
	if firstRow {
		for j := 0; j < col; j++ {
			matrix[0][j] = 0
		}
	}
	if firstCol {
		for i := 0; i < row; i++ {
			matrix[i][0] = 0
		}
	}
}
|
package main
import "fmt"
// main shows a basic switch statement; only the matching case body runs
// (Go cases do not fall through by default).
func main() {
	value := 25
	switch value {
	case 1:
		fmt.Println("Yes")
	case 25:
		fmt.Println("No")
	}
}
|
// Copyright 2020 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package kvtenantccl
import (
"context"
"net"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/config"
"github.com/cockroachdb/cockroach/pkg/gossip"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvtenant"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/grpcutil"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/netutil"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/retry"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// rpcRetryOpts uses microsecond-scale backoff so tests that exercise
// connection retries complete quickly.
var rpcRetryOpts = retry.Options{
	InitialBackoff: 1 * time.Microsecond,
	MaxBackoff:     4 * time.Microsecond,
}

// Compile-time assertion that mockServer implements roachpb.InternalServer.
var _ roachpb.InternalServer = &mockServer{}

// mockServer is a configurable roachpb.InternalServer stand-in for a real KV
// node. Tests install only the hooks they need; all other RPCs panic.
type mockServer struct {
	// rangeLookupFn handles RangeLookup RPCs.
	rangeLookupFn func(context.Context, *roachpb.RangeLookupRequest) (*roachpb.RangeLookupResponse, error)
	// gossipSubFn handles GossipSubscription RPCs.
	gossipSubFn func(*roachpb.GossipSubscriptionRequest, roachpb.Internal_GossipSubscriptionServer) error
}
// RangeLookup implements roachpb.InternalServer by delegating to the
// test-installed rangeLookupFn hook.
func (m *mockServer) RangeLookup(
	ctx context.Context, req *roachpb.RangeLookupRequest,
) (*roachpb.RangeLookupResponse, error) {
	return m.rangeLookupFn(ctx, req)
}
// GossipSubscription implements roachpb.InternalServer by delegating to the
// test-installed gossipSubFn hook.
func (m *mockServer) GossipSubscription(
	req *roachpb.GossipSubscriptionRequest, stream roachpb.Internal_GossipSubscriptionServer,
) error {
	return m.gossipSubFn(req, stream)
}
// ResetQuorum is not needed by these tests; it panics if called.
func (*mockServer) ResetQuorum(
	context.Context, *roachpb.ResetQuorumRequest,
) (*roachpb.ResetQuorumResponse, error) {
	panic("unimplemented")
}
// Batch is not needed by these tests; it panics if called.
func (*mockServer) Batch(context.Context, *roachpb.BatchRequest) (*roachpb.BatchResponse, error) {
	panic("unimplemented")
}
// RangeFeed is not needed by these tests; it panics if called.
func (*mockServer) RangeFeed(*roachpb.RangeFeedRequest, roachpb.Internal_RangeFeedServer) error {
	panic("unimplemented")
}
// Join is not needed by these tests; it panics if called.
// Consistency fix: the receiver was named `m` but never used; the other
// unimplemented RPCs (ResetQuorum, Batch, RangeFeed) use an unnamed receiver.
func (*mockServer) Join(
	context.Context, *roachpb.JoinNodeRequest,
) (*roachpb.JoinNodeResponse, error) {
	panic("unimplemented")
}
// gossipEventForClusterID builds the gossip event a server sends to announce
// the cluster ID.
func gossipEventForClusterID(clusterID uuid.UUID) *roachpb.GossipSubscriptionEvent {
	return &roachpb.GossipSubscriptionEvent{
		Key:            gossip.KeyClusterID,
		Content:        roachpb.MakeValueFromBytesAndTimestamp(clusterID.GetBytes(), hlc.Timestamp{}),
		PatternMatched: gossip.KeyClusterID,
	}
}
// gossipEventForNodeDesc builds the gossip event announcing a node
// descriptor. Marshalling is infallible for valid descriptors, so a failure
// is a test-programming error and panics.
func gossipEventForNodeDesc(desc *roachpb.NodeDescriptor) *roachpb.GossipSubscriptionEvent {
	val, err := protoutil.Marshal(desc)
	if err != nil {
		panic(err)
	}
	return &roachpb.GossipSubscriptionEvent{
		Key:            gossip.MakeNodeIDKey(desc.NodeID),
		Content:        roachpb.MakeValueFromBytesAndTimestamp(val, hlc.Timestamp{}),
		PatternMatched: gossip.MakePrefixPattern(gossip.KeyNodeIDPrefix),
	}
}
// gossipEventForSystemConfig builds the gossip event announcing the system
// config entries. Panics on marshal failure (test-programming error).
func gossipEventForSystemConfig(cfg *config.SystemConfigEntries) *roachpb.GossipSubscriptionEvent {
	val, err := protoutil.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	return &roachpb.GossipSubscriptionEvent{
		Key:            gossip.KeySystemConfig,
		Content:        roachpb.MakeValueFromBytesAndTimestamp(val, hlc.Timestamp{}),
		PatternMatched: gossip.KeySystemConfig,
	}
}
// waitForNodeDesc blocks until the Connector can resolve a descriptor for
// nodeID, failing the test if that does not happen soon. Used to wait out the
// asynchronous application of gossip updates.
func waitForNodeDesc(t *testing.T, c *Connector, nodeID roachpb.NodeID) {
	t.Helper()
	testutils.SucceedsSoon(t, func() error {
		_, err := c.GetNodeDescriptor(nodeID)
		return err
	})
}
// TestConnectorGossipSubscription tests Connector's roles as a
// kvcoord.NodeDescStore and as a config.SystemConfigProvider: node
// descriptors and the system config arriving over the gossip subscription
// stream must become visible through the corresponding accessors.
func TestConnectorGossipSubscription(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	stopper := stop.NewStopper()
	defer stopper.Stop(ctx)
	clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond)
	rpcContext := rpc.NewInsecureTestingContext(clock, stopper)
	s := rpc.NewServer(rpcContext)
	// Test setting the cluster ID by setting it to nil then ensuring it's later
	// set to the original ID value.
	clusterID := rpcContext.ClusterID.Get()
	rpcContext.ClusterID.Reset(uuid.Nil)
	// Events pushed into gossipSubC are forwarded to the connector over the
	// mock subscription stream.
	gossipSubC := make(chan *roachpb.GossipSubscriptionEvent)
	defer close(gossipSubC)
	gossipSubFn := func(req *roachpb.GossipSubscriptionRequest, stream roachpb.Internal_GossipSubscriptionServer) error {
		// The connector is expected to subscribe to exactly these patterns.
		assert.Len(t, req.Patterns, 3)
		assert.Equal(t, "cluster-id", req.Patterns[0])
		assert.Equal(t, "node:.*", req.Patterns[1])
		assert.Equal(t, "system-db", req.Patterns[2])
		for gossipSub := range gossipSubC {
			if err := stream.Send(gossipSub); err != nil {
				return err
			}
		}
		return nil
	}
	roachpb.RegisterInternalServer(s, &mockServer{gossipSubFn: gossipSubFn})
	ln, err := netutil.ListenAndServeGRPC(stopper, s, util.TestAddr)
	require.NoError(t, err)
	cfg := kvtenant.ConnectorConfig{
		AmbientCtx:      log.AmbientContext{Tracer: tracing.NewTracer()},
		RPCContext:      rpcContext,
		RPCRetryOptions: rpcRetryOpts,
	}
	addrs := []string{ln.Addr().String()}
	c := NewConnector(cfg, addrs)
	// Start should block until the first GossipSubscription response.
	startedC := make(chan error)
	go func() {
		startedC <- c.Start(ctx)
	}()
	select {
	case err := <-startedC:
		t.Fatalf("Start unexpectedly completed with err=%v", err)
	case <-time.After(10 * time.Millisecond):
	}
	// Return first GossipSubscription response.
	node1 := &roachpb.NodeDescriptor{NodeID: 1, Address: util.MakeUnresolvedAddr("tcp", "1.1.1.1")}
	node2 := &roachpb.NodeDescriptor{NodeID: 2, Address: util.MakeUnresolvedAddr("tcp", "2.2.2.2")}
	gossipSubC <- gossipEventForNodeDesc(node1)
	gossipSubC <- gossipEventForNodeDesc(node2)
	gossipSubC <- gossipEventForClusterID(clusterID)
	require.NoError(t, <-startedC)
	// Ensure that ClusterID was updated.
	require.Equal(t, clusterID, rpcContext.ClusterID.Get())
	// Test kvcoord.NodeDescStore impl. Wait for full update first.
	waitForNodeDesc(t, c, 2)
	desc, err := c.GetNodeDescriptor(1)
	require.Equal(t, node1, desc)
	require.NoError(t, err)
	desc, err = c.GetNodeDescriptor(2)
	require.Equal(t, node2, desc)
	require.NoError(t, err)
	// A node that was never gossiped must not resolve.
	desc, err = c.GetNodeDescriptor(3)
	require.Nil(t, desc)
	require.Regexp(t, "unable to look up descriptor for n3", err)
	// Return updated GossipSubscription response.
	node1Up := &roachpb.NodeDescriptor{NodeID: 1, Address: util.MakeUnresolvedAddr("tcp", "1.2.3.4")}
	node3 := &roachpb.NodeDescriptor{NodeID: 3, Address: util.MakeUnresolvedAddr("tcp", "2.2.2.2")}
	gossipSubC <- gossipEventForNodeDesc(node1Up)
	gossipSubC <- gossipEventForNodeDesc(node3)
	// Test kvcoord.NodeDescStore impl. Wait for full update first.
	waitForNodeDesc(t, c, 3)
	desc, err = c.GetNodeDescriptor(1)
	require.Equal(t, node1Up, desc)
	require.NoError(t, err)
	desc, err = c.GetNodeDescriptor(2)
	require.Equal(t, node2, desc)
	require.NoError(t, err)
	desc, err = c.GetNodeDescriptor(3)
	require.Equal(t, node3, desc)
	require.NoError(t, err)
	// Test config.SystemConfigProvider impl. Should not have a SystemConfig yet.
	sysCfg := c.GetSystemConfig()
	require.Nil(t, sysCfg)
	sysCfgC := c.RegisterSystemConfigChannel()
	require.Len(t, sysCfgC, 0)
	// Return first SystemConfig response.
	sysCfgEntries := &config.SystemConfigEntries{Values: []roachpb.KeyValue{
		{Key: roachpb.Key("a")},
		{Key: roachpb.Key("b")},
	}}
	gossipSubC <- gossipEventForSystemConfig(sysCfgEntries)
	// Test config.SystemConfigProvider impl. Wait for update first.
	<-sysCfgC
	sysCfg = c.GetSystemConfig()
	require.NotNil(t, sysCfg)
	require.Equal(t, sysCfgEntries.Values, sysCfg.Values)
	// Return updated SystemConfig response.
	sysCfgEntriesUp := &config.SystemConfigEntries{Values: []roachpb.KeyValue{
		{Key: roachpb.Key("a")},
		{Key: roachpb.Key("c")},
	}}
	gossipSubC <- gossipEventForSystemConfig(sysCfgEntriesUp)
	// Test config.SystemConfigProvider impl. Wait for update first.
	<-sysCfgC
	sysCfg = c.GetSystemConfig()
	require.NotNil(t, sysCfg)
	require.Equal(t, sysCfgEntriesUp.Values, sysCfg.Values)
	// A newly registered SystemConfig channel will be immediately notified.
	sysCfgC2 := c.RegisterSystemConfigChannel()
	require.Len(t, sysCfgC2, 1)
}
// TestConnectorRangeLookup tests Connector's role as a
// kvcoord.RangeDescriptorDB: RangeLookup success, server-side error, context
// cancelation, and the always-failing FirstRange.
// (Comment previously misnamed this test "TestConnectorGossipSubscription".)
func TestConnectorRangeLookup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	stopper := stop.NewStopper()
	defer stopper.Stop(ctx)
	clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond)
	rpcContext := rpc.NewInsecureTestingContext(clock, stopper)
	s := rpc.NewServer(rpcContext)
	rangeLookupRespC := make(chan *roachpb.RangeLookupResponse, 1)
	rangeLookupFn := func(_ context.Context, req *roachpb.RangeLookupRequest) (*roachpb.RangeLookupResponse, error) {
		// Validate request.
		assert.Equal(t, roachpb.RKey("a"), req.Key)
		assert.Equal(t, roachpb.READ_UNCOMMITTED, req.ReadConsistency)
		assert.Equal(t, int64(0), req.PrefetchNum)
		assert.Equal(t, false, req.PrefetchReverse)
		// Respond.
		return <-rangeLookupRespC, nil
	}
	server := &mockServer{rangeLookupFn: rangeLookupFn}
	roachpb.RegisterInternalServer(s, server)
	ln, err := netutil.ListenAndServeGRPC(stopper, s, util.TestAddr)
	require.NoError(t, err)
	cfg := kvtenant.ConnectorConfig{
		AmbientCtx:      log.AmbientContext{Tracer: tracing.NewTracer()},
		RPCContext:      rpcContext,
		RPCRetryOptions: rpcRetryOpts,
	}
	addrs := []string{ln.Addr().String()}
	c := NewConnector(cfg, addrs)
	// NOTE: we don't actually start the connector worker. That's ok, as
	// RangeDescriptorDB methods don't require it to be running.
	// Success case.
	descs := []roachpb.RangeDescriptor{{RangeID: 1}, {RangeID: 2}}
	preDescs := []roachpb.RangeDescriptor{{RangeID: 3}, {RangeID: 4}}
	rangeLookupRespC <- &roachpb.RangeLookupResponse{
		Descriptors: descs, PrefetchedDescriptors: preDescs,
	}
	resDescs, resPreDescs, err := c.RangeLookup(ctx, roachpb.RKey("a"), false /* useReverseScan */)
	require.Equal(t, descs, resDescs)
	require.Equal(t, preDescs, resPreDescs)
	require.NoError(t, err)
	// Error case.
	rangeLookupRespC <- &roachpb.RangeLookupResponse{
		Error: roachpb.NewErrorf("hit error"),
	}
	resDescs, resPreDescs, err = c.RangeLookup(ctx, roachpb.RKey("a"), false /* useReverseScan */)
	require.Nil(t, resDescs)
	require.Nil(t, resPreDescs)
	require.Regexp(t, "hit error", err)
	// Context cancelation. The replacement handler blocks until the goroutine
	// below has both unblocked it and canceled the request's context.
	canceledCtx, cancel := context.WithCancel(ctx)
	blockingC := make(chan struct{})
	server.rangeLookupFn = func(ctx context.Context, _ *roachpb.RangeLookupRequest) (*roachpb.RangeLookupResponse, error) {
		<-blockingC
		<-ctx.Done()
		return nil, ctx.Err()
	}
	go func() {
		blockingC <- struct{}{}
		cancel()
	}()
	resDescs, resPreDescs, err = c.RangeLookup(canceledCtx, roachpb.RKey("a"), false /* useReverseScan */)
	require.Nil(t, resDescs)
	require.Nil(t, resPreDescs)
	require.Regexp(t, context.Canceled.Error(), err)
	// FirstRange always returns error.
	desc, err := c.FirstRange()
	require.Nil(t, desc)
	require.Regexp(t, "does not have access to FirstRange", err)
	require.True(t, grpcutil.IsAuthError(err))
}
// TestConnectorRetriesUnreachable tests that Connector iterates over each of
// its provided addresses and retries until it is able to establish a connection
// on one of them.
func TestConnectorRetriesUnreachable(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()
	stopper := stop.NewStopper()
	defer stopper.Stop(ctx)
	clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond)
	rpcContext := rpc.NewInsecureTestingContext(clock, stopper)
	s := rpc.NewServer(rpcContext)
	node1 := &roachpb.NodeDescriptor{NodeID: 1, Address: util.MakeUnresolvedAddr("tcp", "1.1.1.1")}
	node2 := &roachpb.NodeDescriptor{NodeID: 2, Address: util.MakeUnresolvedAddr("tcp", "2.2.2.2")}
	gossipSubEvents := []*roachpb.GossipSubscriptionEvent{
		gossipEventForClusterID(rpcContext.ClusterID.Get()),
		gossipEventForNodeDesc(node1),
		gossipEventForNodeDesc(node2),
	}
	// Serve the canned events, then hold the stream open until the test ends.
	gossipSubFn := func(req *roachpb.GossipSubscriptionRequest, stream roachpb.Internal_GossipSubscriptionServer) error {
		assert.Len(t, req.Patterns, 3)
		assert.Equal(t, "cluster-id", req.Patterns[0])
		assert.Equal(t, "node:.*", req.Patterns[1])
		assert.Equal(t, "system-db", req.Patterns[2])
		for _, event := range gossipSubEvents {
			if err := stream.Send(event); err != nil {
				return err
			}
		}
		<-stream.Context().Done()
		return stream.Context().Err()
	}
	roachpb.RegisterInternalServer(s, &mockServer{gossipSubFn: gossipSubFn})
	// Decompose netutil.ListenAndServeGRPC so we can listen before serving.
	ln, err := net.Listen(util.TestAddr.Network(), util.TestAddr.String())
	require.NoError(t, err)
	stopper.AddCloser(stop.CloserFn(s.Stop))
	_ = stopper.RunAsyncTask(ctx, "wait-quiesce", func(context.Context) {
		<-stopper.ShouldQuiesce()
		netutil.FatalIfUnexpected(ln.Close())
	})
	// Add listen address into list of other bogus addresses.
	cfg := kvtenant.ConnectorConfig{
		AmbientCtx:      log.AmbientContext{Tracer: tracing.NewTracer()},
		RPCContext:      rpcContext,
		RPCRetryOptions: rpcRetryOpts,
	}
	addrs := []string{"1.1.1.1:9999", ln.Addr().String(), "2.2.2.2:9999"}
	c := NewConnector(cfg, addrs)
	c.rpcDialTimeout = 5 * time.Millisecond // speed up test
	// Start should block until the first GossipSubscription response.
	startedC := make(chan error)
	go func() {
		startedC <- c.Start(ctx)
	}()
	select {
	case err := <-startedC:
		t.Fatalf("Start unexpectedly completed with err=%v", err)
	case <-time.After(25 * time.Millisecond):
	}
	// Begin serving on gRPC server. Connector should quickly connect
	// and complete startup.
	_ = stopper.RunAsyncTask(ctx, "serve", func(context.Context) {
		netutil.FatalIfUnexpected(s.Serve(ln))
	})
	require.NoError(t, <-startedC)
	// Test kvcoord.NodeDescStore impl. Wait for full update first.
	waitForNodeDesc(t, c, 2)
	desc, err := c.GetNodeDescriptor(1)
	require.Equal(t, node1, desc)
	require.NoError(t, err)
	desc, err = c.GetNodeDescriptor(2)
	require.Equal(t, node2, desc)
	require.NoError(t, err)
	desc, err = c.GetNodeDescriptor(3)
	require.Nil(t, desc)
	require.Regexp(t, "unable to look up descriptor for n3", err)
}
|
package main
import "fmt"
// Basic types demo: int float64 bool string; struct array slice map channel.
var s string    // zero value: ""
var i int = 0   // explicit type and value
var j = 0       // type inferred as int

// Student demonstrates a struct with JSON field tags.
type Student struct {
	Name string `json:"name"`
	Age  int    `json:"age"`
}
// main walks through declaring and printing Go's basic composite types:
// strings, structs, arrays, slices, maps, and channels.
func main() {
	fmt.Println(s, i, j)
	var I int = 0
	fmt.Println(I)
	// Strings: double quotes and backquotes (raw literal) both work.
	x := "this is string"
	x = `is string`
	fmt.Println(x)
	xiaoming := Student{
		Name: "xiaoming",
		Age:  18,
	}
	fmt.Println(xiaoming)
	// Arrays: alternative forms kept for reference.
	//var array [4]int
	//array = [4]int{1, 2, 3, 4}
	//array := new([4]int)
	array := [4]int{}
	fmt.Println(array)
	// Slices: alternative forms kept for reference.
	//slice := []string{"a", "bc"}
	//slice := make([]string, 0)
	var slice []string
	slice = []string{"a", "b"}
	fmt.Println("slice is ", slice)
	// Maps: alternative forms kept for reference.
	//var m map[string]Student
	//m = map[string]Student{
	//	"a": xiaoming,
	//}
	//m := make(map[string]Student, 0)
	m := map[string]Student{
		"a": xiaoming,
	}
	fmt.Println("m is ", m)
	// Channels must be created with make before use.
	var c chan int
	c = make(chan int)
	fmt.Println(c)
}
|
package session
import (
"context"
"reflect"
"strings"
"time"
"gamesvr/manager"
"shared/common"
"shared/csv/static"
"shared/statistic/logreason"
"shared/utility/errors"
"shared/utility/glog"
"shared/utility/param"
"shared/utility/servertime"
)
// isGMCode reports whether code carries the GM command prefix.
func (s *Session) isGMCode(code string) bool {
	const gmPrefix = "#GM:"
	return strings.HasPrefix(code, gmPrefix)
}
// handelGMCode parses a GM command string such as
// "#GM:addItem(1,2);addItem(1,3)", resolves each command to a GM* method on
// the session via reflection, then runs the commands in order, stopping at
// the first failure. All commands are resolved before any is executed.
func (s *Session) handelGMCode(ctx context.Context, code string) error {
	// Strip the "#GM:" prefix and split into individual commands,
	// e.g. ["addItem(1,2)", "addItem(1,3)"].
	gmCodes := strings.Split(strings.TrimPrefix(code, "#GM:"), ";")
	gmTasks := make([]common.GMTask, 0, len(gmCodes))
	for _, gmCode := range gmCodes {
		// "addItem(1,2)" -> ["addItem", "1,2)"]
		splitGmCode := strings.Split(gmCode, "(")
		if len(splitGmCode) != 2 {
			return errors.New("code fmt error")
		}
		// Parse the argument list: "1,2)" -> ["1", "2"].
		params := strings.Split(strings.TrimSuffix(splitGmCode[1], ")"), ",")
		glog.Infof("cmd: %v", "gm"+strings.Title(splitGmCode[0]))
		// Resolve the handler method by name, e.g. "addItem" -> "GMAddItem".
		// NOTE(review): strings.Title is deprecated (Go 1.18+); replacing it
		// could change which method names resolve, so it is kept as-is.
		method, ok := reflect.TypeOf(s).MethodByName("GM" + strings.Title(splitGmCode[0]))
		if !ok {
			return errors.New("not found gm")
		}
		gmTasks = append(gmTasks, *common.NewGMTask(method.Func, s, ctx, param.NewParam(params)))
	}
	// Execute the parsed tasks in order; stop at the first failure.
	for _, gmTask := range gmTasks {
		err := gmTask.Do()
		if err != nil {
			return errors.WrapTrace(err)
		}
	}
	return nil
}
// GMConfigTask runs a pre-configured GM command looked up from CSV config.
// Parameter 0: the GM config id.
func (s *Session) GMConfigTask(ctx context.Context, param *param.Param) error {
	if param.Len() != 1 {
		return common.ErrParamError
	}
	id, err := param.GetInt32(0)
	if err != nil {
		return errors.WrapTrace(err)
	}
	// Resolve the stored command string for this id.
	code, err := manager.CSV.GM.GetGMCode(id)
	if err != nil {
		return errors.WrapTrace(err)
	}
	if s.isGMCode(code) {
		return s.handelGMCode(ctx, code)
	}
	return errors.New("gm fmt error")
}
// GMAddItem grants the player an item. Parameters: item id, count.
func (s *Session) GMAddItem(ctx context.Context, param *param.Param) error {
	if param.Len() != 2 {
		return common.ErrParamError
	}
	id, err := param.GetInt32(0)
	if err != nil {
		return errors.WrapTrace(err)
	}
	count, err := param.GetInt32(1)
	if err != nil {
		return errors.WrapTrace(err)
	}
	// Grant through the normal reward pipeline so the grant is logged with
	// the GM reason.
	rewards := common.NewRewards()
	rewards.AddReward(common.NewReward(id, count))
	reason := logreason.NewReason(logreason.GMAddReward)
	_, err = s.AddRewards(rewards, reason)
	if err != nil {
		return errors.WrapTrace(err)
	}
	return nil
}
// GMAddEquipmentEXP adds experience to one equipment instance and re-syncs
// its level. Parameters: equipment instance id (int64), exp amount (int32).
func (s *Session) GMAddEquipmentEXP(ctx context.Context, param *param.Param) error {
	if param.Len() != 2 {
		return common.ErrParamError
	}
	id, err := param.GetInt64(0)
	if err != nil {
		return errors.WrapTrace(err)
	}
	count, err := param.GetInt32(1)
	if err != nil {
		return errors.WrapTrace(err)
	}
	equipment, err := s.User.EquipmentPack.Get(id)
	if err != nil {
		return errors.WrapTrace(err)
	}
	equipment.EXP.Plus(count)
	// Recompute level and unlocked attributes from the new EXP total.
	err = manager.CSV.Equipment.SyncLevelAndUnlockAttr(equipment)
	if err != nil {
		return errors.WrapTrace(err)
	}
	return nil
}
// GMAddMail sends the player a mail built from a mail template.
// Parameters: template id, drop id (0 means the mail carries no rewards).
func (s *Session) GMAddMail(ctx context.Context, params *param.Param) error {
	if params.Len() != 2 {
		return common.ErrParamError
	}
	templateId, err := params.GetInt32(0)
	if err != nil {
		return err
	}
	// templateId is already int32; the former int32(templateId) conversion
	// was redundant.
	templateCfg, err := manager.CSV.Mail.GetTemplate(templateId)
	if err != nil {
		return err
	}
	var rewards *common.Rewards = nil
	dropId, err := params.GetInt32(1)
	if err != nil {
		return err
	}
	if dropId != 0 {
		rewards, err = manager.CSV.Drop.DropRewards(dropId)
		if err != nil {
			return err
		}
	}
	// The mail expires ExpireDays days after it is sent.
	sendTime := servertime.Now().Unix()
	expireTime := sendTime + int64(templateCfg.ExpireDays)*servertime.SecondPerDay
	s.User.AddMail(templateId, "", []string{}, "", []string{},
		rewards, "", sendTime, expireTime)
	return nil
}
// GMPassLevel force-passes a level with every target and achievement
// completed. Parameter 0: level id.
func (s *Session) GMPassLevel(ctx context.Context, params *param.Param) error {
	if params.Len() < 1 {
		return common.ErrParamError
	}
	levelId, err := params.GetInt32(0)
	if err != nil {
		return err
	}
	levelCfg, err := manager.CSV.LevelsEntry.GetLevel(levelId)
	if err != nil {
		return err
	}
	// Mark all achievements (1..AchievementsCount) as completed.
	passAchievement := []int32{}
	for i := int32(1); i <= levelCfg.AchievementsCount; i++ {
		passAchievement = append(passAchievement, i)
	}
	// Mark all targets (1..TargetCount) as completed.
	passTarget := []int32{}
	for i := int32(1); i <= levelCfg.TargetCount; i++ {
		passTarget = append(passTarget, i)
	}
	s.User.PassLevel(ctx, levelCfg, passTarget, passAchievement, 0,
		[]int64{}, []int32{}, nil, logreason.EmptyReason())
	return nil
}
// GMResetLevel resets the player's progress on a previously passed level.
// Parameter 0: level id. Fails if the level was never passed.
func (s *Session) GMResetLevel(ctx context.Context, params *param.Param) error {
	if params.Len() < 1 {
		return common.ErrParamError
	}
	levelId, err := params.GetInt32(0)
	if err != nil {
		return err
	}
	level, ok := s.User.LevelsInfo.GetLevel(levelId)
	if !ok {
		return errors.Swrapf(common.ErrLevelNotPassed, levelId)
	}
	level.Reset()
	return nil
}
// GMPassTower force-passes one tower stage with all targets and achievements
// completed. Parameters: tower id, tower stage.
func (s *Session) GMPassTower(ctx context.Context, params *param.Param) error {
	if params.Len() < 2 {
		return common.ErrParamError
	}
	towerId, err := params.GetInt32(0)
	if err != nil {
		return err
	}
	towerStage, err := params.GetInt32(1)
	if err != nil {
		return err
	}
	towerStageCfg, err := manager.CSV.Tower.GetTowerStage(towerId, towerStage)
	if err != nil {
		return err
	}
	// Bug fix: the error from GetLevel was previously discarded, so a bad
	// LevelId in the tower config would proceed with an invalid level config
	// below. Propagate it instead, matching GMPassLevel.
	levelCfg, err := manager.CSV.LevelsEntry.GetLevel(towerStageCfg.LevelId)
	if err != nil {
		return err
	}
	// Mark all achievements and targets as completed.
	passAchievement := []int32{}
	for i := int32(1); i <= levelCfg.AchievementsCount; i++ {
		passAchievement = append(passAchievement, i)
	}
	passTarget := []int32{}
	for i := int32(1); i <= levelCfg.TargetCount; i++ {
		passTarget = append(passTarget, i)
	}
	s.User.PassLevel(ctx, levelCfg, passTarget, passAchievement, static.BattleTypeTower,
		[]int64{int64(towerId), int64(towerStage)}, []int32{}, nil, logreason.EmptyReason())
	return nil
}
// GMSetTime shifts server time to the given wall-clock time by installing a
// global offset, then persists the offset to redis so other processes pick it
// up. Parameter 0: a time string accepted by servertime.ParseTime
// (surrounding double quotes are stripped).
func (s *Session) GMSetTime(ctx context.Context, params *param.Param) error {
	if params.Len() < 1 {
		return common.ErrParamError
	}
	timeStr, err := params.GetString(0)
	if err != nil {
		return err
	}
	timeStr = strings.Trim(timeStr, "\"")
	t, err := servertime.ParseTime(timeStr)
	if err != nil {
		return err
	}
	// Offset = requested time minus the real (un-offset) current time.
	timeOffset := t - servertime.OriginNow().Unix()
	servertime.SetTimeOffset(timeOffset)
	// Fresh short-lived context (deliberately shadows the request ctx) for
	// the redis write.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*1)
	defer cancel()
	// NOTE(review): the error from Set is ignored; a failed write leaves
	// other processes with a stale offset — confirm this is acceptable.
	manager.Global.String.Set(ctx, servertime.TimeOffsetRedisName, timeOffset)
	return nil
}
// GMResetSetTime clears the global time offset locally and in redis,
// restoring real server time. Takes no parameters.
func (s *Session) GMResetSetTime(ctx context.Context, params *param.Param) error {
	servertime.SetTimeOffset(0)
	// Fresh short-lived context (deliberately shadows the request ctx) for
	// the redis write; the Set error is ignored, mirroring GMSetTime.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*1)
	defer cancel()
	manager.Global.String.Set(ctx, servertime.TimeOffsetRedisName, 0)
	return nil
}
// GMPassGuide marks the given guide and every guide ordered before it as
// passed, granting drop rewards along the way. Parameter 0: guide id.
func (s *Session) GMPassGuide(ctx context.Context, params *param.Param) error {
	if params.Len() != 1 {
		return errors.WrapTrace(common.ErrParamError)
	}
	guideId, err := params.GetInt32(0)
	if err != nil {
		return errors.WrapTrace(err)
	}
	config, err := manager.CSV.Guide.GetConfigById(guideId)
	if err != nil {
		return errors.WrapTrace(err)
	}
	// All guide configs ordered up to this guide's order.
	before := manager.CSV.Guide.GetGuideBefore(config.GuideOrder)
	reason := logreason.NewReason(logreason.GMPassGuide)
	for _, guideConfig := range before {
		if s.User.Info.PassedGuideIds.Contains(guideConfig.Id) {
			continue
		}
		// NOTE(review): this grants config.DropId (the target guide's drop)
		// once per not-yet-passed guide, rather than guideConfig.DropId —
		// confirm this is intended.
		if config.DropId > 0 {
			_, err = s.User.AddRewardsByDropId(config.DropId, reason)
			if err != nil {
				return errors.WrapTrace(err)
			}
		}
		s.User.Info.PassedGuideIds.Append(guideConfig.Id)
	}
	return nil
}
// GMAddYggdrasilMail sends the player num yggdrasil mails, each carrying the
// rewards rolled from dropId. Parameters: mail count, drop id.
func (s *Session) GMAddYggdrasilMail(ctx context.Context, params *param.Param) error {
	if params.Len() != 2 {
		return errors.WrapTrace(common.ErrParamError)
	}
	num, err := params.GetInt32(0)
	if err != nil {
		return errors.WrapTrace(err)
	}
	dropId, err := params.GetInt32(1)
	if err != nil {
		return errors.WrapTrace(err)
	}
	// Rewards are re-rolled per mail, so each mail may differ.
	for i := 0; i < int(num); i++ {
		rewards, err := manager.CSV.Drop.DropRewards(dropId)
		if err != nil {
			return errors.WrapTrace(err)
		}
		err = s.User.Yggdrasil.MailBox.AddOne(ctx, s.ID, s.Name, rewards)
		if err != nil {
			return errors.WrapTrace(err)
		}
	}
	return nil
}
// GMUnlockCity makes a yggdrasil city travelable after validating it exists
// in config. Parameter 0: city id.
// NOTE(review): unlike most GM handlers, there is no params.Len() check; a
// missing parameter is only caught by GetInt32 — confirm this is acceptable.
func (s *Session) GMUnlockCity(ctx context.Context, params *param.Param) error {
	cityId, err := params.GetInt32(0)
	if err != nil {
		return errors.WrapTrace(err)
	}
	// Validate the city id before appending it.
	_, err = manager.CSV.Yggdrasil.GetYggCityById(cityId)
	if err != nil {
		return errors.WrapTrace(err)
	}
	s.Yggdrasil.CanTravelCityIds.Append(cityId)
	return nil
}
// GMCompleteTracedSubTask force-completes every sub-task (all Multi entries,
// then Base) of the currently tracked yggdrasil task. A no-op when no task is
// tracked. Takes no parameters.
func (s *Session) GMCompleteTracedSubTask(ctx context.Context, params *param.Param) error {
	taskId := s.Yggdrasil.Task.FetchTrackTaskId()
	if taskId == 0 {
		// Nothing is being tracked.
		return nil
	}
	// NOTE(review): assumes taskId is present in TaskInProgress; a missing
	// entry would yield a zero/nil info — confirm FetchTrackTaskId guarantees
	// membership.
	info := s.Yggdrasil.Task.TaskInProgress[taskId]
	for _, multi := range info.Multi {
		err := s.Yggdrasil.Task.ForceCompleteSubTask(ctx, s.User, multi, info)
		if err != nil {
			return errors.WrapTrace(err)
		}
	}
	err := s.Yggdrasil.Task.ForceCompleteSubTask(ctx, s.User, info.Base, info)
	if err != nil {
		return errors.WrapTrace(err)
	}
	return nil
}
// GMAcceptTask starts a yggdrasil task by initializing its first sub-task.
// Parameter 0: task id.
func (s *Session) GMAcceptTask(ctx context.Context, params *param.Param) error {
	taskId, err := params.GetInt32(0)
	if err != nil {
		return errors.WrapTrace(err)
	}
	config, err := manager.CSV.Yggdrasil.GetTaskConfig(taskId)
	if err != nil {
		return errors.WrapTrace(err)
	}
	_, err = s.Yggdrasil.Task.InitSubTask(ctx, s.User, config.NextSubTaskId)
	if err != nil {
		// Consistency fix: wrap with a trace like every other error path in
		// this handler (previously returned the bare error).
		return errors.WrapTrace(err)
	}
	return nil
}
// GMCompleteTask force-completes an entire yggdrasil task.
// Parameter 0: task id.
func (s *Session) GMCompleteTask(ctx context.Context, params *param.Param) error {
	taskId, err := params.GetInt32(0)
	if err != nil {
		return errors.WrapTrace(err)
	}
	return s.Yggdrasil.Task.ForceCompleteTask(ctx, s.User, taskId)
}
// GMAddTravelAp adds travel action points to the player's yggdrasil travel
// info. Parameter 0: amount to add.
func (s *Session) GMAddTravelAp(ctx context.Context, params *param.Param) error {
	add, err := params.GetInt32(0)
	if err != nil {
		return errors.WrapTrace(err)
	}
	s.Yggdrasil.TravelInfo.TravelAp += add
	return nil
}
// GMAddItemInYgg grants an item through the yggdrasil reward pipeline (as
// opposed to GMAddItem, which uses the normal pipeline).
// Parameters: item id, count.
func (s *Session) GMAddItemInYgg(ctx context.Context, params *param.Param) error {
	if params.Len() != 2 {
		return common.ErrParamError
	}
	id, err := params.GetInt32(0)
	if err != nil {
		return errors.WrapTrace(err)
	}
	count, err := params.GetInt32(1)
	if err != nil {
		return errors.WrapTrace(err)
	}
	rewards := common.NewRewards()
	rewards.AddReward(common.NewReward(id, count))
	reason := logreason.NewReason(logreason.GMAddReward)
	err = s.Yggdrasil.AddRewards(ctx, s.User, rewards, 0, reason)
	if err != nil {
		return errors.WrapTrace(err)
	}
	return nil
}
// GMYggLightAll unlocks every point of every yggdrasil area for the player.
// Takes no parameters.
func (s *Session) GMYggLightAll(ctx context.Context, params *param.Param) error {
	for _, config := range manager.CSV.Yggdrasil.GetAllAreaConfig() {
		for _, p := range config.Area.Points() {
			s.Yggdrasil.UnlockArea.AppendPoint(p)
		}
	}
	return nil
}
// GMYggClearMatchPool deletes the global "matchUserIds" redis set, emptying
// the yggdrasil match pool for all players. Takes no parameters.
func (s *Session) GMYggClearMatchPool(ctx context.Context, params *param.Param) error {
	err := manager.Global.SDel(ctx, "matchUserIds")
	if err != nil {
		return errors.WrapTrace(err)
	}
	return nil
}
// GMYggResetExploreTimes zeroes the player's yggdrasil travel-time counter.
// Takes no parameters.
func (s *Session) GMYggResetExploreTimes(ctx context.Context, params *param.Param) error {
	s.Yggdrasil.TravelTime = 0
	return nil
}
// GMYggResetCharacterTime makes every character immediately eligible for
// yggdrasil by setting their cooldown timestamp to now. Takes no parameters.
func (s *Session) GMYggResetCharacterTime(ctx context.Context, params *param.Param) error {
	for _, character := range *s.CharacterPack {
		character.CanYggdrasilTime = servertime.Now().Unix()
	}
	return nil
}
|
package main
import "fmt"
// main defers foo then calls bar: "bar" prints first, and the deferred foo
// runs when main returns, printing "foo" last.
func main() {
	defer foo() //When main closes then all defers get run
	bar()
} //END main
// foo prints "foo" followed by a newline.
func foo() {
	fmt.Print("foo\n")
}
// bar prints "bar" followed by a newline.
func bar() {
	fmt.Print("bar\n")
}
//A "defer" statement invokes a function whose execution is deferred to the moment the surrounding function returns, either because the surrounding function executed a return statement, reached the end of its function body, or because the corresponding goroutine is panicking. |
package client
import "github.com/go-redis/redis"
var (
	// Cache is the shared Redis client handle. It is nil until the
	// application initializes it; callers must not use it before then.
	Cache *redis.Client
)
|
package getsubcommands
import (
snmpsimclient "github.com/inexio/snmpsim-restapi-go-client"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"os"
)
// GetEnginesCmd represents the getEngines command
// GetEnginesCmd represents the getEngines command. It lists all engines known
// to the snmpsim management API, honoring the persistent output flags
// (format, depth, prettified) and any engine filters.
//
// Fix: each failure is now logged with .Err(err) so the underlying cause is
// included in the log output instead of being silently dropped.
var GetEnginesCmd = &cobra.Command{
	Use:   "engines",
	Args:  cobra.ExactArgs(0),
	Short: "Returns a list of all engines",
	Long: `Returns a detailed list of all engines available.
All details of one specific engine can be retrieved via 'get engine <engine-id>'.`,
	Run: func(cmd *cobra.Command, args []string) {
		//Parse all persistent flags
		format, depth, prettified := parsePersistentFlags(cmd)
		//Load the client data from the config
		baseUrl := viper.GetString("mgmt.http.baseUrl")
		username := viper.GetString("mgmt.http.authUsername")
		password := viper.GetString("mgmt.http.authPassword")
		//Create a new client
		client, err := snmpsimclient.NewManagementClient(baseUrl)
		if err != nil {
			log.Error().
				Err(err).
				Msg("Error while creating management client")
			os.Exit(1)
		}
		err = client.SetUsernameAndPassword(username, password)
		if err != nil {
			log.Error().
				Err(err).
				Msg("Error while setting username and password")
			os.Exit(1)
		}
		//Parse filters from flags
		filters := parseFilters(cmd)
		//Get and print the engines
		var engines snmpsimclient.Engines
		engines, err = client.GetEngines(filters)
		if err != nil {
			log.Error().
				Err(err).
				Msg("Error while getting engines")
			os.Exit(1)
		}
		err = printData(engines, format, prettified, depth)
		if err != nil {
			log.Error().
				Err(err).
				Msg("Error while printing data")
			os.Exit(1)
		}
	},
}
// init registers the optional engine filter flags on the command.
func init() {
	GetEnginesCmd.Flags().String("name", "", "Set the name filter")
	GetEnginesCmd.Flags().String("engine_id", "", "Set the engine id filter")
}
|
package migrations
import (
"database/sql"
"github.com/pressly/goose"
)
// init registers this migration's up/down functions with goose.
func init() {
	goose.AddMigration(upInit, downInit)
}
// initDB creates the messages table.
// Fix: "from" and "to" are reserved SQL keywords; unquoted they make the
// CREATE TABLE statement fail to parse, so they must be quoted identifiers.
var initDB = `
CREATE TABLE messages (
    id uuid PRIMARY KEY,
    message text,
    "from" uuid NOT NULL,
    "to" uuid NOT NULL
);
`
// upInit applies the migration: it creates the messages table inside the
// supplied transaction.
func upInit(tx *sql.Tx) error {
	_, err := tx.Exec(initDB)
	return err
}
// downInit rolls the migration back by dropping the table created by upInit.
// Previously this was a no-op, which left the table behind and made the
// migration non-reversible.
func downInit(tx *sql.Tx) error {
	_, err := tx.Exec(`DROP TABLE IF EXISTS messages;`)
	return err
}
|
// Demonstrates how operation order affects accuracy in floating-point arithmetic,
// and why math.Abs should be used when comparing floats.
package main
import "fmt"
// main demonstrates floating-point rounding artifacts: the same Celsius ->
// Fahrenheit conversion yields different results depending on whether the
// division or the multiplication happens first, and 0.1 + 0.2 != 0.3.
func main() {
	// Division first: intermediate rounding error shows up in the result.
	c1 := 21.0
	fmt.Print((c1/5.0*9.0)+32, "* F\n")
	fmt.Print((9.0/5.0*c1)+32, "* f\n")
	// 69.80000000000001* F
	// 69.80000000000001* f
	// Multiplication first: the exact value survives.
	F1 := (c1 * 9.0 / 5.0) + 32.0
	fmt.Print(F1, "* F\n")
	// 69.8* F
	// Floating-point inaccuracy accumulates under addition.
	piggyBank := 0.1
	piggyBank += 0.2
	fmt.Println(piggyBank)
	// 0.30000000000000004
	// So 0.1+0.2 != 0.3 exactly; compare with a tolerance instead, e.g.:
	// fmt.Println(math.Abs(piggyBank-0.3) < 0.0001)
}
|
package service
import (
"context"
"fmt"
"github.com/imouto1994/yume/internal/infra/sqlite"
"github.com/imouto1994/yume/internal/model"
"github.com/imouto1994/yume/internal/repository"
"go.uber.org/zap"
)
// ServiceLibrary exposes CRUD and scanning operations for libraries. All
// methods take the DB handle (or transaction) explicitly via sqlite.DBOps.
type ServiceLibrary interface {
	CreateLibrary(context.Context, sqlite.DBOps, *model.Library) error
	GetLibraries(context.Context, sqlite.DBOps) ([]*model.Library, error)
	GetLibraryByID(context.Context, sqlite.DBOps, string) (*model.Library, error)
	DeleteLibraryByID(context.Context, sqlite.DBOps, string) error
	ScanLibrary(context.Context, sqlite.DBOps, *model.Library) error
}
// serviceLibrary is the default ServiceLibrary implementation, composed of
// the library repository plus the scanner, title, and book services.
type serviceLibrary struct {
	repositoryLibrary repository.RepositoryLibrary
	serviceScanner    ServiceScanner
	serviceTitle      ServiceTitle
	serviceBook       ServiceBook
}
// NewServiceLibrary wires a serviceLibrary from its dependencies.
func NewServiceLibrary(rLibrary repository.RepositoryLibrary, sScanner ServiceScanner, sTitle ServiceTitle, sBook ServiceBook) ServiceLibrary {
	return &serviceLibrary{
		repositoryLibrary: rLibrary,
		serviceScanner:    sScanner,
		serviceTitle:      sTitle,
		serviceBook:       sBook,
	}
}
// CreateLibrary persists a new library record.
func (s *serviceLibrary) CreateLibrary(ctx context.Context, dbOps sqlite.DBOps, library *model.Library) error {
	if err := s.repositoryLibrary.Insert(ctx, dbOps, library); err != nil {
		return fmt.Errorf("sLibrary - failed to create library in DB: %w", err)
	}
	return nil
}
// GetLibraries returns every library stored in the database.
func (s *serviceLibrary) GetLibraries(ctx context.Context, dbOps sqlite.DBOps) ([]*model.Library, error) {
	all, err := s.repositoryLibrary.FindAll(ctx, dbOps)
	if err != nil {
		err = fmt.Errorf("sLibrary - failed to get all libraries in DB: %w", err)
		return nil, err
	}
	return all, nil
}
// GetLibraryByID returns the library with the given ID.
// Fix: the wrap message was missing its verb ("failed to library by ID").
func (s *serviceLibrary) GetLibraryByID(ctx context.Context, dbOps sqlite.DBOps, libraryID string) (*model.Library, error) {
	library, err := s.repositoryLibrary.FindByID(ctx, dbOps, libraryID)
	if err != nil {
		return nil, fmt.Errorf("sLibrary - failed to get library by ID in DB: %w", err)
	}
	return library, nil
}
// DeleteLibraryByID removes a library and then cascades deletion of its
// titles and books.
// NOTE(review): the three deletes are not atomic unless dbOps is a
// transaction supplied by the caller — a failure partway leaves orphans.
func (s *serviceLibrary) DeleteLibraryByID(ctx context.Context, dbOps sqlite.DBOps, libraryID string) error {
	err := s.repositoryLibrary.DeleteByID(ctx, dbOps, libraryID)
	if err != nil {
		return fmt.Errorf("sLibrary - failed to delete library by ID in DB: %w", err)
	}
	err = s.serviceTitle.DeleteTitlesByLibraryID(ctx, dbOps, libraryID)
	if err != nil {
		return fmt.Errorf("sLibrary - failed to use service Title to delete titles of deleted library: %w", err)
	}
	err = s.serviceBook.DeleteBooksByLibraryID(ctx, dbOps, libraryID)
	if err != nil {
		return fmt.Errorf("sLibrary - failed to use service Book to delete books of deleted library: %w", err)
	}
	return nil
}
// ScanLibrary reconciles a library's DB state with a fresh filesystem scan:
// titles and books that no longer exist are deleted, modified ones are
// updated and rescanned, and new ones are created and scanned.
//
// Book scans run concurrently; every branch below sends exactly one value
// per book on bookScanChannel so the final drain loop terminates and can
// surface the first scan error.
//
// Fix: the book-scan goroutines previously assigned to the closure-captured
// outer err variable from multiple goroutines (a data race that could also
// clobber errors seen by the main flow). Each goroutine now uses a local
// error variable.
func (s *serviceLibrary) ScanLibrary(ctx context.Context, dbOps sqlite.DBOps, library *model.Library) error {
	scanResult, err := s.serviceScanner.ScanLibraryRoot(library.Root)
	if err != nil {
		return fmt.Errorf("sLibrary - failed to use service Scanner to scan library: %w", err)
	}
	zap.L().Info("sLibrary - successfully scanned library files", zap.Int("numTitles", len(scanResult.TitleByTitleName)))
	// Get all titles in DB
	dbTitles, err := s.serviceTitle.GetTitlesByLibraryID(ctx, dbOps, fmt.Sprintf("%d", library.ID))
	if err != nil {
		return fmt.Errorf("sLibrary - failed to use service Title get all current titles in scanned library: %w", err)
	}
	dbTitleByTitleName := make(map[string]*model.Title)
	for _, dbTitle := range dbTitles {
		dbTitleByTitleName[dbTitle.Name] = dbTitle
	}
	// Remove titles not existing anymore
	for _, dbTitle := range dbTitles {
		if _, ok := scanResult.TitleByTitleName[dbTitle.Name]; !ok {
			err = s.serviceTitle.DeleteTitleByID(ctx, dbOps, fmt.Sprintf("%d", dbTitle.ID))
			if err != nil {
				return fmt.Errorf("sLibrary - failed to use service Title to delete non-existing titles in scanned library: %w", err)
			}
			zap.L().Info("sLibrary - successfully removed non-existing title", zap.String("name", dbTitle.Name))
		}
	}
	// Size the channel so no goroutine can block on send.
	numBooks := 0
	for _, books := range scanResult.BooksByTitleName {
		numBooks += len(books)
	}
	bookScanChannel := make(chan error, numBooks)
	for _, title := range scanResult.TitleByTitleName {
		if dbTitle, ok := dbTitleByTitleName[title.Name]; ok {
			if dbTitle.UpdatedAt != title.UpdatedAt {
				// Update title's modified time
				err := s.serviceTitle.UpdateTitleModifiedTime(ctx, dbOps, fmt.Sprintf("%d", dbTitle.ID), title.UpdatedAt)
				if err != nil {
					return fmt.Errorf("sLibrary - failed to use service Title to update title's modified time from scanned library: %w", err)
				}
				// Update title's cover dimension if necessary
				if dbTitle.CoverHeight != title.CoverHeight || dbTitle.CoverWidth != title.CoverWidth {
					err = s.serviceTitle.UpdateTitleCoverDimension(ctx, dbOps, fmt.Sprintf("%d", dbTitle.ID), title.CoverWidth, title.CoverHeight)
					if err != nil {
						return fmt.Errorf("sLibrary - failed to use service Title to update title's cover dimension from scanned library: %w", err)
					}
				}
				// Update title's supported languages if necessary
				if dbTitle.Langs != title.Langs {
					err = s.serviceTitle.UpdateTitleLangs(ctx, dbOps, fmt.Sprintf("%d", dbTitle.ID), title.Langs)
					if err != nil {
						return fmt.Errorf("sLibrary - failed to use service Title to update title's supported langs from scanned library: %w", err)
					}
				}
				// Update title's book count if necessary
				if dbTitle.BookCount != title.BookCount {
					err = s.serviceTitle.UpdateTitleBookCount(ctx, dbOps, fmt.Sprintf("%d", dbTitle.ID), title.BookCount)
					if err != nil {
						return fmt.Errorf("sLibrary - failed to use service Title to update title's book count from scanned library: %w", err)
					}
				}
				// Update title's flags if necessary
				if dbTitle.Uncensored != title.Uncensored {
					err = s.serviceTitle.UpdateTitleUncensored(ctx, dbOps, fmt.Sprintf("%d", dbTitle.ID), title.Uncensored)
					if err != nil {
						return fmt.Errorf("sLibrary - failed to use service Title to update title's uncensored flag from scanned library: %w", err)
					}
				}
				if dbTitle.Waifu2x != title.Waifu2x {
					err = s.serviceTitle.UpdateTitleWaifu2x(ctx, dbOps, fmt.Sprintf("%d", dbTitle.ID), title.Waifu2x)
					if err != nil {
						return fmt.Errorf("sLibrary - failed to use service Title to update title's waifu2x flag from scanned library: %w", err)
					}
				}
				dbBooks, err := s.serviceBook.GetBooksByTitleID(ctx, dbOps, fmt.Sprintf("%d", dbTitle.ID))
				if err != nil {
					return fmt.Errorf("sLibrary - failed to use service Book to get all current stored books in updated title from scanned library: %w", err)
				}
				books := scanResult.BooksByTitleName[title.Name]
				dbBookByBookName := make(map[string]*model.Book)
				bookByBookName := make(map[string]*model.Book)
				for _, dbBook := range dbBooks {
					dbBookByBookName[dbBook.Name] = dbBook
				}
				for _, book := range books {
					bookByBookName[book.Name] = book
				}
				for _, dbBook := range dbBooks {
					if _, ok := bookByBookName[dbBook.Name]; !ok {
						// Remove book not existing anymore
						err = s.serviceBook.DeleteBookByID(ctx, dbOps, fmt.Sprintf("%d", dbBook.ID))
						if err != nil {
							return fmt.Errorf("sLibrary - failed to use service Book to delete non-existing books in updated title from scanned library: %w", err)
						}
					}
				}
				for _, book := range books {
					if dbBook, ok := dbBookByBookName[book.Name]; ok {
						previewChanged := (dbBook.PreviewUpdatedAt == nil && book.PreviewUpdatedAt != nil) ||
							(dbBook.PreviewUpdatedAt != nil && book.PreviewUpdatedAt == nil) ||
							(dbBook.PreviewUpdatedAt != nil && book.PreviewUpdatedAt != nil && *dbBook.PreviewUpdatedAt != *book.PreviewUpdatedAt)
						if dbBook.UpdatedAt != book.UpdatedAt || previewChanged {
							// Update book's modified time
							if dbBook.UpdatedAt != book.UpdatedAt {
								err = s.serviceBook.UpdateBookModifiedTime(ctx, dbOps, fmt.Sprintf("%d", dbBook.ID), book.UpdatedAt)
								if err != nil {
									return fmt.Errorf("sLibrary - failed to use service Book to update book's modified time in updated title from scanned library: %w", err)
								}
							}
							// Update book's page count
							if dbBook.PageCount != book.PageCount {
								err = s.serviceBook.UpdateBookPageCount(ctx, dbOps, fmt.Sprintf("%d", dbBook.ID), book.PageCount)
								if err != nil {
									return fmt.Errorf("sLibrary - failed to use service Book to update book's page count in updated title from scanned library: %w", err)
								}
							}
							// Update preview
							if previewChanged {
								err = s.serviceBook.UpdateBookPreviewInfo(ctx, dbOps, fmt.Sprintf("%d", dbBook.ID), book.PreviewURL, book.PreviewUpdatedAt)
								if err != nil {
									return fmt.Errorf("sLibrary - failed to use service Book to update book's preview info in updated title from scanned library: %w", err)
								}
							}
							// Delete all pages from updated book in updated title
							err = s.serviceBook.DeleteBookPages(ctx, dbOps, fmt.Sprintf("%d", dbBook.ID))
							if err != nil {
								return fmt.Errorf("sLibrary - failed to use service Book to delete pages of updated book in updated title from scanned library: %w", err)
							}
							err = s.serviceBook.DeleteBookPreviews(ctx, dbOps, fmt.Sprintf("%d", dbBook.ID))
							if err != nil {
								return fmt.Errorf("sLibrary - failed to use service Book to delete previews of updated book in updated title from scanned library: %w", err)
							}
							// Rescan all pages from updated book in updated title.
							// Use a goroutine-local error (not the outer err).
							go func(b *model.Book) {
								if scanErr := s.serviceBook.ScanBook(ctx, dbOps, b); scanErr != nil {
									bookScanChannel <- fmt.Errorf("sLibrary - failed to use service Book to scan updated book for updated title from scanned library: %w", scanErr)
								} else {
									bookScanChannel <- nil
								}
							}(dbBook)
						} else {
							bookScanChannel <- nil
						}
					} else {
						book.LibraryID = library.ID
						book.TitleID = dbTitle.ID
						// Create new book entry in updated title
						err = s.serviceBook.CreateBook(ctx, dbOps, book)
						if err != nil {
							return fmt.Errorf("sLibrary - failed to use service Book to create new book for updated title from scanned library: %w", err)
						}
						// Scan new book in updated title (goroutine-local error).
						go func(b *model.Book) {
							if scanErr := s.serviceBook.ScanBook(ctx, dbOps, b); scanErr != nil {
								bookScanChannel <- fmt.Errorf("sLibrary - failed to use service Book to scan new book for updated title from scanned library: %w", scanErr)
							} else {
								bookScanChannel <- nil
							}
						}(book)
					}
				}
				zap.L().Info("sLibrary - successfully updated modified title", zap.String("name", title.Name))
			} else {
				// Title unchanged: still account for its books on the channel.
				books := scanResult.BooksByTitleName[title.Name]
				for range books {
					bookScanChannel <- nil
				}
			}
		} else {
			title.LibraryID = library.ID
			// Create new title entry
			err = s.serviceTitle.CreateTitle(ctx, dbOps, title)
			if err != nil {
				return fmt.Errorf("sLibrary - failed to use service Title to create new title for scanned library: %w", err)
			}
			books := scanResult.BooksByTitleName[title.Name]
			for _, book := range books {
				book.LibraryID = library.ID
				book.TitleID = title.ID
				// Create new book entry for new title
				err = s.serviceBook.CreateBook(ctx, dbOps, book)
				if err != nil {
					return fmt.Errorf("sLibrary - failed to use service Book to create book for new title from scanned library: %w", err)
				}
				// Scan new book for new title (goroutine-local error).
				go func(b *model.Book) {
					if scanErr := s.serviceBook.ScanBook(ctx, dbOps, b); scanErr != nil {
						bookScanChannel <- fmt.Errorf("sLibrary - failed to use service Book to scan book for new title from scanned library: %w", scanErr)
					} else {
						bookScanChannel <- nil
					}
				}(book)
			}
			zap.L().Info("sLibrary - successfully added new title", zap.String("name", title.Name))
		}
	}
	// Drain one result per book; return the first scan error, if any.
	for i := 0; i < numBooks; i++ {
		err := <-bookScanChannel
		if err != nil {
			return err
		}
	}
	return nil
}
|
package agollo
import (
"github.com/go-apollo/agollo/test"
"testing"
)
// TestStart boots the mock config/notify servers, starts the client, and
// verifies a known key resolves to its expected value.
func TestStart(t *testing.T) {
	go runMockConfigServer(onlyNormalConfigResponse)
	go runMockNotifyServer(onlyNormalResponse)
	defer closeMockConfigServer()
	Start()
	got := getValue("key1")
	test.Equal(t, "value1", got)
}
|
package ooapi
import (
"bytes"
"context"
"encoding/gob"
"encoding/json"
"io"
"net/http"
"strings"
"text/template"
)
type defaultRequestMaker struct{}
func (*defaultRequestMaker) NewRequest(
ctx context.Context, method, URL string, body io.Reader) (*http.Request, error) {
return http.NewRequestWithContext(ctx, method, URL, body)
}
// defaultJSONCodec marshals and unmarshals values using encoding/json.
type defaultJSONCodec struct{}

// Encode serializes v to JSON bytes.
func (*defaultJSONCodec) Encode(v interface{}) ([]byte, error) {
	data, err := json.Marshal(v)
	return data, err
}

// Decode deserializes JSON bytes into v.
func (*defaultJSONCodec) Decode(b []byte, v interface{}) error {
	if err := json.Unmarshal(b, v); err != nil {
		return err
	}
	return nil
}
// defaultTemplateExecutor renders text/template templates against a value.
type defaultTemplateExecutor struct{}

// Execute parses tmpl and renders it with v, returning the resulting string.
func (*defaultTemplateExecutor) Execute(tmpl string, v interface{}) (string, error) {
	parsed, err := template.New("t").Parse(tmpl)
	if err != nil {
		return "", err
	}
	var out bytes.Buffer
	if execErr := parsed.Execute(&out, v); execErr != nil {
		return "", execErr
	}
	return out.String(), nil
}
// defaultGobCodec marshals and unmarshals values using encoding/gob.
type defaultGobCodec struct{}

// Encode serializes v with gob.
func (*defaultGobCodec) Encode(v interface{}) ([]byte, error) {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(v); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// Decode deserializes gob bytes into v.
func (*defaultGobCodec) Decode(b []byte, v interface{}) error {
	reader := bytes.NewReader(b)
	return gob.NewDecoder(reader).Decode(v)
}
|
package image
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestGetDestinationInfo checks that both the context and reference for a
// destination directory are produced without error.
func TestGetDestinationInfo(t *testing.T) {
	t.Run(`not nil`, func(t *testing.T) {
		dir := "dir"
		destCtx, destRef, err := getDestinationInfo(dir)
		require.NoError(t, err)
		require.NotNil(t, destCtx)
		require.NotNil(t, destRef)
	})
}
// TestBuildDestinationContext checks the destination context fields derived
// from the target directory.
func TestBuildDestinationContext(t *testing.T) {
	t.Run(`not nil`, func(t *testing.T) {
		dir := "dir"
		got := buildDestinationContext(dir)
		require.NotNil(t, got)
		assert.Equal(t, dir, got.BlobInfoCacheDir)
		assert.True(t, got.DirForceDecompress)
	})
}
// TestGetDestinationReference checks that a destination reference is built
// for a directory without error.
func TestGetDestinationReference(t *testing.T) {
	t.Run(`not nil`, func(t *testing.T) {
		dir := "dir"
		ref, err := getDestinationReference(dir)
		require.NoError(t, err)
		require.NotNil(t, ref)
	})
}
|
//
// Weather update client.
// Connects SUB socket to tcp://weather-server:5556
// Collects weather updates and finds avg temp in zipcode
//
package main
import (
zmq "github.com/pebbe/zmq4"
"fmt"
"os"
"strconv"
"strings"
)
// main subscribes to the weather server and prints the average temperature
// for a zipcode (default 10001) over 20 valid readings.
//
// Fixes: errors from socket creation/connect/subscribe/receive were silently
// discarded; the "Process 100 updates" comment did not match the loop (20);
// snake_case locals renamed to Go-style camelCase.
func main() {
	// Socket to talk to server
	fmt.Println("Collecting updates from weather server...")
	subscriber, err := zmq.NewSocket(zmq.SUB)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer subscriber.Close()
	if err := subscriber.Connect("tcp://weather-server:5556"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Subscribe to zipcode, default is NYC, 10001
	filter := "10001 "
	if len(os.Args) > 1 {
		filter = os.Args[1] + " "
	}
	if err := subscriber.SetSubscribe(filter); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Process 20 updates (readings that fail to parse are not counted).
	totalTemp := 0
	updateNbr := 0
	for updateNbr < 20 {
		msg, err := subscriber.Recv(0)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		fmt.Printf(".")
		if fields := strings.Fields(msg); len(fields) > 1 {
			if temperature, err := strconv.Atoi(fields[1]); err == nil {
				totalTemp += temperature
				updateNbr++
			}
		}
	}
	fmt.Printf("\nAverage temperature for zipcode '%s' was %dC \n\n", strings.TrimSpace(filter), totalTemp/updateNbr)
}
|
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package check implements binary analysis similar to bazel's nogo, or the
// unitchecker package. It exists in order to provide additional facilities for
// analysis, namely plumbing through the output from dumping the generated
// binary (to analyze actual produced code).
package check
import (
"errors"
"fmt"
"go/ast"
"go/build"
"go/parser"
"go/token"
"go/types"
"io"
"os"
"path"
"path/filepath"
"reflect"
"regexp"
"runtime/debug"
"strings"
"sync"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/gcexportdata"
"gvisor.dev/gvisor/runsc/flag"
"gvisor.dev/gvisor/tools/nogo/facts"
"gvisor.dev/gvisor/tools/nogo/flags"
)
var (
	// ErrSkip indicates the package should be skipped.
	ErrSkip = errors.New("skipped")
	// showTimes indicates we should show analyzer times.
	showTimes = flag.Bool("show_times", false, "show all analyzer times")
)
// tagsOnce guards the one-time, lazy resolution of build/release tags in
// shouldInclude; the three values below are written only inside that Do.
var (
	tagsOnce sync.Once
	buildTags []string
	releaseTagsVal []string
	releaseTagsErr error
)
// Hack! factFacts only provides facts loaded from directly imported packages
// for efficiency (see importer.cache). In general, if you need a fact from a
// package that isn't otherwise imported, the expectation is that you will add
// a dummy import/use of the desired package to ensure it is a dependency.
//
// Unfortunately, some packages need facts from internal packages. Since
// internal packages cannot be imported we explicitly import in this tool to
// ensure the facts are available to ImportPackageFact.
var internalPackages = []string{
	// Required by pkg/sync for internal/abi.MapType.
	"internal/abi",
}
// shouldInclude indicates whether the file should be included, per the
// configured GOOS/GOARCH and build/release tag constraints.
func shouldInclude(path string) (bool, error) {
	// Resolve build and release tags exactly once, lazily.
	tagsOnce.Do(func() {
		if len(flags.BuildTags) > 0 {
			buildTags = strings.Split(flags.BuildTags, ",")
		}
		releaseTagsVal, releaseTagsErr = releaseTags()
	})
	if releaseTagsErr != nil {
		return false, releaseTagsErr
	}
	buildCtx := build.Default
	buildCtx.GOOS = flags.GOOS
	buildCtx.GOARCH = flags.GOARCH
	buildCtx.BuildTags = buildTags
	buildCtx.ReleaseTags = releaseTagsVal
	return buildCtx.MatchFile(filepath.Dir(path), filepath.Base(path))
}
// sortSrcs partitions srcs into Go source files and everything else,
// preserving input order within each group.
func sortSrcs(srcs []string) (goFiles []string, nonGoFiles []string) {
	for _, name := range srcs {
		switch {
		case strings.HasSuffix(name, ".go"):
			goFiles = append(goFiles, name)
		default:
			nonGoFiles = append(nonGoFiles, name)
		}
	}
	return goFiles, nonGoFiles
}
// importerEntry is a single entry in the importer.
type importerEntry struct {
	ready sync.WaitGroup // released (Done) once pkg/err below are final
	pkg *types.Package // type information for the package (nil until ready)
	findings FindingSet // findings produced while checking this package
	err error // terminal error from loading/checking, if any
	factsMu sync.Mutex // protects facts below
	facts *facts.Package // lazily loaded facts; see fastFacts
}
// importer is an almost-implementation of go/types.Importer.
//
// This wraps a configuration, which provides the map of package names to
// files, and the facts. Note that this importer implementation will always
// pass when a given package is not available.
type importer struct {
	fset *token.FileSet // position information for all parsed files
	sources map[string][]string // package path -> source files (may be nil)
	// mu protects cache & bundles (see below).
	mu sync.Mutex
	cache map[string]*importerEntry
	// bundles is protected by mu, but once set is immutable.
	bundles []*facts.Bundle
	// importsMu protects imports.
	importsMu sync.Mutex
	imports map[string]*types.Package
}
// loadBundles loads all bundle files.
//
// This should only be called from loadFacts, below. After calling this
// function, i.bundles may be read freely without holding a lock.
func (i *importer) loadBundles() error {
	i.mu.Lock()
	defer i.mu.Unlock()
	if i.bundles != nil {
		// Already loaded; i.bundles is immutable once set.
		return nil
	}
	// Open every configured bundle file and collect it.
	for _, bundlePath := range flags.Bundles {
		bundle, err := facts.BundleFrom(bundlePath)
		if err != nil {
			return fmt.Errorf("error loading bundled facts: %w", err)
		}
		i.bundles = append(i.bundles, bundle)
	}
	return nil
}
// loadFacts returns all package facts for the given name.
//
// This should be called only from importPackage, as this may deserialize a
// facts file (which is an expensive operation). Callers should generally rely
// on fastFacts to access facts for packages that have already been imported.
func (i *importer) loadFacts(pkg *types.Package) (*facts.Package, error) {
	// First choice: an explicit facts file from the fact map.
	if filename, ok := flags.FactMap[pkg.Path()]; ok {
		f, err := os.Open(filename)
		if err != nil {
			return nil, fmt.Errorf("error loading facts from %q: %w", filename, err)
		}
		defer f.Close()
		pkgFacts := facts.NewPackage()
		if err := pkgFacts.ReadFrom(pkg, f); err != nil {
			return nil, fmt.Errorf("error loading facts: %w", err)
		}
		return pkgFacts, nil
	}
	// Second choice: a fact bundle; make sure they are loaded.
	if err := i.loadBundles(); err != nil {
		return nil, fmt.Errorf("error loading bundles: %w", err)
	}
	for _, bundle := range i.bundles {
		pkgFacts, err := bundle.Package(pkg)
		if err != nil {
			return nil, fmt.Errorf("error loading from a bundle: %w", err)
		}
		if pkgFacts != nil {
			return pkgFacts, nil
		}
	}
	// Nothing available for this package.
	return nil, nil
}
// fastFacts returns facts for the given package.
//
// This relies exclusively on loaded packages, as the parameter is
// *types.Package and therefore the package data must already be available.
func (i *importer) fastFacts(pkg *types.Package) *facts.Package {
	i.mu.Lock()
	e, ok := i.cache[pkg.Path()]
	i.mu.Unlock()
	if !ok {
		return nil
	}
	e.factsMu.Lock()
	defer e.factsMu.Unlock()
	// Do we have them already?
	if e.facts != nil {
		return e.facts
	}
	// Load the facts. The local is deliberately not named "facts": the
	// previous name shadowed the imported facts package.
	loaded, err := i.loadFacts(pkg)
	if err != nil {
		// There are no facts available, but no good way to propagate
		// this minor error. It may be intentional that no analysis was
		// performed on some part of the standard library, for example.
		return nil
	}
	e.facts = loaded // Cache the result.
	return loaded
}
// findArchive finds the archive for the given package, falling back to the
// binary location when the archive map has no entry.
func (i *importer) findArchive(path string) (rc io.ReadCloser, err error) {
	if realPath, ok := flags.ArchiveMap[path]; ok {
		return os.Open(realPath)
	}
	return i.findBinary(path)
}
// findBinary finds the binary for the given package, falling back to the
// standard library when the import map has no entry.
func (i *importer) findBinary(path string) (rc io.ReadCloser, err error) {
	if realPath, ok := flags.ImportMap[path]; ok {
		// Open the mapped file directly.
		return os.Open(realPath)
	}
	// Not found in the import path; attempt the standard library.
	return findStdPkg(path)
}
// importPackage almost-implements types.Importer.Import.
//
// This must be called by other methods directly.
//
// Packages with known sources are type-checked from scratch (preferred);
// anything else is loaded from compiled export data. Results are cached in
// i.cache, keyed by import path.
func (i *importer) importPackage(path string) (*types.Package, error) {
	if path == "unsafe" {
		// Special case: go/types has pre-defined type information for
		// unsafe. We ensure that this package is correct, in case any
		// analyzers are specifically looking for this.
		return types.Unsafe, nil
	}
	// Pull the internal entry.
	i.mu.Lock()
	entry, ok := i.cache[path]
	if ok && entry.pkg != nil {
		i.mu.Unlock()
		entry.ready.Wait()
		return entry.pkg, entry.err
	}
	// NOTE(review): if an entry exists but pkg is still nil (an import in
	// flight on another goroutine), we fall through and install a fresh
	// entry over it — confirm this replacement is intended.
	// Start preparing this entry.
	entry = new(importerEntry)
	entry.ready.Add(1)
	defer entry.ready.Done()
	i.cache[path] = entry
	i.mu.Unlock()
	// If we have the srcs for this package, then we can actually do an
	// analysis from first principles to validate the package and derive
	// the types. We strictly prefer this to the gcexportdata.
	if srcs, ok := i.sources[path]; ok && len(srcs) > 0 {
		entry.pkg, entry.findings, entry.facts, entry.err = i.checkPackage(path, srcs)
		if entry.err != nil {
			return nil, entry.err
		}
		i.importsMu.Lock()
		defer i.importsMu.Unlock()
		i.imports[path] = entry.pkg
		return entry.pkg, entry.err
	}
	// Load all exported data. Unfortunately, we will have to hold the lock
	// during this time. The imported may access imports directly.
	rc, err := i.findBinary(path)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	r, err := gcexportdata.NewReader(rc)
	if err != nil {
		return nil, err
	}
	i.importsMu.Lock()
	defer i.importsMu.Unlock()
	entry.pkg, entry.err = gcexportdata.Read(r, i.fset, i.imports, path)
	return entry.pkg, entry.err
}
// Import implements types.Importer.Import.
//
// It delegates directly to importPackage; see that method for caching and
// loading semantics.
func (i *importer) Import(path string) (*types.Package, error) {
	return i.importPackage(path)
}
// errorImporter tracks the last error.
type errorImporter struct {
	*importer
	lastErr error // most recent import failure; consulted after type checking
}
// Import implements types.Importer.Import, recording the most recent
// import failure in lastErr.
func (i *errorImporter) Import(path string) (*types.Package, error) {
	pkg, err := i.importer.importPackage(path)
	if err == nil {
		return pkg, nil
	}
	i.lastErr = err
	return pkg, err
}
// checkPackage is the backing implementation for CheckPackage and others.
//
// The implementation was adapted from [1], which was in turn adapted from [2].
// This returns a list of matching analysis issues, or an error if the analysis
// could not be completed.
//
// Note that a partial result may be returned if an error occurred on at least
// one analyzer. This may be expected if e.g. a binary is not provided but a
// binaryAnalyzer is used.
//
// [1] bazelbuild/rules_go/tools/builders/nogo_main.go
// [2] golang.org/x/tools/go/checker/internal/checker
func (i *importer) checkPackage(path string, srcs []string) (*types.Package, FindingSet, *facts.Package, error) {
	// Load all source files.
	goFiles, nonGoFiles := sortSrcs(srcs)
	syntax := make([]*ast.File, 0, len(goFiles))
	for _, file := range goFiles {
		include, err := shouldInclude(file)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("error evaluating file %q: %w", file, err)
		}
		if !include {
			continue
		}
		s, err := parser.ParseFile(i.fset, file, nil, parser.ParseComments)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("error parsing file %q: %w", file, err)
		}
		syntax = append(syntax, s)
	}
	// Non-Go files are filtered by the same build constraints and made
	// available to analyzers via OtherFiles.
	otherFiles := make([]string, 0, len(nonGoFiles))
	for _, file := range nonGoFiles {
		include, err := shouldInclude(file)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("error evaluating non-Go file %q: %w", file, err)
		}
		if !include {
			continue
		}
		otherFiles = append(otherFiles, file)
	}
	// Check type information.
	ei := &errorImporter{
		importer: i,
	}
	typesSizes := types.SizesFor("gc", flags.GOARCH)
	typeConfig := types.Config{
		Importer: ei,
		Error: func(error) {},
	}
	typesInfo := &types.Info{
		Types: make(map[ast.Expr]types.TypeAndValue),
		Instances: make(map[*ast.Ident]types.Instance),
		Uses: make(map[*ast.Ident]types.Object),
		Defs: make(map[*ast.Ident]types.Object),
		Implicits: make(map[ast.Node]types.Object),
		Scopes: make(map[ast.Node]*types.Scope),
		Selections: make(map[*ast.SelectorExpr]*types.Selection),
	}
	astPackage, err := typeConfig.Check(path, i.fset, syntax, typesInfo)
	if err != nil && ei.lastErr != ErrSkip {
		return nil, nil, nil, fmt.Errorf("error checking types: %w", err)
	}
	// Note that facts should be reconcilable between types as of go/tools
	// commit ee04797aa0b6be5ce3d5f7ac0f91e34716b3acdf. We previously used
	// to do a sanity check to ensure that binary import data was
	// compatible with ast-derived data, but this is no longer necessary.
	// If packages are available locally, we can refer to those directly.
	astFacts := facts.NewPackage()
	// Recursively visit all analyzers.
	var (
		resultsMu sync.RWMutex // protects results & errs, findings.
		factsMu sync.RWMutex // protects facts.
		ready = make(map[*analysis.Analyzer]*sync.WaitGroup)
		results = make(map[*analysis.Analyzer]any)
		errs = make(map[*analysis.Analyzer]error)
		findings = make(FindingSet, 0)
	)
	// Each analyzer gets a WaitGroup so dependents can wait for it.
	for a := range allAnalyzers {
		wg := new(sync.WaitGroup)
		wg.Add(1) // For analysis.
		ready[a] = wg
	}
	// limit has capacity one: analyses are serialized even though each
	// runs in its own goroutine (ordering is governed by Requires).
	limit := make(chan struct{}, 1)
	for a, wg := range ready {
		go func(a *analysis.Analyzer, wg *sync.WaitGroup) {
			defer wg.Done()
			// Wait for all requirements.
			for _, orig := range a.Requires {
				ready[orig].Wait()
				// Should we bail early?
				resultsMu.RLock()
				if err := errs[orig]; err != nil {
					resultsMu.RUnlock()
					resultsMu.Lock()
					defer resultsMu.Unlock()
					errs[a] = err
					return
				}
				resultsMu.RUnlock()
			}
			limit <- struct{}{}
			defer func() { <-limit }()
			// Collect local fact types.
			localFactTypes := make(map[reflect.Type]bool)
			for _, ft := range a.FactTypes {
				localFactTypes[reflect.TypeOf(ft)] = true
			}
			// Run the analysis.
			var localFindings FindingSet
			p := &analysis.Pass{
				Analyzer: a,
				Fset: i.fset,
				Files: syntax,
				OtherFiles: otherFiles,
				Pkg: astPackage,
				TypesInfo: typesInfo,
				ResultOf: results, // All results.
				Report: func(d analysis.Diagnostic) {
					localFindings = append(localFindings, Finding{
						Category: a.Name,
						Position: i.fset.Position(d.Pos),
						Message: d.Message,
						GOOS: flags.GOOS,
						GOARCH: flags.GOARCH,
					})
				},
				ImportPackageFact: func(pkg *types.Package, ptr analysis.Fact) bool {
					if pkg != astPackage {
						if f := i.fastFacts(pkg); f != nil {
							return f.ImportFact(nil, ptr)
						}
						return false
					}
					factsMu.RLock()
					defer factsMu.RUnlock()
					return astFacts.ImportFact(nil, ptr)
				},
				ExportPackageFact: func(fact analysis.Fact) {
					factsMu.Lock()
					defer factsMu.Unlock()
					astFacts.ExportFact(nil, fact)
				},
				ImportObjectFact: func(obj types.Object, ptr analysis.Fact) bool {
					if pkg := obj.Pkg(); pkg != nil && pkg != astPackage {
						if f := i.fastFacts(pkg); f != nil {
							return f.ImportFact(obj, ptr)
						}
						return false
					}
					factsMu.RLock()
					defer factsMu.RUnlock()
					return astFacts.ImportFact(obj, ptr)
				},
				ExportObjectFact: func(obj types.Object, fact analysis.Fact) {
					if obj == nil {
						// Tried to export nil object?
						return
					}
					if obj.Pkg() != astPackage {
						// This is not allowed: the
						// built-in facts library will
						// also panic in this case.
						return
					}
					factsMu.Lock()
					defer factsMu.Unlock()
					astFacts.ExportFact(obj, fact)
				},
				AllPackageFacts: func() (rv []analysis.PackageFact) {
					factsMu.RLock()
					defer factsMu.RUnlock()
					// Pull all dependencies.
					for _, importedPkg := range astPackage.Imports() {
						otherFacts := i.fastFacts(importedPkg)
						if otherFacts == nil {
							continue
						}
						for typ := range localFactTypes {
							v := reflect.New(typ.Elem())
							if otherFacts.ImportFact(nil, v.Interface().(analysis.Fact)) {
								rv = append(rv, analysis.PackageFact{
									Package: importedPkg,
									Fact: v.Interface().(analysis.Fact),
								})
							}
						}
					}
					// Pull all local facts.
					for typ := range localFactTypes {
						v := reflect.New(typ.Elem())
						if astFacts.ImportFact(nil, v.Interface().(analysis.Fact)) {
							rv = append(rv, analysis.PackageFact{
								Package: astPackage,
								Fact: v.Interface().(analysis.Fact),
							})
						}
					}
					return
				},
				AllObjectFacts: func() (rv []analysis.ObjectFact) {
					factsMu.RLock()
					defer factsMu.RUnlock()
					// Pull all local facts.
					for obj := range astFacts.Objects {
						for typ := range localFactTypes {
							v := reflect.New(typ.Elem())
							if astFacts.ImportFact(obj, v.Interface().(analysis.Fact)) {
								rv = append(rv, analysis.ObjectFact{
									Object: obj,
									Fact: v.Interface().(analysis.Fact),
								})
							}
						}
					}
					return
				},
				TypesSizes: typesSizes,
			}
			// Ensure any analyzer panics are captured. This may
			// happen for packages that are not supported by
			// specific analyzers. The only panic that can happen
			// is while resultsMu is held as a read-only lock.
			var (
				result any
				err error
			)
			defer func() {
				if r := recover(); r != nil {
					// In order to make the multiple
					// analyzers running concurrently
					// debuggable, capture panic exceptions
					// and propagate as an analyzer error.
					err = fmt.Errorf("panic recovered: %s (%s)", r, debug.Stack())
					resultsMu.RUnlock() // +checklocksignore
				}
				resultsMu.Lock()
				findings = append(findings, localFindings...)
				results[a] = result
				errs[a] = err
				resultsMu.Unlock()
			}()
			found := findAnalyzer(a)
			resultsMu.RLock()
			if ba, ok := found.(binaryAnalyzer); ok {
				// Load the binary and analyze.
				rc, loadErr := i.findArchive(path)
				if loadErr != nil {
					if loadErr != ErrSkip {
						err = loadErr
					} else {
						err = nil // Ignore.
					}
				} else {
					result, err = ba.Run(p, rc)
					rc.Close()
				}
			} else {
				result, err = a.Run(p)
			}
			resultsMu.RUnlock()
		}(a, wg)
	}
	for _, wg := range ready {
		// Wait for completion.
		wg.Wait()
	}
	for a := range ready {
		// Check the error. If we generate an error here, we report
		// this as a finding that can be suppressed. Some analyzers
		// will fail on some packages.
		if errs[a] != nil {
			filename := ""
			if len(srcs) > 0 {
				filename = srcs[0]
			}
			findings = append(findings, Finding{
				Category: a.Name,
				Position: token.Position{Filename: filename},
				Message: errs[a].Error(),
				GOOS: flags.GOOS,
				GOARCH: flags.GOARCH,
			})
			continue
		}
		// Check the result. Per above, we check that the type is what
		// we expected and that an error did not occur during analysis.
		if got, want := reflect.TypeOf(results[a]), a.ResultType; got != want {
			return astPackage, findings, astFacts, fmt.Errorf("error: analyzer %s returned %v (expected type %v)", a.Name, results[a], want)
		}
	}
	// Return all findings.
	return astPackage, findings, astFacts, nil
}
// Package runs all analyzers on a single package.
func Package(path string, srcs []string) (FindingSet, facts.Serializer, error) {
	i := &importer{
		fset: token.NewFileSet(),
		cache: make(map[string]*importerEntry),
		imports: make(map[string]*types.Package),
	}
	// See comment on internalPackages.
	for _, pkg := range internalPackages {
		if _, err := i.Import(pkg); err != nil {
			return nil, nil, fmt.Errorf("error importing %s: %w", pkg, err)
		}
	}
	// The local is named pkgFacts (not "facts") to avoid shadowing the
	// imported facts package.
	_, findings, pkgFacts, err := i.checkPackage(path, srcs)
	if err != nil {
		return nil, nil, err
	}
	return findings, pkgFacts, nil
}
// allFactsAndFindings aggregates the findings and facts of every cached
// package entry into a single set and bundle.
func (i *importer) allFactsAndFindings() (FindingSet, *facts.Bundle) {
	findings := make(FindingSet, 0)
	bundle := facts.NewBundle()
	for path, entry := range i.cache {
		findings = append(findings, entry.findings...)
		bundle.Add(path, entry.facts)
	}
	return findings, bundle
}
// FindRoot finds a package root.
//
// The root is the longest match of srcRootRegex across all source
// filenames; an empty regex yields an empty root with no error. This is a
// bit fragile, but works for all currently known Go source configurations.
// Extra files outside the root are simply ignored by later processing.
func FindRoot(srcs []string, srcRootRegex string) (string, error) {
	if srcRootRegex == "" {
		return "", nil
	}
	re, err := regexp.Compile(srcRootRegex)
	if err != nil {
		return "", fmt.Errorf("srcRootRegex is not valid: %w", err)
	}
	var best string
	for _, name := range srcs {
		if m := re.FindString(name); len(m) > len(best) {
			best = m
		}
	}
	if best == "" {
		// No usable common prefix was identified.
		return "", fmt.Errorf("unable to identify src prefix for %v with regex %s", srcs, srcRootRegex)
	}
	return best, nil
}
// SplitPackages splits a typical package structure into packages: a map
// from package import path to the source files belonging to it. Files
// outside the root, test files, testdata, and the doc-only "builtin"
// package are skipped.
func SplitPackages(srcs []string, srcRootPrefix string) map[string][]string {
	pkgFiles := make(map[string][]string)
	for _, src := range srcs {
		if !strings.HasPrefix(src, srcRootPrefix) {
			continue // Superflouous file.
		}
		dir := path.Dir(src)
		if len(srcRootPrefix) >= len(dir) {
			continue // Not a file.
		}
		// The package path is the directory relative to the root.
		pkgName := strings.TrimLeft(dir[len(srcRootPrefix):], "/")
		if pkgName == "" {
			continue // Also not a file.
		}
		// Ignore any files with /testdata/ in the path.
		if strings.Contains(src, "/testdata/") {
			continue
		}
		// Ignore all test files since they *may* be in a different
		// package than the rest of the sources.
		if strings.HasSuffix(src, "_test.go") {
			continue
		}
		// Skip the "builtin" package, which is only for docs and not a
		// real package. Attempting type checking goes crazy.
		if pkgName == "builtin" {
			continue
		}
		// In Go's sources, vendored packages under cmd/vendor are imported via
		// paths not containing cmd/vendor.
		pkgName = strings.TrimPrefix(pkgName, "cmd/vendor/")
		// Place the special runtime package (functions emitted by the
		// compiler itself) into the runtime packages.
		if strings.Contains(src, "cmd/compile/internal/typecheck/_builtin/runtime.go") {
			pkgName = "runtime"
		}
		pkgFiles[pkgName] = append(pkgFiles[pkgName], src)
	}
	return pkgFiles
}
// Bundle checks a bundle of files (typically the standard library) and
// returns the accumulated findings and serialized facts.
func Bundle(sources map[string][]string) (FindingSet, facts.Serializer, error) {
	imp := &importer{
		fset:    token.NewFileSet(),
		sources: sources,
		cache:   make(map[string]*importerEntry),
		imports: make(map[string]*types.Package),
	}
	// Import every package; ErrSkip marks packages that are intentionally
	// ignored rather than genuine failures.
	for pkg := range sources {
		_, err := imp.importPackage(pkg)
		if err != nil && err != ErrSkip {
			return nil, nil, err
		}
	}
	findings, serializer := imp.allFactsAndFindings()
	return findings, serializer, nil
}
|
package main
import (
"encoding/json"
"io/ioutil"
"log"
)
const (
	// config is the path of the JSON file describing the loop to run.
	config = ".loop.json"
)
const (
	// ANSI terminal escape codes used by status to colorize log output.
	red   = "\033[31m"
	green = "\033[32m"
	reset = "\033[39;49m"
)
func main() {
loop := &Loop{}
data, err := ioutil.ReadFile(config)
if err != nil {
log.Fatal(err)
}
err = json.Unmarshal(data, loop)
if err != nil {
log.Fatal(err)
}
err = loop.Loop()
if err != nil {
log.Fatal(err)
}
}
// status logs text in green when ok is true, in red otherwise, always
// resetting the terminal color afterwards.
func status(ok bool, text string) {
	if ok {
		log.Println(green + text + reset)
		return
	}
	log.Println(red + text + reset)
}
|
package main
import (
"log"
"os"
"github.com/akamensky/argparse"
)
// main parses the input path from the command line, then scans, groups,
// and renames the TV-series episode files found there.
func main() {
	parser := argparse.NewParser("Tv Series Renamer", "Organizes your tvseries")
	inputPath := parser.String("i", "inputPath", &argparse.Options{Required: true, Help: "path/to/folder/for/input"})
	if err := parser.Parse(os.Args); err != nil {
		log.Fatal(err)
	}
	apiToken := token()
	fileNames := files(*inputPath)
	seriesNames := getSeriesNames(fileNames)
	createDirectory(seriesNames, *inputPath)
	episodes, err := generateDatabase(seriesNames, apiToken)
	if err != nil {
		log.Fatal(err)
	}
	rename(fileNames, *inputPath, episodes)
}
|
package option
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"github.com/jacexh/multiconfig"
)
type (
	// LoggerOption configures the logging module (level, log file and its
	// rotation limits).
	LoggerOption struct {
		Level      string `default:"info"`
		Name       string
		Filename   string
		MaxSize    int  `default:"100" yaml:"max_size,omitempty" json:"max_size,omitempty"`
		MaxAge     int  `default:"7" yaml:"max_age,omitempty" json:"max_age,omitempty"`
		MaxBackups int  `default:"30" yaml:"max_backups,omitempty" json:"max_backups,omitempty"`
		LocalTime  bool `default:"true" yaml:"local_time,omitempty" json:"local_time,omitempty"`
		Compress   bool
	}
	// RouterOption holds the service runtime settings.
	RouterOption struct {
		Port    int `default:"8080"`
		Timeout int `default:"30"`
	}
	// DatabaseOption holds the MySQL connection settings.
	DatabaseOption struct {
		Host               string `default:"localhost"`
		Port               int    `default:"3306"`
		Username           string
		Password           string
		Database           string
		MaxOpenConnections int `default:"5" yaml:"max_open_connections" json:"max_open_connections"`
		MaxIdleConnections int `default:"3" yaml:"max_idle_connections" json:"max_idle_connections"`
	}
	// Option is the top-level configuration entry point, aggregating all
	// sub-configurations.
	Option struct {
		Description string
		Logger      LoggerOption
		Router      RouterOption
		Database    DatabaseOption
	}
)
var (
	// configFileType is the config file extension (overridable via SetConfigFileType).
	configFileType = "yml"
	// configName is the config file base name (overridable via SetConfigName).
	configName = "config"
	// searchInPaths lists the directories searched for the config file.
	searchInPaths []string
	// environmentVariablesPrefix is the project's environment-variable prefix
	// (this is a template placeholder filled in when the project is generated).
	environmentVariablesPrefix = "{{.EnvironmentVariablesPrefix}}"
	// environmentVariableProfile names the environment variable that selects
	// the active profile.
	environmentVariableProfile = environmentVariablesPrefix + "_PROJECT_PROFILE"
)
// SetConfigFileType overrides the config file extension; an empty value is
// ignored.
func SetConfigFileType(t string) {
	if t == "" {
		return
	}
	configFileType = t
}
// SetConfigName overrides the config file base name; an empty value is
// ignored.
func SetConfigName(n string) {
	if n == "" {
		return
	}
	configName = n
}
// AddConfigPath registers a directory to search for the config file.
// Relative paths are resolved to absolute ones (panicking if resolution
// fails); an empty path is ignored.
func AddConfigPath(path string) {
	if path == "" {
		return
	}
	dir := path
	if filepath.IsAbs(dir) {
		dir = filepath.Clean(dir)
	} else {
		var err error
		dir, err = filepath.Abs(dir)
		if err != nil {
			panic(err)
		}
	}
	searchInPaths = append(searchInPaths, dir)
}
// HomeDir returns the current user's home directory: $HOME on non-Windows
// systems, and %HOMEDRIVE%%HOMEPATH% (falling back to %USERPROFILE%) on
// Windows.
func HomeDir() string {
	if runtime.GOOS != "windows" {
		return os.Getenv("HOME")
	}
	if home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH"); home != "" {
		return home
	}
	return os.Getenv("USERPROFILE")
}
// getConfigName returns the config base name, suffixed with the active
// profile (from the profile environment variable) when one is set.
func getConfigName() string {
	if profile := os.Getenv(environmentVariableProfile); profile != "" {
		return fmt.Sprintf("%s_%s", configName, profile)
	}
	return configName
}
// findInDir returns the full path of file inside dir when it exists and is
// not itself a directory; otherwise it returns "".
func findInDir(dir string, file string) string {
	full := filepath.Join(dir, file)
	info, err := os.Stat(full)
	if err != nil || info.IsDir() {
		return ""
	}
	return full
}
// findConfigFile searches the registered directories for the config file
// and returns the first hit; it panics when no file is found.
func findConfigFile() string {
	target := fmt.Sprintf("%s.%s", getConfigName(), configFileType)
	for _, dir := range searchInPaths {
		if found := findInDir(dir, target); found != "" {
			return found
		}
	}
	panic(errors.New("cannot find the config file"))
}
// LoadConfig locates the config file and loads it into an Option, merging
// values from environment variables with the project prefix; it panics on
// load failure (via MustLoad) or when the file cannot be found.
func LoadConfig() *Option {
	opt := new(Option)
	loader := multiconfig.NewWithPathAndEnvPrefix(findConfigFile(), environmentVariablesPrefix)
	loader.MustLoad(opt)
	return opt
}
// init registers "./conf" as the default config search directory.
func init() {
	AddConfigPath("./conf")
}
|
package configmap
import (
"context"
"strconv"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
)
// UpsertDisableConfigMap creates or updates the named ConfigMap, setting
// key to the string form of isDisabled ("true"/"false"). Existing data in
// the ConfigMap is preserved; only the given key is written.
func UpsertDisableConfigMap(ctx context.Context, client ctrlclient.Client, name string, key string, isDisabled bool) error {
	configMap := &v1alpha1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: name},
	}
	mutate := func() error {
		if configMap.Data == nil {
			configMap.Data = make(map[string]string)
		}
		configMap.Data[key] = strconv.FormatBool(isDisabled)
		return nil
	}
	_, err := controllerutil.CreateOrUpdate(ctx, client, configMap, mutate)
	return err
}
|
package model
import (
"github.com/joostvdg/cmg/pkg/model"
)
// MapLegend is a legend for API uses, which allows use of codes (which can
// then be mapped back to full values via the Legend).
type MapLegend struct {
	Harbors    []model.Harbor
	Landscapes []model.Landscape
}
|
package flags
import (
"fmt"
"strings"
"github.com/urfave/cli"
)
// OldPasswordFile returns a flag for receiving an old password file path.
// An empty usage string selects the default description.
func OldPasswordFile(usage string) cli.Flag {
	const def = "The path to the `FILE` containing the old encryption password"
	if usage == "" {
		usage = def
	}
	return cli.StringFlag{
		Name:   "old-password-file, o",
		EnvVar: "STEP_OLD_PASSWORD_FILE",
		Usage:  usage,
	}
}
// NewPasswordFile returns a flag for receiving a new password file path.
// An empty usage string selects the default description.
func NewPasswordFile(usage string) cli.Flag {
	const def = "The path to the `FILE` containing the new encryption password"
	if usage == "" {
		usage = def
	}
	return cli.StringFlag{
		Name:   "new-password-file, n",
		EnvVar: "STEP_NEW_PASSWORD_FILE",
		Usage:  usage,
	}
}
// Bits returns a flag for receiving the number of bits used when
// generating a key. Zero value selects the default of 256; an empty usage
// string selects the default description.
func Bits(usage string, value int) cli.Flag {
	const (
		defaultUsage = "Number of bits used to generate the private key"
		defaultBits  = 256
	)
	if usage == "" {
		usage = defaultUsage
	}
	if value == 0 {
		value = defaultBits
	}
	return cli.IntFlag{
		Name:   "bits, b",
		EnvVar: "STEP_BITS",
		Usage:  usage,
		Value:  value,
	}
}
// Action returns a flag for receiving an action out of several
// possibilities, which are appended to the usage text.
func Action(usage string, possibilities []string, value string) cli.Flag {
	options := strings.Join(possibilities, ", ")
	return cli.StringFlag{
		Name:   "action, a",
		Usage:  fmt.Sprintf("%s (Options: %s)", usage, options),
		EnvVar: "STEP_ACTION",
		Value:  value,
	}
}
// Type returns a flag for receiving a type of thing to create out of
// several possibilities, which are appended to the usage text.
func Type(usage string, possibilities []string, value string) cli.Flag {
	options := strings.Join(possibilities, ", ")
	return cli.StringFlag{
		Name:   "type, t",
		Usage:  fmt.Sprintf("%s (Options: %s)", usage, options),
		EnvVar: "STEP_TYPE",
		Value:  value,
	}
}
// Alg returns a flag for receiving the algorithm to use when performing an
// operation; the possibilities are appended to the usage text.
func Alg(usage string, possibilities []string, value string) cli.Flag {
	options := strings.Join(possibilities, ", ")
	return cli.StringFlag{
		Name:   "alg",
		Usage:  fmt.Sprintf("%s (Options: %s)", usage, options),
		EnvVar: "STEP_ALG",
		Value:  value,
	}
}
// RootCertificate returns a flag for specifying the path to a root
// certificate. An empty usage string selects the default description.
func RootCertificate(usage string) cli.Flag {
	const def = "The file `PATH` to the root certificate"
	if usage == "" {
		usage = def
	}
	return cli.StringFlag{
		Name:   "root, r",
		EnvVar: "STEP_ROOT_CERTIFICATE",
		Usage:  usage,
	}
}
// PasswordFile returns a flag for specifying the path to a file containing
// a password. An empty usage string selects the default description.
func PasswordFile(usage string) cli.Flag {
	const def = "Path to file containing a password"
	if usage == "" {
		usage = def
	}
	return cli.StringFlag{
		Name:   "password-file, p",
		EnvVar: "STEP_PASSWORD_FILE",
		Usage:  usage,
	}
}
// OutputFile returns a flag for specifying the path to which output should
// be written. An empty usage string selects the default description.
func OutputFile(usage string) cli.Flag {
	const def = "Path to where the output should be written"
	if usage == "" {
		usage = def
	}
	return cli.StringFlag{
		Name:   "output-file, o",
		EnvVar: "STEP_OUTPUT_FILE",
		Usage:  usage,
	}
}
// Number returns a flag for collecting the number of entities to create.
// An empty usage string selects the default description.
// NOTE(review): this is a StringFlag, not an IntFlag — callers parse the
// value themselves; kept as-is for interface compatibility.
func Number(usage string) cli.Flag {
	const def = "The `NUMBER` of entities to create"
	if usage == "" {
		usage = def
	}
	return cli.StringFlag{
		Name:   "number, n",
		EnvVar: "STEP_NUMBER",
		Usage:  usage,
	}
}
// Prefix returns a flag for prefixing the names of entities during
// creation. An empty usage string selects the default description.
func Prefix(usage, value string) cli.Flag {
	const def = "The `PREFIX` to apply to the names of all created entities"
	if usage == "" {
		usage = def
	}
	return cli.StringFlag{
		Name:   "prefix, p",
		EnvVar: "STEP_PREFIX",
		Usage:  usage,
		Value:  value,
	}
}
// OAuthProvider returns a flag allowing the user to select an OAuth
// provider; the available providers are appended to the usage text.
func OAuthProvider(usage string, providers []string, value string) cli.Flag {
	options := strings.Join(providers, ", ")
	return cli.StringFlag{
		Name:   "provider, idp",
		Usage:  fmt.Sprintf("%s (Options: %s)", usage, options),
		EnvVar: "STEP_PROVIDER",
		Value:  value,
	}
}
// Email returns a flag allowing the user to specify their email address.
// An empty usage string selects the default description.
func Email(usage string) cli.Flag {
	const def = "Email to use"
	if usage == "" {
		usage = def
	}
	return cli.StringFlag{
		Name:   "email, e",
		EnvVar: "STEP_EMAIL",
		Usage:  usage,
	}
}
// Console returns a boolean flag indicating whether the user wants to
// remain entirely in the console to complete the action. An empty usage
// string selects the default description.
func Console(usage string) cli.Flag {
	const def = "Whether or not to remain entirely in the console to complete the action"
	if usage == "" {
		usage = def
	}
	return cli.BoolFlag{
		Name:   "console, c",
		EnvVar: "STEP_CONSOLE",
		Usage:  usage,
	}
}
// Limit returns a flag limiting the number of results returned by a
// command. Zero selects the default of 10; an empty usage string selects
// the default description.
func Limit(usage string, value int) cli.Flag {
	const (
		defaultUsage = "The maximum `NUMBER` of results to return"
		defaultLimit = 10
	)
	if usage == "" {
		usage = defaultUsage
	}
	if value == 0 {
		value = defaultLimit
	}
	return cli.IntFlag{
		Name:  "limit, l",
		Usage: usage,
		Value: value,
	}
}
|
package main
import (
"math"
"sync"
)
// Tree is an append-only hash tree over a sequence of events.
type Tree struct {
	cache Path   // memoized digests for completed subtrees (see Cached/Cache)
	size  uint64 // number of events added so far
	h     Hasher // hash function used to derive digests
	// NOTE(review): value receivers elsewhere on Tree copy this embedded
	// lock (go vet copylocks) — prefer *Tree receivers.
	sync.RWMutex
}
// Root returns the position of the tree root: index 0 at layer
// ceil(log2(size)).
//
// Receiver changed to *Tree: the value receiver copied the embedded
// sync.RWMutex (flagged by `go vet -copylocks`).
// NOTE(review): for size 0, Log2(0) is -Inf and the uint64 conversion of
// that is not meaningful — callers presumably Add first; confirm.
func (t *Tree) Root() Pos {
	return Pos{0, uint64(math.Ceil(math.Log2(float64(t.size))))}
}
// Last returns the position of the next leaf slot: index size at layer 0.
//
// Receiver changed to *Tree: the value receiver copied the embedded
// sync.RWMutex (flagged by `go vet -copylocks`).
func (t *Tree) Last() Pos {
	return Pos{t.size, 0}
}
// Add appends an event to the tree under the write lock and returns the
// new root digest together with the visitor that computed it.
func (t *Tree) Add(event []byte) (Digest, Visitor) {
	t.Lock()
	defer t.Unlock()
	c := NewComputeVisitor(t.h, event)
	t.size++
	// Traverse from the (post-increment) root down to the new event's leaf
	// slot (index size-1), filling c.path along the way.
	Traverse(t, State{t.Root(), t.size - 1}, c)
	return c.path[t.Root()], c
}
// Incremental computes a proof between positions j and k by traversing to
// both leaf indices with a single visitor, returning the root digest and
// the visitor.
// NOTE(review): this increments t.size like Add does even though no event
// is appended — confirm that is intended.
func (t *Tree) Incremental(j, k Pos) (Digest, Visitor) {
	t.Lock()
	defer t.Unlock()
	c := NewComputeVisitor(t.h, nil)
	t.size++
	Traverse(t, State{t.Root(), j.i}, c)
	Traverse(t, State{t.Root(), k.i}, c)
	return c.path[t.Root()], c
}
// Cached returns the memoized digest for position s.p, but only when the
// subtree rooted there is already complete (the traversal version v has
// passed the subtree's last leaf index); ok reports whether a digest was
// found.
//
// Receiver changed to *Tree: the value receiver copied the embedded
// sync.RWMutex (flagged by `go vet -copylocks`).
func (t *Tree) Cached(s State) (d Digest, ok bool) {
	if s.v >= s.p.i+pow(2, s.p.l)-1 {
		d, ok = t.cache[s.p]
	}
	return
}
// Cache memoizes digest d for position s.p, but only when the subtree
// rooted there is complete (same completeness test as Cached), so frozen
// subtrees are never recomputed.
func (t *Tree) Cache(s State, d Digest) {
	if s.v >= s.p.i+pow(2, s.p.l)-1 {
		t.cache[s.p] = d
	}
}
|
package main
import (
"fmt"
"strings"
)
// submission on leetcode
// https://leetcode.com/submissions/detail/293528291/
// main exercises reverseWords on a few sample inputs, printing each result
// wrapped in single quotes.
func main() {
	for _, input := range []string{
		"the sky is blue",
		"  Hello!   World!  ",
		"  Hello!   World!  ",
	} {
		fmt.Printf("'%s'", reverseWords(input))
	}
}
// reverseWords returns the words of s in reverse order, joined by single
// spaces, with leading/trailing whitespace removed and runs of internal
// whitespace collapsed (LeetCode 151 semantics).
//
// Rewritten to use strings.Fields in place of the original hand-rolled
// index scanning; for space-separated input the output is identical, and
// other whitespace (tabs, newlines) is now treated as a separator too.
func reverseWords(s string) string {
	words := strings.Fields(s)
	// Reverse in place by swapping from both ends.
	for i, j := 0, len(words)-1; i < j; i, j = i+1, j-1 {
		words[i], words[j] = words[j], words[i]
	}
	return strings.Join(words, " ")
}
|
package day12
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
// TestParsing verifies that ParseInput turns the sample input file into
// the expected instruction list (first element compared).
func TestParsing(t *testing.T) {
	const (
		colorReset  = "\033[0m"
		colorGreen  = "\033[32m"
		colorYellow = "\033[33m"
	)
	got := ParseInput("input_test.txt")
	want := []instruction{
		{direction: "F", ammount: 10},
		{direction: "N", ammount: 3},
		{direction: "F", ammount: 7},
		{direction: "R", ammount: 90},
		{direction: "F", ammount: 11},
	}
	fmt.Println(colorYellow, "Function output:", got)
	fmt.Println(colorGreen, "Valid output: ", want)
	fmt.Println(colorReset, "")
	assert.Equal(t, want[0], got[0], "The two Arrays should be the same.")
}
// TestCalculateDistance verifies the Manhattan distance computed for the
// sample input (part 1 expects 25).
func TestCalculateDistance(t *testing.T) {
	const (
		colorReset  = "\033[0m"
		colorGreen  = "\033[32m"
		colorYellow = "\033[33m"
	)
	got := CalculateDistance("input_test.txt")
	want := 25
	fmt.Println(colorYellow, "Function output:", got)
	fmt.Println(colorGreen, "Valid output: ", want)
	fmt.Println(colorReset, "")
	assert.Equal(t, want, got, "The two numbers should be the same.")
}
// TestCalculateDistanceWaypoint verifies the waypoint-based distance for
// the sample input (part 2 expects 286).
func TestCalculateDistanceWaypoint(t *testing.T) {
	const (
		colorReset  = "\033[0m"
		colorGreen  = "\033[32m"
		colorYellow = "\033[33m"
	)
	got := CalculateDistanceWaypoint("input_test.txt")
	want := 286
	fmt.Println(colorYellow, "Function output:", got)
	fmt.Println(colorGreen, "Valid output: ", want)
	fmt.Println(colorReset, "")
	assert.Equal(t, want, got, "The two numbers should be the same.")
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package arcvpn interacts with the ARC-side fake VPN.
package arcvpn
import (
"context"
"fmt"
"regexp"
"strings"
"time"
"chromiumos/tast/common/action"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
"chromiumos/tast/local/arc"
"chromiumos/tast/local/bundles/cros/network/vpn"
"chromiumos/tast/testing"
)
const (
	// These need to stay in sync with /vendor/google_arc/packages/system/ArcHostVpn.
	// Pkg is the package name of the ARC-side fake VPN.
	Pkg = "org.chromium.arc.hostvpn"
	// Svc is the name of the Android Service that runs the ARC-side fake VPN.
	Svc = "org.chromium.arc.hostvpn.ArcHostVpnService"
)
// SetUpHostVPN creates a base VPN config, then calls SetUpHostVPNWithConfig.
func SetUpHostVPN(ctx context.Context) (*vpn.Connection, action.Action, error) {
	// Arbitrary VPN type, but it must not log the user out during setup,
	// otherwise we lose adb access. vpn.AuthTypeCert VPNs, for example, log
	// the user out while trying to prep the cert store, so use L2TP/IPsec
	// with a pre-shared key instead.
	return SetUpHostVPNWithConfig(ctx, vpn.Config{
		Type:     vpn.TypeL2TPIPsec,
		AuthType: vpn.AuthTypePSK,
	})
}
// SetUpHostVPNWithConfig creates the host VPN server, but does not initiate
// a connection. The returned vpn.Connection is immediately ready for
// Connect() to be called on it. Also returns a cleanup function that
// handles the VPN server teardown for the caller to execute.
func SetUpHostVPNWithConfig(ctx context.Context, config vpn.Config) (*vpn.Connection, action.Action, error) {
	conn, err := vpn.NewConnection(ctx, config)
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to create connection object")
	}
	if err = conn.SetUp(ctx); err != nil {
		return nil, nil, errors.Wrap(err, "failed to setup VPN")
	}
	cleanup := func(ctx context.Context) error { return conn.Cleanup(ctx) }
	return conn, cleanup, nil
}
// SetARCVPNEnabled flips the flag in the current running ARC instance. If running multiple tests
// within the same ARC instance, it's recommended to cleanup by flipping the flag back to the
// expected default state afterwards. Since no state is persisted, new ARC instances will initialize
// with the default state.
func SetARCVPNEnabled(ctx context.Context, a *arc.ARC, enabled bool) error {
	// Format once; the same value is used for the command argument and for
	// verifying the dumpsys output below.
	val := fmt.Sprintf("%t", enabled)
	testing.ContextLogf(ctx, "Setting cros-vpn-as-arc-vpn flag to %t", enabled)
	cmd := a.Command(ctx, "dumpsys", "wifi", "set-cros-vpn-as-arc-vpn", val)
	o, err := cmd.Output(testexec.DumpLogOnError)
	if err != nil {
		// Typo fix: "commmand" -> "command".
		return errors.Wrap(err, "failed to execute 'set-cros-vpn-as-arc-vpn' command")
	}
	// The command echoes the resulting flag state; verify it actually took.
	if !strings.Contains(string(o), "sEnableCrosVpnAsArcVpn="+val) {
		return errors.New("unable to set sEnableCrosVpnAsArcVpn to " + val)
	}
	return nil
}
// WaitForARCServiceState checks if the Android service is running in the `expectedRunning` state.
func WaitForARCServiceState(ctx context.Context, a *arc.ARC, pkg, svc string, expectedRunning bool) error {
	// A live service shows up as a "ServiceRecord{...}" entry in the dumpsys
	// output. Compiled once here instead of on every poll iteration; raw
	// string avoids escaping the backslash.
	serviceRecordRE := regexp.MustCompile(`ServiceRecord\{`)
	testing.ContextLogf(ctx, "Check the state of %s/%s", pkg, svc)
	// Poll since it might take some time for the service to start/stop.
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		cmd := a.Command(ctx, "dumpsys", "activity", "services", pkg+"/"+svc)
		o, err := cmd.Output(testexec.DumpLogOnError)
		if err != nil {
			// Typo fix: "commmand" -> "command".
			return errors.Wrap(err, "failed to execute 'dumpsys activity services' command")
		}
		// Previously a match error was conflated with a state mismatch; the
		// precompiled regexp cannot fail, so only the state is checked here.
		if matched := serviceRecordRE.Match(o); matched != expectedRunning {
			if expectedRunning {
				return errors.New("expected, but didn't find ServiceRecord")
			}
			return errors.New("didn't expect, but found ServiceRecord")
		}
		return nil
	}, &testing.PollOptions{Timeout: 10 * time.Second}); err != nil {
		return errors.Wrapf(err, "service not in expected running state of %t", expectedRunning)
	}
	return nil
}
|
package main
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"log"
"net"
"os"
"strconv"
"strings"
"sync"
"time"
)
/*Constants used throughout the program to identify commands, request, response, and error messages*/
const (
	//request commands accepted from clients
	SET     = "set"
	GET     = "get"
	GETM    = "getm"
	CAS     = "cas"
	DELETE  = "delete"
	NOREPLY = "noreply"
	//response tokens sent back to clients
	OK      = "OK"
	CRLF    = "\r\n"
	VALUE   = "VALUE"
	DELETED = "DELETED"
	//errors reported to clients
	ERR_CMD_ERR   = "ERR_CMD_ERR"
	ERR_NOT_FOUND = "ERR_NOT_FOUND"
	ERR_VERSION   = "ERR_VERSION"
	ERR_INTERNAL  = "ERR_INTERNAL"
	//protocol limits; READ_TIMEOUT is the seconds to wait for a value line (see readValue)
	MAX_CMD_ARGS = 6
	MIN_CMD_ARGS = 2
	READ_TIMEOUT = 5
)
//represents the value in the main hashtable (key, value) pair
type Data struct {
	numBytes uint64 //number of bytes of the value bytes
	version  uint64 //current version of the key (bumped on every set/cas)
	//absolute unix time (seconds) at which the key expires; performSet
	//stores offset+now, so this is a point in time, not an offset
	expTime     uint64
	value       []byte //bytes representing the actual content of the value
	isPerpetual bool   //specifies that the key does not expire
}
//represents the main hashtable where the dance actually happens
type KeyValueStore struct {
	dictionary map[string]*Data //the hashtable that stores the (key, value) pairs
	sync.RWMutex //mutex for synchronization when reading or writing to the hashtable
}
//pointer to custom logger, initialized in main before the server starts
var logger *log.Logger
//cache: the single global store shared by all client connections
var table *KeyValueStore
/*Function to start the server and accept connections.
*If the listener cannot be created, the server aborts instead of continuing
*with a nil listener (the original only logged and then panicked in Accept).
*arguments: none
*return: none
*/
func startServer() {
	logger.Println("Server started")
	listener, err := net.Listen("tcp", ":5000")
	if err != nil {
		// Fix: previously only logged, then Accept was called on the nil
		// listener and panicked. Abort with the underlying error instead.
		logger.Fatalln("Could not start server!", err)
	}
	//initialize key value store
	table = &KeyValueStore{dictionary: make(map[string]*Data)}
	//infinite accept loop; each client gets its own goroutine
	for {
		conn, err := listener.Accept()
		if err != nil {
			logger.Println(err)
			continue
		}
		go handleClient(conn, table) //client connection handler
	}
}
/*Function to read data from the connection and put it on the channel so it could be read in a systematic fashion.
*Lines are split on CRLF by CustomSplitter; each chunk (including its CRLF)
*is forwarded to the channel. The loop ends when the connection closes or
*the scanner errors; the channel is intentionally left open.
*arguments: channel shared between this go routine and other functions performing actions based on the commands given, client connection
*return: none
*/
func myRead(ch chan []byte, conn net.Conn) {
	scanner := bufio.NewScanner(conn)
	scanner.Split(CustomSplitter)
	for scanner.Scan() {
		chunk := scanner.Bytes()
		ch <- chunk
		logger.Println(chunk, "$$")
	}
}
/*Simple write function to send information to the client.
*The message is terminated with CRLF per the protocol; the write error is
*intentionally ignored (best-effort reply).
*arguments: client connection, msg to send to the client
*return: none
*/
func write(conn net.Conn, msg string) {
	payload := append([]byte(msg), []byte(CRLF)...)
	logger.Println(payload, len(payload))
	conn.Write(payload)
}
/*After initial establishment of the connection with the client, this go routine handles further interaction.
*A dedicated reader goroutine (myRead) feeds complete CRLF-delimited chunks
*into the per-client channel; each non-empty chunk is parsed as a command.
*arguments: client connection, pointer to the hashtable structure
*return: none
*/
func handleClient(conn net.Conn, table *KeyValueStore) {
	defer conn.Close()
	//channel for every connection for every client
	lines := make(chan []byte)
	go myRead(lines, conn)
	for {
		msg := <-lines
		logger.Println("Channel: ", msg, string(msg))
		if len(msg) == 0 {
			continue
		}
		parseInput(conn, string(msg), table, lines)
	}
}
/*Basic validations for various commands.
*Keys are limited to 250 bytes; numeric arguments must parse as uint64.
*On any failure ERR_CMD_ERR is written to the client before returning.
*arguments: command to check against, other parameters sent with the command (excluding the value), client connection
*return: integer representing error state (0: valid, 1: invalid — error already sent to client)
*/
func isValid(cmd string, tokens []string, conn net.Conn) int {
	switch cmd {
	case SET:
		// set <key> <exptime> <numbytes> [noreply] -> 4 or 5 tokens
		if len(tokens) > 5 || len(tokens) < 4 {
			logger.Println(cmd, ":Invalid no. of tokens")
			write(conn, ERR_CMD_ERR)
			return 1
		}
		if len([]byte(tokens[1])) > 250 {
			logger.Println(cmd, ":Invalid size of key")
			write(conn, ERR_CMD_ERR)
			return 1
		}
		if len(tokens) == 5 && tokens[4] != NOREPLY {
			logger.Println(cmd, ":optional arg incorrect")
			write(conn, ERR_CMD_ERR)
			return 1
		}
		if _, err := strconv.ParseUint(tokens[2], 10, 64); err != nil {
			logger.Println(cmd, ":expiry time invalid")
			write(conn, ERR_CMD_ERR)
			return 1
		}
		if _, err := strconv.ParseUint(tokens[3], 10, 64); err != nil {
			logger.Println(cmd, ":numBytes invalid")
			write(conn, ERR_CMD_ERR)
			return 1
		}
	case GET:
		// get <key> -> exactly 2 tokens
		if len(tokens) != 2 {
			logger.Println(cmd, ":Invalid number of arguments")
			write(conn, ERR_CMD_ERR)
			return 1
		}
		if len(tokens[1]) > 250 {
			logger.Println(cmd, ":Invalid key size")
			write(conn, ERR_CMD_ERR)
			return 1
		}
	case GETM:
		// getm <key> -> exactly 2 tokens
		if len(tokens) != 2 {
			logger.Println(cmd, ":Invalid number of tokens")
			write(conn, ERR_CMD_ERR)
			return 1
		}
		if len(tokens[1]) > 250 {
			logger.Println(cmd, ":Invalid key size")
			write(conn, ERR_CMD_ERR)
			return 1
		}
	case CAS:
		// cas <key> <exptime> <version> <numbytes> [noreply] -> 5 or 6 tokens
		if len(tokens) > 6 || len(tokens) < 5 {
			logger.Println(cmd, ":Invalid number of tokens")
			write(conn, ERR_CMD_ERR)
			return 1
		}
		if len([]byte(tokens[1])) > 250 {
			logger.Println(cmd, ":Invalid size of key")
			write(conn, ERR_CMD_ERR)
			return 1
		}
		if len(tokens) == 6 && tokens[5] != NOREPLY {
			logger.Println(cmd, ":optional arg incorrect")
			write(conn, ERR_CMD_ERR)
			return 1
		}
		if _, err := strconv.ParseUint(tokens[2], 10, 64); err != nil {
			logger.Println(cmd, ":expiry time invalid")
			write(conn, ERR_CMD_ERR)
			return 1
		}
		if _, err := strconv.ParseUint(tokens[3], 10, 64); err != nil {
			logger.Println(cmd, ":version invalid")
			write(conn, ERR_CMD_ERR)
			return 1
		}
		if _, err := strconv.ParseUint(tokens[4], 10, 64); err != nil {
			logger.Println(cmd, ":numbytes invalid")
			write(conn, ERR_CMD_ERR)
			return 1
		}
	case DELETE:
		// delete <key> -> exactly 2 tokens
		if len(tokens) != 2 {
			logger.Println(cmd, ":Invalid number of tokens")
			write(conn, ERR_CMD_ERR)
			return 1
		}
		if len([]byte(tokens[1])) > 250 {
			logger.Println(cmd, ":Invalid size of key")
			write(conn, ERR_CMD_ERR)
			return 1
		}
	default:
		// Unknown commands are treated as valid here; parseInput's default
		// branch reports the error instead.
		return 0
	}
	//compiler is happy
	return 0
}
/*Function parses the command provided by the client and delegates further action to command specific functions.
*Based on the return values of those functions, send appropriate messages to the client.
*arguments: client connection, message from client, pointer to hashtable structure, channel shared with myRead function
*return: none
*/
func parseInput(conn net.Conn, msg string, table *KeyValueStore, ch chan []byte) {
	tokens := strings.Fields(msg)
	//general error, don't check for commands, avoid the pain ;)
	if len(tokens) > MAX_CMD_ARGS || len(tokens) < MIN_CMD_ARGS {
		write(conn, ERR_CMD_ERR)
		return
	}
	//fmt.Println(tokens)
	//for efficient string concatenation
	var buffer bytes.Buffer
	switch tokens[0] {
	case SET:
		if isValid(SET, tokens, conn) != 0 {
			return
		}
		// Reply "OK <version>" only when the set succeeded AND the client
		// did not pass noreply (r). NOTE(review): a failed set (read error)
		// already got ERR_CMD_ERR inside performSet.
		if ver, ok, r := performSet(conn, tokens[1:len(tokens)], table, ch); ok {
			//debug(table)
			logger.Println(ver)
			if r {
				buffer.Reset()
				buffer.WriteString(OK)
				buffer.WriteString(" ")
				buffer.WriteString(strconv.FormatUint(ver, 10))
				logger.Println(buffer.String())
				write(conn, buffer.String())
			}
		}
	case GET:
		if isValid(GET, tokens, conn) != 0 {
			return
		}
		// On hit: "VALUE <numbytes>" line followed by the raw value line.
		if data, ok := performGet(conn, tokens[1:len(tokens)], table); ok {
			logger.Println("sending", tokens[1], "data")
			buffer.Reset()
			buffer.WriteString(VALUE)
			buffer.WriteString(" ")
			buffer.WriteString(strconv.FormatUint(data.numBytes, 10))
			write(conn, buffer.String())
			buffer.Reset()
			buffer.Write(data.value)
			write(conn, buffer.String())
		} else {
			buffer.Reset()
			buffer.WriteString(ERR_NOT_FOUND)
			write(conn, buffer.String())
		}
		//debug(table)
	case GETM:
		if isValid(GETM, tokens, conn) != 0 {
			return
		}
		// On hit: "VALUE <version> <remaining-ttl> <numbytes>" then the value.
		// Perpetual keys report a TTL of 0.
		if data, ok := performGetm(conn, tokens[1:len(tokens)], table); ok {
			logger.Println("sending", tokens[1], "metadata")
			buffer.Reset()
			buffer.WriteString(VALUE)
			buffer.WriteString(" ")
			buffer.WriteString(strconv.FormatUint(data.version, 10))
			buffer.WriteString(" ")
			if data.isPerpetual {
				buffer.WriteString("0")
			} else {
				buffer.WriteString(strconv.FormatUint(data.expTime-uint64(time.Now().Unix()), 10))
			}
			buffer.WriteString(" ")
			buffer.WriteString(strconv.FormatUint(data.numBytes, 10))
			write(conn, buffer.String())
			buffer.Reset()
			buffer.Write(data.value)
			write(conn, buffer.String())
		} else {
			buffer.Reset()
			buffer.WriteString(ERR_NOT_FOUND)
			write(conn, buffer.String())
		}
		//debug(table)
	case CAS:
		if isValid(CAS, tokens, conn) != 0 {
			return
		}
		// ok here is performCas's status code: 0 success, 1 value-read
		// error, 2 version mismatch, 3 not found.
		// NOTE(review): the outer condition and the inner `if r` are
		// redundant (both test r); likely one of them was meant to differ.
		if ver, ok, r := performCas(conn, tokens[1:len(tokens)], table, ch); r {
			if r {
				switch ok {
				case 0:
					buffer.Reset()
					buffer.WriteString(OK)
					buffer.WriteString(" ")
					buffer.WriteString(strconv.FormatUint(ver, 10))
					logger.Println(buffer.String())
					write(conn, buffer.String())
				case 1:
					buffer.Reset()
					buffer.WriteString(ERR_CMD_ERR)
					write(conn, buffer.String())
				case 2:
					buffer.Reset()
					buffer.WriteString(ERR_VERSION)
					write(conn, buffer.String())
				case 3:
					buffer.Reset()
					buffer.WriteString(ERR_NOT_FOUND)
					write(conn, buffer.String())
				}
			}
		}
		//debug(table)
	case DELETE:
		if isValid(DELETE, tokens, conn) != 0 {
			return
		}
		// performDelete: 0 deleted, 1 expired (also deleted), 2 not found;
		// expired keys are reported to the client as not found.
		if ok := performDelete(conn, tokens[1:len(tokens)], table); ok == 0 {
			write(conn, DELETED)
		} else {
			write(conn, ERR_NOT_FOUND)
		}
		//debug(table)
	default:
		buffer.Reset()
		buffer.WriteString(ERR_CMD_ERR)
		write(conn, buffer.String())
	}
}
/*
*Helper function to read value or cause timeout after READ_TIMEOUT seconds.
*Expects n value bytes plus the trailing CRLF (n+2 bytes total) from the
*channel; the CRLF is stripped from the returned slice.
*parameters: channel to read data from, threshold number of bytes to read
*returns: the value bytes and error state (true on timeout or overlong value)
*/
func readValue(ch chan []byte, n uint64) ([]byte, bool) {
	//now we need to read the value which should have been sent
	valReadLength := uint64(0)
	var v []byte
	err := false
	//buffered so the timer goroutine never blocks, even if we return first
	up := make(chan bool, 1)
	//after 5 seconds passed reading value, we'll just send err to client
	go func() {
		time.Sleep(READ_TIMEOUT * time.Second)
		up <- true
	}()
	//use select for the data channel and the timeout channel
	for valReadLength < n+2 {
		select {
		case temp := <-ch:
			logger.Println("Value chunk read!")
			valReadLength += uint64(len(temp))
			//more bytes than declared: reject the value
			if valReadLength > n+2 {
				err = true
				break
			}
			v = append(v, temp...)
		case <-up:
			err = true
			logger.Println("Oh, Oh timeout")
			break
		}
		//NOTE: `break` above only exits the select; this flag check is what
		//actually terminates the loop on error/timeout
		if err {
			break
		}
	}
	if err {
		return []byte{0}, err
	}
	//strip the trailing CRLF: only the first n bytes are the value
	return v[:n], err
}
/*Delegate function responsible for all parsing and hashtable interactions for the SET command sent by client.
*Creates the key if absent, otherwise overwrites it and bumps its version.
*An expiry offset of 0 makes the key perpetual; otherwise the absolute
*expiry time (now + offset) is stored.
*arguments: client connection, tokenized command sent by the client, pointer to hashtable structure, channel shared with myRead
*return: version of inserted key (if successful, 0 otherwise), success or failure, whether to send reply to client
*/
func performSet(conn net.Conn, tokens []string, table *KeyValueStore, ch chan []byte) (uint64, bool, bool) {
	k := tokens[0]
	//expiry time offset (already validated by isValid)
	e, _ := strconv.ParseUint(tokens[1], 10, 64)
	//numbytes (already validated by isValid)
	n, _ := strconv.ParseUint(tokens[2], 10, 64)
	r := true
	if len(tokens) == 4 && tokens[3] == NOREPLY {
		r = false
	}
	logger.Println(r)
	if v, err := readValue(ch, n); err {
		write(conn, ERR_CMD_ERR)
		return 0, false, r
	} else {
		//defer registered before Lock still runs after it at return time
		defer table.Unlock()
		table.Lock()
		//critical section start
		var val *Data
		if _, ok := table.dictionary[k]; ok {
			val = table.dictionary[k]
		} else {
			val = new(Data)
			table.dictionary[k] = val
		}
		val.numBytes = n
		val.version++
		if e == 0 {
			val.isPerpetual = true
			val.expTime = 0
		} else {
			val.isPerpetual = false
			val.expTime = e + uint64(time.Now().Unix())
		}
		val.value = v
		return val.version, true, r
	}
}
/*Delegate function responsible for activities related to the GET command sent by the client.
*Expired keys are lazily deleted on access (hence the write lock). A copy
*of the data is returned so the caller can use it outside the lock.
*arguments: client connection, tokenized command sent by the client, pointer to hashtable structure
*return: pointer to value corresponding to the key given by client, success or failure
*/
func performGet(conn net.Conn, tokens []string, table *KeyValueStore) (*Data, bool) {
	key := tokens[0]
	//lock because if key is expired, we'll delete it
	table.Lock()
	defer table.Unlock()
	//critical section begin
	v, found := table.dictionary[key]
	if !found {
		return nil, false
	}
	if !v.isPerpetual && v.expTime < uint64(time.Now().Unix()) {
		//lazily expire the key
		delete(table.dictionary, key)
		return nil, false
	}
	result := &Data{
		numBytes: v.numBytes,
		value:    v.value[:],
	}
	return result, true
}
/*Delegate function responsible for activities related to the GETM command sent by the client.
*Like performGet but the returned copy also carries the metadata fields
*(version, expiry, perpetual flag). Expired keys are lazily deleted.
*arguments: client connection, tokenized command sent by the client, pointer to hashtable structure
*return: pointer to value corresponding to the key given by client, success or failure
*/
func performGetm(conn net.Conn, tokens []string, table *KeyValueStore) (*Data, bool) {
	key := tokens[0]
	table.Lock()
	defer table.Unlock()
	//critical section begin
	v, found := table.dictionary[key]
	if !found {
		return nil, false
	}
	if !v.isPerpetual && v.expTime < uint64(time.Now().Unix()) {
		//lazily expire the key
		delete(table.dictionary, key)
		return nil, false
	}
	result := &Data{
		version:     v.version,
		expTime:     v.expTime,
		numBytes:    v.numBytes,
		value:       v.value[:],
		isPerpetual: v.isPerpetual,
	}
	return result, true
}
/*Delegate function responsible for activities related to the CAS command sent by the client.
*arguments: client connection, tokenized command sent by the client, pointer to hashtable structure, channel shared with myRead
*return: new version of updated key (if it is updated), error status {0: key found and changed, 1: error while
*reading new value, 2: version mismatch with key, 3: key not found}, whether to reply to client
*(the status codes above match the branches below and parseInput's replies;
*the previous comment had codes 0 and 1 swapped)
*/
func performCas(conn net.Conn, tokens []string, table *KeyValueStore, ch chan []byte) (uint64, int, bool) {
	k := tokens[0]
	//arguments already validated by isValid
	e, _ := strconv.ParseUint(tokens[1], 10, 64)
	ve, _ := strconv.ParseUint(tokens[2], 10, 64)
	n, _ := strconv.ParseUint(tokens[3], 10, 64)
	r := true
	logger.Println(k, e, ve, n, r)
	if len(tokens) == 5 && tokens[4] == NOREPLY {
		r = false
	}
	//read value
	if v, err := readValue(ch, n); err {
		//status 1: the replacement value could not be read
		return 0, 1, r
	} else {
		defer table.Unlock()
		table.Lock()
		if val, ok := table.dictionary[k]; ok {
			if val.version == ve {
				if val.isPerpetual || val.expTime >= uint64(time.Now().Unix()) {
					//if expiry time is zero, key should not be deleted
					if e == 0 {
						val.isPerpetual = true
						val.expTime = 0
					} else {
						val.isPerpetual = false
						val.expTime = e + uint64(time.Now().Unix())
					}
					val.numBytes = n
					val.version++
					val.value = v
					//status 0: key found and changed
					return val.version, 0, r
				} else {
					logger.Println("expired key found!")
					//version found but key expired, can delete key safely and tell client that it does not exist
					delete(table.dictionary, k)
					return 0, 3, r
				}
			}
			//status 2: version mismatch
			return 0, 2, r
		}
		//status 3: key not found
		return 0, 3, r
	}
}
/*Delegate function responsible for activities related to the DELETE command sent by the client.
*The key is removed whether alive or expired; expired keys are reported as
*if they did not exist.
*arguments: client connection, tokenized command sent by the client, pointer to hashtable structure
*return: integer specifying error state {0: found and deleted, 1: found but expired (deleted but client told non-existent), 2: key not found}
*/
func performDelete(conn net.Conn, tokens []string, table *KeyValueStore) int {
	key := tokens[0]
	logger.Println(tokens)
	table.Lock()
	defer table.Unlock()
	//begin critical section
	v, found := table.dictionary[key]
	if !found {
		return 2
	}
	//delete whether alive or expired
	delete(table.dictionary, key)
	if v.isPerpetual || v.expTime >= uint64(time.Now().Unix()) {
		return 0
	}
	return 1
}
/*debug dumps every key/value pair in the hashtable to the logger,
 *bracketed by start/end markers.
 *arguments: pointer to the hashtable structure
 *return: none
 */
func debug(table *KeyValueStore) {
	logger.Println("----start debug----")
	for k, v := range table.dictionary {
		logger.Println(k, v)
	}
	logger.Println("----end debug----")
}
/*CustomSplitter is a bufio.SplitFunc adapted from bufio.ScanLines: instead of
 *splitting on '\n' it splits on the "\r\n" sequence. The returned token
 *INCLUDES the terminating "\r\n". Lone '\n' bytes are skipped over.
 *arguments: data in bytes, whether EOF has been reached
 *return: bytes to advance, token found, error state
 */
func CustomSplitter(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	// Walk forward through '\n' occurrences until one is preceded by '\r'.
	for start := 0; ; {
		i := bytes.IndexByte(data[start:], '\n')
		if i < 0 {
			// No further newline in the buffer.
			break
		}
		if i > 0 && data[start+i-1] == '\r' {
			// Token runs from the beginning of data through the "\r\n".
			end := start + i + 1
			return end, data[:end], nil
		}
		// Bare '\n': resume the search just past it.
		start += i + 1
	}
	// At EOF we hand back the final, unterminated chunk.
	if atEOF {
		return len(data), data, nil
	}
	// Otherwise ask the Scanner for more data.
	return 0, nil, nil
}
/*main is the entry point. It configures the logger (file-backed when any
 *non-empty CLI argument is given, discarded otherwise), starts the server
 *in the background, and blocks until a line is entered on stdin.
 *arguments: none
 *return: none
 */
func main() {
	logToFile := len(os.Args) > 1 && os.Args[1] != ""
	if logToFile {
		// Error deliberately ignored: logging is best-effort here.
		logf, _ := os.OpenFile("serverlog.log", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
		defer logf.Close()
		logger = log.New(logf, "SERVER: ", log.Ltime|log.Lshortfile)
	} else {
		logger = log.New(ioutil.Discard, "SERVER: ", log.Ldate)
	}
	go startServer()
	// Keep the process alive until the operator presses enter.
	var input string
	fmt.Scanln(&input)
}
// ReInitServer empties the shared hashtable. The server itself never calls
// this; it exists so test cases can reset state between runs.
func ReInitServer() {
	table.Lock()
	defer table.Unlock()
	for k := range table.dictionary {
		delete(table.dictionary, k)
	}
}
|
package main
import "fmt"
// main prints a handful of demo expressions: string concatenation, integer
// and float arithmetic, and the boolean operators.
func main() {
	greeting := "Hakuna " + "Matata"
	fmt.Println(greeting)
	sum := 1 + 1
	fmt.Println("Let's check whether 1+1 is 11 ", sum)
	quotient := 7.0 / 3.0
	fmt.Println("float check 7.0/3.0 =", quotient)
	fmt.Println("bool check ", true && false)
	fmt.Println("Life is awesome: ", true || false)
	fmt.Println("go is difficult", !true)
}
// envsubst command line tool
package main
import (
"bufio"
"flag"
"fmt"
"io"
"os"
)
var (
input = flag.String("i", "", "")
output = flag.String("o", "", "")
noUnset = flag.Bool("no-unset", false, "")
noEmpty = flag.Bool("no-empty", false, "")
)
var usage = `Usage: envsubst [options...] <input>
Options:
-i Specify file input, otherwise use last argument as input file.
If no input file is specified, read from stdin.
-o Specify file output. If none is specified, write to stdout.
-no-unset Fail if a variable is not set.
-no-empty Fail if a variable is set but empty.
`
// main reads the input (the -i file, or stdin when it is a pipe/redirect),
// substitutes environment variables via StringRestricted, and writes the
// result to the -o file or stdout.
func main() {
	flag.Usage = func() {
		// Fix: usage is data, not a format string — the old
		// fmt.Sprintf(usage) would mangle any '%' in the text.
		fmt.Fprint(os.Stderr, usage)
	}
	flag.Parse()
	// Reader
	var reader *bufio.Reader
	if *input != "" {
		file, err := os.Open(*input)
		if err != nil {
			usageAndExit(fmt.Sprintf("Error to open file input: %s.", *input))
		}
		defer file.Close()
		reader = bufio.NewReader(file)
	} else {
		// Refuse to run interactively: stdin must not be a character device.
		stat, err := os.Stdin.Stat()
		if err != nil || (stat.Mode()&os.ModeCharDevice) != 0 {
			usageAndExit("")
		}
		reader = bufio.NewReader(os.Stdin)
	}
	// Collect data. Accumulate into a byte slice instead of repeated string
	// concatenation, which is quadratic on large inputs.
	var buf []byte
	for {
		line, err := reader.ReadString('\n')
		buf = append(buf, line...)
		if err != nil {
			if err == io.EOF {
				break
			}
			usageAndExit("Failed to read input.")
		}
	}
	data := string(buf)
	// Writer
	var file *os.File
	var err error
	if *output != "" {
		file, err = os.Create(*output)
		if err != nil {
			usageAndExit("Error to create the wanted output file.")
		}
		// Fix: the output file was never closed on the success path.
		defer file.Close()
	} else {
		file = os.Stdout
	}
	// Parse input string
	result, err := StringRestricted(data, *noUnset, *noEmpty)
	if err != nil {
		errorAndExit(err)
	}
	if _, err := file.WriteString(result); err != nil {
		filename := *output
		if filename == "" {
			filename = "STDOUT"
		}
		usageAndExit(fmt.Sprintf("Error writing output to: %s.", filename))
	}
}
// usageAndExit prints an optional message followed by the usage text to
// stderr, then terminates the process with status 1.
func usageAndExit(msg string) {
	if msg != "" {
		// Fix: use Fprint, not Fprintf — msg is data, not a format string,
		// and may legitimately contain '%' (e.g. in a file name), which
		// Fprintf would render as "%!x(MISSING)" noise.
		fmt.Fprint(os.Stderr, msg)
		fmt.Fprint(os.Stderr, "\n\n")
	}
	flag.Usage()
	fmt.Fprint(os.Stderr, "\n")
	os.Exit(1)
}
// errorAndExit reports e on stderr and terminates with status 1.
func errorAndExit(e error) {
	msg := e.Error()
	fmt.Fprintf(os.Stderr, "%v\n\n", msg)
	os.Exit(1)
}
|
package svrtest
import (
bnd "github.com/devwarrior777/atomicswap/libs/protobind"
)
/*
TEST DATA FOR THE LTC WALLET RPC COMMANDS
You will need your own testdata that reflects your coins configurations:
- Testnet or not
- RPC Info to connect to your LTC RPC wallet node(s)
*/
// PingWalletRPC: mainnet and testnet wallet-connection checks.
// All requests below share the same local dev RPC settings
// (localhost, user/pass "dev", wallet passphrase "123", no TLS certs).
var ltcPingWalletRPCRequest = bnd.PingWalletRPCRequest{
	Coin:     bnd.COIN_LTC,
	Testnet:  false,
	Hostport: "localhost",
	Rpcuser:  "dev",
	Rpcpass:  "dev",
	Wpass:    "123",
	Certs:    "",
}
var ltcTestnetPingWalletRPCRequest = bnd.PingWalletRPCRequest{
	Coin:     bnd.COIN_LTC,
	Testnet:  true,
	Hostport: "localhost",
	Rpcuser:  "dev",
	Rpcpass:  "dev",
	Wpass:    "123",
	Certs:    "",
}

// NewAddress: request a fresh wallet address.
var ltcNewAddressRequest = bnd.NewAddressRequest{
	Coin:     bnd.COIN_LTC,
	Testnet:  false,
	Hostport: "localhost",
	Rpcuser:  "dev",
	Rpcpass:  "dev",
	Wpass:    "123",
	Certs:    "",
}
var ltcTestnetNewAddressRequest = bnd.NewAddressRequest{
	Coin:     bnd.COIN_LTC,
	Testnet:  true,
	Hostport: "localhost",
	Rpcuser:  "dev",
	Rpcpass:  "dev",
	Wpass:    "123",
	Certs:    "",
}

// Initiate: start a swap. Amount is in the coin's smallest unit
// (0.1 LTC if litoshi — TODO confirm unit against libs/protobind).
var ltcInitiateRequest = bnd.InitiateRequest{
	Coin:     bnd.COIN_LTC,
	Testnet:  false,
	Hostport: "localhost",
	Rpcuser:  "dev",
	Rpcpass:  "dev",
	Wpass:    "123",
	Certs:    "",
	Amount:   10000000,
}
var ltcTestnetInitiateRequest = bnd.InitiateRequest{
	Coin:     bnd.COIN_LTC,
	Testnet:  true,
	Hostport: "localhost",
	Rpcuser:  "dev",
	Rpcpass:  "dev",
	Wpass:    "123",
	Certs:    "",
	Amount:   10000000,
}

// Participate: join a swap started by a counterparty.
var ltcParticipateRequest = bnd.ParticipateRequest{
	Coin:     bnd.COIN_LTC,
	Testnet:  false,
	Hostport: "localhost",
	Rpcuser:  "dev",
	Rpcpass:  "dev",
	Wpass:    "123",
	Certs:    "",
	Amount:   10000000,
}
var ltcTestnetParticipateRequest = bnd.ParticipateRequest{
	Coin:     bnd.COIN_LTC,
	Testnet:  true,
	Hostport: "localhost",
	Rpcuser:  "dev",
	Rpcpass:  "dev",
	Wpass:    "123",
	Certs:    "",
	Amount:   10000000,
}

// Redeem: claim the swapped coins.
var ltcRedeemRequest = bnd.RedeemRequest{
	Coin:     bnd.COIN_LTC,
	Testnet:  false,
	Hostport: "localhost",
	Rpcuser:  "dev",
	Rpcpass:  "dev",
	Wpass:    "123",
	Certs:    "",
}
var ltcTestnetRedeemRequest = bnd.RedeemRequest{
	Coin:     bnd.COIN_LTC,
	Testnet:  true,
	Hostport: "localhost",
	Rpcuser:  "dev",
	Rpcpass:  "dev",
	Wpass:    "123",
	Certs:    "",
}

// Refund: reclaim coins from an expired contract.
var ltcRefundRequest = bnd.RefundRequest{
	Coin:     bnd.COIN_LTC,
	Testnet:  false,
	Hostport: "localhost",
	Rpcuser:  "dev",
	Rpcpass:  "dev",
	Wpass:    "123",
	Certs:    "",
}
var ltcTestnetRefundRequest = bnd.RefundRequest{
	Coin:     bnd.COIN_LTC,
	Testnet:  true,
	Hostport: "localhost",
	Rpcuser:  "dev",
	Rpcpass:  "dev",
	Wpass:    "123",
	Certs:    "",
}

// ExtractSecret / Audit operate on transaction data only, so no RPC
// connection details are required.
var ltcExtractSecretRequest = bnd.ExtractSecretRequest{
	Coin:    bnd.COIN_LTC,
	Testnet: false,
}
var ltcTestnetExtractSecretRequest = bnd.ExtractSecretRequest{
	Coin:    bnd.COIN_LTC,
	Testnet: true,
}
var ltcAuditRequest = bnd.AuditRequest{
	Coin:    bnd.COIN_LTC,
	Testnet: false,
}
var ltcTestnetAuditRequest = bnd.AuditRequest{
	Coin:    bnd.COIN_LTC,
	Testnet: true,
}

// Publish: broadcast a raw transaction through the wallet node.
var ltcPublishRequest = bnd.PublishRequest{
	Coin:     bnd.COIN_LTC,
	Testnet:  false,
	Hostport: "localhost",
	Rpcuser:  "dev",
	Rpcpass:  "dev",
	Wpass:    "123",
	Certs:    "",
}
var ltcTestnetPublishRequest = bnd.PublishRequest{
	Coin:     bnd.COIN_LTC,
	Testnet:  true,
	Hostport: "localhost",
	Rpcuser:  "dev",
	Rpcpass:  "dev",
	Wpass:    "123",
	Certs:    "",
}

// GetTx: fetch a wallet transaction.
var ltcGetTxRequest = bnd.GetTxRequest{
	Coin:     bnd.COIN_LTC,
	Testnet:  false,
	Hostport: "localhost",
	Rpcuser:  "dev",
	Rpcpass:  "dev",
	Wpass:    "123",
	Certs:    "",
}
var ltcTestnetGetTxRequest = bnd.GetTxRequest{
	Coin:     bnd.COIN_LTC,
	Testnet:  true,
	Hostport: "localhost",
	Rpcuser:  "dev",
	Rpcpass:  "dev",
	Wpass:    "123",
	Certs:    "",
}
|
package bot
import "github.com/SevereCloud/vksdk/v2/object"
// getPersonalAreaKeyboard builds the inline keyboard for the personal-area
// dialog: three buttons, one per row, each with its own color.
func getPersonalAreaKeyboard() *object.MessagesKeyboard {
	kb := object.NewMessagesKeyboardInline()
	buttons := []struct {
		label, color string
	}{
		{`Изменить кабинет`, `primary`},
		{`История заказов`, `secondary`},
		{`Отменить заказ`, `negative`},
	}
	for _, b := range buttons {
		kb.AddRow()
		kb.AddTextButton(b.label, ``, b.color)
	}
	return kb
}
// getGeneralKeyboard builds the main (non-inline) keyboard with the
// "personal area" and "make an order" buttons on a single row.
// t is passed straight to NewMessagesKeyboard — presumably the one-time
// flag; confirm against the vksdk documentation.
func getGeneralKeyboard(t bool) *object.MessagesKeyboard {
	k := object.NewMessagesKeyboard(object.BaseBoolInt(t))
	k.AddRow()
	k.AddTextButton(`Личный кабинет`, ``, `primary`)
	k.AddTextButton(`Сделать заказ`, ``, `primary`)
	return k
}
|
package resolver
import (
"context"
"github.com/plexmediamanager/micro-torrent/proto"
)
// ApplicationVersion fetches the torrent client's application version and
// packs the result (and any error) into response via
// structureToBytesWithError. properties is unused.
func (service TorrentService) ApplicationVersion (_ context.Context, properties *proto.TorrentEmpty, response *proto.TorrentResponse) error {
	result, err := service.Torrent.ApplicationVersion()
	return structureToBytesWithError(result, err, response)
}
// ApplicationAPIVersion fetches the torrent client's API version and packs
// the result (and any error) into response. properties is unused.
func (service TorrentService) ApplicationAPIVersion (_ context.Context, properties *proto.TorrentEmpty, response *proto.TorrentResponse) error {
	result, err := service.Torrent.ApplicationAPIVersion()
	return structureToBytesWithError(result, err, response)
}
// ApplicationBuildInformation fetches the torrent client's build info and
// packs the result (and any error) into response. properties is unused.
func (service TorrentService) ApplicationBuildInformation (_ context.Context, properties *proto.TorrentEmpty, response *proto.TorrentResponse) error {
	result, err := service.Torrent.ApplicationBuildInformation()
	return structureToBytesWithError(result, err, response)
}
// ApplicationPreferences fetches the torrent client's preferences and packs
// the result (and any error) into response. properties is unused.
func (service TorrentService) ApplicationPreferences (_ context.Context, properties *proto.TorrentEmpty, response *proto.TorrentResponse) error {
	result, err := service.Torrent.ApplicationPreferences()
	return structureToBytesWithError(result, err, response)
}
|
package main
import (
"ShortURL/internal/app/store"
"ShortURL/pkg/api"
"ShortURL/pkg/grpcserver"
"log"
"net"
"os"
"github.com/go-redis/redis"
"github.com/joho/godotenv"
"google.golang.org/grpc"
)
// init loads variables from a .env file into the process environment before
// main runs. A missing .env file is not fatal — it is only logged.
func init() {
	// loads values from .env into the system
	if err := godotenv.Load(); err != nil {
		log.Print("No .env file found")
	}
}
// envOrDefault returns the value of the named environment variable, or
// fallback when the variable is unset.
func envOrDefault(name, fallback string) string {
	if v, ok := os.LookupEnv(name); ok {
		return v
	}
	return fallback
}

// main wires the Redis-backed store to the gRPC server and serves until a
// fatal error occurs.
func main() {
	// Configuration, with local-development defaults.
	// (renamed redis_host -> redisHost for consistency with the other locals)
	redisHost := envOrDefault("REDIS_HOST", "127.0.0.1")
	redisPort := envOrDefault("REDIS_PORT", "6379")
	grpcPort := envOrDefault("GRPC_PORT", "5000")
	// Connect Redis and fail fast if it is unreachable.
	client := redis.NewClient(&redis.Options{
		Addr:     redisHost + ":" + redisPort,
		Password: "",
		DB:       0,
	})
	if _, err := client.Ping().Result(); err != nil {
		log.Fatal(err)
	}
	store := store.NewStoreRedis(client)
	// GRPC server
	s := grpc.NewServer()
	srv := &grpcserver.GRPCServer{Store: store}
	api.RegisterShortlinkServer(s, srv)
	l, err := net.Listen("tcp", ":"+grpcPort)
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(s.Serve(l))
}
|
package userMemory
// Default credentials for the in-memory user store.
// NOTE(review): hardcoded credentials look like a development convenience —
// confirm this package is never used with real accounts.
const (
	UserName = "user"
	UserPass = "user"
)

// BasicUser is the single in-memory user record this package manages.
var BasicUser User

// User holds a name/password pair.
type User struct {
	Name string
	Pass string
}
// SetUserPassword resets BasicUser to the built-in default credentials.
func SetUserPassword() {
	BasicUser = User{Name: UserName, Pass: UserPass}
}
// UpdateUserPass replaces BasicUser's password, keeping the default name.
func UpdateUserPass(pass string) {
	BasicUser = User{Name: UserName, Pass: pass}
}
|
package redshift
import (
"context"
"database/sql"
"fmt"
"io/ioutil"
"log"
"regexp"
"strings"
"time"
kvlogger "gopkg.in/Clever/kayvee-go.v6/logger"
yaml "gopkg.in/yaml.v2"
"github.com/Clever/pathio"
multierror "github.com/hashicorp/go-multierror"
// Use our own version of the postgres library so we get keep-alive support.
// See https://github.com/Clever/pq/pull/1
"github.com/Clever/pq"
"github.com/Clever/s3-to-redshift/v3/logger"
"github.com/Clever/s3-to-redshift/v3/s3filepath"
)
// dbExecCloser is the subset of database operations Redshift needs; the
// method set matches *sql.DB so a real connection satisfies it directly.
type dbExecCloser interface {
	Close() error
	BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error)
	ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
	QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
	QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
}
// Redshift wraps a dbExecCloser and can be used to perform operations on a redshift database.
// We additionally give it a context for the duration of the job.
// host/port/db/user are retained only for logging and diagnostics.
type Redshift struct {
	dbExecCloser
	ctx  context.Context
	host string
	port string
	db   string
	user string
}
// Table is our representation of a Redshift table, unmarshalled from the
// YAML conf file (see the yaml tags for the conf keys).
type Table struct {
	Name    string    `yaml:"dest"`
	Columns []ColInfo `yaml:"columns"`
	Meta    Meta      `yaml:"meta"`
}
// Meta holds information that might be not in Redshift or annoying to access
// in this case, we want to know the schema a table is part of
// and the column which corresponds to the timestamp at which the data was gathered
type Meta struct {
	DataDateColumn string `yaml:"datadatecolumn"`
	Schema         string `yaml:"schema"`
}
// ColInfo is a struct that contains information about a column in a Redshift database.
// SortOrdinal and DistKey only make sense for Redshift.
// Type uses the conf-file vocabulary (see typeMapping), not raw Redshift types.
type ColInfo struct {
	Name        string `yaml:"dest"`
	Type        string `yaml:"type"`
	DefaultVal  string `yaml:"defaultval"`
	NotNull     bool   `yaml:"notnull"`
	PrimaryKey  bool   `yaml:"primarykey"`
	DistKey     bool   `yaml:"distkey"`
	SortOrdinal int    `yaml:"sortord"`
}
// rangeQuery enumerates how far back maxTime looks when scanning for the
// newest row: everything, or a trailing year/month/week/day window.
type rangeQuery int

const (
	rangeAll rangeQuery = iota
	rangeYear
	rangeMonth
	rangeWeek
	rangeDay
)

// rangeQueryString maps a rangeQuery to the matching SQL DATEPART keyword
// ("" for rangeAll). Panics on values outside the enum, as before.
func rangeQueryString(r rangeQuery) string {
	keywords := []string{"", "YEAR", "MONTH", "WEEK", "DAY"}
	return keywords[r]
}
const (
	// TODO: use parameter placeholder syntax instead
	// existQueryFormat checks whether a table exists; takes schema and
	// table name as Sprintf arguments and returns at most one row.
	existQueryFormat = `SELECT table_name
 FROM information_schema.tables WHERE table_schema='%s' AND table_name='%s'`
	// returns one row per column with the attributes:
	// name, type, default_val, not_null, primary_key, dist_key, and sort_ordinal
	// need to pass a schema and table name as the parameters
	schemaQueryFormat = `SELECT
 f.attname AS name,
 pg_catalog.format_type(f.atttypid,f.atttypmod) AS col_type,
 CASE
 WHEN f.atthasdef = 't' THEN d.adsrc
 ELSE ''
 END AS default_val,
 f.attnotnull AS not_null,
 p.contype IS NOT NULL AS primary_key,
 f.attisdistkey AS dist_key,
 f.attsortkeyord AS sort_ord
 FROM pg_attribute f
 JOIN pg_class c ON c.oid = f.attrelid
 LEFT JOIN pg_attrdef d ON d.adrelid = c.oid AND d.adnum = f.attnum
 LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
 LEFT JOIN pg_constraint p ON p.conrelid = c.oid AND f.attnum = ANY (p.conkey) AND p.contype = 'p'
 WHERE c.relkind = 'r'::char
 AND n.nspname = '%s' -- Replace with schema name
 AND c.relname = '%s' -- Replace with table name
 AND f.attnum > 0 ORDER BY f.attnum`
)
var (
	// map between the config file and the redshift internal representations for types
	// NOTE: a conf type missing from this map yields "" when looked up.
	typeMapping = map[string]string{
		"boolean":   "boolean",
		"float":     "double precision",
		"int":       "integer",
		"bigint":    "bigint",
		"date":      "date",
		"timestamp": "timestamp without time zone", // timestamp with timezone is not supported in redshift
		"text":      "character varying(256)",      // unfortunately redshift turns text -> varchar 256
		"longtext":  "character varying(65535)",    // when you actually need more than 256 characters
	}
)
// NewRedshift returns a pointer to a new redshift object using configuration
// values passed in on instantiation and the AWS env vars we assume exist.
// Don't need to pass s3 info unless doing a COPY operation.
// The connection is verified with a ping before it is returned.
func NewRedshift(ctx context.Context, host, port, db, user, password string, timeout int) (*Redshift, error) {
	// Log the DSN before credentials are appended so they never reach logs.
	dsn := fmt.Sprintf("host=%s port=%s dbname=%s keepalive=1 connect_timeout=%d", host, port, db, timeout)
	log.Println("Connecting to Redshift Source: ", dsn)
	dsn += fmt.Sprintf(" user=%s password=%s", user, password)
	conn, err := sql.Open("postgres", dsn)
	if err != nil {
		return nil, err
	}
	if err := conn.Ping(); err != nil {
		return nil, err
	}
	r := &Redshift{
		dbExecCloser: conn,
		ctx:          ctx,
		host:         host,
		port:         port,
		db:           db,
		user:         user,
	}
	return r, nil
}
// Begin starts a new transaction bound to the Redshift object's context,
// with default transaction options.
func (r *Redshift) Begin() (*sql.Tx, error) {
	return r.dbExecCloser.BeginTx(r.ctx, nil)
}
// GetTableFromConf returns the redshift table representation of the s3 conf file
// It opens, unmarshalls, and does very very simple validation of the conf file
// This belongs here - s3filepath should not have to know about redshift tables
func (r *Redshift) GetTableFromConf(f s3filepath.S3File) (*Table, error) {
	log.Printf("Parsing file: %s", f.ConfFile)
	reader, err := pathio.Reader(f.ConfFile)
	if err != nil {
		return nil, fmt.Errorf("error opening conf file: %s", err)
	}
	data, err := ioutil.ReadAll(reader)
	if err != nil {
		return nil, err
	}
	var tempSchema map[string]Table
	if err := yaml.Unmarshal(data, &tempSchema); err != nil {
		return nil, fmt.Errorf("warning: could not parse file %s, err: %s", f.ConfFile, err)
	}
	// A conf file may define several tables; find the one requested by f.
	for _, config := range tempSchema {
		if config.Name != f.Table {
			continue
		}
		if config.Meta.Schema != f.Schema {
			return nil, fmt.Errorf("mismatched schema, conf: %s, file: %s", config.Meta.Schema, f.Schema)
		}
		if config.Meta.DataDateColumn == "" {
			return nil, fmt.Errorf("data date column must be set")
		}
		return &config, nil
	}
	return nil, fmt.Errorf("can't find table in conf")
}
// GetTableMetadata looks for a table and returns both the Table representation
// of the db table and the last data in the table, if that exists.
// If the table does not exist it returns (nil, nil, nil) — no error, since
// the correct behavior for the caller is to create a new table.
func (r *Redshift) GetTableMetadata(schema, tableName, dataDateCol string) (*Table, *time.Time, error) {
	var cols []ColInfo
	// does the table exist?
	var placeholder string
	q := fmt.Sprintf(existQueryFormat, schema, tableName)
	if err := r.QueryRowContext(r.ctx, q).Scan(&placeholder); err != nil {
		// If the table doesn't exist, log it, but don't return an
		// error since this is not an application error.
		// The correct behavior is to create a new table.
		if err == sql.ErrNoRows {
			log.Printf("schema: %s, table: %s does not exist", schema, tableName)
			return nil, nil, nil
		}
		return nil, nil, fmt.Errorf("issue just checking if the table exists: %s", err)
	}
	// table exists, what are the columns?
	rows, err := r.QueryContext(r.ctx, fmt.Sprintf(schemaQueryFormat, schema, tableName))
	if err != nil {
		return nil, nil, fmt.Errorf("issue running column query: %s, err: %s", schemaQueryFormat, err)
	}
	defer rows.Close()
	for rows.Next() {
		var c ColInfo
		// Scan order matches the column order produced by schemaQueryFormat.
		if err := rows.Scan(&c.Name, &c.Type, &c.DefaultVal, &c.NotNull,
			&c.PrimaryKey, &c.DistKey, &c.SortOrdinal,
		); err != nil {
			return nil, nil, fmt.Errorf("issue scanning column, err: %s", err)
		}
		cols = append(cols, c)
	}
	if err := rows.Err(); err != nil {
		return nil, nil, fmt.Errorf("issue iterating over columns, err: %s", err)
	}
	// turn into Table struct
	retTable := Table{
		Name:    tableName,
		Columns: cols,
		Meta: Meta{
			DataDateColumn: dataDateCol,
			Schema:         schema,
		},
	}
	// what's the last data in the table?
	lastData, err := r.MaxTime(fmt.Sprintf(`"%s"."%s"`, schema, tableName), dataDateCol)
	if err != nil {
		return nil, nil, err
	}
	return &retTable, &lastData, nil
}
// MaxTime returns the maximum value for the time field in the specified table,
// starting the search with the narrowest (one-day) window; see maxTime.
func (r *Redshift) MaxTime(fullName, dataDateCol string) (time.Time, error) {
	return r.maxTime(fullName, dataDateCol, rangeDay)
}
// maxTime is a helper function to scan progressively larger ranges of time to get more optimized
// max(time) queries - redshift doesn't have good optimizations for max on sort-keyed columns.
// When a window finds no rows, it recurses with the next wider window
// (rangeLimit-1) until rangeAll scans the whole table; an empty table yields
// the zero time.Time with no error.
func (r *Redshift) maxTime(fullName, dataDateCol string, rangeLimit rangeQuery) (time.Time, error) {
	lastDataQuery := fmt.Sprintf(`SELECT MAX("%s") FROM %s`, dataDateCol, fullName)
	// SQL Optimization: Redshift doesn't do proper optimizations on max for sort keys, so to improve
	// efficiency, we'll add a where clause to reduce our query area.
	if rangeLimit != rangeAll {
		lastDataQuery += fmt.Sprintf(` WHERE "%s" > GETDATE() - INTERVAL '1 %s'`, dataDateCol, rangeQueryString(rangeLimit))
	}
	var lastData pq.NullTime
	err := r.QueryRowContext(r.ctx, lastDataQuery).Scan(&lastData)
	// max will either return a value or null if no data, rather than no rows.
	if err != nil {
		return time.Time{}, fmt.Errorf("issue running query: %s, err: %s", lastDataQuery, err)
	} else if !lastData.Valid {
		// If we didn't find a hit in our reduced range, expand it and try again
		if rangeLimit != rangeAll {
			return r.maxTime(fullName, dataDateCol, rangeLimit-1)
		}
		return time.Time{}, nil
	}
	return lastData.Time, nil
}
// getColumnSQL renders one column clause of a CREATE TABLE statement.
// Note that we rely on redshift to fail if multiple sort keys were created;
// currently we don't support that.
func getColumnSQL(c ColInfo) string {
	// attr returns kw when cond holds, otherwise an empty placeholder.
	attr := func(cond bool, kw string) string {
		if cond {
			return kw
		}
		return ""
	}
	defaultVal := ""
	if c.DefaultVal != "" {
		defaultVal = fmt.Sprintf("DEFAULT %s", c.DefaultVal)
	}
	return fmt.Sprintf(" \"%s\" %s %s %s %s %s %s",
		c.Name,
		typeMapping[c.Type],
		defaultVal,
		attr(c.NotNull, "NOT NULL"),
		attr(c.SortOrdinal == 1, "SORTKEY"),
		attr(c.PrimaryKey, "PRIMARY KEY"),
		attr(c.DistKey, "DISTKEY"))
}
// CreateTable runs the full create table command in the provided transaction,
// given a redshift representation of the table. It refuses to create the
// table unless the definition carries BOTH a SORTKEY and a DISTKEY.
func (r *Redshift) CreateTable(tx *sql.Tx, table Table) error {
	var columnSQL []string
	for _, c := range table.Columns {
		columnSQL = append(columnSQL, getColumnSQL(c))
	}
	args := []interface{}{strings.Join(columnSQL, ",")}
	// for some reason prepare here was unable to succeed, perhaps look at this later
	createSQL := fmt.Sprintf(`CREATE TABLE "%s"."%s" (%s)`, table.Meta.Schema, table.Name, strings.Join(columnSQL, ","))
	// Fix: the old single check regexp "SORTKEY|DISTKEY" accepted a definition
	// containing only ONE of the keys, while the error message promises both
	// are required. Check for each keyword individually.
	hasSortKey, _ := regexp.MatchString("SORTKEY", createSQL)
	hasDistKey, _ := regexp.MatchString("DISTKEY", createSQL)
	if !hasSortKey || !hasDistKey {
		return fmt.Errorf("both SORTKEY and DISTKEY should be specified in create table: %s. Either create your own table if you truly don't want those keys, or update the config to contain both", createSQL)
	}
	createStmt, err := tx.PrepareContext(r.ctx, createSQL)
	if err != nil {
		return fmt.Errorf("issue preparing statement: %s", err)
	}
	log.Printf("Running command: %s with args: %v", createSQL, args)
	_, err = createStmt.ExecContext(r.ctx)
	return err
}
// UpdateTable figures out what columns we need to add to the target table based on the
// input table, and completes this action in the transaction provided.
// Note: only supports adding columns currently, not updating existing columns or removing them.
// Any column mismatch reported by checkSchemas aborts the whole update.
func (r *Redshift) UpdateTable(tx *sql.Tx, inputTable, targetTable Table) error {
	columnOps, err := checkSchemas(inputTable, targetTable)
	if err != nil {
		return fmt.Errorf("mismatched schema: %s", err)
	}
	// postgres only allows adding one column at a time
	for _, op := range columnOps {
		alterStmt, err := tx.PrepareContext(r.ctx, op)
		if err != nil {
			return fmt.Errorf("issue preparing statement: '%s' - err: %s", op, err)
		}
		log.Printf("Running command: %s", op)
		_, err = alterStmt.ExecContext(r.ctx)
		if err != nil {
			return fmt.Errorf("issue running statement %s: %s", op, err)
		}
	}
	return nil
}
// checkSchemas takes in two tables and compares their column schemas to make
// sure they're compatible. Mismatched columns come back in the error; columns
// the input has that the target lacks come back as ALTER TABLE statements.
func checkSchemas(inputTable, targetTable Table) ([]string, error) {
	// mongo_raw inputs are json files, so column ordering doesn't matter
	// there. At some point we could detect json inputs in a more general way
	// by peeking at the files the manifest references.
	if targetTable.Meta.Schema != "mongo_raw" {
		return checkColumnsAndOrdering(inputTable, targetTable)
	}
	return checkColumnsWithoutOrdering(inputTable, targetTable)
}
// checkColumnsAndOrdering compares the two tables' columns positionally:
// column i of the input must match column i of the target. Trailing input
// columns the target lacks become ALTER TABLE ... ADD COLUMN statements.
func checkColumnsAndOrdering(inputTable, targetTable Table) ([]string, error) {
	var ops []string
	var errs error
	if len(inputTable.Columns) < len(targetTable.Columns) {
		errs = multierror.Append(errs, fmt.Errorf("target table has more columns than the input table"))
	}
	for idx, inCol := range inputTable.Columns {
		if idx >= len(targetTable.Columns) {
			log.Printf("Missing column -- running alter table\n")
			alterSQL := fmt.Sprintf(`ALTER TABLE "%s"."%s" ADD COLUMN %s`, targetTable.Meta.Schema, targetTable.Name, getColumnSQL(inCol))
			ops = append(ops, alterSQL)
			continue
		}
		if err := checkColumn(inCol, targetTable.Columns[idx]); err != nil {
			errs = multierror.Append(errs, err)
		}
	}
	return ops, errs
}
// checkColumnsWithoutOrdering matches input columns to target columns by
// name, ignoring position. Input columns with no target counterpart become
// ALTER TABLE ... ADD COLUMN statements.
func checkColumnsWithoutOrdering(inputTable, targetTable Table) ([]string, error) {
	var ops []string
	var errs error
	for _, inCol := range inputTable.Columns {
		matched := false
		for _, targetCol := range targetTable.Columns {
			if inCol.Name != targetCol.Name {
				continue
			}
			matched = true
			if err := checkColumn(inCol, targetCol); err != nil {
				errs = multierror.Append(errs, err)
			}
		}
		if !matched {
			log.Printf("Missing column -- running alter table\n")
			alterSQL := fmt.Sprintf(`ALTER TABLE "%s"."%s" ADD COLUMN %s`,
				targetTable.Meta.Schema, targetTable.Name, getColumnSQL(inCol))
			ops = append(ops, alterSQL)
		}
	}
	return ops, errs
}
// checkColumn compares one input column definition against its target
// counterpart and returns an aggregated error describing every mismatched
// property (nil when they agree).
func checkColumn(inCol ColInfo, targetCol ColInfo) error {
	var errors error
	mismatchedTemplate := "mismatched column: %s property: %s, input: %v, target: %v"
	if inCol.Name != targetCol.Name {
		errors = multierror.Append(errors, fmt.Errorf(mismatchedTemplate, inCol.Name, "Name", inCol.Name, targetCol.Name))
	}
	if typeMapping[inCol.Type] != targetCol.Type {
		// Fix: the second HasPrefix used to re-test the INPUT type, so any
		// varchar input was exempted regardless of the target's type. Both
		// sides must be varchars for the width difference to be ignored.
		if strings.HasPrefix(typeMapping[inCol.Type], "character varying") && strings.HasPrefix(targetCol.Type, "character varying") {
			// Both are varchars of differing widths; we ignore this.
		} else {
			errors = multierror.Append(errors, fmt.Errorf(mismatchedTemplate, inCol.Name, "Type", typeMapping[inCol.Type], targetCol.Type))
		}
	}
	if inCol.DefaultVal != targetCol.DefaultVal {
		errors = multierror.Append(errors, fmt.Errorf(mismatchedTemplate, inCol.Name, "DefaultVal", inCol.DefaultVal, targetCol.DefaultVal))
	}
	if inCol.NotNull != targetCol.NotNull {
		errors = multierror.Append(errors, fmt.Errorf(mismatchedTemplate, inCol.Name, "NotNull", inCol.NotNull, targetCol.NotNull))
	}
	if inCol.PrimaryKey != targetCol.PrimaryKey {
		errors = multierror.Append(errors, fmt.Errorf(mismatchedTemplate, inCol.Name, "PrimaryKey", inCol.PrimaryKey, targetCol.PrimaryKey))
	}
	// for distkey & sortkey it's ok if the source doesn't have them, but they should at least not disagree
	if inCol.DistKey && inCol.DistKey != targetCol.DistKey {
		errors = multierror.Append(errors, fmt.Errorf(mismatchedTemplate, inCol.Name, "DistKey", inCol.DistKey, targetCol.DistKey))
	}
	if inCol.SortOrdinal != 0 && inCol.SortOrdinal != targetCol.SortOrdinal {
		errors = multierror.Append(errors, fmt.Errorf(mismatchedTemplate, inCol.Name, "SortOrdinal", inCol.SortOrdinal, targetCol.SortOrdinal))
	}
	return errors
}
// Copy copies either CSV or JSON data present in an S3 file into a redshift table.
// It also supports CSV or JSON data pointed at by a manifest file, if you pass in a manifest file.
// this is meant to be run in a transaction, so the first arg must be a sql.Tx
// if not using jsonPaths, set s3File.JSONPaths to "auto".
// An empty delimiter selects JSON mode; otherwise delimiter-separated CSV.
func (r *Redshift) Copy(tx *sql.Tx, f s3filepath.S3File, delimiter string, creds, gzip bool) error {
	var credSQL string
	if creds {
		credSQL = fmt.Sprintf(`IAM_ROLE '%s'`, f.Bucket.RedshiftRoleARN)
	}
	gzipSQL := ""
	if gzip {
		gzipSQL = "GZIP"
	}
	manifestSQL := ""
	if f.Suffix == "manifest" {
		manifestSQL = "manifest"
	}
	// default to CSV
	jsonSQL := ""
	jsonPathsSQL := ""
	// always removequotes, UNLOAD should add quotes
	// always say escape for CSVs, UNLOAD should always escape
	delimSQL := fmt.Sprintf("DELIMITER AS '%s' REMOVEQUOTES ESCAPE TRIMBLANKS EMPTYASNULL ACCEPTANYDATE", delimiter)
	// figure out if we're doing JSON - no delim means JSON
	if delimiter == "" {
		jsonSQL = "JSON"
		jsonPathsSQL = "'auto'"
		delimSQL = ""
	}
	copySQL := fmt.Sprintf(`COPY "%s"."%s" FROM '%s' WITH %s %s %s REGION '%s' TIMEFORMAT 'auto' TRUNCATECOLUMNS STATUPDATE ON %s %s %s`,
		f.Schema, f.Table, f.GetDataFilename(), gzipSQL, jsonSQL, jsonPathsSQL, f.Bucket.Region, manifestSQL, credSQL, delimSQL)
	log.Printf("Running command: %s", copySQL)
	// can't use prepare b/c of redshift-specific syntax that postgres does not like
	_, err := tx.ExecContext(r.ctx, copySQL)
	return err
}
// UpdateLatencyInfo updates the latency table with the current time to indicate
// that the table data has been updated.
// NOTE: the writes deliberately run outside the supplied transaction (see the
// serialization TODO below); tx is currently unused.
func (r *Redshift) UpdateLatencyInfo(tx *sql.Tx, table Table) error {
	dest := fmt.Sprintf("%s.%s", table.Meta.Schema, table.Name)
	// Insert a row for the latencies table if it doesn't already exist.
	// We do this outside of the transaction, since there's no reason to lock the entire table.
	_, err := r.ExecContext(r.ctx, fmt.Sprintf(
		`INSERT INTO latencies (name) (
 SELECT '%s' AS name
 EXCEPT
 SELECT name FROM latencies WHERE name = '%s'
 )`, dest, dest))
	// Fix: this error used to be silently overwritten by the query below.
	if err != nil {
		return fmt.Errorf("error ensuring latency row exists for %s: %s", dest, err)
	}
	// Get the last latency value out of the table, for logging.
	// We should do it in the same place as the insert, otherwise there's a chance serialization ends up without it existing yet.
	latencyQuery := fmt.Sprintf("SELECT last_update FROM latencies WHERE name = '%s'", dest)
	var t pq.NullTime
	// this will either return a value or null if no data, rather than no rows, because we inserted earlier
	if err := r.QueryRowContext(r.ctx, latencyQuery).Scan(&t); err != nil {
		return fmt.Errorf("error scanning latency table for %s: %s", dest, err)
	}
	// Update the latency table with the current timestamp, for the last run.
	// This one should be inside the transaction!
	// TODO: 8/27/2020 Redshift is having some issues with serialization right now (see ticket 7320802091)
	// so we're going to leave this outside, so it at least updates, if not to the "best" time.
	if _, err := r.ExecContext(r.ctx, fmt.Sprintf(
		"UPDATE latencies SET last_update = current_timestamp WHERE name = '%s'",
		dest)); err != nil {
		return fmt.Errorf("error saving new latency to table for %s: %s", dest, err)
	}
	logger.GetLogger().InfoD("analytics-run-latency", kvlogger.M{
		"database":     fmt.Sprintf("%s/%s", r.host, r.db),
		"destination":  dest,
		"previous_run": t.Time,
	})
	return nil
}
// Truncate deletes all rows from schema.table inside the given transaction.
// We run 'DELETE FROM' instead of 'TRUNCATE' because 'TRUNCATE' can't be run
// in a transaction (see http://docs.aws.amazon.com/redshift/latest/dg/r_TRUNCATE.html).
// Run vacuum and analyze soon after for performance reasons.
func (r *Redshift) Truncate(tx *sql.Tx, schema, table string) error {
	stmt, err := tx.PrepareContext(r.ctx, fmt.Sprintf(`DELETE FROM "%s"."%s"`, schema, table))
	if err != nil {
		return err
	}
	_, err = stmt.ExecContext(r.ctx)
	return err
}
// TruncateInTimeRange deletes all rows whose dataDateCol value falls in the
// half-open interval [start, end), inside the given transaction.
// (The original comment referenced a `timeGranularity` parameter that does
// not exist; the range is defined purely by start and end.)
func (r *Redshift) TruncateInTimeRange(tx *sql.Tx, schema, table, dataDateCol string,
	start, end time.Time) error {
	truncSQL := fmt.Sprintf(`
 DELETE FROM "%s"."%s"
 WHERE "%s" >= '%s' AND "%s" < '%s'
 `, schema, table, dataDateCol, start.Format("2006-01-02 15:04:05"),
		dataDateCol, end.Format("2006-01-02 15:04:05"))
	truncStmt, err := tx.PrepareContext(r.ctx, truncSQL)
	if err != nil {
		return err
	}
	log.Printf("Refreshing with the latest data. Running command: %s", truncSQL)
	_, err = truncStmt.ExecContext(r.ctx)
	return err
}
|
package client
import (
"context"
"crypto/rsa"
"crypto/x509"
"encoding/json"
"encoding/pem"
"fmt"
"net/http"
"time"
"golang.org/x/oauth2"
oauth2JWT "golang.org/x/oauth2/jwt"
"github.com/pkg/errors"
jose "gopkg.in/square/go-jose.v2"
fctx "formation.engineering/library/lib/telemetry/context"
)
// Credentials holds the material extracted from a private JWK:
// a PEM-encoded RSA private key plus the identity and key IDs
// embedded in the token (see ExtractKey).
type Credentials struct {
	Key        []byte
	IdentityID string
	KeyID      string
}
// ExtractKey parses a private JWK (JSON) into Credentials: the RSA private
// key re-encoded as PKCS#1 PEM, plus the "formation/identity-id" claim and
// the JWK's key ID. Non-RSA keys are rejected.
func ExtractKey(privateJWK []byte) (*Credentials, error) {
	var rawJWK map[string]string
	if err := json.Unmarshal(privateJWK, &rawJWK); err != nil {
		return nil, errors.WithMessage(err, "json parsing raw token")
	}
	identityID, ok := rawJWK["formation/identity-id"]
	if !ok {
		return nil, errors.New("No 'formation/identity-id' key present in credentials")
	}
	var parsedJWK jose.JSONWebKey
	if err := parsedJWK.UnmarshalJSON(privateJWK); err != nil {
		return nil, errors.WithMessage(err, "decode jwk")
	}
	rsaKey, ok := parsedJWK.Key.(*rsa.PrivateKey)
	if !ok {
		return nil, errors.New("Invalid credentials")
	}
	pemBytes := pem.EncodeToMemory(
		&pem.Block{
			Type:  "RSA PRIVATE KEY",
			Bytes: x509.MarshalPKCS1PrivateKey(rsaKey),
		},
	)
	return &Credentials{
		Key:        pemBytes,
		KeyID:      parsedJWK.KeyID,
		IdentityID: identityID,
	}, nil
}
// OAuth2Source builds a JWT-based oauth2 token source for the given identity
// credentials and scope, requesting tokens from "<url>/token". When the
// incoming context carries trace/span information, the HTTP client used for
// the token exchange propagates it; otherwise the context is used as-is.
func OAuth2Source(ctx0 context.Context, url string, creds Credentials, scope string) oauth2.TokenSource {
	ctx := ctx0
	// Best effort: fall back to the plain context when no trace data exists.
	if traced, err := updateContext(ctx0); err == nil {
		ctx = traced
	}
	cfg := oauth2JWT.Config{
		Email:         creds.IdentityID,
		PrivateKey:    creds.Key,
		PrivateKeyID:  creds.KeyID,
		Subject:       "",
		Scopes:        []string{scope},
		TokenURL:      fmt.Sprintf("%s/token", url),
		Expires:       1 * time.Minute,
		Audience:      "formation",
		PrivateClaims: nil,
		UseIDToken:    false,
	}
	return cfg.TokenSource(ctx)
}
// updateContext returns a copy of ctx0 whose oauth2 HTTP client injects the
// caller's trace and span IDs into outgoing requests. It fails when the
// context carries no trace or span information.
func updateContext(ctx0 context.Context) (context.Context, error) {
	traceID, ok := fctx.GetTraceID(ctx0)
	if !ok {
		return nil, fmt.Errorf("No trace_id information specified in ctx")
	}
	spanID, ok := fctx.GetSpanID(ctx0)
	if !ok {
		return nil, fmt.Errorf("No span_id information specified in ctx")
	}
	client := &http.Client{
		Transport: withHeader{
			traceID: traceID,
			spanID:  spanID,
			rt:      http.DefaultTransport,
		},
	}
	return context.WithValue(ctx0, oauth2.HTTPClient, client), nil
}
// withHeader is an http.RoundTripper decorator that adds trace/span override
// headers to every request before delegating to the wrapped transport.
type withHeader struct {
	traceID string            // value for the trace-ID override header
	spanID  string            // value for the parent-span-ID override header
	rt      http.RoundTripper // underlying transport that performs the request
}
// RoundTrip implements http.RoundTripper. It forwards the request through the
// wrapped transport with the stored trace and span IDs added as headers.
//
// Per the http.RoundTripper contract the incoming request must not be
// mutated, so the headers are added to a clone.
func (x withHeader) RoundTrip(req *http.Request) (*http.Response, error) {
	clone := req.Clone(req.Context())
	clone.Header.Add(fctx.TraceIDOverrideHeader, x.traceID)
	clone.Header.Add(fctx.ParentSpanIDOverrideHeader, x.spanID)
	return x.rt.RoundTrip(clone)
}
// OAuth2Transport returns an oauth2.Transport that authenticates requests
// using tokens from OAuth2Source, layered over http.DefaultTransport.
func OAuth2Transport(ctx context.Context, url string, creds Credentials, scope string) *oauth2.Transport {
	// Delegate to OAuth2TransportFromSource so transport construction lives
	// in exactly one place.
	return OAuth2TransportFromSource(OAuth2Source(ctx, url, creds, scope))
}
// OAuth2TransportFromSource wraps an existing token source in an
// oauth2.Transport layered over http.DefaultTransport.
func OAuth2TransportFromSource(source oauth2.TokenSource) *oauth2.Transport {
	transport := oauth2.Transport{
		Source: source,
		Base:   http.DefaultTransport,
	}
	return &transport
}
|
package engine
import (
"github.com/Gregmus2/simple-engine/common"
"github.com/Gregmus2/simple-engine/graphics"
"github.com/go-gl/gl/v4.6-core/gl"
"github.com/go-gl/glfw/v3.3/glfw"
"github.com/sirupsen/logrus"
"time"
)
// App owns the main loop: the window, the OpenGL wrapper, the camera, the
// active scene and the list of per-frame update callbacks.
type App struct {
	Window *glfw.Window
	GL     *graphics.OpenGL
	// updateActions run every frame, in order, with the frame delta in ms.
	updateActions []func(dt int64)
	camera        *graphics.Camera
	scene         common.Scene
	// quit is set by Shutdown to request a clean exit from Loop.
	quit bool
}
// NewApp wires together the window, renderer, camera, update actions and
// scene into a ready-to-run application. The error return is reserved for
// future construction failures and is currently always nil.
func NewApp(window *glfw.Window, gl *graphics.OpenGL, c *graphics.Camera, a common.UpdateActionsIn, scene common.Scene) (*App, error) {
	app := &App{
		Window:        window,
		GL:            gl,
		camera:        c,
		updateActions: a.Actions,
		scene:         scene,
	}
	return app, nil
}
// initCallbacks registers the scene's input handlers on the window and
// brackets the configured update actions with the scene's PreUpdate (first)
// and Update (last) hooks.
func (app *App) initCallbacks() {
	w, s := app.Window, app.scene
	w.SetKeyCallback(s.Callback)
	w.SetMouseButtonCallback(s.MouseCallback)
	w.SetScrollCallback(s.ScrollCallback)
	w.SetCursorPosCallback(s.CursorPositionCallback)
	actions := make([]func(dt int64), 0, len(app.updateActions)+2)
	actions = append(actions, s.PreUpdate)
	actions = append(actions, app.updateActions...)
	actions = append(actions, s.Update)
	app.updateActions = actions
}
// Loop runs the main loop: it initializes the scene, registers callbacks,
// then updates and renders every frame until the window is closed or
// Shutdown is called. The frame rate is capped at common.Config.Graphics.FPS
// by sleeping off the remainder of each frame's time budget. Destroy is
// always invoked on exit. Panics when no scene is set.
func (app *App) Loop() {
	if app.scene == nil {
		panic("scene isn't set")
	}
	app.scene.Init()
	app.initCallbacks()
	// Target frame time in milliseconds.
	targetDT := int64(1000 / common.Config.Graphics.FPS)
	t := time.Now()
	for !app.Window.ShouldClose() {
		// time.Since is the idiomatic form of time.Now().Sub(t).
		dt := time.Since(t).Milliseconds()
		if dt < targetDT {
			// Sleep off the rest of the frame budget to cap the FPS.
			time.Sleep(time.Millisecond * time.Duration(targetDT-dt))
			dt = time.Since(t).Milliseconds()
		}
		t = time.Now()
		app.OnUpdate(dt)
		app.OnRender()
		if app.quit {
			break
		}
	}
	app.Destroy()
}
// Destroy tears down the window and terminates GLFW. It is called
// automatically when Loop exits.
func (app *App) Destroy() {
	app.Window.Destroy()
	glfw.Terminate()
}
// OnUpdate invokes every registered update action with the elapsed frame
// time in milliseconds.
func (app *App) OnUpdate(dt int64) {
	for i := range app.updateActions {
		app.updateActions[i](dt)
	}
}
// OnRender clears the frame buffer, draws every drawable element of the
// current scene from the camera's point of view, then polls input events and
// swaps the back buffer. A draw error is fatal.
func (app *App) OnRender() {
	gl.Clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT)
	// The camera does not change mid-frame, so resolve its position and
	// scale once instead of once per drawable.
	x, y := app.camera.Position()
	scale := app.camera.Scale()
	for d := range app.scene.Drawable().Elements {
		if err := d.Draw(scale, x, y); err != nil {
			logrus.WithError(err).Fatal("draw error")
		}
	}
	glfw.PollEvents()
	app.Window.SwapBuffers()
}
// Shutdown requests a clean exit: Loop checks the quit flag at the end of
// each frame and breaks out when it is set.
func (app *App) Shutdown() {
	app.quit = true
}
|
package migration
import (
"log"
"github.com/Anondo/graphql-and-go/conn"
"github.com/Anondo/graphql-and-go/database/migration"
"github.com/spf13/cobra"
)
// downCMD implements the "down" subcommand, which drops all migrated tables
// from the database.
var downCMD = &cobra.Command{
	Use:   "down",
	Short: "Drop tables from database",
	Long:  `Drop tables from database`,
	RunE:  downDatabase,
}
// downDatabase drops every table registered in migration.Models using the
// default database connection. It is the RunE handler for downCMD.
func downDatabase(cmd *cobra.Command, args []string) error {
	log.Println("Dropping database table...")
	db := conn.Default()
	if err := db.DropTableIfExists(migration.Models...).Error; err != nil {
		return err
	}
	// Fixed typo in the success message: "dopped" -> "dropped".
	log.Println("Database dropped successfully!")
	return nil
}
|
package get
import (
"github.com/spf13/cobra"
)
// RootCMD is the parent "get" command; concrete resources to fetch are
// registered as subcommands in init.
var RootCMD = &cobra.Command{
	Use:   "get",
	Short: "Get Studio resources",
	Long:  ``,
}

// init attaches the available "get" subcommands.
func init() {
	RootCMD.AddCommand(customersCMD)
}
|
package config
const (
	// TmpDataFileDir is the scratch directory for assembled data files.
	// NOTE(review): hard-coded, machine-specific absolute path — consider
	// making it configurable (env var or flag).
	TmpDataFileDir = "/Users/duanyahong/tmp/data/"
	// TmpChunkFileDir is the scratch directory for uploaded file chunks.
	// NOTE(review): same hard-coded-path concern as above.
	TmpChunkFileDir = "/Users/duanyahong/tmp/chunk/"
)
|
package server
import (
"github.com/Buhrietoe/brood/server/apiv1"
"github.com/Buhrietoe/brood/server/middleware"
"github.com/gin-gonic/gin"
)
// Server describes the listen configuration for the web server.
// NOTE(review): this struct is not referenced by BuildServer in this file —
// verify it is used elsewhere before relying on it.
type Server struct {
	Address      string
	Port         string
	ListenString string // Complete string of address:port to listen on
}
// BuildServer configures and returns the gin engine with all routes: a
// permanent redirect from "/" to the embedded static GUI, a "/ping" health
// check, the "/static" asset filesystem, and the v1 API group.
func BuildServer() *gin.Engine {
	gin.SetMode(gin.ReleaseMode)
	// Default() installs the recovery and logging middlewares.
	r := gin.Default()
	// Every response carries the server identification header.
	r.Use(middleware.ServerHeader("brood"))
	// Root redirect to the embedded GUI.
	r.GET("/", func(c *gin.Context) {
		c.Redirect(301, "/static/")
	})
	// Health endpoint.
	r.GET("/ping", func(c *gin.Context) {
		c.String(200, "pong")
	})
	// Serve the embedded GUI assets.
	r.StaticFS("/static", assetFS())
	// Mount the v1 API.
	apiv1.APIV1(*r.Group("/api/v1/"))
	return r
}
|
package hmacsha256
import (
"crypto"
"fmt"
"testing"
)
// TestHmacSha2562 exercises HmacSha256 with a fixed message and key.
// NOTE(review): these three tests only print results and make no assertions,
// so they can never fail — consider comparing against known-good digests.
func TestHmacSha2562(t *testing.T) {
	fmt.Println(HmacSha256("hello","111"))
}

// TestHmacEncrypt exercises HmacEncrypt with SHA-256.
func TestHmacEncrypt(t *testing.T) {
	fmt.Println(HmacEncrypt([]byte("hello"),[]byte("111"),crypto.SHA256))
}

// TestHmacEncryptToBase64 exercises the base64-encoding variant.
func TestHmacEncryptToBase64(t *testing.T) {
	fmt.Println(HmacEncryptToBase64([]byte("hello"),[]byte("111"),crypto.SHA256))
}
|
package command
import (
"net/url"
"os"
"path/filepath"
"strings"
"time"
"github.com/cidverse/cid/pkg/core/util"
"github.com/cidverse/cidverseutils/pkg/containerruntime"
"github.com/cidverse/cidverseutils/pkg/filesystem"
"github.com/rs/zerolog/log"
)
// ApplyProxyConfiguration forwards the host's proxy environment (HTTP_PROXY,
// HTTPS_PROXY, NO_PROXY, in both upper- and lower-case forms) into the
// container, and derives the equivalent JVM system properties in
// CID_PROXY_JVM.
func ApplyProxyConfiguration(containerExec *containerruntime.Container) {
	// proxy: set both canonical and lower-case env forms, since tools
	// disagree on which one they read.
	for _, name := range []string{"HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY"} {
		value := os.Getenv(name)
		containerExec.AddEnvironmentVariable(name, value)
		containerExec.AddEnvironmentVariable(strings.ToLower(name), value)
	}
	// jvm: the http and https branches were duplicated; both now go through
	// javaProxyOptsFor.
	javaProxyOpts := javaProxyOptsFor("http", os.Getenv("HTTP_PROXY"))
	javaProxyOpts = append(javaProxyOpts, javaProxyOptsFor("https", os.Getenv("HTTPS_PROXY"))...)
	if len(javaProxyOpts) > 0 {
		containerExec.AddEnvironmentVariable("CID_PROXY_JVM", strings.Join(javaProxyOpts, " "))
	}
}

// javaProxyOptsFor converts one proxy URL into the JVM -D flags for the given
// scheme prefix ("http" or "https"). It returns nil when the URL is empty or
// unparsable, matching the original silent-skip behavior.
func javaProxyOptsFor(prefix string, rawURL string) []string {
	if len(rawURL) == 0 {
		return nil
	}
	proxyURL, err := url.Parse(rawURL)
	if err != nil {
		return nil
	}
	return []string{
		"-D" + prefix + ".proxyHost=" + proxyURL.Hostname(),
		"-D" + prefix + ".proxyPort=" + proxyURL.Port(),
		"-D" + prefix + ".nonProxyHosts=" + ConvertNoProxyForJava(os.Getenv("NO_PROXY")),
	}
}
// GetCertFileByType returns the cert file by type (ca-bundle, java-keystore)
func GetCertFileByType(certFileType string) string {
var files []string
// take host ca bundle
GetCABundleFromHost(filepath.Join(util.GetUserConfigDirectory(), "certs", "ca-bundle.crt"))
if certFileType == "ca-bundle" {
files = append(files, filepath.Join(util.GetUserConfigDirectory(), "certs", "ca-bundle.crt"))
} else if certFileType == "java-keystore" {
files = append(files, filepath.Join(util.GetUserConfigDirectory(), "certs", "keystore.jks"))
}
for _, file := range files {
if _, err := os.Stat(file); err == nil {
return file
}
}
return ""
}
// caBundles lists well-known CA bundle locations per Linux distribution;
// files in the same inner slice are concatenated into one bundle.
// see https://go.dev/src/crypto/x509/root_linux.go for possible paths
var caBundles = [][]string{
	{"/etc/ssl/certs/ca-certificates.crt"},                                  // Debian/Ubuntu/Gentoo etc.
	{"/etc/pki/tls/certs/ca-bundle.crt", "/etc/pki/tls/certs/ca-extra.crt"}, // RHEL
	{"/etc/ssl/ca-bundle.pem"},                                              // OpenSUSE
	{"/etc/pki/tls/cacert.pem"},                                             // OpenELEC
	{"/etc/ssl/cert.pem"},                                                   // Alpine Linux
}
// GetCABundleFromHost merges the host's CA certificates into a single bundle
// file at target. It is a no-op when target already exists. Only the first
// caBundles group with at least one readable file is used. The process exits
// (log.Fatal) when no bundle is found or a file cannot be read or written.
func GetCABundleFromHost(target string) {
	if filesystem.FileExists(target) {
		return
	}
	var found []string
	var bundledCerts []byte
	for _, bundle := range caBundles {
		for _, path := range bundle {
			if _, err := os.Stat(path); err == nil {
				found = append(found, path)
				cert, err := os.ReadFile(path)
				if err != nil {
					log.Fatal().Err(err).Str("file", path).Msg("failed to read bundle file")
				}
				bundledCerts = append(bundledCerts, cert...)
			}
		}
		// Stop at the first bundle group that yielded any files.
		if len(found) > 0 {
			break
		}
	}
	if len(bundledCerts) == 0 {
		log.Fatal().Msg("no CA bundle found")
	}
	// Best effort: WriteFile below reports the failure if the dir is missing.
	_ = os.MkdirAll(filepath.Dir(target), os.ModePerm)
	err := os.WriteFile(target, bundledCerts, os.ModePerm)
	if err != nil {
		log.Fatal().Err(err).Str("file", target).Msg("failed to write merged CA bundle file")
	}
	log.Info().Strs("files", found).Msg("ca certificates parsed and merged")
}
// ApplyCertMount mounts certFile into the container at containerCertFile in
// read-only mode. When CID_CERT_MOUNT_DIR is set, the file is first copied
// into that directory and mounted from there. A no-op when certFile is "".
func ApplyCertMount(containerExec *containerruntime.Container, certFile string, containerCertFile string) {
	if certFile == "" {
		return
	}
	if customCertDir := os.Getenv("CID_CERT_MOUNT_DIR"); customCertDir != "" {
		// Copy the cert into the custom mount dir and mount it from there.
		_ = os.MkdirAll(customCertDir, os.ModePerm)
		destination := filepath.Join(customCertDir, filepath.Base(certFile))
		_ = filesystem.CopyFile(certFile, destination)
		certFile = destination
	}
	containerExec.AddVolume(containerruntime.ContainerMount{
		MountType: "directory",
		Source:    certFile,
		Target:    containerCertFile,
		Mode:      containerruntime.ReadMode,
	})
}
// ReplaceCommandPlaceholders substitutes placeholders in a command string:
// {TIMESTAMP_RFC3339} becomes the current time in RFC 3339 form, and {KEY}
// becomes env[KEY] for every entry of env.
func ReplaceCommandPlaceholders(input string, env map[string]string) string {
	replaced := strings.ReplaceAll(input, "{TIMESTAMP_RFC3339}", time.Now().Format(time.RFC3339))
	for key, value := range env {
		replaced = strings.ReplaceAll(replaced, "{"+key+"}", value)
	}
	return replaced
}
// ConvertNoProxyForJava rewrites a comma-separated NO_PROXY list into the
// pipe-separated form the JVM expects for http.nonProxyHosts.
func ConvertNoProxyForJava(input string) string {
	return strings.Join(strings.Split(input, ","), "|")
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package xform
import (
"sort"
"github.com/cockroachdb/cockroach/pkg/sql/inverted"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/cat"
"github.com/cockroachdb/cockroach/pkg/sql/opt/constraint"
"github.com/cockroachdb/cockroach/pkg/sql/opt/invertedidx"
"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/errors"
)
// IsLocking returns true if the ScanPrivate is configured to use a row-level
// locking mode. This can be the case either because the Scan is in the scope of
// a SELECT .. FOR [KEY] UPDATE/SHARE clause or because the Scan was configured
// as part of the row retrieval of a DELETE or UPDATE statement.
func (c *CustomFuncs) IsLocking(scan *memo.ScanPrivate) bool {
	return scan.IsLocking()
}
// GeneratePartialIndexScans generates unconstrained index scans over all
// non-inverted, partial indexes with predicates that are implied by the
// filters. Partial indexes with predicates which cannot be proven to be implied
// by the filters are disregarded.
//
// When a filter completely matches the predicate, the remaining filters are
// simplified so that they do not include the filter. A redundant filter is
// unnecessary to include in the remaining filters because a scan over the partial
// index implicitly filters the results.
//
// For every partial index that is implied by the filters, a Scan will be
// generated along with a combination of an IndexJoin and Selects. There are
// three questions to consider which determine which operators are generated.
//
// 1. Does the index "cover" the columns needed?
// 2. Are there any remaining filters to apply after the Scan?
// 3. If there are remaining filters does the index cover the referenced
// columns?
//
// If the index covers the columns needed, no IndexJoin is need. The two
// possible generated expressions are either a lone Scan or a Scan wrapped in a
// Select that applies any remaining filters.
//
// (Scan $scanDef)
//
// (Select (Scan $scanDef) $remainingFilters)
//
// If the index is not covering, then an IndexJoin is required to retrieve the
// needed columns. Some or all of the remaining filters may be required to be
// applied after the IndexJoin, because they reference columns not covered by
// the index. Therefore, Selects can be constructed before, after, or both
// before and after the IndexJoin depending on the columns referenced in the
// remaining filters.
//
// If the index is not covering, then an IndexJoin is required to retrieve the
// needed columns. Some of the remaining filters may be applied in a Select
// before the IndexJoin, if all the columns referenced in the filter are covered
// by the index. Some of the remaining filters may be applied in a Select after
// the IndexJoin, if their columns are not covered. Therefore, Selects can be
// constructed before, after, or both before and after the IndexJoin.
//
// (IndexJoin (Scan $scanDef) $indexJoinDef)
//
// (IndexJoin
// (Select (Scan $scanDef) $remainingFilters)
// $indexJoinDef
// )
//
// (Select
// (IndexJoin (Scan $scanDef) $indexJoinDef)
// $outerFilter
// )
//
// (Select
// (IndexJoin
// (Select (Scan $scanDef) $innerFilter)
// $indexJoinDef
// )
// $outerFilter
// )
//
func (c *CustomFuncs) GeneratePartialIndexScans(
	grp memo.RelExpr, scanPrivate *memo.ScanPrivate, filters memo.FiltersExpr,
) {
	// Iterate over all partial indexes.
	var pkCols opt.ColSet
	var iter scanIndexIter
	iter.Init(c.e.evalCtx, c.e.f, c.e.mem, &c.im, scanPrivate, filters, rejectNonPartialIndexes|rejectInvertedIndexes)
	iter.ForEach(func(index cat.Index, remainingFilters memo.FiltersExpr, indexCols opt.ColSet, isCovering bool, constProj memo.ProjectionsExpr) {
		var sb indexScanBuilder
		sb.Init(c, scanPrivate.Table)

		// Build a new ScanPrivate over this index, keeping only the needed
		// columns the index actually provides.
		newScanPrivate := *scanPrivate
		newScanPrivate.Index = index.Ordinal()
		newScanPrivate.Cols = indexCols.Intersection(scanPrivate.Cols)

		// If index is covering, just add a Select with the remaining filters,
		// if there are any.
		if isCovering {
			sb.SetScan(&newScanPrivate)
			sb.AddConstProjections(constProj)
			sb.AddSelect(remainingFilters)
			sb.Build(grp)
			return
		}

		// Calculate the PK columns once (shared across ForEach invocations).
		if pkCols.Empty() {
			pkCols = c.PrimaryKeyCols(scanPrivate.Table)
		}

		// If the index is not covering, scan the needed index columns plus
		// primary key columns.
		newScanPrivate.Cols.UnionWith(pkCols)
		sb.SetScan(&newScanPrivate)

		// Add a Select with any remaining filters that can be filtered before
		// the IndexJoin. If there are no remaining filters this is a no-op. If
		// all or parts of the remaining filters cannot be applied until after
		// the IndexJoin, the new value of remainingFilters will contain those
		// filters.
		remainingFilters = sb.AddSelectAfterSplit(remainingFilters, newScanPrivate.Cols)

		// Add an IndexJoin to retrieve the columns not provided by the Scan.
		sb.AddIndexJoin(scanPrivate.Cols)

		// Add a Select with any remaining filters.
		sb.AddSelect(remainingFilters)
		sb.Build(grp)
	})
}
// GenerateConstrainedScans enumerates all non-inverted secondary indexes on the
// Scan operator's table and tries to push the given Select filter into new
// constrained Scan operators using those indexes. Since this only needs to be
// done once per table, GenerateConstrainedScans should only be called on the
// original unaltered primary index Scan operator (i.e. not constrained or
// limited).
//
// For each secondary index that "covers" the columns needed by the scan, there
// are three cases:
//
// - a filter that can be completely converted to a constraint over that index
// generates a single constrained Scan operator (to be added to the same
// group as the original Select operator):
//
// (Scan $scanDef)
//
// - a filter that can be partially converted to a constraint over that index
// generates a constrained Scan operator in a new memo group, wrapped in a
// Select operator having the remaining filter (to be added to the same group
// as the original Select operator):
//
// (Select (Scan $scanDef) $filter)
//
// - a filter that cannot be converted to a constraint generates nothing
//
// And for a secondary index that does not cover the needed columns:
//
// - a filter that can be completely converted to a constraint over that index
// generates a single constrained Scan operator in a new memo group, wrapped
// in an IndexJoin operator that looks up the remaining needed columns (and
// is added to the same group as the original Select operator)
//
// (IndexJoin (Scan $scanDef) $indexJoinDef)
//
// - a filter that can be partially converted to a constraint over that index
// generates a constrained Scan operator in a new memo group, wrapped in an
// IndexJoin operator that looks up the remaining needed columns; the
// remaining filter is distributed above and/or below the IndexJoin,
// depending on which columns it references:
//
// (IndexJoin
// (Select (Scan $scanDef) $filter)
// $indexJoinDef
// )
//
// (Select
// (IndexJoin (Scan $scanDef) $indexJoinDef)
// $filter
// )
//
// (Select
// (IndexJoin
// (Select (Scan $scanDef) $innerFilter)
// $indexJoinDef
// )
// $outerFilter
// )
//
// GenerateConstrainedScans will further constrain the enumerated index scans
// by trying to use the check constraints and computed columns that apply to the
// table being scanned, as well as the partitioning defined for the index. See
// comments above checkColumnFilters, computedColFilters, and
// partitionValuesFilters for more detail.
func (c *CustomFuncs) GenerateConstrainedScans(
	grp memo.RelExpr, scanPrivate *memo.ScanPrivate, explicitFilters memo.FiltersExpr,
) {
	var pkCols opt.ColSet
	var sb indexScanBuilder
	sb.Init(c, scanPrivate.Table)

	// Generate implicit filters from constraints and computed columns as
	// optional filters to help constrain an index scan.
	optionalFilters := c.checkConstraintFilters(scanPrivate.Table)
	computedColFilters := c.computedColFilters(scanPrivate, explicitFilters, optionalFilters)
	optionalFilters = append(optionalFilters, computedColFilters...)

	filterColumns := c.FilterOuterCols(explicitFilters)
	filterColumns.UnionWith(c.FilterOuterCols(optionalFilters))

	// Iterate over all non-inverted indexes.
	md := c.e.mem.Metadata()
	tabMeta := md.TableMeta(scanPrivate.Table)
	var iter scanIndexIter
	iter.Init(c.e.evalCtx, c.e.f, c.e.mem, &c.im, scanPrivate, explicitFilters, rejectInvertedIndexes)
	iter.ForEach(func(index cat.Index, filters memo.FiltersExpr, indexCols opt.ColSet, isCovering bool, constProj memo.ProjectionsExpr) {
		// We only consider the partition values when a particular index can otherwise
		// not be constrained. For indexes that are constrained, the partitioned values
		// add no benefit as they don't really constrain anything.
		// Furthermore, if the filters don't take advantage of the index (use any of the
		// index columns), using the partition values add no benefit.
		//
		// If the index is partitioned (by list), we generate two constraints and
		// union them: the "main" constraint and the "in-between" constraint.The
		// "main" constraint restricts the index to the known partition ranges. The
		// "in-between" constraint restricts the index to the rest of the ranges
		// (i.e. everything that falls in-between the main ranges); the in-between
		// constraint is necessary for correctness (there can be rows outside of the
		// partitioned ranges).
		//
		// For both constraints, the partition-related filters are passed as
		// "optional" which guarantees that they return no remaining filters. This
		// allows us to merge the remaining filters from both constraints.
		//
		// Consider the following index and its partition:
		//
		// CREATE INDEX orders_by_seq_num
		//     ON orders (region, seq_num, id)
		//     STORING (total)
		//     PARTITION BY LIST (region)
		//         (
		//             PARTITION us_east1 VALUES IN ('us-east1'),
		//             PARTITION us_west1 VALUES IN ('us-west1'),
		//             PARTITION europe_west2 VALUES IN ('europe-west2')
		//         )
		//
		// The constraint generated for the query:
		//   SELECT sum(total) FROM orders WHERE seq_num >= 100 AND seq_num < 200
		// is:
		//   [/'europe-west2'/100 - /'europe-west2'/199]
		//   [/'us-east1'/100 - /'us-east1'/199]
		//   [/'us-west1'/100 - /'us-west1'/199]
		//
		// The spans before europe-west2, after us-west1 and in between the defined
		// partitions are missing. We must add these spans now, appropriately
		// constrained using the filters.
		//
		// It is important that we add these spans after the partition spans are generated
		// because otherwise these spans would merge with the partition spans and would
		// disallow the partition spans (and the in between ones) to be constrained further.
		// Using the partitioning example and the query above, if we added the in between
		// spans at the same time as the partitioned ones, we would end up with a span that
		// looked like:
		//   [ - /'europe-west2'/99]
		//
		// Allowing the partition spans to be constrained further and then adding
		// the spans give us a more constrained index scan as shown below:
		//   [ - /'europe-west2')
		//   [/'europe-west2'/100 - /'europe-west2'/199]
		//   [/e'europe-west2\x00'/100 - /'us-east1')
		//   [/'us-east1'/100 - /'us-east1'/199]
		//   [/e'us-east1\x00'/100 - /'us-west1')
		//   [/'us-west1'/100 - /'us-west1'/199]
		//   [/e'us-west1\x00'/100 - ]
		//
		// Notice how we 'skip' all the europe-west2 rows with seq_num < 100.
		//
		var partitionFilters, inBetweenFilters memo.FiltersExpr

		indexColumns := tabMeta.IndexKeyColumns(index.Ordinal())
		firstIndexCol := scanPrivate.Table.IndexColumnID(index, 0)
		if !filterColumns.Contains(firstIndexCol) && indexColumns.Intersects(filterColumns) {
			// Calculate any partition filters if appropriate (see below).
			partitionFilters, inBetweenFilters = c.partitionValuesFilters(scanPrivate.Table, index)
		}

		// Check whether the filter (along with any partitioning filters) can constrain the index.
		constraint, remainingFilters, ok := c.tryConstrainIndex(
			filters,
			append(optionalFilters, partitionFilters...),
			scanPrivate.Table,
			index.Ordinal(),
		)
		if !ok {
			return
		}

		if len(partitionFilters) > 0 {
			// Constrain the same index a second time using the in-between
			// filters, then union the result with the main constraint (see
			// the large comment above for why this two-step dance is needed).
			inBetweenConstraint, inBetweenRemainingFilters, ok := c.tryConstrainIndex(
				filters,
				append(optionalFilters, inBetweenFilters...),
				scanPrivate.Table,
				index.Ordinal(),
			)
			if !ok {
				panic(errors.AssertionFailedf("in-between filters didn't yield a constraint"))
			}

			constraint.UnionWith(c.e.evalCtx, inBetweenConstraint)

			// Even though the partitioned constraints and the inBetween constraints
			// were consolidated, we must make sure their Union is as well.
			constraint.ConsolidateSpans(c.e.evalCtx)

			// Add all remaining filters that need to be present in the
			// inBetween spans. Some of the remaining filters are common
			// between them, so we must deduplicate them.
			remainingFilters = c.ConcatFilters(remainingFilters, inBetweenRemainingFilters)
			remainingFilters.Sort()
			remainingFilters.Deduplicate()
		}

		// Construct new constrained ScanPrivate.
		newScanPrivate := *scanPrivate
		newScanPrivate.Index = index.Ordinal()
		newScanPrivate.Cols = indexCols.Intersection(scanPrivate.Cols)
		newScanPrivate.Constraint = constraint
		// Record whether we were able to use partitions to constrain the scan.
		newScanPrivate.PartitionConstrainedScan = (len(partitionFilters) > 0)

		// If the alternate index includes the set of needed columns, then
		// construct a new Scan operator using that index.
		if isCovering {
			sb.SetScan(&newScanPrivate)

			// Project constants from partial index predicate filters, if there
			// are any.
			sb.AddConstProjections(constProj)

			// If there are remaining filters, then the constrained Scan operator
			// will be created in a new group, and a Select operator will be added
			// to the same group as the original operator.
			sb.AddSelect(remainingFilters)

			sb.Build(grp)
			return
		}

		// Otherwise, construct an IndexJoin operator that provides the columns
		// missing from the index.
		if scanPrivate.Flags.NoIndexJoin {
			return
		}

		// Calculate the PK columns once (shared across ForEach invocations).
		if pkCols.Empty() {
			pkCols = c.PrimaryKeyCols(scanPrivate.Table)
		}

		// If the index is not covering, scan the needed index columns plus
		// primary key columns.
		newScanPrivate.Cols.UnionWith(pkCols)
		sb.SetScan(&newScanPrivate)

		// If remaining filter exists, split it into one part that can be pushed
		// below the IndexJoin, and one part that needs to stay above.
		remainingFilters = sb.AddSelectAfterSplit(remainingFilters, newScanPrivate.Cols)
		sb.AddIndexJoin(scanPrivate.Cols)
		sb.AddSelect(remainingFilters)

		sb.Build(grp)
	})
}
// tryFoldComputedCol tries to reduce the computed column with the given column
// ID into a constant value, by evaluating it with respect to a set of other
// columns that are constant. If the computed column is constant, enter it into
// the constCols map and return true. Otherwise, return false.
// (Fixed doc bug: the success case previously said "return false".)
func (c *CustomFuncs) tryFoldComputedCol(
	tabMeta *opt.TableMeta, computedColID opt.ColumnID, constCols map[opt.ColumnID]opt.ScalarExpr,
) bool {
	// Check whether computed column has already been folded.
	if _, ok := constCols[computedColID]; ok {
		return true
	}

	// replace rewrites variable references to their constant values where
	// known, recursing into dependent computed columns.
	var replace func(e opt.Expr) opt.Expr
	replace = func(e opt.Expr) opt.Expr {
		if variable, ok := e.(*memo.VariableExpr); ok {
			// Can variable be folded?
			if constVal, ok := constCols[variable.Col]; ok {
				// Yes, so replace it with its constant value.
				return constVal
			}

			// No, but that may be because the variable refers to a dependent
			// computed column. In that case, try to recursively fold that
			// computed column. There are no infinite loops possible because the
			// dependency graph is guaranteed to be acyclic.
			if _, ok := tabMeta.ComputedCols[variable.Col]; ok {
				if c.tryFoldComputedCol(tabMeta, variable.Col, constCols) {
					return constCols[variable.Col]
				}
			}

			return e
		}
		return c.e.f.Replace(e, replace)
	}

	computedCol := tabMeta.ComputedCols[computedColID]
	replaced := replace(computedCol).(opt.ScalarExpr)

	// If the computed column is constant, enter it into the constCols map.
	if opt.IsConstValueOp(replaced) {
		constCols[computedColID] = replaced
		return true
	}
	return false
}
// inBetweenFilters returns a set of filters that are required to cover all the
// in-between spans given a set of partition values. This is required for
// correctness reasons; although values are unlikely to exist between defined
// partitions, they may exist and so the constraints of the scan must incorporate
// these spans.
func (c *CustomFuncs) inBetweenFilters(
	tabID opt.TableID, index cat.Index, partitionValues []tree.Datums,
) memo.FiltersExpr {
	var inBetween memo.ScalarListExpr

	if len(partitionValues) == 0 {
		return memo.EmptyFiltersExpr
	}

	// Sort the partitionValues lexicographically.
	sort.Slice(partitionValues, func(i, j int) bool {
		return partitionValues[i].Compare(c.e.evalCtx, partitionValues[j]) < 0
	})

	// Add the beginning span: everything below the smallest partition.
	beginExpr := c.columnComparison(tabID, index, partitionValues[0], -1)
	inBetween = append(inBetween, beginExpr)

	// Add the end span: everything above the largest partition.
	endExpr := c.columnComparison(tabID, index, partitionValues[len(partitionValues)-1], 1)
	inBetween = append(inBetween, endExpr)

	// Add the in-between spans.
	for i := 1; i < len(partitionValues); i++ {
		lowerPartition := partitionValues[i-1]
		higherPartition := partitionValues[i]

		// The between spans will be greater than the lower partition but smaller
		// than the higher partition.
		var largerThanLower opt.ScalarExpr
		if c.isPrefixOf(lowerPartition, higherPartition) {
			// Since the lower partition is a prefix of the higher partition, the span
			// must begin with the values defined in the lower partition. Consider the
			// partitions ('us') and ('us', 'cali'). In this case the in-between span
			// should be [/'us - /'us'/'cali').
			largerThanLower = c.columnComparison(tabID, index, lowerPartition, 0)
		} else {
			largerThanLower = c.columnComparison(tabID, index, lowerPartition, 1)
		}

		smallerThanHigher := c.columnComparison(tabID, index, higherPartition, -1)

		// Add the in-between span to the list of inBetween spans.
		betweenExpr := c.e.f.ConstructAnd(largerThanLower, smallerThanHigher)
		inBetween = append(inBetween, betweenExpr)
	}

	// Return an Or expression between all the expressions.
	return memo.FiltersExpr{c.e.f.ConstructFiltersItem(c.constructOr(inBetween))}
}
// constructOr returns the disjunction (OR) of all the provided conditions,
// folded so that the first condition ends up innermost. It returns False for
// an empty list.
func (c *CustomFuncs) constructOr(conditions memo.ScalarListExpr) opt.ScalarExpr {
	if len(conditions) == 0 {
		return c.e.f.ConstructFalse()
	}
	result := conditions[0]
	for _, cond := range conditions[1:] {
		result = c.e.f.ConstructOr(cond, result)
	}
	return result
}
// columnComparison returns a filter comparing the index columns, packed into
// a tuple, against the given values. The comp parameter selects the
// comparison operator: negative for Lt, zero for Eq, positive for Gt.
func (c *CustomFuncs) columnComparison(
	tabID opt.TableID, index cat.Index, values tree.Datums, comp int,
) opt.ScalarExpr {
	n := len(values)
	colTypes := make([]*types.T, n)
	columnVariables := make(memo.ScalarListExpr, n)
	scalarValues := make(memo.ScalarListExpr, n)
	for i, val := range values {
		colTypes[i] = val.ResolvedType()
		columnVariables[i] = c.e.f.ConstructVariable(tabID.IndexColumnID(index, i))
		scalarValues[i] = c.e.f.ConstructConstVal(val, val.ResolvedType())
	}
	colsTuple := c.e.f.ConstructTuple(columnVariables, types.MakeTuple(colTypes))
	valsTuple := c.e.f.ConstructTuple(scalarValues, types.MakeTuple(colTypes))
	switch {
	case comp == 0:
		return c.e.f.ConstructEq(colsTuple, valsTuple)
	case comp > 0:
		return c.e.f.ConstructGt(colsTuple, valsTuple)
	default:
		return c.e.f.ConstructLt(colsTuple, valsTuple)
	}
}
// inPartitionFilters returns a FiltersExpr that covers all the partition
// spans: one disjunct per partition, restricting the index columns to that
// partition's values. Partitions that are prefixes of a more selective
// partition are omitted, since the in-between filters cover them. Use
// inBetweenFilters to generate filters that cover all the spans that the
// partitions don't cover.
func (c *CustomFuncs) inPartitionFilters(
	tabID opt.TableID, index cat.Index, partitionValues []tree.Datums,
) memo.FiltersExpr {
	// Sort the partition values so the most selective (longest) ones come
	// first.
	sort.Slice(partitionValues, func(i, j int) bool {
		return len(partitionValues[i]) >= len(partitionValues[j])
	})

	var partitions memo.ScalarListExpr
	for i, partition := range partitionValues {
		// Skip this partition if it is a prefix of a more selective partition
		// already handled; the in-between partitions take care of it.
		isPrefix := false
		for _, moreSelective := range partitionValues[:i] {
			if isPrefix = c.isPrefixOf(partition, moreSelective); isPrefix {
				break
			}
		}
		if isPrefix {
			continue
		}

		// Restrict the index columns to this partition's values.
		partitions = append(partitions, c.columnComparison(tabID, index, partition, 0))
	}

	// Return an Or expression between all the expressions.
	return memo.FiltersExpr{c.e.f.ConstructFiltersItem(c.constructOr(partitions))}
}
// isPrefixOf reports whether the datums in pre form a prefix of those in
// other.
func (c *CustomFuncs) isPrefixOf(pre []tree.Datum, other []tree.Datum) bool {
	if len(other) < len(pre) {
		// A longer list can never be a prefix of a shorter one.
		return false
	}
	for i, d := range pre {
		if d.Compare(c.e.evalCtx, other[i]) != 0 {
			return false
		}
	}
	return true
}
// partitionValuesFilters constructs filters with the purpose of
// constraining an index scan using the partition values similar to
// the filters added from the check constraints (see
// checkConstraintFilters). It returns two sets of filters, one to
// create the partition spans, and one to create the spans for all
// the in between ranges that are not part of any partitions.
//
// For example consider the following table and partitioned index:
//
// CREATE TABLE orders (
//     region STRING NOT NULL, id INT8 NOT NULL, total DECIMAL NOT NULL, seq_num INT NOT NULL,
//     PRIMARY KEY (region, id)
// )
//
// CREATE INDEX orders_by_seq_num
//     ON orders (region, seq_num, id)
//     STORING (total)
//     PARTITION BY LIST (region)
//         (
//             PARTITION us_east1 VALUES IN ('us-east1'),
//             PARTITION us_west1 VALUES IN ('us-west1'),
//             PARTITION europe_west2 VALUES IN ('europe-west2')
//         )
//
// Now consider the following query:
// SELECT sum(total) FROM orders WHERE seq_num >= 100 AND seq_num < 200
//
// Normally, the index would not be utilized but because we know what the
// partition values are for the prefix of the index, we can generate
// filters that allow us to use the index (adding the appropriate in-between
// filters to catch all the values that are not part of the partitions).
// By doing so, we get the following plan:
// scalar-group-by
//  ├── select
//  │    ├── scan orders@orders_by_seq_num
//  │    │    └── constraint: /1/4/2: [ - /'europe-west2')
//  │    │                            [/'europe-west2'/100 - /'europe-west2'/199]
//  │    │                            [/e'europe-west2\x00'/100 - /'us-east1')
//  │    │                            [/'us-east1'/100 - /'us-east1'/199]
//  │    │                            [/e'us-east1\x00'/100 - /'us-west1')
//  │    │                            [/'us-west1'/100 - /'us-west1'/199]
//  │    │                            [/e'us-west1\x00'/100 - ]
//  │    └── filters
//  │         └── (seq_num >= 100) AND (seq_num < 200)
//  └── aggregations
//       └── sum
//            └── variable: total
//
func (c *CustomFuncs) partitionValuesFilters(
	tabID opt.TableID, index cat.Index,
) (partitionFilter, inBetweenFilter memo.FiltersExpr) {
	// Gather the list-partition value prefixes from every partition of the
	// index.
	var partitionValues []tree.Datums
	for i, n := 0, index.PartitionCount(); i < n; i++ {
		partitionValues = append(partitionValues, index.Partition(i).PartitionByListPrefixes()...)
	}
	if len(partitionValues) == 0 {
		// Without partition values there is nothing to constrain with.
		return nil, nil
	}
	// Build both the in-partition and the in-between filter sets from the
	// same value list.
	return c.inPartitionFilters(tabID, index, partitionValues),
		c.inBetweenFilters(tabID, index, partitionValues)
}
// GenerateInvertedIndexScans enumerates all inverted indexes on the Scan
// operator's table and generates an alternate Scan operator for each inverted
// index that can service the query.
//
// The resulting Scan operator is pre-constrained and requires an IndexJoin to
// project columns other than the primary key columns. The reason it's pre-
// constrained is that we cannot treat an inverted index in the same way as a
// regular index, since it does not actually contain the indexed column.
//
// grp is the memo group the generated expressions are added to; scanPrivate
// describes the original scan; filters are the filters applied on top of it.
func (c *CustomFuncs) GenerateInvertedIndexScans(
	grp memo.RelExpr, scanPrivate *memo.ScanPrivate, filters memo.FiltersExpr,
) {
	// pkCols is computed lazily, only once an index actually produces a scan.
	var pkCols opt.ColSet
	var sb indexScanBuilder
	sb.Init(c, scanPrivate.Table)
	tabMeta := c.e.mem.Metadata().TableMeta(scanPrivate.Table)
	// Generate implicit filters from constraints and computed columns as
	// optional filters to help constrain an index scan.
	optionalFilters := c.checkConstraintFilters(scanPrivate.Table)
	computedColFilters := c.computedColFilters(scanPrivate, filters, optionalFilters)
	optionalFilters = append(optionalFilters, computedColFilters...)
	// Iterate over all inverted indexes.
	var iter scanIndexIter
	iter.Init(c.e.evalCtx, c.e.f, c.e.mem, &c.im, scanPrivate, filters, rejectNonInvertedIndexes)
	iter.ForEach(func(index cat.Index, filters memo.FiltersExpr, indexCols opt.ColSet, _ bool, _ memo.ProjectionsExpr) {
		// Check whether the filter can constrain the index.
		spanExpr, constraint, remainingFilters, pfState, ok := invertedidx.TryFilterInvertedIndex(
			c.e.evalCtx, c.e.f, filters, optionalFilters, scanPrivate.Table, index, tabMeta.ComputedCols,
		)
		if !ok {
			// A span expression to constrain the inverted index could not be
			// generated.
			return
		}
		spansToRead := spanExpr.SpansToRead
		// Override the filters with remainingFilters. If the index is a
		// multi-column inverted index, the non-inverted prefix columns are
		// constrained by the constraint. In this case, it may be possible to
		// reduce the filters if the constraint fully describes some of
		// sub-expressions. The remainingFilters are the filters that are not
		// fully expressed by the constraint.
		//
		// Consider the example:
		//
		//   CREATE TABLE t (a INT, b INT, g GEOMETRY, INVERTED INDEX (b, g))
		//
		//   SELECT * FROM t WHERE a = 1 AND b = 2 AND ST_Intersects(.., g)
		//
		// The constraint would constrain b to [/2 - /2], guaranteeing that
		// the inverted index scan would only produce rows where (b = 2).
		// Reapplying the (b = 2) filter after the scan would be
		// unnecessary, so the remainingFilters in this case would be
		// (a = 1 AND ST_Intersects(.., g)).
		filters = remainingFilters
		// Construct new ScanOpDef with the new index and constraint.
		newScanPrivate := *scanPrivate
		newScanPrivate.Index = index.Ordinal()
		newScanPrivate.Constraint = constraint
		newScanPrivate.InvertedConstraint = spansToRead
		// Calculate the PK columns once.
		if pkCols.Empty() {
			pkCols = c.PrimaryKeyCols(scanPrivate.Table)
		}
		// We will need an inverted filter above the scan if the spanExpr might
		// produce duplicate primary keys or requires at least one UNION or
		// INTERSECTION. In this case, we must scan both the primary key columns
		// and the inverted key column.
		needInvertedFilter := !spanExpr.Unique || spanExpr.Operator != inverted.None
		newScanPrivate.Cols = pkCols.Copy()
		var invertedCol opt.ColumnID
		if needInvertedFilter {
			invertedCol = scanPrivate.Table.ColumnID(index.VirtualInvertedColumn().Ordinal())
			newScanPrivate.Cols.Add(invertedCol)
		}
		// The Scan operator always goes in a new group, since it's always nested
		// underneath the IndexJoin. The IndexJoin may also go into its own group,
		// if there's a remaining filter above it.
		// TODO(mgartner): We don't always need to create an index join. The
		// index join will be removed by EliminateIndexJoinInsideProject, but
		// it'd be more efficient to not create the index join in the first
		// place.
		sb.SetScan(&newScanPrivate)
		// Add an inverted filter if needed.
		if needInvertedFilter {
			sb.AddInvertedFilter(spanExpr, pfState, invertedCol)
		}
		// If remaining filter exists, split it into one part that can be pushed
		// below the IndexJoin, and one part that needs to stay above.
		filters = sb.AddSelectAfterSplit(filters, pkCols)
		sb.AddIndexJoin(scanPrivate.Cols)
		sb.AddSelect(filters)
		sb.Build(grp)
	})
}
// tryConstrainIndex tries to derive a constraint for the given index from the
// specified filter. If a constraint is derived, it is returned along with any
// filter remaining after extracting the constraint. If no constraint can be
// derived, then tryConstrainIndex returns ok = false.
func (c *CustomFuncs) tryConstrainIndex(
requiredFilters, optionalFilters memo.FiltersExpr, tabID opt.TableID, indexOrd int,
) (constraint *constraint.Constraint, remainingFilters memo.FiltersExpr, ok bool) {
// Start with fast check to rule out indexes that cannot be constrained.
if !c.canMaybeConstrainNonInvertedIndex(requiredFilters, tabID, indexOrd) &&
!c.canMaybeConstrainNonInvertedIndex(optionalFilters, tabID, indexOrd) {
return nil, nil, false
}
ic := c.initIdxConstraintForIndex(requiredFilters, optionalFilters, tabID, indexOrd)
constraint = ic.Constraint()
if constraint.IsUnconstrained() {
return nil, nil, false
}
// Return 0 if no remaining filter.
remaining := ic.RemainingFilters()
// Make copy of constraint so that idxconstraint instance is not referenced.
copy := *constraint
return ©, remaining, true
}
// canMaybeConstrainNonInvertedIndex returns true if we should try to constrain
// a given non-inverted index by the given filter. It returns false if it is
// impossible for the filter to constrain the scan.
//
// If any of the three following statements are true, then it is
// possible that the index can be constrained:
//
//   1. The filter references the first index column.
//   2. The constraints are not tight (see props.Scalar.TightConstraints).
//   3. Any of the filter's constraints start with the first index column.
//
func (c *CustomFuncs) canMaybeConstrainNonInvertedIndex(
	filters memo.FiltersExpr, tabID opt.TableID, indexOrd int,
) bool {
	md := c.e.mem.Metadata()
	index := md.Table(tabID).Index(indexOrd)
	firstIndexCol := tabID.IndexColumnID(index, 0)
	for i := range filters {
		scalarProps := filters[i].ScalarProps()
		// A filter that mentions the first index column may be able to
		// constrain the index.
		if scalarProps.OuterCols.Contains(firstIndexCol) {
			return true
		}
		// Index constraint generation supports more expressions than filter
		// constraint generation, so a filter with non-tight constraints might
		// still produce an index constraint.
		if !scalarProps.TightConstraints {
			return true
		}
		// A derived constraint that starts with the first index column can
		// also constrain the index.
		cset := scalarProps.Constraints
		for j, n := 0, cset.Length(); j < n; j++ {
			if cset.Constraint(j).Columns.Get(0).ID() == firstIndexCol {
				return true
			}
		}
	}
	return false
}
// GenerateZigzagJoins generates zigzag joins for all pairs of indexes of the
// Scan table which contain one of the constant columns in the FiltersExpr as
// its prefix.
//
// Similar to the lookup join, if the selected index pair does not contain
// all the columns in the output of the scan, we wrap the zigzag join
// in another index join (implemented as a lookup join) on the primary index.
// The index join is implemented with a lookup join since the index join does
// not support arbitrary input sources that are not plain index scans.
//
// grp is the memo group the generated joins are added to; scanPrivate
// describes the original scan; filters are the filters applied on top of it.
func (c *CustomFuncs) GenerateZigzagJoins(
	grp memo.RelExpr, scanPrivate *memo.ScanPrivate, filters memo.FiltersExpr,
) {
	tab := c.e.mem.Metadata().Table(scanPrivate.Table)
	// Short circuit unless zigzag joins are explicitly enabled.
	if !c.e.evalCtx.SessionData.ZigzagJoinEnabled {
		return
	}
	// fixedCols is the set of columns the filters constrain to constants.
	fixedCols := memo.ExtractConstColumns(filters, c.e.evalCtx)
	if fixedCols.Len() == 0 {
		// Zigzagging isn't helpful in the absence of fixed columns.
		return
	}
	// Zigzag joins aren't currently equipped to produce system columns, so
	// don't generate any if some system columns are requested.
	foundSystemCol := false
	scanPrivate.Cols.ForEach(func(colID opt.ColumnID) {
		if tab.Column(scanPrivate.Table.ColumnOrdinal(colID)).Kind() == cat.System {
			foundSystemCol = true
		}
	})
	if foundSystemCol {
		return
	}
	// Iterate through indexes, looking for those prefixed with fixedEq cols.
	// Efficiently finding a set of indexes that make the most efficient zigzag
	// join, with no limit on the number of indexes selected, is an instance of
	// this NP-hard problem:
	// https://en.wikipedia.org/wiki/Maximum_coverage_problem
	//
	// A formal definition would be: Suppose we have a set of fixed columns F
	// (defined as fixedCols in the code above), and a set of indexes I. The
	// "fixed prefix" of every index, in this context, refers to the longest
	// prefix of each index's columns that is in F. In other words, we stop
	// adding to the prefix when we come across the first non-fixed column
	// in an index.
	//
	// We want to find at most k = 2 indexes from I (in the future k could be
	// >= 2 when the zigzag joiner supports 2+ index zigzag joins) that cover
	// the maximum number of columns in F. An index is defined to have covered
	// a column if that column is in the index's fixed prefix.
	//
	// Since only 2-way zigzag joins are currently supported, the naive
	// approach is bounded at n^2. For now, just do that - a quadratic
	// iteration through all indexes.
	//
	// TODO(itsbilal): Implement the greedy or weighted version of the
	// algorithm laid out here:
	// https://en.wikipedia.org/wiki/Maximum_coverage_problem
	//
	// TODO(mgartner): We should consider primary indexes when it has multiple
	// columns and only the first is being constrained.
	var iter scanIndexIter
	iter.Init(c.e.evalCtx, c.e.f, c.e.mem, &c.im, scanPrivate, filters, rejectPrimaryIndex|rejectInvertedIndexes)
	iter.ForEach(func(leftIndex cat.Index, outerFilters memo.FiltersExpr, leftCols opt.ColSet, _ bool, _ memo.ProjectionsExpr) {
		leftFixed := c.indexConstrainedCols(leftIndex, scanPrivate.Table, fixedCols)
		// Short-circuit quickly if the first column in the index is not a fixed
		// column.
		if leftFixed.Len() == 0 {
			return
		}
		// Inner iteration: pair the left index with every later index.
		var iter2 scanIndexIter
		iter2.Init(c.e.evalCtx, c.e.f, c.e.mem, &c.im, scanPrivate, outerFilters, rejectPrimaryIndex|rejectInvertedIndexes)
		iter2.SetOriginalFilters(filters)
		iter2.ForEachStartingAfter(leftIndex.Ordinal(), func(rightIndex cat.Index, innerFilters memo.FiltersExpr, rightCols opt.ColSet, _ bool, _ memo.ProjectionsExpr) {
			rightFixed := c.indexConstrainedCols(rightIndex, scanPrivate.Table, fixedCols)
			// If neither side contributes a fixed column not contributed by the
			// other, then there's no reason to zigzag on this pair of indexes.
			if leftFixed.SubsetOf(rightFixed) || rightFixed.SubsetOf(leftFixed) {
				return
			}
			// Columns that are in both indexes are, by definition, equal.
			eqCols := leftCols.Intersection(rightCols)
			eqCols.DifferenceWith(fixedCols)
			if eqCols.Len() == 0 {
				// A simple index join is more efficient in such cases.
				return
			}
			// If there are any equalities across the columns of the two indexes,
			// push them into the zigzag join spec.
			leftEq, rightEq := memo.ExtractJoinEqualityColumns(
				leftCols, rightCols, innerFilters,
			)
			leftEqCols, rightEqCols := eqColsForZigzag(
				tab,
				scanPrivate.Table,
				leftIndex,
				rightIndex,
				fixedCols,
				leftEq,
				rightEq,
			)
			if len(leftEqCols) == 0 || len(rightEqCols) == 0 {
				// One of the indexes is not sorted by any of the equality
				// columns, because the equality columns do not immediately
				// succeed the fixed columns. A zigzag join cannot be planned.
				return
			}
			// Confirm the primary key columns are in both leftEqCols and
			// rightEqCols. The conversion of a select with filters to a
			// zigzag join requires the primary key columns to be in the output
			// for output correctness; otherwise, we could be outputting more
			// results than there should be (due to an equality on a non-unique
			// non-required value).
			pkIndex := tab.Index(cat.PrimaryIndex)
			pkCols := make(opt.ColList, pkIndex.KeyColumnCount())
			pkColsFound := true
			for i := range pkCols {
				pkCols[i] = scanPrivate.Table.IndexColumnID(pkIndex, i)
				if _, ok := leftEqCols.Find(pkCols[i]); !ok {
					pkColsFound = false
					break
				}
				if _, ok := rightEqCols.Find(pkCols[i]); !ok {
					pkColsFound = false
					break
				}
			}
			if !pkColsFound {
				return
			}
			// Collect the fixed columns, values, and types for each side.
			leftFixedCols, leftVals, leftTypes := c.fixedColsForZigzag(
				leftIndex, scanPrivate.Table, innerFilters,
			)
			rightFixedCols, rightVals, rightTypes := c.fixedColsForZigzag(
				rightIndex, scanPrivate.Table, innerFilters,
			)
			// If the fixed cols have been reduced during partial index
			// implication, then a zigzag join cannot be planned. A single index
			// scan should be more efficient.
			if len(leftFixedCols) != leftFixed.Len() || len(rightFixedCols) != rightFixed.Len() {
				return
			}
			zigzagJoin := memo.ZigzagJoinExpr{
				On: innerFilters,
				ZigzagJoinPrivate: memo.ZigzagJoinPrivate{
					LeftTable:      scanPrivate.Table,
					LeftIndex:      leftIndex.Ordinal(),
					RightTable:     scanPrivate.Table,
					RightIndex:     rightIndex.Ordinal(),
					LeftEqCols:     leftEqCols,
					RightEqCols:    rightEqCols,
					LeftFixedCols:  leftFixedCols,
					RightFixedCols: rightFixedCols,
				},
			}
			// FixedVals holds one tuple of constant values per side.
			leftTupleTyp := types.MakeTuple(leftTypes)
			rightTupleTyp := types.MakeTuple(rightTypes)
			zigzagJoin.FixedVals = memo.ScalarListExpr{
				c.e.f.ConstructTuple(leftVals, leftTupleTyp),
				c.e.f.ConstructTuple(rightVals, rightTupleTyp),
			}
			// Drop the join filters already implied by the equality columns.
			zigzagJoin.On = memo.ExtractRemainingJoinFilters(
				innerFilters,
				zigzagJoin.LeftEqCols,
				zigzagJoin.RightEqCols,
			)
			zigzagCols := leftCols.Copy()
			zigzagCols.UnionWith(rightCols)
			if scanPrivate.Cols.SubsetOf(zigzagCols) {
				// Case 1 (zigzagged indexes contain all requested columns).
				zigzagJoin.Cols = scanPrivate.Cols
				c.e.mem.AddZigzagJoinToGroup(&zigzagJoin, grp)
				return
			}
			if scanPrivate.Flags.NoIndexJoin {
				return
			}
			// Case 2 (wrap zigzag join in an index join).
			var indexJoin memo.LookupJoinExpr
			// Ensure the zigzag join returns pk columns.
			zigzagJoin.Cols = scanPrivate.Cols.Intersection(zigzagCols)
			for i := range pkCols {
				zigzagJoin.Cols.Add(pkCols[i])
			}
			if c.FiltersBoundBy(zigzagJoin.On, zigzagCols) {
				// The ON condition refers only to the columns available in the zigzag
				// indices.
				indexJoin.On = memo.TrueFilter
			} else {
				// ON has some conditions that are bound by the columns in the index (at
				// the very least, the equality conditions we used for EqCols and FixedCols),
				// and some conditions that refer to other table columns. We can put
				// the former in the lower ZigzagJoin and the latter in the index join.
				conditions := zigzagJoin.On
				zigzagJoin.On = c.ExtractBoundConditions(conditions, zigzagCols)
				indexJoin.On = c.ExtractUnboundConditions(conditions, zigzagCols)
			}
			indexJoin.Input = c.e.f.ConstructZigzagJoin(
				zigzagJoin.On,
				&zigzagJoin.ZigzagJoinPrivate,
			)
			indexJoin.JoinType = opt.InnerJoinOp
			indexJoin.Table = scanPrivate.Table
			indexJoin.Index = cat.PrimaryIndex
			indexJoin.KeyCols = pkCols
			indexJoin.Cols = scanPrivate.Cols
			indexJoin.LookupColsAreTableKey = true
			// Create the LookupJoin for the index join in the same group as the
			// original select.
			c.e.mem.AddLookupJoinToGroup(&indexJoin, grp)
		})
	})
}
// eqColsForZigzag is a helper function to generate eqCol lists for the zigzag
// joiner. The zigzag joiner requires that the equality columns immediately
// follow the fixed columns in the index. Fixed here refers to columns that
// have been constrained to a constant value.
//
// There are two kinds of equality columns that this function takes care of:
// columns that have the same ColumnID on both sides (i.e. the same column),
// as well as columns that have been equated in some ON filter (i.e. they are
// contained in leftEqCols and rightEqCols at the same index).
//
// This function iterates through all columns of the indexes in order,
// skips past the fixed columns, and then generates however many eqCols
// there are that meet the above criteria.
//
// Returns a list of column ordinals for each index.
//
// See the comment in pkg/sql/rowexec/zigzag_joiner.go for more details
// on the role eqCols and fixed cols play in zigzag joins.
func eqColsForZigzag(
	tab cat.Table,
	tabID opt.TableID,
	leftIndex cat.Index,
	rightIndex cat.Index,
	fixedCols opt.ColSet,
	leftEqCols opt.ColList,
	rightEqCols opt.ColList,
) (leftEqPrefix, rightEqPrefix opt.ColList) {
	leftEqPrefix = make(opt.ColList, 0, len(leftEqCols))
	rightEqPrefix = make(opt.ColList, 0, len(rightEqCols))
	// We can only zigzag on columns present in the key component of the index,
	// so use the LaxKeyColumnCount here because that's the longest prefix of the
	// columns in the index which is guaranteed to exist in the key component.
	// Using KeyColumnCount is invalid, because if we have a unique index with
	// nullable columns, the "key columns" include the primary key of the table,
	// which is only present in the key component if one of the other columns is
	// NULL.
	i, leftCnt := 0, leftIndex.LaxKeyColumnCount()
	j, rightCnt := 0, rightIndex.LaxKeyColumnCount()
	// Skip past the fixed-column prefix of each index.
	for ; i < leftCnt; i++ {
		colID := tabID.IndexColumnID(leftIndex, i)
		if !fixedCols.Contains(colID) {
			break
		}
	}
	for ; j < rightCnt; j++ {
		colID := tabID.IndexColumnID(rightIndex, j)
		if !fixedCols.Contains(colID) {
			break
		}
	}
	// Collect equality columns until the first pair that is neither the same
	// column nor equated by a filter.
	for i < leftCnt && j < rightCnt {
		leftColID := tabID.IndexColumnID(leftIndex, i)
		rightColID := tabID.IndexColumnID(rightIndex, j)
		i++
		j++
		if leftColID == rightColID {
			leftEqPrefix = append(leftEqPrefix, leftColID)
			rightEqPrefix = append(rightEqPrefix, rightColID)
			continue
		}
		leftIdx, leftOk := leftEqCols.Find(leftColID)
		rightIdx, rightOk := rightEqCols.Find(rightColID)
		// If both columns are at the same index in their respective
		// EqCols lists, they were equated in the filters.
		if leftOk && rightOk && leftIdx == rightIdx {
			leftEqPrefix = append(leftEqPrefix, leftColID)
			rightEqPrefix = append(rightEqPrefix, rightColID)
			continue
		}
		// We've reached the first non-equal column; the zigzag
		// joiner does not support non-contiguous/non-prefix equal
		// columns.
		break
	}
	return leftEqPrefix, rightEqPrefix
}
// fixedColsForZigzag is a helper function to generate FixedCols lists for the
// zigzag join expression. It walks the columns of the given index in order
// and collects each column that the filters constrain to a constant value,
// stopping at the first column that is not so constrained.
func (c *CustomFuncs) fixedColsForZigzag(
	index cat.Index, tabID opt.TableID, filters memo.FiltersExpr,
) (fixedCols opt.ColList, vals memo.ScalarListExpr, typs []*types.T) {
	cnt := index.ColumnCount()
	for i := 0; i < cnt; i++ {
		colID := tabID.IndexColumnID(index, i)
		val := memo.ExtractValueForConstColumn(filters, c.e.evalCtx, colID)
		if val == nil {
			// The first non-constant column ends the fixed prefix.
			break
		}
		if fixedCols == nil {
			// Lazily allocate the result slices with capacity for the
			// remaining columns.
			fixedCols = make(opt.ColList, 0, cnt-i)
			vals = make(memo.ScalarListExpr, 0, cnt-i)
			typs = make([]*types.T, 0, cnt-i)
		}
		typ := val.ResolvedType()
		fixedCols = append(fixedCols, colID)
		vals = append(vals, c.e.f.ConstructConstVal(val, typ))
		typs = append(typs, typ)
	}
	return fixedCols, vals, typs
}
// indexConstrainedCols computes the set of columns in allFixedCols which form
// a prefix of the key columns in idx.
func (c *CustomFuncs) indexConstrainedCols(
	idx cat.Index, tab opt.TableID, allFixedCols opt.ColSet,
) opt.ColSet {
	var constrained opt.ColSet
	for i, n := 0, idx.ColumnCount(); i < n; i++ {
		col := tab.IndexColumnID(idx, i)
		if !allFixedCols.Contains(col) {
			// The prefix ends at the first non-fixed column.
			break
		}
		constrained.Add(col)
	}
	return constrained
}
// GenerateInvertedIndexZigzagJoins generates zigzag joins for constraints on
// inverted index. It looks for cases where one inverted index can satisfy
// two constraints, and it produces zigzag joins with the same index on both
// sides of the zigzag join for those cases, fixed on different constant values.
//
// grp is the memo group the generated joins are added to; scanPrivate
// describes the original scan; filters are the filters applied on top of it.
func (c *CustomFuncs) GenerateInvertedIndexZigzagJoins(
	grp memo.RelExpr, scanPrivate *memo.ScanPrivate, filters memo.FiltersExpr,
) {
	// Short circuit unless zigzag joins are explicitly enabled.
	if !c.e.evalCtx.SessionData.ZigzagJoinEnabled {
		return
	}
	var sb indexScanBuilder
	sb.Init(c, scanPrivate.Table)
	// Iterate over all inverted indexes.
	var iter scanIndexIter
	iter.Init(c.e.evalCtx, c.e.f, c.e.mem, &c.im, scanPrivate, filters, rejectNonInvertedIndexes)
	iter.ForEach(func(index cat.Index, filters memo.FiltersExpr, indexCols opt.ColSet, _ bool, _ memo.ProjectionsExpr) {
		if index.NonInvertedPrefixColumnCount() > 0 {
			// TODO(mgartner): We don't yet support using multi-column inverted
			// indexes with zigzag joins.
			return
		}
		// Check whether the filter can constrain the index with spans that
		// are guaranteed not to produce duplicate primary keys.
		// TODO(mgartner): Once we support multi-column inverted indexes, pass
		// optional filters generated from CHECK constraints and computed column
		// expressions to help constrain non-inverted prefix columns.
		spanExpr, _, _, _, ok := invertedidx.TryFilterInvertedIndex(
			c.e.evalCtx,
			c.e.f, filters,
			nil, /* optionalFilters */
			scanPrivate.Table,
			index,
			nil, /* computedColumns */
		)
		if !ok {
			return
		}
		// Recursively traverse the span expression to find single-value spans.
		//
		// We'll store at most two values in vals, so initialize the slice with
		// sufficient capacity.
		vals := make([]inverted.EncVal, 0, 2)
		var getVals func(invertedExpr inverted.Expression)
		getVals = func(invertedExpr inverted.Expression) {
			if len(vals) >= 2 {
				// We only need two constraints to plan a zigzag join, so don't bother
				// exploring further.
				// TODO(rytaft): use stats here to choose the two most selective
				// constraints instead of the first two.
				return
			}
			spanExprLocal, ok := invertedExpr.(*inverted.SpanExpression)
			if !ok {
				// The invertedExpr was a NonInvertedColExpression and cannot be used
				// to constrain the index. (This shouldn't ever happen, since
				// TryFilterInvertedIndex should have returned ok=false in this case,
				// but we don't want to panic if it does happen.)
				return
			}
			switch spanExprLocal.Operator {
			case inverted.SetIntersection:
				if len(spanExprLocal.FactoredUnionSpans) > 0 {
					// This is equivalent to a UNION between the FactoredUnionSpans and
					// the intersected children, so we can't build a zigzag join with
					// this subtree.
					return
				}
				getVals(spanExprLocal.Left)
				getVals(spanExprLocal.Right)
				return
			case inverted.SetUnion:
				// Don't recurse into UNIONs. We can't build a zigzag join with this
				// subtree.
				return
			}
			// Check that this span expression represents a single-key span that is
			// guaranteed not to produce duplicate primary keys.
			if spanExprLocal.Unique && len(spanExprLocal.SpansToRead) == 1 &&
				spanExprLocal.SpansToRead[0].IsSingleVal() {
				vals = append(vals, spanExprLocal.SpansToRead[0].Start)
			}
		}
		getVals(spanExpr)
		if len(vals) < 2 {
			// A zigzag join needs two fixed values to zigzag between.
			return
		}
		// We treat the fixed values for JSON and Array as DBytes.
		leftVal := tree.DBytes(vals[0])
		rightVal := tree.DBytes(vals[1])
		// The same index appears on both sides, fixed on different values.
		zigzagJoin := memo.ZigzagJoinExpr{
			On: filters,
			ZigzagJoinPrivate: memo.ZigzagJoinPrivate{
				LeftTable:  scanPrivate.Table,
				LeftIndex:  index.Ordinal(),
				RightTable: scanPrivate.Table,
				RightIndex: index.Ordinal(),
			},
		}
		// The fixed columns include all the prefix columns and the inverted column.
		fixedColsCount := index.NonInvertedPrefixColumnCount() + 1
		// Get constant values and add them to FixedVals as tuples, with associated
		// Column IDs in both {Left,Right}FixedCols.
		leftVals := make(memo.ScalarListExpr, fixedColsCount)
		leftTypes := make([]*types.T, fixedColsCount)
		rightVals := make(memo.ScalarListExpr, fixedColsCount)
		rightTypes := make([]*types.T, fixedColsCount)
		zigzagJoin.LeftFixedCols = make(opt.ColList, fixedColsCount)
		zigzagJoin.RightFixedCols = make(opt.ColList, fixedColsCount)
		// TODO(rytaft): set types, values, and fixed columns for the prefix
		// columns here.
		// invertedColIdx is the position of the inverted column in the inverted
		// index.
		invertedColIdx := index.NonInvertedPrefixColumnCount()
		leftVals[invertedColIdx] = c.e.f.ConstructConstVal(&leftVal, leftVal.ResolvedType())
		leftTypes[invertedColIdx] = leftVal.ResolvedType()
		rightVals[invertedColIdx] = c.e.f.ConstructConstVal(&rightVal, rightVal.ResolvedType())
		rightTypes[invertedColIdx] = rightVal.ResolvedType()
		invertedCol := scanPrivate.Table.ColumnID(index.VirtualInvertedColumn().Ordinal())
		zigzagJoin.LeftFixedCols[invertedColIdx] = invertedCol
		zigzagJoin.RightFixedCols[invertedColIdx] = invertedCol
		leftTupleTyp := types.MakeTuple(leftTypes)
		rightTupleTyp := types.MakeTuple(rightTypes)
		zigzagJoin.FixedVals = memo.ScalarListExpr{
			c.e.f.ConstructTuple(leftVals, leftTupleTyp),
			c.e.f.ConstructTuple(rightVals, rightTupleTyp),
		}
		// Set equality columns - all remaining columns after the fixed prefix
		// need to be equal.
		eqColLen := index.ColumnCount() - fixedColsCount
		zigzagJoin.LeftEqCols = make(opt.ColList, eqColLen)
		zigzagJoin.RightEqCols = make(opt.ColList, eqColLen)
		for i := fixedColsCount; i < index.ColumnCount(); i++ {
			colID := scanPrivate.Table.IndexColumnID(index, i)
			zigzagJoin.LeftEqCols[i-fixedColsCount] = colID
			zigzagJoin.RightEqCols[i-fixedColsCount] = colID
		}
		zigzagJoin.On = filters
		// Don't output the first column (i.e. the inverted index's JSON key
		// col) from the zigzag join. It could contain partial values, so
		// presenting it in the output or checking ON conditions against
		// it makes little sense.
		zigzagCols := indexCols
		for i, cnt := 0, index.KeyColumnCount(); i < cnt; i++ {
			colID := scanPrivate.Table.IndexColumnID(index, i)
			zigzagCols.Remove(colID)
		}
		tab := c.e.mem.Metadata().Table(scanPrivate.Table)
		pkIndex := tab.Index(cat.PrimaryIndex)
		pkCols := make(opt.ColList, pkIndex.KeyColumnCount())
		for i := range pkCols {
			pkCols[i] = scanPrivate.Table.IndexColumnID(pkIndex, i)
			// Ensure primary key columns are always retrieved from the zigzag
			// join.
			zigzagCols.Add(pkCols[i])
		}
		// Case 1 (zigzagged indexes contain all requested columns).
		if scanPrivate.Cols.SubsetOf(zigzagCols) {
			zigzagJoin.Cols = scanPrivate.Cols
			c.e.mem.AddZigzagJoinToGroup(&zigzagJoin, grp)
			return
		}
		if scanPrivate.Flags.NoIndexJoin {
			return
		}
		// Case 2 (wrap zigzag join in an index join).
		var indexJoin memo.LookupJoinExpr
		// Ensure the zigzag join returns pk columns.
		zigzagJoin.Cols = scanPrivate.Cols.Intersection(zigzagCols)
		for i := range pkCols {
			zigzagJoin.Cols.Add(pkCols[i])
		}
		if c.FiltersBoundBy(zigzagJoin.On, zigzagCols) {
			// The ON condition refers only to the columns available in the zigzag
			// indices.
			indexJoin.On = memo.TrueFilter
		} else {
			// ON has some conditions that are bound by the columns in the index (at
			// the very least, the equality conditions we used for EqCols and FixedCols),
			// and some conditions that refer to other table columns. We can put
			// the former in the lower ZigzagJoin and the latter in the index join.
			conditions := zigzagJoin.On
			zigzagJoin.On = c.ExtractBoundConditions(conditions, zigzagCols)
			indexJoin.On = c.ExtractUnboundConditions(conditions, zigzagCols)
		}
		indexJoin.Input = c.e.f.ConstructZigzagJoin(
			zigzagJoin.On,
			&zigzagJoin.ZigzagJoinPrivate,
		)
		indexJoin.JoinType = opt.InnerJoinOp
		indexJoin.Table = scanPrivate.Table
		indexJoin.Index = cat.PrimaryIndex
		indexJoin.KeyCols = pkCols
		indexJoin.Cols = scanPrivate.Cols
		indexJoin.LookupColsAreTableKey = true
		// Create the LookupJoin for the index join in the same group as the
		// original select.
		c.e.mem.AddLookupJoinToGroup(&indexJoin, grp)
	})
}
// SplitDisjunction finds the first disjunction in the filters that can be
// split into an interesting pair of expressions. It returns the pair of
// expressions and the FiltersItem they were a part of. If an "interesting"
// disjunction is not found, ok=false is returned.
//
// For details on what makes an "interesting" disjunction, see
// findInterestingDisjunctionPair.
func (c *CustomFuncs) SplitDisjunction(
	sp *memo.ScanPrivate, filters memo.FiltersExpr,
) (left opt.ScalarExpr, right opt.ScalarExpr, itemToReplace *memo.FiltersItem, ok bool) {
	for i := range filters {
		item := &filters[i]
		if item.Condition.Op() != opt.OrOp {
			continue
		}
		if l, r, found := c.findInterestingDisjunctionPair(sp, item); found {
			return l, r, item, true
		}
	}
	return nil, nil, nil, false
}
// findInterestingDisjunctionPair groups disjunction sub-expressions into an
// "interesting" pair of expressions.
//
// An "interesting" pair of expressions is one where:
//
// 1. The column sets of both expressions in the pair are not
// equal.
// 2. Two index scans can potentially be constrained by both expressions in
// the pair.
//
// Consider the expression:
//
// u = 1 OR v = 2
//
// If an index exists on u and another on v, an "interesting" pair exists, ("u =
// 1", "v = 2"). If neither index exists, there is no "interesting" pair
// possible.
//
// Now consider the expression:
//
// u = 1 OR u = 2
//
// There is no possible "interesting" pair here because the left and right sides
// of the disjunction share the same columns.
//
// findInterestingDisjunctionPair groups all sub-expressions adjacent to the
// input's top-level OrExpr into left and right expression groups. These two
// groups form the new filter expressions on the left and right side of the
// generated UnionAll in SplitDisjunction(AddKey).
//
// All sub-expressions with the same columns as the left-most sub-expression
// are grouped in the left group. All other sub-expressions are grouped in the
// right group.
//
// findInterestingDisjunctionPair returns an ok=false if all sub-expressions
// have the same columns. It also returns ok=false if either expression of the
// pair is likely to constrain an index scan. See canMaybeConstrainIndexWithCols
// for details on how this is determined.
func (c *CustomFuncs) findInterestingDisjunctionPair(
sp *memo.ScanPrivate, filter *memo.FiltersItem,
) (left opt.ScalarExpr, right opt.ScalarExpr, ok bool) {
var leftExprs memo.ScalarListExpr
var rightExprs memo.ScalarListExpr
var leftColSet opt.ColSet
var rightColSet opt.ColSet
// Traverse all adjacent OrExpr.
var collect func(opt.ScalarExpr)
collect = func(expr opt.ScalarExpr) {
switch t := expr.(type) {
case *memo.OrExpr:
collect(t.Left)
collect(t.Right)
return
}
cols := c.OuterCols(expr)
// Set the left-most non-Or expression as the left ColSet to match (or
// not match) on.
if leftColSet.Empty() {
leftColSet = cols
}
// If the current expression ColSet matches leftColSet, add the expr to
// the left group. Otherwise, add it to the right group.
if leftColSet.Equals(cols) {
leftExprs = append(leftExprs, expr)
} else {
rightColSet.UnionWith(cols)
rightExprs = append(rightExprs, expr)
}
}
collect(filter.Condition)
// Return an empty pair if either of the groups is empty or if either the
// left or right groups are unlikely to constrain an index scan.
if len(leftExprs) == 0 ||
len(rightExprs) == 0 ||
!c.canMaybeConstrainIndexWithCols(sp, leftColSet) ||
!c.canMaybeConstrainIndexWithCols(sp, rightColSet) {
return nil, nil, false
}
return c.constructOr(leftExprs), c.constructOr(rightExprs), true
}
// canMaybeConstrainIndexWithCols returns true if any indexes on the
// ScanPrivate's table could be constrained by cols. It is a fast check for
// SplitDisjunction to avoid matching a large number of queries that won't
// obviously be improved by the rule.
//
// canMaybeConstrainIndexWithCols checks for an intersection between the input
// columns and an index's columns (both indexed columns and columns referenced
// in a partial index predicate). An intersection between column sets implies
// that cols could constrain a scan on that index. For example, the columns "a"
// would constrain a scan on an index over columns "a, b", because the "a" is a
// subset of the index columns. Likewise, the columns "a" and "b" would
// constrain a scan on an index over column "a", because "a" and "b" are a
// superset of the index columns.
//
// Notice that this function can return both false positives and false
// negatives. As an example of a false negative, consider the following table
// and query.
//
//   CREATE TABLE t (
//     k PRIMARY KEY,
//     a INT,
//     hash INT AS (a % 4) STORED,
//     INDEX hash (hash)
//   )
//
//   SELECT * FROM t WHERE a = 5
//
// The expression "a = 5" can constrain a scan over the hash index: The columns
// "hash" must be a constant value of 1 because it is dependent on column "a"
// with a constant value of 5. However, canMaybeConstrainIndexWithCols will
// return false in this case because "a" does not intersect with the index
// column, "hash".
func (c *CustomFuncs) canMaybeConstrainIndexWithCols(
	scanPrivate *memo.ScanPrivate, cols opt.ColSet,
) bool {
	md := c.e.mem.Metadata()
	tabMeta := md.TableMeta(scanPrivate.Table)

	// Iterate through all indexes of the table and return true if cols
	// intersect with the index's key columns.
	for i := 0; i < tabMeta.Table.IndexCount(); i++ {
		index := tabMeta.Table.Index(i)
		for j, n := 0, index.KeyColumnCount(); j < n; j++ {
			col := index.Column(j)
			ord := col.Ordinal()
			// For a virtual inverted column, compare against the source
			// column it is derived from, since that is what filters mention.
			if col.Kind() == cat.VirtualInverted {
				ord = col.InvertedSourceColumnOrdinal()
			}
			if cols.Contains(tabMeta.MetaID.ColumnID(ord)) {
				return true
			}
		}

		// If a partial index's predicate references some of cols, it may be
		// possible to generate an unconstrained partial index scan, which may
		// lead to better query plans.
		if _, isPartialIndex := index.Predicate(); isPartialIndex {
			p, ok := tabMeta.PartialIndexPredicate(i)
			if !ok {
				// A partial index predicate expression was not built for the
				// partial index. See Builder.buildScan for details on when this
				// can occur.
				continue
			}
			pred := *p.(*memo.FiltersExpr)
			if pred.OuterCols().Intersects(cols) {
				return true
			}
		}
	}
	return false
}
// MakeSetPrivate builds a SetPrivate whose column lists are derived from the
// given left, right, and output column sets (converted in set order).
func (c *CustomFuncs) MakeSetPrivate(left, right, out opt.ColSet) *memo.SetPrivate {
	private := memo.SetPrivate{
		LeftCols:  left.ToList(),
		RightCols: right.ToList(),
		OutCols:   out.ToList(),
	}
	return &private
}
// AddPrimaryKeyColsToScanPrivate returns a copy of the relevant fields of the
// input ScanPrivate with the table's primary key columns merged into Cols.
func (c *CustomFuncs) AddPrimaryKeyColsToScanPrivate(sp *memo.ScanPrivate) *memo.ScanPrivate {
	pkCols := c.PrimaryKeyCols(sp.Table)
	withKeys := memo.ScanPrivate{
		Table:   sp.Table,
		Cols:    sp.Cols.Union(pkCols),
		Flags:   sp.Flags,
		Locking: sp.Locking,
	}
	return &withKeys
}
// TableIDFromScanPrivate returns the table ID of the scan private.
func (c *CustomFuncs) TableIDFromScanPrivate(sp *memo.ScanPrivate) opt.TableID {
	return sp.Table
}
|
package main
import "fmt"
// main runs the brute-force two-sum demo on a small fixed input.
//
// The original did not compile: main had an int return type (forbidden for
// package main's entry point), `return[2]int{i, j}` was a syntax error, and
// the statements after it referenced an undefined successVar and were
// unreachable anyway. The search is factored into twoSum so it is testable.
func main() {
	nums := []int{3, 2, 3}
	target := 6
	if i, j, ok := twoSum(nums, target); ok {
		fmt.Println(i, j)
	}
}

// twoSum returns the indices (i < j) of the first pair of elements summing to
// target, scanning pairs in order. ok is false when no such pair exists.
func twoSum(nums []int, target int) (int, int, bool) {
	for i := 0; i < len(nums); i++ {
		for j := i + 1; j < len(nums); j++ {
			if nums[i]+nums[j] == target {
				return i, j, true
			}
		}
	}
	return 0, 0, false
}
|
package nocgo
import (
"errors"
"reflect"
"unsafe"
)
// mustSpec binds the dynamically-resolved symbol address fn to the function
// pointer in fun via makeSpec, panicking on failure. It is used only from
// package init, where a binding failure is unrecoverable.
func mustSpec(fn *byte, fun interface{}) {
	err := makeSpec(uintptr(unsafe.Pointer(fn)), fun)
	if err != nil {
		panic(err)
	}
}
// On 386 we need to do the dance of cgo_import_dynamic followed by two
// linknames, defining a variable that receives the dynamic symbol, and then
// dereferencing it. Otherwise we get an unknown relocation type error during
// linking.

//go:linkname libc_dlopen_x libc_dlopen_x
var libc_dlopen_x byte

// libc_dlopen points at the dynamically-resolved dlopen symbol.
var libc_dlopen = &libc_dlopen_x

//go:linkname libc_dlclose_x libc_dlclose_x
var libc_dlclose_x byte

// libc_dlclose points at the dynamically-resolved dlclose symbol.
var libc_dlclose = &libc_dlclose_x

//go:linkname libc_dlsym_x libc_dlsym_x
var libc_dlsym_x byte

// libc_dlsym points at the dynamically-resolved dlsym symbol.
var libc_dlsym = &libc_dlsym_x

//go:linkname libc_dlerror_x libc_dlerror_x
var libc_dlerror_x byte

// libc_dlerror points at the dynamically-resolved dlerror symbol.
var libc_dlerror = &libc_dlerror_x

// Go-callable trampolines; their bodies are wired up to the symbols above by
// makeSpec in init below.
var dlopen func(filename []byte, flags int32) uintptr
var dlclose func(handle uintptr) int32
var dlsym func(handle uintptr, symbol []byte) uintptr
var dlerror func() uintptr
// init binds the four dl* trampolines to their libc symbols; any failure is
// fatal because nothing in this package works without them.
func init() {
	mustSpec(libc_dlopen, &dlopen)
	mustSpec(libc_dlclose, &dlclose)
	mustSpec(libc_dlsym, &dlsym)
	mustSpec(libc_dlerror, &dlerror)
}
// getLastError converts the pending dlerror() message into a Go error.
// dlerror returns 0 (NULL) when no message is pending, in which case a
// generic error is returned instead.
func getLastError() error {
	err := dlerror()
	if err == 0 {
		return errors.New("Unknown dl error")
	}
	return errors.New(MakeGoStringFromPointer(err))
}
// Library holds the handle to a loaded library.
type Library uintptr

// Open opens the given dynamic library and returns a handle for loading
// symbols and functions from it.
func Open(library string) (Library, error) {
	handle := dlopen(MakeCString(library), 2 /* RTLD_NOW */)
	if handle == 0 {
		return 0, getLastError()
	}
	return Library(handle), nil
}
// Close closes the library. This might also release all resources. Any Func
// and Value calls on the Library after this point can give unexpected results.
func (l Library) Close() error {
	if dlclose(uintptr(l)) != 0 {
		return getLastError()
	}
	return nil
}
// Func resolves the symbol name in the library and binds it to fun, which
// must be a pointer to a function variable with the matching argument
// specification.
//
// WARNING! This does not and cannot check if the size of the given type is correct!
//
// Example:
//   var puts func(s []byte) int32
//   if err := lib.Func("puts", &puts); err != nil {
//     //handle error; err will contain an error message from dlerror, or if something went wrong with building the spec
//   }
//   num := puts(nocgo.MakeCString("hello world!\n"))
//   fmt.Printf("Successfully printed %d characters from C!\n", num)
//
// See package documentation for an explanation of C-types
func (l Library) Func(name string, fun interface{}) error {
	addr := dlsym(uintptr(l), MakeCString(name))
	if addr == 0 {
		// Zero address means the symbol was not found; surface dlerror.
		return getLastError()
	}
	return makeSpec(addr, fun)
}
// Value sets the given value (which must be a pointer to a pointer to the
// correct type) to the global symbol given by name.
//
// WARNING! This does not and cannot check if the size of the given type is correct! This might be possibly dangerous.
// See above for an explanation of C-types.
//
// Example:
//   var value *int32
//   if err := lib.Value("some_value", &value); err != nil {
//     //handle error; error will contain an error message from dlerror
//   }
//
//   // *value now is the contents of the global symbol in the library
//   fmt.Printf(*value)
func (l Library) Value(name string, value interface{}) error {
	v := reflect.ValueOf(value)
	if v.Kind() != reflect.Ptr {
		return errors.New("value must be a pointer to a pointer")
	}
	v = v.Elem()
	if v.Kind() != reflect.Ptr {
		// Same wording as the outer check (the two messages were
		// inconsistently phrased before).
		return errors.New("value must be a pointer to a pointer")
	}
	addr := dlsym(uintptr(l), MakeCString(name))
	if addr == 0 {
		return getLastError()
	}
	// Write the resolved address directly into the inner pointer.
	*(*uintptr)(unsafe.Pointer(v.UnsafeAddr())) = addr
	return nil
}
|
package model
// Cell is one square of the game board as serialized to JSON.
type Cell struct {
	Mine        bool `json:"mine"`         // whether this cell contains a mine
	Revealed    bool `json:"revealed"`     // whether the cell has been opened
	Flagged     bool `json:"flagged"`      // whether the player flagged it
	MinesAround int  `json:"mines_around"` // neighboring mine count — presumably the 8-neighborhood; confirm in board logic
}
|
package main
import "github.com/drakmaniso/glam"
// Transform wraps a 4x4 homogeneous transformation matrix.
type Transform struct {
	TransformMat glam.Mat4
}

// NewTransform wraps the given matrix in a Transform.
func NewTransform(matrix glam.Mat4) *Transform {
	return &Transform{matrix}
}
// MakeTransform returns the identity transform.
// NOTE(review): MatIdentity4 is not defined in this file — presumably a
// package-level identity matrix; confirm it exists and is *glam.Mat4.
func MakeTransform() *Transform {
	return NewTransform(*MatIdentity4)
}
// Translate returns a new transform that translates by amount.
func Translate(amount glam.Vec3) *Transform {
	return NewTransform(glam.Translation(amount))
}

// Translate post-multiplies t by a translation and returns the result as a
// new Transform; t itself is not modified.
func (t *Transform) Translate(amount glam.Vec3) *Transform {
	translation := glam.Translation(amount)
	return NewTransform(t.TransformMat.Times(&translation))
}
// Rotate returns a new transform rotating by angle around axis.
func Rotate(angle float32, axis glam.Vec3) *Transform {
	return NewTransform(glam.Rotation(angle, axis))
}

// Rotate post-multiplies t by a rotation and returns the result as a new
// Transform; t itself is not modified.
func (t *Transform) Rotate(angle float32, axis glam.Vec3) *Transform {
	rotation := glam.Rotation(angle, axis)
	return NewTransform(t.TransformMat.Times(&rotation))
}
func Scale(amount glam.Vec3) *Transform {
scale := glam.NewMat4(amount.X, 0, 0, 0,
0, amount.Y, 0, 0,
0, 0, amount.Z, 0,
0, 0, 0, 1)
return NewTransform(*scale)
}
func (t *Transform) Scale(amount glam.Vec3) *Transform {
return NewTransform(t.TransformMat.Times(glam.NewMat4(amount.X, 0, 0, 0,
0, amount.Y, 0, 0,
0, 0, amount.Z, 0,
0, 0, 0, 1)))
}
func ScaleBy(amount float32) *Transform {
scale := glam.NewMat4(amount, 0, 0, 0,
0, amount, 0, 0,
0, 0, amount, 0,
0, 0, 0, 1)
return NewTransform(*scale)
}
func (t *Transform) ScaleBy(amount float32) *Transform {
return NewTransform(t.TransformMat.Times(glam.NewMat4(amount, 0, 0, 0,
0, amount, 0, 0,
0, 0, amount, 0,
0, 0, 0, 1)))
} |
// Copyright (c) 2018 The MATRIX Authors
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php
package blkgenorV2
import (
"sort"
"sync"
"time"
"github.com/MatrixAINetwork/go-matrix/common"
"github.com/MatrixAINetwork/go-matrix/mc"
"github.com/MatrixAINetwork/go-matrix/params/manparams"
"github.com/pkg/errors"
)
// aiResultInfo is one pooled AI mining response plus local bookkeeping.
type aiResultInfo struct {
	aiMsg     *mc.HD_V2_AIMiningRspMsg // the received mining response message
	localTime int64                    // local receive time (UnixNano); used to order results
	verified  bool                     // initialized false — set by a later verification step; confirm in callers
	legal     bool                     // initialized false — set when the result passes checks; confirm in callers
}
///////////////////////////////////////////////////////////////////////////////////////////
// AIResultPool is a goroutine-safe pool of AI mining results.
type AIResultPool struct {
	// Cache layout: map<parentHash, map<from address, *data>>.
	aiMap      map[common.Hash]map[common.Address]*aiResultInfo
	countMap   map[common.Address]int // per-sender count of pooled results
	countLimit int                    // per-sender cap (anti-flooding)
	logInfo    string                 // tag used when logging from this pool
	mu         sync.RWMutex           // guards aiMap and countMap
}
// NewAIResultPool returns an empty pool tagged with logInfo; the per-sender
// cap comes from manparams.AIResultCountLimit.
func NewAIResultPool(logInfo string) *AIResultPool {
	return &AIResultPool{
		aiMap:      make(map[common.Hash]map[common.Address]*aiResultInfo),
		countMap:   make(map[common.Address]int),
		countLimit: manparams.AIResultCountLimit,
		logInfo:    logInfo,
	}
}
// AddAIResult pools one AI mining result keyed by (BlockHash, From).
// It rejects nil results, empty parent hashes, duplicate (hash, sender)
// pairs, and senders that already hold countLimit results.
func (self *AIResultPool) AddAIResult(aiResult *mc.HD_V2_AIMiningRspMsg) error {
	if nil == aiResult {
		return errors.Errorf("AI挖矿结果是空")
	}
	if common.EmptyHash(aiResult.BlockHash) {
		return errors.Errorf("父区块hash是空")
	}

	self.mu.Lock()
	defer self.mu.Unlock()

	// Per-sender cap: stops a single address from flooding the pool.
	if count := self.getFromCount(aiResult.From); count >= self.countLimit {
		return errors.Errorf("该账户发送AI挖矿结果超过存储最大的数目")
	}

	fromMap, OK := self.aiMap[aiResult.BlockHash]
	if !OK {
		fromMap = make(map[common.Address]*aiResultInfo)
		self.aiMap[aiResult.BlockHash] = fromMap
	}

	// Each sender may contribute at most one result per parent hash.
	_, exist := fromMap[aiResult.From]
	if exist {
		//log.Warn(self.logInfo, "AddAIResult", "AI结果已存在", "from", aiResult.From.Hex(), "parent hash", aiResult.BlockHash.TerminalString())
		return errors.Errorf("矿工AI挖矿结果已经存在")
	}

	// Record the local receive time so GetAIResults can sort by arrival order.
	fromMap[aiResult.From] = &aiResultInfo{aiMsg: aiResult, verified: false, legal: false, localTime: time.Now().UnixNano()}
	self.plusFromCount(aiResult.From)
	return nil
}
// GetAIResults returns all pooled results for parentHash, ordered by local
// arrival time (oldest first). A missing or empty entry yields (nil, nil).
func (self *AIResultPool) GetAIResults(parentHash common.Hash) ([]*aiResultInfo, error) {
	if common.EmptyHash(parentHash) {
		return nil, errors.Errorf("父区块Hash是空")
	}

	self.mu.RLock()
	defer self.mu.RUnlock()

	fromMap, found := self.aiMap[parentHash]
	if !found || len(fromMap) == 0 {
		return nil, nil
	}

	results := make([]*aiResultInfo, 0, len(fromMap))
	for _, info := range fromMap {
		results = append(results, info)
	}
	sort.Slice(results, func(a, b int) bool {
		return results[a].localTime < results[b].localTime
	})
	return results, nil
}
// getFromCount returns how many results address currently has pooled.
// Callers must hold self.mu.
func (self *AIResultPool) getFromCount(address common.Address) int {
	// A missing key yields the zero value, which is exactly the count we want.
	return self.countMap[address]
}
// plusFromCount bumps the pooled-result counter for address.
// Callers must hold self.mu.
func (self *AIResultPool) plusFromCount(address common.Address) {
	// The zero value of a missing entry makes ++ correct for the first insert.
	self.countMap[address]++
}
// minusFromCount decrements the counter for address, clamping at zero.
// Addresses never seen before are left untracked. Callers must hold self.mu.
func (self *AIResultPool) minusFromCount(address common.Address) {
	count, tracked := self.countMap[address]
	if !tracked {
		return
	}
	if count <= 0 {
		self.countMap[address] = 0
		return
	}
	self.countMap[address] = count - 1
}
|
/*
* Created on Thu Mar 21 2019 22:51:36
* Author: WuLC
* EMail: liangchaowu5@gmail.com
*/
// shipWithinDays returns the least ship capacity that moves all weights, in
// the given order, within D days (LeetCode 1011). It binary-searches the
// capacity between the heaviest single item and the total weight.
func shipWithinDays(weights []int, D int) int {
	lo, hi := 0, 0
	for _, w := range weights {
		if w > lo {
			lo = w // capacity can never be below the heaviest single item
		}
		hi += w // shipping everything in one day is the upper bound
	}
	for lo < hi {
		capacity := lo + (hi-lo)/2
		// Greedily count how many days this capacity needs.
		days, load := 1, 0
		for _, w := range weights {
			if load+w > capacity {
				days++
				load = 0
			}
			load += w
		}
		if days > D {
			lo = capacity + 1 // too slow: need a bigger ship
		} else {
			hi = capacity // feasible: try a smaller ship
		}
	}
	return lo
}
// max returns the larger of a and b.
func max(a, b int) int {
	if a >= b {
		return a
	}
	return b
}
package models
// SysConfig is one key/value row of the sys_config table, mapped via xorm.
type SysConfig struct {
	Id       string `json:"id" xorm:"pk 'id'"` // primary key
	Key      string `json:"key" xorm:"'key'"`
	Value    string `json:"value" xorm:"'value'"`
	Comments string `json:"comments" xorm:"'comments'"` // human-readable description of the entry
}

// TableName tells xorm which table this struct maps to.
func (SysConfig) TableName() string {
	return "sys_config"
}
// DefaultSysConfig holds the seed configuration rows (populated in init).
var DefaultSysConfig = make([]SysConfig, 0)

// init seeds the default configuration entries. The Value/Comments strings
// are persisted data and are reproduced as-is.
func init() {
	DefaultSysConfig = append(DefaultSysConfig, SysConfig{Id: "cnzzid", Key: "cnzzid", Value: "1262308688", Comments: "cnzz data monitor"})
	DefaultSysConfig = append(DefaultSysConfig, SysConfig{Id: "openregist", Key: "openregist", Value: "true", Comments: "open rigist?"})
	DefaultSysConfig = append(DefaultSysConfig, SysConfig{Id: "alldownloadlimit", Key: "alldownloadlimit", Value: "10000", Comments: "how many books can download one day?"})
}
|
package main
// main starts the server listening on :1935 (conventionally the RTMP port —
// confirm against the server implementation).
func main() {
	// NOTE(review): newSever is defined elsewhere in this package; the name
	// looks like a typo for newServer — confirm at its definition before renaming.
	server := newSever(":1935")
	server.run()
}
|
package main
/*
enum chess {
Queen,
King,
Knight,
Pawn,
};
*/
import "C"
import "fmt"
// main prints the numeric values of the chess enum constants declared in the
// cgo preamble.
func main() {
	var queen C.enum_chess = C.Queen
	// BUG FIX: the preamble declares King, not gKing; C.gKing failed to
	// compile ("could not determine kind of name").
	var king C.enum_chess = C.King
	var pawn C.enum_chess = C.Pawn
	var knight C.enum_chess = C.Knight
	fmt.Println(queen)
	fmt.Println(king)
	fmt.Println(pawn)
	fmt.Println(knight)
}
|
// This abstracts common-use functions from the database
// All functions here will return client-safe messages.
// That is, nothing internal will be exposed in these messages.
package main
import (
"crypto/rand"
"encoding/base64"
"fmt"
"time"
)
const (
tokenLength = 32
saltLength = 10
)
/*****************
* *
* User Functions *
* *
******************/
// getUserFromSession returns the user whose session token equals sessionID.
// A client-safe error is returned when no user matches (detected via an empty
// Email on the zero-value struct). Panics if the database connection cannot
// be opened, consistent with the rest of this file.
func getUserFromSession(sessionID string) (*User, error) {
	db, err := openConnection()
	if err != nil {
		panic(err)
	}
	defer db.Close()
	var u User
	db.Where("token = ?", sessionID).First(&u)
	if len(u.Email) == 0 {
		return nil, fmt.Errorf("no user found for session")
	}
	return &u, nil
}
// getUserFromEmail returns the user registered under email, or a client-safe
// error when none exists. Panics if the database connection cannot be opened.
func getUserFromEmail(email string) (*User, error) {
	db, err := openConnection()
	if err != nil {
		panic(err)
	}
	defer db.Close()
	var u User
	db.Where("email = ?", email).First(&u)
	// First leaves u zero-valued on a miss; an empty Email means "not found".
	if len(u.Email) == 0 {
		return nil, fmt.Errorf("no user found for given email %s", email)
	}
	return &u, nil
}
// emailExists reports whether some user is already registered under email.
func emailExists(email string) bool {
	db, err := openConnection()
	if err != nil {
		panic(err)
	}
	defer db.Close()
	var found User
	db.Where("email = ?", email).First(&found)
	// A zero-valued struct (empty Email) means no matching row.
	return found.Email != ""
}
// logoutUser clears the user's session token both in the database and on the
// in-memory struct, invalidating the session.
func logoutUser(user *User) {
	db, err := openConnection()
	if err != nil {
		panic(err)
	}
	defer db.Close()
	db.Model(user).Where("email = ?", user.Email).Update("token", "")
	user.Token.String = ""
	user.Token.Valid = false
}
// registerUser creates the user with a freshly generated session token and
// returns that token. Returns a client-safe error when the email is already
// registered. Panics if the database connection cannot be opened.
func registerUser(user *User) (token string, err error) {
	db, err := openConnection()
	if err != nil {
		panic(err)
	}
	defer db.Close()
	// TODO: collision handling
	token = generateRandomString(tokenLength)
	u := &User{}
	db.Where("email = ?", user.Email).First(u)
	if len(u.Email) != 0 {
		return "", fmt.Errorf("this email is already registered")
	}
	user.Token.String = token
	user.Token.Valid = true
	db.Create(user)
	// Naked return: token and err carry the values set above.
	return
}
// setUserToken persists token as the user's session token.
func setUserToken(user *User, token string) {
	db, err := openConnection()
	if err != nil {
		panic(err)
	}
	defer db.Close()
	db.Model(user).Update("token", token)
}
// saveUser persists all fields of the user record.
func saveUser(user *User) {
	db, err := openConnection()
	if err != nil {
		panic(err)
	}
	defer db.Close()
	db.Save(user)
}
/********************
* *
* Picture functions *
* *
********************/
// getPicture returns the user's picture identified by pictureMask.
// When refresh is true and the stored URL expires within the next 10 minutes,
// a fresh URL is generated, persisted, and returned.
func getPicture(user *User, pictureMask string, refresh bool) (*Picture, error) {
	db, err := openConnection()
	if err != nil {
		panic(err)
	}
	defer db.Close()
	goodTime := time.Now().Add(10 * time.Minute)
	var (
		picture Picture
		user2   User
	)
	//db.Where("mask", pictureMask).First(&picture)
	// Load through the user so the mask lookup is scoped to the owner.
	db.Where("id = ?", user.ID).Preload("Pictures", "mask = ?", pictureMask).First(&user2)
	if len(user2.Pictures) == 0 {
		return nil, fmt.Errorf("no picture found for your user session")
	}
	picture = user2.Pictures[0]
	if picture.ExpirationTime.Before(goodTime) && refresh {
		url, err := refreshURL(&picture)
		if err != nil {
			panic(err)
		}
		picture.ValidURL = url
		picture.ExpirationTime = time.Now().Add(urlExpirationDuration)
		db.Save(&picture)
	}
	return &picture, nil
}
// getPictures returns the user's pictures whose masks are in pictureMasks.
// When refresh is true, any picture whose signed URL expires within the next
// 10 minutes gets a fresh URL, which is persisted and reflected in the
// returned slice.
func getPictures(user *User, pictureMasks []string, refresh bool) ([]Picture, error) {
	db, err := openConnection()
	if err != nil {
		panic(err)
	}
	defer db.Close()
	goodTime := time.Now().Add(10 * time.Minute)
	var user2 User
	db.Where("id = ?", user.ID).Preload("Pictures", "mask IN (?)", pictureMasks).First(&user2)
	if len(user2.Pictures) == 0 {
		return nil, fmt.Errorf("no picture found for your user session")
	}
	if refresh {
		// BUG FIX: the previous by-value range refreshed a copy, so the DB
		// was updated but the returned slice still carried the stale URL.
		// Index into the slice so the caller sees the fresh values.
		for i := range user2.Pictures {
			picture := &user2.Pictures[i]
			if picture.ExpirationTime.Before(goodTime) {
				url, err := refreshURL(picture)
				if err != nil {
					panic(err)
				}
				picture.ValidURL = url
				picture.ExpirationTime = time.Now().Add(urlExpirationDuration)
				db.Save(picture)
			}
		}
	}
	return user2.Pictures, nil
}
// deletePictures removes the given pictures; the delete is scoped by user_id
// so only rows owned by user are affected.
func deletePictures(user *User, pictures []Picture) error {
	db, err := openConnection()
	if err != nil {
		panic(err)
	}
	defer db.Close()
	masks := make([]string, 0, len(pictures))
	for _, pic := range pictures {
		masks = append(masks, pic.Mask)
	}
	db.Exec("DELETE FROM pictures WHERE mask IN (?) AND user_id = ?", masks, user.ID)
	return nil
}
// deletePicture removes the picture row identified by mask.
// NOTE(review): unlike deletePictures this is not scoped by user_id — confirm
// callers have already verified ownership of the mask.
func deletePicture(pictureMask string) {
	db, err := openConnection()
	if err != nil {
		panic(err)
	}
	defer db.Close()
	db.Exec("DELETE FROM pictures WHERE mask = ?", pictureMask)
}
// getUsersPicturesAndRefreshURL returns one page of the user's pictures.
//
// Pagination clamps to valid bounds: limit and page are forced to >= 1, and a
// page past the end is clamped to the last page. E.g. asking for page 2 of 50
// results with limit 30 returns the final 20 results. Any picture whose URL
// expires within 10 minutes is refreshed and persisted, and tags are attached
// to each returned picture when available.
func getUsersPicturesAndRefreshURL(user *User, limit int, page int) (pictures []Picture, currentPage int, maxPages int) {
	db, err := openConnection()
	if err != nil {
		panic(err)
	}
	defer db.Close()
	pictures = make([]Picture, 0)
	var rowCount int
	// Clamp inputs so the offset arithmetic below is always valid.
	if limit <= 0 {
		limit = 1
	}
	if page <= 0 {
		page = 1
	}
	db.Model(&Picture{}).Where("user_id = ?", user.ID).Count(&rowCount)
	if (page-1)*limit >= rowCount {
		// Requested page is past the end: clamp to the last page (minimum 1).
		page = (rowCount / limit)
		if page == 0 {
			page++
		}
	}
	offset := (page - 1) * limit
	maxPages = (rowCount / limit)
	if limit*page > rowCount || (maxPages == 0 && rowCount > 0) {
		maxPages++
	}
	db.Limit(limit).Offset(offset).Where("user_id = ?", user.ID).Find(&pictures)
	goodTime := time.Now().Add(10 * time.Minute)
	for i := range pictures {
		// BUG FIX: the previous by-value range refreshed a copy, so the DB
		// was updated but the returned slice kept the stale URL. Work through
		// a pointer into the slice instead.
		picture := &pictures[i]
		if picture.ExpirationTime.Before(goodTime) {
			url, err := refreshURL(picture)
			if err != nil {
				panic(err)
			}
			picture.ValidURL = url
			picture.ExpirationTime = time.Now().Add(urlExpirationDuration)
			db.Save(picture)
		}
		if tags, err := getTags(picture); err == nil {
			picture.Tags = tags
		}
	}
	return pictures, page, maxPages
}
/**********
* *
* Tagging *
* *
**********/
// createTags appends tags to the picture and persists them via a cascading
// save of the picture row.
func createTags(picture *Picture, tags []Tag) error {
	db, err := openConnection()
	if err != nil {
		panic(err)
	}
	defer db.Close()
	picture.Tags = append(picture.Tags, tags...)
	return db.Save(picture).Error
}
// deleteTags removes the named tags from the picture, then reloads the
// picture so the caller's struct reflects the database state. The returned
// error is from the delete; a failed reload leaves picture unchanged.
func deleteTags(picture *Picture, tags []string) error {
	db, err := openConnection()
	if err != nil {
		panic(err)
	}
	defer db.Close()
	err = db.Exec("DELETE FROM tags WHERE tag IN (?) AND picture_mask = ?", tags, picture.Mask).Error
	// BUG FIX: previously *picture = *p ran even when getPicture failed and
	// returned a nil pointer, which would panic. Guard the reload.
	if p, gerr := getPicture(&User{ID: picture.UserID}, picture.Mask, false); gerr == nil && p != nil {
		*picture = *p
	}
	return err
}
// getTags returns all tags stored for the picture (empty slice when none).
func getTags(picture *Picture) (tags []Tag, err error) {
	db, openErr := openConnection()
	if openErr != nil {
		panic(openErr)
	}
	defer db.Close()
	found := make([]Tag, 0)
	queryErr := db.Where("picture_mask = ?", picture.Mask).Find(&found).Error
	return found, queryErr
}
// searchWithTag returns the user's pictures carrying a tag that matches term.
// front/back add SQL LIKE wildcards before/after the term respectively (both
// true = substring match, both false = exact match). refresh is forwarded to
// getPictures to renew expiring URLs.
func searchWithTag(u *User, term string, front, back, refresh bool) (pictures []Picture, err error) {
	db, err := openConnection()
	if err != nil {
		panic(err)
	}
	defer db.Close()
	pictureMasks := make([]string, 0)
	var fuzz = term
	if front && back {
		fuzz = fmt.Sprintf("%%%s%%", term)
	} else if front {
		fuzz = fmt.Sprintf("%%%s", term)
	} else if back {
		fuzz = fmt.Sprintf("%s%%", term)
	}
	rows, err := db.Raw(`SELECT t.picture_mask FROM tags t LEFT JOIN pictures p ON t.picture_mask = p.mask LEFT JOIN users u ON p.user_id = u.id WHERE u.id = ? AND t.tag LIKE ?`, u.ID, fuzz).Rows()
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	for rows.Next() {
		var mask string
		// NOTE(review): Scan's error is ignored; a failed scan appends an
		// empty mask to the lookup list.
		rows.Scan(&mask)
		pictureMasks = append(pictureMasks, mask)
	}
	return getPictures(u, pictureMasks, refresh)
}
/*********
* *
* Albums *
* *
*********/
// saveAlbum persists the album and its picture associations.
//
// Precondition: album only has valid pictures for this user.
func saveAlbum(album *Album) error {
	db, err := openConnection()
	if err != nil {
		panic(err)
	}
	defer db.Close()
	// BUG FIX: gorm methods return a new *DB, so `db.Save(album); return
	// db.Error` inspected the root handle and always missed a failed Save.
	// The hand-built VALUES slice was dead code from an abandoned raw-SQL
	// approach and has been removed.
	return db.Save(album).Error
}
// createAlbum inserts the album row, then links the given pictures through
// the gorm "Pictures" association.
func createAlbum(album *Album, pictures []Picture) error {
	db, err := openConnection()
	if err != nil {
		panic(err)
	}
	defer db.Close()
	if err := db.Create(album).Error; err != nil {
		return err
	}
	return db.Model(album).Association("Pictures").Append(pictures).Error
}
// getAlbum loads the album with the given mask along with its pictures, and
// refreshes each picture's URL via getPicture.
// NOTE(review): the query filters by mask only — the user argument does not
// scope the lookup; confirm callers verify ownership. getPicture's result and
// error are also discarded here, so the returned album keeps the pre-refresh
// URLs.
func getAlbum(user *User, albumID string) (*Album, error) {
	db, err := openConnection()
	if err != nil {
		panic(err)
	}
	var (
		album Album
	)
	defer db.Close()
	if err := db.Preload("Pictures").Find(&album, "mask = ?", albumID).Error; err != nil {
		return nil, err
	}
	for _, pic := range album.Pictures {
		getPicture(user, pic.Mask, true) // TODO: better way to just refresh the URL
	}
	return &album, nil
}
// deleteAlbum removes the album after clearing its picture associations.
// Returns a client-safe error for a nil album or a missing primary key
// (deleting without a key would otherwise affect unintended rows).
func deleteAlbum(album *Album) error {
	if album == nil {
		return fmt.Errorf("nil album provided")
	}
	db, err := openConnection()
	if err != nil {
		panic(err)
	}
	defer db.Close()
	// Check the primary key!
	if len(album.Mask) == 0 {
		return fmt.Errorf("no primary key in provided album to delete")
	}
	// Detach all pictures first so no orphaned join rows remain.
	err = db.Model(album).Association("Pictures").Clear().Error
	if err != nil {
		return err
	}
	return db.Delete(album).Error
}
// getAllAlbums returns every album related to the user.
func getAllAlbums(user *User) ([]Album, error) {
	db, err := openConnection()
	if err != nil {
		panic(err)
	}
	// BUG FIX: the connection was never closed, leaking one handle per call;
	// every sibling helper in this file defers Close.
	defer db.Close()
	var albums []Album
	err = db.Model(user).Related(&albums, "Albums").Error
	return albums, err
}
/****************
* *
* Miscellaneous *
* *
****************/
// generateRandomBytes returns n cryptographically secure random bytes, or an
// error if the system's entropy source fails.
func generateRandomBytes(n int) ([]byte, error) {
	b := make([]byte, n)
	if _, err := rand.Read(b); err != nil {
		return nil, err
	}
	return b, nil
}

// generateRandomString returns a URL-safe random string of length s.
//
// The base64 encoding of s bytes is always at least s characters long
// (4*ceil(s/3) >= s), so the truncation below cannot go out of range when the
// read succeeds.
func generateRandomString(s int) string {
	b, err := generateRandomBytes(s)
	if err != nil {
		// BUG FIX: the error was previously discarded; a failed read left b
		// nil, making the [:s] slice below panic with an index error for any
		// s > 0. Panicking with the real error matches this file's style.
		panic(err)
	}
	return base64.URLEncoding.EncodeToString(b)[:s]
}
|
package global
import (
"context"
"testing"
"github.com/go-redis/redis/v8"
)
// TestGreetings is an integration test: it requires a reachable Redis at
// :6379 and exercises the set/get round trip of a user's greeting count.
func TestGreetings(t *testing.T) {
	GreetingsGlobal := NewGreetings(redis.NewClient(&redis.Options{
		Username: "root",
		Password: "",
		Addr:     ":6379",
	}))
	ctx := context.Background()
	err := GreetingsGlobal.SetUserGreetingCount(ctx, 1, 1001, 1, 1)
	if err != nil {
		t.Fatal(err)
	}
	record, err := GreetingsGlobal.GetUserGreetingCount(ctx, 1, 1001, 1)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(record)
}
|
package testdata
import (
"path/filepath"
"runtime"
)
func NginxIngressChartPath() string {
return filepath.Join(staticPath(), "nginx-ingress-0.31.0.tgz")
}
func staticPath() string {
_, file, _, ok := runtime.Caller(0)
if !ok {
panic("Could not locate path to tiltfile/testdata")
}
return filepath.Dir(file)
}
|
package main
import "fmt"
// Go doesn't support pointer arithmetic like C does.
// main demonstrates pass-by-pointer: increment mutates num through its address.
func main() {
	var num int = 10
	increment(&num) // num becomes 11
	fmt.Println(num)
	fmt.Println(increment(&num)) // prints 12
}
// increment adds one to the int x points at and returns the new value.
func increment(x *int) int {
	*x = *x + 1
	return *x
}
package main
import "fmt"
// Student is the demo record used by the examples below.
type Student struct {
	id    int    // student number
	name  string
	age   int
	sex   string // gender
	score int
	addr  string // home address
}
// test takes a struct by value: it mutates only its local copy, so the
// caller's Student is unchanged.
func test(stu Student) {
	stu.name = "野猪佩奇"
	fmt.Println(stu)
}
// main0201 shows pass-by-value: test's modification is invisible here.
func main0201() {
	stu := Student{101, "喜羊羊", 6, "男", 100, "羊村"}
	// Pass by value: the callee works on a copy.
	test(stu)
	fmt.Println(stu)
}
// test1 renames the entry keyed 102 in place. Map element fields cannot be
// assigned directly (m[102].name is not addressable), so the value is copied
// out, modified, and stored back.
func test1(m map[int]Student) {
	// Map values are not addressable, so a field cannot be set directly:
	//m[102].name = "威震天"//err
	stu := m[102]
	stu.name = "威震天"
	m[102] = stu
	//fmt.Println(stu)
	//fmt.Printf("%T\n", stu)
	//fmt.Println(m[102])
}
// main stores Student values in a map and shows that test1's update through
// the map is visible to the caller (unlike the by-value test above).
func main() {
	// Use the struct as the map's value type.
	m := make(map[int]Student)
	// Map iteration order is unspecified; don't rely on any ordering here.
	m[101] = Student{name: "擎天柱", sex: "男", age: 30, score: 100, addr: "赛博坦星球"}
	m[102] = Student{name: "大黄蜂", sex: "男", age: 10, score: 59, addr: "赛博坦星球"}
	// Pass the map (a reference type) to the mutating helper.
	test1(m)
	fmt.Println(m)
}
|
package domain
// Tweet is a single posted message.
type Tweet struct {
	User      string // author's username
	Text      string // message body
	CreatedAt string // creation timestamp — stored as a string; format not visible here, confirm at call sites
}
|
package mpath
import (
"bytes"
"fmt"
"net/http"
"path"
"strings"
"unicode"
"github.com/gorilla/muxy"
//"github.com/gorilla/muxy/encoder"
"golang.org/x/net/context"
)
// NotFoundHandler returns an option that sets the handler invoked when no
// route matches a request.
func NotFoundHandler(h muxy.Handler) func(*matcher) {
	return func(m *matcher) {
		m.notFoundHandler = h
	}
}
// New creates a trie-based muxy.Router. The matcher starts with an empty root
// node and a default not-found handler; each option may override those
// defaults before the router is built.
func New(options ...func(*matcher)) *muxy.Router {
	m := &matcher{
		root:            &node{edges: map[string]*node{}},
		patterns:        map[*muxy.Route]*pattern{},
		notFoundHandler: muxy.HandlerFunc(notFound),
	}
	for _, o := range options {
		o(m)
	}
	return muxy.New(m)
}
// matcher matches request paths against registered route patterns using a
// segment trie.
type matcher struct {
	root            *node                    // trie root
	patterns        map[*muxy.Route]*pattern // per-route pattern, for vars and URL building
	notFoundHandler muxy.Handler             // fallback when no route matches
}
// Route registers pattern and returns its new route. It fails when the
// pattern does not parse or when an equivalent pattern (same trie position)
// is already registered.
func (m *matcher) Route(pattern string) (*muxy.Route, error) {
	segs, err := parse(pattern)
	if err != nil {
		return nil, err
	}
	e := m.root.edge(segs)
	if r := e.leaf; r != nil {
		return nil, fmt.Errorf("muxy: a route for the pattern %q or equivalent already exists: %q", pattern, r.Pattern)
	}
	e.leaf = &muxy.Route{}
	m.patterns[e.leaf] = newPattern(segs)
	return e.leaf, nil
}
// Match resolves the handler for the request's cleaned path and method. On a
// miss (no trie node or no leaf route) the notFoundHandler is returned and
// the context is untouched; on a hit the route variables extracted from the
// path are stored in the returned context.
func (m *matcher) Match(c context.Context, r *http.Request) (context.Context, muxy.Handler) {
	var h muxy.Handler
	// TODO: use a backward-compatible alternative to URL.RawPath here.
	path := cleanPath(r.URL.Path)
	e := m.root.match(path)
	if e != nil && e.leaf != nil {
		h = methodHandler(e.leaf.Handlers, r.Method)
	}
	if h == nil {
		h = m.notFoundHandler
	} else {
		c = m.patterns[e.leaf].setVars(c, path)
	}
	return c, h
}
// Build reconstructs a URL for the route from variable name/value pairs; see
// pattern.build for the argument format.
func (m *matcher) Build(r *muxy.Route, vars ...string) (string, error) {
	if p, ok := m.patterns[r]; ok {
		return p.build(vars...)
	}
	return "", fmt.Errorf("muxy: route not found: %v", r)
}
// -----------------------------------------------------------------------------
// methodHandler returns the handler registered for the given HTTP method.
//
// Lookup order: exact method match; OPTIONS falls back to an automatic Allow
// response; HEAD falls back to the GET handler, then (like every other
// method) to the catch-all "" handler; finally a 405 responder is returned.
func methodHandler(handlers map[string]muxy.Handler, method string) muxy.Handler {
	// len of a nil map is 0, so the previous separate nil check was redundant.
	if len(handlers) == 0 {
		return nil
	}
	if h, ok := handlers[method]; ok {
		return h
	}
	switch method {
	case "OPTIONS":
		return allowHandler(handlers, 200)
	case "HEAD":
		if h, ok := handlers["GET"]; ok {
			return h
		}
		fallthrough
	default:
		if h, ok := handlers[""]; ok {
			return h
		}
	}
	return allowHandler(handlers, 405)
}
// allowHandler returns a handler that responds with the given status code and
// an Allow header listing OPTIONS plus every registered method.
func allowHandler(handlers map[string]muxy.Handler, code int) muxy.Handler {
	allowed := make([]string, len(handlers)+1)
	allowed[0] = "OPTIONS"
	i := 1
	// Idiom fix: `for m := range` — the blank value assignment was redundant.
	for m := range handlers {
		// Skip the catch-all "" entry and avoid duplicating OPTIONS.
		if m != "" && m != "OPTIONS" {
			allowed[i] = m
			i++
		}
	}
	return muxy.HandlerFunc(func(c context.Context, w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		w.Header().Set("Allow", strings.Join(allowed[:i], ", "))
		w.WriteHeader(code)
		fmt.Fprintln(w, code, http.StatusText(code))
	})
}
// -----------------------------------------------------------------------------
// node is one trie level. Static segments live in edges; all variable
// segments share vEdge and a trailing wildcard uses wEdge.
type node struct {
	edges map[string]*node // static edges, if any
	vEdge *node            // variable edge, if any
	wEdge *node            // wildcard edge, if any
	leaf  *muxy.Route      // leaf value, if any
}
// edge returns the edge for the given path segments, creating it if needed.
// A wildcard segment ends the walk immediately, since a wildcard consumes the
// remainder of the path.
func (n *node) edge(segs []string) *node {
	for _, seg := range segs {
		switch seg[0] {
		case ':':
			// Variable segment: all variables at this level share one edge.
			if n.vEdge == nil {
				n.vEdge = &node{edges: map[string]*node{}}
			}
			n = n.vEdge
		case '*':
			if n.wEdge == nil {
				n.wEdge = &node{}
			}
			return n.wEdge
		default:
			// Literal segment.
			if n.edges[seg] == nil {
				n.edges[seg] = &node{edges: map[string]*node{}}
			}
			n = n.edges[seg]
		}
	}
	return n
}
// match walks the trie for path (which must begin with '/'), preferring a
// static edge, then the variable edge, then falling back to the wildcard edge
// — which may be nil, meaning no match.
func (n *node) match(path string) *node {
	part, path := "", path[1:]
	for len(path) > 0 {
		// Carve off the next path segment.
		if i := strings.IndexByte(path, '/'); i < 0 {
			part, path = path, ""
		} else {
			part, path = path[:i], path[i+1:]
		}
		if e, ok := n.edges[part]; ok {
			n = e
			continue
		}
		if e := n.vEdge; e != nil {
			n = e
			continue
		}
		return n.wEdge
	}
	return n
}
// -----------------------------------------------------------------------------
// newPattern returns a pattern for the given path segments. Literal segments
// are folded into slash-joined prefix parts; variable and wildcard segments
// are stored individually and recorded in keys.
func newPattern(segs []string) *pattern {
	p := pattern{}
	b := new(bytes.Buffer)
	for _, s := range segs {
		switch s[0] {
		case ':':
			s = s[1:] // variable: keep just the name
		case '*':
			// wildcard: stored as-is
		default:
			// Literal: accumulate into the pending prefix buffer.
			b.WriteByte('/')
			b.WriteString(s)
			s = ""
		}
		if s != "" {
			// Flush the accumulated prefix (with trailing slash), then the key.
			p.parts = append(p.parts, b.String()+"/")
			b.Reset()
			p.parts = append(p.parts, s)
			p.keys = append(p.keys, muxy.Variable(s))
		}
	}
	if b.Len() != 0 {
		p.parts = append(p.parts, b.String())
	}
	return &p
}
// pattern is a parsed route: parts alternates literal slash-prefixed chunks
// with variable names (and possibly a trailing "*"); keys lists the
// variables in order of appearance.
type pattern struct {
	parts []string
	keys  []muxy.Variable
}
// setVars sets the route variables in the context.
//
// Since the path matched already, we can make some assumptions: the path
// starts with a slash and there are no empty or dotted path segments.
//
// For the values:
//
//   p := pattern{
//     parts: []string{"/foo/bar/", "v1", "/baz/", "v2", "*"},
//     keys:  []muxy.Variable{"v1", "v2", "*"},
//   }
//   path := "/foo/bar/var1/baz/var2/x/y/z"
//
// The variables will be:
//
//   vars = []string{"var1", "var2", "x/y/z"}
func (p *pattern) setVars(c context.Context, path string) context.Context {
	if len(p.keys) == 0 {
		return c
	}
	path, idx := path[1:], 0
	vars := make([]string, len(p.keys))
	for _, part := range p.parts {
		switch part[0] {
		case '/':
			// Literal prefix: skip past it (minus the leading slash already consumed).
			path = path[len(part)-1:]
		case '*':
			// Wildcard swallows the remainder. NOTE(review): this `break`
			// exits only the switch, not the loop; it is harmless because a
			// wildcard is always the last part (enforced by parse).
			vars[idx] = path
			break
		default:
			if i := strings.IndexByte(path, '/'); i < 0 {
				// Final segment — again, `break` only leaves the switch.
				vars[idx] = path
				break
			} else {
				vars[idx] = path[:i]
				path = path[i+1:]
				idx++
			}
		}
	}
	return &varsCtx{c, p.keys, vars}
}
// build assembles a concrete path from the pattern, taking variables as
// alternating name/value pairs. Each name binds once (duplicated names bind
// in order); a wrong argument count or a missing name is an error.
func (p *pattern) build(vars ...string) (string, error) {
	if want := len(p.keys) * 2; want != len(vars) {
		return "", fmt.Errorf("muxy: expected %d arguments, got %d: %v", want, len(vars), vars)
	}
	var b bytes.Buffer
	for _, part := range p.parts {
		if part[0] == '/' {
			// Literal run: copied through verbatim.
			b.WriteString(part)
			continue
		}
		found := false
		for i := 0; i < len(vars); i += 2 {
			if vars[i] == part {
				b.WriteString(vars[i+1])
				// Consume this pair so a repeated name binds the next pair.
				vars[i] = ""
				found = true
				break
			}
		}
		if !found {
			return "", fmt.Errorf("muxy: missing argument for variable %q", part)
		}
	}
	return b.String(), nil
}
// -----------------------------------------------------------------------------
// parse splits a route pattern into its path segments after normalizing it
// with cleanPath. It validates that ":" variables carry identifier-like names
// (leading underscore/letter, then underscores/letters/digits) and that a "*"
// wildcard only appears as the final segment.
func parse(pattern string) ([]string, error) {
	pattern = cleanPath(pattern)
	count := 0
	for _, r := range pattern {
		if r == '/' {
			count++
		}
	}
	// Append into a pre-sized slice instead of indexing a fixed-length one:
	// a trailing slash ("/foo/") yields one more '/' than there are segments,
	// and the old fixed-length segs left a phantom "" element that panicked
	// on seg[0] downstream in node.edge.
	segs := make([]string, 0, count)
	part, path := "", pattern[1:]
	for len(path) > 0 {
		i := strings.IndexByte(path, '/')
		if i < 0 {
			part, path = path, ""
		} else {
			part, path = path[:i], path[i+1:]
		}
		switch part[0] {
		case ':':
			if len(part) == 1 {
				return nil, fmt.Errorf("empty variable name")
			}
			for k, r := range part[1:] {
				if k == 0 {
					if r != '_' && !unicode.IsLetter(r) {
						return nil, fmt.Errorf("expected underscore or letter starting a variable name; got %q", r)
					}
				} else if r != '_' && !unicode.IsLetter(r) && !unicode.IsDigit(r) {
					return nil, fmt.Errorf("unexpected %q in variable name", r)
				}
			}
		case '*':
			if len(part) != 1 {
				return nil, fmt.Errorf("unexpected wildcard: %q", part)
			}
			// i is the '/' position from this iteration; i >= 0 means more
			// segments follow the wildcard, which is invalid.
			if i >= 0 {
				return nil, fmt.Errorf("wildcard must be at the end of a pattern; got: .../*/%v", path)
			}
		}
		segs = append(segs, part)
	}
	return segs, nil
}
// cleanPath returns the canonical path for p, eliminating . and .. elements.
// An empty path becomes "/", and a missing leading slash is added.
//
// Borrowed from net/http.
func cleanPath(p string) string {
	switch {
	case p == "":
		return "/"
	case p[0] != '/':
		p = "/" + p
	}
	np := path.Clean(p)
	// path.Clean strips a trailing slash (except on the root path); restore
	// it so "/foo/" keeps its original shape.
	if np != "/" && p[len(p)-1] == '/' {
		np += "/"
	}
	return np
}
// -----------------------------------------------------------------------------
// varsCtx carries a key-variables mapping. It implements Context.Value() and
// delegates all other calls to the embedded Context.
type varsCtx struct {
	context.Context
	keys []muxy.Variable // variable names, parallel to vars
	vars []string        // matched path values, parallel to keys
}
// Value returns the route variable stored under key, falling back to the
// wrapped Context for any key that is not one of the route's variables.
func (c *varsCtx) Value(key interface{}) interface{} {
	for i := range c.keys {
		if c.keys[i] == key {
			return c.vars[i]
		}
	}
	return c.Context.Value(key)
}
// -----------------------------------------------------------------------------
// notFound replies to the request with an HTTP 404 not found error.
func notFound(c context.Context, w http.ResponseWriter, r *http.Request) {
http.Error(w, "404 page not found", http.StatusNotFound)
}
|
package config
import "github.com/kelseyhightower/envconfig"
// MySQLConfig holds MySQL connection settings. Fields are populated from
// environment variables by envconfig, falling back to the default tags below.
type MySQLConfig struct {
	Host     string `default:"127.0.0.1"`
	Port     string `default:"3306"`
	DBUser   string `default:"root"`
	Password string `default:"mysql"`
	DataBase string `default:""`
}
// Init builds a MySQLConfig from the environment via envconfig, applying the
// struct's default tags for unset variables. On error the returned config is
// nil and must not be used.
func Init() (*MySQLConfig, error) {
	config := &MySQLConfig{}
	if err := envconfig.Process("", config); err != nil {
		// Return nil rather than a zero-value config so callers cannot
		// accidentally use a half-initialized value alongside the error.
		return nil, err
	}
	return config, nil
}
|
package analytics
import (
"errors"
"os"
"time"
"github.com/gobuffalo/uuid"
"go.uber.org/zap"
segment "gopkg.in/segmentio/analytics-go.v3"
)
// Client wraps a Segment analytics client together with a logger used to
// report (non-fatal) tracking failures.
type Client struct {
	client segment.Client
	logger *zap.Logger
}
// NewClient constructs a Client backed by Segment, reading the write key from
// the SEGMENT_WRITE_KEY environment variable. It returns an error when the
// variable is unset.
func NewClient(logger *zap.Logger) (Client, error) {
	writeKey, ok := os.LookupEnv("SEGMENT_WRITE_KEY")
	if !ok {
		// The message must name the variable actually looked up above
		// (previously it said SEGMENT_WRITEKEY, misleading operators).
		return Client{}, errors.New("No value set for env SEGMENT_WRITE_KEY")
	}
	return Client{
		client: segment.New(writeKey),
		logger: logger,
	}, nil
}
// TestReportSummaryGenerated tracks a "Test report summary generated" event
// for the given app/build, including the result, test count and timestamp.
// Enqueue failures are logged as warnings and otherwise ignored.
func (c *Client) TestReportSummaryGenerated(appSlug, buildSlug, result string, numberOfTests int, eventTime time.Time) {
	// Parameter renamed from `time` to `eventTime`: the old name shadowed the
	// time package inside the function body.
	err := c.client.Enqueue(segment.Track{
		UserId: appSlug,
		Event:  "Test report summary generated",
		Properties: segment.NewProperties().
			Set("app_slug", appSlug).
			Set("build_slug", buildSlug).
			Set("result", result).
			Set("number_of_tests", numberOfTests).
			Set("datetime", eventTime),
	})
	if err != nil {
		c.logger.Warn("Failed to track analytics (TestReportSummaryGenerated)", zap.Error(err))
	}
}
// TestReportResult tracks a "Test report result" event for a single test
// report, including its type, outcome, report ID and timestamp. Enqueue
// failures are logged as warnings and otherwise ignored.
func (c *Client) TestReportResult(appSlug, buildSlug, result, testType string, testResultID uuid.UUID, eventTime time.Time) {
	// Parameter renamed from `time` to `eventTime`: the old name shadowed the
	// time package inside the function body.
	err := c.client.Enqueue(segment.Track{
		UserId: appSlug,
		Event:  "Test report result",
		Properties: segment.NewProperties().
			Set("app_slug", appSlug).
			Set("build_slug", buildSlug).
			Set("result", result).
			Set("test_type", testType).
			Set("datetime", eventTime).
			Set("test_report_id", testResultID.String()),
	})
	if err != nil {
		c.logger.Warn("Failed to track analytics (TestReportResult)", zap.Error(err))
	}
}
// NumberOfTestReports tracks a "Number of test reports" event with the report
// count for the given app/build and a timestamp. Enqueue failures are logged
// as warnings and otherwise ignored.
func (c *Client) NumberOfTestReports(appSlug, buildSlug string, count int, eventTime time.Time) {
	// Parameter renamed from `time` to `eventTime`: the old name shadowed the
	// time package inside the function body.
	err := c.client.Enqueue(segment.Track{
		UserId: appSlug,
		Event:  "Number of test reports",
		Properties: segment.NewProperties().
			Set("app_slug", appSlug).
			Set("build_slug", buildSlug).
			Set("count", count).
			Set("datetime", eventTime),
	})
	if err != nil {
		c.logger.Warn("Failed to track analytics (NumberOfTestReports)", zap.Error(err))
	}
}
// Close flushes any buffered analytics events and shuts down the underlying
// Segment client, returning its error if any.
func (c *Client) Close() error {
	return c.client.Close()
}
|
package solutions
// cloneGraph returns a deep copy of the connected graph reachable from node,
// using an iterative BFS. Node values key the visited/clone map, so they are
// assumed unique within the graph.
func cloneGraph(node *GraphNode) *GraphNode {
	if node == nil {
		return nil
	}
	// Remember the entry node's value: `node` is reassigned inside the loop.
	start := node.Val
	queue := []*GraphNode{node}
	cloned := map[int]*GraphNode{
		start: {
			Val:       start,
			Neighbors: []*GraphNode{},
		},
	}
	for len(queue) > 0 {
		node, queue = queue[0], queue[1:]
		nextNode := cloned[node.Val]
		for _, neighbor := range node.Neighbors {
			if _, visited := cloned[neighbor.Val]; !visited {
				cloned[neighbor.Val] = &GraphNode{
					Val:       neighbor.Val,
					Neighbors: []*GraphNode{},
				}
				queue = append(queue, neighbor)
			}
			nextNode.Neighbors = append(nextNode.Neighbors, cloned[neighbor.Val])
		}
	}
	// Return the clone of the entry node. The previous `return cloned[1]`
	// assumed the entry value is always 1 and returned nil otherwise.
	return cloned[start]
}
|
package stdmeta
import (
"context"
"errors"
"reflect"
"sync"
"github.com/spf13/viper"
"github.com/superchalupa/sailfish/src/dell-resources/attributes"
"github.com/superchalupa/sailfish/src/log"
"github.com/superchalupa/sailfish/src/ocp/model"
"github.com/superchalupa/sailfish/src/ocp/testaggregate"
"github.com/superchalupa/sailfish/src/ocp/view"
domain "github.com/superchalupa/sailfish/src/redfishresource"
)
// RegisterFormatters wires the standard property formatters (expandone,
// expand, count, attributeFormatter, formatOdataList) into the view service,
// both as individual view functions and as the combined "stdFormatters"
// option that applies all five at once.
func RegisterFormatters(s *testaggregate.Service, d *domain.DomainObjects) {
	// The expand formatters close over d so they can dereference URIs.
	expandOneFormatter := MakeExpandOneFormatter(d)
	s.RegisterViewFunction("withFormatter_expandone", func(ctx context.Context, logger log.Logger, cfgMgr *viper.Viper, cfgMgrMu *sync.RWMutex, vw *view.View, cfg interface{}, parameters map[string]interface{}) error {
		logger.Debug("Adding expandone formatter to view", "view", vw.GetURI())
		vw.ApplyOption(view.WithFormatter("expandone", expandOneFormatter))
		return nil
	})
	expandFormatter := MakeExpandListFormatter(d)
	s.RegisterViewFunction("withFormatter_expand", func(ctx context.Context, logger log.Logger, cfgMgr *viper.Viper, cfgMgrMu *sync.RWMutex, vw *view.View, cfg interface{}, parameters map[string]interface{}) error {
		logger.Debug("Adding expand formatter to view", "view", vw.GetURI())
		vw.ApplyOption(view.WithFormatter("expand", expandFormatter))
		return nil
	})
	s.RegisterViewFunction("withFormatter_count", func(ctx context.Context, logger log.Logger, cfgMgr *viper.Viper, cfgMgrMu *sync.RWMutex, vw *view.View, cfg interface{}, parameters map[string]interface{}) error {
		logger.Debug("Adding count formatter to view", "view", vw.GetURI())
		vw.ApplyOption(view.WithFormatter("count", CountFormatter))
		return nil
	})
	s.RegisterViewFunction("withFormatter_attributeFormatter", func(ctx context.Context, logger log.Logger, cfgMgr *viper.Viper, cfgMgrMu *sync.RWMutex, vw *view.View, cfg interface{}, parameters map[string]interface{}) error {
		logger.Debug("Adding attributeFormatter formatter to view", "view", vw.GetURI())
		vw.ApplyOption(view.WithFormatter("attributeFormatter", attributes.FormatAttributeDump))
		return nil
	})
	s.RegisterViewFunction("withFormatter_formatOdataList", func(ctx context.Context, logger log.Logger, cfgMgr *viper.Viper, cfgMgrMu *sync.RWMutex, vw *view.View, cfg interface{}, parameters map[string]interface{}) error {
		logger.Debug("Adding FormatOdataList formatter to view", "view", vw.GetURI())
		vw.ApplyOption(view.WithFormatter("formatOdataList", FormatOdataList))
		return nil
	})
	// Convenience registration that applies all of the above in one option.
	s.RegisterViewFunction("stdFormatters", func(ctx context.Context, logger log.Logger, cfgMgr *viper.Viper, cfgMgrMu *sync.RWMutex, vw *view.View, cfg interface{}, parameters map[string]interface{}) error {
		logger.Debug("Adding standard formatters (expand, expandone, count, attributeFormatter, formatOdataList) to view", "view", vw.GetURI())
		vw.ApplyOption(view.WithFormatter("expandone", expandOneFormatter))
		vw.ApplyOption(view.WithFormatter("expand", expandFormatter))
		vw.ApplyOption(view.WithFormatter("count", CountFormatter))
		vw.ApplyOption(view.WithFormatter("attributeFormatter", attributes.FormatAttributeDump))
		vw.ApplyOption(view.WithFormatter("formatOdataList", FormatOdataList))
		return nil
	})
}
// MakeExpandListFormatter returns a view formatter that reads a []string of
// URIs from the model property named by meta["property"] and replaces
// rrp.Value with the expanded (dereferenced) resources. URIs that fail to
// expand are silently skipped.
func MakeExpandListFormatter(d *domain.DomainObjects) func(context.Context, *view.View, *model.Model, *domain.RedfishResourceProperty, map[string]interface{}) error {
	return func(
		ctx context.Context,
		v *view.View,
		m *model.Model,
		rrp *domain.RedfishResourceProperty,
		meta map[string]interface{},
	) error {
		// The result of this assertion was previously discarded (immediately
		// overwritten); check it so a missing/mistyped meta key is reported
		// instead of silently looking up the "" property.
		p, ok := meta["property"].(string)
		if !ok {
			return errors.New("property name to operate on not passed in meta")
		}
		uris, ok := m.GetProperty(p).([]string)
		if !ok {
			return errors.New("uris property not setup properly")
		}
		odata := []interface{}{}
		for _, i := range uris {
			out, err := d.ExpandURI(ctx, i)
			if err == nil {
				odata = append(odata, out)
			}
		}
		rrp.Value = odata
		return nil
	}
}
// MakeExpandOneFormatter returns a view formatter that reads a single URI
// string from the model property named by meta["property"] and replaces
// rrp.Value with the expanded resource. Expansion failures leave rrp.Value
// untouched.
func MakeExpandOneFormatter(d *domain.DomainObjects) func(context.Context, *view.View, *model.Model, *domain.RedfishResourceProperty, map[string]interface{}) error {
	return func(
		ctx context.Context,
		v *view.View,
		m *model.Model,
		rrp *domain.RedfishResourceProperty,
		meta map[string]interface{},
	) error {
		// The result of this assertion was previously discarded (immediately
		// overwritten); check it so a missing/mistyped meta key is reported
		// instead of silently looking up the "" property.
		p, ok := meta["property"].(string)
		if !ok {
			return errors.New("property name to operate on not passed in meta")
		}
		uri, ok := m.GetProperty(p).(string)
		if !ok {
			return errors.New("uri property not setup properly")
		}
		out, err := d.ExpandURI(ctx, uri)
		if err == nil {
			rrp.Value = out
		}
		return nil
	}
}
// CountFormatter sets rrp.Value to the number of elements in the model
// property named by meta["property"]. Strings, arrays, slices, maps and
// channels report their length via reflection; any other kind counts as 0.
func CountFormatter(
	ctx context.Context,
	vw *view.View,
	m *model.Model,
	rrp *domain.RedfishResourceProperty,
	meta map[string]interface{},
) error {
	p, ok := meta["property"].(string)
	if !ok {
		return errors.New("property name to operate on not passed in meta.")
	}
	arr := m.GetProperty(p)
	if arr == nil {
		return errors.New("array property not setup properly")
	}
	switch v := reflect.ValueOf(arr); v.Kind() {
	case reflect.String, reflect.Array, reflect.Slice, reflect.Map, reflect.Chan:
		// All kinds for which reflect.Value.Len is defined.
		rrp.Value = v.Len()
	default:
		rrp.Value = 0
	}
	return nil
}
// FormatOdataList replaces rrp.Value with a list of {"@odata.id": uri} maps
// built from the []string model property named by meta["property"]. A missing
// or mistyped property yields an empty list rather than an error.
func FormatOdataList(ctx context.Context, v *view.View, m *model.Model, rrp *domain.RedfishResourceProperty, meta map[string]interface{}) error {
	var uris []string
	// The original discarded the first assertion's ok result by immediately
	// reassigning it; make the "default to empty list" intent explicit.
	if p, ok := meta["property"].(string); ok {
		uris, _ = m.GetProperty(p).([]string)
	}
	// TODO: make this parse array using reflect
	odata := []interface{}{}
	for _, i := range uris {
		odata = append(odata, map[string]interface{}{"@odata.id": i})
	}
	rrp.Value = odata
	return nil
}
|
package app
import (
"context"
"encoding/json"
"log"
"net/http"
"github.com/jackc/pgx/v5/pgtype"
)
// SuspendUser deactivates the user named by the "id" query parameter and
// purges their sessions. Only admins may call it; every outcome (including
// errors) is reported as HTTP 200 with a JSON body describing the result.
func (c *App) SuspendUser() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		id := r.URL.Query().Get("id")
		log.Println("suspending user", id)

		// fail reports a non-suspended outcome with the given error text.
		fail := func(msg string) {
			RespondWithJSON(w, &JSONResponse{
				Code: http.StatusOK,
				JSON: map[string]any{
					"error":     msg,
					"suspended": false,
				},
			})
		}

		if user := c.LoggedInUser(r); !user.Admin {
			fail("Not authorized.")
			return
		}

		err := c.MatrixDB.Queries.DeactivateUser(context.Background(), pgtype.Text{
			String: id,
			Valid:  true,
		})
		if err != nil {
			log.Println("error deleting user", err)
			fail("Error deleting user.")
			return
		}

		if err := c.PurgeUserSessions(id); err != nil {
			log.Println("error deleting user", err)
			fail("Error deleting user.")
			return
		}

		RespondWithJSON(w, &JSONResponse{
			Code: http.StatusOK,
			JSON: map[string]any{
				"suspended": true,
			},
		})
	}
}
// PinEventToIndex pins the event named by the "slug" query parameter to the
// front-page index by appending it to the JSON-encoded "pinned" list stored
// in the system cache. Only admins may pin; every outcome is reported as
// HTTP 200 with a JSON body.
func (c *App) PinEventToIndex() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		query := r.URL.Query()
		slug := query.Get("slug")
		// NOTE(review): "pinnind" is a typo for "pinning" in this log line.
		log.Println("pinnind event on index", slug)
		user := c.LoggedInUser(r)
		admin, err := c.MatrixDB.Queries.IsAdmin(context.Background(), pgtype.Text{String: user.MatrixUserID, Valid: true})
		if err != nil || !admin {
			RespondWithJSON(w, &JSONResponse{
				Code: http.StatusOK,
				JSON: map[string]any{
					"error": "Not authorized.",
				},
			})
			return
		}
		// A cache miss (or any Get error) is treated as "nothing pinned yet":
		// start a fresh single-element list.
		pinned, err := c.Cache.System.Get("pinned").Result()
		if err != nil {
			list := []string{slug}
			serialized, err := json.Marshal(list)
			if err != nil {
				// Marshal of []string should not fail; logged and ignored.
				log.Println(err)
			}
			err = c.Cache.System.Set("pinned", serialized, 0).Err()
			if err != nil {
				// NOTE(review): message says "getting" but the failure was a
				// cache Set.
				log.Println("error getting event: ", err)
				RespondWithJSON(w, &JSONResponse{
					Code: http.StatusOK,
					JSON: map[string]any{
						"error":  "Event could not be pinned.",
						"exists": false,
					},
				})
				return
			}
		} else {
			// Cache hit: decode the existing list, append, and write it back.
			var us []string
			err = json.Unmarshal([]byte(pinned), &us)
			if err != nil {
				log.Println(err)
				RespondWithJSON(w, &JSONResponse{
					Code: http.StatusOK,
					JSON: map[string]any{
						"error":  "Event could not be pinned.",
						"exists": false,
					},
				})
				return
			}
			// NOTE(review): duplicates are not checked; pinning the same slug
			// twice stores it twice.
			us = append(us, slug)
			serialized, err := json.Marshal(us)
			if err != nil {
				log.Println(err)
				RespondWithJSON(w, &JSONResponse{
					Code: http.StatusOK,
					JSON: map[string]any{
						"error":  "Event could not be pinned.",
						"exists": false,
					},
				})
				return
			}
			err = c.Cache.System.Set("pinned", serialized, 0).Err()
			if err != nil {
				log.Println("error getting event: ", err)
				RespondWithJSON(w, &JSONResponse{
					Code: http.StatusOK,
					JSON: map[string]any{
						"error":  "Event could not be pinned.",
						"exists": false,
					},
				})
				return
			}
		}
		RespondWithJSON(w, &JSONResponse{
			Code: http.StatusOK,
			JSON: map[string]any{
				"pinned": true,
			},
		})
	}
}
// UnpinIndexEvent removes the event named by the "slug" query parameter from
// the JSON-encoded "pinned" list stored in the system cache. Only admins may
// unpin; every outcome is reported as HTTP 200 with a JSON body.
func (c *App) UnpinIndexEvent() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		query := r.URL.Query()
		slug := query.Get("slug")
		log.Println("unpinning event on index", slug)
		user := c.LoggedInUser(r)
		admin, err := c.MatrixDB.Queries.IsAdmin(context.Background(), pgtype.Text{String: user.MatrixUserID, Valid: true})
		if err != nil || !admin {
			RespondWithJSON(w, &JSONResponse{
				Code: http.StatusOK,
				JSON: map[string]any{
					"error": "Not authorized.",
				},
			})
			return
		}
		// A cache miss/Get error means there is nothing to unpin.
		pinned, err := c.Cache.System.Get("pinned").Result()
		if err != nil {
			log.Println("error unpinning event: ", err)
			RespondWithJSON(w, &JSONResponse{
				Code: http.StatusOK,
				JSON: map[string]any{
					"error":  "Could not unpin.",
					"exists": false,
				},
			})
			return
		}
		var us []string
		err = json.Unmarshal([]byte(pinned), &us)
		if err != nil {
			log.Println(err)
			RespondWithJSON(w, &JSONResponse{
				Code: http.StatusOK,
				JSON: map[string]any{
					"error":  "Could not unpin.",
					"exists": false,
				},
			})
			return
		}
		// Single remaining entry: delete the cache key entirely.
		// NOTE(review): the key is deleted even when that single entry is not
		// the requested slug — confirm this is intended. An empty decoded
		// list falls through both branches and still reports success.
		if len(us) == 1 {
			err := c.Cache.System.Del("pinned").Err()
			if err != nil {
				log.Println("error unpinning event: ", err)
				RespondWithJSON(w, &JSONResponse{
					Code: http.StatusOK,
					JSON: map[string]any{
						"error":  "Could not unpin.",
						"exists": false,
					},
				})
				return
			}
		}
		// More than one entry: filter out the slug and write the list back.
		if len(us) > 1 {
			n := removeElement(us, slug)
			serialized, err := json.Marshal(n)
			if err != nil {
				// Marshal of []string should not fail; logged and ignored.
				log.Println(err)
			}
			err = c.Cache.System.Set("pinned", serialized, 0).Err()
			if err != nil {
				// NOTE(review): message and error text talk about pinning,
				// but this is the unpin path failing on a cache Set.
				log.Println("error getting event: ", err)
				RespondWithJSON(w, &JSONResponse{
					Code: http.StatusOK,
					JSON: map[string]any{
						"error":  "Event could not be pinned.",
						"exists": false,
					},
				})
				return
			}
		}
		RespondWithJSON(w, &JSONResponse{
			Code: http.StatusOK,
			JSON: map[string]any{
				"unpinned": true,
			},
		})
	}
}
|
// Will combine NACS with NACS_table
// Create a File.
// Then Submit to FIRESTORE
package main
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
firebase "firebase.google.com/go"
"google.golang.org/api/option"
)
var TableMap = map[string]NACSTable{}
// main merges the NACS decision data (NACSv03_OUT.json) with the score-table
// metadata (NACS_TABLE_EDIT.json), writes the combined records to a local
// ComplexNACSv03_OUT.json file, and uploads each record to the Firestore
// collection COMPLEX_NONACS keyed by its INDEX.
func main() {
	var ComplexNACS []COMPLEX_NONACS
	NACS01json, err := os.Open("./NACS_TABLE_EDIT.json")
	if err != nil {
		// NOTE(review): execution continues with an unusable file handle on
		// error; the ReadAll below would then yield no data. Consider
		// aborting here.
		fmt.Println(err)
	}
	fmt.Println("Successfully Opened NACS_TABLE_EDIT.json")
	fmt.Println(NACS01json)
	defer NACS01json.Close()
	var NACSTableC []NACSTable
	// Read and decode the score-table file. Both the ReadAll and Unmarshal
	// errors are ignored; bad JSON silently yields an empty table slice.
	NACS01jsonA, _ := ioutil.ReadAll(NACS01json)
	json.Unmarshal(NACS01jsonA, &NACSTableC)
	// Debug dump of every table entry.
	for i := 0; i < len(NACSTableC); i++ {
		fmt.Println("==========================================")
		fmt.Println("INdication: " + NACSTableC[i].INDICATION)
		fmt.Println("Name: " + NACSTableC[i].Name)
		fmt.Println("Q01CAT: " + NACSTableC[i].Q01CAT)
		fmt.Println("SCORE: " + NACSTableC[i].SCORE)
		fmt.Println("SCOREDEF: " + NACSTableC[i].SCOREDEFINITIONS)
		fmt.Println("SCOREGRAPHIC: " + NACSTableC[i].SCOREGRAPHIC)
		fmt.Println("TERM DESCRIP: " + NACSTableC[i].TermDescription[0])
		fmt.Println("==========================================")
	}
	fmt.Println("============Stating Table MaP=================")
	var t string
	// Index every table entry by Name so getTableMap can resolve the
	// E*/F* outcome strings below.
	for i := 0; i < len(NACSTableC); i++ {
		t = NACSTableC[i].Name
		TableMap[t] = NACSTableC[i]
	}
	fmt.Println(TableMap["69 - PCI"].Name)
	fmt.Println("============ ended making table map =================")
	//=====================================================================
	fmt.Println("============ starting Map of NACS =================")
	// NACS01json is reused for the second input file; each defer above/below
	// captured its own *File value, so both files are eventually closed.
	NACS01json, err = os.Open("./NACSv03_OUT.json")
	// if we os.Open returns an error then handle it
	if err != nil {
		fmt.Println(err)
	}
	fmt.Println("Successfully Opened NACSv03_OUT.json")
	// defer the closing of our jsonFile so that we can parse it later on
	fmt.Println(NACS01json)
	defer NACS01json.Close()
	// we initialize our Users array
	//var users Users
	var NACS []NONACS
	// read our opened xmlFile as a byte array.
	NACS02jsonA, _ := ioutil.ReadAll(NACS01json)
	json.Unmarshal(NACS02jsonA, &NACS)
	// Debug dump of the decision records.
	for i := 0; i < len(NACS); i++ {
		fmt.Println("==========================================")
		fmt.Println("Index: " + NACS[i].INDEX)
		fmt.Println("DeadEnd" + NACS[i].DEADEND)
		fmt.Println("E01CABG: " + NACS[i].E01CABG)
		fmt.Println("E01PCI: " + NACS[i].E01PCI)
		fmt.Println("==========================================")
	}
	fmt.Println("============Building ComplexNACS=================")
	// For each decision record, copy every field verbatim and pair each
	// E*/F* outcome string with its resolved table entry (D-prefixed field)
	// via getTableMap.
	var TempComplexNACS COMPLEX_NONACS
	for i := 0; i < len(NACS); i++ {
		fmt.Println("==========================================")
		fmt.Println("Index: " + NACS[i].INDEX)
		//pppppppppppppppppppppppppppppppppp
		TempComplexNACS.Title = NACS[i].Title // string `json:"title"`
		TempComplexNACS.INDEX = NACS[i].INDEX // string `json:"INDEX"`
		TempComplexNACS.DEADEND = NACS[i].DEADEND // string `json:"DEAD_END"`
		TempComplexNACS.Q01IschemicSymptoms = NACS[i].Q01IschemicSymptoms // string `json:"Q01 - Ischemic Symptoms"`
		TempComplexNACS.Q02AntiIschemicMedicalTherapy = NACS[i].Q02AntiIschemicMedicalTherapy // string `json:"Q02 - Anti-ischemic Medical Therapy:"`
		TempComplexNACS.Q03NonInvasiveTestResults = NACS[i].Q03NonInvasiveTestResults // string `json:"Q03 - Non-invasive Test Results:"`
		TempComplexNACS.Q04PRIORCABG = NACS[i].Q04PRIORCABG // string `json:"Q04 - PRIOR CABG"`
		TempComplexNACS.Q01 = NACS[i].Q01 // string `json:"Q01"`
		TempComplexNACS.Q02 = NACS[i].Q02 // string `json:"Q02"`
		TempComplexNACS.Q03 = NACS[i].Q03 // string `json:"Q03"`
		TempComplexNACS.Q04 = NACS[i].Q04 // string `json:"Q04"`
		TempComplexNACS.E01PRIMARY = NACS[i].E01PRIMARY // string `json:"E01 PRIMARY"`
		TempComplexNACS.DE01PRIMARY = getTableMap(NACS[i].E01PRIMARY) // NACSTable `json:"de01_primary"`
		//TempComplexNACS.DE01PRIMARY = getTableMap("22-Asymptomatic") // NACSTable `json:"de01_primary"`
		TempComplexNACS.E01CABG = NACS[i].E01CABG // string `json:"E01 CABG"`
		TempComplexNACS.DE01CABG = getTableMap(NACS[i].E01CABG) // NACSTable `json:"de01_cabg"`
		TempComplexNACS.E01PCI = NACS[i].E01PCI // string `json:"E01 PCI"`
		TempComplexNACS.DE01PCI = getTableMap(NACS[i].E01PCI) // NACSTable `json:"de01_pci"`
		TempComplexNACS.E02PRIMARY = NACS[i].E02PRIMARY // string `json:"E02 PRIMARY"`
		TempComplexNACS.DE02PRIMARY = getTableMap(NACS[i].E02PRIMARY) // NACSTable `json:"de02_primary"`
		TempComplexNACS.E02CABG = NACS[i].E02CABG // string `json:"E02 CABG"`
		TempComplexNACS.DE02CABG = getTableMap(NACS[i].E02CABG) // NACSTable `json:"de02_cabg"`
		TempComplexNACS.E02PCI = NACS[i].E02PCI // string `json:"E02 PCI"`
		TempComplexNACS.DE02PCI = getTableMap(NACS[i].E02PCI) // NACSTable `json:"de02_pci"`
		TempComplexNACS.E03PRIMARY = NACS[i].E03PRIMARY // string `json:"E03 PRIMARY"`
		TempComplexNACS.DE03PRIMARY = getTableMap(NACS[i].E03PRIMARY) // NACSTable `json:"de03_primary"`
		TempComplexNACS.E03CABG = NACS[i].E03CABG // string `json:"E03 CABG"`
		TempComplexNACS.DE03CABG = getTableMap(NACS[i].E03CABG) // NACSTable `json:"de03_cabg"`
		TempComplexNACS.E03PCI = NACS[i].E03PCI // string `json:"E03 PCI"`
		TempComplexNACS.DE03PCI = getTableMap(NACS[i].E03PCI) // NACSTable `json:"de03_pci"`
		TempComplexNACS.E04PRIMARY = NACS[i].E04PRIMARY // string `json:"E04 PRIMARY"`
		TempComplexNACS.DE04PRIMARY = getTableMap(NACS[i].E04PRIMARY) // NACSTable `json:"de04_primary"`
		TempComplexNACS.E04CABG = NACS[i].E04CABG // string `json:"E04 CABG"`
		TempComplexNACS.DE04CABG = getTableMap(NACS[i].E04CABG) // NACSTable `json:"de04_cabg"`
		TempComplexNACS.E04PCI = NACS[i].E04PCI // string `json:"E04 PCI"`
		TempComplexNACS.DE04PCI = getTableMap(NACS[i].E04PCI) // NACSTable `json:"de04_pci"`
		TempComplexNACS.E05PRIMARY = NACS[i].E05PRIMARY // string `json:"E05 PRIMARY"`
		TempComplexNACS.DE05PRIMARY = getTableMap(NACS[i].E05PRIMARY) // NACSTable `json:"de05_primary"`
		TempComplexNACS.E05CABG = NACS[i].E05CABG // string `json:"E05 CABG"`
		TempComplexNACS.DE05CABG = getTableMap(NACS[i].E05CABG) // NACSTable `json:"de05_cabg"`
		TempComplexNACS.E05PCI = NACS[i].E05PCI // string `json:"E05 PCI"`
		TempComplexNACS.DE05PCI = getTableMap(NACS[i].E05PCI) // NACSTable `json:"de05_pci"`
		TempComplexNACS.E05APRIMARY = NACS[i].E05APRIMARY // string `json:"E05a PRIMARY"`
		TempComplexNACS.DE05APRIMARY = getTableMap(NACS[i].E05APRIMARY) // NACSTable `json:"de05a_primary"`
		TempComplexNACS.E05ACABG = NACS[i].E05ACABG // string `json:"E05a CABG"`
		TempComplexNACS.DE05ACABG = getTableMap(NACS[i].E05ACABG) // NACSTable `json:"de05a_cabg"`
		TempComplexNACS.E05APCI = NACS[i].E05APCI // string `json:"E05a PCI"`
		TempComplexNACS.DE05APCI = getTableMap(NACS[i].E05APCI) // NACSTable `json:"de05a_pci"`
		TempComplexNACS.E05BPRIMARY = NACS[i].E05BPRIMARY // string `json:"E05b PRIMARY"`
		TempComplexNACS.DE05BPRIMARY = getTableMap(NACS[i].E05BPRIMARY) // NACSTable `json:"de05b_primary"`
		TempComplexNACS.E05BCABG = NACS[i].E05BCABG // string `json:"E05b CABG"`
		TempComplexNACS.DE05BCABG = getTableMap(NACS[i].E05BCABG) // NACSTable `json:"de05b_cabg"`
		TempComplexNACS.E05BPCI = NACS[i].E05BPCI // string `json:"E05b PCI"`
		TempComplexNACS.DE05BPCI = getTableMap(NACS[i].E05BPCI) // NACSTable `json:"de05b_pci"`
		TempComplexNACS.E05CPRIMARY = NACS[i].E05CPRIMARY // string `json:"E05c PRIMARY"`
		TempComplexNACS.DE05CPRIMARY = getTableMap(NACS[i].E05CPRIMARY) // NACSTable `json:"de05c_primary"`
		TempComplexNACS.E05CCABG = NACS[i].E05CCABG // string `json:"E05c CABG"`
		TempComplexNACS.DE05CCABG = getTableMap(NACS[i].E05CCABG) // NACSTable `json:"de05c_cabg"`
		TempComplexNACS.E05CPCI = NACS[i].E05CPCI // string `json:"E05c PCI"`
		TempComplexNACS.DE05CPCI = getTableMap(NACS[i].E05CPCI) // NACSTable `json:"de05c_pci"`
		TempComplexNACS.E06PRIMARY = NACS[i].E06PRIMARY // string `json:"E06 PRIMARY"`
		TempComplexNACS.DE06PRIMARY = getTableMap(NACS[i].E06PRIMARY) // NACSTable `json:"de06_primary"`
		TempComplexNACS.E06CABG = NACS[i].E06CABG // string `json:"E06 CABG"`
		TempComplexNACS.DE06CABG = getTableMap(NACS[i].E06CABG) // NACSTable `json:"de06_cabg"`
		TempComplexNACS.E06PCI = NACS[i].E06PCI // string `json:"E06 PCI"`
		TempComplexNACS.DE06PCI = getTableMap(NACS[i].E06PCI) // NACSTable `json:"de06_pci"`
		TempComplexNACS.E06APRIMARY = NACS[i].E06APRIMARY // string `json:"E06a PRIMARY"`
		TempComplexNACS.DE06APRIMARY = getTableMap(NACS[i].E06APRIMARY) // NACSTable `json:"de06a_primary"`
		TempComplexNACS.E06ACABG = NACS[i].E06ACABG // string `json:"E06a CABG"`
		TempComplexNACS.DE06ACABG = getTableMap(NACS[i].E06ACABG) // NACSTable `json:"de06a_cabg"`
		TempComplexNACS.E06APCI = NACS[i].E06APCI // string `json:"E06a PCI"`
		TempComplexNACS.DE06APCI = getTableMap(NACS[i].E06APCI) // NACSTable `json:"de06a_pci"`
		TempComplexNACS.E06BPRIMARY = NACS[i].E06BPRIMARY // string `json:"E06b PRIMARY"`
		TempComplexNACS.DE06BPRIMARY = getTableMap(NACS[i].E06BPRIMARY) // NACSTable `json:"de06b_primary"`
		TempComplexNACS.E06BCABG = NACS[i].E06BCABG // string `json:"E06b CABG"`
		TempComplexNACS.DE06BCABG = getTableMap(NACS[i].E06BCABG) // NACSTable `json:"de06b_cabg"`
		TempComplexNACS.E06BPCI = NACS[i].E06BPCI // string `json:"E06b PCI"`
		TempComplexNACS.DE06BPCI = getTableMap(NACS[i].E06BPCI) // NACSTable `json:"de06b_pci"`
		TempComplexNACS.E06CPRIMARY = NACS[i].E06CPRIMARY // string `json:"E06c PRIMARY"`
		TempComplexNACS.DE06CPRIMARY = getTableMap(NACS[i].E06CPRIMARY) // NACSTable `json:"de06c_primary"`
		TempComplexNACS.E06CCABG = NACS[i].E06CCABG // string `json:"E06c CABG"`
		TempComplexNACS.DE06CCABG = getTableMap(NACS[i].E06CCABG) // NACSTable `json:"de06c_cabg"`
		TempComplexNACS.E06CPCI = NACS[i].E06CPCI // string `json:"E06c PCI"`
		TempComplexNACS.DE06CPCI = getTableMap(NACS[i].E06CPCI) // NACSTable `json:"de06c_pci"`
		TempComplexNACS.E07PRIMARY = NACS[i].E07PRIMARY // string `json:"E07 PRIMARY"`
		TempComplexNACS.DE07PRIMARY = getTableMap(NACS[i].E07PRIMARY) // NACSTable `json:"de07_primary"`
		TempComplexNACS.E07CABG = NACS[i].E07CABG // string `json:"E07 CABG"`
		TempComplexNACS.DE07CABG = getTableMap(NACS[i].E07CABG) // NACSTable `json:"de07_cabg"`
		TempComplexNACS.E07PCI = NACS[i].E07PCI // string `json:"E07 PCI"`
		TempComplexNACS.DE07PCI = getTableMap(NACS[i].E07PCI) // NACSTable `json:"de07_pci"`
		TempComplexNACS.E07APRIMARY = NACS[i].E07APRIMARY // string `json:"E07a PRIMARY"`
		TempComplexNACS.DE07APRIMARY = getTableMap(NACS[i].E07APRIMARY) // NACSTable `json:"de07a_primary"`
		TempComplexNACS.E07ACABG = NACS[i].E07ACABG // string `json:"E07a CABG"`
		TempComplexNACS.DE07ACABG = getTableMap(NACS[i].E07ACABG) // NACSTable `json:"de07a_cabg"`
		TempComplexNACS.E07APCI = NACS[i].E07APCI // string `json:"E07a PCI"`
		TempComplexNACS.DE07APCI = getTableMap(NACS[i].E07APCI) // NACSTable `json:"de07a_pci"`
		TempComplexNACS.E07BPRIMARY = NACS[i].E07BPRIMARY // string `json:"E07b PRIMARY"`
		TempComplexNACS.DE07BPRIMARY = getTableMap(NACS[i].E07BPRIMARY) // NACSTable `json:"de07b_primary"`
		TempComplexNACS.E07BCABG = NACS[i].E07BCABG // string `json:"E07b CABG"`
		TempComplexNACS.DE07BCABG = getTableMap(NACS[i].E07BCABG) // NACSTable `json:"de07b_cabg"`
		TempComplexNACS.E07BPCI = NACS[i].E07BPCI // string `json:"E07b PCI"`
		TempComplexNACS.DE07BPCI = getTableMap(NACS[i].E07BPCI) // NACSTable `json:"de07b_pci"`
		TempComplexNACS.E07CPRIMARY = NACS[i].E07CPRIMARY // string `json:"E07c PRIMARY"`
		TempComplexNACS.DE07CPRIMARY = getTableMap(NACS[i].E07CPRIMARY) // NACSTable `json:"de07c_primary"`
		TempComplexNACS.E07CCABG = NACS[i].E07CCABG // string `json:"E07c CABG"`
		TempComplexNACS.DE07CCABG = getTableMap(NACS[i].E07CCABG) // NACSTable `json:"de07c_cabg"`
		TempComplexNACS.E07CPCI = NACS[i].E07CPCI // string `json:"E07c PCI"`
		TempComplexNACS.DE07CPCI = getTableMap(NACS[i].E07CPCI) // NACSTable `json:"de07c_pci"`
		TempComplexNACS.F01PRIMARY = NACS[i].F01PRIMARY // string `json:"F01 PRIMARY"`
		TempComplexNACS.DF01PRIMARY = getTableMap(NACS[i].F01PRIMARY) // NACSTable `json:"df01_primary"`
		TempComplexNACS.F02PRIMARY = NACS[i].F02PRIMARY // string `json:"F02 PRIMARY"`
		TempComplexNACS.DF02PRIMARY = getTableMap(NACS[i].F02PRIMARY) // NACSTable `json:"df02_primary"`
		TempComplexNACS.F03PRIMARY = NACS[i].F03PRIMARY // string `json:"F03 PRIMARY"`
		TempComplexNACS.DF03PRIMARY = getTableMap(NACS[i].F03PRIMARY) // NACSTable `json:"df03_primary"`
		TempComplexNACS.F03CABGPCI = NACS[i].F03CABGPCI // string `json:"F03 CABG PCI"`
		TempComplexNACS.DF03CABGPCI = getTableMap(NACS[i].F03CABGPCI) // NACSTable `json:"df03_cabg_pci"`
		TempComplexNACS.F03CABG = NACS[i].F03CABG // string `json:"F03 CABG"`
		TempComplexNACS.DF03CABG = getTableMap(NACS[i].F03CABG) // NACSTable `json:"df03_cabg"`
		TempComplexNACS.F03PCI = NACS[i].F03PCI // string `json:"F03 PCI"`
		TempComplexNACS.DF03PCI = getTableMap(NACS[i].F03PCI) // NACSTable `json:"df03_pci"`
		TempComplexNACS.F04PRIMARY = NACS[i].F04PRIMARY // string `json:"F04 PRIMARY"`
		TempComplexNACS.DF04PRIMARY = getTableMap(NACS[i].F04PRIMARY) // NACSTable `json:"df04_primary"`
		TempComplexNACS.F04CABGPCI = NACS[i].F04CABGPCI // string `json:"F04 CABG PCI"`
		TempComplexNACS.DF04CABGPCI = getTableMap(NACS[i].F04CABGPCI) // NACSTable `json:"df04_cabg_pci"`
		TempComplexNACS.F04CABG = NACS[i].F04CABG // string `json:"F04 CABG"`
		TempComplexNACS.DF04CABG = getTableMap(NACS[i].F04CABG) // NACSTable `json:"df04_cabg"`
		TempComplexNACS.F04PCI = NACS[i].F04PCI // string `json:"F04 PCI"`
		TempComplexNACS.DF04PCI = getTableMap(NACS[i].F04PCI) // NACSTable `json:"df04_pci"`
		fmt.Println("TempComplexNACS.INDEXIndex: " + TempComplexNACS.INDEX)
		fmt.Println("==========================================")
		ComplexNACS = append(ComplexNACS, TempComplexNACS)
	}
	// // Save TempComplex to a ComplexNACS Slice
	// ComplexJson, _ := json.Marshal(ComplexNACS)
	// err = ioutil.WriteFile("COMPLEX_NACSv01.json", ComplexJson, 0644)
	// fmt.Printf("%+v", ComplexJson)
	// create local json files
	fmt.Println("========= /////////////////////// ==========")
	fmt.Println("========= writing JSON file ==========")
	jsonFile, err := os.Create("./ComplexNACSv03_OUT.json")
	if err != nil {
		panic(err)
	}
	// NOTE(review): jsonFile is closed both explicitly below and via this
	// defer; the second Close returns an (ignored) error. Marshal/Write
	// errors are also ignored here.
	defer jsonFile.Close()
	//b, err := json.Marshal(pages.A)
	b, err := json.Marshal(ComplexNACS)
	jsonFile.Write(b)
	jsonFile.Close()
	fmt.Println("JSON data written to ", jsonFile.Name())
	// Upload each combined record to Firestore, one document per INDEX.
	sa := option.WithCredentialsFile("./scai-qit-firebase-adminsdk.json")
	app, err := firebase.NewApp(context.Background(), nil, sa)
	if err != nil {
		log.Fatal(err)
	}
	client2, err := app.Firestore(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for i := 0; i < len(ComplexNACS); i++ {
		_, err := client2.Collection("COMPLEX_NONACS").Doc(ComplexNACS[i].INDEX).Set(context.Background(), ComplexNACS[i])
		if err != nil {
			log.Fatal(err)
		}
	}
}
//=============================================================
// getTableMap resolves lookup against the global TableMap, printing the entry
// (or the zero value on a miss) for debugging, and returns it.
func getTableMap(lookup string) NACSTable {
	entry := TableMap[lookup]
	fmt.Println("======================")
	fmt.Println(entry.INDICATION)
	fmt.Println(entry)
	return entry
}
//=============================================================
// NACSTable is one entry of the score-table metadata decoded from
// NACS_TABLE_EDIT.json; entries are keyed in TableMap by Name.
type NACSTable struct {
	Name             string   `json:"name"`
	INDICATION       string   `json:"indication"`
	TermDescription  []string `json:"termdescription"`
	Q01CAT           string   `json:"q01"`
	SCORE            string   `json:"score"`
	SCOREDEFINITIONS string   `json:"score_def"`
	SCOREGRAPHIC     string   `json:"score_graphic"`
}
// NONACS mirrors one raw record of the source NACS data set. The JSON
// keys reproduce the original column headings verbatim (spaces, dashes
// and trailing colons included), so this struct must only be changed in
// lock-step with the source file's headers.
type NONACS struct {
	Title   string `json:"title"`
	INDEX   string `json:"INDEX"`    // record identifier, also used as the Firestore doc ID downstream
	DEADEND string `json:"DEAD_END"`
	// Q* fields: questionnaire answers, long-heading and short-code variants.
	Q01IschemicSymptoms           string `json:"Q01 - Ischemic Symptoms"`
	Q02AntiIschemicMedicalTherapy string `json:"Q02 - Anti-ischemic Medical Therapy:"`
	Q03NonInvasiveTestResults     string `json:"Q03 - Non-invasive Test Results:"`
	Q04PRIORCABG                  string `json:"Q04 - PRIOR CABG"`
	Q01                           string `json:"Q01"`
	Q02                           string `json:"Q02"`
	Q03                           string `json:"Q03"`
	Q04                           string `json:"Q04"`
	// E* fields: per-scenario codes, one PRIMARY/CABG/PCI triple each.
	E01PRIMARY  string `json:"E01 PRIMARY"`
	E01CABG     string `json:"E01 CABG"`
	E01PCI      string `json:"E01 PCI"`
	E02PRIMARY  string `json:"E02 PRIMARY"`
	E02CABG     string `json:"E02 CABG"`
	E02PCI      string `json:"E02 PCI"`
	E03PRIMARY  string `json:"E03 PRIMARY"`
	E03CABG     string `json:"E03 CABG"`
	E03PCI      string `json:"E03 PCI"`
	E04PRIMARY  string `json:"E04 PRIMARY"`
	E04CABG     string `json:"E04 CABG"`
	E04PCI      string `json:"E04 PCI"`
	E05PRIMARY  string `json:"E05 PRIMARY"`
	E05CABG     string `json:"E05 CABG "` // NOTE(review): trailing space in key; COMPLEX_NONACS uses "E05 CABG" — confirm which one matches the source JSON
	E05PCI      string `json:"E05 PCI"`
	E05APRIMARY string `json:"E05a PRIMARY"`
	E05ACABG    string `json:"E05a CABG"`
	E05APCI     string `json:"E05a PCI"`
	E05BPRIMARY string `json:"E05b PRIMARY"`
	E05BCABG    string `json:"E05b CABG"`
	E05BPCI     string `json:"E05b PCI"`
	E05CPRIMARY string `json:"E05c PRIMARY"`
	E05CCABG    string `json:"E05c CABG"`
	E05CPCI     string `json:"E05c PCI"`
	E06PRIMARY  string `json:"E06 PRIMARY"`
	E06CABG     string `json:"E06 CABG"`
	E06PCI      string `json:"E06 PCI"`
	E06APRIMARY string `json:"E06a PRIMARY"`
	E06ACABG    string `json:"E06a CABG"`
	E06APCI     string `json:"E06a PCI"`
	E06BPRIMARY string `json:"E06b PRIMARY"`
	E06BCABG    string `json:"E06b CABG"`
	E06BPCI     string `json:"E06b PCI"`
	E06CPRIMARY string `json:"E06c PRIMARY"`
	E06CCABG    string `json:"E06c CABG"`
	E06CPCI     string `json:"E06c PCI"`
	E07PRIMARY  string `json:"E07 PRIMARY"`
	E07CABG     string `json:"E07 CABG"`
	E07PCI      string `json:"E07 PCI"`
	E07APRIMARY string `json:"E07a PRIMARY"`
	E07ACABG    string `json:"E07a CABG"`
	E07APCI     string `json:"E07a PCI"`
	E07BPRIMARY string `json:"E07b PRIMARY"`
	E07BCABG    string `json:"E07b CABG"`
	E07BPCI     string `json:"E07b PCI"`
	E07CPRIMARY string `json:"E07c PRIMARY"`
	E07CCABG    string `json:"E07c CABG"`
	E07CPCI     string `json:"E07c PCI"`
	// F* fields: follow-up scenario codes (F03/F04 add a combined CABG PCI column).
	F01PRIMARY string `json:"F01 PRIMARY"`
	F02PRIMARY string `json:"F02 PRIMARY"`
	F03PRIMARY string `json:"F03 PRIMARY"`
	F03CABGPCI string `json:"F03 CABG PCI"`
	F03CABG    string `json:"F03 CABG"`
	F03PCI     string `json:"F03 PCI"`
	F04PRIMARY string `json:"F04 PRIMARY"`
	F04CABGPCI string `json:"F04 CABG PCI"`
	F04CABG    string `json:"F04 CABG"`
	F04PCI     string `json:"F04 PCI"`
}
// COMPLEX_NONACS is the enriched form of a NONACS record: every raw
// code field (e.g. F04CABG) is paired with a D-prefixed NACSTable field
// (e.g. DF04CABG) holding the table entry resolved via getTableMap.
// Instances are marshalled to the local JSON output file and written to
// the Firestore "COMPLEX_NONACS" collection keyed by INDEX.
// NOTE(review): the underscore name is non-idiomatic Go but is kept
// because existing code references this exact identifier.
type COMPLEX_NONACS struct {
	Title   string `json:"title"`
	INDEX   string `json:"INDEX"` // record identifier; used as the Firestore doc ID
	DEADEND string `json:"DEAD_END"`
	// Questionnaire answers (long-heading and short-code variants).
	Q01IschemicSymptoms           string `json:"Q01 - Ischemic Symptoms"`
	Q02AntiIschemicMedicalTherapy string `json:"Q02 - Anti-ischemic Medical Therapy:"`
	Q03NonInvasiveTestResults     string `json:"Q03 - Non-invasive Test Results:"`
	Q04PRIORCABG                  string `json:"Q04 - PRIOR CABG"`
	Q01                           string `json:"Q01"`
	Q02                           string `json:"Q02"`
	Q03                           string `json:"Q03"`
	Q04                           string `json:"Q04"`
	// E* scenario codes, each immediately followed by its resolved table.
	E01PRIMARY   string    `json:"E01 PRIMARY"`
	DE01PRIMARY  NACSTable `json:"de01_primary"`
	E01CABG      string    `json:"E01 CABG"`
	DE01CABG     NACSTable `json:"de01_cabg"`
	E01PCI       string    `json:"E01 PCI"`
	DE01PCI      NACSTable `json:"de01_pci"`
	E02PRIMARY   string    `json:"E02 PRIMARY"`
	DE02PRIMARY  NACSTable `json:"de02_primary"`
	E02CABG      string    `json:"E02 CABG"`
	DE02CABG     NACSTable `json:"de02_cabg"`
	E02PCI       string    `json:"E02 PCI"`
	DE02PCI      NACSTable `json:"de02_pci"`
	E03PRIMARY   string    `json:"E03 PRIMARY"`
	DE03PRIMARY  NACSTable `json:"de03_primary"`
	E03CABG      string    `json:"E03 CABG"`
	DE03CABG     NACSTable `json:"de03_cabg"`
	E03PCI       string    `json:"E03 PCI"`
	DE03PCI      NACSTable `json:"de03_pci"`
	E04PRIMARY   string    `json:"E04 PRIMARY"`
	DE04PRIMARY  NACSTable `json:"de04_primary"`
	E04CABG      string    `json:"E04 CABG"`
	DE04CABG     NACSTable `json:"de04_cabg"`
	E04PCI       string    `json:"E04 PCI"`
	DE04PCI      NACSTable `json:"de04_pci"`
	E05PRIMARY   string    `json:"E05 PRIMARY"`
	DE05PRIMARY  NACSTable `json:"de05_primary"`
	E05CABG      string    `json:"E05 CABG"` // no trailing space here, unlike NONACS.E05CABG's tag
	DE05CABG     NACSTable `json:"de05_cabg"`
	E05PCI       string    `json:"E05 PCI"`
	DE05PCI      NACSTable `json:"de05_pci"`
	E05APRIMARY  string    `json:"E05a PRIMARY"`
	DE05APRIMARY NACSTable `json:"de05a_primary"`
	E05ACABG     string    `json:"E05a CABG"`
	DE05ACABG    NACSTable `json:"de05a_cabg"`
	E05APCI      string    `json:"E05a PCI"`
	DE05APCI     NACSTable `json:"de05a_pci"`
	E05BPRIMARY  string    `json:"E05b PRIMARY"`
	DE05BPRIMARY NACSTable `json:"de05b_primary"`
	E05BCABG     string    `json:"E05b CABG"`
	DE05BCABG    NACSTable `json:"de05b_cabg"`
	E05BPCI      string    `json:"E05b PCI"`
	DE05BPCI     NACSTable `json:"de05b_pci"`
	E05CPRIMARY  string    `json:"E05c PRIMARY"`
	DE05CPRIMARY NACSTable `json:"de05c_primary"`
	E05CCABG     string    `json:"E05c CABG"`
	DE05CCABG    NACSTable `json:"de05c_cabg"`
	E05CPCI      string    `json:"E05c PCI"`
	DE05CPCI     NACSTable `json:"de05c_pci"`
	E06PRIMARY   string    `json:"E06 PRIMARY"`
	DE06PRIMARY  NACSTable `json:"de06_primary"`
	E06CABG      string    `json:"E06 CABG"`
	DE06CABG     NACSTable `json:"de06_cabg"`
	E06PCI       string    `json:"E06 PCI"`
	DE06PCI      NACSTable `json:"de06_pci"`
	E06APRIMARY  string    `json:"E06a PRIMARY"`
	DE06APRIMARY NACSTable `json:"de06a_primary"`
	E06ACABG     string    `json:"E06a CABG"`
	DE06ACABG    NACSTable `json:"de06a_cabg"`
	E06APCI      string    `json:"E06a PCI"`
	DE06APCI     NACSTable `json:"de06a_pci"`
	E06BPRIMARY  string    `json:"E06b PRIMARY"`
	DE06BPRIMARY NACSTable `json:"de06b_primary"`
	E06BCABG     string    `json:"E06b CABG"`
	DE06BCABG    NACSTable `json:"de06b_cabg"`
	E06BPCI      string    `json:"E06b PCI"`
	DE06BPCI     NACSTable `json:"de06b_pci"`
	E06CPRIMARY  string    `json:"E06c PRIMARY"`
	DE06CPRIMARY NACSTable `json:"de06c_primary"`
	E06CCABG     string    `json:"E06c CABG"`
	DE06CCABG    NACSTable `json:"de06c_cabg"`
	E06CPCI      string    `json:"E06c PCI"`
	DE06CPCI     NACSTable `json:"de06c_pci"`
	E07PRIMARY   string    `json:"E07 PRIMARY"`
	DE07PRIMARY  NACSTable `json:"de07_primary"`
	E07CABG      string    `json:"E07 CABG"`
	DE07CABG     NACSTable `json:"de07_cabg"`
	E07PCI       string    `json:"E07 PCI"`
	DE07PCI      NACSTable `json:"de07_pci"`
	E07APRIMARY  string    `json:"E07a PRIMARY"`
	DE07APRIMARY NACSTable `json:"de07a_primary"`
	E07ACABG     string    `json:"E07a CABG"`
	DE07ACABG    NACSTable `json:"de07a_cabg"`
	E07APCI      string    `json:"E07a PCI"`
	DE07APCI     NACSTable `json:"de07a_pci"`
	E07BPRIMARY  string    `json:"E07b PRIMARY"`
	DE07BPRIMARY NACSTable `json:"de07b_primary"`
	E07BCABG     string    `json:"E07b CABG"`
	DE07BCABG    NACSTable `json:"de07b_cabg"`
	E07BPCI      string    `json:"E07b PCI"`
	DE07BPCI     NACSTable `json:"de07b_pci"`
	E07CPRIMARY  string    `json:"E07c PRIMARY"`
	DE07CPRIMARY NACSTable `json:"de07c_primary"`
	E07CCABG     string    `json:"E07c CABG"`
	DE07CCABG    NACSTable `json:"de07c_cabg"`
	E07CPCI      string    `json:"E07c PCI"`
	DE07CPCI     NACSTable `json:"de07c_pci"`
	// F* follow-up codes with their resolved tables.
	F01PRIMARY  string    `json:"F01 PRIMARY"`
	DF01PRIMARY NACSTable `json:"df01_primary"`
	F02PRIMARY  string    `json:"F02 PRIMARY"`
	DF02PRIMARY NACSTable `json:"df02_primary"`
	F03PRIMARY  string    `json:"F03 PRIMARY"`
	DF03PRIMARY NACSTable `json:"df03_primary"`
	F03CABGPCI  string    `json:"F03 CABG PCI"`
	DF03CABGPCI NACSTable `json:"df03_cabg_pci"`
	F03CABG     string    `json:"F03 CABG"`
	DF03CABG    NACSTable `json:"df03_cabg"`
	F03PCI      string    `json:"F03 PCI"`
	DF03PCI     NACSTable `json:"df03_pci"`
	F04PRIMARY  string    `json:"F04 PRIMARY"`
	DF04PRIMARY NACSTable `json:"df04_primary"`
	F04CABGPCI  string    `json:"F04 CABG PCI"`
	DF04CABGPCI NACSTable `json:"df04_cabg_pci"`
	F04CABG     string    `json:"F04 CABG"`
	DF04CABG    NACSTable `json:"df04_cabg"`
	F04PCI      string    `json:"F04 PCI"`
	DF04PCI     NACSTable `json:"df04_pci"`
}
|
package tests_test
import (
"sigs.k8s.io/kustomize/k8sdeps/kunstruct"
"sigs.k8s.io/kustomize/k8sdeps/transformer"
"sigs.k8s.io/kustomize/pkg/fs"
"sigs.k8s.io/kustomize/pkg/loader"
"sigs.k8s.io/kustomize/pkg/resmap"
"sigs.k8s.io/kustomize/pkg/resource"
"sigs.k8s.io/kustomize/pkg/target"
"testing"
)
// writeKubebenchBase populates the in-memory test harness with every
// manifest making up the kubebench "base" kustomize target, plus the
// kustomization itself, so tests can render the target without touching
// the real filesystem. The YAML bodies must stay byte-identical to the
// checked-in files under manifests/kubebench/base.
func writeKubebenchBase(th *KustTestHarness) {
	// RBAC: bind the operator ClusterRole to the default ServiceAccount.
	th.writeF("/manifests/kubebench/base/cluster-role-binding.yaml", `
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubebench-operator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubebench-operator
subjects:
- kind: ServiceAccount
  name: default
`)
	// Cluster-wide permissions needed by the kubebench operator.
	th.writeF("/manifests/kubebench/base/cluster-role.yaml", `
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: kubebench-operator
rules:
- apiGroups:
  - kubebench.operator
  resources:
  - kubebenchjobs.kubebench.operator
  - kubebenchjobs
  verbs:
  - create
  - update
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - configmaps
  - pods
  - pods/exec
  - services
  - endpoints
  - persistentvolumeclaims
  - events
  - secrets
  verbs:
  - '*'
- apiGroups:
  - kubeflow.org
  resources:
  - tfjobs
  - pytorchjobs
  verbs:
  - '*'
- apiGroups:
  - argoproj.io
  resources:
  - workflows
  verbs:
  - '*'
`)
	// CRD registering the KubebenchJob custom resource.
	th.writeF("/manifests/kubebench/base/crd.yaml", `
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: kubebenchjobs.kubebench.operator
spec:
  group: kubebench.operator
  names:
    kind: KubebenchJob
    plural: kubebenchjobs
  scope: Namespaced
  version: v1
`)
	// Deployments for the dashboard and the operator.
	th.writeF("/manifests/kubebench/base/deployment.yaml", `
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: kubebench-dashboard
  name: kubebench-dashboard
spec:
  template:
    metadata:
      labels:
        app: kubebench-dashboard
    spec:
      containers:
      - image: gcr.io/kubeflow-images-public/kubebench/kubebench-dashboard:v0.4.0-13-g262c593
        name: kubebench-dashboard
        ports:
        - containerPort: 8084
      serviceAccountName: kubebench-dashboard
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kubebench-operator
spec:
  selector:
    matchLabels:
      app: kubebench-operator
  template:
    metadata:
      labels:
        app: kubebench-operator
    spec:
      containers:
      - image: gcr.io/kubeflow-images-public/kubebench/kubebench-operator:v0.4.0-13-g262c593
        name: kubebench-operator
      serviceAccountName: kubebench-operator
`)
	// Namespaced RoleBindings for the dashboard and the job runner SA.
	th.writeF("/manifests/kubebench/base/role-binding.yaml", `
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  labels:
    app: kubebench-dashboard
  name: kubebench-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubebench-dashboard
subjects:
- kind: ServiceAccount
  name: kubebench-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: kubebench-user-kubebench-job
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubebench-user-kubebench-job
subjects:
- kind: ServiceAccount
  name: kubebench-user-kubebench-job
`)
	// Namespaced Roles granting the dashboard read access and the job SA
	// broad access to the resources a benchmark run creates.
	th.writeF("/manifests/kubebench/base/role.yaml", `
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  labels:
    app: kubebench-dashboard
  name: kubebench-dashboard
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - pods/exec
  - pods/log
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: kubebench-user-kubebench-job
rules:
- apiGroups:
  - kubeflow.org
  resources:
  - tfjobs
  - pytorchjobs
  - mpijobs
  verbs:
  - '*'
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - '*'
- apiGroups:
  - storage.k8s.io
  resources:
  - storageclasses
  verbs:
  - '*'
- apiGroups:
  - batch
  resources:
  - jobs
  verbs:
  - '*'
- apiGroups:
  - ""
  resources:
  - configmaps
  - pods
  - pods/log
  - pods/exec
  - services
  - endpoints
  - persistentvolumeclaims
  - events
  verbs:
  - '*'
- apiGroups:
  - apps
  - extensions
  resources:
  - deployments
  verbs:
  - '*'
`)
	// ServiceAccounts referenced by the bindings above.
	th.writeF("/manifests/kubebench/base/service-account.yaml", `
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kubebench-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: default
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kubebench-user-kubebench-job
`)
	// Dashboard Service; the annotation carries an embedded Ambassador
	// Mapping and references the $(namespace) kustomize var.
	th.writeF("/manifests/kubebench/base/service.yaml", `
apiVersion: v1
kind: Service
metadata:
  annotations:
    getambassador.io/config: |-
      ---
      apiVersion: ambassador/v0
      kind: Mapping
      name: kubebench-dashboard-ui-mapping
      prefix: /dashboard/
      rewrite: /dashboard/
      service: kubebench-dashboard.$(namespace)
  name: kubebench-dashboard
spec:
  ports:
  - port: 80
    targetPort: 9303
  selector:
    app: kubebench-dashboard
`)
	// Istio VirtualService; host uses the $(namespace)/$(clusterDomain) vars.
	th.writeF("/manifests/kubebench/base/virtual-service.yaml", `
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: kubebench-dashboard
spec:
  gateways:
  - kubeflow-gateway
  hosts:
  - '*'
  http:
  - match:
    - uri:
        prefix: /dashboard/
    rewrite:
      uri: /dashboard/
    route:
    - destination:
        host: kubebench-dashboard.$(namespace).svc.$(clusterDomain)
        port:
          number: 80
`)
	// Argo Workflow driving one benchmark run:
	// configurator -> main job -> monitor -> post-job -> reporter.
	th.writeF("/manifests/kubebench/base/workflow.yaml", `
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  name: kubebench-job
spec:
  entrypoint: kubebench-workflow
  serviceAccountName: kubebench-user-kubebench-job
  templates:
  - name: kubebench-workflow
    steps:
    - - name: run-configurator
        template: configurator
    - - arguments:
          parameters:
          - name: kf-job-manifest
            value: '{{steps.run-configurator.outputs.parameters.kf-job-manifest}}'
          - name: experiment-id
            value: '{{steps.run-configurator.outputs.parameters.experiment-id}}'
        name: launch-main-job
        template: main-job
    - - arguments:
          parameters:
          - name: kf-job-manifest
            value: '{{steps.run-configurator.outputs.parameters.kf-job-manifest}}'
        name: wait-for-main-job
        template: main-job-monitor
    - - arguments:
          parameters:
          - name: kf-job-manifest
            value: '{{steps.run-configurator.outputs.parameters.kf-job-manifest}}'
          - name: experiment-id
            value: '{{steps.run-configurator.outputs.parameters.experiment-id}}'
        name: run-post-job
        template: post-job
    - - arguments:
          parameters:
          - name: kf-job-manifest
            value: '{{steps.run-configurator.outputs.parameters.kf-job-manifest}}'
          - name: experiment-id
            value: '{{steps.run-configurator.outputs.parameters.experiment-id}}'
        name: run-reporter
        template: reporter
  - container:
      command:
      - configurator
      - '--template-ref={"name": "kubebench-example-tfcnn", "package": "kubebench-examples",
        "registry": "github.com/kubeflow/kubebench/tree/master/kubebench"}'
      - --config=tf-cnn/tf-cnn-dummy.yaml
      - --namespace=kubeflow
      - '--owner-references=[{"apiVersion": "argoproj.io/v1alpha1", "blockOwnerDeletion":
        true, "kind": "Workflow", "name": "{{workflow.name}}", "uid": "{{workflow.uid}}"}]'
      - '--volumes=[{"name": "kubebench-config-volume", "persistentVolumeClaim": {"claimName":
        "kubebench-config-pvc"}}, {"name": "kubebench-exp-volume", "persistentVolumeClaim":
        {"claimName": "kubebench-exp-pvc"}}]'
      - '--volume-mounts=[{"mountPath": "/kubebench/config", "name": "kubebench-config-volume"},
        {"mountPath": "/kubebench/experiments", "name": "kubebench-exp-volume"}]'
      - '--env-vars=[{"name": "KUBEBENCH_CONFIG_ROOT", "value": "/kubebench/config"},
        {"name": "KUBEBENCH_EXP_ROOT", "value": "/kubebench/experiments"}, {"name":
        "KUBEBENCH_DATA_ROOT", "value": "/kubebench/data"}, {"name": "KUBEBENCH_EXP_ID",
        "value": "null"}, {"name": "KUBEBENCH_EXP_PATH", "value": "$(KUBEBENCH_EXP_ROOT)/$(KUBEBENCH_EXP_ID)"},
        {"name": "KUBEBENCH_EXP_CONFIG_PATH", "value": "$(KUBEBENCH_EXP_PATH)/config"},
        {"name": "KUBEBENCH_EXP_OUTPUT_PATH", "value": "$(KUBEBENCH_EXP_PATH)/output"},
        {"name": "KUBEBENCH_EXP_RESULT_PATH", "value": "$(KUBEBENCH_EXP_PATH)/result"}]'
      - --manifest-output=/kubebench/configurator/output/kf-job-manifest.yaml
      - --experiment-id-output=/kubebench/configurator/output/experiment-id
      env:
      - name: KUBEBENCH_CONFIG_ROOT
        value: /kubebench/config
      - name: KUBEBENCH_EXP_ROOT
        value: /kubebench/experiments
      - name: KUBEBENCH_DATA_ROOT
        value: /kubebench/data
      image: gcr.io/kubeflow-images-public/kubebench/kubebench-controller:v0.4.0-13-g262c593
      volumeMounts:
      - mountPath: /kubebench/config
        name: kubebench-config-volume
      - mountPath: /kubebench/experiments
        name: kubebench-exp-volume
    name: configurator
    outputs:
      parameters:
      - name: kf-job-manifest
        valueFrom:
          path: /kubebench/configurator/output/kf-job-manifest.yaml
      - name: experiment-id
        valueFrom:
          path: /kubebench/configurator/output/experiment-id
  - inputs:
      parameters:
      - name: kf-job-manifest
    name: main-job
    resource:
      action: create
      manifest: '{{inputs.parameters.kf-job-manifest}}'
      successCondition: status.startTime
  - inputs:
      parameters:
      - name: kf-job-manifest
    name: main-job-monitor
    resource:
      action: get
      manifest: '{{inputs.parameters.kf-job-manifest}}'
      successCondition: status.completionTime
  - container:
      env:
      - name: KUBEBENCH_CONFIG_ROOT
        value: /kubebench/config
      - name: KUBEBENCH_EXP_ROOT
        value: /kubebench/experiments
      - name: KUBEBENCH_DATA_ROOT
        value: /kubebench/data
      - name: KUBEBENCH_EXP_ID
        value: '{{inputs.parameters.experiment-id}}'
      - name: KUBEBENCH_EXP_PATH
        value: $(KUBEBENCH_EXP_ROOT)/$(KUBEBENCH_EXP_ID)
      - name: KUBEBENCH_EXP_CONFIG_PATH
        value: $(KUBEBENCH_EXP_PATH)/config
      - name: KUBEBENCH_EXP_OUTPUT_PATH
        value: $(KUBEBENCH_EXP_PATH)/output
      - name: KUBEBENCH_EXP_RESULT_PATH
        value: $(KUBEBENCH_EXP_PATH)/result
      image: gcr.io/kubeflow-images-public/kubebench/kubebench-example-tf-cnn-post-processor:v0.4.0-13-g262c593
      volumeMounts:
      - mountPath: /kubebench/config
        name: kubebench-config-volume
      - mountPath: /kubebench/experiments
        name: kubebench-exp-volume
    inputs:
      parameters:
      - name: experiment-id
    name: post-job
  - container:
      command:
      - reporter
      - csv
      - --input-file=result.json
      - --output-file=report.csv
      env:
      - name: KUBEBENCH_CONFIG_ROOT
        value: /kubebench/config
      - name: KUBEBENCH_EXP_ROOT
        value: /kubebench/experiments
      - name: KUBEBENCH_DATA_ROOT
        value: /kubebench/data
      - name: KUBEBENCH_EXP_ID
        value: '{{inputs.parameters.experiment-id}}'
      - name: KUBEBENCH_EXP_PATH
        value: $(KUBEBENCH_EXP_ROOT)/$(KUBEBENCH_EXP_ID)
      - name: KUBEBENCH_EXP_CONFIG_PATH
        value: $(KUBEBENCH_EXP_PATH)/config
      - name: KUBEBENCH_EXP_OUTPUT_PATH
        value: $(KUBEBENCH_EXP_PATH)/output
      - name: KUBEBENCH_EXP_RESULT_PATH
        value: $(KUBEBENCH_EXP_PATH)/result
      image: gcr.io/kubeflow-images-public/kubebench/kubebench-controller:v0.4.0-13-g262c593
      volumeMounts:
      - mountPath: /kubebench/config
        name: kubebench-config-volume
      - mountPath: /kubebench/experiments
        name: kubebench-exp-volume
    inputs:
      parameters:
      - name: experiment-id
    name: reporter
  volumes:
  - name: kubebench-config-volume
    persistentVolumeClaim:
      claimName: kubebench-config-pvc
  - name: kubebench-exp-volume
    persistentVolumeClaim:
      claimName: kubebench-exp-pvc
`)
	// Tells kustomize which fields may contain $(var) references.
	th.writeF("/manifests/kubebench/base/params.yaml", `
varReference:
- path: metadata/annotations/getambassador.io\/config
  kind: Service
- path: spec/http/route/destination/host
  kind: VirtualService
`)
	// Source values for the generated "parameters" ConfigMap.
	th.writeF("/manifests/kubebench/base/params.env", `
clusterDomain=cluster.local
`)
	// The kustomization tying all of the above together.
	th.writeK("/manifests/kubebench/base", `
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- cluster-role-binding.yaml
- cluster-role.yaml
- crd.yaml
- deployment.yaml
- role-binding.yaml
- role.yaml
- service-account.yaml
- service.yaml
- virtual-service.yaml
- workflow.yaml
namespace: kubeflow
commonLabels:
  kustomize.component: kubebench
configMapGenerator:
- name: parameters
  env: params.env
images:
- name: gcr.io/kubeflow-images-public/kubebench/kubebench-dashboard
  newName: gcr.io/kubeflow-images-public/kubebench/kubebench-dashboard
  newTag: v0.4.0-13-g262c593
- name: gcr.io/kubeflow-images-public/kubebench/kubebench-operator
  newName: gcr.io/kubeflow-images-public/kubebench/kubebench-operator
  newTag: v0.4.0-13-g262c593
- name: gcr.io/kubeflow-images-public/kubebench/kubebench-controller
  newName: gcr.io/kubeflow-images-public/kubebench/kubebench-controller
  newTag: v0.4.0-13-g262c593
- name: gcr.io/kubeflow-images-public/kubebench/kubebench-example-tf-cnn-post-processor
  newName: gcr.io/kubeflow-images-public/kubebench/kubebench-example-tf-cnn-post-processor
  newTag: v0.4.0-13-g262c593
vars:
- name: namespace
  objref:
    kind: Service
    name: kubebench-dashboard
    apiVersion: v1
  fieldref:
    fieldpath: metadata.namespace
- name: clusterDomain
  objref:
    kind: ConfigMap
    name: parameters
    apiVersion: v1
  fieldref:
    fieldpath: data.clusterDomain
configurations:
- params.yaml
`)
}
// TestKubebenchBase renders the kubebench base target twice — once from
// the in-memory harness copy written by writeKubebenchBase, once from
// the real ../kubebench/base directory — and asserts the two renderings
// are identical.
func TestKubebenchBase(t *testing.T) {
	th := NewKustTestHarness(t, "/manifests/kubebench/base")
	writeKubebenchBase(th)
	// Rendering of the in-memory copy (the "actual" side).
	m, err := th.makeKustTarget().MakeCustomizedResMap()
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	// Rendering of the on-disk manifests (the "expected" side).
	targetPath := "../kubebench/base"
	fsys := fs.MakeRealFS()
	_loader, loaderErr := loader.NewLoader(targetPath, fsys)
	if loaderErr != nil {
		t.Fatalf("could not load kustomize loader: %v", loaderErr)
	}
	rf := resmap.NewFactory(resource.NewFactory(kunstruct.NewKunstructuredFactoryImpl()))
	kt, err := target.NewKustTarget(_loader, rf, transformer.NewFactoryImpl())
	if err != nil {
		// Was th.t.Fatalf; use the local t for consistency with the rest
		// of this test (same *testing.T).
		t.Fatalf("Unexpected construction error %v", err)
	}
	n, err := kt.MakeCustomizedResMap()
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	expected, err := n.EncodeAsYaml()
	// Bug fix: this error was previously ignored, which could let the
	// comparison run against a garbage "expected" value.
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	th.assertActualEqualsExpected(m, string(expected))
}
|
package main
import (
"fmt"
"os"
)
// checkErr reports err on stderr and terminates the process with exit
// code 1. It is a no-op when err is nil.
func checkErr(err error) {
	if err == nil {
		return
	}
	fmt.Fprintf(os.Stderr, "error: %v\n", err)
	os.Exit(1)
}
|
package strategy
import (
"github.com/joshprzybyszewski/cribbage/model"
)
// GiveCribHighestPotential tosses the cards that give the crib the
// highest potential point value.
func GiveCribHighestPotential(_ int, hand []model.Card) ([]model.Card, error) {
	evaluator := newTossEvaluator(false, highestIsBetter)
	return getEvaluatedHand(hand, evaluator)
}
// GiveCribLowestPotential tosses the cards that give the crib the
// lowest potential point value.
func GiveCribLowestPotential(_ int, hand []model.Card) ([]model.Card, error) {
	evaluator := newTossEvaluator(false, lowestIsBetter)
	return getEvaluatedHand(hand, evaluator)
}
|
// Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v20190823
import (
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common"
tchttp "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http"
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile"
)
// APIVersion is the TcaplusDB API version this client speaks.
const APIVersion = "2019-08-23"

// Client is the TcaplusDB API client; it embeds the common SDK client
// for signing and transport.
type Client struct {
	common.Client
}
// NewClientWithSecretId builds a client from a raw secret id/key pair
// using a default client profile.
//
// Deprecated: use NewClient with a common.Credential instead.
func NewClientWithSecretId(secretId, secretKey, region string) (client *Client, err error) {
	cpf := profile.NewClientProfile()
	client = &Client{}
	client.Init(region).WithSecretId(secretId, secretKey).WithProfile(cpf)
	return
}
// NewClient builds a TcaplusDB client for the given credential, region
// and client profile.
func NewClient(credential *common.Credential, region string, clientProfile *profile.ClientProfile) (client *Client, err error) {
	client = &Client{}
	client.Init(region).
		WithCredential(credential).
		WithProfile(clientProfile)
	return
}
// NewClearTablesRequest constructs an empty request carrying the
// tcaplusdb "ClearTables" API metadata.
func NewClearTablesRequest() (request *ClearTablesRequest) {
	request = &ClearTablesRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "ClearTables")
	return
}

// NewClearTablesResponse constructs an empty ClearTables response.
func NewClearTablesResponse() (response *ClearTablesResponse) {
	response = &ClearTablesResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// ClearTables clears the data of the tables specified in the request.
func (c *Client) ClearTables(request *ClearTablesRequest) (response *ClearTablesResponse, err error) {
	if request == nil {
		request = NewClearTablesRequest()
	}
	response = NewClearTablesResponse()
	err = c.Send(request, response)
	return
}
// NewCompareIdlFilesRequest constructs an empty request carrying the
// tcaplusdb "CompareIdlFiles" API metadata.
func NewCompareIdlFilesRequest() (request *CompareIdlFilesRequest) {
	request = &CompareIdlFilesRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "CompareIdlFiles")
	return
}

// NewCompareIdlFilesResponse constructs an empty CompareIdlFiles response.
func NewCompareIdlFilesResponse() (response *CompareIdlFilesResponse) {
	response = &CompareIdlFilesResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// CompareIdlFiles uploads and verifies table-modification IDL files
// against the selected target tables, returning whether the schema
// change is allowed.
func (c *Client) CompareIdlFiles(request *CompareIdlFilesRequest) (response *CompareIdlFilesResponse, err error) {
	if request == nil {
		request = NewCompareIdlFilesRequest()
	}
	response = NewCompareIdlFilesResponse()
	err = c.Send(request, response)
	return
}
// NewCreateAppRequest constructs an empty request carrying the
// tcaplusdb "CreateApp" API metadata.
func NewCreateAppRequest() (request *CreateAppRequest) {
	request = &CreateAppRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "CreateApp")
	return
}

// NewCreateAppResponse constructs an empty CreateApp response.
func NewCreateAppResponse() (response *CreateAppResponse) {
	response = &CreateAppResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// CreateApp creates a TcaplusDB application.
func (c *Client) CreateApp(request *CreateAppRequest) (response *CreateAppResponse, err error) {
	if request == nil {
		request = NewCreateAppRequest()
	}
	response = NewCreateAppResponse()
	err = c.Send(request, response)
	return
}
// NewCreateTablesRequest constructs an empty request carrying the
// tcaplusdb "CreateTables" API metadata.
func NewCreateTablesRequest() (request *CreateTablesRequest) {
	request = &CreateTablesRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "CreateTables")
	return
}

// NewCreateTablesResponse constructs an empty CreateTables response.
func NewCreateTablesResponse() (response *CreateTablesResponse) {
	response = &CreateTablesResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// CreateTables creates tables in batch based on the selected IDL file list.
func (c *Client) CreateTables(request *CreateTablesRequest) (response *CreateTablesResponse, err error) {
	if request == nil {
		request = NewCreateTablesRequest()
	}
	response = NewCreateTablesResponse()
	err = c.Send(request, response)
	return
}
// NewCreateZoneRequest constructs an empty request carrying the
// tcaplusdb "CreateZone" API metadata.
func NewCreateZoneRequest() (request *CreateZoneRequest) {
	request = &CreateZoneRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "CreateZone")
	return
}

// NewCreateZoneResponse constructs an empty CreateZone response.
func NewCreateZoneResponse() (response *CreateZoneResponse) {
	response = &CreateZoneResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// CreateZone creates a zone under a TcaplusDB application.
func (c *Client) CreateZone(request *CreateZoneRequest) (response *CreateZoneResponse, err error) {
	if request == nil {
		request = NewCreateZoneRequest()
	}
	response = NewCreateZoneResponse()
	err = c.Send(request, response)
	return
}
// NewDeleteAppRequest constructs an empty request carrying the
// tcaplusdb "DeleteApp" API metadata.
func NewDeleteAppRequest() (request *DeleteAppRequest) {
	request = &DeleteAppRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "DeleteApp")
	return
}

// NewDeleteAppResponse constructs an empty DeleteApp response.
func NewDeleteAppResponse() (response *DeleteAppResponse) {
	response = &DeleteAppResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// DeleteApp deletes a TcaplusDB application instance. It succeeds only
// after all resources owned by the instance (zones and tables) have
// been released.
func (c *Client) DeleteApp(request *DeleteAppRequest) (response *DeleteAppResponse, err error) {
	if request == nil {
		request = NewDeleteAppRequest()
	}
	response = NewDeleteAppResponse()
	err = c.Send(request, response)
	return
}
// NewDeleteIdlFilesRequest constructs an empty request carrying the
// tcaplusdb "DeleteIdlFiles" API metadata.
func NewDeleteIdlFilesRequest() (request *DeleteIdlFilesRequest) {
	request = &DeleteIdlFilesRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "DeleteIdlFiles")
	return
}

// NewDeleteIdlFilesResponse constructs an empty DeleteIdlFiles response.
func NewDeleteIdlFilesResponse() (response *DeleteIdlFilesResponse) {
	response = &DeleteIdlFilesResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// DeleteIdlFiles deletes the specified IDL files of an application.
// Deletion fails for any file still associated with a table.
func (c *Client) DeleteIdlFiles(request *DeleteIdlFilesRequest) (response *DeleteIdlFilesResponse, err error) {
	if request == nil {
		request = NewDeleteIdlFilesRequest()
	}
	response = NewDeleteIdlFilesResponse()
	err = c.Send(request, response)
	return
}
// NewDeleteTablesRequest constructs an empty request carrying the
// tcaplusdb "DeleteTables" API metadata.
func NewDeleteTablesRequest() (request *DeleteTablesRequest) {
	request = &DeleteTablesRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "DeleteTables")
	return
}

// NewDeleteTablesResponse constructs an empty DeleteTables response.
func NewDeleteTablesResponse() (response *DeleteTablesResponse) {
	response = &DeleteTablesResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// DeleteTables deletes the tables specified in the request.
func (c *Client) DeleteTables(request *DeleteTablesRequest) (response *DeleteTablesResponse, err error) {
	if request == nil {
		request = NewDeleteTablesRequest()
	}
	response = NewDeleteTablesResponse()
	err = c.Send(request, response)
	return
}
// NewDeleteZoneRequest constructs an empty request carrying the
// tcaplusdb "DeleteZone" API metadata.
func NewDeleteZoneRequest() (request *DeleteZoneRequest) {
	request = &DeleteZoneRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "DeleteZone")
	return
}

// NewDeleteZoneResponse constructs an empty DeleteZone response.
func NewDeleteZoneResponse() (response *DeleteZoneResponse) {
	response = &DeleteZoneResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// DeleteZone deletes a zone.
func (c *Client) DeleteZone(request *DeleteZoneRequest) (response *DeleteZoneResponse, err error) {
	if request == nil {
		request = NewDeleteZoneRequest()
	}
	response = NewDeleteZoneResponse()
	err = c.Send(request, response)
	return
}
// NewDescribeAppsRequest constructs an empty request carrying the
// tcaplusdb "DescribeApps" API metadata.
func NewDescribeAppsRequest() (request *DescribeAppsRequest) {
	request = &DescribeAppsRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "DescribeApps")
	return
}

// NewDescribeAppsResponse constructs an empty DescribeApps response.
func NewDescribeAppsResponse() (response *DescribeAppsResponse) {
	response = &DescribeAppsResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// DescribeApps queries the list of TcaplusDB applications, including
// detailed application information.
func (c *Client) DescribeApps(request *DescribeAppsRequest) (response *DescribeAppsResponse, err error) {
	if request == nil {
		request = NewDescribeAppsRequest()
	}
	response = NewDescribeAppsResponse()
	err = c.Send(request, response)
	return
}
// NewDescribeIdlFileInfosRequest constructs an empty request carrying
// the tcaplusdb "DescribeIdlFileInfos" API metadata.
func NewDescribeIdlFileInfosRequest() (request *DescribeIdlFileInfosRequest) {
	request = &DescribeIdlFileInfosRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "DescribeIdlFileInfos")
	return
}

// NewDescribeIdlFileInfosResponse constructs an empty DescribeIdlFileInfos response.
func NewDescribeIdlFileInfosResponse() (response *DescribeIdlFileInfosResponse) {
	response = &DescribeIdlFileInfosResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// DescribeIdlFileInfos queries the details of table description (IDL) files.
func (c *Client) DescribeIdlFileInfos(request *DescribeIdlFileInfosRequest) (response *DescribeIdlFileInfosResponse, err error) {
	if request == nil {
		request = NewDescribeIdlFileInfosRequest()
	}
	response = NewDescribeIdlFileInfosResponse()
	err = c.Send(request, response)
	return
}
// NewDescribeRegionsRequest constructs an empty request carrying the
// tcaplusdb "DescribeRegions" API metadata.
func NewDescribeRegionsRequest() (request *DescribeRegionsRequest) {
	request = &DescribeRegionsRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "DescribeRegions")
	return
}

// NewDescribeRegionsResponse constructs an empty DescribeRegions response.
func NewDescribeRegionsResponse() (response *DescribeRegionsResponse) {
	response = &DescribeRegionsResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// DescribeRegions queries the list of regions supported by the
// TcaplusDB service.
func (c *Client) DescribeRegions(request *DescribeRegionsRequest) (response *DescribeRegionsResponse, err error) {
	if request == nil {
		request = NewDescribeRegionsRequest()
	}
	response = NewDescribeRegionsResponse()
	err = c.Send(request, response)
	return
}
// NewDescribeTablesRequest constructs an empty request carrying the
// tcaplusdb "DescribeTables" API metadata.
func NewDescribeTablesRequest() (request *DescribeTablesRequest) {
	request = &DescribeTablesRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "DescribeTables")
	return
}

// NewDescribeTablesResponse constructs an empty DescribeTables response.
func NewDescribeTablesResponse() (response *DescribeTablesResponse) {
	response = &DescribeTablesResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// DescribeTables queries table details.
func (c *Client) DescribeTables(request *DescribeTablesRequest) (response *DescribeTablesResponse, err error) {
	if request == nil {
		request = NewDescribeTablesRequest()
	}
	response = NewDescribeTablesResponse()
	err = c.Send(request, response)
	return
}
// NewDescribeTablesInRecycleRequest constructs an empty request carrying
// the tcaplusdb "DescribeTablesInRecycle" API metadata.
func NewDescribeTablesInRecycleRequest() (request *DescribeTablesInRecycleRequest) {
	request = &DescribeTablesInRecycleRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "DescribeTablesInRecycle")
	return
}

// NewDescribeTablesInRecycleResponse constructs an empty DescribeTablesInRecycle response.
func NewDescribeTablesInRecycleResponse() (response *DescribeTablesInRecycleResponse) {
	response = &DescribeTablesInRecycleResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// DescribeTablesInRecycle queries details of tables in the recycle bin.
func (c *Client) DescribeTablesInRecycle(request *DescribeTablesInRecycleRequest) (response *DescribeTablesInRecycleResponse, err error) {
	if request == nil {
		request = NewDescribeTablesInRecycleRequest()
	}
	response = NewDescribeTablesInRecycleResponse()
	err = c.Send(request, response)
	return
}
// NewDescribeTasksRequest constructs an empty request carrying the
// tcaplusdb "DescribeTasks" API metadata.
func NewDescribeTasksRequest() (request *DescribeTasksRequest) {
	request = &DescribeTasksRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "DescribeTasks")
	return
}

// NewDescribeTasksResponse constructs an empty DescribeTasks response.
func NewDescribeTasksResponse() (response *DescribeTasksResponse) {
	response = &DescribeTasksResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// DescribeTasks queries the task list.
func (c *Client) DescribeTasks(request *DescribeTasksRequest) (response *DescribeTasksResponse, err error) {
	if request == nil {
		request = NewDescribeTasksRequest()
	}
	response = NewDescribeTasksResponse()
	err = c.Send(request, response)
	return
}
// NewDescribeUinInWhitelistRequest builds a DescribeUinInWhitelist
// request carrying the tcaplusdb API metadata.
func NewDescribeUinInWhitelistRequest() (request *DescribeUinInWhitelistRequest) {
	request = &DescribeUinInWhitelistRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "DescribeUinInWhitelist")
	return
}

// NewDescribeUinInWhitelistResponse builds an empty response.
func NewDescribeUinInWhitelistResponse() (response *DescribeUinInWhitelistResponse) {
	response = &DescribeUinInWhitelistResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// DescribeUinInWhitelist checks whether the current user is whitelisted
// to create TDR-type apps or tables. A nil request is replaced with a
// default one.
func (c *Client) DescribeUinInWhitelist(request *DescribeUinInWhitelistRequest) (response *DescribeUinInWhitelistResponse, err error) {
	if request == nil {
		request = NewDescribeUinInWhitelistRequest()
	}
	response = NewDescribeUinInWhitelistResponse()
	err = c.Send(request, response)
	return
}
// NewDescribeZonesRequest builds a DescribeZones request carrying the
// tcaplusdb API metadata.
func NewDescribeZonesRequest() (request *DescribeZonesRequest) {
	request = &DescribeZonesRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "DescribeZones")
	return
}

// NewDescribeZonesResponse builds an empty DescribeZones response.
func NewDescribeZonesResponse() (response *DescribeZonesResponse) {
	response = &DescribeZonesResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// DescribeZones queries the zone list. A nil request is replaced with a
// default one.
func (c *Client) DescribeZones(request *DescribeZonesRequest) (response *DescribeZonesResponse, err error) {
	if request == nil {
		request = NewDescribeZonesRequest()
	}
	response = NewDescribeZonesResponse()
	err = c.Send(request, response)
	return
}
// NewModifyAppNameRequest builds a ModifyAppName request carrying the
// tcaplusdb API metadata.
func NewModifyAppNameRequest() (request *ModifyAppNameRequest) {
	request = &ModifyAppNameRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "ModifyAppName")
	return
}

// NewModifyAppNameResponse builds an empty ModifyAppName response.
func NewModifyAppNameResponse() (response *ModifyAppNameResponse) {
	response = &ModifyAppNameResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// ModifyAppName renames the specified application. A nil request is
// replaced with a default one.
func (c *Client) ModifyAppName(request *ModifyAppNameRequest) (response *ModifyAppNameResponse, err error) {
	if request == nil {
		request = NewModifyAppNameRequest()
	}
	response = NewModifyAppNameResponse()
	err = c.Send(request, response)
	return
}
// NewModifyAppPasswordRequest builds a ModifyAppPassword request carrying
// the tcaplusdb API metadata.
func NewModifyAppPasswordRequest() (request *ModifyAppPasswordRequest) {
	request = &ModifyAppPasswordRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "ModifyAppPassword")
	return
}

// NewModifyAppPasswordResponse builds an empty ModifyAppPassword response.
func NewModifyAppPasswordResponse() (response *ModifyAppPasswordResponse) {
	response = &ModifyAppPasswordResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// ModifyAppPassword changes the password of the app instance identified
// by AppInstanceId. The backend accepts both the old and the new password
// until the old one expires; while the old password is still valid no new
// password change may be submitted, and once it has expired its expiry
// time can no longer be modified. A nil request is replaced with a
// default one.
func (c *Client) ModifyAppPassword(request *ModifyAppPasswordRequest) (response *ModifyAppPasswordResponse, err error) {
	if request == nil {
		request = NewModifyAppPasswordRequest()
	}
	response = NewModifyAppPasswordResponse()
	err = c.Send(request, response)
	return
}
// NewModifyTableMemosRequest builds a ModifyTableMemos request carrying
// the tcaplusdb API metadata.
func NewModifyTableMemosRequest() (request *ModifyTableMemosRequest) {
	request = &ModifyTableMemosRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "ModifyTableMemos")
	return
}

// NewModifyTableMemosResponse builds an empty ModifyTableMemos response.
func NewModifyTableMemosResponse() (response *ModifyTableMemosResponse) {
	response = &ModifyTableMemosResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// ModifyTableMemos updates table memo (remark) information. A nil request
// is replaced with a default one.
func (c *Client) ModifyTableMemos(request *ModifyTableMemosRequest) (response *ModifyTableMemosResponse, err error) {
	if request == nil {
		request = NewModifyTableMemosRequest()
	}
	response = NewModifyTableMemosResponse()
	err = c.Send(request, response)
	return
}
// NewModifyTableQuotasRequest builds a ModifyTableQuotas request carrying
// the tcaplusdb API metadata.
func NewModifyTableQuotasRequest() (request *ModifyTableQuotasRequest) {
	request = &ModifyTableQuotasRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "ModifyTableQuotas")
	return
}

// NewModifyTableQuotasResponse builds an empty ModifyTableQuotas response.
func NewModifyTableQuotasResponse() (response *ModifyTableQuotasResponse) {
	response = &ModifyTableQuotasResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// ModifyTableQuotas scales table capacity up or down. A nil request is
// replaced with a default one.
func (c *Client) ModifyTableQuotas(request *ModifyTableQuotasRequest) (response *ModifyTableQuotasResponse, err error) {
	if request == nil {
		request = NewModifyTableQuotasRequest()
	}
	response = NewModifyTableQuotasResponse()
	err = c.Send(request, response)
	return
}
// NewModifyTablesRequest builds a ModifyTables request carrying the
// tcaplusdb API metadata.
func NewModifyTablesRequest() (request *ModifyTablesRequest) {
	request = &ModifyTablesRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "ModifyTables")
	return
}

// NewModifyTablesResponse builds an empty ModifyTables response.
func NewModifyTablesResponse() (response *ModifyTablesResponse) {
	response = &ModifyTablesResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// ModifyTables batch-modifies the selected tables according to the
// user-chosen IDL table definition files. A nil request is replaced with
// a default one.
func (c *Client) ModifyTables(request *ModifyTablesRequest) (response *ModifyTablesResponse, err error) {
	if request == nil {
		request = NewModifyTablesRequest()
	}
	response = NewModifyTablesResponse()
	err = c.Send(request, response)
	return
}
// NewModifyZoneNameRequest builds a ModifyZoneName request carrying the
// tcaplusdb API metadata.
func NewModifyZoneNameRequest() (request *ModifyZoneNameRequest) {
	request = &ModifyZoneNameRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "ModifyZoneName")
	return
}

// NewModifyZoneNameResponse builds an empty ModifyZoneName response.
func NewModifyZoneNameResponse() (response *ModifyZoneNameResponse) {
	response = &ModifyZoneNameResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// ModifyZoneName renames a TcaplusDB zone. A nil request is replaced with
// a default one.
func (c *Client) ModifyZoneName(request *ModifyZoneNameRequest) (response *ModifyZoneNameResponse, err error) {
	if request == nil {
		request = NewModifyZoneNameRequest()
	}
	response = NewModifyZoneNameResponse()
	err = c.Send(request, response)
	return
}
// NewRecoverRecycleTablesRequest builds a RecoverRecycleTables request
// carrying the tcaplusdb API metadata.
func NewRecoverRecycleTablesRequest() (request *RecoverRecycleTablesRequest) {
	request = &RecoverRecycleTablesRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "RecoverRecycleTables")
	return
}

// NewRecoverRecycleTablesResponse builds an empty response.
func NewRecoverRecycleTablesResponse() (response *RecoverRecycleTablesResponse) {
	response = &RecoverRecycleTablesResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// RecoverRecycleTables restores user-deleted tables from the recycle bin.
// It has no effect on tables pending release due to overdue payment. A
// nil request is replaced with a default one.
func (c *Client) RecoverRecycleTables(request *RecoverRecycleTablesRequest) (response *RecoverRecycleTablesResponse, err error) {
	if request == nil {
		request = NewRecoverRecycleTablesRequest()
	}
	response = NewRecoverRecycleTablesResponse()
	err = c.Send(request, response)
	return
}
// NewRollbackTablesRequest builds a RollbackTables request carrying the
// tcaplusdb API metadata.
func NewRollbackTablesRequest() (request *RollbackTablesRequest) {
	request = &RollbackTablesRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "RollbackTables")
	return
}

// NewRollbackTablesResponse builds an empty RollbackTables response.
func NewRollbackTablesResponse() (response *RollbackTablesResponse) {
	response = &RollbackTablesResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// RollbackTables rolls table data back to an earlier point. A nil request
// is replaced with a default one.
func (c *Client) RollbackTables(request *RollbackTablesRequest) (response *RollbackTablesResponse, err error) {
	if request == nil {
		request = NewRollbackTablesRequest()
	}
	response = NewRollbackTablesResponse()
	err = c.Send(request, response)
	return
}
// NewVerifyIdlFilesRequest builds a VerifyIdlFiles request carrying the
// tcaplusdb API metadata.
func NewVerifyIdlFilesRequest() (request *VerifyIdlFilesRequest) {
	request = &VerifyIdlFilesRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	request.Init().WithApiInfo("tcaplusdb", APIVersion, "VerifyIdlFiles")
	return
}

// NewVerifyIdlFilesResponse builds an empty VerifyIdlFiles response.
func NewVerifyIdlFilesResponse() (response *VerifyIdlFilesResponse) {
	response = &VerifyIdlFilesResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
	return
}

// VerifyIdlFiles uploads and validates table-creation IDL files and
// returns the table definitions that pass validation. A nil request is
// replaced with a default one.
func (c *Client) VerifyIdlFiles(request *VerifyIdlFilesRequest) (response *VerifyIdlFilesResponse, err error) {
	if request == nil {
		request = NewVerifyIdlFilesRequest()
	}
	response = NewVerifyIdlFilesResponse()
	err = c.Send(request, response)
	return
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package utility
import (
"strings"
"github.com/mattermost/mattermost-cloud/internal/tools/aws"
"github.com/mattermost/mattermost-cloud/model"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
// fluentbit manages the Fluent Bit helm utility for a cluster.
type fluentbit struct {
	awsClient      aws.AWS                   // AWS client; must be non-nil (see validate)
	kubeconfigPath string                    // path to the cluster kubeconfig; must be non-empty
	logger         log.FieldLogger           // logger scoped to this utility
	desiredVersion *model.HelmUtilityVersion // chart version/values requested for deployment
	actualVersion  *model.HelmUtilityVersion // version currently deployed, if known
}
// newFluentbitOrUnmanagedHandle returns a fluentbit utility handle, or an
// unmanaged placeholder handle when the cluster marks fluentbit as
// externally managed. The managed handle is validated before being
// returned.
func newFluentbitOrUnmanagedHandle(cluster *model.Cluster, kubeconfigPath string, awsClient aws.AWS, logger log.FieldLogger) (Utility, error) {
	desired := cluster.DesiredUtilityVersion(model.FluentbitCanonicalName)
	actual := cluster.ActualUtilityVersion(model.FluentbitCanonicalName)
	if model.UtilityIsUnmanaged(desired, actual) {
		return newUnmanagedHandle(model.FluentbitCanonicalName, logger), nil
	}
	fluentbit := newFluentbitHandle(cluster, desired, kubeconfigPath, awsClient, logger)
	err := fluentbit.validate()
	if err != nil {
		return nil, errors.Wrap(err, "fluentbit utility config is invalid")
	}
	return fluentbit, nil
}
// newFluentbitHandle builds a fluentbit handle for the given cluster,
// scoping the logger and picking up the currently recorded actual
// version from the cluster's utility metadata.
func newFluentbitHandle(cluster *model.Cluster, desiredVersion *model.HelmUtilityVersion, kubeconfigPath string, awsClient aws.AWS, logger log.FieldLogger) *fluentbit {
	return &fluentbit{
		awsClient:      awsClient,
		kubeconfigPath: kubeconfigPath,
		logger:         logger.WithField("cluster-utility", model.FluentbitCanonicalName),
		desiredVersion: desiredVersion,
		actualVersion:  cluster.UtilityMetadata.ActualVersions.Fluentbit,
	}
}
// validate checks that the handle has everything it needs: a kubeconfig
// path and an AWS client.
func (f *fluentbit) validate() error {
	switch {
	case f.kubeconfigPath == "":
		return errors.New("kubeconfig path cannot be empty")
	case f.awsClient == nil:
		return errors.New("awsClient cannot be nil")
	default:
		return nil
	}
}
// Destroy deletes the fluent-bit helm release from the cluster.
func (f *fluentbit) Destroy() error {
	helm := f.newHelmDeployment()
	return helm.Delete()
}
// Migrate is a no-op: fluentbit requires no migration work.
func (f *fluentbit) Migrate() error {
	return nil
}
// CreateOrUpgrade installs or upgrades the helm release, then records
// the version that is actually deployed.
func (f *fluentbit) CreateOrUpgrade() error {
	deployment := f.newHelmDeployment()
	if err := deployment.Update(); err != nil {
		return err
	}
	return f.updateVersion(deployment)
}
// DesiredVersion returns the chart version requested for deployment.
func (f *fluentbit) DesiredVersion() *model.HelmUtilityVersion {
	return f.desiredVersion
}
// ActualVersion returns the deployed chart version with the repo-derived
// "fluent-bit-" prefix stripped, or nil when no version is recorded.
func (f *fluentbit) ActualVersion() *model.HelmUtilityVersion {
	if f.actualVersion == nil {
		return nil
	}
	return &model.HelmUtilityVersion{
		Chart:      strings.TrimPrefix(f.actualVersion.Version(), "fluent-bit-"),
		ValuesPath: f.actualVersion.Values(),
	}
}
// Name returns the canonical utility name for fluentbit.
func (f *fluentbit) Name() string {
	return model.FluentbitCanonicalName
}
// newHelmDeployment builds the helm deployment description for the
// "fluent-bit" chart from the "fluent" repo, installed into the
// "fluent-bit" namespace at the desired version.
func (f *fluentbit) newHelmDeployment() *helmDeployment {
	return newHelmDeployment(
		"fluent/fluent-bit",
		"fluent-bit",
		"fluent-bit",
		f.kubeconfigPath,
		f.desiredVersion,
		defaultHelmDeploymentSetArgument,
		f.logger,
	)
}
// ValuesPath returns the helm values file path for the desired version,
// or "" when no desired version is set.
func (f *fluentbit) ValuesPath() string {
	if f.desiredVersion == nil {
		return ""
	}
	return f.desiredVersion.Values()
}
// updateVersion refreshes the cached actual version from the deployed
// helm release.
func (f *fluentbit) updateVersion(h *helmDeployment) error {
	actualVersion, err := h.Version()
	if err != nil {
		return err
	}
	f.actualVersion = actualVersion
	return nil
}
|
package model
// PandoInfo describes a Pando node: its peer identifier and the API
// endpoints it exposes.
type PandoInfo struct {
	PeerID    string       // node peer identifier
	Addresses APIAddresses // API endpoint addresses
}

// APIAddresses groups the addresses of the node's three API surfaces.
type APIAddresses struct {
	HttpAPI      string // HTTP API endpoint
	GraphQLAPI   string // GraphQL API endpoint
	GraphSyncAPI string // GraphSync API endpoint
}
|
package array
import "fmt"
// removeDuplicatesLC80 solves LeetCode 80: it compacts the sorted slice
// nums in place so that each value appears at most twice, and returns
// the new logical length. Elements past the returned length are left in
// an unspecified state.
func removeDuplicatesLC80(nums []int) int {
	n := len(nums)
	// Zero or one element can never contain a third duplicate.
	if n < 2 {
		return n
	}
	// slow is the write cursor: nums[:slow] is the compacted prefix.
	// (The original printed slow on every iteration — leftover debug
	// output, removed here.)
	slow := 2
	for fast := 2; fast < n; fast++ {
		// Keep nums[fast] unless it would become a third copy of the
		// value two slots back in the compacted prefix.
		if nums[fast] != nums[slow-2] {
			nums[slow] = nums[fast]
			slow++
		}
	}
	return slow
}
|
/* A simple library to build queriable html structure.
*
* @author: FATESAIKOU
* @date : 04/17/2018
*/
package queriableHtml
import (
"fmt"
"bytes"
"strings"
"regexp"
"golang.org/x/net/html"
)
// DOMObj is a queriable HTML DOM node: a tag name, its attributes, its
// child nodes, and the tokenizer token type it was built from.
type DOMObj struct {
	Atom      string            // tag name ("<ROOT>" for the synthetic root)
	Attrs     map[string]string // attribute name -> value
	Contents  []DOMObj          // child nodes, in document order
	TokenType html.TokenType
}
// GetEleByAtom returns the direct children whose tag name matches the
// given regular expression pattern.
func (self *DOMObj) GetEleByAtom(pattern string) []DOMObj {
	matched := []DOMObj{}
	for i := range self.Contents {
		// Regexp errors are deliberately ignored; an invalid pattern
		// simply matches nothing.
		if hit, _ := regexp.MatchString(pattern, self.Contents[i].Atom); hit {
			matched = append(matched, self.Contents[i])
		}
	}
	return matched
}
// GetEleByAttr returns the direct children having attribute term whose
// value matches the given regular expression pattern.
func (self *DOMObj) GetEleByAttr(term string, pattern string) []DOMObj {
	matched := []DOMObj{}
	for i := range self.Contents {
		val, present := self.Contents[i].Attrs[term]
		if !present {
			continue
		}
		// Regexp errors are deliberately ignored; an invalid pattern
		// simply matches nothing.
		if hit, _ := regexp.MatchString(pattern, val); hit {
			matched = append(matched, self.Contents[i])
		}
	}
	return matched
}
// GetEleByQuery applies a single comma-separated query term to this
// node's direct children. Supported forms: "*" / "*..." (all children),
// "Attr,name,regexp" and "Atom,regexp". Malformed queries return nil.
func (self *DOMObj) GetEleByQuery(query_str string) []DOMObj {
	query_terms := strings.Split(query_str, ",")
	if len(query_terms) < 1 {
		fmt.Println("Error while parsing the query string!")
		return nil
	}
	switch term := query_terms[0]; term {
	case "*", "*...":
		return self.Contents
	case "Attr":
		// An "Attr" query needs an attribute name and a value pattern;
		// the original indexed query_terms[1]/[2] unchecked and could
		// panic on a short query.
		if len(query_terms) < 3 {
			fmt.Println("Error while parsing the query string!")
			return nil
		}
		return self.GetEleByAttr(query_terms[1], query_terms[2])
	case "Atom":
		// An "Atom" query needs a tag-name pattern.
		if len(query_terms) < 2 {
			fmt.Println("Error while parsing the query string!")
			return nil
		}
		return self.GetEleByAtom(query_terms[1])
	default:
		return nil
	}
}
// Query is the main query entry point. query_strs is a list of query
// strings, each of which may be:
//  1. *...               // descend any number of layers without matching
//  2. *                  // descend exactly one layer without matching
//  3. Attr,Name,regexp   // elements whose attribute Name matches regexp
//  4. Atom,regexp        // elements whose tag name matches regexp
//
// When several query strings are given, each subsequent string is
// applied to the children of the previous string's results.
func (self *DOMObj) Query(query_strs []string) []DOMObj {
	// No query terms left: this node itself is a result.
	if len(query_strs) < 1 {
		return []DOMObj{*self}
	}
	res := []DOMObj{}
	tmp_res := self.GetEleByQuery(query_strs[0])
	for i := range tmp_res {
		res = append(res,
			tmp_res[i].Query(query_strs[1:])...)
	}
	// "*..." also stays active at deeper levels, so re-apply the full
	// query list to each child (arbitrary-depth descent).
	if query_strs[0] == "*..." {
		for i := range tmp_res {
			res = append(res,
				tmp_res[i].Query(query_strs)...)
		}
	}
	return res
}
/* For constructing */

// BuildScope tokenizes the document once and returns two sets of token
// indices: in_seq marks start tags that have a matching end tag, and
// out_seq marks those end tags. ParseHtml later uses the two sets to
// decide where to recurse into a child scope and where a scope ends.
func BuildScope(body_bytes []byte) (map[int]bool, map[int]bool) {
	type Scope struct {
		Name string
		SInd int // token index of the start tag
	}
	scope_stack := []Scope{}
	in_seq := make(map[int]bool)
	out_seq := make(map[int]bool)
	tr := html.NewTokenizer(bytes.NewBuffer(body_bytes))
	cnt := 0
	for {
		tt := tr.Next()
		t := tr.Token()
		if tt == html.StartTagToken {
			scope_stack = append(scope_stack, Scope{
				Name: t.Data,
				SInd: cnt})
		}
		if tt == html.EndTagToken {
			// Find the nearest open tag with the same name; any
			// unclosed tags stacked above it are dropped with it.
			var m int
			for m = len(scope_stack) - 1; m >= 0; m-- {
				if scope_stack[m].Name == t.Data {
					break
				}
			}
			if m >= 0 {
				in_seq[scope_stack[m].SInd] = true
				out_seq[cnt] = true
				scope_stack = scope_stack[:m]
			}
		}
		// ErrorToken signals end of input (or a tokenizer error).
		if tt == html.ErrorToken {
			break
		}
		cnt++
	}
	return in_seq, out_seq
}
// LoadAttr converts the tokenizer's attribute list into a lookup map
// keyed by attribute name.
func LoadAttr(attrs []html.Attribute) map[string]string {
	out := make(map[string]string, len(attrs))
	for _, attr := range attrs {
		out[attr.Key] = attr.Val
	}
	return out
}
// NewQueriableHtml parses an HTML document into a DOMObj tree rooted at
// a synthetic "<ROOT>" node.
func NewQueriableHtml(body_bytes []byte) DOMObj {
	root := DOMObj{
		Atom:      "<ROOT>",
		Attrs:     map[string]string{},
		Contents:  []DOMObj{},
		TokenType: html.StartTagToken}
	// First pass computes which tags open/close scopes; the second
	// tokenizer pass builds the tree using that information.
	in_seq, out_seq := BuildScope(body_bytes)
	token_reader := html.NewTokenizer(bytes.NewBuffer(body_bytes))
	cnt := -1 // ParseHtml pre-increments, so the first token gets index 0
	root.Contents = ParseHtml(in_seq, out_seq, &cnt, token_reader)
	return root
}
// ParseHtml consumes tokens from tr and builds the sibling list for the
// current scope. cnt is the token index shared across recursive calls;
// in_seq/out_seq (from BuildScope) mark where a nested scope starts and
// where the current scope ends.
func ParseHtml(in_seq map[int]bool, out_seq map[int]bool, cnt *int, tr *html.Tokenizer) []DOMObj {
	obj_list := []DOMObj{}
	for {
		(*cnt)++
		tt := tr.Next()
		t := tr.Token()
		// A matching end tag — or end of input — closes this scope.
		if out_seq[*cnt] == true || tt == html.ErrorToken {
			break
		}
		tmp_obj := DOMObj{
			Atom:      t.Data,
			Attrs:     LoadAttr(t.Attr),
			Contents:  nil,
			TokenType: tt}
		// A start tag with a known matching end tag owns a nested scope.
		if in_seq[*cnt] == true {
			tmp_obj.Contents = ParseHtml(in_seq, out_seq, cnt, tr)
		}
		obj_list = append(obj_list, tmp_obj)
	}
	return obj_list
}
|
package main
import (
"github.com/gin-gonic/gin"
"go-fcm-example/admin/src/define"
"go-fcm-example/admin/src/service"
"net"
"net/http"
"time"
)
/**
* @Author: caishi13202
* @Date: 2021/9/27 3:00 下午
*/
// initRouter wires the HTTP routes onto router, sharing a single
// Notification service backed by an in-memory login-user map and a
// pooled HTTP client.
func initRouter(router *gin.Engine) {
	loginUser := make(map[string]string, 16)
	httpClient := createHTTPClient()
	service := &service.Notification{}
	service.SetLoginUser(loginUser)
	service.SetHttpClient(httpClient)
	accessToken(service, router)
	listAccount(service, router)
	sendMsg(service, router)
}
// accessToken registers POST /accessToken, which binds the login request
// and returns the login result.
func accessToken(service *service.Notification, router *gin.Engine) {
	router.POST("/accessToken", func(c *gin.Context) {
		req := &define.AccessTokenReq{}
		// Reject malformed bodies instead of silently attempting a
		// login with a zero-valued request (the original ignored the
		// bind error).
		if err := c.ShouldBind(req); err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		c.JSON(http.StatusOK, service.Login(req))
	})
}
// listAccount registers GET /list, which returns the currently
// logged-in user list.
func listAccount(service *service.Notification, router *gin.Engine) {
	router.GET("/list", func(c *gin.Context) {
		c.JSON(http.StatusOK, service.ListAccount())
	})
}
// sendMsg registers POST /send, which binds the send request and
// dispatches the message.
func sendMsg(service *service.Notification, router *gin.Engine) {
	router.POST("/send", func(c *gin.Context) {
		req := &define.SendReq{}
		// Reject malformed bodies instead of silently sending a
		// zero-valued request (the original ignored the bind error).
		if err := c.ShouldBind(req); err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		c.JSON(http.StatusOK, service.Send(req))
	})
}
// createHTTPClient for connection re-use
func createHTTPClient() *http.Client {
client := &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
MaxIdleConns: 30,
MaxIdleConnsPerHost: 30,
IdleConnTimeout: time.Duration(5) * time.Second,
},
Timeout: time.Millisecond * time.Duration(5000),
}
return client
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package router provides utilities for accessing or controlling different routers.
package router
|
package access
import (
"github.com/kataras/iris"
"gopkg.in/mgo.v2"
)
// Resp is the generic JSON response envelope.
type Resp struct {
	Msg  string      `json:"msg"`
	Data interface{} `json:"data"`
}

var (
	// RespOK is the canonical success response with no payload.
	RespOK = Resp{Msg: "OK"}
)
// Access is a generic CRUD interface over a persisted entity p.
type Access interface {
	Create(p interface{}) (interface{}, error)
	Read(p interface{}, id string) (interface{}, error)
	ReadWParam(p interface{}) (interface{}, error)
	Update(p interface{}, id string) error
	Delete(p interface{}, id string) (int, error)
}
// Dbsess wraps a shared MongoDB session.
type Dbsess struct {
	Sess *mgo.Session
}
// RespErrWithMsg writes the given status code and a JSON body carrying
// only the error message.
func RespErrWithMsg(ctx iris.Context, status int, msg string) {
	ctx.StatusCode(status)
	ctx.JSON(Resp{Msg: msg})
}
// RespOkWithData writes the given status code and a JSON body carrying
// both the message and the data payload.
func RespOkWithData(ctx iris.Context, status int, msg string, data interface{}) {
	ctx.StatusCode(status)
	payload := Resp{Msg: msg, Data: data}
	ctx.JSON(payload)
}
// RespOk writes the given status code and the canonical "OK" body.
func RespOk(ctx iris.Context, status int) {
	ctx.StatusCode(status)
	ctx.JSON(RespOK)
}
|
package apiserver
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"os"
"path"
"time"
"k8s.io/api/admissionregistration/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/cert"
apiregv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
certResources "knative.dev/pkg/webhook/certificates/resources"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/tmax-cloud/registry-operator/internal/utils"
)
const (
	// CertDir is where the webhook server's key/cert pair is written.
	CertDir = "/tmp/run-api"
	// K8sConfigMapName/K8sConfigMapKey locate the aggregated-API
	// client-CA bundle maintained by kube-apiserver.
	K8sConfigMapName = "extension-apiserver-authentication"
	K8sConfigMapKey  = "requestheader-client-ca-file"
	// APIServiceName is the APIService object patched with the CA bundle.
	APIServiceName = "v1.registry.tmax.io"
	// MutatingWebhookConfigurationName is the webhook configuration
	// patched with the CA bundle.
	MutatingWebhookConfigurationName = "registry-operator-webhook-cfg"
)
// createCert creates and stores certificates for the webhook server.
// The server key/cert pair is written as files under CertDir, and the CA
// bundle is stored in the APIService and MutatingWebhookConfiguration.
func createCert(ctx context.Context, client client.Client) error {
	// Make directory recursively.
	if err := os.MkdirAll(CertDir, os.ModePerm); err != nil {
		return err
	}
	// Get service name and namespace.
	svc := utils.OperatorServiceName()
	ns, err := utils.Namespace()
	if err != nil {
		return err
	}
	// Create certs valid for ten years.
	tlsKey, tlsCrt, caCrt, err := certResources.CreateCerts(ctx, svc, ns, time.Now().AddDate(10, 0, 0))
	if err != nil {
		return err
	}
	// Write certs to file. The private key must not be world-readable,
	// so it is written with mode 0600 (the original used 0644).
	keyPath := path.Join(CertDir, "tls.key")
	err = ioutil.WriteFile(keyPath, tlsKey, 0600)
	if err != nil {
		return err
	}
	crtPath := path.Join(CertDir, "tls.crt")
	err = ioutil.WriteFile(crtPath, tlsCrt, 0644)
	if err != nil {
		return err
	}
	// Publish the CA bundle on the APIService.
	apiService := &apiregv1.APIService{}
	if err := client.Get(ctx, types.NamespacedName{Name: APIServiceName}, apiService); err != nil {
		return err
	}
	apiService.Spec.CABundle = caCrt
	if err := client.Update(ctx, apiService); err != nil {
		return err
	}
	// Publish the CA bundle on both webhooks of the
	// MutatingWebhookConfiguration.
	mwConfig := &v1beta1.MutatingWebhookConfiguration{}
	if err := client.Get(ctx, types.NamespacedName{Name: MutatingWebhookConfigurationName}, mwConfig); err != nil {
		return err
	}
	if len(mwConfig.Webhooks) != 2 {
		return fmt.Errorf("MutatingWebhookConfiguration's webhook must be two, but there is/are %d", len(mwConfig.Webhooks))
	}
	mwConfig.Webhooks[0].ClientConfig.CABundle = caCrt
	mwConfig.Webhooks[1].ClientConfig.CABundle = caCrt
	if err := client.Update(ctx, mwConfig); err != nil {
		return err
	}
	return nil
}
// tlsConfig builds a server TLS config that verifies client certificates
// (when presented) against the cluster's request-header client CA.
func tlsConfig(ctx context.Context, client client.Client) (*tls.Config, error) {
	caPool, err := getCAPool(ctx, client)
	if err != nil {
		return nil, err
	}
	return &tls.Config{
		ClientCAs:  caPool,
		ClientAuth: tls.VerifyClientCertIfGiven,
	}, nil
}
// getCAPool loads the request-header client CA certificates from the
// kube-system extension-apiserver-authentication ConfigMap into a pool.
func getCAPool(ctx context.Context, client client.Client) (*x509.CertPool, error) {
	cm := &corev1.ConfigMap{}
	if err := client.Get(ctx, types.NamespacedName{Name: K8sConfigMapName, Namespace: metav1.NamespaceSystem}, cm); err != nil {
		return nil, err
	}
	clientCA, ok := cm.Data[K8sConfigMapKey]
	if !ok {
		return nil, fmt.Errorf("no key [%s] found in configmap %s/%s", K8sConfigMapKey, metav1.NamespaceSystem, K8sConfigMapName)
	}
	certs, err := cert.ParseCertsPEM([]byte(clientCA))
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	for _, c := range certs {
		pool.AddCert(c)
	}
	return pool, nil
}
|
package main
//1460. 通过翻转子数组使两个数组相等
//给你两个长度相同的整数数组target和arr。每一步中,你可以选择arr的任意 非空子数组并将它翻转。你可以执行此过程任意次。
//
//如果你能让 arr变得与 target相同,返回 True;否则,返回 False 。
//
//
//
//示例 1:
//
//输入:target = [1,2,3,4], arr = [2,4,1,3]
//输出:true
//解释:你可以按照如下步骤使 arr 变成 target:
//1- 翻转子数组 [2,4,1] ,arr 变成 [1,4,2,3]
//2- 翻转子数组 [4,2] ,arr 变成 [1,2,4,3]
//3- 翻转子数组 [4,3] ,arr 变成 [1,2,3,4]
//上述方法并不是唯一的,还存在多种将 arr 变成 target 的方法。
//示例 2:
//
//输入:target = [7], arr = [7]
//输出:true
//解释:arr 不需要做任何翻转已经与 target 相等。
//示例 3:
//
//输入:target = [3,7,9], arr = [3,7,11]
//输出:false
//解释:arr 没有数字 9 ,所以无论如何也无法变成 target 。
//
//
//提示:
//
//target.length == arr.length
//1 <= target.length <= 1000
//1 <= target[i] <= 1000
//1 <= arr[i] <= 1000
// canBeEqual reports whether arr can be turned into target by reversing
// sub-arrays. Since any permutation is reachable through reversals, the
// two arrays only need to contain the same multiset of values.
func canBeEqual(target []int, arr []int) bool {
	counts := make(map[int]int, len(target))
	for i := range target {
		counts[target[i]]++
		counts[arr[i]]--
	}
	for _, delta := range counts {
		if delta != 0 {
			return false
		}
	}
	return true
}
|
package mssql
import (
"fmt"
"gorm.io/gorm/schema"
)
// MniNamer is a gorm naming strategy that keeps identifiers exactly as
// written instead of converting them to snake_case.
type MniNamer struct {
	TablePrefix   string
	SingularTable bool
}

// TableName returns the table name unchanged.
func (MniNamer) TableName(table string) string {
	return table
}

// ColumnName returns the column name unchanged. (The original wrapped
// the string in fmt.Sprintf("%v", ...), which allocates for no effect.)
func (MniNamer) ColumnName(table, column string) string {
	return column
}

// JoinTableName returns the join table name unchanged.
func (MniNamer) JoinTableName(joinTable string) string {
	return joinTable
}

// RelationshipFKName uses the relationship's own name as the FK name.
func (MniNamer) RelationshipFKName(relationship schema.Relationship) string {
	return relationship.Name
}

// CheckerName names check constraints as "table.column".
func (MniNamer) CheckerName(table, column string) string {
	return fmt.Sprintf("%v.%v", table, column)
}

// IndexName names indexes as "table.column".
func (MniNamer) IndexName(table, column string) string {
	return fmt.Sprintf("%v.%v", table, column)
}
// // TableName convert string to table name
// func (ns MniNamer) TableName(str string) string {
// if ns.SingularTable {
// return ns.TablePrefix + ns.toDBName(str)
// }
// return ns.TablePrefix + inflection.Plural(ns.toDBName(str))
// }
// // ColumnName convert string to column name
// func (ns MniNamer) ColumnName(table, column string) string {
// return ns.toDBName(column)
// }
// // JoinTableName convert string to join table name
// func (ns MniNamer) JoinTableName(str string) string {
// if !ns.NoLowerCase && strings.ToLower(str) == str {
// return ns.TablePrefix + str
// }
// if ns.SingularTable {
// return ns.TablePrefix + ns.toDBName(str)
// }
// return ns.TablePrefix + inflection.Plural(ns.toDBName(str))
// }
// // RelationshipFKName generate fk name for relation
// func (ns MniNamer) RelationshipFKName(rel schema.Relationship) string {
// return ns.formatName("fk", rel.Schema.Table, ns.toDBName(rel.Name))
// }
// // CheckerName generate checker name
// func (ns MniNamer) CheckerName(table, column string) string {
// return ns.formatName("chk", table, column)
// }
// // IndexName generate index name
// func (ns MniNamer) IndexName(table, column string) string {
// return ns.formatName("idx", table, ns.toDBName(column))
// }
// func (ns MniNamer) formatName(prefix, table, name string) string {
// formatedName := strings.Replace(fmt.Sprintf("%v_%v_%v", prefix, table, name), ".", "_", -1)
// if utf8.RuneCountInString(formatedName) > 64 {
// h := sha1.New()
// h.Write([]byte(formatedName))
// bs := h.Sum(nil)
// formatedName = fmt.Sprintf("%v%v%v", prefix, table, name)[0:56] + string(bs)[:8]
// }
// return formatedName
// }
// var (
// // https://github.com/golang/lint/blob/master/lint.go#L770
// commonInitialisms = []string{"API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "LHS", "QPS", "RAM", "RHS", "RPC", "SLA", "SMTP", "SSH", "TLS", "TTL", "UID", "UI", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XSRF", "XSS"}
// commonInitialismsReplacer *strings.Replacer
// )
// func init() {
// commonInitialismsForReplacer := make([]string, 0, len(commonInitialisms))
// for _, initialism := range commonInitialisms {
// commonInitialismsForReplacer = append(commonInitialismsForReplacer, initialism, strings.Title(strings.ToLower(initialism)))
// }
// commonInitialismsReplacer = strings.NewReplacer(commonInitialismsForReplacer...)
// }
// func (ns MniNamer) toDBName(name string) string {
// if name == "" {
// return ""
// }
// if ns.NoLowerCase {
// return name
// }
// var (
// value = commonInitialismsReplacer.Replace(name)
// buf strings.Builder
// lastCase, nextCase, nextNumber bool // upper case == true
// curCase = value[0] <= 'Z' && value[0] >= 'A'
// )
// for i, v := range value[:len(value)-1] {
// nextCase = value[i+1] <= 'Z' && value[i+1] >= 'A'
// nextNumber = value[i+1] >= '0' && value[i+1] <= '9'
// if curCase {
// if lastCase && (nextCase || nextNumber) {
// buf.WriteRune(v + 32)
// } else {
// if i > 0 && value[i-1] != '_' && value[i+1] != '_' {
// buf.WriteByte('_')
// }
// buf.WriteRune(v + 32)
// }
// } else {
// buf.WriteRune(v)
// }
// lastCase = curCase
// curCase = nextCase
// }
// if curCase {
// if !lastCase && len(value) > 1 {
// buf.WriteByte('_')
// }
// buf.WriteByte(value[len(value)-1] + 32)
// } else {
// buf.WriteByte(value[len(value)-1])
// }
// ret := buf.String()
// return ret
// }
|
package main
// Sema is a counting semaphore backed by a buffered channel: each held
// slot is one element buffered in counter.
type Sema struct {
	counter chan int8
}

// newSema returns a semaphore with capacity n.
func newSema(n int) *Sema {
	return &Sema{
		counter: make(chan int8, n),
	}
}

// acquire takes one slot, blocking while the semaphore is full.
func (s *Sema) acquire() {
	s.counter <- 0
}

// release frees one slot; releasing an empty semaphore is a no-op.
//
// The original checked isEmpty() and then did a blocking receive, a
// check-then-act race that could block if another goroutine drained the
// channel in between. The non-blocking receive below is atomic.
func (s *Sema) release() {
	select {
	case <-s.counter:
	default:
	}
}

// count reports how many slots are currently held.
func (s *Sema) count() int {
	return len(s.counter)
}

// isEmpty reports whether no slots are held.
func (s *Sema) isEmpty() bool {
	return s.count() == 0
}

// close releases the underlying channel; the semaphore must not be used
// afterwards.
func (s *Sema) close() {
	close(s.counter)
}
|
package domain
import "fmt"
// Ads is a classified-ad row persisted via go-pg; the pg struct tags
// control column constraints.
type Ads struct {
	Id          int64   `pg:",notnull"`
	Title       string  `pg:",notnull"`
	Description string  `pg:",notnull"`
	Price       float64 `pg:",notnull"`
	UserId      int64   `pg:",notnull,fk"` // owning user's id (foreign key)
	Picture     string  `pg:""`
	Sold        bool    `pg:",use_zero"` // use_zero: persist false explicitly
}

// StringAds renders a short human-readable summary of an ad, e.g. for
// logging. Note that UserId and Sold are intentionally omitted.
func StringAds(a *Ads) string {
	return fmt.Sprintf("Ads<id(%d) title(%s) description(%s) price(%f) picture(%s)>", a.Id, a.Title, a.Description, a.Price, a.Picture)
}
package main
import (
"bufio"
"fmt"
"log"
"os"
"strings"
)
// k is the ordered cipher alphabet (note that ';' is absent).
const k = ` !"#$%&'()*+,-./0123456789:<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`

// gronsfeld decrypts q with the digit key p: each ciphertext byte is
// shifted backwards in the alphabet k by the matching key digit, with
// the key repeated cyclically over the input.
func gronsfeld(p, q string) string {
	out := make([]byte, len(q))
	for i := range out {
		shift := int(p[i%len(p)] - '0')
		pos := strings.IndexRune(k, rune(q[i]))
		out[i] = k[(pos-shift+len(k))%len(k)]
	}
	return string(out)
}
// main reads the file named by the first argument; each line has the
// form "key;ciphertext" and is decrypted with the Gronsfeld cipher.
func main() {
	data, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer data.Close()
	scanner := bufio.NewScanner(data)
	for scanner.Scan() {
		s := strings.Split(scanner.Text(), ";")
		fmt.Println(gronsfeld(s[0], s[1]))
	}
	// A scanner can stop because of a read error, not just EOF; the
	// original silently ignored that case.
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}
|
package thirdapi
import (
"encoding/json"
"io/ioutil"
"log"
"net/http"
)
// AliIP is the response of the Alibaba IP-geolocation lookup API.
type AliIP struct {
	Status    string `json:"status,omitempty"`    // request status flag
	Info      string `json:"info,omitempty"`      // status description
	Infocode  string `json:"infocode,omitempty"`  // status code
	Province  string `json:"province,omitempty"`  // province of the IP
	City      string `json:"city,omitempty"`      // city of the IP
	AdCode    string `json:"adcode,omitempty"`    // administrative district code
	Rectangle string `json:"rectangle,omitempty"` // bounding rectangle (original comment said "carrier" — verify)
}
// AliIPBelongToJSON queries the Alibaba IP-geolocation API for ip and
// returns the raw JSON response body.
func AliIPBelongToJSON(ip string) (string, error) {
	var baseURL = "http://iploc.market.alicloudapi.com/v3/ip?ip="
	var url = baseURL + ip
	client := &http.Client{}
	req, err := http.NewRequest("GET", url, nil)
	// The original ignored this error and would have passed a nil
	// request to Do, panicking inside the client.
	if err != nil {
		return "", err
	}
	req.Header.Add("Authorization", "APPCODE "+aliAppCode)
	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	bs, err := ioutil.ReadAll(resp.Body)
	return string(bs), err
}
// AliIPBelongToStruct is the main helper: it fetches the raw JSON for ip
// and decodes it into an AliIP value.
func AliIPBelongToStruct(ip string) (*AliIP, error) {
	var res = new(AliIP)
	str, err := AliIPBelongToJSON(ip)
	if err != nil {
		log.Println("HTTP call error:", err)
		return nil, err
	}
	err = json.Unmarshal([]byte(str), res)
	if err != nil {
		log.Println("JSON decode error:", err)
		return nil, err
	}
	return res, nil
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package p9
import (
errors2 "errors"
"fmt"
"io"
"os"
"path"
"strings"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/atomicbitops"
"gvisor.dev/gvisor/pkg/errors"
"gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fd"
"gvisor.dev/gvisor/pkg/log"
)
// ExtractErrno extracts a unix.Errno from an error, best effort.
func ExtractErrno(err error) unix.Errno {
	// Map the portable os sentinel errors first.
	switch err {
	case os.ErrNotExist:
		return unix.ENOENT
	case os.ErrExist:
		return unix.EEXIST
	case os.ErrPermission:
		return unix.EACCES
	case os.ErrInvalid:
		return unix.EINVAL
	}
	// Attempt to unwrap: wrapper error types recurse on their cause.
	switch e := err.(type) {
	case *errors.Error:
		return linuxerr.ToUnix(e)
	case unix.Errno:
		return e
	case *os.PathError:
		return ExtractErrno(e.Err)
	case *os.SyscallError:
		return ExtractErrno(e.Err)
	case *os.LinkError:
		return ExtractErrno(e.Err)
	}
	// Default case: anything unrecognized is logged and becomes EIO.
	log.Warningf("unknown error: %v", err)
	return unix.EIO
}
// newErr returns a new Rlerror reply message carrying the errno
// extracted (best effort) from err.
func newErr(err error) *Rlerror {
	return &Rlerror{Error: uint32(ExtractErrno(err))}
}
// ExtractLinuxerrErrno extracts a *errors.Error from an error, best effort.
// TODO(b/34162363): Merge this with ExtractErrno.
func ExtractLinuxerrErrno(err error) error {
	// Map the portable os sentinel errors first.
	switch err {
	case os.ErrNotExist:
		return linuxerr.ENOENT
	case os.ErrExist:
		return linuxerr.EEXIST
	case os.ErrPermission:
		return linuxerr.EACCES
	case os.ErrInvalid:
		return linuxerr.EINVAL
	}
	// Attempt to unwrap: wrapper error types recurse on their cause.
	switch e := err.(type) {
	case *errors.Error:
		return linuxerr.ToError(e)
	case unix.Errno:
		return linuxerr.ErrorFromUnix(e)
	case *os.PathError:
		return ExtractLinuxerrErrno(e.Err)
	case *os.SyscallError:
		return ExtractLinuxerrErrno(e.Err)
	case *os.LinkError:
		return ExtractLinuxerrErrno(e.Err)
	}
	// Default case: anything unrecognized is logged and becomes EIO.
	log.Warningf("unknown error: %v", err)
	return linuxerr.EIO
}
// newErrFromLinuxerr returns an Rlerror from the linuxerr list.
// TODO(b/34162363): Merge this with newErr.
//
// NOTE(review): despite the name, this currently delegates to
// ExtractErrno exactly like newErr — kept as-is pending the TODO above.
func newErrFromLinuxerr(err error) *Rlerror {
	return &Rlerror{Error: uint32(ExtractErrno(err))}
}
// handler is implemented for server-handled messages.
//
// See server.go for call information.
type handler interface {
	// Handle handles the given message.
	//
	// This may modify the server state. The handle function must return a
	// message which will be sent back to the client. It may be useful to
	// use newErr to automatically extract an error message.
	handle(cs *connState) message
}
// handle implements handler.handle.
//
// Tversion negotiates the protocol version and maximum message size for
// the connection.
func (t *Tversion) handle(cs *connState) message {
	// A zero message size is unusable.
	if t.MSize == 0 {
		return newErr(unix.EINVAL)
	}
	// Reject sizes beyond what the server will frame.
	if t.MSize > maximumLength {
		return newErr(unix.EINVAL)
	}
	cs.messageSize.Store(t.MSize)
	requested, ok := parseVersion(t.Version)
	if !ok {
		return newErr(unix.EINVAL)
	}
	// The server cannot support newer versions that it doesn't know about. In this
	// case we return EAGAIN to tell the client to try again with a lower version.
	if requested > highestSupportedVersion {
		return newErr(unix.EAGAIN)
	}
	// From Tversion(9P): "The server may respond with the client’s version
	// string, or a version string identifying an earlier defined protocol version".
	cs.version.Store(requested)
	return &Rversion{
		MSize:   t.MSize,
		Version: t.Version,
	}
}
// handle implements handler.handle.
//
// Tflush waits for the outstanding operation identified by OldTag to
// complete before acknowledging; the operation itself is not cancelled.
func (t *Tflush) handle(cs *connState) message {
	cs.WaitTag(t.OldTag)
	return &Rflush{}
}
// checkSafeName returns nil if name is a usable single path component:
// non-empty, not "." or "..", and containing no '/' separator.
// Anything else yields EINVAL.
func checkSafeName(name string) error {
	switch {
	case name == "", name == ".", name == "..":
		return unix.EINVAL
	case strings.Contains(name, "/"):
		return unix.EINVAL
	}
	return nil
}
// handle implements handler.handle.
//
// Tclunk drops the FID from the connection's FID table; EBADF is
// returned if the FID was not registered.
func (t *Tclunk) handle(cs *connState) message {
	if !cs.DeleteFID(t.FID) {
		return newErr(unix.EBADF)
	}
	return &Rclunk{}
}
// handle implements handler.handle.
//
// Tsetattrclunk combines Tsetattr and Tclunk: the FID is always clunked,
// even if the setattr step fails.
func (t *Tsetattrclunk) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	setAttrErr := ref.safelyWrite(func() error {
		// We don't allow setattr on files that have been deleted.
		// This might be technically incorrect, as it's possible that
		// there were multiple links and you can still change the
		// corresponding inode information.
		if !cs.server.options.SetAttrOnDeleted && ref.isDeleted() {
			return unix.EINVAL
		}
		// Set the attributes.
		return ref.file.SetAttr(t.Valid, t.SetAttr)
	})
	// Try to delete FID even in case of failure above. Since the state of the
	// file is unknown to the caller, it will not attempt to close the file again.
	if !cs.DeleteFID(t.FID) {
		return newErr(unix.EBADF)
	}
	// The setattr error takes precedence over a successful clunk.
	if setAttrErr != nil {
		return newErr(setAttrErr)
	}
	return &Rsetattrclunk{}
}
// handle implements handler.handle.
//
// Tremove unlinks the file referenced by the FID and clunks the FID,
// even when the unlink itself fails (per the 9P remove semantics).
func (t *Tremove) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	// Frustratingly, because we can't be guaranteed that a rename is not
	// occurring simultaneously with this removal, we need to acquire the
	// global rename lock for this kind of remove operation to ensure that
	// ref.parent does not change out from underneath us.
	//
	// This is why Tremove is a bad idea, and clients should generally use
	// Tunlinkat. All p9 clients will use Tunlinkat.
	err := ref.safelyGlobal(func() error {
		// Is this a root? Can't remove that.
		if ref.isRoot() {
			return unix.EINVAL
		}
		// N.B. this remove operation is permitted, even if the file is open.
		// See also rename below for reasoning.
		// Is this file already deleted?
		if ref.isDeleted() {
			return unix.EINVAL
		}
		// Retrieve the file's proper name.
		name := ref.parent.pathNode.nameFor(ref)
		// Attempt the removal.
		if err := ref.parent.file.UnlinkAt(name, 0); err != nil {
			return err
		}
		// Mark all relevant fids as deleted. We don't need to lock any
		// individual nodes because we already hold the global lock.
		ref.parent.markChildDeleted(name)
		return nil
	})
	// "The remove request asks the file server both to remove the file
	// represented by fid and to clunk the fid, even if the remove fails."
	//
	// "It is correct to consider remove to be a clunk with the side effect
	// of removing the file if permissions allow."
	// https://swtch.com/plan9port/man/man9/remove.html
	if !cs.DeleteFID(t.FID) {
		return newErr(unix.EBADF)
	}
	if err != nil {
		return newErr(err)
	}
	return &Rremove{}
}
// handle implements handler.handle.
//
// We don't support authentication, so this just returns ENOSYS.
func (t *Tauth) handle(cs *connState) message {
	return newErr(unix.ENOSYS)
}
// handle implements handler.handle.
//
// Tattach attaches the given FID to the server's file tree, either at the
// root or at the (relative) path given by AttachName.
func (t *Tattach) handle(cs *connState) message {
	// Ensure no authentication FID is provided; authentication is
	// unsupported (see Tauth above).
	if t.Auth.AuthenticationFID != NoFID {
		return newErr(unix.EINVAL)
	}
	if path.IsAbs(t.Auth.AttachName) {
		// Trim off the leading / if the path is absolute. We always
		// treat attach paths as absolute and call attach with the root
		// argument on the server file for clarity.
		t.Auth.AttachName = t.Auth.AttachName[1:]
	}
	// Do the attach on the root.
	sf, err := cs.server.attacher.Attach()
	if err != nil {
		return newErr(err)
	}
	// Fetch attributes up front; the file mode is required to populate the
	// transient root reference below.
	qid, valid, attr, err := sf.GetAttr(AttrMaskAll())
	if err != nil {
		sf.Close() // Drop file.
		return newErr(err)
	}
	if !valid.Mode {
		sf.Close() // Drop file.
		return newErr(unix.EINVAL)
	}
	// Build a transient reference.
	root := &fidRef{
		server:   cs.server,
		parent:   nil,
		file:     sf,
		refs:     atomicbitops.FromInt64(1),
		mode:     attr.Mode.FileType(),
		pathNode: cs.server.pathTree,
	}
	defer root.DecRef()
	// Attach the root?
	if len(t.Auth.AttachName) == 0 {
		cs.InsertFID(t.FID, root)
		return &Rattach{QID: qid}
	}
	// We want the same traversal checks to apply on attach, so always
	// attach at the root and use the regular walk paths.
	names := strings.Split(t.Auth.AttachName, "/")
	_, newRef, _, _, err := doWalk(cs, root, names, false)
	if err != nil {
		return newErr(err)
	}
	defer newRef.DecRef()
	// Insert the FID.
	// NOTE(review): the returned QID is the root's QID, not the walked
	// file's — confirm whether clients rely on this.
	cs.InsertFID(t.FID, newRef)
	return &Rattach{QID: qid}
}
// CanOpen returns true if a file with the given mode may be opened for
// reading and writing.
//
// This includes everything except symlinks and sockets.
func CanOpen(mode FileMode) bool {
	switch {
	case mode.IsRegular(), mode.IsDir(), mode.IsNamedPipe(), mode.IsBlockDevice(), mode.IsCharacterDevice():
		return true
	default:
		return false
	}
}
// handle implements handler.handle.
//
// Tlopen opens the file referenced by the FID with the given flags and
// returns the host file payload (if any), QID and I/O unit.
func (t *Tlopen) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	var (
		qid    QID
		ioUnit uint32
		osFile *fd.FD
	)
	if err := ref.safelyRead(func() (err error) {
		// Has it been deleted already?
		if ref.isDeleted() {
			return unix.EINVAL
		}
		// Has it been opened already?
		if ref.opened || !CanOpen(ref.mode) {
			return unix.EINVAL
		}
		if ref.mode.IsDir() {
			// Directory must be opened ReadOnly.
			if t.Flags&OpenFlagsModeMask != ReadOnly {
				return unix.EISDIR
			}
			// Directory not truncatable.
			if t.Flags&OpenTruncate != 0 {
				return unix.EISDIR
			}
		}
		osFile, qid, ioUnit, err = ref.file.Open(t.Flags)
		return err
	}); err != nil {
		return newErr(err)
	}
	// Mark file as opened and set open mode.
	// NOTE(review): these fields are written after safelyRead returns,
	// i.e. outside the critical section above — confirm this is safe with
	// respect to concurrent handlers reading ref.opened.
	ref.opened = true
	ref.openFlags = t.Flags
	rlopen := &Rlopen{QID: qid, IoUnit: ioUnit}
	rlopen.SetFilePayload(osFile)
	return rlopen
}
// do performs the create with the given uid, shared between Tlcreate and
// Tucreate. On success the FID is rebound to the newly created (and
// already opened) file.
func (t *Tlcreate) do(cs *connState, uid UID) (*Rlcreate, error) {
	// Don't allow complex names.
	if err := checkSafeName(t.Name); err != nil {
		return nil, err
	}
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return nil, unix.EBADF
	}
	defer ref.DecRef()
	var (
		osFile *fd.FD
		nsf    File
		qid    QID
		ioUnit uint32
		newRef *fidRef
	)
	if err := ref.safelyWrite(func() (err error) {
		// Don't allow creation from non-directories or deleted directories.
		if ref.isDeleted() || !ref.mode.IsDir() {
			return unix.EINVAL
		}
		// Not allowed on open directories.
		if ref.opened {
			return unix.EINVAL
		}
		// Do the create.
		osFile, nsf, qid, ioUnit, err = ref.file.Create(t.Name, t.OpenFlags, t.Permissions, uid, t.GID)
		if err != nil {
			return err
		}
		// The new file is born opened with the requested flags.
		newRef = &fidRef{
			server:    cs.server,
			parent:    ref,
			file:      nsf,
			opened:    true,
			openFlags: t.OpenFlags,
			mode:      ModeRegular,
			pathNode:  ref.pathNode.pathNodeFor(t.Name),
		}
		ref.pathNode.addChild(newRef, t.Name)
		ref.IncRef() // Acquire parent reference.
		return nil
	}); err != nil {
		return nil, err
	}
	// Replace the FID reference.
	cs.InsertFID(t.FID, newRef)
	rlcreate := &Rlcreate{Rlopen: Rlopen{QID: qid, IoUnit: ioUnit}}
	rlcreate.SetFilePayload(osFile)
	return rlcreate, nil
}
// handle implements handler.handle.
//
// Tlcreate creates with no UID attribution; see Tucreate for the variant
// that carries an explicit UID.
func (t *Tlcreate) handle(cs *connState) message {
	rlcreate, err := t.do(cs, NoUID)
	if err != nil {
		return newErr(err)
	}
	return rlcreate
}
// handle implements handler.handle.
//
// Tsymlink creates with no UID attribution; see Tusymlink for the variant
// that carries an explicit UID.
func (t *Tsymlink) handle(cs *connState) message {
	rsymlink, err := t.do(cs, NoUID)
	if err != nil {
		return newErr(err)
	}
	return rsymlink
}
// do performs the symlink creation with the given uid, shared between
// Tsymlink and Tusymlink.
func (t *Tsymlink) do(cs *connState, uid UID) (*Rsymlink, error) {
	// Don't allow complex names.
	if err := checkSafeName(t.Name); err != nil {
		return nil, err
	}
	ref, ok := cs.LookupFID(t.Directory)
	if !ok {
		return nil, unix.EBADF
	}
	defer ref.DecRef()
	var qid QID
	if err := ref.safelyWrite(func() (err error) {
		// Don't allow symlinks from non-directories or deleted directories.
		if ref.isDeleted() || !ref.mode.IsDir() {
			return unix.EINVAL
		}
		// Not allowed on open directories.
		if ref.opened {
			return unix.EINVAL
		}
		// Do the symlink.
		qid, err = ref.file.Symlink(t.Target, t.Name, uid, t.GID)
		return err
	}); err != nil {
		return nil, err
	}
	return &Rsymlink{QID: qid}, nil
}
// handle implements handler.handle.
//
// Tlink creates a hard link named Name in Directory pointing at the file
// referenced by Target.
func (t *Tlink) handle(cs *connState) message {
	// Don't allow complex names.
	if err := checkSafeName(t.Name); err != nil {
		return newErr(err)
	}
	ref, ok := cs.LookupFID(t.Directory)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	refTarget, ok := cs.LookupFID(t.Target)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer refTarget.DecRef()
	if err := ref.safelyWrite(func() (err error) {
		// Don't allow create links from non-directories or deleted directories.
		if ref.isDeleted() || !ref.mode.IsDir() {
			return unix.EINVAL
		}
		// Not allowed on open directories.
		if ref.opened {
			return unix.EINVAL
		}
		// Do the link.
		return ref.file.Link(refTarget.file, t.Name)
	}); err != nil {
		return newErr(err)
	}
	return &Rlink{}
}
// handle implements handler.handle.
//
// Trenameat renames OldDirectory/OldName to NewDirectory/NewName. The
// global rename lock serializes this against concurrent walks/removes.
func (t *Trenameat) handle(cs *connState) message {
	// Don't allow complex names in either component.
	if err := checkSafeName(t.OldName); err != nil {
		return newErr(err)
	}
	if err := checkSafeName(t.NewName); err != nil {
		return newErr(err)
	}
	ref, ok := cs.LookupFID(t.OldDirectory)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	refTarget, ok := cs.LookupFID(t.NewDirectory)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer refTarget.DecRef()
	// Perform the rename holding the global lock.
	if err := ref.safelyGlobal(func() (err error) {
		// Don't allow renaming across deleted directories.
		if ref.isDeleted() || !ref.mode.IsDir() || refTarget.isDeleted() || !refTarget.mode.IsDir() {
			return unix.EINVAL
		}
		// Not allowed on open directories.
		if ref.opened {
			return unix.EINVAL
		}
		// Is this the same file? If yes, short-circuit and return success.
		if ref.pathNode == refTarget.pathNode && t.OldName == t.NewName {
			return nil
		}
		// Attempt the actual rename.
		if err := ref.file.RenameAt(t.OldName, refTarget.file, t.NewName); err != nil {
			return err
		}
		// Update the path tree.
		ref.renameChildTo(t.OldName, refTarget, t.NewName)
		return nil
	}); err != nil {
		return newErr(err)
	}
	return &Rrenameat{}
}
// handle implements handler.handle.
//
// Tunlinkat removes Name from Directory. This is the preferred removal
// operation (see the Tremove commentary above).
func (t *Tunlinkat) handle(cs *connState) message {
	// Don't allow complex names.
	if err := checkSafeName(t.Name); err != nil {
		return newErr(err)
	}
	ref, ok := cs.LookupFID(t.Directory)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	if err := ref.safelyWrite(func() (err error) {
		// Don't allow deletion from non-directories or deleted directories.
		if ref.isDeleted() || !ref.mode.IsDir() {
			return unix.EINVAL
		}
		// Not allowed on open directories.
		if ref.opened {
			return unix.EINVAL
		}
		// Before we do the unlink itself, we need to ensure that there
		// are no operations in flight on associated path node. The
		// child's path node lock must be held to ensure that the
		// unlinkat marking the child deleted below is atomic with
		// respect to any other read or write operations.
		//
		// This is one case where we have a lock ordering issue, but
		// since we always acquire deeper in the hierarchy, we know
		// that we are free of lock cycles.
		childPathNode := ref.pathNode.pathNodeFor(t.Name)
		childPathNode.opMu.Lock()
		defer childPathNode.opMu.Unlock()
		// Do the unlink.
		err = ref.file.UnlinkAt(t.Name, t.Flags)
		if err != nil {
			return err
		}
		// Mark the path as deleted.
		ref.markChildDeleted(t.Name)
		return nil
	}); err != nil {
		return newErr(err)
	}
	return &Runlinkat{}
}
// handle implements handler.handle.
//
// Trename moves the file referenced by FID into Directory under Name.
// Like Trenameat, it must hold the global rename lock.
func (t *Trename) handle(cs *connState) message {
	// Don't allow complex names.
	if err := checkSafeName(t.Name); err != nil {
		return newErr(err)
	}
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	refTarget, ok := cs.LookupFID(t.Directory)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer refTarget.DecRef()
	if err := ref.safelyGlobal(func() (err error) {
		// Don't allow a root rename.
		if ref.isRoot() {
			return unix.EINVAL
		}
		// Don't allow renaming deleted entries, or target non-directories.
		if ref.isDeleted() || refTarget.isDeleted() || !refTarget.mode.IsDir() {
			return unix.EINVAL
		}
		// If the parent is deleted, but we are not, something is seriously
		// wrong. It's fine to die at this point with an assertion failure.
		if ref.parent.isDeleted() {
			panic(fmt.Sprintf("parent %+v deleted, child %+v is not", ref.parent, ref))
		}
		// N.B. The rename operation is allowed to proceed on open files. It
		// does impact the state of its parent, but this is merely a sanity
		// check in any case, and the operation is safe. There may be other
		// files corresponding to the same path that are renamed anyways.
		// Check for the exact same file and short-circuit.
		oldName := ref.parent.pathNode.nameFor(ref)
		if ref.parent.pathNode == refTarget.pathNode && oldName == t.Name {
			return nil
		}
		// Call the rename method on the parent.
		if err := ref.parent.file.RenameAt(oldName, refTarget.file, t.Name); err != nil {
			return err
		}
		// Update the path tree.
		ref.parent.renameChildTo(oldName, refTarget, t.Name)
		return nil
	}); err != nil {
		return newErr(err)
	}
	return &Rrename{}
}
// handle implements handler.handle.
//
// Treadlink reads the target of the symlink referenced by the FID.
func (t *Treadlink) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	var target string
	if err := ref.safelyRead(func() (err error) {
		// Don't allow readlink on deleted files. There is no need to
		// check if this file is opened because symlinks cannot be
		// opened.
		if ref.isDeleted() || !ref.mode.IsSymlink() {
			return unix.EINVAL
		}
		// Do the read.
		target, err = ref.file.Readlink()
		return err
	}); err != nil {
		return newErr(err)
	}
	return &Rreadlink{target}
}
// handle implements handler.handle.
//
// Tread reads up to Count bytes at Offset from the opened file. A short
// read at EOF is returned as success with the data read so far.
func (t *Tread) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	// Constrain the size of the read buffer.
	if int(t.Count) > int(maximumLength) {
		return newErr(unix.ENOBUFS)
	}
	var (
		data = make([]byte, t.Count)
		n    int
	)
	if err := ref.safelyRead(func() (err error) {
		// Has it been opened already?
		if !ref.opened {
			return unix.EINVAL
		}
		// Can it be read? Check permissions.
		if ref.openFlags&OpenFlagsModeMask == WriteOnly {
			return unix.EPERM
		}
		n, err = ref.file.ReadAt(data, t.Offset)
		return err
	}); err != nil && err != io.EOF {
		// io.EOF is not an error for the client; it is conveyed by a
		// short (possibly empty) Rread instead.
		return newErr(err)
	}
	return &Rread{Data: data[:n]}
}
// handle implements handler.handle.
//
// Twrite writes Data at Offset into the opened file and returns the
// number of bytes written.
func (t *Twrite) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	var n int
	// A read lock suffices: the write mutates backing file contents, not
	// the fid/path-tree state guarded by the write lock.
	if err := ref.safelyRead(func() (err error) {
		// Has it been opened already?
		if !ref.opened {
			return unix.EINVAL
		}
		// Can it be written? Check permissions.
		if ref.openFlags&OpenFlagsModeMask == ReadOnly {
			return unix.EPERM
		}
		n, err = ref.file.WriteAt(t.Data, t.Offset)
		return err
	}); err != nil {
		return newErr(err)
	}
	return &Rwrite{Count: uint32(n)}
}
// handle implements handler.handle.
//
// Tmknod creates with no UID attribution; see Tumknod for the variant
// that carries an explicit UID.
func (t *Tmknod) handle(cs *connState) message {
	rmknod, err := t.do(cs, NoUID)
	if err != nil {
		return newErr(err)
	}
	return rmknod
}
// do performs the mknod with the given uid, shared between Tmknod and
// Tumknod.
func (t *Tmknod) do(cs *connState, uid UID) (*Rmknod, error) {
	// Don't allow complex names.
	if err := checkSafeName(t.Name); err != nil {
		return nil, err
	}
	ref, ok := cs.LookupFID(t.Directory)
	if !ok {
		return nil, unix.EBADF
	}
	defer ref.DecRef()
	var qid QID
	if err := ref.safelyWrite(func() (err error) {
		// Don't allow mknod on deleted files.
		if ref.isDeleted() || !ref.mode.IsDir() {
			return unix.EINVAL
		}
		// Not allowed on open directories.
		if ref.opened {
			return unix.EINVAL
		}
		// Do the mknod.
		qid, err = ref.file.Mknod(t.Name, t.Mode, t.Major, t.Minor, uid, t.GID)
		return err
	}); err != nil {
		return nil, err
	}
	return &Rmknod{QID: qid}, nil
}
// handle implements handler.handle.
//
// Tmkdir creates with no UID attribution; see Tumkdir for the variant
// that carries an explicit UID.
func (t *Tmkdir) handle(cs *connState) message {
	rmkdir, err := t.do(cs, NoUID)
	if err != nil {
		return newErr(err)
	}
	return rmkdir
}
// do performs the mkdir with the given uid, shared between Tmkdir and
// Tumkdir.
func (t *Tmkdir) do(cs *connState, uid UID) (*Rmkdir, error) {
	// Don't allow complex names.
	if err := checkSafeName(t.Name); err != nil {
		return nil, err
	}
	ref, ok := cs.LookupFID(t.Directory)
	if !ok {
		return nil, unix.EBADF
	}
	defer ref.DecRef()
	var qid QID
	if err := ref.safelyWrite(func() (err error) {
		// Don't allow mkdir on deleted files.
		if ref.isDeleted() || !ref.mode.IsDir() {
			return unix.EINVAL
		}
		// Not allowed on open directories.
		if ref.opened {
			return unix.EINVAL
		}
		// Do the mkdir.
		qid, err = ref.file.Mkdir(t.Name, t.Permissions, uid, t.GID)
		return err
	}); err != nil {
		return nil, err
	}
	return &Rmkdir{QID: qid}, nil
}
// handle implements handler.handle.
//
// Tgetattr fetches attributes for the file referenced by the FID.
func (t *Tgetattr) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	// We allow getattr on deleted files. Depending on the backing
	// implementation, it's possible that races exist that might allow
	// fetching attributes of other files. But we need to generally allow
	// refreshing attributes and this is a minor leak, if at all.
	var (
		qid   QID
		valid AttrMask
		attr  Attr
	)
	if err := ref.safelyRead(func() (err error) {
		qid, valid, attr, err = ref.file.GetAttr(t.AttrMask)
		return err
	}); err != nil {
		return newErr(err)
	}
	return &Rgetattr{QID: qid, Valid: valid, Attr: attr}
}
// handle implements handler.handle.
//
// Tsetattr applies the requested attribute changes to the file referenced
// by the FID.
func (t *Tsetattr) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	if err := ref.safelyWrite(func() error {
		// We don't allow setattr on files that have been deleted.
		// This might be technically incorrect, as it's possible that
		// there were multiple links and you can still change the
		// corresponding inode information.
		if !cs.server.options.SetAttrOnDeleted && ref.isDeleted() {
			return unix.EINVAL
		}
		// Set the attributes.
		return ref.file.SetAttr(t.Valid, t.SetAttr)
	}); err != nil {
		return newErr(err)
	}
	return &Rsetattr{}
}
// handle implements handler.handle.
//
// Tallocate allocates (fallocate-style) space in an opened, writable file.
func (t *Tallocate) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	if err := ref.safelyWrite(func() error {
		// Has it been opened already?
		if !ref.opened {
			return unix.EINVAL
		}
		// Can it be written? Check permissions.
		if ref.openFlags&OpenFlagsModeMask == ReadOnly {
			return unix.EBADF
		}
		// We don't allow allocate on files that have been deleted.
		if !cs.server.options.AllocateOnDeleted && ref.isDeleted() {
			return unix.EINVAL
		}
		return ref.file.Allocate(t.Mode, t.Offset, t.Length)
	}); err != nil {
		return newErr(err)
	}
	return &Rallocate{}
}
// handle implements handler.handle.
//
// Txattrwalk is unsupported; the FID lookup is still performed so that an
// unknown FID yields EBADF rather than ENODATA.
func (t *Txattrwalk) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	// We don't support extended attributes.
	return newErr(unix.ENODATA)
}
// handle implements handler.handle.
//
// Txattrcreate is unsupported; the FID lookup is still performed so that
// an unknown FID yields EBADF rather than ENOSYS.
func (t *Txattrcreate) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	// We don't support extended attributes.
	return newErr(unix.ENOSYS)
}
// handle implements handler.handle.
//
// Tgetxattr reads the extended attribute Name (up to Size bytes) from the
// file referenced by the FID.
func (t *Tgetxattr) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	var val string
	if err := ref.safelyRead(func() (err error) {
		// Don't allow getxattr on files that have been deleted.
		if ref.isDeleted() {
			return unix.EINVAL
		}
		val, err = ref.file.GetXattr(t.Name, t.Size)
		return err
	}); err != nil {
		return newErr(err)
	}
	return &Rgetxattr{Value: val}
}
// handle implements handler.handle.
//
// Tsetxattr sets the extended attribute Name to Value on the file
// referenced by the FID.
func (t *Tsetxattr) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	if err := ref.safelyWrite(func() error {
		// Don't allow setxattr on files that have been deleted.
		if ref.isDeleted() {
			return unix.EINVAL
		}
		return ref.file.SetXattr(t.Name, t.Value, t.Flags)
	}); err != nil {
		return newErr(err)
	}
	return &Rsetxattr{}
}
// handle implements handler.handle.
//
// Tlistxattr lists extended attribute names on the file referenced by the
// FID. Note that the resulting order is map-iteration order, i.e.
// unspecified.
func (t *Tlistxattr) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	var xattrs map[string]struct{}
	if err := ref.safelyRead(func() (err error) {
		// Don't allow listxattr on files that have been deleted.
		if ref.isDeleted() {
			return unix.EINVAL
		}
		xattrs, err = ref.file.ListXattr(t.Size)
		return err
	}); err != nil {
		return newErr(err)
	}
	// Flatten the set into a slice for the response.
	xattrList := make([]string, 0, len(xattrs))
	for x := range xattrs {
		xattrList = append(xattrList, x)
	}
	return &Rlistxattr{Xattrs: xattrList}
}
// handle implements handler.handle.
//
// Tremovexattr removes the extended attribute Name from the file
// referenced by the FID.
func (t *Tremovexattr) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	if err := ref.safelyWrite(func() error {
		// Don't allow removexattr on files that have been deleted.
		if ref.isDeleted() {
			return unix.EINVAL
		}
		return ref.file.RemoveXattr(t.Name)
	}); err != nil {
		return newErr(err)
	}
	return &Rremovexattr{}
}
// handle implements handler.handle.
//
// Treaddir reads directory entries from an opened directory, starting at
// DirentOffset and limited to Count bytes.
func (t *Treaddir) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.Directory)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	var entries []Dirent
	if err := ref.safelyRead(func() (err error) {
		// Don't allow reading deleted directories.
		if ref.isDeleted() || !ref.mode.IsDir() {
			return unix.EINVAL
		}
		// Has it been opened yet?
		if !ref.opened {
			return unix.EINVAL
		}
		// Read the entries. io.EOF simply means "no more entries" and
		// is reported to the client as an empty result, not an error.
		entries, err = ref.file.Readdir(t.DirentOffset, t.Count)
		if err != nil && err != io.EOF {
			return err
		}
		return nil
	}); err != nil {
		return newErr(err)
	}
	return &Rreaddir{Count: t.Count, Entries: entries}
}
// handle implements handler.handle.
//
// Tfsync flushes the opened file's data to stable storage.
func (t *Tfsync) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	if err := ref.safelyRead(func() (err error) {
		// Has it been opened yet?
		if !ref.opened {
			return unix.EINVAL
		}
		// Perform the sync.
		return ref.file.FSync()
	}); err != nil {
		return newErr(err)
	}
	return &Rfsync{}
}
// handle implements handler.handle.
//
// Tstatfs returns filesystem-level statistics for the file referenced by
// the FID.
//
// NOTE(review): unlike most handlers, this calls ref.file directly
// without safelyRead — presumably StatFS is safe against concurrent
// renames/deletes; confirm.
func (t *Tstatfs) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	st, err := ref.file.StatFS()
	if err != nil {
		return newErr(err)
	}
	return &Rstatfs{st}
}
// handle implements handler.handle.
//
// Tflushf flushes (writes back) the file's state via the backend's Flush.
func (t *Tflushf) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	if err := ref.safelyRead(ref.file.Flush); err != nil {
		return newErr(err)
	}
	return &Rflushf{}
}
// walkOne walks zero or one path elements.
//
// The slice passed as qids is appended to and returned.
//
// When getattr is true, attributes are fetched alongside the walk: via
// WalkGetAttr when the backend supports it, otherwise via a plain Walk
// followed by GetAttr on the result.
func walkOne(qids []QID, from File, names []string, getattr bool) ([]QID, File, AttrMask, Attr, error) {
	if len(names) > 1 {
		// We require exactly zero or one elements.
		return nil, nil, AttrMask{}, Attr{}, unix.EINVAL
	}
	var (
		localQIDs []QID
		sf        File
		valid     AttrMask
		attr      Attr
		err       error
	)
	switch {
	case getattr:
		localQIDs, sf, valid, attr, err = from.WalkGetAttr(names)
		// Can't put fallthrough in the if because Go.
		// Only fall through to the plain Walk when WalkGetAttr is
		// unimplemented (ENOSYS); any other error is final.
		if err != unix.ENOSYS {
			break
		}
		fallthrough
	default:
		localQIDs, sf, err = from.Walk(names)
		if err != nil {
			// No way to walk this element.
			break
		}
		if getattr {
			_, valid, attr, err = sf.GetAttr(AttrMaskAll())
			if err != nil {
				// Don't leak the file.
				sf.Close()
			}
		}
	}
	if err != nil {
		// Error walking, don't return anything.
		return nil, nil, AttrMask{}, Attr{}, err
	}
	if len(localQIDs) != 1 {
		// Expected a single QID.
		sf.Close()
		return nil, nil, AttrMask{}, Attr{}, unix.EINVAL
	}
	return append(qids, localQIDs...), sf, valid, attr, nil
}
// doWalk walks from a given fidRef.
//
// This enforces that all intermediate nodes are walkable (directories). The
// fidRef returned (newRef) has a reference associated with it that is now
// owned by the caller and must be handled appropriately.
//
// When names is empty this is a clone of ref; otherwise one fidRef is
// created per walked component, each parented on the previous.
func doWalk(cs *connState, ref *fidRef, names []string, getattr bool) (qids []QID, newRef *fidRef, valid AttrMask, attr Attr, err error) {
	// Check the names.
	for _, name := range names {
		err = checkSafeName(name)
		if err != nil {
			return
		}
	}
	// Has it been opened already?
	// Walking from an open fid is not permitted.
	err = ref.safelyRead(func() (err error) {
		if ref.opened {
			return unix.EBUSY
		}
		return nil
	})
	if err != nil {
		return
	}
	// Is this an empty list? Handle specially. We don't actually need to
	// validate anything since this is always permitted.
	if len(names) == 0 {
		var sf File // Temporary.
		if err := ref.maybeParent().safelyRead(func() (err error) {
			// Clone the single element.
			qids, sf, valid, attr, err = walkOne(nil, ref.file, nil, getattr)
			if err != nil {
				return err
			}
			// The clone shares the original's parent, mode, and path node.
			newRef = &fidRef{
				server:   cs.server,
				parent:   ref.parent,
				file:     sf,
				mode:     ref.mode,
				pathNode: ref.pathNode,
			}
			if !ref.isRoot() {
				if !newRef.isDeleted() {
					// Add only if a non-root node; the same node.
					ref.parent.pathNode.addChild(newRef, ref.parent.pathNode.nameFor(ref))
				}
				ref.parent.IncRef() // Acquire parent reference.
			}
			// doWalk returns a reference.
			newRef.IncRef()
			return nil
		}); err != nil {
			return nil, nil, AttrMask{}, Attr{}, err
		}
		// Do not return the new QID.
		return nil, newRef, valid, attr, nil
	}
	// Do the walk, one element at a time.
	walkRef := ref
	walkRef.IncRef()
	for i := 0; i < len(names); i++ {
		// We won't allow beyond past symlinks; stop here if this isn't
		// a proper directory and we have additional paths to walk.
		if !walkRef.mode.IsDir() {
			walkRef.DecRef() // Drop walk reference; no lock required.
			return nil, nil, AttrMask{}, Attr{}, unix.EINVAL
		}
		var sf File // Temporary.
		if err := walkRef.safelyRead(func() (err error) {
			// It is not safe to walk on a deleted directory. It could have been
			// replaced with a malicious symlink.
			if walkRef.isDeleted() {
				// Fail this operation as the result will not be meaningful if walkRef
				// is deleted.
				return unix.ENOENT
			}
			// Pass getattr = true to walkOne since we need the file type for
			// newRef.
			qids, sf, valid, attr, err = walkOne(qids, walkRef.file, names[i:i+1], true)
			if err != nil {
				return err
			}
			// Note that we don't need to acquire a lock on any of
			// these individual instances. That's because they are
			// not actually addressable via a FID. They are
			// anonymous. They exist in the tree for tracking
			// purposes.
			newRef := &fidRef{
				server:   cs.server,
				parent:   walkRef,
				file:     sf,
				mode:     attr.Mode.FileType(),
				pathNode: walkRef.pathNode.pathNodeFor(names[i]),
			}
			walkRef.pathNode.addChild(newRef, names[i])
			// We allow our walk reference to become the new parent
			// reference here and so we don't IncRef. Instead, just
			// set walkRef to the newRef above and acquire a new
			// walk reference.
			walkRef = newRef
			walkRef.IncRef()
			return nil
		}); err != nil {
			walkRef.DecRef() // Drop the old walkRef.
			return nil, nil, AttrMask{}, Attr{}, err
		}
	}
	// Success.
	return qids, walkRef, valid, attr, nil
}
// handle implements handler.handle.
//
// Twalk walks Names from FID and binds the result to NewFID.
func (t *Twalk) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	// Do the walk.
	qids, newRef, _, _, err := doWalk(cs, ref, t.Names, false)
	if err != nil {
		return newErr(err)
	}
	defer newRef.DecRef()
	// Install the new FID.
	cs.InsertFID(t.NewFID, newRef)
	return &Rwalk{QIDs: qids}
}
// handle implements handler.handle.
//
// Twalkgetattr is Twalk plus attributes of the walked-to file.
func (t *Twalkgetattr) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	// Do the walk.
	qids, newRef, valid, attr, err := doWalk(cs, ref, t.Names, true)
	if err != nil {
		return newErr(err)
	}
	defer newRef.DecRef()
	// Install the new FID.
	cs.InsertFID(t.NewFID, newRef)
	return &Rwalkgetattr{QIDs: qids, Valid: valid, Attr: attr}
}
// handle implements handler.handle.
//
// Tucreate is Tlcreate with an explicit UID.
func (t *Tucreate) handle(cs *connState) message {
	rlcreate, err := t.Tlcreate.do(cs, t.UID)
	if err != nil {
		return newErr(err)
	}
	return &Rucreate{*rlcreate}
}
// handle implements handler.handle.
//
// Tumkdir is Tmkdir with an explicit UID.
func (t *Tumkdir) handle(cs *connState) message {
	rmkdir, err := t.Tmkdir.do(cs, t.UID)
	if err != nil {
		return newErr(err)
	}
	return &Rumkdir{*rmkdir}
}
// handle implements handler.handle.
//
// Tusymlink is Tsymlink with an explicit UID.
func (t *Tusymlink) handle(cs *connState) message {
	rsymlink, err := t.Tsymlink.do(cs, t.UID)
	if err != nil {
		return newErr(err)
	}
	return &Rusymlink{*rsymlink}
}
// handle implements handler.handle.
//
// Tumknod is Tmknod with an explicit UID.
func (t *Tumknod) handle(cs *connState) message {
	rmknod, err := t.Tmknod.do(cs, t.UID)
	if err != nil {
		return newErr(err)
	}
	return &Rumknod{*rmknod}
}
// handle implements handler.handle.
//
// Tbind creates a bound socket named SockName in Directory and binds
// NewFID to it.
func (t *Tbind) handle(cs *connState) message {
	// Don't allow complex names.
	if err := checkSafeName(t.SockName); err != nil {
		return newErr(err)
	}
	ref, ok := cs.LookupFID(t.Directory)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	var (
		sockRef *fidRef
		qid     QID
		valid   AttrMask
		attr    Attr
	)
	if err := ref.safelyWrite(func() (err error) {
		// Don't allow creation from non-directories or deleted directories.
		if ref.isDeleted() || !ref.mode.IsDir() {
			return unix.EINVAL
		}
		// Not allowed on open directories.
		if ref.opened {
			return unix.EINVAL
		}
		var sockF File
		sockF, qid, valid, attr, err = ref.file.Bind(t.SockType, t.SockName, t.UID, t.GID)
		if err != nil {
			return err
		}
		sockRef = &fidRef{
			server:   cs.server,
			parent:   ref,
			file:     sockF,
			mode:     ModeSocket,
			pathNode: ref.pathNode.pathNodeFor(t.SockName),
		}
		ref.pathNode.addChild(sockRef, t.SockName)
		ref.IncRef() // Acquire parent reference.
		return nil
	}); err != nil {
		return newErr(err)
	}
	cs.InsertFID(t.NewFID, sockRef)
	return &Rbind{QID: qid, Valid: valid, Attr: attr}
}
// handle implements handler.handle.
//
// Tlconnect connects to the socket referenced by the FID and returns the
// connected host FD as the response payload.
func (t *Tlconnect) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	var osFile *fd.FD
	if err := ref.safelyRead(func() (err error) {
		// Don't allow connecting to deleted files.
		if ref.isDeleted() || !ref.mode.IsSocket() {
			return unix.EINVAL
		}
		// Do the connect.
		osFile, err = ref.file.Connect(t.SocketType)
		return err
	}); err != nil {
		return newErr(err)
	}
	rlconnect := &Rlconnect{}
	rlconnect.SetFilePayload(osFile)
	return rlconnect
}
// handle implements handler.handle.
//
// Tchannel returns a flipcall channel descriptor: Control selects the
// main data channel (0) or the client FD (1).
func (t *Tchannel) handle(cs *connState) message {
	// Ensure that channels are enabled.
	if err := cs.initializeChannels(); err != nil {
		return newErr(err)
	}
	ch := cs.lookupChannel(t.ID)
	if ch == nil {
		return newErr(unix.ENOSYS)
	}
	// Return the payload. Note that we need to duplicate the file
	// descriptor for the channel allocator, because sending is a
	// destructive operation between sendRecvLegacy (and now the newer
	// channel send operations). Same goes for the client FD.
	rchannel := &Rchannel{
		Offset: uint64(ch.desc.Offset),
		Length: uint64(ch.desc.Length),
	}
	switch t.Control {
	case 0:
		// Open the main data channel.
		mfd, err := unix.Dup(int(cs.channelAlloc.FD()))
		if err != nil {
			return newErr(err)
		}
		rchannel.SetFilePayload(fd.New(mfd))
	case 1:
		// Duplicate and return the client's side of the channel.
		cfd, err := unix.Dup(ch.client.FD())
		if err != nil {
			return newErr(err)
		}
		rchannel.SetFilePayload(fd.New(cfd))
	default:
		return newErr(unix.EINVAL)
	}
	return rchannel
}
// handle implements handler.handle.
//
// Tmultigetattr stats multiple path components in one round trip. When the
// backend supports MultiGetAttr natively, it is delegated to; otherwise the
// path is walked component by component here. Traversal stops early (without
// error) at a missing or deleted entry, or at a non-directory.
func (t *Tmultigetattr) handle(cs *connState) message {
	for i, name := range t.Names {
		if len(name) == 0 && i == 0 {
			// Empty name is allowed on the first entry to indicate that the current
			// FID needs to be included in the result.
			continue
		}
		if err := checkSafeName(name); err != nil {
			return newErr(err)
		}
	}
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(unix.EBADF)
	}
	defer ref.DecRef()
	// Fast path: the backend implements the batched operation itself.
	if cs.server.options.MultiGetAttrSupported {
		var stats []FullStat
		if err := ref.safelyRead(func() (err error) {
			stats, err = ref.file.MultiGetAttr(t.Names)
			return err
		}); err != nil {
			return newErr(err)
		}
		return &Rmultigetattr{Stats: stats}
	}
	// Slow path: walk each component manually, holding the rename lock for
	// the whole traversal so parent/child relationships stay stable.
	stats := make([]FullStat, 0, len(t.Names))
	mask := AttrMaskAll()
	start := ref.file
	startNode := ref.pathNode
	parent := start
	parentNode := startNode
	// closeParent closes the current intermediate file, but never the
	// caller's starting file.
	closeParent := func() {
		if parent != start {
			_ = parent.Close()
		}
	}
	defer closeParent()
	cs.server.renameMu.RLock()
	defer cs.server.renameMu.RUnlock()
	for i, name := range t.Names {
		if len(name) == 0 && i == 0 {
			// Stat the starting FID itself.
			startNode.opMu.RLock()
			qid, valid, attr, err := start.GetAttr(mask)
			startNode.opMu.RUnlock()
			if err != nil {
				return newErr(err)
			}
			stats = append(stats, FullStat{
				QID:   qid,
				Valid: valid,
				Attr:  attr,
			})
			continue
		}
		parentNode.opMu.RLock()
		if parentNode.deleted.Load() != 0 {
			// Parent was deleted mid-traversal; return what we have.
			parentNode.opMu.RUnlock()
			break
		}
		qids, child, valid, attr, err := parent.WalkGetAttr([]string{name})
		if err != nil {
			parentNode.opMu.RUnlock()
			// A missing entry terminates the traversal without error.
			if errors2.Is(err, unix.ENOENT) {
				break
			}
			return newErr(err)
		}
		stats = append(stats, FullStat{
			QID:   qids[0],
			Valid: valid,
			Attr:  attr,
		})
		// Update with next generation.
		closeParent()
		parent = child
		childNode := parentNode.pathNodeFor(name)
		parentNode.opMu.RUnlock()
		parentNode = childNode
		if attr.Mode.FileType() != ModeDirectory {
			// Doesn't need to continue if entry is not a dir. Including symlinks
			// that cannot be followed.
			break
		}
	}
	return &Rmultigetattr{Stats: stats}
}
|
package gflConst
// RankC enumerates flight-crew ranks as integer codes.
type RankC int

// Rank codes start at 300 and increase by one in declaration order:
// Navigator = 300, FlightEngineer = 301, SecondOfficer = 302,
// FirstOfficer = 303, Captain = 304.
// NOTE(review): the commented-out legacy map below uses a 500-series
// numbering — confirm which scheme downstream consumers expect.
const (
	Navigator RankC = 300 + iota
	FlightEngineer
	SecondOfficer
	FirstOfficer
	Captain
)
/*
type RankC struct {
elements map[string]int
}
func (l *RankC) Const(ref string) int {
if ret, ok := l.elements[ref]; ok {
return ret
} else {
return -1
}
}
func NewRankC() *RankC {
lt := new(RankC)
lt.elements = make(map[string]int)
lt.elements["ramp operator"] = 500
lt.elements["flight engineer"] = 510
lt.elements["first officer"] = 520
lt.elements["captain"] = 530
return lt
}
*/ |
// SPDX-License-Identifier: ISC
// Copyright (c) 2014-2020 Bitmark Inc.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package transactionrecord
import (
"github.com/bitmark-inc/bitmarkd/account"
"github.com/bitmark-inc/bitmarkd/currency"
"github.com/bitmark-inc/bitmarkd/fault"
"github.com/bitmark-inc/bitmarkd/merkle"
"github.com/bitmark-inc/bitmarkd/util"
)
// Unpack - turn a byte slice into a record
// Note: the unpacker will access the underlying array of the packed
// record so p[x:y].Unpack() can read past p[y] and could continue up to cap(p)
// i.e p[x:cap(p)].Unpack() performs the same operation
// elements before p[x] cannot be accessed
// see: https://blog.golang.org/go-slices-usage-and-internals
//
// must cast result to correct type
//
// e.g.
// registration, ok := result.(*transaction.Registration)
// or:
// switch tx := result.(type) {
// case *transaction.Registration:
func (record Packed) Unpack(testnet bool) (t Transaction, n int, e error) {
	// Any slice-bounds panic caused by a truncated record is converted into a
	// NotTransactionPack error instead of crashing the caller.
	defer func() {
		if r := recover(); nil != r {
			e = fault.NotTransactionPack
		}
	}()
	// Leading varint is the record tag; ClippedVarint64 returns a zero size on
	// failure or when the value falls outside [1, 8192].
	recordType, n := util.ClippedVarint64(record, 1, 8192)
	if 0 == n {
		return nil, 0, fault.NotTransactionPack
	}
	// "break unpack_switch" from any case jumps to the final
	// NotTransactionPack return below.
unpack_switch:
	switch TagType(recordType) {
	case BaseDataTag:
		// currency
		c, currencyLength := util.FromVarint64(record[n:])
		if 0 == currencyLength {
			break unpack_switch
		}
		n += currencyLength
		// NOTE: local "currency" shadows the currency package from here on.
		currency, err := currency.FromUint64(c)
		if nil != err {
			return nil, 0, err
		}
		// paymentAddress
		paymentAddressLength, paymentAddressOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == paymentAddressOffset {
			break unpack_switch
		}
		n += paymentAddressOffset
		paymentAddress := string(record[n : n+paymentAddressLength])
		n += paymentAddressLength
		// owner public key
		ownerLength, ownerOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == ownerOffset {
			break unpack_switch
		}
		n += ownerOffset
		owner, err := account.AccountFromBytes(record[n : n+ownerLength])
		if nil != err {
			return nil, 0, err
		}
		// reject keys from the wrong network (test vs live)
		if owner.IsTesting() != testnet {
			return nil, 0, fault.WrongNetworkForPublicKey
		}
		n += ownerLength
		// nonce
		nonce, nonceLength := util.FromVarint64(record[n:])
		if 0 == nonceLength {
			break unpack_switch
		}
		n += nonceLength
		// signature is remainder of record
		signatureLength, signatureOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == signatureOffset {
			break unpack_switch
		}
		signature := make(account.Signature, signatureLength)
		n += signatureOffset
		copy(signature, record[n:n+signatureLength])
		n += signatureLength
		r := &OldBaseData{
			Owner:          owner,
			Currency:       currency,
			PaymentAddress: string(paymentAddress),
			Nonce:          nonce,
			Signature:      signature,
		}
		// semantic validation (including signature check) of the decoded record
		err = r.check(testnet)
		if nil != err {
			return nil, 0, err
		}
		return r, n, nil

	case AssetDataTag:
		// name (zero length allowed)
		nameLength, nameOffset := util.ClippedVarint64(record[n:], 0, 8192)
		name := make([]byte, nameLength)
		n += nameOffset
		copy(name, record[n:n+nameLength])
		n += nameLength
		// fingerprint
		fingerprintLength, fingerprintOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == fingerprintOffset {
			break unpack_switch
		}
		fingerprint := make([]byte, fingerprintLength)
		n += fingerprintOffset
		copy(fingerprint, record[n:n+fingerprintLength])
		n += fingerprintLength
		// metadata (can be zero length)
		metadataLength, metadataOffset := util.ClippedVarint64(record[n:], 0, 8192) // Note: zero is valid here
		if 0 == metadataOffset {
			break unpack_switch
		}
		metadata := make([]byte, metadataLength)
		n += metadataOffset
		copy(metadata, record[n:n+metadataLength])
		n += metadataLength
		// registrant public key
		registrantLength, registrantOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == registrantOffset {
			break unpack_switch
		}
		n += registrantOffset
		registrant, err := account.AccountFromBytes(record[n : n+registrantLength])
		if nil != err {
			return nil, 0, err
		}
		if registrant.IsTesting() != testnet {
			return nil, 0, fault.WrongNetworkForPublicKey
		}
		n += registrantLength
		// signature is remainder of record
		signatureLength, signatureOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == signatureOffset {
			break unpack_switch
		}
		signature := make(account.Signature, signatureLength)
		n += signatureOffset
		copy(signature, record[n:n+signatureLength])
		n += signatureLength
		r := &AssetData{
			Name:        string(name),
			Fingerprint: string(fingerprint),
			Metadata:    string(metadata),
			Registrant:  registrant,
			Signature:   signature,
		}
		err = r.check(testnet)
		if nil != err {
			return nil, 0, err
		}
		return r, n, nil

	case BitmarkIssueTag:
		// asset id
		assetIdentifierLength, assetIdentifierOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == assetIdentifierOffset {
			break unpack_switch
		}
		n += assetIdentifierOffset
		var assetId AssetIdentifier
		err := AssetIdentifierFromBytes(&assetId, record[n:n+assetIdentifierLength])
		if nil != err {
			return nil, 0, err
		}
		n += assetIdentifierLength
		// owner public key
		ownerLength, ownerOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == ownerOffset {
			break unpack_switch
		}
		n += ownerOffset
		owner, err := account.AccountFromBytes(record[n : n+ownerLength])
		if nil != err {
			return nil, 0, err
		}
		if owner.IsTesting() != testnet {
			return nil, 0, fault.WrongNetworkForPublicKey
		}
		n += ownerLength
		// nonce
		nonce, nonceLength := util.FromVarint64(record[n:])
		if 0 == nonceLength {
			break unpack_switch
		}
		n += nonceLength
		// signature is remainder of record
		signatureLength, signatureOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == signatureOffset {
			break unpack_switch
		}
		signature := make(account.Signature, signatureLength)
		n += signatureOffset
		copy(signature, record[n:n+signatureLength])
		n += signatureLength
		r := &BitmarkIssue{
			AssetId:   assetId,
			Owner:     owner,
			Signature: signature,
			Nonce:     nonce,
		}
		err = r.check(testnet)
		if nil != err {
			return nil, 0, err
		}
		return r, n, nil

	case BitmarkTransferUnratifiedTag:
		// link
		linkLength, linkOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == linkOffset {
			break unpack_switch
		}
		n += linkOffset
		var link merkle.Digest
		err := merkle.DigestFromBytes(&link, record[n:n+linkLength])
		if nil != err {
			return nil, 0, err
		}
		n += linkLength
		// optional escrow payment
		// NOTE: ":=" re-declares n inside this case scope (shadowing the outer
		// n); harmless because every use and return below is within this case.
		escrow, n, err := unpackEscrow(record, n)
		if nil != err {
			return nil, 0, err
		}
		// owner public key
		ownerLength, ownerOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == ownerOffset {
			break unpack_switch
		}
		n += ownerOffset
		owner, err := account.AccountFromBytes(record[n : n+ownerLength])
		if nil != err {
			return nil, 0, err
		}
		if owner.IsTesting() != testnet {
			return nil, 0, fault.WrongNetworkForPublicKey
		}
		n += ownerLength
		// signature is remainder of record
		signatureLength, signatureOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == signatureOffset {
			break unpack_switch
		}
		signature := make(account.Signature, signatureLength)
		n += signatureOffset
		copy(signature, record[n:n+signatureLength])
		n += signatureLength
		r := &BitmarkTransferUnratified{
			Link:      link,
			Escrow:    escrow,
			Owner:     owner,
			Signature: signature,
		}
		err = r.check(testnet)
		if nil != err {
			return nil, 0, err
		}
		return r, n, nil

	case BitmarkTransferCountersignedTag:
		// link
		linkLength, linkOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == linkOffset {
			break unpack_switch
		}
		n += linkOffset
		var link merkle.Digest
		err := merkle.DigestFromBytes(&link, record[n:n+linkLength])
		if nil != err {
			return nil, 0, err
		}
		n += linkLength
		// optional escrow payment (n is shadowed here, as above)
		escrow, n, err := unpackEscrow(record, n)
		if nil != err {
			return nil, 0, err
		}
		// owner public key
		ownerLength, ownerOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == ownerOffset {
			break unpack_switch
		}
		n += ownerOffset
		owner, err := account.AccountFromBytes(record[n : n+ownerLength])
		if nil != err {
			return nil, 0, err
		}
		if owner.IsTesting() != testnet {
			return nil, 0, fault.WrongNetworkForPublicKey
		}
		n += ownerLength
		// signature
		signatureLength, signatureOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == signatureOffset {
			break unpack_switch
		}
		signature := make(account.Signature, signatureLength)
		n += signatureOffset
		copy(signature, record[n:n+signatureLength])
		n += signatureLength
		// countersignature
		countersignatureLength, countersignatureOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == countersignatureOffset {
			break unpack_switch
		}
		countersignature := make(account.Signature, countersignatureLength)
		n += countersignatureOffset
		copy(countersignature, record[n:n+countersignatureLength])
		n += countersignatureLength
		r := &BitmarkTransferCountersigned{
			Link:             link,
			Escrow:           escrow,
			Owner:            owner,
			Signature:        signature,
			Countersignature: countersignature,
		}
		err = r.check(testnet)
		if nil != err {
			return nil, 0, err
		}
		return r, n, nil

	case BlockFoundationTag:
		// version
		version, versionLength := util.FromVarint64(record[n:])
		if 0 == versionLength {
			break unpack_switch
		}
		n += versionLength
		// version indexes the package-level versions table
		if version < 1 || version >= uint64(len(versions)) {
			return nil, 0, fault.InvalidCurrencyAddress // ***** FIX THIS: is this error right?
		}
		// payment map
		paymentsLength, paymentsOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == paymentsOffset {
			break unpack_switch
		}
		n += paymentsOffset
		payments, cs, err := currency.UnpackMap(record[n:n+paymentsLength], testnet)
		if nil != err {
			return nil, 0, err
		}
		// currency set must match exactly what this version requires
		if cs != versions[version] {
			return nil, 0, fault.InvalidCurrencyAddress // ***** FIX THIS: is this error right?
		}
		n += paymentsLength
		// owner public key
		ownerLength, ownerOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == ownerOffset {
			break unpack_switch
		}
		n += ownerOffset
		owner, err := account.AccountFromBytes(record[n : n+ownerLength])
		if nil != err {
			return nil, 0, err
		}
		if owner.IsTesting() != testnet {
			return nil, 0, fault.WrongNetworkForPublicKey
		}
		n += ownerLength
		// nonce
		nonce, nonceLength := util.FromVarint64(record[n:])
		if 0 == nonceLength {
			break unpack_switch
		}
		n += nonceLength
		// signature is remainder of record
		signatureLength, signatureOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == signatureOffset {
			break unpack_switch
		}
		signature := make(account.Signature, signatureLength)
		n += signatureOffset
		copy(signature, record[n:n+signatureLength])
		n += signatureLength
		r := &BlockFoundation{
			Version:   version,
			Owner:     owner,
			Payments:  payments,
			Nonce:     nonce,
			Signature: signature,
		}
		err = r.check(testnet)
		if nil != err {
			return nil, 0, err
		}
		return r, n, nil

	case BlockOwnerTransferTag:
		// link
		linkLength, linkOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == linkOffset {
			break unpack_switch
		}
		n += linkOffset
		var link merkle.Digest
		err := merkle.DigestFromBytes(&link, record[n:n+linkLength])
		if nil != err {
			return nil, 0, err
		}
		n += linkLength
		// optional escrow payment (n is shadowed here, as above)
		escrow, n, err := unpackEscrow(record, n)
		if nil != err {
			return nil, 0, err
		}
		// version
		version, versionLength := util.FromVarint64(record[n:])
		if 0 == versionLength {
			break unpack_switch
		}
		n += versionLength
		if version < 1 || version >= uint64(len(versions)) {
			return nil, 0, fault.InvalidCurrencyAddress // ***** FIX THIS: is this error right?
		}
		// payment map
		paymentsLength, paymentsOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == paymentsOffset {
			break unpack_switch
		}
		n += paymentsOffset
		payments, cs, err := currency.UnpackMap(record[n:n+paymentsLength], testnet)
		if nil != err {
			return nil, 0, err
		}
		if cs != versions[version] {
			return nil, 0, fault.InvalidCurrencyAddress // ***** FIX THIS: is this error right?
		}
		n += paymentsLength
		// owner public key
		ownerLength, ownerOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == ownerOffset {
			break unpack_switch
		}
		n += ownerOffset
		owner, err := account.AccountFromBytes(record[n : n+ownerLength])
		if nil != err {
			return nil, 0, err
		}
		if owner.IsTesting() != testnet {
			return nil, 0, fault.WrongNetworkForPublicKey
		}
		n += ownerLength
		// signature
		signatureLength, signatureOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == signatureOffset {
			break unpack_switch
		}
		signature := make(account.Signature, signatureLength)
		n += signatureOffset
		copy(signature, record[n:n+signatureLength])
		n += signatureLength
		// countersignature
		countersignatureLength, countersignatureOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == countersignatureOffset {
			break unpack_switch
		}
		countersignature := make(account.Signature, countersignatureLength)
		n += countersignatureOffset
		copy(countersignature, record[n:n+countersignatureLength])
		n += countersignatureLength
		r := &BlockOwnerTransfer{
			Link:             link,
			Escrow:           escrow,
			Version:          version,
			Owner:            owner,
			Payments:         payments,
			Signature:        signature,
			Countersignature: countersignature,
		}
		err = r.check(testnet)
		if nil != err {
			return nil, 0, err
		}
		return r, n, nil

	case BitmarkShareTag:
		// link
		linkLength, linkOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == linkOffset {
			break unpack_switch
		}
		n += linkOffset
		var link merkle.Digest
		err := merkle.DigestFromBytes(&link, record[n:n+linkLength])
		if nil != err {
			return nil, 0, err
		}
		n += linkLength
		// total number of shares to issue
		quantity, quantityLength := util.FromVarint64(record[n:])
		if 0 == quantityLength {
			break unpack_switch
		}
		n += quantityLength
		// signature
		signatureLength, signatureOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == signatureOffset {
			break unpack_switch
		}
		signature := make(account.Signature, signatureLength)
		n += signatureOffset
		copy(signature, record[n:n+signatureLength])
		n += signatureLength
		r := &BitmarkShare{
			Link:      link,
			Quantity:  quantity,
			Signature: signature,
		}
		err = r.check(testnet)
		if nil != err {
			return nil, 0, err
		}
		return r, n, nil

	case ShareGrantTag:
		// share id
		shareIdLength, shareIdOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == shareIdOffset {
			break unpack_switch
		}
		n += shareIdOffset
		var shareId merkle.Digest
		err := merkle.DigestFromBytes(&shareId, record[n:n+shareIdLength])
		if nil != err {
			return nil, 0, err
		}
		n += shareIdLength
		// number of shares to transfer
		quantity, quantityLength := util.FromVarint64(record[n:])
		if 0 == quantityLength {
			break unpack_switch
		}
		n += quantityLength
		// owner public key
		ownerLength, ownerOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == ownerOffset {
			break unpack_switch
		}
		n += ownerOffset
		owner, err := account.AccountFromBytes(record[n : n+ownerLength])
		if nil != err {
			return nil, 0, err
		}
		if owner.IsTesting() != testnet {
			return nil, 0, fault.WrongNetworkForPublicKey
		}
		n += ownerLength
		// recipient public key
		recipientLength, recipientOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == recipientOffset {
			break unpack_switch
		}
		n += recipientOffset
		recipient, err := account.AccountFromBytes(record[n : n+recipientLength])
		if nil != err {
			return nil, 0, err
		}
		if recipient.IsTesting() != testnet {
			return nil, 0, fault.WrongNetworkForPublicKey
		}
		n += recipientLength
		// time limit
		beforeBlock, beforeBlockLength := util.FromVarint64(record[n:])
		if 0 == beforeBlockLength {
			break unpack_switch
		}
		n += beforeBlockLength
		// signature
		signatureLength, signatureOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == signatureOffset {
			break unpack_switch
		}
		signature := make(account.Signature, signatureLength)
		n += signatureOffset
		copy(signature, record[n:n+signatureLength])
		n += signatureLength
		// countersignature
		countersignatureLength, countersignatureOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == countersignatureOffset {
			break unpack_switch
		}
		countersignature := make(account.Signature, countersignatureLength)
		n += countersignatureOffset
		copy(countersignature, record[n:n+countersignatureLength])
		n += countersignatureLength
		r := &ShareGrant{
			ShareId:          shareId,
			Quantity:         quantity,
			Owner:            owner,
			Recipient:        recipient,
			BeforeBlock:      beforeBlock,
			Signature:        signature,
			Countersignature: countersignature,
		}
		err = r.check(testnet)
		if nil != err {
			return nil, 0, err
		}
		return r, n, nil

	case ShareSwapTag:
		// share one
		shareIdOneLength, shareIdOneOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == shareIdOneOffset {
			break unpack_switch
		}
		n += shareIdOneOffset
		var shareIdOne merkle.Digest
		err := merkle.DigestFromBytes(&shareIdOne, record[n:n+shareIdOneLength])
		if nil != err {
			return nil, 0, err
		}
		n += shareIdOneLength
		// number of shares to transfer
		quantityOne, quantityOneLength := util.FromVarint64(record[n:])
		if 0 == quantityOneLength {
			break unpack_switch
		}
		n += quantityOneLength
		// owner one public key
		ownerOneLength, ownerOneOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == ownerOneOffset {
			break unpack_switch
		}
		n += ownerOneOffset
		ownerOne, err := account.AccountFromBytes(record[n : n+ownerOneLength])
		if nil != err {
			return nil, 0, err
		}
		if ownerOne.IsTesting() != testnet {
			return nil, 0, fault.WrongNetworkForPublicKey
		}
		n += ownerOneLength
		// share two
		shareIdTwoLength, shareIdTwoOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == shareIdTwoOffset {
			break unpack_switch
		}
		n += shareIdTwoOffset
		var shareIdTwo merkle.Digest
		err = merkle.DigestFromBytes(&shareIdTwo, record[n:n+shareIdTwoLength])
		if nil != err {
			return nil, 0, err
		}
		n += shareIdTwoLength
		// number of shares to transfer
		quantityTwo, quantityTwoLength := util.FromVarint64(record[n:])
		if 0 == quantityTwoLength {
			break unpack_switch
		}
		n += quantityTwoLength
		// owner two public key
		ownerTwoLength, ownerTwoOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == ownerTwoOffset {
			break unpack_switch
		}
		n += ownerTwoOffset
		ownerTwo, err := account.AccountFromBytes(record[n : n+ownerTwoLength])
		if nil != err {
			return nil, 0, err
		}
		if ownerTwo.IsTesting() != testnet {
			return nil, 0, fault.WrongNetworkForPublicKey
		}
		n += ownerTwoLength
		// time limit
		beforeBlock, beforeBlockLength := util.FromVarint64(record[n:])
		if 0 == beforeBlockLength {
			break unpack_switch
		}
		n += beforeBlockLength
		// signature
		signatureLength, signatureOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == signatureOffset {
			break unpack_switch
		}
		signature := make(account.Signature, signatureLength)
		n += signatureOffset
		copy(signature, record[n:n+signatureLength])
		n += signatureLength
		// countersignature
		countersignatureLength, countersignatureOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == countersignatureOffset {
			break unpack_switch
		}
		countersignature := make(account.Signature, countersignatureLength)
		n += countersignatureOffset
		copy(countersignature, record[n:n+countersignatureLength])
		n += countersignatureLength
		r := &ShareSwap{
			ShareIdOne:       shareIdOne,
			QuantityOne:      quantityOne,
			OwnerOne:         ownerOne,
			ShareIdTwo:       shareIdTwo,
			QuantityTwo:      quantityTwo,
			OwnerTwo:         ownerTwo,
			BeforeBlock:      beforeBlock,
			Signature:        signature,
			Countersignature: countersignature,
		}
		err = r.check(testnet)
		if nil != err {
			return nil, 0, err
		}
		return r, n, nil

	default: // also NullTag
	}
	// Reached on an unknown tag or when any case broke out of unpack_switch
	// because a field failed to decode.
	return nil, 0, fault.NotTransactionPack
}
// unpackEscrow decodes the optional escrow payment that may be embedded in a
// transfer record at offset n. A leading 0x00 byte means "no payment"; 0x01
// introduces a currency / address / amount triple; any other byte is invalid.
// It returns the decoded payment (or nil), the updated offset and any error.
func unpackEscrow(record []byte, n int) (*Payment, int, error) {
	var payment *Payment
	switch record[n] {
	case 0:
		// no escrow payment present
		n += 1
	case 1:
		n += 1
		// currency
		c, currencyLength := util.FromVarint64(record[n:])
		if 0 == currencyLength {
			return nil, 0, fault.NotTransactionPack
		}
		n += currencyLength
		cur, err := currency.FromUint64(c)
		if nil != err {
			return nil, 0, err
		}
		// address
		addressLength, addressOffset := util.ClippedVarint64(record[n:], 1, 8192)
		if 0 == addressOffset {
			return nil, 0, fault.NotTransactionPack
		}
		n += addressOffset
		address := string(record[n : n+addressLength])
		n += addressLength
		// amount
		amount, amountLength := util.FromVarint64(record[n:])
		if 0 == amountLength {
			return nil, 0, fault.NotTransactionPack
		}
		n += amountLength
		payment = &Payment{
			Currency: cur,
			Address:  address,
			Amount:   amount,
		}
	default:
		return nil, 0, fault.NotTransactionPack
	}
	return payment, n, nil
}
|
package text
// Import external packages
import (
"github.com/veandco/go-sdl2/sdl"
)
// subpackages
import (
"flood_go/graphicsx"
)
// =====================================================================
// Struct: TextObject
// =====================================================================
// TextObjectConfig carries the parameters needed to construct a TextObject.
type TextObjectConfig struct {
	Graphics *graphicsx.Graphics // renderer used to rasterize the text
	Text     string              // text to display
	Font     string              // font identifier/path passed to CreateTextImage
	FontSize int                 // point size
	Color    *sdl.Color          // foreground color
	BgColor  *sdl.Color          // background color
}
// TextObject is a renderable piece of text; Image is the cached rasterization
// of Text and is refreshed by Render / SetText.
type TextObject struct {
	Graphics   *graphicsx.Graphics // renderer used to (re)create Image
	Image      *graphicsx.Image    // cached rendering of Text
	Text       string
	Font       string
	FontSize   int
	Color      *sdl.Color
	BgColor    *sdl.Color
	Rect       *sdl.Rect         // on-screen placement
	UpdateRect func(*TextObject) // optional callback to recompute Rect
}
// SetText replaces the displayed text and immediately re-renders the cached
// image. (Receiver renamed from the non-idiomatic "this".)
func (to *TextObject) SetText(text string) {
	to.Text = text
	to.Render()
}
// Render (re)creates the cached Image from the current text, font and colors.
// (Receiver renamed from the non-idiomatic "this".)
func (to *TextObject) Render() {
	image := to.Graphics.CreateTextImage(
		to.Text, to.Font, to.FontSize, to.Color, to.BgColor)
	to.Image = &image
}
// NewTextObject builds a TextObject from the given configuration and renders
// its initial image before returning it.
func NewTextObject(toc TextObjectConfig) *TextObject {
	obj := &TextObject{
		Graphics: toc.Graphics,
		Text:     toc.Text,
		Font:     toc.Font,
		FontSize: toc.FontSize,
		Color:    toc.Color,
		BgColor:  toc.BgColor,
	}
	obj.Render()
	return obj
}
|
package order
// Product is a single line item in an order.
type Product struct {
	Name     string  `json:"name" validate:"required"` // product name (required)
	Quantity float64 `json:"quantity"`                 // amount ordered, in Unit
	Unit     int     `json:"unit"`                     // unit code; semantics defined by callers — TODO confirm
	Price    float64 `json:"price"`
	Measure  float64 `json:"measure"`
}
|
package easyquery
import (
"fmt"
"gorm.io/gorm"
)
// PageScope returns a gorm scope that applies the paginater's limit and
// offset to a query.
func PageScope(paginater Paginater) func(db *gorm.DB) *gorm.DB {
	return func(db *gorm.DB) *gorm.DB {
		size, offset := paginater.GetSize(), paginater.GetOffset()
		return db.Limit(size).Offset(offset)
	}
}
// PageOrderIdDescScope returns a gorm scope that orders by the table's id
// descending and applies the paginater's limit and offset.
func PageOrderIdDescScope(paginater Paginater, table string) func(db *gorm.DB) *gorm.DB {
	// Build the ORDER BY clause once, outside the per-query closure.
	orderClause := fmt.Sprintf(`"%s"."id" desc`, table)
	return func(db *gorm.DB) *gorm.DB {
		return db.Order(orderClause).Limit(paginater.GetSize()).Offset(paginater.GetOffset())
	}
}
// PageOrderIdAscScope returns a gorm scope that orders by the table's id
// ascending and applies the paginater's limit and offset.
func PageOrderIdAscScope(paginater Paginater, table string) func(db *gorm.DB) *gorm.DB {
	// Build the ORDER BY clause once, outside the per-query closure.
	orderClause := fmt.Sprintf(`"%s"."id" asc`, table)
	return func(db *gorm.DB) *gorm.DB {
		return db.Order(orderClause).Limit(paginater.GetSize()).Offset(paginater.GetOffset())
	}
}
// GroupScope returns a gorm scope that groups by field and selects
// "field as label, count(1) as value", ordered by value descending.
//
// NOTE(review): field is interpolated directly into raw SQL — callers must
// never pass untrusted input here (SQL injection risk).
func GroupScope(field string) func(db *gorm.DB) *gorm.DB {
	selectClause := fmt.Sprintf("%s as label, count(1) as value", field)
	return func(db *gorm.DB) *gorm.DB {
		return db.Select(selectClause).Group(field).Order("value desc")
	}
}
// GroupOrderScope returns a gorm scope applying the given raw select clause,
// group expression and order expression. Callers are responsible for the
// safety of these raw SQL fragments.
func GroupOrderScope(clause, group, order string) func(db *gorm.DB) *gorm.DB {
	return func(db *gorm.DB) *gorm.DB {
		q := db.Select(clause)
		q = q.Group(group)
		return q.Order(order)
	}
}
// GroupOrderLimitScope is GroupOrderScope with an additional row limit.
// Callers are responsible for the safety of the raw SQL fragments.
func GroupOrderLimitScope(clause, group, order string, limit int) func(db *gorm.DB) *gorm.DB {
	return func(db *gorm.DB) *gorm.DB {
		q := db.Select(clause)
		q = q.Group(group)
		q = q.Order(order)
		return q.Limit(limit)
	}
}
|
package kubeconf
import (
"io/ioutil"
"github.com/ghodss/yaml"
kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
)
// GetKubeletConfigFromLocalFile returns KubeletConfiguration loaded from the node local config
// GetKubeletConfigFromLocalFile returns KubeletConfiguration loaded from the
// node-local config file at kubeletConfigPath (YAML).
// NOTE(review): ioutil.ReadFile is deprecated since Go 1.16 in favor of
// os.ReadFile; kept here to avoid touching the import set.
func GetKubeletConfigFromLocalFile(kubeletConfigPath string) (*kubeletconfigv1beta1.KubeletConfiguration, error) {
	raw, err := ioutil.ReadFile(kubeletConfigPath)
	if err != nil {
		return nil, err
	}
	cfg := &kubeletconfigv1beta1.KubeletConfiguration{}
	if err := yaml.Unmarshal(raw, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}
|
package models
// Copyright 2016-2017 MediaMath
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"github.com/MediaMath/go-t1/time"
)
// PlacementSlot represents a placement_slot object
type PlacementSlot struct {
AdSlot int `json:"ad_slot"`
AllowRemnant bool `json:"allow_remnant"`
AuctionType string `json:"auction_type"`
Budget float32 `json:"budget"`
BuyPrice float32 `json:"buy_price"`
BuyPriceType string `json:"buy_price_type"`
CreatedOn t1time.T1Time `json:"created_on"`
Description string `json:"description"`
EndDate t1time.T1Time `json:"end_date"`
EstVolume float32 `json:"est_volume"`
FrequencyAmount int `json:"frequency_amount"`
FrequencyInterval string `json:"frequency_interval"`
FrequencyType string `json:"frequency_type"`
Height int `json:"height"`
ID int `json:"id,omitempty,readonly"`
Name string `json:"name"`
PRMPubCeiling float32 `json:"prm_pub_ceiling"`
PRMPubMarkup float32 `json:"prm_pub_markup"`
SellPrice float32 `json:"sell_price"`
SellPriceType string `json:"sell_price_type"`
SitePlacementID int `json:"site_placement_id"`
StartDate t1time.T1Time `json:"start_date"`
UpdatedOn t1time.T1Time `json:"updated_on"`
Version int `json:"version"`
VolumeUnit string `json:"volume_unit"`
Width int `json:"width"`
EntityType string `json:"entity_type"`
}
|
/*
* Copyright 2019, Offchain Labs, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vm
import "fmt"
// WarningHandler receives warnings raised during machine execution.
type WarningHandler interface {
	// AnyWarnings reports whether at least one warning has been raised.
	AnyWarnings() bool
	// Warn records (and possibly prints) a single warning message.
	Warn(string)
	// Clone returns an independent copy of this handler's state.
	Clone() WarningHandler
	// SwitchMachinePC points the handler at a different machine PC.
	SwitchMachinePC(stack *MachinePC)
}
// VerboseWarningHandler prints each warning alongside the current machine PC
// and panics after too many warnings.
type VerboseWarningHandler struct {
	pc          *MachinePC // current PC, printed with each warning; may be nil
	anyWarnings bool       // whether any warning has been raised
	num         int        // count of warnings seen so far
}
// NewVerboseWarningHandler returns a verbose handler bound to the given
// machine PC, with no warnings recorded yet.
func NewVerboseWarningHandler(m *MachinePC) *VerboseWarningHandler {
	return &VerboseWarningHandler{
		pc:          m,
		anyWarnings: false,
		num:         0,
	}
}
// AnyWarnings reports whether any warning has been raised.
// NOTE(review): anyWarnings is only ever set via the constructor/Clone —
// Warn does not update it; see Warn.
func (hand *VerboseWarningHandler) AnyWarnings() bool {
	return hand.anyWarnings
}
// Warn prints the warning with the current machine PC and records that a
// warning occurred; after 10 warnings it panics to halt a runaway machine.
//
// Fix: previously this never set anyWarnings, so AnyWarnings() stayed false
// even after warnings were raised (SilentWarningHandler.Warn does set it).
func (hand *VerboseWarningHandler) Warn(wstr string) {
	hand.anyWarnings = true
	hand.num++
	if hand.num >= 10 {
		panic("Too many warnings")
	}
	if hand.pc != nil {
		fmt.Println(hand.pc, ":", wstr)
		// fmt.Println(hand.locations[hand.pc.pc], ":", wstr)
	}
}
// Clone returns an independent copy carrying the same PC and warning state.
func (hand *VerboseWarningHandler) Clone() WarningHandler {
	dup := *hand
	return &dup
}
// SwitchMachinePC repoints the handler at a different machine PC, which will
// be printed with subsequent warnings.
func (hand *VerboseWarningHandler) SwitchMachinePC(m *MachinePC) {
	hand.pc = m
}
// SilentWarningHandler records that warnings occurred without printing them.
type SilentWarningHandler struct {
	anyWarnings bool // set to true by the first Warn call
}
// NewSilentWarningHandler returns a silent handler with no warnings recorded.
func NewSilentWarningHandler() *SilentWarningHandler {
	// The zero value (anyWarnings == false) is exactly the initial state.
	return new(SilentWarningHandler)
}
// AnyWarnings reports whether Warn has been called at least once.
func (hand *SilentWarningHandler) AnyWarnings() bool {
	return hand.anyWarnings
}
// Warn records that a warning occurred; the message itself is discarded.
func (hand *SilentWarningHandler) Warn(wstr string) {
	hand.anyWarnings = true
}
// Clone returns an independent copy carrying the same warning state.
func (hand *SilentWarningHandler) Clone() WarningHandler {
	dup := *hand
	return &dup
}
// SwitchMachinePC is a no-op: the silent handler never prints the PC.
func (hand *SilentWarningHandler) SwitchMachinePC(m *MachinePC) {
	// do nothing
}
|
package common
import (
"github.com/pkg/errors"
"log"
"os"
"strconv"
)
const (
	// EnvCriticalFusePodEnabled names the environment variable that toggles
	// treating fuse pods as critical.
	EnvCriticalFusePodEnabled = "CRITICAL_FUSE_POD"
)
// criticalFusePodEnabled caches the parsed value of the env var; set in init.
var criticalFusePodEnabled bool
// init reads CRITICAL_FUSE_POD once at startup. An unset variable leaves the
// feature disabled; a set-but-unparsable value is a fatal configuration error.
func init() {
	strVal, exist := os.LookupEnv(EnvCriticalFusePodEnabled)
	if exist {
		boolVal, err := strconv.ParseBool(strVal)
		if err != nil {
			panic(errors.Wrapf(err, "can't parse env %s to bool", EnvCriticalFusePodEnabled))
		}
		criticalFusePodEnabled = boolVal
	}
	log.Printf("Using %s = %v\n", EnvCriticalFusePodEnabled, criticalFusePodEnabled)
}
// CriticalFusePodEnabled reports whether the critical-fuse-pod feature was
// enabled via the CRITICAL_FUSE_POD environment variable.
func CriticalFusePodEnabled() bool {
	return criticalFusePodEnabled
}
|
package streaming_transmit
import (
"encoding/binary"
"fmt"
"io"
"sync"
"time"
"github.com/lithdew/bytesutil"
"github.com/valyala/bytebufferpool"
)
// Defaults used by Conn when the corresponding field is zero (or negative,
// in the case of the timeouts — a zero timeout disables the deadline).
var DefaultReadBufferSize = 4096
var DefaultWriteBufferSize = 4096
var DefaultReadTimeout = 3 * time.Second
var DefaultWriteTimeout = 3 * time.Second
var DefaultSeqOffset uint32 = 1
var DefaultSeqDelta uint32 = 2
// Conn multiplexes framed messages (a 4-byte big-endian sequence number
// followed by a payload) over a single buffered connection, matching replies
// to pending requests by sequence number. The zero value is usable; setup is
// completed lazily via init() under sync.Once.
type Conn struct {
	Handler Handler // handles inbound messages; DefaultHandler when nil

	ReadBufferSize  int           // falls back to DefaultReadBufferSize when <= 0
	WriteBufferSize int           // falls back to DefaultWriteBufferSize when <= 0
	ReadTimeout     time.Duration // per-read deadline; default when < 0, 0 disables
	WriteTimeout    time.Duration // per-write deadline; default when < 0, 0 disables

	SeqOffset uint32 // first sequence number; DefaultSeqOffset when 0
	SeqDelta  uint32 // step between sequence numbers; DefaultSeqDelta when 0

	mu   sync.Mutex // guards all mutable state below
	once sync.Once  // guards init()

	writerQueue []*pendingWrite // writes waiting to be flushed by writeLoop
	writerCond  sync.Cond       // signals writeLoop when queue or done changes
	writerDone  bool            // set by closeWriter; no further writes accepted

	reqs map[uint32]*pendingRequest // in-flight requests keyed by sequence number
	seq  uint32                     // last issued sequence number; 0 means none yet
}
// NumOfPendingWrites returns the number of writes queued but not yet flushed
// by the write loop.
func (c *Conn) NumOfPendingWrites() int {
	c.mu.Lock()
	queued := len(c.writerQueue)
	c.mu.Unlock()
	return queued
}
// Handle runs the connection's read and write loops until done is closed or
// either loop fails, then tears everything down in an order that guarantees
// both goroutines exit. It returns the first meaningful loop error.
func (c *Conn) Handle(done chan struct{}, conn BufferedConn) error {
	c.once.Do(c.init)
	writerDone := make(chan error)
	go func() {
		writerDone <- c.writeLoop(conn)
		close(writerDone)
	}()
	readerDone := make(chan error)
	go func() {
		readerDone <- c.readLoop(conn)
		close(readerDone)
	}()
	var err error
	select {
	case <-done:
		// External shutdown: stop the writer first so queued writes drain,
		// then close the conn to unblock the reader.
		c.closeWriter()
		err = <-writerDone
		_ = conn.Close()
		if err == nil {
			err = <-readerDone
		} else {
			<-readerDone
		}
	case err = <-writerDone:
		// Writer failed: closing the conn unblocks the reader.
		c.closeWriter()
		_ = conn.Close()
		if err == nil {
			err = <-readerDone
		} else {
			<-readerDone
		}
	case err = <-readerDone:
		// Reader failed/EOF: let the writer drain before closing the conn.
		c.closeWriter()
		if err == nil {
			err = <-writerDone
		} else {
			<-writerDone
		}
		_ = conn.Close()
	}
	return err
}
// Send enqueues payload (sequence 0 = unsolicited) and blocks until the
// write loop has written it.
func (c *Conn) Send(payload []byte) error {
	c.once.Do(c.init)
	return c.send(0, payload)
}
// SendNoWait enqueues payload (sequence 0 = unsolicited) without waiting for
// the write loop to flush it.
func (c *Conn) SendNoWait(payload []byte) error {
	c.once.Do(c.init)
	return c.sendNoWait(0, payload)
}
// Request sends payload with a fresh sequence number and blocks until the
// matching response arrives (appended into dst) or the request fails. The
// pending entry is registered before sending so a fast response cannot be
// missed; on send failure it is unregistered again.
func (c *Conn) Request(dst []byte, payload []byte) ([]byte, error) {
	c.once.Do(c.init)
	pr := pendingRequestPool.acquire(dst)
	defer pendingRequestPool.release(pr)
	pr.wg.Add(1)
	seq := c.next()
	c.mu.Lock()
	c.reqs[seq] = pr
	c.mu.Unlock()
	err := c.sendNoWait(seq, payload)
	if err != nil {
		// Undo the registration; nobody will ever complete this request.
		pr.wg.Done()
		c.mu.Lock()
		delete(c.reqs, seq)
		c.mu.Unlock()
		return nil, err
	}
	// Wait for the read loop to fill in pr.dst / pr.err and call Done.
	pr.wg.Wait()
	return pr.dst, pr.err
}
// init lazily completes construction (called once via c.once): allocates the
// pending-request table and binds the writer condition to c.mu.
func (c *Conn) init() {
	c.reqs = make(map[uint32]*pendingRequest)
	c.writerCond.L = &c.mu
}
// send frames payload behind a 4-byte big-endian seq header and blocks until
// the write loop has written the frame. The scratch buffer is pooled and
// returned here because write() waits for completion.
func (c *Conn) send(seq uint32, payload []byte) error {
	frame := bytebufferpool.Get()
	defer bytebufferpool.Put(frame)
	frame.B = bytesutil.ExtendSlice(frame.B, 4+len(payload))
	binary.BigEndian.PutUint32(frame.B[:4], seq)
	copy(frame.B[4:], payload)
	return c.write(frame)
}
// sendNoWait frames payload behind a 4-byte big-endian seq header and
// enqueues it without waiting; the write loop returns the pooled buffer once
// the frame has been written.
func (c *Conn) sendNoWait(seq uint32, payload []byte) error {
	frame := bytebufferpool.Get()
	frame.B = bytesutil.ExtendSlice(frame.B, 4+len(payload))
	binary.BigEndian.PutUint32(frame.B[:4], seq)
	copy(frame.B[4:], payload)
	return c.writeNoWait(frame)
}
// write enqueues buf and blocks until the write loop has written it,
// returning the write loop's error for this frame. The pendingWrite is
// released here (not in the loop) because wait == true.
func (c *Conn) write(buf *bytebufferpool.ByteBuffer) error {
	pw, err := c.preparePendingWrite(buf, true)
	if err != nil {
		return err
	}
	defer pendingWritePool.release(pw)
	pw.wg.Wait()
	return pw.err
}
// writeNoWait enqueues buf without waiting; the write loop takes ownership of
// buf and the pendingWrite, releasing both after the frame is written.
func (c *Conn) writeNoWait(buf *bytebufferpool.ByteBuffer) error {
	_, err := c.preparePendingWrite(buf, false)
	return err
}
// preparePendingWrite wraps buf in a pendingWrite, appends it to the writer
// queue and wakes the write loop. It fails once the writer has been shut
// down. When wait is true the caller will block on pw.wg and must release pw.
func (c *Conn) preparePendingWrite(buf *bytebufferpool.ByteBuffer, wait bool) (*pendingWrite, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.writerDone {
		return nil, fmt.Errorf("node is shut down: %w", io.EOF)
	}
	pw := pendingWritePool.acquire(buf, wait)
	if wait {
		pw.wg.Add(1)
	}
	c.writerQueue = append(c.writerQueue, pw)
	c.writerCond.Signal()
	return pw, nil
}
// closeWriter marks the writer as shut down and wakes the write loop so it
// can drain any remaining queue and exit. Signal (not Broadcast) suffices
// because only the single writeLoop goroutine waits on writerCond.
func (c *Conn) closeWriter() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.writerDone = true
	c.writerCond.Signal()
}
// getHandler returns the configured handler, or DefaultHandler when unset.
func (c *Conn) getHandler() Handler {
	if h := c.Handler; h != nil {
		return h
	}
	return DefaultHandler
}
// getReadBufferSize returns the configured read buffer size, or
// DefaultReadBufferSize when the field is zero or negative.
func (c *Conn) getReadBufferSize() int {
	if size := c.ReadBufferSize; size > 0 {
		return size
	}
	return DefaultReadBufferSize
}
// getWriteBufferSize returns the configured write buffer size, or
// DefaultWriteBufferSize when the field is zero or negative.
func (c *Conn) getWriteBufferSize() int {
	if size := c.WriteBufferSize; size > 0 {
		return size
	}
	return DefaultWriteBufferSize
}
// getReadTimeout returns the configured read timeout. A negative value
// selects DefaultReadTimeout; zero is passed through (deadline disabled).
func (c *Conn) getReadTimeout() time.Duration {
	if t := c.ReadTimeout; t >= 0 {
		return t
	}
	return DefaultReadTimeout
}
// getWriteTimeout returns the configured write timeout. Only a negative
// value selects DefaultWriteTimeout; zero is returned as-is (no deadline).
func (c *Conn) getWriteTimeout() time.Duration {
	if t := c.WriteTimeout; t >= 0 {
		return t
	}
	return DefaultWriteTimeout
}
// getSeqOffset returns the first sequence number to use, falling back to
// DefaultSeqOffset when unset (zero).
func (c *Conn) getSeqOffset() uint32 {
	if off := c.SeqOffset; off != 0 {
		return off
	}
	return DefaultSeqOffset
}
// getSeqDelta returns the sequence-number increment, falling back to
// DefaultSeqDelta when unset (zero).
func (c *Conn) getSeqDelta() uint32 {
	if d := c.SeqDelta; d != 0 {
		return d
	}
	return DefaultSeqDelta
}
// next returns the next request sequence number under the connection
// lock: the offset on first use, then offset plus multiples of the delta.
func (c *Conn) next() uint32 {
	c.mu.Lock()
	defer c.mu.Unlock()
	switch c.seq {
	case 0:
		c.seq = c.getSeqOffset()
	default:
		c.seq += c.getSeqDelta()
	}
	return c.seq
}
// writeLoop is the single writer goroutine: it drains c.writerQueue in
// batches and writes each queued frame to conn, flushing after every
// batch. It exits when closeWriter has been called and the queue is
// empty, or on the first deadline/write/flush error.
// Waited writes get their outcome via pw.err + pw.wg.Done();
// fire-and-forget writes have their buffers returned to the pools here.
// Returns nil on clean shutdown, otherwise the wrapped error.
func (c *Conn) writeLoop(conn BufferedConn) error {
	var queue []*pendingWrite
	var err error
	for {
		// Wait for work (or shutdown), then swap the shared queue into a
		// local copy so the lock is not held during I/O.
		c.mu.Lock()
		for !c.writerDone && len(c.writerQueue) == 0 {
			c.writerCond.Wait()
		}
		done := c.writerDone
		// Grow the local batch to the shared queue's length, reusing its
		// backing array across iterations to avoid reallocating.
		if n := len(c.writerQueue) - cap(queue); n > 0 {
			queue = append(queue[:cap(queue)], make([]*pendingWrite, n)...)
		}
		queue = queue[:len(c.writerQueue)]
		copy(queue, c.writerQueue)
		c.writerQueue = c.writerQueue[:0]
		c.mu.Unlock()
		if done && len(queue) == 0 {
			// Clean shutdown: nothing left to write.
			break
		}
		timeout := c.getWriteTimeout()
		if timeout > 0 {
			err = conn.SetWriteDeadline(time.Now().Add(timeout))
			if err != nil {
				// Deadline setup failed: fail/release every frame in this
				// batch before bailing out.
				for _, pw := range queue {
					if pw.wait {
						pw.err = err
						pw.wg.Done()
					} else {
						bytebufferpool.Put(pw.buf)
						pendingWritePool.release(pw)
					}
				}
				break
			}
		}
		for _, pw := range queue {
			// After the first failure, skip writing but still complete or
			// release every remaining frame so no waiter hangs.
			if err == nil {
				_, err = conn.Write(pw.buf.B)
			}
			if pw.wait {
				pw.err = err
				pw.wg.Done()
			} else {
				bytebufferpool.Put(pw.buf)
				pendingWritePool.release(pw)
			}
		}
		if err != nil {
			break
		}
		err = conn.Flush()
		if err != nil {
			break
		}
	}
	if err != nil {
		err = fmt.Errorf("write_loop: %w", err)
	}
	return err
}
// readLoop reads frames from conn until an error occurs. Each Read is
// treated as one complete frame: a 4-byte big-endian sequence number
// followed by the payload.
// NOTE(review): this assumes conn.Read returns exactly one whole frame
// per call (message-oriented transport) — confirm, since a stream
// transport could split or merge frames across reads.
// A frame whose sequence matches a pending request completes that
// request; everything else (including seq 0) is dispatched to the
// handler. Always returns a non-nil, wrapped error.
func (c *Conn) readLoop(conn BufferedConn) error {
	buf := make([]byte, c.getReadBufferSize())
	var (
		n   int
		err error
	)
	for {
		timeout := c.getReadTimeout()
		if timeout > 0 {
			err = conn.SetReadDeadline(time.Now().Add(timeout))
			if err != nil {
				break
			}
		}
		n, err = conn.Read(buf)
		if err != nil {
			break
		}
		data := buf[:n]
		if len(data) < 4 {
			// Too short to even carry a sequence number.
			err = fmt.Errorf("no sequence number to decode: %w", io.ErrUnexpectedEOF)
			break
		}
		seq := bytesutil.Uint32BE(data)
		data = data[4:]
		// Claim (and remove) the pending request for this sequence, if any.
		c.mu.Lock()
		pr, exists := c.reqs[seq]
		if exists {
			delete(c.reqs, seq)
		}
		c.mu.Unlock()
		if seq == 0 || !exists {
			// Unsolicited message: hand it to the user handler.
			err = c.call(seq, data)
			if err != nil {
				err = fmt.Errorf("handler encountered an error: %w", err)
				break
			}
			continue
		}
		// received response
		// Copy the payload into the request's destination slice; buf is
		// reused on the next iteration, so data must not be retained.
		pr.dst = bytesutil.ExtendSlice(pr.dst, len(data))
		copy(pr.dst, data)
		pr.wg.Done()
	}
	return fmt.Errorf("read_loop: %w", err)
}
// call dispatches an inbound message to the configured handler, using a
// pooled handler context that is recycled once the handler returns.
func (c *Conn) call(seq uint32, data []byte) error {
	hctx := contextPool.acquire(c, seq, data)
	defer contextPool.release(hctx)

	handler := c.getHandler()
	return handler.HandleMessage(hctx)
}
// close fails all outstanding work with err and resets the connection's
// sequence counter. Queued-but-unwritten frames either get err delivered
// to their waiters or have their buffers returned to the pools; every
// in-flight request is completed with err so no caller blocks forever.
func (c *Conn) close(err error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for _, pw := range c.writerQueue {
		if pw.wait {
			pw.err = err
			pw.wg.Done()
		} else {
			bytebufferpool.Put(pw.buf)
			pendingWritePool.release(pw)
		}
	}
	c.writerQueue = nil
	// Single-lookup iteration: ranging with both key and value avoids the
	// redundant second map access (c.reqs[seq]) of the original loop.
	for seq, pr := range c.reqs {
		pr.err = err
		pr.wg.Done()
		delete(c.reqs, seq)
	}
	c.seq = 0
}
|
package main
/*
This question was asked by BufferBox.
Given a binary tree where all nodes are either 0 or 1, prune the tree so
that subtrees containing all 0s are removed.
For example, given the following tree:
0
/ \
1 0
/ \
1 0
/ \
0 0
should be pruned to:
0
/ \
1 0
/
1
We do not remove the tree at the root or its left child because it still
has a 1 as a descendant.
*/
import (
"binary_tree/tree"
"fmt"
"os"
)
// prune removes every subtree whose nodes are all "0", working bottom-up:
// children are pruned first, and a node is dropped only when it holds "0"
// and both (already pruned) children are gone.
func prune(node *tree.StringNode) *tree.StringNode {
	if node == nil {
		return nil
	}
	node.Left = prune(node.Left)
	node.Right = prune(node.Right)
	if node.Data == "0" && node.Left == nil && node.Right == nil {
		return nil
	}
	return node
}
// main builds a tree from the first CLI argument and emits a Graphviz
// digraph with two clusters: the tree before and after pruning.
func main() {
	// Guard the os.Args[1] access: without an argument the original code
	// panicked with an index-out-of-range.
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: prune <tree-spec>")
		os.Exit(1)
	}
	root := tree.CreateFromString(os.Args[1])
	fmt.Printf("digraph g1 {\n")
	fmt.Printf("subgraph cluster_0 {\n\tlabel=\"before\"\n")
	tree.DrawPrefixed(os.Stdout, root, "orig")
	fmt.Printf("\n}\n")
	pruned := prune(root)
	fmt.Printf("subgraph cluster_1 {\n\tlabel=\"after\"\n")
	tree.DrawPrefixed(os.Stdout, pruned, "prune")
	fmt.Printf("\n}\n")
	fmt.Printf("\n}\n")
}
|
package packet
import (
"github.com/google/gopacket"
layers "github.com/google/gopacket/layers"
"github.com/taciomcosta/dnsbyo/dns"
"net"
)
// Packet wraps a decoded DNS message together with the network address of
// the client that sent it.
type Packet struct {
	dnsPacket  *layers.DNS // parsed DNS layer (nil if decoding failed; see New)
	clientAddr net.Addr    // source address of the request
}
// New decodes buff as a DNS packet and associates it with the client
// address it was received from.
// NOTE(review): the type-assertion result is discarded — if buff does not
// decode as DNS, dnsPacket is nil and later method calls (Query,
// Serialize, AddResponse) will panic; consider validating upstream.
func New(buff []byte, addr net.Addr) Packet {
	packet := gopacket.NewPacket(buff, layers.LayerTypeDNS, gopacket.Default)
	layer := packet.Layer(layers.LayerTypeDNS)
	dnsPacket, _ := layer.(*layers.DNS)
	return Packet{dnsPacket, addr}
}
// Query extracts the first DNS question as a dns.Query.
// NOTE(review): assumes the packet carries at least one question — an
// empty Questions slice panics here; confirm callers guarantee this.
func (p *Packet) Query() dns.Query {
	return dns.Query{
		Name:   string(p.dnsPacket.Questions[0].Name),
		QClass: dns.Class(p.dnsPacket.Questions[0].Class),
		QType:  dns.Type(p.dnsPacket.Questions[0].Type),
	}
}
// Serialize encodes the (possibly modified) DNS packet back to wire
// format.
// NOTE(review): the SerializeTo error is silently ignored; on failure
// this returns whatever partial bytes the buffer holds (likely empty).
func (p *Packet) Serialize() []byte {
	buff := gopacket.NewSerializeBuffer()
	p.dnsPacket.SerializeTo(buff, gopacket.SerializeOptions{})
	return buff.Bytes()
}
// ClientAddr returns the network address the packet was received from.
func (p *Packet) ClientAddr() net.Addr {
	return p.clientAddr
}
// AddResponse turns the packet into a reply: it copies the response
// header fields, marks the message as a response (QR), and installs the
// single answer record described by response.RData.
func (p *Packet) AddResponse(response dns.Response) {
	answer := layers.DNSResourceRecord{
		Name:  []byte(response.RData.Name),
		IP:    response.RData.IP,
		Type:  layers.DNSType(response.RData.Type),
		Class: layers.DNSClass(response.RData.Class),
	}
	p.dnsPacket.QR = true
	p.dnsPacket.AA = response.AA
	p.dnsPacket.ANCount = response.ANCount
	p.dnsPacket.ResponseCode = layers.DNSResponseCode(response.RCode)
	p.dnsPacket.Answers = []layers.DNSResourceRecord{answer}
}
|
// Copyright 2016-2018, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tfgen
import (
"testing"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/pulumi/pulumi-terraform-bridge/v2/pkg/tfbridge"
"github.com/pulumi/pulumi/sdk/v2/go/common/tokens"
"github.com/stretchr/testify/assert"
)
// typeTest is one table-driven case for Test_TsTypes: a Terraform schema
// plus optional bridge overlay info, and the TypeScript types expected
// when the property is projected as an output and as an input.
type typeTest struct {
	schema         *schema.Schema       // Terraform schema under test (may be nil when info alone decides)
	info           *tfbridge.SchemaInfo // optional overlay/override info
	expectedOutput string               // expected TS type in output position
	expectedInput  string               // expected TS type in input position (pulumi.Input-wrapped)
}
// tsTypeTests drives Test_TsTypes: each case maps a Terraform schema
// (and optional bridge overlay info) to the TypeScript type expected in
// output (plain) and input (pulumi.Input-wrapped) positions.
var tsTypeTests = []typeTest{
	{
		// Bool Schema
		schema:         &schema.Schema{Type: schema.TypeBool},
		expectedOutput: "boolean",
		expectedInput:  "pulumi.Input<boolean>",
	},
	{
		// Int Schema
		schema:         &schema.Schema{Type: schema.TypeInt},
		expectedOutput: "number",
		expectedInput:  "pulumi.Input<number>",
	},
	{
		// Float Schema
		schema:         &schema.Schema{Type: schema.TypeFloat},
		expectedOutput: "number",
		expectedInput:  "pulumi.Input<number>",
	},
	{
		// String Schema
		schema:         &schema.Schema{Type: schema.TypeString},
		expectedOutput: "string",
		expectedInput:  "pulumi.Input<string>",
	},
	{
		// Basic Set Schema
		schema: &schema.Schema{
			Type: schema.TypeSet,
			Elem: &schema.Schema{Type: schema.TypeString},
		},
		expectedOutput: "string[]",
		expectedInput:  "pulumi.Input<pulumi.Input<string>[]>",
	},
	{
		// Basic List Schema
		schema: &schema.Schema{
			Type: schema.TypeList,
			Elem: &schema.Schema{Type: schema.TypeString},
		},
		expectedOutput: "string[]",
		expectedInput:  "pulumi.Input<pulumi.Input<string>[]>",
	},
	{
		// Basic Map Schema
		schema: &schema.Schema{
			Type: schema.TypeMap,
			Elem: &schema.Schema{Type: schema.TypeString},
		},
		expectedOutput: "{[key: string]: string}",
		expectedInput:  "pulumi.Input<{[key: string]: pulumi.Input<string>}>",
	},
	{
		// Resource Map Schema
		schema: &schema.Schema{
			Type: schema.TypeMap,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"foo": {Type: schema.TypeString},
				},
			},
		},
		expectedOutput: "{ foo: string }",
		expectedInput:  "pulumi.Input<{ foo: pulumi.Input<string> }>",
	},
	{
		// Basic alt types
		info: &tfbridge.SchemaInfo{
			Type:     "string",
			AltTypes: []tokens.Type{"Foo"},
		},
		expectedOutput: "string",
		expectedInput:  "pulumi.Input<string | Foo>",
	},
	{
		// Basic array alt types
		info: &tfbridge.SchemaInfo{
			Type:     "string",
			AltTypes: []tokens.Type{"Foo[]"},
		},
		expectedOutput: "string",
		expectedInput:  "pulumi.Input<string | Foo[]>",
	},
	{
		// Complex array alt types
		info: &tfbridge.SchemaInfo{
			Type:     "string",
			AltTypes: []tokens.Type{"pkg:mod/foo:Foo[]"},
		},
		expectedOutput: "string",
		expectedInput:  "pulumi.Input<string | Foo[]>",
	},
	{
		// Asset
		schema: &schema.Schema{Type: schema.TypeString},
		info: &tfbridge.SchemaInfo{
			Asset: &tfbridge.AssetTranslation{
				Kind: tfbridge.FileAsset,
			},
		},
		expectedOutput: "pulumi.asset.Asset | pulumi.asset.Archive",
		expectedInput:  "pulumi.Input<pulumi.asset.Asset | pulumi.asset.Archive>",
	},
	{
		// Archive
		schema: &schema.Schema{Type: schema.TypeString},
		info: &tfbridge.SchemaInfo{
			Asset: &tfbridge.AssetTranslation{
				Kind: tfbridge.FileArchive,
			},
		},
		expectedOutput: "pulumi.asset.Archive",
		expectedInput:  "pulumi.Input<pulumi.asset.Archive>",
	},
}
// Test_TsTypes runs every tsTypeTests case through tsType in both output
// and input positions, reusing one variable and flipping its direction.
func Test_TsTypes(t *testing.T) {
	for _, test := range tsTypeTests {
		v := &variable{
			name:   "foo",
			schema: test.schema,
			info:   test.info,
			opt:    true,
			typ:    makePropertyType("foo", test.schema, test.info, false, parsedDoc{}),
		}
		// Output
		v.out = true
		assert.Equal(t, test.expectedOutput, tsType("", "", v, nil, nil, false, false, false))
		// Input
		v.out = false
		assert.Equal(t, test.expectedInput, tsType("", "", v, nil, nil, false, true, false))
	}
}
// Test_Issue130 verifies that a MaxItems==1 list is projected as a plain
// scalar ("string") rather than a list, in both output and input
// positions (regression test for issue #130).
func Test_Issue130(t *testing.T) {
	// Named sch rather than schema: the original shadowed the imported
	// schema package with a local variable of the same name.
	sch := &schema.Schema{
		Type:     schema.TypeList,
		MaxItems: 1,
		Elem:     &schema.Schema{Type: schema.TypeString},
	}
	assert.Equal(t, "string", tsType("", "", &variable{
		name:   "condition",
		schema: sch,
		out:    true,
		typ:    makePropertyType("condition", sch, nil, true, parsedDoc{}),
	}, nil, nil, false, false, false))
	assert.Equal(t, "pulumi.Input<string>", tsType("", "", &variable{
		name:   "condition",
		schema: sch,
		out:    false,
		typ:    makePropertyType("condition", sch, nil, false, parsedDoc{}),
	}, nil, nil, false, true, false))
}
func Test_GatherCustomImports_ComplexArrayAltType(t *testing.T) {
expected := importMap{
"./foo": map[string]bool{
"Foo": true,
},
}
g := &nodeJSGenerator{pkg: "pkg"}
info := tfbridge.SchemaInfo{
Type: "string",
AltTypes: []tokens.Type{"pkg:mod/foo:Foo[]"},
}
actual := make(importMap)
err := g.gatherCustomImports(newModule("mod"), &info, actual)
assert.NoError(t, err)
assert.Equal(t, expected, actual)
}
|
package telepathy
import (
"net/url"
"gitlab.com/kavenc/argo"
"github.com/sirupsen/logrus"
)
// Plugin defines the functions that need to be implemented for all plugins.
// Plugins may optionally implement other functions by implementing the interfaces below.
type Plugin interface {
	// ID returns the unique id for the plugin
	ID() string
	// SetLogger will be called in init stage to provide logger for the plugin
	SetLogger(*logrus.Entry)
	// Start is the main routine of the plugin;
	// this function only returns when the plugin is terminated
	Start()
	// Stop triggers termination of the plugin
	Stop()
}
// PluginMessenger defines the necessary functions for a messenger plugin.
// A messenger plugin, which serves as the interface to a messenger app, must implement PluginMessenger.
type PluginMessenger interface {
	// InMsgChannel should provide the channel used to get
	// all inbound messages received by the messenger plugin
	InMsgChannel() <-chan InboundMessage
	// AttachOutMsgChannel is used to attach the outbound message channel
	// whose messages should be sent out by the messenger plugin
	AttachOutMsgChannel(<-chan OutboundMessage)
}
// PluginCommandHandler defines the necessary functions if a plugin implements command interfaces.
// The input parameter channel will be closed once the command parser is terminated
// and no more commands will be triggered.
type PluginCommandHandler interface {
	Command(<-chan interface{}) *argo.Action
}
// PluginWebhookHandler defines the necessary functions if a plugin handles webhooks.
// Webhook returns the handlers to register; SetWebhookURL supplies the
// externally visible URL for each registered webhook.
type PluginWebhookHandler interface {
	Webhook() map[string]HTTPHandler
	SetWebhookURL(map[string]*url.URL)
}
// PluginMsgConsumer defines the necessary functions if a plugin handles inbound messages.
type PluginMsgConsumer interface {
	AttachInMsgChannel(<-chan InboundMessage)
}
// PluginMsgProducer defines the necessary functions if a plugin would send out messages.
type PluginMsgProducer interface {
	OutMsgChannel() <-chan OutboundMessage
}
// PluginDatabaseUser defines the necessary functions if a plugin accesses the database.
type PluginDatabaseUser interface {
	DBRequestChannel() <-chan DatabaseRequest
}
|
package steps
import (
"fmt"
survey "github.com/AlecAivazis/survey/v2"
"github.com/lib/pq"
"github.com/pganalyze/collector/setup/query"
s "github.com/pganalyze/collector/setup/state"
)
// EnsureMonitoringUser ensures that the monitoring role named by the
// db_username config key exists in Postgres, creating it (with a
// connection limit of 5) when permitted by scripted inputs or an
// interactive confirmation.
var EnsureMonitoringUser = &s.Step{
	ID:          "ensure_monitoring_user",
	Description: "Ensure the monitoring user (db_user in the collector config file) exists in Postgres",
	// Check reports whether the configured monitoring user already exists.
	Check: func(state *s.SetupState) (bool, error) {
		pgaUserKey, err := state.CurrentSection.GetKey("db_username")
		if err != nil {
			return false, err
		}
		pgaUser := pgaUserKey.String()

		var result query.Row
		// The user name is interpolated as a quoted literal via
		// pq.QuoteLiteral, so the query is safe from injection.
		result, err = state.QueryRunner.QueryRow(fmt.Sprintf("SELECT true FROM pg_user WHERE usename = %s", pq.QuoteLiteral(pgaUser)))
		if err == query.ErrNoRows {
			// No such user yet; the Run step may create it.
			return false, nil
		} else if err != nil {
			return false, err
		}
		return result.GetBool(0), nil
	},
	// Run creates the monitoring user after consent: in scripted mode the
	// ensure_monitoring_user input must be set true; interactively the
	// operator is prompted.
	Run: func(state *s.SetupState) error {
		pgaUserKey, err := state.CurrentSection.GetKey("db_username")
		if err != nil {
			return err
		}
		pgaUser := pgaUserKey.String()

		var doCreateUser bool
		if state.Inputs.Scripted {
			if !state.Inputs.EnsureMonitoringUser.Valid ||
				!state.Inputs.EnsureMonitoringUser.Bool {
				return fmt.Errorf("create_monitoring_user flag not set and specified monitoring user %s does not exist", pgaUser)
			}
			// NOTE(review): the guard above already guarantees Bool is
			// true here, so this always assigns true.
			doCreateUser = state.Inputs.EnsureMonitoringUser.Bool
		} else {
			err = survey.AskOne(&survey.Confirm{
				Message: fmt.Sprintf("User %s does not exist in Postgres; create user (will be saved to Postgres)?", pgaUser),
				Help:    "If you skip this step, create the user manually before proceeding",
				Default: false,
			}, &doCreateUser)
			if err != nil {
				return err
			}
		}
		if !doCreateUser {
			return nil
		}

		// Identifier is quoted with pq.QuoteIdentifier; the connection
		// limit caps the monitoring user's concurrent sessions.
		return state.QueryRunner.Exec(
			fmt.Sprintf(
				"CREATE USER %s CONNECTION LIMIT 5",
				pq.QuoteIdentifier(pgaUser),
			),
		)
	},
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.