text
stringlengths 11
4.05M
|
|---|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package local
import (
"container/heap"
"context"
"fmt"
"strings"
"sync"
"time"
"github.com/google/uuid"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/errorpb"
sst "github.com/pingcap/kvproto/pkg/import_sstpb"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/tidb/br/pkg/lightning/common"
"github.com/pingcap/tidb/br/pkg/lightning/config"
"github.com/pingcap/tidb/br/pkg/lightning/log"
"github.com/pingcap/tidb/br/pkg/lightning/metric"
"github.com/pingcap/tidb/br/pkg/logutil"
"github.com/pingcap/tidb/br/pkg/restore/split"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/mathutil"
"github.com/tikv/client-go/v2/util"
"go.uber.org/zap"
"google.golang.org/grpc"
)
type jobStageTp string
/*
+
v
+------+------+
+->+regionScanned+<------+
| +------+------+ |
| | |
| | |
| v |
| +--+--+ +-----+----+
| |wrote+---->+needRescan|
| +--+--+ +-----+----+
| | ^
| | |
| v |
| +---+----+ |
+-----+ingested+---------+
+---+----+
|
v
above diagram shows the state transition of a region job, here are some special
cases:
- regionScanned can directly jump to ingested if the keyRange has no data
- regionScanned can only transit to wrote. TODO: check if it should be transited
to needRescan
- if a job only partially writes the data, after it becomes ingested, it will
update its keyRange and transits to regionScanned to continue the remaining
data
- needRescan may output multiple regionScanned jobs when the old region is split
*/
const (
	// stages of a regionJob; see the state-transition diagram above.
	regionScanned jobStageTp = "regionScanned"
	wrote         jobStageTp = "wrote"
	ingested      jobStageTp = "ingested"
	needRescan    jobStageTp = "needRescan"

	// suppose each KV is about 32 bytes, 16 * units.KiB / 32 = 512
	defaultKVBatchCount = 512
)
// String implements fmt.Stringer so a stage can be logged directly.
func (j jobStageTp) String() string {
	return string(j)
}
// regionJob is dedicated to import the data in [keyRange.start, keyRange.end)
// to a region. The keyRange may be changed when processing because of writing
// partial data to TiKV or region split.
type regionJob struct {
	keyRange Range
	// TODO: check the keyRange so that it's always included in region
	region *split.RegionInfo
	// stage should be updated only by convertStageTo
	stage jobStageTp
	// writeResult is available only in wrote and ingested stage
	writeResult *tikvWriteResult

	// ingestData supplies the KV pairs to import for this key range.
	ingestData      common.IngestData
	regionSplitSize int64
	regionSplitKeys int64
	metrics         *metric.Metrics

	// retryCount counts how many times this job has been retried.
	retryCount int
	// waitUntil is the earliest time the job may be retried; it drives the
	// ordering of regionJobRetryer's heap.
	waitUntil        time.Time
	lastRetryableErr error

	// injected is used in test to set the behaviour
	injected []injectedBehaviour
}
// tikvWriteResult is the outcome of writeToTiKV for one regionJob.
type tikvWriteResult struct {
	// sstMeta holds the leader peer's SST metas, consumed later by ingest.
	sstMeta    []*sst.SSTMeta
	count      int64
	totalBytes int64
	// remainingStartKey is non-nil when only part of the key range was
	// written; the job's keyRange then shrinks to start from it.
	remainingStartKey []byte
}
// injectedBehaviour scripts one write+ingest round for tests; it is consumed
// by the "fakeRegionJobs" failpoints in writeToTiKV and ingest.
type injectedBehaviour struct {
	write  injectedWriteBehaviour
	ingest injectedIngestBehaviour
}

// injectedWriteBehaviour is the scripted result of writeToTiKV.
type injectedWriteBehaviour struct {
	result *tikvWriteResult
	err    error
}

// injectedIngestBehaviour is the scripted result of ingest.
type injectedIngestBehaviour struct {
	nextStage jobStageTp
	err       error
}
// convertStageTo moves the job to the given stage and maintains the fields
// that are only meaningful in certain stages.
func (j *regionJob) convertStageTo(stage jobStageTp) {
	j.stage = stage
	switch stage {
	case regionScanned:
		// writeResult is only valid in wrote/ingested stage; drop it.
		j.writeResult = nil
	case ingested:
		// when writing is skipped because key range is empty
		if j.writeResult == nil {
			return
		}

		// report the imported bytes/KV count to the data source and metrics.
		j.ingestData.Finish(j.writeResult.totalBytes, j.writeResult.count)
		if j.metrics != nil {
			j.metrics.BytesCounter.WithLabelValues(metric.StateImported).
				Add(float64(j.writeResult.totalBytes))
		}
	case needRescan:
		// the region info is stale; it must be refreshed by rescanning.
		j.region = nil
	}
}
// writeToTiKV writes the data to TiKV and mark this job as wrote stage.
// if any write logic has error, writeToTiKV will set job to a proper stage and return nil. TODO: <-check this
// if any underlying logic has error, writeToTiKV will return an error.
// we don't need to do cleanup for the pairs written to tikv if encounters an error,
// tikv will take the responsibility to do so.
// TODO: let client-go provide a high-level write interface.
func (local *Backend) writeToTiKV(ctx context.Context, j *regionJob) error {
	// only jobs in regionScanned stage still need writing.
	if j.stage != regionScanned {
		return nil
	}

	failpoint.Inject("fakeRegionJobs", func() {
		// test hook: consume the next injected behaviour instead of talking
		// to a real TiKV.
		front := j.injected[0]
		j.injected = j.injected[1:]
		j.writeResult = front.write.result
		err := front.write.err
		if err == nil {
			j.convertStageTo(wrote)
		}
		failpoint.Return(err)
	})

	apiVersion := local.tikvCodec.GetAPIVersion()
	clientFactory := local.importClientFactory
	kvBatchSize := local.KVWriteBatchSize
	bufferPool := local.bufferPool
	writeLimiter := local.writeLimiter

	begin := time.Now()
	region := j.region.Region
	firstKey, lastKey, err := j.ingestData.GetFirstAndLastKey(j.keyRange.start, j.keyRange.end)
	if err != nil {
		return errors.Trace(err)
	}
	// the key range contains no data for this region: jump straight to ingested.
	if firstKey == nil {
		j.convertStageTo(ingested)
		log.FromContext(ctx).Debug("keys within region is empty, skip doIngest",
			logutil.Key("start", j.keyRange.start),
			logutil.Key("regionStart", region.StartKey),
			logutil.Key("end", j.keyRange.end),
			logutil.Key("regionEnd", region.EndKey))
		return nil
	}
	firstKey = codec.EncodeBytes([]byte{}, firstKey)
	lastKey = codec.EncodeBytes([]byte{}, lastKey)

	u := uuid.New()
	meta := &sst.SSTMeta{
		Uuid:        u[:],
		RegionId:    region.GetId(),
		RegionEpoch: region.GetRegionEpoch(),
		Range: &sst.Range{
			Start: firstKey,
			End:   lastKey,
		},
		ApiVersion: apiVersion,
	}

	annotateErr := func(in error, peer *metapb.Peer) error {
		// annotate the error with peer/store/region info to help debug.
		return errors.Annotatef(in, "peer %d, store %d, region %d, epoch %s", peer.Id, peer.StoreId, region.Id, region.RegionEpoch.String())
	}

	leaderID := j.region.Leader.GetId()
	clients := make([]sst.ImportSST_WriteClient, 0, len(region.GetPeers()))
	allPeers := make([]*metapb.Peer, 0, len(region.GetPeers()))
	req := &sst.WriteRequest{
		Chunk: &sst.WriteRequest_Meta{
			Meta: meta,
		},
		Context: &kvrpcpb.Context{
			ResourceControlContext: &kvrpcpb.ResourceControlContext{
				ResourceGroupName: local.ResourceGroupName,
			},
			RequestSource: util.BuildRequestSource(true, kv.InternalTxnLightning, local.TaskType),
		},
	}
	// open one write stream per peer; the data is replicated to every replica.
	for _, peer := range region.GetPeers() {
		cli, err := clientFactory.Create(ctx, peer.StoreId)
		if err != nil {
			return annotateErr(err, peer)
		}
		wstream, err := cli.Write(ctx)
		if err != nil {
			return annotateErr(err, peer)
		}
		// Bind uuid for this write request
		if err = wstream.Send(req); err != nil {
			return annotateErr(err, peer)
		}
		clients = append(clients, wstream)
		allPeers = append(allPeers, peer)
	}
	// after the meta chunk is sent, the same request object is reused for the
	// KV batch chunks.
	req.Chunk = &sst.WriteRequest_Batch{
		Batch: &sst.WriteBatch{
			CommitTs: j.ingestData.GetTS(),
		},
	}

	bytesBuf := bufferPool.NewBuffer()
	defer bytesBuf.Destroy()
	pairs := make([]*sst.Pair, 0, defaultKVBatchCount)
	count := 0
	size := int64(0)
	totalSize := int64(0)
	totalCount := int64(0)
	// if region-split-size <= 96MiB, we bump the threshold a bit to avoid too many retry split
	// because the range-properties is not 100% accurate
	regionMaxSize := j.regionSplitSize
	if j.regionSplitSize <= int64(config.SplitRegionSize) {
		regionMaxSize = j.regionSplitSize * 4 / 3
	}

	// flushKVs sends the currently buffered pairs to every peer's stream,
	// throttled by the per-store write limiter.
	flushKVs := func() error {
		req.Chunk.(*sst.WriteRequest_Batch).Batch.Pairs = pairs[:count]
		preparedMsg := &grpc.PreparedMsg{}
		// by reading the source code, Encode need to find codec and compression from the stream
		// because all stream has the same codec and compression, we can use any one of them
		if err := preparedMsg.Encode(clients[0], req); err != nil {
			return err
		}

		for i := range clients {
			if err := writeLimiter.WaitN(ctx, allPeers[i].StoreId, int(size)); err != nil {
				return errors.Trace(err)
			}
			if err := clients[i].SendMsg(preparedMsg); err != nil {
				return annotateErr(err, allPeers[i])
			}
		}
		failpoint.Inject("afterFlushKVs", func() {
			log.FromContext(ctx).Info(fmt.Sprintf("afterFlushKVs count=%d,size=%d", count, size))
		})
		return nil
	}

	iter := j.ingestData.NewIter(ctx, j.keyRange.start, j.keyRange.end)
	//nolint: errcheck
	defer iter.Close()

	var remainingStartKey []byte
	for iter.First(); iter.Valid(); iter.Next() {
		kvSize := int64(len(iter.Key()) + len(iter.Value()))
		// here we reuse the `*sst.Pair`s to optimize object allocation
		if count < len(pairs) {
			pairs[count].Key = bytesBuf.AddBytes(iter.Key())
			pairs[count].Value = bytesBuf.AddBytes(iter.Value())
		} else {
			pair := &sst.Pair{
				Key:   bytesBuf.AddBytes(iter.Key()),
				Value: bytesBuf.AddBytes(iter.Value()),
			}
			pairs = append(pairs, pair)
		}
		count++
		totalCount++
		size += kvSize
		totalSize += kvSize

		// flush a batch once it grows past the configured batch size.
		if size >= kvBatchSize {
			if err := flushKVs(); err != nil {
				return errors.Trace(err)
			}
			count = 0
			size = 0
			bytesBuf.Reset()
		}
		// stop early when the written data is large enough to trigger a region
		// split; the rest is handled by a follow-up regionScanned round.
		if totalSize >= regionMaxSize || totalCount >= j.regionSplitKeys {
			// we will shrink the key range of this job to real written range
			if iter.Next() {
				remainingStartKey = append([]byte{}, iter.Key()...)
				log.FromContext(ctx).Info("write to tikv partial finish",
					zap.Int64("count", totalCount),
					zap.Int64("size", totalSize),
					logutil.Key("startKey", j.keyRange.start),
					logutil.Key("endKey", j.keyRange.end),
					logutil.Key("remainStart", remainingStartKey),
					logutil.Region(region),
					logutil.Leader(j.region.Leader))
			}
			break
		}
	}

	if iter.Error() != nil {
		return errors.Trace(iter.Error())
	}

	// flush whatever is left after the loop.
	if count > 0 {
		if err := flushKVs(); err != nil {
			return errors.Trace(err)
		}
		count = 0
		size = 0
		bytesBuf.Reset()
	}

	var leaderPeerMetas []*sst.SSTMeta
	for i, wStream := range clients {
		resp, closeErr := wStream.CloseAndRecv()
		if closeErr != nil {
			return annotateErr(closeErr, allPeers[i])
		}
		if resp.Error != nil {
			return annotateErr(errors.New(resp.Error.Message), allPeers[i])
		}
		// only the leader peer's SST metas are used for the later ingest.
		if leaderID == region.Peers[i].GetId() {
			leaderPeerMetas = resp.Metas
			log.FromContext(ctx).Debug("get metas after write kv stream to tikv", zap.Reflect("metas", leaderPeerMetas))
		}
	}

	failpoint.Inject("NoLeader", func() {
		log.FromContext(ctx).Warn("enter failpoint NoLeader")
		leaderPeerMetas = nil
	})

	// if there is not leader currently, we don't forward the stage to wrote and let caller
	// handle the retry.
	if len(leaderPeerMetas) == 0 {
		log.FromContext(ctx).Warn("write to tikv no leader",
			logutil.Region(region), logutil.Leader(j.region.Leader),
			zap.Uint64("leader_id", leaderID), logutil.SSTMeta(meta),
			zap.Int64("kv_pairs", totalCount), zap.Int64("total_bytes", totalSize))
		return common.ErrNoLeader.GenWithStackByArgs(region.Id, leaderID)
	}

	takeTime := time.Since(begin)
	log.FromContext(ctx).Debug("write to kv", zap.Reflect("region", j.region), zap.Uint64("leader", leaderID),
		zap.Reflect("meta", meta), zap.Reflect("return metas", leaderPeerMetas),
		zap.Int64("kv_pairs", totalCount), zap.Int64("total_bytes", totalSize),
		zap.Int64("buf_size", bytesBuf.TotalSize()),
		zap.Stringer("takeTime", takeTime))
	if m, ok := metric.FromContext(ctx); ok {
		m.SSTSecondsHistogram.WithLabelValues(metric.SSTProcessWrite).Observe(takeTime.Seconds())
	}

	j.writeResult = &tikvWriteResult{
		sstMeta:           leaderPeerMetas,
		count:             totalCount,
		totalBytes:        totalSize,
		remainingStartKey: remainingStartKey,
	}
	j.convertStageTo(wrote)
	return nil
}
// ingest tries to finish the regionJob.
// if any ingest logic has error, ingest may retry sometimes to resolve it and finally
// set job to a proper stage with nil error returned.
// if any underlying logic has error, ingest will return an error to let caller
// handle it.
func (local *Backend) ingest(ctx context.Context, j *regionJob) (err error) {
	// only jobs already written to TiKV can be ingested.
	if j.stage != wrote {
		return nil
	}

	failpoint.Inject("fakeRegionJobs", func() {
		// test hook: consume the next injected behaviour instead of a real ingest.
		front := j.injected[0]
		j.injected = j.injected[1:]
		j.convertStageTo(front.ingest.nextStage)
		failpoint.Return(front.ingest.err)
	})

	// nothing to ingest (the write produced no SSTs): finish immediately.
	if len(j.writeResult.sstMeta) == 0 {
		j.convertStageTo(ingested)
		return nil
	}

	if m, ok := metric.FromContext(ctx); ok {
		begin := time.Now()
		defer func() {
			// only record the duration of successful ingests.
			if err == nil {
				m.SSTSecondsHistogram.WithLabelValues(metric.SSTProcessIngest).Observe(time.Since(begin).Seconds())
			}
		}()
	}

	for retry := 0; retry < maxRetryTimes; retry++ {
		resp, err := local.doIngest(ctx, j)
		if err == nil && resp.GetError() == nil {
			j.convertStageTo(ingested)
			return nil
		}
		if err != nil {
			if common.IsContextCanceledError(err) {
				return err
			}
			// transport-level error: log and retry the whole ingest round.
			log.FromContext(ctx).Warn("meet underlying error, will retry ingest",
				log.ShortError(err), logutil.SSTMetas(j.writeResult.sstMeta),
				logutil.Region(j.region.Region), logutil.Leader(j.region.Leader))
			continue
		}
		// the response carries a TiKV-side error; move the job to a stage that
		// can resolve it (see convertStageOnIngestError).
		canContinue, err := j.convertStageOnIngestError(resp)
		if common.IsContextCanceledError(err) {
			return err
		}
		if !canContinue {
			log.FromContext(ctx).Warn("meet error and handle the job later",
				zap.Stringer("job stage", j.stage),
				logutil.ShortError(j.lastRetryableErr),
				j.region.ToZapFields(),
				logutil.Key("start", j.keyRange.start),
				logutil.Key("end", j.keyRange.end))
			return nil
		}
		log.FromContext(ctx).Warn("meet error and will doIngest region again",
			logutil.ShortError(j.lastRetryableErr),
			j.region.ToZapFields(),
			logutil.Key("start", j.keyRange.start),
			logutil.Key("end", j.keyRange.end))
	}
	return nil
}
// checkWriteStall probes each peer's store of the region with an empty
// MultiIngest request and reports whether any store answers ServerIsBusy.
// It returns (true, resp, nil) for the first busy store, (false, nil, nil)
// when none is busy, and a non-nil error on transport failure.
func (local *Backend) checkWriteStall(
	ctx context.Context,
	region *split.RegionInfo,
) (bool, *sst.IngestResponse, error) {
	clientFactory := local.importClientFactory
	for _, peer := range region.Region.GetPeers() {
		cli, err := clientFactory.Create(ctx, peer.StoreId)
		if err != nil {
			return false, nil, errors.Trace(err)
		}
		// currently we use empty MultiIngestRequest to check if TiKV is busy.
		// If in future the rate limit feature contains more metrics we can switch to use it.
		resp, err := cli.MultiIngest(ctx, &sst.MultiIngestRequest{})
		if err != nil {
			return false, nil, errors.Trace(err)
		}
		if resp.Error != nil && resp.Error.ServerIsBusy != nil {
			return true, resp, nil
		}
	}
	return false, nil, nil
}
// doIngest send ingest commands to TiKV based on regionJob.writeResult.sstMeta.
// When meet error, it will remove finished sstMetas before return.
func (local *Backend) doIngest(ctx context.Context, j *regionJob) (*sst.IngestResponse, error) {
	clientFactory := local.importClientFactory
	supportMultiIngest := local.supportMultiIngest
	shouldCheckWriteStall := local.ShouldCheckWriteStall
	if shouldCheckWriteStall {
		// back off early if any replica's store reports ServerIsBusy.
		writeStall, resp, err := local.checkWriteStall(ctx, j.region)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if writeStall {
			return resp, nil
		}
	}

	// without multi-ingest support SSTs must be ingested one by one.
	batch := 1
	if supportMultiIngest {
		batch = len(j.writeResult.sstMeta)
	}

	var resp *sst.IngestResponse
	for start := 0; start < len(j.writeResult.sstMeta); start += batch {
		end := mathutil.Min(start+batch, len(j.writeResult.sstMeta))
		ingestMetas := j.writeResult.sstMeta[start:end]

		log.FromContext(ctx).Debug("ingest meta", zap.Reflect("meta", ingestMetas))

		failpoint.Inject("FailIngestMeta", func(val failpoint.Value) {
			// only inject the error once
			var resp *sst.IngestResponse

			switch val.(string) {
			case "notleader":
				resp = &sst.IngestResponse{
					Error: &errorpb.Error{
						NotLeader: &errorpb.NotLeader{
							RegionId: j.region.Region.Id,
							Leader:   j.region.Leader,
						},
					},
				}
			case "epochnotmatch":
				resp = &sst.IngestResponse{
					Error: &errorpb.Error{
						EpochNotMatch: &errorpb.EpochNotMatch{
							CurrentRegions: []*metapb.Region{j.region.Region},
						},
					},
				}
			}
			failpoint.Return(resp, nil)
		})

		leader := j.region.Leader
		if leader == nil {
			// fall back to the first peer when leader info is missing.
			leader = j.region.Region.GetPeers()[0]
		}

		cli, err := clientFactory.Create(ctx, leader.StoreId)
		if err != nil {
			return nil, errors.Trace(err)
		}
		reqCtx := &kvrpcpb.Context{
			RegionId:    j.region.Region.GetId(),
			RegionEpoch: j.region.Region.GetRegionEpoch(),
			Peer:        leader,
			ResourceControlContext: &kvrpcpb.ResourceControlContext{
				ResourceGroupName: local.ResourceGroupName,
			},
			RequestSource: util.BuildRequestSource(true, kv.InternalTxnLightning, local.TaskType),
		}

		if supportMultiIngest {
			req := &sst.MultiIngestRequest{
				Context: reqCtx,
				Ssts:    ingestMetas,
			}
			resp, err = cli.MultiIngest(ctx, req)
		} else {
			req := &sst.IngestRequest{
				Context: reqCtx,
				Sst:     ingestMetas[0],
			}
			resp, err = cli.Ingest(ctx, req)
		}
		if resp.GetError() != nil || err != nil {
			// remove finished sstMetas
			j.writeResult.sstMeta = j.writeResult.sstMeta[start:]
			return resp, errors.Trace(err)
		}
	}
	return resp, nil
}
// convertStageOnIngestError will try to fix the error contained in ingest response.
// Return (_, error) when another error occurred.
// Return (true, nil) when the job can retry ingesting immediately.
// Return (false, nil) when the job should be put back to queue.
func (j *regionJob) convertStageOnIngestError(
	resp *sst.IngestResponse,
) (bool, error) {
	if resp.GetError() == nil {
		return true, nil
	}

	var newRegion *split.RegionInfo
	switch errPb := resp.GetError(); {
	case errPb.NotLeader != nil:
		j.lastRetryableErr = common.ErrKVNotLeader.GenWithStack(errPb.GetMessage())

		// meet a problem that the region leader+peer are all updated but the return
		// error is only "NotLeader", we should update the whole region info.
		j.convertStageTo(needRescan)
		return false, nil
	case errPb.EpochNotMatch != nil:
		j.lastRetryableErr = common.ErrKVEpochNotMatch.GenWithStack(errPb.GetMessage())

		// the response may carry the up-to-date regions; reuse the one that
		// still fully contains our SSTs so a rescan can be skipped.
		if currentRegions := errPb.GetEpochNotMatch().GetCurrentRegions(); currentRegions != nil {
			var currentRegion *metapb.Region
			for _, r := range currentRegions {
				if insideRegion(r, j.writeResult.sstMeta) {
					currentRegion = r
					break
				}
			}
			if currentRegion != nil {
				var newLeader *metapb.Peer
				for _, p := range currentRegion.Peers {
					if p.GetStoreId() == j.region.Leader.GetStoreId() {
						newLeader = p
						break
					}
				}
				if newLeader != nil {
					newRegion = &split.RegionInfo{
						Leader: newLeader,
						Region: currentRegion,
					}
				}
			}
		}
		if newRegion != nil {
			// updated region info recovered; rewrite the data against it.
			j.region = newRegion
			j.convertStageTo(regionScanned)
			return false, nil
		}
		j.convertStageTo(needRescan)
		return false, nil
	case strings.Contains(errPb.Message, "raft: proposal dropped"):
		j.lastRetryableErr = common.ErrKVRaftProposalDropped.GenWithStack(errPb.GetMessage())

		j.convertStageTo(needRescan)
		return false, nil
	case errPb.ServerIsBusy != nil:
		j.lastRetryableErr = common.ErrKVServerIsBusy.GenWithStack(errPb.GetMessage())

		// region info stays valid; keep the current stage and retry later.
		return false, nil
	case errPb.RegionNotFound != nil:
		j.lastRetryableErr = common.ErrKVRegionNotFound.GenWithStack(errPb.GetMessage())

		j.convertStageTo(needRescan)
		return false, nil
	case errPb.ReadIndexNotReady != nil:
		j.lastRetryableErr = common.ErrKVReadIndexNotReady.GenWithStack(errPb.GetMessage())

		// this error happens when this region is splitting, the error might be:
		//   read index not ready, reason can not read index due to split, region 64037
		// we have paused schedule, but it's temporary,
		// if next request takes a long time, there's chance schedule is enabled again
		// or on key range border, another engine sharing this region tries to split this
		// region may cause this error too.
		j.convertStageTo(needRescan)
		return false, nil
	case errPb.DiskFull != nil:
		j.lastRetryableErr = common.ErrKVIngestFailed.GenWithStack(errPb.GetMessage())

		// a full disk cannot be fixed by retrying; surface the error.
		return false, errors.Errorf("non-retryable error: %s", resp.GetError().GetMessage())
	}
	// all others doIngest error, such as stale command, etc. we'll retry it again from writeAndIngestByRange
	j.lastRetryableErr = common.ErrKVIngestFailed.GenWithStack(resp.GetError().GetMessage())
	j.convertStageTo(regionScanned)
	return false, nil
}
// regionJobRetryHeap is a min-heap of *regionJob ordered by waitUntil, used
// by regionJobRetryer to pop the job whose retry time arrives first.
type regionJobRetryHeap []*regionJob

// static assertion that regionJobRetryHeap implements heap.Interface.
var _ heap.Interface = (*regionJobRetryHeap)(nil)

// Len implements heap.Interface.
func (h *regionJobRetryHeap) Len() int {
	return len(*h)
}

// Less implements heap.Interface; earlier waitUntil sorts first.
func (h *regionJobRetryHeap) Less(i, j int) bool {
	v := *h
	return v[i].waitUntil.Before(v[j].waitUntil)
}

// Swap implements heap.Interface.
func (h *regionJobRetryHeap) Swap(i, j int) {
	v := *h
	v[i], v[j] = v[j], v[i]
}

// Push implements heap.Interface; x must be a *regionJob.
func (h *regionJobRetryHeap) Push(x any) {
	*h = append(*h, x.(*regionJob))
}

// Pop implements heap.Interface.
func (h *regionJobRetryHeap) Pop() any {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[0 : n-1]
	return x
}
// regionJobRetryer is a concurrent-safe queue holding jobs that need to put
// back later, and put back when the regionJob.waitUntil is reached. It maintains
// a heap of jobs internally based on the regionJob.waitUntil field.
type regionJobRetryer struct {
	// lock acquiring order: protectedClosed > protectedQueue > protectedToPutBack

	// protectedClosed guards whether the retryer still accepts new jobs.
	protectedClosed struct {
		mu     sync.Mutex
		closed bool
	}
	// protectedQueue guards the min-heap of pending jobs.
	protectedQueue struct {
		mu sync.Mutex
		q  regionJobRetryHeap
	}
	// protectedToPutBack guards the single job popped from the heap but not
	// yet delivered to putBackCh.
	protectedToPutBack struct {
		mu        sync.Mutex
		toPutBack *regionJob
	}
	// putBackCh is where due jobs are re-delivered to the caller.
	putBackCh chan<- *regionJob
	// reload is a buffered signal channel that wakes run after a push.
	reload chan struct{}
	// jobWg is decremented for jobs that are dropped at close time.
	jobWg *sync.WaitGroup
}
// startRegionJobRetryer starts a new regionJobRetryer and it will run in
// background to put the job back to `putBackCh` when job's waitUntil is reached.
// Cancel the `ctx` will stop retryer and `jobWg.Done` will be trigger for jobs
// that are not put back yet.
func startRegionJobRetryer(
ctx context.Context,
putBackCh chan<- *regionJob,
jobWg *sync.WaitGroup,
) *regionJobRetryer {
ret := ®ionJobRetryer{
putBackCh: putBackCh,
reload: make(chan struct{}, 1),
jobWg: jobWg,
}
ret.protectedQueue.q = make(regionJobRetryHeap, 0, 16)
go ret.run(ctx)
return ret
}
// run is only internally used, caller should not use it.
// It loops forever: while the heap has a job it waits for the earliest
// waitUntil (or a reload signal after a push), pops the job and sends it to
// putBackCh; with an empty heap it just waits for reload or cancellation.
func (q *regionJobRetryer) run(ctx context.Context) {
	defer q.close()

	for {
		var front *regionJob
		q.protectedQueue.mu.Lock()
		if len(q.protectedQueue.q) > 0 {
			front = q.protectedQueue.q[0]
		}
		q.protectedQueue.mu.Unlock()

		switch {
		case front != nil:
			select {
			case <-ctx.Done():
				return
			case <-q.reload:
				// a new job was pushed; re-evaluate which job is due first.
			case <-time.After(time.Until(front.waitUntil)):
				q.protectedQueue.mu.Lock()
				q.protectedToPutBack.mu.Lock()
				q.protectedToPutBack.toPutBack = heap.Pop(&q.protectedQueue.q).(*regionJob)
				// release the lock of queue to avoid blocking regionJobRetryer.push
				q.protectedQueue.mu.Unlock()

				// hold the lock of toPutBack to make sending to putBackCh and
				// resetting toPutBack atomic w.r.t. regionJobRetryer.close
				select {
				case <-ctx.Done():
					q.protectedToPutBack.mu.Unlock()
					return
				case q.putBackCh <- q.protectedToPutBack.toPutBack:
					q.protectedToPutBack.toPutBack = nil
					q.protectedToPutBack.mu.Unlock()
				}
			}
		default:
			// len(q.q) == 0
			select {
			case <-ctx.Done():
				return
			case <-q.reload:
			}
		}
	}
}
// close is only internally used, caller should not use it.
// It marks the retryer closed and calls jobWg.Done once for every job still
// held (queued plus the one pending delivery), so waiters are released.
func (q *regionJobRetryer) close() {
	q.protectedClosed.mu.Lock()
	defer q.protectedClosed.mu.Unlock()
	q.protectedClosed.closed = true

	pending := len(q.protectedQueue.q)
	if q.protectedToPutBack.toPutBack != nil {
		pending++
	}
	for i := 0; i < pending; i++ {
		q.jobWg.Done()
	}
}
// push should not be blocked for long time in any cases.
// It returns false when the retryer is already closed, in which case the job
// is not enqueued and the caller keeps ownership of it.
func (q *regionJobRetryer) push(job *regionJob) bool {
	q.protectedClosed.mu.Lock()
	defer q.protectedClosed.mu.Unlock()
	if q.protectedClosed.closed {
		return false
	}

	q.protectedQueue.mu.Lock()
	heap.Push(&q.protectedQueue.q, job)
	q.protectedQueue.mu.Unlock()

	select {
	case q.reload <- struct{}{}:
		// wake run so it re-checks the new heap front.
	default:
		// a reload signal is already pending; run will see the new job anyway.
	}
	return true
}
|
/*
Copyright 2020 The Qmgo Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package qmgo
import (
"context"
opts "github.com/qiniu/qmgo/options"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/x/mongo/driver"
)
// Session is a struct that represents a MongoDB logical session.
type Session struct {
	session mongo.Session
}
// StartTransaction starts a transaction and runs cb inside it.
// Preconditions:
//   - version of the mongoDB server >= v4.0
//   - topology of the mongoDB server is not Single
//
// At the same time, please pay attention to the following:
//   - make sure all operations in the callback use sessCtx as the context parameter
//   - don't forget to call EndSession when the session is no longer used
//   - if operations in the callback take more than (or equal to) 120s, the
//     operations will not take effect
//   - if an operation in the callback returns qmgo.ErrTransactionRetry, the
//     whole transaction will retry, so this transaction must be idempotent
//   - if operations in the callback return qmgo.ErrTransactionNotSupported,
//     the transaction cannot proceed (NOTE(review): the original comment was
//     truncated here — confirm the intended behavior upstream)
//   - if the ctx parameter already has a Session attached to it, it will be
//     replaced by this session
func (s *Session) StartTransaction(ctx context.Context, cb func(sessCtx context.Context) (interface{}, error), opts ...*opts.TransactionOptions) (interface{}, error) {
	// use caller-supplied transaction options when present, driver defaults otherwise.
	transactionOpts := options.Transaction()
	if len(opts) > 0 && opts[0].TransactionOptions != nil {
		transactionOpts = opts[0].TransactionOptions
	}
	result, err := s.session.WithTransaction(ctx, wrapperCustomCb(cb), transactionOpts)
	if err != nil {
		return nil, err
	}
	return result, nil
}
// EndSession will abort any existing transactions and close the session.
func (s *Session) EndSession(ctx context.Context) {
	s.session.EndSession(ctx)
}
// AbortTransaction aborts the active transaction for this session. This method will return an error if there is no
// active transaction for this session or the transaction has been committed or aborted.
func (s *Session) AbortTransaction(ctx context.Context) error {
	return s.session.AbortTransaction(ctx)
}
// wrapperCustomCb wraps the caller's callback function into the shape the
// mongo driver expects. When the callback returns qmgo.ErrTransactionRetry,
// it is translated into a mongo.CommandError labeled TransientTransactionError
// so the driver retries the whole transaction.
func wrapperCustomCb(cb func(ctx context.Context) (interface{}, error)) func(sessCtx mongo.SessionContext) (interface{}, error) {
	return func(sessCtx mongo.SessionContext) (interface{}, error) {
		result, err := cb(sessCtx)
		if err == ErrTransactionRetry {
			return nil, mongo.CommandError{Labels: []string{driver.TransientTransactionError}}
		}
		return result, err
	}
}
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"github.com/spencercjh/sshctx/internal/printer"
"io"
"os"
"github.com/fatih/color"
)
// Op is one command-line operation; Run executes it, writing normal output to
// stdout and diagnostics to stderr.
type Op interface {
	Run(stdout, stderr io.Writer) error
}
// main parses the CLI arguments into an Op, runs it, and exits with status 1
// after printing the error when the operation fails.
func main() {
	op := parseArgs(os.Args[1:])
	if err := op.Run(color.Output, color.Error); err != nil {
		// best-effort error printing; the write error is intentionally ignored.
		_ = printer.Error(color.Error, err.Error())
		// call os.Exit directly rather than deferring it: nothing runs after
		// this point, and a deferred os.Exit is easy to misread (os.Exit never
		// runs other deferred functions).
		os.Exit(1)
	}
}
|
package p01
import (
"testing"
)
// TestStock3 checks maxProfits against a known case: prices
// [3 3 5 0 0 3 1 4] yield a best profit of 6 with at most two trades.
func TestStock3(t *testing.T) {
	prices := []int{3, 3, 5, 0, 0, 3, 1, 4}
	if got := maxProfits(prices); got != 6 {
		t.FailNow()
	}
}
// maxProfits returns the maximum profit achievable with at most two
// buy/sell transactions over prices, via dynamic programming.
// dp[i][j][0] is the best profit after day i with at most j buys used and no
// stock held; dp[i][j][1] is the same while holding one share.
func maxProfits(prices []int) int {
	if len(prices) == 0 {
		return 0
	}
	count := 2
	dp := make([][3][2]int, len(prices))
	for i := 1; i < len(prices); i++ {
		for j := 2; j >= 1; j-- {
			// base case: either hold nothing (profit 0) or have just bought
			// at day 1's price.
			// NOTE(review): day 0's price is never considered as a buy (the
			// DP starts at i=1 with -prices[1]); confirm this is intended.
			if i == 1 {
				dp[i][j][0] = 0
				dp[i][j][1] = -prices[i]
				continue
			}
			// not holding: keep yesterday's cash position or sell today.
			dp[i][j][0] = maxNum(dp[i-1][j][0], dp[i-1][j][1]+prices[i])
			// holding: buy today (consumes one of the j buys) or keep holding.
			dp[i][j][1] = maxNum(dp[i-1][j-1][0]-prices[i], dp[i-1][j][1])
		}
	}
	return dp[len(prices)-1][count][0]
}
|
/*
The pigeonhole principle states that
If N items are put into M boxes, with N > M, then at least one box must contain more than one item.
For many, this principle has a special status compared to other mathematical enouncements. As E.W. Dijkstra wrote,
It is surrounded by some mystique. Proofs using it are often regarded as something special, something particularly ingenious.
The challenge
The purpose of this challenge is to illustrate the pigeonhole principle using ASCII art representations. Specifically:
Take as input N (number of items) and M (number of boxes), with N non-negative and M positive. N may be smaller than M (even if the principle does not apply in that case).
Randomly select one of the possible assignments of items to boxes. Each assignment should have a non-zero probability of being picked.
Produce an ASCII art representation of the assignment as follows:
There are M lines, each corresponding to a box.
Each line starts with a non-whitespace character, such as |.
Following that character is another non-whitespace character, such as #, repeated as many times as there are items in that box.
Consider for example N = 8, M = 5. If the selected assignment of items to boxes is 4, 1, 0, 3, 0, the representation is
|####
|#
|
|###
|
A different run (resulting in a different assignment) of the same program could give
|#
|##
|#
|#
|###
There is some flexibility regarding the representation; see below.
Specific rules
The code should theoretically run for any values of N and M. In practice it may be restricted by memory size or data type limitations.
Since observing the output is not sufficient to determine if all assignments have non-zero probability, each submission should explain how the code achieves that, if not obvious.
The following representation variations are allowed:
Any pair of different, non-whitespace characters can be chosen. They must be consistent across program executions.
90-degree rotations of the representation are acceptable. Again, the choice must be consistent.
Trailing or leading whitespace is allowed.
As an example with a different representation format, for N = 15, M = 6 the results of two executions of the program could be
VVVVVV
@@@@@@
@@ @@@
@ @@
@
or
VVVVV
@@@ @
@@@ @
@ @ @
@ @ @
@
Likewise, N = 5, M = 7 could give, using another variation of the representation,
*
* * * *
UUUUUUU
or
*** **
UUUUUUU
or
*
* *
* *
UUUUUUU
Note how the principle is not applicable in this case, because N < M.
General rules
Programs or functions are allowed, in any programming language. Standard loopholes are forbidden.
Input can be taken by any reasonable means; and with any format, such as an array of two numbers or two different strings.
Output means and format are also flexible. For example, the output can be a list of strings or a string with newlines; returned as function output argument or displayed in STDOUT. In the latter case it is not necessary to worry about line wrapping caused by limited display width.
Shortest code in bytes wins.
*/
package main
import (
"fmt"
"math/rand"
"strings"
"time"
)
// main seeds the global PRNG and prints one random assignment of 8 items
// into 5 boxes.
func main() {
	rand.Seed(time.Now().UnixNano())
	pigeonhole(8, 5)
}
// pigeonhole drops n items into m boxes uniformly at random and prints one
// line per box: "|" followed by one "#" per item, then a trailing blank line.
// Every assignment has non-zero probability because each item independently
// picks a uniformly random box.
func pigeonhole(n, m int) {
	counts := make([]int, m)
	for item := 0; item < n; item++ {
		counts[rand.Intn(m)]++
	}
	for _, c := range counts {
		fmt.Printf("|%s\n", strings.Repeat("#", c))
	}
	fmt.Printf("\n")
}
|
package db
import (
"database/sql"
"fmt"
"log"
)
// StartDB opens a PostgreSQL connection pool for the given credentials and
// returns it; it terminates the process if the driver rejects the arguments.
// NOTE(review): sql.Open only validates its arguments and does not dial the
// server — call Ping on the returned *sql.DB to verify connectivity. The
// "postgres" driver must be registered by the importing program (e.g. lib/pq).
func StartDB(user, pass, db, host string) *sql.DB {
	// idiomatic Go naming: locals use mixedCaps, not SCREAMING_SNAKE_CASE.
	connStr := fmt.Sprintf("host=%s port=5432 user=%s password=%s dbname=%s sslmode=disable", host, user, pass, db)
	dbConn, err := sql.Open("postgres", connStr)
	if err != nil {
		log.Fatalf("Database opening error -->%v\n", err)
	}
	return dbConn
}
|
package main
import "fmt"
// main demonstrates in-place slice filtering with nonempty2.
func main() {
	// scratch experiments kept for reference:
	//var s uint16
	//s := []int{0, 1, 2, 3, 4, 5, 6}
	//s1 := s[1:3]
	//s1[0] = 121
	//fmt.Println(s)
	data := []string{"one", "", "three"}
	// nonempty2 filters in place, so data's backing array is overwritten too.
	data1 := nonempty2(data)
	fmt.Println(data, data1)
	//s2 := string(b)
}
// nonempty2 returns the non-empty strings of in, preserving their order.
// It filters in place: the result aliases in's backing array, so the caller's
// slice contents are overwritten up to the returned length.
// (Fixes the garbled inline comment that contained leftover pasted code, and
// renames the parameter so it no longer shadows the standard `strings` package.)
func nonempty2(in []string) []string {
	out := in[:0] // zero-length slice sharing in's backing array
	for _, s := range in {
		if s != "" {
			out = append(out, s)
		}
	}
	return out
}
|
package main
import "fmt"
// main demonstrates declaring and printing a Go map using a composite literal.
func main() {
	// earlier step-by-step version kept for reference:
	// // Define map
	// emails := make(map[string]string)
	// // assign kv
	// emails["Abhinav"] = "abhinav@designs.studio"
	// emails["Joe"] = "Joe@JoeDonuts.com"
	// emails["John"] = "John@Doe.com"
	// // print map
	// fmt.Println(emails)
	// // print one kv
	// fmt.Println(emails["Abhinav"])
	// // Delete from map
	// delete(emails, "John")
	// // print map
	// fmt.Println(emails)

	// declare and assign in a single map literal
	emails := map[string]string{"Abhinav": "abhinav@designs.studio", "Joe": "Joe@JoeDonuts.com"}
	// print map
	fmt.Println(emails)
}
|
package main
import (
"bufio"
"fmt"
"os"
"sort"
"strconv"
"strings"
"sync"
)
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
func sortChunk(arr []int, wg *sync.WaitGroup) {
defer wg.Done()
fmt.Println("Sorting chunk : ", arr)
if len(arr) > 1 {
sort.Slice(arr, func(i, j int) bool { return arr[i] < arr[j] })
}
}
func mergeChunk(arr1 []int, arr2 []int, arr []int, wg *sync.WaitGroup) {
defer wg.Done()
n := len(arr1)
m := len(arr2)
i, j, k := 0, 0, 0
for i < n && j < m {
if arr1[i] < arr2[j] {
arr[k] = arr1[i]
i++
} else {
arr[k] = arr2[j]
j++
}
k++
}
for i < n {
arr[k] = arr1[i]
k++
i++
}
for j < m {
arr[k] = arr2[j]
k++
j++
}
}
// main reads a line of space-separated integers from stdin, splits the
// slice into four contiguous quarters, sorts each quarter in its own
// goroutine, then merges pairwise (1+2, 3+4, then the two halves) to
// produce the fully sorted result, which is printed.
func main() {
	scanner := bufio.NewScanner(os.Stdin)
	var wg sync.WaitGroup
	arr := make([]int, 0)
	fmt.Println("Enter the sequence of integers separated by space:")
	if scanner.Scan() {
		input := strings.Split(scanner.Text(), " ")
		for _, val := range input {
			// Skip the empty tokens produced by repeated spaces.
			if len(val) == 0 {
				continue
			}
			num, err := strconv.Atoi(val)
			if err != nil {
				panic(err)
			}
			arr = append(arr, num)
		}
	}
	n := len(arr)
	// Empty input: nothing to sort, print and exit.
	if n == 0 {
		fmt.Printf("Sorted array : ")
		fmt.Println(arr)
		return
	}
	// Creating four parts out of input integer sequence. The quarters
	// are disjoint subslices of arr, so the sorting goroutines below
	// never touch the same elements.
	part1 := arr[0 : n/4]
	var part2, part3, part4 []int
	if n/4 < n {
		part2 = arr[n/4 : min(n/2, n)]
	}
	if n/2 < n {
		part3 = arr[n/2 : min(n, int((3*n)/4))]
	}
	if (3*n)/4 < n {
		part4 = arr[(3*n)/4 : n]
	}
	// fmt.Println(arr)
	// fmt.Println(part1, part2, part3, part4)
	// Invoking goroutines to sort each part individually.
	wg.Add(4)
	go sortChunk(part1, &wg)
	go sortChunk(part2, &wg)
	go sortChunk(part3, &wg)
	go sortChunk(part4, &wg)
	// Waiting for the above goroutines to finish.
	wg.Wait()
	// fmt.Println(part1, part2, part3, part4)
	wg.Add(2)
	part12 := make([]int, len(part1)+len(part2))
	part34 := make([]int, len(part3)+len(part4))
	// Merge part1 and part2 --> part12.
	go mergeChunk(part1, part2, part12, &wg)
	// Merge part3 and part4 --> part34.
	go mergeChunk(part3, part4, part34, &wg)
	// Waiting for the above goroutines to finish.
	wg.Wait()
	wg.Add(1)
	sortedArr := make([]int, len(part12)+len(part34))
	// Merge part12 and part34 --> sortedArr.
	go mergeChunk(part12, part34, sortedArr, &wg)
	wg.Wait()
	fmt.Println("Sorted array : ", sortedArr)
}
|
package main
import (
"fmt"
"html/template"
"log"
"net/http"
"os"
"sort"
"github.com/gorilla/pat"
"github.com/gorilla/sessions"
"github.com/markbates/goth"
"github.com/markbates/goth/gothic"
"github.com/markbates/goth/providers/twitter"
)
// ProviderIndex holds the data rendered on the index page: the sorted
// list of OAuth provider keys and a map from key to display name.
type ProviderIndex struct {
	Providers []string
	ProvidersMap map[string]string
}
// init configures gothic's session cookie store, keyed with the
// TWITTER_SECRET environment variable.
func init() {
	gothic.Store = sessions.NewCookieStore([]byte(os.Getenv("TWITTER_SECRET")))
}
// main registers the Twitter OAuth provider with goth, wires the auth,
// callback, logout, and index routes on a gorilla/pat router, and
// serves on :3000.
//
// Fix: template.ParseFiles errors were previously discarded (t, _ :=),
// so a missing/broken template file made t nil and Execute panicked.
// Parse failures now return 500 to the client instead.
func main() {
	goth.UseProviders(
		twitter.New(os.Getenv("TWITTER_KEY"), os.Getenv("TWITTER_SECRET"), "http://127.0.0.1:3000/auth/twitter/callback"),
	)
	m := make(map[string]string)
	m["twitter"] = "Twitter"
	// Sort provider keys so the index page ordering is deterministic.
	var keys []string
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	providerIndex := &ProviderIndex{
		Providers:    keys,
		ProvidersMap: m,
	}
	p := pat.New()
	p.Get("/auth/{provider}/callback", func(res http.ResponseWriter, req *http.Request) {
		user, err := gothic.CompleteUserAuth(res, req)
		if err != nil {
			fmt.Fprintln(res, err)
			return
		}
		t, err := template.ParseFiles("templates/success.html")
		if err != nil {
			http.Error(res, err.Error(), http.StatusInternalServerError)
			return
		}
		t.Execute(res, user)
	})
	p.Get("/logout/{provider}", func(res http.ResponseWriter, req *http.Request) {
		gothic.Logout(res, req)
		res.Header().Set("Location", "/")
		res.WriteHeader(http.StatusTemporaryRedirect)
	})
	p.Get("/auth/{provider}", func(res http.ResponseWriter, req *http.Request) {
		// Try to get the user without re-authenticating.
		if gothUser, err := gothic.CompleteUserAuth(res, req); err == nil {
			t, err := template.ParseFiles("templates/success.html")
			if err != nil {
				http.Error(res, err.Error(), http.StatusInternalServerError)
				return
			}
			t.Execute(res, gothUser)
		} else {
			gothic.BeginAuthHandler(res, req)
		}
	})
	p.Get("/", func(res http.ResponseWriter, req *http.Request) {
		t, err := template.ParseFiles("templates/index.html")
		if err != nil {
			http.Error(res, err.Error(), http.StatusInternalServerError)
			return
		}
		t.Execute(res, providerIndex)
	})
	log.Println("listening on localhost:3000")
	log.Fatal(http.ListenAndServe(":3000", p))
}
|
package atomix
import (
"fmt"
"math"
"sync/atomic"
)
// Complex64 is an atomic wrapper around complex64. The value is stored
// as a single uint64: the real part's float32 bit pattern in the high
// 32 bits and the imaginary part's in the low 32 bits, so all accesses
// can go through sync/atomic uint64 operations.
type Complex64 struct {
	atomicType
	ri uint64
}
// NewComplex64 creates a Complex64 initialized to c.
func NewComplex64(c complex64) *Complex64 {
	return &Complex64{ri: complex64ToUint64(c)}
}
// String formats the current value using fmt's default complex syntax.
func (c *Complex64) String() string {
	return fmt.Sprint(c.Load())
}
// Load atomically loads and returns the value.
func (c *Complex64) Load() complex64 {
	return uint64ToComplex64(atomic.LoadUint64(&c.ri))
}
// Store atomically stores the given value.
func (c *Complex64) Store(s complex64) {
	atomic.StoreUint64(&c.ri, complex64ToUint64(s))
}
// Add atomically adds s to the stored value using a compare-and-swap
// retry loop and returns the new value.
func (c *Complex64) Add(s complex64) complex64 {
	for {
		old := c.Load()
		updated := old + s
		if c.CAS(old, updated) {
			return updated
		}
		// CAS lost a race with a concurrent writer; reload and retry.
	}
}
// Sub atomically subtracts s from the stored value and returns the new
// value (implemented as an Add of the negation).
func (c *Complex64) Sub(s complex64) complex64 {
	return c.Add(-s)
}
// CAS is an atomic Compare-And-Swap: it replaces oc with nc only if the
// current value equals oc, reporting whether the swap happened. Note
// the comparison is on the packed bit patterns, not float equality.
func (c *Complex64) CAS(oc, nc complex64) bool {
	return atomic.CompareAndSwapUint64(&c.ri, complex64ToUint64(oc), complex64ToUint64(nc))
}
func uint64ToComplex64(u uint64) complex64 {
return complex(math.Float32frombits(uint32(u>>32)), math.Float32frombits(uint32((u<<32)>>32)))
}
func complex64ToUint64(c complex64) uint64 {
return uint64(math.Float32bits(real(c)))<<32 | uint64(math.Float32bits(imag(c)))
}
|
package routinghelpers
import (
"context"
"testing"
routing "gx/ipfs/QmRjT8Bkut84fHf9nxMQBxGsqLAkqzMdFaemDK7e61dBNZ/go-libp2p-routing"
)
// TestLimitedValueStore checks that a LimitedValueStore only accepts
// keys under its allowed namespaces: keys under "/allow/" round-trip,
// while everything else is rejected on Put and invisible on Get even
// when written directly through the underlying store.
func TestLimitedValueStore(t *testing.T) {
	d := LimitedValueStore{
		ValueStore: new(dummyValueStore),
		Namespaces: []string{"allow"},
	}
	ctx := context.Background()
	// Keys inside the allowed namespace must store and load correctly.
	for i, k := range []string{
		"/allow/hello",
		"/allow/foo",
		"/allow/foo/bar",
	} {
		if err := d.PutValue(ctx, k, []byte{byte(i)}); err != nil {
			t.Fatal(err)
		}
		v, err := d.GetValue(ctx, k)
		if err != nil {
			t.Fatal(err)
		}
		if len(v) != 1 || v[0] != byte(i) {
			t.Fatalf("expected value [%d], got %v", i, v)
		}
	}
	// Keys outside the namespace (including malformed ones) must be
	// rejected by the limited store, and values written through the
	// raw underlying store must stay invisible through the wrapper.
	for i, k := range []string{
		"/deny/hello",
		"/allow",
		"allow",
		"deny",
		"",
		"/",
		"//",
		"///",
		"//allow",
	} {
		if err := d.PutValue(ctx, k, []byte{byte(i)}); err != routing.ErrNotSupported {
			t.Fatalf("expected put with key %s to fail", k)
		}
		_, err := d.GetValue(ctx, k)
		if err != routing.ErrNotFound {
			t.Fatalf("expected get with key %s to fail", k)
		}
		_, err = d.ValueStore.GetValue(ctx, k)
		if err != routing.ErrNotFound {
			t.Fatalf("expected get with key %s to fail", k)
		}
		// Write directly to the inner store to prove the wrapper still
		// refuses to expose the value.
		err = d.ValueStore.PutValue(ctx, k, []byte{byte(i)})
		if err != nil {
			t.Fatal(err)
		}
		_, err = d.GetValue(ctx, k)
		if err == nil {
			t.Fatalf("expected get with key %s to fail", k)
		}
	}
}
|
package main
import (
"fmt"
"time"
)
func countWeekday(fromYear, toYear int, weekday time.Weekday) int {
var cnt int
for y := fromYear; y <= toYear; y++ {
for m := 1; m <= 12; m++ {
d := time.Date(y, time.Month(m), 1, 0, 0, 0, 0, time.UTC)
if d.Weekday() == weekday {
cnt++
}
}
}
return cnt
}
// main prints how many months between 1901 and 2000 started on a
// Sunday. Uses the named time.Sunday constant instead of the magic
// literal 0 (same value, self-documenting).
func main() {
	fmt.Println(countWeekday(1901, 2000, time.Sunday))
}
// Count the number of Sunday on the first date of the month.
|
package main
import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"strings"

	"github.com/aws/aws-lambda-go/lambda"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)
// s3Objects caches the bucket listing taken once at cold start (see init).
var s3Objects []*s3.Object
// svc is the shared S3 client, built in init.
var svc *s3.S3
// bucket is the source bucket scanned for JSON payloads.
var bucket = "gn5456-easel-dev-batch-image-uploader"
//var bucketPrefix = "jb_source/"
// doneDir / errorDir are the key prefixes objects are moved under after
// successful or failed processing.
var doneDir = "done/"
var errorDir = "error/"
// kafkaUrl is the Kafka REST proxy endpoint that receives each payload.
var kafkaUrl = "http://qa2-usw2np-cp-kafka-rest.service.usw2-np.consul:8082/topics/easel-tmp"
// init builds the shared S3 client and snapshots the bucket's object
// listing for the handler to process.
//
// Fix: the ListObjectsV2 error was discarded; on failure `result` is
// nil and reading result.Contents panicked the lambda at cold start.
// We now fail fast with a diagnostic instead.
func init() {
	sess := session.Must(session.NewSession())
	svc = s3.New(sess)
	result, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
		Bucket: aws.String(bucket),
		// Prefix: aws.String(bucketPrefix),
		// StartAfter: aws.String(bucketPrefix),
	})
	if err != nil {
		log.Fatal("unable to list objects in bucket ", bucket, ": ", err)
	}
	s3Objects = result.Contents
}
// easelKafkaHandler is the lambda entry point. For every object listed
// at cold start it: validates the key looks like JSON, downloads the
// body, posts it to the Kafka REST proxy, and moves the object under
// done/ (or error/ when the post fails). Per-object failures are
// logged and skipped so one bad object does not stop the batch; the
// handler itself always returns nil.
func easelKafkaHandler() (error) {
	log.Print("Starting process...\n")
	for _, s3Object := range s3Objects {
		key := *s3Object.Key
		err := validateExtension(key)
		if err != nil {
			// Not a .json key: log and skip (error/ move is disabled).
			log.Print(err)
			//err := copyAndDeleteObject(key, errorDir)
			continue
		}
		jsonString, err := getObject(key)
		if err != nil {
			log.Print("unable to get object ", key, " from s3: ", err)
			continue
		}
		err = postToKafka(jsonString)
		if err != nil {
			// Post failed: park the object under error/ for inspection.
			log.Print(err)
			_ = copyAndDeleteObject(key, errorDir)
			continue
		}
		err = copyAndDeleteObject(key, doneDir)
		//err = copyObject(key, doneDir)
		if err != nil {
			log.Print(err)
			continue
		}
		/*
		err = deleteObject(key)
		if err != nil {
			log.Print(err)
			continue
		}
		*/
	}
	return nil
}
// postToKafka POSTs jsonObject to the Kafka REST proxy at kafkaUrl and
// logs the proxy's response body. It returns an error if the request
// cannot be built or delivered; a non-2xx proxy status is only logged.
//
// Fixes: the http.NewRequest error was ignored (a nil req would panic
// in Do), and the response body was never closed, leaking the
// connection on every call.
func postToKafka(jsonObject []byte) error {
	log.Print("preparing to post ", string(jsonObject), " to: ", kafkaUrl, "\n")
	req, err := http.NewRequest("POST", kafkaUrl, bytes.NewBuffer(jsonObject))
	if err != nil {
		return fmt.Errorf("Unable to build kafka request. %v", err)
	}
	req.Header.Set("Content-Type", "application/vnd.kafka.json.v2+json")
	response, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("Unable to parse response. There may be a problem with the kafka topic. %v", err)
	}
	defer response.Body.Close()
	jsonResponse, _ := ioutil.ReadAll(response.Body)
	log.Print("postToKafka response: ", string(jsonResponse))
	return nil
}
// getObject downloads the object at key from the configured bucket and
// returns its raw bytes.
//
// Fixes: the response body was never closed (connection leak) and the
// ReadAll error was silently discarded (a truncated read looked like
// success).
func getObject(key string) ([]byte, error) {
	log.Print("Grabbing file: ", key)
	result, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return []byte{}, err
	}
	defer result.Body.Close()
	jsonString, err := ioutil.ReadAll(result.Body)
	if err != nil {
		return []byte{}, err
	}
	return jsonString, nil
}
// copyAndDeleteObject moves the object at key under the destination
// prefix: it copies first and deletes the original only after the copy
// succeeds, returning the first error encountered.
func copyAndDeleteObject(key, destination string) error {
	if err := copyObject(key, destination); err != nil {
		return err
	}
	return deleteObject(key)
}
// copyObject server-side copies bucket/key to bucket/destination+key
// and blocks until S3 reports the new object exists. The destination
// is a key prefix (e.g. "done/"), not another bucket.
func copyObject(key, destination string) error {
	log.Print("copying key: ", key, " to ", destination)
	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket: aws.String(bucket),
		// CopySource is "<bucket>/<key>" per the S3 CopyObject API.
		CopySource: aws.String(bucket + "/" + key),
		Key: aws.String(destination + key),
	})
	if err != nil {
		return fmt.Errorf("Unable to copy object to %s bucket. %v", destination, err)
	}
	log.Print("Waiting for copy to complete")
	// Poll until the copied object is visible (S3 is eventually
	// consistent for this wait helper).
	err = svc.WaitUntilObjectExists(&s3.HeadObjectInput{
		Bucket: aws.String(bucket),
		Key: aws.String(destination + key),
	})
	if err != nil {
		return fmt.Errorf("Error occurred while waiting for item to be copied to %s bucket. %v", destination, err)
	}
	return nil
}
// deleteObject removes the object at key from the configured bucket
// and blocks until S3 reports it gone.
func deleteObject(key string) error {
	log.Print("deleting key: ", key)
	_, err := svc.DeleteObject(&s3.DeleteObjectInput{
		Bucket: aws.String(bucket),
		Key: aws.String(key),
	})
	if err != nil {
		return fmt.Errorf("Error occurred while deleting key from bucket. %v", err)
	}
	log.Print("Waiting for deletion to complete")
	// Poll until the deletion is visible.
	err = svc.WaitUntilObjectNotExists(&s3.HeadObjectInput{
		Bucket: aws.String(bucket),
		Key: aws.String(key),
	})
	if err != nil {
		return fmt.Errorf("Error occurred while waiting for item to be deleted to done bucket. %v", err)
	}
	return nil
}
// validateExtension returns nil when the S3 key names a JSON document
// (ends in ".json") and a descriptive error otherwise.
//
// Fix: the original sliced key[len(key)-5:], which panics for any key
// shorter than five bytes; strings.HasSuffix handles all lengths.
func validateExtension(key string) error {
	if strings.HasSuffix(key, ".json") {
		return nil
	}
	return fmt.Errorf("S3 Object: %s is of improper format", key)
}
// main hands the handler to the AWS Lambda runtime; execution blocks
// here for the lifetime of the lambda container.
func main() {
	lambda.Start(easelKafkaHandler)
}
|
package netlib
import (
"net"
)
// InterfaceCheckIPContains reports whether ip (a textual IP address)
// is covered by any address attached to the local network interfaces:
// contained in an *net.IPNet, or equal to an IP/TCP/UDP address.
// Unparseable input and interface-enumeration failures yield false.
func InterfaceCheckIPContains(ip string) bool {
	parsed := net.ParseIP(ip)
	if parsed == nil {
		return false
	}
	addrs, err := InterfaceAddrs()
	if err != nil {
		return false
	}
	for _, addr := range addrs {
		switch a := addr.(type) {
		case *net.IPNet:
			if a.Contains(parsed) {
				return true
			}
		case *net.IPAddr:
			if a.IP.Equal(parsed) {
				return true
			}
		case *net.TCPAddr:
			if a.IP.Equal(parsed) {
				return true
			}
		case *net.UDPAddr:
			if a.IP.Equal(parsed) {
				return true
			}
		}
	}
	return false
}
// InterfaceAddrs returns the addresses of every local interface with
// no filtering (down and loopback interfaces included).
func InterfaceAddrs() ([]net.Addr, error) {
	return InterfaceAddrsFilter(false, false)
}
// InterfaceAddrsFilter collects the addresses of the local network
// interfaces, optionally skipping interfaces that are down (mustUp)
// and loopback interfaces (noLoopback). Interfaces whose Addrs call
// fails are silently skipped; only the initial enumeration error is
// returned.
func InterfaceAddrsFilter(mustUp, noLoopback bool) ([]net.Addr, error) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return nil, err
	}
	var results []net.Addr
	for _, iface := range ifaces {
		if mustUp && iface.Flags&net.FlagUp == 0 {
			continue
		}
		if noLoopback && iface.Flags&net.FlagLoopback != 0 {
			continue
		}
		if addrs, addrErr := iface.Addrs(); addrErr == nil {
			results = append(results, addrs...)
		}
	}
	return results, nil
}
|
package go_recommend_me
// ModelParameters holds the hyperparameters for the recommendation
// algorithm (matrix factorization trained with SGD).
type ModelParameters struct{
	NumUsers int
	NumItems int
	// k, the dimensionality of the joint latent factor space —
	// i.e. the size of the latent-factor vectors.
	Dimensionality int
	// Number of known ratings.
	TrainingSize int
	// Number of steps in the SGD algorithm.
	Steps int
	// Alpha and Beta are constants controlling the extent of
	// regularization.
	Alpha float64
	Beta float64
	// Internal knobs; semantics depend on the chosen algorithm —
	// NOTE(review): not documented here, confirm against the trainer.
	algorithmType int
	binWidth int
	projFamSize int
}
|
package octopus
import (
"math"
"runtime"
"sync"
"sync/atomic"
"time"
)
// cachedWorker is one pooled goroutine. It repeatedly advertises
// itself on the pool's workerChannel, runs jobs received on
// jobChannel, and may retire after keepAliveTime of idleness.
type cachedWorker struct {
	pool *WorkPool
	jobChannel chan Future
	stop chan bool
}
// newCacheWorker builds a worker bound to cachedpool with fresh
// unbuffered job and stop channels; the caller starts it explicitly.
func newCacheWorker(cachedpool *WorkPool) *cachedWorker {
	return &cachedWorker{
		pool:       cachedpool,
		jobChannel: make(chan Future),
		stop:       make(chan bool),
	}
}
// getStopChannel exposes the worker's stop channel to the pool manager.
func (w *cachedWorker) getStopChannel() chan bool {
	return w.stop
}
// getJobChannel exposes the worker's job channel to the pool manager.
func (w *cachedWorker) getJobChannel() chan Future {
	return w.jobChannel
}
// start launches the worker goroutine. Each cycle it re-arms the idle
// timer, registers itself with the pool, then either runs a job,
// retires on idle timeout (if the pool is above its minimum size), or
// exits on a stop signal (echoing on w.stop to acknowledge).
//
// Fix: activeCount, completedJobCount and poolSize were updated with
// plain read-modify-write from many worker goroutines concurrently — a
// data race. They are now updated via sync/atomic (decrement expressed
// as AddUint64 with ^uint64(0)).
func (w *cachedWorker) start() {
	timer := time.NewTimer(w.pool.keepAliveTime)
	go func() {
		for {
			timer.Reset(w.pool.keepAliveTime)
			w.pool.workerChannel <- w
			select {
			case job := <-w.jobChannel:
				w.runJob(job)
			// Idle: no job arrived within keepAliveTime.
			case <-timer.C:
				if w.pool.poolSize > w.pool.minPoolSize {
					// Above the floor: retire this worker.
					atomic.AddUint64(&w.pool.poolSize, ^uint64(0))
					return
				}
				// At the floor: keep waiting for a job or a stop.
				select {
				case job := <-w.jobChannel:
					w.runJob(job)
				case stop := <-w.stop:
					if stop {
						atomic.AddUint64(&w.pool.poolSize, ^uint64(0))
						w.stop <- true
						return
					}
				}
			case stop := <-w.stop:
				if stop {
					atomic.AddUint64(&w.pool.poolSize, ^uint64(0))
					w.stop <- true
					return
				}
			}
		}
	}()
}
// runJob executes one job while keeping the pool's bookkeeping
// counters consistent (active count up/down, completed count up, and
// the pool WaitGroup held for the duration).
func (w *cachedWorker) runJob(job Future) {
	atomic.AddUint64(&w.pool.activeCount, 1)
	w.pool.wg.Add(1)
	job.execute()
	w.pool.wg.Done()
	atomic.AddUint64(&w.pool.activeCount, ^uint64(0))
	atomic.AddUint64(&w.pool.completedJobCount, 1)
}
// WorkPool is a goroutine pool with a cached-worker model: between
// minPoolSize and maxPoolSize workers, each idling at most
// keepAliveTime before retiring. A manager goroutine matches queued
// jobs to available workers and grows the pool when none are free
// within awaitWokerTime.
type WorkPool struct {
	// workerChannel carries idle workers offering themselves.
	workerChannel chan worker
	// jobChannel carries submitted jobs awaiting a worker.
	jobChannel chan Future
	// stop signals the manager to shut the pool down.
	stop chan bool
	// isPoolOpen is poolOpen/poolClose, read atomically by IsShutDown.
	isPoolOpen uint32
	isShutdownNow bool
	canDropJob bool
	// wg tracks in-flight jobs so shutdown can wait for them.
	wg *sync.WaitGroup
	minPoolSize uint64
	poolSize uint64
	maxPoolSize uint64
	keepAliveTime time.Duration
	// awaitWokerTime [sic — field name kept, used elsewhere] is how
	// long the manager waits for a free worker before growing the pool.
	awaitWokerTime time.Duration
	// activeCount / completedJobCount are approximate statistics.
	activeCount uint64
	completedJobCount uint64
	logFunc LogFunc
}
// NewFixWorkerPool creates a goroutine pool that reuses a fixed number
// of goroutines (min == max == workerNum), with a 60s keep-alive and a
// 1s manager wait.
func NewFixWorkerPool(workerNum uint64) (workpool *WorkPool, err error) {
	workpool, err = NewBaseCachedWorkerPool(workerNum, workerNum, 60*time.Second, 1*time.Second)
	return
}
// NewCachedWorkerPool creates a goroutine pool that grows on demand
// (effectively unbounded: max is MaxUint64) but reuses previously
// constructed goroutines when available. Minimum size is NumCPU.
func NewCachedWorkerPool() (workpool *WorkPool, err error) {
	workpool, err = NewBaseCachedWorkerPool(uint64(runtime.NumCPU()), math.MaxUint64, 60*time.Second, 1*time.Second)
	return
}
// NewBaseCachedWorkerPool creates a goroutine pool with the given
// minimum and maximum sizes, worker keep-alive time, and manager
// await-worker time. KeepAliveTime must be at least one second and
// MaxPoolSize must be non-zero. MinPoolSize workers are started
// eagerly, then the manager goroutine is launched.
func NewBaseCachedWorkerPool(MinPoolSize uint64, MaxPoolSize uint64, KeepAliveTime time.Duration, AwaitWokerTime time.Duration) (workpool *WorkPool, err error) {
	if MaxPoolSize == 0 {
		return nil, ErrInvalidArguments
	}
	if KeepAliveTime < 1*time.Second {
		return nil, ErrKeepAliveTimeArguments
	}
	pool := &WorkPool{
		workerChannel:     make(chan worker, MinPoolSize),
		jobChannel:        make(chan Future, MinPoolSize),
		stop:              make(chan bool),
		isPoolOpen:        poolOpen,
		isShutdownNow:     false,
		canDropJob:        false,
		wg:                &sync.WaitGroup{},
		minPoolSize:       MinPoolSize,
		poolSize:          MinPoolSize,
		maxPoolSize:       MaxPoolSize,
		keepAliveTime:     KeepAliveTime,
		awaitWokerTime:    AwaitWokerTime,
		activeCount:       0,
		completedJobCount: 0,
	}
	// Pre-spawn the minimum worker set.
	for i := uint64(0); i < MinPoolSize; i++ {
		newCacheWorker(pool).start()
	}
	go pool.manager()
	return pool, nil
}
// manager is the pool's dispatch loop: it pairs idle workers with
// queued jobs, grows the pool (or drops jobs, if enabled) when no
// worker frees up within awaitWokerTime, and performs shutdown when
// signalled on pool.stop.
func (pool *WorkPool) manager() {
	pool.log("WorkPool Manager : started")
	timer := time.NewTimer(pool.awaitWokerTime)
	for {
		timer.Reset(pool.awaitWokerTime)
		select {
		// An idle worker is available: hand it a job if one is queued,
		// otherwise put it back.
		case w := <- pool.workerChannel :
			select {
			case job := <- pool.jobChannel :
				w.getJobChannel() <- job
			default:
				pool.workerChannel <- w
			}
		// No worker freed up within awaitWokerTime.
		case <-timer.C :
			if !pool.canDropJob {
				if pool.GetPoolSize() < pool.GetMaxPoolSize() {
					pool.log("WorkPool Manager : add routine")
					worker := newCacheWorker(pool)
					worker.start()
					// NOTE(review): unsynchronized increment; workers
					// decrement poolSize concurrently — racy.
					pool.poolSize = pool.poolSize + 1
				}
			} else {
				// Drop one queued job instead of growing.
				select {
				case <- pool.jobChannel :
					pool.log("WorkPool Manager : drop job")
				default:
				}
			}
		case stop := <- pool.stop :
			if stop {
				pool.log("WorkPool Manager : stop pool begin")
				close(pool.jobChannel)
				// Drain any queued jobs to workers before stopping.
				if len(pool.jobChannel) > 0 {
					for j := range pool.jobChannel {
						w := <- pool.workerChannel
						w.getJobChannel() <- j
					}
				}
				// NOTE(review): this waits when isShutdownNow is true,
				// but ShutdownNow is documented as the non-waiting
				// variant and Shutdown as the waiting one — the
				// condition looks inverted; confirm intended behavior.
				if pool.isShutdownNow {
					// wait for all job done
					pool.wg.Wait()
				}
				// Close all workers, waiting for each acknowledgment.
				for pool.poolSize != 0 {
					worker := <- pool.workerChannel
					worker.getStopChannel() <- true
					<-worker.getStopChannel()
				}
				// NOTE(review): ShutdownNow closes pool.stop right
				// after signalling; this send can then panic on a
				// closed channel — confirm the shutdown handshake.
				pool.stop <- true
				pool.log("WorkPool Manager : stop pool end")
				return
			}
		}
	}
}
// SetDropJob controls whether the manager drops queued jobs (instead
// of growing the pool) when it times out waiting for a free worker.
func (pool *WorkPool) SetDropJob(ok bool) {
	pool.canDropJob = ok
}
// CanDropJob reports whether the manager will drop jobs when the pool
// is busy.
func (pool *WorkPool) CanDropJob() bool {
	return pool.canDropJob
}
// log forwards msg to the configured log function, if any.
func (pool *WorkPool) log(msg string) {
	if pool.logFunc != nil {
		pool.logFunc(msg)
	}
}
// GetPoolSize returns the approximate total number of goroutines in
// the pool. NOTE(review): read without synchronization — approximate
// by design.
func (pool *WorkPool) GetPoolSize() uint64 {
	return pool.poolSize
}
// GetActiveCount returns the approximate number of workers currently
// executing a job.
func (pool *WorkPool) GetActiveCount() uint64 {
	return pool.activeCount
}
// GetCompletedJobCount returns the approximate total number of jobs
// that have completed execution.
func (pool *WorkPool) GetCompletedJobCount() uint64 {
	return pool.completedJobCount
}
// IsShutDown reports whether the pool has been closed.
func (pool *WorkPool) IsShutDown() bool {
	return (atomic.LoadUint32(&pool.isPoolOpen) == poolClose)
}
// SetLogFunc installs a log function used to record pool events.
func (pool *WorkPool) SetLogFunc(function LogFunc) {
	pool.logFunc = function
}
// SetMinPoolSize sets the minimum number of goroutines kept alive.
func (pool *WorkPool) SetMinPoolSize(minPoolSize uint64) {
	pool.minPoolSize = minPoolSize
}
// GetMinPoolSize returns the minimum number of goroutines.
func (pool *WorkPool) GetMinPoolSize() uint64 {
	return pool.minPoolSize
}
// SetMaxPoolSize sets the maximum allowed number of goroutines.
func (pool *WorkPool) SetMaxPoolSize(maxPoolSize uint64) {
	pool.maxPoolSize = maxPoolSize
}
// GetMaxPoolSize returns the maximum allowed number of goroutines.
func (pool *WorkPool) GetMaxPoolSize() uint64 {
	return pool.maxPoolSize
}
// GetKeepAliveTime returns a worker's idle keep-alive duration.
func (pool *WorkPool) GetKeepAliveTime() time.Duration {
	return pool.keepAliveTime
}
// SetKeepAliveTime sets a worker's keep-alive duration; it must be at
// least one second or ErrKeepAliveTimeArguments is returned.
func (pool *WorkPool) SetKeepAliveTime(keepAliveTime time.Duration) error {
	if keepAliveTime < 1*time.Second {
		return ErrKeepAliveTimeArguments
	}
	pool.keepAliveTime = keepAliveTime
	return nil
}
// GetAwaitWorkerTime returns how long the manager waits for a free
// worker before reacting (growing the pool or dropping a job).
func (pool *WorkPool) GetAwaitWorkerTime() time.Duration {
	return pool.awaitWokerTime
}
// SubmitRunnable submits a Runnable job for execution and returns a
// Future representing it; ErrPoolShutdown if the pool is closed.
// Note the send on jobChannel may block until the manager dispatches.
func (pool *WorkPool) SubmitRunnable(job Runnable) (future Future, err error) {
	if pool.IsShutDown() {
		err = ErrPoolShutdown
		return
	}
	future = newRunnableFuture(pool, job)
	pool.jobChannel <- future
	return
}
// SubmitCallable submits a Callable job for execution and returns a
// Future representing it; ErrPoolShutdown if the pool is closed.
func (pool *WorkPool) SubmitCallable(job Callable) (future Future, err error) {
	if pool.IsShutDown() {
		err = ErrPoolShutdown
		return
	}
	future = newCallableFuture(pool, job)
	pool.jobChannel <- future
	return
}
// InvokeAllRunnable submits every Runnable job and returns the Futures
// in matching order; ErrPoolShutdown if the pool is closed. Per-job
// submit errors are ignored (the shutdown check already ran).
func (pool *WorkPool) InvokeAllRunnable(jobs []Runnable) (futures []Future, err error) {
	if pool.IsShutDown() {
		return nil, ErrPoolShutdown
	}
	futures = make([]Future, len(jobs))
	for i, job := range jobs {
		futures[i], _ = pool.SubmitRunnable(job)
	}
	return futures, nil
}
// InvokeAllCallable submits every Callable job and returns the Futures
// in matching order; ErrPoolShutdown if the pool is closed. Per-job
// submit errors are ignored (the shutdown check already ran).
func (pool *WorkPool) InvokeAllCallable(jobs []Callable) (futures []Future, err error) {
	if pool.IsShutDown() {
		return nil, ErrPoolShutdown
	}
	futures = make([]Future, len(jobs))
	for i, job := range jobs {
		futures[i], _ = pool.SubmitCallable(job)
	}
	return futures, nil
}
// Shutdown closes the pool and waits for the manager's shutdown
// handshake; it may block.
// NOTE(review): this sets isShutdownNow=false, yet the manager only
// calls wg.Wait() when isShutdownNow is true — the graceful/immediate
// semantics appear inverted relative to the doc comments; confirm.
func (pool *WorkPool) Shutdown() {
	if pool.IsShutDown() {
		return
	}
	atomic.SwapUint32(&pool.isPoolOpen, poolClose)
	pool.isShutdownNow = false
	pool.stop <- true
	// Wait for the manager's acknowledgment before closing the channel.
	<- pool.stop
	close(pool.stop)
}
// ShutdownNow closes the pool without waiting for the manager to
// finish; it never blocks on the handshake.
// NOTE(review): pool.stop is closed immediately after the send, but
// the manager later sends its acknowledgment on the same channel — a
// send on a closed channel panics; confirm the shutdown handshake.
func (pool *WorkPool) ShutdownNow() {
	if pool.IsShutDown() {
		return
	}
	atomic.SwapUint32(&pool.isPoolOpen, poolClose)
	pool.isShutdownNow = true
	pool.stop <- true
	close(pool.stop)
}
|
package main
import (
"encoding/json"
"fmt"
)
// person is the JSON shape {"name": ..., "age": ...} used by main.
type person struct {
	Name string `json:"name"`
	Age int `json:"age"`
}
// main decodes a fixed JSON document into a person and prints the
// fields.
//
// Fix: the json.Unmarshal error was discarded, so malformed input
// silently printed the zero value; decode failures are now reported.
func main() {
	str := `{"name":"张三", "age":15}`
	var p person
	if err := json.Unmarshal([]byte(str), &p); err != nil {
		fmt.Println("unmarshal:", err)
		return
	}
	fmt.Println(p.Name, p.Age)
}
|
package rudp
import "net"
// udpSrv adapts a net.Conn to the package's UDP receive interface.
type udpSrv struct {
	net.Conn
}
// recvUDP reads one datagram (up to maxUDPPktSize bytes) from the
// connection and returns the bytes actually read plus the read error.
func (us udpSrv) recvUDP() ([]byte, error) {
	buf := make([]byte, maxUDPPktSize)
	n, err := us.Read(buf)
	return buf[:n], err
}
// Connect returns a reliable-UDP Conn layered over conn, with the
// local peer ID set to PeerIDSrv and the remote initially PeerIDNil.
func Connect(conn net.Conn) *Conn {
	return newConn(udpSrv{conn}, PeerIDSrv, PeerIDNil)
}
|
package main
import (
"fmt"
)
// main sorts a sample array with counting sort (value range 0..10) and
// prints the result.
func main() {
	A := []int{6, 1, 1, 3, 2, 9, 0, 5, 7}
	sorted := countingSort(A, 10)
	fmt.Println("A after counting sort:", sorted)
}
// countingSort returns a sorted copy of A using counting sort.
// All values of A must lie in 0..k. The walk over A runs back-to-front
// so the sort is stable. Time complexity: O(len(A) + k).
func countingSort(A []int, k int) []int {
	// counts[v] ends up as the number of elements <= v (prefix sums).
	counts := make([]int, k+1)
	for _, v := range A {
		counts[v]++
	}
	for i := 1; i <= k; i++ {
		counts[i] += counts[i-1]
	}
	// Place each element at its final position, consuming a slot.
	out := make([]int, len(A))
	for j := len(A) - 1; j >= 0; j-- {
		v := A[j]
		counts[v]--
		out[counts[v]] = v
	}
	return out
}
|
// +build darwin dragonfly freebsd linux nacl netbsd openbsd

package main
import (
"os/exec"
)
// System runs cmd through `bash -c` and waits for it to finish,
// returning any error from starting or waiting on the process.
//
// Simplification: exec.Cmd.Run is exactly Start followed by Wait, so
// the two-step sequence collapses to one call with identical behavior.
func System(cmd Command) error {
	c := exec.Command("bash", "-c", string(cmd))
	return c.Run()
}
|
package instapi
import (
"context"
"net/http"
"net/url"
"time"
"github.com/instapi/client-go/types"
)
// AssignRole assigns an account role to the given user by POSTing
// {email, role} as JSON to accounts/{account}/roles. Only the request
// error is returned; the response payload is discarded.
func (c *Client) AssignRole(ctx context.Context, account, email, role string, options ...RequestOption) error {
	_, _, err := c.doRequest(
		ctx,
		http.MethodPost,
		types.JSON,
		c.endpoint+"accounts/"+url.PathEscape(account)+"/roles",
		0,
		struct {
			Email string `json:"email"`
			Role string `json:"role"`
		}{Email: email, Role: role},
		nil,
		options...,
	)
	return err
}
// Subscribe grants the dst account the given role on src's schema
// until expiresAt, by POSTing to accounts/{src}/schemas/{schema}/roles.
// NOTE(review): schema is not path-escaped, unlike src — confirm
// whether schema names can contain reserved URL characters.
func (c *Client) Subscribe(ctx context.Context, src, dst, schema, role string, expiresAt time.Time, options ...RequestOption) error {
	_, _, err := c.doRequest(
		ctx,
		http.MethodPost,
		types.JSON,
		c.endpoint+"accounts/"+url.PathEscape(src)+"/schemas/"+schema+"/roles",
		0,
		struct {
			Account string `json:"account"`
			Role string `json:"role"`
			ExpiresAt time.Time `json:"expiresAt"`
		}{
			Account: dst,
			Role: role,
			ExpiresAt: expiresAt,
		},
		nil,
		options...,
	)
	return err
}
|
package main
import "fmt"
// Package-level constants used by DEMO.
const s string = "ROBIN IS AWESOME"
const constantNumber = 591726
// person is a demo struct; unlike constants, struct values are mutable.
type person struct {
	name string
	age int
}
// DEMO prints the package constants and a greeting.
func DEMO() {
	fmt.Println("Constant", s, constantNumber)
	fmt.Println("Hello")
}
// add is a variadic function: it sums all its int arguments and prints
// the total.
func add(nums ...int) {
	total := 0
	for _, n := range nums {
		total += n
	}
	fmt.Println(total)
}
// printStuff prints its argument followed by a newline.
func printStuff(stuff string) {
	fmt.Println(stuff)
}
// returnMultipleValues demonstrates multiple return values: it echoes
// blah and returns the fixed values 3 and 7.
func returnMultipleValues(blah int) (int, int, int) {
	const second, third = 3, 7
	return blah, second, third
}
// factorial returns n! computed recursively. Inputs at or below zero
// yield 1; previously a negative n recursed without bound and crashed
// with a stack overflow. Results overflow int for large n.
func factorial(n int) (int) {
	if n <= 0 {
		return 1
	}
	return n * factorial(n-1)
}
// createPerson constructs a person value from name and age.
func createPerson(name string, age int) person {
	return person{name, age}
}
|
package timers
import (
"sync"
"time"
)
// EarlyPeriodicTimer periodically invokes a given task, with the
// option to run the task ahead of schedule via RunNow; a premature run
// resets the period so the next tick is a full period later.
type EarlyPeriodicTimer struct {
	// timerMu guards timer Reset/Stop calls.
	timerMu sync.Mutex
	timer *time.Timer
	period time.Duration
	task func()
	// stopChan, when closed, terminates the run loop.
	stopChan chan struct{}
}
// earlyTimerOpts collects the optional settings applied by
// EarlyTimerOption functions.
type earlyTimerOpts struct {
	stopChan chan struct{}
	runOnStart bool
}
// EarlyTimerOption customizes NewEarlyPeriodicTimer.
type EarlyTimerOption func(*earlyTimerOpts)
// WithStopCh makes the timer stop when stopCh is closed, instead of
// using an internally created channel.
func WithStopCh(stopCh chan struct{}) EarlyTimerOption {
	return func(opts *earlyTimerOpts) {
		opts.stopChan = stopCh
	}
}
// RunOnStart makes the timer invoke the task once immediately when the
// run loop starts, before the first period elapses.
func RunOnStart() EarlyTimerOption {
	return func(opts *earlyTimerOpts) {
		opts.runOnStart = true
	}
}
// NewEarlyPeriodicTimer builds a timer that runs task every period and
// starts its loop goroutine immediately. A nil task panics. Options
// can supply an external stop channel and an immediate first run.
func NewEarlyPeriodicTimer(period time.Duration, task func(), opts ...EarlyTimerOption) *EarlyPeriodicTimer {
	if task == nil {
		panic("Empty task")
	}
	var options earlyTimerOpts
	for _, opt := range opts {
		opt(&options)
	}
	if options.stopChan == nil {
		options.stopChan = make(chan struct{})
	}
	// Create the timer pre-stopped; run() arms it once it starts.
	timer := time.NewTimer(0)
	timer.Stop()
	ret := &EarlyPeriodicTimer{
		timer: timer,
		task: task,
		period: period,
		stopChan: options.stopChan,
	}
	go ret.run(options.runOnStart)
	return ret
}
// startTimer (re)arms the underlying timer for one full period.
func (p *EarlyPeriodicTimer) startTimer() {
	p.timerMu.Lock()
	p.timer.Reset(p.period)
	p.timerMu.Unlock()
}
// stopTimer disarms the underlying timer without touching the loop.
func (p *EarlyPeriodicTimer) stopTimer() {
	p.timerMu.Lock()
	p.timer.Stop()
	p.timerMu.Unlock()
}
// Stop terminates the periodic loop by closing the stop channel.
// NOTE(review): calling Stop twice (or Stop on a shared WithStopCh
// channel already closed elsewhere) panics on double close — confirm
// callers only stop once.
func (p *EarlyPeriodicTimer) Stop() {
	close(p.stopChan)
}
// RunNow runs the task immediately (synchronously in the caller's
// goroutine) and resets the timer so the next periodic run is a full
// period from now.
func (p *EarlyPeriodicTimer) RunNow() {
	p.stopTimer()
	p.task()
	p.startTimer()
}
// run is the timer's loop goroutine: optionally fire once up front,
// then alternate between timer expirations (which run the task and
// re-arm via RunNow) and the stop signal.
func (p *EarlyPeriodicTimer) run(runOnStart bool) {
	if runOnStart {
		p.task()
	}
	p.startTimer()
	for {
		select {
		case <-p.timer.C:
			p.RunNow()
		case <-p.stopChan:
			p.stopTimer()
			return
		}
	}
}
|
package clusterdata
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"reflect"
"testing"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"github.com/Azure/ARO-RP/pkg/api"
"github.com/Azure/ARO-RP/test/util/cmp"
)
// TestServicePrincipalEnricherTask verifies that the enricher reads
// the openshift-config/cloud-provider-config ConfigMap and copies the
// AAD client/tenant IDs into the OpenShiftCluster document, and that
// missing or malformed config surfaces the expected error.
func TestServicePrincipalEnricherTask(t *testing.T) {
	log := logrus.NewEntry(logrus.StandardLogger())
	for _, tt := range []struct {
		name string
		client kubernetes.Interface
		wantOc *api.OpenShiftCluster
		wantErr string
	}{
		{
			name: "config map object exists - valid json",
			client: fake.NewSimpleClientset(&corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name: "cloud-provider-config",
					Namespace: "openshift-config",
				},
				Data: map[string]string{
					"config": `{
	"cloud": "AzurePublicCloud",
	"tenantId": "fake-tenant-id",
	"aadClientId": "fake-client-id",
	"aadClientSecret": "fake-client-secret"
}`,
				},
			}),
			wantOc: &api.OpenShiftCluster{
				Properties: api.OpenShiftClusterProperties{
					ServicePrincipalProfile: api.ServicePrincipalProfile{
						ClientID: "fake-client-id",
						TenantID: "fake-tenant-id",
					},
				},
			},
		},
		{
			name: "config map object exists - invalid json",
			client: fake.NewSimpleClientset(&corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name: "cloud-provider-config",
					Namespace: "openshift-config",
				},
				Data: map[string]string{
					"config": "invalid",
				},
			}),
			wantOc: &api.OpenShiftCluster{},
			wantErr: "invalid character 'i' looking for beginning of value",
		},
		{
			name: `config map object exists - not "config" key`,
			client: fake.NewSimpleClientset(&corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name: "cloud-provider-config",
					Namespace: "openshift-config",
				},
			}),
			wantOc: &api.OpenShiftCluster{},
			wantErr: "unexpected end of JSON input",
		},
		{
			name: "config map object does not exist",
			client: fake.NewSimpleClientset(),
			wantOc: &api.OpenShiftCluster{},
			wantErr: `configmaps "cloud-provider-config" not found`,
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			oc := &api.OpenShiftCluster{}
			e := &servicePrincipalEnricherTask{
				log: log,
				client: tt.client,
				oc: oc,
			}
			e.SetDefaults()
			// FetchData reports either a mutation callback or an error;
			// exactly one of the two channels fires per run.
			callbacks := make(chan func())
			errors := make(chan error)
			go e.FetchData(callbacks, errors)
			select {
			case f := <-callbacks:
				f()
				if !reflect.DeepEqual(oc, tt.wantOc) {
					t.Error(cmp.Diff(oc, tt.wantOc))
				}
			case err := <-errors:
				if tt.wantErr != err.Error() {
					t.Error(err)
				}
			}
		})
	}
}
|
package golify
// golifyIntegerObject is a chainable integer validator: Value carries
// the number under test and Err the first validation failure, which
// short-circuits later checks.
type golifyIntegerObject struct {
	Value int64
	Err *golifyErr
}
// MoreThan validates that the value is strictly greater than min. A
// chain that already failed passes through unchanged; otherwise the
// result carries either no error or the supplied code and message.
func (g golifyIntegerObject) MoreThan(min int64, errCode int, errMsg string) golifyIntegerObject {
	if g.Err != nil {
		return g
	}
	out := golifyIntegerObject{Value: g.Value}
	if g.Value <= min {
		out.Err = &golifyErr{
			ErrCode: errCode,
			ErrMsg:  errMsg,
		}
	}
	return out
}
// LessThan validates that the value is strictly less than max. A chain
// that already failed passes through unchanged; otherwise the result
// carries either no error or the supplied code and message.
func (g golifyIntegerObject) LessThan(max int64, errCode int, errMsg string) golifyIntegerObject {
	if g.Err != nil {
		return g
	}
	out := golifyIntegerObject{Value: g.Value}
	if g.Value >= max {
		out.Err = &golifyErr{
			ErrCode: errCode,
			ErrMsg:  errMsg,
		}
	}
	return out
}
|
package leetcodego
import "testing"
// Test_romanToInt checks one conversion: DCXXI (500+100+10+10+1) = 621.
func Test_romanToInt(t *testing.T) {
	res := romanToInt("DCXXI")
	if res != 621 {
		t.Error(res)
	}
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//416. Partition Equal Subset Sum
//Given a non-empty array containing only positive integers, find if the array can be partitioned into two subsets such that the sum of elements in both subsets is equal.
//Note:
//Each of the array element will not exceed 100.
//The array size will not exceed 200.
//Example 1:
//Input: [1, 5, 11, 5]
//Output: true
//Explanation: The array can be partitioned as [1, 5, 5] and [11].
//Example 2:
//Input: [1, 2, 3, 5]
//Output: false
//Explanation: The array cannot be partitioned into equal sum subsets.
//func canPartition(nums []int) bool {
//}
// Time Is Money
|
package keeper
import (
"github.com/irisnet/irishub/app/v1/asset/internal/types"
"github.com/irisnet/irishub/tests"
"testing"
"github.com/irisnet/irishub/app/v1/auth"
"github.com/irisnet/irishub/app/v1/bank"
"github.com/irisnet/irishub/app/v1/params"
"github.com/irisnet/irishub/codec"
sdk "github.com/irisnet/irishub/types"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
)
// TestAssetAnteHandler tests the ante handler of asset
// TestAssetAnteHandler exercises the asset module's ante handler: it
// builds a keeper over a multistore, funds account balances in stages,
// and asserts the handler aborts until the accumulated balance covers
// every asset-related fee in the tx, while txs containing non-asset
// msgs pass through.
func TestAssetAnteHandler(t *testing.T) {
	ms, accountKey, assetKey, paramskey, paramsTkey := tests.SetupMultiStore()
	cdc := codec.New()
	types.RegisterCodec(cdc)
	auth.RegisterBaseAccount(cdc)
	ctx := sdk.NewContext(ms, abci.Header{}, false, log.NewNopLogger())
	paramsKeeper := params.NewKeeper(cdc, paramskey, paramsTkey)
	ak := auth.NewAccountKeeper(cdc, accountKey, auth.ProtoBaseAccount)
	bk := bank.NewBaseKeeper(cdc, ak)
	keeper := NewKeeper(cdc, assetKey, bk, types.DefaultCodespace, paramsKeeper.Subspace(types.DefaultParamSpace))
	// Init params.
	keeper.Init(ctx)
	// Set test accounts.
	addr1 := sdk.AccAddress([]byte("addr1"))
	addr2 := sdk.AccAddress([]byte("addr2"))
	acc1 := ak.NewAccountWithAddress(ctx, addr1)
	acc2 := ak.NewAccountWithAddress(ctx, addr2)
	// Get asset fees.
	gatewayCreateFee := GetGatewayCreateFee(ctx, keeper, "mon")
	nativeTokenIssueFee := GetTokenIssueFee(ctx, keeper, "sym")
	gatewayTokenIssueFee := GetGatewayTokenIssueFee(ctx, keeper, "sym")
	nativeTokenMintFee := GetTokenMintFee(ctx, keeper, "sym")
	// Construct msgs.
	msgCreateGateway := types.NewMsgCreateGateway(addr1, "mon", "i", "d", "w")
	msgIssueNativeToken := types.MsgIssueToken{Source: types.AssetSource(0x00), Symbol: "sym"}
	msgIssueGatewayToken := types.MsgIssueToken{Source: types.AssetSource(0x02), Symbol: "sym"}
	msgMintNativeToken := types.MsgMintToken{TokenId: "i.sym"}
	msgNonAsset1 := sdk.NewTestMsg(addr1)
	msgNonAsset2 := sdk.NewTestMsg(addr2)
	// Construct test txs: tx1 is asset-only; tx2/tx3 mix in a
	// non-asset msg (mid-list and first, respectively).
	tx1 := auth.StdTx{Msgs: []sdk.Msg{msgCreateGateway, msgIssueNativeToken, msgIssueGatewayToken, msgMintNativeToken}}
	tx2 := auth.StdTx{Msgs: []sdk.Msg{msgCreateGateway, msgIssueNativeToken, msgNonAsset1, msgIssueGatewayToken, msgMintNativeToken}}
	tx3 := auth.StdTx{Msgs: []sdk.Msg{msgNonAsset2, msgCreateGateway, msgIssueNativeToken, msgIssueGatewayToken, msgMintNativeToken}}
	// Set signers and construct an ante handler.
	newCtx := auth.WithSigners(ctx, []auth.Account{acc1, acc2})
	anteHandler := NewAnteHandler(keeper)
	// Balance covers only two of the four fees: expect abort.
	acc1.SetCoins(sdk.Coins{gatewayCreateFee.Add(nativeTokenIssueFee)})
	_, res, abort := anteHandler(newCtx, tx1, false)
	require.Equal(t, true, abort)
	require.Equal(t, false, res.IsOK())
	// Still one fee short: expect abort.
	acc1.SetCoins(acc1.GetCoins().Add(sdk.Coins{gatewayTokenIssueFee}))
	_, res, abort = anteHandler(newCtx, tx1, false)
	require.Equal(t, true, abort)
	require.Equal(t, false, res.IsOK())
	// All four fees covered: expect success.
	acc1.SetCoins(acc1.GetCoins().Add(sdk.Coins{nativeTokenMintFee}))
	_, res, abort = anteHandler(newCtx, tx1, false)
	require.Equal(t, false, abort)
	require.Equal(t, true, res.IsOK())
	// Mixed tx with a non-asset msg in the middle: expect pass-through.
	acc1.SetCoins(sdk.Coins{gatewayCreateFee.Add(nativeTokenIssueFee)})
	_, res, abort = anteHandler(newCtx, tx2, false)
	require.Equal(t, false, abort)
	require.Equal(t, true, res.IsOK())
	// Mixed tx whose first msg is non-asset: expect pass-through.
	acc1.SetCoins(sdk.Coins{})
	_, res, abort = anteHandler(newCtx, tx3, false)
	require.Equal(t, false, abort)
	require.Equal(t, true, res.IsOK())
	// No signers set: expect abort.
	newCtx = auth.WithSigners(ctx, []auth.Account{})
	_, res, abort = anteHandler(newCtx, tx3, false)
	require.Equal(t, true, abort)
	require.Equal(t, false, res.IsOK())
}
|
package main
import (
"testing"
zs "github.com/zerostick/zerostick/daemon"
//_ "github.com/zerostick/zerostick/daemon"
)
// TestWifi verifies that EncryptPassword clears the plaintext password field.
func TestWifi(t *testing.T) {
    w := &zs.Wifi{
        SSID:       "flaf",
        Password:   "flaf",
        Priority:   1,
        UseForSync: false,
    }
    w.EncryptPassword()
    if got := w.Password; got != "" {
        t.Errorf("Password is not empty after EncryptPassword is called: %s", got)
    }
}
// TestWifis verifies that AddWifiToList stores exactly one config in the list.
func TestWifis(t *testing.T) {
    w := &zs.Wifi{
        SSID:       "flaf",
        Password:   "flaf",
        Priority:   1,
        UseForSync: false,
    }
    w.EncryptPassword()
    collection := zs.Wifis{}
    collection.AddWifiToList(*w)
    if got := len(collection.List); got != 1 {
        t.Errorf("Wifis does not have a single wifi config, but contains %+v", collection)
    }
}
// TestGetWpaSupplicantConf verifies the rendered wpa_supplicant.conf for two
// encrypted networks against a golden string.
func TestGetWpaSupplicantConf(t *testing.T) {
    wifi := &zs.Wifi{
        SSID:       "flaf",
        Password:   "flaf",
        Priority:   1,
        UseForSync: false,
    }
    wifi.EncryptPassword()
    wifi2 := &zs.Wifi{
        SSID:       "flaf22",
        Password:   "flaf22",
        Priority:   22,
        UseForSync: false,
    }
    wifi2.EncryptPassword()
    wifis := zs.Wifis{}
    wifis.AddWifiToList(*wifi)
    wifis.AddWifiToList(*wifi2)
    expectedConfig := `ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev
update_config=1
country=US
network={
ssid"flaf"
psk=21ad66ddf9a61afa2a66d9cf233c722e3993b2dd361b5ca1c3456dd7ea9d8ff4
priority=1
}
network={
ssid"flaf22"
psk=6abd60875676ec4c046945a7773f09ce7b8d49b219a514479a49d50e85b74629
priority=22
}
`
    generatedConfig := wifis.GetWpaSupplicantConf()
    if generatedConfig != expectedConfig {
        // BUGFIX: the old failure message printed only the generated config
        // with no label and no expected value, making diffs needlessly hard.
        t.Errorf("wpa_supplicant config mismatch:\ngot:\n%s\nwant:\n%s", generatedConfig, expectedConfig)
    }
}
// TestWifiList verifies that ScanNetworks returns the three mocked networks.
func TestWifiList(t *testing.T) {
    networks, err := zs.ScanNetworks()
    if err != nil {
        t.Errorf("ScanNetwork() failed. Check the mocks. %s", err)
    }
    if got := len(networks); got != 3 {
        t.Errorf("Scan of Wifi return not expected: %+v", networks)
    }
}
|
package retry_test
import (
"sync"
"testing"
"time"
"github.com/hamba/testutils/retry"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
// timeDeltaAllowed is the tolerance (as a float64 nanosecond count) used by
// assert.InDelta when comparing measured durations against expected retry timings.
const timeDeltaAllowed = float64(25 * time.Millisecond)
// TestRun verifies that retry.Run retries a failing function for the default
// period (~5s), logging the failure and calling FailNow exactly once.
func TestRun(t *testing.T) {
    m := new(MockTestingT)
    m.On("Log", []interface{}{"test message"}).Once()
    m.On("FailNow").Once()

    var done sync.WaitGroup
    done.Add(1)
    began := time.Now()
    go func() {
        defer done.Done()
        retry.Run(m, func(st *retry.SubT) {
            st.Fatal("test message")
        })
    }()
    done.Wait()
    elapsed := time.Since(began)

    m.AssertExpectations(t)
    assert.InDelta(t, 5*time.Second, elapsed, timeDeltaAllowed)
}
// TestRunWith_AllowsPassing verifies that a passing function runs once,
// immediately, with no mock expectations triggered.
func TestRunWith_AllowsPassing(t *testing.T) {
    m := new(MockTestingT)

    var done sync.WaitGroup
    attempts := 0
    done.Add(1)
    began := time.Now()
    go func() {
        defer done.Done()
        retry.RunWith(m, retry.NewCounter(3, 10*time.Millisecond), func(st *retry.SubT) {
            attempts++
        })
    }()
    done.Wait()
    elapsed := time.Since(began)

    m.AssertExpectations(t)
    assert.Equal(t, 1, attempts)
    assert.InDelta(t, 0, elapsed, timeDeltaAllowed)
}
// TestRunWith_HandlesFailing verifies that a failing function is retried for
// every counter attempt (3 runs, ~30ms) before failing the test once.
func TestRunWith_HandlesFailing(t *testing.T) {
    m := new(MockTestingT)
    m.On("Log", []interface{}{"test message"}).Once()
    m.On("FailNow").Once()

    var done sync.WaitGroup
    attempts := 0
    done.Add(1)
    began := time.Now()
    go func() {
        defer done.Done()
        retry.RunWith(m, retry.NewCounter(3, 10*time.Millisecond), func(st *retry.SubT) {
            attempts++
            st.Fatal("test message")
        })
    }()
    done.Wait()
    elapsed := time.Since(began)

    m.AssertExpectations(t)
    assert.Equal(t, 3, attempts)
    assert.InDelta(t, 30*time.Millisecond, elapsed, timeDeltaAllowed)
}
// TestRunWith_RunsCleanup verifies that the Cleanup callback registered in a
// failing function fires once per retry attempt.
func TestRunWith_RunsCleanup(t *testing.T) {
    m := new(MockTestingT)
    m.On("FailNow").Once()

    var done sync.WaitGroup
    cleanups := 0
    done.Add(1)
    go func() {
        defer done.Done()
        retry.RunWith(m, retry.NewCounter(3, 10*time.Millisecond), func(st *retry.SubT) {
            st.Cleanup(func() { cleanups++ })
            st.FailNow()
        })
    }()
    done.Wait()

    m.AssertExpectations(t)
    assert.Equal(t, 3, cleanups)
}
// TestCounter_Next verifies a 3-attempt counter yields exactly 3 iterations
// with the configured delay between them (2 waits => ~200ms).
func TestCounter_Next(t *testing.T) {
    counter := retry.NewCounter(3, 100*time.Millisecond)

    attempts := 0
    began := time.Now()
    for counter.Next() {
        attempts++
    }
    elapsed := time.Since(began)

    assert.Equal(t, 3, attempts)
    assert.InDelta(t, 200*time.Millisecond, elapsed, timeDeltaAllowed)
}
// TestTimer_Next verifies a 200ms timer with a 100ms interval yields exactly
// 3 iterations in roughly 200ms.
func TestTimer_Next(t *testing.T) {
    timer := retry.NewTimer(200*time.Millisecond, 100*time.Millisecond)

    attempts := 0
    began := time.Now()
    for timer.Next() {
        attempts++
    }
    elapsed := time.Since(began)

    assert.Equal(t, 3, attempts)
    assert.InDelta(t, 200*time.Millisecond, elapsed, timeDeltaAllowed)
}
// MockTestingT is a testify mock that records calls to Log and FailNow; it is
// passed to retry.Run/RunWith in the tests above in place of a real *testing.T.
type MockTestingT struct {
    mock.Mock
}

// Log records the call and its arguments with the mock framework.
func (m *MockTestingT) Log(args ...interface{}) {
    m.Called(args)
}

// FailNow records the call with the mock framework.
func (m *MockTestingT) FailNow() {
    m.Called()
}
|
// Copyright 2020 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cov
import (
"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"../cause"
"../llvm"
)
// Location describes a single line-column position in a source file.
type Location struct {
    Line, Column int
}

// String returns the location formatted as "line:column".
func (l Location) String() string {
    return fmt.Sprintf("%v:%v", l.Line, l.Column)
}

// Compare returns -1 if l comes before o, 1 if l comes after o, otherwise 0.
func (l Location) Compare(o Location) int {
    switch {
    case l.Line < o.Line:
        return -1
    case l.Line > o.Line:
        return 1
    // BUGFIX: positions on the same line must be ordered by column.
    // Previously two distinct same-line locations compared as equal, making
    // Before() (and therefore Span ordering) ignore columns entirely.
    case l.Column < o.Column:
        return -1
    case l.Column > o.Column:
        return 1
    }
    return 0
}

// Before returns true if l comes before o.
func (l Location) Before(o Location) bool { return l.Compare(o) == -1 }
// Span describes a start and end interval in a source file.
type Span struct {
    Start, End Location
}

// String returns the span formatted as "start-end".
func (s Span) String() string {
    return fmt.Sprintf("%v-%v", s.Start, s.End)
}

// Compare returns -1 if s comes before o, 1 if s comes after o, otherwise 0.
// Spans are ordered primarily by start location, then by end location.
func (s Span) Compare(o Span) int {
    if s.Start.Before(o.Start) {
        return -1
    }
    if o.Start.Before(s.Start) {
        return 1
    }
    if s.End.Before(o.End) {
        return -1
    }
    if o.End.Before(s.End) {
        return 1
    }
    return 0
}

// Before returns true if span s comes before o.
func (s Span) Before(o Span) bool { return s.Compare(o) == -1 }
// File describes the coverage spans in a single source file.
type File struct {
    // Path is the file path relative to the project root directory.
    Path string
    // Spans holds the covered intervals of the file.
    Spans []Span
}
// Coverage describes the coverage spans for all the source files for a single
// process invocation.
type Coverage struct {
    Files []File
}
// Env holds the environment settings for performing coverage processing.
type Env struct {
    LLVM     llvm.Toolchain
    RootDir  string // path to SwiftShader git root directory
    ExePath  string // path to the executable binary
    TurboCov string // path to turbo-cov (optional)
}
// AppendRuntimeEnv returns the environment variables env with the
// LLVM_PROFILE_FILE environment variable appended.
func AppendRuntimeEnv(env []string, coverageFile string) []string {
    profileVar := "LLVM_PROFILE_FILE=" + coverageFile
    return append(env, profileVar)
}
// Import uses the llvm-profdata and llvm-cov tools to import the coverage
// information from a .profraw file.
//
// The .profraw is first merged into an indexed .profdata file (deleted on
// return). If TurboCov is unset, llvm-cov exports JSON that is parsed by
// parseCov; otherwise the turbo-cov binary's output is parsed by parseTurboCov.
func (e Env) Import(profrawPath string) (*Coverage, error) {
    profdata := profrawPath + ".profdata"
    if err := exec.Command(e.LLVM.Profdata(), "merge", "-sparse", profrawPath, "-output", profdata).Run(); err != nil {
        return nil, cause.Wrap(err, "llvm-profdata errored")
    }
    defer os.Remove(profdata)
    if e.TurboCov == "" {
        args := []string{
            "export",
            e.ExePath,
            "-instr-profile=" + profdata,
            "-format=text",
        }
        if e.LLVM.Version.GreaterEqual(llvm.Version{Major: 9}) {
            // LLVM 9 has new flags that omit stuff we don't care about.
            args = append(args,
                "-skip-expansions",
                "-skip-functions",
            )
        }
        data, err := exec.Command(e.LLVM.Cov(), args...).Output()
        if err != nil {
            // NOTE(review): the assertion assumes Output() failed with an
            // *exec.ExitError; a failure to start the binary would panic
            // here instead — confirm this is acceptable.
            return nil, cause.Wrap(err, "llvm-cov errored: %v", string(err.(*exec.ExitError).Stderr))
        }
        cov, err := e.parseCov(data)
        if err != nil {
            return nil, cause.Wrap(err, "Couldn't parse coverage json data")
        }
        return cov, nil
    }
    data, err := exec.Command(e.TurboCov, e.ExePath, profdata).Output()
    if err != nil {
        return nil, cause.Wrap(err, "turbo-cov errored: %v", string(err.(*exec.ExitError).Stderr))
    }
    cov, err := e.parseTurboCov(data)
    if err != nil {
        return nil, cause.Wrap(err, "Couldn't process turbo-cov output")
    }
    return cov, nil
}
// https://clang.llvm.org/docs/SourceBasedCodeCoverage.html
// https://stackoverflow.com/a/56792192
//
// parseCov parses the llvm-cov JSON export into a Coverage. Files outside
// e.RootDir are skipped, and adjacent covered segments are merged into
// single spans.
func (e Env) parseCov(raw []byte) (*Coverage, error) {
    // line int, col int, count int64, hasCount bool, isRegionEntry bool
    type segment []interface{}
    type file struct {
        // expansions ignored
        Name     string    `json:"filename"`
        Segments []segment `json:"segments"`
        // summary ignored
    }
    type data struct {
        Files []file `json:"files"`
    }
    root := struct {
        Data []data `json:"data"`
    }{}
    err := json.NewDecoder(bytes.NewReader(raw)).Decode(&root)
    if err != nil {
        return nil, err
    }
    // BUGFIX: guard against an empty export — indexing root.Data[0] below
    // would otherwise panic on malformed or empty llvm-cov output.
    if len(root.Data) == 0 {
        return nil, fmt.Errorf("coverage export contained no data entries")
    }
    c := &Coverage{Files: make([]File, 0, len(root.Data[0].Files))}
    for _, f := range root.Data[0].Files {
        relpath, err := filepath.Rel(e.RootDir, f.Name)
        if err != nil {
            return nil, err
        }
        // Skip files that live outside the repository root.
        if strings.HasPrefix(relpath, "..") {
            continue
        }
        file := File{Path: relpath}
        // Each segment covers the range up to the next segment's location;
        // contiguous covered ranges are merged into a single span.
        for sIdx := 0; sIdx+1 < len(f.Segments); sIdx++ {
            start := Location{(int)(f.Segments[sIdx][0].(float64)), (int)(f.Segments[sIdx][1].(float64))}
            end := Location{(int)(f.Segments[sIdx+1][0].(float64)), (int)(f.Segments[sIdx+1][1].(float64))}
            covered := f.Segments[sIdx][2].(float64) != 0
            if covered {
                if c := len(file.Spans); c > 0 && file.Spans[c-1].End == start {
                    file.Spans[c-1].End = end
                } else {
                    file.Spans = append(file.Spans, Span{start, end})
                }
            }
        }
        if len(file.Spans) > 0 {
            c.Files = append(c.Files, file)
        }
    }
    return c, nil
}
// parseTurboCov parses the binary output of the turbo-cov tool into a
// Coverage. The format is little-endian:
//
//	u32 numFiles, then per file:
//	  u32 pathLen, pathLen bytes of path,
//	  u32 numSegments, then per segment: u32 line, u32 column, u32 count, u8 covered
//
// Files outside e.RootDir are skipped and adjacent covered segments are
// merged into single spans. Truncated input will panic on slicing; the
// turbo-cov output is trusted to be well-formed.
func (e Env) parseTurboCov(data []byte) (*Coverage, error) {
    u32 := func() uint32 {
        out := binary.LittleEndian.Uint32(data)
        data = data[4:]
        return out
    }
    u8 := func() uint8 {
        out := data[0]
        data = data[1:]
        return out
    }
    str := func() string {
        // FIX: renamed the local from `len` — it shadowed the built-in len.
        n := u32()
        out := data[:n]
        data = data[n:]
        return string(out)
    }
    numFiles := u32()
    c := &Coverage{Files: make([]File, 0, numFiles)}
    for i := 0; i < int(numFiles); i++ {
        path := str()
        relpath, err := filepath.Rel(e.RootDir, path)
        if err != nil {
            return nil, err
        }
        // Skip files that live outside the repository root.
        if strings.HasPrefix(relpath, "..") {
            continue
        }
        file := File{Path: relpath}
        type segment struct {
            location Location
            count    int
            covered  bool
        }
        numSegments := u32()
        segments := make([]segment, numSegments)
        for j := range segments {
            segment := &segments[j]
            segment.location.Line = int(u32())
            segment.location.Column = int(u32())
            segment.count = int(u32())
            segment.covered = u8() != 0
        }
        // Each segment covers the range up to the next segment's location;
        // contiguous covered ranges are merged into a single span.
        for sIdx := 0; sIdx+1 < len(segments); sIdx++ {
            start := segments[sIdx].location
            end := segments[sIdx+1].location
            if segments[sIdx].count > 0 {
                if c := len(file.Spans); c > 0 && file.Spans[c-1].End == start {
                    file.Spans[c-1].End = end
                } else {
                    file.Spans = append(file.Spans, Span{start, end})
                }
            }
        }
        if len(file.Spans) > 0 {
            c.Files = append(c.Files, file)
        }
    }
    return c, nil
}
// Path is a tree node path formed from a list of strings.
type Path []string
|
package cate
import (
"MI/models"
"MI/pkg/logger"
"MI/service/cate"
"MI/utils/response"
"github.com/gin-gonic/gin"
"strconv"
)
// Cate handles the category listing endpoint. The is_nav query parameter
// selects the response shape:
//
//	0 - recursive category tree for the nav bar
//	1 - header-nav categories, each preloaded with up to 6 products
//	2 - all categories with is_nav > 0 (second-level listing)
func Cate(c *gin.Context) {
    isNav := c.Query("is_nav")
    if isNav == "" {
        response.RespError(c, "参数不能为空")
        return
    }
    navType, err := strconv.Atoi(isNav)
    if err != nil {
        // BUGFIX: a non-numeric is_nav was previously only logged and then
        // silently handled as case 0; reject it explicitly instead.
        logger.Logger.Info("类型转化失败:", err)
        response.RespError(c, "参数错误")
        return
    }
    var list []models.Categories
    switch navType {
    case 0:
        // Recursive category tree for the nav bar.
        list = cate.CateTree(0)
    case 1:
        // Header-nav categories; attach up to 6 products to each category.
        if list, err = models.GetCategoriesBy("is_nav=?", navType); err == nil {
            for i, v := range list {
                product, _ := models.GetAllProductBy(1, 6, "cid=?", int(v.Id))
                list[i].Product = product
            }
        }
    case 2:
        // All second-level categories.
        list, err = models.GetCategoriesBy("is_nav > ?", 0)
        if err != nil {
            logger.Logger.Error("select cate err:", err)
            // BUGFIX: previously returned without writing any response,
            // leaving the client hanging on a database error.
            response.RespError(c, "查询失败")
            return
        }
    }
    response.RespData(c, "", list)
}
|
package persist
import (
"fmt"
"log"
"github.com/m-o-s-e-s/mgm/mgm"
)
// queryJobs loads every row of the jobs table into a slice of mgm.Job.
// On a row-read error it logs via the instance logger and returns whatever
// was collected so far.
func (m mgmDB) queryJobs() []mgm.Job {
    var jobs []mgm.Job
    con, err := m.db.GetConnection()
    if err != nil {
        errMsg := fmt.Sprintf("Error connecting to database: %v", err.Error())
        // NOTE(review): this uses the stdlib log.Fatal (which exits the
        // process) while every other error path uses m.log.Error; the
        // `return jobs` below is therefore unreachable — confirm whether a
        // connection failure is really meant to be fatal.
        log.Fatal(errMsg)
        return jobs
    }
    defer con.Close()
    rows, err := con.Query("Select * from jobs")
    if err != nil {
        errMsg := fmt.Sprintf("Error reading jobs: %v", err.Error())
        m.log.Error(errMsg)
        return jobs
    }
    defer rows.Close()
    for rows.Next() {
        j := mgm.Job{}
        // Column order must match the jobs table schema.
        err = rows.Scan(
            &j.ID,
            &j.Timestamp,
            &j.Type,
            &j.User,
            &j.Data,
        )
        if err != nil {
            errMsg := fmt.Sprintf("Error reading jobs: %v", err.Error())
            m.log.Error(errMsg)
            return jobs
        }
        jobs = append(jobs, j)
    }
    return jobs
}
// GetJobs requests the full job list over the database request channel and
// drains results until the result channel is closed.
func (m mgmDB) GetJobs() []mgm.Job {
    req := mgmReq{
        request: "GetJobs",
        result:  make(chan interface{}, 64),
    }
    m.reqs <- req

    var jobs []mgm.Job
    for {
        item, ok := <-req.result
        if !ok {
            return jobs
        }
        jobs = append(jobs, item.(mgm.Job))
    }
}
// UpdateJob queues an update request for the given job.
func (m mgmDB) UpdateJob(j mgm.Job) {
    m.reqs <- mgmReq{request: "UpdateJob", object: j}
}
// RemoveJob queues a removal request for the given job.
func (m mgmDB) RemoveJob(j mgm.Job) {
    m.reqs <- mgmReq{request: "RemoveJob", object: j}
}
|
package service_test
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"sync"
"testing"
"time"
"github.com/ONSdigital/dp-api-clients-go/v2/health"
"github.com/ONSdigital/dp-healthcheck/healthcheck"
"github.com/ONSdigital/florence/config"
"github.com/ONSdigital/florence/service"
"github.com/ONSdigital/florence/service/mock"
serviceMock "github.com/ONSdigital/florence/service/mock"
"github.com/gorilla/mux"
"github.com/pkg/errors"
. "github.com/smartystreets/goconvey/convey"
)
// Shared fixtures for the service tests below.
var (
    ctx           = context.Background()
    testBuildTime = "BuildTime"
    testGitCommit = "GitCommit"
    testVersion   = "Version"
    errServer     = errors.New("HTTP Server error")
)

var (
    errHealthcheck = errors.New("healthCheck error")
)

// funcDoGetHealthcheckErr simulates a healthcheck initialiser that always fails.
var funcDoGetHealthcheckErr = func(cfg *config.Config, buildTime string, gitCommit string, version string) (service.HealthChecker, error) {
    return nil, errHealthcheck
}

// funcDoGetHTTPServerNil simulates an HTTP-server getter that returns no server.
var funcDoGetHTTPServerNil = func(bindAddr string, router http.Handler) service.HTTPServer {
    return nil
}
// TestRun exercises service.Run against mocked dependencies, covering
// healthcheck-init failure, checker-registration failure, the fully
// successful path, and an HTTP server that errors after startup.
func TestRun(t *testing.T) {
    Convey("Having a set of mocked dependencies", t, func() {
        cfg, err := config.Get()
        So(err, ShouldBeNil)
        hcMock := &serviceMock.HealthCheckerMock{
            AddCheckFunc: func(name string, checker healthcheck.Checker) error { return nil },
            StartFunc:    func(ctx context.Context) {},
        }
        // serverWg lets the test wait for the ListenAndServe goroutine
        // spawned by Run to actually execute before asserting on it.
        serverWg := &sync.WaitGroup{}
        serverMock := &serviceMock.HTTPServerMock{
            ListenAndServeFunc: func() error {
                serverWg.Done()
                return nil
            },
        }
        failingServerMock := &serviceMock.HTTPServerMock{
            ListenAndServeFunc: func() error {
                serverWg.Done()
                return errServer
            },
        }
        funcDoGetHealthcheckOk := func(cfg *config.Config, buildTime string, gitCommit string, version string) (service.HealthChecker, error) {
            return hcMock, nil
        }
        funcDoGetHTTPServer := func(bindAddr string, router http.Handler) service.HTTPServer {
            return serverMock
        }
        funcDoGetFailingHTTPSerer := func(bindAddr string, router http.Handler) service.HTTPServer {
            return failingServerMock
        }
        funcDoGetHealthClientOk := func(name string, url string) *health.Client {
            return &health.Client{
                URL:  url,
                Name: name,
            }
        }
        // funcHasRoute reports whether router r would match a request for
        // method+path, filling match with the matched route details.
        funcHasRoute := func(r *mux.Router, method, path string, match *mux.RouteMatch) bool {
            req := httptest.NewRequest(method, path, nil)
            return r.Match(req, match)
        }
        Convey("Given that initialising Healthcheck returns an error", func() {
            initMock := &serviceMock.InitialiserMock{
                DoGetHealthClientFunc: funcDoGetHealthClientOk,
                DoGetHealthCheckFunc:  funcDoGetHealthcheckErr,
            }
            svcErrors := make(chan error, 1)
            svcList := service.NewServiceList(initMock)
            _, err := service.Run(ctx, cfg, svcList, testBuildTime, testGitCommit, testVersion, svcErrors)
            Convey("Then service Run fails with the same error and the flag is not set. No further initialisations are attempted", func() {
                So(err, ShouldResemble, errHealthcheck)
                So(svcList.HealthCheck, ShouldBeFalse)
            })
        })
        Convey("Given that Checkers cannot be registered", func() {
            errAddheckFail := errors.New("Error(s) registering checkers for healthcheck")
            hcMockAddFail := &serviceMock.HealthCheckerMock{
                AddCheckFunc: func(name string, checker healthcheck.Checker) error { return errAddheckFail },
                StartFunc:    func(ctx context.Context) {},
            }
            initMock := &serviceMock.InitialiserMock{
                DoGetHealthClientFunc: funcDoGetHealthClientOk,
                DoGetHealthCheckFunc: func(cfg *config.Config, buildTime string, gitCommit string, version string) (service.HealthChecker, error) {
                    return hcMockAddFail, nil
                },
            }
            svcErrors := make(chan error, 1)
            svcList := service.NewServiceList(initMock)
            _, err := service.Run(ctx, cfg, svcList, testBuildTime, testGitCommit, testVersion, svcErrors)
            Convey("Then service Run fails, but all checks try to register", func() {
                So(err, ShouldNotBeNil)
                So(err.Error(), ShouldResemble, fmt.Sprintf("unable to register checkers: %s", errAddheckFail.Error()))
                So(svcList.HealthCheck, ShouldBeTrue)
                So(len(hcMockAddFail.AddCheckCalls()), ShouldEqual, 1)
                So(hcMockAddFail.AddCheckCalls()[0].Name, ShouldResemble, "API router")
            })
        })
        Convey("Given that all dependencies are successfully initialised", func() {
            initMock := &serviceMock.InitialiserMock{
                DoGetHealthClientFunc: funcDoGetHealthClientOk,
                DoGetHealthCheckFunc:  funcDoGetHealthcheckOk,
                DoGetHTTPServerFunc:   funcDoGetHTTPServer,
            }
            svcErrors := make(chan error, 1)
            svcList := service.NewServiceList(initMock)
            serverWg.Add(1)
            match := &mux.RouteMatch{}
            s, err := service.Run(ctx, cfg, svcList, testBuildTime, testGitCommit, testVersion, svcErrors)
            Convey("Then service Run succeeds and all the flags are set", func() {
                So(err, ShouldBeNil)
                So(svcList.HealthCheck, ShouldBeTrue)
            })
            Convey("And the following route should have been added", func() {
                So(funcHasRoute(s.Router, "GET", "/health", match), ShouldBeTrue)
                So(match.Handler, ShouldEqual, s.HealthCheck.Handler)
            })
            Convey("The checkers are registered and the healthcheck and http server started", func() {
                So(len(hcMock.AddCheckCalls()), ShouldEqual, 1)
                So(hcMock.AddCheckCalls()[0].Name, ShouldResemble, "API router")
                So(len(initMock.DoGetHTTPServerCalls()), ShouldEqual, 1)
                So(initMock.DoGetHTTPServerCalls()[0].BindAddr, ShouldEqual, ":8080")
                So(len(hcMock.StartCalls()), ShouldEqual, 1)
                serverWg.Wait() // Wait for HTTP server go-routine to finish
                So(len(serverMock.ListenAndServeCalls()), ShouldEqual, 1)
            })
        })
        Convey("Given that all dependencies are successfully initialised but the http server fails", func() {
            initMock := &serviceMock.InitialiserMock{
                DoGetHealthClientFunc: funcDoGetHealthClientOk,
                DoGetHealthCheckFunc:  funcDoGetHealthcheckOk,
                DoGetHTTPServerFunc:   funcDoGetFailingHTTPSerer,
            }
            svcErrors := make(chan error, 1)
            svcList := service.NewServiceList(initMock)
            serverWg.Add(1)
            _, err := service.Run(ctx, cfg, svcList, testBuildTime, testGitCommit, testVersion, svcErrors)
            So(err, ShouldBeNil)
            Convey("Then the error is returned in the error channel", func() {
                sErr := <-svcErrors
                So(sErr.Error(), ShouldResemble, fmt.Sprintf("failure in http listen and serve: %s", errServer.Error()))
                So(len(failingServerMock.ListenAndServeCalls()), ShouldEqual, 1)
            })
        })
    })
}
// TestClose verifies the graceful-shutdown ordering of service.Close: the
// healthcheck must stop before the HTTP server shuts down, shutdown errors
// are surfaced, and a slow shutdown hits the configured timeout.
func TestClose(t *testing.T) {
    Convey("Having a correctly initialised service", t, func() {
        cfg, err := config.Get()
        So(err, ShouldBeNil)
        hcStopped := false
        // healthcheck Stop does not depend on any other service being closed/stopped
        hcMock := &serviceMock.HealthCheckerMock{
            AddCheckFunc: func(name string, checker healthcheck.Checker) error { return nil },
            StartFunc:    func(ctx context.Context) {},
            StopFunc:     func() { hcStopped = true },
        }
        // server Shutdown will fail if healthcheck is not stopped
        serverMock := &serviceMock.HTTPServerMock{
            ListenAndServeFunc: func() error { return nil },
            ShutdownFunc: func(ctx context.Context) error {
                if !hcStopped {
                    return errors.New("Server stopped before healthcheck")
                }
                return nil
            },
        }
        Convey("Closing the service results in all the dependencies being closed in the expected order", func() {
            svcList := service.NewServiceList(nil)
            svcList.HealthCheck = true
            svc := service.Service{
                Config:      cfg,
                ServiceList: svcList,
                Server:      serverMock,
                HealthCheck: hcMock,
            }
            err = svc.Close(context.Background())
            So(err, ShouldBeNil)
            So(len(hcMock.StopCalls()), ShouldEqual, 1)
            So(len(serverMock.ShutdownCalls()), ShouldEqual, 1)
        })
        Convey("If services fail to stop, the Close operation tries to close all dependencies and returns an error", func() {
            failingserverMock := &serviceMock.HTTPServerMock{
                ListenAndServeFunc: func() error { return nil },
                ShutdownFunc: func(ctx context.Context) error {
                    return errors.New("Failed to stop http server")
                },
            }
            svcList := service.NewServiceList(nil)
            svcList.HealthCheck = true
            svc := service.Service{
                Config:      cfg,
                ServiceList: svcList,
                Server:      failingserverMock,
                HealthCheck: hcMock,
            }
            err = svc.Close(context.Background())
            So(err, ShouldNotBeNil)
            So(err.Error(), ShouldResemble, "failed to shutdown gracefully")
            So(len(hcMock.StopCalls()), ShouldEqual, 1)
            So(len(failingserverMock.ShutdownCalls()), ShouldEqual, 1)
        })
        Convey("If service times out while shutting down, the Close operation fails with the expected error", func() {
            cfg.GracefulShutdownTimeout = 100 * time.Millisecond
            // Shutdown sleeps longer than the graceful timeout to force the
            // context-deadline path.
            timeoutServerMock := &mock.HTTPServerMock{
                ListenAndServeFunc: func() error { return nil },
                ShutdownFunc: func(ctx context.Context) error {
                    time.Sleep(200 * time.Millisecond)
                    return nil
                },
            }
            svcList := service.NewServiceList(nil)
            svcList.HealthCheck = true
            svc := service.Service{
                Config:      cfg,
                ServiceList: svcList,
                Server:      timeoutServerMock,
                HealthCheck: hcMock,
            }
            err = svc.Close(context.Background())
            So(err, ShouldNotBeNil)
            So(err.Error(), ShouldResemble, "context deadline exceeded")
            So(len(hcMock.StopCalls()), ShouldEqual, 1)
            So(len(timeoutServerMock.ShutdownCalls()), ShouldEqual, 1)
        })
    })
}
|
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package imds
import (
"net"
"syscall"
"github.com/aws/amazon-vpc-cni-plugins/network/vpc"
log "github.com/cihub/seelog"
"github.com/vishvananda/netlink"
)
// BlockInstanceMetadataEndpoint adds a blackhole rule for IMDS endpoint.
// BlockInstanceMetadataEndpoint adds a blackhole route for each IMDS endpoint,
// preventing traffic from reaching the instance metadata service.
func BlockInstanceMetadataEndpoint() error {
    for _, endpoint := range vpc.InstanceMetadataEndpoints {
        log.Infof("Adding route to block instance metadata endpoint %s", endpoint)
        _, network, err := net.ParseCIDR(endpoint)
        if err != nil {
            // This should never happen as these IP addresses are hardcoded.
            log.Errorf("Unable to parse instance metadata endpoint %s", endpoint)
            return err
        }
        blackhole := &netlink.Route{
            Dst:  network,
            Type: syscall.RTN_BLACKHOLE,
        }
        if err := netlink.RouteAdd(blackhole); err != nil {
            log.Errorf("Unable to add route to block instance metadata: %v", err)
            return err
        }
    }
    return nil
}
|
package utils
import (
"github.com/astaxie/beego/orm"
"go_blog/models"
)
// GetAllTableNames lists the database table names managed by this package.
func GetAllTableNames() []string {
    names := []string{"blog", "category"}
    return names
}
// GetAllBlogs returns every row of the blog table.
func GetAllBlogs() ([]models.Blog, error) {
    o := orm.NewOrm()
    var blogs []models.Blog
    _, err := o.QueryTable("blog").All(&blogs)
    // Idiom fix: early return instead of else-after-return.
    if err != nil {
        return nil, err
    }
    return blogs, nil
}
// GetAllBlogsWithCategorys returns every blog with its Categorys relation loaded.
func GetAllBlogsWithCategorys() ([]models.Blog, error) {
    blogs, err := GetAllBlogs()
    if err != nil {
        return blogs, err
    }
    for index, blog := range blogs {
        blogWithCategorys, err := GetBlogWithCategorys("id", blog.Id)
        if err != nil {
            // BUGFIX: the error was previously discarded with `_`, and the
            // returned nil pointer was dereferenced, panicking on any
            // per-blog lookup failure.
            return nil, err
        }
        blogs[index] = *blogWithCategorys
    }
    return blogs, nil
}
// GetAllCategorys returns every row of the category table.
func GetAllCategorys() ([]models.Category, error) {
    o := orm.NewOrm()
    var categorys []models.Category
    _, err := o.QueryTable("category").All(&categorys)
    // Idiom fix: early return instead of else-after-return.
    if err != nil {
        return nil, err
    }
    return categorys, nil
}
// GetBlog fetches the single blog row where fieldName equals fieldValue.
func GetBlog(fieldName string, fieldValue interface{}) (*models.Blog, error) {
    blog := models.Blog{}
    err := orm.NewOrm().QueryTable("blog").Filter(fieldName, fieldValue).One(&blog)
    return &blog, err
}
// GetBlogWithCategorys fetches a blog by field and loads its Categorys relation.
func GetBlogWithCategorys(fieldName string, fieldValue interface{}) (*models.Blog, error) {
    o := orm.NewOrm()
    blog, err := GetBlog(fieldName, fieldValue)
    if err != nil {
        return nil, err
    }
    _, err = o.LoadRelated(blog, "Categorys")
    return blog, err
}
// GetCategory fetches the single category row where fieldName equals fieldValue.
func GetCategory(fieldName string, fieldValue interface{}) (*models.Category, error) {
    category := models.Category{}
    err := orm.NewOrm().QueryTable("category").Filter(fieldName, fieldValue).One(&category)
    return &category, err
}
// CreateBlog inserts a new blog row and returns its generated id.
func CreateBlog(blog models.Blog) (int64, error) {
    return orm.NewOrm().Insert(&blog)
}
// CreateBlogWithCategorys inserts a blog and, if categorys is non-nil, links
// the given categories to it via the Categorys m2m relation.
// Returns the new blog id, or -1 and the error on failure.
func CreateBlogWithCategorys(blog models.Blog, categorys []*models.Category) (int64, error) {
    o := orm.NewOrm()
    // todo Should create a transaction instead of this
    // insert blog
    id, err := o.Insert(&blog)
    if err != nil {
        // BUGFIX: the insert error was previously ignored, so the m2m Add
        // below ran against a blog row that was never created.
        return -1, err
    }
    if categorys != nil {
        // insert m2m
        m2m := o.QueryM2M(&blog, "Categorys")
        if _, err = m2m.Add(categorys); err != nil {
            return -1, err
        }
    }
    return id, nil
}
// CreateCategory inserts a new category row and returns its generated id.
func CreateCategory(category models.Category) (int64, error) {
    return orm.NewOrm().Insert(&category)
}
// UpdateBlog persists the given blog's current field values.
func UpdateBlog(blog models.Blog) error {
    _, err := orm.NewOrm().Update(&blog)
    return err
}
// UpdateBlogWithCategory replaces the blog's category links with categorys
// and then persists the blog itself. The sequence is load-existing ->
// remove-old -> add-new -> update, and is not atomic (see todo).
func UpdateBlogWithCategory(blog models.Blog, categorys []*models.Category) error {
    o := orm.NewOrm()
    // todo Should create a transaction instead of this
    // delete and update m2m
    m2m := o.QueryM2M(&blog, "Categorys")
    // query exist m2m: populates blog.Categorys so the removal below knows
    // which links currently exist.
    if _, err := o.LoadRelated(&blog, "Categorys"); err != nil {
        return err
    }
    // delete old m2m
    if len(blog.Categorys) != 0 {
        _, err := m2m.Remove(blog.Categorys)
        if err != nil {
            return err
        }
    }
    // add new m2m
    _, err := m2m.Add(categorys)
    if err != nil {
        return err
    }
    // update blog
    _, err = o.Update(&blog)
    return err
}
// UpdateCategory persists the given category's current field values.
func UpdateCategory(category models.Category) error {
    _, err := orm.NewOrm().Update(&category)
    return err
}
// DeleteBlog removes the blog row with the given id.
func DeleteBlog(id int) error {
    _, err := orm.NewOrm().Delete(&models.Blog{Id: id})
    return err
}
// DeleteCategory removes the category row with the given id.
func DeleteCategory(id int) error {
    _, err := orm.NewOrm().Delete(&models.Category{Id: id})
    return err
}
// SearchBlog returns up to limit blogs whose title or content contains the
// search term (case-insensitive).
func SearchBlog(search string, limit int) ([]models.Blog, error) {
    condition := orm.NewCondition().
        And("title__icontains", search).
        Or("content__icontains", search)
    var blogs []models.Blog
    _, err := orm.NewOrm().QueryTable("blog").SetCond(condition).Limit(limit).All(&blogs)
    return blogs, err
}
|
package install
import (
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
)
// toAttributesSet converts the given user, namespace, and PolicyRule into a set of Attributes expected. This is useful for checking
// if a composed set of Roles/RoleBindings satisfies a PolicyRule.
// toAttributesSet converts the given user, namespace, and PolicyRule into a set of Attributes expected. This is useful for checking
// if a composed set of Roles/RoleBindings satisfies a PolicyRule.
func toAttributesSet(user user.Info, namespace string, rule rbacv1.PolicyRule) []authorizer.Attributes {
    // Substitute a single empty string for any empty dimension so the
    // cartesian product below always yields at least one record.
    orEmpty := func(values []string) []string {
        if len(values) == 0 {
            return []string{""}
        }
        return values
    }
    groups := orEmpty(rule.APIGroups)
    resources := orEmpty(rule.Resources)
    names := orEmpty(rule.ResourceNames)
    nonResourceURLs := orEmpty(rule.NonResourceURLs)

    // Deduplicate via a set keyed on the full record.
    set := map[authorizer.AttributesRecord]struct{}{}
    for _, verb := range rule.Verbs {
        for _, group := range groups {
            for _, resource := range resources {
                for _, name := range names {
                    for _, nonResourceURL := range nonResourceURLs {
                        set[attributesRecord(user, namespace, verb, group, resource, name, nonResourceURL)] = struct{}{}
                    }
                }
            }
        }
    }

    attributes := make([]authorizer.Attributes, 0, len(set))
    for attribute := range set {
        attributes = append(attributes, attribute)
    }
    log.Debugf("attributes set %+v", attributes)
    return attributes
}
// attribute creates a new AttributesRecord with the given info. Currently RBAC authz only looks at user, verb, apiGroup, resource, and name.
func attributesRecord(user user.Info, namespace, verb, apiGroup, resource, name, path string) authorizer.AttributesRecord {
resourceRequest := path == ""
return authorizer.AttributesRecord{
User: user,
Verb: verb,
Namespace: namespace,
APIGroup: apiGroup,
Resource: resource,
Name: name,
ResourceRequest: resourceRequest,
Path: path,
}
}
// toDefaultInfo builds the user.Info for a service account from its
// namespace, name, and UID.
func toDefaultInfo(sa *corev1.ServiceAccount) *user.DefaultInfo {
    // TODO(Nick): add Group if necessary
    username := serviceaccount.MakeUsername(sa.GetNamespace(), sa.GetName())
    return &user.DefaultInfo{
        Name: username,
        UID:  string(sa.GetUID()),
    }
}
|
package streams
import (
"encoding/json"
"fmt"
"strconv"
)
// orderDataRaw mirrors the wire format, where every numeric field arrives as
// a string.
type orderDataRaw struct {
    Rate    string `json:"rate"`
    Type    string `json:"type"`
    Amount  string `json:"amount"`
    TradeID string `json:"tradeID"`
    Date    string `json:"date"`
    Total   string `json:"total"`
}

// OrderData is the parsed form of an order event, with numeric fields
// converted to float64.
type OrderData struct {
    Rate    float64
    Type    string
    Amount  float64
    TradeID string
    Date    string
    Total   float64
}

// UnmarshalJSON decodes the raw wire format and parses the string-encoded
// numeric fields. Fields absent (empty) in the input are left untouched.
func (od *OrderData) UnmarshalJSON(b []byte) error {
    var raw orderDataRaw
    err := json.Unmarshal(b, &raw)
    if err != nil {
        return err
    }
    od.Type = raw.Type
    if raw.TradeID != "" {
        od.TradeID = raw.TradeID
    }
    if raw.Date != "" {
        od.Date = raw.Date
    }
    if raw.Rate != "" {
        od.Rate, err = strconv.ParseFloat(raw.Rate, 64)
        if err != nil {
            // Idiom fix: lowercase error text, and %w so callers can unwrap
            // the underlying *strconv.NumError.
            return fmt.Errorf("could not parse orderDataRaw.Rate as float64: %w", err)
        }
    }
    if raw.Amount != "" {
        od.Amount, err = strconv.ParseFloat(raw.Amount, 64)
        if err != nil {
            return fmt.Errorf("could not parse orderDataRaw.Amount as float64: %w", err)
        }
    }
    if raw.Total != "" {
        od.Total, err = strconv.ParseFloat(raw.Total, 64)
        if err != nil {
            return fmt.Errorf("could not parse orderDataRaw.Total as float64: %w", err)
        }
    }
    return nil
}
|
package httpModel
import (
"github.com/astaxie/beego/orm"
"strconv"
"tokensky_bg_admin/models"
"tokensky_bg_admin/utils"
)
const (
    // FLOAT_PRECISE_8 is the permitted precision tolerance when reducing
    // balance amounts (8 decimal places).
    FLOAT_PRECISE_8 float64 = 0.00000001
    // FLOAT_NUM_8 is the number of decimal places kept for balances.
    FLOAT_NUM_8 int = 8
    // TODO: validation of permitted currency types is noted as missing here.
)
var (
    // acceptSymbol caches asset symbols that have already been validated
    // against the database (see BalanceChange.Check), so each symbol is
    // only looked up once.
    acceptSymbol map[string]struct{}
)

func init() {
    acceptSymbol = make(map[string]struct{})
}
// BalanceChange describes a requested change to a user's asset balances.
type BalanceChange struct {
    Uid int `json:"uid"`
    // Symbol is the currency/asset type.
    Symbol string `json:"symbol"`
    // MethodBalance is the operation on the available balance:
    // add, sub, mul, or qup (divide).
    MethodBalance string `json:"methodBalance"`
    Balance       string `json:"balance"`
    // balance is the parsed numeric form of Balance, populated by Check.
    balance float64 `json:"-"`
    // MethodFrozenBalance is the operation on the frozen balance
    // (same verbs as MethodBalance).
    MethodFrozenBalance string `json:"methodFrozenBalance"`
    FrozenBalance       string `json:"frozenBalance"`
    // frozenBalance is the parsed numeric form of FrozenBalance, populated by Check.
    frozenBalance float64 `json:"-"`
    SignId        string  `json:"signId"`
}
// Check validates the requested balance change and, as a side effect, parses
// the decimal strings into the private balance/frozenBalance fields.
// It returns (true, "ok") only when at least one of Balance/FrozenBalance is
// supplied and everything validates; otherwise false plus a reason string.
func (this *BalanceChange) Check() (bool, string) {
    // At least one of the two operations must be requested.
    if this.MethodBalance == "" && this.MethodFrozenBalance == "" {
        return false, "操作空"
    }
    if this.Uid <= 0 {
        return false, "uid空"
    }
    // Validate the asset symbol, caching known-good symbols in acceptSymbol
    // so the database is consulted only once per symbol.
    var err error
    if _, found := acceptSymbol[this.Symbol]; !found {
        if models.TokenskyUserBalanceCoinIsFound(this.Symbol) {
            acceptSymbol[this.Symbol] = struct{}{}
        } else {
            return false, "资产类型不存在"
        }
    }
    found := false
    if this.Balance != "" {
        this.balance, err = strconv.ParseFloat(this.Balance, 64)
        // NOTE: the ParseFloat error is deliberately only checked after the
        // operation and sign validation below (see the err check that follows
        // this block), so operation errors take precedence.
        switch this.MethodBalance {
        case "add", "sub", "mul", "qup":
        default:
            return false, "操作错误"
        }
        if this.balance < 0 {
            return false, "资产为负数"
        }
        found = true
    }
    if err != nil {
        return false, err.Error()
    }
    if this.FrozenBalance != "" {
        this.frozenBalance, err = strconv.ParseFloat(this.FrozenBalance, 64)
        switch this.MethodFrozenBalance {
        case "add", "sub", "mul", "qup":
        default:
            return false, "冻结资产操作错误"
        }
        if this.frozenBalance < 0 {
            return false, "冻结资产负数"
        }
        found = true
    }
    if err != nil {
        return false, err.Error()
    }
    return found, "ok"
}
// BalanceChangeIsOne applies a single balance-change request inside one DB
// transaction: it de-duplicates by hash id, validates the request, updates
// (or creates) the user's balance row, writes an audit record plus the hash
// marker, and returns the resulting balance snapshot.
// NOTE(review): Rollback errors are ignored throughout — presumably
// acceptable here, but worth confirming.
func BalanceChangeIsOne(res *RequestOne) (bool, string, *UserBalance) {
	// Duplicate-request check via the hash table.
	o := orm.NewOrm()
	hashObj := models.TokenskyUserBalanceHashOne(o, res.HashId)
	if hashObj != nil {
		if hashObj.BalanceStatus == 1 {
			return false, "重复请求", nil
		}
	} else {
		hashObj = &models.TokenskyUserBalanceHash{
			BalanceStatus: 1,
			Source:        res.Source,
			HashId:        res.HashId,
		}
	}
	if res.Change == nil {
		return false, "数据不存在", nil
	}
	obj := res.Change
	// Validate the change (also verifies the symbol exists).
	if found, msg := obj.Check(); !found {
		return false, "数据校验未通过:" + msg, nil
	}
	err := o.Begin()
	if err != nil {
		return false, "开启事务失败", nil
	}
	// "new" flags whether the balance row must be inserted rather than
	// updated. NOTE(review): it shadows the builtin of the same name.
	new := false
	balance := &models.TokenskyUserBalance{}
	// Audit record capturing the request and before/after values.
	balanceRecord := &models.TokenskyUserBalancesRecord{
		User:                &models.TokenskyUser{UserId: obj.Uid},
		Symbol:              obj.Symbol,
		Cont:                res.Cont,
		Source:              res.Source,
		Mold:                res.Mold,
		PushTime:            res.PushTime,
		SignId:              obj.SignId,
		MethodBalance:       obj.MethodBalance,
		MethodFrozenBalance: obj.MethodFrozenBalance,
		Balance:             obj.Balance,
		FrozenBalance:       obj.FrozenBalance,
		HashId:              res.HashId,
	}
	query := o.QueryTable(models.TokenskyUserBalanceTBName())
	err = query.Filter("user_id__exact", obj.Uid).Filter("coin_type__exact", obj.Symbol).One(balance)
	if err != nil {
		if err.Error() == "<QuerySeter> no row found" {
			// No existing row: insert a fresh balance for this user/symbol.
			balance.UserId = obj.Uid
			balance.CoinType = obj.Symbol
			new = true
		} else {
			o.Rollback()
			return false, "获取数据异常", nil
		}
	}
	balanceRecord.OldBalance = balance.Balance
	balanceRecord.OldFrozenBalance = balance.FrozenBalance
	// Apply the requested arithmetic to the in-memory balance.
	ok, msg := BalanceAmount(balance, obj)
	if !ok {
		o.Rollback()
		return false, msg, nil
	}
	if new {
		_, err = o.Insert(balance)
		if err != nil {
			o.Rollback()
			return false, "新增数据失败", nil
		}
	} else {
		_, err = o.Update(balance)
		if err != nil {
			o.Rollback()
			return false, "更新数据失败", nil
		}
	}
	balanceRecord.NewBalance = balance.Balance
	balanceRecord.NewFrozenBalance = balance.FrozenBalance
	_, err = o.Insert(balanceRecord)
	if err != nil {
		o.Rollback()
		return false, "新增记录失败", nil
	}
	// Persist the hash marker so replays of the same HashId are rejected.
	_, err = o.InsertOrUpdate(hashObj)
	if err != nil {
		o.Rollback()
		return false, "创建哈希记录失败", nil
	}
	err = o.Commit()
	if err != nil {
		return false, "事务执行失败", nil
	}
	resp := &UserBalance{
		Uid:           balance.UserId,
		Symbol:        balance.CoinType,
		Balance:       strconv.FormatFloat(balance.Balance, 'f', FLOAT_NUM_8, 64),
		FrozenBalance: strconv.FormatFloat(balance.FrozenBalance, 'f', FLOAT_NUM_8, 64),
	}
	return true, "", resp
}
// BalanceChangeIsMulti applies a batch of balance changes in one DB
// transaction. It de-duplicates by hash id, validates every change up front,
// groups changes by symbol so existing rows can be fetched with one query per
// symbol, then updates existing rows / bulk-inserts new ones, writes audit
// records and the hash marker, and returns the resulting snapshots.
// NOTE(review): within one symbol only the last change per uid is kept
// (changes[symbol][uid] overwrites) — confirm callers never send duplicates.
func BalanceChangeIsMulti(res *RequestMulti) (bool, string, []*UserBalance) {
	// Duplicate-request check via the hash table.
	o := orm.NewOrm()
	hashObj := models.TokenskyUserBalanceHashOne(o, res.HashId)
	if hashObj != nil {
		if hashObj.BalanceStatus == 1 {
			return false, "重复请求", nil
		}
	} else {
		hashObj = &models.TokenskyUserBalanceHash{
			BalanceStatus: 1,
			Source:        res.Source,
			HashId:        res.HashId,
		}
	}
	objs := res.Changes
	if len(objs) < 1 {
		return false, "数据不存在", nil
	}
	// changes: symbol -> uid -> change; balances mirrors that layout.
	changes := make(map[string]map[int]*BalanceChange)
	records := make([]*models.TokenskyUserBalancesRecord, 0)
	balances := make(map[string]map[int]*models.TokenskyUserBalance)
	resp := make([]*UserBalance, 0)
	// Validate everything before the transaction starts.
	for _, obj := range objs {
		if found, msg := obj.Check(); found {
			if _, found := changes[obj.Symbol]; !found {
				changes[obj.Symbol] = make(map[int]*BalanceChange)
				//records[obj.Symbol] = make(map[int]*models.TokenskyUserBalancesRecord)
				balances[obj.Symbol] = make(map[int]*models.TokenskyUserBalance)
			}
			changes[obj.Symbol][obj.Uid] = obj
		} else {
			return false, "用户:" + strconv.Itoa(obj.Uid) + ";货币:" + obj.Symbol + " 校验失败 err:" + msg, nil
		}
	}
	err := o.Begin()
	if err != nil {
		return false, "开启事务失败", nil
	}
	for symbol, mapp := range changes {
		// Fetch every existing balance row for this symbol in one query.
		query := o.QueryTable(models.TokenskyUserBalanceTBName())
		query = query.Filter("coin_type__exact", symbol)
		ids := make([]int, 0, len(mapp))
		for id, _ := range mapp {
			ids = append(ids, id)
		}
		data := make([]*models.TokenskyUserBalance, 0)
		query = query.Filter("user_id__in", ids)
		_, err = query.All(&data)
		if err != nil {
			o.Rollback()
			return false, "获取数据失败", nil
		}
		// Index existing rows by user id.
		ids2 := make(map[int]*models.TokenskyUserBalance)
		for _, obj := range data {
			ids2[obj.UserId] = obj
		}
		// data2 collects brand-new rows for a single bulk insert.
		data2 := make([]*models.TokenskyUserBalance, 0)
		for _, obj := range mapp {
			if balance, found := ids2[obj.Uid]; found {
				// Existing row: apply the change and update in place.
				record := &models.TokenskyUserBalancesRecord{
					User:                &models.TokenskyUser{UserId: obj.Uid},
					Source:              res.Source,
					Cont:                res.Cont,
					Symbol:              obj.Symbol,
					OldBalance:          balance.Balance,
					OldFrozenBalance:    balance.FrozenBalance,
					Mold:                res.Mold,
					PushTime:            res.PushTime,
					SignId:              obj.SignId,
					MethodBalance:       obj.MethodBalance,
					MethodFrozenBalance: obj.MethodFrozenBalance,
					Balance:             obj.Balance,
					FrozenBalance:       obj.FrozenBalance,
					HashId:              res.HashId,
				}
				ok, msg := BalanceAmount(balance, obj)
				if !ok {
					o.Rollback()
					return false, msg, nil
				}
				_, err = o.Update(balance)
				if err != nil {
					o.Rollback()
					return false, "更新数据失败", nil
				}
				record.NewBalance = balance.Balance
				record.NewFrozenBalance = balance.FrozenBalance
				records = append(records, record)
				resp = append(resp, &UserBalance{
					Uid:           balance.UserId,
					Symbol:        balance.CoinType,
					Balance:       strconv.FormatFloat(balance.Balance, 'f', FLOAT_NUM_8, 64),
					FrozenBalance: strconv.FormatFloat(balance.FrozenBalance, 'f', FLOAT_NUM_8, 64),
				})
			} else {
				// No row yet: build one and queue it for bulk insert.
				balance = &models.TokenskyUserBalance{UserId: obj.Uid, CoinType: symbol}
				ok, msg := BalanceAmount(balance, obj)
				if !ok {
					o.Rollback()
					return false, msg, nil
				}
				data2 = append(data2, balance)
				record := &models.TokenskyUserBalancesRecord{
					User:                &models.TokenskyUser{UserId: obj.Uid},
					Source:              res.Source,
					Cont:                res.Cont,
					Symbol:              obj.Symbol,
					OldBalance:          0,
					OldFrozenBalance:    0,
					NewBalance:          balance.Balance,
					NewFrozenBalance:    balance.FrozenBalance,
					Mold:                res.Mold,
					PushTime:            res.PushTime,
					SignId:              obj.SignId,
					MethodBalance:       obj.MethodBalance,
					MethodFrozenBalance: obj.MethodFrozenBalance,
					Balance:             obj.Balance,
					FrozenBalance:       obj.FrozenBalance,
					HashId:              res.HashId,
				}
				records = append(records, record)
				resp = append(resp, &UserBalance{
					Uid:           balance.UserId,
					Symbol:        balance.CoinType,
					Balance:       strconv.FormatFloat(balance.Balance, 'f', FLOAT_NUM_8, 64),
					FrozenBalance: strconv.FormatFloat(balance.FrozenBalance, 'f', FLOAT_NUM_8, 64),
				})
			}
		}
		if len(data2) > 0 {
			_, err = o.InsertMulti(len(data2), data2)
			if err != nil {
				o.Rollback()
				return false, "新增数据失败", nil
			}
		}
	}
	if len(records) > 0 {
		_, err = o.InsertMulti(len(records), records)
		if err != nil {
			o.Rollback()
			return false, "新增记录失败", nil
		}
	}
	// Persist the hash marker so replays of the same HashId are rejected.
	_, err = o.InsertOrUpdate(hashObj)
	if err != nil {
		o.Rollback()
		return false, "创建哈希记录失败", nil
	}
	err = o.Commit()
	if err != nil {
		o.Rollback()
		return false, "事务更新数据失败", nil
	}
	return true, "", resp
}
// BalanceAmount applies the operations described by obj to balance in memory.
// It rejects results where the available balance would go negative or the
// frozen balance would exceed the available balance.
// BUG FIX: Check() validates the division method as "qup", but this switch
// previously only matched "quo" — validated division requests were silently
// ignored. Both spellings are now accepted.
func BalanceAmount(balance *models.TokenskyUserBalance, obj *BalanceChange) (bool, string) {
	switch obj.MethodBalance {
	case "add":
		balance.Balance = utils.Float64Add(balance.Balance, obj.balance)
	case "sub":
		balance.Balance = utils.Float64Sub(balance.Balance, obj.balance)
		// Clamp floating-point noise around zero to exactly zero.
		if balance.Balance < FLOAT_PRECISE_8 && balance.Balance > -FLOAT_PRECISE_8 {
			balance.Balance = 0
		}
		if balance.Balance < 0 {
			return false, "资产为负数"
		}
	case "mul":
		balance.Balance = utils.Float64Mul(balance.Balance, obj.balance)
	case "quo", "qup":
		balance.Balance = utils.Float64Quo(balance.Balance, obj.balance)
	}
	switch obj.MethodFrozenBalance {
	case "add":
		balance.FrozenBalance = utils.Float64Add(balance.FrozenBalance, obj.frozenBalance)
	case "sub":
		balance.FrozenBalance = utils.Float64Sub(balance.FrozenBalance, obj.frozenBalance)
		// Same zero-clamp as above, applied to the frozen balance.
		if balance.FrozenBalance < FLOAT_PRECISE_8 && balance.FrozenBalance > -FLOAT_PRECISE_8 {
			balance.FrozenBalance = 0
		}
		if balance.FrozenBalance < 0 {
			return false, "冻结资产为负数"
		}
	case "mul":
		balance.FrozenBalance = utils.Float64Mul(balance.FrozenBalance, obj.frozenBalance)
	case "quo", "qup":
		balance.FrozenBalance = utils.Float64Quo(balance.FrozenBalance, obj.frozenBalance)
	}
	// Frozen funds can never exceed the available balance.
	if balance.FrozenBalance > balance.Balance {
		return false, "冻结资产大于实际资产"
	}
	return true, ""
}
// UserBalance is the user asset snapshot returned to callers; amounts are
// formatted strings (see FLOAT_NUM_8).
type UserBalance struct {
	Uid           int    `json:"uid"`
	Symbol        string `json:"symbol"`
	Balance       string `json:"balance"`
	FrozenBalance string `json:"frozenBalance"`
}

// ResponseMulti is the response envelope for a batch change.
type ResponseMulti struct {
	// Code is 0 on success.
	Code int `json:"code"`
	// Balances holds the latest snapshots after the change.
	Balances []*UserBalance `json:"balances"`
	// Msg is a human-readable explanation.
	Msg string `json:"msg"`
	// HashId echoes the request's unique hash.
	HashId string `json:"hashId"`
}

// ResponseOne is the response envelope for a single change.
type ResponseOne struct {
	// Code is 0 on success.
	Code int `json:"code"`
	// Balance holds the latest snapshot after the change.
	Balance *UserBalance `json:"balance"`
	// Msg is a human-readable explanation.
	Msg string `json:"msg"`
	// HashId echoes the request's unique hash.
	HashId string `json:"hashId"`
}

// RequestMulti is a batch balance-change request.
type RequestMulti struct {
	// Source: 1 backend, 2 admin console, 3 scheduled job.
	Source int `json:"source"`
	// Changes holds the individual balance changes.
	Changes []*BalanceChange `json:"changes"`
	// Cont is a free-form description.
	Cont string `json:"cont"`
	// Mold is the operation template name.
	Mold string `json:"mold"`
	// PushTime is a millisecond timestamp.
	PushTime int64 `json:"pushTime"`
	// HashId is the unique de-duplication hash.
	HashId string `json:"hashId"`
}

// RequestOne is a single balance-change request.
type RequestOne struct {
	// Source: 1 backend, 2 admin console.
	Source int `json:"source"`
	// Change is the single balance change to apply.
	Change *BalanceChange `json:"change"`
	// Cont is a free-form description.
	Cont string `json:"cont"`
	// Mold is the operation template name.
	Mold string `json:"mold"`
	// PushTime is a millisecond timestamp.
	PushTime int64 `json:"pushTime"`
	// HashId is the unique de-duplication hash.
	HashId string `json:"hashId"`
}
|
package vaultengine
import (
"fmt"
"log"
"github.com/hashicorp/vault/api"
)
// CollectPaths retrieves all paths to secrets defined under the given path,
// recursing into sub-folders.
func (client *Client) CollectPaths(path string) ([]string, error) {
	var secretPaths []string
	folder, err := client.FolderRead(path)
	if err != nil {
		return nil, err
	}
	for _, key := range folder {
		strKey := fmt.Sprintf("%v", key)
		newPath := CleanupPath(path + strKey)
		if IsFolder(strKey) {
			// Check the recursion error before consuming its results; the
			// previous code appended first and checked the error afterwards.
			sub, err := client.CollectPaths(newPath)
			if err != nil {
				return nil, err
			}
			secretPaths = append(secretPaths, sub...)
		} else {
			secretPaths = append(secretPaths, newPath)
		}
	}
	return secretPaths, nil
}
// SecretDelete deletes a Vault secret at the given path.
// For kv2 engines the "/metadata" infix is required; kv1 uses a plain path.
func (client *Client) SecretDelete(path string) (*api.Secret, error) {
	infix := "/metadata"
	if client.engineType == "kv1" {
		infix = "/"
	}
	finalPath := client.engine + infix + path
	result, err := client.vc.Logical().Delete(finalPath)
	if err != nil {
		// Return the error instead of log.Fatalf: library code must not
		// terminate the whole process (the old code also made the error
		// return value unreachable).
		log.Printf("Unable to delete secret: %s", err)
		return nil, err
	}
	return result, nil
}
|
package controllers
import (
"crypto/md5"
"github.com/gin-gonic/gin"
"github.com/xiaoqunSun/api-server/mysql"
)
func HandlerAccount(r *gin.Engine) {
r.POST("/registerAccount", func(c *gin.Context) {
username := c.PostForm("username")
password := c.PostForm("password")
if len(username) < 6 || len(username) > 15 {
errorResponse(c, "username length must be in 6-15")
}
if len(password) < 6 || len(password) > 15 {
errorResponse(c, "password length must be in 6-15")
}
db := mysql.DB()
rows, err := db.Query("call sp_registerAccount(?,?)", username, md5.Sum([]byte(password)))
var result int
defer rows.Close()
if err != nil {
errorResponse(c, err)
}
for rows.Next() {
if err := rows.Scan(&result); err != nil {
errorResponse(c, err)
}
}
if result == 0 {
c.JSON(200, gin.H{})
} else if result == 1 {
c.JSON(200,gin.H{
"error":"username has exist"
})
}
})
}
|
package types
// BeaconEntry carries the data produced by a beacon for a given round,
// together with optional metadata.
type BeaconEntry struct {
	Round    uint64
	Data     []byte
	Metadata map[string]interface{}
}

// NewBeaconEntry builds a BeaconEntry from its three components.
func NewBeaconEntry(round uint64, data []byte, metadata map[string]interface{}) BeaconEntry {
	var entry BeaconEntry
	entry.Round = round
	entry.Data = data
	entry.Metadata = metadata
	return entry
}
|
package main
import (
"fmt"
"time"
)
// main demonstrates select behavior on a buffered channel that is set to nil
// mid-loop: receiving from a nil channel blocks forever, so the select falls
// through to its default case and the loop exits via goto.
func main() {
	// Buffered channel pre-filled with 1..10.
	c := make(chan int, 10)
	for _, i := range [10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} {
		c <- i
	}
	// Close the channel after one second. In practice the loop below sets c
	// to nil (dropping the only local reference) well before this fires.
	time.AfterFunc(time.Second*1, func() {
		close(c)
	})
	for {
		select {
		case v, _ := <-c:
			// Once c is nil, this case can never proceed again on later
			// iterations, so control reaches default instead.
			if v == 3 {
				c = nil
				fmt.Printf("c = nil \n")
			}
			fmt.Printf("come into %v\n", v)
		default:
			// Reached when the channel case cannot proceed (c is nil).
			fmt.Printf("come into default\n")
			goto end
		}
	}
end:
	fmt.Printf("over \n")
}
|
// Package ir provides the library for constructing a SSVM intermediate representation program in SSA form.
package ir
// Value is the interface implemented by every SSVM IR value (globals,
// locals, and literals).
type Value interface {
	Type() Type         // the type of the value
	SetName(string)     // sets the name of the value
	Name() string       // returns the name of the value
	Identifier() string // returns the identifier of the value (@name for globals, %name for locals)
	IsGlobal() bool     // returns true if the value is a global value
	IsLit() bool        // returns true if the value is a literal
	validate() error    // checks the value is valid, returns an error if invalid. TODO: move validation to a separate pass
	string() string     // returns the value's representation in SSVM ir language
}
|
/*
* Copyright (c) 2019 QLC Chain Team
*
* This software is released under the MIT License.
* https://opensource.org/licenses/MIT
*/
package contract
import (
"errors"
"github.com/qlcchain/go-qlc/common/types"
"github.com/qlcchain/go-qlc/vm/abi"
cabi "github.com/qlcchain/go-qlc/vm/contract/abi"
"github.com/qlcchain/go-qlc/vm/vmstore"
)
// ContractBlock is a block generated by a contract during receive
// processing, together with the context needed to apply it.
type ContractBlock struct {
	VMContext *vmstore.VMContext
	Block     *types.StateBlock
	ToAddress types.Address
	BlockType types.BlockType
	Amount    types.Balance
	Token     types.Hash
	Data      []byte
}
// InternalContract is the behavior shared by every built-in chain contract.
type InternalContract interface {
	// GetFee returns the fee charged for processing the given block.
	GetFee(ctx *vmstore.VMContext, block *types.StateBlock) (types.Balance, error)
	// check status, update state
	DoReceive(ctx *vmstore.VMContext, block *types.StateBlock, input *types.StateBlock) ([]*ContractBlock, error)
	// refund data at receive error
	GetRefundData() []byte
}

// ChainContractV1 is the first-generation contract interface, which splits
// send processing into DoPending and DoSend.
type ChainContractV1 interface {
	InternalContract
	// DoPending generate pending info from send block
	DoPending(block *types.StateBlock) (*types.PendingKey, *types.PendingInfo, error)
	// ProcessSend verify or update StateBlock.Data
	DoSend(ctx *vmstore.VMContext, block *types.StateBlock) error
}
// qlcchainContractV1 bundles a contract's method table with its ABI so a
// method selector can be resolved to an implementation.
type qlcchainContractV1 struct {
	m   map[string]ChainContractV1
	abi abi.ABIContract
}

// contractCacheV1 maps each built-in contract address to its V1 method
// table and ABI.
var contractCacheV1 = map[types.Address]*qlcchainContractV1{
	types.MintageAddress: {
		map[string]ChainContractV1{
			cabi.MethodNameMintage:         &Mintage{},
			cabi.MethodNameMintageWithdraw: &WithdrawMintage{},
		},
		cabi.MintageABI,
	},
	types.NEP5PledgeAddress: {
		map[string]ChainContractV1{
			cabi.MethodNEP5Pledge:         &Nep5Pledge{},
			cabi.MethodWithdrawNEP5Pledge: &WithdrawNep5Pledge{},
		},
		cabi.NEP5PledgeABI,
	},
	types.RewardsAddress: {
		map[string]ChainContractV1{
			cabi.MethodNameAirdropRewards:   &AirdropRewords{},
			cabi.MethodNameConfidantRewards: &ConfidantRewards{},
		},
		cabi.RewardsABI,
	},
	types.MinerAddress: {
		map[string]ChainContractV1{
			cabi.MethodNameMinerReward: &MinerReward{},
		},
		cabi.MinerABI,
	},
}
// ChainContractV2 is the second-generation contract interface, which folds
// send processing into a single ProcessSend call.
type ChainContractV2 interface {
	InternalContract
	// ProcessSend verify or update StateBlock.Data
	ProcessSend(ctx *vmstore.VMContext, block *types.StateBlock) (*types.PendingKey, *types.PendingInfo, error)
}

// qlcchainContractV2 bundles a V2 contract's method table with its ABI.
type qlcchainContractV2 struct {
	m   map[string]ChainContractV2
	abi abi.ABIContract
}

// contractCacheV2 maps each built-in contract address to its V2 method
// table and ABI.
var contractCacheV2 = map[types.Address]*qlcchainContractV2{
	types.BlackHoleAddress: {
		m: map[string]ChainContractV2{
			cabi.MethodNameDestroy: &BlackHole{},
		},
		abi: cabi.BlackHoleABI,
	},
}
// GetChainContract resolves a contract implementation by address and ABI
// method selector. It returns (nil, false, nil) when the address is not a
// built-in contract, and an error when the selector matches no ABI method.
func GetChainContract(addr types.Address, methodSelector []byte) (interface{}, bool, error) {
	if v1, hit := contractCacheV1[addr]; hit {
		method, err := v1.abi.MethodById(methodSelector)
		if err != nil {
			return nil, hit, errors.New("abi: method not found")
		}
		impl, found := v1.m[method.Name]
		return impl, found, nil
	}
	if v2, hit := contractCacheV2[addr]; hit {
		method, err := v2.abi.MethodById(methodSelector)
		if err != nil {
			return nil, hit, errors.New("abi: method not found")
		}
		impl, found := v2.m[method.Name]
		return impl, found, nil
	}
	return nil, false, nil
}
// GetChainContractName resolves the ABI method name for the given contract
// address and selector. The boolean reports whether an implementation is
// registered for that method.
func GetChainContractName(addr types.Address, methodSelector []byte) (string, bool, error) {
	if v1, hit := contractCacheV1[addr]; hit {
		method, err := v1.abi.MethodById(methodSelector)
		if err != nil {
			return "", hit, errors.New("abi: method not found")
		}
		_, found := v1.m[method.Name]
		return method.Name, found, nil
	}
	if v2, hit := contractCacheV2[addr]; hit {
		method, err := v2.abi.MethodById(methodSelector)
		if err != nil {
			return "", hit, errors.New("abi: method not found")
		}
		_, found := v2.m[method.Name]
		return method.Name, found, nil
	}
	return "", false, nil
}
// IsChainContract reports whether addr belongs to any built-in chain
// contract (V1 or V2).
func IsChainContract(addr types.Address) bool {
	_, v1 := contractCacheV1[addr]
	_, v2 := contractCacheV2[addr]
	return v1 || v2
}
|
package scheduler
import (
"github.com/Sirupsen/logrus"
"github.com/pkg/errors"
"github.com/rancher/longhorn-manager/types"
)
// OrcScheduler schedules instances onto hosts using the operations provided
// by types.ScheduleOps.
type OrcScheduler struct {
	ops types.ScheduleOps
}

// NewOrcScheduler builds an OrcScheduler backed by the given operations.
func NewOrcScheduler(ops types.ScheduleOps) *OrcScheduler {
	return &OrcScheduler{
		ops: ops,
	}
}
// randomHostID returns an arbitrary key of m (Go map iteration order is
// unspecified), or "" when the map is empty.
func randomHostID(m map[string]*types.HostInfo) string {
	var id string
	for id = range m {
		break
	}
	return id
}
// Schedule places the given item on a host. If the instance already names a
// host it is scheduled there directly; otherwise hosts are tried in priority
// order (soft anti-affinity pushes hosts already in policy.HostIDMap to the
// back) until one succeeds.
func (s *OrcScheduler) Schedule(item *types.ScheduleItem, policy *types.SchedulePolicy) (*types.InstanceInfo, error) {
	if item.Instance.ID == "" || item.Instance.Type == types.InstanceTypeNone {
		return nil, errors.Errorf("instance ID and type required for scheduling")
	}
	// A preassigned host bypasses host selection entirely.
	if item.Instance.HostID != "" {
		return s.ScheduleProcess(&types.ScheduleSpec{
			HostID: item.Instance.HostID,
		}, item)
	}
	hosts, err := s.ops.ListHosts()
	if err != nil {
		return nil, errors.Wrap(err, "fail to schedule")
	}
	// Partition hosts into normal and low priority according to the policy.
	normalPriorityList := []string{}
	lowPriorityList := []string{}
	for id := range hosts {
		if policy != nil {
			if policy.Binding == types.SchedulePolicyBindingSoftAntiAffinity {
				// Hosts already holding a related instance are tried last.
				if _, ok := policy.HostIDMap[id]; ok {
					lowPriorityList = append(lowPriorityList, id)
				} else {
					normalPriorityList = append(normalPriorityList, id)
				}
			} else {
				return nil, errors.Errorf("Unsupported schedule policy binding %v", policy.Binding)
			}
		} else {
			normalPriorityList = append(normalPriorityList, id)
		}
	}
	// Try each candidate in order; a failure falls through to the next host.
	priorityList := append(normalPriorityList, lowPriorityList...)
	for _, id := range priorityList {
		ret, err := s.ScheduleProcess(&types.ScheduleSpec{HostID: id}, item)
		if err == nil {
			return ret, nil
		}
		logrus.Warnf("Fail to schedule %+v on host %v, trying on another one: %v",
			hosts[id], item.Instance, err)
	}
	return nil, errors.Errorf("unable to find suitable host for scheduling")
}
// ScheduleProcess executes the schedule item on the host named by spec:
// locally when that host is the current one, otherwise by forwarding the
// request to the remote host's scheduler endpoint.
func (s *OrcScheduler) ScheduleProcess(spec *types.ScheduleSpec, item *types.ScheduleItem) (*types.InstanceInfo, error) {
	if s.ops.GetCurrentHostID() == spec.HostID {
		return s.Process(spec, item)
	}
	host, err := s.ops.GetHost(spec.HostID)
	if err != nil {
		return nil, errors.Wrapf(err, "cannot find host %v", spec.HostID)
	}
	// Remote path: delegate to the scheduler running on the target host.
	client := newSchedulerClient(host)
	ret, err := client.Schedule(item)
	if err != nil {
		return nil, errors.Wrapf(err, "Fail to schedule on host %v(%v %v)", host.UUID, host.Name, host.Address)
	}
	logrus.Debugf("Scheduled %v %v to %v(%v)", item.Action, item.Instance.ID, host.UUID, host.Address)
	return ret, nil
}
// Process runs the schedule item on the local host. It refuses requests
// routed to the wrong host and validates the key fields of the response.
func (s *OrcScheduler) Process(spec *types.ScheduleSpec, item *types.ScheduleItem) (*types.InstanceInfo, error) {
	if s.ops.GetCurrentHostID() != spec.HostID {
		return nil, errors.Errorf("wrong host routing, should be at %v", spec.HostID)
	}
	instance, err := s.ops.ProcessSchedule(item)
	if err != nil {
		return nil, errors.Wrapf(err, "fail to process schedule request")
	}
	// Sanity-check the response before handing it back to callers.
	if instance == nil || instance.ID == "" || instance.Type == types.InstanceTypeNone {
		return nil, errors.Errorf("missing key fields from schedule response %+v", instance)
	}
	return instance, nil
}
|
package provider
import (
"github.com/bearname/videohost/internal/common/db"
"github.com/bearname/videohost/internal/thumbgenerator/app/publisher"
"github.com/bearname/videohost/internal/thumbgenerator/domain/model"
log "github.com/sirupsen/logrus"
"time"
)
// RunTaskProvider wraps the task publisher with stop handling: it forwards
// tasks from the publisher to the returned channel until stopChan fires,
// then signals the publisher to stop and closes the result channel.
func RunTaskProvider(stopChan chan struct{}, db db.Connector) <-chan *model.Task {
	resultChan := make(chan *model.Task)
	stopTaskProviderChan := make(chan struct{})
	taskProviderChan := runTaskPublisher(stopTaskProviderChan, db)
	// onStop tears down the pipeline: stop the publisher, close our output.
	onStop := func() {
		stopTaskProviderChan <- struct{}{}
		close(resultChan)
	}
	go func() {
		for {
			select {
			case <-stopChan:
				onStop()
				return
			case task := <-taskProviderChan:
				// Re-check stop while blocked on the (unbuffered) send so a
				// shutdown is not missed while no consumer is reading.
				select {
				case <-stopChan:
					onStop()
					return
				case resultChan <- task:
				}
			}
		}
	}()
	return resultChan
}
// runTaskPublisher polls the database for tasks and emits them on the
// returned channel, sleeping one second between empty polls. It exits and
// closes the channel when stopChan fires.
func runTaskPublisher(stopChan chan struct{}, db db.Connector) <-chan *model.Task {
	tasksChan := make(chan *model.Task)
	go func() {
		for {
			// Non-blocking stop check at the top of every poll iteration.
			select {
			case <-stopChan:
				close(tasksChan)
				return
			default:
			}
			if task := publisher.PublishTask(db); task != nil {
				log.Printf("got the task %v\n", task)
				tasksChan <- task
			} else {
				log.Info("no task for processing, start waiting")
				time.Sleep(1 * time.Second)
			}
		}
	}()
	return tasksChan
}
|
package models
import (
"time"
)
// Note is a user note; `schema` tags drive form decoding, with "-" marking
// server-managed fields that are never taken from user input.
type Note struct {
	ID         int       `schema:"-"`
	Title      string    `schema:"title"`
	Body       string    `schema:"body"`
	Tags       string    `schema:"tags"`
	NotebookID int       `schema:"notebook_id"`
	CreatedAt  time.Time `schema:"-"`
	UpdatedAt  time.Time `schema:"-"`
}
|
package tsl
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
"github.com/miton18/go-warp10/base"
)
// Query is a TSL query under construction: raw accumulates the query text,
// and endpoint/token/httpClient describe how to execute it.
type Query struct {
	raw        string
	endpoint   string
	token      string
	httpClient *http.Client // lazily defaulted to http.DefaultClient by Execute
}
// Execute the query on Backend
func (q *Query) Execute() (base.GTSList, error) {
if q.httpClient == nil {
q.httpClient = http.DefaultClient
}
r := strings.NewReader(q.raw)
req, err := http.NewRequest("POST", q.endpoint+"/v0/query", r)
if err != nil {
return nil, fmt.Errorf("Cannot build TSL query: %s", err.Error())
}
req.SetBasicAuth("tsl-user", q.token)
res, err := q.httpClient.Do(req)
if err != nil {
return nil, fmt.Errorf("Cannot perform TSL query: %s", err.Error())
}
if res.StatusCode != 200 {
b, _ := ioutil.ReadAll(res.Body)
return nil, fmt.Errorf("Unexpected TSL query response: (%d) %s", res.StatusCode, string(b))
}
stack := []json.RawMessage{}
dec := json.NewDecoder(res.Body)
err = dec.Decode(&stack)
if err != nil {
return nil, fmt.Errorf("Cannot decode TSL response: %s", err.Error())
}
if len(stack) == 0 {
return nil, nil
}
var gts base.GTSList
err = json.Unmarshal(stack[0], >s)
if err != nil {
return gts, fmt.Errorf("Cannot parse TSL stack: %s", err.Error())
}
err = res.Body.Close()
if err != nil {
return gts, fmt.Errorf("Cannot close TSL response body: %s", err.Error())
}
return gts, nil
}
// Dump returns the TSL query text accumulated so far.
func (q *Query) Dump() string {
	return q.raw
}
// Select appends a select() clause for the given metric; an empty metric
// name selects everything ("*").
func (q *Query) Select(metric string) *Query {
	name := metric
	if name == "" {
		name = "*"
	}
	q.raw += "select(\"" + name + "\")"
	return q
}
// Where appends a label filter. The value must already carry its operator,
// built with Eq(), NotEq(), Like() or NotLike().
func (q *Query) Where(label, value string) *Query {
	q.raw += ".where(\"" + label + value + "\")"
	return q
}
// From appends a time-range clause. When end is the zero time it defaults to
// now; the previous code mistakenly overwrote start instead of end, which
// produced a range ending at the zero time.
func (q *Query) From(start, end time.Time) *Query {
	if end.IsZero() {
		end = time.Now()
	}
	q.raw += fmt.Sprintf(".from(%d, %d)", toMicroSeconds(start), toMicroSeconds(end))
	return q
}
// Last appends a last-duration clause, anchored at the given timestamp when
// at is non-zero.
// TODO: handle shift parameter
func (q *Query) Last(d time.Duration, at time.Time) *Query {
	if at.IsZero() {
		q.raw += fmt.Sprintf(".last(%s)", shortDur(d))
		return q
	}
	q.raw += fmt.Sprintf(".last(%s, timestamp=%d)", shortDur(d), toMicroSeconds(at))
	return q
}
// LastN appends a clause selecting the last n datapoints, anchored at the
// given timestamp when at is non-zero.
func (q *Query) LastN(n int64, at time.Time) *Query {
	if at.IsZero() {
		q.raw += fmt.Sprintf(".last(%d)", n)
		return q
	}
	q.raw += fmt.Sprintf(".last(%d, timestamp=%d)", n, toMicroSeconds(at))
	return q
}
// SampleBy buckets the metrics by the given duration using aggregator.
func (q *Query) SampleBy(d time.Duration, aggregator Aggregator) *Query {
	clause := fmt.Sprintf(".sampleBy(%s, %s)", shortDur(d), aggregator)
	q.raw += clause
	return q
}
// SampleByN buckets the metrics into n buckets using aggregator.
func (q *Query) SampleByN(n int64, aggregator Aggregator) *Query {
	clause := fmt.Sprintf(".sampleBy(%d, %s)", n, aggregator)
	q.raw += clause
	return q
}
// Group appends a group() clause combining all series with aggregator.
func (q *Query) Group(aggregator Aggregator) *Query {
	clause := fmt.Sprintf(".group(%s)", aggregator)
	q.raw += clause
	return q
}
// GroupBy appends a groupBy() clause combining series per label set with
// aggregator.
func (q *Query) GroupBy(labels []string, aggregator Aggregator) *Query {
	clause := fmt.Sprintf(".groupBy(%s, %s)", toStringArray(labels), aggregator)
	q.raw += clause
	return q
}
|
package nanomsgsubscriber
import (
"encoding/json"
"errors"
"log"
"github.com/didiercrunch/doorman/shared"
"github.com/go-mangos/mangos"
"github.com/go-mangos/mangos/protocol/sub"
"github.com/go-mangos/mangos/transport/ipc"
"github.com/go-mangos/mangos/transport/tcp"
)
// NanoMsgSubscriber subscribes to doorman updates published over a nanomsg
// (mangos) SUB socket at Url.
type NanoMsgSubscriber struct {
	Url string
}
// callUpdateHandlerFunction decodes a received payload into a DoormanUpdater
// and hands it to the handler. It returns the unmarshal error, or whatever
// the handler returns.
func (s *NanoMsgSubscriber) callUpdateHandlerFunction(f shared.UpdateHandlerFunc, data []byte) error {
	wu := &shared.DoormanUpdater{}
	// Early return instead of else-after-return keeps the happy path flat.
	if err := json.Unmarshal(data, wu); err != nil {
		return err
	}
	return f(wu)
}
// Subscribe dials the SUB socket, subscribes to all topics, and starts a
// background goroutine that feeds every received message to update.
// NOTE(review): the receive loop has no shutdown mechanism; the goroutine
// lives for the remainder of the process — confirm that is acceptable.
func (s *NanoMsgSubscriber) Subscribe(abtestId string, update shared.UpdateHandlerFunc) error {
	sock, err := sub.NewSocket()
	if err != nil {
		return errors.New("can't get new sub socket: " + err.Error())
	}
	sock.AddTransport(ipc.NewTransport())
	sock.AddTransport(tcp.NewTransport())
	if err = sock.Dial(s.Url); err != nil {
		return errors.New("can't dial on sub socket: " + err.Error())
	}
	// Empty byte array effectively subscribes to everything
	if err = sock.SetOption(mangos.OptionSubscribe, []byte("")); err != nil {
		return errors.New("cannot subscribe: " + err.Error())
	}
	go func(s *NanoMsgSubscriber, sock mangos.Socket) {
		for {
			// msg/err are scoped to the loop; previously they were the
			// enclosing function's variables, shared with the goroutine.
			msg, err := sock.Recv()
			if err != nil {
				log.Println(errors.New("Cannot recv: " + err.Error()))
			} else if err := s.callUpdateHandlerFunction(update, msg); err != nil {
				log.Println("cannot update abtest with received data: ", err)
			}
		}
	}(s, sock)
	return nil
}
|
package entity_book
import validation "github.com/go-ozzo/ozzo-validation"
// Book is the book aggregate; fields are unexported and exposed through the
// getters below, with invariants enforced by validate().
type Book struct {
	id     uint64
	isbn   string
	title  string
	author string
}
// NewBook validates the given attributes and returns a new Book (without an
// id) on success.
func NewBook(isbn string, title string, author string) (*Book, error) {
	b := &Book{
		isbn:   isbn,
		title:  title,
		author: author,
	}
	if err := b.validate(); err != nil {
		return nil, err
	}
	return b, nil
}
// NewBookForRebuild validates the given attributes and reconstructs a Book
// with an existing id (e.g. when loading from persistence).
func NewBookForRebuild(id uint64, isbn string, title string, author string) (*Book, error) {
	b := &Book{
		id:     id,
		isbn:   isbn,
		title:  title,
		author: author,
	}
	if err := b.validate(); err != nil {
		return nil, err
	}
	return b, nil
}
// validate enforces the Book invariants: ISBN is required and exactly 13
// characters; title and author are each at most 255 characters.
func (b Book) validate() error {
	return validation.ValidateStruct(&b,
		validation.Field(&b.isbn, validation.Required, validation.Length(13, 13)),
		validation.Field(&b.title, validation.Length(0, 255)),
		validation.Field(&b.author, validation.Length(0, 255)),
	)
}
// ID returns the book's identifier (0 for a book created via NewBook).
func (b Book) ID() uint64 {
	return b.id
}

// Isbn returns the book's ISBN.
func (b Book) Isbn() string {
	return b.isbn
}

// Title returns the book's title.
func (b Book) Title() string {
	return b.title
}

// Author returns the book's author.
func (b Book) Author() string {
	return b.author
}
|
package gravatar
import (
"testing"
)
// TestHash checks the known gravatar hash of a reference address (note the
// trailing space, which Hash is expected to handle).
func TestHash(t *testing.T) {
	got := Hash("MyEmailAddress@example.com ")
	if got != "0bc83cb571cd1c50ba6f3e8a78ef1346" {
		t.Errorf("incorrect hash")
	}
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"bytes"
"context"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/types"
)
// gcSubstituter is the logical optimization rule that rewrites expressions
// into references to matching indexed virtual generated columns.
type gcSubstituter struct {
}

// ExprColumnMap is used to store all expressions of indexed generated columns in a table,
// and map them to the generated columns,
// thus we can substitute the expression in a query to an indexed generated column.
type ExprColumnMap map[expression.Expression]*expression.Column
// optimize try to replace the expression to indexed virtual generate column in where, group by, order by, and field clause
// so that we can use the index on expression.
// For example: select a+1 from t order by a+1, with a virtual generate column c as (a+1) and
// an index on c. We need to replace a+1 with c so that we can use the index on c.
// See also https://dev.mysql.com/doc/refman/8.0/en/generated-column-index-optimizations.html
func (gc *gcSubstituter) optimize(ctx context.Context, lp LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) {
	exprToColumn := make(ExprColumnMap)
	collectGenerateColumn(lp, exprToColumn)
	// Nothing to substitute when no indexed virtual generated column exists.
	if len(exprToColumn) == 0 {
		return lp, nil
	}
	return gc.substitute(ctx, lp, exprToColumn, opt), nil
}
// collectGenerateColumn collect the generate column and save them to a map from their expressions to themselves.
// For the sake of simplicity, we don't collect the stored generate column because we can't get their expressions directly.
// TODO: support stored generate column.
func collectGenerateColumn(lp LogicalPlan, exprToColumn ExprColumnMap) {
	// CTE subtrees are skipped entirely.
	if _, ok := lp.(*LogicalCTE); ok {
		return
	}
	// Depth-first: gather from children before inspecting this node.
	for _, child := range lp.Children() {
		collectGenerateColumn(child, exprToColumn)
	}
	ds, ok := lp.(*DataSource)
	if !ok {
		return
	}
	// Only index access paths can carry indexed generated columns.
	for _, p := range ds.possibleAccessPaths {
		if p.IsTablePath() {
			continue
		}
		for _, idxPart := range p.Index.Columns {
			colInfo := ds.tableInfo.Columns[idxPart.Offset]
			// Only virtual (non-stored) generated columns are collected.
			if colInfo.IsGenerated() && !colInfo.GeneratedStored {
				s := ds.schema.Columns
				col := expression.ColInfo2Col(s, colInfo)
				if col != nil && col.GetType().PartialEqual(col.VirtualExpr.GetType(), lp.SCtx().GetSessionVars().EnableUnsafeSubstitute) {
					exprToColumn[col.VirtualExpr] = col
				}
			}
		}
	}
}
// tryToSubstituteExpr replaces *expr with col in place when *expr equals
// candidateExpr, the candidate's eval type matches tp, and col is present in
// schema; the substitution is recorded in the optimizer trace.
func tryToSubstituteExpr(expr *expression.Expression, lp LogicalPlan, candidateExpr expression.Expression, tp types.EvalType, schema *expression.Schema, col *expression.Column, opt *logicalOptimizeOp) {
	if (*expr).Equal(lp.SCtx(), candidateExpr) && candidateExpr.GetType().EvalType() == tp &&
		schema.ColumnIndex(col) != -1 {
		*expr = col
		appendSubstituteColumnStep(lp, candidateExpr, col, opt)
	}
}
// appendSubstituteColumnStep records one substitution in the optimizer
// trace. The action string is built lazily so no work happens unless the
// trace is rendered.
func appendSubstituteColumnStep(lp LogicalPlan, candidateExpr expression.Expression, col *expression.Column, opt *logicalOptimizeOp) {
	reason := func() string { return "" }
	action := func() string {
		var buf bytes.Buffer
		buf.WriteString("expression:")
		buf.WriteString(candidateExpr.String())
		buf.WriteString(" substituted by column:")
		buf.WriteString(col.String())
		return buf.String()
	}
	opt.appendStepToCurrent(lp.ID(), lp.TP(), reason, action)
}
// substituteExpression rewrites the arguments of a scalar-function condition
// in place, replacing sub-expressions that match an indexed virtual
// generated column. Comparison, IN, LIKE, and the logical connectives are
// handled; other functions are left untouched.
func substituteExpression(cond expression.Expression, lp LogicalPlan, exprToColumn ExprColumnMap, schema *expression.Schema, opt *logicalOptimizeOp) {
	sf, ok := cond.(*expression.ScalarFunction)
	if !ok {
		return
	}
	sctx := lp.SCtx().GetSessionVars().StmtCtx
	defer func() {
		// If the argument is not changed, hash code doesn't need to recount again.
		// But we always do it to keep the code simple and stupid.
		expression.ReHashCode(sf, sctx)
	}()
	var expr *expression.Expression
	var tp types.EvalType
	switch sf.FuncName.L {
	case ast.EQ, ast.LT, ast.LE, ast.GT, ast.GE:
		// Either side of a comparison may match a generated column; the eval
		// type required is taken from the opposite operand.
		for candidateExpr, column := range exprToColumn {
			tryToSubstituteExpr(&sf.GetArgs()[1], lp, candidateExpr, sf.GetArgs()[0].GetType().EvalType(), schema, column, opt)
		}
		for candidateExpr, column := range exprToColumn {
			tryToSubstituteExpr(&sf.GetArgs()[0], lp, candidateExpr, sf.GetArgs()[1].GetType().EvalType(), schema, column, opt)
		}
	case ast.In:
		expr = &sf.GetArgs()[0]
		tp = sf.GetArgs()[1].GetType().EvalType()
		canSubstitute := true
		// Can only substitute if all the operands on the right-hand
		// side are the same type.
		for i := 1; i < len(sf.GetArgs()); i++ {
			if sf.GetArgs()[i].GetType().EvalType() != tp {
				canSubstitute = false
				break
			}
		}
		if canSubstitute {
			for candidateExpr, column := range exprToColumn {
				tryToSubstituteExpr(expr, lp, candidateExpr, tp, schema, column, opt)
			}
		}
	case ast.Like:
		expr = &sf.GetArgs()[0]
		tp = sf.GetArgs()[1].GetType().EvalType()
		for candidateExpr, column := range exprToColumn {
			tryToSubstituteExpr(expr, lp, candidateExpr, tp, schema, column, opt)
		}
	case ast.LogicOr, ast.LogicAnd:
		// Recurse into both branches of the logical connective.
		substituteExpression(sf.GetArgs()[0], lp, exprToColumn, schema, opt)
		substituteExpression(sf.GetArgs()[1], lp, exprToColumn, schema, opt)
	case ast.UnaryNot:
		substituteExpression(sf.GetArgs()[0], lp, exprToColumn, schema, opt)
	}
}
// substitute walks the plan tree and, for the operators whose expressions
// may safely reference a generated column (selections, projections, sorts,
// and aggregations), replaces occurrences of the generated columns' defining
// expressions (exprToColumn) with the columns themselves. The plan is
// mutated in place and returned.
func (gc *gcSubstituter) substitute(ctx context.Context, lp LogicalPlan, exprToColumn ExprColumnMap, opt *logicalOptimizeOp) LogicalPlan {
	var tp types.EvalType
	switch x := lp.(type) {
	case *LogicalSelection:
		for _, cond := range x.Conditions {
			substituteExpression(cond, lp, exprToColumn, x.Schema(), opt)
		}
	case *LogicalProjection:
		for i := range x.Exprs {
			tp = x.Exprs[i].GetType().EvalType()
			for candidateExpr, column := range exprToColumn {
				// The substituting column must come from the child's schema,
				// since projections compute their output from child columns.
				tryToSubstituteExpr(&x.Exprs[i], lp, candidateExpr, tp, x.children[0].Schema(), column, opt)
			}
		}
	case *LogicalSort:
		for i := range x.ByItems {
			tp = x.ByItems[i].Expr.GetType().EvalType()
			for candidateExpr, column := range exprToColumn {
				tryToSubstituteExpr(&x.ByItems[i].Expr, lp, candidateExpr, tp, x.Schema(), column, opt)
			}
		}
	case *LogicalAggregation:
		// For aggregations, substitute matching aggregate arguments and
		// GROUP BY items directly; the eval types must match and the column
		// must be present in the aggregation's schema.
		for _, aggFunc := range x.AggFuncs {
			for i := 0; i < len(aggFunc.Args); i++ {
				tp = aggFunc.Args[i].GetType().EvalType()
				for candidateExpr, column := range exprToColumn {
					if aggFunc.Args[i].Equal(lp.SCtx(), candidateExpr) && candidateExpr.GetType().EvalType() == tp &&
						x.Schema().ColumnIndex(column) != -1 {
						aggFunc.Args[i] = column
						appendSubstituteColumnStep(lp, candidateExpr, column, opt)
					}
				}
			}
		}
		for i := 0; i < len(x.GroupByItems); i++ {
			tp = x.GroupByItems[i].GetType().EvalType()
			for candidateExpr, column := range exprToColumn {
				if x.GroupByItems[i].Equal(lp.SCtx(), candidateExpr) && candidateExpr.GetType().EvalType() == tp &&
					x.Schema().ColumnIndex(column) != -1 {
					x.GroupByItems[i] = column
					appendSubstituteColumnStep(lp, candidateExpr, column, opt)
				}
			}
		}
	}
	// Recurse into children; they are mutated in place, so the returned
	// plans can be ignored.
	for _, child := range lp.Children() {
		gc.substitute(ctx, child, exprToColumn, opt)
	}
	return lp
}
// name returns the identifier of this logical optimization rule.
func (*gcSubstituter) name() string {
	const ruleName = "generate_column_substitute"
	return ruleName
}
|
package core
import (
"archive/tar"
"bytes"
"compress/gzip"
"fmt"
"github.com/callumj/weave/tools"
"io"
"log"
"os"
"path"
"strings"
)
// Item records the location of a single file inside a tar archive: the byte
// offset where its header begins, the number of bytes the entry occupies
// from that offset, and its name within the archive.
type Item struct {
	Start  int64
	Length int64
	Name   string
}

// ArchiveInfo describes a tar archive on disk together with the items that
// were written into it.
type ArchiveInfo struct {
	Items []Item
	Path  string
}

// archiveProcessCallback is invoked for each entry while iterating an
// archive; it receives the entry's original name, the computed output path,
// and a reader positioned at the entry's data. Returning false stops the
// iteration.
type archiveProcessCallback func(string, string, *tar.Reader) bool
func CompressArchive(archivePath, outPath string) bool {
dupe, err := os.Create(outPath)
if err != nil {
log.Printf("Unable to open %v for writing\r\n", outPath)
return false
}
defer dupe.Close()
gzipPntr := gzip.NewWriter(dupe)
defer gzipPntr.Close()
basePntr, err := os.Open(archivePath)
if err != nil {
log.Printf("Unable to open %v for reading\r\n", archivePath)
return false
}
defer basePntr.Close()
io.Copy(gzipPntr, basePntr)
return true
}
// MergeIntoBaseArchive writes a new tar archive at file, seeded with the
// entries of baseArchive and extended with contents (paths resolved against
// basedir). When definitiveList is non-nil, only base entries whose relative
// path appears in it are copied entry-by-entry; otherwise the base archive's
// bytes are duplicated wholesale and the write position is moved back over
// the trailing zero blocks so appended entries remain readable.
func MergeIntoBaseArchive(baseArchive ArchiveInfo, basedir string, contents []FileInfo, file string, definitiveList *ContentsInfo) bool {
	// tar pntr for copy
	dupe, err := os.Create(file)
	if err != nil {
		log.Printf("Unable to open %v for reading\r\n", file)
		return false
	}
	defer dupe.Close()
	tw := tar.NewWriter(dupe)
	defer tw.Close()
	basePntr, err := os.Open(baseArchive.Path)
	if err != nil {
		log.Printf("Unable to open archive %v for appending\r\n", baseArchive.Path)
		return false
	}
	defer basePntr.Close()
	if definitiveList != nil {
		// Recursively copy, excluding entries not on the definitive list.
		existingTar := tar.NewReader(basePntr)
		for {
			hdr, err := existingTar.Next()
			if err == io.EOF {
				// end of tar archive
				break
			}
			// Check the error before touching hdr: on a non-EOF read failure
			// hdr is nil and dereferencing it would panic.
			if err != nil {
				log.Printf("Failed to read tar for duping \r\n")
				return false
			}
			checkName := strings.TrimPrefix(hdr.Name, "/")
			found := false
			for _, item := range definitiveList.Contents {
				if item.RelPath == checkName {
					found = true
					break
				}
			}
			if !found {
				continue
			}
			if err = tw.WriteHeader(hdr); err != nil {
				log.Printf("Failed copy header\r\n")
				return false
			}
			if _, err := io.Copy(tw, existingTar); err != nil {
				log.Printf("Unable to write %s (%v)\r\n", hdr.Name, err)
				return false
			}
		}
	} else {
		written, err := io.Copy(dupe, basePntr)
		if written == 0 {
			log.Printf("Warning: Did not write anything from %v to %v\r\n", baseArchive.Path, file)
			return false
		}
		if err != nil {
			log.Printf("Copy failed: %v\r\n", err)
			return false
		}
		// Step back over the two 512-byte zero blocks that terminate a tar
		// stream so the writer appends entries in place of them.
		if _, err := dupe.Seek(-2<<9, io.SeekEnd); err != nil {
			log.Printf("Seek failed: %v\r\n", err)
			return false
		}
	}
	// insert the new contents
	for _, item := range contents {
		res := writeFileToArchive(dupe, tw, item.AbsPath, basedir)
		if res == nil {
			log.Printf("Unable to add %v to new archive\r\n", item.AbsPath)
			return false
		}
	}
	return true
}
// CreateBaseArchive builds a new tar archive at file from contents (paths
// resolved against basedir) and returns metadata about the written items,
// or nil if any file could not be added.
func CreateBaseArchive(basedir string, contents []FileInfo, file string) *ArchiveInfo {
	tarPntr, err := os.Create(file)
	if err != nil {
		log.Printf("Unable to open base archive %v\r\n", file)
		return nil
	}
	defer tarPntr.Close()
	tw := tar.NewWriter(tarPntr)
	defer tw.Close()
	archive := ArchiveInfo{Path: file}
	count := len(contents)
	for idx, entry := range contents {
		written := writeFileToArchive(tarPntr, tw, entry.AbsPath, basedir)
		if written == nil {
			log.Printf("Failed to add %v to base archive.\r\n", entry.AbsPath)
			return nil
		}
		// Progress indicator on a single console line.
		fmt.Printf("\rArchiving %v / %v", idx+1, count)
		archive.Items = append(archive.Items, *written)
	}
	fmt.Println()
	return &archive
}
// writeFileToArchive appends file to the tar stream tw (backed by tarPntr),
// naming the entry after the path with the basedir prefix stripped. It
// returns an Item describing the byte span the entry occupies in the
// archive, or nil on failure.
func writeFileToArchive(tarPntr *os.File, tw *tar.Writer, file string, basedir string) *Item {
	curPos, err := tarPntr.Seek(0, io.SeekCurrent)
	if err != nil {
		log.Println("Unable to determine current position")
		return nil
	}
	stat, err := os.Stat(file)
	if err != nil {
		log.Printf("Unable to query file %v\r\n", file)
		return nil
	}
	hdr := &tar.Header{
		Name: strings.Replace(file, basedir, "", 1),
		Size: stat.Size(),
		// 0775 octal (rwxrwxr-x). The previous value 775 was decimal, which
		// encodes the nonsensical permission bits 01407.
		Mode:    0775,
		ModTime: stat.ModTime(),
	}
	if err := tw.WriteHeader(hdr); err != nil {
		log.Printf("Unable to write TAR header for %v\r\n", hdr.Name)
		return nil
	}
	filePntr, err := os.Open(file)
	if err != nil {
		log.Printf("Unable to open %v for reading\r\n", hdr.Name)
		return nil
	}
	defer filePntr.Close()
	// Stream in chunks via io.Copy to keep memory usage constant.
	if _, err := io.Copy(tw, filePntr); err != nil {
		log.Printf("Unable to write contents of %v\r\n", hdr.Name)
		return nil
	}
	endPos, err := tarPntr.Seek(0, io.SeekCurrent)
	if err != nil {
		log.Println("Unable to determine end position")
		return nil
	}
	return &Item{Start: curPos, Length: endPos - curPos, Name: hdr.Name}
}
// ExtractArchive decompresses the gzipped tar archive at file into
// directory, creating parent directories as needed. It returns false if any
// entry fails to extract.
func ExtractArchive(file, directory string) bool {
	return iterateOnArchive(file, directory, func(originalName, outputPath string, tarPntr *tar.Reader) bool {
		log.Printf("Extracting: %s\n", outputPath)
		totalPath := path.Dir(outputPath)
		if !tools.PathExists(totalPath) {
			// The MkdirAll error was previously discarded; a failed create
			// would only surface as a confusing os.Create error below.
			if err := os.MkdirAll(totalPath, 0770); err != nil {
				log.Printf("Failed to create directory %s (%v)\r\n", totalPath, err)
				return false
			}
		}
		writePntr, err := os.Create(outputPath)
		if err != nil {
			log.Printf("Failed open handler for %s (%v)\r\n", outputPath, err)
			return false
		}
		// The callback returns before the next entry is read, so this defer
		// closes the file promptly on every path.
		defer writePntr.Close()
		if _, err := io.Copy(writePntr, tarPntr); err != nil {
			log.Printf("Unable to write %s (%v)\r\n", outputPath, err)
			return false
		}
		return true
	})
}
// FetchFile returns the contents of the named entry inside the gzipped tar
// archive, or the empty string when the entry is absent or unreadable.
func FetchFile(archive, name string) string {
	var contents string
	iterateOnArchive(archive, "", func(originalName, outputPath string, tarPntr *tar.Reader) bool {
		if originalName != name {
			// Keep scanning for the requested entry.
			return true
		}
		buf := bytes.NewBuffer(nil)
		if _, err := io.Copy(buf, tarPntr); err != nil {
			log.Printf("Unable to read in %v\r\n", name)
			return false
		}
		contents = buf.String()
		// Found it; stop the iteration.
		return false
	})
	return contents
}
// iterateOnArchive opens the gzipped tar archive at file and invokes
// callback for each entry with the entry's name and an output path composed
// from directory. Iteration stops early (returning false) when the callback
// returns false or a read error occurs.
func iterateOnArchive(file, directory string, callback archiveProcessCallback) bool {
	filePntr, err := os.Open(file)
	if err != nil {
		log.Printf("Unable to open %v for reading\r\n", file)
		return false
	}
	defer filePntr.Close()
	gzipPntr, err := gzip.NewReader(filePntr)
	// Check the error before deferring Close: on a failure (e.g. a corrupt
	// gzip header) gzipPntr is nil and the deferred call would panic.
	if err != nil {
		log.Printf("Unable to read %v as gzip (%v)\r\n", file, err)
		return false
	}
	defer gzipPntr.Close()
	tarPntr := tar.NewReader(gzipPntr)
	for {
		hdr, err := tarPntr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			log.Printf("Failed to process %v archive", file)
			return false
		}
		// Join directory and entry name without doubling the separator.
		var outputPath string
		if strings.HasSuffix(directory, "/") || strings.HasPrefix(hdr.Name, "/") {
			outputPath = strings.Join([]string{directory, hdr.Name}, "")
		} else {
			outputPath = strings.Join([]string{directory, hdr.Name}, "/")
		}
		if !callback(hdr.Name, outputPath, tarPntr) {
			return false
		}
	}
	return true
}
|
package commands
import "code.cloudfoundry.org/cli/utils/config"
//go:generate counterfeiter . Config

// Config a way of getting basic CF configuration
type Config interface {
	// BinaryName returns the name of the running CLI binary.
	BinaryName() string
	// ColorEnabled returns whether colored terminal output is enabled.
	ColorEnabled() config.ColorSetting
	// Locale returns the configured locale.
	Locale() string
	// Plugins returns the installed plugins keyed by name.
	Plugins() map[string]config.Plugin
	// SetTargetInformation stores the endpoints of the targeted API.
	SetTargetInformation(api string, apiVersion string, auth string, loggregator string, doppler string, uaa string)
}
|
package cmd
import (
"bytes"
"errors"
"fmt"
"os"
"strings"
"time"
"github.com/a8uhnf/suich/pkg/utils"
"github.com/spf13/cobra"
)
const (
	// podInfoNameTitle is the header label of the NAME column in
	// `kubectl get pods` output; header rows are skipped when parsing.
	podInfoNameTitle = "NAME"
)

var (
	// follow mirrors the --follow/-f flag: stream logs when true.
	follow = false
)
// GetLogsCmd builds the logs cobra command for suich
func GetLogsCmd() *cobra.Command {
	command := &cobra.Command{
		Use:   "logs",
		Short: "Get logs for a certain pod",
		Long:  "Prompts the user with a list of pods names to display the logs",
		RunE:  getLogs,
	}
	flags := command.Flags()
	flags.StringP("namespace", "n", "", "The namespace to work on")
	flags.BoolVarP(&follow, "follow", "f", false, "Watch the logs")
	return command
}
// getLogs resolves a namespace (interactively when the user may list
// namespaces, otherwise from the --namespace flag), prompts for a pod in
// that namespace, and streams the pod's logs, following them when --follow
// is set.
func getLogs(cmd *cobra.Command, args []string) error {
	// The previous `second(); return nil` debug stub made everything below
	// unreachable, so the command never actually fetched logs.
	client := getKubernetesClient()
	var namespace string
	nList, err := getNamespaceNames(client)
	if err != nil {
		// Listing namespaces failed (e.g. insufficient RBAC); fall back to
		// the explicit flag.
		namespace, err = cmd.Flags().GetString("namespace")
		if err != nil {
			return err
		}
	} else {
		namespace, err = utils.RunPrompt(nList, "Select Namespace")
		if err != nil {
			return err
		}
	}
	if namespace == "" {
		return errors.New("Must provide namespace flag as you do not have access to list namespaces")
	}
	var podsBfr bytes.Buffer
	if err := utils.ExecCommand(&podsBfr, "kubectl", "get", "pods", "-n", namespace); err != nil {
		return err
	}
	pns := readAllPods(podsBfr)
	pod, err := utils.RunPrompt(pns, "Select Pod")
	if err != nil {
		return err
	}
	if follow {
		return utils.ExecCommand(os.Stdout, "kubectl", "logs", pod, "-n", namespace, "-f")
	}
	return utils.ExecCommand(os.Stdout, "kubectl", "logs", pod, "-n", namespace)
}
// second prints the duration elapsed since the Unix epoch, expressed in a
// fixed UTC+6 zone. It appears to be a leftover debug helper; it is not part
// of the logs command flow.
func second() {
	loc := time.FixedZone("some_common_name", 6*60*60)
	epoch := time.Date(1970, 1, 1, 0, 0, 0, 0, loc)
	// time.Since is the idiomatic form of time.Now().Sub(t).
	fmt.Printf("%v", time.Since(epoch))
}
// readAllPods parses `kubectl get pods` output and returns the pod names,
// skipping the header row and blank lines.
func readAllPods(out bytes.Buffer) []string {
	names := []string{}
	for _, line := range strings.Split(out.String(), "\n") {
		// The pod name is everything up to the first space; SplitN keeps
		// the behavior identical to indexing a full Split.
		first := strings.SplitN(line, " ", 2)[0]
		if first == "" || first == podInfoNameTitle {
			continue
		}
		names = append(names, first)
	}
	return names
}
|
package imageextractor
import (
"fmt"
"image"
"image/jpeg"
"log"
"os"
"strings"
"github.com/disintegration/imaging"
"github.com/otiai10/gosseract"
)
// ImageExtractor holds a loaded (and 2x-upscaled) screenshot together with
// its source path, and extracts text regions from it via OCR.
type ImageExtractor struct {
	image image.Image
	path  string
}

// goserractConfig carries the gosseract OCR settings for one extraction:
// the temp image path, character whitelist/blacklist, and language.
type goserractConfig struct {
	Image     string
	Whitelist string
	Blacklist string
	Language  string
}

// imageExtractConfig describes the crop rectangle (coordinates in the
// doubled-size image produced by New) and whether to strip all spaces from
// the OCR result.
type imageExtractConfig struct {
	x0            int
	y0            int
	x1            int
	y1            int
	CleanAllSpace bool
}

// Tesseract language codes.
const langEng = "eng"
const langJpn = "jpn"

// Extraction region identifiers accepted by Extractor.
const (
	MoneyScore = iota
	PlayDate
	PlayTime
	TitleEng
	TitleJpn
	AchievementRate
)
// New loads the image at imagePath, validates its dimensions, and returns an
// ImageExtractor holding a 2x upscaled copy for better OCR accuracy.
func New(imagePath string) (ImageExtractor, error) {
	// open image; return the error instead of killing the whole process
	// (the function already has an error return, log.Fatal was a bug).
	rawImage, err := imaging.Open(imagePath)
	if err != nil {
		return ImageExtractor{}, fmt.Errorf("error when loading image: %v", err)
	}
	validErr := validateImage(rawImage)
	if validErr != nil {
		return ImageExtractor{}, validErr
	}
	// get image size
	width := rawImage.Bounds().Dx()
	height := rawImage.Bounds().Dy()
	// double the image size to help OCR accuracy
	rawImage = imaging.Resize(rawImage, width*2, height*2, imaging.Lanczos)
	return ImageExtractor{
		image: rawImage,
		path:  imagePath,
	}, nil
}
// Extractor OCRs one region of the image, selected by extractType (one of
// MoneyScore, PlayDate, PlayTime, TitleEng, TitleJpn, AchievementRate), and
// returns the recognized text.
func (ie *ImageExtractor) Extractor(extractType int) (string, error) {
	gosseractConfig, imageConfig, err := ie.extractSelector(extractType)
	if err != nil {
		fmt.Println("Error Loading Config:", err)
		return "", err
	}
	// Crop the configured region and invert it for better OCR contrast.
	// (Named "cropped" so the image package is not shadowed.)
	cropped := imaging.Crop(ie.image, image.Rect(imageConfig.x0, imageConfig.y0, imageConfig.x1, imageConfig.y1))
	cropped = imaging.Invert(cropped)
	// Write the region to a temp file for gosseract to consume.
	out, err := os.Create(gosseractConfig.Image)
	if err != nil {
		fmt.Println(err)
		return "", err
	}
	// Check the encode error (previously ignored) and close the file before
	// gosseract opens it, so the data is fully flushed and the handle is
	// not leaked.
	err = jpeg.Encode(out, cropped, nil)
	out.Close()
	if err != nil {
		fmt.Println(err)
		return "", err
	}
	defer func() {
		if err := os.Remove(gosseractConfig.Image); err != nil {
			// Printf needs a format string; the old call passed extra args
			// with no verbs (a go vet error).
			log.Printf("Error in deleting file %v: %v", gosseractConfig.Image, err)
		}
	}()
	// goserract
	client := gosseract.NewClient()
	defer client.Close()
	client.SetImage(gosseractConfig.Image)
	client.SetLanguage(gosseractConfig.Language)
	if len(gosseractConfig.Whitelist) > 0 {
		client.SetWhitelist(gosseractConfig.Whitelist)
	}
	if len(gosseractConfig.Blacklist) > 0 {
		client.SetBlacklist(gosseractConfig.Blacklist)
	}
	text, _ := client.Text()
	if imageConfig.CleanAllSpace {
		text = strings.Replace(text, " ", "", -1)
	}
	return text, nil
}
// extractSelector maps an extraction region identifier to the OCR settings
// (temp image name, character whitelist, language) and the crop rectangle
// used to pull that region out of the screenshot. Coordinates refer to the
// doubled-size image produced by New. An unknown extractType yields an
// error.
func (ie *ImageExtractor) extractSelector(extractType int) (goserractConfig, imageExtractConfig, error) {
	switch extractType {
	case MoneyScore:
		// Digits only.
		return goserractConfig{
				Image:     ie.path + "_score",
				Whitelist: "0123456789",
				Language:  langEng,
			},
			imageExtractConfig{
				x0: 640,
				y0: 450,
				x1: 740,
				y1: 500,
			},
			nil
	case PlayDate:
		// Date in the form YYYY-MM-DD; spaces are stripped.
		return goserractConfig{
				Image:     ie.path + "_playdate",
				Whitelist: "0123456789-",
				Language:  langEng,
			},
			imageExtractConfig{
				x0:            940,
				y0:            40,
				x1:            1100,
				y1:            70,
				CleanAllSpace: true,
			},
			nil
	case PlayTime:
		// Clock time HH:MM; spaces are stripped.
		return goserractConfig{
				Image:     ie.path + "_playtime",
				Whitelist: "0123456789:",
				Language:  langEng,
			},
			imageExtractConfig{
				x0:            1100,
				y0:            40,
				x1:            1200,
				y1:            72,
				CleanAllSpace: true,
			},
			nil
	case TitleEng:
		// Free-form title text (English).
		return goserractConfig{
				Image:     ie.path + "_title",
				Whitelist: "",
				Language:  langEng,
			},
			imageExtractConfig{
				x0: 180,
				y0: 300,
				x1: 1100,
				y1: 340,
			},
			nil
	case TitleJpn:
		// Same region as TitleEng, OCRed with the Japanese model.
		return goserractConfig{
				Image:     ie.path + "_title",
				Whitelist: "",
				Language:  langJpn,
			},
			imageExtractConfig{
				x0: 180,
				y0: 300,
				x1: 1100,
				y1: 340,
			},
			nil
	case AchievementRate:
		// Decimal percentage; spaces are stripped.
		return goserractConfig{
				Image:     ie.path + "_rate",
				Whitelist: "0123456789.",
				Language:  langEng,
			},
			imageExtractConfig{
				x0:            620,
				y0:            790,
				x1:            720,
				y1:            840,
				CleanAllSpace: true,
			},
			nil
	default:
		return goserractConfig{}, imageExtractConfig{}, fmt.Errorf("Extract type not found")
	}
}
func validateImage(input image.Image) error {
if input.Bounds().Dx() != 600 || input.Bounds().Dy() != 480 {
return fmt.Errorf("seems like the picture is not taken from eamuse app")
}
return nil
}
|
package main
import (
"net/http"
hand "./handler"
"github.com/gorilla/mux"
"log"
)
var router = mux.NewRouter()

// main registers the application's HTTP routes and serves them on :8080.
func main() {
	log.Println("Server Starting")
	router.HandleFunc("/register/users", hand.Handler)
	router.HandleFunc("/signup", hand.SignUpHandler)
	router.HandleFunc("/login", hand.LoginHandler)
	router.HandleFunc("/api/products", hand.GetProductsHandler).Methods("GET")
	router.HandleFunc("/api/products/{id}", hand.GetProductHandler).Methods("GET")
	router.HandleFunc("/api/product", hand.PostProductHandler).Methods("POST")
	// Uppercase method names for consistency with the GET/POST routes
	// (gorilla/mux normalizes them, but mixed case reads as a bug).
	router.HandleFunc("/api/products/{id}", hand.PutProductHandler).Methods("PUT")
	router.HandleFunc("/api/products/{id}", hand.DeleteProductHandler).Methods("DELETE")
	server := &http.Server{
		Addr:    ":8080",
		Handler: router,
	}
	// ListenAndServe always returns a non-nil error; the previous code
	// discarded it, hiding bind failures (e.g. port already in use).
	if err := server.ListenAndServe(); err != nil {
		log.Println(err)
	}
	log.Println("Server ending...")
}
|
package standings
import (
"errors"
"net/http"
"strings"
"github.com/gin-gonic/gin"
)
// Shield gives Supporters Shield Standings
func Shield(c *gin.Context) {
	standings, err := GetShield()
	if err != nil {
		// Return after aborting; otherwise the handler would also write a
		// 200 JSON body on top of the aborted response.
		c.AbortWithError(http.StatusInternalServerError, err)
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"standings": standings,
	})
}
// Conference gives standings by Conference
func Conference(c *gin.Context) {
	conference := strings.ToLower(c.Param("conference"))
	if conference != "east" && conference != "west" {
		// Return after aborting; otherwise standings would still be
		// fetched and written for an invalid conference.
		c.AbortWithError(http.StatusBadRequest, errors.New("Invalid Conference"))
		return
	}
	standings, err := GetFor(conference)
	if err != nil {
		c.AbortWithError(http.StatusInternalServerError, err)
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"standings": standings,
	})
}
|
package flarmport
import (
"context"
"fmt"
"time"
"github.com/gorilla/websocket"
)
// Remote connects to a remote flarm server over a websocket and returns a
// connection whose Range method yields received data.
func Remote(addr string) (*Conn, error) {
	dialer := websocket.Dialer{HandshakeTimeout: 10 * time.Second}
	ws, _, err := dialer.Dial(addr, nil)
	if err != nil {
		return nil, fmt.Errorf("failed dialing %s: %v", addr, err)
	}
	return &Conn{conn: ws}, nil
}
// Conn wraps a websocket connection to a remote flarm server.
type Conn struct {
	conn *websocket.Conn
}
// Range reads messages until a read error occurs or ctx is canceled,
// invoking f for each value received. The connection is closed on return.
func (c *Conn) Range(ctx context.Context, f func(Data)) error {
	defer c.conn.Close()
	for {
		if err := ctx.Err(); err != nil {
			// Context canceled or deadline exceeded.
			return err
		}
		v, err := c.next()
		if err != nil {
			return err
		}
		// Drop values that arrive after cancellation.
		if ctx.Err() == nil {
			f(v)
		}
	}
}
// next reads a single JSON-encoded message from the connection. It is used
// in Range and exists separately for testing purposes.
func (c *Conn) next() (Data, error) {
	var data Data
	if err := c.conn.ReadJSON(&data); err != nil {
		return data, err
	}
	return data, nil
}
// Close shuts down the underlying websocket connection.
func (c *Conn) Close() error {
	err := c.conn.Close()
	return err
}
|
package controllers
import (
"log"
"net/http"
"time"
"github.com/stevenandrewcarter/terradex/internal/models"
)
// UnlockProject releases the lock on a project. It expects projectID and
// username values on the request context; it responds 400 when either is
// missing, 500 when unlocking fails, and 200 on success.
func UnlockProject(w http.ResponseWriter, r *http.Request) {
	if r.Context().Value("projectID") == nil {
		// Corrected message: this is the unlock handler, not lock.
		log.Print("Please provide a projectID in order to unlock the project.")
		w.WriteHeader(400)
		return
	}
	if r.Context().Value("username") == nil {
		log.Print("Please provide a username in order to unlock the project.")
		w.WriteHeader(400)
		return
	}
	projectID := r.Context().Value("projectID").(string)
	username := r.Context().Value("username").(string)
	log.Printf("Unlocking... %s for %s", projectID, username)
	project := models.Project{
		Id:          projectID,
		Username:    username,
		CreatedDate: time.Now(),
	}
	if err := project.Unlock(); err != nil {
		// Return here; otherwise WriteHeader(200) below would also run,
		// logging "superfluous response.WriteHeader" and masking the 500.
		w.WriteHeader(500)
		return
	}
	w.WriteHeader(200)
}
|
// Copyright 2017 Zack Guo <zack.y.guo@gmail.com>. All rights reserved.
// Use of this source code is governed by a MIT license that can
// be found in the LICENSE file.
// +build ignore
package main
import (
"log"
"math"
ui "github.com/gizak/termui/v3"
"github.com/gizak/termui/v3/widgets"
)
// main renders four demo plots (braille/dot line charts and scatter plots)
// with termui and blocks until the user presses q or Ctrl-C.
func main() {
	if err := ui.Init(); err != nil {
		log.Fatalf("failed to initialize termui: %v", err)
	}
	defer ui.Close()
	// Two series of 220 points: 1+sin(i/5) and 1+cos(i/5), shared by the
	// line chart and (partially) the scatter plots below.
	sinData := func() [][]float64 {
		n := 220
		data := make([][]float64, 2)
		data[0] = make([]float64, n)
		data[1] = make([]float64, n)
		for i := 0; i < n; i++ {
			data[0][i] = 1 + math.Sin(float64(i)/5)
			data[1][i] = 1 + math.Cos(float64(i)/5)
		}
		return data
	}()
	// Top-left: high-resolution braille line chart.
	p0 := widgets.NewPlot()
	p0.Title = "braille-mode Line Chart"
	p0.Data = sinData
	p0.SetRect(0, 0, 50, 15)
	p0.AxesColor = ui.ColorWhite
	p0.LineColors[0] = ui.ColorGreen
	// Top-right: dot-marker line chart drawn right-to-left.
	p1 := widgets.NewPlot()
	p1.Title = "dot-mode line Chart"
	p1.Marker = widgets.MarkerDot
	p1.Data = [][]float64{[]float64{1, 2, 3, 4, 5}}
	p1.SetRect(50, 0, 75, 10)
	p1.DotMarkerRune = '+'
	p1.AxesColor = ui.ColorWhite
	p1.LineColors[0] = ui.ColorYellow
	p1.DrawDirection = widgets.DrawLeft
	// Bottom-left: dot-marker scatter plot (x values vs cosine tail).
	p2 := widgets.NewPlot()
	p2.Title = "dot-mode Scatter Plot"
	p2.Marker = widgets.MarkerDot
	p2.Data = make([][]float64, 2)
	p2.Data[0] = []float64{1, 2, 3, 4, 5}
	p2.Data[1] = sinData[1][4:]
	p2.SetRect(0, 15, 50, 30)
	p2.AxesColor = ui.ColorWhite
	p2.LineColors[0] = ui.ColorCyan
	p2.PlotType = widgets.ScatterPlot
	// Bottom-right: same data rendered with braille markers.
	p3 := widgets.NewPlot()
	p3.Title = "braille-mode Scatter Plot"
	p3.Data = make([][]float64, 2)
	p3.Data[0] = []float64{1, 2, 3, 4, 5}
	p3.Data[1] = sinData[1][4:]
	p3.SetRect(45, 15, 80, 30)
	p3.AxesColor = ui.ColorWhite
	p3.LineColors[0] = ui.ColorCyan
	p3.Marker = widgets.MarkerBraille
	p3.PlotType = widgets.ScatterPlot
	ui.Render(p0, p1, p2, p3)
	// Block on the event stream until a quit key arrives.
	uiEvents := ui.PollEvents()
	for {
		e := <-uiEvents
		switch e.ID {
		case "q", "<C-c>":
			return
		}
	}
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
computepb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/compute/compute_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute"
)
// FirewallPolicyAssociationServer implements the gRPC interface for
// FirewallPolicyAssociation, translating between protobuf messages and the
// DCL compute client types.
type FirewallPolicyAssociationServer struct{}
// ProtoToFirewallPolicyAssociation converts a FirewallPolicyAssociation resource from its proto representation.
func ProtoToFirewallPolicyAssociation(p *computepb.ComputeFirewallPolicyAssociation) *compute.FirewallPolicyAssociation {
	return &compute.FirewallPolicyAssociation{
		Name:             dcl.StringOrNil(p.GetName()),
		AttachmentTarget: dcl.StringOrNil(p.GetAttachmentTarget()),
		FirewallPolicy:   dcl.StringOrNil(p.GetFirewallPolicy()),
		ShortName:        dcl.StringOrNil(p.GetShortName()),
	}
}
// FirewallPolicyAssociationToProto converts a FirewallPolicyAssociation resource to its proto representation.
func FirewallPolicyAssociationToProto(resource *compute.FirewallPolicyAssociation) *computepb.ComputeFirewallPolicyAssociation {
	proto := &computepb.ComputeFirewallPolicyAssociation{}
	proto.SetName(dcl.ValueOrEmptyString(resource.Name))
	proto.SetAttachmentTarget(dcl.ValueOrEmptyString(resource.AttachmentTarget))
	proto.SetFirewallPolicy(dcl.ValueOrEmptyString(resource.FirewallPolicy))
	proto.SetShortName(dcl.ValueOrEmptyString(resource.ShortName))
	return proto
}
// applyFirewallPolicyAssociation handles the gRPC request by passing it to the underlying FirewallPolicyAssociation Apply() method.
func (s *FirewallPolicyAssociationServer) applyFirewallPolicyAssociation(ctx context.Context, c *compute.Client, request *computepb.ApplyComputeFirewallPolicyAssociationRequest) (*computepb.ComputeFirewallPolicyAssociation, error) {
	res, err := c.ApplyFirewallPolicyAssociation(ctx, ProtoToFirewallPolicyAssociation(request.GetResource()))
	if err != nil {
		return nil, err
	}
	return FirewallPolicyAssociationToProto(res), nil
}
// ApplyComputeFirewallPolicyAssociation handles the gRPC request by passing it to the underlying FirewallPolicyAssociation Apply() method.
func (s *FirewallPolicyAssociationServer) ApplyComputeFirewallPolicyAssociation(ctx context.Context, request *computepb.ApplyComputeFirewallPolicyAssociationRequest) (*computepb.ComputeFirewallPolicyAssociation, error) {
	cl, err := createConfigFirewallPolicyAssociation(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyFirewallPolicyAssociation(ctx, cl, request)
}
// DeleteComputeFirewallPolicyAssociation handles the gRPC request by passing it to the underlying FirewallPolicyAssociation Delete() method.
func (s *FirewallPolicyAssociationServer) DeleteComputeFirewallPolicyAssociation(ctx context.Context, request *computepb.DeleteComputeFirewallPolicyAssociationRequest) (*emptypb.Empty, error) {
	cl, err := createConfigFirewallPolicyAssociation(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteFirewallPolicyAssociation(ctx, ProtoToFirewallPolicyAssociation(request.GetResource()))
}
// ListComputeFirewallPolicyAssociation handles the gRPC request by passing it to the underlying FirewallPolicyAssociationList() method.
func (s *FirewallPolicyAssociationServer) ListComputeFirewallPolicyAssociation(ctx context.Context, request *computepb.ListComputeFirewallPolicyAssociationRequest) (*computepb.ListComputeFirewallPolicyAssociationResponse, error) {
	cl, err := createConfigFirewallPolicyAssociation(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListFirewallPolicyAssociation(ctx, request.GetFirewallPolicy())
	if err != nil {
		return nil, err
	}
	// Convert each listed resource back to its proto representation.
	var protos []*computepb.ComputeFirewallPolicyAssociation
	for _, item := range resources.Items {
		protos = append(protos, FirewallPolicyAssociationToProto(item))
	}
	response := &computepb.ListComputeFirewallPolicyAssociationResponse{}
	response.SetItems(protos)
	return response, nil
}
// createConfigFirewallPolicyAssociation builds a DCL compute client
// authenticated with the given service account credentials file.
func createConfigFirewallPolicyAssociation(ctx context.Context, serviceAccountFile string) (*compute.Client, error) {
	// Parameter renamed from snake_case to Go-style mixedCaps; callers are
	// unaffected since parameter names are not part of the signature.
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return compute.NewClient(conf), nil
}
|
package main
import (
"os"
"os/signal"
"github.com/siggy/bbox/bbox"
)
// main wires up the beatboxer pipeline — a keyboard stage feeding beat and
// tempo changes into a loop stage, which feeds ticks and interval changes
// into a render stage — and blocks until interrupted.
func main() {
	sig := make(chan os.Signal, 1)
	// NOTE(review): os.Kill (SIGKILL) cannot be caught or handled in Go;
	// only the os.Interrupt registration has any effect here.
	signal.Notify(sig, os.Interrupt, os.Kill)
	// beat changes
	// keyboard => loop
	// keyboard => render
	msgs := []chan bbox.Beats{
		make(chan bbox.Beats),
		make(chan bbox.Beats),
	}
	// tempo changes
	// keyboard => loop
	tempo := make(chan int)
	// ticks
	// loop => render
	ticks := []chan int{
		make(chan int),
	}
	// interval changes
	// loop => render
	intervals := []chan bbox.Interval{
		make(chan bbox.Interval),
	}
	// keyboard broadcasts quit with close(msgs)
	keyboard := bbox.InitKeyboard(bbox.WriteonlyBeats(msgs), tempo, bbox.KeyMapsPC, false)
	loop := bbox.InitLoop(msgs[0], tempo, bbox.WriteonlyInt(ticks), bbox.WriteonlyInterval(intervals))
	render := bbox.InitRender(msgs[1], ticks[0], intervals[0])
	go keyboard.Run()
	go loop.Run()
	go render.Run()
	defer keyboard.Close()
	defer loop.Close()
	defer render.Close()
	// A for/select with a single case is just a blocking receive.
	<-sig
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schemacmp_test
import (
"testing"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/types"
. "github.com/pingcap/tidb/util/schemacmp"
"github.com/stretchr/testify/require"
)
// binary is the charset/collation name shared by the non-text fixtures below.
const binary = "binary"

// Field-type fixtures covering common MySQL column types; the comment above
// each gives the SQL column definition the builder call encodes.
var (
	// INT
	typeInt = types.NewFieldTypeBuilder().SetType(mysql.TypeLong).SetFlag(0).SetFlen(11).SetDecimal(0).SetCharset(binary).SetCollate(binary).SetElems(nil).BuildP()
	// INT NOT NULL
	typeIntNotNull = types.NewFieldTypeBuilder().SetType(mysql.TypeLong).SetFlag(mysql.NoDefaultValueFlag | mysql.NotNullFlag).SetFlen(10).SetDecimal(0).SetCharset(binary).SetCollate(binary).SetElems(nil).BuildP()
	// INT AUTO_INCREMENT UNIQUE
	typeIntAutoIncrementUnique = types.NewFieldTypeBuilder().SetType(mysql.TypeLong).SetFlag(mysql.AutoIncrementFlag | mysql.UniqueKeyFlag).SetFlen(11).SetDecimal(0).SetCharset(binary).SetCollate(binary).SetElems(nil).BuildP()
	// INT NOT NULL, KEY
	typeIntNotNullKey = types.NewFieldTypeBuilder().SetType(mysql.TypeLong).SetFlag(mysql.NoDefaultValueFlag | mysql.MultipleKeyFlag | mysql.NotNullFlag).SetFlen(11).SetDecimal(0).SetCharset(binary).SetCollate(binary).SetElems(nil).BuildP()
	// INT(1)
	typeInt1 = types.NewFieldTypeBuilder().SetType(mysql.TypeLong).SetFlag(0).SetFlen(1).SetDecimal(0).SetCharset(binary).SetCollate(binary).SetElems(nil).BuildP()
	// INT(22)
	typeInt22 = types.NewFieldTypeBuilder().SetType(mysql.TypeLong).SetFlag(0).SetFlen(22).SetDecimal(0).SetCharset(binary).SetCollate(binary).SetElems(nil).BuildP()
	// BIT(4)
	typeBit4 = types.NewFieldTypeBuilder().SetType(mysql.TypeBit).SetFlag(mysql.UnsignedFlag).SetFlen(4).SetDecimal(0).SetCharset(binary).SetCollate(binary).SetElems(nil).BuildP()
	// BIGINT(22) ZEROFILL
	typeBigInt22ZeroFill = types.NewFieldTypeBuilder().SetType(mysql.TypeLonglong).SetFlag(mysql.ZerofillFlag | mysql.UnsignedFlag).SetFlen(22).SetDecimal(0).SetCharset(binary).SetCollate(binary).SetElems(nil).BuildP()
	// DECIMAL(16, 8) DEFAULT 2.5
	typeDecimal16_8 = types.NewFieldTypeBuilder().SetType(mysql.TypeNewDecimal).SetFlag(0).SetFlen(16).SetDecimal(8).SetCharset(binary).SetCollate(binary).SetElems(nil).BuildP()
	// DECIMAL
	typeDecimal = types.NewFieldTypeBuilder().SetType(mysql.TypeNewDecimal).SetFlag(0).SetFlen(11).SetDecimal(0).SetCharset(binary).SetCollate(binary).SetElems(nil).BuildP()
	// DATE
	typeDate = types.NewFieldTypeBuilder().SetType(mysql.TypeDate).SetFlag(mysql.BinaryFlag).SetFlen(10).SetDecimal(0).SetCharset(binary).SetCollate(binary).SetElems(nil).BuildP()
	// DATETIME(3)
	typeDateTime3 = types.NewFieldTypeBuilder().SetType(mysql.TypeDatetime).SetFlag(mysql.BinaryFlag).SetFlen(23).SetDecimal(3).SetCharset(binary).SetCollate(binary).SetElems(nil).BuildP()
	// TIMESTAMP
	typeTimestamp = types.NewFieldTypeBuilder().SetType(mysql.TypeTimestamp).SetFlag(mysql.BinaryFlag).SetFlen(19).SetDecimal(0).SetCharset(binary).SetCollate(binary).SetElems(nil).BuildP()
	// TIME(6)
	typeTime6 = types.NewFieldTypeBuilder().SetType(mysql.TypeDuration).SetFlag(mysql.BinaryFlag).SetFlen(17).SetDecimal(6).SetCharset(binary).SetCollate(binary).SetElems(nil).BuildP()
	// YEAR(4)
	typeYear4 = types.NewFieldTypeBuilder().SetType(mysql.TypeYear).SetFlag(mysql.ZerofillFlag | mysql.UnsignedFlag).SetFlen(4).SetDecimal(0).SetCharset(binary).SetCollate(binary).SetElems(nil).BuildP()
	// CHAR(123)
	typeChar123 = types.NewFieldTypeBuilder().SetType(mysql.TypeString).SetFlag(0).SetFlen(123).SetDecimal(0).SetCharset(mysql.UTF8MB4Charset).SetCollate(mysql.UTF8MB4DefaultCollation).SetElems(nil).BuildP()
	// VARCHAR(65432) CHARSET ascii
	typeVarchar65432CharsetASCII = types.NewFieldTypeBuilder().SetType(mysql.TypeVarchar).SetFlag(0).SetFlen(65432).SetDecimal(0).SetCharset("ascii").SetCollate("ascii_bin").SetElems(nil).BuildP()
	// BINARY(69)
	typeBinary69 = types.NewFieldTypeBuilder().SetType(mysql.TypeString).SetFlag(mysql.BinaryFlag).SetFlen(69).SetDecimal(0).SetCharset(binary).SetCollate(binary).SetElems(nil).BuildP()
	// VARBINARY(420)
	typeVarBinary420 = types.NewFieldTypeBuilder().SetType(mysql.TypeVarchar).SetFlag(mysql.BinaryFlag).SetFlen(420).SetDecimal(0).SetCharset(binary).SetCollate(binary).SetElems(nil).BuildP()
	// LONGBLOB
	typeLongBlob = types.NewFieldTypeBuilder().SetType(mysql.TypeLongBlob).SetFlag(mysql.BinaryFlag).SetFlen(0xffffffff).SetDecimal(0).SetCharset(binary).SetCollate(binary).SetElems(nil).BuildP()
	// MEDIUMTEXT
	typeMediumText = types.NewFieldTypeBuilder().SetType(mysql.TypeMediumBlob).SetFlag(0).SetFlen(0xffffffff).SetDecimal(0).SetCharset(mysql.UTF8MB4Charset).SetCollate(mysql.UTF8MB4DefaultCollation).SetElems(nil).BuildP()
	// ENUM('tidb', 'tikv', 'tiflash', 'golang', 'rust')
	typeEnum5 = types.NewFieldTypeBuilder().SetType(mysql.TypeEnum).SetFlag(0).SetFlen(types.UnspecifiedLength).SetDecimal(0).SetCharset(mysql.UTF8MB4Charset).SetCollate(mysql.UTF8MB4DefaultCollation).SetElems([]string{"tidb", "tikv", "tiflash", "golang", "rust"}).BuildP()
	// ENUM('tidb', 'tikv')
	typeEnum2 = types.NewFieldTypeBuilder().SetType(mysql.TypeEnum).SetFlag(0).SetFlen(types.UnspecifiedLength).SetDecimal(0).SetCharset(mysql.UTF8MB4Charset).SetCollate(mysql.UTF8MB4DefaultCollation).SetElems([]string{"tidb", "tikv"}).BuildP()
	// SET('tidb', 'tikv', 'tiflash', 'golang', 'rust')
	typeSet5 = types.NewFieldTypeBuilder().SetType(mysql.TypeSet).SetFlag(0).SetFlen(types.UnspecifiedLength).SetDecimal(0).SetCharset(mysql.UTF8MB4Charset).SetCollate(mysql.UTF8MB4DefaultCollation).SetElems([]string{"tidb", "tikv", "tiflash", "golang", "rust"}).BuildP()
	// SET('tidb', 'tikv')
	typeSet2 = types.NewFieldTypeBuilder().SetType(mysql.TypeSet).SetFlag(0).SetFlen(types.UnspecifiedLength).SetDecimal(0).SetCharset(mysql.UTF8MB4Charset).SetCollate(mysql.UTF8MB4DefaultCollation).SetElems([]string{"tidb", "tikv"}).BuildP()
	// JSON
	typeJSON = types.NewFieldTypeBuilder().SetType(mysql.TypeJSON).SetFlag(mysql.BinaryFlag).SetFlen(0xffffffff).SetDecimal(0).SetCharset(binary).SetCollate(binary).SetElems(nil).BuildP()
)
// TestTypeUnwrap verifies that wrapping a FieldType via Type and unwrapping
// it again yields an equivalent FieldType across a broad sample of types.
func TestTypeUnwrap(t *testing.T) {
	sampleTypes := []*types.FieldType{
		typeInt,
		typeIntNotNull,
		typeIntAutoIncrementUnique,
		typeIntNotNullKey,
		typeInt1,
		typeInt22,
		typeBit4,
		typeBigInt22ZeroFill,
		typeDecimal16_8,
		typeDecimal,
		typeDate,
		typeDateTime3,
		typeTimestamp,
		typeTime6,
		typeYear4,
		typeChar123,
		typeVarchar65432CharsetASCII,
		typeBinary69,
		typeVarBinary420,
		typeLongBlob,
		typeMediumText,
		typeEnum5,
		typeEnum2,
		typeSet5,
		typeSet2,
		typeJSON,
	}
	for _, ft := range sampleTypes {
		wrapped := Type(ft)
		require.EqualValues(t, ft, wrapped.Unwrap())
	}
}
// TestTypeCompareJoin exercises Type.Compare and Type.Join on pairs of field
// types. For each pair it checks that Compare is antisymmetric (swapping the
// operands negates the result) and that a successful Join produces an upper
// bound of both operands (compares >= 0 against each).
func TestTypeCompareJoin(t *testing.T) {
	testCases := []struct {
		a             *types.FieldType
		b             *types.FieldType
		compareResult int    // expected sign of a.Compare(b) when no error
		compareError  string // regexp matched against the Compare error, if any
		join          *types.FieldType
		joinError     string // regexp matched against the Join error, if any
	}{
		{
			a:             typeInt,
			b:             typeInt22,
			compareResult: -1,
			join:          typeInt22,
		},
		{
			a:             typeInt1,
			b:             typeInt,
			compareResult: -1,
			join:          typeInt,
		},
		{
			a:             typeInt,
			b:             typeIntNotNull,
			compareResult: 1,
			join:          typeInt,
		},
		{
			// Cannot join DEFAULT NULL with AUTO_INCREMENT.
			a:            typeInt,
			b:            typeIntAutoIncrementUnique,
			compareError: `at tuple index \d+: distinct singletons.*`, // TODO: Improve error messages.
			joinError:    `at tuple index \d+: distinct singletons.*`,
		},
		{
			// INT NOT NULL <join> INT AUTO_INC UNIQUE = INT AUTO_INC,
			// but an AUTO_INC column must be defined with a key, so the join is invalid.
			a:            typeIntNotNull,
			b:            typeIntAutoIncrementUnique,
			compareError: `at tuple index \d+: combining contradicting orders.*`,
			joinError:    `auto type but not defined as a key`,
		},
		{
			// INT NOT NULL KEY <join> INT AUTO_INC UNIQUE = INT AUTO_INC KEY,
			a:            typeIntNotNullKey,
			b:            typeIntAutoIncrementUnique,
			compareError: `at tuple index \d+: combining contradicting orders.*`,
			join:         types.NewFieldTypeBuilder().SetType(mysql.TypeLong).SetFlag(mysql.AutoIncrementFlag | mysql.MultipleKeyFlag).SetFlen(11).SetDecimal(0).SetCharset(binary).SetCollate(binary).SetElems(nil).BuildP(),
		},
		{
			// DECIMAL of different Flen/Decimal cannot be compared
			a:            typeDecimal16_8,
			b:            typeDecimal,
			compareError: `at tuple index \d+: distinct singletons.*`,
			joinError:    `at tuple index \d+: distinct singletons.*`,
		},
		{
			a:            typeVarchar65432CharsetASCII,
			b:            typeVarBinary420,
			compareError: `at tuple index \d+: distinct singletons.*`,
			joinError:    `at tuple index \d+: distinct singletons.*`,
		},
		{
			a:             typeEnum5,
			b:             typeEnum2,
			compareResult: 1,
			join:          typeEnum5,
		},
		{
			a:             typeSet2,
			b:             typeSet5,
			compareResult: -1,
			join:          typeSet5,
		},
		{
			a:            typeSet5,
			b:            typeEnum5,
			compareError: `at tuple index \d+: incompatible mysql type.*`,
			joinError:    `at tuple index \d+: incompatible mysql type.*`,
		},
	}
	for _, tc := range testCases {
		a := Type(tc.a)
		b := Type(tc.b)
		cmp, err := a.Compare(b)
		if len(tc.compareError) != 0 {
			// Log the unexpected comparison result for easier debugging
			// before the Regexp assertion reports the missing error.
			if err == nil {
				t.Log(cmp)
			}
			require.Regexp(t, tc.compareError, err)
		} else {
			require.NoError(t, err)
			require.Equal(t, tc.compareResult, cmp)
		}
		// Compare must be antisymmetric: swapping operands flips the sign.
		cmp, err = b.Compare(a)
		if len(tc.compareError) != 0 {
			require.Regexp(t, tc.compareError, err)
		} else {
			require.NoError(t, err)
			require.Equal(t, -tc.compareResult, cmp)
		}
		wrappedJoin, err := a.Join(b)
		if len(tc.joinError) != 0 {
			require.Regexp(t, tc.joinError, err)
		} else {
			require.NoError(t, err)
			require.EqualValues(t, tc.join, wrappedJoin.Unwrap())
			// A valid join is an upper bound of both operands.
			cmp, err = wrappedJoin.Compare(a)
			require.NoError(t, err)
			require.GreaterOrEqual(t, cmp, 0)
			cmp, err = wrappedJoin.Compare(b)
			require.NoError(t, err)
			require.GreaterOrEqual(t, cmp, 0)
		}
	}
}
|
package repository
import "github.com/lfmexi/tcpgateway/session/model"
// SessionRepository is the session repository. Implementations persist
// session records to some backing store.
type SessionRepository interface {
	// Insert stores a new session; it returns a non-nil error on failure.
	Insert(*model.Session) error
	// Update modifies an already-stored session; it returns a non-nil
	// error on failure.
	Update(*model.Session) error
}
|
//https://leetcode-cn.com/problems/longest-palindromic-substring/
package main
import "fmt"
func main() {
// s := "babad"
// s := "cbbd"
// s := "bb"
// s := ""
// if s == "" || len(s) < 2 {
// return ""
// }
fmt.Println(s)
start := 0
end := 0
strLen := 0
for i := 0; i < len(s); i++ {
len1 := expandAroundCenter(s, i, i)
len2 := expandAroundCenter(s, i, i+1)
tmpLen := max(len1, len2)
if strLen < tmpLen {
strLen = tmpLen
start = i - (strLen-1)/2
end = i + strLen/2
}
}
fmt.Println(s[start : end+1])
}
// expandAroundCenter grows a palindrome outward from the given center
// (left == right for an odd-length center, right == left+1 for an
// even-length one) and returns the length of the widest palindrome found.
// (Method four: the center-expansion algorithm.)
func expandAroundCenter(s string, left int, right int) int {
	for left >= 0 && right < len(s) && s[left] == s[right] {
		left--
		right++
	}
	return right - left - 1
}
// max returns the larger of a and b.
func max(a int, b int) int {
	if a >= b {
		return a
	}
	return b
}
|
package collection
import (
"path/filepath"
"reflect"
"runtime"
"testing"
"github.com/forensicanalysis/artifactlib/goartifacts"
"github.com/forensicanalysis/forensicstore/goforensicstore"
)
// Test_collectorResolver_Resolve checks that a Collector built from two
// artifact definitions resolves the "environ_systemroot" parameter to the
// expected path. The test body only runs when the current OS matches the
// test case's os field, since the fixture paths are OS-specific.
func Test_collectorResolver_Resolve(t *testing.T) {
	// Fixture: an artifact whose sources "provide" values (environ_systemroot
	// and friends) that other artifacts can reference.
	windowsEnvironmentVariableSystemRoot := goartifacts.ArtifactDefinition{
		Name: "WindowsEnvironmentVariableSystemRoot",
		Doc:  `The system root directory path, defined by %SystemRoot%, typically "C:\Windows".`,
		Sources: []goartifacts.Source{{
			Type: "PATH",
			Attributes: goartifacts.Attributes{
				Paths:     []string{`\Windows`, `\WinNT`, `\WINNT35`, `\WTSRV`},
				Separator: `\`,
			},
			Provides: []goartifacts.Provide{
				{Key: "environ_systemroot"},
				{Key: "environ_windir"},
				{Key: "environ_systemdrive", Regex: `^(..)`},
			},
		}, {
			Type: "REGISTRY_VALUE",
			Attributes: goartifacts.Attributes{
				KeyValuePairs: []goartifacts.KeyValuePair{{
					Key:   `HKEY_LOCAL_MACHINE\Software\Microsoft\Windows NT\CurrentVersion`,
					Value: `SystemRoot`,
				}},
			},
			Provides: []goartifacts.Provide{
				{Key: "environ_systemroot"},
				{Key: "environ_windir"},
				{Key: "environ_systemdrive", Regex: `^(..)`},
			},
		}},
		SupportedOs: []string{"Windows"},
		Urls:        []string{"http://environmentvariables.org/SystemRoot"},
	}
	// Fixture: an artifact whose path expands %%environ_systemroot%%,
	// i.e. it consumes the value provided above.
	windowsSystemEventLogEvtx := goartifacts.ArtifactDefinition{
		Name: "WindowsSystemEventLogEvtxFile",
		Doc:  "Windows System Event log for Vista or later systems.",
		Sources: []goartifacts.Source{{
			Type: "FILE",
			Attributes: goartifacts.Attributes{
				Paths:     []string{`%%environ_systemroot%%\System32\winevt\Logs\System.evtx`},
				Separator: `\`,
			},
		}},
		Conditions:  []string{"os_major_version >= 6"},
		Labels:      []string{"Logs"},
		SupportedOs: []string{"Windows"},
		Urls:        []string{"http://www.forensicswiki.org/wiki/Windows_XML_Event_Log_(EVTX)"},
	}
	type args struct {
		parameter string
	}
	tests := []struct {
		name         string
		args         args
		wantResolves []string
		wantErr      bool
		os           string // only run when this matches runtime.GOOS
	}{
		{"Resolve test", args{"environ_systemroot"}, []string{`/C/Windows`}, false, "windows"},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Skip silently on other platforms; the fixture data is
			// Windows-specific.
			if tt.os == runtime.GOOS {
				testDir := setup(t)
				defer teardown(t)
				store, err := goforensicstore.NewJSONLite(filepath.Join(testDir, "extract", "ac.forensicstore"))
				if err != nil {
					t.Errorf("Collect() error = %v", err)
					return
				}
				collector, err := NewCollector(store, "", []goartifacts.ArtifactDefinition{windowsSystemEventLogEvtx, windowsEnvironmentVariableSystemRoot})
				if err != nil {
					t.Errorf("NewCollector() error = %v", err)
					return
				}
				gotResolves, err := collector.Resolve(tt.args.parameter)
				if (err != nil) != tt.wantErr {
					t.Errorf("Resolve() error = %v, wantErr %v", err, tt.wantErr)
					return
				}
				if !reflect.DeepEqual(gotResolves, tt.wantResolves) {
					t.Errorf("Resolve() gotResolves = %v, want %v", gotResolves, tt.wantResolves)
				}
			}
		})
	}
}
|
package checksum
import "testing"
// TestEncode calls Encode with fixed sample inputs and logs the resulting
// checksum; the test fails only if Encode reports an error.
func TestEncode(t *testing.T) {
	checksum, err := Encode("appSecret", "nonce", "time")
	if err != nil {
		t.Error(err)
		return
	}
	t.Log(checksum)
}
|
package host
import (
"crypto/md5"
"fmt"
"io"
)
// Host struct host information about discovered network client
type Host struct {
	// id caches the MD5 hash computed lazily by ID().
	id string
	// IP is the client's IP address.
	IP string
	// MAC is the client's hardware (MAC) address.
	MAC string
}
// ID returns a stable identifier for the host: the hex-encoded MD5 digest
// of its IP concatenated with its MAC address. The digest is computed on
// first use and memoized in h.id for all subsequent calls.
func (h *Host) ID() string {
	if h.id != "" {
		return h.id
	}
	digest := md5.New()
	_, _ = io.WriteString(digest, h.IP+h.MAC)
	h.id = fmt.Sprintf("%x", digest.Sum(nil))
	return h.id
}
|
package main
import (
"context"
pdd "go_interview/advanced_go_programming/chapter04/rpc_hello_05/grpc_hello_03/grpc_hello_publisher"
"google.golang.org/grpc"
"log"
)
// main dials the local gRPC publish service and publishes two sample
// messages, aborting the process on any connection or publish failure.
func main() {
	conn, err := grpc.Dial("localhost:1234", grpc.WithInsecure())
	if err != nil {
		log.Fatal(" conn err:", err)
	}
	defer conn.Close()
	publisher := pdd.NewPublishServiceClient(conn)
	if _, err = publisher.Publish(context.Background(), &pdd.String{
		Value: "golang: hello GO",
	}); err != nil {
		log.Fatal(" client publish err:", err)
	}
	if _, err = publisher.Publish(context.Background(), &pdd.String{
		Value: "docker: hello docker",
	}); err != nil {
		log.Fatal("client publish err:", err)
	}
}
|
package kubernetes
import (
"context"
"errors"
"strings"
"testing"
"github.com/brigadecore/brigade/v2/apiserver/internal/api"
"github.com/brigadecore/brigade/v2/apiserver/internal/lib/queue"
"github.com/brigadecore/brigade/v2/apiserver/internal/meta"
myk8s "github.com/brigadecore/brigade/v2/internal/kubernetes"
uuid "github.com/satori/go.uuid"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
)
// TestNewSubstrate verifies that NewSubstrate returns a *substrate wired
// with exactly the client, queue writer factory, and config it was given.
func TestNewSubstrate(t *testing.T) {
	testClient := fake.NewSimpleClientset()
	testQueueWriterFactory := &mockQueueWriterFactory{}
	testConfig := SubstrateConfig{}
	sub := NewSubstrate(testClient, testQueueWriterFactory, testConfig)
	s, ok := sub.(*substrate)
	require.True(t, ok)
	require.Same(t, testClient, s.kubeClient)
	require.Same(t, testQueueWriterFactory, s.queueWriterFactory)
	require.Equal(t, testConfig, s.config)
}
// TestSubstrateCountRunningWorkers creates two running pods -- one labeled
// as a job component and one labeled as a worker component -- and verifies
// that CountRunningWorkers counts only the worker pod.
func TestSubstrateCountRunningWorkers(t *testing.T) {
	const testBrigadeID = "4077th"
	const testNamespace = "foo"
	kubeClient := fake.NewSimpleClientset()
	podsClient := kubeClient.CoreV1().Pods(testNamespace)
	// This pod doesn't have correct labels (it is a job, not a worker)
	_, err := podsClient.Create(
		context.Background(),
		&corev1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "bar",
				Labels: map[string]string{
					myk8s.LabelBrigadeID: testBrigadeID,
					myk8s.LabelComponent: myk8s.LabelKeyJob,
				},
			},
			Status: corev1.PodStatus{
				Phase: corev1.PodRunning,
			},
		},
		metav1.CreateOptions{},
	)
	require.NoError(t, err)
	// This pod has correct labels
	_, err = podsClient.Create(
		context.Background(),
		&corev1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "bat",
				Labels: map[string]string{
					myk8s.LabelBrigadeID: testBrigadeID,
					myk8s.LabelComponent: myk8s.LabelKeyWorker,
				},
			},
			Status: corev1.PodStatus{
				Phase: corev1.PodRunning,
			},
		},
		metav1.CreateOptions{},
	)
	require.NoError(t, err)
	s := &substrate{
		config: SubstrateConfig{
			BrigadeID: testBrigadeID,
		},
		kubeClient: kubeClient,
	}
	count, err := s.CountRunningWorkers(context.Background())
	require.NoError(t, err)
	// Only the worker-labeled pod should be counted.
	require.Equal(t, 1, count.Count)
}
// TestSubstrateCountRunningJobs creates two running pods -- one labeled as a
// worker component and one labeled as a job component -- and verifies that
// CountRunningJobs counts only the job pod.
func TestSubstrateCountRunningJobs(t *testing.T) {
	const testBrigadeID = "4077th"
	const testNamespace = "foo"
	kubeClient := fake.NewSimpleClientset()
	podsClient := kubeClient.CoreV1().Pods(testNamespace)
	// This pod doesn't have correct labels (it is a worker, not a job)
	_, err := podsClient.Create(
		context.Background(),
		&corev1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "bar",
				Labels: map[string]string{
					myk8s.LabelBrigadeID: testBrigadeID,
					myk8s.LabelComponent: myk8s.LabelKeyWorker,
				},
			},
			Status: corev1.PodStatus{
				Phase: corev1.PodRunning,
			},
		},
		metav1.CreateOptions{},
	)
	require.NoError(t, err)
	// This pod has correct labels
	_, err = podsClient.Create(
		context.Background(),
		&corev1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "bat",
				Labels: map[string]string{
					myk8s.LabelBrigadeID: testBrigadeID,
					myk8s.LabelComponent: myk8s.LabelKeyJob,
				},
			},
			Status: corev1.PodStatus{
				Phase: corev1.PodRunning,
			},
		},
		metav1.CreateOptions{},
	)
	require.NoError(t, err)
	s := &substrate{
		config: SubstrateConfig{
			BrigadeID: testBrigadeID,
		},
		kubeClient: kubeClient,
	}
	count, err := s.CountRunningJobs(context.Background())
	require.NoError(t, err)
	// Only the job-labeled pod should be counted.
	require.Equal(t, 1, count.Count)
}
// TestSubstrateCreateProject is a table-driven test of substrate.CreateProject.
// Each failure case pre-creates one of the Kubernetes resources CreateProject
// is expected to create (namespace, roles, service accounts, role bindings,
// project secret) so the corresponding Create call fails with "already
// exists"; the success case verifies every expected resource was created.
func TestSubstrateCreateProject(t *testing.T) {
	const testNamespace = "foo"
	testCases := []struct {
		name       string
		setup      func() *fake.Clientset
		assertions func(api.Project, error, *fake.Clientset)
	}{
		{
			name: "error creating namespace",
			setup: func() *fake.Clientset {
				kubeClient := fake.NewSimpleClientset()
				// We'll force an error due to the Namespace already existing
				_, err := kubeClient.CoreV1().Namespaces().Create(
					context.Background(),
					&corev1.Namespace{
						ObjectMeta: metav1.ObjectMeta{
							Name: testNamespace,
						},
					},
					metav1.CreateOptions{},
				)
				require.NoError(t, err)
				return kubeClient
			},
			assertions: func(
				project api.Project,
				err error,
				kubeClient *fake.Clientset,
			) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "error creating namespace")
				require.Contains(t, err.Error(), "already exists")
			},
		},
		{
			name: "error creating workers role",
			setup: func() *fake.Clientset {
				kubeClient := fake.NewSimpleClientset()
				// We'll force an error due to the Role already existing
				_, err := kubeClient.RbacV1().Roles(testNamespace).Create(
					context.Background(),
					&rbacv1.Role{
						ObjectMeta: metav1.ObjectMeta{
							Name: "workers",
						},
					},
					metav1.CreateOptions{},
				)
				require.NoError(t, err)
				return kubeClient
			},
			assertions: func(
				project api.Project,
				err error,
				kubeClient *fake.Clientset,
			) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "error creating role")
				require.Contains(t, err.Error(), "already exists")
			},
		},
		{
			name: "error creating workers service account",
			setup: func() *fake.Clientset {
				kubeClient := fake.NewSimpleClientset()
				// We'll force an error due to the ServiceAccount already existing
				_, err := kubeClient.CoreV1().ServiceAccounts(testNamespace).Create(
					context.Background(),
					&corev1.ServiceAccount{
						ObjectMeta: metav1.ObjectMeta{
							Name: "workers",
						},
					},
					metav1.CreateOptions{},
				)
				require.NoError(t, err)
				return kubeClient
			},
			assertions: func(
				project api.Project,
				err error,
				kubeClient *fake.Clientset,
			) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "error creating service account")
				require.Contains(t, err.Error(), "already exists")
			},
		},
		{
			name: "error creating workers role binding",
			setup: func() *fake.Clientset {
				kubeClient := fake.NewSimpleClientset()
				// We'll force an error due to the RoleBinding already existing
				_, err := kubeClient.RbacV1().RoleBindings(testNamespace).Create(
					context.Background(),
					&rbacv1.RoleBinding{
						ObjectMeta: metav1.ObjectMeta{
							Name: "workers",
						},
					},
					metav1.CreateOptions{},
				)
				require.NoError(t, err)
				return kubeClient
			},
			assertions: func(
				project api.Project,
				err error,
				kubeClient *fake.Clientset,
			) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "error creating role binding")
				require.Contains(t, err.Error(), "already exists")
			},
		},
		{
			name: "error creating jobs role",
			setup: func() *fake.Clientset {
				kubeClient := fake.NewSimpleClientset()
				// We'll force an error due to the Role already existing
				_, err := kubeClient.RbacV1().Roles(testNamespace).Create(
					context.Background(),
					&rbacv1.Role{
						ObjectMeta: metav1.ObjectMeta{
							Name: "jobs",
						},
					},
					metav1.CreateOptions{},
				)
				require.NoError(t, err)
				return kubeClient
			},
			assertions: func(
				project api.Project,
				err error,
				kubeClient *fake.Clientset,
			) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "error creating role")
				require.Contains(t, err.Error(), "already exists")
			},
		},
		{
			name: "error creating jobs service account",
			setup: func() *fake.Clientset {
				kubeClient := fake.NewSimpleClientset()
				// We'll force an error due to the ServiceAccount already existing
				_, err := kubeClient.CoreV1().ServiceAccounts(testNamespace).Create(
					context.Background(),
					&corev1.ServiceAccount{
						ObjectMeta: metav1.ObjectMeta{
							Name: "jobs",
						},
					},
					metav1.CreateOptions{},
				)
				require.NoError(t, err)
				return kubeClient
			},
			assertions: func(
				project api.Project,
				err error,
				kubeClient *fake.Clientset,
			) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "error creating service account")
				require.Contains(t, err.Error(), "already exists")
			},
		},
		{
			name: "error creating jobs role binding",
			setup: func() *fake.Clientset {
				kubeClient := fake.NewSimpleClientset()
				// We'll force an error due to the RoleBinding already existing
				_, err := kubeClient.RbacV1().RoleBindings(testNamespace).Create(
					context.Background(),
					&rbacv1.RoleBinding{
						ObjectMeta: metav1.ObjectMeta{
							Name: "jobs",
						},
					},
					metav1.CreateOptions{},
				)
				require.NoError(t, err)
				return kubeClient
			},
			assertions: func(
				project api.Project,
				err error,
				kubeClient *fake.Clientset,
			) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "error creating role binding")
				require.Contains(t, err.Error(), "already exists")
			},
		},
		{
			name: "error creating project secret",
			setup: func() *fake.Clientset {
				kubeClient := fake.NewSimpleClientset()
				// We'll force an error due to the Secret already existing
				_, err := kubeClient.CoreV1().Secrets(testNamespace).Create(
					context.Background(),
					&corev1.Secret{
						ObjectMeta: metav1.ObjectMeta{
							Name: "project-secrets",
						},
					},
					metav1.CreateOptions{},
				)
				require.NoError(t, err)
				return kubeClient
			},
			assertions: func(
				project api.Project,
				err error,
				kubeClient *fake.Clientset,
			) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "error creating secret")
				require.Contains(t, err.Error(), "already exists")
			},
		},
		{
			name: "success",
			setup: func() *fake.Clientset {
				return fake.NewSimpleClientset()
			},
			assertions: func(
				project api.Project,
				err error,
				kubeClient *fake.Clientset,
			) {
				require.NoError(t, err)
				// Check that the project was augmented with Kubernetes-specific details
				require.NotNil(t, project.Kubernetes)
				require.NotEmpty(t, project.Kubernetes.Namespace)
				// Check that an RBAC Role was created for the Project's Workers
				role, err := kubeClient.RbacV1().Roles(
					project.Kubernetes.Namespace,
				).Get(context.Background(), "workers", metav1.GetOptions{})
				require.NoError(t, err)
				require.NotNil(t, role)
				// Check that a ServiceAccount was created for the Project's Workers
				// NOTE(review): "servicAccount" is misspelled; consider renaming
				// to serviceAccount.
				servicAccount, err := kubeClient.CoreV1().ServiceAccounts(
					project.Kubernetes.Namespace,
				).Get(context.Background(), "workers", metav1.GetOptions{})
				require.NoError(t, err)
				require.NotNil(t, servicAccount)
				// Check that an RBAC RoleBinding associates the Workers' ServiceAccount
				// with the Workers' RBAC Role
				roleBinding, err := kubeClient.RbacV1().RoleBindings(
					project.Kubernetes.Namespace,
				).Get(context.Background(), "workers", metav1.GetOptions{})
				require.NoError(t, err)
				require.NotNil(t, roleBinding)
				// Check that an RBAC Role was created for the Project's Jobs
				role, err = kubeClient.RbacV1().Roles(
					project.Kubernetes.Namespace,
				).Get(context.Background(), "jobs", metav1.GetOptions{})
				require.NoError(t, err)
				require.NotNil(t, role)
				// Check that a ServiceAccount was created for the Project's Jobs
				servicAccount, err = kubeClient.CoreV1().ServiceAccounts(
					project.Kubernetes.Namespace,
				).Get(context.Background(), "jobs", metav1.GetOptions{})
				require.NoError(t, err)
				require.NotNil(t, servicAccount)
				// Check that an RBAC RoleBinding associates the Jobs' ServiceAccount
				// with the Jobs' RBAC Role
				roleBinding, err = kubeClient.RbacV1().RoleBindings(
					project.Kubernetes.Namespace,
				).Get(context.Background(), "jobs", metav1.GetOptions{})
				require.NoError(t, err)
				require.NotNil(t, roleBinding)
				// Check that a Secret was created to store the Project's Secrets
				secrets, err := kubeClient.CoreV1().Secrets(
					project.Kubernetes.Namespace,
				).Get(context.Background(), "project-secrets", metav1.GetOptions{})
				require.NoError(t, err)
				require.NotNil(t, secrets)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			kubeClient := testCase.setup()
			s := &substrate{
				// Pin namespace generation so each case targets testNamespace.
				generateNewNamespaceFn: func() string {
					return testNamespace
				},
				kubeClient: kubeClient,
			}
			project, err := s.CreateProject(context.Background(), api.Project{})
			testCase.assertions(project, err, kubeClient)
		})
	}
}
// TestSubstrateDeleteProject verifies that DeleteProject fails when the
// project's namespace does not exist and, on success, actually removes
// the namespace.
func TestSubstrateDeleteProject(t *testing.T) {
	const testNamespace = "foo"
	testCases := []struct {
		name       string
		setup      func() *fake.Clientset
		assertions func(error, *fake.Clientset)
	}{
		{
			name: "error deleting namespace",
			setup: func() *fake.Clientset {
				// No namespace is pre-created, so the delete must fail.
				return fake.NewSimpleClientset()
			},
			assertions: func(err error, kubeClient *fake.Clientset) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "not found")
				require.Contains(t, err.Error(), "error deleting namespace")
			},
		},
		{
			name: "success",
			setup: func() *fake.Clientset {
				kubeClient := fake.NewSimpleClientset()
				// Make sure the Namespace exists so it can be deleted
				_, err := kubeClient.CoreV1().Namespaces().Create(
					context.Background(),
					&corev1.Namespace{
						ObjectMeta: metav1.ObjectMeta{
							Name: testNamespace,
						},
					},
					metav1.CreateOptions{},
				)
				require.NoError(t, err)
				return kubeClient
			},
			assertions: func(err error, kubeClient *fake.Clientset) {
				require.NoError(t, err)
				// Check that the Namespace is gone
				_, err = kubeClient.CoreV1().Namespaces().Get(
					context.Background(),
					testNamespace,
					metav1.GetOptions{},
				)
				require.Error(t, err)
				require.Contains(t, err.Error(), "not found")
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			kubeClient := testCase.setup()
			s := &substrate{
				generateNewNamespaceFn: func() string {
					return testNamespace
				},
				kubeClient: kubeClient,
			}
			err := s.DeleteProject(
				context.Background(), api.Project{
					Kubernetes: &api.KubernetesDetails{
						Namespace: testNamespace,
					},
				},
			)
			testCase.assertions(err, kubeClient)
		})
	}
}
// TestSubstrateScheduleWorker is a table-driven test of ScheduleWorker using
// mock queue writers: it covers a failure creating the writer, a failure
// writing the task to the queue, and the success path.
func TestSubstrateScheduleWorker(t *testing.T) {
	const testEventID = "12345"
	testCases := []struct {
		name      string
		substrate api.Substrate
		assertions func(error)
	}{
		{
			name: "error creating queue writer",
			substrate: &substrate{
				queueWriterFactory: &mockQueueWriterFactory{
					// Fail before a writer is even produced.
					NewWriterFn: func(queueName string) (queue.Writer, error) {
						return nil, errors.New("something went wrong")
					},
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "something went wrong")
				require.Contains(t, err.Error(), "error creating queue writer")
			},
		},
		{
			name: "error writing to queue",
			substrate: &substrate{
				queueWriterFactory: &mockQueueWriterFactory{
					NewWriterFn: func(queueName string) (queue.Writer, error) {
						return &mockQueueWriter{
							// The writer itself fails on Write.
							WriteFn: func(
								context.Context,
								string,
								*queue.MessageOptions,
							) error {
								return errors.New("something went wrong")
							},
							CloseFn: func(context.Context) error {
								return nil
							},
						}, nil
					},
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "something went wrong")
				require.Contains(
					t,
					err.Error(),
					"error submitting execution task for event",
				)
			},
		},
		{
			name: "success",
			substrate: &substrate{
				queueWriterFactory: &mockQueueWriterFactory{
					NewWriterFn: func(queueName string) (queue.Writer, error) {
						return &mockQueueWriter{
							WriteFn: func(
								context.Context,
								string,
								*queue.MessageOptions,
							) error {
								return nil
							},
							CloseFn: func(context.Context) error {
								return nil
							},
						}, nil
					},
				},
			},
			assertions: func(err error) {
				require.NoError(t, err)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			err := testCase.substrate.ScheduleWorker(
				context.Background(),
				api.Event{
					ObjectMeta: meta.ObjectMeta{
						ID: testEventID,
					},
				},
			)
			testCase.assertions(err)
		})
	}
}
// TestSubstrateStartWorker is a table-driven test of StartWorker. Failure
// cases cover a missing project secret, a pre-existing event secret, and
// injected failures from the workspace-PVC and worker-pod creation hooks;
// the success case stubs both hooks to succeed.
func TestSubstrateStartWorker(t *testing.T) {
	const testNamespace = "foo"
	const testEventID = "12345"
	testCases := []struct {
		name       string
		setup      func() api.Substrate
		assertions func(error)
	}{
		{
			name: "error getting project secret",
			setup: func() api.Substrate {
				// No "project-secrets" secret exists in the fake clientset.
				return &substrate{
					kubeClient: fake.NewSimpleClientset(),
				}
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(
					t,
					err.Error(),
					"error finding secret \"project-secrets\"",
				)
			},
		},
		{
			name: "error creating event secret",
			setup: func() api.Substrate {
				kubeClient := fake.NewSimpleClientset()
				_, err := kubeClient.CoreV1().Secrets(testNamespace).Create(
					context.Background(),
					&corev1.Secret{
						ObjectMeta: metav1.ObjectMeta{
							Name: "project-secrets",
						},
					},
					metav1.CreateOptions{},
				)
				require.NoError(t, err)
				// We'll force an error creating the event secret by having it already
				// exist
				_, err = kubeClient.CoreV1().Secrets(testNamespace).Create(
					context.Background(),
					&corev1.Secret{
						ObjectMeta: metav1.ObjectMeta{
							Name: myk8s.EventSecretName(testEventID),
						},
					},
					metav1.CreateOptions{},
				)
				require.NoError(t, err)
				return &substrate{
					kubeClient: kubeClient,
				}
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "error creating secret")
			},
		},
		{
			name: "error creating workspace",
			setup: func() api.Substrate {
				kubeClient := fake.NewSimpleClientset()
				_, err := kubeClient.CoreV1().Secrets(testNamespace).Create(
					context.Background(),
					&corev1.Secret{
						ObjectMeta: metav1.ObjectMeta{
							Name: "project-secrets",
						},
					},
					metav1.CreateOptions{},
				)
				require.NoError(t, err)
				return &substrate{
					kubeClient: kubeClient,
					// Inject a failure from the PVC-creation hook.
					createWorkspacePVCFn: func(
						context.Context,
						api.Project,
						api.Event,
					) error {
						return errors.New("something went wrong")
					},
				}
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "something went wrong")
				require.Contains(t, err.Error(), "error creating workspace for event")
			},
		},
		{
			name: "error creating worker pod",
			setup: func() api.Substrate {
				kubeClient := fake.NewSimpleClientset()
				_, err := kubeClient.CoreV1().Secrets(testNamespace).Create(
					context.Background(),
					&corev1.Secret{
						ObjectMeta: metav1.ObjectMeta{
							Name: "project-secrets",
						},
					},
					metav1.CreateOptions{},
				)
				require.NoError(t, err)
				return &substrate{
					kubeClient: kubeClient,
					createWorkspacePVCFn: func(
						context.Context,
						api.Project,
						api.Event,
					) error {
						return nil
					},
					// Inject a failure from the pod-creation hook.
					createWorkerPodFn: func(
						context.Context,
						api.Project,
						api.Event,
					) error {
						return errors.New("something went wrong")
					},
				}
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "something went wrong")
				require.Contains(t, err.Error(), "error creating pod for event")
			},
		},
		{
			name: "success",
			setup: func() api.Substrate {
				kubeClient := fake.NewSimpleClientset()
				_, err := kubeClient.CoreV1().Secrets(testNamespace).Create(
					context.Background(),
					&corev1.Secret{
						ObjectMeta: metav1.ObjectMeta{
							Name: "project-secrets",
						},
					},
					metav1.CreateOptions{},
				)
				require.NoError(t, err)
				return &substrate{
					kubeClient: kubeClient,
					createWorkspacePVCFn: func(
						context.Context,
						api.Project,
						api.Event,
					) error {
						return nil
					},
					createWorkerPodFn: func(
						context.Context,
						api.Project,
						api.Event,
					) error {
						return nil
					},
				}
			},
			assertions: func(err error) {
				require.NoError(t, err)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			err := testCase.setup().StartWorker(
				context.Background(),
				api.Project{
					Kubernetes: &api.KubernetesDetails{
						Namespace: testNamespace,
					},
				},
				api.Event{
					ObjectMeta: meta.ObjectMeta{
						ID: testEventID,
					},
					Worker: api.Worker{
						Spec: api.WorkerSpec{
							UseWorkspace: true,
						},
					},
				},
				"fake token",
			)
			testCase.assertions(err)
		})
	}
}
// TestSubstrateStoreJobEnvironment verifies that StoreJobEnvironment wraps
// and surfaces a failure from the job-secret-creation hook and succeeds when
// the hook succeeds.
func TestSubstrateStoreJobEnvironment(t *testing.T) {
	const testEventID = "123456789"
	const testJobName = "italian"
	testCases := []struct {
		name      string
		substrate api.Substrate
		assertions func(error)
	}{
		{
			name: "error creating job secret",
			substrate: &substrate{
				// Inject a failure from the secret-creation hook.
				createJobSecretFn: func(
					context.Context,
					api.Project,
					string,
					string,
					api.JobSpec,
				) error {
					return errors.New("something went wrong")
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "something went wrong")
				require.Contains(t, err.Error(), "error creating secret for event")
			},
		},
		{
			name: "success",
			substrate: &substrate{
				createJobSecretFn: func(
					context.Context,
					api.Project,
					string,
					string,
					api.JobSpec,
				) error {
					return nil
				},
			},
			assertions: func(err error) {
				require.NoError(t, err)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			err := testCase.substrate.StoreJobEnvironment(
				context.Background(),
				api.Project{},
				testEventID,
				testJobName,
				api.JobSpec{},
			)
			testCase.assertions(err)
		})
	}
}
// TestSubstrateScheduleJob is a table-driven test of ScheduleJob using mock
// queue writers: it covers a failure creating the writer, a failure writing
// the task to the queue, and the success path.
func TestSubstrateScheduleJob(t *testing.T) {
	const testEventID = "123456789"
	const testJobName = "italian"
	testCases := []struct {
		name       string
		setup      func() api.Substrate
		assertions func(error)
	}{
		{
			name: "error creating queue writer",
			setup: func() api.Substrate {
				return &substrate{
					queueWriterFactory: &mockQueueWriterFactory{
						// Fail before a writer is even produced.
						NewWriterFn: func(queueName string) (queue.Writer, error) {
							return nil, errors.New("something went wrong")
						},
					},
				}
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "something went wrong")
				require.Contains(t, err.Error(), "error creating queue writer")
			},
		},
		{
			name: "error writing to queue",
			setup: func() api.Substrate {
				return &substrate{
					queueWriterFactory: &mockQueueWriterFactory{
						NewWriterFn: func(queueName string) (queue.Writer, error) {
							return &mockQueueWriter{
								// The writer itself fails on Write.
								WriteFn: func(
									context.Context,
									string,
									*queue.MessageOptions,
								) error {
									return errors.New("something went wrong")
								},
								CloseFn: func(context.Context) error {
									return nil
								},
							}, nil
						},
					},
				}
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "something went wrong")
				require.Contains(
					t,
					err.Error(),
					"error submitting execution task for event",
				)
			},
		},
		{
			name: "success",
			setup: func() api.Substrate {
				return &substrate{
					queueWriterFactory: &mockQueueWriterFactory{
						NewWriterFn: func(queueName string) (queue.Writer, error) {
							return &mockQueueWriter{
								WriteFn: func(
									context.Context,
									string,
									*queue.MessageOptions,
								) error {
									return nil
								},
								CloseFn: func(context.Context) error {
									return nil
								},
							}, nil
						},
					},
				}
			},
			assertions: func(err error) {
				require.NoError(t, err)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			substrate := testCase.setup()
			err := substrate.ScheduleJob(
				context.Background(),
				api.Project{},
				api.Event{
					ObjectMeta: meta.ObjectMeta{
						ID: testEventID,
					},
				},
				testJobName,
			)
			testCase.assertions(err)
		})
	}
}
// TestSubstrateStartJob verifies that StartJob wraps and surfaces a failure
// from the job-pod-creation hook and succeeds when the hook succeeds.
func TestSubstrateStartJob(t *testing.T) {
	const testJobName = "foo"
	testCases := []struct {
		name      string
		substrate api.Substrate
		assertions func(error)
	}{
		{
			name: "error creating job pod",
			substrate: &substrate{
				// Inject a failure from the pod-creation hook.
				createJobPodFn: func(
					context.Context,
					api.Project,
					api.Event,
					string,
					api.JobSpec,
				) error {
					return errors.New("something went wrong")
				},
			},
			assertions: func(err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "something went wrong")
				require.Contains(t, err.Error(), "error creating pod for event")
			},
		},
		{
			name: "success",
			substrate: &substrate{
				createJobPodFn: func(
					context.Context,
					api.Project,
					api.Event,
					string,
					api.JobSpec,
				) error {
					return nil
				},
			},
			assertions: func(err error) {
				require.NoError(t, err)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			err := testCase.substrate.StartJob(
				context.Background(),
				api.Project{},
				api.Event{
					Worker: api.Worker{
						Spec: api.WorkerSpec{
							UseWorkspace: true,
						},
					},
				},
				testJobName,
			)
			testCase.assertions(err)
		})
	}
}
// TODO: Find a better way to test this. Unfortunately, the DeleteCollection
// function on a *fake.ClientSet doesn't ACTUALLY delete collections of
// resources based on the labels provided.
//
// Refer to: https://github.com/kubernetes/client-go/issues/609
//
// This makes it basically impossible to assert what we'd LIKE to assert here--
// that resources labeled with the correct Event ID are deleted while other
// resources are left alone. We'll settle for invoking DeleteJob(...)
// and asserting we get no error-- so we at least get some test coverage for
// this function. We'll have to make sure this behavior is well-covered by
// integration or e2e tests in the future.
func TestSubstrateDeleteJob(t *testing.T) {
	const testEventID = "123456789"
	const testJobName = "italian"
	s := &substrate{
		kubeClient: fake.NewSimpleClientset(),
	}
	err := s.DeleteJob(
		context.Background(),
		api.Project{
			Kubernetes: &api.KubernetesDetails{
				Namespace: "foo",
			},
		},
		api.Event{
			ObjectMeta: meta.ObjectMeta{
				ID: testEventID,
			},
		},
		testJobName,
	)
	require.NoError(t, err)
}
// TODO: Find a better way to test this. Unfortunately, the DeleteCollection
// function on a *fake.ClientSet doesn't ACTUALLY delete collections of
// resources based on the labels provided.
//
// Refer to: https://github.com/kubernetes/client-go/issues/609
//
// This makes it basically impossible to assert what we'd LIKE to assert here--
// that resources labeled with the correct Event ID are deleted while other
// resources are left alone. We'll settle for invoking DeleteWorkerAndJobs(...)
// and asserting we get no error-- so we at least get some test coverage for
// this function. We'll have to make sure this behavior is well-covered by
// integration or e2e tests in the future.
func TestSubstrateDeleteWorkerAndJobs(t *testing.T) {
	s := &substrate{
		kubeClient: fake.NewSimpleClientset(),
	}
	// Smoke test only: assert the call completes without error (see TODO above).
	err := s.DeleteWorkerAndJobs(
		context.Background(),
		api.Project{
			Kubernetes: &api.KubernetesDetails{
				Namespace: "foo",
			},
		},
		api.Event{
			ObjectMeta: meta.ObjectMeta{
				ID: "bar",
			},
		},
	)
	require.NoError(t, err)
}
// TestSubstrateCreateWorkspacePVC exercises substrate.createWorkspacePVC
// against a fake Kubernetes clientset, covering an unparsable storage
// quantity, a PVC name collision, and the happy path.
func TestSubstrateCreateWorkspacePVC(t *testing.T) {
	testProject := api.Project{
		Kubernetes: &api.KubernetesDetails{
			Namespace: "foo",
		},
	}
	const testEventID = "123456789"
	testCases := []struct {
		name       string
		event      api.Event
		setup      func() *substrate
		assertions func(kubernetes.Interface, error)
	}{
		{
			// WorkspaceSize must parse as a Kubernetes resource quantity;
			// this value cannot, so the call should fail before touching
			// the API (note no client is configured on the substrate).
			name: "unparsable storage quantity",
			event: api.Event{
				Worker: api.Worker{
					Spec: api.WorkerSpec{
						WorkspaceSize: "10ZillionBytes",
					},
				},
			},
			setup: func() *substrate {
				return &substrate{}
			},
			assertions: func(_ kubernetes.Interface, err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "error parsing storage quantity")
			},
		},
		{
			name: "error creating pvc",
			event: api.Event{
				ObjectMeta: meta.ObjectMeta{
					ID: testEventID,
				},
			},
			setup: func() *substrate {
				kubeClient := fake.NewSimpleClientset()
				// Ensure a failure by pre-creating a PVC with the expected name
				_, err := kubeClient.CoreV1().PersistentVolumeClaims(
					testProject.Kubernetes.Namespace,
				).Create(
					context.Background(),
					&corev1.PersistentVolumeClaim{
						ObjectMeta: metav1.ObjectMeta{
							Name: myk8s.WorkspacePVCName(testEventID),
						},
					},
					metav1.CreateOptions{},
				)
				require.NoError(t, err)
				return &substrate{
					kubeClient: kubeClient,
				}
			},
			assertions: func(_ kubernetes.Interface, err error) {
				require.Error(t, err)
				require.Contains(
					t,
					err.Error(),
					"error creating workspace PVC for event",
				)
			},
		},
		{
			name: "success",
			event: api.Event{
				ObjectMeta: meta.ObjectMeta{
					ID: testEventID,
				},
			},
			setup: func() *substrate {
				return &substrate{
					kubeClient: fake.NewSimpleClientset(),
				}
			},
			assertions: func(kubeClient kubernetes.Interface, err error) {
				require.NoError(t, err)
				// Verify a PVC now exists under the conventional name.
				pvc, err := kubeClient.CoreV1().PersistentVolumeClaims(
					testProject.Kubernetes.Namespace,
				).Get(
					context.Background(),
					myk8s.WorkspacePVCName(testEventID),
					metav1.GetOptions{},
				)
				require.NoError(t, err)
				require.NotNil(t, pvc)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			substrate := testCase.setup()
			err := substrate.createWorkspacePVC(
				context.Background(),
				testProject,
				testCase.event,
			)
			testCase.assertions(substrate.kubeClient, err)
		})
	}
}
// TestSubstrateCreateWorkerPod exercises substrate.createWorkerPod against
// a fake Kubernetes clientset, covering a pod name collision and the happy
// path.
func TestSubstrateCreateWorkerPod(t *testing.T) {
	testProject := api.Project{
		Kubernetes: &api.KubernetesDetails{
			Namespace: "foo",
		},
	}
	// An event whose worker spec exercises image pull secrets, workspace
	// use, git configuration, and container environment handling.
	testEvent := api.Event{
		ObjectMeta: meta.ObjectMeta{
			ID: "123456789",
		},
		Worker: api.Worker{
			Spec: api.WorkerSpec{
				Kubernetes: &api.KubernetesConfig{
					ImagePullSecrets: []string{"foo", "bar"},
				},
				UseWorkspace: true,
				Git: &api.GitConfig{
					CloneURL: "a fake clone url",
				},
				Container: &api.ContainerSpec{
					Environment: map[string]string{
						"FOO": "bar",
					},
				},
			},
		},
	}
	testCases := []struct {
		name       string
		setup      func() *substrate
		assertions func(kubernetes.Interface, error)
	}{
		{
			name: "error creating pod",
			setup: func() *substrate {
				kubeClient := fake.NewSimpleClientset()
				// Ensure a failure by pre-creating a pod with the expected name
				_, err := kubeClient.CoreV1().Pods(
					testProject.Kubernetes.Namespace,
				).Create(
					context.Background(),
					&corev1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name: myk8s.WorkerPodName(testEvent.ID),
						},
					},
					metav1.CreateOptions{},
				)
				require.NoError(t, err)
				return &substrate{
					kubeClient: kubeClient,
				}
			},
			assertions: func(_ kubernetes.Interface, err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "error creating pod for event")
			},
		},
		{
			name: "success",
			setup: func() *substrate {
				return &substrate{
					kubeClient: fake.NewSimpleClientset(),
				}
			},
			assertions: func(kubeClient kubernetes.Interface, err error) {
				require.NoError(t, err)
				// Verify a worker pod now exists under the conventional name.
				pod, err := kubeClient.CoreV1().Pods(
					testProject.Kubernetes.Namespace,
				).Get(
					context.Background(),
					myk8s.WorkerPodName(testEvent.ID),
					metav1.GetOptions{},
				)
				require.NoError(t, err)
				require.NotNil(t, pod)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			substrate := testCase.setup()
			err := substrate.createWorkerPod(
				context.Background(),
				testProject,
				testEvent,
			)
			testCase.assertions(substrate.kubeClient, err)
		})
	}
}
// TestSubstrateCreateJobSecret exercises substrate.createJobSecret against
// a fake Kubernetes clientset, covering a secret name collision and the
// happy path, including verification that primary and sidecar container
// environments are flattened into "<container>.<key>" secret entries.
func TestSubstrateCreateJobSecret(t *testing.T) {
	testProject := api.Project{
		Kubernetes: &api.KubernetesDetails{
			Namespace: "foo",
		},
	}
	const testEventID = "123456789"
	const testJobName = "italian"
	testJobSpec := api.JobSpec{
		PrimaryContainer: api.JobContainerSpec{
			ContainerSpec: api.ContainerSpec{
				Environment: map[string]string{
					"FOO": "bar",
				},
			},
		},
		SidecarContainers: map[string]api.JobContainerSpec{
			"helper": {
				ContainerSpec: api.ContainerSpec{
					Environment: map[string]string{
						"BAT": "baz",
					},
				},
			},
		},
	}
	testCases := []struct {
		name       string
		setup      func() *substrate
		assertions func(kubernetes.Interface, error)
	}{
		{
			name: "error creating secret",
			setup: func() *substrate {
				kubeClient := fake.NewSimpleClientset()
				// Ensure a failure by pre-creating a secret with the expected name
				_, err := kubeClient.CoreV1().Secrets(
					testProject.Kubernetes.Namespace,
				).Create(
					context.Background(),
					&corev1.Secret{
						ObjectMeta: metav1.ObjectMeta{
							Name: myk8s.JobSecretName(testEventID, testJobName),
						},
					},
					metav1.CreateOptions{},
				)
				require.NoError(t, err)
				return &substrate{
					kubeClient: kubeClient,
				}
			},
			assertions: func(_ kubernetes.Interface, err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "error creating secret for event")
			},
		},
		{
			name: "success",
			setup: func() *substrate {
				return &substrate{
					kubeClient: fake.NewSimpleClientset(),
				}
			},
			assertions: func(kubeClient kubernetes.Interface, err error) {
				require.NoError(t, err)
				secret, err := kubeClient.CoreV1().Secrets(
					testProject.Kubernetes.Namespace,
				).Get(
					context.Background(),
					myk8s.JobSecretName(testEventID, testJobName),
					metav1.GetOptions{},
				)
				require.NoError(t, err)
				require.NotNil(t, secret)
				// Env vars are keyed as "<container name>.<var name>".
				val, ok := secret.StringData["italian.FOO"]
				require.True(t, ok)
				require.Equal(t, "bar", val)
				val, ok = secret.StringData["helper.BAT"]
				require.True(t, ok)
				require.Equal(t, "baz", val)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			substrate := testCase.setup()
			err := substrate.createJobSecret(
				context.Background(),
				testProject,
				testEventID,
				testJobName,
				testJobSpec,
			)
			testCase.assertions(substrate.kubeClient, err)
		})
	}
}
// TestSubstrateCreateJobPod exercises substrate.createJobPod against a
// fake Kubernetes clientset, covering a pod name collision, a successful
// Linux pod (verifying volumes, the git init container, and both primary
// and sidecar containers), and a successful Windows pod (verifying the
// node selector and the Windows git-initializer image variant).
func TestSubstrateCreateJobPod(t *testing.T) {
	testSubstrateConfig := SubstrateConfig{
		GitInitializerImage:                  "brigadecore/brigade2-git-initializer:v2.0.0", // nolint: lll
		GitInitializerImagePullPolicy:        "IfNotPresent",
		GitInitializerWindowsImage:           "brigadecore/brigade2-git-initializer-windows:v2.0.0", // nolint: lll
		GitInitializerWindowsImagePullPolicy: "IfNotPresent",
	}
	testProject := api.Project{
		Kubernetes: &api.KubernetesDetails{
			Namespace: "foo",
		},
	}
	testEvent := api.Event{
		ObjectMeta: meta.ObjectMeta{
			ID: "123456789",
		},
		Worker: api.Worker{
			Spec: api.WorkerSpec{
				Git: &api.GitConfig{
					CloneURL: "a fake git repo url",
				},
				Kubernetes: &api.KubernetesConfig{
					ImagePullSecrets: []string{"foo", "bar"},
				},
			},
		},
	}
	const testJobName = "italian"
	// A job spec with both a primary container and one sidecar, each
	// mounting the workspace and source volumes.
	testJobSpec := api.JobSpec{
		PrimaryContainer: api.JobContainerSpec{
			ContainerSpec: api.ContainerSpec{
				Environment: map[string]string{
					"FOO": "bar",
				},
			},
			WorkspaceMountPath: "/var/workspace",
			SourceMountPath:    "/var/source",
			// UseHostDockerSocket: true,
			Privileged: true,
		},
		SidecarContainers: map[string]api.JobContainerSpec{
			"helper": {
				ContainerSpec: api.ContainerSpec{
					Environment: map[string]string{
						"BAT": "baz",
					},
				},
				WorkspaceMountPath: "/var/workspace",
				SourceMountPath:    "/var/source",
				// UseHostDockerSocket: true,
				Privileged: true,
			},
		},
	}
	testCases := []struct {
		name       string
		setup      func() *substrate
		jobSpec    func() api.JobSpec
		assertions func(kubernetes.Interface, error)
	}{
		{
			name: "error creating pod",
			setup: func() *substrate {
				kubeClient := fake.NewSimpleClientset()
				// Ensure a failure by pre-creating a pod with the expected name
				_, err := kubeClient.CoreV1().Pods(
					testProject.Kubernetes.Namespace,
				).Create(
					context.Background(),
					&corev1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name: myk8s.JobPodName(testEvent.ID, testJobName),
						},
					},
					metav1.CreateOptions{},
				)
				require.NoError(t, err)
				return &substrate{
					config:     testSubstrateConfig,
					kubeClient: kubeClient,
				}
			},
			jobSpec: func() api.JobSpec {
				return testJobSpec
			},
			assertions: func(_ kubernetes.Interface, err error) {
				require.Error(t, err)
				require.Contains(t, err.Error(), "error creating pod for event")
			},
		},
		{
			name: "success",
			setup: func() *substrate {
				return &substrate{
					config:     testSubstrateConfig,
					kubeClient: fake.NewSimpleClientset(),
				}
			},
			jobSpec: func() api.JobSpec {
				return testJobSpec
			},
			assertions: func(kubeClient kubernetes.Interface, err error) {
				require.NoError(t, err)
				pod, err := kubeClient.CoreV1().Pods(
					testProject.Kubernetes.Namespace,
				).Get(
					context.Background(),
					myk8s.JobPodName(testEvent.ID, testJobName),
					metav1.GetOptions{},
				)
				require.NoError(t, err)
				require.NotNil(t, pod)
				// Volumes:
				require.Len(t, pod.Spec.Volumes, 3)
				require.Equal(t, "workspace", pod.Spec.Volumes[0].Name)
				require.Equal(t, "event", pod.Spec.Volumes[1].Name)
				require.Equal(t, "vcs", pod.Spec.Volumes[2].Name)
				// require.Equal(t, "docker-socket", pod.Spec.Volumes[3].Name)
				// Init container:
				require.Len(t, pod.Spec.InitContainers, 1)
				require.Equal(t, "vcs", pod.Spec.InitContainers[0].Name)
				require.Equal(
					t,
					testSubstrateConfig.GitInitializerImage,
					pod.Spec.InitContainers[0].Image,
				)
				require.Equal(
					t,
					corev1.PullPolicy(testSubstrateConfig.GitInitializerImagePullPolicy),
					pod.Spec.InitContainers[0].ImagePullPolicy,
				)
				require.Len(t, pod.Spec.InitContainers[0].VolumeMounts, 2)
				require.Equal(
					t,
					"event",
					pod.Spec.InitContainers[0].VolumeMounts[0].Name,
				)
				require.Equal(t, "vcs", pod.Spec.InitContainers[0].VolumeMounts[1].Name)
				// Containers:
				require.Len(t, pod.Spec.Containers, 2)
				// Primary container:
				require.Equal(t, testJobName, pod.Spec.Containers[0].Name)
				require.Len(t, pod.Spec.Containers[0].Env, 1)
				require.Equal(t, "FOO", pod.Spec.Containers[0].Env[0].Name)
				require.Len(t, pod.Spec.Containers[0].VolumeMounts, 2)
				require.Equal(
					t,
					"workspace",
					pod.Spec.Containers[0].VolumeMounts[0].Name,
				)
				require.Equal(t, "vcs", pod.Spec.Containers[0].VolumeMounts[1].Name)
				// require.Equal(
				// 	t,
				// 	"docker-socket",
				// 	pod.Spec.Containers[0].VolumeMounts[2].Name,
				// )
				// Sidecar container:
				require.Equal(t, "helper", pod.Spec.Containers[1].Name)
				require.Len(t, pod.Spec.Containers[1].Env, 1)
				require.Equal(t, "BAT", pod.Spec.Containers[1].Env[0].Name)
				require.Len(t, pod.Spec.Containers[1].VolumeMounts, 2)
				require.Equal(
					t,
					"workspace",
					pod.Spec.Containers[1].VolumeMounts[0].Name,
				)
				require.Equal(t, "vcs", pod.Spec.Containers[1].VolumeMounts[1].Name)
				// require.Equal(
				// 	t,
				// 	"docker-socket",
				// 	pod.Spec.Containers[1].VolumeMounts[2].Name,
				// )
			},
		},
		{
			name: "success with windows",
			setup: func() *substrate {
				return &substrate{
					config:     testSubstrateConfig,
					kubeClient: fake.NewSimpleClientset(),
				}
			},
			jobSpec: func() api.JobSpec {
				// Copy the shared spec so the Windows host requirement does
				// not leak into other test cases.
				jobSpecCopy := testJobSpec
				jobSpecCopy.Host = &api.JobHost{
					OS: api.OSFamilyWindows,
				}
				return jobSpecCopy
			},
			assertions: func(kubeClient kubernetes.Interface, err error) {
				require.NoError(t, err)
				pod, err := kubeClient.CoreV1().Pods(
					testProject.Kubernetes.Namespace,
				).Get(
					context.Background(),
					myk8s.JobPodName(testEvent.ID, testJobName),
					metav1.GetOptions{},
				)
				require.NoError(t, err)
				require.NotNil(t, pod)
				// These should be the only real differences from the previous test case
				//
				// Make sure the pod is assigned to a Windows node
				require.Equal(t, "windows", pod.Spec.NodeSelector[corev1.LabelOSStable])
				// Make sure we use the Windows variant of the git initializer image
				require.Equal(
					t,
					testSubstrateConfig.GitInitializerWindowsImage,
					pod.Spec.InitContainers[0].Image,
				)
				require.Equal(
					t,
					corev1.PullPolicy(testSubstrateConfig.GitInitializerWindowsImagePullPolicy), // nolint: lll
					pod.Spec.InitContainers[0].ImagePullPolicy,
				)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			substrate := testCase.setup()
			err := substrate.createJobPod(
				context.Background(),
				testProject,
				testEvent,
				testJobName,
				testCase.jobSpec(),
			)
			testCase.assertions(substrate.kubeClient, err)
		})
	}
}
// TestSubstrateCreatePodWithNodeSelectorAndToleration verifies that the
// substrate's configured node selector and toleration settings propagate
// (or are correctly omitted) on both worker pods and job pods. Each case
// creates one of each pod kind and inspects the resulting specs.
func TestSubstrateCreatePodWithNodeSelectorAndToleration(t *testing.T) {
	testProject := api.Project{
		Kubernetes: &api.KubernetesDetails{
			Namespace: "foo",
		},
	}
	testEvent := api.Event{
		ObjectMeta: meta.ObjectMeta{
			ID: "123456789",
		},
		Worker: api.Worker{},
	}
	testJobName := "italian"
	testCases := []struct {
		name       string
		setup      func() *substrate
		assertions func(kubernetes.Interface)
	}{
		{
			name: "empty node selector, empty toleration",
			setup: func() *substrate {
				return &substrate{
					kubeClient: fake.NewSimpleClientset(),
					config:     SubstrateConfig{},
				}
			},
			assertions: func(kubeClient kubernetes.Interface) {
				workerPod, err := kubeClient.CoreV1().Pods(
					testProject.Kubernetes.Namespace,
				).Get(
					context.Background(),
					myk8s.WorkerPodName(testEvent.ID),
					metav1.GetOptions{},
				)
				require.NoError(t, err)
				require.NotNil(t, workerPod)
				require.Empty(t, workerPod.Spec.NodeSelector)
				require.Empty(t, workerPod.Spec.Tolerations)
				jobPod, err := kubeClient.CoreV1().Pods(
					testProject.Kubernetes.Namespace,
				).Get(
					context.Background(),
					myk8s.JobPodName(testEvent.ID, testJobName),
					metav1.GetOptions{},
				)
				require.NoError(t, err)
				require.NotNil(t, jobPod)
				require.Empty(t, jobPod.Spec.NodeSelector)
				require.Empty(t, jobPod.Spec.Tolerations)
			},
		},
		{
			// A selector key without a value should be ignored entirely.
			name: "node selector key but no value",
			setup: func() *substrate {
				return &substrate{
					kubeClient: fake.NewSimpleClientset(),
					config: SubstrateConfig{
						NodeSelectorKey: "foo",
					},
				}
			},
			assertions: func(kubeClient kubernetes.Interface) {
				workerPod, err := kubeClient.CoreV1().Pods(
					testProject.Kubernetes.Namespace,
				).Get(
					context.Background(),
					myk8s.WorkerPodName(testEvent.ID),
					metav1.GetOptions{},
				)
				require.NoError(t, err)
				require.NotNil(t, workerPod)
				require.Empty(t, workerPod.Spec.NodeSelector)
				require.Empty(t, workerPod.Spec.Tolerations)
				jobPod, err := kubeClient.CoreV1().Pods(
					testProject.Kubernetes.Namespace,
				).Get(
					context.Background(),
					myk8s.JobPodName(testEvent.ID, testJobName),
					metav1.GetOptions{},
				)
				require.NoError(t, err)
				require.NotNil(t, jobPod)
				require.Empty(t, jobPod.Spec.NodeSelector)
				require.Empty(t, jobPod.Spec.Tolerations)
			},
		},
		{
			name: "node selector key and value",
			setup: func() *substrate {
				return &substrate{
					kubeClient: fake.NewSimpleClientset(),
					config: SubstrateConfig{
						NodeSelectorKey:   "foo",
						NodeSelectorValue: "bar",
					},
				}
			},
			assertions: func(kubeClient kubernetes.Interface) {
				workerPod, err := kubeClient.CoreV1().Pods(
					testProject.Kubernetes.Namespace,
				).Get(
					context.Background(),
					myk8s.WorkerPodName(testEvent.ID),
					metav1.GetOptions{},
				)
				require.NoError(t, err)
				require.NotNil(t, workerPod)
				require.Equal(t, "bar", workerPod.Spec.NodeSelector["foo"])
				require.Empty(t, workerPod.Spec.Tolerations)
				jobPod, err := kubeClient.CoreV1().Pods(
					testProject.Kubernetes.Namespace,
				).Get(
					context.Background(),
					myk8s.JobPodName(testEvent.ID, testJobName),
					metav1.GetOptions{},
				)
				require.NoError(t, err)
				require.NotNil(t, jobPod)
				require.Equal(t, "bar", jobPod.Spec.NodeSelector["foo"])
				require.Empty(t, jobPod.Spec.Tolerations)
			},
		},
		{
			// A toleration key without a value should produce an Exists
			// toleration (tolerate any value for the key).
			name: "toleration key, no value",
			setup: func() *substrate {
				return &substrate{
					kubeClient: fake.NewSimpleClientset(),
					config: SubstrateConfig{
						TolerationKey: "foo",
					},
				}
			},
			assertions: func(kubeClient kubernetes.Interface) {
				workerPod, err := kubeClient.CoreV1().Pods(
					testProject.Kubernetes.Namespace,
				).Get(
					context.Background(),
					myk8s.WorkerPodName(testEvent.ID),
					metav1.GetOptions{},
				)
				require.NoError(t, err)
				require.NotNil(t, workerPod)
				require.Empty(t, workerPod.Spec.NodeSelector)
				require.Equal(t, corev1.Toleration{
					Key:      "foo",
					Operator: corev1.TolerationOpExists,
				}, workerPod.Spec.Tolerations[0])
				jobPod, err := kubeClient.CoreV1().Pods(
					testProject.Kubernetes.Namespace,
				).Get(
					context.Background(),
					myk8s.JobPodName(testEvent.ID, testJobName),
					metav1.GetOptions{},
				)
				require.NoError(t, err)
				require.NotNil(t, jobPod)
				require.Empty(t, jobPod.Spec.NodeSelector)
				require.Equal(t, corev1.Toleration{
					Key:      "foo",
					Operator: corev1.TolerationOpExists,
				}, jobPod.Spec.Tolerations[0])
			},
		},
		{
			// Key plus value should produce an Equal toleration.
			name: "toleration key and value",
			setup: func() *substrate {
				return &substrate{
					kubeClient: fake.NewSimpleClientset(),
					config: SubstrateConfig{
						TolerationKey:   "foo",
						TolerationValue: "bar",
					},
				}
			},
			assertions: func(kubeClient kubernetes.Interface) {
				workerPod, err := kubeClient.CoreV1().Pods(
					testProject.Kubernetes.Namespace,
				).Get(
					context.Background(),
					myk8s.WorkerPodName(testEvent.ID),
					metav1.GetOptions{},
				)
				require.NoError(t, err)
				require.NotNil(t, workerPod)
				require.Empty(t, workerPod.Spec.NodeSelector)
				require.Equal(t, corev1.Toleration{
					Key:      "foo",
					Value:    "bar",
					Operator: corev1.TolerationOpEqual,
				}, workerPod.Spec.Tolerations[0])
				jobPod, err := kubeClient.CoreV1().Pods(
					testProject.Kubernetes.Namespace,
				).Get(
					context.Background(),
					myk8s.JobPodName(testEvent.ID, testJobName),
					metav1.GetOptions{},
				)
				require.NoError(t, err)
				require.NotNil(t, jobPod)
				require.Empty(t, jobPod.Spec.NodeSelector)
				require.Equal(t, corev1.Toleration{
					Key:      "foo",
					Value:    "bar",
					Operator: corev1.TolerationOpEqual,
				}, jobPod.Spec.Tolerations[0])
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			substrate := testCase.setup()
			err := substrate.createWorkerPod(
				context.Background(),
				testProject,
				testEvent,
			)
			require.NoError(t, err)
			err = substrate.createJobPod(
				context.Background(),
				testProject,
				testEvent,
				testJobName,
				api.JobSpec{},
			)
			require.NoError(t, err)
			testCase.assertions(substrate.kubeClient)
		})
	}
}
// TestGenerateNewNamespace verifies generated namespaces have the form
// "brigade-<uuid>".
func TestGenerateNewNamespace(t *testing.T) {
	namespace := generateNewNamespace()
	// Split only at the first hyphen; the UUID suffix contains hyphens too.
	prefix, suffix, found := strings.Cut(namespace, "-")
	require.True(t, found)
	require.Equal(t, "brigade", prefix)
	_, err := uuid.FromString(suffix)
	require.NoError(t, err)
}
// mockQueueWriterFactory is a test double for a queue writer factory;
// each method simply delegates to the corresponding function field, which
// individual tests set to control behavior.
type mockQueueWriterFactory struct {
	// NewWriterFn is invoked by NewWriter.
	NewWriterFn func(queueName string) (queue.Writer, error)
	// CloseFn is invoked by Close.
	CloseFn func(context.Context) error
}

// NewWriter delegates to the configured NewWriterFn.
func (m *mockQueueWriterFactory) NewWriter(
	queueName string,
) (queue.Writer, error) {
	return m.NewWriterFn(queueName)
}

// Close delegates to the configured CloseFn.
func (m *mockQueueWriterFactory) Close(ctx context.Context) error {
	return m.CloseFn(ctx)
}
// mockQueueWriter is a test double for a queue writer; each method simply
// delegates to the corresponding function field, which individual tests
// set to control behavior.
type mockQueueWriter struct {
	// WriteFn is invoked by Write.
	WriteFn func(context.Context, string, *queue.MessageOptions) error
	// CloseFn is invoked by Close.
	CloseFn func(context.Context) error
}

// Write delegates to the configured WriteFn.
func (m *mockQueueWriter) Write(
	ctx context.Context,
	msg string,
	opts *queue.MessageOptions,
) error {
	return m.WriteFn(ctx, msg, opts)
}

// Close delegates to the configured CloseFn.
func (m *mockQueueWriter) Close(ctx context.Context) error {
	return m.CloseFn(ctx)
}
|
package transport
import (
"net/http"
"github.com/gin-gonic/gin"
s "github.com/thedevelopnik/netplan/pkg/models"
)
// CreateSubnetEndpoint creates a Subnet under the VPC identified by the
// "vpcid" path parameter and returns the created value.
// Returns a 400 if the parameter or request body is invalid,
// or a 500 if the db connection or creation fails.
func (h netplanHTTP) CreateSubnetEndpoint(c *gin.Context) {
	vpcID, err := convertParamToInt("vpcid", c)
	if err != nil {
		// BUG FIX: serializing the error value itself yields "{}" for most
		// error types (no exported fields); send the message string instead.
		c.JSON(http.StatusBadRequest, gin.H{
			"error": err.Error(),
		})
		return
	}
	if vpcID <= 0 {
		c.JSON(http.StatusBadRequest, gin.H{
			"error": "vpc id parameter must be a positive integer",
		})
		return
	}
	// get the network map object from the request, or send error
	var sn s.Subnet
	if err := c.ShouldBindJSON(&sn); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{
			"error": err.Error(),
		})
		return
	}
	// Associate the subnet with the VPC from the path.
	sn.VPCID = uint(vpcID)
	// create in the db
	if err := h.svc.CreateSubnet(&sn); err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{
			"error": err.Error(),
		})
		return
	}
	// send full created database object back
	c.JSON(http.StatusCreated, sn)
}
// UpdateSubnetEndpoint updates the name of a
// Subnet given an id and name.
// Returns a 400 for a malformed body, or a 500 if the update fails.
func (h netplanHTTP) UpdateSubnetEndpoint(c *gin.Context) {
	// get the values to update with off the request
	var sn s.Subnet
	if err := c.ShouldBindJSON(&sn); err != nil {
		// BUG FIX: serializing the error value itself yields "{}" for most
		// error types (no exported fields); send the message string instead.
		c.JSON(http.StatusBadRequest, gin.H{
			"error": err.Error(),
		})
		return
	}
	update, err := h.svc.UpdateSubnet(&sn)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{
			"error": err.Error(),
		})
		return
	}
	// send back updated value
	c.JSON(http.StatusOK, update)
}
// DeleteSubnetEndpoint deletes a Subnet given an id (the "snid" path
// parameter). Returns a 400 for an invalid id, a 500 if the delete fails,
// and 204 on success.
func (h netplanHTTP) DeleteSubnetEndpoint(c *gin.Context) {
	id, err := convertParamToInt("snid", c)
	if err != nil {
		// BUG FIX: serializing the error value itself yields "{}" for most
		// error types (no exported fields); send the message string instead.
		c.JSON(http.StatusBadRequest, gin.H{
			"error": err.Error(),
		})
		return
	}
	if id <= 0 {
		c.JSON(http.StatusBadRequest, gin.H{
			"error": "id parameter must be a positive integer",
		})
		return
	}
	// delete the object
	if err := h.svc.DeleteSubnet(uint(id)); err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{
			"error": err.Error(),
		})
		return
	}
	// send back a no content response
	c.Status(http.StatusNoContent)
}
|
/*
Whilst trying (and failing) to persuade my infant son to eat his dinner, I tried singing to him. Midway through this song I realised the formulaic structure might lend itself well to code golfing!
The task is to write a program or function which accepts no input and produces the following text:
There's a hole in the bottom of the sea
There's a hole in the bottom of the sea
There's a hole, there's a hole
There's a hole in the bottom of the sea
There's a log in the hole in the bottom of the sea
There's a log in the hole in the bottom of the sea
There's a hole, there's a hole
There's a hole in the bottom of the sea
There's a bump on the log in the hole in the bottom of the sea
There's a bump on the log in the hole in the bottom of the sea
There's a hole, there's a hole
There's a hole in the bottom of the sea
There's a frog on the bump on the log in the hole in the bottom of the sea
There's a frog on the bump on the log in the hole in the bottom of the sea
There's a hole, there's a hole
There's a hole in the bottom of the sea
There's a wart on the frog on the bump on the log in the hole in the bottom of the sea
There's a wart on the frog on the bump on the log in the hole in the bottom of the sea
There's a hole, there's a hole
There's a hole in the bottom of the sea
There's a hair on the wart on the frog on the bump on the log in the hole in the bottom of the sea
There's a hair on the wart on the frog on the bump on the log in the hole in the bottom of the sea
There's a hole, there's a hole
There's a hole in the bottom of the sea
There's a fly on the hair on the wart on the frog on the bump on the log in the hole in the bottom of the sea
There's a fly on the hair on the wart on the frog on the bump on the log in the hole in the bottom of the sea
There's a hole, there's a hole
There's a hole in the bottom of the sea
There's a flea on the fly on the hair on the wart on the frog on the bump on the log in the hole in the bottom of the sea
There's a flea on the fly on the hair on the wart on the frog on the bump on the log in the hole in the bottom of the sea
There's a hole, there's a hole
There's a hole in the bottom of the sea
There's a smile on the flea on the fly on the hair on the wart on the frog on the bump on the log in the hole in the bottom of the sea
There's a smile on the flea on the fly on the hair on the wart on the frog on the bump on the log in the hole in the bottom of the sea
There's a hole, there's a hole
There's a hole in the bottom of the sea
Challenge rules:
The text may be printed or returned as function output
Each verse is separated by a single empty line
Trailing whitespace is OK as long as it does not change the layout (so no leading whitespace or extra spaces between words)
Trailing newlines are OK too.
No leading newlines.
All languages welcomed, and this is code-golf, so the shortest answer in bytes for each language wins!
*/
package main
import "fmt"
// main prints the song. The POEM raw-string literal opens with a newline
// immediately after the backtick, so printing it verbatim would violate the
// challenge's "No leading newlines" rule; skip that first byte. POEM already
// ends with a newline, and trailing newlines are explicitly allowed.
func main() {
	fmt.Print(POEM[1:])
}
// POEM holds the full song text. Note the raw string literal opens with a
// newline immediately after the backtick, so the constant's first character
// is '\n', and it also ends with a trailing newline before the closing
// backtick.
const POEM = `
There's a hole in the bottom of the sea
There's a hole in the bottom of the sea
There's a hole, there's a hole
There's a hole in the bottom of the sea
There's a log in the hole in the bottom of the sea
There's a log in the hole in the bottom of the sea
There's a hole, there's a hole
There's a hole in the bottom of the sea
There's a bump on the log in the hole in the bottom of the sea
There's a bump on the log in the hole in the bottom of the sea
There's a hole, there's a hole
There's a hole in the bottom of the sea
There's a frog on the bump on the log in the hole in the bottom of the sea
There's a frog on the bump on the log in the hole in the bottom of the sea
There's a hole, there's a hole
There's a hole in the bottom of the sea
There's a wart on the frog on the bump on the log in the hole in the bottom of the sea
There's a wart on the frog on the bump on the log in the hole in the bottom of the sea
There's a hole, there's a hole
There's a hole in the bottom of the sea
There's a hair on the wart on the frog on the bump on the log in the hole in the bottom of the sea
There's a hair on the wart on the frog on the bump on the log in the hole in the bottom of the sea
There's a hole, there's a hole
There's a hole in the bottom of the sea
There's a fly on the hair on the wart on the frog on the bump on the log in the hole in the bottom of the sea
There's a fly on the hair on the wart on the frog on the bump on the log in the hole in the bottom of the sea
There's a hole, there's a hole
There's a hole in the bottom of the sea
There's a flea on the fly on the hair on the wart on the frog on the bump on the log in the hole in the bottom of the sea
There's a flea on the fly on the hair on the wart on the frog on the bump on the log in the hole in the bottom of the sea
There's a hole, there's a hole
There's a hole in the bottom of the sea
There's a smile on the flea on the fly on the hair on the wart on the frog on the bump on the log in the hole in the bottom of the sea
There's a smile on the flea on the fly on the hair on the wart on the frog on the bump on the log in the hole in the bottom of the sea
There's a hole, there's a hole
There's a hole in the bottom of the sea
`
|
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package keystore
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/sha512"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"math/big"
"os"
"path/filepath"
"sync"
"time"
// consider vendoring this deprecated ripemd160 package
"golang.org/x/crypto/ripemd160" // nolint:staticcheck
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcec/v2/ecdsa"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcwallet/internal/legacy/rename"
secp "github.com/decred/dcrd/dcrec/secp256k1/v4"
)
const (
	// Filename is the name of the key store file within the wallet
	// directory.
	Filename = "wallet.bin"
	// Length in bytes of KDF output.
	kdfOutputBytes = 32
)

// Default parameters used when computing new KDF settings.
const (
	// Target time (in seconds) the KDF should take to compute.
	defaultKdfComputeTime = 0.25
	// Upper bound on KDF memory usage (32 MiB).
	defaultKdfMaxMem = 32 * 1024 * 1024
)

// Possible errors when dealing with key stores.
var (
	ErrAddressNotFound  = errors.New("address not found")
	ErrAlreadyEncrypted = errors.New("private key is already encrypted")
	ErrChecksumMismatch = errors.New("checksum mismatch")
	ErrDuplicate        = errors.New("duplicate key or address")
	ErrMalformedEntry   = errors.New("malformed entry")
	ErrWatchingOnly     = errors.New("keystore is watching-only")
	ErrLocked           = errors.New("keystore is locked")
	ErrWrongPassphrase  = errors.New("wrong passphrase")
)

// fileID is the magic header identifying a key store file.
var fileID = [8]byte{0xba, 'W', 'A', 'L', 'L', 'E', 'T', 0x00}

// entryHeader identifies the type of a serialized key store entry.
type entryHeader byte

const (
	addrCommentHeader entryHeader = 1 << iota // nolint:varcheck,deadcode
	txCommentHeader                           // nolint:varcheck,deadcode
	deletedHeader                             // nolint:varcheck,deadcode
	scriptHeader
	addrHeader entryHeader = 0
)
// We want to use binaryRead and binaryWrite instead of binary.Read
// and binary.Write because those from the binary package do not return
// the number of bytes actually written or read. We need to return
// this value to correctly support the io.ReaderFrom and io.WriterTo
// interfaces.
func binaryRead(r io.Reader, order binary.ByteOrder, data interface{}) (n int64, err error) {
var read int
buf := make([]byte, binary.Size(data))
if read, err = io.ReadFull(r, buf); err != nil {
return int64(read), err
}
return int64(read), binary.Read(bytes.NewBuffer(buf), order, data)
}
// See comment for binaryRead().
func binaryWrite(w io.Writer, order binary.ByteOrder, data interface{}) (n int64, err error) {
buf := bytes.Buffer{}
if err = binary.Write(&buf, order, data); err != nil {
return 0, err
}
written, err := w.Write(buf.Bytes())
return int64(written), err
}
// pubkeyFromPrivkey creates an encoded pubkey based on a
// 32-byte privkey. The returned pubkey is 33 bytes if compressed,
// or 65 bytes if uncompressed.
func pubkeyFromPrivkey(privkey []byte, compress bool) (pubkey []byte) {
	// Derive the public key point; the private key itself is not needed.
	_, pubKey := btcec.PrivKeyFromBytes(privkey)
	if !compress {
		return pubKey.SerializeUncompressed()
	}
	return pubKey.SerializeCompressed()
}
// keyOneIter performs a single iteration of the Armory-style memory-hard
// key derivation: it fills a lookup table of memReqts bytes with chained
// SHA-512 hashes seeded from the salted passphrase, then repeatedly mixes
// the final hash with pseudo-randomly selected table entries. Returns the
// first kdfOutputBytes bytes of the result.
// NOTE(review): memReqts is presumably a multiple of sha512.Size — the
// table-filling and lookup arithmetic assume so; confirm at call sites.
func keyOneIter(passphrase, salt []byte, memReqts uint64) []byte {
	// note: append may write into passphrase's backing array if it has
	// spare capacity.
	saltedpass := append(passphrase, salt...)
	lutbl := make([]byte, memReqts)
	// Seed for lookup table
	seed := sha512.Sum512(saltedpass)
	copy(lutbl[:sha512.Size], seed[:])
	// Fill the rest of the table: each block is the SHA-512 of the block
	// before it (sequential, so it cannot be parallelized).
	for nByte := 0; nByte < (int(memReqts) - sha512.Size); nByte += sha512.Size {
		hash := sha512.Sum512(lutbl[nByte : nByte+sha512.Size])
		copy(lutbl[nByte+sha512.Size:nByte+2*sha512.Size], hash[:])
	}
	// x aliases the final table block and is mutated in place below.
	x := lutbl[cap(lutbl)-sha512.Size:]
	seqCt := uint32(memReqts / sha512.Size)
	nLookups := seqCt / 2
	for i := uint32(0); i < nLookups; i++ {
		// Armory ignores endianness here. We assume LE.
		newIdx := binary.LittleEndian.Uint32(x[cap(x)-4:]) % seqCt
		// Index of hash result at newIdx
		vIdx := newIdx * sha512.Size
		v := lutbl[vIdx : vIdx+sha512.Size]
		// XOR hash x with hash v
		for j := 0; j < sha512.Size; j++ {
			x[j] ^= v[j]
		}
		// Save new hash to x
		hash := sha512.Sum512(x)
		copy(x, hash[:])
	}
	return x[:kdfOutputBytes]
}
// kdf implements the key derivation function used by Armory
// based on the ROMix algorithm described in Colin Percival's paper
// "Stronger Key Derivation via Sequential Memory-Hard Functions"
// (http://www.tarsnap.com/scrypt/scrypt.pdf).
func kdf(passphrase []byte, params *kdfParameters) []byte {
	// Chain nIter rounds of the memory-hard iteration, feeding each
	// round's output back in as the next round's passphrase.
	key := passphrase
	for round := uint32(0); round < params.nIter; round++ {
		key = keyOneIter(key, params.salt[:], params.mem)
	}
	return key
}
func pad(size int, b []byte) []byte {
// Prevent a possible panic if the input exceeds the expected size.
if len(b) > size {
size = len(b)
}
p := make([]byte, size)
copy(p[size-len(b):], b)
return p
}
// chainedPrivKey deterministically generates a new private key using a
// previous address and chaincode. privkey and chaincode must be 32
// bytes long, and pubkey may either be 33 or 65 bytes.
//
// The derivation multiplies the private key (mod N) by a scalar formed by
// XORing the double-SHA256 of the pubkey with the chaincode, matching the
// Armory chained-key scheme.
func chainedPrivKey(privkey, pubkey, chaincode []byte) ([]byte, error) {
	if len(privkey) != 32 {
		return nil, fmt.Errorf("invalid privkey length %d (must be 32)",
			len(privkey))
	}
	if len(chaincode) != 32 {
		return nil, fmt.Errorf("invalid chaincode length %d (must be 32)",
			len(chaincode))
	}
	switch n := len(pubkey); n {
	case secp.PubKeyBytesLenUncompressed, secp.PubKeyBytesLenCompressed:
		// Correct length
	default:
		return nil, fmt.Errorf("invalid pubkey length %d", n)
	}
	// Scalar = DoubleSHA256(pubkey) XOR chaincode.
	xorbytes := make([]byte, 32)
	chainMod := chainhash.DoubleHashB(pubkey)
	for i := range xorbytes {
		xorbytes[i] = chainMod[i] ^ chaincode[i]
	}
	// New privkey = (scalar * privkey) mod N, zero-padded back to 32 bytes.
	chainXor := new(big.Int).SetBytes(xorbytes)
	privint := new(big.Int).SetBytes(privkey)
	t := new(big.Int).Mul(chainXor, privint)
	b := t.Mod(t, btcec.S256().N).Bytes()
	return pad(32, b), nil
}
// chainedPubKey deterministically generates a new public key using a
// previous public key and chaincode. pubkey must be 33 or 65 bytes, and
// chaincode must be 32 bytes long. The result is serialized in the same
// (compressed or uncompressed) format as the input pubkey.
func chainedPubKey(pubkey, chaincode []byte) ([]byte, error) {
	// Determine the serialization format from the input length.
	var compressed bool
	switch n := len(pubkey); n {
	case secp.PubKeyBytesLenUncompressed:
		compressed = false
	case secp.PubKeyBytesLenCompressed:
		compressed = true
	default:
		// Incorrect serialized pubkey length
		return nil, fmt.Errorf("invalid pubkey length %d", n)
	}
	if len(chaincode) != 32 {
		return nil, fmt.Errorf("invalid chaincode length %d (must be 32)",
			len(chaincode))
	}
	// Scalar = DoubleSHA256(pubkey) XOR chaincode, mirroring chainedPrivKey.
	var xorbytes [32]byte
	chainMod := chainhash.DoubleHashB(pubkey)
	for i := range xorbytes {
		xorbytes[i] = chainMod[i] ^ chaincode[i]
	}
	oldPk, err := btcec.ParsePubKey(pubkey)
	if err != nil {
		return nil, err
	}
	var xorBytesScalar btcec.ModNScalar
	overflow := xorBytesScalar.SetBytes(&xorbytes)
	if overflow != 0 {
		// BUG FIX: previously this formatted err, which is always nil on
		// this path; report the actual cause instead.
		return nil, errors.New(
			"unable to create pubkey: xor bytes overflow the curve order")
	}
	// New pubkey point = scalar * oldPk.
	var (
		newPointJacobian btcec.JacobianPoint
		oldPkJacobian    btcec.JacobianPoint
	)
	oldPk.AsJacobian(&oldPkJacobian)
	btcec.ScalarMultNonConst(
		&xorBytesScalar, &oldPkJacobian, &newPointJacobian,
	)
	newPointJacobian.ToAffine()
	newPk := btcec.NewPublicKey(
		&newPointJacobian.X, &newPointJacobian.Y,
	)
	if compressed {
		return newPk.SerializeCompressed(), nil
	}
	return newPk.SerializeUncompressed(), nil
}
// version identifies a key store file format revision. Components are
// ordered most significant first: major, minor, bugfix, autoincrement.
type version struct {
	major byte
	minor byte
	bugfix byte
	autoincrement byte
}
// Enforce that version satisfies the io.ReaderFrom and
// io.WriterTo interfaces.
var _ io.ReaderFrom = &version{}
var _ io.WriterTo = &version{}
// readerFromVersion is an io.ReaderFrom and io.WriterTo that
// can specify any particular key store file format for reading
// depending on the key store file version.
type readerFromVersion interface {
	readFromVersion(version, io.Reader) (int64, error)
	io.WriterTo
}
// String returns the dotted decimal form of the version. Trailing zero
// components are omitted, except that a nonzero autoincrement forces
// the bugfix component to be printed as well.
func (v version) String() string {
	switch {
	case v.autoincrement != 0x00:
		return fmt.Sprintf("%d.%d.%d.%d", v.major, v.minor, v.bugfix, v.autoincrement)
	case v.bugfix != 0x00:
		return fmt.Sprintf("%d.%d.%d", v.major, v.minor, v.bugfix)
	default:
		return fmt.Sprintf("%d.%d", v.major, v.minor)
	}
}
// Uint32 packs the four version components into a single integer.
//
// NOTE(review): the shift amounts (6/4/2 bits) overlap whenever a
// component exceeds 3, so distinct versions can collide (e.g. any minor
// >= 4 bleeds into the major bits). Kept as-is for compatibility —
// confirm no caller relies on this for ordering before changing.
func (v version) Uint32() uint32 {
	return uint32(v.major)<<6 | uint32(v.minor)<<4 | uint32(v.bugfix)<<2 | uint32(v.autoincrement)
}
// ReadFrom deserializes the version from r as four consecutive bytes
// (major, minor, bugfix, autoincrement), returning the bytes consumed.
func (v *version) ReadFrom(r io.Reader) (int64, error) {
	var buf [4]byte
	n, err := io.ReadFull(r, buf[:])
	if err != nil {
		return int64(n), err
	}
	v.major, v.minor, v.bugfix, v.autoincrement = buf[0], buf[1], buf[2], buf[3]
	return int64(n), nil
}
// WriteTo serializes the version to w as four bytes in the order
// major, minor, bugfix, autoincrement.
func (v *version) WriteTo(w io.Writer) (int64, error) {
	n, err := w.Write([]byte{v.major, v.minor, v.bugfix, v.autoincrement})
	return int64(n), err
}
// LT returns whether v is an earlier version than v2.
//
// The comparison is lexicographic: a less-significant component is only
// consulted when all more-significant components are equal. The previous
// implementation returned true whenever any single component compared
// lower, so e.g. 2.0 was incorrectly reported as earlier than 1.5. All
// version values declared in this file share the same major component,
// so their relative ordering is unchanged by this fix.
func (v version) LT(v2 version) bool {
	switch {
	case v.major != v2.major:
		return v.major < v2.major
	case v.minor != v2.minor:
		return v.minor < v2.minor
	case v.bugfix != v2.bugfix:
		return v.bugfix < v2.bugfix
	default:
		return v.autoincrement < v2.autoincrement
	}
}
// EQ returns whether v2 is an equal version to v.
func (v version) EQ(v2 version) bool {
	// version holds only comparable byte fields, so struct equality is
	// exactly the field-by-field comparison.
	return v == v2
}
// GT returns whether v is a later version than v2.
//
// The comparison is lexicographic: a less-significant component is only
// consulted when all more-significant components are equal. The previous
// implementation returned true whenever any single component compared
// higher (the mirror image of the LT bug), misordering versions whose
// leading components differ in opposite directions.
func (v version) GT(v2 version) bool {
	switch {
	case v.major != v2.major:
		return v.major > v2.major
	case v.minor != v2.minor:
		return v.minor > v2.minor
	case v.bugfix != v2.bugfix:
		return v.bugfix > v2.bugfix
	default:
		return v.autoincrement > v2.autoincrement
	}
}
// Various versions. These mirror the version history of the original
// Armory key store format that this file format derives from.
var (
	// VersArmory is the latest version used by Armory.
	VersArmory = version{1, 35, 0, 0}
	// Vers20LastBlocks is the version where key store files now hold
	// the 20 most recently seen block hashes.
	Vers20LastBlocks = version{1, 36, 0, 0}
	// VersUnsetNeedsPrivkeyFlag is the bugfix version where the
	// createPrivKeyNextUnlock address flag is correctly unset
	// after creating and encrypting its private key after unlock.
	// Otherwise, re-creating private keys will occur too early
	// in the address chain and fail due to encrypting an already
	// encrypted address. Key store versions at or before this
	// version include a special case to allow the duplicate
	// encrypt.
	VersUnsetNeedsPrivkeyFlag = version{1, 36, 1, 0}
	// VersCurrent is the current key store file version.
	VersCurrent = VersUnsetNeedsPrivkeyFlag
)
// varEntries is a variable-length list of serializable key store
// entries appended after the fixed-size portion of the file. It keeps a
// reference to its owning Store so deserialized entries can be wired
// back to it.
type varEntries struct {
	store *Store
	entries []io.WriterTo
}
// WriteTo serializes every entry to w in order, returning the total
// number of bytes written and the first error encountered.
func (v *varEntries) WriteTo(w io.Writer) (n int64, err error) {
	for _, entry := range v.entries {
		written, werr := entry.WriteTo(w)
		n += written
		if werr != nil {
			return n, werr
		}
	}
	return n, nil
}
// ReadFrom replaces v.entries with entries deserialized from r. Entries
// are read until EOF, which terminates the list and is not treated as
// an error. Returns the total bytes read and the first real error.
func (v *varEntries) ReadFrom(r io.Reader) (n int64, err error) {
	// Remove any previous entries.
	v.entries = nil
	for {
		// Each entry is prefixed with a header byte identifying its type.
		var header entryHeader
		read, herr := binaryRead(r, binary.LittleEndian, &header)
		n += read
		if herr != nil {
			// EOF here is not an error; the list simply ends.
			if herr == io.EOF {
				return n, nil
			}
			return n, herr
		}
		var entry io.WriterTo
		switch header {
		case addrHeader:
			e := new(addrEntry)
			e.addr.store = v.store
			if read, err = e.ReadFrom(r); err != nil {
				return n + read, err
			}
			n += read
			entry = e
		case scriptHeader:
			e := new(scriptEntry)
			e.script.store = v.store
			if read, err = e.ReadFrom(r); err != nil {
				return n + read, err
			}
			n += read
			entry = e
		default:
			return n, fmt.Errorf("unknown entry header: %d", uint8(header))
		}
		v.entries = append(v.entries, entry)
	}
}
// Key stores use a custom network parameters type so it can be an io.ReaderFrom.
// Due to the way and order that key stores are currently serialized and how
// address reading requires the key store's network parameters, setting and
// erroring on unknown key store networks must happen on the read itself and not
// after the fact. This is admittedly a hack, but with a bip32 keystore on the
// horizon I'm not too motivated to clean this up.
type netParams chaincfg.Params
// ReadFrom sets the network parameters from the 4-byte little-endian
// bitcoin network magic read from r. Unknown networks fail immediately,
// since subsequent address deserialization depends on the parameters.
func (net *netParams) ReadFrom(r io.Reader) (int64, error) {
	var magic [4]byte
	n, err := io.ReadFull(r, magic[:])
	if err != nil {
		return int64(n), err
	}
	switch wire.BitcoinNet(binary.LittleEndian.Uint32(magic[:])) {
	case wire.MainNet:
		*net = (netParams)(chaincfg.MainNetParams)
	case wire.TestNet3:
		*net = (netParams)(chaincfg.TestNet3Params)
	case wire.SimNet:
		*net = (netParams)(chaincfg.SimNetParams)
	case chaincfg.SigNetParams.Net:
		// The legacy key store won't be compatible with custom signets,
		// only the main public one.
		*net = (netParams)(chaincfg.SigNetParams)
	default:
		return int64(n), errors.New("unknown network")
	}
	return int64(n), nil
}
// WriteTo serializes the network magic to w as 4 little-endian bytes.
func (net *netParams) WriteTo(w io.Writer) (int64, error) {
	var magic [4]byte
	binary.LittleEndian.PutUint32(magic[:], uint32(net.Net))
	n, err := w.Write(magic[:])
	return int64(n), err
}
// Stringified byte slices for use as map lookup keys.
type addressKey string
// getAddressKey returns the map key for addr, derived from the raw
// bytes of its script address (the pubkey or script hash).
func getAddressKey(addr btcutil.Address) addressKey {
	return addressKey(addr.ScriptAddress())
}
// Store represents an key store in memory. It implements the
// io.ReaderFrom and io.WriterTo interfaces to read from and
// write to any type of byte streams, including files.
type Store struct {
	// TODO: Use atomic operations for dirty so the reader lock
	// doesn't need to be grabbed.
	// dirty is set by MarkDirty and cleared by a successful WriteIfDirty.
	dirty bool
	// path/dir/file locate the on-disk key store for WriteIfDirty.
	path string
	dir string
	file string
	// mtx guards all mutable fields below.
	mtx sync.RWMutex
	vers version
	net *netParams
	flags walletFlags
	createDate int64
	name [32]byte
	desc [256]byte
	// highestUsed is the chain index of the most recently requested
	// chained address (rootKeyChainIdx when none requested yet).
	highestUsed int64
	kdfParams kdfParameters
	// keyGenerator is the root address all chained addresses derive from.
	keyGenerator btcAddress
	// These are non-standard and fit in the extra 1024 bytes between the
	// root address and the appended entries.
	recent recentBlocks
	addrMap map[addressKey]walletAddress
	// The rest of the fields in this struct are not serialized.
	passphrase []byte
	// secret holds the derived 32-byte AES key while unlocked; see isLocked.
	secret []byte
	chainIdxMap map[int64]btcutil.Address
	importedAddrs []walletAddress
	lastChainIdx int64
	// missingKeysStart is the earliest chain index whose private key must
	// be created on the next unlock (rootKeyChainIdx when none pending).
	missingKeysStart int64
}
// New creates and initializes a new Store. desc's byte length must not
// exceed 256 bytes. All address private keys are encrypted with
// passphrase. The key store is returned locked.
//
// (Fixed stale doc: there is no name parameter; the name field is left
// zeroed.)
func New(dir string, desc string, passphrase []byte, net *chaincfg.Params,
	createdAt *BlockStamp) (*Store, error) {
	// Check sizes of inputs.
	if len(desc) > 256 {
		return nil, errors.New("desc exceeds 256 byte maximum size")
	}
	// Randomly-generate rootkey and chaincode.
	rootkey := make([]byte, 32)
	if _, err := rand.Read(rootkey); err != nil {
		return nil, err
	}
	chaincode := make([]byte, 32)
	if _, err := rand.Read(chaincode); err != nil {
		return nil, err
	}
	// Compute AES key and encrypt root address.
	kdfp, err := computeKdfParameters(defaultKdfComputeTime, defaultKdfMaxMem)
	if err != nil {
		return nil, err
	}
	aeskey := kdf(passphrase, kdfp)
	// Create and fill key store.
	s := &Store{
		path: filepath.Join(dir, Filename),
		dir: dir,
		file: Filename,
		vers: VersCurrent,
		net: (*netParams)(net),
		flags: walletFlags{
			useEncryption: true,
			watchingOnly: false,
		},
		createDate: time.Now().Unix(),
		highestUsed: rootKeyChainIdx,
		kdfParams: *kdfp,
		recent: recentBlocks{
			lastHeight: createdAt.Height,
			hashes: []*chainhash.Hash{
				createdAt.Hash,
			},
		},
		addrMap: make(map[addressKey]walletAddress),
		chainIdxMap: make(map[int64]btcutil.Address),
		lastChainIdx: rootKeyChainIdx,
		missingKeysStart: rootKeyChainIdx,
		secret: aeskey,
	}
	copy(s.desc[:], []byte(desc))
	// Create new root address from key and chaincode.
	root, err := newRootBtcAddress(s, rootkey, nil, chaincode,
		createdAt)
	if err != nil {
		return nil, err
	}
	// Verify root address keypairs.
	if err := root.verifyKeypairs(); err != nil {
		return nil, err
	}
	if err := root.encrypt(aeskey); err != nil {
		return nil, err
	}
	s.keyGenerator = *root
	// Add root address to maps.
	rootAddr := s.keyGenerator.Address()
	s.addrMap[getAddressKey(rootAddr)] = &s.keyGenerator
	s.chainIdxMap[rootKeyChainIdx] = rootAddr
	// key store must be returned locked.
	if err := s.Lock(); err != nil {
		return nil, err
	}
	return s, nil
}
// ReadFrom reads data from a io.Reader and saves it to a key store,
// returning the number of bytes read and any errors encountered.
//
// The on-disk layout is positional: the datas slice below lists every
// section in exact file order, so reordering it would change the format.
func (s *Store) ReadFrom(r io.Reader) (n int64, err error) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	var read int64
	// Reset all in-memory bookkeeping before deserializing.
	s.net = &netParams{}
	s.addrMap = make(map[addressKey]walletAddress)
	s.chainIdxMap = make(map[int64]btcutil.Address)
	var id [8]byte
	appendedEntries := varEntries{store: s}
	s.keyGenerator.store = s
	// Iterate through each entry needing to be read. If data
	// implements io.ReaderFrom, use its ReadFrom func. Otherwise,
	// data is a pointer to a fixed sized value.
	datas := []interface{}{
		&id,
		&s.vers,
		s.net,
		&s.flags,
		make([]byte, 6), // Bytes for Armory unique ID
		&s.createDate,
		&s.name,
		&s.desc,
		&s.highestUsed,
		&s.kdfParams,
		make([]byte, 256),
		&s.keyGenerator,
		newUnusedSpace(1024, &s.recent),
		&appendedEntries,
	}
	for _, data := range datas {
		var err error
		switch d := data.(type) {
		case readerFromVersion:
			// Version-dependent sections need s.vers, which was read earlier
			// in this same loop.
			read, err = d.readFromVersion(s.vers, r)
		case io.ReaderFrom:
			read, err = d.ReadFrom(r)
		default:
			read, err = binaryRead(r, binary.LittleEndian, d)
		}
		n += read
		if err != nil {
			return n, err
		}
	}
	if id != fileID {
		return n, errors.New("unknown file ID")
	}
	// Add root address to address map.
	rootAddr := s.keyGenerator.Address()
	s.addrMap[getAddressKey(rootAddr)] = &s.keyGenerator
	s.chainIdxMap[rootKeyChainIdx] = rootAddr
	s.lastChainIdx = rootKeyChainIdx
	// Fill unserialized fields.
	wts := appendedEntries.entries
	for _, wt := range wts {
		switch e := wt.(type) {
		case *addrEntry:
			addr := e.addr.Address()
			s.addrMap[getAddressKey(addr)] = &e.addr
			if e.addr.Imported() {
				s.importedAddrs = append(s.importedAddrs, &e.addr)
			} else {
				s.chainIdxMap[e.addr.chainIndex] = addr
				if s.lastChainIdx < e.addr.chainIndex {
					s.lastChainIdx = e.addr.chainIndex
				}
			}
			// If the private keys have not been created yet, mark the
			// earliest so all can be created on next key store unlock.
			if e.addr.flags.createPrivKeyNextUnlock {
				switch {
				case s.missingKeysStart == rootKeyChainIdx:
					fallthrough
				case e.addr.chainIndex < s.missingKeysStart:
					s.missingKeysStart = e.addr.chainIndex
				}
			}
		case *scriptEntry:
			addr := e.script.Address()
			s.addrMap[getAddressKey(addr)] = &e.script
			// script are always imported.
			s.importedAddrs = append(s.importedAddrs, &e.script)
		default:
			return n, errors.New("unknown appended entry")
		}
	}
	return n, nil
}
// WriteTo serializes a key store and writes it to a io.Writer,
// returning the number of bytes written and any errors encountered.
// It takes the read lock and delegates to the unexported writeTo.
func (s *Store) WriteTo(w io.Writer) (int64, error) {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	return s.writeTo(w)
}
// writeTo serializes the key store to w without acquiring the mutex;
// callers must hold at least a read lock. The datas slice below defines
// the exact on-disk section order and must mirror ReadFrom.
func (s *Store) writeTo(w io.Writer) (n int64, err error) {
	var wts []io.WriterTo
	// NOTE(review): chainedAddrs is indexed directly by chainIndex and
	// sized len(chainIdxMap)-1 (the root address lives at a negative
	// index), so chained indexes are assumed contiguous from 0 — confirm
	// this invariant holds for all write paths.
	var chainedAddrs = make([]io.WriterTo, len(s.chainIdxMap)-1)
	var importedAddrs []io.WriterTo
	for _, wAddr := range s.addrMap {
		switch btcAddr := wAddr.(type) {
		case *btcAddress:
			e := &addrEntry{
				addr: *btcAddr,
			}
			copy(e.pubKeyHash160[:], btcAddr.AddrHash())
			if btcAddr.Imported() {
				// No order for imported addresses.
				importedAddrs = append(importedAddrs, e)
			} else if btcAddr.chainIndex >= 0 {
				// Chained addresses are sorted. This is
				// kind of nice but probably isn't necessary.
				chainedAddrs[btcAddr.chainIndex] = e
			}
		case *scriptAddress:
			e := &scriptEntry{
				script: *btcAddr,
			}
			copy(e.scriptHash160[:], btcAddr.AddrHash())
			// scripts are always imported
			importedAddrs = append(importedAddrs, e)
		}
	}
	wts = append(chainedAddrs, importedAddrs...) // nolint:gocritic
	appendedEntries := varEntries{store: s, entries: wts}
	// Iterate through each entry needing to be written. If data
	// implements io.WriterTo, use its WriteTo func. Otherwise,
	// data is a pointer to a fixed size value.
	datas := []interface{}{
		&fileID,
		&VersCurrent,
		s.net,
		&s.flags,
		make([]byte, 6), // Bytes for Armory unique ID
		&s.createDate,
		&s.name,
		&s.desc,
		&s.highestUsed,
		&s.kdfParams,
		make([]byte, 256),
		&s.keyGenerator,
		newUnusedSpace(1024, &s.recent),
		&appendedEntries,
	}
	var written int64
	for _, data := range datas {
		if s, ok := data.(io.WriterTo); ok {
			written, err = s.WriteTo(w)
		} else {
			written, err = binaryWrite(w, binary.LittleEndian, data)
		}
		n += written
		if err != nil {
			return n, err
		}
	}
	return n, nil
}
// MarkDirty flags the in-memory key store as having unsaved changes so
// that the next WriteIfDirty call persists it to disk.
// TODO: set this automatically.
func (s *Store) MarkDirty() {
	s.mtx.Lock()
	s.dirty = true
	s.mtx.Unlock()
}
// WriteIfDirty persists the key store to disk if it has unsaved
// changes, writing to a temporary file first and atomically renaming it
// over the previous file. It is a no-op when the store is clean.
//
// Fixes over the previous version: the temporary file is now removed on
// every failure path (it used to leak one temp file per failed write),
// and the error from closing the file after Sync is no longer ignored.
func (s *Store) WriteIfDirty() error {
	s.mtx.RLock()
	if !s.dirty {
		s.mtx.RUnlock()
		return nil
	}
	// CreateTemp creates the file 0600, so no need to chmod it.
	fi, err := os.CreateTemp(s.dir, s.file)
	if err != nil {
		s.mtx.RUnlock()
		return err
	}
	fiPath := fi.Name()
	// discard closes and deletes the temporary file after a failed
	// write so aborted saves do not accumulate on disk.
	discard := func() {
		fi.Close()
		os.Remove(fiPath)
	}
	if _, err = s.writeTo(fi); err != nil {
		s.mtx.RUnlock()
		discard()
		return err
	}
	if err = fi.Sync(); err != nil {
		s.mtx.RUnlock()
		discard()
		return err
	}
	if err = fi.Close(); err != nil {
		s.mtx.RUnlock()
		os.Remove(fiPath)
		return err
	}
	err = rename.Atomic(fiPath, s.path)
	s.mtx.RUnlock()
	if err == nil {
		s.mtx.Lock()
		s.dirty = false
		s.mtx.Unlock()
	} else {
		// The rename failed, so the temp file was left behind.
		os.Remove(fiPath)
	}
	return err
}
// OpenDir opens a new key store from the specified directory. If the file
// does not exist, the error from the os package will be returned, and can
// be checked with os.IsNotExist to differentiate missing file errors from
// others (including deserialization).
func OpenDir(dir string) (*Store, error) {
	path := filepath.Join(dir, Filename)
	fi, err := os.OpenFile(path, os.O_RDONLY, 0)
	if err != nil {
		return nil, err
	}
	defer fi.Close()
	store := new(Store)
	if _, err := store.ReadFrom(fi); err != nil {
		return nil, err
	}
	// Remember where the store came from for later WriteIfDirty calls.
	store.path = path
	store.dir = dir
	store.file = Filename
	return store, nil
}
// Unlock derives an AES key from passphrase and the key store's KDF
// parameters and unlocks the root key of the key store. If the unlock
// succeeds, the secret key is saved so any encrypted private key can be
// decrypted, and any addresses created while the key store was locked
// without private keys have their keys created now.
func (s *Store) Unlock(passphrase []byte) error {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	if s.flags.watchingOnly {
		return ErrWatchingOnly
	}
	// Derive the AES key and verify it against the root address.
	derived := kdf(passphrase, &s.kdfParams)
	if _, err := s.keyGenerator.unlock(derived); err != nil {
		return err
	}
	// Unlock succeeded: retain the passphrase and AES key.
	s.passphrase = passphrase
	s.secret = derived
	return s.createMissingPrivateKeys()
}
// Lock performs a best-effort removal and zeroing of all secret keys
// associated with the key store. Address entries are locked even when
// the store was already locked, in which case ErrLocked is returned.
func (s *Store) Lock() (err error) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	if s.flags.watchingOnly {
		return ErrWatchingOnly
	}
	// Zero and drop the clear text passphrase and AES key.
	if s.isLocked() {
		err = ErrLocked
	} else {
		zero(s.passphrase)
		zero(s.secret)
		s.passphrase = nil
		s.secret = nil
	}
	// Regardless, clear any clear text private keys held by address
	// entries.
	for _, wa := range s.addrMap {
		if baddr, ok := wa.(*btcAddress); ok {
			_ = baddr.lock()
		}
	}
	return err
}
// ChangePassphrase creates a new AES key from a new passphrase and
// re-encrypts all encrypted private keys with the new key. The store
// must be unlocked and not watching-only.
func (s *Store) ChangePassphrase(new []byte) error {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	if s.flags.watchingOnly {
		return ErrWatchingOnly
	}
	if s.isLocked() {
		return ErrLocked
	}
	oldkey := s.secret
	newkey := kdf(new, &s.kdfParams)
	// Re-encrypt each private key under the new AES key. Only
	// btcAddresses currently have private keys.
	for _, wa := range s.addrMap {
		if a, ok := wa.(*btcAddress); ok {
			if err := a.changeEncryptionKey(oldkey, newkey); err != nil {
				return err
			}
		}
	}
	// Zero the old secrets before saving the new ones.
	zero(s.passphrase)
	zero(s.secret)
	s.passphrase = new
	s.secret = newkey
	return nil
}
// zero overwrites every byte of b with 0x00, best-effort scrubbing of
// secret material from memory.
func zero(b []byte) {
	for i := 0; i < len(b); i++ {
		b[i] = 0
	}
}
// IsLocked returns whether a key store is unlocked (in which case the
// key is saved in memory), or locked.
func (s *Store) IsLocked() bool {
	s.mtx.RLock()
	locked := s.isLocked()
	s.mtx.RUnlock()
	return locked
}
// isLocked reports the lock state without taking the mutex. The store
// counts as unlocked only while a full 32-byte AES key is in memory.
func (s *Store) isLocked() bool {
	return len(s.secret) != 32
}
// NextChainedAddress attempts to get the next chained address. If the key
// store is unlocked, the next pubkey and private key of the address chain are
// derived. If the key store is locked, only the next pubkey is derived, and
// the private key will be generated on next unlock.
func (s *Store) NextChainedAddress(bs *BlockStamp) (btcutil.Address, error) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	return s.nextChainedAddress(bs)
}
// nextChainedAddress is the lock-free implementation of
// NextChainedAddress; the caller must hold the mutex.
func (s *Store) nextChainedAddress(bs *BlockStamp) (btcutil.Address, error) {
	btcAddr, err := s.nextChainedBtcAddress(bs)
	if err != nil {
		return nil, err
	}
	return btcAddr.Address(), nil
}
// ChangeAddress returns the next chained address from the key store,
// marking the address as destined for a change transaction output.
func (s *Store) ChangeAddress(bs *BlockStamp) (btcutil.Address, error) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	btcAddr, err := s.nextChainedBtcAddress(bs)
	if err != nil {
		return nil, err
	}
	btcAddr.flags.change = true
	// Create and return payment address for address hash.
	return btcAddr.Address(), nil
}
// nextChainedBtcAddress returns the next btcAddress in the address
// chain, extending the chain first when no pregenerated address exists.
// A locked store extends with pubkeys only (private keys are created on
// the next unlock); an unlocked store derives both keys. Callers must
// hold the mutex. (Also fixes the "inproperly" typo in the error text.)
func (s *Store) nextChainedBtcAddress(bs *BlockStamp) (*btcAddress, error) {
	// Attempt to get address hash of next chained address.
	nextAPKH, ok := s.chainIdxMap[s.highestUsed+1]
	if !ok {
		if s.isLocked() {
			// Chain pubkeys.
			if err := s.extendLocked(bs); err != nil {
				return nil, err
			}
		} else {
			// Chain private and pubkeys.
			if err := s.extendUnlocked(bs); err != nil {
				return nil, err
			}
		}
		// Should be added to the internal maps, try lookup again.
		nextAPKH, ok = s.chainIdxMap[s.highestUsed+1]
		if !ok {
			return nil, errors.New("chain index map improperly updated")
		}
	}
	// Look up address.
	addr, ok := s.addrMap[getAddressKey(nextAPKH)]
	if !ok {
		return nil, errors.New("cannot find generated address")
	}
	btcAddr, ok := addr.(*btcAddress)
	if !ok {
		return nil, errors.New("found non-pubkey chained address")
	}
	s.highestUsed++
	return btcAddr, nil
}
// LastChainedAddress returns the most recently requested chained
// address from calling NextChainedAddress, or the root address if
// no chained addresses have been requested.
func (s *Store) LastChainedAddress() btcutil.Address {
	s.mtx.RLock()
	addr := s.chainIdxMap[s.highestUsed]
	s.mtx.RUnlock()
	return addr
}
// extendUnlocked grows address chain for an unlocked keystore.
//
// The new address is derived (via chainedPrivKey) from the last chained
// address's private key, pubkey, and chaincode, then encrypted with the
// store's secret and registered in addrMap/chainIdxMap. The chaincode
// carries over unchanged to the new address. The caller must hold the
// mutex; ErrLocked is returned if the store is locked.
func (s *Store) extendUnlocked(bs *BlockStamp) error {
	// Get last chained address. New chained addresses will be
	// chained off of this address's chaincode and private key.
	a := s.chainIdxMap[s.lastChainIdx]
	waddr, ok := s.addrMap[getAddressKey(a)]
	if !ok {
		return errors.New("expected last chained address not found")
	}
	if s.isLocked() {
		return ErrLocked
	}
	lastAddr, ok := waddr.(*btcAddress)
	if !ok {
		return errors.New("found non-pubkey chained address")
	}
	// Decrypt the previous private key and derive the next one from it.
	privkey, err := lastAddr.unlock(s.secret)
	if err != nil {
		return err
	}
	cc := lastAddr.chaincode[:]
	privkey, err = chainedPrivKey(privkey, lastAddr.pubKeyBytes(), cc)
	if err != nil {
		return err
	}
	newAddr, err := newBtcAddress(s, privkey, nil, bs, true)
	if err != nil {
		return err
	}
	if err := newAddr.verifyKeypairs(); err != nil {
		return err
	}
	if err = newAddr.encrypt(s.secret); err != nil {
		return err
	}
	// Register the new address in the bookkeeping maps and propagate
	// the chaincode for future derivations.
	a = newAddr.Address()
	s.addrMap[getAddressKey(a)] = newAddr
	newAddr.chainIndex = lastAddr.chainIndex + 1
	s.chainIdxMap[newAddr.chainIndex] = a
	s.lastChainIdx++
	copy(newAddr.chaincode[:], cc)
	return nil
}
// extendLocked creates one new address without a private key (allowing for
// extending the address chain from a locked key store) chained from the
// last used chained address and adds the address to the key store's internal
// bookkeeping structures.
//
// The first index extended this way is recorded in missingKeysStart so
// createMissingPrivateKeys can fill in the private keys on next unlock.
func (s *Store) extendLocked(bs *BlockStamp) error {
	a := s.chainIdxMap[s.lastChainIdx]
	waddr, ok := s.addrMap[getAddressKey(a)]
	if !ok {
		return errors.New("expected last chained address not found")
	}
	addr, ok := waddr.(*btcAddress)
	if !ok {
		return errors.New("found non-pubkey chained address")
	}
	// Derive only the next public key; the private key is unavailable
	// while locked.
	cc := addr.chaincode[:]
	nextPubkey, err := chainedPubKey(addr.pubKeyBytes(), cc)
	if err != nil {
		return err
	}
	newaddr, err := newBtcAddressWithoutPrivkey(s, nextPubkey, nil, bs)
	if err != nil {
		return err
	}
	// Register the new address and propagate the chaincode.
	a = newaddr.Address()
	s.addrMap[getAddressKey(a)] = newaddr
	newaddr.chainIndex = addr.chainIndex + 1
	s.chainIdxMap[newaddr.chainIndex] = a
	s.lastChainIdx++
	copy(newaddr.chaincode[:], cc)
	// Remember the earliest index still missing a private key.
	if s.missingKeysStart == rootKeyChainIdx {
		s.missingKeysStart = newaddr.chainIndex
	}
	return nil
}
// createMissingPrivateKeys derives and encrypts the private keys for
// every chained address created while the store was locked, starting at
// missingKeysStart. The store must be unlocked and the mutex held. On
// success missingKeysStart is reset to rootKeyChainIdx (nothing pending).
func (s *Store) createMissingPrivateKeys() error {
	idx := s.missingKeysStart
	if idx == rootKeyChainIdx {
		return nil
	}
	// Lookup previous address.
	apkh, ok := s.chainIdxMap[idx-1]
	if !ok {
		return errors.New("missing previous chained address")
	}
	prevWAddr := s.addrMap[getAddressKey(apkh)]
	if s.isLocked() {
		return ErrLocked
	}
	prevAddr, ok := prevWAddr.(*btcAddress)
	if !ok {
		return errors.New("found non-pubkey chained address")
	}
	prevPrivKey, err := prevAddr.unlock(s.secret)
	if err != nil {
		return err
	}
	for i := idx; ; i++ {
		// Get the next private key for the ith address in the address chain.
		ithPrivKey, err := chainedPrivKey(prevPrivKey,
			prevAddr.pubKeyBytes(), prevAddr.chaincode[:])
		if err != nil {
			return err
		}
		// Get the address with the missing private key, set, and
		// encrypt.
		apkh, ok := s.chainIdxMap[i]
		if !ok {
			// Finished.
			break
		}
		waddr := s.addrMap[getAddressKey(apkh)]
		addr, ok := waddr.(*btcAddress)
		if !ok {
			return errors.New("found non-pubkey chained address")
		}
		addr.privKeyCT = ithPrivKey
		if err := addr.encrypt(s.secret); err != nil {
			// Avoid bug: see comment for VersUnsetNeedsPrivkeyFlag.
			// Old file versions may legitimately hit an already-encrypted
			// address here; only newer versions treat it as fatal.
			if err != ErrAlreadyEncrypted || s.vers.LT(VersUnsetNeedsPrivkeyFlag) {
				return err
			}
		}
		addr.flags.createPrivKeyNextUnlock = false
		// Set previous address and private key for next iteration.
		prevAddr = addr
		prevPrivKey = ithPrivKey
	}
	s.missingKeysStart = rootKeyChainIdx
	return nil
}
// Address returns the key store's bookkeeping structure for a, or
// ErrAddressNotFound when a is not managed by this store. The result
// may be typecast into more specific interfaces (like PubKeyAddress
// and ScriptAddress) if specific information such as keys is required.
func (s *Store) Address(a btcutil.Address) (WalletAddress, error) {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	// Look up address by address hash.
	wa, ok := s.addrMap[getAddressKey(a)]
	if !ok {
		return nil, ErrAddressNotFound
	}
	return wa, nil
}
// Net returns the bitcoin network parameters for this key store.
func (s *Store) Net() *chaincfg.Params {
	s.mtx.RLock()
	params := s.netParams()
	s.mtx.RUnlock()
	return params
}
// netParams converts the store's internal network type back to the
// chaincfg representation, without locking.
func (s *Store) netParams() *chaincfg.Params {
	return (*chaincfg.Params)(s.net)
}
// SetSyncStatus sets the sync status for a single key store address,
// returning ErrAddressNotFound when a is not managed by the store.
//
// When marking an address as unsynced, only the type Unsynced matters.
// The value is ignored.
func (s *Store) SetSyncStatus(a btcutil.Address, ss SyncStatus) error {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	wa, found := s.addrMap[getAddressKey(a)]
	if !found {
		return ErrAddressNotFound
	}
	wa.setSyncStatus(ss)
	return nil
}
// SetSyncedWith marks already synced addresses in the key store to be in
// sync with the recently-seen block described by the blockstamp.
// Unsynced addresses are unaffected by this method and must be marked
// as in sync with MarkAddressSynced or MarkAllSynced to be considered
// in sync with bs.
//
// If bs is nil, the entire key store is marked unsynced.
func (s *Store) SetSyncedWith(bs *BlockStamp) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	if bs == nil {
		s.recent.hashes = s.recent.hashes[:0]
		s.recent.lastHeight = s.keyGenerator.firstBlock
		s.keyGenerator.setSyncStatus(Unsynced(s.keyGenerator.firstBlock))
		return
	}
	// Check if we're trying to rollback the last seen history.
	// If so, and this bs is already saved, remove anything
	// after and return. Otherwise, remove previous hashes.
	if bs.Height < s.recent.lastHeight {
		// maybeIdx is where bs's hash would sit in the recent list if it
		// is part of the already-seen history.
		maybeIdx := len(s.recent.hashes) - 1 - int(s.recent.lastHeight-bs.Height)
		if maybeIdx >= 0 && maybeIdx < len(s.recent.hashes) &&
			*s.recent.hashes[maybeIdx] == *bs.Hash {
			s.recent.lastHeight = bs.Height
			// subslice out the removed hashes.
			s.recent.hashes = s.recent.hashes[:maybeIdx]
			return
		}
		s.recent.hashes = nil
	}
	// A gap in heights invalidates the stored history entirely.
	if bs.Height != s.recent.lastHeight+1 {
		s.recent.hashes = nil
	}
	s.recent.lastHeight = bs.Height
	// Keep at most the 20 most recent block hashes (see Vers20LastBlocks).
	if len(s.recent.hashes) == 20 {
		// Make room for the most recent hash.
		copy(s.recent.hashes, s.recent.hashes[1:])
		// Set new block in the last position.
		s.recent.hashes[19] = bs.Hash
	} else {
		s.recent.hashes = append(s.recent.hashes, bs.Hash)
	}
}
// SyncedTo returns details about the block that a wallet is marked at least
// synced through. The height is the height that rescans should start at when
// syncing a wallet back to the best chain.
//
// (Fixed stale doc: the comment previously referred to this method as
// "SyncHeight".)
//
// NOTE: If the hash of the synced block is not known, hash will be nil, and
// must be obtained from elsewhere. This must be explicitly checked before
// dereferencing the pointer.
func (s *Store) SyncedTo() (hash *chainhash.Hash, height int32) {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	// Start from the root address's partial-sync height when it exceeds
	// the recent-block height; otherwise use the recent-block info.
	switch h, ok := s.keyGenerator.SyncStatus().(PartialSync); {
	case ok && int32(h) > s.recent.lastHeight:
		height = int32(h)
	default:
		height = s.recent.lastHeight
		if n := len(s.recent.hashes); n != 0 {
			hash = s.recent.hashes[n-1]
		}
	}
	// Lower the result to the least-synced address; the hash is unknown
	// once the height no longer matches the recent-block entry.
	for _, a := range s.addrMap {
		var syncHeight int32
		switch e := a.SyncStatus().(type) {
		case Unsynced:
			syncHeight = int32(e)
		case PartialSync:
			syncHeight = int32(e)
		case FullSync:
			continue
		}
		if syncHeight < height {
			height = syncHeight
			hash = nil
			// Can't go lower than 0.
			if height == 0 {
				return
			}
		}
	}
	return // nolint:nakedret
}
// NewIterateRecentBlocks returns an iterator for recently-seen blocks.
// The iterator starts at the most recently-added block, and Prev should
// be used to access earlier blocks.
func (s *Store) NewIterateRecentBlocks() *BlockIterator {
	s.mtx.RLock()
	it := s.recent.iter(s)
	s.mtx.RUnlock()
	return it
}
// ImportPrivateKey imports a WIF private key into the keystore. The imported
// address is created using either a compressed or uncompressed serialized
// public key, depending on the CompressPubKey bool of the WIF. The store
// must be unlocked and not watching-only.
func (s *Store) ImportPrivateKey(wif *btcutil.WIF, bs *BlockStamp) (btcutil.Address, error) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	if s.flags.watchingOnly {
		return nil, ErrWatchingOnly
	}
	// Reject keys whose pubkey hash already maps to a managed address;
	// importing would create a duplicate.
	pkh := btcutil.Hash160(wif.SerializePubKey())
	if _, exists := s.addrMap[addressKey(pkh)]; exists {
		return nil, ErrDuplicate
	}
	// The key store must be unlocked to encrypt the imported private key.
	if s.isLocked() {
		return nil, ErrLocked
	}
	// Wrap the WIF key in a new key store address entry.
	newAddr, err := newBtcAddress(s, wif.PrivKey.Serialize(), nil, bs, wif.CompressPubKey)
	if err != nil {
		return nil, err
	}
	newAddr.chainIndex = importedKeyChainIdx
	// Mark as unsynced if import height is below currently-synced
	// height.
	if len(s.recent.hashes) != 0 && bs.Height < s.recent.lastHeight {
		newAddr.flags.unsynced = true
	}
	// Encrypt imported address with the derived AES key.
	if err := newAddr.encrypt(s.secret); err != nil {
		return nil, err
	}
	// Add the address to the bookkeeping structures; adding to the map
	// results in the imported address being serialized on the next
	// WriteTo call.
	addr := newAddr.Address()
	s.addrMap[getAddressKey(addr)] = newAddr
	s.importedAddrs = append(s.importedAddrs, newAddr)
	return addr, nil
}
// ImportScript creates a new scriptAddress with a user-provided script
// and adds it to the key store. Duplicate scripts are rejected.
func (s *Store) ImportScript(script []byte, bs *BlockStamp) (btcutil.Address, error) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	if s.flags.watchingOnly {
		return nil, ErrWatchingOnly
	}
	if _, exists := s.addrMap[addressKey(btcutil.Hash160(script))]; exists {
		return nil, ErrDuplicate
	}
	// Wrap the script in a new key store address entry.
	newAddr, err := newScriptAddress(s, script, bs)
	if err != nil {
		return nil, err
	}
	// Mark as unsynced if import height is below currently-synced
	// height.
	if len(s.recent.hashes) != 0 && bs.Height < s.recent.lastHeight {
		newAddr.flags.unsynced = true
	}
	// Add the address to the bookkeeping structures; adding to the map
	// results in the imported address being serialized on the next
	// WriteTo call.
	addr := newAddr.Address()
	s.addrMap[getAddressKey(addr)] = newAddr
	s.importedAddrs = append(s.importedAddrs, newAddr)
	return addr, nil
}
// CreateDate returns the Unix time of the key store creation time. This
// is used to compare the key store creation time against block headers
// and set a better minimum block height of where to begin rescans.
func (s *Store) CreateDate() int64 {
	s.mtx.RLock()
	created := s.createDate
	s.mtx.RUnlock()
	return created
}
// ExportWatchingWallet creates and returns a new key store with the same
// addresses in w, but as a watching-only key store without any private keys.
// New addresses created by the watching key store will match the new addresses
// created the original key store (thanks to public key address chaining), but
// will be missing the associated private keys.
func (s *Store) ExportWatchingWallet() (*Store, error) {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	// Don't continue if key store is already watching-only.
	if s.flags.watchingOnly {
		return nil, ErrWatchingOnly
	}
	// Copy members of w into a new key store, but mark as watching-only and
	// do not include any private keys.
	ws := &Store{
		vers: s.vers,
		net: s.net,
		flags: walletFlags{
			useEncryption: false,
			watchingOnly: true,
		},
		name: s.name,
		desc: s.desc,
		createDate: s.createDate,
		highestUsed: s.highestUsed,
		recent: recentBlocks{
			lastHeight: s.recent.lastHeight,
		},
		addrMap: make(map[addressKey]walletAddress),
		// todo oga make me a list
		chainIdxMap: make(map[int64]btcutil.Address),
		lastChainIdx: s.lastChainIdx,
	}
	// The root address is copied without private key material.
	kgwc := s.keyGenerator.watchingCopy(ws)
	ws.keyGenerator = *(kgwc.(*btcAddress))
	// Deep-copy the recent block hashes so the stores do not alias.
	if len(s.recent.hashes) != 0 {
		ws.recent.hashes = make([]*chainhash.Hash, 0, len(s.recent.hashes))
		for _, hash := range s.recent.hashes {
			hashCpy := *hash
			ws.recent.hashes = append(ws.recent.hashes, &hashCpy)
		}
	}
	// Copy every address as a watching-only version, rebuilding the
	// chain index map for the non-imported entries.
	for apkh, addr := range s.addrMap {
		if !addr.Imported() {
			// Must be a btcAddress if !imported.
			btcAddr := addr.(*btcAddress)
			ws.chainIdxMap[btcAddr.chainIndex] =
				addr.Address()
		}
		apkhCopy := apkh
		ws.addrMap[apkhCopy] = addr.watchingCopy(ws)
	}
	if len(s.importedAddrs) != 0 {
		ws.importedAddrs = make([]walletAddress, 0,
			len(s.importedAddrs))
		for _, addr := range s.importedAddrs {
			ws.importedAddrs = append(ws.importedAddrs, addr.watchingCopy(ws))
		}
	}
	return ws, nil
}
// SyncStatus is the interface type for all sync variants.
type SyncStatus interface {
	ImplementsSyncStatus()
}
type (
	// Unsynced is a type representing an unsynced address. When this is
	// returned by a key store method, the value is the recorded first seen
	// block height.
	Unsynced int32
	// PartialSync is a type representing a partially synced address (for
	// example, due to the result of a partially-completed rescan).
	PartialSync int32
	// FullSync is a type representing an address that is in sync with the
	// recently seen blocks.
	FullSync struct{}
)
// ImplementsSyncStatus is implemented to make Unsynced a SyncStatus.
func (u Unsynced) ImplementsSyncStatus() {}
// ImplementsSyncStatus is implemented to make PartialSync a SyncStatus.
func (p PartialSync) ImplementsSyncStatus() {}
// ImplementsSyncStatus is implemented to make FullSync a SyncStatus.
func (f FullSync) ImplementsSyncStatus() {}
// WalletAddress is an interface that provides access to information
// regarding an address managed by a key store. Concrete implementations of
// this type may provide further fields to provide information specific to
// that type of address.
type WalletAddress interface {
	// Address returns a btcutil.Address for the backing address.
	Address() btcutil.Address
	// AddrHash returns the key or script hash related to the address.
	AddrHash() string
	// FirstBlock returns the first block an address could be in.
	FirstBlock() int32
	// Imported returns true if the backing address was imported instead
	// of being part of an address chain.
	Imported() bool
	// Change returns true if the backing address was created for a
	// change output of a transaction.
	Change() bool
	// Compressed returns true if the backing address is compressed.
	Compressed() bool
	// SyncStatus returns the current synced state of an address.
	SyncStatus() SyncStatus
}
// SortedActiveAddresses returns all key store addresses that have been
// requested to be generated, in chain-index order followed by imported
// addresses. Unused key pool addresses are excluded. Use this when
// ordered addresses are needed; otherwise ActiveAddresses is preferred.
func (s *Store) SortedActiveAddresses() []WalletAddress {
	s.mtx.RLock()
	defer s.mtx.RUnlock()

	// Capacity: every chained index from the root plus all imports.
	result := make([]WalletAddress, 0,
		s.highestUsed+int64(len(s.importedAddrs))+1)
	for idx := int64(rootKeyChainIdx); idx <= s.highestUsed; idx++ {
		// Skip chain indexes with no recorded address info.
		if info, ok := s.addrMap[getAddressKey(s.chainIdxMap[idx])]; ok {
			result = append(result, info)
		}
	}
	for _, imported := range s.importedAddrs {
		result = append(result, imported)
	}
	return result
}
// ActiveAddresses returns a map between active payment addresses
// and their full info. These do not include unused addresses in the
// key pool. If addresses must be sorted, use SortedActiveAddresses.
func (s *Store) ActiveAddresses() map[btcutil.Address]WalletAddress {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	addrs := make(map[btcutil.Address]WalletAddress)
	for i := int64(rootKeyChainIdx); i <= s.highestUsed; i++ {
		a := s.chainIdxMap[i]
		// Skip chain indexes with no recorded address info. Indexing
		// addrMap unconditionally would yield a nil walletAddress and
		// panic on addr.Address(); SortedActiveAddresses already
		// guards this case the same way.
		addr, ok := s.addrMap[getAddressKey(a)]
		if !ok {
			continue
		}
		addrs[addr.Address()] = addr
	}
	for _, addr := range s.importedAddrs {
		addrs[addr.Address()] = addr
	}
	return addrs
}
// ExtendActiveAddresses gets or creates the next n addresses from the
// address chain and marks each as active. This is used to recover
// deterministic (not imported) addresses from a key store backup, or to
// keep the active addresses in sync between an encrypted key store with
// private keys and an exported watching key store without.
//
// A slice is returned with the btcutil.Address of each new address.
// The blockchain must be rescanned for these addresses.
func (s *Store) ExtendActiveAddresses(n int) ([]btcutil.Address, error) {
	s.mtx.Lock()
	defer s.mtx.Unlock()

	// Seed the block stamp from the most recently used chained address.
	last := s.addrMap[getAddressKey(s.chainIdxMap[s.highestUsed])]
	bs := &BlockStamp{Height: last.FirstBlock()}

	addrs := make([]btcutil.Address, n)
	for i := range addrs {
		next, err := s.nextChainedAddress(bs)
		if err != nil {
			return nil, err
		}
		addrs[i] = next
	}
	return addrs, nil
}
type walletFlags struct {
useEncryption bool
watchingOnly bool
}
func (wf *walletFlags) ReadFrom(r io.Reader) (int64, error) {
var b [8]byte
n, err := io.ReadFull(r, b[:])
if err != nil {
return int64(n), err
}
wf.useEncryption = b[0]&(1<<0) != 0
wf.watchingOnly = b[0]&(1<<1) != 0
return int64(n), nil
}
func (wf *walletFlags) WriteTo(w io.Writer) (int64, error) {
var b [8]byte
if wf.useEncryption {
b[0] |= 1 << 0
}
if wf.watchingOnly {
b[0] |= 1 << 1
}
n, err := w.Write(b[:])
return int64(n), err
}
type addrFlags struct {
hasPrivKey bool
hasPubKey bool
encrypted bool
createPrivKeyNextUnlock bool
compressed bool
change bool
unsynced bool
partialSync bool
}
func (af *addrFlags) ReadFrom(r io.Reader) (int64, error) {
var b [8]byte
n, err := io.ReadFull(r, b[:])
if err != nil {
return int64(n), err
}
af.hasPrivKey = b[0]&(1<<0) != 0
af.hasPubKey = b[0]&(1<<1) != 0
af.encrypted = b[0]&(1<<2) != 0
af.createPrivKeyNextUnlock = b[0]&(1<<3) != 0
af.compressed = b[0]&(1<<4) != 0
af.change = b[0]&(1<<5) != 0
af.unsynced = b[0]&(1<<6) != 0
af.partialSync = b[0]&(1<<7) != 0
// Currently (at least until watching-only key stores are implemented)
// btcwallet shall refuse to open any unencrypted addresses. This
// check only makes sense if there is a private key to encrypt, which
// there may not be if the keypool was extended from just the last
// public key and no private keys were written.
if af.hasPrivKey && !af.encrypted {
return int64(n), errors.New("private key is unencrypted")
}
return int64(n), nil
}
func (af *addrFlags) WriteTo(w io.Writer) (int64, error) {
var b [8]byte
if af.hasPrivKey {
b[0] |= 1 << 0
}
if af.hasPubKey {
b[0] |= 1 << 1
}
if af.hasPrivKey && !af.encrypted {
// We only support encrypted privkeys.
return 0, errors.New("address must be encrypted")
}
if af.encrypted {
b[0] |= 1 << 2
}
if af.createPrivKeyNextUnlock {
b[0] |= 1 << 3
}
if af.compressed {
b[0] |= 1 << 4
}
if af.change {
b[0] |= 1 << 5
}
if af.unsynced {
b[0] |= 1 << 6
}
if af.partialSync {
b[0] |= 1 << 7
}
n, err := w.Write(b[:])
return int64(n), err
}
// recentBlocks holds at most the last 20 seen block hashes as well as
// the block height of the most recently seen block.
type recentBlocks struct {
	hashes     []*chainhash.Hash // ordered oldest to newest (see ReadFrom)
	lastHeight int32             // height of the newest entry; -1 when unknown
}

// readFromVersion deserializes the recent-blocks record from r using
// the layout current at file version v. Files at or after
// Vers20LastBlocks use the current format (ReadFrom); older files
// stored only a single height and hash.
func (rb *recentBlocks) readFromVersion(v version, r io.Reader) (int64, error) {
	if !v.LT(Vers20LastBlocks) {
		// Use current version.
		return rb.ReadFrom(r)
	}
	// Old file versions only saved the most recently seen
	// block height and hash, not the last 20.
	var read int64
	// Read height.
	var heightBytes [4]byte // 4 bytes for a int32
	n, err := io.ReadFull(r, heightBytes[:])
	read += int64(n)
	if err != nil {
		return read, err
	}
	rb.lastHeight = int32(binary.LittleEndian.Uint32(heightBytes[:]))
	// If height is -1, the last synced block is unknown, so don't try
	// to read a block hash.
	if rb.lastHeight == -1 {
		rb.hashes = nil
		return read, nil
	}
	// Read block hash.
	var syncedBlockHash chainhash.Hash
	n, err = io.ReadFull(r, syncedBlockHash[:])
	read += int64(n)
	if err != nil {
		return read, err
	}
	rb.hashes = []*chainhash.Hash{
		&syncedBlockHash,
	}
	return read, nil
}
// ReadFrom implements io.ReaderFrom, decoding the current-format
// record: a uint32 block count (max 20), the most recent block height,
// then that many 32-byte block hashes ordered oldest to newest. All
// integers are little endian.
func (rb *recentBlocks) ReadFrom(r io.Reader) (int64, error) {
	var read int64
	// Read number of saved blocks. This should not exceed 20.
	var nBlockBytes [4]byte // 4 bytes for a uint32
	n, err := io.ReadFull(r, nBlockBytes[:])
	read += int64(n)
	if err != nil {
		return read, err
	}
	nBlocks := binary.LittleEndian.Uint32(nBlockBytes[:])
	if nBlocks > 20 {
		return read, errors.New("number of last seen blocks exceeds maximum of 20")
	}
	// Read most recently seen block height.
	var heightBytes [4]byte // 4 bytes for a int32
	n, err = io.ReadFull(r, heightBytes[:])
	read += int64(n)
	if err != nil {
		return read, err
	}
	height := int32(binary.LittleEndian.Uint32(heightBytes[:]))
	// height should not be -1 (or any other negative number)
	// since at this point we should be reading in at least one
	// known block.
	if height < 0 {
		return read, errors.New("expected a block but specified height is negative")
	}
	// Set last seen height.
	rb.lastHeight = height
	// Read nBlocks block hashes. Hashes are expected to be in
	// order of oldest to newest, but there's no way to check
	// that here.
	rb.hashes = make([]*chainhash.Hash, 0, nBlocks)
	for i := uint32(0); i < nBlocks; i++ {
		var blockHash chainhash.Hash
		n, err := io.ReadFull(r, blockHash[:])
		read += int64(n)
		if err != nil {
			return read, err
		}
		rb.hashes = append(rb.hashes, &blockHash)
	}
	return read, nil
}

// WriteTo implements io.WriterTo, encoding the record in the same
// layout decoded by ReadFrom: count, height, then the hashes.
func (rb *recentBlocks) WriteTo(w io.Writer) (int64, error) {
	var written int64
	// Write number of saved blocks. This should not exceed 20.
	nBlocks := uint32(len(rb.hashes))
	if nBlocks > 20 {
		return written, errors.New("number of last seen blocks exceeds maximum of 20")
	}
	// A positive hash count with a negative height is inconsistent;
	// ReadFrom would reject it as well.
	if nBlocks != 0 && rb.lastHeight < 0 {
		return written, errors.New("number of block hashes is positive, but height is negative")
	}
	var nBlockBytes [4]byte // 4 bytes for a uint32
	binary.LittleEndian.PutUint32(nBlockBytes[:], nBlocks)
	n, err := w.Write(nBlockBytes[:])
	written += int64(n)
	if err != nil {
		return written, err
	}
	// Write most recently seen block height.
	var heightBytes [4]byte // 4 bytes for a int32
	binary.LittleEndian.PutUint32(heightBytes[:], uint32(rb.lastHeight))
	n, err = w.Write(heightBytes[:])
	written += int64(n)
	if err != nil {
		return written, err
	}
	// Write block hashes.
	for _, hash := range rb.hashes {
		n, err := w.Write(hash[:])
		written += int64(n)
		if err != nil {
			return written, err
		}
	}
	return written, nil
}
// BlockIterator allows for the forwards and backwards iteration of recently
// seen blocks.
type BlockIterator struct {
	storeMtx *sync.RWMutex // guards rb; shared with the owning Store
	// NOTE(review): height is set at creation but never updated by
	// Next/Prev; BlockStamp derives the height from rb instead —
	// confirm whether this field is still needed.
	height int32
	index  int // current position within rb.hashes
	rb     *recentBlocks
}

// iter returns an iterator positioned at the most recently seen block,
// or nil when no blocks have been recorded yet.
func (rb *recentBlocks) iter(s *Store) *BlockIterator {
	if rb.lastHeight == -1 || len(rb.hashes) == 0 {
		return nil
	}
	return &BlockIterator{
		storeMtx: &s.mtx,
		height:   rb.lastHeight,
		index:    len(rb.hashes) - 1,
		rb:       rb,
	}
}
// Next advances the iterator toward more recently seen blocks,
// reporting whether a newer block was available.
func (it *BlockIterator) Next() bool {
	it.storeMtx.RLock()
	defer it.storeMtx.RUnlock()
	if next := it.index + 1; next < len(it.rb.hashes) {
		it.index = next
		return true
	}
	return false
}

// Prev moves the iterator toward older blocks, reporting whether an
// older block was available.
func (it *BlockIterator) Prev() bool {
	it.storeMtx.RLock()
	defer it.storeMtx.RUnlock()
	if it.index < 1 {
		return false
	}
	it.index--
	return true
}
// BlockStamp returns the height and hash of the block at the iterator's
// current position. The height is derived by counting back from the
// most recently seen block height by the iterator's distance from the
// end of the hash slice.
func (it *BlockIterator) BlockStamp() BlockStamp {
	it.storeMtx.RLock()
	defer it.storeMtx.RUnlock()
	return BlockStamp{
		Height: it.rb.lastHeight - int32(len(it.rb.hashes)-1-it.index),
		Hash:   it.rb.hashes[it.index],
	}
}
// unusedSpace is a wrapper type to read or write one or more types
// that btcwallet fits into an unused space left by Armory's key store file
// format.
type unusedSpace struct {
	nBytes int // number of unused bytes that armory left.
	rfvs   []readerFromVersion
}

// newUnusedSpace returns an unusedSpace covering nBytes of the file,
// into which the given values are packed in order.
func newUnusedSpace(nBytes int, rfvs ...readerFromVersion) *unusedSpace {
	return &unusedSpace{
		nBytes: nBytes,
		rfvs:   rfvs,
	}
}

// readFromVersion reads each packed value in order using file version
// v, then consumes the remainder of the reserved region so the stream
// ends up positioned at the end of the unused space.
func (u *unusedSpace) readFromVersion(v version, r io.Reader) (int64, error) {
	var read int64
	for _, rfv := range u.rfvs {
		n, err := rfv.readFromVersion(v, r)
		if err != nil {
			return read + n, err
		}
		read += n
		// Guard against the packed values overflowing the region.
		if read > int64(u.nBytes) {
			return read, errors.New("read too much from armory's unused space")
		}
	}
	// Read rest of actually unused bytes.
	unused := make([]byte, u.nBytes-int(read))
	n, err := io.ReadFull(r, unused)
	return read + int64(n), err
}

// WriteTo writes each packed value in order, then zero-pads the rest of
// the reserved region so exactly nBytes are emitted on success.
func (u *unusedSpace) WriteTo(w io.Writer) (int64, error) {
	var written int64
	for _, wt := range u.rfvs {
		n, err := wt.WriteTo(w)
		if err != nil {
			return written + n, err
		}
		written += n
		if written > int64(u.nBytes) {
			return written, errors.New("wrote too much to armory's unused space")
		}
	}
	// Write rest of actually unused bytes.
	unused := make([]byte, u.nBytes-int(written))
	n, err := w.Write(unused)
	return written + int64(n), err
}
// walletAddress is the internal interface used to abstract over the
// different address types, adding serialization and watching-copy
// support on top of the exported WalletAddress interface.
type walletAddress interface {
	io.ReaderFrom
	io.WriterTo
	WalletAddress
	// watchingCopy returns a private-key-free copy of the address
	// bound to the given (watching-only) store.
	watchingCopy(*Store) walletAddress
	// setSyncStatus updates the address's sync flags from the given
	// status value.
	setSyncStatus(SyncStatus)
}

// btcAddress is the in-memory representation of a pay-to-pubkey-hash
// address entry in the key store, with an optionally encrypted private
// key.
type btcAddress struct {
	store             *Store
	address           btcutil.Address
	flags             addrFlags
	chaincode         [32]byte
	chainIndex        int64
	chainDepth        int64 // unused
	initVector        [16]byte
	privKey           [32]byte // AES-CFB encrypted; see encrypt/unlock
	pubKey            *btcec.PublicKey
	firstSeen         int64
	lastSeen          int64
	firstBlock        int32
	partialSyncHeight int32  // This is reappropriated from armory's `lastBlock` field.
	privKeyCT         []byte // non-nil if unlocked.
}

const (
	// Root address has a chain index of -1. Each subsequent
	// chained address increments the index.
	rootKeyChainIdx = -1

	// Imported private keys are not part of the chain, and have a
	// special index of -2.
	importedKeyChainIdx = -2
)

// Serialized pubkey format bytes. The low (oddness) bit of a
// compressed format byte is masked off before comparison in
// publicKey.ReadFrom.
const (
	pubkeyCompressed   byte = 0x2
	pubkeyUncompressed byte = 0x4
)

// publicKey is a serialized secp256k1 public key: one format byte
// followed by 32 (compressed) or 64 (uncompressed) bytes.
type publicKey []byte

// ReadFrom implements io.ReaderFrom, decoding the format byte and then
// however many remaining bytes that format implies.
func (k *publicKey) ReadFrom(r io.Reader) (n int64, err error) {
	var read int64
	var format byte
	read, err = binaryRead(r, binary.LittleEndian, &format)
	if err != nil {
		return n + read, err
	}
	n += read
	// Remove the oddness from the format
	noodd := format
	noodd &= ^byte(0x1)
	var s []byte
	switch noodd {
	case pubkeyUncompressed:
		// Read the remaining 64 bytes.
		s = make([]byte, 64)
	case pubkeyCompressed:
		// Read the remaining 32 bytes.
		s = make([]byte, 32)
	default:
		return n, errors.New("unrecognized pubkey format")
	}
	read, err = binaryRead(r, binary.LittleEndian, &s)
	if err != nil {
		return n + read, err
	}
	n += read
	// Reassemble the full serialization, format byte included.
	*k = append([]byte{format}, s...)
	return // nolint:nakedret
}

// WriteTo implements io.WriterTo by writing the serialized key bytes.
func (k *publicKey) WriteTo(w io.Writer) (n int64, err error) {
	return binaryWrite(w, binary.LittleEndian, []byte(*k))
}

// PubKeyAddress implements WalletAddress and additionally provides the
// pubkey for a pubkey-based address.
type PubKeyAddress interface {
	WalletAddress
	// PubKey returns the public key associated with the address.
	PubKey() *btcec.PublicKey
	// ExportPubKey returns the public key associated with the address
	// serialised as a hex encoded string.
	ExportPubKey() string
	// PrivKey returns the private key for the address.
	// It can fail if the key store is watching only, the key store is locked,
	// or the address doesn't have any keys.
	PrivKey() (*btcec.PrivateKey, error)
	// ExportPrivKey exports the WIF private key.
	ExportPrivKey() (*btcutil.WIF, error)
}
// newBtcAddress initializes and returns a new address. privkey must
// be 32 bytes. iv must be 16 bytes, or nil (in which case it is
// randomly generated).
func newBtcAddress(wallet *Store, privkey, iv []byte, bs *BlockStamp, compressed bool) (addr *btcAddress, err error) {
	if len(privkey) != 32 {
		return nil, errors.New("private key is not 32 bytes")
	}

	// Build the pubkey-only address first, then attach the known key.
	pubkey := pubkeyFromPrivkey(privkey, compressed)
	addr, err = newBtcAddressWithoutPrivkey(wallet, pubkey, iv, bs)
	if err != nil {
		return nil, err
	}

	// The private key is known up front, so clear the deferred-creation
	// flag set by newBtcAddressWithoutPrivkey and record the key.
	addr.flags.createPrivKeyNextUnlock = false
	addr.flags.hasPrivKey = true
	addr.privKeyCT = privkey
	return addr, nil
}
// newBtcAddressWithoutPrivkey initializes and returns a new address with an
// unknown (at the time) private key that must be found later. pubkey must be
// 33 or 65 bytes, and iv must be 16 bytes or empty (in which case it is
// randomly generated).
func newBtcAddressWithoutPrivkey(s *Store, pubkey, iv []byte, bs *BlockStamp) (addr *btcAddress, err error) {
	// Infer compression from the serialized pubkey length.
	var compressed bool
	switch n := len(pubkey); n {
	case secp.PubKeyBytesLenCompressed:
		compressed = true
	case secp.PubKeyBytesLenUncompressed:
		compressed = false
	default:
		return nil, fmt.Errorf("invalid pubkey length %d", n)
	}
	// Generate a random init vector when the caller did not supply one.
	if len(iv) == 0 {
		iv = make([]byte, 16)
		if _, err := rand.Read(iv); err != nil {
			return nil, err
		}
	} else if len(iv) != 16 {
		return nil, errors.New("init vector must be nil or 16 bytes large")
	}
	pk, err := btcec.ParsePubKey(pubkey)
	if err != nil {
		return nil, err
	}
	address, err := btcutil.NewAddressPubKeyHash(btcutil.Hash160(pubkey), s.netParams())
	if err != nil {
		return nil, err
	}
	// createPrivKeyNextUnlock marks the private key as still to be
	// created; callers that already know the key (newBtcAddress) clear
	// this flag afterwards.
	addr = &btcAddress{
		flags: addrFlags{
			hasPrivKey:              false,
			hasPubKey:               true,
			encrypted:               false,
			createPrivKeyNextUnlock: true,
			compressed:              compressed,
			change:                  false,
			unsynced:                false,
		},
		store:      s,
		address:    address,
		firstSeen:  time.Now().Unix(),
		firstBlock: bs.Height,
		pubKey:     pk,
	}
	copy(addr.initVector[:], iv)
	return addr, nil
}
// newRootBtcAddress generates a new address, also setting the
// chaincode and chain index to represent this address as a root
// address.
func newRootBtcAddress(s *Store, privKey, iv, chaincode []byte,
	bs *BlockStamp) (addr *btcAddress, err error) {
	if len(chaincode) != 32 {
		return nil, errors.New("chaincode is not 32 bytes")
	}

	// Root addresses always use a compressed pubkey.
	addr, err = newBtcAddress(s, privKey, iv, bs, true)
	if err != nil {
		return nil, err
	}
	addr.chainIndex = rootKeyChainIdx
	copy(addr.chaincode[:], chaincode)
	return addr, err
}
// verifyKeypairs creates a signature using the parsed private key and
// verifies the signature with the parsed public key. If either of these
// steps fail, the keypair generation failed and any funds sent to this
// address would be unspendable. Requires an unencrypted or unlocked
// btcAddress (privKeyCT must hold the 32 clear-text key bytes).
func (a *btcAddress) verifyKeypairs() error {
	if len(a.privKeyCT) != 32 {
		return errors.New("private key unavailable")
	}
	privKey, pubKey := btcec.PrivKeyFromBytes(a.privKeyCT)
	msg := []byte("String to sign.")
	if !ecdsa.Sign(privKey, msg).Verify(msg, pubKey) {
		return errors.New("pubkey verification failed")
	}
	return nil
}
// ReadFrom reads an encrypted address from an io.Reader, verifying each
// checksummed field (correcting errors where verifyAndFix can) and
// rehydrating the parsed pubkey and pay-to-pubkey-hash address.
func (a *btcAddress) ReadFrom(r io.Reader) (n int64, err error) {
	var read int64
	// Checksums
	var chkPubKeyHash uint32
	var chkChaincode uint32
	var chkInitVector uint32
	var chkPrivKey uint32
	var chkPubKey uint32
	var pubKeyHash [ripemd160.Size]byte
	var pubKey publicKey
	// Read serialized key store into addr fields and checksums.
	// Field order here defines the on-disk layout and must match
	// WriteTo exactly.
	datas := []interface{}{
		&pubKeyHash,
		&chkPubKeyHash,
		make([]byte, 4), // version
		&a.flags,
		&a.chaincode,
		&chkChaincode,
		&a.chainIndex,
		&a.chainDepth,
		&a.initVector,
		&chkInitVector,
		&a.privKey,
		&chkPrivKey,
		&pubKey,
		&chkPubKey,
		&a.firstSeen,
		&a.lastSeen,
		&a.firstBlock,
		&a.partialSyncHeight,
	}
	for _, data := range datas {
		// Self-deserializing fields implement io.ReaderFrom; the rest
		// are read as fixed-size little endian values.
		if rf, ok := data.(io.ReaderFrom); ok {
			read, err = rf.ReadFrom(r)
		} else {
			read, err = binaryRead(r, binary.LittleEndian, data)
		}
		if err != nil {
			return n + read, err
		}
		n += read
	}
	// Verify checksums, correct errors where possible.
	checks := []struct {
		data []byte
		chk  uint32
	}{
		{pubKeyHash[:], chkPubKeyHash},
		{a.chaincode[:], chkChaincode},
		{a.initVector[:], chkInitVector},
		{a.privKey[:], chkPrivKey},
		{pubKey, chkPubKey},
	}
	for i := range checks {
		if err = verifyAndFix(checks[i].data, checks[i].chk); err != nil {
			return n, err
		}
	}
	if !a.flags.hasPubKey {
		return n, errors.New("read in an address without a public key")
	}
	// Rebuild the in-memory pubkey and address from the verified bytes.
	pk, err := btcec.ParsePubKey(pubKey)
	if err != nil {
		return n, err
	}
	a.pubKey = pk
	addr, err := btcutil.NewAddressPubKeyHash(pubKeyHash[:], a.store.netParams())
	if err != nil {
		return n, err
	}
	a.address = addr
	return n, nil
}

// WriteTo serializes the address to w in the same field order that
// ReadFrom decodes, generating each checksum with walletHash.
func (a *btcAddress) WriteTo(w io.Writer) (n int64, err error) {
	var written int64
	pubKey := a.pubKeyBytes()
	hash := a.address.ScriptAddress()
	datas := []interface{}{
		&hash,
		walletHash(hash),
		make([]byte, 4), // version
		&a.flags,
		&a.chaincode,
		walletHash(a.chaincode[:]),
		&a.chainIndex,
		&a.chainDepth,
		&a.initVector,
		walletHash(a.initVector[:]),
		&a.privKey,
		walletHash(a.privKey[:]),
		&pubKey,
		walletHash(pubKey),
		&a.firstSeen,
		&a.lastSeen,
		&a.firstBlock,
		&a.partialSyncHeight,
	}
	for _, data := range datas {
		// Self-serializing fields implement io.WriterTo; the rest are
		// written as fixed-size little endian values.
		if wt, ok := data.(io.WriterTo); ok {
			written, err = wt.WriteTo(w)
		} else {
			written, err = binaryWrite(w, binary.LittleEndian, data)
		}
		if err != nil {
			return n + written, err
		}
		n += written
	}
	return n, nil
}
// encrypt attempts to encrypt an address's clear text private key,
// failing if the address is already encrypted or if the private key is
// not 32 bytes. If successful, the encryption flag is set.
func (a *btcAddress) encrypt(key []byte) error {
	if a.flags.encrypted {
		return ErrAlreadyEncrypted
	}
	if len(a.privKeyCT) != 32 {
		return errors.New("invalid clear text private key")
	}
	aesBlockEncrypter, err := aes.NewCipher(key)
	if err != nil {
		return err
	}
	// Encrypt the clear-text key into a.privKey with AES-CFB using the
	// address's stored init vector.
	aesEncrypter := cipher.NewCFBEncrypter(aesBlockEncrypter, a.initVector[:])
	aesEncrypter.XORKeyStream(a.privKey[:], a.privKeyCT)
	a.flags.hasPrivKey = true
	a.flags.encrypted = true
	return nil
}

// lock removes the reference this address holds to its clear text
// private key. This function fails if the address is not encrypted.
func (a *btcAddress) lock() error {
	if !a.flags.encrypted {
		return errors.New("unable to lock unencrypted address")
	}
	// Zero the clear-text key bytes before dropping the reference.
	zero(a.privKeyCT)
	a.privKeyCT = nil
	return nil
}

// unlock decrypts and stores a pointer to an address's private key,
// failing if the address is not encrypted, or the provided key is
// incorrect. The returned clear text private key will always be a copy
// that may be safely used by the caller without worrying about it being
// zeroed during an address lock.
func (a *btcAddress) unlock(key []byte) (privKeyCT []byte, err error) {
	if !a.flags.encrypted {
		return nil, errors.New("unable to unlock unencrypted address")
	}
	// Decrypt private key with AES key.
	aesBlockDecrypter, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	aesDecrypter := cipher.NewCFBDecrypter(aesBlockDecrypter, a.initVector[:])
	privkeyBytes := make([]byte, 32)
	aesDecrypter.XORKeyStream(privkeyBytes, a.privKey[:])
	// If secret is already saved, simply compare the bytes.
	if len(a.privKeyCT) == 32 {
		if !bytes.Equal(a.privKeyCT, privkeyBytes) {
			return nil, ErrWrongPassphrase
		}
		privKeyCT := make([]byte, 32)
		copy(privKeyCT, a.privKeyCT)
		return privKeyCT, nil
	}
	// Otherwise validate the decryption by re-deriving the public key
	// and comparing it against the stored one.
	_, pubKey := btcec.PrivKeyFromBytes(privkeyBytes)
	if !pubKey.IsEqual(a.pubKey) {
		return nil, ErrWrongPassphrase
	}
	privkeyCopy := make([]byte, 32)
	copy(privkeyCopy, privkeyBytes)
	a.privKeyCT = privkeyBytes
	return privkeyCopy, nil
}

// changeEncryptionKey re-encrypts the private keys for an address
// with a new AES encryption key. oldkey must be the old AES encryption key
// and is used to decrypt the private key.
func (a *btcAddress) changeEncryptionKey(oldkey, newkey []byte) error {
	// Address must have a private key and be encrypted to continue.
	if !a.flags.hasPrivKey {
		return errors.New("no private key")
	}
	if !a.flags.encrypted {
		return errors.New("address is not encrypted")
	}
	privKeyCT, err := a.unlock(oldkey)
	if err != nil {
		return err
	}
	aesBlockEncrypter, err := aes.NewCipher(newkey)
	if err != nil {
		return err
	}
	// Use a fresh init vector for the re-encryption rather than reusing
	// the old IV with the new key.
	newIV := make([]byte, len(a.initVector))
	if _, err := rand.Read(newIV); err != nil {
		return err
	}
	copy(a.initVector[:], newIV)
	aesEncrypter := cipher.NewCFBEncrypter(aesBlockEncrypter, a.initVector[:])
	aesEncrypter.XORKeyStream(a.privKey[:], privKeyCT)
	// NOTE(review): the clear-text copy returned by unlock is not
	// zeroed before it goes out of scope — confirm whether that is
	// intentional.
	return nil
}
// Address returns the pub key hash address, implementing WalletAddress.
func (a *btcAddress) Address() btcutil.Address {
	return a.address
}

// AddrHash returns the pub key hash, implementing WalletAddress.
func (a *btcAddress) AddrHash() string {
	return string(a.address.ScriptAddress())
}

// FirstBlock returns the first block the address is seen in, implementing
// WalletAddress.
func (a *btcAddress) FirstBlock() int32 {
	return a.firstBlock
}

// Imported returns true if the address was imported rather than being
// part of the address chain, implementing WalletAddress.
func (a *btcAddress) Imported() bool {
	return a.chainIndex == importedKeyChainIdx
}

// Change returns true if the address was created as a change address,
// implementing WalletAddress.
func (a *btcAddress) Change() bool {
	return a.flags.change
}

// Compressed returns true if the address backing key is compressed,
// implementing WalletAddress.
func (a *btcAddress) Compressed() bool {
	return a.flags.compressed
}

// SyncStatus returns a SyncStatus type for how the address is currently
// synced. For an Unsynced type, the value is the recorded first seen
// block height of the address; for PartialSync it is the height reached
// by the partial rescan.
func (a *btcAddress) SyncStatus() SyncStatus {
	switch {
	case a.flags.unsynced && !a.flags.partialSync:
		return Unsynced(a.firstBlock)
	case a.flags.unsynced && a.flags.partialSync:
		return PartialSync(a.partialSyncHeight)
	default:
		return FullSync{}
	}
}

// PubKey returns the parsed public key for the address. Implements
// PubKeyAddress.
func (a *btcAddress) PubKey() *btcec.PublicKey {
	return a.pubKey
}

// pubKeyBytes serializes the pubkey in whichever form (compressed or
// uncompressed) the address flags record.
func (a *btcAddress) pubKeyBytes() []byte {
	if a.Compressed() {
		return a.pubKey.SerializeCompressed()
	}
	return a.pubKey.SerializeUncompressed()
}

// ExportPubKey returns the public key associated with the address serialised as
// a hex encoded string. Implements PubKeyAddress.
func (a *btcAddress) ExportPubKey() string {
	return hex.EncodeToString(a.pubKeyBytes())
}
// PrivKey implements PubKeyAddress by returning the private key, or an error
// if the key store is locked, watching only or the private key is missing.
func (a *btcAddress) PrivKey() (*btcec.PrivateKey, error) {
	// Watching-only stores never hold private keys.
	if a.store.flags.watchingOnly {
		return nil, ErrWatchingOnly
	}
	if !a.flags.hasPrivKey {
		return nil, errors.New("no private key for address")
	}
	// Key store must be unlocked to decrypt the private key.
	if a.store.isLocked() {
		return nil, ErrLocked
	}
	// Unlock address with key store secret. unlock returns a copy of
	// the clear text private key, and may be used safely even
	// during an address lock.
	privKeyCT, err := a.unlock(a.store.secret)
	if err != nil {
		return nil, err
	}
	privKey, _ := btcec.PrivKeyFromBytes(privKeyCT)
	return privKey, nil
}

// ExportPrivKey exports the private key as a WIF for encoding as a string
// in the Wallet Import Format.
func (a *btcAddress) ExportPrivKey() (*btcutil.WIF, error) {
	pk, err := a.PrivKey()
	if err != nil {
		return nil, err
	}
	// NewWIF only errors if the network is nil. In this case, panic,
	// as our program's assumptions are so broken that this needs to be
	// caught immediately, and a stack trace here is more useful than
	// elsewhere.
	wif, err := btcutil.NewWIF(pk, a.store.netParams(), a.Compressed())
	if err != nil {
		panic(err)
	}
	return wif, nil
}
// watchingCopy creates a copy of an address without a private key.
// This is used to fill a watching key store with addresses from a
// normal key store.
func (a *btcAddress) watchingCopy(s *Store) walletAddress {
	return &btcAddress{
		store:   s,
		address: a.address,
		// All private-key related flags are cleared in the copy.
		// NOTE(review): flags.partialSync is not carried over even
		// though partialSyncHeight is — confirm whether a partially
		// synced address should remain partially synced in the
		// watching copy.
		flags: addrFlags{
			hasPrivKey:              false,
			hasPubKey:               true,
			encrypted:               false,
			createPrivKeyNextUnlock: false,
			compressed:              a.flags.compressed,
			change:                  a.flags.change,
			unsynced:                a.flags.unsynced,
		},
		chaincode:         a.chaincode,
		chainIndex:        a.chainIndex,
		chainDepth:        a.chainDepth,
		pubKey:            a.pubKey,
		firstSeen:         a.firstSeen,
		lastSeen:          a.lastSeen,
		firstBlock:        a.firstBlock,
		partialSyncHeight: a.partialSyncHeight,
	}
}
// setSyncStatus sets the address flags and possibly the partial sync height
// depending on the concrete type of s. Unknown SyncStatus
// implementations leave the address untouched.
func (a *btcAddress) setSyncStatus(s SyncStatus) {
	apply := func(unsynced, partial bool, height int32) {
		a.flags.unsynced = unsynced
		a.flags.partialSync = partial
		a.partialSyncHeight = height
	}
	switch v := s.(type) {
	case Unsynced:
		apply(true, false, 0)
	case PartialSync:
		apply(true, true, int32(v))
	case FullSync:
		apply(false, false, 0)
	}
}
// note that there is no encrypted bit here since if we had a script encrypted
// and then used it on the blockchain this provides a simple known plaintext in
// the key store file. It was determined that the script in a p2sh transaction is
// not a secret and any sane situation would also require a signature (which
// does have a secret).
type scriptFlags struct {
hasScript bool
change bool
unsynced bool
partialSync bool
}
// ReadFrom implements the io.ReaderFrom interface by reading from r into sf.
func (sf *scriptFlags) ReadFrom(r io.Reader) (int64, error) {
var b [8]byte
n, err := io.ReadFull(r, b[:])
if err != nil {
return int64(n), err
}
// We match bits from addrFlags for similar fields. hence hasScript uses
// the same bit as hasPubKey and the change bit is the same for both.
sf.hasScript = b[0]&(1<<1) != 0
sf.change = b[0]&(1<<5) != 0
sf.unsynced = b[0]&(1<<6) != 0
sf.partialSync = b[0]&(1<<7) != 0
return int64(n), nil
}
// WriteTo implements the io.WriteTo interface by writing sf into w.
func (sf *scriptFlags) WriteTo(w io.Writer) (int64, error) {
var b [8]byte
if sf.hasScript {
b[0] |= 1 << 1
}
if sf.change {
b[0] |= 1 << 5
}
if sf.unsynced {
b[0] |= 1 << 6
}
if sf.partialSync {
b[0] |= 1 << 7
}
n, err := w.Write(b[:])
return int64(n), err
}
// p2SHScript represents the variable length script entry in a key store.
type p2SHScript []byte
// ReadFrom implements the ReaderFrom interface by reading the P2SH script from
// r in the format <4 bytes little endian length><script bytes>
func (a *p2SHScript) ReadFrom(r io.Reader) (n int64, err error) {
//read length
var lenBytes [4]byte
read, err := io.ReadFull(r, lenBytes[:])
n += int64(read)
if err != nil {
return n, err
}
length := binary.LittleEndian.Uint32(lenBytes[:])
script := make([]byte, length)
read, err = io.ReadFull(r, script)
n += int64(read)
if err != nil {
return n, err
}
*a = script
return n, nil
}
// WriteTo implements the WriterTo interface by writing the P2SH script to w in
// the format <4 bytes little endian length><script bytes>
func (a *p2SHScript) WriteTo(w io.Writer) (n int64, err error) {
// Prepare and write 32-bit little-endian length header
var lenBytes [4]byte
binary.LittleEndian.PutUint32(lenBytes[:], uint32(len(*a)))
written, err := w.Write(lenBytes[:])
n += int64(written)
if err != nil {
return n, err
}
// Now write the bytes themselves.
written, err = w.Write(*a)
return n + int64(written), err
}
// scriptAddress is the in-memory representation of a pay-to-script-hash
// address entry in the key store.
type scriptAddress struct {
	store             *Store
	address           btcutil.Address
	class             txscript.ScriptClass
	addresses         []btcutil.Address // signer addresses extracted from the script
	reqSigs           int               // signatures required to spend
	flags             scriptFlags
	script            p2SHScript // variable length
	firstSeen         int64
	lastSeen          int64
	firstBlock        int32
	partialSyncHeight int32
}

// ScriptAddress is an interface representing a Pay-to-Script-Hash style of
// bitcoind address.
type ScriptAddress interface {
	WalletAddress
	// Script returns the script associated with the address.
	Script() []byte
	// ScriptClass returns the class of the script associated with the
	// address.
	ScriptClass() txscript.ScriptClass
	// Addresses returns the addresses that are required to sign
	// transactions from the script address.
	Addresses() []btcutil.Address
	// RequiredSigs returns the number of signatures required by the
	// script address.
	RequiredSigs() int
}

// newScriptAddress initializes and returns a new P2SH address for the
// given redeem script, caching the script class, signer addresses, and
// required signature count extracted from it.
func newScriptAddress(s *Store, script []byte, bs *BlockStamp) (addr *scriptAddress, err error) {
	class, addresses, reqSigs, err :=
		txscript.ExtractPkScriptAddrs(script, s.netParams())
	if err != nil {
		return nil, err
	}
	scriptHash := btcutil.Hash160(script)
	address, err := btcutil.NewAddressScriptHashFromHash(scriptHash, s.netParams())
	if err != nil {
		return nil, err
	}
	addr = &scriptAddress{
		store:     s,
		address:   address,
		addresses: addresses,
		class:     class,
		reqSigs:   reqSigs,
		flags: scriptFlags{
			hasScript: true,
			change:    false,
		},
		script:     script,
		firstSeen:  time.Now().Unix(),
		firstBlock: bs.Height,
	}
	return addr, nil
}
// ReadFrom reads a script address from an io.Reader, verifying the
// stored checksums and reconstructing the btcutil address and cached
// script metadata from the deserialized fields.
func (sa *scriptAddress) ReadFrom(r io.Reader) (n int64, err error) {
	var read int64
	// Checksums
	var chkScriptHash uint32
	var chkScript uint32
	var scriptHash [ripemd160.Size]byte
	// Read serialized key store into addr fields and checksums.
	// Field order defines the on-disk layout and must match WriteTo.
	datas := []interface{}{
		&scriptHash,
		&chkScriptHash,
		make([]byte, 4), // version
		&sa.flags,
		&sa.script,
		&chkScript,
		&sa.firstSeen,
		&sa.lastSeen,
		&sa.firstBlock,
		&sa.partialSyncHeight,
	}
	for _, data := range datas {
		// Self-deserializing fields implement io.ReaderFrom; the rest
		// are read as fixed-size little endian values.
		if rf, ok := data.(io.ReaderFrom); ok {
			read, err = rf.ReadFrom(r)
		} else {
			read, err = binaryRead(r, binary.LittleEndian, data)
		}
		if err != nil {
			return n + read, err
		}
		n += read
	}
	// Verify checksums, correct errors where possible.
	checks := []struct {
		data []byte
		chk  uint32
	}{
		{scriptHash[:], chkScriptHash},
		{sa.script, chkScript},
	}
	for i := range checks {
		if err = verifyAndFix(checks[i].data, checks[i].chk); err != nil {
			return n, err
		}
	}
	address, err := btcutil.NewAddressScriptHashFromHash(scriptHash[:],
		sa.store.netParams())
	if err != nil {
		return n, err
	}
	sa.address = address
	if !sa.flags.hasScript {
		return n, errors.New("read in an address with no script")
	}
	// Re-derive the script class, signer addresses, and required
	// signature count from the script itself.
	class, addresses, reqSigs, err :=
		txscript.ExtractPkScriptAddrs(sa.script, sa.store.netParams())
	if err != nil {
		return n, err
	}
	sa.class = class
	sa.addresses = addresses
	sa.reqSigs = reqSigs
	return n, nil
}
// WriteTo implements io.WriterTo by writing the scriptAddress to w in
// the same field order ReadFrom expects, generating checksums with
// walletHash.
func (sa *scriptAddress) WriteTo(w io.Writer) (n int64, err error) {
	var written int64
	hash := sa.address.ScriptAddress()
	datas := []interface{}{
		&hash,
		walletHash(hash),
		make([]byte, 4), // version
		&sa.flags,
		&sa.script,
		walletHash(sa.script),
		&sa.firstSeen,
		&sa.lastSeen,
		&sa.firstBlock,
		&sa.partialSyncHeight,
	}
	for _, data := range datas {
		// Self-serializing fields implement io.WriterTo; the rest are
		// written as fixed-size little endian values.
		if wt, ok := data.(io.WriterTo); ok {
			written, err = wt.WriteTo(w)
		} else {
			written, err = binaryWrite(w, binary.LittleEndian, data)
		}
		if err != nil {
			return n + written, err
		}
		n += written
	}
	return n, nil
}
// Address returns the btcutil.AddressScriptHash for this scriptAddress.
// (Original comment referenced btcAddress; this method belongs to
// scriptAddress.)
func (sa *scriptAddress) Address() btcutil.Address {
	return sa.address
}
// AddrHash returns the raw script hash bytes as a string, implementing
// AddressInfo. The value is a binary hash, not a human-readable string.
func (sa *scriptAddress) AddrHash() string {
	return string(sa.address.ScriptAddress())
}
// FirstBlock returns the first block height the address is known at.
func (sa *scriptAddress) FirstBlock() int32 {
	return sa.firstBlock
}
// Imported always returns true since script addresses are always
// imported addresses and not part of any chain. (Fixed typo:
// "addressed" -> "addresses".)
func (sa *scriptAddress) Imported() bool {
	return true
}
// Change returns true if the address was created as a change address,
// as recorded in the persisted flags.
func (sa *scriptAddress) Change() bool {
	return sa.flags.change
}
// Compressed returns false since script addresses are never compressed.
// Implements WalletAddress.
func (sa *scriptAddress) Compressed() bool {
	return false
}
// Script returns the script that is represented by the address. The
// returned slice is the internal storage and must not be modified by
// the caller.
func (sa *scriptAddress) Script() []byte {
	return sa.script
}
// Addresses returns the list of addresses that must sign the script,
// as extracted by txscript.ExtractPkScriptAddrs in ReadFrom/creation.
func (sa *scriptAddress) Addresses() []btcutil.Address {
	return sa.addresses
}
// ScriptClass returns the txscript classification of the script.
func (sa *scriptAddress) ScriptClass() txscript.ScriptClass {
	return sa.class
}
// RequiredSigs returns the number of signatures required by the script.
func (sa *scriptAddress) RequiredSigs() int {
	return sa.reqSigs
}
// SyncStatus returns a SyncStatus type describing how far the address
// is synced. For an Unsynced result the value is the recorded first
// seen block height; for PartialSync it is the partial sync height.
// Implements WalletAddress.
func (sa *scriptAddress) SyncStatus() SyncStatus {
	if !sa.flags.unsynced {
		return FullSync{}
	}
	if sa.flags.partialSync {
		return PartialSync(sa.partialSyncHeight)
	}
	return Unsynced(sa.firstBlock)
}
// setSyncStatus sets the address flags and possibly the partial sync
// height depending on the concrete type of s. Unknown SyncStatus
// implementations are silently ignored (no case matches).
func (sa *scriptAddress) setSyncStatus(s SyncStatus) {
	switch e := s.(type) {
	case Unsynced:
		sa.flags.unsynced = true
		sa.flags.partialSync = false
		sa.partialSyncHeight = 0

	case PartialSync:
		sa.flags.unsynced = true
		sa.flags.partialSync = true
		// The PartialSync value itself carries the height.
		sa.partialSyncHeight = int32(e)

	case FullSync:
		sa.flags.unsynced = false
		sa.flags.partialSync = false
		sa.partialSyncHeight = 0
	}
}
// watchingCopy creates a copy of an address without a private key.
// This is used to fill a watching key store with addresses from a
// normal key store.
//
// Note: hasScript is deliberately not copied into flags (the copy's
// zero value is false), and the script/addresses slices are shared
// with the original, not deep-copied — callers must not mutate them.
func (sa *scriptAddress) watchingCopy(s *Store) walletAddress {
	return &scriptAddress{
		store:     s,
		address:   sa.address,
		addresses: sa.addresses,
		class:     sa.class,
		reqSigs:   sa.reqSigs,
		flags: scriptFlags{
			change:   sa.flags.change,
			unsynced: sa.flags.unsynced,
		},
		script:            sa.script,
		firstSeen:         sa.firstSeen,
		lastSeen:          sa.lastSeen,
		firstBlock:        sa.firstBlock,
		partialSyncHeight: sa.partialSyncHeight,
	}
}
// walletHash computes the 32-bit checksum used throughout the key
// store serialization: the first four bytes (little-endian) of the
// double SHA-256 of b.
func walletHash(b []byte) uint32 {
	return binary.LittleEndian.Uint32(chainhash.DoubleHashB(b))
}
// verifyAndFix checks b against the stored checksum chk, returning
// ErrChecksumMismatch on disagreement.
//
// TODO(jrick) add error correction.
func verifyAndFix(b []byte, chk uint32) error {
	if walletHash(b) == chk {
		return nil
	}
	return ErrChecksumMismatch
}
// kdfParameters holds the tuned inputs to the memory-hard key
// derivation function, persisted alongside the key store.
type kdfParameters struct {
	mem   uint64   // memory requirement in bytes
	nIter uint32   // iteration count
	salt  [32]byte // random salt mixed into every derivation
}
// computeKdfParameters returns best guess parameters to the
// memory-hard key derivation function to make the computation last
// targetSec seconds, while using no more than maxMem bytes of memory.
//
// Both the memory requirement and the iteration count are calibrated
// empirically by timing keyOneIter on this machine, so results differ
// between hosts and runs.
func computeKdfParameters(targetSec float64, maxMem uint64) (*kdfParameters, error) {
	params := &kdfParameters{}
	// Fresh random salt, persisted with the tuned parameters.
	if _, err := rand.Read(params.salt[:]); err != nil {
		return nil, err
	}

	testKey := []byte("This is an example key to test KDF iteration speed")

	memoryReqtBytes := uint64(1024)
	approxSec := float64(0)

	// Double the memory requirement until one iteration takes at least
	// a quarter of the target time or the memory cap is hit.
	// NOTE(review): the doubling happens after the bound check, so the
	// final value can exceed maxMem by up to 2x — confirm intended.
	for approxSec <= targetSec/4 && memoryReqtBytes < maxMem {
		memoryReqtBytes *= 2
		before := time.Now()
		_ = keyOneIter(testKey, params.salt[:], memoryReqtBytes)
		approxSec = time.Since(before).Seconds()
	}

	allItersSec := float64(0)
	nIter := uint32(1)
	// Double the iteration count until all iterations together take a
	// measurable amount of wall time.
	for allItersSec < 0.02 { // This is a magic number straight from armory's source.
		nIter *= 2
		before := time.Now()
		for i := uint32(0); i < nIter; i++ {
			_ = keyOneIter(testKey, params.salt[:], memoryReqtBytes)
		}
		allItersSec = time.Since(before).Seconds()
	}

	params.mem = memoryReqtBytes
	params.nIter = nIter

	return params, nil
}
// WriteTo serializes the KDF parameters to w, implementing io.WriterTo.
// On-disk layout: mem (8) | nIter (4) | salt (32) | checksum of the
// preceding fields (4) | zero padding to a fixed 256-byte record.
func (params *kdfParameters) WriteTo(w io.Writer) (n int64, err error) {
	var written int64

	memBytes := make([]byte, 8)
	nIterBytes := make([]byte, 4)
	binary.LittleEndian.PutUint64(memBytes, params.mem)
	binary.LittleEndian.PutUint32(nIterBytes, params.nIter)
	// chkedBytes is mem|nIter|salt exactly as laid out on disk; its
	// checksum is what ReadFrom verifies.
	chkedBytes := append(memBytes, nIterBytes...)
	chkedBytes = append(chkedBytes, params.salt[:]...)

	datas := []interface{}{
		&params.mem,
		&params.nIter,
		&params.salt,
		walletHash(chkedBytes),
		make([]byte, 256-(binary.Size(params)+4)), // padding
	}
	for _, data := range datas {
		if written, err = binaryWrite(w, binary.LittleEndian, data); err != nil {
			return n + written, err
		}
		n += written
	}

	return n, nil
}
// ReadFrom deserializes the KDF parameters from r, implementing
// io.ReaderFrom. It reads the fixed 256-byte record written by
// WriteTo, verifies the checksum over mem|nIter|salt, and then decodes
// those fields from the checked bytes.
func (params *kdfParameters) ReadFrom(r io.Reader) (n int64, err error) {
	var read int64

	// These must be read in but are not saved directly to params.
	// 44 = 8 (mem) + 4 (nIter) + 32 (salt).
	chkedBytes := make([]byte, 44)
	var chk uint32

	padding := make([]byte, 256-(binary.Size(params)+4))
	datas := []interface{}{
		chkedBytes,
		&chk,
		padding,
	}
	for _, data := range datas {
		if read, err = binaryRead(r, binary.LittleEndian, data); err != nil {
			return n + read, err
		}
		n += read
	}

	// Verify checksum
	if err = verifyAndFix(chkedBytes, chk); err != nil {
		return n, err
	}

	// Read params
	buf := bytes.NewBuffer(chkedBytes)
	datas = []interface{}{
		&params.mem,
		&params.nIter,
		&params.salt,
	}
	for _, data := range datas {
		if err = binary.Read(buf, binary.LittleEndian, data); err != nil {
			return n, err
		}
	}

	return n, nil
}
// addrEntry is the key store entry for a pay-to-pubkey-hash address:
// the hash160 of the public key plus the full serialized btcAddress.
type addrEntry struct {
	pubKeyHash160 [ripemd160.Size]byte
	addr          btcAddress
}
// WriteTo implements io.WriterTo by writing the entry to w: the
// addrHeader entry-type marker, the pubkey hash, then the serialized
// btcAddress.
func (e *addrEntry) WriteTo(w io.Writer) (n int64, err error) {
	var written int64

	// Write header
	if written, err = binaryWrite(w, binary.LittleEndian, addrHeader); err != nil {
		return n + written, err
	}
	n += written

	// Write hash
	if written, err = binaryWrite(w, binary.LittleEndian, &e.pubKeyHash160); err != nil {
		return n + written, err
	}
	n += written

	// Write btcAddress
	written, err = e.addr.WriteTo(w)
	n += written
	return n, err
}
// ReadFrom implements io.ReaderFrom by reading the entry from r.
// Unlike WriteTo, no entry-type header is consumed here — presumably
// the caller has already read it to dispatch on entry type; confirm
// against the key store reader.
func (e *addrEntry) ReadFrom(r io.Reader) (n int64, err error) {
	var read int64

	if read, err = binaryRead(r, binary.LittleEndian, &e.pubKeyHash160); err != nil {
		return n + read, err
	}
	n += read

	read, err = e.addr.ReadFrom(r)
	return n + read, err
}
// scriptEntry is the entry type for a P2SH script: the hash160 of the
// redeem script plus the full serialized scriptAddress.
type scriptEntry struct {
	scriptHash160 [ripemd160.Size]byte
	script        scriptAddress
}
// WriteTo implements io.WriterTo by writing the entry to w: the
// scriptHeader entry-type marker, the script hash, then the serialized
// scriptAddress.
func (e *scriptEntry) WriteTo(w io.Writer) (n int64, err error) {
	var written int64

	// Write header
	if written, err = binaryWrite(w, binary.LittleEndian, scriptHeader); err != nil {
		return n + written, err
	}
	n += written

	// Write hash
	if written, err = binaryWrite(w, binary.LittleEndian, &e.scriptHash160); err != nil {
		return n + written, err
	}
	n += written

	// Write scriptAddress (original comment said btcAddress).
	written, err = e.script.WriteTo(w)
	n += written
	return n, err
}
// ReadFrom implements io.ReaderFrom by reading the entry from r
// (original comment said "from e"). As with addrEntry.ReadFrom, the
// entry-type header is expected to have been consumed by the caller.
func (e *scriptEntry) ReadFrom(r io.Reader) (n int64, err error) {
	var read int64

	if read, err = binaryRead(r, binary.LittleEndian, &e.scriptHash160); err != nil {
		return n + read, err
	}
	n += read

	read, err = e.script.ReadFrom(r)
	return n + read, err
}
// BlockStamp defines a block (by height and a unique hash) and is
// used to mark a point in the blockchain that a key store element is
// synced to.
type BlockStamp struct {
	Hash   *chainhash.Hash
	Height int32
}
|
package romanLiterals
import (
"fmt"
"testing"
"testing/quick"
)
// Strings for formatting test messages (t.Run names and failure
// messages in the tests below).
const (
	convertArabicToRoman       = "%d gets converted to %q"
	convertArabicToRomanFailed = "Conversion result: %q, expected %q"
	convertRomanToArabic       = "%q gets converted to %d"
	convertRomanToArabicFailed = "Conversion result: %d, expected %d"
)

// RomanLiteralCases pairs an Arabic value with its expected Roman
// numeral representation.
type RomanLiteralCases struct {
	Arabic uint16
	Roman  string
}

// cases covers the single symbols, the subtractive forms (IV, IX, XL,
// XLIX, XC, CD, CM), and multi-symbol composites up to the
// conventional maximum of 3999 (MMMCMXCIX).
var cases []RomanLiteralCases = []RomanLiteralCases{
	{1, "I"},
	{2, "II"},
	{3, "III"},
	{4, "IV"},
	{5, "V"},
	{6, "VI"},
	{7, "VII"},
	{8, "VIII"},
	{9, "IX"},
	{10, "X"},
	{14, "XIV"},
	{18, "XVIII"},
	{20, "XX"},
	{39, "XXXIX"},
	{40, "XL"},
	{47, "XLVII"},
	{49, "XLIX"},
	{50, "L"},
	{100, "C"},
	{90, "XC"},
	{400, "CD"},
	{500, "D"},
	{900, "CM"},
	{1000, "M"},
	{1984, "MCMLXXXIV"},
	{3999, "MMMCMXCIX"},
	{2014, "MMXIV"},
	{1006, "MVI"},
	{798, "DCCXCVIII"},
}
// TestRomanNumerals checks the Arabic-to-Roman conversion against the
// shared case table via the runCases helper.
func TestRomanNumerals(t *testing.T) {
	runCases(t, cases, ConvertToRoman)
}
// runCases runs the Arabic-to-Roman conversion table against f,
// reporting a failure for every mismatching case.
func runCases(t *testing.T, cases []RomanLiteralCases, f func(uint16) string) {
	for _, tc := range cases {
		name := fmt.Sprintf(convertArabicToRoman, tc.Arabic, tc.Roman)
		t.Run(name, func(t *testing.T) {
			if got := f(tc.Arabic); got != tc.Roman {
				t.Errorf(convertArabicToRomanFailed, got, tc.Roman)
			}
		})
	}
}
// TestConvertToArabic checks the Roman-to-Arabic conversion against
// the shared case table.
func TestConvertToArabic(t *testing.T) {
	for _, tc := range cases {
		name := fmt.Sprintf(convertRomanToArabic, tc.Roman, tc.Arabic)
		t.Run(name, func(t *testing.T) {
			if got := ConvertToArabic(tc.Roman); got != tc.Arabic {
				t.Errorf(convertRomanToArabicFailed, got, tc.Arabic)
			}
		})
	}
}
func TestPropertiesOfConversion(t *testing.T) {
assertion := func(arabic uint16) bool {
if arabic > 3999 {
return true
}
t.Log("testing", arabic)
roman := ConvertToRoman(arabic)
fromRoman := ConvertToArabic(roman)
return fromRoman == arabic
}
if err := quick.Check(assertion, nil); err != nil {
t.Error("failed checks", err)
}
}
|
package server
import (
"encoding/json"
"net/http"
colly "github.com/gocolly/colly/v2"
)
// songData is one scraped playlist entry, serialized into the JSON
// response of TestSpotify.
type songData struct {
	Title  string `json:"title"`
	Artist string `json:"artist"`
}
// TestSpotify scrapes a Spotify playlist web page and responds with
// the scraped track titles and artists as JSON. The "query" URL
// parameter overrides the playlist URL; a default playlist is used
// when it is absent. On a scrape failure it responds with status 500
// and an error payload.
func TestSpotify(w http.ResponseWriter, r *http.Request) {
	enableCors(&w, r)

	query := r.URL.Query().Get("query")
	if query == "" {
		query = "https://open.spotify.com/playlist/30mIdIfINRKeT4QbJOk0Qf"
	}

	c := colly.NewCollector()
	songs := []songData{}
	c.OnHTML(".tracklist-row > .tracklist-col.name > .track-name-wrapper", func(e *colly.HTMLElement) {
		// The wrapper's first child holds the title; the artist sits in
		// the first child of the last child.
		children := e.DOM.Children()
		songContainer := children.First()
		artistContainer := children.Last().Children().First()
		songs = append(songs, songData{
			songContainer.Text(),
			artistContainer.Text(),
		})
	})
	c.OnRequest(func(r *colly.Request) {
		// println terminates the line; the original used the builtin
		// print, which ran consecutive log lines together.
		println("Visiting " + r.URL.String())
	})

	if err := c.Visit(query); err != nil {
		// Named constant instead of the magic number 500.
		w.WriteHeader(http.StatusInternalServerError)
		// Status is already written, so an encode failure is
		// unrecoverable; the error is intentionally discarded.
		_ = json.NewEncoder(w).Encode(map[string]interface{}{
			"error": err.Error(),
		})
		return
	}
	_ = json.NewEncoder(w).Encode(map[string]interface{}{
		"results": songs,
	})
}
|
package ginja
import (
"encoding/json"
"reflect"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// NewTestApi returns a fresh, empty Api for use in the tests below.
func NewTestApi() *Api {
	return &Api{}
}
// TestItem is a minimal resource type used to exercise Api
// registration and document marshaling.
type TestItem struct {
	Name string `json:"name"`
}
// testItem is the canonical fixture instance used across the tests.
var testItem = TestItem{
	Name: "A Name",
}

// testItemPayload is the JSON:API shape expected when testItem is
// marshaled as a resource object with id "0".
var testItemPayload = map[string]interface{}{
	"data": map[string]interface{}{
		"type": "testitem",
		"id":   "0",
		"attributes": map[string]interface{}{
			"name": "A Name",
		},
	},
}

// testMetaData is a sample meta object attached to documents in tests.
var testMetaData = map[string]interface{}{
	"vendor": "ginja",
}
// TestStoreRegister verifies that registering a type records its
// singular/plural names and that NameFor resolves the singular form.
func TestStoreRegister(t *testing.T) {
	Convey("Api can register arbitrary types", t, func() {
		a := NewTestApi()
		a.Register(TestItem{})
		itemType := reflect.TypeOf(TestItem{})
		So(a.types[itemType], ShouldResemble, [2]string{"testitem", "testitems"})
		So(a.NameFor(TestItem{}), ShouldEqual, "testitem")
	})
}
// TestNewDocument verifies an empty document marshals with data:null
// and that added meta data appears in the output.
func TestNewDocument(t *testing.T) {
	Convey("Empty document has data:null", t, func() {
		doc := NewDocument()
		body, err := json.Marshal(&doc)
		So(string(body), ShouldEqual, `{"data":null}`)
		So(err, ShouldBeNil)

		Convey("adds meta data", func() {
			doc.AddMeta(testMetaData)
			body, err := doc.MarshalJSON()
			So(string(body), ShouldEqual, `{"data":null,"meta":{"vendor":"ginja"}}`)
			So(err, ShouldBeNil)
		})
	})
}
// TestNewCollectionDocument verifies an empty collection document
// marshals with an empty data array.
func TestNewCollectionDocument(t *testing.T) {
	Convey("Empty collection document has data:[]", t, func() {
		doc := NewCollectionDocument()
		// So(doc, ShouldImplement, (*json.Marshaler)(nil))
		body, err := json.Marshal(&doc)
		So(string(body), ShouldEqual, `{"data":[]}`)
		So(err, ShouldBeNil)
	})
}
// TestNewErrorDocument verifies an empty error document marshals with
// an empty errors array and no data field, and that added meta data
// appears in the output. (Fixes the "das" typo in the description.)
func TestNewErrorDocument(t *testing.T) {
	Convey("Empty error document has no data, but empty errors field", t, func() {
		ed := NewErrorDocument()
		payload, err := json.Marshal(&ed)
		So(string(payload), ShouldEqual, `{"errors":[]}`)
		So(err, ShouldBeNil)

		Convey("adds meta data", func() {
			ed.AddMeta(testMetaData)
			payload, err := ed.MarshalJSON()
			So(string(payload), ShouldEqual, `{"errors":[],"meta":{"vendor":"ginja"}}`)
			So(err, ShouldBeNil)
		})
	})
}
// TestAddData verifies that a document with one resource object
// marshals to the expected JSON:API payload and carries no meta.
func TestAddData(t *testing.T) {
	Convey("Document with simple data", t, func() {
		doc := NewDocument()
		resource := &ResourceObject{Id: "0", Object: &testItem}
		doc.AddData(resource)
		body, err := doc.MarshalJSON()
		So(doc.Meta, ShouldBeEmpty)
		So(string(body), ShouldEqual, `{"data":{"type":"testitem","id":"0","attributes":{"name":"A Name"}}}`)
		So(err, ShouldBeNil)
	})
}
// TestAddError verifies that an added error shows up in Errors and in
// the marshaled errors array.
func TestAddError(t *testing.T) {
	Convey("Adding errors", t, func() {
		doc := NewDocument()
		doc.AddError(NewError("test error"))
		So(doc.Errors, ShouldNotBeEmpty)
		body, err := doc.MarshalJSON()
		So(string(body), ShouldEqual, `{"errors":[{"title":"test error"}]}`)
		So(err, ShouldBeNil)
	})
}
// Benchmarks

// BenchmarkNewDocument1000 measures creating a document, attaching a
// resource object, and marshaling it to JSON on every iteration.
// NOTE(review): the "1000" suffix suggests a fixed workload, but the
// loop runs b.N times — confirm the name is intentional. The
// MarshalJSON results are deliberately discarded.
func BenchmarkNewDocument1000(b *testing.B) {
	ro := &ResourceObject{Id: "0", Object: &testItem}
	var d Document
	for n := 0; n < b.N; n++ {
		d = NewDocument()
		d.AddData(ro)
		d.MarshalJSON()
	}
}
|
package pgsql
import (
"database/sql"
"database/sql/driver"
"net"
)
// InetFromIPNet returns a driver.Valuer that produces a PostgreSQL
// inet from the given Go net.IPNet. A zero net.IPNet (nil IP and nil
// Mask) is encoded as SQL NULL.
func InetFromIPNet(val net.IPNet) driver.Valuer {
	return inetFromIPNet{val: val}
}
// InetToIPNet returns an sql.Scanner that converts a PostgreSQL inet
// into a Go net.IPNet and sets it to val. SQL NULL leaves *val
// untouched.
func InetToIPNet(val *net.IPNet) sql.Scanner {
	return inetToIPNet{val: val}
}
// InetFromIP returns a driver.Valuer that produces a PostgreSQL inet
// from the given Go net.IP. A nil IP is encoded as SQL NULL.
func InetFromIP(val net.IP) driver.Valuer {
	return inetFromIP{val: val}
}
// InetToIP returns an sql.Scanner that converts a PostgreSQL inet into
// a Go net.IP and sets it to val. SQL NULL leaves *val untouched.
func InetToIP(val *net.IP) sql.Scanner {
	return inetToIP{val: val}
}
type inetFromIPNet struct {
val net.IPNet
}
func (v inetFromIPNet) Value() (driver.Value, error) {
if v.val.IP == nil && v.val.Mask == nil {
return nil, nil
}
return []byte(v.val.String()), nil
}
type inetToIPNet struct {
val *net.IPNet
}
func (v inetToIPNet) Scan(src interface{}) error {
var str string
switch s := src.(type) {
case []byte:
str = string(s)
case string:
str = s
case nil:
return nil
}
_, ipnet, err := net.ParseCIDR(str)
if err != nil {
ipnet = &net.IPNet{IP: net.ParseIP(str)}
}
*v.val = *ipnet
return nil
}
type inetFromIP struct {
val net.IP
}
func (v inetFromIP) Value() (driver.Value, error) {
if v.val == nil {
return nil, nil
}
return []byte(v.val.String()), nil
}
type inetToIP struct {
val *net.IP
}
func (v inetToIP) Scan(src interface{}) error {
var str string
switch s := src.(type) {
case []byte:
str = string(s)
case string:
str = s
case nil:
return nil
}
*v.val = net.ParseIP(str)
return nil
}
|
package main
import (
"log"
"net/http"
"github.com/SanderV1992/golang_simple_blog/news"
"github.com/SanderV1992/golang_simple_blog/page"
"github.com/SanderV1992/golang_simple_blog/site"
"github.com/SanderV1992/golang_simple_blog/database"
)
// Server configuration.
const (
	defaultPort  = "8080"  // HTTP listen port
	databaseType = "mysql" // selects the news repository backend ("mysql" or "json")
)
// main wires the blog together: database connection, template
// renderer, the news repository selected by databaseType, the page and
// news servers, and finally the HTTP listener.
func main() {
	DB := database.Connect()

	router := site.Router{}
	renderer, err := site.NewRenderer(
		"templates/*",
		&router,
	)
	if err != nil {
		log.Fatal(err)
	}

	// Pick the repository implementation by configured backend.
	var newsRepo news.Repo
	switch databaseType {
	case "mysql":
		newsRepo = &news.RepoMysql{DB}
	case "json":
		//newsRepo = &news.RepoJson{}
	}

	(&page.Server{renderer}).Register(&router)
	(&news.Server{renderer, newsRepo}).Register(&router)

	// ListenAndServe always returns a non-nil error when the server
	// stops; the original silently discarded it.
	log.Fatal(http.ListenAndServe(":"+defaultPort, &router))
}
|
package transformer
import (
"github.com/golang/protobuf/ptypes"
"github.com/satori/go.uuid"
"github.com/tppgit/we_service/core"
"github.com/tppgit/we_service/dto/worder"
"github.com/tppgit/we_service/entity/order"
"github.com/tppgit/we_service/entity/service"
"github.com/tppgit/we_service/entity/user"
"github.com/tppgit/we_service/pkg/errors"
"github.com/tppgit/we_service/pkg/utils"
"strings"
"time"
)
// Validation messages returned when required update-order fields are
// missing.
const (
	ERROR_DETAILS_EMPTY   = "You should pass at least one field"
	ERROR_ID_EMPTY        = "The id should not empty"
	ERROR_DATESTART_EMPTY = "The dateStart should not empty"
	ERROR_TIMESTART_EMPTY = "The timeStart should not empty"
)
// WeOrderTransformer bundles the incoming order request DTOs together
// with the sibling transformers needed to convert between transport
// messages and order entities.
// NOTE(review): GetOrderByStaffId/GetOrdersByStaffId and
// GetOrderByCompanyId/GetOrdersByCompanyId hold the same request
// types — confirm both pairs are actually needed.
type WeOrderTransformer struct {
	AppointmentHistoryTransform        *core.GettingAppointmentHistoryRequest
	OrderForCreate                     *core.CreateOrderRequest
	OrderForUpdate                     *core.UpdateOrder
	GetOrderById                       *core.GetOrderByIdRequest
	GetOrderByStaffId                  *core.GetOrdersByStaffId
	GetOrderByCompanyId                *core.GetOrdersByCompanyId
	OrderForMobile                     *core.OrderForMobile
	GetOrdersByCompanyId               *core.GetOrdersByCompanyId
	GetOrdersByStaffId                 *core.GetOrdersByStaffId
	GetOrderByResidentId               *core.GetOrderByResident
	ResidentGetListHistoryOrderRequest *core.ResidentGetListHistoryOrderRequest
	WeCommentTransformer               *WeCommentTransformer
	WeServiceTransformer               *WeServiceTransformer
	WeCompanyTransformer               *WeCompanyTransformer
	WeUserTransform                    *WeUserTransform
	WeStaffTransform                   *WeStaffTransform
	WeHistoryTransformer               *WeHistoryTransformer
}
// TransformForGetOrdersByStaffId validates the staff order query
// (dateStart/dateEnd in "2006-01-02" form) and copies it into a
// core.GetOrdersByStaffId, mapping the status keyword to the internal
// comma-separated state list.
func (m *WeOrderTransformer) TransformForGetOrdersByStaffId() (*core.GetOrdersByStaffId, error) {
	query := new(core.GetOrdersByStaffId)
	var errorFields []errors.FieldError

	layout := "2006-01-02"
	if err := parseDate(layout, m.GetOrdersByStaffId.DateStart); err != nil {
		errorFields = append(errorFields, errors.FieldError{Field: "dateStart", Description: "Date Start invalid"})
	}
	// The original re-parsed DateEnd a second time before reporting the
	// error; parseDate is deterministic, so one check suffices.
	if err := parseDate(layout, m.GetOrdersByStaffId.DateEnd); err != nil {
		errorFields = append(errorFields, errors.FieldError{Field: "dateEnd", Description: "Date End invalid"})
	}

	if len(errorFields) > 0 {
		return query, errors.BuildInvalidArgument(errorFields...)
	}

	query.DateStart = m.GetOrdersByStaffId.DateStart
	query.DateEnd = m.GetOrdersByStaffId.DateEnd
	query.Status = m.convertStatusRequest(m.GetOrdersByStaffId.Status)
	return query, nil
}
// convertStatusRequest maps a public status keyword (case-insensitive)
// to the internal comma-separated list of order states. Unknown
// keywords map to the empty string.
func (m *WeOrderTransformer) convertStatusRequest(status string) string {
	statusMap := map[string]string{
		"NEW":      string(order.NewRequest),
		"PENDING":  string(order.SPPending) + "," + string(order.ResidentPending),
		"ACCEPTED": string(order.InProgress) + "," + string(order.RequestConfirmed),
		"PAYMENT":  string(order.PaymentPending),
	}
	// Missing keys yield "", matching the original default branch.
	return statusMap[strings.ToUpper(status)]
}
// TransformForCreateOrder validates the create-order request, verifies
// the referenced company, service, and staff all exist and are
// enabled, and builds the order.Order entity to persist.
//
// Review fixes: gofmt-clean parameter list, removed a stray statement
// semicolon, and the staff-availability error now reports field
// "staff" instead of "service".
func (m *WeOrderTransformer) TransformForCreateOrder(comService user.CompanyService, uService user.UserService, sService service.ServiceService, staffService user.StaffService) (*order.Order, error) {
	// Required-field validation; all missing fields are reported at once.
	var errorFields []errors.FieldError
	if m.OrderForCreate.ServiceId == "" {
		errorFields = append(errorFields, errors.FieldError{Field: "serviceId", Description: errors.ID_SERVICE_NOT_NULL})
	}
	if m.OrderForCreate.CompanyId == "" {
		errorFields = append(errorFields, errors.FieldError{Field: "companyId", Description: errors.ID_COMPANY_NOT_NUL})
	}
	if m.OrderForCreate.StaffId == "" {
		errorFields = append(errorFields, errors.FieldError{Field: "staffId", Description: errors.ID_STAFF_NOT_NUL})
	}
	if m.OrderForCreate.Duration == "" {
		errorFields = append(errorFields, errors.FieldError{Field: "duration", Description: "Duration must not be null"})
	}
	if m.OrderForCreate.ApartmentNo == "" {
		errorFields = append(errorFields, errors.FieldError{Field: "apartmentNo", Description: "Apartment No must not be null"})
	}
	if m.OrderForCreate.BuildingName == "" {
		errorFields = append(errorFields, errors.FieldError{Field: "buildingName", Description: "Name building must not be null"})
	}
	if len(errorFields) > 0 {
		return new(order.Order), errors.BuildInvalidArgument(errorFields...)
	}

	// The requested start date/time must be valid and in the future.
	if err := utils.CompareDateTime(m.OrderForCreate.DateStart + " " + m.OrderForCreate.TimeStart); err != nil {
		return new(order.Order), err
	}

	companyData, err := comService.GetCompanyByID(uuid.FromStringOrNil(m.OrderForCreate.CompanyId))
	if err != nil {
		return new(order.Order), errors.BuildInvalidArgument(errors.FieldError{Field: "company", Description: errors.COMPANY_NOT_EXIST})
	}
	if companyData.Status != user.ENABLE {
		return new(order.Order), errors.BuildInvalidArgument(errors.FieldError{Field: "Company", Description: "Company is not enable"})
	}

	serviceData, err := sService.GetServiceById(uuid.FromStringOrNil(m.OrderForCreate.ServiceId))
	if err != nil {
		return new(order.Order), errors.BuildInvalidArgument(errors.FieldError{Field: "service", Description: errors.SERVICE_NOT_EXIST})
	}
	if int(serviceData.Status) != service.ENABLE {
		return new(order.Order), errors.BuildInvalidArgument(errors.FieldError{Field: "service", Description: "Service is not enable"})
	}

	userData, err := uService.GetUserByID(uuid.FromStringOrNil(m.OrderForCreate.StaffId))
	if err != nil {
		return new(order.Order), errors.BuildInvalidArgument(errors.FieldError{Field: "staff", Description: errors.STAFF_NOT_EXIST})
	}
	if userData.Deleted {
		return new(order.Order), errors.BuildInvalidArgument(errors.FieldError{Field: "staff", Description: "Staff is not available"})
	}

	staffData, err := staffService.GetStaffInformation(uuid.FromStringOrNil(m.OrderForCreate.StaffId), uuid.FromStringOrNil(m.OrderForCreate.CompanyId))
	if err != nil {
		return new(order.Order), errors.BuildInvalidArgument(errors.FieldError{Field: "staff", Description: errors.STAFF_NOT_EXIST})
	}
	if !staffData.Enable {
		// Was reported under field "service"; this check concerns the staff member.
		return new(order.Order), errors.BuildInvalidArgument(errors.FieldError{Field: "staff", Description: "Staff is not available"})
	}

	model := new(order.Order)
	code, err := utils.GenerateCodeForOrder()
	if err != nil {
		return new(order.Order), err
	}
	model.Code = code
	model.State = order.NewRequest
	model.ServiceID = uuid.FromStringOrNil(m.OrderForCreate.ServiceId)
	model.CompanyID = uuid.FromStringOrNil(m.OrderForCreate.CompanyId)
	model.StaffID = uuid.FromStringOrNil(m.OrderForCreate.StaffId)
	model.ApartmentNo = m.OrderForCreate.ApartmentNo
	model.BuildingName = m.OrderForCreate.BuildingName
	model.AdditionDetails = m.OrderForCreate.AdditionDetails
	// The price is fixed to the service's default price at creation time.
	model.TotalCost = serviceData.PriceModel.DefaultPrice
	model.Destination = m.OrderForCreate.Destination
	model.Duration = m.OrderForCreate.Duration
	dateParse, err := time.Parse("2006-01-02", m.OrderForCreate.DateStart)
	if err != nil {
		return new(order.Order), err
	}
	model.DateStart = dateParse
	model.TimeStart = m.OrderForCreate.TimeStart
	return model, nil
}
// TransformForUpdateOrder validates the update-order request (id,
// dateStart, timeStart all required; id must parse as a UUID) and
// builds an order.Order carrying the new schedule.
func (m *WeOrderTransformer) TransformForUpdateOrder() (*order.Order, error) {
	var errorFields []errors.FieldError
	if m.OrderForUpdate == nil {
		errorFields = append(errorFields, errors.FieldError{Field: "details", Description: ERROR_DETAILS_EMPTY})
		return nil, errors.BuildInvalidArgument(errorFields...)
	}
	if m.OrderForUpdate.Id == "" {
		errorFields = append(errorFields, errors.FieldError{Field: "id", Description: ERROR_ID_EMPTY})
		return nil, errors.BuildInvalidArgument(errorFields...)
	}
	if m.OrderForUpdate.DateStart == nil {
		errorFields = append(errorFields, errors.FieldError{Field: "dateStart", Description: ERROR_DATESTART_EMPTY})
		return nil, errors.BuildInvalidArgument(errorFields...)
	}
	if m.OrderForUpdate.TimeStart == "" {
		errorFields = append(errorFields, errors.FieldError{Field: "timeStart", Description: ERROR_TIMESTART_EMPTY})
		return nil, errors.BuildInvalidArgument(errorFields...)
	}

	model := new(order.Order)
	model.ID = uuid.FromStringOrNil(m.OrderForUpdate.Id)
	if model.ID == uuid.Nil {
		errorFields = append(errorFields, errors.FieldError{Field: "id", Description: "Id invalid"})
		return nil, errors.BuildInvalidArgument(errorFields...)
	}
	// (A duplicate model.ID assignment was removed here.)
	// Conversion error intentionally ignored: DateStart was validated
	// non-nil above.
	model.DateStart, _ = ptypes.Timestamp(m.OrderForUpdate.DateStart)
	model.TimeStart = m.OrderForUpdate.TimeStart
	return model, nil
}
// ResponseOrder maps an order entity onto a core.OrderResponse DTO,
// including the related service, resident, staff, and company.
// NOTE(review): all sub-transformer errors and timestamp conversion
// errors are discarded — confirm partial responses are acceptable.
func (m *WeOrderTransformer) ResponseOrder(model *order.Order) (response *core.OrderResponse, err error) {
	response = new(core.OrderResponse)
	response.Detail = new(core.OrderDetail)
	service, _ := m.WeServiceTransformer.TransformServiceResponse(&model.Service)
	resident, _ := m.WeUserTransform.ResponseUser(&model.Resident)
	staff, _ := m.WeUserTransform.ResponseUser(&model.Staff)
	company, _ := m.WeCompanyTransformer.ResponseCompany(&model.Company)
	response.Id = model.ID.String()
	response.Detail.BuildingName = model.BuildingName
	response.Detail.ApartmentNo = model.ApartmentNo
	response.Detail.TotalCost = model.TotalCost
	response.Detail.Duration = model.Duration
	response.Detail.Code = model.Code
	response.Detail.AdditionDetails = model.AdditionDetails
	response.Detail.ExtraCost = model.ExtraCost
	response.Detail.Destination = model.Destination
	response.Detail.State = model.State
	response.Detail.TimeStart = model.TimeStart
	response.Detail.DateStart, _ = ptypes.TimestampProto(model.DateStart)
	response.Detail.CreatedAt, _ = ptypes.TimestampProto(model.CreatedAt)
	response.Detail.UpdatedAt, _ = ptypes.TimestampProto(model.UpdatedAt)
	response.Staff = staff
	response.Service = service
	response.Resident = resident
	response.Company = company
	return response, nil
}
// ResponseListOrder converts a slice of order entities into a
// core.ListOrder, stopping at the first conversion error (the partial
// list built so far is returned alongside the error).
func (m *WeOrderTransformer) ResponseListOrder(model []*order.Order) (*core.ListOrder, error) {
	list := &core.ListOrder{}
	for _, o := range model {
		entry, err := m.ResponseOrder(o)
		if err != nil {
			return list, err
		}
		list.Data = append(list.Data, entry)
	}
	return list, nil
}
// ResponseOrderForMobile maps an order entity onto the condensed
// mobile DTO and translates the internal order state to the public
// type keyword. Unknown states leave Type empty.
//
// Review fixes: removed the redundant `break` statements (Go switch
// cases do not fall through) and merged cases that map to the same
// keyword.
func (m *WeOrderTransformer) ResponseOrderForMobile(model *order.Order) (*core.OrderForMobile, error) {
	response := new(core.OrderForMobile)
	// Resident conversion error intentionally ignored (matches the
	// historical behavior of this transformer).
	resident, _ := m.WeUserTransform.ResponseUser(&model.Resident)
	response.BuildingName = model.BuildingName
	response.ApartmentNo = model.ApartmentNo
	response.ResidentId = resident.Id
	response.ResidentName = model.Resident.Name
	response.TimeStart = model.TimeStart
	response.DateStart, _ = ptypes.TimestampProto(model.DateStart)
	switch model.State {
	case order.NewRequest:
		response.Type = "NEW REQUEST"
	case string(order.ResidentPending), string(order.SPPending):
		response.Type = "PENDING"
	case string(order.RequestConfirmed), string(order.InProgress):
		response.Type = "ACCEPTED"
	case string(order.PaymentPending):
		response.Type = "PENDING PAYMENT"
	}
	return response, nil
}
// TransformAppointmentHistoryRequest copies the appointment-history
// query (filter, date range, pagination, sorting) into a
// worder.AppointmentHistory for the given company, validating that the
// sort direction is "asc" or "desc" (case-insensitive).
func (m *WeOrderTransformer) TransformAppointmentHistoryRequest(companyId uuid.UUID) (*worder.AppointmentHistory, error) {
	req := m.AppointmentHistoryTransform
	model := &worder.AppointmentHistory{
		FilterStatus: req.FilterStatus,
		StartDate:    req.StartDate,
		EndDate:      req.EndDate,
		Page:         req.Pagination.CurrentPage,
		PageSize:     req.Pagination.PageSize,
		TextSearch:   req.Pagination.TextSearch,
		SortType:     strings.ToLower(req.SortType),
		SortColumn:   strings.ToLower(req.SortColumn),
		CompanyId:    companyId,
	}
	if model.SortType != "desc" && model.SortType != "asc" {
		return model, errors.BuildInvalidArgument(errors.FieldError{Field: "sortType", Description: "Sort type invalid"})
	}
	return model, nil
}
// TransformAppointmentHistoryResponse builds the appointment-history
// response DTO from the given orders and total item count.
//
// Review fix: the totalItems pointer is now nil-checked instead of
// being dereferenced unconditionally (the original panicked on nil);
// TotalItems stays zero in that case.
func (m *WeOrderTransformer) TransformAppointmentHistoryResponse(orders []*order.Order, totalItems *int32) (*core.GettingAppointmentHistoryResponse, error) {
	result := new(core.GettingAppointmentHistoryResponse)
	if totalItems != nil {
		result.TotalItems = *totalItems
	}
	for _, e := range orders {
		item := new(core.AppointmentHistoryInfo)
		item.BuildingName = e.BuildingName
		item.Date = e.DateStart.String()
		item.StaffName = e.Staff.Name
		// Earnings include the base cost plus any extra cost.
		item.Earning = float32(e.TotalCost + e.ExtraCost)
		item.State = e.State
		item.Avatar = e.Staff.Avatar
		item.OrderId = e.ID.String()
		result.AppointmentHistoryInfo = append(result.AppointmentHistoryInfo, item)
	}
	return result, nil
}
// parseDate validates that date conforms to layout. The empty string
// is treated as valid (the field is optional).
func parseDate(layout, date string) error {
	if date == "" {
		return nil
	}
	_, err := time.Parse(layout, date)
	return err
}
// ResponseOrderForResident maps an order entity onto the resident-
// facing DTO and translates the internal order state to the public
// type keyword. Unknown states leave Type empty.
//
// Review fixes: removed the redundant `break` statements (Go switch
// cases do not fall through) and merged cases that map to the same
// keyword, mirroring ResponseOrderForMobile.
func (m *WeOrderTransformer) ResponseOrderForResident(model *order.Order) (*core.OrderForResident, error) {
	response := new(core.OrderForResident)
	response.Id = model.ID.String()
	response.Code = model.Code
	response.BuildingName = model.BuildingName
	response.ApartmentNo = model.ApartmentNo
	response.StaffId = model.StaffID.String()
	response.StaffName = model.Staff.Name
	response.CompanyId = model.Company.ID.String()
	response.CompanyName = model.Company.Name
	response.TimeStart = model.TimeStart
	response.DateStart, _ = ptypes.TimestampProto(model.DateStart)
	switch model.State {
	case order.NewRequest:
		response.Type = "NEW REQUEST"
	case string(order.ResidentPending), string(order.SPPending):
		response.Type = "PENDING"
	case string(order.RequestConfirmed), string(order.InProgress):
		response.Type = "ACCEPTED"
	case string(order.PaymentPending):
		response.Type = "PENDING PAYMENT"
	}
	return response, nil
}
// ResponseListOrderForResident converts the given orders into the
// resident-facing list DTO with the supplied total item count.
// Per-order conversion errors are intentionally discarded, matching
// the historical behavior.
//
// Review fix: removed the unused named return value `c` (the function
// always returned the local `result`).
func (m *WeOrderTransformer) ResponseListOrderForResident(model []*order.Order, TotalItem int32) *core.ListOrderForResident {
	result := new(core.ListOrderForResident)
	result.TotalItem = TotalItem
	for _, item := range model {
		res, _ := m.ResponseOrderForResident(item)
		result.Data = append(result.Data, res)
	}
	return result
}
// ResponseListHistoryOrderForStaff converts the given orders into the
// staff history-list DTO, returning an empty response alongside the
// first conversion error encountered. (The parameter was renamed from
// `order`, which shadowed the order package inside the body.)
func (m *WeOrderTransformer) ResponseListHistoryOrderForStaff(orders []*order.Order) (*core.StaffGetListHistoryOrderResponse, error) {
	response := new(core.StaffGetListHistoryOrderResponse)
	for _, o := range orders {
		entry, err := m.ResponseHistoryOrderForStaff(o)
		if err != nil {
			return new(core.StaffGetListHistoryOrderResponse), err
		}
		response.Data = append(response.Data, entry)
	}
	return response, nil
}
// ResponseHistoryOrderForStaff maps one order onto the staff history
// DTO. (The parameter was renamed from `order`, which shadowed the
// order package inside the body.)
func (m *WeOrderTransformer) ResponseHistoryOrderForStaff(o *order.Order) (*core.StaffGetHistoryOrderResponse, error) {
	return &core.StaffGetHistoryOrderResponse{
		OrderId:       o.ID.String(),
		Code:          o.Code,
		ResidentId:    o.ResidentID.String(),
		ResidentName:  o.Resident.Name,
		DateStart:     utils.ConvertToString(o.DateStart),
		TimeStart:     o.TimeStart,
		ApartmentName: o.ApartmentNo + " - " + o.BuildingName,
	}, nil
}
|
package pgsql
import (
"testing"
)
// TestFloat4Array exercises float4[] round-tripping through the
// testlist2 harness: float32 and float64 slice valuers/scanners, plus
// raw string and []byte pass-through forms.
// NOTE(review): the string/[]byte cases expect a trailing "0.0" to
// come back as "0" — this pins the driver's current float formatting.
func TestFloat4Array(t *testing.T) {
	testlist2{{
		valuer:  Float4ArrayFromFloat32Slice,
		scanner: Float4ArrayToFloat32Slice,
		data: []testdata{
			{input: []float32(nil), output: []float32(nil)},
			{input: []float32{}, output: []float32{}},
			{input: []float32{1, 0}, output: []float32{1, 0}},
			{
				input:  []float32{3.14, 0.15},
				output: []float32{3.14, 0.15}},
			{
				input:  []float32{3.4, 5.6, 3.14159},
				output: []float32{3.4, 5.6, 3.14159}},
			{
				input:  []float32{0.0024, 1.4, -89.2345, 0.0},
				output: []float32{0.0024, 1.4, -89.2345, 0.0}},
		},
	}, {
		valuer:  Float4ArrayFromFloat64Slice,
		scanner: Float4ArrayToFloat64Slice,
		data: []testdata{
			{input: []float64(nil), output: []float64(nil)},
			{input: []float64{}, output: []float64{}},
			{input: []float64{1, 0}, output: []float64{1, 0}},
			{
				input:  []float64{3.14, 0.15},
				output: []float64{3.14, 0.15}},
			{
				input:  []float64{3.4, 5.6, 3.14159},
				output: []float64{3.4, 5.6, 3.14159}},
			{
				input:  []float64{0.0024, 1.4, -89.2345, 0.0},
				output: []float64{0.0024, 1.4, -89.2345, 0.0}},
		},
	}, {
		// Raw string form passes through (with float renormalization).
		data: []testdata{
			{input: string(`{}`), output: string(`{}`)},
			{input: string(`{1,0}`), output: string(`{1,0}`)},
			{
				input:  string(`{3.14,0.15}`),
				output: string(`{3.14,0.15}`)},
			{
				input:  string(`{3.4,5.6,3.14159}`),
				output: string(`{3.4,5.6,3.14159}`)},
			{
				input:  string(`{0.0024,1.4,-89.2345,0.0}`),
				output: string(`{0.0024,1.4,-89.2345,0}`)},
		},
	}, {
		// Raw []byte form behaves identically to the string form.
		data: []testdata{
			{input: []byte(`{}`), output: []byte(`{}`)},
			{input: []byte(`{1,0}`), output: []byte(`{1,0}`)},
			{
				input:  []byte(`{3.14,0.15}`),
				output: []byte(`{3.14,0.15}`)},
			{
				input:  []byte(`{3.4,5.6,3.14159}`),
				output: []byte(`{3.4,5.6,3.14159}`)},
			{
				input:  []byte(`{0.0024,1.4,-89.2345,0.0}`),
				output: []byte(`{0.0024,1.4,-89.2345,0}`)},
		},
	}}.execute(t, "float4arr")
}
|
package main
import (
"bufio"
"fmt"
"os"
"strings"
"./Interprete"
"./Structs"
)
// disco holds the fixed pool of 27 virtual disks shared by every command
// dispatched through the interpreter.
var disco [27]Structs.Disco

// main starts the interactive command console.
func main() {
	menu()
}
// menu runs the interactive command prompt: it reads one line per iteration,
// forwards non-empty commands to the interpreter, and exits when the user
// types "x".
//
// Fixes over the previous version: the read/dispatch logic was duplicated
// once before the loop and once inside it, and a fresh bufio.Reader was
// created on every iteration (a new reader over os.Stdin can discard bytes
// already buffered by the previous one). A single reader and a single loop
// keep the behavior while removing both problems.
func menu() {
	fmt.Println("Bienvenido a la consola de comandos... ('x' para finalizar)")
	reader := bufio.NewReader(os.Stdin)
	for {
		fmt.Print("Enter comands: ")
		comando, _ := reader.ReadString('\n')
		if comando == "x\n" {
			return
		}
		if comando != "" {
			Interprete.Interpreter(strings.TrimSpace(comando), &disco)
		}
	}
}
|
package main
import (
"bufio"
"fmt"
"io"
"log"
"os"
"os/exec"
"regexp"
"strings"
"sync"
)
// main resolves the pods matching os.Args[1], streams their logs
// concurrently, and multiplexes all lines onto stdout.
//
// Fix: the previous version printed from a background goroutine and returned
// from main right after wg.Wait(), so lines still buffered in `out` could be
// dropped, and `out` was never closed. Printing now happens in the main
// goroutine, and `out` is closed once every streamer has finished, which
// terminates the print loop cleanly.
func main() {
	if len(os.Args) < 2 {
		fmt.Println("usage: klogs <pod-name>")
		fmt.Println("example: klogs api")
		os.Exit(1)
	}
	name := os.Args[1]
	pods, err := getPods(name)
	if err != nil {
		panic(err)
	}
	log.Printf("found %d pods with the name %s", len(pods), name)
	var wg sync.WaitGroup
	out := make(chan string, 1)
	for _, pod := range pods {
		wg.Add(1)
		go func(pod string) {
			defer wg.Done()
			if err := streamLogs(pod, name, out); err != nil {
				log.Printf("error streaming logs for %s: %v", pod, err)
			}
		}(pod)
	}
	// Close the channel once all streamers are done so the range below ends.
	go func() {
		wg.Wait()
		close(out)
	}()
	for line := range out {
		fmt.Println(line)
	}
}
// getPods shells out to `kubectl get pods -o name` and returns the names of
// all pods whose name starts with "<name>-". On kubectl failure the combined
// output is included in the returned error.
func getPods(name string) ([]string, error) {
	raw, err := exec.Command("kubectl", "get", "pods", "-o", "name").CombinedOutput()
	if err != nil {
		return nil, fmt.Errorf("%v: %s", err, raw)
	}
	// Match "pod/<name>-<suffix>" entries; QuoteMeta guards against regex
	// metacharacters in the user-supplied name.
	matcher := regexp.MustCompile(fmt.Sprintf(`pod\/(%s\-.+)`, regexp.QuoteMeta(name)))
	pods := []string{}
	for _, field := range strings.Fields(string(raw)) {
		m := matcher.FindStringSubmatch(field)
		if m == nil {
			continue
		}
		pods = append(pods, m[1])
	}
	return pods, nil
}
// streamLogs follows the logs of one pod via `kubectl logs -f` and forwards
// every line (stdout in a background goroutine, stderr in this one) to out.
// It returns when the stderr stream ends.
//
// Fix: pipe-setup failures were handled with panic, crashing the whole
// program for a single pod; they are now returned as errors so the caller's
// per-pod error logging applies.
func streamLogs(pod, container string, out chan<- string) error {
	cmd := exec.Command("kubectl", "logs", pod, container, "-f")
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		return err
	}
	go readOutput(stdout, out)
	return readOutput(stderr, out)
}
func readOutput(reader io.Reader, out chan<- string) error {
rd := bufio.NewReader(reader)
for {
line, err := rd.ReadString('\n')
if err != nil {
return err
}
out <- line[:len(line)-1]
}
}
|
/*
* Copyright 2018 Haines Chan
*
* This program is free software; you can redistribute and/or modify it
* under the terms of the standard MIT license. See LICENSE for more details
*/
package config
import (
"encoding/json"
"fmt"
"github.com/containernetworking/cni/pkg/types"
"github.com/hainesc/anchor/pkg/runtime/k8s"
)
// OctopusConf represents the Octopus CNI plugin configuration. It embeds the
// standard CNI NetConf and adds octopus-specific fields parsed from the
// network config file.
type OctopusConf struct {
	types.NetConf
	// Mode selects the plugin operating mode (semantics defined by the plugin).
	Mode string `json:"mode"`
	// MTU to apply to created interfaces.
	MTU int `json:"mtu"`
	// Octopus maps interface names to their settings; required (see LoadOctopusConf).
	Octopus    map[string]string `json:"octopus"`
	Kubernetes k8s.Kubernetes    `json:"kubernetes"`
	Policy     k8s.Policy        `json:"policy"`
}
// IPAMConf represents the IPAM section of the CNI network config, covering
// both the etcd client settings and the Kubernetes client settings.
type IPAMConf struct {
	Name string
	Type string `json:"type"`
	// etcd client
	Endpoints string `json:"etcd_endpoints"`
	// Used for k8s client
	Kubernetes k8s.Kubernetes `json:"kubernetes"`
	Policy     k8s.Policy     `json:"policy"`
	// etcd perm files
	CertFile      string `json:"etcd_cert_file"`
	KeyFile       string `json:"etcd_key_file"`
	TrustedCAFile string `json:"etcd_ca_cert_file"`
	// ServiceIPNet is the cluster service CIDR; NodeIPs lists node addresses.
	ServiceIPNet string   `json:"service_ipnet"`
	NodeIPs      []string `json:"node_ips"`
	// Additional network config for pods
	Routes     []*types.Route `json:"routes,omitempty"`
	ResolvConf string         `json:"resolvConf,omitempty"`
}
// CNIConf represents the top-level network config. Only the fields needed to
// reach the nested IPAM section are modeled here.
type CNIConf struct {
	Name       string    `json:"name"`
	CNIVersion string    `json:"cniVersion"`
	Type       string    `json:"type"`
	Master     string    `json:"master"`
	IPAM       *IPAMConf `json:"ipam"`
}
// LoadOctopusConf parses the octopus plugin configuration from the raw config
// file bytes. It returns the parsed config together with its CNI version, and
// fails if the mandatory "octopus" section is absent.
func LoadOctopusConf(bytes []byte) (*OctopusConf, string, error) {
	conf := new(OctopusConf)
	if err := json.Unmarshal(bytes, conf); err != nil {
		return nil, "", fmt.Errorf("failed to load netconf: %v", err)
	}
	if conf.Octopus == nil {
		return nil, "", fmt.Errorf(`"octopus" field is required. It specifies a list of interface names to virtualize`)
	}
	return conf, conf.CNIVersion, nil
}
// LoadIPAMConf parses the top-level network config from the raw config file
// bytes and returns its IPAM section plus the CNI version. The "ipam" section
// and its "etcd_endpoints" entry are mandatory.
// NOTE(review): envArgs is currently unused — confirm whether CNI_ARGS
// handling was intended here.
func LoadIPAMConf(bytes []byte, envArgs string) (*IPAMConf, string, error) {
	conf := CNIConf{}
	if err := json.Unmarshal(bytes, &conf); err != nil {
		return nil, "", err
	}
	switch {
	case conf.IPAM == nil:
		return nil, "", fmt.Errorf("IPAM config missing 'ipam' key")
	case conf.IPAM.Endpoints == "":
		return nil, "", fmt.Errorf("IPAM config missing 'etcd_endpoints' keys")
	}
	return conf.IPAM, conf.CNIVersion, nil
}
|
package ws
import (
"encoding/json"
"fmt"
"log"
"github.com/gorilla/websocket"
)
// Client is one websocket connection: its id, the underlying socket, the
// outbound message queue consumed by write(), and the last message buffer.
type Client struct {
	id     string
	socket *websocket.Conn
	// send carries frames queued for this client; closing it makes write()
	// emit a close frame and stop.
	send    chan []byte
	message *Message
}
// work starts the reader and writer pumps for this client, one goroutine each.
func (c *Client) work() {
	go c.read()
	go c.write()
}
// read pumps inbound frames from the socket, decodes them into Message
// (Sender pre-filled with this client's id) and forwards them to the global
// manager. The connection is closed when the loop exits.
func (c *Client) read() {
	defer c.close()
	for {
		_, message, err := c.socket.ReadMessage()
		if err != nil {
			log.Println(err)
			break
		}
		msg := Message{Sender: c.id}
		err = json.Unmarshal(message, &msg)
		if err != nil {
			fmt.Println("Unmarshal failed, ", err)
			// NOTE(review): returning here terminates the reader and — via the
			// deferred close — drops the whole connection on a single malformed
			// message. Confirm this is intended rather than `continue`.
			return
		}
		// Messages without an explicit type are broadcast.
		if msg.Type == "" {
			msg.Type = MESSAGE_TYPE_TO_BROADCAST
		}
		manager.message <- &msg
	}
}
// write pumps frames from c.send to the socket. When c.send is closed it
// sends a websocket close frame and returns; the deferred close tears down
// the connection in every exit path.
//
// Fix: the body used a `select` with a single case, which is equivalent to a
// plain channel receive (staticcheck S1000); the redundant select is removed.
// Behavior is unchanged.
func (c *Client) write() {
	defer c.close()
	for {
		message, ok := <-c.send
		if !ok {
			// Channel closed: tell the peer we are going away.
			c.socket.WriteMessage(websocket.CloseMessage, []byte{})
			return
		}
		c.socket.WriteMessage(websocket.TextMessage, message)
	}
}
// close shuts the underlying websocket connection.
func (c *Client) close() {
	c.socket.Close()
}
|
package mkhttpclient
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
"time"
"github.com/zhongxuqi/mklibs/common"
"github.com/zhongxuqi/mklibs/mklog"
)
// Content-Type header values used by the request helpers below.
const (
	ContentTypeJSON = "application/json"
	ContentTypeForm = "application/x-www-form-urlencoded"
)
var (
	// defaultBaseConfig supplies the transport, retry and timeout defaults
	// applied before any HTTPClientOption overrides.
	defaultBaseConfig = baseConfig{
		MaxIdleConns:       10,
		IdleConnTimeout:    time.Duration(30 * time.Second),
		DisableCompression: true,
		RetryTimes:         0,
		RetryTimeout:       time.Duration(5 * time.Second),
		TotalTimeout:       time.Duration(5 * time.Second),
	}
)
// HTTPClient is a JSON-decoding HTTP client. Each verb comes in two forms:
// the plain method uses default headers/options, while the *Ex variant
// accepts extra headers and per-call options. All responses are decoded into
// res as JSON.
type HTTPClient interface {
	GetEx(ctx context.Context, path string, params map[string]interface{}, res interface{}, header map[string]string, options ...HTTPClientOption) error
	Get(ctx context.Context, path string, params map[string]interface{}, res interface{}) error
	DeleteEx(ctx context.Context, path string, res interface{}, header map[string]string, options ...HTTPClientOption) error
	Delete(ctx context.Context, path string, res interface{}) error
	PutJSONEx(ctx context.Context, path string, params interface{}, res interface{}, header map[string]string, options ...HTTPClientOption) error
	PutJSON(ctx context.Context, path string, params interface{}, res interface{}) error
	PostJSONEx(ctx context.Context, path string, params interface{}, res interface{}, header map[string]string, options ...HTTPClientOption) error
	PostJSON(ctx context.Context, path string, params interface{}, res interface{}) error
	PostEx(ctx context.Context, path string, params map[string]string, res interface{}, header map[string]string, options ...HTTPClientOption) error
	Post(ctx context.Context, path string, params map[string]string, res interface{}) error
	PostFileEx(ctx context.Context, path string, files map[string][]byte, res interface{}, header map[string]string, options ...HTTPClientOption) error
}
// httpClient implements HTTPClient against a fixed host prefix, with
// client-wide options merged with per-call options in do().
type httpClient struct {
	host    string
	options []HTTPClientOption
	client  *http.Client
}
// NewHTTPClient builds an HTTPClient bound to host. The supplied options are
// applied on top of defaultBaseConfig to shape the underlying transport and
// are also remembered for every subsequent request.
func NewHTTPClient(host string, options ...HTTPClientOption) HTTPClient {
	cfg := parseBaseConfig(defaultBaseConfig, options)
	transport := &http.Transport{
		MaxIdleConns:       cfg.MaxIdleConns,
		IdleConnTimeout:    cfg.IdleConnTimeout,
		DisableCompression: cfg.DisableCompression,
	}
	return &httpClient{
		host:    host,
		options: options,
		client:  &http.Client{Transport: transport},
	}
}
// GetEx issues a GET request with the given query params, custom headers and
// per-call options, decoding the JSON response into res.
//
// Fix: the query string was previously built by naive concatenation
// ("k=v&..."), leaving values unescaped and a trailing '&'; url.Values.Encode
// now percent-encodes every key/value (and sorts keys deterministically).
func (s *httpClient) GetEx(ctx context.Context, path string, params map[string]interface{}, res interface{}, header map[string]string, options ...HTTPClientOption) error {
	ml := mklog.NewWithContext(ctx)
	if len(params) > 0 {
		values := make(url.Values, len(params))
		for k, v := range params {
			values.Set(k, fmt.Sprintf("%+v", v))
		}
		path += "?" + values.Encode()
	}
	ml.Infof("%s%s", s.host, path)
	return s.do(ctx, http.MethodGet, fmt.Sprintf("%s%s", s.host, path), nil, res, header, options...)
}
// Get issues a GET request with the given query params and default headers,
// decoding the JSON response into res.
//
// Fix: same as GetEx — query params are now percent-encoded via
// url.Values.Encode instead of raw concatenation with a trailing '&'.
func (s *httpClient) Get(ctx context.Context, path string, params map[string]interface{}, res interface{}) error {
	ml := mklog.NewWithContext(ctx)
	if len(params) > 0 {
		values := make(url.Values, len(params))
		for k, v := range params {
			values.Set(k, fmt.Sprintf("%+v", v))
		}
		path += "?" + values.Encode()
	}
	ml.Infof("url %+v%+v", s.host, path)
	return s.do(ctx, http.MethodGet, fmt.Sprintf("%s%s", s.host, path), nil, res, nil)
}
// DeleteEx issues a DELETE request with custom headers and per-call options,
// decoding the JSON response into res.
func (s *httpClient) DeleteEx(ctx context.Context, path string, res interface{}, header map[string]string, options ...HTTPClientOption) error {
	ml := mklog.NewWithContext(ctx)
	fullURL := fmt.Sprintf("%s%s", s.host, path)
	ml.Infof("url [%s]%s%s header %+v", http.MethodDelete, s.host, path, header)
	return s.do(ctx, http.MethodDelete, fullURL, nil, res, header, options...)
}
// Delete issues a DELETE request with default headers, decoding the JSON
// response into res.
func (s *httpClient) Delete(ctx context.Context, path string, res interface{}) error {
	ml := mklog.NewWithContext(ctx)
	fullURL := fmt.Sprintf("%s%s", s.host, path)
	ml.Infof("url [%s]%s%s", http.MethodDelete, s.host, path)
	return s.do(ctx, http.MethodDelete, fullURL, nil, res, nil)
}
// PutJSONEx issues a PUT request with a JSON-encoded body, custom headers and
// per-call options, decoding the JSON response into res. A nil params sends
// an empty body.
func (s *httpClient) PutJSONEx(ctx context.Context, path string, params interface{}, res interface{}, header map[string]string, options ...HTTPClientOption) error {
	ml := mklog.NewWithContext(ctx)
	ml.Infof("url [%s]%s%s params %+v header %+v", http.MethodPut, s.host, path, params, header)
	body := []byte{}
	if params != nil {
		encoded, err := json.Marshal(params)
		if err != nil {
			ml.Errorf("json.Marshal error %+v", err.Error())
			return err
		}
		body = encoded
	}
	if header == nil {
		header = map[string]string{}
	}
	header["Content-Type"] = ContentTypeJSON
	ml.Infof("params %+v", string(body))
	return s.do(ctx, http.MethodPut, fmt.Sprintf("%s%s", s.host, path), body, res, header, options...)
}
// PutJSON issues a PUT request with a JSON-encoded body and default headers,
// decoding the JSON response into res. A nil params sends an empty body.
func (s *httpClient) PutJSON(ctx context.Context, path string, params interface{}, res interface{}) error {
	ml := mklog.NewWithContext(ctx)
	ml.Infof("url [%s]%s%s params %+v", http.MethodPut, s.host, path, params)
	body := []byte{}
	if params != nil {
		encoded, err := json.Marshal(params)
		if err != nil {
			ml.Errorf("json.Marshal error %+v", err.Error())
			return err
		}
		body = encoded
	}
	ml.Infof("params %+v", string(body))
	return s.do(ctx, http.MethodPut, fmt.Sprintf("%s%s", s.host, path), body, res, map[string]string{"Content-Type": ContentTypeJSON})
}
// PostJSONEx issues a POST request with a JSON-encoded body, custom headers
// and per-call options, decoding the JSON response into res. A nil params
// sends an empty body.
func (s *httpClient) PostJSONEx(ctx context.Context, path string, params interface{}, res interface{}, header map[string]string, options ...HTTPClientOption) error {
	ml := mklog.NewWithContext(ctx)
	body := []byte{}
	if params != nil {
		encoded, err := json.Marshal(params)
		if err != nil {
			ml.Errorf("json.Marshal error %+v", err)
			return err
		}
		body = encoded
	}
	if header == nil {
		header = map[string]string{}
	}
	ml.Infof("params %+v", string(body))
	header["Content-Type"] = ContentTypeJSON
	return s.do(ctx, http.MethodPost, fmt.Sprintf("%s%s", s.host, path), body, res, header, options...)
}
// PostJSON issues a POST request with a JSON-encoded body and default
// headers, decoding the JSON response into res. A nil params sends an empty
// body.
func (s *httpClient) PostJSON(ctx context.Context, path string, params interface{}, res interface{}) error {
	ml := mklog.NewWithContext(ctx)
	body := []byte{}
	if params != nil {
		encoded, err := json.Marshal(params)
		if err != nil {
			ml.Errorf("json.Marshal error %+v", err)
			return err
		}
		body = encoded
	}
	fullURL := fmt.Sprintf("%s%s", s.host, path)
	ml.Infof("[POST]%+v body: %+v", fullURL, string(body))
	return s.do(ctx, http.MethodPost, fullURL, body, res, map[string]string{"Content-Type": ContentTypeJSON})
}
// PostEx issues a POST request with a form-urlencoded body, custom headers
// and per-call options, decoding the JSON response into res.
func (s *httpClient) PostEx(ctx context.Context, path string, params map[string]string, res interface{}, header map[string]string, options ...HTTPClientOption) error {
	ml := mklog.NewWithContext(ctx)
	body := []byte{}
	if params != nil {
		form := make(url.Values, len(params))
		for k, v := range params {
			form.Add(k, v)
		}
		body = []byte(form.Encode())
	}
	ml.Infof("[POST]%+v body: %+v", fmt.Sprintf("%s%s", s.host, path), string(body))
	if header == nil {
		header = map[string]string{}
	}
	header["Content-Type"] = ContentTypeForm
	return s.do(ctx, http.MethodPost, fmt.Sprintf("%s%s", s.host, path), body, res, header, options...)
}
// Post issues a POST request with a form-urlencoded body and default headers,
// decoding the JSON response into res.
func (s *httpClient) Post(ctx context.Context, path string, params map[string]string, res interface{}) error {
	ml := mklog.NewWithContext(ctx)
	body := []byte{}
	if params != nil {
		form := make(url.Values, len(params))
		for k, v := range params {
			form.Add(k, v)
		}
		body = []byte(form.Encode())
	}
	fullURL := fmt.Sprintf("%s%s", s.host, path)
	ml.Infof("[POST]%+v body: %+v", fullURL, string(body))
	return s.do(ctx, http.MethodPost, fullURL, body, res, map[string]string{"Content-Type": ContentTypeForm})
}
// PostFileEx uploads the given files as a multipart/form-data POST request
// (one form file per map entry, field name == file name), decoding the JSON
// response into res.
//
// Fix: writer.Close() used to run inside the per-file loop, so with more than
// one file the multipart terminator was written after the first part and
// subsequent parts landed after it (corrupt body); with zero files the writer
// was never closed at all. Close now runs exactly once, after all parts.
func (s *httpClient) PostFileEx(ctx context.Context, path string, files map[string][]byte, res interface{}, header map[string]string, options ...HTTPClientOption) error {
	ml := mklog.NewWithContext(ctx)
	ml.Infof("url [%s]%s%s", http.MethodPost, s.host, path)
	body := bytes.NewBuffer([]byte(""))
	writer := multipart.NewWriter(body)
	var contentLength int64
	for fileName, fileContent := range files {
		part, err := writer.CreateFormFile(fileName, fileName)
		if err != nil {
			ml.Errorf("writer.CreateFormFile error %+v", err.Error())
			return err
		}
		_, err = io.Copy(part, bytes.NewReader(fileContent))
		if err != nil {
			ml.Errorf("bytes.NewReader error %+v", err.Error())
			return err
		}
		contentLength += int64(len(fileContent))
	}
	// Close once, after every part, so the trailing boundary is correct.
	if err := writer.Close(); err != nil {
		ml.Errorf("writer.Close error %+v", err.Error())
		return err
	}
	if header == nil {
		header = make(map[string]string)
	}
	header["Content-Type"] = writer.FormDataContentType()
	//header["Content-Length"] = fmt.Sprintf("%d", contentLength)
	return s.do(ctx, http.MethodPost, fmt.Sprintf("%s%s", s.host, path), body.Bytes(), res, header, options...)
}
// do executes one HTTP request with merged client-wide and per-call options,
// retrying up to RetryTimes extra attempts. Each attempt runs client.Do in a
// goroutine so a RetryTimeout can cut an attempt short; the final attempt is
// bounded by TotalTimeout instead. The response body is JSON-decoded into res
// by parseRes.
func (s *httpClient) do(ctx context.Context, method, url string, bodyByte []byte, res interface{}, header map[string]string, options ...HTTPClientOption) error {
	ml := mklog.NewWithContext(ctx)
	// Per-call options take effect after (i.e. override) client-wide ones.
	allOptions := make([]HTTPClientOption, 0, len(s.options)+len(options))
	allOptions = append(allOptions, s.options...)
	allOptions = append(allOptions, options...)
	currConfig := parseBaseConfig(defaultBaseConfig, allOptions)
	var httpRes *http.Response
	// NOTE(review): resChan is unbuffered and shared across attempts; an
	// attempt whose timeout fires leaves its goroutine blocked sending here
	// (a leak, and its response body is never closed) until a later attempt's
	// receive happens to pick it up — confirm whether this is acceptable.
	resChan := make(chan *http.Response)
	for i := 0; i < currConfig.RetryTimes+1; i++ {
		// Rebuild the body reader each attempt — a bytes.Buffer is consumed
		// by the previous attempt's request.
		var buf *bytes.Buffer = nil
		if bodyByte != nil {
			buf = bytes.NewBuffer(bodyByte)
		} else {
			buf = bytes.NewBuffer([]byte(""))
		}
		req, err := http.NewRequest(method, url, buf)
		if err != nil {
			ml.Errorf("http.NewRequest error %+v", err.Error())
			return err
		}
		// add http headers
		req.Header.Set(common.HttpLogID, ml.GetLogID())
		for k, v := range header {
			req.Header.Add(k, v)
		}
		errChan := make(chan error)
		go func() {
			res, err := s.client.Do(req)
			if err != nil {
				ml.Errorf("client.Do error %+v", err)
				errChan <- err
				return
			}
			resChan <- res
		}()
		if i < currConfig.RetryTimes {
			// Non-final attempt: errors and timeouts fall through to retry.
			select {
			case httpRes = <-resChan:
				// Retry non-2xx statuses only when RetryHttpError is set;
				// otherwise surface the parse result immediately.
				if httpStatusCode, err := parseRes(ctx, httpRes, res); httpStatusCode/100 != 2 && currConfig.RetryHttpError {
					ml.Errorf("parseRes error %+v", err)
				} else if err != nil {
					ml.Errorf("parseRes error %+v", err)
					return err
				} else {
					return nil
				}
			case err := <-errChan:
				ml.Errorf("client.Do error %+v", err)
			case <-time.After(currConfig.RetryTimeout):
				ml.Infof("retry %+v", i)
			}
		} else {
			// Final attempt: no further retries, so any outcome is returned.
			select {
			case httpRes = <-resChan:
				if _, err := parseRes(ctx, httpRes, res); err != nil {
					ml.Errorf("parseRes error %+v", err)
					return err
				}
				return nil
			case err := <-errChan:
				ml.Errorf("http error %+v", err)
				return err
			case <-time.After(currConfig.TotalTimeout):
				ml.Errorf("req %+v error timeout", req)
				return fmt.Errorf("req %+v error timeout", req)
			}
		}
	}
	// Reached only when every retryable attempt timed out or errored.
	bodyStr := ""
	if bodyByte != nil {
		bodyStr = string(bodyByte)
	}
	ml.Errorf("req %+v error timeout", bodyStr)
	return fmt.Errorf("req %+v error timeout", bodyStr)
}
// parseRes reads the full response body, JSON-decodes it into res, and
// returns the HTTP status code plus an error for non-2xx statuses.
//
// Fix: the response body was never closed, leaking the underlying connection
// and preventing transport reuse; it is now closed via defer.
// NOTE(review): a JSON decode failure on a 2xx response is still only logged,
// not returned (res may be left unpopulated) — confirm this leniency is
// intended, e.g. for empty/non-JSON success bodies.
func parseRes(ctx context.Context, httpRes *http.Response, res interface{}) (int, error) {
	ml := mklog.NewWithContext(ctx)
	defer httpRes.Body.Close()
	bodyByte, err := ioutil.ReadAll(httpRes.Body)
	if err != nil {
		ml.Errorf("ioutil.ReadAll error %+v", err)
		return httpRes.StatusCode, err
	}
	ml.Infof("response body: %+v", string(bodyByte))
	err = json.Unmarshal(bodyByte, res)
	if err != nil {
		ml.Errorf("json.Unmarshal error %+v", err)
	}
	if httpRes.StatusCode/100 != 2 {
		return httpRes.StatusCode, fmt.Errorf("http error %d %s", httpRes.StatusCode, httpRes.Status)
	}
	return httpRes.StatusCode, nil
}
|
package fsm
import (
"fmt"
"strings"
"testing"
)
// TestMermaidOutput checks the mermaid stateDiagram-v2 rendering of a small
// FSM. Newlines are stripped before comparing, so only the line content and
// order matter, not blank lines.
func TestMermaidOutput(t *testing.T) {
	fsmUnderTest := NewFSM(
		"closed",
		Events{
			{Name: "open", Src: []string{"closed"}, Dst: "open"},
			{Name: "close", Src: []string{"open"}, Dst: "closed"},
			{Name: "part-close", Src: []string{"intermediate"}, Dst: "closed"},
		},
		Callbacks{},
	)
	got, err := VisualizeForMermaidWithGraphType(fsmUnderTest, StateDiagram)
	if err != nil {
		t.Errorf("got error for visualizing with type MERMAID: %s", err)
	}
	wanted := `
stateDiagram-v2
[*] --> closed
closed --> open: open
intermediate --> closed: part-close
open --> closed: close
`
	normalizedGot := strings.ReplaceAll(got, "\n", "")
	normalizedWanted := strings.ReplaceAll(wanted, "\n", "")
	if normalizedGot != normalizedWanted {
		t.Errorf("build mermaid graph failed. \nwanted \n%s\nand got \n%s\n", wanted, got)
		// Dump raw bytes to make whitespace differences visible.
		fmt.Println([]byte(normalizedGot))
		fmt.Println([]byte(normalizedWanted))
	}
}
// TestMermaidFlowChartOutput checks the mermaid flow-chart rendering of an
// FSM with an intermediate state, including node ids, edge labels, and the
// current-state highlight style. Newlines are stripped before comparing.
func TestMermaidFlowChartOutput(t *testing.T) {
	fsmUnderTest := NewFSM(
		"closed",
		Events{
			{Name: "open", Src: []string{"closed"}, Dst: "open"},
			{Name: "part-open", Src: []string{"closed"}, Dst: "intermediate"},
			{Name: "part-open", Src: []string{"intermediate"}, Dst: "open"},
			{Name: "close", Src: []string{"open"}, Dst: "closed"},
			{Name: "part-close", Src: []string{"intermediate"}, Dst: "closed"},
		},
		Callbacks{},
	)
	got, err := VisualizeForMermaidWithGraphType(fsmUnderTest, FlowChart)
	if err != nil {
		t.Errorf("got error for visualizing with type MERMAID: %s", err)
	}
	wanted := `
graph LR
id0[closed]
id1[intermediate]
id2[open]
id0 --> |open| id2
id0 --> |part-open| id1
id1 --> |part-close| id0
id1 --> |part-open| id2
id2 --> |close| id0
style id0 fill:#00AA00
`
	normalizedGot := strings.ReplaceAll(got, "\n", "")
	normalizedWanted := strings.ReplaceAll(wanted, "\n", "")
	if normalizedGot != normalizedWanted {
		t.Errorf("build mermaid graph failed. \nwanted \n%s\nand got \n%s\n", wanted, got)
		// Dump raw bytes to make whitespace differences visible.
		fmt.Println([]byte(normalizedGot))
		fmt.Println([]byte(normalizedWanted))
	}
}
|
/*
Copyright © 2022 SUSE LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"os"
"os/exec"
"runtime"
"strings"
"github.com/rancher-sandbox/rancher-desktop/src/go/rdctl/pkg/options/generated"
"github.com/rancher-sandbox/rancher-desktop/src/go/rdctl/pkg/utils"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// startCmd represents the start command. It accepts no positional arguments;
// all behavior is driven by flags shared with `rdctl set`.
var startCmd = &cobra.Command{
	Use:   "start",
	Short: "Start up Rancher Desktop, or update its settings.",
	Long: `Starts up Rancher Desktop with the specified settings.
If it's running, behaves the same as 'rdctl set ...'.
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		if err := cobra.NoArgs(cmd, args); err != nil {
			return err
		}
		return doStartOrSetCommand(cmd)
	},
}
// applicationPath is the --path/-p override for the main executable;
// noModalDialogs suppresses dialog boxes on startup.
var applicationPath string
var noModalDialogs bool

// init registers the start command, the flags it shares with `set`, and its
// start-only flags.
func init() {
	rootCmd.AddCommand(startCmd)
	options.UpdateCommonStartAndSetCommands(startCmd)
	startCmd.Flags().StringVarP(&applicationPath, "path", "p", "", "path to main executable")
	startCmd.Flags().BoolVarP(&noModalDialogs, "no-modal-dialogs", "", false, "avoid displaying dialog boxes")
}
/**
 * If Rancher Desktop is currently running, treat this like a `set` command, and pass all the args to that.
 * Detection is done by attempting to fetch the current settings; success
 * means the application (and its API) is up.
 */
func doStartOrSetCommand(cmd *cobra.Command) error {
	_, err := getListSettings()
	if err == nil {
		// Unavoidable race condition here.
		// There's no system-wide mutex that will let us guarantee that if rancher desktop is running when
		// we test it (easiest to just try to get the settings), that it will still be running when we
		// try to upload the settings (if any were specified).
		if applicationPath != "" {
			// `--path | -p` is not a valid option for `rdctl set...`
			return fmt.Errorf("--path %q specified but Rancher Desktop is already running", applicationPath)
		}
		err = doSetCommand(cmd)
		// Success, or being invoked as `set`, ends here; a failed `set`
		// during `start` falls through to launching the app instead.
		if err == nil || cmd.Name() == "set" {
			return err
		}
	}
	// If `set...` failed, try running the original `start` command, if only to give
	// an error message from the point of view of `start` rather than `set`.
	cmd.SilenceUsage = true
	return doStartCommand(cmd)
}
// doStartCommand launches the Rancher Desktop application, converting the
// command's flags into application command-line arguments. When --path was
// not given, the executable is located automatically (and the package-level
// applicationPath is updated as a side effect).
func doStartCommand(cmd *cobra.Command) error {
	commandLineArgs, err := options.GetCommandLineArgsForStartCommand(cmd.Flags())
	if err != nil {
		return err
	}
	if !cmd.Flags().Changed("path") {
		applicationPath, err = utils.GetRDPath()
		if err != nil {
			return fmt.Errorf("failed to locate main Rancher Desktop executable: %w\nplease retry with the --path option", err)
		}
	}
	if noModalDialogs {
		commandLineArgs = append(commandLineArgs, "--no-modal-dialogs")
	}
	return launchApp(applicationPath, commandLineArgs)
}
// launchApp starts the Rancher Desktop executable with the given arguments
// and returns without waiting for it to exit. On macOS the app bundle is
// started through `open -a`, with `--args` separating app arguments.
func launchApp(applicationPath string, commandLineArgs []string) error {
	commandName := applicationPath
	args := commandLineArgs
	if runtime.GOOS == "darwin" {
		commandName = "/usr/bin/open"
		args = []string{"-a", applicationPath}
		if len(commandLineArgs) > 0 {
			args = append(args, "--args")
			args = append(args, commandLineArgs...)
		}
	}
	// Include this output because there's a delay before the UI comes up.
	// Without this line, it might look like the command doesn't work.
	logrus.Infof("About to launch %s %s ...\n", commandName, strings.Join(args, " "))
	cmd := exec.Command(commandName, args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Start()
}
|
package processor
import (
"log"
"github.com/chapterzero/gomposer/provider"
"io"
"math/rand"
"net/http"
"fmt"
"strings"
"os"
"time"
"archive/zip"
"path/filepath"
"sync"
)
// tempDirectory is where downloaded archives are staged before extraction.
const tempDirectory = "/tmp"

// NOTE(review): `i` appears unused in this file — confirm whether it is
// referenced elsewhere in the package before removing.
var i int = 2;

// DownloadResult carries the outcome of one package download: the temp file
// path, the dependency it belongs to, and any error encountered.
type DownloadResult struct {
	status     int
	filePath   string
	err        error
	dependency Dependency
}
// downloadPackagesParallel downloads, unzips and installs every dependency
// concurrently (one goroutine per dependency), blocking until all complete.
// Any failure aborts the whole program via log.Fatalln.
func downloadPackagesParallel(dependencies []Dependency, vendorDirectory string) {
	d := make(chan DownloadResult)
	var wg sync.WaitGroup
	wg.Add(len(dependencies))
	// Seed the RNG used by downloadPackage to pick temp file names.
	rand.Seed(time.Now().UTC().UnixNano())
	for _, dependency := range dependencies {
		go func(dependency Dependency) {
			filePath, err := downloadPackage(dependency.FqPackageName, dependency.Provider, dependency.Version, vendorDirectory)
			downloadResult := DownloadResult{
				status:     1,
				err:        err,
				filePath:   filePath,
				dependency: dependency,
			}
			log.Println("Download complete")
			if downloadResult.err != nil {
				// NOTE(review): close(d) here can race with the closer
				// goroutine below (double close panics), though Fatalln
				// normally exits first — confirm intent.
				close(d)
				log.Fatalln(downloadResult.err)
			}
			log.Println("Unzipping...")
			_, err = unzipPackage(downloadResult.filePath, getPackageVendorDir(
				downloadResult.dependency.FqPackageName,
				vendorDirectory,
			))
			if err != nil {
				close(d)
				log.Fatalln("Error while unzipping package ", err)
			}
			deleteFile(downloadResult.filePath)
			d <- downloadResult
			wg.Done()
		}(dependency)
	}
	// Close d once all workers finish so the drain loop below terminates.
	go func() {
		wg.Wait()
		close(d)
	}()
	// Drain results; the values themselves are unused.
	for range d {
	}
}
// downloadPackage fetches the zip archive for packageName at the given
// version into a randomly-named file under tempDirectory, returning the file
// path. The file may exist (possibly empty) even when an error is returned.
// NOTE(review): vendorDirectory is unused here — it is applied later by the
// caller via getPackageVendorDir.
//
// Fix: the HTTP status was never checked, so a 404/error page was silently
// saved as the archive and only failed later inside unzip; non-200 replies
// are now rejected with a descriptive error.
func downloadPackage(packageName string, provider provider.Provider, version Version, vendorDirectory string) (string, error) {
	downloadUrl := provider.GetDownloadUrl(packageName, version.Value)
	zipPath := fmt.Sprintf("%v/%v_%v.zip", tempDirectory, rand.Intn(300), strings.Replace(packageName, "/", "_", -1))
	log.Println("Downloading package ", downloadUrl)
	// Create the file
	out, err := os.Create(zipPath)
	if err != nil {
		return zipPath, err
	}
	defer out.Close()
	// Get the data
	resp, err := http.Get(downloadUrl)
	if err != nil {
		return zipPath, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return zipPath, fmt.Errorf("unexpected HTTP status %s downloading %s", resp.Status, downloadUrl)
	}
	// Write the body to file
	_, err = io.Copy(out, resp.Body)
	return zipPath, err
}
func deleteFile(fileName string) {
err := os.Remove(fileName)
if err != nil {
log.Println("Failed to delete temporary file ")
} else {
log.Println("Deleted temporary file")
}
}
// unzipPackage extracts the archive at src into dest, stripping the single
// top-level folder that source archives wrap their contents in, and returns
// the list of extracted paths. Entries escaping dest (ZipSlip) are rejected.
// NOTE(review): "skip the first entry" assumes the root folder is the first
// entry in the archive — zip ordering is not guaranteed; confirm inputs.
func unzipPackage(src string, dest string) ([]string, error) {
	var filenames []string
	r, err := zip.OpenReader(src)
	if err != nil {
		return filenames, err
	}
	defer r.Close()
	folderCount := 0
	for _, f := range r.File {
		// skip github bundled folder
		if folderCount == 0 {
			folderCount++
			continue
		}
		// remove first folder
		fNameSplitted := strings.Split(f.Name, "/")
		fNameWithoutRoot := strings.Join(fNameSplitted[1:], "/")
		rc, err := f.Open()
		if err != nil {
			return filenames, err
		}
		// NOTE(review): defer inside a loop holds every entry open until the
		// function returns — acceptable for small archives, but a candidate
		// for an explicit Close per iteration.
		defer rc.Close()
		// Store filename/path for returning and using later on
		fpath := filepath.Join(dest, fNameWithoutRoot)
		// Check for ZipSlip. More Info: http://bit.ly/2MsjAWE
		if !strings.HasPrefix(fpath, filepath.Clean(dest)+string(os.PathSeparator)) {
			return filenames, fmt.Errorf("%s: illegal file path", fpath)
		}
		filenames = append(filenames, fpath)
		if f.FileInfo().IsDir() {
			// Make Folder
			os.MkdirAll(fpath, os.ModePerm)
		} else {
			// Make File
			if err = os.MkdirAll(filepath.Dir(fpath), os.ModePerm); err != nil {
				return filenames, err
			}
			outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
			if err != nil {
				return filenames, err
			}
			_, err = io.Copy(outFile, rc)
			// Close the file without defer to close before next iteration of loop
			outFile.Close()
			if err != nil {
				return filenames, err
			}
		}
	}
	return filenames, nil
}
// getPackageVendorDir returns the install directory for a fully-qualified
// package name under the given vendor root ("<root>/<vendor>/<package>").
func getPackageVendorDir(fqPackageName string, rootVendorDirectory string) string {
	return rootVendorDirectory + "/" + fqPackageName
}
|
package main
import "fmt"
import "io/ioutil"
// main demonstrates reading a file with the error check scoped inside the if
// statement's initializer.
func main() {
	// case1 (conventional form)
	// var b []byte
	// var err error
	// b, err = ioutil.ReadFile("./hello.txt")
	// if err == nil {
	// fmt.Printf("%s", b)
	// }
	// case2
	if b, err := ioutil.ReadFile("./hello.txt"); err == nil {
		// Variables declared in the if initializer are visible in the else /
		// else-if branches, but not outside the if statement.
		fmt.Printf("%s", b)
	}
	// fmt.Println(b) // b is out of scope here: compile error
	// fmt.Println(err) // err is out of scope here: compile error
}
|
package main
import "fmt"
// main prints the result of canPlaceFlowers for a few sample beds.
func main() {
	fmt.Println(canPlaceFlowers([]int{
		0, 0, 0, 0, 1,
	}, 2))
	fmt.Println(canPlaceFlowers([]int{
		1, 0, 0, 0, 0, 0, 1,
	}, 2))
	fmt.Println(canPlaceFlowers([]int{
		0, 0, 1, 0, 1,
	}, 1))
	fmt.Println(canPlaceFlowers([]int{
		0,
	}, 1))
}
// canPlaceFlowers reports whether n new flowers can be planted in flowerbed
// (1 = occupied, 0 = empty) without any two flowers becoming adjacent. The
// bed is not modified.
func canPlaceFlowers(flowerbed []int, n int) bool {
	size := len(flowerbed)
	for idx := 0; idx < size; idx++ {
		if flowerbed[idx] == 0 {
			switch {
			case idx == 0 || idx == size-2:
				// Edge cell followed by an empty cell: one plant fits there.
				if idx+1 < size && flowerbed[idx+1] == 0 {
					n--
				} else if size == 1 {
					// A lone empty cell always takes one plant.
					n--
				}
			default:
				// Three empty cells in a row: plant in the middle and skip it.
				if idx+2 < size && flowerbed[idx+1] == 0 && flowerbed[idx+2] == 0 {
					n--
					idx++
				}
			}
		}
		// All n plants placed: done early.
		if n <= 0 {
			return true
		}
	}
	return n <= 0
}
|
package apikey
const (
	// PermFlush Permission to restart/reload the system including flushing/forcing the queues
	PermFlush string = "flush"
	// PermGenerateInvites Permission to generate invites remotely
	PermGenerateInvites string = "invite"
	// PermAPIKeys Permission to create api keys
	PermAPIKeys string = "apikey"
	// PermMail Permission to send email
	PermMail string = "mail"
)

// AllPermissons is a list of all permissions available for API keys.
// NOTE(review): the name is misspelled ("Permissons"), but it is exported, so
// renaming would break external callers; consider adding a correctly-spelled
// alias and deprecating this one.
var AllPermissons = []string{
	PermAPIKeys,
	PermFlush,
	PermGenerateInvites,
	PermMail,
}
|
package packet
import (
"encoding/json"
ce "github.com/halivor/common/golang/util/errno"
)
// Rsp is the standard API response envelope: an error code/message pair plus
// an optional payload of any JSON-marshalable type.
type Rsp struct {
	ErrCode int         `json:"error_code"`
	ErrMsg  string      `json:"error_message"`
	Data    interface{} `json:"data,omitempty"`
}

// RspRaw mirrors Rsp but keeps the payload as raw JSON, deferring decoding
// to the caller.
type RspRaw struct {
	ErrCode int             `json:"error_code"`
	ErrMsg  string          `json:"error_message,omitempty"`
	Data    json.RawMessage `json:"data,omitempty"`
}
// Resp builds a response envelope from a typed errno.
// NOTE(review): Resp and RespFail are byte-for-byte identical — confirm one
// can be deprecated in favor of the other.
func Resp(e ce.Errno) *Rsp {
	return &Rsp{ErrCode: e.Errno(), ErrMsg: e.Error()}
}

// RespFail builds a failure envelope from a typed errno.
func RespFail(e ce.Errno) *Rsp {
	return &Rsp{ErrCode: e.Errno(), ErrMsg: e.Error()}
}

// RespSucc builds a success envelope (code 0) carrying data.
func RespSucc(data interface{}) *Rsp {
	return &Rsp{ErrCode: 0, ErrMsg: "success", Data: data}
}

// RespString builds a generic-error envelope from a plain message.
func RespString(message string) *Rsp {
	return &Rsp{ErrCode: ce.ERR_ORI, ErrMsg: message}
}

// RespError builds a generic-error envelope from a Go error.
func RespError(e error) *Rsp {
	return &Rsp{ErrCode: ce.ERR_ORI, ErrMsg: e.Error()}
}

// RespRaw builds a success envelope whose payload is pre-encoded JSON.
func RespRaw(data json.RawMessage) *Rsp {
	return &Rsp{ErrCode: 0, ErrMsg: "success", Data: data}
}
|
package model
import "time"
// Book is the xorm-mapped book record exposed through the API.
type Book struct {
	Id int64 `xorm:"pk autoincr int(64)" json:"id" form:"id" example:"1"`
	// Reading status:
	// 0: interested (wishlist)
	// 1: purchased
	// 2: finished reading
	Status   int16     `xorm:"int(16)" json:"status" enums:"0,1,2"`
	Title    string    `xorm:"varchar(40)" json:"title" form:"title" validate:"required" example:"本のタイトル"`
	Content  string    `xorm:"varchar(40)" json:"content" form:"content" validate:"required" example:"本の内容"`
	BoughtAt time.Time `xorm:"datetime(6)" json:"bought_at" format:"date" validate:"required" example:"2021-07-01"`
}
|
// Package google implements OpenID Connect for Google and GSuite.
//
// https://www.pomerium.com/docs/identity-providers/google
// https://developers.google.com/identity/protocols/oauth2/openid-connect
package google
import (
"context"
"fmt"
oidc "github.com/coreos/go-oidc/v3/oidc"
"github.com/pomerium/pomerium/internal/identity/oauth"
pom_oidc "github.com/pomerium/pomerium/internal/identity/oidc"
)
const (
	// Name identifies the Google identity provider
	Name = "google"
	// defaultProviderURL is Google's OIDC issuer / discovery endpoint.
	defaultProviderURL = "https://accounts.google.com"
)

// defaultScopes are requested when the caller configures none.
var defaultScopes = []string{oidc.ScopeOpenID, "profile", "email"}

// unlike other identity providers, google does not support the `offline_access` scope and instead
// requires we set this on a custom uri param. Also, `prompt` must be set to `consent` to ensure
// that our application always receives a refresh token (ask google). And finally, we default to
// having the user select which Google account they'd like to use.
// For more details, please see google's documentation:
//
// https://developers.google.com/identity/protocols/oauth2/web-server#offline
// https://developers.google.com/identity/protocols/oauth2/openid-connect#authenticationuriparameters
var defaultAuthCodeOptions = map[string]string{"prompt": "select_account consent", "access_type": "offline"}
// Provider is a Google implementation of the Authenticator interface,
// delegating everything to the generic OIDC provider.
type Provider struct {
	*pom_oidc.Provider
}
// New instantiates an OpenID Connect (OIDC) session with Google. Missing
// provider URL and scopes are filled with Google defaults, and the Google
// offline/consent auth-code options are applied unless the caller supplies
// their own.
func New(ctx context.Context, o *oauth.Options) (*Provider, error) {
	if o.ProviderURL == "" {
		o.ProviderURL = defaultProviderURL
	}
	if len(o.Scopes) == 0 {
		o.Scopes = defaultScopes
	}
	genericOidc, err := pom_oidc.New(ctx, o)
	if err != nil {
		return nil, fmt.Errorf("%s: failed creating oidc provider: %w", Name, err)
	}
	p := &Provider{Provider: genericOidc}
	p.AuthCodeOptions = defaultAuthCodeOptions
	if o.AuthCodeOptions != nil {
		p.AuthCodeOptions = o.AuthCodeOptions
	}
	return p, nil
}
// Name returns the provider name ("google").
func (p *Provider) Name() string {
	return Name
}
|
package shared
import (
"context"
"go.mercari.io/datastore"
)
// Compile-time check that MiddlewareBridge satisfies datastore.Middleware.
var _ datastore.Middleware = &MiddlewareBridge{}

// MiddlewareBridge chains a list of middlewares, terminating at the original
// client/transaction/iterator bridges once the chain is exhausted.
type MiddlewareBridge struct {
	ocb  OriginalClientBridge
	otb  OriginalTransactionBridge
	oib  OriginalIteratorBridge
	mws  []datastore.Middleware
	Info *datastore.MiddlewareInfo
}
// OriginalClientBridge exposes the underlying client operations that run
// once every middleware in the chain has been applied.
type OriginalClientBridge interface {
	AllocateIDs(ctx context.Context, keys []datastore.Key) ([]datastore.Key, error)
	PutMulti(ctx context.Context, keys []datastore.Key, psList []datastore.PropertyList) ([]datastore.Key, error)
	GetMulti(ctx context.Context, keys []datastore.Key, psList []datastore.PropertyList) error
	DeleteMulti(ctx context.Context, keys []datastore.Key) error
	Run(ctx context.Context, q datastore.Query, qDump *datastore.QueryDump) datastore.Iterator
	GetAll(ctx context.Context, q datastore.Query, qDump *datastore.QueryDump, psList *[]datastore.PropertyList) ([]datastore.Key, error)
	Count(ctx context.Context, q datastore.Query, qDump *datastore.QueryDump) (int, error)
}
// OriginalTransactionBridge exposes the underlying in-transaction operations
// that run once every middleware in the chain has been applied.
type OriginalTransactionBridge interface {
	PutMulti(keys []datastore.Key, psList []datastore.PropertyList) ([]datastore.PendingKey, error)
	GetMulti(keys []datastore.Key, psList []datastore.PropertyList) error
	DeleteMulti(keys []datastore.Key) error
}
// OriginalIteratorBridge exposes the underlying iterator advance operation
// that runs once every middleware in the chain has been applied.
type OriginalIteratorBridge interface {
	Next(iter datastore.Iterator, ps *datastore.PropertyList) (datastore.Key, error)
}
// NewCacheBridge builds a MiddlewareBridge over the given terminal bridges
// and middleware chain, wiring info.Next to the newly created bridge.
func NewCacheBridge(info *datastore.MiddlewareInfo, ocb OriginalClientBridge, otb OriginalTransactionBridge, oib OriginalIteratorBridge, mws []datastore.Middleware) *MiddlewareBridge {
	bridge := &MiddlewareBridge{
		Info: info,
		ocb:  ocb,
		otb:  otb,
		oib:  oib,
		mws:  mws,
	}
	bridge.Info.Next = bridge
	return bridge
}
// AllocateIDs passes the request through the middleware chain, falling back
// to the original client once no middleware remains.
func (cb *MiddlewareBridge) AllocateIDs(info *datastore.MiddlewareInfo, keys []datastore.Key) ([]datastore.Key, error) {
	if len(cb.mws) == 0 {
		return cb.ocb.AllocateIDs(info.Context, keys)
	}
	rest := &MiddlewareBridge{
		Info: cb.Info,
		ocb:  cb.ocb,
		otb:  cb.otb,
		oib:  cb.oib,
		mws:  cb.mws[1:],
	}
	rest.Info.Next = rest
	return cb.mws[0].AllocateIDs(rest.Info, keys)
}
// PutMultiWithoutTx passes the non-transactional put through the middleware
// chain, falling back to the original client once no middleware remains.
func (cb *MiddlewareBridge) PutMultiWithoutTx(info *datastore.MiddlewareInfo, keys []datastore.Key, psList []datastore.PropertyList) ([]datastore.Key, error) {
	if len(cb.mws) == 0 {
		return cb.ocb.PutMulti(info.Context, keys, psList)
	}
	rest := &MiddlewareBridge{
		Info: cb.Info,
		ocb:  cb.ocb,
		otb:  cb.otb,
		oib:  cb.oib,
		mws:  cb.mws[1:],
	}
	rest.Info.Next = rest
	return cb.mws[0].PutMultiWithoutTx(rest.Info, keys, psList)
}
// PutMultiWithTx passes the transactional put through the middleware chain,
// falling back to the original transaction once no middleware remains.
func (cb *MiddlewareBridge) PutMultiWithTx(info *datastore.MiddlewareInfo, keys []datastore.Key, psList []datastore.PropertyList) ([]datastore.PendingKey, error) {
	if len(cb.mws) == 0 {
		return cb.otb.PutMulti(keys, psList)
	}
	rest := &MiddlewareBridge{
		Info: cb.Info,
		ocb:  cb.ocb,
		otb:  cb.otb,
		oib:  cb.oib,
		mws:  cb.mws[1:],
	}
	rest.Info.Next = rest
	return cb.mws[0].PutMultiWithTx(rest.Info, keys, psList)
}
// GetMultiWithoutTx passes the non-transactional get through the middleware
// chain, falling back to the original client once no middleware remains.
func (cb *MiddlewareBridge) GetMultiWithoutTx(info *datastore.MiddlewareInfo, keys []datastore.Key, psList []datastore.PropertyList) error {
	if len(cb.mws) == 0 {
		return cb.ocb.GetMulti(info.Context, keys, psList)
	}
	rest := &MiddlewareBridge{
		Info: cb.Info,
		ocb:  cb.ocb,
		otb:  cb.otb,
		oib:  cb.oib,
		mws:  cb.mws[1:],
	}
	rest.Info.Next = rest
	return cb.mws[0].GetMultiWithoutTx(rest.Info, keys, psList)
}
// GetMultiWithTx passes the transactional get through the middleware chain,
// falling back to the original transaction once no middleware remains.
func (cb *MiddlewareBridge) GetMultiWithTx(info *datastore.MiddlewareInfo, keys []datastore.Key, psList []datastore.PropertyList) error {
	if len(cb.mws) == 0 {
		return cb.otb.GetMulti(keys, psList)
	}
	rest := &MiddlewareBridge{
		Info: cb.Info,
		ocb:  cb.ocb,
		otb:  cb.otb,
		oib:  cb.oib,
		mws:  cb.mws[1:],
	}
	rest.Info.Next = rest
	return cb.mws[0].GetMultiWithTx(rest.Info, keys, psList)
}
// DeleteMultiWithoutTx passes the non-transactional delete through the
// middleware chain, falling back to the original client once no middleware
// remains.
func (cb *MiddlewareBridge) DeleteMultiWithoutTx(info *datastore.MiddlewareInfo, keys []datastore.Key) error {
	if len(cb.mws) == 0 {
		return cb.ocb.DeleteMulti(info.Context, keys)
	}
	rest := &MiddlewareBridge{
		Info: cb.Info,
		ocb:  cb.ocb,
		otb:  cb.otb,
		oib:  cb.oib,
		mws:  cb.mws[1:],
	}
	rest.Info.Next = rest
	return cb.mws[0].DeleteMultiWithoutTx(rest.Info, keys)
}
// DeleteMultiWithTx passes the transactional delete through the middleware
// chain, falling back to the original transaction once no middleware remains.
func (cb *MiddlewareBridge) DeleteMultiWithTx(info *datastore.MiddlewareInfo, keys []datastore.Key) error {
	if len(cb.mws) == 0 {
		return cb.otb.DeleteMulti(keys)
	}
	rest := &MiddlewareBridge{
		Info: cb.Info,
		ocb:  cb.ocb,
		otb:  cb.otb,
		oib:  cb.oib,
		mws:  cb.mws[1:],
	}
	rest.Info.Next = rest
	return cb.mws[0].DeleteMultiWithTx(rest.Info, keys)
}
// PostCommit forwards the commit notification through the middleware chain;
// once no middleware remains there is nothing left to notify.
func (cb *MiddlewareBridge) PostCommit(info *datastore.MiddlewareInfo, tx datastore.Transaction, commit datastore.Commit) error {
	if len(cb.mws) == 0 {
		return nil
	}
	rest := &MiddlewareBridge{
		Info: cb.Info,
		ocb:  cb.ocb,
		otb:  cb.otb,
		oib:  cb.oib,
		mws:  cb.mws[1:],
	}
	rest.Info.Next = rest
	return cb.mws[0].PostCommit(rest.Info, tx, commit)
}
// PostRollback forwards the rollback notification through the middleware
// chain; once no middleware remains there is nothing left to notify.
func (cb *MiddlewareBridge) PostRollback(info *datastore.MiddlewareInfo, tx datastore.Transaction) error {
	if len(cb.mws) == 0 {
		return nil
	}
	rest := &MiddlewareBridge{
		Info: cb.Info,
		ocb:  cb.ocb,
		otb:  cb.otb,
		oib:  cb.oib,
		mws:  cb.mws[1:],
	}
	rest.Info.Next = rest
	return cb.mws[0].PostRollback(rest.Info, tx)
}
// Run passes the query through the middleware chain, falling back to the
// original client once no middleware remains.
func (cb *MiddlewareBridge) Run(info *datastore.MiddlewareInfo, q datastore.Query, qDump *datastore.QueryDump) datastore.Iterator {
	if len(cb.mws) == 0 {
		return cb.ocb.Run(info.Context, q, qDump)
	}
	rest := &MiddlewareBridge{
		Info: cb.Info,
		ocb:  cb.ocb,
		otb:  cb.otb,
		oib:  cb.oib,
		mws:  cb.mws[1:],
	}
	rest.Info.Next = rest
	return cb.mws[0].Run(rest.Info, q, qDump)
}
// GetAll passes the query through the middleware chain, falling back to the
// original client once no middleware remains.
func (cb *MiddlewareBridge) GetAll(info *datastore.MiddlewareInfo, q datastore.Query, qDump *datastore.QueryDump, psList *[]datastore.PropertyList) ([]datastore.Key, error) {
	if len(cb.mws) == 0 {
		return cb.ocb.GetAll(info.Context, q, qDump, psList)
	}
	rest := &MiddlewareBridge{
		Info: cb.Info,
		ocb:  cb.ocb,
		otb:  cb.otb,
		oib:  cb.oib,
		mws:  cb.mws[1:],
	}
	rest.Info.Next = rest
	return cb.mws[0].GetAll(rest.Info, q, qDump, psList)
}
// Next advances the iterator through the middleware chain, falling back to
// the original iterator once no middleware remains.
func (cb *MiddlewareBridge) Next(info *datastore.MiddlewareInfo, q datastore.Query, qDump *datastore.QueryDump, iter datastore.Iterator, ps *datastore.PropertyList) (datastore.Key, error) {
	if len(cb.mws) == 0 {
		return cb.oib.Next(iter, ps)
	}
	rest := &MiddlewareBridge{
		Info: cb.Info,
		ocb:  cb.ocb,
		otb:  cb.otb,
		oib:  cb.oib,
		mws:  cb.mws[1:],
	}
	rest.Info.Next = rest
	return cb.mws[0].Next(rest.Info, q, qDump, iter, ps)
}
// Count passes the query through the middleware chain, falling back to the
// original client once no middleware remains.
func (cb *MiddlewareBridge) Count(info *datastore.MiddlewareInfo, q datastore.Query, qDump *datastore.QueryDump) (int, error) {
	if len(cb.mws) == 0 {
		return cb.ocb.Count(info.Context, q, qDump)
	}
	rest := &MiddlewareBridge{
		Info: cb.Info,
		ocb:  cb.ocb,
		otb:  cb.otb,
		oib:  cb.oib,
		mws:  cb.mws[1:],
	}
	rest.Info.Next = rest
	return cb.mws[0].Count(rest.Info, q, qDump)
}
|
package utils
import (
"os"
cli "github.com/jawher/mow.cli"
logging "github.com/op/go-logging"
)
var (
	// formatter renders log records with ANSI colors for terminal output.
	formatter = logging.MustStringFormatter(
		`%{color}%{time:15:04:05.000} %{shortpkg}.%{shortfunc} [%{level}]%{color:reset} %{message}`)
	// formatterNoColor is the plain variant used when colors are disabled.
	formatterNoColor = logging.MustStringFormatter(
		`%{time:15:04:05.000} %{shortpkg}.%{shortfunc} [%{level}] %{message}`)
)
// SetupLogging registers the --log-no-color and --log-level CLI options on
// app and installs an app.Before hook that configures the global logging
// backend, formatter and level from those options.
//
// NOTE(review): this overwrites any previously assigned app.Before hook.
func SetupLogging(app *cli.Cli, logger *logging.Logger) {
	logNoColor := app.Bool(cli.BoolOpt{
		Name:   "log-no-color",
		Value:  false,
		Desc:   "disable logging colors",
		EnvVar: "GREMLIN_LOG_NO_COLOR",
	})
	logLevel := app.String(cli.StringOpt{
		Name:   "log-level",
		Value:  "DEBUG",
		Desc:   "logging level",
		EnvVar: "GREMLIN_LOG_LEVEL",
	})
	app.Before = func() {
		stdBackend := logging.NewLogBackend(os.Stdout, "", 0)
		logging.SetBackend(stdBackend)
		if *logNoColor {
			logging.SetFormatter(formatterNoColor)
		} else {
			logging.SetFormatter(formatter)
		}
		level, err := logging.LogLevel(*logLevel)
		if err != nil {
			logger.Errorf("Failed setting log level %s, using ERROR", *logLevel)
			// Bug fix: on parse failure LogLevel returns the zero Level,
			// which is CRITICAL, not ERROR as the message claims. Set the
			// documented fallback explicitly.
			level = logging.ERROR
		}
		logging.SetLevel(level, logger.Module)
	}
}
|
package api
import (
"errors"
"reflect"
"strconv"
"strings"
"github.com/fatih/structs"
"github.com/oleiade/lane"
"github.com/sapk/sca/pkg/tools"
log "github.com/sirupsen/logrus"
"github.com/zabawaba99/firego"
)
// API interface for sca backend. It holds credentials for the backend
// (accessed through the firego client, so presumably a Firebase-style
// database — see exectue) plus the last-sent state and the pending queue.
// NOTE(review): underscore-prefixed field names are non-idiomatic Go.
type API struct {
	APIKey       string // key used with RefreshToken to mint access tokens
	BaseURL      string // base URL of the backend; data lives under BaseURL+"/data/"
	RefreshToken string // long-lived token exchanged for access tokens
	AccessToken  string // current short-lived auth token (renewed on expiry)
	// _data caches the state from the last successful Send, used for diffing.
	_data map[string]interface{}
	// _queue buffers pending QueueItem actions until executeQueue drains it.
	_queue *lane.Deque
	//_update chan bool
	//TODO add queue
}
// QueueItem represents one pending action to send to the API.
type QueueItem struct {
	Type string      // action type: "SET", "DEL" or "UPDT"
	Path string      // path relative to the API data root
	Data interface{} // payload for SET/UPDT; nil for DEL
}
// New constructs an API client. It validates the required parameters and
// exchanges the refresh token for an initial access token before returning.
func New(apiKey, refreshToken, baseURL string) (*API, error) {
	log.WithFields(log.Fields{
		"apiKey":       apiKey,
		"refreshToken": refreshToken,
		"baseURL":      baseURL,
	}).Debug("Init new API")

	// Check params (error strings lowercased per Go convention; grammar fixed).
	if apiKey == "" {
		return nil, errors.New("you need to set an apiKey")
	}
	if refreshToken == "" {
		return nil, errors.New("you need to set a refreshToken")
	}
	if baseURL == "" {
		return nil, errors.New("you need to set a baseURL")
	}

	// Generate first access token.
	accessToken, err := apiGetAuthToken(apiKey, refreshToken)
	if err != nil {
		return nil, err
	}
	return &API{APIKey: apiKey, BaseURL: baseURL, RefreshToken: refreshToken, AccessToken: accessToken, _queue: lane.NewDeque()}, nil
}
// Send pushes data to the API with deduction of common values since the last
// update: the first call queues the complete object, later calls queue only
// the diff against the previously sent state. The queued actions are then
// executed via executeQueue.
//
// data must contain a string "UUID" entry, which is used as the root path.
func (a *API) Send(data map[string]interface{}) error {
	uuid, ok := data["UUID"].(string)
	if !ok {
		// Bug fix: the previous unchecked type assertion panicked when UUID
		// was missing or not a string; return an error instead.
		return errors.New("data must contain a string UUID entry")
	}
	if a._data == nil { //No data of backend so sending the complet obj
		a._data = a.update(uuid, data).(map[string]interface{}) //Save state
		//TODO -> queue.Enqueue(&QueueItem{Type: "set", Data: data})
		log.WithFields(log.Fields{
			"data_bytes": tools.SizeOfJSON(data), //Debug
		}).Info("Add complete messages to queue")
	} else {
		if reflect.DeepEqual(a._data, data) {
			log.Info("Nothing to update data are identical from last send.")
			return nil
		}
		//Debug
		sizeBeforeCleaning := tools.SizeOfJSON(data)
		cleanData, sendedData := a.sendDeDuplicateData(uuid, a._data, data)
		//TODO at each step -> queue.Enqueue(&QueueItem{Type: "set", Data: data})
		sizeAfterCleaning := tools.SizeOfJSON(cleanData)
		log.WithFields(log.Fields{
			"data_bytes": sizeBeforeCleaning,
			"send_bytes": sizeAfterCleaning,
		}).Info("Sending update messages")
		//log.Debug(cleanData)
		a._data = sendedData //Save state
	}
	return a.executeQueue()
}
// sendUpdate flushes the accumulated multi-path update map when it is
// non-empty and returns a fresh empty map; an empty input is returned as-is.
func (a *API) sendUpdate(updates map[string]interface{}) map[string]interface{} {
	if len(updates) == 0 {
		return updates
	}
	log.WithFields(log.Fields{
		"size": len(updates),
	}).Debug("sendUpdate") //Send update before set
	a.exectue("UPDT", "", updates)
	return map[string]interface{}{}
}
// executeQueue drains the pending action queue in FIFO order. Consecutive
// UPDT items are coalesced into one multi-path update, which is flushed
// whenever a SET or DEL needs ordering, and once more at the end.
//
// It currently always returns nil; the error return is kept so the signature
// can report failures in the future without breaking callers.
func (a *API) executeQueue() error {
	updates := map[string]interface{}{}
	size := a._queue.Size()
	for i := 0; i < size; i++ {
		value := a._queue.Shift()
		item := value.(*QueueItem)
		switch item.Type {
		case "SET":
			updates = a.sendUpdate(updates)
			a.exectue("SET", item.Path, item.Data)
		case "DEL":
			updates = a.sendUpdate(updates)
			a.exectue("DEL", item.Path, nil)
		case "UPDT":
			updates[item.Path] = item.Data
		default:
			log.WithFields(log.Fields{
				"item": item,
			}).Debug("Unhandled item type in queue.")
		}
	}
	// Flush any trailing coalesced updates. The returned empty map is not
	// used again (the original's final assignment was ineffectual).
	a.sendUpdate(updates)
	return nil
}
// set enqueues a SET action for path carrying data, and returns data
// unchanged so callers can store it as the sent state.
func (a *API) set(path string, data interface{}) interface{} {
	log.WithFields(log.Fields{
		"path":      path,
		"queueSize": a._queue.Size(),
	}).Debug("API.set -> add to queue")
	item := &QueueItem{Type: "SET", Path: path, Data: data}
	a._queue.Append(item)
	return data
}
// remove enqueues a DEL action for path.
func (a *API) remove(path string) {
	log.WithFields(log.Fields{
		"path":      path,
		"queueSize": a._queue.Size(),
	}).Debug("API.remove -> add to queue")
	item := &QueueItem{Type: "DEL", Path: path}
	a._queue.Append(item)
}
// update enqueues an UPDT action for path carrying data, and returns data
// unchanged so callers can store it as the sent state.
func (a *API) update(path string, data interface{}) interface{} {
	log.WithFields(log.Fields{
		"path":      path,
		"queueSize": a._queue.Size(),
	}).Debug("API.update -> add to queue")
	item := &QueueItem{Type: "UPDT", Path: path, Data: data}
	a._queue.Append(item)
	return data
}
// exectue performs one request against the backend at BaseURL+"/data/"+path
// using the given method ("SET", "DEL" or "UPDT").
//
// Error handling:
//   - expired auth token: renew the access token and retry the same request
//     (NOTE(review): unbounded recursion if renewal keeps producing expired
//     tokens);
//   - internal server error: log a warning and skip this update;
//   - anything else: log.Fatal, which terminates the process.
//
// NOTE(review): the name is a typo of "execute", kept because callers
// throughout this file rely on it.
func (a *API) exectue(method string, path string, data interface{}) {
	log.WithFields(log.Fields{
		//"api": a,
		"method": method,
		"path":   path,
		//"keys": data,
		//"data": data,
	}).Debug("API.execute")
	f := firego.New(a.BaseURL+"/data/"+path, nil)
	f.Auth(a.AccessToken)
	defer f.Unauth()

	var err error
	switch method {
	case "SET":
		err = f.Set(data)
	case "DEL":
		err = f.Remove()
	case "UPDT":
		err = f.Update(data)
	}
	if err == nil {
		return
	}

	//Handleling errors
	if strings.Contains(err.Error(), "Auth token is expired") {
		log.WithFields(log.Fields{
			"api.AccessToken": a.AccessToken,
		}).Debug("Auth token is expired -> re-newing AccessToken")
		a.AccessToken, err = apiGetAuthToken(a.APIKey, a.RefreshToken)
		if err != nil {
			log.WithFields(log.Fields{
				"api": a,
			}).Debug("Failed to re-new AccessToken")
		}
		a.exectue(method, path, data) //Redo
		return
	}
	if strings.Contains(err.Error(), "Internal server error.") {
		log.WithFields(log.Fields{
			"api.AccessToken": a.AccessToken,
			"method":          method,
			"path":            path,
			"data":            data,
			"err":             err,
		}).Warning("API respond with : Internal server error. -> skipping update")
		//TODO force set of _data to do not have any inconsistency
		// Bug fix: previously control fell through to the Fatal below (the
		// guarding else was commented out), killing the process despite the
		// "skipping update" message. Return explicitly instead.
		return
	}
	log.WithFields(log.Fields{
		//"api": a,
		"method": method,
		"path":   path,
		"data":   data,
		"err":    err,
	}).Fatal("Unhandled error in api.execut()") //TODO handle all errors
}
// sendDeDuplicateData recursively diffs old against new and queues the
// minimal set of remove/update actions under path.
//
// It returns two maps:
//   - the first ("ret") contains only the values actually queued for update
//     (the diff), used by the caller for size statistics;
//   - the second ("realRet") contains the full new state (diff plus unchanged
//     values), suitable for storing as the last-sent snapshot.
//
// NOTE(review): the parameter named "new" shadows Go's builtin new() inside
// this function.
func (a *API) sendDeDuplicateData(path string, old map[string]interface{}, new map[string]interface{}) (map[string]interface{}, map[string]interface{}) {
	log.WithFields(log.Fields{
		"path": path,
		//"old": old,
		//"new": new,
	}).Debug("API.sendDeDuplicateData")
	ret := map[string]interface{}{}
	realRet := map[string]interface{}{}
	//Remove old key not in new
	for key := range old {
		if _, ok := new[key]; !ok { //Key not in new we should remove
			a.remove(path + "/" + key)
		}
	}
	//Set new key not in old
	//Parse key in new and old
	for key, newValue := range new {
		if oldValue, ok := old[key]; !ok { //Key not in old we should set
			ret[key] = a.update(path+"/"+key, newValue)
			realRet[key] = ret[key]
		} else { //Key is in new and old -> we recurse or set if final obj differ
			if !reflect.DeepEqual(oldValue, newValue) { //new differ from old
				if structs.IsStruct(oldValue) && structs.IsStruct(newValue) { //We have a object -> rescursive
					ret[key], realRet[key] = a.sendDeDuplicateData(path+"/"+key, structs.Map(oldValue), structs.Map(newValue)) //Store in result for stat
				} else {
					// Not a struct pair: dispatch on the new value's dynamic type.
					switch newValue.(type) {
					case bool, int, int32, int64, uint, uint32, uint64, float32, float64, string, []string: //Simple array are ordered so if there a diff we update
						ret[key] = a.update(path+"/"+key, newValue)
						realRet[key] = ret[key]
					case [][2]string:
						// t is of type array/slice
						ret[key] = a.update(path+"/"+key, newValue)
						realRet[key] = ret[key]
						//TODO send only necessary update
					case []interface{}:
						// t is of type array/slice
						// Element-wise diff over the common prefix, then
						// remove the old tail or add the new tail.
						newValueArr := newValue.([]interface{})
						oldValueArr := oldValue.([]interface{})
						commonMin := tools.Min(len(newValueArr), len(oldValueArr))
						list := make([]interface{}, len(newValueArr))
						listR := make([]interface{}, len(newValueArr))
						for i := 0; i < commonMin; i++ { //Compare common
							if structs.IsStruct(oldValueArr[i]) && structs.IsStruct(newValueArr[i]) { //We have a object -> rescursive
								list[i], listR[i] = a.sendDeDuplicateData(path+"/"+key+"/"+strconv.Itoa(i), structs.Map(oldValueArr[i]), structs.Map(newValueArr[i]))
							} else {
								switch newValueArr[i].(type) {
								case map[string]interface{}: //Allready map
									list[i], listR[i] = a.sendDeDuplicateData(path+"/"+key+"/"+strconv.Itoa(i), oldValueArr[i].(map[string]interface{}), newValueArr[i].(map[string]interface{}))
								default: //Force update
									log.WithFields(log.Fields{
										//"api": a,
										"path": path + "/" + key + "/" + strconv.Itoa(i),
										"data": newValueArr[i],
									}).Debug("Force api.update() on data since it seems to not be a struct")
									list[i] = a.update(path+"/"+key+"/"+strconv.Itoa(i), newValueArr[i])
									listR[i] = list[i]
								}
							}
						}
						for i := commonMin; i < len(oldValueArr); i++ { //Remove
							a.remove(path + "/" + key + "/" + strconv.Itoa(i))
						}
						for i := commonMin; i < len(newValueArr); i++ { //Add
							list[i] = a.update(path+"/"+key+"/"+strconv.Itoa(i), newValueArr[i])
							listR[i] = list[i]
							/*
								log.WithFields(log.Fields{
									//"api": a,
									"path": path + "/" + key + "/" + strconv.Itoa(i),
									"data": newValueArr[i],
								}).Debug("Force api.set() on data since it seems over old array size")
							*/
						}
						ret[key] = list
						realRet[key] = listR
					case map[string]interface{}:
						//q.Q(path, newValue)
						ret[key], realRet[key] = a.sendDeDuplicateData(path+"/"+key, oldValue.(map[string]interface{}), newValue.(map[string]interface{})) //Store in result for stat
					default:
						//q.Q(path, newValue)
						log.WithFields(log.Fields{
							"path": path,
							//"old": old,
							//"new": new,
						}).Warn("Unhandled type in api.sendDeDuplicateData() falling back to coping all object") //TODO handle all type
						ret[key] = a.update(path+"/"+key, newValue)
						realRet[key] = ret[key]
					}
				}
			} else {
				//Old and New are equal we update real global data object
				realRet[key] = newValue
			}
		}
	}
	return ret, realRet
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.