text stringlengths 11 4.05M |
|---|
package cmd
import (
"easyctl/sys"
"easyctl/util"
"fmt"
"github.com/spf13/cobra"
)
// CloseServiceForever reports whether the targeted service should be
// disabled permanently (true) or only for the current session (false).
// It is bound to the --forever/-f flag of both the selinux and firewalld
// subcommands.
var CloseServiceForever bool

// closeValidArgs lists the positional arguments accepted by the close command.
var closeValidArgs = []string{"firewalld", "selinux", "desktop"}
func init() {
closeSeLinuxCmd.Flags().BoolVarP(&CloseServiceForever, "forever", "f", false, "Service closed duration.")
closeFirewalldCmd.Flags().BoolVarP(&CloseServiceForever, "forever", "f", false, "Service closed duration.")
closeCmd.AddCommand(closeSeLinuxCmd)
closeCmd.AddCommand(closeFirewalldCmd)
rootCmd.AddCommand(closeCmd)
}
// closeCmd is the parent "close" command. The real work happens in its
// subcommands, so its Run is a deliberate no-op; it only validates that
// exactly one of the values in closeValidArgs is supplied.
var closeCmd = &cobra.Command{
	Use:   "close [OPTIONS] [flags]",
	Short: "close some service through easyctl",
	Example: "\neasyctl close firewalld" +
		"\neasyctl close firewalld --forever=true" +
		"\neasyctl close selinux" +
		"\neasyctl close selinux --forever=true",
	Run: func(cmd *cobra.Command, args []string) {
	},
	ValidArgs: closeValidArgs,
	Args:      cobra.ExactValidArgs(1),
}
// closeSeLinuxCmd implements "easyctl close selinux": temporarily disables
// SELinux, or permanently when --forever is set.
var closeSeLinuxCmd = &cobra.Command{
	Use:   "selinux [flags]",
	Short: "close selinux through easyctl",
	Example: "\neasyctl close selinux 暂时关闭selinux" +
		"\neasyctl close selinux --forever=true 永久关闭selinux",
	Run: func(cmd *cobra.Command, args []string) {
		closeSeLinux()
	},
	ValidArgs: closeValidArgs,
}
// closeFirewalldCmd implements "easyctl close firewalld": temporarily stops
// the firewall, or disables it permanently when --forever/-f is set.
var closeFirewalldCmd = &cobra.Command{
	Use:   "firewalld [flags]",
	Short: "close firewalld through easyctl",
	Example: "\neasyctl close firewalld 临时关闭firewalld" +
		"\neasyctl close firewalld --forever=true 永久关闭firewalld" +
		"\neasyctl close firewalld -f 永久关闭firewalld",
	Run: func(cmd *cobra.Command, args []string) {
		closeFirewalld()
	},
	ValidArgs: closeValidArgs,
}
// 关闭selinux
func closeSeLinux() {
fmt.Printf("#### 关闭selinux服务 ####\n\n")
sys.CloseSeLinux(CloseServiceForever)
}
// closeFirewalld stops the firewalld service, permanently when
// CloseServiceForever is set.
func closeFirewalld() {
	util.PrintTitleMsg("关闭防火墙服务")
	sys.CloseFirewalld(CloseServiceForever)
}
|
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statistics
import (
"context"
"math"
"testing"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/collate"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/stretchr/testify/require"
)
// testStatisticsSamples is the shared fixture for the statistics subtests:
// a pre-collected sample set plus two replayable record sets, one acting as
// a generic column (rc) and one as an integer primary key (pk).
type testStatisticsSamples struct {
	count   int               // total number of rows represented by the fixture
	samples []*SampleItem     // pre-collected sample items for collector-based builds
	rc      sqlexec.RecordSet // record set used to build column/index statistics
	pk      sqlexec.RecordSet // record set of primary-key values
}
// recordSet is an in-memory sqlexec.RecordSet over a fixed datum slice,
// used as test input for the statistics builders. It can be rewound via
// Close and replayed.
type recordSet struct {
	firstIsID bool          // when set, a synthetic 1-based row ID is prepended to each row
	data      []types.Datum // one datum per logical row
	count     int           // number of rows to emit
	cursor    int           // index of the next row to emit (0-based)
	fields    []*ast.ResultField
}
// Fields implements sqlexec.RecordSet; it returns the result fields
// previously installed by setFields.
func (r *recordSet) Fields() []*ast.ResultField {
	return r.fields
}
// setFields replaces r.fields with one ResultField per given field-type tag.
func (r *recordSet) setFields(tps ...uint8) {
	fields := make([]*ast.ResultField, 0, len(tps))
	for _, tp := range tps {
		field := &ast.ResultField{Column: new(model.ColumnInfo)}
		field.Column.FieldType = *types.NewFieldType(tp)
		fields = append(fields, field)
	}
	r.fields = fields
}
// getNext returns the datums of the next row, or nil once all rows have
// been emitted. When firstIsID is set, a synthetic 1-based row ID datum is
// prepended before the data value.
func (r *recordSet) getNext() []types.Datum {
	if r.cursor == r.count {
		return nil
	}
	r.cursor++
	var row []types.Datum
	if r.firstIsID {
		row = append(row, types.NewIntDatum(int64(r.cursor)))
	}
	return append(row, r.data[r.cursor-1])
}
// Next implements sqlexec.RecordSet: it resets req and fills it with at
// most one row. An empty chunk signals end of data; Next never fails.
func (r *recordSet) Next(_ context.Context, req *chunk.Chunk) error {
	req.Reset()
	if row := r.getNext(); row != nil {
		for i := range row {
			req.AppendDatum(i, &row[i])
		}
	}
	return nil
}
// NewChunk implements sqlexec.RecordSet: it allocates a chunk matching the
// record set's field types with a fixed capacity of 32 rows.
func (r *recordSet) NewChunk(chunk.Allocator) *chunk.Chunk {
	tps := make([]*types.FieldType, len(r.fields))
	for i, field := range r.fields {
		tps[i] = &field.Column.FieldType
	}
	return chunk.NewChunkWithCapacity(tps, 32)
}
// Close implements sqlexec.RecordSet. It releases nothing; it only rewinds
// the cursor so the in-memory data can be replayed by a later consumer.
func (r *recordSet) Close() error {
	r.cursor = 0
	return nil
}
// buildPK builds a histogram for an integer primary-key column by feeding
// every row of records into a sorted builder. It returns the total row
// count seen and the built histogram.
func buildPK(sctx sessionctx.Context, numBuckets, id int64, records sqlexec.RecordSet) (int64, *Histogram, error) {
	b := NewSortedBuilder(sctx.GetSessionVars().StmtCtx, numBuckets, id, types.NewFieldType(mysql.TypeLonglong), Version1)
	ctx := context.Background()
	for {
		// A fresh chunk per batch; an empty batch marks end of data.
		req := records.NewChunk(nil)
		err := records.Next(ctx, req)
		if err != nil {
			return 0, nil, errors.Trace(err)
		}
		if req.NumRows() == 0 {
			break
		}
		it := chunk.NewIterator4Chunk(req)
		for row := it.Begin(); row != it.End(); row = it.Next() {
			datums := RowToDatums(row, records.Fields())
			// Only the first column (the PK value) is fed to the builder.
			err = b.Iterate(datums[0])
			if err != nil {
				return 0, nil, errors.Trace(err)
			}
		}
	}
	return b.Count, b.hist, nil
}
// mockHistogram builds a histogram covering num consecutive integers
// starting at lower: one single-value bucket per integer, each with a
// repeat count of 1 and a cumulative count of i+1.
func mockHistogram(lower, num int64) *Histogram {
	h := NewHistogram(0, num, 0, 0, types.NewFieldType(mysql.TypeLonglong), int(num), 0)
	for i := int64(0); i < num; i++ {
		// Note: this datum pair shadows the lower parameter inside the loop body.
		lower, upper := types.NewIntDatum(lower+i), types.NewIntDatum(lower+i)
		h.AppendBucket(&lower, &upper, i+1, 1)
	}
	return h
}
// TestMergeHistogram checks MergeHistograms over pairs of mock histograms:
// the merged NDV, bucket count, total row count, and the first/last bucket
// bounds must match the expectations for disjoint and overlapping inputs.
func TestMergeHistogram(t *testing.T) {
	tests := []struct {
		leftLower  int64
		leftNum    int64
		rightLower int64
		rightNum   int64
		bucketNum  int
		ndv        int64
	}{
		// Empty left side: the merge is just the right histogram.
		{
			leftLower:  0,
			leftNum:    0,
			rightLower: 0,
			rightNum:   1,
			bucketNum:  1,
			ndv:        1,
		},
		// Disjoint adjacent ranges: NDV adds up.
		{
			leftLower:  0,
			leftNum:    200,
			rightLower: 200,
			rightNum:   200,
			bucketNum:  200,
			ndv:        400,
		},
		// One overlapping value (199): NDV is one less than the sum.
		{
			leftLower:  0,
			leftNum:    200,
			rightLower: 199,
			rightNum:   200,
			bucketNum:  200,
			ndv:        399,
		},
	}
	sc := mock.NewContext().GetSessionVars().StmtCtx
	bucketCount := 256
	for _, tt := range tests {
		lh := mockHistogram(tt.leftLower, tt.leftNum)
		rh := mockHistogram(tt.rightLower, tt.rightNum)
		h, err := MergeHistograms(sc, lh, rh, bucketCount, Version1)
		require.NoError(t, err)
		require.Equal(t, tt.ndv, h.NDV)
		require.Equal(t, tt.bucketNum, h.Len())
		require.Equal(t, tt.leftNum+tt.rightNum, int64(h.TotalRowCount()))
		// The merged histogram must span from the left lower bound ...
		expectLower := types.NewIntDatum(tt.leftLower)
		cmp, err := h.GetLower(0).Compare(sc, &expectLower, collate.GetBinaryCollator())
		require.NoError(t, err)
		require.Equal(t, 0, cmp)
		// ... to the right histogram's last value.
		expectUpper := types.NewIntDatum(tt.rightLower + tt.rightNum - 1)
		cmp, err = h.GetUpper(h.Len()-1).Compare(sc, &expectUpper, collate.GetBinaryCollator())
		require.NoError(t, err)
		require.Equal(t, 0, cmp)
	}
}
// buildCMSketch builds a Count-Min sketch over the given datums, panicking
// on any insertion error (test helper).
func buildCMSketch(values []types.Datum) *CMSketch {
	sketch := NewCMSketch(8, 2048)
	for _, v := range values {
		v := v // take the address of a per-iteration copy
		if err := sketch.insert(&v); err != nil {
			panic(err)
		}
	}
	return sketch
}
// SubTestColumnRange returns a subtest that checks row-count estimation for
// ranges over an ordinary column: first while the table has no column stats
// registered (tbl.Columns is empty), then again after attaching the built
// histogram + CM sketch via tbl.Columns[0] = col.
func SubTestColumnRange() func(*testing.T) {
	return func(t *testing.T) {
		s := createTestStatisticsSamples(t)
		bucketCount := int64(256)
		ctx := mock.NewContext()
		sc := ctx.GetSessionVars().StmtCtx
		sketch, _, err := buildFMSketch(sc, s.rc.(*recordSet).data, 1000)
		require.NoError(t, err)
		collector := &SampleCollector{
			Count:     int64(s.count),
			NullCount: 0,
			Samples:   s.samples,
			FMSketch:  sketch,
		}
		hg, err := BuildColumn(ctx, bucketCount, 2, collector, types.NewFieldType(mysql.TypeLonglong))
		hg.PreCalculateScalar()
		require.NoError(t, err)
		col := &Column{
			Histogram:         *hg,
			CMSketch:          buildCMSketch(s.rc.(*recordSet).data),
			Info:              &model.ColumnInfo{},
			StatsLoadedStatus: NewStatsFullLoadStatus(),
		}
		tbl := &Table{
			HistColl: HistColl{
				RealtimeCount: int64(col.TotalRowCount()),
				Columns:       make(map[int64]*Column),
			},
		}
		// Full range including NULL ({} is the null datum).
		ran := []*ranger.Range{{
			LowVal:    []types.Datum{{}},
			HighVal:   []types.Datum{types.MaxValueDatum()},
			Collators: collate.GetBinaryCollatorSlice(1),
		}}
		count, err := GetRowCountByColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 100000, int(count))
		// Full non-NULL range.
		ran[0].LowVal[0] = types.MinNotNullDatum()
		count, err = GetRowCountByColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 99900, int(count))
		// (1000, 2000) with both bounds excluded — no column stats yet.
		ran[0].LowVal[0] = types.NewIntDatum(1000)
		ran[0].LowExclude = true
		ran[0].HighVal[0] = types.NewIntDatum(2000)
		ran[0].HighExclude = true
		count, err = GetRowCountByColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 2500, int(count))
		// [1000, 2000] inclusive.
		ran[0].LowExclude = false
		ran[0].HighExclude = false
		count, err = GetRowCountByColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 2500, int(count))
		// Point range [2000, 2000].
		ran[0].LowVal[0] = ran[0].HighVal[0]
		count, err = GetRowCountByColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 100, int(count))
		// Attach the real stats and repeat: estimates become histogram-based.
		tbl.Columns[0] = col
		ran[0].LowVal[0] = types.Datum{}
		ran[0].HighVal[0] = types.MaxValueDatum()
		count, err = GetRowCountByColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 100000, int(count))
		ran[0].LowVal[0] = types.NewIntDatum(1000)
		ran[0].LowExclude = true
		ran[0].HighVal[0] = types.NewIntDatum(2000)
		ran[0].HighExclude = true
		count, err = GetRowCountByColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 9998, int(count))
		ran[0].LowExclude = false
		ran[0].HighExclude = false
		count, err = GetRowCountByColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 10000, int(count))
		ran[0].LowVal[0] = ran[0].HighVal[0]
		count, err = GetRowCountByColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 1, int(count))
	}
}
// SubTestIntColumnRanges returns a subtest that checks row-count estimation
// for integer (PK-style) column ranges: signed and unsigned full ranges and
// point/interval ranges, first without column stats registered and then
// with the built PK histogram attached.
func SubTestIntColumnRanges() func(*testing.T) {
	return func(t *testing.T) {
		s := createTestStatisticsSamples(t)
		bucketCount := int64(256)
		ctx := mock.NewContext()
		// Rewind the PK record set so buildPK sees all rows.
		s.pk.(*recordSet).cursor = 0
		rowCount, hg, err := buildPK(ctx, bucketCount, 0, s.pk)
		hg.PreCalculateScalar()
		require.NoError(t, err)
		require.Equal(t, int64(100000), rowCount)
		col := &Column{Histogram: *hg, Info: &model.ColumnInfo{}, StatsLoadedStatus: NewStatsFullLoadStatus()}
		tbl := &Table{
			HistColl: HistColl{
				RealtimeCount: int64(col.TotalRowCount()),
				Columns:       make(map[int64]*Column),
			},
		}
		// Signed full range — estimates without stats attached.
		ran := []*ranger.Range{{
			LowVal:    []types.Datum{types.NewIntDatum(math.MinInt64)},
			HighVal:   []types.Datum{types.NewIntDatum(math.MaxInt64)},
			Collators: collate.GetBinaryCollatorSlice(1),
		}}
		count, err := GetRowCountByIntColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 100000, int(count))
		ran[0].LowVal[0].SetInt64(1000)
		ran[0].HighVal[0].SetInt64(2000)
		count, err = GetRowCountByIntColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 1000, int(count))
		ran[0].LowVal[0].SetInt64(1001)
		ran[0].HighVal[0].SetInt64(1999)
		count, err = GetRowCountByIntColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 998, int(count))
		ran[0].LowVal[0].SetInt64(1000)
		ran[0].HighVal[0].SetInt64(1000)
		count, err = GetRowCountByIntColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 1, int(count))
		// Unsigned full range and sub-ranges, still without stats attached.
		ran = []*ranger.Range{{
			LowVal:    []types.Datum{types.NewUintDatum(0)},
			HighVal:   []types.Datum{types.NewUintDatum(math.MaxUint64)},
			Collators: collate.GetBinaryCollatorSlice(1),
		}}
		count, err = GetRowCountByIntColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 100000, int(count))
		ran[0].LowVal[0].SetUint64(1000)
		ran[0].HighVal[0].SetUint64(2000)
		count, err = GetRowCountByIntColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 1000, int(count))
		ran[0].LowVal[0].SetUint64(1001)
		ran[0].HighVal[0].SetUint64(1999)
		count, err = GetRowCountByIntColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 998, int(count))
		ran[0].LowVal[0].SetUint64(1000)
		ran[0].HighVal[0].SetUint64(1000)
		count, err = GetRowCountByIntColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 1, int(count))
		// Attach the histogram and repeat with histogram-based estimates.
		tbl.Columns[0] = col
		ran[0].LowVal[0].SetInt64(math.MinInt64)
		ran[0].HighVal[0].SetInt64(math.MaxInt64)
		count, err = GetRowCountByIntColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 100000, int(count))
		ran[0].LowVal[0].SetInt64(1000)
		ran[0].HighVal[0].SetInt64(2000)
		count, err = GetRowCountByIntColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 1001, int(count))
		ran[0].LowVal[0].SetInt64(1001)
		ran[0].HighVal[0].SetInt64(1999)
		count, err = GetRowCountByIntColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 999, int(count))
		ran[0].LowVal[0].SetInt64(1000)
		ran[0].HighVal[0].SetInt64(1000)
		count, err = GetRowCountByIntColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 1, int(count))
		// Inflating the table count must not change a point estimate of 1.
		tbl.RealtimeCount *= 10
		count, err = GetRowCountByIntColumnRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 1, int(count))
	}
}
// SubTestIndexRanges returns a subtest that checks row-count estimation for
// index ranges: first without index stats registered, then with an empty
// unique index, and finally with the built index histogram attached.
func SubTestIndexRanges() func(*testing.T) {
	return func(t *testing.T) {
		s := createTestStatisticsSamples(t)
		bucketCount := int64(256)
		ctx := mock.NewContext()
		// Rewind the record set so buildIndex sees all rows.
		s.rc.(*recordSet).cursor = 0
		rowCount, hg, cms, err := buildIndex(ctx, bucketCount, 0, s.rc)
		hg.PreCalculateScalar()
		require.NoError(t, err)
		require.Equal(t, int64(100000), rowCount)
		idxInfo := &model.IndexInfo{Columns: []*model.IndexColumn{{Offset: 0}}}
		idx := &Index{Histogram: *hg, CMSketch: cms, Info: idxInfo}
		tbl := &Table{
			HistColl: HistColl{
				RealtimeCount: int64(idx.TotalRowCount()),
				Indices:       make(map[int64]*Index),
			},
		}
		// No index stats registered yet: estimates below are default-based.
		ran := []*ranger.Range{{
			LowVal:    []types.Datum{types.MinNotNullDatum()},
			HighVal:   []types.Datum{types.MaxValueDatum()},
			Collators: collate.GetBinaryCollatorSlice(1),
		}}
		count, err := GetRowCountByIndexRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 99900, int(count))
		ran[0].LowVal[0] = types.NewIntDatum(1000)
		ran[0].HighVal[0] = types.NewIntDatum(2000)
		count, err = GetRowCountByIndexRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 2500, int(count))
		ran[0].LowVal[0] = types.NewIntDatum(1001)
		ran[0].HighVal[0] = types.NewIntDatum(1999)
		count, err = GetRowCountByIndexRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 2500, int(count))
		ran[0].LowVal[0] = types.NewIntDatum(1000)
		ran[0].HighVal[0] = types.NewIntDatum(1000)
		count, err = GetRowCountByIndexRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 100, int(count))
		// A unique index without stats: point ranges estimate to one row.
		tbl.Indices[0] = &Index{Info: &model.IndexInfo{Columns: []*model.IndexColumn{{Offset: 0}}, Unique: true}}
		ran[0].LowVal[0] = types.NewIntDatum(1000)
		ran[0].HighVal[0] = types.NewIntDatum(1000)
		count, err = GetRowCountByIndexRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 1, int(count))
		// Attach the real index stats and repeat.
		tbl.Indices[0] = idx
		ran[0].LowVal[0] = types.MinNotNullDatum()
		ran[0].HighVal[0] = types.MaxValueDatum()
		count, err = GetRowCountByIndexRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 100000, int(count))
		ran[0].LowVal[0] = types.NewIntDatum(1000)
		ran[0].HighVal[0] = types.NewIntDatum(2000)
		count, err = GetRowCountByIndexRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 1000, int(count))
		ran[0].LowVal[0] = types.NewIntDatum(1001)
		ran[0].HighVal[0] = types.NewIntDatum(1990)
		count, err = GetRowCountByIndexRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 989, int(count))
		ran[0].LowVal[0] = types.NewIntDatum(1000)
		ran[0].HighVal[0] = types.NewIntDatum(1000)
		count, err = GetRowCountByIndexRanges(ctx, &tbl.HistColl, 0, ran)
		require.NoError(t, err)
		require.Equal(t, 0, int(count))
	}
}
// encodeKey encodes a single datum with the index key codec and wraps the
// result in a bytes datum, mirroring how index keys are stored. The encode
// error is deliberately ignored: this test helper is only fed encodable
// values.
func encodeKey(key types.Datum) types.Datum {
	sc := &stmtctx.StatementContext{TimeZone: time.Local}
	buf, _ := codec.EncodeKey(sc, nil, key)
	return types.NewBytesDatum(buf)
}
// checkRepeats asserts that every bucket of hg has a strictly positive
// repeat count.
func checkRepeats(t *testing.T, hg *Histogram) {
	for _, bkt := range hg.Buckets {
		require.Greater(t, bkt.Repeat, int64(0))
	}
}
// buildIndex builds an index histogram and CM sketch from records: every
// row is key-encoded as a whole (all columns together), fed to a sorted
// histogram builder, and inserted into the sketch. It returns the row
// count, the histogram, and the sketch.
func buildIndex(sctx sessionctx.Context, numBuckets, id int64, records sqlexec.RecordSet) (int64, *Histogram, *CMSketch, error) {
	b := NewSortedBuilder(sctx.GetSessionVars().StmtCtx, numBuckets, id, types.NewFieldType(mysql.TypeBlob), Version1)
	cms := NewCMSketch(8, 2048)
	ctx := context.Background()
	// Unlike buildPK, the chunk and its iterator are created once up front
	// and the chunk is refilled in place by Next on each pass.
	req := records.NewChunk(nil)
	it := chunk.NewIterator4Chunk(req)
	for {
		err := records.Next(ctx, req)
		if err != nil {
			return 0, nil, nil, errors.Trace(err)
		}
		if req.NumRows() == 0 {
			break
		}
		for row := it.Begin(); row != it.End(); row = it.Next() {
			datums := RowToDatums(row, records.Fields())
			buf, err := codec.EncodeKey(sctx.GetSessionVars().StmtCtx, nil, datums...)
			if err != nil {
				return 0, nil, nil, errors.Trace(err)
			}
			data := types.NewBytesDatum(buf)
			err = b.Iterate(data)
			if err != nil {
				return 0, nil, nil, errors.Trace(err)
			}
			cms.InsertBytes(buf)
		}
	}
	return b.Count, b.Hist(), cms, nil
}
// SubTestBuild returns a subtest that exercises the statistics builders end
// to end: BuildColumn (v1), BuildHistAndTopN (v2 with TopN), a sampled
// column via SampleBuilder, an index histogram via buildIndex, a PK
// histogram via buildPK, and finally a single-row JSON column edge case.
func SubTestBuild() func(*testing.T) {
	return func(t *testing.T) {
		s := createTestStatisticsSamples(t)
		bucketCount := int64(256)
		topNCount := 20
		ctx := mock.NewContext()
		sc := ctx.GetSessionVars().StmtCtx
		sketch, _, err := buildFMSketch(sc, s.rc.(*recordSet).data, 1000)
		require.NoError(t, err)
		collector := &SampleCollector{
			Count:     int64(s.count),
			NullCount: 0,
			Samples:   s.samples,
			FMSketch:  sketch,
		}
		// Version-1 column histogram and its estimation behavior.
		col, err := BuildColumn(ctx, bucketCount, 2, collector, types.NewFieldType(mysql.TypeLonglong))
		require.NoError(t, err)
		checkRepeats(t, col)
		col.PreCalculateScalar()
		require.Equal(t, 226, col.Len())
		count, _ := col.EqualRowCount(nil, types.NewIntDatum(1000), false)
		require.Equal(t, 0, int(count))
		count = col.LessRowCount(nil, types.NewIntDatum(1000))
		require.Equal(t, 10000, int(count))
		count = col.LessRowCount(nil, types.NewIntDatum(2000))
		require.Equal(t, 19999, int(count))
		count = col.GreaterRowCount(types.NewIntDatum(2000))
		require.Equal(t, 80000, int(count))
		count = col.LessRowCount(nil, types.NewIntDatum(200000000))
		require.Equal(t, 100000, int(count))
		count = col.GreaterRowCount(types.NewIntDatum(200000000))
		require.Equal(t, 0.0, count)
		count, _ = col.EqualRowCount(nil, types.NewIntDatum(200000000), false)
		require.Equal(t, 0.0, count)
		count = col.BetweenRowCount(nil, types.NewIntDatum(3000), types.NewIntDatum(3500))
		require.Equal(t, 4994, int(count))
		count = col.LessRowCount(nil, types.NewIntDatum(1))
		require.Equal(t, 5, int(count))
		// Version-2 histogram + TopN.
		colv2, topnv2, err := BuildHistAndTopN(ctx, int(bucketCount), topNCount, 2, collector, types.NewFieldType(mysql.TypeLonglong), true, nil)
		require.NoError(t, err)
		require.NotNil(t, topnv2.TopN)
		// The most common one's occurrence is 9990, the second most common one's occurrence is 30.
		// The ndv of the histogram is 73344, the total count of it is 90010. 90010/73344 vs 30, it's not a bad estimate.
		expectedTopNCount := []uint64{9990}
		require.Equal(t, len(expectedTopNCount), len(topnv2.TopN))
		for i, meta := range topnv2.TopN {
			require.Equal(t, expectedTopNCount[i], meta.Count)
		}
		require.Equal(t, 251, colv2.Len())
		count = colv2.LessRowCount(nil, types.NewIntDatum(1000))
		require.Equal(t, 328, int(count))
		count = colv2.LessRowCount(nil, types.NewIntDatum(2000))
		require.Equal(t, 10007, int(count))
		count = colv2.GreaterRowCount(types.NewIntDatum(2000))
		require.Equal(t, 80001, int(count))
		count = colv2.LessRowCount(nil, types.NewIntDatum(200000000))
		require.Equal(t, 90010, int(count))
		count = colv2.GreaterRowCount(types.NewIntDatum(200000000))
		require.Equal(t, 0.0, count)
		count = colv2.BetweenRowCount(nil, types.NewIntDatum(3000), types.NewIntDatum(3500))
		require.Equal(t, 5001, int(count))
		count = colv2.LessRowCount(nil, types.NewIntDatum(1))
		require.Equal(t, 0, int(count))
		// Sampled column statistics built from the PK record set.
		builder := SampleBuilder{
			Sc:              mock.NewContext().GetSessionVars().StmtCtx,
			RecordSet:       s.pk,
			ColLen:          1,
			MaxSampleSize:   1000,
			MaxFMSketchSize: 1000,
			Collators:       make([]collate.Collator, 1),
			ColsFieldType:   []*types.FieldType{types.NewFieldType(mysql.TypeLonglong)},
		}
		// Close rewinds the record set before sampling.
		require.NoError(t, s.pk.Close())
		collectors, _, err := builder.CollectColumnStats()
		require.NoError(t, err)
		require.Equal(t, 1, len(collectors))
		col, err = BuildColumn(mock.NewContext(), 256, 2, collectors[0], types.NewFieldType(mysql.TypeLonglong))
		require.NoError(t, err)
		checkRepeats(t, col)
		require.Equal(t, 250, col.Len())
		// Index histogram built over key-encoded values.
		tblCount, col, _, err := buildIndex(ctx, bucketCount, 1, s.rc)
		require.NoError(t, err)
		checkRepeats(t, col)
		col.PreCalculateScalar()
		require.Equal(t, 100000, int(tblCount))
		count, _ = col.EqualRowCount(nil, encodeKey(types.NewIntDatum(10000)), false)
		require.Equal(t, 1, int(count))
		count = col.LessRowCount(nil, encodeKey(types.NewIntDatum(20000)))
		require.Equal(t, 19999, int(count))
		count = col.BetweenRowCount(nil, encodeKey(types.NewIntDatum(30000)), encodeKey(types.NewIntDatum(35000)))
		require.Equal(t, 4999, int(count))
		count = col.BetweenRowCount(nil, encodeKey(types.MinNotNullDatum()), encodeKey(types.NewIntDatum(0)))
		require.Equal(t, 0, int(count))
		count = col.LessRowCount(nil, encodeKey(types.NewIntDatum(0)))
		require.Equal(t, 0, int(count))
		// PK histogram built over raw integer values.
		s.pk.(*recordSet).cursor = 0
		tblCount, col, err = buildPK(ctx, bucketCount, 4, s.pk)
		require.NoError(t, err)
		checkRepeats(t, col)
		col.PreCalculateScalar()
		require.Equal(t, 100000, int(tblCount))
		count, _ = col.EqualRowCount(nil, types.NewIntDatum(10000), false)
		require.Equal(t, 1, int(count))
		count = col.LessRowCount(nil, types.NewIntDatum(20000))
		require.Equal(t, 20000, int(count))
		count = col.BetweenRowCount(nil, types.NewIntDatum(30000), types.NewIntDatum(35000))
		require.Equal(t, 5000, int(count))
		count = col.GreaterRowCount(types.NewIntDatum(1001))
		require.Equal(t, 98998, int(count))
		count = col.LessRowCount(nil, types.NewIntDatum(99999))
		require.Equal(t, 99999, int(count))
		// Edge case: a single JSON sample produces a one-bucket histogram
		// whose lower and upper bounds are the same value.
		datum := types.Datum{}
		datum.SetMysqlJSON(types.BinaryJSON{TypeCode: types.JSONTypeCodeLiteral})
		item := &SampleItem{Value: datum}
		collector = &SampleCollector{
			Count:     1,
			NullCount: 0,
			Samples:   []*SampleItem{item},
			FMSketch:  sketch,
		}
		col, err = BuildColumn(ctx, bucketCount, 2, collector, types.NewFieldType(mysql.TypeJSON))
		require.NoError(t, err)
		require.Equal(t, 1, col.Len())
		require.Equal(t, col.GetUpper(0), col.GetLower(0))
	}
}
// SubTestHistogramProtoConversion returns a subtest that round-trips an
// index histogram through its protobuf representation and asserts the
// result is equal to the original.
func SubTestHistogramProtoConversion() func(*testing.T) {
	return func(t *testing.T) {
		s := createTestStatisticsSamples(t)
		ctx := mock.NewContext()
		// Close rewinds the record set so buildIndex sees all rows.
		require.NoError(t, s.rc.Close())
		tblCount, col, _, err := buildIndex(ctx, 256, 1, s.rc)
		require.NoError(t, err)
		require.Equal(t, 100000, int(tblCount))
		p := HistogramToProto(col)
		h := HistogramFromProto(p)
		require.True(t, HistogramEqual(col, h, true))
	}
}
// TestPruneTopN checks that pruneTopNItem keeps TopN items whose sampled
// frequency is genuinely above the remaining-values average; in all three
// cases below the input should survive pruning unchanged.
func TestPruneTopN(t *testing.T) {
	var topnIn, topnOut []TopNMeta
	var totalNDV, nullCnt, sampleRows, totalRows int64
	// case 1: one dominant value plus a rare one, tiny NDV.
	topnIn = []TopNMeta{{[]byte{1}, 100_000}, {[]byte{2}, 10}}
	totalNDV = 2
	nullCnt = 0
	sampleRows = 100_010
	totalRows = 500_050
	topnOut = pruneTopNItem(topnIn, totalNDV, nullCnt, sampleRows, totalRows)
	require.Equal(t, topnIn, topnOut)
	// case 2: several frequent values with one value left outside the TopN.
	topnIn = []TopNMeta{
		{[]byte{1}, 30_000},
		{[]byte{2}, 30_000},
		{[]byte{3}, 20_000},
		{[]byte{4}, 20_000},
	}
	totalNDV = 5
	nullCnt = 0
	sampleRows = 100_000
	totalRows = 10_000_000
	topnOut = pruneTopNItem(topnIn, totalNDV, nullCnt, sampleRows, totalRows)
	require.Equal(t, topnIn, topnOut)
	// case 3: uniform distribution where the TopN covers the entire NDV.
	topnIn = nil
	for i := 0; i < 100; i++ {
		topnIn = append(topnIn, TopNMeta{[]byte{byte(i)}, 1_000})
	}
	totalNDV = 100
	nullCnt = 0
	sampleRows = 100_000
	totalRows = 10_000_000
	topnOut = pruneTopNItem(topnIn, totalNDV, nullCnt, sampleRows, totalRows)
	require.Equal(t, topnIn, topnOut)
}
|
package testing
import (
"crypto/ecdsa"
"crypto/elliptic"
cryptorand "crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"io/ioutil"
"math/big"
"math/rand"
"net"
"testing"
"time"
clusterv1 "github.com/open-cluster-management/api/cluster/v1"
workapiv1 "github.com/open-cluster-management/api/work/v1"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/events/eventstesting"
certv1beta1 "k8s.io/api/certificates/v1beta1"
coordv1 "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
kubeversion "k8s.io/client-go/pkg/version"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
certutil "k8s.io/client-go/util/cert"
"k8s.io/client-go/util/keyutil"
"k8s.io/client-go/util/workqueue"
)
const (
	// TestLeaseDurationSeconds is the lease duration assigned to accepted
	// test clusters.
	TestLeaseDurationSeconds int32 = 1
	// TestManagedClusterName is the cluster name shared by all fixtures in
	// this package.
	TestManagedClusterName = "testmanagedcluster"
)
// FakeSyncContext is a test double for a controller sync context, carrying
// a fixed queue key, a testing event recorder, and a rate-limiting queue.
type FakeSyncContext struct {
	spokeName string // returned as the queue key
	recorder  events.Recorder
	queue     workqueue.RateLimitingInterface
}
// Queue returns the fake context's work queue.
func (f FakeSyncContext) Queue() workqueue.RateLimitingInterface { return f.queue }

// QueueKey returns the fixed spoke cluster name used as the queue key.
func (f FakeSyncContext) QueueKey() string { return f.spokeName }

// Recorder returns the testing event recorder.
func (f FakeSyncContext) Recorder() events.Recorder { return f.recorder }
// NewFakeSyncContext builds a FakeSyncContext for clusterName with a
// testing event recorder and a default rate-limiting queue.
func NewFakeSyncContext(t *testing.T, clusterName string) *FakeSyncContext {
	return &FakeSyncContext{
		spokeName: clusterName,
		recorder:  eventstesting.NewTestingEventRecorder(t),
		queue:     workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
	}
}
// NewManagedCluster returns a minimal ManagedCluster named
// TestManagedClusterName.
func NewManagedCluster() *clusterv1.ManagedCluster {
	cluster := &clusterv1.ManagedCluster{}
	cluster.Name = TestManagedClusterName
	return cluster
}
// NewAcceptingManagedCluster returns a ManagedCluster whose spec accepts
// the client and which carries the API-resource-cleanup finalizer.
func NewAcceptingManagedCluster() *clusterv1.ManagedCluster {
	cluster := NewManagedCluster()
	cluster.Spec.HubAcceptsClient = true
	cluster.Finalizers = []string{"cluster.open-cluster-management.io/api-resource-cleanup"}
	return cluster
}
// NewAcceptedManagedCluster returns an accepting ManagedCluster that also
// has the HubAccepted condition set and the test lease duration configured.
func NewAcceptedManagedCluster() *clusterv1.ManagedCluster {
	cluster := NewAcceptingManagedCluster()
	accepted := NewManagedClusterCondition(
		clusterv1.ManagedClusterConditionHubAccepted,
		"True",
		"HubClusterAdminAccepted",
		"Accepted by hub cluster admin",
		nil,
	)
	cluster.Status.Conditions = append(cluster.Status.Conditions, accepted)
	cluster.Spec.LeaseDurationSeconds = TestLeaseDurationSeconds
	return cluster
}
// NewAvailableManagedCluster returns an accepted ManagedCluster that also
// carries the Available condition.
func NewAvailableManagedCluster() *clusterv1.ManagedCluster {
	cluster := NewAcceptedManagedCluster()
	available := NewManagedClusterCondition(
		clusterv1.ManagedClusterConditionAvailable,
		"True",
		"ManagedClusterAvailable",
		"Managed cluster is available",
		nil,
	)
	cluster.Status.Conditions = append(cluster.Status.Conditions, available)
	return cluster
}
// NewJoinedManagedCluster returns an accepted ManagedCluster that also
// carries the Joined condition.
func NewJoinedManagedCluster() *clusterv1.ManagedCluster {
	cluster := NewAcceptedManagedCluster()
	joined := NewManagedClusterCondition(
		clusterv1.ManagedClusterConditionJoined,
		"True",
		"ManagedClusterJoined",
		"Managed cluster joined",
		nil,
	)
	cluster.Status.Conditions = append(cluster.Status.Conditions, joined)
	return cluster
}
// NewManagedClusterWithStatus returns a joined ManagedCluster whose status
// reports the given cpu/memory capacity and allocatable values and the
// local client-go Kubernetes version.
func NewManagedClusterWithStatus(capacity, allocatable corev1.ResourceList) *clusterv1.ManagedCluster {
	toClusterResources := func(rl corev1.ResourceList) clusterv1.ResourceList {
		return clusterv1.ResourceList{
			"cpu":    rl.Cpu().DeepCopy(),
			"memory": rl.Memory().DeepCopy(),
		}
	}
	cluster := NewJoinedManagedCluster()
	cluster.Status.Capacity = toClusterResources(capacity)
	cluster.Status.Allocatable = toClusterResources(allocatable)
	cluster.Status.Version = clusterv1.ManagedClusterVersion{
		Kubernetes: kubeversion.Get().GitVersion,
	}
	return cluster
}
// NewDeniedManagedCluster returns an accepted ManagedCluster whose spec has
// been flipped to no longer accept the client.
func NewDeniedManagedCluster() *clusterv1.ManagedCluster {
	cluster := NewAcceptedManagedCluster()
	cluster.Spec.HubAcceptsClient = false
	return cluster
}
// NewDeletingManagedCluster returns a ManagedCluster that is mid-deletion:
// deletion timestamp set to now, cleanup finalizer still present.
func NewDeletingManagedCluster() *clusterv1.ManagedCluster {
	deletionTime := metav1.Now()
	cluster := &clusterv1.ManagedCluster{}
	cluster.Name = TestManagedClusterName
	cluster.DeletionTimestamp = &deletionTime
	cluster.Finalizers = []string{"cluster.open-cluster-management.io/api-resource-cleanup"}
	return cluster
}
// NewManagedClusterCondition builds a metav1.Condition from the given
// fields; LastTransitionTime is only set when lastTransition is non-nil.
func NewManagedClusterCondition(name, status, reason, message string, lastTransition *metav1.Time) metav1.Condition {
	condition := metav1.Condition{
		Type:    name,
		Status:  metav1.ConditionStatus(status),
		Reason:  reason,
		Message: message,
	}
	if lastTransition != nil {
		condition.LastTransitionTime = *lastTransition
	}
	return condition
}
// NewManagedClusterLease returns the cluster lease for the test cluster,
// named "cluster-lease-<cluster>" in the cluster's namespace, with the
// given renew time.
func NewManagedClusterLease(renewTime time.Time) *coordv1.Lease {
	lease := &coordv1.Lease{}
	lease.Name = fmt.Sprintf("cluster-lease-%s", TestManagedClusterName)
	lease.Namespace = TestManagedClusterName
	lease.Spec.RenewTime = &metav1.MicroTime{Time: renewTime}
	return lease
}
// NewNamespace returns a Namespace with the given name; when terminated is
// true its deletion timestamp is set to now.
func NewNamespace(name string, terminated bool) *corev1.Namespace {
	ns := &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{Name: name},
	}
	if terminated {
		deletionTime := metav1.Now()
		ns.DeletionTimestamp = &deletionTime
	}
	return ns
}
// NewManifestWork returns a ManifestWork with the given namespace, name,
// finalizers, and (possibly nil) deletion timestamp.
func NewManifestWork(namespace, name string, finalizers []string, deletionTimestamp *metav1.Time) *workapiv1.ManifestWork {
	return &workapiv1.ManifestWork{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:         namespace,
			Name:              name,
			Finalizers:        finalizers,
			DeletionTimestamp: deletionTimestamp,
		},
	}
}
// NewRole returns a Role with the given coordinates and finalizers; when
// terminated is true its deletion timestamp is set to now.
func NewRole(namespace, name string, finalizers []string, terminated bool) *rbacv1.Role {
	role := &rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:  namespace,
			Name:       name,
			Finalizers: finalizers,
		},
	}
	if terminated {
		deletionTime := metav1.Now()
		role.DeletionTimestamp = &deletionTime
	}
	return role
}
// NewRoleBinding returns a RoleBinding with the given coordinates and
// finalizers; when terminated is true its deletion timestamp is set to now.
func NewRoleBinding(namespace, name string, finalizers []string, terminated bool) *rbacv1.RoleBinding {
	binding := &rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:  namespace,
			Name:       name,
			Finalizers: finalizers,
		},
	}
	if terminated {
		deletionTime := metav1.Now()
		binding.DeletionTimestamp = &deletionTime
	}
	return binding
}
// NewResourceList builds a ResourceList with cpu whole cores (decimal
// notation) and mem mebibytes of memory (binary notation).
func NewResourceList(cpu, mem int) corev1.ResourceList {
	return corev1.ResourceList{
		corev1.ResourceCPU:    *resource.NewQuantity(int64(cpu), resource.DecimalExponent),
		corev1.ResourceMemory: *resource.NewQuantity(int64(1024*1024*mem), resource.BinarySI),
	}
}
// NewNode returns a Node with the given name whose status reports the
// supplied capacity and allocatable resource lists.
func NewNode(name string, capacity, allocatable corev1.ResourceList) *corev1.Node {
	node := &corev1.Node{}
	node.Name = name
	node.Status.Capacity = capacity
	node.Status.Allocatable = allocatable
	return node
}
// NewUnstructuredObj returns an unstructured object with the given
// apiVersion, kind, namespace, and name.
func NewUnstructuredObj(apiVersion, kind, namespace, name string) *unstructured.Unstructured {
	metadata := map[string]interface{}{
		"namespace": namespace,
		"name":      name,
	}
	return &unstructured.Unstructured{
		Object: map[string]interface{}{
			"apiVersion": apiVersion,
			"kind":       kind,
			"metadata":   metadata,
		},
	}
}
// CSRHolder bundles the parameters used by NewCSR to build a
// CertificateSigningRequest fixture.
type CSRHolder struct {
	Name         string            // object name
	Labels       map[string]string // object labels
	SignerName   *string           // spec.signerName (optional)
	CN           string            // certificate request subject common name
	Orgs         []string          // certificate request subject organizations
	Username     string            // spec.username
	ReqBlockType string            // PEM block type wrapping the request bytes
}
// NewCSR builds a CertificateSigningRequest fixture whose request PEM is a
// real certificate request generated from a deterministic (zero-seeded,
// insecure) ECDSA P-256 key — reproducible for tests, never for production.
// It panics on any generation failure.
func NewCSR(holder CSRHolder) *certv1beta1.CertificateSigningRequest {
	// Deterministic randomness keeps the generated request stable across runs.
	insecureRand := rand.New(rand.NewSource(0))
	pk, err := ecdsa.GenerateKey(elliptic.P256(), insecureRand)
	if err != nil {
		panic(err)
	}
	csrb, err := x509.CreateCertificateRequest(insecureRand, &x509.CertificateRequest{
		Subject: pkix.Name{
			CommonName:   holder.CN,
			Organization: holder.Orgs,
		},
		DNSNames:       []string{},
		EmailAddresses: []string{},
		IPAddresses:    []net.IP{},
	}, pk)
	if err != nil {
		panic(err)
	}
	return &certv1beta1.CertificateSigningRequest{
		ObjectMeta: metav1.ObjectMeta{
			Name:         holder.Name,
			GenerateName: "csr-",
			Labels:       holder.Labels,
		},
		Spec: certv1beta1.CertificateSigningRequestSpec{
			Username:   holder.Username,
			Usages:     []certv1beta1.KeyUsage{},
			SignerName: holder.SignerName,
			Request:    pem.EncodeToMemory(&pem.Block{Type: holder.ReqBlockType, Bytes: csrb}),
		},
	}
}
// NewDeniedCSR returns a CSR fixture with a Denied condition appended.
func NewDeniedCSR(holder CSRHolder) *certv1beta1.CertificateSigningRequest {
	csr := NewCSR(holder)
	denied := certv1beta1.CertificateSigningRequestCondition{
		Type: certv1beta1.CertificateDenied,
	}
	csr.Status.Conditions = append(csr.Status.Conditions, denied)
	return csr
}
// NewApprovedCSR returns a CSR fixture with an Approved condition appended.
func NewApprovedCSR(holder CSRHolder) *certv1beta1.CertificateSigningRequest {
	csr := NewCSR(holder)
	approved := certv1beta1.CertificateSigningRequestCondition{
		Type: certv1beta1.CertificateApproved,
	}
	csr.Status.Conditions = append(csr.Status.Conditions, approved)
	return csr
}
// NewKubeconfig serializes a single-cluster kubeconfig pointing at a local
// test server. For each of key and cert: when the byte slice is non-nil it
// is embedded inline (ClientKeyData/ClientCertificateData); when nil the
// config instead references the on-disk file names "tls.key"/"tls.crt".
// It panics if the config cannot be serialized.
func NewKubeconfig(key, cert []byte) []byte {
	var clientKey, clientCertificate string
	var clientKeyData, clientCertificateData []byte
	if key != nil {
		clientKeyData = key
	} else {
		clientKey = "tls.key"
	}
	if cert != nil {
		clientCertificateData = cert
	} else {
		clientCertificate = "tls.crt"
	}
	kubeconfig := clientcmdapi.Config{
		// TLS verification is skipped: the target is a throwaway test server.
		Clusters: map[string]*clientcmdapi.Cluster{"default-cluster": {
			Server:                "https://127.0.0.1:6001",
			InsecureSkipTLSVerify: true,
		}},
		AuthInfos: map[string]*clientcmdapi.AuthInfo{"default-auth": {
			ClientCertificate:     clientCertificate,
			ClientCertificateData: clientCertificateData,
			ClientKey:             clientKey,
			ClientKeyData:         clientKeyData,
		}},
		Contexts: map[string]*clientcmdapi.Context{"default-context": {
			Cluster:   "default-cluster",
			AuthInfo:  "default-auth",
			Namespace: "default",
		}},
		CurrentContext: "default-context",
	}
	kubeconfigData, err := clientcmd.Write(kubeconfig)
	if err != nil {
		panic(err)
	}
	return kubeconfigData
}
// TestCert bundles a PEM-encoded certificate and its private key for tests.
type TestCert struct {
	Cert []byte // PEM-encoded certificate
	Key  []byte // PEM-encoded RSA private key
}
// NewHubKubeconfigSecret returns a Secret populated with the given data map,
// with the cert/key material (if any) added under "tls.crt"/"tls.key".
// BUG FIX: the original indexed-assigned into secret.Data even when the
// caller passed a nil map, which panics in Go; allocate the map lazily
// before the first write instead.
func NewHubKubeconfigSecret(namespace, name, resourceVersion string, cert *TestCert, data map[string][]byte) *corev1.Secret {
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:       namespace,
			Name:            name,
			ResourceVersion: resourceVersion,
		},
		Data: data,
	}
	// Allocate only when something must be written, so a nil data map with
	// no cert material still yields a Secret with nil Data (as before).
	if cert != nil && (cert.Cert != nil || cert.Key != nil) && secret.Data == nil {
		secret.Data = map[string][]byte{}
	}
	if cert != nil && cert.Cert != nil {
		secret.Data["tls.crt"] = cert.Cert
	}
	if cert != nil && cert.Key != nil {
		secret.Data["tls.key"] = cert.Key
	}
	return secret
}
// NewTestCert generates a throwaway self-signed CA and uses it to sign a
// client certificate with the given common name, valid from the CA's
// NotBefore until now+duration. The certificate and its RSA key are returned
// PEM-encoded. Any failure panics, as this is test-fixture code.
func NewTestCert(commonName string, duration time.Duration) *TestCert {
	// CA key pair and self-signed CA certificate used to sign the leaf below.
	caKey, err := rsa.GenerateKey(cryptorand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	caCert, err := certutil.NewSelfSignedCACert(certutil.Config{CommonName: "open-cluster-management.io"}, caKey)
	if err != nil {
		panic(err)
	}
	// Leaf key pair for the client certificate.
	key, err := rsa.GenerateKey(cryptorand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	certDERBytes, err := x509.CreateCertificate(
		cryptorand.Reader,
		&x509.Certificate{
			Subject: pkix.Name{
				CommonName: commonName,
			},
			SerialNumber: big.NewInt(1),
			NotBefore:    caCert.NotBefore,
			NotAfter:     time.Now().Add(duration).UTC(),
			KeyUsage:     x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
			// Client-auth only: these certs authenticate agents, not servers.
			ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
		},
		caCert,
		key.Public(),
		caKey,
	)
	if err != nil {
		panic(err)
	}
	// Round-trip through ParseCertificate to get a canonical Raw encoding.
	cert, err := x509.ParseCertificate(certDERBytes)
	if err != nil {
		panic(err)
	}
	return &TestCert{
		Cert: pem.EncodeToMemory(&pem.Block{
			Type:  certutil.CertificateBlockType,
			Bytes: cert.Raw,
		}),
		Key: pem.EncodeToMemory(&pem.Block{
			Type:  keyutil.RSAPrivateKeyBlockType,
			Bytes: x509.MarshalPKCS1PrivateKey(key),
		}),
	}
}
func WriteFile(filename string, data []byte) {
if err := ioutil.WriteFile(filename, data, 0644); err != nil {
panic(err)
}
}
|
package discovery
import (
"context"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/coreos/etcd/client"
"github.com/mhausenblas/reshifter/pkg/types"
"github.com/mhausenblas/reshifter/pkg/util"
)
var (
	// probetests drives testEtcdX: each row launches an etcd of the given
	// major version over the given scheme and checks ProbeEtcd's result.
	probetests = []struct {
		launchfunc func(string, string) (bool, error)
		scheme     string
		port       string
		version    string
		secure     bool
	}{
		{util.LaunchEtcd2, "http", "4001", "2", false},
		{util.LaunchEtcd3, "http", "4001", "3", false},
		{util.LaunchEtcd2, "https", "4001", "2", true},
		{util.LaunchEtcd3, "https", "4001", "3", true},
	}
	// k8stests drives testK8SX: each row seeds etcd with keys and states
	// which Kubernetes distro ProbeKubernetesDistro should detect.
	k8stests = []struct {
		keys    []string
		version string
		secure  bool
		distro  types.KubernetesDistro
	}{
		{[]string{""}, "2", false, types.NotADistro},
		{[]string{"/something"}, "2", false, types.NotADistro},
		{[]string{types.LegacyKubernetesPrefix}, "2", false, types.Vanilla},
		{[]string{types.KubernetesPrefix}, "2", false, types.Vanilla},
		{[]string{types.LegacyKubernetesPrefix, types.OpenShiftPrefix}, "2", false, types.OpenShift},
	}
)
// TestCountKeysFor seeds a local etcd2 with two Kubernetes namespace keys
// (payloads of 1 and 10 bytes) and verifies CountKeysFor reports both the
// key count and the size.
// FIX: the original declared wants (the expected size) and printed it in the
// failure message but never compared s against it; the size result went
// unchecked.
func TestCountKeysFor(t *testing.T) {
	defer func() {
		_ = util.EtcdDown()
	}()
	port := "4001"
	wantk := 2  // number of keys created below
	wants := 11 // total payload size: 1 + 10 bytes
	tetcd := "http://127.0.0.1:" + port
	_, err := util.LaunchEtcd2(tetcd, port)
	if err != nil {
		t.Errorf("Can't launch etcd at %s: %s", tetcd, err)
		return
	}
	c2, err := util.NewClient2(tetcd, false)
	if err != nil {
		t.Errorf("Can't connect to local etcd2 at %s: %s", tetcd, err)
		return
	}
	kapi := client.NewKeysAPI(c2)
	_, err = kapi.Set(context.Background(), types.KubernetesPrefix+"/namespaces/kube-system", ".", &client.SetOptions{Dir: false, PrevExist: client.PrevNoExist})
	if err != nil {
		t.Errorf("Can't create etcd entry: %s", err)
		return
	}
	_, err = kapi.Set(context.Background(), types.KubernetesPrefix+"/namespaces/default", "..........", &client.SetOptions{Dir: false, PrevExist: client.PrevNoExist})
	if err != nil {
		t.Errorf("Can't create etcd entry: %s", err)
		return
	}
	k, s, err := CountKeysFor(tetcd, types.Vanilla)
	if err != nil {
		t.Error(err)
		return
	}
	if k != wantk || s != wants {
		t.Errorf("discovery.CountKeysFor(%s) => got (%d, %d) but want (%d, %d)", tetcd, k, s, wantk, wants)
	}
}
// TestProbeEtcd exercises ProbeEtcd against both etcd major versions over
// HTTP and HTTPS, then verifies that malformed endpoints are rejected.
// FIX: the expected-failure branches previously called t.Error(err) with a
// nil error, producing an empty, useless failure message.
func TestProbeEtcd(t *testing.T) {
	_ = os.Setenv("RS_ETCD_CLIENT_CERT", filepath.Join(util.Certsdir(), "client.pem"))
	_ = os.Setenv("RS_ETCD_CLIENT_KEY", filepath.Join(util.Certsdir(), "client-key.pem"))
	for _, tt := range probetests {
		testEtcdX(t, tt.launchfunc, tt.scheme, tt.port, tt.version, tt.secure)
	}
	// An endpoint without a port must be rejected.
	_, _, err := ProbeEtcd("127.0.0.1")
	if err == nil {
		t.Error(`expected ProbeEtcd("127.0.0.1") to fail, got nil error`)
	}
	// An endpoint without a scheme must be rejected.
	_, _, err = ProbeEtcd("127.0.0.1:2379")
	if err == nil {
		t.Error(`expected ProbeEtcd("127.0.0.1:2379") to fail, got nil error`)
	}
}
// testEtcdX launches an etcd via etcdLaunchFunc on the given scheme/port and
// verifies that ProbeEtcd reports the expected major version and security
// mode. The etcd instance is always torn down afterwards.
// FIX: the failure message formatted "%s://%s" with (scheme, tetcd), but
// tetcd already contains the scheme, printing e.g. "http://http://127.0.0.1".
func testEtcdX(t *testing.T, etcdLaunchFunc func(string, string) (bool, error), scheme string, port string, version string, secure bool) {
	defer func() {
		_ = util.EtcdDown()
	}()
	tetcd := scheme + "://127.0.0.1:" + port
	_, err := etcdLaunchFunc(tetcd, port)
	if err != nil {
		t.Errorf("%s", err)
		return
	}
	v, s, err := ProbeEtcd(tetcd)
	if err != nil {
		t.Error(err)
		return
	}
	if !strings.HasPrefix(v, version) || s != secure {
		t.Errorf("discovery.ProbeEtcd(%s) => got (%s, %t) but want (%s.x.x, %t)", tetcd, v, s, version, secure)
	}
}
// TestProbeKubernetesDistro runs every row of the k8stests table through
// testK8SX, pausing briefly between runs so consecutive etcd launches on the
// same port do not collide.
func TestProbeKubernetesDistro(t *testing.T) {
	for _, tc := range k8stests {
		testK8SX(t, tc.keys, tc.version, tc.secure, tc.distro)
		time.Sleep(time.Second)
	}
}
// testK8SX seeds a local etcd2 with the given keys and verifies that
// ProbeKubernetesDistro reports the expected distro. The test etcd is always
// torn down afterwards.
// NOTE(review): the version and secure parameters are currently unused here;
// they mirror columns of the k8stests table — confirm whether they should
// drive the etcd setup (version/TLS) as in testEtcdX.
func testK8SX(t *testing.T, keys []string, version string, secure bool, distro types.KubernetesDistro) {
	defer func() {
		_ = util.EtcdDown()
	}()
	tetcd := "http://127.0.0.1:4001"
	_, err := util.LaunchEtcd2(tetcd, "4001")
	if err != nil {
		t.Errorf("%s", err)
		return
	}
	c2, err := util.NewClient2(tetcd, false)
	if err != nil {
		t.Errorf("%s", err)
		return
	}
	kapi := client.NewKeysAPI(c2)
	// Create each non-empty key so the probe has data to inspect; the empty
	// string row in the table deliberately seeds nothing.
	for _, key := range keys {
		if key != "" {
			err = util.SetKV2(kapi, key, ".")
			if err != nil {
				t.Errorf("%s", err)
				return
			}
		}
	}
	distrotype, err := ProbeKubernetesDistro(tetcd)
	if err != nil {
		t.Errorf("Can't determine Kubernetes distro: %s", err)
		return
	}
	if distrotype != distro {
		t.Errorf("discovery.ProbeKubernetesDistro(%s) with keys %s => got '%s' but want '%s'", tetcd, keys, util.LookupDistro(distrotype), util.LookupDistro(distro))
	}
}
|
package main
import "fmt"
// split divides sum into four ninths (rounded down) and the remainder,
// returning them in that order.
// FIX: the commented-out line documents the intended result as
// (sum*4/9, sum-sum*4/9), but the code returned the named results swapped
// (`return y, x`); return them in declaration order instead.
func split(sum int) (x, y int) {
	//return sum * 4 / 9, sum - sum * 4/ 9
	x = sum * 4 / 9
	y = sum - x
	return x, y
}
// main demonstrates split by printing the two parts it produces for 17.
func main() {
	a, b := split(17)
	fmt.Println(a, b)
}
|
package main
import (
"fmt"
"github.com/hoisie/redis"
"github.com/golang/glog"
"os"
"os/exec"
"strconv"
"time"
"errors"
"flag"
)
var redisClient redis.Client
var config *Config
type Config struct {
Key string
RedisDb int
RedisAddr string
RedisPassword string
LockTimeout time.Duration
}
func (conf *Config) ParseFromEnvironment() {
conf.RedisAddr = os.Getenv("REDISLOCKRUN_ADDR")
conf.RedisPassword = os.Getenv("REDISLOCKRUN_PASSWORD")
if db, err := strconv.ParseInt(os.Getenv("REDISLOCKRUN_DB"), 10, 8); err != nil {
conf.RedisDb = int(db)
}
if key := os.Getenv("REDISLOCKRUN_KEY"); len(key) > 0 {
conf.Key = key
}
}
// Run the command and catch any panics
func safelyRun(name string, args []string) {
defer func() {
if err := recover(); err != nil {
glog.Errorln(err)
unlock()
}
}()
cmd := exec.Command(name, args...)
cmd.Stderr = os.Stderr
cmd.Stdout = os.Stdout
if err := cmd.Run(); err != nil {
panic(fmt.Sprintf("Error running %s %v: %s", name, args, err))
}
}
// makeExpiryTime computes when a freshly acquired lock should expire:
// now plus the configured timeout plus a one-second grace period.
func makeExpiryTime() time.Time {
	return time.Now().Add(config.LockTimeout + time.Second)
}
// unlock deletes the lock key, but only while the stored expiry is still in
// the future — an expired lock may already have been taken over by another
// process, and deleting it then would release someone else's lock.
func unlock() {
	expire, err := getLockExpire()
	now := time.Now()
	// Only unlock if the lock is not expired
	if err == nil && now.Before(expire) {
		glog.Infoln("Deleting lock")
		redisClient.Del(config.Key)
	}
}
// unlockAndLock attempts to take over an expired lock using the classic
// GETSET pattern: atomically write our expiry and read back the previous
// one. If the previous expiry is still in the future, another process won
// the race and we must back off.
func unlockAndLock(expire time.Time) error {
	resp, err := redisClient.Getset(config.Key, []byte(strconv.FormatInt(expire.Unix(), 10)))
	if err != nil {
		return err
	}
	// Parse failure yields timestamp 0 (1970), which reads as "expired".
	unixTimeStamp, _ := strconv.ParseInt(string(resp), 10, 32)
	timestamp := time.Unix(unixTimeStamp, 0)
	now := time.Now()
	if glog.V(2) {
		glog.Infoln("My lock expire is", expire.Unix())
		glog.Infoln("Now", now.Unix())
		glog.Infoln("Current lock expire in Redis", unixTimeStamp)
	}
	// If the timestamp is not expired, then another process aquired the lock faster
	if now.Before(timestamp) {
		return errors.New("Locked")
	}
	return nil
}
// getLockExpire reads the lock key and returns the expiry timestamp stored
// in it. On a redis error it returns the current time together with the
// error. A value that fails to parse yields time.Unix(0, 0), as before.
// FIX: replaced the pointless `goto error` + label with a straightforward
// early return; behavior is unchanged.
func getLockExpire() (time.Time, error) {
	resp, err := redisClient.Get(config.Key)
	if err != nil {
		return time.Now(), err
	}
	val, _ := strconv.ParseInt(string(resp), 10, 32)
	return time.Unix(val, 0), nil
}
// init wires configuration from the environment and command line, then
// builds the shared redis client.
// NOTE(review): calling flag.Parse() inside init() is discouraged (it runs
// before other packages' flags are registered, and before TestMain in
// tests) — confirm whether this should move to main().
func init() {
	config = &Config{Key: "lock"}
	config.ParseFromEnvironment()
	redisClient = redis.Client{Addr: config.RedisAddr, Db: config.RedisDb}
	redisClient.Auth(config.RedisPassword)
	// LockTimeout is only read later (makeExpiryTime), so registering the
	// flag after the client is built is safe.
	flag.DurationVar(&config.LockTimeout, "timeout", 30*time.Minute, "Lock timeout")
	flag.Parse()
}
// main acquires (or takes over an expired) redis lock, runs the command
// given on the command line while holding it, and releases the lock when
// the command finishes.
func main() {
	var cmdArgs = flag.Args()
	var expire = makeExpiryTime()
	if flag.NArg() == 0 {
		flag.Usage()
		os.Exit(1)
	}
	glog.V(2).Infof("Config: %+v\n", config)
	// SETNX: only one process creates the key; everyone else contends below.
	if ok, _ := redisClient.Setnx(config.Key, []byte(strconv.FormatInt(expire.Unix(), 10))); !ok {
		// NOTE(review): if getLockExpire returns an error here, execution
		// falls through and runs the command WITHOUT holding the lock —
		// confirm whether that best-effort behavior is intended.
		if timeout, err := getLockExpire(); err == nil {
			if time.Now().After(timeout) {
				glog.Infoln("Lock is expired. Trying to aquire lock")
				// Lock expired, try to aquire a new one
				if err := unlockAndLock(expire); err == nil {
					glog.Infoln("Aquired lock")
				} else {
					// Failed aquiring the lock, exit
					glog.Infoln("Failed: lock is already aquired by other process")
					os.Exit(1)
				}
			} else {
				// Lock not expired
				glog.Infoln("Locked")
				os.Exit(1)
			}
		}
	}
	// Release the lock and flush logs regardless of how the run ends.
	defer unlock()
	defer glog.Flush()
	glog.Infof("Running %s %v\n", cmdArgs[0], cmdArgs[1:])
	safelyRun(cmdArgs[0], cmdArgs[1:])
	glog.Infof("Finished running %s %v", cmdArgs[0], cmdArgs[1:])
}
|
package config
import (
"reflect"
validator "github.com/go-ozzo/ozzo-validation/v4"
"github.com/pkg/errors"
)
// variablesValidator validates the optional "variables" section of the
// configuration. Only a *rawVariables value is accepted; a nil pointer is
// fine (the section is optional), a present-but-empty section is an error,
// and every variable value must pass variableValueValidator.
// FIX: the default branch called reflect.TypeOf(variables).Name(); for an
// untyped nil interface reflect.TypeOf returns nil and Name() panics.
func variablesValidator(variables interface{}) error {
	switch variables := variables.(type) {
	case *rawVariables:
		// variables section is not mandatory
		if variables == nil {
			return nil
		}
		if len(*variables) == 0 {
			return errors.New("Variables can not be both present on the configuration AND empty")
		}
		for _, variableValue := range *variables {
			if err := validator.Validate(variableValue, validator.By(variableValueValidator)); err != nil {
				return errors.WithStack(err)
			}
		}
		return nil
	default:
		t := reflect.TypeOf(variables)
		if t == nil {
			// Untyped nil never matches the *rawVariables case above.
			return errors.New("Invalid variables type. Got nil")
		}
		return errors.New("Invalid variables type. Got " + t.Name())
	}
}
// variableValueValidator rejects nil variable values; anything non-nil is
// accepted.
func variableValueValidator(variable interface{}) error {
	if variable != nil {
		return nil
	}
	return errors.New("Variable value is nil")
}
|
package archive
import (
"ms/sun/shared/golib/go_map"
"sync"
)
// masterCache maps a user id to that user's collection map, guarding the
// outer map with an RWMutex (the inner ConcurrentIntMap handles its own
// synchronization).
type masterCache struct {
	mp  map[int]*go_map.ConcurrentIntMap // per-user collections
	mux sync.RWMutex                     // protects mp
}
// newMasterCache allocates an empty masterCache with pre-sized storage for
// roughly 10000 users.
func newMasterCache() *masterCache {
	c := &masterCache{}
	c.mp = make(map[int]*go_map.ConcurrentIntMap, 10000)
	return c
}
// GetVal returns the collection map stored under key and whether it was
// present.
// BUG FIX: the original called s.mux.RLocker(), which merely RETURNS a
// Locker view of the mutex without locking anything — the map was read
// unprotected and RUnlock was called without a matching RLock (a runtime
// panic under contention). Acquire the read lock with RLock().
func (s *masterCache) GetVal(key int) (*go_map.ConcurrentIntMap, bool) {
	s.mux.RLock()
	val, ok := s.mp[key]
	s.mux.RUnlock()
	return val, ok
}
// SetVal stores val under key, replacing any existing entry.
func (s *masterCache) SetVal(key int, val *go_map.ConcurrentIntMap) {
	s.mux.Lock()
	defer s.mux.Unlock()
	s.mp[key] = val
}
// Delete removes the entry stored under key, if any.
func (s *masterCache) Delete(key int) {
	s.mux.Lock()
	defer s.mux.Unlock()
	delete(s.mp, key)
}
|
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
// Package rdb encapsulates the interactions with redis.
package rdb
import (
"encoding/json"
"errors"
"fmt"
"strconv"
"time"
"github.com/go-redis/redis/v7"
"github.com/hibiken/asynq/internal/base"
"github.com/spf13/cast"
)
var (
	// ErrNoProcessableTask indicates that there are no tasks ready to be processed.
	ErrNoProcessableTask = errors.New("no tasks are ready for processing")
	// ErrTaskNotFound indicates that a task that matches the given identifier was not found.
	ErrTaskNotFound = errors.New("could not find a task")
	// ErrDuplicateTask indicates that another task with the same unique key holds the uniqueness lock.
	ErrDuplicateTask = errors.New("task already exists")
)

// statsTTL is how long the per-day processed/failed counters are retained.
const statsTTL = 90 * 24 * time.Hour // 90 days

// RDB is a client interface to query and mutate task queues.
type RDB struct {
	client redis.UniversalClient
}

// NewRDB returns a new instance of RDB.
func NewRDB(client redis.UniversalClient) *RDB {
	return &RDB{client}
}

// Close closes the connection with redis server.
func (r *RDB) Close() error {
	return r.client.Close()
}

// Ping checks the connection with redis server.
func (r *RDB) Ping() error {
	return r.client.Ping().Err()
}
// Enqueue inserts the given task to the tail of the queue, registering the
// queue name in the set of all known queues as a side effect.
func (r *RDB) Enqueue(msg *base.TaskMessage) error {
	encoded, err := base.EncodeMessage(msg)
	if err != nil {
		return err
	}
	if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
		return err
	}
	return r.client.LPush(base.QueueKey(msg.Queue), encoded).Err()
}
// KEYS[1] -> unique key
// KEYS[2] -> asynq:{<qname>}
// ARGV[1] -> task ID
// ARGV[2] -> uniqueness lock TTL
// ARGV[3] -> task message data
//
// Returns 1 when the lock was acquired and the task enqueued, 0 when the
// lock is already held (SET NX failed).
var enqueueUniqueCmd = redis.NewScript(`
local ok = redis.call("SET", KEYS[1], ARGV[1], "NX", "EX", ARGV[2])
if not ok then
  return 0
end
redis.call("LPUSH", KEYS[2], ARGV[3])
return 1
`)

// EnqueueUnique inserts the given task if the task's uniqueness lock can be acquired.
// It returns ErrDuplicateTask if the lock cannot be acquired.
func (r *RDB) EnqueueUnique(msg *base.TaskMessage, ttl time.Duration) error {
	encoded, err := base.EncodeMessage(msg)
	if err != nil {
		return err
	}
	// Register the queue name in the set of all known queues.
	if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
		return err
	}
	res, err := enqueueUniqueCmd.Run(r.client,
		[]string{msg.UniqueKey, base.QueueKey(msg.Queue)},
		msg.ID.String(), int(ttl.Seconds()), encoded).Result()
	if err != nil {
		return err
	}
	// The script replies with an integer; anything else is unexpected.
	n, ok := res.(int64)
	if !ok {
		return fmt.Errorf("could not cast %v to int64", res)
	}
	if n == 0 {
		return ErrDuplicateTask
	}
	return nil
}
// Dequeue queries given queues in order and pops a task message
// off a queue if one exists and returns the message and deadline.
// Dequeue skips a queue if the queue is paused.
// If all queues are empty, ErrNoProcessableTask error is returned.
func (r *RDB) Dequeue(qnames ...string) (msg *base.TaskMessage, deadline time.Time, err error) {
	data, d, err := r.dequeue(qnames...)
	if err != nil {
		return nil, time.Time{}, err
	}
	if msg, err = base.DecodeMessage(data); err != nil {
		return nil, time.Time{}, err
	}
	return msg, time.Unix(d, 0), nil
}

// KEYS[1] -> asynq:{<qname>}
// KEYS[2] -> asynq:{<qname>}:paused
// KEYS[3] -> asynq:{<qname>}:active
// KEYS[4] -> asynq:{<qname>}:deadlines
// ARGV[1] -> current time in Unix time
//
// dequeueCmd checks whether a queue is paused first, before
// calling RPOPLPUSH to pop a task from the queue.
// It computes the task deadline by inspecting Timeout and Deadline fields,
// and inserts the task with deadlines set.
// Replies with a {message, deadline-score} pair, or nil when the queue is
// paused or empty.
var dequeueCmd = redis.NewScript(`
if redis.call("EXISTS", KEYS[2]) == 0 then
	local msg = redis.call("RPOPLPUSH", KEYS[1], KEYS[3])
	if msg then
		local decoded = cjson.decode(msg)
		local timeout = decoded["Timeout"]
		local deadline = decoded["Deadline"]
		local score
		if timeout ~= 0 and deadline ~= 0 then
			score = math.min(ARGV[1]+timeout, deadline)
		elseif timeout ~= 0 then
			score = ARGV[1] + timeout
		elseif deadline ~= 0 then
			score = deadline
		else
			return redis.error_reply("asynq internal error: both timeout and deadline are not set")
		end
		redis.call("ZADD", KEYS[4], score, msg)
		return {msg, score}
	end
end
return nil`)

// dequeue tries each queue in the given order and returns the first task
// popped, as its raw JSON along with its computed deadline in unix seconds.
// Paused or empty queues (redis.Nil reply) are skipped; if no queue yields
// a task, ErrNoProcessableTask is returned.
func (r *RDB) dequeue(qnames ...string) (msgjson string, deadline int64, err error) {
	for _, qname := range qnames {
		keys := []string{
			base.QueueKey(qname),
			base.PausedKey(qname),
			base.ActiveKey(qname),
			base.DeadlinesKey(qname),
		}
		res, err := dequeueCmd.Run(r.client, keys, time.Now().Unix()).Result()
		if err == redis.Nil {
			// Queue paused or empty; try the next one.
			continue
		} else if err != nil {
			return "", 0, err
		}
		// The script replies with a two-element array: {message, score}.
		data, err := cast.ToSliceE(res)
		if err != nil {
			return "", 0, err
		}
		if len(data) != 2 {
			return "", 0, fmt.Errorf("asynq: internal error: dequeue command returned %d values", len(data))
		}
		if msgjson, err = cast.ToStringE(data[0]); err != nil {
			return "", 0, err
		}
		if deadline, err = cast.ToInt64E(data[1]); err != nil {
			return "", 0, err
		}
		return msgjson, deadline, nil
	}
	return "", 0, ErrNoProcessableTask
}
// KEYS[1] -> asynq:{<qname>}:active
// KEYS[2] -> asynq:{<qname>}:deadlines
// KEYS[3] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
// ARGV[1] -> base.TaskMessage value
// ARGV[2] -> stats expiration timestamp
//
// Removes the task from the active list and deadlines zset, then bumps the
// daily processed counter, setting its expiry on first increment.
var doneCmd = redis.NewScript(`
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
  return redis.error_reply("NOT FOUND")
end
if redis.call("ZREM", KEYS[2], ARGV[1]) == 0 then
  return redis.error_reply("NOT FOUND")
end
local n = redis.call("INCR", KEYS[3])
if tonumber(n) == 1 then
	redis.call("EXPIREAT", KEYS[3], ARGV[2])
end
return redis.status_reply("OK")
`)

// KEYS[1] -> asynq:{<qname>}:active
// KEYS[2] -> asynq:{<qname>}:deadlines
// KEYS[3] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
// KEYS[4] -> unique key
// ARGV[1] -> base.TaskMessage value
// ARGV[2] -> stats expiration timestamp
// ARGV[3] -> task ID
//
// Same as doneCmd, but additionally releases the uniqueness lock — only if
// this task still owns it (GET == task ID).
var doneUniqueCmd = redis.NewScript(`
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
  return redis.error_reply("NOT FOUND")
end
if redis.call("ZREM", KEYS[2], ARGV[1]) == 0 then
  return redis.error_reply("NOT FOUND")
end
local n = redis.call("INCR", KEYS[3])
if tonumber(n) == 1 then
	redis.call("EXPIREAT", KEYS[3], ARGV[2])
end
if redis.call("GET", KEYS[4]) == ARGV[3] then
  redis.call("DEL", KEYS[4])
end
return redis.status_reply("OK")
`)

// Done removes the task from active queue to mark the task as done.
// It removes a uniqueness lock acquired by the task, if any.
func (r *RDB) Done(msg *base.TaskMessage) error {
	encoded, err := base.EncodeMessage(msg)
	if err != nil {
		return err
	}
	now := time.Now()
	expireAt := now.Add(statsTTL)
	keys := []string{
		base.ActiveKey(msg.Queue),
		base.DeadlinesKey(msg.Queue),
		base.ProcessedKey(msg.Queue, now),
	}
	args := []interface{}{encoded, expireAt.Unix()}
	// Unique tasks need the lock-release variant of the script.
	if len(msg.UniqueKey) > 0 {
		keys = append(keys, msg.UniqueKey)
		args = append(args, msg.ID.String())
		return doneUniqueCmd.Run(r.client, keys, args...).Err()
	}
	return doneCmd.Run(r.client, keys, args...).Err()
}

// KEYS[1] -> asynq:{<qname>}:active
// KEYS[2] -> asynq:{<qname>}:deadlines
// KEYS[3] -> asynq:{<qname>}
// ARGV[1] -> base.TaskMessage value
// Note: RPUSH places the message at the consuming end of the list (the
// queue is drained with RPOPLPUSH), i.e. the head of the queue.
var requeueCmd = redis.NewScript(`
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
  return redis.error_reply("NOT FOUND")
end
if redis.call("ZREM", KEYS[2], ARGV[1]) == 0 then
  return redis.error_reply("NOT FOUND")
end
redis.call("RPUSH", KEYS[3], ARGV[1])
return redis.status_reply("OK")`)

// Requeue moves the task from active queue to the specified queue.
func (r *RDB) Requeue(msg *base.TaskMessage) error {
	encoded, err := base.EncodeMessage(msg)
	if err != nil {
		return err
	}
	return requeueCmd.Run(r.client,
		[]string{base.ActiveKey(msg.Queue), base.DeadlinesKey(msg.Queue), base.QueueKey(msg.Queue)},
		encoded).Err()
}
// Schedule adds the task to the backlog queue to be processed in the
// future, at processAt. The queue name is also registered in the set of
// all known queues.
func (r *RDB) Schedule(msg *base.TaskMessage, processAt time.Time) error {
	encoded, err := base.EncodeMessage(msg)
	if err != nil {
		return err
	}
	if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
		return err
	}
	// The zset score is the unix time at which the task becomes runnable.
	entry := &redis.Z{Score: float64(processAt.Unix()), Member: encoded}
	return r.client.ZAdd(base.ScheduledKey(msg.Queue), entry).Err()
}
// KEYS[1] -> unique key
// KEYS[2] -> asynq:{<qname>}:scheduled
// ARGV[1] -> task ID
// ARGV[2] -> uniqueness lock TTL
// ARGV[3] -> score (process_at timestamp)
// ARGV[4] -> task message
//
// Returns 1 when the lock was acquired and the task scheduled, 0 otherwise.
var scheduleUniqueCmd = redis.NewScript(`
local ok = redis.call("SET", KEYS[1], ARGV[1], "NX", "EX", ARGV[2])
if not ok then
  return 0
end
redis.call("ZADD", KEYS[2], ARGV[3], ARGV[4])
return 1
`)

// ScheduleUnique adds the task to the backlog queue to be processed in the future if the uniqueness lock can be acquired.
// It returns ErrDuplicateTask if the lock cannot be acquired.
func (r *RDB) ScheduleUnique(msg *base.TaskMessage, processAt time.Time, ttl time.Duration) error {
	encoded, err := base.EncodeMessage(msg)
	if err != nil {
		return err
	}
	// Register the queue name in the set of all known queues.
	if err := r.client.SAdd(base.AllQueues, msg.Queue).Err(); err != nil {
		return err
	}
	score := float64(processAt.Unix())
	res, err := scheduleUniqueCmd.Run(r.client,
		[]string{msg.UniqueKey, base.ScheduledKey(msg.Queue)},
		msg.ID.String(), int(ttl.Seconds()), score, encoded).Result()
	if err != nil {
		return err
	}
	// The script replies with an integer; anything else is unexpected.
	n, ok := res.(int64)
	if !ok {
		return fmt.Errorf("could not cast %v to int64", res)
	}
	if n == 0 {
		return ErrDuplicateTask
	}
	return nil
}
// KEYS[1] -> asynq:{<qname>}:active
// KEYS[2] -> asynq:{<qname>}:deadlines
// KEYS[3] -> asynq:{<qname>}:retry
// KEYS[4] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
// KEYS[5] -> asynq:{<qname>}:failed:<yyyy-mm-dd>
// ARGV[1] -> base.TaskMessage value to remove from base.ActiveQueue queue
// ARGV[2] -> base.TaskMessage value to add to Retry queue
// ARGV[3] -> retry_at UNIX timestamp
// ARGV[4] -> stats expiration timestamp
//
// Moves the task from active to the retry zset and bumps both the daily
// processed and failed counters, setting expiry on first increment.
var retryCmd = redis.NewScript(`
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
  return redis.error_reply("NOT FOUND")
end
if redis.call("ZREM", KEYS[2], ARGV[1]) == 0 then
  return redis.error_reply("NOT FOUND")
end
redis.call("ZADD", KEYS[3], ARGV[3], ARGV[2])
local n = redis.call("INCR", KEYS[4])
if tonumber(n) == 1 then
	redis.call("EXPIREAT", KEYS[4], ARGV[4])
end
local m = redis.call("INCR", KEYS[5])
if tonumber(m) == 1 then
	redis.call("EXPIREAT", KEYS[5], ARGV[4])
end
return redis.status_reply("OK")`)

// Retry moves the task from active to retry queue, incrementing retry count
// and assigning error message to the task message.
func (r *RDB) Retry(msg *base.TaskMessage, processAt time.Time, errMsg string) error {
	// ARGV[1] must be the exact bytes currently stored, so encode the
	// unmodified message for removal and a mutated copy for insertion.
	msgToRemove, err := base.EncodeMessage(msg)
	if err != nil {
		return err
	}
	modified := *msg
	modified.Retried++
	modified.ErrorMsg = errMsg
	msgToAdd, err := base.EncodeMessage(&modified)
	if err != nil {
		return err
	}
	now := time.Now()
	processedKey := base.ProcessedKey(msg.Queue, now)
	failedKey := base.FailedKey(msg.Queue, now)
	expireAt := now.Add(statsTTL)
	return retryCmd.Run(r.client,
		[]string{base.ActiveKey(msg.Queue), base.DeadlinesKey(msg.Queue), base.RetryKey(msg.Queue), processedKey, failedKey},
		msgToRemove, msgToAdd, processAt.Unix(), expireAt.Unix()).Err()
}

const (
	maxArchiveSize           = 10000 // maximum number of tasks in archive
	archivedExpirationInDays = 90    // number of days before an archived task gets deleted permanently
)
// KEYS[1] -> asynq:{<qname>}:active
// KEYS[2] -> asynq:{<qname>}:deadlines
// KEYS[3] -> asynq:{<qname>}:archived
// KEYS[4] -> asynq:{<qname>}:processed:<yyyy-mm-dd>
// KEYS[5] -> asynq:{<qname>}:failed:<yyyy-mm-dd>
// ARGV[1] -> base.TaskMessage value to remove
// ARGV[2] -> base.TaskMessage value to add
// ARGV[3] -> died_at UNIX timestamp
// ARGV[4] -> cutoff timestamp (e.g., 90 days ago)
// ARGV[5] -> max number of tasks in archive (e.g., 100)
// ARGV[6] -> stats expiration timestamp
//
// Moves the task into the archive zset, trims the archive both by age
// (score < cutoff) and by size (keep newest ARGV[5]), then bumps the daily
// processed/failed counters.
var archiveCmd = redis.NewScript(`
if redis.call("LREM", KEYS[1], 0, ARGV[1]) == 0 then
  return redis.error_reply("NOT FOUND")
end
if redis.call("ZREM", KEYS[2], ARGV[1]) == 0 then
  return redis.error_reply("NOT FOUND")
end
redis.call("ZADD", KEYS[3], ARGV[3], ARGV[2])
redis.call("ZREMRANGEBYSCORE", KEYS[3], "-inf", ARGV[4])
redis.call("ZREMRANGEBYRANK", KEYS[3], 0, -ARGV[5])
local n = redis.call("INCR", KEYS[4])
if tonumber(n) == 1 then
	redis.call("EXPIREAT", KEYS[4], ARGV[6])
end
local m = redis.call("INCR", KEYS[5])
if tonumber(m) == 1 then
	redis.call("EXPIREAT", KEYS[5], ARGV[6])
end
return redis.status_reply("OK")`)

// Archive sends the given task to archive, attaching the error message to the task.
// It also trims the archive by timestamp and set size.
func (r *RDB) Archive(msg *base.TaskMessage, errMsg string) error {
	// ARGV[1] must match the stored bytes exactly; mutate only the copy.
	msgToRemove, err := base.EncodeMessage(msg)
	if err != nil {
		return err
	}
	modified := *msg
	modified.ErrorMsg = errMsg
	msgToAdd, err := base.EncodeMessage(&modified)
	if err != nil {
		return err
	}
	now := time.Now()
	limit := now.AddDate(0, 0, -archivedExpirationInDays).Unix() // 90 days ago
	processedKey := base.ProcessedKey(msg.Queue, now)
	failedKey := base.FailedKey(msg.Queue, now)
	expireAt := now.Add(statsTTL)
	return archiveCmd.Run(r.client,
		[]string{base.ActiveKey(msg.Queue), base.DeadlinesKey(msg.Queue), base.ArchivedKey(msg.Queue), processedKey, failedKey},
		msgToRemove, msgToAdd, now.Unix(), limit, maxArchiveSize, expireAt.Unix()).Err()
}
// CheckAndEnqueue checks for scheduled/retry tasks for the given queues
// and enqueues any tasks that are ready to be processed.
func (r *RDB) CheckAndEnqueue(qnames ...string) error {
	for _, qname := range qnames {
		if err := r.forwardAll(base.ScheduledKey(qname), base.QueueKey(qname)); err != nil {
			return err
		}
		if err := r.forwardAll(base.RetryKey(qname), base.QueueKey(qname)); err != nil {
			return err
		}
	}
	return nil
}

// KEYS[1] -> source queue (e.g. asynq:{<qname>:scheduled or asynq:{<qname>}:retry})
// KEYS[2] -> destination queue (e.g. asynq:{<qname>})
// ARGV[1] -> current unix time
// Note: Script moves tasks up to 100 at a time to keep the runtime of script short.
var forwardCmd = redis.NewScript(`
local msgs = redis.call("ZRANGEBYSCORE", KEYS[1], "-inf", ARGV[1], "LIMIT", 0, 100)
for _, msg in ipairs(msgs) do
	redis.call("LPUSH", KEYS[2], msg)
	redis.call("ZREM", KEYS[1], msg)
end
return table.getn(msgs)`)

// forward moves tasks with a score less than the current unix time
// from the src zset to the dst list. It returns the number of tasks moved.
func (r *RDB) forward(src, dst string) (int, error) {
	now := float64(time.Now().Unix())
	res, err := forwardCmd.Run(r.client, []string{src, dst}, now).Result()
	if err != nil {
		return 0, err
	}
	return cast.ToInt(res), nil
}
// forwardAll repeatedly moves ready tasks (score below the current unix
// time) from the src zset to the dst list until a pass moves nothing.
func (r *RDB) forwardAll(src, dst string) error {
	for {
		n, err := r.forward(src, dst)
		if err != nil {
			return err
		}
		if n == 0 {
			return nil
		}
	}
}
// ListDeadlineExceeded returns a list of task messages that have exceeded the deadline from the given queues.
func (r *RDB) ListDeadlineExceeded(deadline time.Time, qnames ...string) ([]*base.TaskMessage, error) {
	var msgs []*base.TaskMessage
	// Deadlines are stored as zset scores; select everything at or before
	// the given deadline.
	opt := &redis.ZRangeBy{
		Min: "-inf",
		Max: strconv.FormatInt(deadline.Unix(), 10),
	}
	for _, qname := range qnames {
		res, err := r.client.ZRangeByScore(base.DeadlinesKey(qname), opt).Result()
		if err != nil {
			return nil, err
		}
		for _, s := range res {
			msg, err := base.DecodeMessage(s)
			if err != nil {
				return nil, err
			}
			msgs = append(msgs, msg)
		}
	}
	return msgs, nil
}
// KEYS[1] -> asynq:servers:{<host:pid:sid>}
// KEYS[2] -> asynq:workers:{<host:pid:sid>}
// ARGV[1] -> TTL in seconds
// ARGV[2] -> server info
// ARGV[3:] -> alternate key-value pair of (worker id, worker data)
// Note: Add key to ZSET with expiration time as score.
// ref: https://github.com/antirez/redis/issues/135#issuecomment-2361996
var writeServerStateCmd = redis.NewScript(`
redis.call("SETEX", KEYS[1], ARGV[1], ARGV[2])
redis.call("DEL", KEYS[2])
for i = 3, table.getn(ARGV)-1, 2 do
	redis.call("HSET", KEYS[2], ARGV[i], ARGV[i+1])
end
redis.call("EXPIRE", KEYS[2], ARGV[1])
return redis.status_reply("OK")`)

// WriteServerState writes server state data to redis with expiration set to the value ttl.
// The server/worker keys are also registered in the AllServers/AllWorkers
// zsets with the expiry time as the score.
func (r *RDB) WriteServerState(info *base.ServerInfo, workers []*base.WorkerInfo, ttl time.Duration) error {
	bytes, err := json.Marshal(info)
	if err != nil {
		return err
	}
	exp := time.Now().Add(ttl).UTC()
	args := []interface{}{ttl.Seconds(), bytes} // args to the lua script
	for _, w := range workers {
		bytes, err := json.Marshal(w)
		if err != nil {
			continue // skip bad data
		}
		args = append(args, w.ID, bytes)
	}
	skey := base.ServerInfoKey(info.Host, info.PID, info.ServerID)
	wkey := base.WorkersKey(info.Host, info.PID, info.ServerID)
	if err := r.client.ZAdd(base.AllServers, &redis.Z{Score: float64(exp.Unix()), Member: skey}).Err(); err != nil {
		return err
	}
	if err := r.client.ZAdd(base.AllWorkers, &redis.Z{Score: float64(exp.Unix()), Member: wkey}).Err(); err != nil {
		return err
	}
	return writeServerStateCmd.Run(r.client, []string{skey, wkey}, args...).Err()
}

// KEYS[1] -> asynq:servers:{<host:pid:sid>}
// KEYS[2] -> asynq:workers:{<host:pid:sid>}
var clearServerStateCmd = redis.NewScript(`
redis.call("DEL", KEYS[1])
redis.call("DEL", KEYS[2])
return redis.status_reply("OK")`)

// ClearServerState deletes server state data from redis, including the
// registry entries in the AllServers/AllWorkers zsets.
func (r *RDB) ClearServerState(host string, pid int, serverID string) error {
	skey := base.ServerInfoKey(host, pid, serverID)
	wkey := base.WorkersKey(host, pid, serverID)
	if err := r.client.ZRem(base.AllServers, skey).Err(); err != nil {
		return err
	}
	if err := r.client.ZRem(base.AllWorkers, wkey).Err(); err != nil {
		return err
	}
	return clearServerStateCmd.Run(r.client, []string{skey, wkey}).Err()
}

// KEYS[1] -> asynq:schedulers:{<schedulerID>}
// ARGV[1] -> TTL in seconds
// ARGV[2:] -> scheduler entries
var writeSchedulerEntriesCmd = redis.NewScript(`
redis.call("DEL", KEYS[1])
for i = 2, #ARGV do
	redis.call("LPUSH", KEYS[1], ARGV[i])
end
redis.call("EXPIRE", KEYS[1], ARGV[1])
return redis.status_reply("OK")`)

// WriteSchedulerEntries writes scheduler entries data to redis with expiration set to the value ttl.
// The entries key is also registered in the AllSchedulers zset with the
// expiry time as the score.
func (r *RDB) WriteSchedulerEntries(schedulerID string, entries []*base.SchedulerEntry, ttl time.Duration) error {
	args := []interface{}{ttl.Seconds()}
	for _, e := range entries {
		bytes, err := json.Marshal(e)
		if err != nil {
			continue // skip bad data
		}
		args = append(args, bytes)
	}
	exp := time.Now().Add(ttl).UTC()
	key := base.SchedulerEntriesKey(schedulerID)
	err := r.client.ZAdd(base.AllSchedulers, &redis.Z{Score: float64(exp.Unix()), Member: key}).Err()
	if err != nil {
		return err
	}
	return writeSchedulerEntriesCmd.Run(r.client, []string{key}, args...).Err()
}
// ClearSchedulerEntries deletes scheduler entries data from redis, removing
// the key from the AllSchedulers registry first.
// FIX: the parameter was misspelled `scheduelrID`.
func (r *RDB) ClearSchedulerEntries(schedulerID string) error {
	key := base.SchedulerEntriesKey(schedulerID)
	if err := r.client.ZRem(base.AllSchedulers, key).Err(); err != nil {
		return err
	}
	return r.client.Del(key).Err()
}
// CancelationPubSub returns a pubsub for cancelation messages.
func (r *RDB) CancelationPubSub() (*redis.PubSub, error) {
	pubsub := r.client.Subscribe(base.CancelChannel)
	// Receive waits for the subscription confirmation before returning the
	// pubsub, so callers never miss early messages.
	_, err := pubsub.Receive()
	if err != nil {
		return nil, err
	}
	return pubsub, nil
}

// PublishCancelation publishes a cancelation message to all subscribers.
// The message is the ID for the task to be canceled.
func (r *RDB) PublishCancelation(id string) error {
	return r.client.Publish(base.CancelChannel, id).Err()
}

// KEYS[1] -> asynq:scheduler_history:<entryID>
// ARGV[1] -> enqueued_at timestamp
// ARGV[2] -> serialized SchedulerEnqueueEvent data
// ARGV[3] -> max number of events to be persisted
//
// Trims the history to the newest ARGV[3]-1 events before adding the new one.
var recordSchedulerEnqueueEventCmd = redis.NewScript(`
redis.call("ZREMRANGEBYRANK", KEYS[1], 0, -ARGV[3])
redis.call("ZADD", KEYS[1], ARGV[1], ARGV[2])
return redis.status_reply("OK")`)

// Maximum number of enqueue events to store per entry.
const maxEvents = 1000

// RecordSchedulerEnqueueEvent records the time when the given task was enqueued.
func (r *RDB) RecordSchedulerEnqueueEvent(entryID string, event *base.SchedulerEnqueueEvent) error {
	key := base.SchedulerHistoryKey(entryID)
	data, err := json.Marshal(event)
	if err != nil {
		return err
	}
	return recordSchedulerEnqueueEventCmd.Run(
		r.client, []string{key}, event.EnqueuedAt.Unix(), data, maxEvents).Err()
}
// ClearSchedulerHistory deletes the enqueue event history for the given
// scheduler entry.
func (r *RDB) ClearSchedulerHistory(entryID string) error {
	return r.client.Del(base.SchedulerHistoryKey(entryID)).Err()
}
|
package main
import (
"fmt"
"net/url"
"os/exec"
"strings"
)
// Parse builds a webHook payload describing the git repository at path:
// its HEAD commit, current branch (as a ref string), and the "owner/repo"
// slug derived from the origin remote. Any git failure aborts with an
// empty webHook and the error.
func Parse(path string) (wh webHook, err error) {
	wh.Commits = []string{}
	wh.HeadCommit.ID, err = commitID(path)
	if err != nil {
		return webHook{}, err
	}
	branch, err := branch(path)
	if err != nil {
		return webHook{}, err
	}
	wh.Ref = fmt.Sprintf("ref/head/%s", branch)
	repo, err := repository(path)
	if err != nil {
		return webHook{}, err
	}
	wh.Repository.FullName = repo
	return
}
// commitID returns the HEAD commit hash of the git repository at path.
func commitID(path string) (string, error) {
	c := exec.Command("git", "rev-parse", "HEAD")
	c.Dir = path
	raw, err := c.CombinedOutput()
	return strings.TrimSpace(string(raw)), err
}
// branch returns the name of the branch currently checked out at path.
func branch(path string) (string, error) {
	cmd := exec.Command("git", "rev-parse", "--abbrev-ref", "HEAD")
	cmd.Dir = path
	raw, err := cmd.CombinedOutput()
	name := strings.TrimSpace(string(raw))
	return name, err
}
// repository returns the "owner/name" of the origin remote configured for
// the repository at path.
func repository(path string) (string, error) {
	cmd := exec.Command("git", "remote", "get-url", "origin")
	cmd.Dir = path
	out, err := cmd.CombinedOutput()
	if err != nil {
		return "", err
	}
	origin := string(out)
	return parseOrigin(origin)
}
// parseOrigin extracts the "owner/name" repository path from a git origin
// URL. It handles URL-style remotes (https://host/owner/name.git) and
// scp-style remotes (git@host:owner/name.git; these fail url.Parse because
// their first path segment contains a colon).
func parseOrigin(origin string) (string, error) {
	remote := strings.TrimSuffix(strings.TrimSpace(origin), ".git")
	// URL-style remote: the repository is the path minus its leading "/".
	// The u.Path != "" guard matters: the original indexed u.Path[1:]
	// unconditionally and panicked on remotes that parse to an empty path
	// (it also ran a no-op SplitAfterN with n=1, removed here).
	if u, err := url.Parse(remote); err == nil && u.Path != "" {
		return strings.TrimPrefix(u.Path, "/"), nil
	}
	// scp-style remote (user@host:owner/name): take everything after the
	// colon; with no colon present, i is -1 and the whole remote is kept.
	i := strings.Index(remote, ":")
	return remote[i+1:], nil
}
|
package submerge
import (
"fmt"
"sort"
"strconv"
"strings"
"time"
)
// subLine is a single subtitle entry in SRT form: a sequence number, a
// "start --> end" time-range line, and up to two lines of text.
type subLine struct {
	// Num is the sequence number within the subtitle file.
	Num int
	// Time is the "HH:MM:SS,mmm --> HH:MM:SS,mmm" range line.
	Time string
	// Text1 is the first text line.
	Text1 string
	// Text2 is the optional second text line ("" when absent).
	Text2 string
}
// isAfter reports whether s starts at or after sub2. Zero-padded SRT
// timestamps sort lexically in chronological order, so the comparison is
// done on the raw time-range strings. A nil sub2 is never after; a nil s
// is always after; equal times count as after.
func (s *subLine) isAfter(sub2 *subLine) bool {
	if sub2 == nil {
		return false
	}
	if s == nil {
		return true
	}
	ordered := []string{s.Time, sub2.Time}
	sort.Strings(ordered)
	// s is "after" when its time sorts last.
	return ordered[1] == s.Time
}
// String renders the subtitle entry in a compact single-line debug form.
func (s *subLine) String() string {
	const layout = "[%d] %s (( %s | %s ))"
	return fmt.Sprintf(layout, s.Num, s.Time, s.Text1, s.Text2)
}
// toFormat renders the entry in SRT file format: the number line, the time
// line, one or two text lines, then a blank separator line.
func (s *subLine) toFormat() string {
	// fmt.Fprintf writes straight into the builder; the original wrapped
	// every line in Write([]byte(fmt.Sprintf(...))), allocating twice.
	var b strings.Builder
	fmt.Fprintf(&b, "%d\n", s.Num)
	fmt.Fprintf(&b, "%s\n", s.Time)
	fmt.Fprintf(&b, "%s\n", s.Text1)
	if s.Text2 != "" {
		fmt.Fprintf(&b, "%s\n", s.Text2)
	}
	b.WriteString("\n")
	return b.String()
}
// addColor wraps each non-empty text line in an HTML <font> tag carrying
// the given color.
func (s *subLine) addColor(color string) {
	const tag = `<font color="%s">%s</font>`
	if s.Text1 != "" {
		s.Text1 = fmt.Sprintf(tag, color, s.Text1)
	}
	if s.Text2 != "" {
		s.Text2 = fmt.Sprintf(tag, color, s.Text2)
	}
}
// addDelay shifts this subtitle's start and end times by the given offset.
// When delayText is true the subtitle is moved later; when false the offset
// is subtracted. Times that would become negative are clamped to zero.
func (s *subLine) addDelay(hours, mins, secs, ms int64, delayText bool) {
	// Time line looks like: 00:03:35,954 --> 00:03:37,834
	times := strings.Split(s.Time, " --> ")
	if len(times) < 2 {
		// NOTE(review): malformed entry — this dumps the entry for
		// debugging and then still indexes times[1] below, which panics.
		// Kept as-is to preserve behavior; consider returning an error.
		fmt.Println(s.Time)
		fmt.Println(s.Num)
		fmt.Println(s.Text1)
		fmt.Println(s.Text2)
	}
	hours1, mins1, secs1, ms1 := s.timeAsInts(times[0])
	hours2, mins2, secs2, ms2 := s.timeAsInts(times[1])
	// Start and end times in milliseconds.
	m1 := ms1 + (secs1 * 1000) + (mins1 * 1000 * 60) + (hours1 * 1000 * 60 * 60)
	m2 := ms2 + (secs2 * 1000) + (mins2 * 1000 * 60) + (hours2 * 1000 * 60 * 60)
	// The same offset applies to both ends; the original computed this
	// identical expression twice (d1 and d2).
	d := ms + (secs * 1000) + (mins * 1000 * 60) + (hours * 1000 * 60 * 60)
	if !delayText {
		d = -d
	}
	// Milliseconds -> time.Duration (nanoseconds), clamped at zero.
	t1 := time.Duration((m1 + d) * 1000 * 1000)
	t2 := time.Duration((m2 + d) * 1000 * 1000)
	if t1 < 0 {
		t1 = 0
	}
	if t2 < 0 {
		t2 = 0
	}
	s.setNewTimes(t1, t2)
}
// setNewTimes formats t1 and t2 as an SRT "start --> end" line and stores
// it in s.Time.
func (s *subLine) setNewTimes(t1, t2 time.Duration) {
	// split decomposes a duration into hour/minute/second/millisecond parts.
	split := func(d time.Duration) (h, m, sec, ms time.Duration) {
		h = d / time.Hour
		d -= h * time.Hour
		m = d / time.Minute
		d -= m * time.Minute
		sec = d / time.Second
		d -= sec * time.Second
		ms = d / time.Millisecond
		return
	}
	h1, m1, s1, ms1 := split(t1)
	h2, m2, s2, ms2 := split(t2)
	s.Time = fmt.Sprintf("%02d:%02d:%02d,%03d --> %02d:%02d:%02d,%03d",
		h1, m1, s1, ms1, h2, m2, s2, ms2)
}
// timeAsInts parses an SRT timestamp ("HH:MM:SS,mmm") into its numeric
// components. It panics on malformed input, matching the original behavior.
func (s *subLine) timeAsInts(stamp string) (hours, mins, secs, ms int64) {
	// The parameter was previously named "time", shadowing the time package.
	parts := strings.Split(stamp, ",")
	hoursMinsSecs := strings.Split(parts[0], ":")
	// parse panics on error, collapsing the four duplicated err checks.
	parse := func(v string) int64 {
		n, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			panic(err)
		}
		return n
	}
	ms = parse(parts[1])
	hours = parse(hoursMinsSecs[0])
	mins = parse(hoursMinsSecs[1])
	secs = parse(hoursMinsSecs[2])
	return
}
// adjustNums renumbers the subtitle lines to match their slice position,
// reporting (and skipping) any nil entries.
func adjustNums(lines []*subLine) {
	for i := range lines {
		if lines[i] == nil {
			fmt.Printf("\n Missing: %d", i)
			continue
		}
		lines[i].Num = i
	}
}
|
// -------------------------------------------------------------------
//
// salter: Tool for bootstrap salt clusters in EC2
//
// Copyright (c) 2013-2014 Orchestrate, Inc. All Rights Reserved.
//
// This file is provided to you under the Apache License,
// Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// -------------------------------------------------------------------
package main
import (
"bytes"
"code.google.com/p/go.crypto/ssh"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"fmt"
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/ec2"
)
// Node describes one EC2 instance managed by salter, combining its TOML
// configuration with its live AWS state.
type Node struct {
	// The hostname that the node will be given.
	Name string `toml:"-"`

	// A list of roles that should be defined in the salt configuration.
	Roles []string `toml:"roles"`

	// The number of nodes of this type that should be created. Each node
	// will have the "id" name from toml with a number appended to the end
	// of the name.
	Count uint `toml:"count"`

	// Tags attached to these nodes.
	Tags TagMap `toml:"tags"`

	// Allow all the same values found in the AwsConfig value.
	AwsConfig

	// Live EC2 instance state, populated by Update(); nil when no matching
	// instance is running.
	Instance *ec2.Instance `toml:"-"`

	// Cached SSH connection, populated lazily by SshOpen().
	SshClient *ssh.Client `toml:"-"`
}
// Conn returns an EC2 API client bound to this node's configured region.
func (node *Node) Conn() *ec2.EC2 {
	region := aws.Regions[node.RegionId]
	return ec2.New(G_CONFIG.AwsAuth, region)
}
// Update refreshes node.Instance with live state from AWS. On success,
// node.Instance is nil when no matching live instance exists, or points at
// the single matching instance; more than one match is an error.
func (node *Node) Update() error {
	// Clear out current instance info
	node.Instance = nil

	// Use the node name as our primary filter; instance-state-code 0 and 16
	// restrict the query to pending/running instances.
	filter := ec2.NewFilter()
	filter.Add("tag:Name", node.Name)
	filter.Add("instance-state-code", "0", "16")

	response, err := node.Conn().Instances(nil, filter)
	if err != nil {
		return err
	}

	if len(response.Reservations) == 0 {
		// Nothing was returned in the list; it's not running
		return nil
	}

	if len(response.Reservations) > 1 || len(response.Reservations[0].Instances) > 1 {
		// More than one reservation or instances that match our filter;
		// this means something is bjorked and manual intervention is required
		return fmt.Errorf("Unexpected number of reservations/instances for %s",
			node.Name)
	}

	node.Instance = &(response.Reservations[0].Instances[0])
	return nil
}
// IsRunning reports whether the node has a live instance on AWS whose EC2
// state code is below 32 (pending or running).
func (node *Node) IsRunning() bool {
	// Early return instead of else-after-return (Go idiom).
	if node.Instance == nil {
		return false
	}
	return node.Instance.State.Code < 32
}
// Start launches this node on AWS if it is not already running: it verifies
// the SSH key pair and security group exist in the region, generates the
// userdata bootstrap script (pointing the minion at masterIp), runs the
// instance, and applies the configured tags. On success node.Instance is
// set to the newly launched instance.
func (node *Node) Start(masterIp string) error {
	// If node is already running, noop
	if node.IsRunning() {
		return nil
	}

	// Verify that we have a key available to this node
	if !RegionKeyExists(node.KeyName, node.RegionId) {
		return fmt.Errorf("key %s is not available locally",
			node.KeyName)
	}

	// Verify that the node's security group exists
	if !RegionSGExists(node.SGroup, node.RegionId) {
		return fmt.Errorf("security group %s is not available", node.SGroup)
	}

	// Generate the userdata script for this node
	userData, err := G_CONFIG.generateUserData(node.Name, node.Roles, masterIp)
	if err != nil {
		return err
	}

	// We only permit a single security group right now per-node
	sgroups := []ec2.SecurityGroup{RegionSG(node.SGroup, node.RegionId).SecurityGroup}

	runInst := ec2.RunInstances{
		ImageId:        node.Ami,
		KeyName:        node.KeyName,
		InstanceType:   node.Flavor,
		UserData:       userData,
		SecurityGroups: sgroups,
		AvailZone:      node.Zone,
		BlockDevices:   deviceMappings(node.Flavor)}

	runResp, err := node.Conn().RunInstances(&runInst)
	if err != nil {
		return fmt.Errorf("launch failed: %+v\n", err)
	}

	node.Instance = &(runResp.Instances[0])
	printf("%s (%s): started\n", node.Name, node.Instance.InstanceId)

	// Instance is now running; apply any tags
	return node.ApplyTags()
}
// Terminate destroys the node's EC2 instance and clears node.Instance.
// Terminating a node that is not running is an error.
func (node *Node) Terminate() error {
	if !node.IsRunning() {
		return fmt.Errorf("node not running")
	}
	instID := node.Instance.InstanceId
	if _, err := node.Conn().TerminateInstances([]string{instID}); err != nil {
		return err
	}
	printf("%s (%s): terminated\n", node.Name, instID)
	node.Instance = nil
	return nil
}
// SshOpen ensures node.SshClient holds a live SSH connection to the node,
// dialing lazily on first use. The node must be running.
func (node *Node) SshOpen() error {
	if !node.IsRunning() {
		return fmt.Errorf("node not running")
	}
	if node.SshClient != nil {
		return nil // already connected
	}
	config := ssh.ClientConfig{
		User: G_CONFIG.Aws.Username,
		Auth: PublicKeyAuth(RegionKey(node.KeyName, node.RegionId)),
	}
	client, err := ssh.Dial("tcp", node.Instance.DNSName+":22", &config)
	if err != nil {
		return err
	}
	node.SshClient = client
	return nil
}
// SshClose tears down the cached SSH connection, if any.
func (node *Node) SshClose() {
	if node.SshClient == nil {
		return
	}
	node.SshClient.Close()
	node.SshClient = nil
}
// SshRun executes cmd on the node over SSH, connecting first if no cached
// client exists.
func (node *Node) SshRun(cmd string) error {
	if node.SshClient == nil {
		if err := node.SshOpen(); err != nil {
			return err
		}
	}
	session, err := node.SshClient.NewSession()
	if err != nil {
		return fmt.Errorf("failed to create session - %+v", err)
	}
	defer session.Close()
	debugf("%s: %s\n", node.Name, cmd)
	return session.Run(cmd)
}
// SshRunOutput executes cmd on the node over SSH and returns its combined
// stdout/stderr, connecting first if no cached client exists.
func (node *Node) SshRunOutput(cmd string) ([]byte, error) {
	if node.SshClient == nil {
		if err := node.SshOpen(); err != nil {
			return nil, err
		}
	}
	session, err := node.SshClient.NewSession()
	if err != nil {
		return nil, fmt.Errorf("failed to create session - %+v", err)
	}
	defer session.Close()
	debugf("%s: %s\n", node.Name, cmd)
	return session.CombinedOutput(cmd)
}
// SshUpload writes data to remoteFilename on the node by piping it through
// a sudo'd cat, connecting first if no cached client exists.
func (node *Node) SshUpload(remoteFilename string, data []byte) error {
	if node.SshClient == nil {
		if err := node.SshOpen(); err != nil {
			return err
		}
	}
	session, err := node.SshClient.NewSession()
	if err != nil {
		return fmt.Errorf("failed to create session - %+v", err)
	}
	defer session.Close()
	session.Stdin = bytes.NewReader(data)
	cmd := fmt.Sprintf("/usr/bin/sudo sh -c '/bin/cat > %s'", remoteFilename)
	err = session.Run(cmd)
	debugf("%s: uploaded data to %s; error: %+v\n", node.Name, remoteFilename, err)
	return err
}
// ec2Tags converts the node's configured tags (plus the mandatory Name tag)
// into the ec2.Tag slice the AWS API expects.
func (node *Node) ec2Tags() []ec2.Tag {
	// Pre-size for all tags; drop the redundant element type from the
	// composite literal (gofmt -s).
	result := make([]ec2.Tag, 0, len(node.Tags)+1)
	result = append(result, ec2.Tag{Key: "Name", Value: node.Name})
	for key, value := range node.Tags {
		result = append(result, ec2.Tag{Key: key, Value: value})
	}
	return result
}
// ApplyTags pushes the node's tags to its running EC2 instance.
func (node *Node) ApplyTags() error {
	_, err := node.Conn().CreateTags([]string{node.Instance.InstanceId}, node.ec2Tags())
	if err != nil {
		// Go error strings: lowercase, no trailing newline (the original
		// embedded "\n" inside the error value).
		return fmt.Errorf("applying tags to %s: %+v", node.Name, err)
	}
	return nil
}
// GenSaltKey generates an RSA key pair of the given size for use as a salt
// minion key, returning the PEM-encoded public key and private key.
func (node *Node) GenSaltKey(bits int) ([]byte, []byte, error) {
	privKey, err := rsa.GenerateKey(rand.Reader, bits)
	if err != nil {
		return nil, nil, err
	}

	// Encode private key as PKCS-1 PEM
	privKeyStr := PemEncode(x509.MarshalPKCS1PrivateKey(privKey), "RSA PRIVATE KEY")

	// Encode public key as PKIX PEM. The original discarded this error and
	// could silently return a PEM block with an empty payload; propagate it.
	pubKeyBin, err := x509.MarshalPKIXPublicKey(&privKey.PublicKey)
	if err != nil {
		return nil, nil, err
	}
	pubKeyStr := PemEncode(pubKeyBin, "PUBLIC KEY")
	return pubKeyStr, privKeyStr, nil
}
// PemEncode wraps data in a PEM block of the given type and returns the
// encoded bytes.
func PemEncode(data []byte, header string) []byte {
	block := &pem.Block{Type: header, Bytes: data}
	return pem.EncodeToMemory(block)
}
// deviceMappings returns the ephemeral block-device mappings generated for
// the given EC2 instance flavor; unknown flavors get no mappings.
func deviceMappings(flavor string) []ec2.BlockDeviceMapping {
	// Flavors grouped by how many ephemeral devices they are assigned.
	switch flavor {
	case "m2.xlarge", "m2.2xlarge":
		return deviceMappingGenerator(1)
	case "m1.large", "m2.4xlarge", "hi1.4xlarge", "cr1.8xlarge", "cg1.4xlarge":
		return deviceMappingGenerator(2)
	case "m1.xlarge", "c1.xlarge", "cc2.8xlarge":
		return deviceMappingGenerator(4)
	case "hs1.8xlarge":
		return deviceMappingGenerator(24)
	default:
		return []ec2.BlockDeviceMapping{}
	}
}
// deviceMappingGenerator builds count ephemeral device mappings named
// /dev/sdb1, /dev/sdc1, ... mapped to ephemeral0, ephemeral1, ...; count is
// clamped to the range [0, 24].
func deviceMappingGenerator(count int) []ec2.BlockDeviceMapping {
	switch {
	case count < 0:
		count = 0
	case count > 24:
		count = 24
	}
	mappings := make([]ec2.BlockDeviceMapping, 0, count)
	for i := 0; i < count; i++ {
		mappings = append(mappings, ec2.BlockDeviceMapping{
			DeviceName:  fmt.Sprintf("/dev/sd%c1", 'b'+i),
			VirtualName: fmt.Sprintf("ephemeral%d", i),
		})
	}
	return mappings
}
|
package internal
import (
"context"
"go.mongodb.org/mongo-driver/bson/bsoncodec"
"go.mongodb.org/mongo-driver/mongo/readconcern"
"go.mongodb.org/mongo-driver/mongo/readpref"
"go.mongodb.org/mongo-driver/mongo/writeconcern"
"time"
"github.com/5xxxx/pie/driver"
"github.com/5xxxx/pie/schemas"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo/options"
)
// aggregate builds and executes a MongoDB aggregation pipeline through the
// pie driver, accumulating stages and options fluently.
type aggregate struct {
	// db overrides the default database name when non-empty.
	db string
	// doc, when set via Collection(), determines the target collection.
	doc interface{}
	// engine is the underlying pie client used to resolve collections.
	engine driver.Client
	// pipeline is the accumulated list of aggregation stages.
	pipeline bson.A
	// opts are aggregation options applied to each run.
	opts []*options.AggregateOptions
	// collOpts are collection-level options (read/write concern, etc.).
	collOpts []*options.CollectionOptions
}
// NewAggregate returns a new Aggregate builder backed by the given client.
func NewAggregate(engine driver.Client) driver.Aggregate {
	return &aggregate{engine: engine}
}
// One runs the aggregation and decodes the first resulting document into
// result. It returns mongo.ErrNoDocuments when the pipeline yields nothing.
// An optional context may be supplied; context.Background() is used
// otherwise.
func (a *aggregate) One(result interface{}, ctx ...context.Context) error {
	c := context.Background()
	if len(ctx) > 0 {
		c = ctx[0]
	}

	// Resolve the target collection from the configured doc, falling back
	// to the result value's type.
	var coll *mongo.Collection
	var err error
	if a.doc != nil {
		coll, err = a.collectionForStruct(a.doc)
	} else {
		coll, err = a.collectionForStruct(result)
	}
	if err != nil {
		return err
	}

	cursor, err := coll.Aggregate(c, a.pipeline, a.opts...)
	if err != nil {
		return err
	}
	// The original never closed the cursor, leaking it (and its server-side
	// resources) on every call; ensure it is released on all paths.
	defer cursor.Close(c)

	if !cursor.Next(c) {
		return mongo.ErrNoDocuments
	}
	return cursor.Decode(result)
}
// All runs the aggregation and decodes every resulting document into result
// (a pointer to a slice). An optional context may be supplied;
// context.Background() is used otherwise.
func (a *aggregate) All(result interface{}, ctx ...context.Context) error {
	c := context.Background()
	if len(ctx) > 0 {
		c = ctx[0]
	}

	// Resolve the target collection from the configured doc, falling back
	// to the result slice's element type.
	var coll *mongo.Collection
	var err error
	if a.doc != nil {
		coll, err = a.collectionForStruct(a.doc)
	} else {
		coll, err = a.collectionForSlice(result)
	}
	if err != nil {
		return err
	}

	cursor, err := coll.Aggregate(c, a.pipeline, a.opts...)
	if err != nil {
		return err
	}
	// Cursor.All closes the cursor itself.
	return cursor.All(c, result)
}
// addOption appends one aggregation option to the accumulated option list
// and returns the builder for chaining.
func (a *aggregate) addOption(opt *options.AggregateOptions) driver.Aggregate {
	a.opts = append(a.opts, opt)
	return a
}

// SetAllowDiskUse sets the value for the AllowDiskUse field.
func (a *aggregate) SetAllowDiskUse(b bool) driver.Aggregate {
	return a.addOption(options.Aggregate().SetAllowDiskUse(b))
}

// SetBatchSize sets the value for the BatchSize field.
func (a *aggregate) SetBatchSize(i int32) driver.Aggregate {
	return a.addOption(options.Aggregate().SetBatchSize(i))
}

// SetBypassDocumentValidation sets the value for the BypassDocumentValidation field.
func (a *aggregate) SetBypassDocumentValidation(b bool) driver.Aggregate {
	return a.addOption(options.Aggregate().SetBypassDocumentValidation(b))
}

// SetCollation sets the value for the Collation field.
func (a *aggregate) SetCollation(c *options.Collation) driver.Aggregate {
	return a.addOption(options.Aggregate().SetCollation(c))
}

// SetMaxTime sets the value for the MaxTime field.
func (a *aggregate) SetMaxTime(d time.Duration) driver.Aggregate {
	return a.addOption(options.Aggregate().SetMaxTime(d))
}

// SetMaxAwaitTime sets the value for the MaxAwaitTime field.
func (a *aggregate) SetMaxAwaitTime(d time.Duration) driver.Aggregate {
	return a.addOption(options.Aggregate().SetMaxAwaitTime(d))
}

// SetComment sets the value for the Comment field.
func (a *aggregate) SetComment(s string) driver.Aggregate {
	return a.addOption(options.Aggregate().SetComment(s))
}

// SetHint sets the value for the Hint field.
func (a *aggregate) SetHint(h interface{}) driver.Aggregate {
	return a.addOption(options.Aggregate().SetHint(h))
}
// Pipeline appends the given stages to the aggregation pipeline.
func (a *aggregate) Pipeline(pipeline bson.A) driver.Aggregate {
	for _, stage := range pipeline {
		a.pipeline = append(a.pipeline, stage)
	}
	return a
}
// Match appends a $match stage built from the given condition's filters.
// It panics when the condition cannot produce filters, since the fluent
// interface has no error return.
func (a *aggregate) Match(c driver.Condition) driver.Aggregate {
	filters, err := c.Filters()
	if err != nil {
		panic(err)
	}
	stage := bson.M{"$match": filters}
	a.pipeline = append(a.pipeline, stage)
	return a
}
// SetDatabase overrides the database used for subsequent runs.
func (a *aggregate) SetDatabase(db string) driver.Aggregate {
	a.db = db
	return a
}
// collectionForStruct resolves the mongo collection for a struct value,
// preferring the explicitly configured a.doc over the passed doc.
func (a *aggregate) collectionForStruct(doc interface{}) (*mongo.Collection, error) {
	target := doc
	if a.doc != nil {
		target = a.doc
	}
	coll, err := a.engine.CollectionNameForStruct(target)
	if err != nil {
		return nil, err
	}
	return a.collectionByName(coll.Name), nil
}
// collectionForSlice resolves the mongo collection for a slice value,
// preferring the explicitly configured a.doc over the passed doc.
func (a *aggregate) collectionForSlice(doc interface{}) (*mongo.Collection, error) {
	resolve := a.engine.CollectionNameForSlice
	target := doc
	if a.doc != nil {
		resolve = a.engine.CollectionNameForStruct
		target = a.doc
	}
	coll, err := resolve(target)
	if err != nil {
		return nil, err
	}
	return a.collectionByName(coll.Name), nil
}
// collectionByName returns the mongo collection handle for name, applying
// any accumulated collection options and the database override.
func (a *aggregate) collectionByName(name string) *mongo.Collection {
	if a.collOpts == nil {
		a.collOpts = []*options.CollectionOptions{}
	}
	return a.engine.Collection(name, a.collOpts, a.db)
}
// addCollOption appends one collection-level option and returns the builder
// for chaining.
func (a *aggregate) addCollOption(opt *options.CollectionOptions) driver.Aggregate {
	a.collOpts = append(a.collOpts, opt)
	return a
}

// SetReadConcern sets the value for the ReadConcern field.
func (a *aggregate) SetReadConcern(rc *readconcern.ReadConcern) driver.Aggregate {
	return a.addCollOption(options.Collection().SetReadConcern(rc))
}

// SetCollWriteConcern sets the value for the WriteConcern field.
func (a *aggregate) SetCollWriteConcern(wc *writeconcern.WriteConcern) driver.Aggregate {
	return a.addCollOption(options.Collection().SetWriteConcern(wc))
}

// SetCollReadPreference sets the value for the ReadPreference field.
func (a *aggregate) SetCollReadPreference(rp *readpref.ReadPref) driver.Aggregate {
	return a.addCollOption(options.Collection().SetReadPreference(rp))
}

// SetCollRegistry sets the value for the Registry field.
func (a *aggregate) SetCollRegistry(r *bsoncodec.Registry) driver.Aggregate {
	return a.addCollOption(options.Collection().SetRegistry(r))
}
// Collection sets the document used to resolve the target collection name.
func (a *aggregate) Collection(doc interface{}) driver.Aggregate {
	a.doc = doc
	return a
}
|
package handlers
import (
"net/http"
"strconv"
"strings"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"github.com/pkg/errors"
"github.com/decentraland/content-service/data"
"github.com/decentraland/content-service/storage"
. "github.com/decentraland/content-service/utils"
"github.com/go-redis/redis"
log "github.com/sirupsen/logrus"
)
// ParcelContent is the consolidated content mapping of one parcel: the
// files it contains plus the root CID and publisher it was uploaded under.
type ParcelContent struct {
	ParcelID  string            `json:"parcel_id"`
	Contents  []*ContentElement `json:"contents"`
	RootCID   string            `json:"root_cid"`
	Publisher string            `json:"publisher"`
}

// SceneContent pairs a scene's identifying CIDs with one parcel's content.
type SceneContent struct {
	RootCID  string         `json:"root_cid"`
	SceneCID string         `json:"scene_cid"`
	Content  *ParcelContent `json:"content"`
}

// Scene links one parcel to the scene (root CID + scene CID) covering it.
type Scene struct {
	ParcelId string `json:"parcel_id"`
	RootCID  string `json:"root_cid"`
	SceneCID string `json:"scene_cid"`
}

// ContentElement is one file within a parcel and its content hash.
type ContentElement struct {
	File string `json:"file"`
	Cid  string `json:"hash"`
}

// StringPair is a simple two-element tuple of strings.
type StringPair struct {
	A string
	B string
}
// MappingsHandler is the logic layer serving parcel/scene mapping endpoints.
type MappingsHandler interface {
	// GetMappings serves parcel contents for a rectangle of coordinates.
	GetMappings(c *gin.Context)
	// GetScenes serves the scenes covering a rectangle of coordinates.
	GetScenes(c *gin.Context)
	// GetParcelInformation returns the consolidated content of one parcel.
	GetParcelInformation(parcelId string) (*ParcelContent, error)
	// GetInfo serves scene information for a comma-separated list of CIDs.
	GetInfo(c *gin.Context)
}
// mappingsHandlerImpl implements MappingsHandler backed by redis for the
// content mappings, the Decentraland API, and blob storage.
type mappingsHandlerImpl struct {
	RedisClient data.RedisClient
	Dcl         data.Decentraland
	Storage     storage.Storage
	Log         *log.Logger
}
// NewMappingsHandler builds a MappingsHandler from its dependencies.
func NewMappingsHandler(client data.RedisClient, dcl data.Decentraland, storage storage.Storage, l *log.Logger) MappingsHandler {
	h := &mappingsHandlerImpl{
		RedisClient: client,
		Dcl:         dcl,
		Storage:     storage,
		Log:         l,
	}
	return h
}
// getMappingsParams are the query params of GetMappings: the north-west and
// south-east corners of the requested rectangle, each as an "x,y" string.
type getMappingsParams struct {
	Nw string `form:"nw" binding:"required"`
	Se string `form:"se" binding:"required"`
}
// NwCoord parses the north-west "x,y" query parameter.
func (sa *getMappingsParams) NwCoord() (int, int, error) {
	return parseCoordinates(sa.Nw)
}

// SeCoord parses the south-east "x,y" query parameter.
func (sa *getMappingsParams) SeCoord() (int, int, error) {
	return parseCoordinates(sa.Se)
}
// parseCoordinates parses an "x,y" string into its two integer coordinates.
func parseCoordinates(coord string) (int, int, error) {
	tkns := strings.Split(coord, ",")
	if len(tkns) != 2 {
		return 0, 0, errors.New("invalid coordinate")
	}
	x, err := strconv.Atoi(tkns[0])
	if err != nil {
		return 0, 0, errors.New("invalid coordinate")
	}
	// BUG FIX: y was parsed from tkns[0] (the x token), so "3,4" yielded
	// (3, 3) and a non-numeric y coordinate went undetected.
	y, err := strconv.Atoi(tkns[1])
	if err != nil {
		return 0, 0, errors.New("invalid coordinate")
	}
	return x, y, nil
}
func (ms *mappingsHandlerImpl) GetMappings(c *gin.Context) {
var params getMappingsParams
err := c.ShouldBindWith(¶ms, binding.Query)
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid query params"})
return
}
x1, y1, err := params.NwCoord()
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid query params"})
return
}
x2, y2, err := params.SeCoord()
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid query params"})
return
}
parcels := RectToParcels(x1, y1, x2, y2, 200)
if parcels == nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "too many parcels requested"})
return
}
mapContents := []ParcelContent{}
for _, pid := range parcels {
content, err := ms.GetParcelInformation(pid)
if err != nil {
c.Error(err)
ms.Log.WithError(err).Error("fail to retrieve parcel")
c.AbortWithStatusJSON(http.StatusServiceUnavailable, gin.H{"error": "unexpected error, try again later"})
return
}
if content != nil {
mapContents = append(mapContents, *content)
}
}
c.JSON(http.StatusOK, mapContents)
}
// getScenesParams are the query params of GetScenes: the two corners of the
// requested rectangle, each coordinate bounded to the [-150, 150] range.
type getScenesParams struct {
	X1 *int `form:"x1" binding:"exists,min=-150,max=150"`
	Y1 *int `form:"y1" binding:"exists,min=-150,max=150"`
	X2 *int `form:"x2" binding:"exists,min=-150,max=150"`
	Y2 *int `form:"y2" binding:"exists,min=-150,max=150"`
}
// GetScenes handles GET requests for the scenes covering the rectangle
// given by the x1/y1/x2/y2 query params (capped at 200 parcels). The
// response lists one Scene entry per (scene, parcel) pair.
func (ms *mappingsHandlerImpl) GetScenes(c *gin.Context) {
	var p getScenesParams
	err := c.ShouldBindWith(&p, binding.Query)
	if err != nil {
		println(err.Error())
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid query params"})
		return
	}
	pids := RectToParcels(*p.X1, *p.Y1, *p.X2, *p.Y2, 200)
	if pids == nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "too many parcels requested"})
		return
	}

	// Collect the distinct root CIDs of processed scenes in the rectangle.
	cids := make(map[string]bool, len(pids))
	for _, pid := range pids {
		cid, err := ms.RedisClient.GetParcelCID(pid)
		// NOTE(review): the empty-cid check short-circuits before the error
		// check, so a failed lookup that also yields "" is silently skipped
		// rather than surfaced as a 500 — confirm this is intentional.
		if cid == "" {
			continue
		}
		if err != nil {
			c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "unexpected error, try again later"})
			return
		}
		validParcel, err := ms.RedisClient.ProcessedParcel(pid)
		if err != nil {
			log.Errorf("error when checking validity of parcel %s", pid)
			// skip on error
		}
		if !validParcel {
			continue
		}
		cids[cid] = true
	}

	ret := make([]*Scene, 0, len(cids))
	// "for cid := range" replaces the redundant "for cid, _ := range"
	// (gofmt -s / go vet style).
	for cid := range cids {
		parcels, err := ms.RedisClient.GetSceneParcels(cid)
		if err != nil && err != redis.Nil {
			ms.Log.WithError(err).Error("error reading scene from redis")
			_ = c.Error(err)
			c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "unexpected error, try again later"})
			return
		}
		sceneCID, err := ms.RedisClient.GetSceneCid(cid)
		if err != nil && err != redis.Nil {
			log.Errorf("error reading scene cid for cid %s", cid)
			// we just use the empty string in this case
		}
		for _, p := range parcels {
			ret = append(ret, &Scene{
				SceneCID: sceneCID,
				RootCID:  cid,
				ParcelId: p,
			})
		}
	}
	c.JSON(http.StatusOK, gin.H{"data": ret})
}
// GetParcelInformation retrieves the consolidated information of a given
// parcel. If the parcel does not exist, the returned *ParcelContent is nil
// (with a nil error).
func (ms *mappingsHandlerImpl) GetParcelInformation(parcelId string) (*ParcelContent, error) {
	content, err := ms.RedisClient.GetParcelContent(parcelId)
	if content == nil || err != nil {
		return nil, err
	}
	// Convert the file-name -> CID map into the JSON content list.
	var elements []*ContentElement
	for name, cid := range content {
		elements = append(elements, &ContentElement{File: name, Cid: cid})
	}
	metadata, err := ms.RedisClient.GetParcelMetadata(parcelId)
	if metadata == nil || err != nil {
		return nil, err
	}
	// NOTE(review): these type assertions panic if the metadata values are
	// not strings — confirm the redis layer guarantees string values here.
	return &ParcelContent{ParcelID: parcelId, Contents: elements, RootCID: metadata["root_cid"].(string), Publisher: metadata["pubkey"].(string)}, nil
}
// GetInfo handles GET requests resolving the comma-separated "cids" query
// parameter — each entry a root CID or (fallback) a scene CID — into scene
// contents: for each input it determines the scene's root CID, scene CID,
// and one covered parcel, then returns that parcel's consolidated content.
func (ms *mappingsHandlerImpl) GetInfo(c *gin.Context) {
	cidsParam := c.Query("cids")
	if len(cidsParam) <= 0 {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid params"})
		return
	}
	cids := strings.Split(cidsParam, ",")
	// parcels maps root CID -> (one parcel id, scene CID).
	parcels := make(map[string]*StringPair, len(cids))
	for _, cid := range cids {
		// First try the input as a root CID.
		ps, err := ms.RedisClient.GetSceneParcels(cid)
		if err != nil && err != redis.Nil {
			ms.Log.WithError(err).Error("error reading scene from redis")
			_ = c.Error(err)
			c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "unexpected error, try again later"})
			return
		}
		sceneCID := ""
		rootCID := ""
		if ps == nil || len(ps) == 0 {
			// Maybe the parameter is not the root cid, but the scene cid, which we will be eventually support better
			rootCID, err = ms.RedisClient.GetRootCid(cid)
			if err != nil && err != redis.Nil {
				log.Errorf("error when getting rootcid for hash %s with error %s", cid, err)
				continue
			}
			ps, err = ms.RedisClient.GetSceneParcels(rootCID)
			if err != nil && err != redis.Nil {
				log.Errorf("error when reading parcels for cid %s with error %s", rootCID, err)
				continue
			}
			if ps == nil || len(ps) == 0 {
				continue
			}
			sceneCID = cid
		}
		// If the fallback above did not identify the input as a scene CID,
		// the input itself was the root CID.
		if sceneCID == "" {
			sceneCID, _ = ms.RedisClient.GetSceneCid(cid)
			rootCID = cid
		}
		parcels[rootCID] = &StringPair{A: ps[0], B: sceneCID}
	}
	// Resolve one representative parcel's content per scene.
	ret := make([]*SceneContent, 0, len(cids))
	for k, v := range parcels {
		content, err := ms.GetParcelInformation(v.A)
		if err != nil {
			log.Errorf("error getting information for parcel %s with error %s", v.A, err)
			continue
		}
		ret = append(ret, &SceneContent{
			RootCID:  k,
			SceneCID: v.B,
			Content:  content,
		})
	}
	c.JSON(http.StatusOK, gin.H{"data": ret})
}
|
package main
import (
"fmt"
"log"
"strings"
"sync"
"time"
stan "github.com/nats-io/stan.go"
)
// AsyncReport is the report from a function executed on a queue worker.
type AsyncReport struct {
	FunctionName string  `json:"name"`       // invoked function
	StatusCode   int     `json:"statusCode"` // status of the invocation
	TimeTaken    float64 `json:"timeTaken"`  // execution time of the call
}
// NATSQueue represents a subscription to NATS Streaming
type NATSQueue struct {
	clusterID string
	clientID  string
	natsURL   string

	// Reconnection policy: up to maxReconnect attempts, with a delay that
	// grows linearly from reconnectDelay.
	maxReconnect   int
	reconnectDelay time.Duration

	// conn is guarded by connMutex.
	conn      stan.Conn
	connMutex *sync.RWMutex

	// quitCh stops an in-progress reconnect loop.
	quitCh chan struct{}

	subject string
	qgroup  string
	ackWait time.Duration

	// messageHandler is invoked for each received message.
	messageHandler func(*stan.Msg)

	// maxInFlight bounds unacknowledged messages; when > 1, messages are
	// fanned out to worker goroutines via msgChan (see connect()).
	maxInFlight  int
	subscription stan.Subscription
	msgChan      chan *stan.Msg
}
// connect creates a subscription to NATS Streaming
func (q *NATSQueue) connect() error {
	log.Printf("Connect: %s\n", q.natsURL)
	nc, err := stan.Connect(
		q.clusterID,
		q.clientID,
		stan.NatsURL(q.natsURL),
		stan.SetConnectionLostHandler(func(conn stan.Conn, err error) {
			// Fired on unexpected connection loss; start the retry loop.
			log.Printf("Disconnected from %s\n", q.natsURL)
			q.reconnect()
		}),
	)
	if err != nil {
		return fmt.Errorf("can't connect to %s: %v", q.natsURL, err)
	}

	q.connMutex.Lock()
	defer q.connMutex.Unlock()
	q.conn = nc

	log.Printf("Subscribing to: %s at %s\n", q.subject, q.natsURL)
	log.Println("Wait for ", q.ackWait)

	// Unbuffered fan-out channel, used only when maxInFlight > 1: the
	// subscription handler feeds messages to a pool of worker goroutines.
	// (The previous comment claimed it was pre-filled with tokens; it is not.)
	msgChan := make(chan *stan.Msg)
	if q.maxInFlight <= 0 {
		q.maxInFlight = 1
	}
	handler := q.messageHandler
	opts := []stan.SubscriptionOption{
		stan.DurableName(strings.ReplaceAll(q.subject, ".", "_")),
		stan.AckWait(q.ackWait),
		stan.DeliverAllAvailable(),
		stan.MaxInflight(q.maxInFlight),
	}
	if q.maxInFlight > 1 {
		// Start maxInFlight workers, each running the handler and manually
		// acking after it returns; the subscription handler only forwards.
		for i := 0; i < q.maxInFlight; i++ {
			go func() {
				for msg := range msgChan {
					q.messageHandler(msg)
					msg.Ack()
				}
			}()
		}
		opts = append(opts, stan.SetManualAckMode())
		handler = func(msg *stan.Msg) {
			msgChan <- msg
		}
	}
	subscription, err := q.conn.QueueSubscribe(
		q.subject,
		q.qgroup,
		handler,
		opts...,
	)
	if err != nil {
		return fmt.Errorf("couldn't subscribe to %s at %s. Error: %v", q.subject, q.natsURL, err)
	}
	log.Printf(
		"Listening on [%s], clientID=[%s], qgroup=[%s] maxInFlight=[%d]\n",
		q.subject,
		q.clientID,
		q.qgroup,
		q.maxInFlight,
	)
	q.subscription = subscription
	q.msgChan = msgChan
	return nil
}
// reconnect retries connect() up to maxReconnect times, waiting
// i*reconnectDelay before attempt i+1 (so the first retry is immediate).
// It aborts early when quitCh is closed.
func (q *NATSQueue) reconnect() {
	log.Printf("Reconnect\n")
	for i := 0; i < q.maxReconnect; i++ {
		select {
		case <-time.After(time.Duration(i) * q.reconnectDelay):
			if err := q.connect(); err == nil {
				log.Printf("Reconnecting (%d/%d) to %s succeeded\n", i+1, q.maxReconnect, q.natsURL)
				return
			}
			nextTryIn := (time.Duration(i+1) * q.reconnectDelay).String()
			log.Printf("Reconnecting (%d/%d) to %s failed\n", i+1, q.maxReconnect, q.natsURL)
			log.Printf("Waiting %s before next try", nextTryIn)
		case <-q.quitCh:
			log.Println("Received signal to stop reconnecting...")
			return
		}
	}
	log.Printf("Reconnecting limit (%d) reached\n", q.maxReconnect)
}
// closeConnection closes the NATS connection, the worker fan-out channel,
// and the quit channel (which stops any in-progress reconnect loop).
func (q *NATSQueue) closeConnection() error {
	q.connMutex.Lock()
	defer q.connMutex.Unlock()
	if q.conn == nil {
		return fmt.Errorf("q.conn is nil")
	}
	err := q.conn.Close()
	// NOTE(review): msgChan is closed here on the receiving side; if the
	// subscription handler can still deliver after Close(), a send on the
	// closed channel would panic — confirm Close() halts delivery first.
	close(q.msgChan)
	close(q.quitCh)
	return err
}
|
package main
import (
"context"
"fmt"
"github.com/antihax/optional"
"github.com/outscale/osc-sdk-go/osc"
"os"
)
// main walks a volume through its full lifecycle against the Outscale API:
// list existing volumes, create a 10GB gp2 volume, read back its details,
// and delete it again. Credentials are taken from the OSC_ACCESS_KEY and
// OSC_SECRET_KEY environment variables; any API failure exits with code 1.
func main() {
	client := osc.NewAPIClient(osc.NewConfiguration())
	// AWS-v4 request-signing context reused for every call below.
	auth := context.WithValue(context.Background(), osc.ContextAWSv4, osc.AWSv4{
		AccessKey: os.Getenv("OSC_ACCESS_KEY"),
		SecretKey: os.Getenv("OSC_SECRET_KEY"),
	})

	// List the volumes that already exist.
	read, httpRes, err := client.VolumeApi.ReadVolumes(auth, nil)
	if err != nil {
		fmt.Fprintln(os.Stderr, "Error while reading volumes")
		if httpRes != nil {
			fmt.Fprintln(os.Stderr, httpRes.Status)
		}
		os.Exit(1)
	}
	println("We currently have", len(read.Volumes), "volumes:")
	for _, volume := range read.Volumes {
		println("-", volume.VolumeId)
	}

	println("Creating 10GB GP2 volume")
	creationOpts := osc.CreateVolumeOpts{
		CreateVolumeRequest: optional.NewInterface(
			osc.CreateVolumeRequest{
				Size:          10,
				VolumeType:    "gp2",
				SubregionName: "eu-west-2a",
			}),
	}
	creation, httpRes, err := client.VolumeApi.CreateVolume(auth, &creationOpts)
	if err != nil {
		fmt.Fprint(os.Stderr, "Error while creating volume ")
		if httpRes != nil {
			fmt.Fprintln(os.Stderr, httpRes.Status)
		}
		os.Exit(1)
	}
	println("Created volume", creation.Volume.VolumeId)

	// Read the new volume back, filtered by its id.
	println("Reading created volume details")
	readOpts := osc.ReadVolumesOpts{
		ReadVolumesRequest: optional.NewInterface(
			osc.ReadVolumesRequest{
				Filters: osc.FiltersVolume{
					VolumeIds: []string{creation.Volume.VolumeId},
				},
			}),
	}
	read, httpRes, err = client.VolumeApi.ReadVolumes(auth, &readOpts)
	if err != nil {
		fmt.Fprint(os.Stderr, "Error while reading volumes ")
		if httpRes != nil {
			fmt.Fprintln(os.Stderr, httpRes.Status)
		}
		os.Exit(1)
	}
	println(creation.Volume.VolumeId, "details:")
	volume := read.Volumes[0]
	println("- Id:", volume.VolumeId)
	println("- Size:", volume.Size)
	println("- Type:", volume.VolumeType)
	println("- State:", volume.State)

	// Clean up the volume we created.
	println("Deleting volume", creation.Volume.VolumeId)
	deletionOpts := osc.DeleteVolumeOpts{
		DeleteVolumeRequest: optional.NewInterface(
			osc.DeleteVolumeRequest{
				VolumeId: creation.Volume.VolumeId,
			}),
	}
	_, httpRes, err = client.VolumeApi.DeleteVolume(auth, &deletionOpts)
	if err != nil {
		fmt.Fprint(os.Stderr, "Error while deleting volume ")
		if httpRes != nil {
			fmt.Fprintln(os.Stderr, httpRes.Status)
		}
		os.Exit(1)
	}
	println("Deleted volume", creation.Volume.VolumeId)
}
|
package osbuild1
import (
"bytes"
"encoding/json"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// TestUnmarshal checks that a raw osbuild result decodes into Result with
// the org.osbuild.rpm stage's metadata mapped to *RPMStageMetadata, and
// that null signature fields come through as empty/nil.
func TestUnmarshal(t *testing.T) {
	resultRaw := `{
		"success": true,
		"build": {
			"success": true,
			"stages": [
				{
					"name": "org.osbuild.rpm",
					"id": "9eb0a6f6fd6e2995e107f5bcc6aa3b19643b02ec133bdc8a8ac614860b1bbf2d",
					"success": true,
					"output": "Building...",
					"metadata": {
						"packages": [
							{
								"name": "libgcc",
								"version": "10.0.1",
								"release": "0.11.fc32",
								"epoch": null,
								"arch": "x86_64",
								"sigmd5": "84fc907a5047aeebaf8da1642925a417",
								"sigpgp": null,
								"siggpg": "883f0305005f2310139ec3e4c0f7e257e611023e11009f639c5fe64abaa76224dab3a9f70c2714a84c63bd009d1cc184fb4b428dfcd7c3556f4a5f860cc0187740"
							},
							{
								"name": "whois-nls",
								"version": "5.5.6",
								"release": "1.fc32",
								"epoch": null,
								"arch": "noarch",
								"sigmd5": "f868cd02046630c8ce3a9c48820e2437",
								"sigpgp": "89023304000108001d162104963a2beb02009608fe67ea4249fd77499570ff3105025f5a272b000a091049fd77499570ff31ccdb0ffe38b95a55ebf3c021526b3cd4f2358c7e23f7767d1f5ce4b7cccef7b33653c6a96a23022313a818fbaf7abeb41837910f0d3ac15664e02838d5939d38ff459aa0076e248728a032d3ae09ddfaec955f941601081a2e3f9bbd49586fd65c1bc1b31685aeb0405687d1791471eab7359ccf00d5584ddef680e99ebc8a4846316391b9baa68ac8ed8ad696ee16fd625d847f8edd92517df3ea6920a46b77b4f119715a0f619f38835d25e0bd0eb5cfad08cd9c796eace6a2b28f4d3dee552e6068255d9748dc2a1906c951e0ba8aed9922ab24e1f659413a06083f8a0bfea56cfff14bddef23bced449f36bcd369da72f90ddf0512e7b0801ba5a0c8eaa8eb0582c630815e992192042cfb0a7c7239f76219197c2fdf18b6553260c105280806d4f037d7b04bdf3da9fd7e9a207db5c71f7e548f4288928f047c989c4cb9cbb8088eec7bd2fa5c252e693f51a3cfc660f666af6a255a5ca0fd2216d5ccd66cbd9c11afa61067d7f615ec8d0dc0c879b5fe633d8c9443f97285da597e4da8a3993af36f0be06acfa9b8058ec70bbc78b876e4c6c5d2108fb05c15a74ba48a3d7ded697cbc1748c228d77d1e0794a41fd5240fa67c3ed745fe47555a47c3d6163d8ce95fd6c2d0d6fa48f8e5b411e571e442109b1cb200d9a8117ee08bfe645f96aca34f7b7559622bbab75143dcad59f126ae0d319e6668ebba417e725638c4febf2e",
								"siggpg": null
							}
						]
					}
				}
			]
		}
	}`
	var result Result
	err := json.Unmarshal([]byte(resultRaw), &result)
	assert.NoError(t, err)
	assert.Equal(t, result.Build.Stages[0].Name, "org.osbuild.rpm")
	// The rpm stage's metadata must decode into the typed struct.
	metadata, ok := result.Build.Stages[0].Metadata.(*RPMStageMetadata)
	assert.True(t, ok)
	// First package: GPG-signed, null PGP signature and null epoch.
	package1 := metadata.Packages[0]
	assert.Equal(t, package1.Name, "libgcc")
	assert.Nil(t, package1.Epoch)
	assert.Equal(t, package1.Version, "10.0.1")
	assert.Equal(t, package1.Release, "0.11.fc32")
	assert.Equal(t, package1.Arch, "x86_64")
	assert.Equal(t, package1.SigMD5, "84fc907a5047aeebaf8da1642925a417")
	assert.Empty(t, package1.SigPGP)
	assert.Equal(t, package1.SigGPG, "883f0305005f2310139ec3e4c0f7e257e611023e11009f639c5fe64abaa76224dab3a9f70c2714a84c63bd009d1cc184fb4b428dfcd7c3556f4a5f860cc0187740")
	// Second package: PGP-signed, null GPG signature.
	package2 := metadata.Packages[1]
	assert.Equal(t, package2.SigPGP, "89023304000108001d162104963a2beb02009608fe67ea4249fd77499570ff3105025f5a272b000a091049fd77499570ff31ccdb0ffe38b95a55ebf3c021526b3cd4f2358c7e23f7767d1f5ce4b7cccef7b33653c6a96a23022313a818fbaf7abeb41837910f0d3ac15664e02838d5939d38ff459aa0076e248728a032d3ae09ddfaec955f941601081a2e3f9bbd49586fd65c1bc1b31685aeb0405687d1791471eab7359ccf00d5584ddef680e99ebc8a4846316391b9baa68ac8ed8ad696ee16fd625d847f8edd92517df3ea6920a46b77b4f119715a0f619f38835d25e0bd0eb5cfad08cd9c796eace6a2b28f4d3dee552e6068255d9748dc2a1906c951e0ba8aed9922ab24e1f659413a06083f8a0bfea56cfff14bddef23bced449f36bcd369da72f90ddf0512e7b0801ba5a0c8eaa8eb0582c630815e992192042cfb0a7c7239f76219197c2fdf18b6553260c105280806d4f037d7b04bdf3da9fd7e9a207db5c71f7e548f4288928f047c989c4cb9cbb8088eec7bd2fa5c252e693f51a3cfc660f666af6a255a5ca0fd2216d5ccd66cbd9c11afa61067d7f615ec8d0dc0c879b5fe633d8c9443f97285da597e4da8a3993af36f0be06acfa9b8058ec70bbc78b876e4c6c5d2108fb05c15a74ba48a3d7ded697cbc1748c228d77d1e0794a41fd5240fa67c3ed745fe47555a47c3d6163d8ce95fd6c2d0d6fa48f8e5b411e571e442109b1cb200d9a8117ee08bfe645f96aca34f7b7559622bbab75143dcad59f126ae0d319e6668ebba417e725638c4febf2e")
	assert.Empty(t, package2.SigGPG)
}
// TestUnmarshalV1Success decodes a successful osbuild v1 result and
// checks the build pipeline, main pipeline, and assembler fields.
//
// Fix: assert.Equal takes (t, expected, actual); two calls had the
// arguments reversed, which produces misleading failure output and was
// inconsistent with the other assertions in this file.
func TestUnmarshalV1Success(t *testing.T) {
	var result Result
	err := json.Unmarshal([]byte(v1ResultSuccess), &result)
	assert.NoError(t, err)
	assert.True(t, result.Success)
	assert.True(t, result.Build.Success)
	assert.Len(t, result.Build.Stages, 2)
	assert.True(t, result.Build.Stages[1].Success)
	assert.Equal(t, "org.osbuild.rpm", result.Build.Stages[0].Name)
	assert.Len(t, result.Stages, 11)
	assert.True(t, result.Stages[10].Success)
	assert.Equal(t, "org.osbuild.rpm", result.Stages[0].Name)
	assert.True(t, result.Assembler.Success)
	assert.Equal(t, "org.osbuild.qemu", result.Assembler.Name)
}
// TestUnmarshalV1Failure decodes a failed osbuild v1 result: the build
// pipeline succeeded, the main pipeline stopped at a failing stage, and
// the assembler never ran.
//
// Fix: assert.Equal arguments reordered to (t, expected, actual) for
// consistency with the rest of the file and correct failure messages.
func TestUnmarshalV1Failure(t *testing.T) {
	var result Result
	err := json.Unmarshal([]byte(v1ResultFailure), &result)
	assert.NoError(t, err)
	assert.False(t, result.Success)
	assert.True(t, result.Build.Success)
	assert.Len(t, result.Build.Stages, 2)
	assert.True(t, result.Build.Stages[1].Success)
	assert.Equal(t, "org.osbuild.rpm", result.Build.Stages[0].Name)
	assert.Len(t, result.Stages, 9)
	assert.False(t, result.Stages[8].Success)
	assert.Equal(t, "org.osbuild.rpm", result.Stages[0].Name)
	assert.Nil(t, result.Assembler)
}
// TestUnmarshalV2Success decodes a successful osbuild v2 result and
// checks the flattened stage list plus per-stage metadata decoding.
func TestUnmarshalV2Success(t *testing.T) {
	var result Result
	err := json.Unmarshal([]byte(v2ResultSuccess), &result)
	assert.NoError(t, err)
	assert.True(t, result.Success)
	assert.Len(t, result.Stages, 16)
	assert.True(t, result.Stages[15].Success)
	assert.NotEmpty(t, result.Stages[0].Name)
	// check metadata
	// NOTE(review): here the metadata is asserted as the VALUE type
	// (RPMStageMetadata), whereas TestUnmarshal asserts the v1 build
	// stage as *RPMStageMetadata — presumably the v2 conversion stores
	// values rather than pointers; confirm against Result's
	// unmarshalling code.
	for _, stage := range result.Stages {
		if strings.HasSuffix(stage.Name, "org.osbuild.rpm") {
			rpmMd, convOk := stage.Metadata.(RPMStageMetadata)
			assert.True(t, convOk)
			assert.Greater(t, len(rpmMd.Packages), 0)
		} else if strings.HasSuffix(stage.Name, "org.osbuild.ostree.commit") {
			commitMd, convOk := stage.Metadata.(OSTreeCommitStageMetadata)
			assert.True(t, convOk)
			assert.NotEmpty(t, commitMd.Compose.Ref)
		}
	}
}
// TestUnmarshalV2Failure decodes a failed osbuild v2 result and checks
// that the failing stage (and only that stage) is reported unsuccessful.
func TestUnmarshalV2Failure(t *testing.T) {
	var result Result
	assert.NoError(t, json.Unmarshal([]byte(v2ResultFailure), &result))
	assert.False(t, result.Success)
	assert.Len(t, result.Stages, 7)
	assert.True(t, result.Stages[5].Success)
	assert.False(t, result.Stages[6].Success)
	assert.NotEmpty(t, result.Stages[0].Name)
}
// TestWriteFull renders a fully populated Result (build stage, one rpm
// pipeline stage, and an assembler) via Write and compares the
// human-readable report against the expected text verbatim.
func TestWriteFull(t *testing.T) {
	const testOptions = `{"msg": "test"}`
	// rpm stage carrying a minimal single-package metadata payload;
	// Write is expected to ignore the metadata in its report.
	dnfStage := StageResult{
		Name:    "org.osbuild.rpm",
		Options: []byte(testOptions),
		Success: true,
		Output:  "Finished",
		Metadata: RPMStageMetadata{
			Packages: []RPMPackageMetadata{
				{
					Name:    "foobar",
					Epoch:   nil,
					Version: "1",
					Release: "1",
					Arch:    "noarch",
					SigMD5:  "deadbeef",
				},
			},
		},
	}
	testStage := StageResult{
		Name:    "org.osbuild.test",
		Options: []byte(testOptions),
		Success: true,
		Output:  "Finished",
	}
	testBuild := buildResult{
		Stages:  []StageResult{testStage},
		TreeID:  "treeID",
		Success: true,
	}
	testAssembler := StageResult{
		Name:    "testAssembler",
		Options: []byte(testOptions),
		Success: true,
		Output:  "Done",
	}
	testComposeResult := Result{
		TreeID:    "TreeID",
		OutputID:  "OutputID",
		Build:     &testBuild,
		Stages:    []StageResult{dnfStage},
		Assembler: &testAssembler,
		Success:   true,
	}
	var b bytes.Buffer
	assert.NoError(t, testComposeResult.Write(&b))
	// Expected report, byte for byte. Note the differing headers:
	// "Stage <name>" for build stages vs "Stage: <name>" for pipeline
	// stages — this matches what Write currently emits.
	expectedMessage :=
		`Build pipeline:
Stage org.osbuild.test
{
"msg": "test"
}
Output:
Finished
Stages:
Stage: org.osbuild.rpm
{
"msg": "test"
}
Output:
Finished
Assembler testAssembler:
{
"msg": "test"
}
Output:
Done
`
	assert.Equal(t, expectedMessage, b.String())
}
// TestWriteEmpty checks that writing a zero-value Result produces the
// fixed "empty result" notice.
func TestWriteEmpty(t *testing.T) {
	var buf bytes.Buffer
	result := Result{}
	assert.NoError(t, result.Write(&buf))
	assert.Equal(t, "The compose result is empty.\n", buf.String())
}
|
// Copyright (C) 2015 Nicolas Lamirault <nicolas.lamirault@gmail.com>
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
	"fmt"
	"log"
	"strconv"
	"sync"

	"github.com/garyburd/redigo/redis"
)
// RedisClient defines the pub-sub Redis client.
// Mutex serializes writes on Conn (see Publish); a RedisClient must not
// be copied after first use because it embeds a sync.Mutex.
// NOTE(review): PubSubConn wraps the SAME connection as Conn, yet
// Publish issues PUBLISH on it — Redis does not allow regular commands
// on a subscribed connection, so publishing while subscribed likely
// fails; confirm and consider a second connection.
type RedisClient struct {
	Conn       redis.Conn       // raw connection, used for PUBLISH
	PubSubConn redis.PubSubConn // subscription wrapper around Conn
	Mutex      sync.Mutex       // guards Conn against concurrent Publish calls
	MsgCh      chan *Message    // received pub-sub messages are delivered here
}
// NewRedisClient dials the Redis server on host (fixed port 6379) and
// returns a client that forwards received pub-sub messages to msgChan.
// The connection is intentionally left open: it is owned by the
// returned client for the lifetime of its Receive loop. (A previously
// commented-out `defer conn.Close()` has been removed as dead,
// misleading code.)
func NewRedisClient(host string, msgChan chan *Message) (*RedisClient, error) {
	log.Printf("[INFO] [redis] PubSub: %s", host)
	conn, err := redis.Dial("tcp", fmt.Sprintf("%s:6379", host))
	if err != nil {
		return nil, err
	}
	return &RedisClient{
		Conn:       conn,
		PubSubConn: redis.PubSubConn{Conn: conn},
		MsgCh:      msgChan,
	}, nil
}
// Subscribe subscribes the connection to the specified channels.
// Fix: the error returned by PubSubConn.Subscribe was previously
// discarded and this method always returned nil; it now stops and
// returns the first subscription error.
func (client *RedisClient) Subscribe(channels ...interface{}) error {
	for _, channel := range channels {
		log.Printf("[INFO] [redis] Subscribe to [%s]\n", channel)
		if err := client.PubSubConn.Subscribe(channel); err != nil {
			return err
		}
	}
	return nil
}
// Unsubscribe unsubscribes the connection from the given channels.
// Fix: the error returned by PubSubConn.Unsubscribe was previously
// discarded and this method always returned nil; it now stops and
// returns the first error.
func (client *RedisClient) Unsubscribe(channels ...interface{}) error {
	for _, channel := range channels {
		log.Printf("[INFO] [redis] Unsubscribe to [%s]\n", channel)
		if err := client.PubSubConn.Unsubscribe(channel); err != nil {
			return err
		}
	}
	return nil
}
// Publish sends a message to a channel. The signature is kept
// fire-and-forget for compatibility, but a PUBLISH failure is now
// logged instead of silently discarded, and the mutex is released via
// defer so it cannot be leaked.
func (client *RedisClient) Publish(channel string, message string) {
	log.Printf("[INFO] [redis] Publish: %s to [%s]\n", message, channel)
	client.Mutex.Lock()
	defer client.Mutex.Unlock()
	if _, err := client.Conn.Do("PUBLISH", channel, message); err != nil {
		log.Printf("[WARN] [redis] Publish to [%s] failed: %v", channel, err)
	}
}
// Receive runs the pub-sub read loop, translating each incoming redigo
// value into a *Message pushed onto client.MsgCh (pong and subscription
// events are only logged).
//
// Fixes:
//   - Subscription.Count is an int; string(message.Count) produced the
//     rune with that code point (e.g. 1 -> "\x01"), not decimal text.
//     It is now formatted with strconv.Itoa.
//   - A connection error previously fell through to the default case,
//     so a closed connection caused an infinite tight logging loop.
//     Errors now terminate the loop.
func (client *RedisClient) Receive() {
	for {
		switch message := client.PubSubConn.Receive().(type) {
		case redis.Message:
			msg := &Message{
				Type:    "message",
				Channel: message.Channel,
				Data:    string(message.Data),
			}
			log.Printf("[INFO] [redis] Receive: %v", msg)
			client.MsgCh <- msg
		case redis.PMessage:
			msg := &Message{
				Type:    "pmessage",
				Channel: message.Channel,
				Data:    string(message.Data),
			}
			log.Printf("[INFO] [redis] Receive: %v", msg)
			client.MsgCh <- msg
		case redis.Pong:
			msg := &Message{
				Type:    "pong",
				Channel: "",
				Data:    string(message.Data),
			}
			log.Printf("[DEBUG] [redis] Receive: %v", msg)
		case redis.Subscription:
			msg := &Message{
				Type:    message.Kind,
				Channel: message.Channel,
				Data:    strconv.Itoa(message.Count),
			}
			log.Printf("[INFO] [redis] Receive: %v", msg)
		case error:
			// Stop on connection errors instead of spinning forever.
			log.Printf("[ERROR] [redis] Receive error: %v", message)
			return
		default:
			log.Printf("[WARN] [redis] Receive invalid message: %v",
				message)
		}
	}
}
|
package menu
import (
"fmt"
tgbotapi "github.com/go-telegram-bot-api/telegram-bot-api"
"projja_telegram/command/util"
"projja_telegram/model"
)
// MakeProjectMenu builds the reply-keyboard menu for a project. An
// opened project gets two rows (settings + task management, answers +
// back); any other status gets a single row (settings + back).
func MakeProjectMenu(message *util.MessageData, project *model.Project) tgbotapi.MessageConfig {
	msg := tgbotapi.NewMessage(message.Chat.ID,
		fmt.Sprintf("Работаем над проектом '%s'", project.Name))

	settingsBtn := tgbotapi.NewKeyboardButton("Настройки проекта")
	backBtn := tgbotapi.NewKeyboardButton("Назад")

	var rows [][]tgbotapi.KeyboardButton
	if project.Status == "opened" {
		rows = [][]tgbotapi.KeyboardButton{
			{settingsBtn, tgbotapi.NewKeyboardButton("Управление задачами")},
			{tgbotapi.NewKeyboardButton("Ответы на задачи"), backBtn},
		}
	} else {
		rows = [][]tgbotapi.KeyboardButton{{settingsBtn, backBtn}}
	}

	msg.ReplyMarkup = tgbotapi.NewReplyKeyboard(rows...)
	return msg
}
|
package remote
import (
"net/http"
"net/http/httptest"
)
// createTestClient spins up an httptest server backed by handler h and
// returns a client wired to it with dummy credentials and a single
// retry. The caller is responsible for closing the returned server.
func createTestClient(h http.HandlerFunc) (*client, *httptest.Server) {
	s := httptest.NewServer(h)
	c := &client{
		httpClient: s.Client(),
		baseUrl:    s.URL,
		retries:    1,
		tokenId:    "testid",
		token:      "testtoken",
	}
	return c, s
}
|
package _91_Decode_Ways
import "testing"
// TestNumDecodings exercises numDecodings against a table of known
// inputs, including the undecodable leading-zero case.
func TestNumDecodings(t *testing.T) {
	cases := []struct {
		in   string
		want int
	}{
		{"12", 2},
		{"226", 3},
		{"0", 0},
	}
	for _, tc := range cases {
		if ret := numDecodings(tc.in); ret != tc.want {
			t.Errorf("wrong answer with %d", ret)
		}
	}
}
|
package utils
const (
	// navigatePages is the fixed size of the page-navigation window
	// (private to this package).
	navigatePages = 8
)

// Page is a pagination helper carrying one page of results plus the
// metadata needed to render a pager.
type Page struct {
	List                interface{} `json:"list"`                // records of the current page
	Total               int64       `json:"total"`               // total record count
	Limit               int64       `json:"limit"`               // records per page
	Pages               int64       `json:"pages"`               // total page count
	PageNumber          int64       `json:"pageNumber"`          // current page number (1-based)
	NavigatePageNumbers []int64     `json:"navigatePageNumbers"` // page numbers shown in the navigation bar
	FirstPage           bool        `json:"firstPage"`           // whether this is the first page
	LastPage            bool        `json:"lastPage"`            // whether this is the last page
}

// CalcNavigatePageNumbers returns the window of page numbers (at most
// navigatePages of them) to display around pageNumber, given `pages`
// total pages.
//
// Fixes: the "near the end" branch previously decremented endNum
// BEFORE assigning, so the window ended at pages-1 and never included
// the final page — for pageNumber == pages the current page itself was
// missing from the window. The hard-coded literal 8 is also replaced
// with the navigatePages constant it duplicated.
func CalcNavigatePageNumbers(pageNumber int64, pages int64) []int64 {
	navigatePageNumbers := make([]int64, navigatePages)
	// All pages fit inside the window: list 1..pages.
	if pages <= navigatePages {
		var i int64
		for i < pages {
			navigatePageNumbers[i] = i + 1
			i++
		}
		return navigatePageNumbers[:pages]
	}
	// More pages than the window can hold: center on pageNumber.
	startNum := pageNumber - navigatePages/2
	endNum := pageNumber + navigatePages/2
	if startNum < 1 {
		// Near the beginning: the first navigatePages pages.
		startNum = 1
		var j int64
		for j < navigatePages {
			navigatePageNumbers[j] = startNum
			startNum++
			j++
		}
	} else if endNum > pages {
		// Near the end: the last navigatePages pages, ending exactly
		// at `pages` (assign first, then decrement).
		endNum = pages
		var k int64 = navigatePages - 1
		for k >= 0 {
			navigatePageNumbers[k] = endNum
			endNum--
			k--
		}
	} else {
		// Middle: window (startNum, startNum+navigatePages], preserved
		// from the original implementation.
		var q int64
		for q < navigatePages {
			startNum++
			navigatePageNumbers[q] = startNum
			q++
		}
	}
	return navigatePageNumbers[:navigatePages]
}
// GetPages returns the number of pages needed to hold `total` records
// at `limit` records per page (ceiling division; total == 0 still
// yields 1, preserving the original "one empty page" behavior).
// Fix: a non-positive limit previously caused a division-by-zero
// panic; it now returns 0.
func GetPages(total int64, limit int64) int64 {
	if limit <= 0 {
		return 0
	}
	return (total-1)/limit + 1
}
// IsFirstPage reports whether currentPage is the first page.
func IsFirstPage(currentPage int64) bool {
	return currentPage == 1
}

// IsLastPage reports whether currentPage is the last page.
// NOTE(review): when there is exactly one page (currentPage == pages
// == 1) this deliberately returns false — presumably so "last page"
// treatment is skipped for single-page results; confirm with callers.
func IsLastPage(currentPage int64, pages int64) bool {
	return currentPage == pages && currentPage != 1
}
|
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2019-2020 Intel Corporation
*/
package ngcnef
// URI : string formatted according to IETF RFC 3986
type URI string
// Dnai : string identifying the Data Network Area Identifier
type Dnai string
// DnaiChangeType : string identifying the DNAI change type
// Possible values are
// - EARLY: Early notification of UP path reconfiguration.
// - EARLY_LATE: Early and late notification of UP path reconfiguration. This
// value shall only be present in the subscription to the DNAI change event.
// - LATE: Late notification of UP path reconfiguration.
type DnaiChangeType string
// Dnn : string identifying the Data network name
type Dnn string
// ExternalGroupID : string containing a local identifier followed by "@" and
// a domain identifier.
// Both the local identifier and the domain identifier shall be encoded as
// strings that do not contain any "@" characters.
// See Clauses 4.6.2 and 4.6.3 of 3GPP TS 23.682 for more information
type ExternalGroupID string
// FlowInfo Flow information struct
type FlowInfo struct {
// Indicates the IP flow.
FlowID int32 `json:"flowId"`
// Indicates the packet filters of the IP flow. Refer to subclause 5.3.8 of
// 3GPP TS 29.214 for encoding.
// It shall contain UL and/or DL IP flow description.
// minItems : 1 maxItems : 2
FlowDescriptions []string `json:"flowDescriptions,omitempty"`
}
// Supi : Subscription Permanent Identifier
// pattern: '^(imsi-[0-9]{5,15}|nai-.+|.+)$'
type Supi string
// Gpsi : Generic Public Service Identifier associated with the UE
// pattern '^(msisdn-[0-9]{5,15}|extid-[^@]+@[^@]+|.+)$'
type Gpsi string
// Ipv4Addr : string representing the IPv4 address
// pattern: '^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]
//|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$'
// example: '198.51.100.1'
type Ipv4Addr string
// Ipv6Addr : string representing the IPv6 address
// pattern: '^((:|(0?|([1-9a-f][0-9a-f]{0,3}))):)((0?|([1-9a-f][0-9a-f]{0,3}))
// :){0,6}(:|(0?|([1-9a-f][0-9a-f]{0,3})))$'
// pattern: '^((([^:]+:){7}([^:]+))|((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?))$'
// example: '2001:db8:85a3::8a2e:370:7334'
type Ipv6Addr string
// Ipv6Prefix : string representing the Ipv6 Prefix
// pattern: '^((:|(0?|([1-9a-f][0-9a-f]{0,3}))):)((0?|([1-9a-f][0-9a-f]{0,3}))
// :){0,6}(:|(0?|([1-9a-f][0-9a-f]{0,3})))(\/(([0-9])|([0-9]{2})|(1[0-1][0-9])
//|(12[0-8])))$'
// pattern: '^((([^:]+:){7}([^:]+))|((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?))
// (\/.+)$'
// example: '2001:db8:abcd:12::0/64'
type Ipv6Prefix string
// Link : string Identifies a referenced resource
type Link URI
// MacAddr48 : Identifies a MAC address
// pattern: '^([0-9a-fA-F]{2})((-[0-9a-fA-F]{2}){5})$'
type MacAddr48 string
// RouteInformation Route information struct
type RouteInformation struct {
// string identifying a Ipv4 address formatted in the \"dotted decimal\"
// notation as defined in IETF RFC 1166.
Ipv4Addr Ipv4Addr `json:"ipv4Addr,omitempty"`
// string identifying a Ipv6 address formatted according to clause 4 in
// IETF RFC 5952.
// The mixed Ipv4 Ipv6 notation according to clause 5 of IETF RFC 5952
// shall
// not be used.
Ipv6Addr Ipv6Addr `json:"ipv6Addr,omitempty"`
// Port number
PortNumber uint32 `json:"portNumber"`
}
// RouteToLocation : Describes the traffic routes to the locations of the
// application
type RouteToLocation struct {
// Data network access identifier
Dnai Dnai `json:"dnai"`
// Additional route information about the route to Dnai
RouteInfo RouteInformation `json:"routeInfo,omitempty"`
// Dnai route profile identifier
RouteProfID string `json:"routeProfId,omitempty"`
}
// Snssai Network slice identifier
type Snssai struct {
// minimum: 0, maximum: 255
Sst uint8 `json:"sst"`
// pattern: '^[A-Fa-f0-9]{6}$'
Sd string `json:"sd,omitempty"`
}
// SupportedFeatures : A string used to indicate the features supported by an
// API that is used
// (subclause 6.6 in 3GPP TS 29.500).
// The string shall contain a bitmask indicating supported features in
// hexadecimal representation.
// Each character in the string shall take a value of "0" to "9" or "A" to "F"
// and shall represent the support of 4 features as described in table 5.2.2-3.
// The most significant character representing the highest-numbered features
// shall appear first in the string,
// and the character representing features 1 to 4 shall appear last
// in the string.
// The list of features and their numbering (starting with 1)
// are defined separately for each API.
// Possible features for traffic influencing are
// Notification_websocket (takes value of 1) and
// Notification_test_event (takes value of 2)
// pattern: '^[A-Fa-f0-9]*$'
type SupportedFeatures string
// WebsockNotifConfig Websocket notification configuration
type WebsockNotifConfig struct {
	// string formatted according to IETF RFC 3986 identifying a
	// referenced resource.
	WebsocketURI Link `json:"websocketUri,omitempty"`
	// Set by the AF to indicate that the Websocket delivery is requested.
	RequestWebsocketURI bool `json:"requestWebsocketUri,omitempty"`
}
// ProblemDetails Problem details struct
type ProblemDetails struct {
// problem type
Type Link `json:"type,omitempty"`
// A short, human-readable summary of the problem type.
// It should not change from occurrence to occurrence of the problem.
Title string `json:"title,omitempty"`
// A human-readable explanation specific to this occurrence of the problem.
Detail string `json:"detail,omitempty"`
// URL to problem instance
Instance Link `json:"instance,omitempty"`
// A machine-readable application error cause specific to this occurrence
// of the problem.
// This IE should be present and provide application-related error
// information, if available.
Cause string `json:"cause,omitempty"`
// Description of invalid parameters, for a request rejected due to
// invalid parameters.
InvalidParams []InvalidParam `json:"invalidParams,omitempty"`
// The HTTP status code for this occurrence of the problem.
Status int32 `json:"status,omitempty"`
}
// InvalidParam Invalid Parameters struct
type InvalidParam struct {
// Attribute''s name encoded as a JSON Pointer, or header''s name.
Param string `json:"param"`
// A human-readable reason, e.g. \"must be a positive integer\".
Reason string `json:"reason,omitempty"`
}
// PresenceState presence state
type PresenceState string
/*
// Possible values of Presence State
const (
// PresenceStateINAREA captures enum value "IN_AREA"
PresenceStateINAREA PresenceState = "IN_AREA"
// PresenceStateOUTOFAREA captures enum value "OUT_OF_AREA"
PresenceStateOUTOFAREA PresenceState = "OUT_OF_AREA"
// PresenceStateUNKNOWN captures enum value "UNKNOWN"
PresenceStateUNKNOWN PresenceState = "UNKNOWN"
// PresenceStateINACTIVE captures enum value "INACTIVE"
PresenceStateINACTIVE PresenceState = "INACTIVE"
)
*/
// Mcc mcc
type Mcc string
// Mnc mnc
type Mnc string
// PlmnID plmn Id
type PlmnID struct {
// mcc
// Required: true
Mcc Mcc `json:"mcc"`
// mnc
// Required: true
Mnc Mnc `json:"mnc"`
}
// Tac tac
type Tac string
// Tai tai
type Tai struct {
// plmn Id
// Required: true
PlmnID PlmnID `json:"plmnId"`
// tac
// Required: true
Tac Tac `json:"tac"`
}
// EutraCellID eutra cell Id
type EutraCellID string
// Ecgi ecgi
type Ecgi struct {
// eutra cell Id
// Required: true
EutraCellID EutraCellID `json:"eutraCellId"`
// plmn Id
// Required: true
PlmnID PlmnID `json:"plmnId"`
}
// NrCellID nr cell Id
type NrCellID string
// Ncgi ncgi
type Ncgi struct {
// nr cell Id
// Required: true
NrCellID NrCellID `json:"nrCellId"`
// plmn Id
// Required: true
PlmnID PlmnID `json:"plmnId"`
}
// GNbID g nb Id
type GNbID struct {
// bit length
// Required: true
// Maximum: 32
// Minimum: 22
BitLength uint8 `json:"bitLength"`
// g n b value
// Required: true
// Pattern: ^[A-Fa-f0-9]{6,8}$
GNBValue string `json:"gNBValue"`
}
// N3IwfID n3 iwf Id
type N3IwfID string
// NgeNbID nge nb Id
type NgeNbID string
// GlobalRanNodeID global ran node Id
type GlobalRanNodeID struct {
// plmn Id
// Required: true
PlmnID PlmnID `json:"plmnId"`
// n3 iwf Id
N3IwfID N3IwfID `json:"n3IwfId,omitempty"`
// g nb Id
GNbID GNbID `json:"gNbId,omitempty"`
// nge nb Id
NgeNbID NgeNbID `json:"ngeNbId,omitempty"`
}
// PresenceInfo presence info
type PresenceInfo struct {
// pra Id
PraID string `json:"praId,omitempty"`
// presence state
PresenceState PresenceState `json:"presenceState,omitempty"`
// ecgi list
// Min Items: 1
EcgiList []Ecgi `json:"ecgiList"`
// ncgi list
// Min Items: 1
NcgiList []Ncgi `json:"ncgiList"`
// global ran node Id list
// Min Items: 1
GlobalRanNodeIDList []GlobalRanNodeID `json:"globalRanNodeIdList"`
}
// SpatialValidity Describes the spatial validity of an AF request for
// influencing traffic routing
type SpatialValidity struct {
PresenceInfoList PresenceInfo `json:"presenceInfoList"`
}
// DateTime is in the date-time format
type DateTime string
// AccessType defines the access type
// supported values are
// - 3GPP_ACCESS
// - NON_3GPP_ACCESS
type AccessType string
// PduSessionID Valid values are 0 to 255
type PduSessionID uint8
// DurationSec is unsigned integer identifying a period of time in units of
// seconds.
type DurationSec uint64
// DurationSecRm is unsigned integer identifying a period of time in units of
// seconds with "nullable=true" property.
type DurationSecRm DurationSec
// DurationSecRo is unsigned integer identifying a period of time in units of
// seconds with "readOnly=true" property.
type DurationSecRo DurationSec
// ApplicationID is string providing an application identifier.
type ApplicationID string
|
package main
import (
"encoding/csv"
"flag"
"fmt"
"image"
"io"
"log"
"net/http"
"net/url"
"strconv"
"github.com/zxc111/go-heatmap"
"github.com/zxc111/go-heatmap/schemes"
)
// maxInputLength caps how many bytes of CSV input a request may carry.
const maxInputLength = 10000

// csvpoint adapts a CSV record (a slice of string fields) so it can be
// used as a heatmap data point: field 0 is X, field 1 is Y.
type csvpoint []string

// X returns the first CSV field parsed as a float64 (0 when it does
// not parse).
func (c csvpoint) X() float64 {
	v, _ := strconv.ParseFloat(c[0], 64)
	return v
}

// Y returns the second CSV field parsed as a float64 (0 when it does
// not parse).
func (c csvpoint) Y() float64 {
	v, _ := strconv.ParseFloat(c[1], 64)
	return v
}
func parseInt(vals url.Values, v string, def, min, max int) int {
rv, err := strconv.ParseInt(vals.Get(v), 10, 32)
if err != nil || int(rv) < min || int(rv) > max {
return def
}
return int(rv)
}
// rootHandler reads CSV "x,y" points from the request body (capped at
// maxInputLength bytes) and responds with a KMZ heatmap.
//
// Query parameters (all optional, clamped via parseInt):
//
//	w, h - image width/height in pixels
//	d    - dot size
//	o    - dot opacity
//
// Fix: records with fewer than two fields are now rejected with a 400.
// Previously such a record was stored as a csvpoint, whose X/Y methods
// index fields 0 and 1 and would panic during rendering.
func rootHandler(w http.ResponseWriter, req *http.Request) {
	vals := req.URL.Query()
	width := parseInt(vals, "w", 1024, 100, 4096)
	height := parseInt(vals, "h", 768, 100, 4096)
	dotsize := parseInt(vals, "d", 200, 1, 256)
	opacity := uint8(parseInt(vals, "o", 128, 1, 255))
	defer req.Body.Close()
	lr := io.LimitReader(req.Body, maxInputLength)
	cr := csv.NewReader(lr)
	data := []heatmap.DataPoint{}
	reading := true
	for reading {
		rec, err := cr.Read()
		switch err {
		case io.EOF:
			reading = false
		case nil:
			if len(rec) < 2 {
				w.WriteHeader(400)
				fmt.Fprintf(w, "Error reading data: record has %d field(s), need 2", len(rec))
				return
			}
			data = append(data, csvpoint(rec))
		default:
			log.Printf("Other error: %#v", err)
			w.WriteHeader(400)
			fmt.Fprintf(w, "Error reading data: %v", err)
			return
		}
	}
	w.Header().Set("Content-type", "application/vnd.google-earth.kmz")
	w.WriteHeader(200)
	// NOTE(review): any result from heatmap.KMZ is discarded; by this
	// point the 200 header is already written so a render error could
	// not change the status anyway — confirm this is acceptable.
	heatmap.KMZ(image.Rect(0, 0, width, height),
		data, dotsize, opacity, schemes.AlphaFire, w)
}
// main serves the heatmap endpoint on port 1756 until the listener
// fails; http.ListenAndServe always returns a non-nil error, which is
// logged fatally.
func main() {
	flag.Parse()
	http.HandleFunc("/", rootHandler)
	log.Fatal(http.ListenAndServe(":1756", nil))
}
|
package execenv
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"github.com/dnephin/dobi/logging"
git "github.com/gogits/git-module"
"github.com/metakeule/fmtdate"
"github.com/pkg/errors"
fasttmpl "github.com/valyala/fasttemplate"
)
const (
	// startTag and endTag delimit template variables, e.g. "{env.HOME}".
	startTag = "{"
	endTag   = "}"
	// execIDEnvVar, when set in the environment, overrides any
	// configured exec-id (see getExecID).
	execIDEnvVar = "DOBI_EXEC_ID"
)
// ExecEnv is a data object which contains variables for an ExecuteContext.
type ExecEnv struct {
	ExecID     string            // unique id for this execution
	Project    string            // project name
	tmplCache  map[string]string // memoizes resolved templates (see Resolve)
	workingDir string            // project directory, used by fs/git variables
	startTime  time.Time         // fixed reference time for {time.FORMAT} variables
}
// Unique returns a unique id for this execution, combining the project
// name and the exec id.
func (e *ExecEnv) Unique() string {
	return strings.Join([]string{e.Project, e.ExecID}, "-")
}
// Resolve renders the template variables in tmpl to a string and caches
// the result, so repeated resolution of the same template is free.
// On error the partially rendered string is returned alongside the
// error and nothing is cached.
func (e *ExecEnv) Resolve(tmpl string) (string, error) {
	if val, ok := e.tmplCache[tmpl]; ok {
		return val, nil
	}
	template, err := fasttmpl.NewTemplate(tmpl, startTag, endTag)
	if err != nil {
		return "", err
	}
	buff := &bytes.Buffer{}
	// templateContext supplies the value for each {tag}
	_, err = template.ExecuteFunc(buff, e.templateContext)
	if err == nil {
		e.tmplCache[tmpl] = buff.String()
	}
	return buff.String(), err
}
// ResolveSlice resolves every template in tmpls, returning the resolved
// values in order. On the first resolution error the ORIGINAL slice is
// returned together with the error (preserving existing behavior).
// The result slice is now pre-sized to avoid repeated growth.
func (e *ExecEnv) ResolveSlice(tmpls []string) ([]string, error) {
	resolved := make([]string, 0, len(tmpls))
	for _, tmpl := range tmpls {
		item, err := e.Resolve(tmpl)
		if err != nil {
			return tmpls, err
		}
		resolved = append(resolved, item)
	}
	return resolved, nil
}
// templateContext resolves a single template variable (tag) and writes
// its value to out; it implements fasttemplate's tag-function contract
// (bytes written, error).
//
// A tag may carry a default value after its last ":" (see
// splitDefault). Prefixed tags dispatch on their namespace
// (env/git/time/fs/user); un-prefixed tags must be one of
// unique/project/exec-id.
// nolint: gocyclo
func (e *ExecEnv) templateContext(out io.Writer, tag string) (int, error) {
	tag, defValue, hasDefault := splitDefault(tag)
	// write emits val, substituting the default for an empty value and
	// failing when the value is empty and no default was supplied.
	write := func(val string, err error) (int, error) {
		if err != nil {
			return 0, err
		}
		if val == "" {
			if !hasDefault {
				return 0, fmt.Errorf("a value is required for variable %q", tag)
			}
			val = defValue
		}
		return out.Write(bytes.NewBufferString(val).Bytes())
	}
	prefix, suffix := splitPrefix(tag)
	switch prefix {
	case "env":
		return write(os.Getenv(suffix), nil)
	case "git":
		// git applies its own default handling (see valueFromGit)
		return valueFromGit(out, e.workingDir, suffix, defValue)
	case "time":
		// suffix is an fmtdate format string, rendered at startTime
		return write(fmtdate.Format(suffix, e.startTime), nil)
	case "fs":
		val, err := valueFromFilesystem(suffix, e.workingDir)
		return write(val, err)
	case "user":
		val, err := valueFromUser(suffix)
		return write(val, err)
	}
	switch tag {
	case "unique":
		return write(e.Unique(), nil)
	case "project":
		return write(e.Project, nil)
	case "exec-id":
		return write(e.ExecID, nil)
	default:
		return 0, errors.Errorf("unknown variable %q", tag)
	}
}
// valueFromFilesystem resolves the "fs." template variables: "cwd"
// (the process working directory) and "projectdir" (the configured
// project working directory).
func valueFromFilesystem(name string, workingdir string) (string, error) {
	if name == "cwd" {
		return os.Getwd()
	}
	if name == "projectdir" {
		return workingdir, nil
	}
	return "", errors.Errorf("unknown variable \"fs.%s\"", name)
}
// valueFromGit resolves the "git." template variables (branch, sha,
// short-sha) from the repository at cwd, writing the value to out.
// Unlike other namespaces, a git lookup failure falls back to defValue
// (when non-empty) with a warning instead of returning an error.
// nolint: gocyclo
func valueFromGit(out io.Writer, cwd string, tag, defValue string) (int, error) {
	writeValue := func(value string) (int, error) {
		return out.Write(bytes.NewBufferString(value).Bytes())
	}
	// writeError applies the default-or-fail policy described above.
	writeError := func(err error) (int, error) {
		if defValue == "" {
			return 0, fmt.Errorf("failed resolving variable {git.%s}: %s", tag, err)
		}
		logging.Log.Warnf("Failed to get variable \"git.%s\", using default", tag)
		return writeValue(defValue)
	}
	repo, err := git.OpenRepository(cwd)
	if err != nil {
		return writeError(err)
	}
	switch tag {
	case "branch":
		branch, err := repo.GetHEADBranch()
		if err != nil {
			return writeError(err)
		}
		return writeValue(branch.Name)
	case "sha":
		commit, err := repo.GetCommit("HEAD")
		if err != nil {
			return writeError(err)
		}
		return writeValue(commit.ID.String())
	case "short-sha":
		commit, err := repo.GetCommit("HEAD")
		if err != nil {
			return writeError(err)
		}
		// first 10 hex characters of the commit id
		return writeValue(commit.ID.String()[:10])
	default:
		return 0, errors.Errorf("unknown variable \"git.%s\"", tag)
	}
}
// splitDefault splits tag at its LAST ":" into (tag, default, true);
// when tag contains no ":" it returns (tag, "", false).
func splitDefault(tag string) (string, string, bool) {
	idx := strings.LastIndex(tag, ":")
	if idx < 0 {
		return tag, "", false
	}
	return tag[:idx], tag[idx+1:], true
}
// splitPrefix splits tag at its first "." into (prefix, rest). A tag
// with no ".", a leading ".", or a trailing "." has no usable prefix
// and is returned unchanged as ("", tag).
func splitPrefix(tag string) (string, string) {
	i := strings.Index(tag, ".")
	if i <= 0 || i == len(tag)-1 {
		return "", tag
	}
	return tag[:i], tag[i+1:]
}
// NewExecEnvFromConfig returns a new ExecEnv built from config values:
// the project name falls back to the working directory's base name,
// and the exec-id comes from the DOBI_EXEC_ID environment variable,
// the rendered execID template, or the user-based default (in that
// order — see getExecID).
func NewExecEnvFromConfig(execID, project, workingDir string) (*ExecEnv, error) {
	env := NewExecEnv(defaultExecID(), getProjectName(project, workingDir), workingDir)
	var err error
	env.ExecID, err = getExecID(execID, env)
	return env, err
}
// NewExecEnv returns a new ExecEnv from values, with an empty template
// cache and the start time pinned to now.
func NewExecEnv(execID, project, workingDir string) *ExecEnv {
	return &ExecEnv{
		ExecID:     execID,
		Project:    project,
		tmplCache:  make(map[string]string),
		startTime:  time.Now(),
		workingDir: workingDir,
	}
}
// getProjectName returns project when set; otherwise it falls back to
// the base name of the working directory and warns about the default.
func getProjectName(project, workingDir string) string {
	if project == "" {
		project = filepath.Base(workingDir)
		logging.Log.Warnf("meta.project is not set. Using default %q.", project)
	}
	return project
}
// getExecID determines the effective exec id. Precedence: the
// DOBI_EXEC_ID environment variable wins outright; otherwise the
// execID template is resolved; an empty template keeps the default
// already stored on env. Non-default values are validated to be a
// single non-empty line.
func getExecID(execID string, env *ExecEnv) (string, error) {
	var err error
	if value, exists := os.LookupEnv(execIDEnvVar); exists {
		return validateExecID(value)
	}
	if execID == "" {
		return env.ExecID, nil
	}
	execID, err = env.Resolve(execID)
	if err != nil {
		return "", err
	}
	return validateExecID(execID)
}
// validateExecID trims surrounding whitespace from output and verifies
// that the result is a single non-empty line, returning it on success.
func validateExecID(output string) (string, error) {
	trimmed := strings.TrimSpace(output)
	if trimmed == "" {
		return "", fmt.Errorf("exec-id template was empty after rendering")
	}
	if lines := strings.Count(trimmed, "\n") + 1; lines > 1 {
		return "", fmt.Errorf(
			"exec-id template rendered to %v lines, expected only one", lines)
	}
	return trimmed, nil
}
// defaultExecID returns the current user's name, falling back to the
// USER environment variable when the lookup fails.
// NOTE(review): getUserName is defined elsewhere in this package —
// presumably it queries the OS user database; confirm its semantics
// there.
func defaultExecID() string {
	username, err := getUserName()
	if err == nil {
		return username
	}
	return os.Getenv("USER")
}
|
/////////////////////////////////////////////////////////////////////
// arataca89@gmail.com
// 20210417
//
// func IndexFunc(s string, f func(rune) bool) int
//
// Retorna o índice da primeira ocorrência de caracter que satisfaz a
// função f() ou -1 se não houver ocorrência.
//
// Fonte: https://golang.org/pkg/strings/
//
package main
import (
"fmt"
"strings"
"unicode"
)
// main demonstrates strings.IndexFunc: it prints the byte index of the
// first rune in the Han unicode range, or -1 when none is present.
func main() {
	isHan := func(c rune) bool {
		return unicode.Is(unicode.Han, c)
	}
	fmt.Println(strings.IndexFunc("Hello, 世界", isHan))   // 7
	fmt.Println(strings.IndexFunc("Hello, world", isHan)) // -1
}
|
/*
Package sort provides different sorting functions for a wide variety of slice types.
For more information about which solution best fits your case, consult our documentation page http://www.algo.org
*/
package sort |
package client
import (
"time"
"github.com/CyCoreSystems/ari"
"github.com/CyCoreSystems/ari-proxy/proxy"
"github.com/CyCoreSystems/ari/rid"
)
// channel provides the ARI channel operations by forwarding each call
// through the client's proxy transport.
type channel struct {
	c *Client
}

// Get returns a handle for the channel identified by key. The lookup
// is best-effort: if the proxy request fails, the error is logged and
// a handle built from the original key is returned instead of failing.
func (c *channel) Get(key *ari.Key) *ari.ChannelHandle {
	k, err := c.c.getRequest(&proxy.Request{
		Kind: "ChannelGet",
		Key:  key,
	})
	if err != nil {
		c.c.log.Warn("failed to make data request for channel", "error", err)
		return ari.NewChannelHandle(key, c, nil)
	}
	return ari.NewChannelHandle(k, c, nil)
}
// List returns the keys of all channels matching filter.
func (c *channel) List(filter *ari.Key) ([]*ari.Key, error) {
	return c.c.listRequest(&proxy.Request{
		Kind: "ChannelList",
		Key:  filter,
	})
}

// Originate requests creation of a channel described by o, scoped to
// referenceKey, and returns a handle to the new channel.
func (c *channel) Originate(referenceKey *ari.Key, o ari.OriginateRequest) (*ari.ChannelHandle, error) {
	k, err := c.c.createRequest(&proxy.Request{
		Kind: "ChannelOriginate",
		Key:  referenceKey,
		ChannelOriginate: &proxy.ChannelOriginate{
			OriginateRequest: o,
		},
	})
	if err != nil {
		return nil, err
	}
	return ari.NewChannelHandle(k, c, nil), nil
}

// StageOriginate prepares an originate without dialing it. A channel ID
// is assigned up front (when o.ChannelID is empty) so the returned
// handle is addressable immediately; the handle's exec function then
// performs the actual Originate.
func (c *channel) StageOriginate(referenceKey *ari.Key, o ari.OriginateRequest) (*ari.ChannelHandle, error) {
	if o.ChannelID == "" {
		o.ChannelID = rid.New(rid.Channel)
	}
	k, err := c.c.createRequest(&proxy.Request{
		Kind: "ChannelStageOriginate",
		Key:  referenceKey,
		ChannelOriginate: &proxy.ChannelOriginate{
			OriginateRequest: o,
		},
	})
	if err != nil {
		return nil, err
	}
	return ari.NewChannelHandle(k.New(ari.ChannelKey, o.ChannelID), c, func(h *ari.ChannelHandle) error {
		_, err := c.Originate(referenceKey, o)
		return err
	}), nil
}

// Create requests creation of a channel described by o and returns a
// handle keyed to o.ChannelID.
func (c *channel) Create(key *ari.Key, o ari.ChannelCreateRequest) (*ari.ChannelHandle, error) {
	k, err := c.c.createRequest(&proxy.Request{
		Kind: "ChannelCreate",
		Key:  key,
		ChannelCreate: &proxy.ChannelCreate{
			ChannelCreateRequest: o,
		},
	})
	if err != nil {
		return nil, err
	}
	return ari.NewChannelHandle(k.New(ari.ChannelKey, o.ChannelID), c, nil), nil
}

// Data fetches the full channel data for the channel identified by key.
func (c *channel) Data(key *ari.Key) (*ari.ChannelData, error) {
	data, err := c.c.dataRequest(&proxy.Request{
		Kind: "ChannelData",
		Key:  key,
	})
	if err != nil {
		return nil, err
	}
	return data.Channel, nil
}
func (c *channel) Continue(key *ari.Key, context string, extension string, priority int) error {
return c.c.commandRequest(&proxy.Request{
Kind: "ChannelContinue",
Key: key,
ChannelContinue: &proxy.ChannelContinue{
Context: context,
Extension: extension,
Priority: priority,
},
})
}
// Busy sends a ChannelBusy command to the proxy for the given channel.
func (c *channel) Busy(key *ari.Key) error {
	return c.c.commandRequest(&proxy.Request{
		Kind: "ChannelBusy",
		Key:  key,
	})
}

// Congestion sends a ChannelCongestion command to the proxy for the given channel.
func (c *channel) Congestion(key *ari.Key) error {
	return c.c.commandRequest(&proxy.Request{
		Kind: "ChannelCongestion",
		Key:  key,
	})
}

// Hangup requests that the channel be hung up with the given reason string.
func (c *channel) Hangup(key *ari.Key, reason string) error {
	return c.c.commandRequest(&proxy.Request{
		Kind: "ChannelHangup",
		Key:  key,
		ChannelHangup: &proxy.ChannelHangup{
			Reason: reason,
		},
	})
}

// Answer sends a ChannelAnswer command to the proxy for the given channel.
func (c *channel) Answer(key *ari.Key) error {
	return c.c.commandRequest(&proxy.Request{
		Kind: "ChannelAnswer",
		Key:  key,
	})
}

// Ring sends a ChannelRing command to the proxy for the given channel.
func (c *channel) Ring(key *ari.Key) error {
	return c.c.commandRequest(&proxy.Request{
		Kind: "ChannelRing",
		Key:  key,
	})
}

// StopRing sends a ChannelStopRing command to the proxy for the given channel.
func (c *channel) StopRing(key *ari.Key) error {
	return c.c.commandRequest(&proxy.Request{
		Kind: "ChannelStopRing",
		Key:  key,
	})
}

// SendDTMF sends the given DTMF digit string on the channel. A nil options
// pointer is replaced with an empty options struct so the proxy always
// receives a non-nil value.
func (c *channel) SendDTMF(key *ari.Key, dtmf string, opts *ari.DTMFOptions) error {
	if opts == nil {
		opts = &ari.DTMFOptions{}
	}
	return c.c.commandRequest(&proxy.Request{
		Kind: "ChannelSendDTMF",
		Key:  key,
		ChannelSendDTMF: &proxy.ChannelSendDTMF{
			DTMF:    dtmf,
			Options: opts,
		},
	})
}
// Hold sends a ChannelHold command to the proxy for the given channel.
func (c *channel) Hold(key *ari.Key) error {
	return c.c.commandRequest(&proxy.Request{
		Kind: "ChannelHold",
		Key:  key,
	})
}

// StopHold sends a ChannelStopHold command to the proxy for the given channel.
func (c *channel) StopHold(key *ari.Key) error {
	return c.c.commandRequest(&proxy.Request{
		Kind: "ChannelStopHold",
		Key:  key,
	})
}

// Mute requests muting of the channel in the given direction.
func (c *channel) Mute(key *ari.Key, dir ari.Direction) error {
	return c.c.commandRequest(&proxy.Request{
		Kind: "ChannelMute",
		Key:  key,
		ChannelMute: &proxy.ChannelMute{
			Direction: dir,
		},
	})
}

// Unmute requests unmuting of the channel in the given direction. It reuses
// the ChannelMute payload type; only the Kind differs.
func (c *channel) Unmute(key *ari.Key, dir ari.Direction) error {
	return c.c.commandRequest(&proxy.Request{
		Kind: "ChannelUnmute",
		Key:  key,
		ChannelMute: &proxy.ChannelMute{
			Direction: dir,
		},
	})
}

// MOH requests that the given music-on-hold class be started on the channel.
func (c *channel) MOH(key *ari.Key, moh string) error {
	return c.c.commandRequest(&proxy.Request{
		Kind: "ChannelMOH",
		Key:  key,
		ChannelMOH: &proxy.ChannelMOH{
			Music: moh,
		},
	})
}

// StopMOH sends a ChannelStopMOH command to the proxy for the given channel.
func (c *channel) StopMOH(key *ari.Key) error {
	return c.c.commandRequest(&proxy.Request{
		Kind: "ChannelStopMOH",
		Key:  key,
	})
}

// Silence sends a ChannelSilence command to the proxy for the given channel.
func (c *channel) Silence(key *ari.Key) error {
	return c.c.commandRequest(&proxy.Request{
		Kind: "ChannelSilence",
		Key:  key,
	})
}

// StopSilence sends a ChannelStopSilence command to the proxy for the given channel.
func (c *channel) StopSilence(key *ari.Key) error {
	return c.c.commandRequest(&proxy.Request{
		Kind: "ChannelStopSilence",
		Key:  key,
	})
}
// Snoop asks the proxy to create a snoop channel against the given channel
// and returns a handle keyed to the snoop channel ID.
func (c *channel) Snoop(key *ari.Key, snoopID string, opts *ari.SnoopOptions) (*ari.ChannelHandle, error) {
	k, err := c.c.createRequest(&proxy.Request{
		Kind: "ChannelSnoop",
		Key:  key,
		ChannelSnoop: &proxy.ChannelSnoop{
			SnoopID: snoopID,
			Options: opts,
		},
	})
	if err != nil {
		return nil, err
	}
	return ari.NewChannelHandle(k.New(ari.ChannelKey, snoopID), c, nil), nil
}

// StageSnoop prepares (but does not execute) a snoop. Note it uses a
// getRequest rather than a createRequest; the real Snoop only happens when
// the returned handle's Exec is invoked.
func (c *channel) StageSnoop(key *ari.Key, snoopID string, opts *ari.SnoopOptions) (*ari.ChannelHandle, error) {
	k, err := c.c.getRequest(&proxy.Request{
		Kind: "ChannelStageSnoop",
		Key:  key,
		ChannelSnoop: &proxy.ChannelSnoop{
			SnoopID: snoopID,
			Options: opts,
		},
	})
	if err != nil {
		return nil, err
	}
	return ari.NewChannelHandle(k, c, func(h *ari.ChannelHandle) error {
		_, err := c.Snoop(k.New(ari.ChannelKey, key.ID), snoopID, opts)
		return err
	}), nil
}

// Dial requests that the (already created) channel be dialed, with the given
// caller ID and timeout.
func (c *channel) Dial(key *ari.Key, caller string, timeout time.Duration) error {
	return c.c.commandRequest(&proxy.Request{
		Kind: "ChannelDial",
		Key:  key,
		ChannelDial: &proxy.ChannelDial{
			Caller:  caller,
			Timeout: timeout,
		},
	})
}
// Play starts playback of the given media URI on the channel and returns a
// playback handle keyed to the given playback ID.
func (c *channel) Play(key *ari.Key, playbackID string, mediaURI string) (*ari.PlaybackHandle, error) {
	k, err := c.c.createRequest(&proxy.Request{
		Kind: "ChannelPlay",
		Key:  key,
		ChannelPlay: &proxy.ChannelPlay{
			PlaybackID: playbackID,
			MediaURI:   mediaURI,
		},
	})
	if err != nil {
		return nil, err
	}
	return ari.NewPlaybackHandle(k.New(ari.PlaybackKey, playbackID), c.c.Playback(), nil), nil
}

// StagePlay prepares (but does not execute) a playback. An empty playbackID
// is replaced with a fresh one so the returned handle key is stable; the
// real Play runs when the handle's Exec is invoked.
func (c *channel) StagePlay(key *ari.Key, playbackID string, mediaURI string) (*ari.PlaybackHandle, error) {
	if playbackID == "" {
		playbackID = rid.New(rid.Playback)
	}
	k, err := c.c.getRequest(&proxy.Request{
		Kind: "ChannelStagePlay",
		Key:  key,
		ChannelPlay: &proxy.ChannelPlay{
			PlaybackID: playbackID,
			MediaURI:   mediaURI,
		},
	})
	if err != nil {
		return nil, err
	}
	return ari.NewPlaybackHandle(k.New(ari.PlaybackKey, playbackID), c.c.Playback(), func(h *ari.PlaybackHandle) error {
		_, err := c.Play(k.New(ari.ChannelKey, key.ID), playbackID, mediaURI)
		return err
	}), nil
}

// Record starts a live recording on the channel with the given name and
// options, returning a live-recording handle keyed by that name.
func (c *channel) Record(key *ari.Key, name string, opts *ari.RecordingOptions) (*ari.LiveRecordingHandle, error) {
	rb, err := c.c.createRequest(&proxy.Request{
		Kind: "ChannelRecord",
		Key:  key,
		ChannelRecord: &proxy.ChannelRecord{
			Name:    name,
			Options: opts,
		},
	})
	if err != nil {
		return nil, err
	}
	return ari.NewLiveRecordingHandle(rb.New(ari.LiveRecordingKey, name), c.c.LiveRecording(), nil), nil
}

// StageRecord prepares (but does not execute) a live recording; the real
// Record runs when the handle's Exec is invoked.
//
// NOTE(review): both the handle key and the deferred Record use k.ID rather
// than the provided name. This assumes the proxy sets the returned key's ID
// to the recording name — confirm against the proxy server implementation.
func (c *channel) StageRecord(key *ari.Key, name string, opts *ari.RecordingOptions) (*ari.LiveRecordingHandle, error) {
	k, err := c.c.getRequest(&proxy.Request{
		Kind: "ChannelStageRecord",
		Key:  key,
		ChannelRecord: &proxy.ChannelRecord{
			Name:    name,
			Options: opts,
		},
	})
	if err != nil {
		return nil, err
	}
	return ari.NewLiveRecordingHandle(k.New(ari.LiveRecordingKey, k.ID), c.c.LiveRecording(), func(h *ari.LiveRecordingHandle) error {
		_, err := c.Record(k.New(ari.ChannelKey, key.ID), k.ID, opts)
		return err
	}), nil
}
// Subscribe subscribes to the named events for the channel. The proxy is
// informed first (so events get routed to this client); if that fails and
// the key is bound to a dialog, nil is returned, otherwise the local bus
// subscription proceeds despite the proxy failure (best-effort).
func (c *channel) Subscribe(key *ari.Key, n ...string) ari.Subscription {
	err := c.c.commandRequest(&proxy.Request{
		Kind: "ChannelSubscribe",
		Key:  key,
	})
	if err != nil {
		c.c.log.Warn("failed to call channel subscribe")
		if key.Dialog != "" {
			c.c.log.Error("dialog present; failing")
			return nil
		}
	}
	return c.c.Bus().Subscribe(key, n...)
}

// GetVariable fetches the value of the named channel variable via the proxy.
func (c *channel) GetVariable(key *ari.Key, name string) (string, error) {
	data, err := c.c.dataRequest(&proxy.Request{
		Kind: "ChannelVariableGet",
		Key:  key,
		ChannelVariable: &proxy.ChannelVariable{
			Name: name,
		},
	})
	if err != nil {
		return "", err
	}
	return data.Variable, nil
}

// SetVariable sets the named channel variable to the given value via the proxy.
func (c *channel) SetVariable(key *ari.Key, name, value string) error {
	return c.c.commandRequest(&proxy.Request{
		Kind: "ChannelVariableSet",
		Key:  key,
		ChannelVariable: &proxy.ChannelVariable{
			Name:  name,
			Value: value,
		},
	})
}
|
package ipa
import (
"errors"
"fmt"
"os"
"runtime"
"testing"
"github.com/iineva/ipa-server/pkg/seekbuf"
)
// TestReadPlistInfo parses a sample .ipa from test_data and checks that
// Parse succeeds with a non-nil result. Memory usage is printed before and
// after to eyeball the parser's footprint.
func TestReadPlistInfo(t *testing.T) {
	printMemUsage()
	fileName := "test_data/ipa.ipa"
	// fileName := "/Users/steven/Downloads/TikTok (18.5.0) Unicorn v4.9.ipa"
	f, err := os.Open(fileName)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		t.Fatal(err)
	}
	// Wrap the file in a seekable buffer (MemoryMode) as Parse requires
	// random access.
	buf, err := seekbuf.Open(f, seekbuf.MemoryMode)
	if err != nil {
		t.Fatal(err)
	}
	defer buf.Close()
	info, err := Parse(buf, fi.Size())
	if err != nil {
		t.Fatal(err)
	}
	if info == nil {
		t.Fatal(errors.New("parse error"))
	}
	printMemUsage()
	// log.Printf("%+v", info)
}
// TestIconSize checks iconSize against a table of icon file names; the
// expected value is the point size multiplied by the @Nx scale factor
// embedded in the name.
func TestIconSize(t *testing.T) {
	data := map[string]int{
		"Payload/UnicornApp.app/AppIcon_TikTok29x29@3x.png":          87,
		"Payload/UnicornApp.app/AppIcon_TikTok40x40@2x.png":          80,
		"Payload/UnicornApp.app/AppIcon_TikTok60x60@3x.png":          180,
		"Payload/UnicornApp.app/AppIcon_TikTok60x60@2x.png":          120,
		"Payload/UnicornApp.app/AppIcon_TikTok40x40@3x.png":          120,
		"Payload/UnicornApp.app/AppIcon_TikTok29x29@2x.png":          58,
		"Payload/UnicornApp.app/AppIcon_TikTok83.5x83.5@2x~ipad.png": 167,
		"Payload/UnicornApp.app/AppIcon_TikTok20x20@3x.png":          60,
		"Payload/UnicornApp.app/AppIcon_TikTok76x76~ipad.png":        76,
		"Payload/UnicornApp.app/AppIcon_TikTok20x20@2x.png":          40,
		"Payload/UnicornApp.app/AppIcon_TikTok76x76@2x~ipad.png":     152,
	}
	for k, v := range data {
		size, err := iconSize(k)
		if err != nil {
			t.Fatal(err)
		}
		if size != v {
			t.Fatal(errors.New("size error"))
		}
	}
}
// printMemUsage writes a one-line summary of the current Go runtime memory
// statistics (heap in use, cumulative allocations, OS-reserved memory, and
// completed GC cycles) to stdout.
// For info on each field, see: https://golang.org/pkg/runtime/#MemStats
func printMemUsage() {
	var stats runtime.MemStats
	runtime.ReadMemStats(&stats)
	fmt.Printf("Alloc = %v MiB\tTotalAlloc = %v MiB\tSys = %v MiB\tNumGC = %v\n",
		bToMb(stats.Alloc), bToMb(stats.TotalAlloc), bToMb(stats.Sys), stats.NumGC)
}
// bToMb converts a byte count to whole mebibytes, discarding the remainder.
// For unsigned values, shifting right by 20 bits is exactly the double
// division by 1024 the original performed.
func bToMb(b uint64) uint64 {
	return b >> 20
}
|
//go:build linux
// +build linux
/*
Copyright © 2021 SUSE LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockerproxy
import (
"fmt"
"net"
"os"
"os/exec"
"os/signal"
"path"
"time"
"github.com/linuxkit/virtsock/pkg/vsock"
"golang.org/x/sys/unix"
"github.com/rancher-sandbox/rancher-desktop/src/go/wsl-helper/pkg/dockerproxy/platform"
"github.com/rancher-sandbox/rancher-desktop/src/go/wsl-helper/pkg/dockerproxy/util"
)
// defaultProxyEndpoint is the path on which dockerd should listen, relative
// to the WSL mount point (see GetDefaultProxyEndpoint).
const defaultProxyEndpoint = "rancher-desktop/run/docker.sock"
// waitForFileToExist will block until the given path exists. If the given
// timeout is reached, an error will be returned.
func waitForFileToExist(path string, timeout time.Duration) error {
timer := time.After(timeout)
ready := make(chan struct{})
expired := false
go func() {
defer close(ready)
// We just do polling here, since inotify / fanotify both have fairly
// low limits on the concurrent number of watchers.
for !expired {
_, err := os.Lstat(path)
if err == nil {
return
}
time.Sleep(500 * time.Millisecond)
}
}()
select {
case <-ready:
return nil
case <-timer:
expired = true
return fmt.Errorf("timed out waiting for %s to exist", path)
}
}
// GetDefaultProxyEndpoint returns the absolute path of the default docker
// socket: the WSL mount point joined with defaultProxyEndpoint.
func GetDefaultProxyEndpoint() (string, error) {
	mountPoint, err := platform.GetWSLMountPoint()
	if err != nil {
		return "", err
	}
	return path.Join(mountPoint, defaultProxyEndpoint), nil
}
// Start the dockerd process within this WSL distribution on the given vsock
// port as well as the unix socket at the given path. All other arguments are
// passed to dockerd as-is.
//
// This function returns after dockerd has been asked to stop (on SIGTERM) or
// an earlier setup step fails.
//
// Fixes: the signal channel is now buffered as required by signal.Notify;
// and after the listener is deliberately closed on SIGTERM, the accept loop
// returns instead of spinning forever on the closed listener (the original
// trailing "return nil" was unreachable).
func Start(port uint32, dockerSocket string, args []string) error {
	dockerd, err := exec.LookPath("dockerd")
	if err != nil {
		return fmt.Errorf("could not find dockerd: %w", err)
	}

	// We have dockerd listen on the given docker socket, so that it can be
	// used from other distributions (though we still need to do path
	// path translation on top).
	err = os.MkdirAll(path.Dir(dockerSocket), 0o755)
	if err != nil {
		return fmt.Errorf("could not set up docker socket: %w", err)
	}
	args = append(args, fmt.Sprintf("--host=unix://%s", dockerSocket))
	args = append(args, "--host=unix:///var/run/docker.sock")
	cmd := exec.Command(dockerd, args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	err = cmd.Start()
	if err != nil {
		return fmt.Errorf("could not start dockerd: %w", err)
	}
	defer func() {
		// Best-effort: ask dockerd to shut down when we exit.
		if proc := cmd.Process; proc != nil {
			err := proc.Signal(unix.SIGTERM)
			if err != nil {
				fmt.Printf("could not kill docker: %s\n", err)
			}
		}
	}()

	// Wait for the docker socket to exist...
	err = waitForFileToExist(dockerSocket, 30*time.Second)
	if err != nil {
		return err
	}

	listener, err := platform.ListenVsockNonBlocking(vsock.CIDAny, port)
	if err != nil {
		return fmt.Errorf("could not listen on vsock port %08x: %w", port, err)
	}
	defer listener.Close()

	// signal.Notify requires a buffered channel so a signal delivered while
	// we are not receiving is not dropped.
	sigch := make(chan os.Signal, 1)
	signal.Notify(sigch, unix.SIGTERM)
	// done tells the accept loop that the listener was closed on purpose.
	done := make(chan struct{})
	go func() {
		<-sigch
		close(done)
		listener.Close()
	}()
	for {
		conn, err := listener.Accept()
		if err != nil {
			select {
			case <-done:
				// Deliberate shutdown: exit cleanly instead of looping on
				// the closed listener's error.
				return nil
			default:
			}
			fmt.Printf("error accepting client connection: %s\n", err)
			continue
		}
		go handleConnection(conn, dockerSocket)
	}
}
// handleConnection handles piping the connection from the client to the
// docker socket: it dials the local unix socket at dockerPath and forwards
// traffic between the two via util.Pipe until either side closes.
// Errors are logged to stdout; the docker-side connection is always closed.
func handleConnection(conn net.Conn, dockerPath string) {
	dockerConn, err := net.Dial("unix", dockerPath)
	if err != nil {
		fmt.Printf("could not connect to docker: %s\n", err)
		return
	}
	defer dockerConn.Close()
	err = util.Pipe(conn, dockerConn)
	if err != nil {
		fmt.Printf("error forwarding docker connection: %s\n", err)
		return
	}
}
|
package routes
import (
"net/http"
"github.com/gorilla/mux"
)
// Route describes one HTTP endpoint: its URI pattern, the HTTP method it
// accepts, and the handler function to invoke.
type Route struct {
	Uri     string
	Method  string
	Handler func(http.ResponseWriter, *http.Request)
}
// Load returns the full set of routes served by the application
// (currently just the user routes).
func Load() []Route {
	return usersRoutes
}
// SetupRoutes registers every loaded route on the given mux router and
// returns the same router for chaining.
func SetupRoutes(r *mux.Router) *mux.Router {
	for _, route := range Load() {
		r.HandleFunc(route.Uri, route.Handler).Methods(route.Method)
	}
	return r
}
|
package draw
// IHandler abstracts dialog/control handling: binding an object, looking up
// controls by id or name, and manipulating basic control properties.
type IHandler interface {
	SetObject(obj IObject)
	GetControlById(id int) IControlBase
	GetControlByName(name string) IControlBase
	//GetControlDialog(id int) IDialogBase
	SetVisible(name string, isVisible bool)
	SetDisable(name string, isDisable bool)
	SetTitle(name string, title string)
	GetTitle(name string) string
}
// Handler is a base handler implementation that stores the bound object.
type Handler struct {
	obj IObject // the object this handler operates on; set via SetObject
}
// SetObject stores the object this handler operates on.
// (Receiver renamed from the non-idiomatic "this" to "h".)
func (h *Handler) SetObject(obj IObject) {
	h.obj = obj
}
// SetVisible toggles the visibility of the named control.
// Currently unimplemented (no-op); the intended logic is sketched below.
// (Receiver renamed from the non-idiomatic "this" to "h".)
func (h *Handler) SetVisible(controlName string, visible bool) {
	//@@TODO
	/*control := this.impl.GetControlByName(controlName)
	if control != nil {
		control.SetVisible(visible)
	}*/
}
// GetControlById looks up a control by its numeric id.
// Currently unimplemented; always returns nil.
// (Receiver renamed from the non-idiomatic "this" to "h".)
func (h *Handler) GetControlById(id int) IControlBase {
	//@@TODO
	return nil
}
// GetControlByName looks up a control by its name.
// Currently unimplemented; always returns nil.
// (Receiver renamed from the non-idiomatic "this" to "h".)
func (h *Handler) GetControlByName(name string) IControlBase {
	//@@TODO
	return nil
}
|
/*
* @lc app=leetcode.cn id=1207 lang=golang
*
* [1207] 独一无二的出现次数
*/
package main
// @lc code=start
// uniqueOccurrences reports whether the number of occurrences of each
// distinct value in arr is itself unique across all values.
func uniqueOccurrences(arr []int) bool {
	// Tally how many times each value appears.
	counts := map[int]int{}
	for _, v := range arr {
		counts[v]++
	}
	// Every tally must be distinct.
	seen := map[int]bool{}
	for _, n := range counts {
		if seen[n] {
			return false
		}
		seen[n] = true
	}
	return true
}
// func main() {
// fmt.Println(uniqueOccurrences([]int{1, 2, 2, 1, 1, 3}))
// fmt.Println(uniqueOccurrences([]int{1, 2}))
// fmt.Println(uniqueOccurrences([]int{-3, 0, 1, -3, 1, 1, 1, -3, 10, 0}))
// }
// @lc code=end
|
package main
import (
"fmt"
"net"
)
// MySever is a minimal TCP logging server (name kept as-is to avoid breaking
// callers): it listens on :8080, accepts clients, and prints whatever the
// connected client sends. When a client read fails (client left), the server
// exits the accept loop — preserving the original goto-based behavior — and
// calls over() so the caller can synchronize.
//
// Fix: the per-client connection is now closed before leaving the read loop;
// the original leaked it.
func MySever(over func()) {
	defer over()
	// Listen for TCP connections.
	l, err := net.Listen("tcp", ":8080")
	if err != nil {
		fmt.Printf("[server] listen err(%v)\n", err)
		return
	}
	defer l.Close()
	for {
		// Wait for the next client.
		c, err := l.Accept()
		if err != nil {
			fmt.Println("[server] accept err")
			continue
		}
		for {
			buf := make([]byte, 1024)
			// Receive data from the client.
			n, err := c.Read(buf)
			if err != nil {
				fmt.Println("[server] read err")
				c.Close() // fix: do not leak the client connection
				goto end
			}
			fmt.Printf("[server] content: %s\n", buf[:n])
		}
	}
end:
	fmt.Println("[server] client leave")
}
// MyClient is a minimal TCP client: it dials the local server on :8080,
// sends one fixed message, reports the byte count, and returns. It calls
// over() on return so the caller can synchronize.
func MyClient(over func()) {
	defer func() {
		over()
	}()
	// Connect to the server.
	conn, err := net.Dial("tcp", ":8080")
	if err != nil {
		fmt.Printf("[client] dial err(%v)\n", err)
		return
	}
	defer func() {
		conn.Close()
	}()
	// Send the payload and report how many bytes were written.
	n, err := conn.Write([]byte("ding kai hui"))
	if err != nil {
		fmt.Println("[client] write err")
	} else {
		fmt.Printf("[client] send n(%d) byte\n", n)
	}
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package infosync
import (
"sync"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/sessionctx/binloginfo"
"github.com/pingcap/tidb/util/versioninfo"
)
// MockGlobalServerInfoManagerEntry is a mock global ServerInfoManager entry.
// Mock server ports are handed out starting at 4000 (see getServerInfo);
// Close resets the counter to this base.
var MockGlobalServerInfoManagerEntry = &MockGlobalServerInfoManager{
	mockServerPort: 4000,
}
// MockGlobalServerInfoManager manages serverInfos in Distributed unit tests.
// All fields are guarded by mu.
type MockGlobalServerInfoManager struct {
	infos          []*ServerInfo
	mu             sync.Mutex
	mockServerPort uint // used to mock ServerInfo, then every mock server will have different port
}
// Add appends one mock ServerInfo built from the given id and
// server-ID getter. Safe for concurrent use.
func (m *MockGlobalServerInfoManager) Add(id string, serverIDGetter func() uint64) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.infos = append(m.infos, m.getServerInfo(id, serverIDGetter))
}
// Delete removes one mock ServerInfo by index, returning an error when the
// index is out of range. Safe for concurrent use.
func (m *MockGlobalServerInfoManager) Delete(idx int) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if idx >= len(m.infos) || idx < 0 {
		return errors.New("server idx out of bound")
	}
	m.infos = append(m.infos[:idx], m.infos[idx+1:]...)
	return nil
}
// GetAllServerInfo returns all serverInfos in a map keyed by server ID.
// The map is freshly built on each call; entries point at the shared
// ServerInfo values. Safe for concurrent use.
func (m *MockGlobalServerInfoManager) GetAllServerInfo() map[string]*ServerInfo {
	m.mu.Lock()
	defer m.mu.Unlock()
	allInfo := make(map[string]*ServerInfo)
	for _, info := range m.infos {
		allInfo[info.ID] = info
	}
	return allInfo
}
// getServerInfo builds a mock tidb ServerInfo from the global config.
// The port counter is incremented on every call so each mock server gets a
// distinct port. Caller must hold m.mu.
func (m *MockGlobalServerInfoManager) getServerInfo(id string, serverIDGetter func() uint64) *ServerInfo {
	cfg := config.GetGlobalConfig()
	// TODO: each mock server can have different config
	info := &ServerInfo{
		ID:             id,
		IP:             cfg.AdvertiseAddress,
		Port:           m.mockServerPort,
		StatusPort:     cfg.Status.StatusPort,
		Lease:          cfg.Lease,
		BinlogStatus:   binloginfo.GetStatus().String(),
		StartTimestamp: time.Now().Unix(),
		Labels:         cfg.Labels,
		ServerIDGetter: serverIDGetter,
	}
	m.mockServerPort++
	info.Version = mysql.ServerVersion
	info.GitHash = versioninfo.TiDBGitHash
	return info
}
// Close resets the manager: the port counter returns to its base (4000) and
// the server list is emptied (capacity retained). Safe for concurrent use.
func (m *MockGlobalServerInfoManager) Close() {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.mockServerPort = 4000
	m.infos = m.infos[:0]
}
|
package response
// GetNamespacesResponse is the JSON payload listing cluster namespaces.
type GetNamespacesResponse struct {
	Namespaces []Namespace `json:"namespaces"`
}

// Namespace is one namespace entry: its name, status, and age rendered as a
// display string.
type Namespace struct {
	Name   string `json:"name"`
	Status string `json:"status"`
	Age    string `json:"age"`
}
|
package lib
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"net/url"
"path"
"text/template"
)
// Client is an HTTP API client: BaseURL is the service root and HTTPClient
// performs the requests.
type Client struct {
	BaseURL    *url.URL
	HTTPClient *http.Client
}
// Country is one row of the per-country statistics payload, mapped via the
// JSON tags.
//
// NOTE(review): "Recovere" and "CasesPerOneMission" look like typos for
// "Recovered" / "CasesPerOneMillion", but they are exported fields (also
// referenced by the String template), so renaming would break callers.
type Country struct {
	Name                string  `json:"country"`
	Cases               int     `json:"cases"`
	TodayCases          int     `json:"todayCases"`
	Deaths              int     `json:"deaths"`
	TodayDeaths         int     `json:"todayDeaths"`
	Recovere            int     `json:"recovered"`
	Active              int     `json:"active"`
	Critical            int     `json:"critical"`
	CasesPerOneMission  float32 `json:"casesPerOneMillion"`
	DeathsPerOneMillion float32 `json:"deathsPerOneMillion"`
}
// ToJSON renders the country as a JSON string. It panics if marshalling
// fails (which cannot happen for this plain struct).
func (country *Country) ToJSON() string {
	encoded, err := json.Marshal(country)
	if err != nil {
		panic(err)
	}
	return string(encoded)
}
// String renders the country as a multi-line human-readable report using a
// text/template. It panics if the template fails to parse or execute.
// Note: the local variable "template" shadows the text/template package
// within this function.
func (country *Country) String() string {
	const templateText = `
Name               : {{.Name}}
Cases              : {{.Cases}}
TodayCases         : {{.TodayCases}}
Deaths             : {{.Deaths}}
TodayDeaths        : {{.TodayDeaths}}
Recovere           : {{.Recovere}}
Active             : {{.Active}}
Critical           : {{.Critical}}
CasesPerOneMission : {{.CasesPerOneMission}}
DeathsPerOneMillion: {{.DeathsPerOneMillion}}
`
	template, err := template.New("Country").Parse(templateText)
	if err != nil {
		panic(err)
	}
	var doc bytes.Buffer
	if err := template.Execute(&doc, country); err != nil {
		panic(err)
	}
	return doc.String()
}
// NewClient builds a Client pointed at the corona.lmao.ninja API, using the
// shared default HTTP client.
//
// NOTE(review): http.DefaultClient has no timeout; consider a dedicated
// client with Timeout set.
func NewClient() (*Client, error) {
	baseURL, err := url.Parse("https://corona.lmao.ninja/")
	if err != nil {
		return nil, err
	}
	return &Client{
		BaseURL:    baseURL,
		HTTPClient: http.DefaultClient,
	}, nil
}
// GetCountries fetches per-country statistics from the "/countries" endpoint
// and decodes them into a slice of Country values.
//
// Fix: the original panicked on every failure even though the signature
// already declares an error return; all failures are now returned to the
// caller instead.
func (client *Client) GetCountries() ([]Country, error) {
	// Copy the base URL so the client's own URL is not mutated.
	u := *client.BaseURL
	u.Path = path.Join(u.Path, "countries")
	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return nil, err
	}
	// NOTE(review): "application/+json" looks like a typo for
	// "application/json"; kept as-is to avoid changing the request we send —
	// confirm against the API before fixing.
	req.Header.Add("Accept", "application/+json")
	resp, err := client.HTTPClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	var countries []Country
	if err := json.Unmarshal(body, &countries); err != nil {
		return nil, err
	}
	return countries, nil
}
|
package timeutil
import (
"time"
)
//millisecond base
func Now() int64 {
return time.Now().UnixNano() / int64(time.Millisecond)
}
|
package main
import (
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"reflect"
"strconv"
"time"
"github.com/iRajesha/experiments/src/panic"
)
// Persons is a sample struct used to demonstrate struct tags and reflection;
// all fields are unexported.
type Persons struct {
	name     string `required:"true"` // tag is read via reflection in main
	age      int
	siblings []string
}
// main is a scratch program exercising assorted Go features: slicing, map
// lookups, struct tags via reflection, type assertions, switch/fallthrough,
// labeled loops, panics, and JSON round-tripping.
func main() {
	var stringType string
	fmt.Printf("%v,%T\n", stringType, stringType)
	// Slicing demonstrations.
	mySlice := []int{0, 1, 2, 3, 4, 5}
	fmt.Printf("mySlice %v \n", mySlice[:])
	fmt.Printf("mySlice %v \n", mySlice[0:])
	fmt.Printf("mySlice %v \n", mySlice[1:])
	fmt.Printf("mySlice %v \n", mySlice[1:3])
	fmt.Printf("mySlice %v \n", mySlice[:3])
	myNewSlice := make([]int, 2)
	myNewSlice[0] = 123
	myNewSlice[1] = 111
	myNewSlice = append(myNewSlice, 123)
	fmt.Printf("myNewSlice %v", myNewSlice)
	// Remove the element at index 1 via append.
	myNewSlice = append(myNewSlice[:1], myNewSlice[2:]...)
	fmt.Printf("myNewSlice %v\n", myNewSlice)
	ray := Persons{
		name: "ray",
		age:  30,
		siblings: []string{
			"Chota Ray",
			"Bada Ray"}}
	fmt.Printf("%v\n", ray)
	rayMap := make(map[string]string)
	rayMap["name"] = "ray"
	fmt.Printf("%v\n", rayMap)
	// Read the `required` struct tag off the (unexported) name field.
	rayRefectType := reflect.TypeOf(ray)
	rayRefectField, _ := rayRefectType.FieldByName("name")
	fmt.Println(rayRefectField.Tag.Get("required"))
	// Comma-ok map lookup.
	if _, ok := rayMap["name"]; ok {
		fmt.Printf("Value exists -- > %v\n", rayMap["name"])
	}
	playWithEmptyInterface(&ray)
	// Bare switch: the first matching case wins; fallthrough forces the next.
	i := 20
	switch {
	case i <= 20:
		fmt.Printf("Less than 20\n")
	case i >= 20:
		fmt.Printf("greater than 20\n")
		fallthrough
	default:
		fmt.Printf("Printing default\n")
	}
backToFor:
	// Labeled loop: break targets the label, exiting after one iteration.
	for i := 0; i < 5; {
		fmt.Println(i)
		break backToFor
	}
	fmt.Printf("Right after for loop\n")
	panic.CheckPanicInGo()
	i = 1
	str1 := "String"
	str1 = str1 + fmt.Sprintf("_%v", i)
	fmt.Printf("Concatinated value is %v", str1)
	extractInterfaceType(ray)
	checkMarshalling()
	fmt.Printf("\n\n\n\n\n")
	gernateBusinessObjectId()
	//var empty interface{}
	// Marshalling a bare string produces a quoted JSON string.
	bMarshalled, _ := json.Marshal("CCSNonProd")
	fmt.Printf("Marshhed string(byte) %v\n", string(bMarshalled))
	bytes := []byte("CCSNonProd")
	//json.Unmarshal([]byte(IkNDU05vblByb2Qi), empty)
	//fmt.Printf("%v", empty)
	fmt.Printf("CCSNonProd value -> %v", string(bytes))
}
// gernateBusinessObjectId (sic: name kept to avoid breaking callers) builds
// and prints a business identifier composed of: channel id + last two digits
// of the current year + ISO week number + 13 digits derived from a fixed
// transaction hash.
func gernateBusinessObjectId() {
	var channelId = "0"
	now := time.Now()
	year := now.Year()
	// year % 1e2 keeps the last two digits (1e2 is the untyped constant 100).
	lastTwoDigitsOfYear := year % 1e2
	_, week := now.UTC().ISOWeek()
	businessId := channelId + strconv.Itoa(lastTwoDigitsOfYear) + strconv.Itoa(week) + fmt.Sprintf("%d", get13MiddleDigits("4b537cebb9f67d9bcfe29d38d03d76febe54808a16238815e5ad28b695c197b7"))
	fmt.Printf("Last two digits --> %v\n", lastTwoDigitsOfYear)
	fmt.Printf("Final Business Identifier %v\n", businessId)
}
// get13MiddleDigits hashes txId with SHA-256, takes 13 hex digits from near
// the end of the 64-character hex digest (indices 50..62), parses them as a
// base-16 integer, and returns that value modulo 1e13. Intermediate values
// are printed for debugging; a parse failure is logged and yields 0.
func get13MiddleDigits(txId string) int64 {
	sum := sha256.Sum256([]byte(txId))
	hexString := hex.EncodeToString(sum[:])
	last13HexDigits := hexString[50 : len(hexString)-1]
	fmt.Printf(" last13HexDigits -> %v\n", last13HexDigits)
	decimal, err := strconv.ParseInt(last13HexDigits, 16, 64)
	if err != nil {
		fmt.Printf("Error %v\n ", err)
	}
	last13digits := decimal % 1e13
	fmt.Printf("random 13 digits %v\n", last13digits)
	return last13digits
}
// checkMarshalling explores JSON round-tripping: unmarshalling one payload
// into both a partial anonymous struct and a full named struct, and
// marshalling/unmarshalling a bare string.
func checkMarshalling() {
	// Anonymous struct capturing only a subset of the payload's fields.
	var walletRespPayload = struct {
		MspId  string `json:"mspId"`
		Status string `json:"status"`
	}{}
	// Named struct capturing all fields.
	type walletRespPayloadStruct struct {
		Username string `json:"username"`
		Secret   string `json:"secret"`
		MspId    string `json:"mspId"`
		Status   string `json:"status"`
	}
	structVar := walletRespPayloadStruct{}
	//payload := "{\"username\":\"NewOrgTMAId\",\"secret\":\"63c1dd951ffedf6f7fd968ad4efa39b8ed584f162f46e715114ee184f8de9201\",\"mspId\":\"TMAFounderTEST\",\"status\":\"TMA Member\"}"
	payload := string(`{"username":"NewOrgTMAId","secret":"63c1dd951ffedf6f7fd968ad4efa39b8ed584f162f46e715114ee184f8de9201","mspId":"TMAFounderTEST","status":"TMA Member"}`)
	// Marshalling an already-JSON string just re-quotes it.
	bPayload, _ := json.Marshal(payload)
	fmt.Printf("Marshalled payload --> %v\n", string(bPayload))
	json.Unmarshal([]byte(payload), &walletRespPayload)
	fmt.Printf("Unmashalled Payload %v\n", walletRespPayload)
	json.Unmarshal([]byte(payload), &structVar)
	fmt.Printf("Unmashalled Payload %v\n", structVar)
	//value := string(`{"TMAFounderTEST":"Hello"}`)
	value, err := json.Marshal("TMAFounderTEST")
	if err != nil {
		fmt.Printf("Error %v\n", err)
	}
	fmt.Printf("Value -- %v\n", string(value))
	// Unmarshal back into an empty interface.
	var obj interface{}
	err = json.Unmarshal([]byte(value), &obj)
	if err != nil {
		fmt.Printf("Error %v\n", err)
	}
}
// playWithEmptyInterface demonstrates passing a value through the empty
// interface and recovering the concrete *Persons with a type assertion.
// It panics if the argument is not a *Persons.
func playWithEmptyInterface(spreadedArgs interface{}) {
	fmt.Printf("Address -- %v\n", spreadedArgs)
	newName := spreadedArgs.(*Persons)
	fmt.Printf("Address -- %v\n", newName)
}
// extractInterfaceType demonstrates a type switch: when the value is a
// Persons, its (unexported) name is printed; any other type is ignored.
func extractInterfaceType(i interface{}) {
	switch i := i.(type) {
	case Persons:
		fmt.Printf("Found a person %v\n", i.name)
	}
}
|
package leetcode
func prisonAfterNDays(cells []int, N int) []int {
size := len(cells)
encoded := 0
for i, v := range cells {
encoded |= v << uint(i)
}
visit := make(map[int]int)
visit[encoded] = 0
seq := make(map[int]int)
seq[0] = encoded
for x := 1; x <= N; x++ {
nxt := 0
for i := 1; i < size-1; i++ {
if ((encoded>>uint(i-1))&1)^((encoded>>uint(i+1))&1) == 0 {
nxt |= 1 << uint(i)
}
}
encoded = nxt
if prev, ok := visit[encoded]; ok {
v := (N-x)%(x-prev) + prev
ans := make([]int, size)
for t := 0; t < size; t++ {
ans[t] = (seq[v] >> uint(t)) & 1
}
return ans
} else {
visit[encoded] = x
seq[x] = encoded
}
}
ans := make([]int, size)
for t := 0; t < size; t++ {
ans[t] = (encoded >> uint(t)) & 1
}
return ans
}
|
package shader
import (
"fmt"
"io/ioutil"
"strings"
wrapper "github.com/akosgarai/opengl_playground/pkg/glwrapper"
"github.com/go-gl/mathgl/mgl32"
)
// LoadShaderFromFile takes a filepath string arguments.
// It loads the file and returns it as a '\x00' terminated string.
// It returns an error also.
func LoadShaderFromFile(path string) (string, error) {
shaderCode, err := ioutil.ReadFile(path)
if err != nil {
return "", err
}
result := string(shaderCode) + "\x00"
return result, nil
}
// CompileShader creates a shader, compiles the shader source, and returns
// the uint32 identifier of the shader and nil. If the compile fails, it
// returns an error (including the GL info log) and 0 as the shader id.
func CompileShader(source string, shaderType uint32) (uint32, error) {
	shader := wrapper.CreateShader(shaderType)
	csources, free := wrapper.Strs(source)
	wrapper.ShaderSource(shader, 1, csources, nil)
	free()
	wrapper.CompileShader(shader)
	var status int32
	wrapper.GetShaderiv(shader, wrapper.COMPILE_STATUS, &status)
	if status == wrapper.FALSE {
		// Compilation failed: fetch the info log to include in the error.
		var logLength int32
		wrapper.GetShaderiv(shader, wrapper.INFO_LOG_LENGTH, &logLength)
		log := strings.Repeat("\x00", int(logLength+1))
		wrapper.GetShaderInfoLog(shader, logLength, nil, wrapper.Str(log))
		return 0, fmt.Errorf("failed to compile %v: %v", source, log)
	}
	return shader, nil
}
// Shader wraps a linked GL shader program identifier.
type Shader struct {
	id uint32
}

// NewShader returns a Shader. Its inputs are the filenames of the vertex and
// fragment shaders. It reads the files, compiles them, attaches both to a
// new program, and links it. Any load or compile failure panics.
func NewShader(vertexShaderPath, fragmentShaderPath string) *Shader {
	vertexShaderSource, err := LoadShaderFromFile(vertexShaderPath)
	if err != nil {
		panic(err)
	}
	vertexShader, err := CompileShader(vertexShaderSource, wrapper.VERTEX_SHADER)
	if err != nil {
		panic(err)
	}
	fragmentShaderSource, err := LoadShaderFromFile(fragmentShaderPath)
	if err != nil {
		panic(err)
	}
	fragmentShader, err := CompileShader(fragmentShaderSource, wrapper.FRAGMENT_SHADER)
	if err != nil {
		panic(err)
	}
	program := wrapper.CreateProgram()
	wrapper.AttachShader(program, vertexShader)
	wrapper.AttachShader(program, fragmentShader)
	wrapper.LinkProgram(program)
	return &Shader{
		id: program,
	}
}
// Use is a wrapper for gl.UseProgram, activating this shader program.
func (s *Shader) Use() {
	wrapper.UseProgram(s.id)
}

// GetId returns the program identifier of the shader.
func (s *Shader) GetId() uint32 {
	return s.id
}

// SetUniformMat4 gets a uniform name string and the value matrix as input
// and calls the gl.UniformMatrix4fv function.
func (s *Shader) SetUniformMat4(uniformName string, mat mgl32.Mat4) {
	location := wrapper.GetUniformLocation(s.id, uniformName)
	wrapper.UniformMatrix4fv(location, 1, false, &mat[0])
}

// SetUniform3f gets a uniform name string and 3 float values as input and
// calls the gl.Uniform3f function.
func (s *Shader) SetUniform3f(uniformName string, v1, v2, v3 float32) {
	location := wrapper.GetUniformLocation(s.id, uniformName)
	wrapper.Uniform3f(location, v1, v2, v3)
}

// SetUniform1f gets a uniform name string and a float value as input and
// calls the gl.Uniform1f function.
func (s *Shader) SetUniform1f(uniformName string, v1 float32) {
	location := wrapper.GetUniformLocation(s.id, uniformName)
	wrapper.Uniform1f(location, v1)
}

// SetUniform1i gets a uniform name string and an integer value as input and
// calls the gl.Uniform1i function.
func (s *Shader) SetUniform1i(uniformName string, v1 int32) {
	location := wrapper.GetUniformLocation(s.id, uniformName)
	wrapper.Uniform1i(location, v1)
}
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tfimport
import (
"fmt"
"os/exec"
"reflect"
"testing"
"github.com/GoogleCloudPlatform/healthcare-data-protection-suite/internal/terraform"
"github.com/GoogleCloudPlatform/healthcare-data-protection-suite/internal/tfimport/importer"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
)
// TestImportable checks that Importable maps resource changes to the
// expected importer type: unknown or empty kinds yield nil, and supported
// kinds yield a resource whose Importer matches the expected concrete type.
func TestImportable(t *testing.T) {
	tests := []struct {
		rc   terraform.ResourceChange
		pcv  map[string]interface{}
		want resourceImporter
	}{
		// Empty Kind - should return nil.
		{terraform.ResourceChange{}, nil, nil},
		// Unsupported Kind - should return nil.
		{
			terraform.ResourceChange{
				Kind: "unsupported",
			}, nil, nil,
		},
		// Bucket - should return resource with bucket importer
		{
			terraform.ResourceChange{
				Kind:    "google_storage_bucket",
				Address: "google_storage_bucket.gcs_tf_bucket",
				Change: terraform.Change{
					After: map[string]interface{}{
						"project": "project-from-resource",
						"name":    "mybucket",
					},
				},
			}, nil,
			&importer.SimpleImporter{},
		},
		// GKE Cluster - should return resource with GKE cluster importer
		{
			terraform.ResourceChange{
				Kind:    "google_sql_user",
				Address: "google_sql_user.my_user",
				Change: terraform.Change{
					After: map[string]interface{}{
						"project":  "project-from-resource",
						"instance": "instance-from-resource",
						"name":     "name-from-instance",
					},
				},
			}, nil,
			&importer.SQLUser{},
		},
	}
	for _, tc := range tests {
		got, ok := Importable(tc.rc, tc.pcv, false)
		// If we want nil, we should get nil.
		// If we don't want nil, then the address and importer should match.
		if got == nil {
			if tc.want != nil {
				t.Errorf("Importable(%v, %v) = nil; want %+v", tc.rc, tc.pcv, tc.want)
			}
		} else if reflect.TypeOf(got.Importer) != reflect.TypeOf(tc.want) {
			t.Errorf("Importable(%v, %v) = %+v; want %+v", tc.rc, tc.pcv, got.Importer, tc.want)
		} else if !ok {
			t.Errorf("Importable(%v, %v) unexpectedly failed", tc.rc, tc.pcv)
		}
	}
}
// Shared fixtures for the import tests below.
const (
	testAddress       = "test-address"
	testImportID      = "test-import-id"
	testInputDir      = "test-input-dir"
	testTerraformPath = "terraform"
)

// argsWant is the exact argv we expect Import to assemble for terraform.
var argsWant = []string{testTerraformPath, "import", testAddress, testImportID}
// testImporter is a stub importer that always yields testImportID.
type testImporter struct{}

// ImportID implements the importer contract with a fixed ID and no error.
func (r *testImporter) ImportID(terraform.ResourceChange, importer.ConfigMap, bool) (string, error) {
	return testImportID, nil
}
// testRunner is a stub command runner used to intercept terraform
// invocations in tests.
type testRunner struct {
	// This can be modified per test case to check different outcomes.
	output []byte
}

// CmdRun is a no-op stub.
func (*testRunner) CmdRun(cmd *exec.Cmd) error { return nil }

// CmdOutput is a no-op stub.
func (*testRunner) CmdOutput(cmd *exec.Cmd) ([]byte, error) { return nil, nil }

// CmdCombinedOutput verifies the command's argv matches argsWant, then
// returns the canned output.
func (tr *testRunner) CmdCombinedOutput(cmd *exec.Cmd) ([]byte, error) {
	if !cmp.Equal(cmd.Args, argsWant) {
		return nil, fmt.Errorf("args = %v; want %v", cmd.Args, argsWant)
	}
	return tr.output, nil
}
// TestImportArgs runs Import with the stub importer and runner, checking
// both that the assembled terraform arguments are accepted (enforced inside
// testRunner.CmdCombinedOutput) and that the output round-trips.
func TestImportArgs(t *testing.T) {
	testResource := &Resource{
		Change:         terraform.ResourceChange{Address: testAddress},
		ProviderConfig: importer.ConfigMap{},
		Importer:       &testImporter{},
	}
	wantOutput := ""
	trn := &testRunner{
		output: []byte(wantOutput),
	}
	gotOutput, err := Import(trn, testResource, testInputDir, testTerraformPath, true)
	if err != nil {
		t.Errorf("TestImport(%v, %v, %v) %v", trn, testResource, testInputDir, err)
	}
	if !cmp.Equal(gotOutput, wantOutput) {
		t.Errorf("TestImport(%v, %v, %v) output = %v; want %v", trn, testResource, testInputDir, gotOutput, wantOutput)
	}
}
// TestNotImportable checks that NotImportable recognizes terraform output
// saying a resource type does not support `terraform import`.
func TestNotImportable(t *testing.T) {
	cases := []struct {
		output string
		want   bool
	}{
		// No output.
		{output: "", want: false},
		// Not importable error.
		{output: "Error: resource google_container_registry doesn't support import", want: true},
		// Importable and exists.
		{output: "Import successful!", want: false},
	}
	for _, c := range cases {
		if got := NotImportable(c.output); got != c.want {
			t.Errorf("TestNotImportable(%v) = %v; want %v", c.output, got, c.want)
		}
	}
}
// TestDoesNotExist checks that DoesNotExist recognizes terraform output
// saying the remote object to import is missing.
func TestDoesNotExist(t *testing.T) {
	cases := []struct {
		output string
		want   bool
	}{
		// No output.
		{output: "", want: false},
		// Does not exist error.
		{output: "Error: Cannot import non-existent remote object", want: true},
		// Importable and exists.
		{output: "Import successful!", want: false},
	}
	for _, c := range cases {
		if got := DoesNotExist(c.output); got != c.want {
			t.Errorf("TestDoesNotExist(%v) = %v; want %v", c.output, got, c.want)
		}
	}
}
// Simple sanity tests, they make sure each importer assembles the plan values correctly for the ImportID.
// Each case names a resource type registered in Importers, supplies the plan's
// "after" fields, and gives the exact import ID the importer must build.
func TestImportersSanity(t *testing.T) {
	tests := []struct {
		resource string // key into the Importers registry
		planFields map[string]interface{} // plan "after" values fed to the importer
		want string // expected terraform import ID
	}{
		{
			"google_app_engine_application",
			map[string]interface{}{
				"project": "my-appengine-project",
			},
			"my-appengine-project",
		},
		{
			"google_bigquery_dataset",
			map[string]interface{}{
				"project": "my-bq-project",
				"dataset_id": "my_bq_dataset",
			},
			"projects/my-bq-project/datasets/my_bq_dataset",
		},
		{
			"google_bigquery_table",
			map[string]interface{}{
				"project": "my-bq-project",
				"dataset_id": "my_bq_dataset",
				"table_id": "my_bq_table",
			},
			"my-bq-project/my_bq_dataset/my_bq_table",
		},
		{
			"google_billing_account_iam_binding",
			map[string]interface{}{
				"billing_account_id": "my_billing_account",
				"role": "roles/owner",
			},
			"my_billing_account roles/owner",
		},
		{
			"google_billing_account_iam_member",
			map[string]interface{}{
				"billing_account_id": "my_billing_account",
				"role": "roles/owner",
				"member": "user:myuser@example.com",
			},
			"my_billing_account roles/owner user:myuser@example.com",
		},
		{
			"google_billing_account_iam_policy",
			map[string]interface{}{
				"billing_account_id": "my_billing_account",
			},
			"my_billing_account",
		},
		{
			"google_binary_authorization_policy",
			map[string]interface{}{
				"project": "my-binauthz-project",
			},
			"projects/my-binauthz-project",
		},
		{
			"google_cloudbuild_trigger",
			map[string]interface{}{
				"project": "my-cb-project",
				"name": "my_cb_trigger",
			},
			"projects/my-cb-project/triggers/my_cb_trigger",
		},
		{
			"google_compute_address",
			map[string]interface{}{
				"project": "my-compute-project",
				"region": "us-east1",
				"name": "my_address",
			},
			"projects/my-compute-project/regions/us-east1/addresses/my_address",
		},
		{
			"google_compute_firewall",
			map[string]interface{}{
				"project": "my-firewall-project",
				"name": "my_firewall",
			},
			"projects/my-firewall-project/global/firewalls/my_firewall",
		},
		{
			"google_compute_forwarding_rule",
			map[string]interface{}{
				"project": "my-forwarding-project",
				"region": "us-east1",
				"name": "my_forwarding_rule",
			},
			"projects/my-forwarding-project/regions/us-east1/forwardingRules/my_forwarding_rule",
		},
		{
			"google_compute_global_address",
			map[string]interface{}{
				"project": "my-compute-global-project",
				"name": "my_global_address",
			},
			"projects/my-compute-global-project/global/addresses/my_global_address",
		},
		{
			"google_compute_health_check",
			map[string]interface{}{
				"project": "my-health-check-project",
				"name": "my_health_check",
			},
			"projects/my-health-check-project/global/healthChecks/my_health_check",
		},
		{
			"google_compute_image",
			map[string]interface{}{
				"project": "my-compute-project",
				"name": "my_image",
			},
			"projects/my-compute-project/global/images/my_image",
		},
		{
			"google_compute_instance",
			map[string]interface{}{
				"project": "my-compute-project",
				"zone": "us-east1-a",
				"name": "my_instance",
			},
			"projects/my-compute-project/zones/us-east1-a/instances/my_instance",
		},
		{
			"google_compute_instance_template",
			map[string]interface{}{
				"project": "my-compute-project",
				"name": "my_instance_template",
			},
			"projects/my-compute-project/global/instanceTemplates/my_instance_template",
		},
		{
			"google_compute_instance_from_template",
			map[string]interface{}{
				"project": "my-compute-project",
				"zone": "us-east1-a",
				"name": "my_instance",
			},
			"projects/my-compute-project/zones/us-east1-a/instances/my_instance",
		},
		{
			"google_compute_interconnect_attachment",
			map[string]interface{}{
				"project": "my-interconnect-project",
				"region": "us-east1",
				"name": "my_interconnect_attachment",
			},
			"projects/my-interconnect-project/regions/us-east1/interconnectAttachments/my_interconnect_attachment",
		},
		{
			"google_compute_network",
			map[string]interface{}{
				"project": "my-network-project",
				"name": "my_network",
			},
			"projects/my-network-project/global/networks/my_network",
		},
		{
			"google_compute_network_peering",
			map[string]interface{}{
				"network": "projects/my-network-project/global/networks/my_network",
				"name": "my_peering",
			},
			"my-network-project/my_network/my_peering",
		},
		{
			"google_compute_project_metadata_item",
			map[string]interface{}{
				"key": "my_metadata",
			},
			"my_metadata",
		},
		{
			"google_compute_region_backend_service",
			map[string]interface{}{
				"project": "my-backend-project",
				"region": "us-east1",
				"name": "my_backend_service",
			},
			"projects/my-backend-project/regions/us-east1/backendServices/my_backend_service",
		},
		{
			"google_compute_route",
			map[string]interface{}{
				"project": "my-route-project",
				"name": "my-compute-route",
			},
			"projects/my-route-project/global/routes/my-compute-route",
		},
		{
			"google_compute_router",
			map[string]interface{}{
				"project": "my-router-project",
				"region": "us-east1",
				"name": "my_router",
			},
			"projects/my-router-project/regions/us-east1/routers/my_router",
		},
		{
			"google_compute_router_interface",
			map[string]interface{}{
				"region": "us-east1",
				"router": "my-router",
				"name": "my-interface",
			},
			"us-east1/my-router/my-interface",
		},
		{
			"google_compute_router_nat",
			map[string]interface{}{
				"project": "my-router-project",
				"region": "us-east1",
				"router": "my_router",
				"name": "my_nat",
			},
			"projects/my-router-project/regions/us-east1/routers/my_router/my_nat",
		},
		{
			"google_compute_router_peer",
			map[string]interface{}{
				"project": "my-router-project",
				"region": "us-east1",
				"router": "my_router",
				"name": "my_peer",
			},
			"projects/my-router-project/regions/us-east1/routers/my_router/my_peer",
		},
		{
			"google_compute_shared_vpc_host_project",
			map[string]interface{}{
				"project": "my-vpc-project",
			},
			"my-vpc-project",
		},
		{
			"google_compute_shared_vpc_service_project",
			map[string]interface{}{
				"host_project": "my-host-project",
				"service_project": "my-service-project",
			},
			"my-host-project/my-service-project",
		},
		{
			"google_compute_subnetwork",
			map[string]interface{}{
				"project": "my-subnetwork-project",
				"region": "us-east1",
				"name": "my_subnetwork",
			},
			"projects/my-subnetwork-project/regions/us-east1/subnetworks/my_subnetwork",
		},
		{
			"google_compute_subnetwork_iam_binding",
			map[string]interface{}{
				"subnetwork": "projects/my-subnetwork-project/regions/us-east1/subnetworks/my_subnetwork",
				"role": "roles/owner",
			},
			"projects/my-subnetwork-project/regions/us-east1/subnetworks/my_subnetwork roles/owner",
		},
		{
			"google_compute_subnetwork_iam_member",
			map[string]interface{}{
				"subnetwork": "projects/my-subnetwork-project/regions/us-east1/subnetworks/my_subnetwork",
				"role": "roles/owner",
				"member": "user:myuser@example.com",
			},
			"projects/my-subnetwork-project/regions/us-east1/subnetworks/my_subnetwork roles/owner user:myuser@example.com",
		},
		{
			"google_compute_subnetwork_iam_policy",
			map[string]interface{}{
				"subnetwork": "projects/my-network-project/regions/us-east1/subnetworks/my-subnet",
			},
			"projects/my-network-project/regions/us-east1/subnetworks/my-subnet",
		},
		{
			"google_container_cluster",
			map[string]interface{}{
				"project": "my-gke-project",
				"location": "us-east1-a",
				"name": "my_cluster",
			},
			"projects/my-gke-project/locations/us-east1-a/clusters/my_cluster",
		},
		{
			"google_container_node_pool",
			map[string]interface{}{
				"project": "my-gke-project",
				"location": "us-east1-a",
				"cluster": "my_cluster",
				"name": "my_node_pool",
			},
			"my-gke-project/us-east1-a/my_cluster/my_node_pool",
		},
		{
			"google_dns_managed_zone",
			map[string]interface{}{
				"project": "my-dns-project",
				"name": "my_managed_zone",
			},
			"projects/my-dns-project/managedZones/my_managed_zone",
		},
		{
			"google_dns_record_set",
			map[string]interface{}{
				"project": "my-dns-project",
				"managed_zone": "my_managed_zone",
				"name": "my_record_set",
				"type": "A",
			},
			"my-dns-project/my_managed_zone/my_record_set/A",
		},
		{
			"google_firebase_project",
			map[string]interface{}{
				"project": "my-firebase-project",
			},
			"projects/my-firebase-project",
		},
		{
			"google_folder",
			map[string]interface{}{
				"folder_id": "my-folder",
			},
			"folders/my-folder",
		},
		{
			"google_folder_iam_binding",
			map[string]interface{}{
				"folder": "my-folder",
				"role": "roles/owner",
			},
			"my-folder roles/owner",
		},
		{
			"google_folder_iam_member",
			map[string]interface{}{
				"folder": "my-folder",
				"role": "roles/owner",
				"member": "user:myuser@example.com",
			},
			"my-folder roles/owner user:myuser@example.com",
		},
		{
			"google_folder_iam_policy",
			map[string]interface{}{
				"folder": "my-folder",
			},
			"my-folder",
		},
		{
			"google_folder_organization_policy",
			map[string]interface{}{
				"folder": "my-folder",
				"constraint": "serviceuser.services",
			},
			"folders/my-folder/constraints/serviceuser.services",
		},
		{
			"google_iap_tunnel_instance_iam_binding",
			map[string]interface{}{
				"project": "my-iap-project",
				"zone": "us-east1-a",
				"instance": "my_tunnel_instance",
				"role": "roles/iap.tunnelResourceAccessor",
			},
			"projects/my-iap-project/iap_tunnel/zones/us-east1-a/instances/my_tunnel_instance roles/iap.tunnelResourceAccessor",
		},
		{
			"google_iap_tunnel_instance_iam_member",
			map[string]interface{}{
				"project": "my-iap-project",
				"zone": "us-east1-a",
				"instance": "my_tunnel_instance",
				"role": "roles/iap.tunnelResourceAccessor",
				"member": "user:myuser@example.com",
			},
			"projects/my-iap-project/iap_tunnel/zones/us-east1-a/instances/my_tunnel_instance roles/iap.tunnelResourceAccessor user:myuser@example.com",
		},
		{
			"google_iap_tunnel_instance_iam_policy",
			map[string]interface{}{
				"project": "my-iap-project",
				"zone": "us-east1-a",
				"instance": "my_tunnel_instance",
			},
			"projects/my-iap-project/iap_tunnel/zones/us-east1-a/instances/my_tunnel_instance",
		},
		{
			"google_kms_key_ring",
			map[string]interface{}{
				"project": "my-kms-project",
				"location": "us-east1",
				"name": "my_kms_key_ring",
			},
			"projects/my-kms-project/locations/us-east1/keyRings/my_kms_key_ring",
		},
		{
			"google_logging_billing_account_sink",
			map[string]interface{}{
				"billing_account": "my-billing-account",
				"name": "my_sink",
			},
			"billingAccounts/my-billing-account/sinks/my_sink",
		},
		{
			"google_logging_folder_sink",
			map[string]interface{}{
				"folder": "my-folder",
				"name": "my_sink",
			},
			"folders/my-folder/sinks/my_sink",
		},
		{
			"google_logging_metric",
			map[string]interface{}{
				"project": "my-project",
				"name": "my-metric",
			},
			"my-project my-metric",
		},
		{
			"google_logging_organization_sink",
			map[string]interface{}{
				"org_id": "my-org",
				"name": "my_sink",
			},
			"organizations/my-org/sinks/my_sink",
		},
		{
			"google_logging_project_sink",
			map[string]interface{}{
				"project": "my-project",
				"name": "my_sink",
			},
			"projects/my-project/sinks/my_sink",
		},
		{
			"google_organization_iam_audit_config",
			map[string]interface{}{
				"org_id": "my-org",
				"service": "iam.googleapis.com",
			},
			"my-org iam.googleapis.com",
		},
		{
			"google_organization_iam_custom_role",
			map[string]interface{}{
				"org_id": "my-org",
				"role_id": "my_custom_role",
			},
			"organizations/my-org/roles/my_custom_role",
		},
		{
			"google_organization_iam_member",
			map[string]interface{}{
				"org_id": "my-org",
				"role": "roles/owner",
				"member": "user:myuser@example.com",
			},
			"my-org roles/owner user:myuser@example.com",
		},
		{
			"google_organization_policy",
			map[string]interface{}{
				"org_id": "my-org",
				"constraint": "serviceuser.services",
			},
			"my-org/constraints/serviceuser.services",
		},
		{
			"google_project",
			map[string]interface{}{
				"project_id": "my-project",
			},
			"my-project",
		},
		{
			"google_project_iam_audit_config",
			map[string]interface{}{
				"project": "my-project",
				"service": "allServices",
			},
			"my-project allServices",
		},
		{
			"google_project_iam_binding",
			map[string]interface{}{
				"project": "my-project",
				"role": "roles/owner",
			},
			"my-project roles/owner",
		},
		{
			"google_project_iam_custom_role",
			map[string]interface{}{
				"project": "my-project",
				"role_id": "my_custom_role",
			},
			"projects/my-project/roles/my_custom_role",
		},
		{
			"google_project_iam_member",
			map[string]interface{}{
				"project": "my-project",
				"role": "roles/owner",
				"member": "user:myuser@example.com",
			},
			"my-project roles/owner user:myuser@example.com",
		},
		{
			"google_project_organization_policy",
			map[string]interface{}{
				"project": "my-project",
				"constraint": "serviceuser.services",
			},
			"projects/my-project:constraints/serviceuser.services",
		},
		{
			"google_project_service",
			map[string]interface{}{
				"project": "my-project",
				"service": "iam.googleapis.com",
			},
			"my-project/iam.googleapis.com",
		},
		{
			"google_project_usage_export_bucket",
			map[string]interface{}{
				"project": "my-project",
			},
			"my-project",
		},
		{
			"google_pubsub_subscription",
			map[string]interface{}{
				"project": "my-project",
				"name": "my-subscription",
			},
			"projects/my-project/subscriptions/my-subscription",
		},
		{
			"google_pubsub_subscription_iam_binding",
			map[string]interface{}{
				"project": "my-project",
				"subscription": "my-subscription",
				"role": "roles/owner",
			},
			"projects/my-project/subscriptions/my-subscription roles/owner",
		},
		{
			"google_pubsub_subscription_iam_member",
			map[string]interface{}{
				"project": "my-project",
				"subscription": "my-subscription",
				"role": "roles/owner",
				"member": "user:myuser@example.com",
			},
			"projects/my-project/subscriptions/my-subscription roles/owner user:myuser@example.com",
		},
		{
			"google_pubsub_subscription_iam_policy",
			map[string]interface{}{
				"project": "my-project",
				"subscription": "my-subscription",
			},
			"projects/my-project/subscriptions/my-subscription",
		},
		{
			"google_pubsub_topic",
			map[string]interface{}{
				"project": "my-project",
				"name": "my-topic",
			},
			"projects/my-project/topics/my-topic",
		},
		{
			"google_pubsub_topic_iam_binding",
			map[string]interface{}{
				"project": "my-project",
				"topic": "my-topic",
				"role": "roles/owner",
			},
			"projects/my-project/topics/my-topic roles/owner",
		},
		{
			"google_pubsub_topic_iam_member",
			map[string]interface{}{
				"project": "my-project",
				"topic": "my-topic",
				"role": "roles/owner",
				"member": "user:myuser@example.com",
			},
			"projects/my-project/topics/my-topic roles/owner user:myuser@example.com",
		},
		{
			"google_pubsub_topic_iam_policy",
			map[string]interface{}{
				"project": "my-project",
				"topic": "my-topic",
			},
			"projects/my-project/topics/my-topic",
		},
		{
			"google_secret_manager_secret",
			map[string]interface{}{
				"project": "my-project",
				"secret_id": "my-secret",
			},
			"projects/my-project/secrets/my-secret",
		},
		{
			"google_secret_manager_secret_version",
			map[string]interface{}{
				"secret": "projects/my-project/secrets/my-secret",
			},
			"projects/my-project/secrets/my-secret/versions/latest",
		},
		{
			"google_service_account",
			map[string]interface{}{
				"project": "my-project",
				"account_id": "my-sa",
			},
			"projects/my-project/serviceAccounts/my-sa@my-project.iam.gserviceaccount.com",
		},
		{
			"google_service_account_iam_binding",
			map[string]interface{}{
				"service_account_id": "projects/my-project/serviceAccounts/my-sa@my-project.iam.gserviceaccount.com",
				"role": "roles/owner",
			},
			"projects/my-project/serviceAccounts/my-sa@my-project.iam.gserviceaccount.com roles/owner",
		},
		{
			"google_service_account_iam_member",
			map[string]interface{}{
				"service_account_id": "projects/my-project/serviceAccounts/my-sa@my-project.iam.gserviceaccount.com",
				"role": "roles/owner",
				"member": "user:myuser@example.com",
			},
			"projects/my-project/serviceAccounts/my-sa@my-project.iam.gserviceaccount.com roles/owner user:myuser@example.com",
		},
		{
			"google_service_account_iam_policy",
			map[string]interface{}{
				"service_account_id": "projects/my-project/serviceAccounts/my-sa@my-project.iam.gserviceaccount.com",
			},
			"projects/my-project/serviceAccounts/my-sa@my-project.iam.gserviceaccount.com",
		},
		{
			"google_service_usage_consumer_quota_override",
			map[string]interface{}{
				"project": "my-project",
				"service": "healthcare.googleapis.com",
				"metric": "healthcare.googleapis.com%2Fannotation_ops",
				"limit": "%2Fmin%2Fproject%2Fregion",
				"name": "server-generated",
			},
			"projects/my-project/services/healthcare.googleapis.com/consumerQuotaMetrics/healthcare.googleapis.com%2Fannotation_ops/limits/%2Fmin%2Fproject%2Fregion/consumerOverrides/server-generated",
		},
		{
			"google_sql_database",
			map[string]interface{}{
				"project": "my-project",
				"instance": "my-instance",
				"name": "my-db",
			},
			"projects/my-project/instances/my-instance/databases/my-db",
		},
		{
			"google_sql_database_instance",
			map[string]interface{}{
				"project": "my-project",
				"name": "my-instance",
			},
			"projects/my-project/instances/my-instance",
		},
		{
			"google_storage_bucket",
			map[string]interface{}{
				"project": "my-project",
				"name": "my-bucket",
			},
			"my-project/my-bucket",
		},
		{
			"google_storage_bucket_iam_binding",
			map[string]interface{}{
				"bucket": "my-bucket",
				"role": "roles/owner",
			},
			"my-bucket roles/owner",
		},
		{
			"google_storage_bucket_iam_member",
			map[string]interface{}{
				"bucket": "my-bucket",
				"role": "roles/owner",
				"member": "user:myuser@example.com",
			},
			"my-bucket roles/owner user:myuser@example.com",
		},
		{
			"google_storage_bucket_iam_policy",
			map[string]interface{}{
				"bucket": "my-bucket",
			},
			"my-bucket",
		},
		{
			"gsuite_group",
			map[string]interface{}{
				"email": "mygsuitegroup@example.com",
			},
			"mygsuitegroup@example.com",
		},
		{
			"gsuite_group_member",
			map[string]interface{}{
				"group": "mygsuitegroup@example.com",
				"email": "mygsuiteuser@example.com",
			},
			"mygsuitegroup@example.com:mygsuiteuser@example.com",
		},
		{
			"helm_release",
			map[string]interface{}{
				"namespace": "my-namespace",
				"name": "my-helm-release",
			},
			"my-namespace/my-helm-release",
		},
		{
			"helm_release",
			map[string]interface{}{
				"name": "my-helm-release",
			},
			"default/my-helm-release",
		},
		{
			"kubernetes_config_map",
			map[string]interface{}{
				"metadata": map[string]interface{}{
					"namespace": "my-namespace",
					"name": "my-config-map",
				},
			},
			"my-namespace/my-config-map",
		},
		{
			"kubernetes_config_map",
			map[string]interface{}{
				"metadata": map[string]interface{}{
					"name": "my-config-map",
				},
			},
			"default/my-config-map",
		},
		{
			"kubernetes_manifest",
			map[string]interface{}{
				"manifest": map[string]interface{}{
					"metadata": map[string]interface{}{
						"namespace": "my-namespace",
						"name": "my-kubernetes-resource",
					},
				},
			},
			"my-namespace/my-kubernetes-resource",
		},
		{
			"kubernetes_manifest",
			map[string]interface{}{
				"manifest": map[string]interface{}{
					"metadata": map[string]interface{}{
						"name": "my-kubernetes-resource",
					},
				},
			},
			"default/my-kubernetes-resource",
		},
		{
			"kubernetes_namespace",
			map[string]interface{}{
				"metadata": map[string]interface{}{
					"name": "my-namespace",
				},
			},
			"my-namespace",
		},
		{
			"kubernetes_pod",
			map[string]interface{}{
				"metadata": map[string]interface{}{
					"namespace": "my-namespace",
					"name": "my-kubernetes-resource",
				},
			},
			"my-namespace/my-kubernetes-resource",
		},
		{
			"kubernetes_role",
			map[string]interface{}{
				"metadata": map[string]interface{}{
					"namespace": "my-namespace",
					"name": "my-kubernetes-resource",
				},
			},
			"my-namespace/my-kubernetes-resource",
		},
		{
			"kubernetes_role_binding",
			map[string]interface{}{
				"metadata": map[string]interface{}{
					"namespace": "my-namespace",
					"name": "my-kubernetes-resource",
				},
			},
			"my-namespace/my-kubernetes-resource",
		},
		{
			"kubernetes_service",
			map[string]interface{}{
				"metadata": map[string]interface{}{
					"namespace": "my-namespace",
					"name": "my-kubernetes-resource",
				},
			},
			"my-namespace/my-kubernetes-resource",
		},
		{
			"kubernetes_service_account",
			map[string]interface{}{
				"metadata": map[string]interface{}{
					"namespace": "my-namespace",
					"name": "my-kubernetes-resource",
				},
			},
			"my-namespace/my-kubernetes-resource",
		},
	}
	for _, tc := range tests {
		importer, ok := Importers[tc.resource]
		if !ok {
			t.Fatalf("importer does not exist for %v", tc.resource)
		}
		// Only Change.After is populated; importers read plan values from there.
		change := terraform.ResourceChange{Change: terraform.Change{After: tc.planFields}}
		got, err := importer.ImportID(change, nil, false)
		if err != nil {
			t.Fatalf("%v importer %T(%v, nil, false) returned error: %v", tc.resource, importer, change, err)
		}
		if diff := cmp.Diff(tc.want, got, cmpopts.EquateEmpty()); diff != "" {
			t.Errorf("%v importer %T(%v, nil, false) returned diff (-want +got):\n%s", tc.resource, importer, change, diff)
		}
	}
}
// The template is looking for a field that isn't required by the importer.
// This should cause the template to fail.
func TestSimpleImporterTmplExtraField(t *testing.T) {
	// Template references .member, which is not in the Fields list.
	imp := &importer.SimpleImporter{
		Fields: []string{"project", "role"},
		Tmpl: "{{.project}} {{.role}} {{.member}}",
	}
	fields := map[string]interface{}{
		"project": "my-project",
		"role": "roles/owner",
	}
	change := terraform.ResourceChange{Change: terraform.Change{After: fields}}
	_, err := imp.ImportID(change, nil, false)
	if err == nil {
		t.Errorf("importer %v ImportID(%v, nil, false) succeeded for malformed input, want error", imp, change)
	}
}
// The importer requires a field that isn't being passed in.
// This should cause the template to fail.
func TestSimpleImporterRequiredFieldMissing(t *testing.T) {
	// "member" is required by Fields but absent from the plan values below.
	imp := &importer.SimpleImporter{
		Fields: []string{"project", "role", "member"},
		Tmpl: "{{.project}} {{.role}} {{.member}}",
	}
	fields := map[string]interface{}{
		"project": "my-project",
		"role": "roles/owner",
	}
	change := terraform.ResourceChange{Change: terraform.Change{After: fields}}
	_, err := imp.ImportID(change, nil, false)
	if err == nil {
		t.Errorf("importer %v ImportID(%v, nil, false) succeeded for malformed input, want error", imp, change)
	}
}
|
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package isolated defines the isolated common code shared by the client and
// server.
package isolated
|
package main
import (
"bufio"
"encoding/json"
"flag"
"fmt"
"io"
"log"
"os"
"os/user"
"path/filepath"
"sort"
"strings"
"time"
)
type Document struct {
Topic string
Since time.Time
Topics map[string]time.Duration
}
func (j *Document) Load(filename string) error {
f, err := os.Open(filename)
if err != nil {
if os.IsNotExist(err) {
*j = Document{"", time.Time{}, map[string]time.Duration{}}
err = nil
}
return err
}
defer f.Close()
d := json.NewDecoder(bufio.NewReader(f))
err = d.Decode(j)
if err == io.EOF {
err = nil
}
if j.Topics == nil {
j.Topics = map[string]time.Duration{}
}
return err
}
// Save writes the document to filename as JSON, truncating any existing file.
func (j *Document) Save(filename string) error {
	file, err := os.Create(filename)
	if err != nil {
		return err
	}
	defer file.Close()
	buf := bufio.NewWriter(file)
	if err := json.NewEncoder(buf).Encode(j); err != nil {
		return err
	}
	return buf.Flush()
}
// Flush folds the time elapsed since j.Since into the running total for the
// active topic and restarts the clock at now. It is a no-op when no topic is
// active, when the start time is unset, or when now is not after j.Since.
func (j *Document) Flush(now time.Time) {
	if j.Topic == "" || j.Since.IsZero() {
		return
	}
	d := now.Sub(j.Since)
	if d <= 0 {
		return
	}
	j.Since = now
	j.Topics[j.Topic] += d
	// (redundant trailing `return` removed — staticcheck S1023)
}
// Println prints every tracked topic with its accumulated duration, sorted by
// topic name, followed by a grand total, then the currently active topic and
// how long it has been running.
func (j *Document) Println(now time.Time) {
	if len(j.Topics) > 0 {
		// Sort the topic names for deterministic output.
		topics := make([]string, 0, len(j.Topics))
		for t := range j.Topics {
			topics = append(topics, t)
		}
		sort.Strings(topics)
		total := time.Duration(0)
		for _, t := range topics {
			d := j.Topics[t]
			fmt.Printf("%s: %v\n", t, d)
			total += d
		}
		fmt.Printf(" %v\n", total)
	}
	if j.Topic != "" && !j.Since.IsZero() {
		// Exactly zero elapsed time prints just the bare topic name.
		d := now.Sub(j.Since)
		if d == 0 {
			fmt.Printf("%s\n", j.Topic)
		} else {
			fmt.Printf("%s for %v\n", j.Topic, d)
		}
	}
}
// Command-line options; bound to flags in init and consumed by do.
var (
	Filename string // -ts: path of the JSON state file
	Noop bool // -n: load and print only, change nothing
	Finish bool // -f: flush and clear the active topic
	Discard bool // -d: clear the active topic without crediting time
	Add time.Duration // -a: add this duration to a topic
	Sub time.Duration // -s: subtract this duration from a topic
	Update bool // -u: rename/restart the active topic
)
// init binds the command-line flags; the state file defaults to ~/.ts.json in
// the current user's home directory.
func init() {
	u, err := user.Current()
	if err != nil {
		log.Fatal(err)
	}
	flag.StringVar(&Filename, "ts", filepath.Join(u.HomeDir, ".ts.json"), "The file name")
	flag.BoolVar(&Noop, "n", false, "Do Nothing")
	flag.BoolVar(&Finish, "f", false, "Finish")
	flag.BoolVar(&Discard, "d", false, "Discard")
	flag.DurationVar(&Add, "a", 0, "Add")
	flag.DurationVar(&Sub, "s", 0, "Subtract")
	flag.BoolVar(&Update, "u", false, "Update")
}
// main parses the flags and runs the selected action, exiting non-zero on error.
func main() {
	flag.Parse()
	if err := do(); err != nil {
		log.Fatal(err)
	}
}
// do loads the state file, applies exactly one flag-selected action (noop,
// finish, discard, add, subtract, update, or — by default — start/switch
// topic), prunes topics whose total is zero, prints the state, and saves it.
func do() error {
	var j Document
	if err := j.Load(Filename); err != nil {
		return err
	}
	// The remaining command-line args joined by spaces form the topic name.
	now, topic := time.Now(), strings.Join(flag.Args(), " ")
	if Noop {
	} else if Finish {
		// Flush the given (or active) topic, then clear the active state.
		if topic != "" {
			j.Topic = topic
		}
		j.Flush(now)
		j.Topic, j.Since = "", time.Time{}
	} else if Discard {
		// Drop the active topic without crediting the elapsed time.
		j.Topic, j.Since = "", time.Time{}
	} else if Add != 0 {
		// Credit extra time to the given topic (default: the active one).
		if topic == "" {
			topic = j.Topic
		}
		if topic != "" {
			j.Topics[topic] += Add
		}
	} else if Sub != 0 {
		// Subtract time from the topic, then move whatever remains into the
		// "_" bucket and zero the topic itself.
		if topic == "" {
			topic = j.Topic
		}
		if topic != "" {
			j.Topics[topic] += -Sub
			j.Topics["_"] += j.Topics[topic]
			j.Topics[topic] -= j.Topics[topic]
		}
	} else if Update {
		// Rename the active topic (if one exists), flush it, restart clock.
		if j.Topic != "" && topic != "" {
			j.Topic = topic
		}
		j.Flush(now)
		if topic != "" {
			j.Topic = topic
			j.Since = now
		}
	} else {
		// Default: flush the active topic and switch to the new one, if any.
		j.Flush(now)
		if topic != "" {
			j.Topic = topic
			j.Since = now
		}
	}
	// Topics that net out to exactly zero are dropped from the file.
	for t, d := range j.Topics {
		if d == 0 {
			delete(j.Topics, t)
		}
	}
	j.Println(now)
	if err := j.Save(Filename); err != nil {
		return err
	}
	return nil
}
|
// Copyright 2020 The Moov Authors
// Use of this source code is governed by an Apache License
// license that can be found in the LICENSE file.
package customers
import (
"encoding/json"
"fmt"
"testing"
"github.com/moov-io/base"
)
// TestStatus__json checks that Status unmarshals case-insensitively from JSON
// strings and rejects unknown values.
func TestStatus__json(t *testing.T) {
	cs := Status(10) // start from a value matching no known status
	valid := map[string]Status{
		"deCEAsed": Deceased,
		"Rejected": Rejected,
		"ReviewRequired": ReviewRequired,
		"NONE": None,
		"KYC": KYC,
		"ofaC": OFAC,
		"cip": CIP,
	}
	for k, v := range valid {
		in := []byte(fmt.Sprintf(`"%s"`, k))
		if err := json.Unmarshal(in, &cs); err != nil {
			t.Error(err.Error())
		}
		if cs != v {
			t.Errorf("got cs=%#v, v=%#v", cs, v)
		}
	}
	// make sure other values fail
	in := []byte(fmt.Sprintf(`"%v"`, base.ID()))
	if err := json.Unmarshal(in, &cs); err == nil {
		t.Error("expected error")
	}
}
func TestStatus__string(t *testing.T) {
if v := OFAC.String(); v != "ofac" {
t.Errorf("got %s", v)
}
if v := Deceased.String(); v != "deceased" {
t.Errorf("got %s", v)
}
}
// TestStatus__liftStatus checks that LiftStatus parses known lowercase status
// names into the corresponding Status values without error.
func TestStatus__liftStatus(t *testing.T) {
	if cs, err := LiftStatus("kyc"); *cs != KYC || err != nil {
		t.Errorf("got %s error=%v", cs, err)
	}
	if cs, err := LiftStatus("none"); *cs != None || err != nil {
		t.Errorf("got %s error=%v", cs, err)
	}
	if cs, err := LiftStatus("cip"); *cs != CIP || err != nil {
		t.Errorf("got %s error=%v", cs, err)
	}
}
func TestStatus__approvedAt(t *testing.T) {
// authorized
if !ApprovedAt(OFAC, OFAC) {
t.Errorf("expected ApprovedAt")
}
if !ApprovedAt(OFAC, KYC) {
t.Errorf("expected ApprovedAt")
}
if !ApprovedAt(CIP, KYC) {
t.Errorf("expected ApprovedAt")
}
// not authorized
if ApprovedAt(ReviewRequired, ReviewRequired) {
t.Errorf("expected not ApprovedAt")
}
if ApprovedAt(None, OFAC) {
t.Errorf("expected not ApprovedAt")
}
if ApprovedAt(OFAC, CIP) {
t.Errorf("expected not ApprovedAt")
}
if ApprovedAt(Rejected, OFAC) {
t.Errorf("expected not ApprovedAt")
}
}
|
package yarn
import (
"bufio"
"encoding/json"
"fmt"
"github.com/Sirupsen/logrus"
"github.com/bitly/go-simplejson"
"github.com/rootsongjc/magpie/docker"
"github.com/rootsongjc/magpie/utils"
"github.com/samalba/dockerclient"
"github.com/spf13/viper"
"io"
"io/ioutil"
"net/http"
"os"
"sort"
"strings"
"sync"
"time"
)
// YarnStatus mirrors the field names of the resourcemanager's
// /ws/v1/cluster/metrics JSON response.
// NOTE(review): this struct is not referenced anywhere in this file — the
// metrics below are read straight out of the parsed JSON. Confirm whether it
// is still used elsewhere before relying on it.
type YarnStatus struct {
	appsPending string
	reservedVirtualCores string
	availableVirtualCores string
	allocatedVirtualCores string
	totalVirtualCores string
	lostNodes int64
	activeNodes int64
	appsRunning string
	appsFailed string
	appsKilled string
	availableMB string
	allocatedMB string
	containersPending string
	totalMB string
	totalNodes int64
	rebootedNodes string
	appsSubmitted string
	appsCompleted string
	containersAllocated string
	reservedMB string
	containersReserved string
	unhealthyNodes int64
	decommissionedNodes int64
}
//Get yarn cluster node status
func Get_yarn_status(cluster_names []string) {
fmt.Println("======================YARN CLSUTER STATUS===========================")
fmt.Println("CLUSTER\tTOTAL\tACTIVE\tDECOM\tLOST\tUNHEALTHY\tUsed")
var total_nodes, total_active, total_decom, total_lost, total_unhealthy int64
for i := range cluster_names {
name := cluster_names[i]
url := "http://" + utils.Clustername2ip(name) + ":8088/ws/v1/cluster/metrics"
resp, err := http.Get(url)
if err != nil {
fmt.Println("Yarn cluster ", name, " not found.")
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
js, err := simplejson.NewJson(body)
if err != nil {
panic(err.Error())
}
nodes, _ := js.Get("clusterMetrics").Map()
activeNodes, _ := nodes["activeNodes"].(json.Number).Int64()
totalNodes, _ := nodes["totalNodes"].(json.Number).Int64()
decommissionedNodes, _ := nodes["decommissionedNodes"].(json.Number).Int64()
lostNodes, _ := nodes["lostNodes"].(json.Number).Int64()
unhealthyNodes, _ := nodes["unhealthyNodes"].(json.Number).Int64()
usage := get_yarn_resource_usage(name)
total_active += activeNodes
total_decom += decommissionedNodes
total_nodes += totalNodes
total_lost += lostNodes
total_unhealthy += unhealthyNodes
fmt.Println(name, "\t", totalNodes, "\t", activeNodes, "\t", decommissionedNodes,
"\t", lostNodes, "\t", unhealthyNodes, "\t", usage)
}
fmt.Println("--------------------------------------------------------------------")
fmt.Println("TOTAL", "\t", total_nodes, "\t", total_active, "\t", total_decom, "\t", total_lost, "\t", total_unhealthy)
}
//Get yarn cluster resource usage percent
func get_yarn_resource_usage(clustername string) float64 {
url := "http://" + utils.Clustername2ip(clustername) + ":8088/ws/v1/cluster/scheduler"
resp, err := http.Get(url)
if err != nil {
fmt.Println("Yarn cluster ", clustername, " not found.")
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
js, err := simplejson.NewJson(body)
if err != nil {
panic(err.Error())
}
used, _ := js.Get("scheduler").Get("schedulerInfo").Get("usedCapacity").Float64()
return used
}
//Show the yarn nodemanagers distribution.
func Yarn_distribution(clustername string) {
fmt.Println("====================RUNNING DOCKERS DISTRIBUTION====================")
fmt.Println("HOSTNAME\tNUM")
containers, err := docker.Get_running_docker_containers()
if err != nil {
panic(err)
} else {
distribution := make(map[string]int, len(containers))
for i := range containers {
c := docker.Get_container_name(containers[i].Names[0])
if strings.HasPrefix(c, clustername) {
h := docker.Get_nodemanager_host(containers[i].Names[0])
distribution[h] += 1
}
}
// sorted as the hostname
sorted_keys := make([]string, 0)
for k, _ := range distribution {
sorted_keys = append(sorted_keys, k)
}
sort.Strings(sorted_keys)
for _, k := range sorted_keys {
fmt.Println(k, "\t", distribution[k])
}
}
}
//Inspect the yarn cluster container view
func Yarn_view(clustername string) {
containers := docker.Get_all_yarn_containers()
flag := false
fmt.Println("ID\tCLUSTER\tNAME\tSTATUS\tSTATE\tIP\tHOST")
fmt.Println("=======================================================================================================================================")
for i := range containers {
c := containers[i]
if c.Clustername == clustername {
flag = true
fmt.Println(c.ID, "\t", c.Clustername, "\t", c.Name, "\t", c.Status, "\t", c.State, "\t", c.Ip, "\t", c.Host)
}
}
if flag == false {
fmt.Println("The cluster does not exited.")
}
}
//Decommising the nodemanagers on each resourcemanagers.
//No matter which yarn clusters the nodemanager belonging to,
//you can put the in the same nodefile togethter.
//Magpie will recognize the yarn cluster automatically.
// Decommis_nodemanagers decommissions the given nodemanager container IDs.
// The IDs may belong to different yarn clusters; they are matched against the
// known containers, grouped by cluster, and each cluster is processed in its
// own goroutine. IDs that match no container are reported and skipped.
func Decommis_nodemanagers(nodemanagers []string) {
	nms := make([]docker.Yarn_docker_container, 0, len(nodemanagers))
	containers := docker.Get_all_yarn_containers()
	for _, n := range nodemanagers {
		found := false
		for _, c := range containers {
			if c.ID == n {
				found = true
				nms = append(nms, c)
			}
		}
		if !found {
			fmt.Println("The nodemanager can not be found:", n)
		}
	}
	// Group the matched container IDs per cluster as a newline-separated
	// string (note: each value starts with a leading "\n", preserved from the
	// original implementation — the consumer appends it to the exclude file).
	yarns := make(map[string]string, len(nodemanagers))
	for _, n := range nms {
		yarns[n.Clustername] = yarns[n.Clustername] + "\n" + n.ID
	}
	fmt.Println("Decommising the following nodemangers...")
	var wg sync.WaitGroup
	//traverse all the yarn clusters
	for k, v := range yarns {
		wg.Add(1)
		go decommis_yarn_nodes(k, v, &wg)
	}
	wg.Wait()
}
// Decommising the nodemanager of the nodefile
func Decommis_nodemanagers_through_file(nodefile string) {
fi, err := os.Open(nodefile)
if err != nil {
panic(err)
}
defer fi.Close()
buff := bufio.NewReader(fi)
nodes := make([]string, 0)
for {
line, err := buff.ReadString('\n')
if err != nil || io.EOF == err {
break
}
node := strings.Trim(line, "\n")
nodes = append(nodes, node)
}
Decommis_nodemanagers(nodes)
}
//decommis_yarn_nodes appends the given newline-separated nodemanager
//IDs to the cluster's exclude file on its resourcemanager host (via
//ssh) and then asks yarn to refresh its node list. wg, when non-nil,
//is signaled on return so callers can run several clusters in
//parallel.
//
//NOTE(review): the ssh command is built by string concatenation; if
//clustername or the configured exclude-file path can contain shell
//metacharacters this is injectable — confirm inputs are trusted.
func decommis_yarn_nodes(clustername string, nodemanagers string, wg *sync.WaitGroup) {
	if wg != nil {
		defer wg.Done()
	}
	logger := utils.Logger()
	fmt.Println(clustername, nodemanagers)
	logger.WithFields(logrus.Fields{"Time": time.Now(), "Cluster": clustername, "Nodemanagers": strings.Replace(nodemanagers, "\n", ",", -1), "Action": "DECOM"}).Info("Decomissing nodemanagers " + nodemanagers)
	// Resolve the resourcemanager host for this cluster.
	resource_manager_ip := utils.Clustername2ip(clustername)
	nodemanger_exclude_file := viper.GetString("clusters.nodemanager_exclude_file")
	// Append the IDs to the exclude file on the resourcemanager host.
	command := "ssh -n root@" + resource_manager_ip + ` "echo -e '` + nodemanagers + `'>>` + nodemanger_exclude_file + `"`
	//command := "ssh -n jingchao.song@" + "172.20.0.6" + ` "echo -e '` + nodemanagers + `'>>` + "/home/jingchao.song/docker/test.txt"+ `"`
	//TODO should return result and error handler
	utils.Run_command(command)
	// Tell yarn to re-read the exclude file.
	command = "ssh -n root@" + resource_manager_ip + ` 'su - hadoop -c "yarn rmadmin -refreshNodes"'`
	utils.Run_command(command)
}
//Offline_host decommissions every nodemanager running on the given
//host and then deletes the docker containers on it.
//
//Bug fix: Decommis_nodemanagers matches its arguments against
//container IDs (c.ID), but this function used to collect container
//Names, so no nodemanager was ever matched; collect the IDs instead.
func Offline_host(hostname string) {
	fmt.Println("Offline host", hostname, "...")
	containers := docker.Get_all_yarn_containers()
	nms := make([]string, 0)
	for _, c := range containers {
		if c.Host == hostname {
			nms = append(nms, c.ID)
		}
	}
	Decommis_nodemanagers(nms)
	docker.Delete_containers_on_host(hostname)
}
//Create_new_nodemanager creates a new nodemanager container through
//the swarm master, optionally renames it, and starts it. Every
//configuration value that is empty on nm_config falls back to the
//defaults in the config file (see get_nodemanager_config).
func Create_new_nodemanager(nm_config Nodemanager_config) {
	swarm_master_ip := viper.GetString("clusters.swarm_master_ip")
	swarm_master_port := viper.GetString("clusters.swarm_master_port")
	endpoint := "tcp://" + swarm_master_ip + ":" + swarm_master_port
	client, err := dockerclient.NewDockerClient(endpoint, nil)
	logger := utils.Logger()
	// Fix: err was checked twice in a row; the second branch
	// ("Cannot connect...") was unreachable after the first panic.
	// Print the hint, then panic, in a single check.
	if err != nil {
		fmt.Println("Cannot connect to the swarm master.")
		panic(err)
	}
	fmt.Println("Creating new nodemanager container...")
	env := []string{
		"HA=" + get_nodemanager_config(nm_config.HA, "HA"),
		"NAMESERVICE=" + get_nodemanager_config(nm_config.NAMESERVICE, "NAMESERVICE"),
		"ACTIVE_NAMENODE_IP=" + get_nodemanager_config(nm_config.ACTIVE_NAMENODE_IP, "ACTIVE_NAMENODE_IP"),
		"STANDBY_NAMENODE_IP=" + get_nodemanager_config(nm_config.STANDBY_NAMENODE_IP, "STANDBY_NAMENODE_IP"),
		"ACTIVE_NAMENODE_ID=" + get_nodemanager_config(nm_config.ACTIVE_NAMENODE_ID, "ACTIVE_NAMENODE_ID"),
		"STANDBY_NAMENODE_ID=" + get_nodemanager_config(nm_config.STANDBY_NAMENODE_ID, "STANDBY_NAMENODE_ID"),
		"HA_ZOOKEEPER_QUORUM=" + get_nodemanager_config(nm_config.HA_ZOOKEEPER_QUORUM, "HA_ZOOKEEPER_QUORUM"),
		"NAMENODE_IP=" + get_nodemanager_config(nm_config.NAMENODE_IP, "NAMENODE_IP"),
		"RESOURCEMANAGER_IP=" + get_nodemanager_config(nm_config.RESOURCEMANAGER_IP, "RESOURCEMANAGER_IP"),
		"YARN_RM1_IP=" + nm_config.YARN_RM1_IP,
		"YARN_RM2_IP=" + nm_config.YARN_RM2_IP,
		"YARN_JOBHISTORY_IP=" + nm_config.YARN_JOBHISTORY_IP,
		"CPU_CORE_NUM=" + get_nodemanager_config(nm_config.CPU_CORE_NUM, "CPU_CORE_NUM"),
		"NODEMANAGER_MEMORY_MB=" + get_nodemanager_config(nm_config.NODEMANAGER_MEMORY_MB, "NODEMANAGER_MEMORY_MB"),
		"YARN_CLUSTER_ID=" + nm_config.YARN_CLUSTER_ID,
		"YARN_ZK_DIR=" + nm_config.YARN_ZK_DIR,
		//"PATH=/usr/local/hadoop/bin:/usr/local/hadoop/sbin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/java/bin://usr/local/java/jre/bin",
	}
	// Typo fixed: hostConifg -> hostConfig.
	hostConfig := dockerclient.HostConfig{
		CpuShares:   nm_config.Limit_cpus,
		Memory:      nm_config.Limit_memory_mb * 1024 * 1024, //transform to Byte
		NetworkMode: nm_config.Network_mode,
		OomScoreAdj: 500,
	}
	config := new(dockerclient.ContainerConfig)
	config.Image = get_nodemanager_config(nm_config.Image, "image")
	config.Env = env
	//inherit Cmd and Entrypoint settings from docker Image or set them on config file
	config.Cmd = viper.GetStringSlice("nodemanager.cmd")
	config.Entrypoint = viper.GetStringSlice("nodemanager.entrypoint")
	config.HostConfig = hostConfig
	id, err := client.CreateContainer(config, "", nil)
	if err != nil {
		logger.WithFields(logrus.Fields{"Time": time.Now(), "ContainerID": "-", "Action": "CREATE"}).Error(err)
		panic(err)
	}
	container_name := id[0:12]
	fmt.Println("Container", container_name, "created.")
	logger.WithFields(logrus.Fields{"Time": time.Now(), "ContainerID": container_name, "Action": "CREATE"}).Info("Create a new nodemanager docker container")
	if nm_config.Container_name != "" {
		if err = client.RenameContainer(container_name, nm_config.Container_name); err != nil {
			logger.WithFields(logrus.Fields{"Time": time.Now(), "ContainerID": container_name, "Action": "RENAME"}).Error(err)
			panic(err)
		}
		fmt.Println("Rename container name to", nm_config.Container_name)
		logger.WithFields(logrus.Fields{"Time": time.Now(), "ContainerID": container_name, "Action": "RENAME"}).Info("Rename container "+container_name+" name to ", nm_config.Container_name)
	}
	if err = client.StartContainer(id, nil); err != nil {
		logger.WithFields(logrus.Fields{"Time": time.Now(), "ContainerID": container_name, "Action": "START"}).Error(err)
		panic(err)
	}
	fmt.Println("Started.")
	logger.WithFields(logrus.Fields{"Time": time.Now(), "ContainerID": container_name, "Action": "START"}).Info("Start container " + container_name)
}
//get_nodemanager_config returns nm when it is set; otherwise it falls
//back to the default value "nodemanager.<config>" from the config
//file.
func get_nodemanager_config(nm string, config string) string {
	if nm != "" {
		return nm
	}
	return viper.GetString("nodemanager." + config)
}
|
// Copyright 2014, 2016 Claudemiro Alves Feitosa Neto. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package ipe
import (
"net/http"
"github.com/gorilla/mux"
)
// router wraps a gorilla/mux Router together with the application
// context that is injected into every registered handler.
type router struct {
	ctx *applicationContext
	mux *mux.Router
	// NOTE(review): routes is never initialized or read in this file
	// (newRouter does not set it) — possibly dead; confirm against the
	// rest of the package.
	routes map[string]contextHandler
}
// newRouter builds a router bound to the given application context,
// backed by a gorilla/mux router with StrictSlash enabled.
func newRouter(ctx *applicationContext) *router {
	m := mux.NewRouter().StrictSlash(true)
	return &router{ctx: ctx, mux: m}
}
// GET registers handler for GET requests on path.
func (a *router) GET(path string, handler contextHandler) {
	a.Handle(http.MethodGet, path, handler)
}
// POST registers handler for POST requests on path.
func (a *router) POST(path string, handler contextHandler) {
	a.Handle(http.MethodPost, path, handler)
}
// Handle registers handler for the given HTTP method and path,
// adapting it to a plain http.HandlerFunc that injects the application
// context and the parsed route variables.
func (a *router) Handle(method, path string, handler contextHandler) {
	adapter := func(w http.ResponseWriter, r *http.Request) {
		handler.ServeWithContext(a.ctx, params(mux.Vars(r)), w, r)
	}
	a.mux.Methods(method).Path(path).HandlerFunc(adapter)
}
// ServeHTTP implements http.Handler by delegating to the internal mux.
// NOTE(review): value receiver (copies the struct) while the other
// methods use *router — works here, but consider unifying receivers.
func (a router) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	a.mux.ServeHTTP(w, r)
}
|
package parser
import (
"HelloGo/crawler/fetcher"
"HelloGo/crawler/model"
"fmt"
"regexp"
)
// rInfo matches profile links of the form
// http://album.zhenai.com/u/<id>, capturing the URL and the user name.
var rInfo = regexp.MustCompile(`<a href="(http://album.zhenai.com/u/[\d]+)"[^>]*>([^<]+)</a>`)

// ParseUserInfo extracts user profile links from a listing page and
// returns them as requests whose ParseFunc parses the profile page.
//
// Bug fix: the request slice used to be created with
// make([]model.Request, 10000) — length 10000 — so ten thousand
// zero-value requests preceded every real one in the result; it now
// uses length 0 with capacity 10000.
func ParseUserInfo(b []byte) (res model.ParseResult) {
	sub := rInfo.FindAllSubmatch(b, -1)
	req := make([]model.Request, 0, 10000)
	for _, v := range sub {
		name := string(v[2]) // per-iteration copy, safe to capture below
		r := model.Request{URL: string(v[1]), ParseFunc: func(bytes []byte) model.ParseResult {
			return ParseProfile(bytes, name)
		}}
		req = append(req, r)
		b, err := fetcher.Fetcher(r.URL)
		if err != nil {
			fmt.Println("fetcher failed Url", r.URL)
			continue
		}
		// NOTE(review): the goroutine's result is discarded — confirm
		// ParseProfile is wanted for its side effects only.
		go r.ParseFunc(b)
	}
	res.Request = req
	return
}
|
package imdb
import (
"encoding/json"
"fmt"
"regexp"
"strconv"
"sync"
"time"
"golang.org/x/text/language"
htmlParser "github.com/jbowtie/gokogiri/html"
"github.com/jbowtie/gokogiri/xml"
)
// Item represents a single item (either a movie or an episode).
type Item struct {
	id       int // IMDB title ID (the digits of ttNNNNNNN, see parsePage)
	title    *string
	year     *int
	itemType ItemType
	season   *int
	episode  *int
	// cachedDocuments maps page name -> parsed document; a nil value
	// marks a fetch in progress (see page()).
	cachedDocuments map[string]*htmlParser.HtmlDocument
	// cacheIndividualLocks serializes the fetch of each single page.
	cacheIndividualLocks map[string]*sync.Mutex
	// cacheLock guards the two maps above.
	cacheLock sync.Mutex
	// client, when set via NewWithClient, is used for HTTP requests.
	client HttpGetter
}
// ItemType is one of Unknown, Movie, Series and Episode.
type ItemType int

//go:generate stringer -type=ItemType
const (
	// Unknown is a null item type.
	Unknown ItemType = iota
	// Any is used for searching for any item.
	Any
	// Movie is the type of an item which is a movie.
	Movie
	// Series is the type of an item which is a series.
	Series
	// Episode is the type of an item which is an episode.
	Episode
)
// Language is a JSON-(un)marshallable wrapper around language.Base.
type Language language.Base

// MarshalJSON encodes the language as its JSON string code.
// NOTE(review): a defined type does not inherit the methods of its
// underlying struct type, so l.String() must be declared elsewhere in
// this package — confirm. Also note the pointer receiver: a plain
// Language value embedded in another struct will not be marshalled
// through this method.
func (l *Language) MarshalJSON() ([]byte, error) {
	return json.Marshal(l.String())
}
// UnmarshalJSON parses a JSON string into a Language via
// language.ParseBase.
func (l *Language) UnmarshalJSON(data []byte) error {
	var code string
	if err := json.Unmarshal(data, &code); err != nil {
		return err
	}
	base, err := language.ParseBase(code)
	if err != nil {
		return err
	}
	*l = Language(base)
	return nil
}
// New creates an Item from an IMDB ID.
func New(id int) *Item {
	item := new(Item)
	item.id = id
	item.cachedDocuments = map[string]*htmlParser.HtmlDocument{}
	item.cacheIndividualLocks = map[string]*sync.Mutex{}
	return item
}
// NewWithClient creates an Item from an IMDB ID which will use the
// given HTTP client to communicate with IMDB.
func NewWithClient(id int, client HttpGetter) *Item {
	i := New(id)
	i.client = client
	return i
}
// Free releases all parsed documents held by the item. Always call it
// once you are done reading the attributes.
func (s *Item) Free() {
	s.cacheLock.Lock()
	defer s.cacheLock.Unlock()
	for name, doc := range s.cachedDocuments {
		doc.Free()
		delete(s.cachedDocuments, name)
	}
}
// PreloadAll fetches every page this item needs with parallel requests
// to IMDB, so subsequent attribute accessors are served from cache
// without issuing further HTTP requests.
func (s *Item) PreloadAll() {
	pages := []string{"combined", "releaseinfo", "plotsummary", "synopsis"}
	var wg sync.WaitGroup
	wg.Add(len(pages))
	for _, name := range pages {
		go func(p string) {
			defer wg.Done()
			_, _ = s.page(p)
		}(name)
	}
	wg.Wait()
}
// page returns the root node of the html page at
// http://akas.imdb.com/title/tt<s.id>/<name>, fetching and caching it
// on first use. Concurrent callers for the same page block until the
// first fetch finishes; callers for different pages proceed in
// parallel.
//
// Locking protocol: cacheLock guards the two maps; the per-page mutex
// in cacheIndividualLocks serializes the fetch of one page. A nil
// entry in cachedDocuments marks "fetch in progress".
func (s *Item) page(name string) (*xml.ElementNode, error) {
	s.cacheLock.Lock()
	individualLock, ok := s.cacheIndividualLocks[name]
	if !ok {
		// First time this page is requested: create its lock.
		individualLock = &sync.Mutex{}
		s.cacheIndividualLocks[name] = individualLock
	}
	document, ok := s.cachedDocuments[name]
	if !ok {
		// Mark the fetch as in progress and take the page lock while
		// still holding cacheLock, so no other goroutine can slip in.
		s.cachedDocuments[name] = nil
		individualLock.Lock()
	}
	s.cacheLock.Unlock()
	if ok && document == nil {
		// Another goroutine is fetching this page: wait on its lock,
		// then re-read the now-populated cache entry.
		// NOTE(review): this re-read happens without cacheLock held —
		// confirm it cannot race with the writer below.
		individualLock.Lock()
		document, ok = s.cachedDocuments[name]
		if ok {
			individualLock.Unlock()
		}
	}
	if !ok {
		// We own the fetch (individualLock is held from above).
		var err error
		document, err = s.parsePage(name)
		if err != nil {
			individualLock.Unlock()
			return nil, err
		}
		s.cacheLock.Lock()
		s.cachedDocuments[name] = document
		individualLock.Unlock()
		s.cacheLock.Unlock()
	}
	return document.Root(), nil
}
// parsePage fetches and parses the page named name for this item's
// title, e.g. http://akas.imdb.com/title/tt0123456/combined.
func (s *Item) parsePage(name string) (*htmlParser.HtmlDocument, error) {
	pageURL := fmt.Sprintf("http://akas.imdb.com/title/tt%07d/%s", s.id, name)
	return parsePage(s.client, pageURL)
}
// ttIDPattern extracts the numeric part of an IMDB title link such as
// "/title/tt0120737/". Compiled once at package level instead of on
// every idFromLink call.
var ttIDPattern = regexp.MustCompile(`\/tt([0-9]+)`)

// idFromLink extracts an IMDB ID from a link. It returns an error when
// the link contains no /tt<digits> component or the digits do not fit
// in an int.
func idFromLink(link string) (int, error) {
	groups := ttIDPattern.FindStringSubmatch(link)
	if len(groups) <= 1 || groups[1] == "" {
		return 0, fmt.Errorf("invalid link: %s", link)
	}
	id, err := strconv.Atoi(groups[1])
	if err != nil {
		return 0, fmt.Errorf("invalid imdb id: %s", err)
	}
	return id, nil
}
// parseDate parses a date from IMDB's default format
func parseDate(text string) (time.Time, error) {
t, err := time.Parse("2 January 2006", text)
if err != nil {
return time.Time{}, fmt.Errorf("can't parse date string '%s': %s", text, err)
}
return t, nil
}
// firstMatching loads the page named pageName for this item and
// returns the first node in it matching xpath.
func (s *Item) firstMatching(pageName string, xpath string) (xml.Node, error) {
	root, err := s.page(pageName)
	if err != nil {
		return nil, err
	}
	return firstMatchingOnNode(root, xpath)
}
// firstMatchingOnNode returns the first child of node matching xpath,
// or an error when the search fails or yields no element.
func firstMatchingOnNode(node xml.Node, xpath string) (xml.Node, error) {
	found, err := node.Search(xpath)
	if err != nil {
		return nil, err
	}
	if len(found) > 0 {
		return found[0], nil
	}
	return nil, fmt.Errorf("unable to find element")
}
|
package middleware
import (
"database/sql"
"encoding/json" // package to encode and decode the json into struct and vice versa
"fmt"
"go-postgres/models" // models package where User schema is defined
"log"
"net/http" // used to access the request and response object of the api
"os" // used to read the environment variable
"strconv" // package used to covert string into int type
"strings"
// used to get the params from the route
"github.com/joho/godotenv" // package used to read the .env file
_ "github.com/lib/pq" // postgres golang driver
)
// response is the JSON envelope returned by the write endpoints:
// the affected/created row ID plus a human-readable message.
type response struct {
	ID      int64  `json:"id,omitempty"`
	Message string `json:"message,omitempty"`
}
// createConnection opens a postgres connection using the POSTGRES_URL
// environment variable loaded from the .env file, verifies it with
// Ping, and returns it. The caller is responsible for closing it.
func createConnection() *sql.DB {
	// Fix: the underlying error used to be discarded from the log.
	if err := godotenv.Load(".env"); err != nil {
		log.Fatalf("Error loading .env file: %v", err)
	}
	// Open only validates the DSN; the Ping below does the real check.
	db, err := sql.Open("postgres", os.Getenv("POSTGRES_URL"))
	if err != nil {
		panic(err)
	}
	if err = db.Ping(); err != nil {
		panic(err)
	}
	return db
}
// PrepForTesting resets the database to a clean state so tests start
// from an empty book table.
func PrepForTesting() {
	clearDB()
}
// CreateBook decodes a book from the JSON request body, inserts it
// into the database and responds with the new ID (or a
// rating-validation message when insertBook reports -1).
func CreateBook(w http.ResponseWriter, r *http.Request) {
	// Fix: "Context-Type" was a typo for "Content-Type".
	w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
	// Allow all origins to handle CORS.
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set("Access-Control-Allow-Methods", "POST")
	w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
	var book models.Book
	// Fix: a malformed body is a client error; log.Fatalf here used to
	// kill the whole server process on any bad request.
	if err := json.NewDecoder(r.Body).Decode(&book); err != nil {
		log.Printf("Unable to decode the request body. %v", err)
		http.Error(w, "invalid request body", http.StatusBadRequest)
		return
	}
	insertID := insertBook(book)
	message := "Book added successfully"
	// insertBook signals an out-of-range rating with -1.
	if insertID == -1 {
		message = "Rating needs to be in range 1-3"
	}
	res := response{
		ID:      insertID,
		Message: message,
	}
	json.NewEncoder(w).Encode(res)
}
// GetBook returns the book whose ID is taken from the URL path
// ("/api/book/<id>") as JSON.
func GetBook(w http.ResponseWriter, r *http.Request) {
	// Fix: "Context-Type" was a typo for "Content-Type".
	w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
	w.Header().Set("Access-Control-Allow-Methods", "GET")
	w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	stringid := strings.ReplaceAll(r.URL.String(), "/api/book/", "")
	id, err := strconv.Atoi(stringid)
	// Fix: a bad ID is a client error; log.Fatalf used to kill the
	// whole server process.
	if err != nil {
		log.Printf("Unable to convert the string into int. %v", err)
		http.Error(w, "invalid book id", http.StatusBadRequest)
		return
	}
	book, err := getBookByID(int64(id))
	if err != nil {
		log.Printf("Unable to get book. %v", err)
		http.Error(w, "unable to get book", http.StatusInternalServerError)
		return
	}
	json.NewEncoder(w).Encode(book)
}
// GetAllBooks returns every book in the database as a JSON array.
func GetAllBooks(w http.ResponseWriter, r *http.Request) {
	// Fix: "Context-Type" was a typo for "Content-Type".
	w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	books, err := getAllBooks()
	// Fix: a failed query is a server error; log.Fatalf used to kill
	// the whole server process.
	if err != nil {
		log.Printf("Unable to get all books. %v", err)
		http.Error(w, "unable to get books", http.StatusInternalServerError)
		return
	}
	json.NewEncoder(w).Encode(books)
}
// UpdateBook updates the book whose ID is taken from the URL path
// ("/api/book/<id>") with the JSON body, and responds with the number
// of affected rows (or a rating-validation message when updateBook
// reports -1).
func UpdateBook(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set("Access-Control-Allow-Methods", "PUT")
	w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
	stringid := strings.ReplaceAll(r.URL.String(), "/api/book/", "")
	id, err := strconv.Atoi(stringid)
	// Fix: a bad ID is a client error; log.Fatalf used to kill the
	// whole server process.
	if err != nil {
		log.Printf("Unable to convert the string into int. %v", err)
		http.Error(w, "invalid book id", http.StatusBadRequest)
		return
	}
	var book models.Book
	if err = json.NewDecoder(r.Body).Decode(&book); err != nil {
		log.Printf("Unable to decode the request body. %v", err)
		http.Error(w, "invalid request body", http.StatusBadRequest)
		return
	}
	updatedRows := updateBook(int64(id), book)
	// Fix: this endpoint updates books, not users.
	msg := fmt.Sprintf("Book updated successfully. Total rows/record affected %v ", updatedRows)
	// updateBook signals an out-of-range rating with -1.
	if updatedRows == -1 {
		msg = "Rating needs to be in range 1-3"
	}
	res := response{
		ID:      int64(id),
		Message: msg,
	}
	json.NewEncoder(w).Encode(res)
}
// DeleteBook deletes the book whose ID is taken from the URL path
// ("/api/deletebook/<id>") and reports the number of affected rows.
func DeleteBook(w http.ResponseWriter, r *http.Request) {
	// Fix: "Context-Type" was a typo for "Content-Type".
	w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set("Access-Control-Allow-Methods", "DELETE")
	w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
	stringid := strings.ReplaceAll(r.URL.String(), "/api/deletebook/", "")
	id, err := strconv.Atoi(stringid)
	// Fix: a bad ID is a client error; log.Fatalf used to kill the
	// whole server process.
	if err != nil {
		log.Printf("Unable to convert the string into int. %v", err)
		http.Error(w, "invalid book id", http.StatusBadRequest)
		return
	}
	deletedRows := deleteBook(int64(id))
	// Fix: this endpoint deletes books; the old message said
	// "User updated successfully".
	msg := fmt.Sprintf("Book deleted successfully. Total rows/record affected %v", deletedRows)
	res := response{
		ID:      int64(id),
		Message: msg,
	}
	json.NewEncoder(w).Encode(res)
}
//------------------------- Implementation functions ----------------
// insertBook inserts one row into the book table and returns the
// generated ID. An out-of-range rating (outside 1..3) is reported with
// the sentinel value -1 and nothing is inserted.
func insertBook(book models.Book) int64 {
	db := createConnection()
	defer db.Close()
	const sqlStatement = `INSERT INTO book (Title, Author, Publisher, Publish_Date, Rating, Status) VALUES ($1, $2, $3, $4, $5, $6) RETURNING ID`
	var id int64
	// Validate the rating before touching the table.
	if book.Rating < 1 || book.Rating > 3 {
		return -1
	}
	err := db.QueryRow(sqlStatement, book.Title, book.Author, book.Publisher, book.Publish_Date, book.Rating, book.Status).Scan(&id)
	if err != nil {
		log.Fatalf("Unable to execute the query. %v", err)
	}
	return id
}
// getBookByID loads the book row with the given id.
//
// NOTE(review): when no row exists (sql.ErrNoRows) this returns the
// zero-value book with a nil error, so callers cannot distinguish
// "not found" from a real book — confirm this is intended.
func getBookByID(id int64) (models.Book, error) {
	// create the postgres db connection
	db := createConnection()
	// close the db connection
	defer db.Close()
	// create a new book model
	var book models.Book
	// create the select sql query
	sqlStatement := `SELECT * FROM book WHERE id=$1`
	// execute the sql statement
	row := db.QueryRow(sqlStatement, id)
	// unmarshal the row object to book
	err := row.Scan(&book.ID, &book.Title, &book.Author, &book.Publisher, &book.Publish_Date, &book.Rating, &book.Status)
	switch err {
	case sql.ErrNoRows:
		// Missing row: swallowed on purpose (see NOTE above).
		return book, nil
	case nil:
		return book, nil
	default:
		// Any other scan failure terminates the process.
		log.Fatalf("Unable to scan the row. %v", err)
	}
	// Unreachable: every switch branch above returns or exits.
	return book, err
}
// getAllBooks loads every row of the book table.
func getAllBooks() ([]models.Book, error) {
	db := createConnection()
	defer db.Close()
	var books []models.Book
	sqlStatement := `SELECT * FROM book`
	rows, err := db.Query(sqlStatement)
	if err != nil {
		log.Fatalf("Unable to execute the query. %v", err)
	}
	defer rows.Close()
	// Scan each row into a Book and accumulate.
	for rows.Next() {
		var b models.Book
		err = rows.Scan(&b.ID, &b.Title, &b.Author, &b.Publisher, &b.Publish_Date, &b.Rating, &b.Status)
		if err != nil {
			log.Fatalf("Unable to scan the row. %v", err)
		}
		books = append(books, b)
	}
	return books, err
}
// updateBook overwrites the row with the given id using book's fields
// and returns the number of affected rows. An out-of-range rating
// (outside 1..3) is reported with the sentinel value -1 and nothing is
// updated.
func updateBook(id int64, book models.Book) int64 {
	db := createConnection()
	defer db.Close()
	const sqlStatement = `UPDATE book SET Title=$2, Author=$3, Publisher=$4, Publish_Date =$5, Rating = $6, Status = $7 WHERE id=$1`
	// Validate the rating before touching the table.
	if book.Rating < 1 || book.Rating > 3 {
		return -1
	}
	res, err := db.Exec(sqlStatement, id, book.Title, book.Author, book.Publisher, book.Publish_Date, book.Rating, book.Status)
	if err != nil {
		log.Fatalf("Unable to execute the query. %v", err)
	}
	affected, err := res.RowsAffected()
	if err != nil {
		log.Fatalf("Error while checking the affected rows. %v", err)
	}
	return affected
}
// deleteBook removes the row with the given id and returns the number
// of affected rows.
func deleteBook(id int64) int64 {
	db := createConnection()
	defer db.Close()
	const sqlStatement = `DELETE FROM book WHERE id=$1`
	res, err := db.Exec(sqlStatement, id)
	if err != nil {
		log.Fatalf("Unable to execute the query. %v", err)
	}
	affected, err := res.RowsAffected()
	if err != nil {
		log.Fatalf("Error while checking the affected rows. %v", err)
	}
	return affected
}
// clearDB empties the book table and resets its ID sequence.
//
// Fix: the statement batch returns no rows, so it is executed with
// Exec instead of Query (Query produced an unused *Rows for a
// non-SELECT batch and its error semantics don't fit DDL).
func clearDB() {
	db := createConnection()
	defer db.Close()
	sqlStatement := `
	TRUNCATE book;
	DELETE FROM book;
	ALTER SEQUENCE book_id_seq RESTART WITH 1;`
	if _, err := db.Exec(sqlStatement); err != nil {
		log.Fatalf("Unable to execute the query. %v", err)
	}
}
|
package controller
import (
"errors"
"net/http"
"github.com/appditto/pippin_nano_wallet/apps/server/models/requests"
"github.com/appditto/pippin_nano_wallet/libs/database/ent"
"github.com/appditto/pippin_nano_wallet/libs/utils"
"github.com/appditto/pippin_nano_wallet/libs/wallet"
"github.com/mitchellh/mapstructure"
"k8s.io/klog/v2"
)
// Some common things multiple handlers use
// WalletExists looks up the wallet with the given ID. On "not found"
// or "invalid wallet" it writes a not-found response; on any other
// lookup failure it writes an internal-server-error response. In both
// failure cases nil is returned; otherwise the wallet entity.
func (hc *HttpController) WalletExists(walletId string, w http.ResponseWriter, r *http.Request) *ent.Wallet {
	dbWallet, err := hc.Wallet.GetWallet(walletId)
	switch {
	case errors.Is(err, wallet.ErrWalletNotFound), errors.Is(err, wallet.ErrInvalidWallet):
		ErrWalletNotFound(w, r)
		return nil
	case err != nil:
		ErrInternalServerError(w, r, err.Error())
		return nil
	}
	return dbWallet
}
// DecodeBaseRequest decodes the fields shared by most requests
// (action, wallet) from the request map. On a decode failure or when
// either field is empty, it writes a JSON parse-error response and
// returns nil.
func (hc *HttpController) DecodeBaseRequest(request *map[string]interface{}, w http.ResponseWriter, r *http.Request) *requests.BaseRequest {
	var base requests.BaseRequest
	err := mapstructure.Decode(request, &base)
	if err != nil {
		klog.Errorf("Error unmarshalling request %s", err)
	}
	if err != nil || base.Wallet == "" || base.Action == "" {
		ErrUnableToParseJson(w, r)
		return nil
	}
	return &base
}
// DecodeBaseRequestWithCount decodes the common request fields plus an
// optional "count". It writes a JSON parse-error response and returns
// (nil, 0) on invalid input (including count < 1). When count is
// absent, 0 is returned for it.
func (hc *HttpController) DecodeBaseRequestWithCount(request *map[string]interface{}, w http.ResponseWriter, r *http.Request) (*requests.BaseRequestWithCount, int) {
	var baseRequest requests.BaseRequestWithCount
	if err := mapstructure.Decode(request, &baseRequest); err != nil {
		klog.Errorf("Error unmarshalling request with count %s", err)
		ErrUnableToParseJson(w, r)
		return nil, 0
	} else if baseRequest.Wallet == "" || baseRequest.Action == "" {
		ErrUnableToParseJson(w, r)
		return nil, 0
	}
	var count int
	var err error
	if baseRequest.Count != nil {
		count, err = utils.ToInt(*baseRequest.Count)
		if err != nil || count < 1 {
			ErrUnableToParseJson(w, r)
			return nil, 0
		}
		// Removed dead code: the old `if count < 1 { count = 1 }`
		// could never fire — count < 1 already returned above.
	}
	return &baseRequest, count
}
// ! TODO - can we reduce duplication with generics or something ?
// DecodeAccountCreateRequest decodes an account_create request plus
// its optional non-negative index. On any invalid input it writes a
// JSON parse-error response and returns (nil, nil); a missing index is
// returned as a nil pointer.
func (hc *HttpController) DecodeAccountCreateRequest(request *map[string]interface{}, w http.ResponseWriter, r *http.Request) (*requests.AccountCreateRequest, *int) {
	var req requests.AccountCreateRequest
	err := mapstructure.Decode(request, &req)
	if err != nil {
		klog.Errorf("Error unmarshalling request with count %s", err)
	}
	if err != nil || req.Wallet == "" || req.Action == "" {
		ErrUnableToParseJson(w, r)
		return nil, nil
	}
	if req.Index == nil {
		return &req, nil
	}
	index, convErr := utils.ToInt(*req.Index)
	if convErr != nil || index < 0 {
		ErrUnableToParseJson(w, r)
		return nil, nil
	}
	return &req, &index
}
|
package main
import "fmt"
// main prints the height of each student from a name->height map.
func main() {
	heights := map[string]string{"Aldo": "182 cm", "Yosep": "178 cm"}
	fmt.Println("Aldo :", heights["Aldo"])
	fmt.Println("Yosep : ", heights["Yosep"])
}
// tampil_mahasiswa returns its two string arguments unchanged.
func tampil_mahasiswa(x string, y string) (string, string) {
	return x, y
}
|
package models
import "github.com/jinzhu/gorm"
// Saying is the gorm model for one quotation: its text, its author and
// a status flag. It maps to the "saying" table (see TableName).
type Saying struct {
	gorm.Model
	Content string
	Author  string
	Status  int
}
// TableName tells gorm to use the singular table name "saying"
// instead of its default pluralized name.
func (Saying) TableName() string {
	return "saying"
}
|
package utils
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestTestingClock verifies that TestingClock reports exactly the time
// stored in it (whether assigned directly or via Set) and that After
// fires without advancing the fake time. The assertions are
// order-dependent: each block mutates the clock the next one reads.
func TestTestingClock(t *testing.T) {
	c := &TestingClock{
		now: time.Unix(0, 0),
	}
	assert.Equal(t, int64(0), c.Now().Unix())
	// Direct field mutation is visible through Now().
	c.now = time.Unix(20, 0)
	assert.Equal(t, int64(20), c.Now().Unix())
	assert.Equal(t, int64(20000000000), c.Now().UnixNano())
	c.Set(time.Unix(16000000, 0))
	assert.Equal(t, int64(16000000), c.Now().Unix())
	// After must not move the fake clock forward.
	before := c.Now()
	<-c.After(time.Millisecond * 100)
	assert.Equal(t, before, c.Now())
}
// TestRealClock verifies that RealClock tracks the system clock and
// that After really waits: the elapsed time across a 100ms After must
// be at least 100ms and (with slack for scheduling) under 120ms.
func TestRealClock(t *testing.T) {
	c := &RealClock{}
	assert.WithinDuration(t, time.Now(), c.Now(), time.Second)
	before := c.Now()
	<-c.After(time.Millisecond * 100)
	after := c.Now()
	// 20ms slack for scheduling jitter.
	assert.WithinDuration(t, before, after, time.Millisecond*120)
	diff := after.Sub(before)
	assert.GreaterOrEqual(t, diff, time.Millisecond*100)
}
|
package main
import (
"context"
"flag"
"github.com/go-chi/chi"
"github.com/go-chi/chi/middleware"
"github.com/go-chi/cors"
"github.com/hardstylez72/bblog/ad/pkg/group"
"github.com/hardstylez72/bblog/ad/pkg/grouproute"
"github.com/hardstylez72/bblog/ad/pkg/infra/logger"
"github.com/hardstylez72/bblog/ad/pkg/infra/storage"
"github.com/hardstylez72/bblog/ad/pkg/route"
"github.com/hardstylez72/bblog/ad/pkg/tag"
"github.com/hardstylez72/bblog/ad/pkg/user"
"github.com/hardstylez72/bblog/ad/pkg/usergroup"
"github.com/hardstylez72/bblog/ad/pkg/userroute"
"github.com/spf13/viper"
"go.uber.org/zap"
"log"
"net/http"
"time"
)
const (
	// apiPathPrefix is the URL prefix under which all API routes are
	// mounted (see Handler).
	apiPathPrefix = "/api"
)

// Server bundles the application logger with the root chi router.
type Server struct {
	log    *zap.SugaredLogger
	router chi.Router
}
// main builds the logger and runs the HTTP server, aborting the
// process on any fatal error.
func main() {
	zlog, err := logger.New("")
	errCheck(err, "can't load config")
	defer zlog.Sync()
	err = NewServer(zlog).Run()
	errCheck(err, "can't run server")
}
// errCheck terminates the process with errorText and the error when
// err is non-nil; otherwise it does nothing.
func errCheck(err error, errorText string) {
	if err != nil {
		log.Fatal(errorText, ": ", err)
	}
}
// NewServer constructs a Server with a fresh chi router and the given
// logger.
func NewServer(log *zap.SugaredLogger) *Server {
	s := new(Server)
	s.router = chi.NewRouter()
	s.log = log
	return s
}
// Run loads the config file given by the -config flag and then starts
// the HTTP server on the configured port, blocking until it exits.
func (s *Server) Run() error {
	// NOTE(review): flag definition/parsing inside Run means Run can
	// only be called once per process — confirm that is intended.
	configPath := flag.String("config", "/home/hs/go/src/github.com/hardstylez72/bblog/ad/cmd/server/config.yaml", "path to config file")
	flag.Parse()
	err := Load(*configPath)
	if err != nil {
		return err
	}
	//err = tracer.New(viper.GetString("tracer.jaeger.collectorEndpoint"), viper.GetString("tracer.jaeger.serviceName"))
	//if err != nil {
	//	return err
	//}
	// NOTE(review): no Read/Write timeouts are set on the server —
	// consider adding them.
	httpServer := &http.Server{
		Addr:    viper.GetString("port"),
		Handler: s.Handler(),
	}
	return httpServer.ListenAndServe()
}
// Handler wires CORS, request-ID, logging and timeout middleware onto
// the router, mounts it under the API prefix, and registers all
// application routes via Start. It terminates the process when route
// setup fails.
func (s *Server) Handler() chi.Router {
	r := s.router
	// Permissive CORS: all origins/headers, common methods.
	c := cors.Handler(cors.Options{
		AllowedMethods:   []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
		AllowedHeaders:   []string{"*"},
		AllowedOrigins:   []string{"*"},
		AllowCredentials: true,
		Debug:            true,
	})
	r.Use(c)
	r.Use(middleware.RequestID)
	r.Use(logger.Inject(s.log))
	r.Use(middleware.Timeout(60 * time.Second))
	// NOTE(review): the router is mounted onto itself under /api —
	// confirm this is intended rather than mounting a sub-router.
	r.Mount(apiPathPrefix, r)
	err := Start(r)
	if err != nil {
		log.Fatal(err)
	}
	s.log.Info("app is successfully running")
	return r
}
// Start connects to postgres, runs migrations, mounts every resource
// controller on the router, and seeds/resolves the default user,
// group, and route bindings. Any failure is returned to the caller.
func Start(r chi.Router) error {
	// Database connection + sqlx wrapper.
	pg, err := storage.NewPGConnection(viper.GetString("databases.postgres"))
	if err != nil {
		return err
	}
	pgx, err := storage.WrapPgConnWithSqlx(pg)
	if err != nil {
		return err
	}
	err = storage.RunMigrations(pg, "ad/migrations")
	if err != nil {
		return err
	}
	// Mount one controller per resource.
	tag.NewController(tag.NewRepository(pgx)).Mount(r)
	route.NewController(route.NewRepository(pgx)).Mount(r)
	group.NewController(group.NewRepository(pgx)).Mount(r)
	grouproute.NewController(grouproute.NewRepository(pgx)).Mount(r)
	user.NewController(user.NewRepository(pgx)).Mount(r)
	usergroup.NewController(usergroup.NewRepository(pgx)).Mount(r)
	userroute.NewController(userroute.NewRepository(pgx)).Mount(r)
	// Resolve (create-or-load) the default user/group and link them.
	ctx := context.Background()
	u, err := resolveUser(ctx, user.NewRepository(pgx))
	if err != nil {
		return err
	}
	g, err := resolveGroup(ctx, group.NewRepository(pgx))
	if err != nil {
		return err
	}
	err = resolveUserAndGroup(ctx, usergroup.NewRepository(pgx), u.Id, g.Id)
	if err != nil {
		return err
	}
	// Register the router's routes in the DB and bind them to the group.
	rs := buildRoutes(r)
	rs, err = resolveRoutes(ctx, route.NewRepository(pgx), rs)
	if err != nil {
		return err
	}
	err = resolveGroupAndRoutes(ctx, grouproute.NewRepository(pgx), rs, g.Id)
	if err != nil {
		return err
	}
	return nil
}
|
// gopack - Golang asset pipeline
// https://github.com/wcamarao/gopack
// MIT Licensed
//
package gopack
// Open-ended interface to define a custom pipeline,
// grouping assets by patterns, and allowing multiple
// processors to be applied per group.
//
// Group - asset group or type name (e.g. JavaScripts).
// Use CamelCase as you may want to export this
// out into your templates.
//
// Patterns - file patterns to match (e.g. "*.js"). The order
// is relevant, e.g. {"*.module.js", "*.js"} will
// load *.module.js before any other *.js matches.
//
// Exceptions - file patterns not to match. The order is not
// relevant. The first matched exception is skipped.
//
// Processors - processors to be applied once a pattern matches.
// Multiple processors are piped into one another.
//
type Matcher struct {
	Group      string      // asset group/type name, e.g. "JavaScripts"
	Patterns   []string    // ordered file patterns to match
	Exceptions []string    // file patterns to skip (order irrelevant)
	Processors []Processor // applied in order, piped into one another
}
|
package handlers
import (
"fmt"
"net/http"
)
// HelloHandler responds to GET requests with "Hello <name>!" in plain
// text, where <name> is taken from the "name" query parameter. Any
// other method receives 405 Method Not Allowed.
func HelloHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/plain")
	if r.Method != http.MethodGet {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}
	greeting := fmt.Sprintf("Hello %s!", r.URL.Query().Get("name"))
	w.WriteHeader(http.StatusOK)
	w.Write([]byte(greeting))
}
|
package main
import (
"bytes"
"fmt"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
log "github.com/sirupsen/logrus"
)
// handler serves both the wscapture support files and the wrapped
// application's static files over HTTP.
type handler struct {
	log    *log.Logger
	config *config    // holds wsRoot / appRoot paths (declared elsewhere in this package)
	m      sync.Mutex // serializes `make` invocations in getFile
}
// logResponse records one request/response pair, choosing the log level
// from the status code: 5xx → error, 4xx → warning, everything else → info.
func (h *handler) logResponse(r *http.Request, code int) {
	var level log.Level
	switch {
	case code >= 500:
		level = log.ErrorLevel
	case code >= 400:
		level = log.WarnLevel
	default:
		level = log.InfoLevel
	}
	h.log.Logln(level, code, r.URL)
}
// serveError logs the outcome and writes an HTTP error response with the
// given status code and plain-text message.
func (h *handler) serveError(w http.ResponseWriter, r *http.Request, code int, msg string) {
	h.logResponse(r, code)
	http.Error(w, msg, code)
}
// notFound responds with 404 and echoes the requested URL in the body.
func (h *handler) notFound(w http.ResponseWriter, r *http.Request) {
	h.serveError(w, r, http.StatusNotFound, fmt.Sprintf("Not found: %q", r.URL))
}
// internalError logs err and responds with 500, including the error text
// in the body.
func (h *handler) internalError(w http.ResponseWriter, r *http.Request, err error) {
	h.log.Error(err)
	h.serveError(w, r, http.StatusInternalServerError, fmt.Sprintf("Internal error: %v", err))
}
// ServeHTTP routes requests: paths under /__wscapture__/ serve the
// capture tooling (socket endpoint, bundled or module script), while all
// other paths are served from the wrapped application's file tree.
// Responses are never cached.
func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Cache-Control", "no-cache")
	path := r.URL.EscapedPath()
	if len(path) < 1 || path[0] != '/' {
		h.notFound(w, r)
		return
	}
	parts := strings.Split(path[1:], "/")
	if parts[0] != "__wscapture__" {
		// Anything outside the reserved prefix is an application file.
		h.handleAppFile(w, r, parts)
		return
	}
	if len(parts) != 2 {
		h.notFound(w, r)
		return
	}
	switch parts[1] {
	case "socket":
		h.getSocket(w, r)
	case "script.js":
		h.handleWsFile(w, r, "wscapture.bundle.js", true)
	case "module.js":
		h.handleWsFile(w, r, "wscapture.js", false)
	default:
		h.notFound(w, r)
	}
}
// getFile opens the named file under the wscapture root, optionally
// rebuilding it first with `make <name>`. It returns nil if the build or
// the open fails (the cause is logged). The mutex ensures only one build
// runs at a time.
func (h *handler) getFile(name string, doBuild bool) *os.File {
	if doBuild {
		h.m.Lock()
		defer h.m.Unlock()
		cmd := exec.Command("make", name)
		cmd.Dir = h.config.wsRoot
		// Capture combined output so build errors can be surfaced below.
		var buf bytes.Buffer
		cmd.Stdout = &buf
		cmd.Stderr = &buf
		if err := cmd.Run(); err != nil {
			h.log.Errorln("Could not build target:", name)
			os.Stderr.Write(buf.Bytes())
			return nil
		}
	}
	fp, err := os.Open(filepath.Join(h.config.wsRoot, name))
	if err != nil {
		h.log.Errorln("Could not open file:", err)
		return nil
	}
	return fp
}
// handleWsFile serves one wscapture support file, rebuilding it first when
// doBuild is set.
//
// Fix: getFile returns nil on build/open failure; the original deferred
// Close and called Stat on that nil *os.File, relying on os.File's
// nil-receiver behavior and reporting a misleading "invalid argument"
// error. Handle the nil case explicitly instead.
func (h *handler) handleWsFile(w http.ResponseWriter, r *http.Request, name string, doBuild bool) {
	fp := h.getFile(name, doBuild)
	if fp == nil {
		// getFile already logged the underlying cause.
		h.serveError(w, r, http.StatusInternalServerError, "could not load file: "+name)
		return
	}
	defer fp.Close()
	st, err := fp.Stat()
	if err != nil {
		h.internalError(w, r, err)
		return
	}
	h.logResponse(r, http.StatusOK)
	http.ServeContent(w, r, name, st.ModTime(), fp)
}
// handleAppFile serves a file from the application root. Each path
// segment is percent-decoded and validated: empty segments, "." , "..",
// control characters, and embedded slashes are rejected, which confines
// lookups to the app root (no traversal).
//
// Fix: when os.Open failed with an error other than "not exist", the
// original fell through without returning and continued with a nil fp;
// the open error was lost. Report it as an internal error instead.
func (h *handler) handleAppFile(w http.ResponseWriter, r *http.Request, parts []string) {
	fparts := make([]string, 0, len(parts)+1)
	fparts = append(fparts, h.config.appRoot)
	for _, part := range parts {
		fpart, err := url.PathUnescape(part)
		if err != nil {
			h.notFound(w, r)
			return
		}
		if fpart == "" || fpart == "." || fpart == ".." {
			h.notFound(w, r)
			return
		}
		for _, c := range []byte(fpart) {
			if c < 0x20 || c == '/' {
				h.notFound(w, r)
				return
			}
		}
		fparts = append(fparts, fpart)
	}
	fpath := filepath.Join(fparts...)
	fp, err := os.Open(fpath)
	if err != nil {
		if os.IsNotExist(err) {
			h.notFound(w, r)
			return
		}
		h.internalError(w, r, err)
		return
	}
	defer fp.Close()
	st, err := fp.Stat()
	if err != nil {
		h.internalError(w, r, err)
		return
	}
	h.logResponse(r, http.StatusOK)
	name := fparts[len(fparts)-1]
	http.ServeContent(w, r, name, st.ModTime(), fp)
}
|
package fuctional_options
import (
	"crypto/tls"
	"fmt"
	"time"
)
// ServerBuilder wraps Server with a fluent builder API.
// (Original comment: "use a builder type as a wrapper".)
type ServerBuilder struct {
	Server
}
// Create sets the mandatory address and port and returns the builder for
// chaining.
func (sb *ServerBuilder) Create(addr string, port int) *ServerBuilder {
	sb.Server.Addr = addr
	sb.Server.Port = port
	// Other code would set defaults for the remaining fields here.
	return sb
}
// WithProtocol sets the transport protocol (e.g. "tcp", "udp").
func (sb *ServerBuilder) WithProtocol(protocol string) *ServerBuilder {
	sb.Server.Protocol = protocol
	return sb
}
// WithMaxConn sets the maximum number of concurrent connections.
func (sb *ServerBuilder) WithMaxConn(maxconn int) *ServerBuilder {
	sb.Server.MaxConns = maxconn
	return sb
}
// WithTimeOut sets the server timeout. Callers must pass a real
// time.Duration (e.g. 30*time.Second) — a bare integer literal is
// interpreted as nanoseconds.
func (sb *ServerBuilder) WithTimeOut(timeout time.Duration) *ServerBuilder {
	sb.Server.Timeout = timeout
	return sb
}
// WithTLS sets the TLS configuration; nil leaves TLS disabled.
func (sb *ServerBuilder) WithTLS(tls *tls.Config) *ServerBuilder {
	sb.Server.TLS = tls
	return sb
}
// Build returns the assembled Server by value.
func (sb *ServerBuilder) Build() Server {
	return sb.Server
}
// init demonstrates the builder API by assembling and printing a Server.
//
// Fixes: the original passed a bare 30 to WithTimeOut, which is 30
// *nanoseconds* (almost certainly 30 seconds was intended), and called
// the built-in print(server), which does not compile for struct operands.
//
// NOTE(review): doing this in init() runs at package load time; consider
// moving the demo into an example or test.
func init() {
	sb := ServerBuilder{}
	server := sb.Create("127.0.0.1", 8080).
		WithProtocol("udp").
		WithMaxConn(1024).
		WithTimeOut(30 * time.Second).
		Build()
	fmt.Printf("%+v\n", server)
}
|
package main
import (
"testing"
)
// TestReadProfile is a placeholder for read-profile coverage.
//
// Fix: renamed from ReadProfileTest — `go test` only discovers functions
// named TestXxx, so the original never executed.
func TestReadProfile(t *testing.T) {
}
// TestCreateProfile builds a sample profile map and checks its contents.
//
// Fixes: renamed from CreateProfileTest so `go test` actually runs it,
// and the original declared `profile` without using it, which does not
// compile in Go.
func TestCreateProfile(t *testing.T) {
	profile := map[string]interface{}{
		"name": "reza andriyunanto",
		"age":  22,
	}
	if profile["name"] != "reza andriyunanto" {
		t.Fatalf("unexpected name: %v", profile["name"])
	}
	if profile["age"] != 22 {
		t.Fatalf("unexpected age: %v", profile["age"])
	}
}
|
// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package unistore
import (
"bytes"
"context"
"sync"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/store/mockstore/unistore/lockstore"
)
// rawHandler implements the raw KV RPCs on top of an in-memory store.
// All accesses go through mu: RLock for reads, Lock for writes.
type rawHandler struct {
	mu    sync.RWMutex
	store *lockstore.MemStore
}
// newRawHandler returns a rawHandler backed by a fresh in-memory store
// (4096 is the initial arena size passed to lockstore).
func newRawHandler() *rawHandler {
	return &rawHandler{
		store: lockstore.NewMemStore(4096),
	}
}
// RawGet returns the value stored at req.Key.
// NOTE(review): NotFound is derived from len(val) == 0, so a key stored
// with an empty value is indistinguishable from a missing key here.
func (h *rawHandler) RawGet(_ context.Context, req *kvrpcpb.RawGetRequest) (*kvrpcpb.RawGetResponse, error) {
	h.mu.RLock()
	defer h.mu.RUnlock()
	val := h.store.Get(req.Key, nil)
	return &kvrpcpb.RawGetResponse{
		Value:    val,
		NotFound: len(val) == 0,
	}, nil
}
// RawBatchGet looks up every requested key under a single read lock and
// returns one KvPair per key, in request order.
func (h *rawHandler) RawBatchGet(_ context.Context, req *kvrpcpb.RawBatchGetRequest) (*kvrpcpb.RawBatchGetResponse, error) {
	h.mu.RLock()
	defer h.mu.RUnlock()
	result := make([]*kvrpcpb.KvPair, 0, len(req.Keys))
	for _, key := range req.Keys {
		result = append(result, &kvrpcpb.KvPair{
			Key:   key,
			Value: h.store.Get(key, nil),
		})
	}
	return &kvrpcpb.RawBatchGetResponse{Pairs: result}, nil
}
// RawPut stores req.Value at req.Key, overwriting any existing value.
func (h *rawHandler) RawPut(_ context.Context, req *kvrpcpb.RawPutRequest) (*kvrpcpb.RawPutResponse, error) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.store.Put(req.Key, req.Value)
	return &kvrpcpb.RawPutResponse{}, nil
}
// RawBatchPut stores every pair in the request under one write lock.
func (h *rawHandler) RawBatchPut(_ context.Context, req *kvrpcpb.RawBatchPutRequest) (*kvrpcpb.RawBatchPutResponse, error) {
	h.mu.Lock()
	defer h.mu.Unlock()
	for _, pair := range req.Pairs {
		h.store.Put(pair.Key, pair.Value)
	}
	return &kvrpcpb.RawBatchPutResponse{}, nil
}
// RawDelete removes req.Key from the store (a no-op if absent).
func (h *rawHandler) RawDelete(_ context.Context, req *kvrpcpb.RawDeleteRequest) (*kvrpcpb.RawDeleteResponse, error) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.store.Delete(req.Key)
	return &kvrpcpb.RawDeleteResponse{}, nil
}
// RawBatchDelete removes every requested key under one write lock.
func (h *rawHandler) RawBatchDelete(_ context.Context, req *kvrpcpb.RawBatchDeleteRequest) (*kvrpcpb.RawBatchDeleteResponse, error) {
	h.mu.Lock()
	defer h.mu.Unlock()
	for _, key := range req.Keys {
		h.store.Delete(key)
	}
	return &kvrpcpb.RawBatchDeleteResponse{}, nil
}
// RawDeleteRange removes every key in [StartKey, EndKey).
func (h *rawHandler) RawDeleteRange(_ context.Context, req *kvrpcpb.RawDeleteRangeRequest) (*kvrpcpb.RawDeleteRangeResponse, error) {
	h.mu.Lock()
	defer h.mu.Unlock()
	it := h.store.NewIterator()
	var keys [][]byte
	// Collect the keys first, then delete — presumably to avoid mutating
	// the store while its iterator is live; safeCopy detaches each key
	// from the iterator's buffer.
	for it.Seek(req.StartKey); it.Valid(); it.Next() {
		if bytes.Compare(it.Key(), req.EndKey) >= 0 {
			break
		}
		keys = append(keys, safeCopy(it.Key()))
	}
	for _, key := range keys {
		h.store.Delete(key)
	}
	return &kvrpcpb.RawDeleteRangeResponse{}, nil
}
// RawScan returns up to req.Limit key/value pairs starting from
// req.StartKey. Forward scans cover [StartKey, EndKey); reverse scans
// walk backwards from just before StartKey (StartKey itself is excluded)
// down to and including EndKey.
//
// NOTE(review): only the forward branch guards with len(req.EndKey) > 0;
// the reverse branch compares against EndKey unconditionally (an empty
// EndKey compares <= every key, so the loop still terminates at Valid()).
func (h *rawHandler) RawScan(_ context.Context, req *kvrpcpb.RawScanRequest) (*kvrpcpb.RawScanResponse, error) {
	h.mu.RLock()
	defer h.mu.RUnlock()
	it := h.store.NewIterator()
	var pairs []*kvrpcpb.KvPair
	if !req.Reverse {
		for it.Seek(req.StartKey); it.Valid(); it.Next() {
			if len(pairs) >= int(req.Limit) {
				break
			}
			if len(req.EndKey) > 0 && bytes.Compare(it.Key(), req.EndKey) >= 0 {
				break
			}
			pairs = h.appendPair(pairs, it)
		}
	} else {
		for it.SeekForPrev(req.StartKey); it.Valid(); it.Prev() {
			// SeekForPrev may land exactly on StartKey; skip it so the
			// reverse scan is exclusive of the start.
			if bytes.Equal(it.Key(), req.StartKey) {
				continue
			}
			if len(pairs) >= int(req.Limit) {
				break
			}
			if bytes.Compare(it.Key(), req.EndKey) < 0 {
				break
			}
			pairs = h.appendPair(pairs, it)
		}
	}
	return &kvrpcpb.RawScanResponse{Kvs: pairs}, nil
}
// appendPair copies the iterator's current key/value (detaching them
// from the iterator's reusable buffers) and appends them to pairs.
func (h *rawHandler) appendPair(pairs []*kvrpcpb.KvPair, it *lockstore.Iterator) []*kvrpcpb.KvPair {
	return append(pairs, &kvrpcpb.KvPair{
		Key:   safeCopy(it.Key()),
		Value: safeCopy(it.Value()),
	})
}
// safeCopy returns a detached copy of val so the caller may retain it
// after the source buffer is reused. The result is never nil.
func safeCopy(val []byte) []byte {
	out := make([]byte, len(val))
	copy(out, val)
	return out
}
|
package cmd
import (
"errors"
"fmt"
bosherr "github.com/cloudfoundry/bosh-agent/errors"
boshlog "github.com/cloudfoundry/bosh-agent/logger"
boshsys "github.com/cloudfoundry/bosh-agent/system"
bmconfig "github.com/cloudfoundry/bosh-micro-cli/config"
bmcpideploy "github.com/cloudfoundry/bosh-micro-cli/cpideployer"
bmdeployer "github.com/cloudfoundry/bosh-micro-cli/deployer"
bmdepl "github.com/cloudfoundry/bosh-micro-cli/deployment"
bmstemcell "github.com/cloudfoundry/bosh-micro-cli/stemcell"
bmui "github.com/cloudfoundry/bosh-micro-cli/ui"
bmvalidation "github.com/cloudfoundry/bosh-micro-cli/validation"
)
// deployCmd implements the `deploy` CLI command: it validates its inputs,
// deploys the CPI release, uploads the stemcell, then deploys Microbosh.
type deployCmd struct {
	ui                     bmui.UI
	userConfig             bmconfig.UserConfig
	fs                     boshsys.FileSystem
	cpiManifestParser      bmdepl.ManifestParser
	boshManifestParser     bmdepl.ManifestParser
	cpiDeployer            bmcpideploy.CpiDeployer
	stemcellManagerFactory bmstemcell.ManagerFactory
	deployer               bmdeployer.Deployer
	logger                 boshlog.Logger
	logTag                 string // tag prepended to every log line from this command
}
// NewDeployCmd wires a deployCmd from its collaborators. All dependencies
// are injected; the only value set here is the logging tag.
func NewDeployCmd(
	ui bmui.UI,
	userConfig bmconfig.UserConfig,
	fs boshsys.FileSystem,
	cpiManifestParser bmdepl.ManifestParser,
	boshManifestParser bmdepl.ManifestParser,
	cpiDeployer bmcpideploy.CpiDeployer,
	stemcellManagerFactory bmstemcell.ManagerFactory,
	deployer bmdeployer.Deployer,
	logger boshlog.Logger,
) *deployCmd {
	return &deployCmd{
		ui:                     ui,
		userConfig:             userConfig,
		fs:                     fs,
		cpiManifestParser:      cpiManifestParser,
		boshManifestParser:     boshManifestParser,
		cpiDeployer:            cpiDeployer,
		stemcellManagerFactory: stemcellManagerFactory,
		deployer:               deployer,
		logger:                 logger,
		logTag:                 "deployCmd",
	}
}
// Name returns the CLI name under which this command is registered.
func (c *deployCmd) Name() string {
	return "deploy"
}
// Run executes the deploy: validate args, parse the CPI and Bosh
// manifests (both read from the same deployment file), deploy the CPI,
// upload the stemcell through a manager built on the resulting cloud,
// and finally deploy Microbosh. Each failure is wrapped with the step
// that produced it.
func (c *deployCmd) Run(args []string) error {
	releaseTarballPath, stemcellTarballPath, err := c.validateDeployInputs(args)
	if err != nil {
		return err
	}
	cpiDeployment, err := c.cpiManifestParser.Parse(c.userConfig.DeploymentFile)
	if err != nil {
		return bosherr.WrapError(err, "Parsing CPI deployment manifest `%s'", c.userConfig.DeploymentFile)
	}
	boshDeployment, err := c.boshManifestParser.Parse(c.userConfig.DeploymentFile)
	if err != nil {
		return bosherr.WrapError(err, "Parsing Bosh deployment manifest `%s'", c.userConfig.DeploymentFile)
	}
	cloud, err := c.cpiDeployer.Deploy(cpiDeployment, releaseTarballPath)
	if err != nil {
		return bosherr.WrapError(err, "Deploying CPI `%s'", releaseTarballPath)
	}
	stemcellManager := c.stemcellManagerFactory.NewManager(cloud)
	stemcell, stemcellCID, err := stemcellManager.Upload(stemcellTarballPath)
	if err != nil {
		return bosherr.WrapError(err, "Uploading stemcell from `%s'", stemcellTarballPath)
	}
	err = c.deployer.Deploy(
		cloud,
		boshDeployment,
		stemcell.ApplySpec,
		cpiDeployment.Registry,
		cpiDeployment.SSHTunnel,
		cpiDeployment.Mbus,
		stemcellCID,
	)
	if err != nil {
		return bosherr.WrapError(err, "Deploying Microbosh")
	}
	// register the stemcell
	return nil
}
type Deployment struct{}
// validateDeployInputs validates the presence of inputs (stemcell tarball, cpi release tarball)
// and of the configured deployment manifest. It returns the release
// tarball path and stemcell tarball path, or an error for the first
// missing input. Every failure is reported to the UI before returning.
func (c *deployCmd) validateDeployInputs(args []string) (string, string, error) {
	// Exactly two positional arguments: <cpi-release> <stemcell>.
	if len(args) != 2 {
		c.ui.Error("Invalid usage - deploy command requires exactly 2 arguments")
		c.ui.Sayln("Expected usage: bosh-micro deploy <cpi-release-tarball> <stemcell-tarball>")
		c.logger.Error(c.logTag, "Invalid arguments: ")
		return "", "", errors.New("Invalid usage - deploy command requires exactly 2 arguments")
	}
	releaseTarballPath := args[0]
	c.logger.Info(c.logTag, "Validating release tarball `%s'", releaseTarballPath)
	fileValidator := bmvalidation.NewFileValidator(c.fs)
	err := fileValidator.Exists(releaseTarballPath)
	if err != nil {
		c.ui.Error(fmt.Sprintf("CPI release `%s' does not exist", releaseTarballPath))
		return "", "", bosherr.WrapError(err, "Checking CPI release `%s' existence", releaseTarballPath)
	}
	stemcellTarballPath := args[1]
	c.logger.Info(c.logTag, "Validating stemcell tarball `%s'", stemcellTarballPath)
	err = fileValidator.Exists(stemcellTarballPath)
	if err != nil {
		c.ui.Error(fmt.Sprintf("Stemcell `%s' does not exist", stemcellTarballPath))
		return "", "", bosherr.WrapError(err, "Checking stemcell `%s' existence", stemcellTarballPath)
	}
	// validate current state: 'microbosh' deployment set
	if len(c.userConfig.DeploymentFile) == 0 {
		c.ui.Error("No deployment set")
		return "", "", bosherr.New("No deployment set")
	}
	c.logger.Info(c.logTag, "Checking for deployment `%s'", c.userConfig.DeploymentFile)
	err = fileValidator.Exists(c.userConfig.DeploymentFile)
	if err != nil {
		c.ui.Error(fmt.Sprintf("Deployment manifest path `%s' does not exist", c.userConfig.DeploymentFile))
		return "", "", bosherr.WrapError(err, "Reading deployment manifest for deploy")
	}
	return releaseTarballPath, stemcellTarballPath, nil
}
|
package oper_log
import (
"errors"
"xorm.io/builder"
"yj-app/app/yjgframe/db"
"yj-app/app/yjgframe/utils/excel"
"yj-app/app/yjgframe/utils/page"
)
// SelectPageReq carries the filter, paging and ordering parameters for
// operation-log list queries. (Comments translated from Chinese; several
// originals were copy-paste mislabels.)
type SelectPageReq struct {
	Title         string `form:"title"`         // system module title (LIKE match)
	OperName      string `form:"operName"`      // operator name (LIKE match)
	BusinessTypes int    `form:"businessTypes"` // business/operation type code
	Status        string `form:"status"`        // operation status
	BeginTime     string `form:"beginTime"`     // range start time
	EndTime       string `form:"endTime"`       // range end time
	PageNum       int    `form:"pageNum"`       // current page number
	PageSize      int    `form:"pageSize"`      // rows per page
	OrderByColumn string `form:"orderByColumn"` // sort column
	IsAsc         string `form:"isAsc"`         // sort direction
}
// SelectPageList queries the operation log with the given filters and
// returns one page of rows plus paging metadata.
//
// Fixes: the BusinessTypes filter previously reused the "status" column
// (copy-paste — the export query filters on business_type), and the
// error from Find was silently discarded.
func SelectPageList(param *SelectPageReq) (*[]Entity, *page.Paging, error) {
	db := db.Instance().Engine()
	p := new(page.Paging)
	if db == nil {
		return nil, p, errors.New("获取数据库连接失败")
	}
	model := db.Table(TableName())
	if param != nil {
		if param.Title != "" {
			model.Where("title like ?", "%"+param.Title+"%")
		}
		if param.OperName != "" {
			model.Where("oper_name like ?", "%"+param.OperName+"%")
		}
		if param.Status != "" {
			model.Where("status = ?", param.Status)
		}
		if param.BusinessTypes >= 0 {
			// was "status = ?" — filter on business_type like SelectExportList.
			model.Where("business_type = ?", param.BusinessTypes)
		}
		if param.BeginTime != "" {
			model.Where("date_format(oper_time,'%y%m%d') >= date_format(?,'%y%m%d')", param.BeginTime)
		}
		if param.EndTime != "" {
			model.Where("date_format(oper_time,'%y%m%d') <= date_format(?,'%y%m%d')", param.EndTime)
		}
	}
	// Count on a clone so the paged query below keeps its conditions.
	totalModel := model.Clone()
	total, err := totalModel.Count()
	if err != nil {
		return nil, p, errors.New("读取行数失败")
	}
	p = page.CreatePaging(param.PageNum, param.PageSize, int(total))
	if param.OrderByColumn != "" {
		// NOTE(review): the order-by column/direction are concatenated
		// unescaped — callers must whitelist them to prevent SQL injection.
		model.OrderBy(param.OrderByColumn + " " + param.IsAsc + " ")
	}
	model.Limit(p.Pagesize, p.StartNum)
	var result []Entity
	err = model.Find(&result)
	if err != nil {
		return nil, p, err
	}
	return &result, p, nil
}
// SelectExportList runs the filtered operation-log query and writes the
// result to an Excel file, returning the file path.
//
// Fix: the error from QuerySliceString was silently overwritten by the
// DownlaodExcel call; it is now checked before exporting.
func SelectExportList(param *SelectPageReq, head, col []string) (string, error) {
	db := db.Instance().Engine()
	if db == nil {
		return "", errors.New("获取数据库连接失败")
	}
	build := builder.Select(col...).From(TableName())
	if param != nil {
		if param.Title != "" {
			build.Where(builder.Like{"title", param.Title})
		}
		if param.OperName != "" {
			build.Where(builder.Like{"oper_name", param.OperName})
		}
		if param.Status != "" {
			build.Where(builder.Eq{"status": param.Status})
		}
		if param.BusinessTypes >= 0 {
			build.Where(builder.Eq{"business_type": param.BusinessTypes})
		}
		if param.BeginTime != "" {
			build.Where(builder.Gte{"date_format(create_time,'%y%m%d')": "date_format('" + param.BeginTime + "','%y%m%d')"})
		}
		if param.EndTime != "" {
			build.Where(builder.Lte{"date_format(create_time,'%y%m%d')": "date_format('" + param.EndTime + "','%y%m%d')"})
		}
	}
	sqlStr, _, _ := build.ToSQL()
	arr, err := db.SQL(sqlStr).QuerySliceString()
	if err != nil {
		return "", err
	}
	return excel.DownlaodExcel(head, arr)
}
// DeleteAll clears the sys_oper_log table and returns the number of rows
// removed.
//
// Fix: the Exec error was previously ignored, so a failed statement left
// rs nil and the RowsAffected call would panic.
func DeleteAll() (int64, error) {
	db := db.Instance().Engine()
	if db == nil {
		return 0, errors.New("获取数据库连接失败")
	}
	rs, err := db.Exec("delete from sys_oper_log")
	if err != nil {
		return 0, err
	}
	return rs.RowsAffected()
}
|
package triangle
import (
"sort"
"math"
)
const testVersion = 3
func KindFromSides(a, b, c float64) Kind {
// If any of the sides is not a number then not a triangle.
if math.IsNaN(a) || math.IsNaN(b) || math.IsNaN(c) {
return NaT
}
// If any of the sides is positive infinity then not a triangle.
if math.IsInf(a, 1) || math.IsInf(b, 1) || math.IsInf(c, 1) {
return NaT
}
// If any of the sides is negative infinity then not a triangle.
if math.IsInf(a, -1) || math.IsInf(b, -1) || math.IsInf(c, -1) {
return NaT
}
// If any of the sides is 0 then not a triangle.
if a <= 0 || b <= 0 || c <= 0 {
return NaT
}
// If fails triangle equality then not a triangle.
s := []float64{a, b, c}
sort.Float64s(s)
if s[0] + s[1] < s[2] {
return NaT
}
// If all three sides are the same then Equilateral.
if a == b && b == c {
return Equ
}
// If all three sides are not the same then Scalene
if a != b && b != c && a != c {
return Sca
}
// Anything else is Isosceles
return Iso
}
// Notice KindFromSides() returns this type. Pick a suitable data type.
type Kind int
// Pick values for the following identifiers used by the test program.
const (
NaT Kind = iota // not a triangle - NaT to be 0
Equ // equilateral - Equ to be 1
Iso // isosceles - Iso to be 2
Sca // scalene - Sca to be 3
)
// Organize your code for readability.
|
package blueprint
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDeepCopy verifies that Blueprint.DeepCopy produces an equal but
// independent copy: mutating either side must not affect the other.
//
// NOTE(review): testify's Equalf signature is (t, expected, actual, ...);
// the two mutation checks below pass the live value first and the literal
// second, which only affects failure-message wording, not correctness.
func TestDeepCopy(t *testing.T) {
	bpOrig := Blueprint{
		Name:        "deepcopy-test",
		Description: "Testing DeepCopy function",
		Version:     "0.0.1",
		Packages: []Package{
			{Name: "dep-package1", Version: "*"}},
		Modules: []Package{
			{Name: "dep-package2", Version: "*"}},
	}
	bpCopy := bpOrig.DeepCopy()
	require.Equalf(t, bpOrig, bpCopy, "Blueprints.DeepCopy is different from original.")
	// Modify the copy
	bpCopy.Packages[0].Version = "1.2.3"
	require.Equalf(t, bpOrig.Packages[0].Version, "*", "Blueprint.DeepCopy failed, original modified")
	// Modify the original
	bpOrig.Packages[0].Version = "42.0"
	require.Equalf(t, bpCopy.Packages[0].Version, "1.2.3", "Blueprint.DeepCopy failed, copy modified.")
}
// TestBlueprintInitialize table-tests Blueprint.Initialize's version
// validation: empty and well-formed X.Y.Z versions succeed; anything
// else must return an error.
//
// Fix: the assertion message contained the typo "returnted".
func TestBlueprintInitialize(t *testing.T) {
	cases := []struct {
		NewBlueprint  Blueprint
		ExpectedError bool
	}{
		{Blueprint{Name: "bp-test-1", Description: "Empty version", Version: ""}, false},
		{Blueprint{Name: "bp-test-2", Description: "Invalid version 1", Version: "0"}, true},
		{Blueprint{Name: "bp-test-2", Description: "Invalid version 2", Version: "0.0"}, true},
		{Blueprint{Name: "bp-test-3", Description: "Invalid version 3", Version: "0.0.0.0"}, true},
		{Blueprint{Name: "bp-test-4", Description: "Invalid version 4", Version: "0.a.0"}, true},
		{Blueprint{Name: "bp-test-5", Description: "Invalid version 5", Version: "foo"}, true},
		{Blueprint{Name: "bp-test-7", Description: "Zero version", Version: "0.0.0"}, false},
		{Blueprint{Name: "bp-test-8", Description: "X.Y.Z version", Version: "2.1.3"}, false},
	}
	for _, c := range cases {
		bp := c.NewBlueprint
		err := bp.Initialize()
		assert.Equalf(t, (err != nil), c.ExpectedError, "Initialize(%#v) returned an unexpected error: %#v", c.NewBlueprint, err)
	}
}
// TestBumpVersion table-tests Blueprint.BumpVersion: when the old
// version is malformed or differs from the current one, the version is
// left as-is; when it matches, the patch component is incremented.
func TestBumpVersion(t *testing.T) {
	cases := []struct {
		NewBlueprint    Blueprint
		OldVersion      string
		ExpectedVersion string
	}{
		{Blueprint{Name: "bp-test-1", Description: "Empty version", Version: "0.0.1"}, "", "0.0.1"},
		{Blueprint{Name: "bp-test-2", Description: "Invalid version 1", Version: "0.0.1"}, "0", "0.0.1"},
		{Blueprint{Name: "bp-test-3", Description: "Invalid version 2", Version: "0.0.1"}, "0.0.0.0", "0.0.1"},
		{Blueprint{Name: "bp-test-4", Description: "Invalid version 3", Version: "0.0.1"}, "0.a.0", "0.0.1"},
		{Blueprint{Name: "bp-test-5", Description: "Invalid version 4", Version: "0.0.1"}, "foo", "0.0.1"},
		{Blueprint{Name: "bp-test-6", Description: "Invalid version 5", Version: "0.0.1"}, "0.0", "0.0.1"},
		{Blueprint{Name: "bp-test-8", Description: "Same version", Version: "4.2.0"}, "4.2.0", "4.2.1"},
	}
	for _, c := range cases {
		bp := c.NewBlueprint
		err := bp.Initialize()
		require.NoError(t, err)
		bp.BumpVersion(c.OldVersion)
		assert.Equalf(t, c.ExpectedVersion, bp.Version, "BumpVersion(%#v) is expected to return %#v, but instead returned %#v.", c.OldVersion, c.ExpectedVersion, bp.Version)
	}
}
// TestGetPackages checks that GetPackages merges versioned packages,
// modules, "@"-prefixed groups, and the implicit default kernel.
func TestGetPackages(t *testing.T) {
	bp := Blueprint{
		Name:        "packages-test",
		Description: "Testing GetPackages function",
		Version:     "0.0.1",
		Packages: []Package{
			{Name: "tmux", Version: "1.2"}},
		Modules: []Package{
			{Name: "openssh-server", Version: "*"}},
		Groups: []Group{
			{Name: "anaconda-tools"}},
	}
	got := bp.GetPackages()
	want := []string{"tmux-1.2", "openssh-server", "@anaconda-tools", "kernel"}
	assert.ElementsMatch(t, want, got)
}
// TestKernelNameCustomization exercises GetPackages' kernel handling in
// three scenarios: a kernel named only via Customizations, a kernel
// listed only as a package (default kernel still added), and both at
// once (both names included, even when identical).
func TestKernelNameCustomization(t *testing.T) {
	kernels := []string{"kernel", "kernel-debug", "kernel-rt"}
	for _, k := range kernels {
		// kernel in customizations
		bp := Blueprint{
			Name:        "kernel-test",
			Description: "Testing GetPackages function with custom Kernel",
			Version:     "0.0.1",
			Packages: []Package{
				{Name: "tmux", Version: "1.2"}},
			Modules: []Package{
				{Name: "openssh-server", Version: "*"}},
			Groups: []Group{
				{Name: "anaconda-tools"}},
			Customizations: &Customizations{
				Kernel: &KernelCustomization{
					Name: k,
				},
			},
		}
		Received_packages := bp.GetPackages()
		assert.ElementsMatch(t, []string{"tmux-1.2", "openssh-server", "@anaconda-tools", k}, Received_packages)
	}
	for _, k := range kernels {
		// kernel in packages
		bp := Blueprint{
			Name:        "kernel-test",
			Description: "Testing GetPackages function with custom Kernel",
			Version:     "0.0.1",
			Packages: []Package{
				{Name: "tmux", Version: "1.2"},
				{Name: k},
			},
			Modules: []Package{
				{Name: "openssh-server", Version: "*"}},
			Groups: []Group{
				{Name: "anaconda-tools"}},
		}
		Received_packages := bp.GetPackages()
		// adds default kernel as well
		assert.ElementsMatch(t, []string{"tmux-1.2", k, "openssh-server", "@anaconda-tools", "kernel"}, Received_packages)
	}
	for _, bk := range kernels {
		for _, ck := range kernels {
			// all combos of both kernels
			bp := Blueprint{
				Name:        "kernel-test",
				Description: "Testing GetPackages function with custom Kernel",
				Version:     "0.0.1",
				Packages: []Package{
					{Name: "tmux", Version: "1.2"},
					{Name: bk},
				},
				Modules: []Package{
					{Name: "openssh-server", Version: "*"}},
				Groups: []Group{
					{Name: "anaconda-tools"}},
				Customizations: &Customizations{
					Kernel: &KernelCustomization{
						Name: ck,
					},
				},
			}
			Received_packages := bp.GetPackages()
			// both kernels are included, even if they're the same
			assert.ElementsMatch(t, []string{"tmux-1.2", bk, "openssh-server", "@anaconda-tools", ck}, Received_packages)
		}
	}
}
|
// This file is subject to a 1-clause BSD license.
// Its contents can be found in the enclosed LICENSE file.
package evdev
import (
"errors"
"fmt"
"os"
)
// List of device types.
//
// These are used to look for specific input device types
// using evdev.Find().
//
// The returned devices may not necessarily be an actual
// keyboard or mouse, etc. Just a device which can behave like one.
// For instance: Mouse may return a trackpad, a multi-touch screen
// and an actual mouse if all of these happen to be connected.
// It is up to the host to figure out which one to use.
const (
	Keyboard = iota // devices that behave like a keyboard
	Mouse           // devices that behave like a mouse (trackpads, touch screens, ...)
	Joystick        // devices that behave like a joystick
)
// Find returns a list of all attached devices which qualify as the
// given device type (Keyboard, Mouse or Joystick).
//
// It probes /dev/input/eventN nodes in sequence; a node that does not
// exist is the normal end-of-enumeration condition (err is cleared).
// On any other error the deferred cleanup closes every device opened so
// far and returns a nil list.
//
// Fix: removed the unreachable `return` after the infinite for loop
// (flagged by `go vet`; every exit path returns from inside the loop).
func Find(devtype int) (list []*Device, err error) {
	// Ensure we clean up properly if something goes wrong.
	defer func() {
		if err != nil {
			for _, dev := range list {
				dev.Close()
			}
			list = nil
		}
	}()
	var testFunc func(*Device) bool
	switch devtype {
	case Keyboard:
		testFunc = IsKeyboard
	case Mouse:
		testFunc = IsMouse
	case Joystick:
		testFunc = IsJoystick
	default:
		err = errors.New("Invalid device type")
		return
	}
	for count := 0; ; count++ {
		node := fmt.Sprintf("/dev/input/event%d", count)
		var dev *Device
		dev, err = Open(node)
		if err != nil {
			if os.IsNotExist(err) {
				// Ran past the last event node: normal termination.
				err = nil
			}
			return
		}
		if testFunc(dev) {
			list = append(list, dev)
		}
	}
}
// IsKeyboard returns true if the given device qualifies as a keyboard.
// It checks the device's event types for EvKeys and EvLed support
// (presumably LEDs distinguish keyboards from other key-emitting devices).
func IsKeyboard(dev *Device) bool {
	return dev.Test(dev.EventTypes(), EvKeys, EvLed)
}
// IsMouse returns true if the given device qualifies as a mouse:
// it must support key events and relative-axis events.
func IsMouse(dev *Device) bool {
	return dev.Test(dev.EventTypes(), EvKeys, EvRelative)
}
// IsJoystick returns true if the given device qualifies as a joystick:
// it must support key events and absolute-axis events.
func IsJoystick(dev *Device) bool {
	return dev.Test(dev.EventTypes(), EvKeys, EvAbsolute)
}
|
/*
* @lc app=leetcode id=23 lang=golang
*
* [23] Merge k Sorted Lists
*/
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// mergeKLists merges k sorted singly-linked lists into one sorted list
// by repeatedly detaching the smallest head among all lists. Runs in
// O(k*N) time with O(1) extra space; ties keep the earlier list's node,
// preserving the original's stable behavior.
//
// Idiom fixes: replaced `for is_done == false` with an infinite loop and
// early return, and renamed snake_case locals to Go-style camelCase.
func mergeKLists(lists []*ListNode) *ListNode {
	var head, tail *ListNode
	for {
		// Index of the non-nil head with the smallest value, -1 if none.
		best := -1
		for i, n := range lists {
			if n != nil && (best < 0 || n.Val < lists[best].Val) {
				best = i
			}
		}
		if best < 0 {
			// All input lists exhausted.
			return head
		}
		// Detach the chosen node and append it to the merged list.
		node := lists[best]
		lists[best] = node.Next
		node.Next = nil
		if tail == nil {
			head, tail = node, node
		} else {
			tail.Next = node
			tail = node
		}
	}
}
|
package dao
import (
"fmt"
"github.com/xormplus/xorm"
"go.uber.org/zap"
"mix/test/codes"
entity "mix/test/entity/core/transaction"
mapper "mix/test/mapper/core/transaction"
"mix/test/utils/status"
)
// CreateAudit inserts an audit row and returns its auto-generated id.
// Failures are logged and returned.
func (p *Dao) CreateAudit(logger *zap.Logger, session *xorm.Session, item *entity.Audit) (id int64, err error) {
	res, err := mapper.CreateAudit(session, item)
	if err != nil {
		logger.Error("Call mapper.CreateAudit error", zap.Error(err))
		return
	}
	id, err = res.LastInsertId()
	if err != nil {
		logger.Error("Get id error", zap.Error(err))
		return
	}
	return
}
// GetAudit fetches the audit row with the given id; a missing row is not
// an error (nil item, nil err — see MustGetAudit for the strict variant).
func (p *Dao) GetAudit(logger *zap.Logger, session *xorm.Session, id int64) (item *entity.Audit, err error) {
	item, err = mapper.GetAudit(session, id)
	if err != nil {
		logger.Error("Call mapper.GetAudit error", zap.Error(err))
		return
	}
	return
}
// MustGetAudit is GetAudit but treats a missing row as an error
// (codes.AuditNotFound).
func (p *Dao) MustGetAudit(logger *zap.Logger, session *xorm.Session, id int64) (item *entity.Audit, err error) {
	item, err = p.GetAudit(logger, session, id)
	if err != nil {
		return
	}
	if item == nil {
		err = status.Code(codes.AuditNotFound)
		logger.Error(
			"Get audit error",
			zap.Error(err),
			zap.Int64("id", id),
		)
		return
	}
	return
}
// GetAuditList returns all audit rows.
func (p *Dao) GetAuditList(logger *zap.Logger, session *xorm.Session) (items []*entity.Audit, err error) {
	items, err = mapper.GetAuditList(session)
	if err != nil {
		logger.Error("Call mapper.GetAuditList error", zap.Error(err))
		return
	}
	return
}
// RemoveAudit deletes the audit row with the given id; deleting a
// non-existent row is not an error (see MustRemoveAudit).
func (p *Dao) RemoveAudit(logger *zap.Logger, session *xorm.Session, id int64) (err error) {
	_, err = mapper.RemoveAudit(session, id)
	if err != nil {
		logger.Error("Call mapper.RemoveAudit error", zap.Error(err))
		return
	}
	return
}
// MustRemoveAudit deletes the audit row with the given id and fails
// unless exactly one row was affected.
//
// Fix: the affected-count error message said "update affected error" —
// a copy-paste from MustUpdateAudit; this path deletes.
func (p *Dao) MustRemoveAudit(logger *zap.Logger, session *xorm.Session, id int64) (err error) {
	res, err := mapper.RemoveAudit(session, id)
	if err != nil {
		logger.Error("Call mapper.RemoveAudit error", zap.Error(err))
		return
	}
	affected, err := res.RowsAffected()
	if err != nil {
		logger.Error("Get affected error", zap.Error(err))
		return
	}
	if affected != 1 {
		err = fmt.Errorf("remove affected error")
		logger.Error("Call mapper.RemoveAudit error",
			zap.Int64("affected", affected),
			zap.Int64("id", id),
			zap.Error(err))
		return
	}
	return
}
// UpdateAudit updates the audit row; a zero-row update is not an error
// (see MustUpdateAudit).
func (p *Dao) UpdateAudit(logger *zap.Logger, engine *xorm.EngineGroup, session *xorm.Session, item *entity.Audit) (err error) {
	_, err = mapper.UpdateAudit(engine, session, item)
	if err != nil {
		logger.Error("Call mapper.UpdateAudit error", zap.Error(err))
		return
	}
	return
}
// MustUpdateAudit updates the audit row and fails unless exactly one row
// was affected.
func (p *Dao) MustUpdateAudit(logger *zap.Logger, engine *xorm.EngineGroup, session *xorm.Session, item *entity.Audit) (err error) {
	res, err := mapper.UpdateAudit(engine, session, item)
	if err != nil {
		logger.Error("Call mapper.UpdateAudit error", zap.Error(err))
		return
	}
	affected, err := res.RowsAffected()
	if err != nil {
		logger.Error("Get affected error", zap.Error(err))
		return
	}
	if affected != 1 {
		err = fmt.Errorf("update affected error")
		logger.Error("Call mapper.UpdateAudit error",
			zap.Int64("affected", affected),
			zap.Int64("item.Id", item.Id),
			zap.Error(err))
		return
	}
	return
}
// GetAuditByAddressId fetches the audit row keyed by address id and old
// balance version; a missing row is not an error.
func (p *Dao) GetAuditByAddressId(logger *zap.Logger, session *xorm.Session, addressId int64, oldBalanceVersion int64) (item *entity.Audit, err error) {
	item, err = mapper.GetAuditByAddressId(session, addressId, oldBalanceVersion)
	if err != nil {
		logger.Error("Call mapper.GetAuditByAddressId error", zap.Error(err))
		return
	}
	return
}
// MustGetAuditByAddressId is GetAuditByAddressId but treats a missing
// row as an error (codes.AuditNotFound).
func (p *Dao) MustGetAuditByAddressId(logger *zap.Logger, session *xorm.Session, addressId int64, oldBalanceVersion int64) (item *entity.Audit, err error) {
	item, err = p.GetAuditByAddressId(logger, session, addressId, oldBalanceVersion)
	if err != nil {
		return
	}
	if item == nil {
		err = status.Code(codes.AuditNotFound)
		logger.Error(
			"Get audit error",
			zap.Error(err),
			zap.Int64("addressId", addressId),
			zap.Int64("oldBalanceVersion", oldBalanceVersion),
		)
		return
	}
	return
}
// UpdateAuditByAddressId updates the audit row keyed by address id; a
// zero-row update is not an error (see the Must variant).
func (p *Dao) UpdateAuditByAddressId(logger *zap.Logger, engine *xorm.EngineGroup, session *xorm.Session, item *entity.Audit) (err error) {
	_, err = mapper.UpdateAuditByAddressId(engine, session, item)
	if err != nil {
		logger.Error("Call mapper.UpdateAuditByAddressId error", zap.Error(err))
		return
	}
	return
}
// MustUpdateAuditByAddressId updates the audit row keyed by address id
// and fails unless exactly one row was affected.
func (p *Dao) MustUpdateAuditByAddressId(logger *zap.Logger, engine *xorm.EngineGroup, session *xorm.Session, item *entity.Audit) (err error) {
	res, err := mapper.UpdateAuditByAddressId(engine, session, item)
	if err != nil {
		logger.Error("Call mapper.UpdateAuditByAddressId error", zap.Error(err))
		return
	}
	affected, err := res.RowsAffected()
	if err != nil {
		logger.Error("Get affected error", zap.Error(err))
		return
	}
	if affected != 1 {
		err = fmt.Errorf("update affected error")
		logger.Error("Call mapper.UpdateAuditByAddressId error",
			zap.Int64("affected", affected),
			zap.Int64("item.AddressId", item.AddressId),
			zap.Int64("item.OldBalanceVersion", item.OldBalanceVersion),
			zap.Error(err))
		return
	}
	return
}
// RemoveAuditByAddressId deletes the audit row keyed by address id and
// old balance version; a zero-row delete is not an error.
func (p *Dao) RemoveAuditByAddressId(logger *zap.Logger, session *xorm.Session, addressId int64, oldBalanceVersion int64) (err error) {
	_, err = mapper.RemoveAuditByAddressId(session, addressId, oldBalanceVersion)
	if err != nil {
		logger.Error("Call mapper.RemoveAuditByAddressId error", zap.Error(err))
		return
	}
	return
}
// MustRemoveAuditByAddressId deletes the audit row keyed by address id
// and old balance version, and fails unless exactly one row was
// affected.
//
// Fix: the affected-count error message said "update affected error" —
// a copy-paste from the update path; this path deletes.
func (p *Dao) MustRemoveAuditByAddressId(logger *zap.Logger, session *xorm.Session, addressId int64, oldBalanceVersion int64) (err error) {
	res, err := mapper.RemoveAuditByAddressId(session, addressId, oldBalanceVersion)
	if err != nil {
		logger.Error("Call mapper.RemoveAuditByAddressId error", zap.Error(err))
		return
	}
	affected, err := res.RowsAffected()
	if err != nil {
		logger.Error("Get affected error", zap.Error(err))
		return
	}
	if affected != 1 {
		err = fmt.Errorf("remove affected error")
		logger.Error("Call mapper.RemoveAuditByAddressId error",
			zap.Int64("affected", affected),
			zap.Int64("addressId", addressId),
			zap.Int64("oldBalanceVersion", oldBalanceVersion),
			zap.Error(err))
		return
	}
	return
}
|
package socketman
import (
"crypto/aes"
"crypto/cipher"
"io"
)
// AESPool hands out matched AES-OFB stream readers and writers that all
// share one cipher block built from a single key.
// TODO: recycle streams ?
type AESPool struct {
	block cipher.Block
}

// NewAESPool instantiates a pool of aes encryptor/decryptor.
func NewAESPool(key []byte) (*AESPool, error) {
	b, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	p := &AESPool{block: b}
	return p, nil
}

// stream instantiates an aes cipher.Stream with block.
// If the key is unique for each ciphertext, then it's ok to use a zero IV.
func (p *AESPool) stream() cipher.Stream {
	var iv [aes.BlockSize]byte
	return cipher.NewOFB(p.block, iv[:])
}

// Reader will return a new StreamReader that can decode
// using AESPool key.
func (p *AESPool) Reader(r io.Reader) *cipher.StreamReader {
	return &cipher.StreamReader{S: p.stream(), R: r}
}

// Writer will return a new StreamWriter that can decode
// using AESPool key.
func (p *AESPool) Writer(w io.Writer) *cipher.StreamWriter {
	return &cipher.StreamWriter{S: p.stream(), W: w}
}
// func (p *AESPool) PutReader(r io.Reader) {
// if r is a *cipher.StreamReader ...
// func (p *AESPool) PutWriter(r io.Writer) {
// if r is a *cipher.StreamWriter ...
|
package main
import "fmt"
// main prints a marker identifying which branch this binary was built from.
func main() {
	const message = "another-branch branch (default branch)"
	fmt.Println(message)
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package unstructured provides a generic unstructured client to invoke DCL.
package unstructured
import (
"context"
"errors"
"fmt"
"sync"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/iam"
)
var (
	// registrations is the global list of resources made available through
	// the generic Get/Apply/Delete/... entry points below.
	registrations []RegisteredResource
	// registrationMutex guards registrations.
	registrationMutex sync.RWMutex
	// ErrNoSuchMethod is the error returned when calling Get, List, Apply, or Delete
	// on an API that doesn't support the requested method.
	ErrNoSuchMethod = errors.New("non-existent API method")
)

// ServiceTypeVersion describes a single DCL resource.
type ServiceTypeVersion struct {
	// Service to which this resource belongs, e.g., "compute".
	Service string
	// Type of the resource, e.g., "ComputeInstance"
	Type string
	// Version of the resource, e.g., "ga". There may be multiple versions of the
	// same Type and Service in a single DCL build.
	Version string
}

// Resource is the untyped representation of a typed DCL resource.
type Resource struct {
	// ResourceWithPolicy is included so that the Resource struct can interact with the
	// IAMClient (IAMClient methods expect a ResourceWithPolicy)
	iam.ResourceWithPolicy
	// Object is a JSON compatible map with string, float, int,
	// bool, []interface{}, or map[string]interface{} children.
	Object map[string]interface{}
	// STV indicates the type of this resource
	STV ServiceTypeVersion
}

// RegisteredResource is used by generated unstructured library code to
// make type-specific operations available in a type-agnostic manner.
type RegisteredResource interface {
	// STV indicates the type of this resource.
	STV() ServiceTypeVersion
	// Get provides an indirection for the type-specific Get call.
	Get(ctx context.Context, config *dcl.Config, r *Resource) (*Resource, error)
	// Apply provides an indirection for the type-specific Apply call.
	Apply(ctx context.Context, config *dcl.Config, r *Resource, opts ...dcl.ApplyOption) (*Resource, error)
	// HasDiff provides an indirection for the type-specific HasDiff call.
	HasDiff(ctx context.Context, config *dcl.Config, r *Resource, opts ...dcl.ApplyOption) (bool, error)
	// Delete provides an indirection for the type-specific Delete call.
	Delete(ctx context.Context, config *dcl.Config, r *Resource) error
	// GetPolicy provides an indirection for the type-specific GetPolicy call.
	GetPolicy(ctx context.Context, config *dcl.Config, r *Resource) (*Resource, error)
	// SetPolicy provides an indirection for the type-specific SetPolicy call.
	SetPolicy(ctx context.Context, config *dcl.Config, r *Resource, p *Resource) (*Resource, error)
	// SetPolicyWithEtag provides an indirection for the type-specific SetPolicy call.
	SetPolicyWithEtag(ctx context.Context, config *dcl.Config, r *Resource, p *Resource) (*Resource, error)
	// GetPolicyMember provides an indirection for the type-specific GetPolicyMember call.
	GetPolicyMember(ctx context.Context, config *dcl.Config, r *Resource, role, member string) (*Resource, error)
	// SetPolicyMember provides an indirection for the type-specific SetPolicyMember call.
	SetPolicyMember(ctx context.Context, config *dcl.Config, r *Resource, m *Resource) (*Resource, error)
	// DeletePolicyMember provides an indirection for the type-specific DeletePolicyMember call.
	DeletePolicyMember(ctx context.Context, config *dcl.Config, r *Resource, m *Resource) error
	// ID returns a string uniquely identifying this resource.
	ID(r *Resource) (string, error)
}
// Equals reports whether stv and o name the same service, type, and version.
func (stv ServiceTypeVersion) Equals(o ServiceTypeVersion) bool {
	if stv.Service != o.Service {
		return false
	}
	if stv.Type != o.Type {
		return false
	}
	return stv.Version == o.Version
}

// String returns a loggable description of this ServiceTypeVersion, e.g.
// `"compute.ComputeInstance.ga"`.
func (stv ServiceTypeVersion) String() string {
	return `"` + stv.Service + "." + stv.Type + "." + stv.Version + `"`
}
// StateHint is a dcl.ApplyOption that acts as the unstructured analog to dcl.stateHint.
type stateHint struct {
	// state is the pre-apply resource snapshot retrieved by FetchStateHint.
	state *Resource
}

// Apply is a no-op to conform to the dcl.ApplyOption interface.
func (s stateHint) Apply(o *dcl.ApplyOpts) {}

// WithStateHint performs the same function as dcl.WithStateHint, but
// takes an unstructured resource.
func WithStateHint(r *Resource) dcl.ApplyOption {
	return stateHint{state: r}
}
// FetchStateHint returns the pre-apply state carried by the first stateHint
// option in c, or nil when no state hint was supplied.
func FetchStateHint(c []dcl.ApplyOption) *Resource {
	var found *Resource
	for _, opt := range c {
		hint, ok := opt.(stateHint)
		if !ok {
			continue
		}
		found = hint.state
		break
	}
	return found
}
// Register adds the provided resource to the list of resources available
// via the generic Get/List/Apply/Delete functions.
func Register(rr RegisteredResource) {
	registrationMutex.Lock()
	defer registrationMutex.Unlock()
	registrations = append(registrations, rr)
}

// registration returns the RegisteredResource whose ServiceTypeVersion
// matches r's, or nil when r's type was never registered.
func registration(r *Resource) RegisteredResource {
	registrationMutex.RLock()
	defer registrationMutex.RUnlock()
	var match RegisteredResource
	for _, candidate := range registrations {
		if !candidate.STV().Equals(r.STV) {
			continue
		}
		match = candidate
		break
	}
	return match
}
// Get returns the current version of a given resource (usually from the
// result of a previous Apply()).
func Get(ctx context.Context, config *dcl.Config, r *Resource) (*Resource, error) {
	if rr := registration(r); rr != nil {
		return rr.Get(ctx, config, r)
	}
	return nil, fmt.Errorf("unknown resource type %s", r.STV.String())
}

// Apply creates or updates the provided resource.
func Apply(ctx context.Context, config *dcl.Config, r *Resource, opts ...dcl.ApplyOption) (*Resource, error) {
	if rr := registration(r); rr != nil {
		return rr.Apply(ctx, config, r, opts...)
	}
	return nil, fmt.Errorf("unknown resource type %s", r.STV.String())
}

// HasDiff returns whether the provided resource config matches the live
// resource, i.e., a return value of true indicates that calling Apply()
// will cause a creation or update of the live resource. The `opts`
// parameter can optionally include a state hint.
func HasDiff(ctx context.Context, config *dcl.Config, r *Resource, opts ...dcl.ApplyOption) (bool, error) {
	if rr := registration(r); rr != nil {
		return rr.HasDiff(ctx, config, r, opts...)
	}
	return false, fmt.Errorf("unknown resource type %s", r.STV.String())
}

// Delete deletes the provided resource.
func Delete(ctx context.Context, config *dcl.Config, r *Resource) error {
	if rr := registration(r); rr != nil {
		return rr.Delete(ctx, config, r)
	}
	return fmt.Errorf("unknown resource type %s", r.STV.String())
}
// GetPolicy gets the IAMPolicy for the provided resource.
func GetPolicy(ctx context.Context, config *dcl.Config, r *Resource) (*Resource, error) {
	if rr := registration(r); rr != nil {
		return rr.GetPolicy(ctx, config, r)
	}
	return nil, fmt.Errorf("unknown resource type %s", r.STV.String())
}

// SetPolicy sets the IAMPolicy for the provided resource.
func SetPolicy(ctx context.Context, config *dcl.Config, r *Resource, p *Resource) (*Resource, error) {
	if rr := registration(r); rr != nil {
		return rr.SetPolicy(ctx, config, r, p)
	}
	return nil, fmt.Errorf("unknown resource type %s", r.STV.String())
}

// SetPolicyWithEtag sets the IAMPolicy using the etag container for the provided resource.
func SetPolicyWithEtag(ctx context.Context, config *dcl.Config, r *Resource, p *Resource) (*Resource, error) {
	if rr := registration(r); rr != nil {
		return rr.SetPolicyWithEtag(ctx, config, r, p)
	}
	return nil, fmt.Errorf("unknown resource type %s", r.STV.String())
}

// GetPolicyMember gets the IAMPolicyMember for the provided resource.
func GetPolicyMember(ctx context.Context, config *dcl.Config, r *Resource, role, member string) (*Resource, error) {
	if rr := registration(r); rr != nil {
		return rr.GetPolicyMember(ctx, config, r, role, member)
	}
	return nil, fmt.Errorf("unknown resource type %s", r.STV.String())
}

// SetPolicyMember sets the IAMPolicyMember for the provided resource.
func SetPolicyMember(ctx context.Context, config *dcl.Config, r *Resource, m *Resource) (*Resource, error) {
	if rr := registration(r); rr != nil {
		return rr.SetPolicyMember(ctx, config, r, m)
	}
	return nil, fmt.Errorf("unknown resource type %s", r.STV.String())
}

// DeletePolicyMember deletes the IAMPolicyMember for the provided resource.
func DeletePolicyMember(ctx context.Context, config *dcl.Config, r *Resource, m *Resource) error {
	if rr := registration(r); rr != nil {
		return rr.DeletePolicyMember(ctx, config, r, m)
	}
	return fmt.Errorf("unknown resource type %s", r.STV.String())
}

// ID returns a unique ID for the provided resource.
func ID(r *Resource) (string, error) {
	if rr := registration(r); rr != nil {
		return rr.ID(r)
	}
	return "", fmt.Errorf("unknown resource type %s", r.STV.String())
}
|
package admin
import (
"fmt"
"github.com/rs/zerolog/log"
"net/http"
"net/url"
"reflect"
"toutiao/downloader"
"toutiao/tools"
"time"
)
// Submit an article/work: endpoint for posting a pure-video article.
const posturl = "http://mp.toutiao.com/core/article/edit_article_post/?downloader=mp&type=purevideo"

// ArticleForm mirrors the form fields of Toutiao's edit_article_post
// endpoint; each field is serialized under its `json` tag by struct2form.
type ArticleForm struct {
	ArticleAdType int `json:"article_ad_type"`
	Title string `json:"title"`
	Abstract string `json:"abstract"`
	// Category of the article.
	Tag string `json:"tag"`
	ExternLink string `json:"extern_link"`
	IsFansArticle int `json:"is_fans_article"`
	Content string `json:"content"`
	AddThirdTitle int `json:"add_third_title"`
	TimerStatus int `json:"timer_status"`
	// Scheduled time, formatted like: 2018-04-01 09:58
	TimerTime string `json:"timer_time"`
	RecommendAutoAnalyse int `json:"recommend_auto_analyse"`
	// Labels, multiple separated by semicolons, e.g.: 大圣归来;唯美MV
	ArticleLabel string `json:"article_label"`
	FromDiagnosis int `json:"from_diagnosis"`
	// NOTE(review): relationship to Tag is unclear — confirm with the API.
	ArticleType int `json:"article_type"`
	Praise int `json:"praise"`
	PgcDebut int `json:"pgc_debut"`
	Save int `json:"save"`
}

// ArticleResult is the JSON response returned by the submission endpoint.
type ArticleResult struct {
	Message string `json:"message"`
	Code string `json:"code"`
	Data string `json:"data"`
}
// struct2form URL-encodes the exported fields of struct f as a form query,
// using each field's `json` tag as the parameter name and its fmt "%v"
// rendering as the value.
func struct2form(f interface{}) string {
	form := url.Values{}
	value := reflect.ValueOf(f)
	typ := value.Type()
	for i := 0; i < typ.NumField(); i++ {
		name := typ.Field(i).Tag.Get("json")
		form.Set(name, fmt.Sprintf("%v", value.Field(i).Interface()))
	}
	return form.Encode()
}
// ArticlePost submits a pure-video article built from the downloaded video
// metadata and the upload responses to Toutiao's edit_article_post
// endpoint. It panics when the HTTP request cannot be constructed; the
// submission result is logged via zerolog.
func ArticlePost(videofile downloader.VideoFile, videoapi *VideoApiData, uploadResponse *VideoUploadResponse) {
	form := ArticleForm{
		ArticleAdType: 3,
		Title:         videofile.Title,
		Abstract:      videofile.Desc,
		Tag:           "video_animation",
		Content:       `<p>{!-- PGC_VIDEO:{"sp":"toutiao","vid":"%s","vu":"%[1]s","thumb_url":"%s","src_thumb_uri":"%[2]s","vname":"%s"} --}</p>`,
		TimerTime:     time.Now().Format(("2006-01-02 15:04")),
		ArticleLabel:  "动漫;搞笑;",
		ArticleType:   1,
		Save:          1,
	}
	// Explicit fmt indexes: %s and %[1]s both consume UploadID, the next
	// %s and %[2]s both consume PosterUri, and the final %s consumes Title.
	form.Content = fmt.Sprintf(form.Content, videoapi.UploadID, uploadResponse.PosterUri, videofile.Title)
	fmt.Println(form)
	data := struct2form(form)
	log.Warn().Msgf("PostRawData: %s", data)
	req, err := NewTiaoRequest(http.MethodPost, posturl, data)
	if err != nil {
		panic(err)
	}
	result := &ArticleResult{}
	tools.DoRequestJson(req, result)
	log.Warn().Msgf("作品提交结果: %v", result)
}
|
package seg
import (
"bufio"
"fmt"
"io"
"log"
"os"
"unicode"
)
// isLoadDictFlag records whether the dictionary has already been loaded
// into the trie (set by loadDictionary; checked lazily by Cut/Rcut).
var isLoadDictFlag = false

//TrieNode holds the information for a single character node.
type TrieNode struct {
	Count int                //number of dictionary words ending at this node
	Son   map[rune]*TrieNode //successor nodes keyed by the next rune
}

//Trie stores the whole dictionary.
type Trie struct {
	Root *TrieNode
}
//Add inserts the word srune into the trie, creating nodes as needed and
//incrementing the terminal node's Count to mark a word end.
func (t *Trie) Add(srune []rune) {
	node := t.Root
	for _, r := range srune {
		next, ok := node.Son[r]
		if !ok {
			next = &TrieNode{Count: 0, Son: map[rune]*TrieNode{}}
			node.Son[r] = next
		}
		node = next
	}
	// Mark the end of the word (no-op for an empty input).
	if len(srune) > 0 {
		node.Count++
	}
}
//search walks the trie along srune and returns the count stored at the
//node reached. A non-nil error is returned when srune is not a complete
//dictionary word: either some character has no trie node, or the final
//node has Count == 0 (srune is only a prefix of longer words).
func (tree *Trie) search(srune []rune) (int, error) {
	var err error
	var Count int
	temp := tree.Root
	for i := 0; i < len(srune); i++ {
		v, ok := temp.Son[srune[i]]
		if ok {
			temp = v
		} else {
			err = fmt.Errorf("cannot find aim string: \"%s\"", string(srune))
			break
		}
		// At the last character the node must mark a word end (Count > 0);
		// otherwise the walk matched only a prefix.
		if i == len(srune)-1 && temp.Count == 0 {
			err = fmt.Errorf("cannot find aim string: \"%s\"", string(srune))
		} else {
			Count = temp.Count
		}
	}
	return Count, err
}
//parserDigit returns srune[cur:...]: the rune at cur (always included)
//plus the maximal run of decimal digits immediately following it.
func parserDigit(srune []rune, cur int) []rune {
	end := cur + 1
	for end < len(srune) && unicode.IsDigit(srune[end]) {
		end++
	}
	return srune[cur:end]
}
//isEnglish reports whether r is an ASCII English letter (a-z or A-Z).
//
//BUG fix: the previous version tested uint32(r) >= 61 for the lowercase
//range; 61 is '=', not 'a' (97), so punctuation such as '=', '?', '@'
//and '`' was misclassified as an English letter.
func isEnglish(r rune) bool {
	return (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z')
}
//parserLetter returns srune[cur:...]: the rune at cur (always included)
//plus the maximal run of ASCII letters immediately following it.
func parserLetter(srune []rune, cur int) []rune {
	end := cur + 1
	for end < len(srune) && isEnglish(srune[end]) {
		end++
	}
	return srune[cur:end]
}
//Cut segments ss using maximum forward matching: at each position it first
//consumes runs of digits and ASCII letters verbatim, then matches the
//longest dictionary word starting there, falling back to a single rune
//when nothing matches.
func (self *Trie) Cut(ss string) []string {
	// Lazily load the dictionary on first use.
	if !isLoadDictFlag {
		self.loadDictionary()
	}
	result := make([][]rune, 0)
	srune := []rune(ss)
	var end = len(srune)
	// end is reset to len(srune) at the top of every outer iteration so the
	// inner loop can shrink it while searching for the longest match.
	for start := 0; start < len(srune); end = len(srune) {
		if unicode.IsDigit(srune[start]) {
			digit := parserDigit(srune, start)
			start += len(digit)
			result = append(result, digit)
		}
		if start >= len(srune) {
			break
		}
		if isEnglish(srune[start]) {
			en := parserLetter(srune, start)
			start += len(en)
			result = append(result, en)
		}
		if start >= len(srune) {
			break
		}
		// Longest-match scan: shrink [start:end) from the right until a
		// dictionary word is found or only one rune remains.
		for start < len(srune) {
			s := srune[start:end]
			_, err := self.search(s)
			if err == nil {
				result = append(result, s)
				start = end
				break
			} else if end == start+1 {
				// Single unmatched rune: emit it as its own segment.
				result = append(result, srune[start:start+1])
				start++
				break
			} else {
				end--
			}
		}
	}
	sr := make([]string, 0)
	for _, v := range result {
		sr = append(sr, string(v))
	}
	return sr
}
//parserRLetter returns srune[?..end]: the rune at end plus the maximal run
//of ASCII letters immediately preceding it (never reaching before start).
//
//BUG fix: the scan loop used `tmp > start` while its digit counterpart
//parserRDigit uses `tmp >= start`; srune[start] was therefore never
//examined, so when the whole [start..end] span is letters the first letter
//was dropped from the result, and the `tmp < start` branch was dead code.
//The loop now mirrors parserRDigit.
func parserRLetter(srune []rune, end, start int) []rune {
	tmp := end - 1
	for tmp >= start && isEnglish(srune[tmp]) {
		tmp--
	}
	if tmp < start {
		return srune[start : end+1]
	}
	return srune[tmp+1 : end+1]
}
//parserRDigit returns srune[?..end]: the rune at end plus the maximal run
//of decimal digits immediately preceding it (never reaching before start).
func parserRDigit(srune []rune, end, start int) []rune {
	first := end
	for first > start && unicode.IsDigit(srune[first-1]) {
		first--
	}
	return srune[first : end+1]
}
//Rcut segments ss using maximum backward matching: scanning right to left,
//it first consumes trailing runs of digits and ASCII letters, then matches
//the longest dictionary word ending at the current position, falling back
//to a single rune. Segments are collected right-to-left and reversed in
//place before returning.
func (self *Trie) Rcut(ss string) []string {
	// Lazily load the dictionary on first use.
	if !isLoadDictFlag {
		self.loadDictionary()
	}
	ssrune := []rune(ss)
	result := make([]string, 0)
	end := len(ssrune)
	start := 0
	for start < end {
		if unicode.IsDigit(ssrune[end-1]) {
			digit := parserRDigit(ssrune, end-1, start)
			end -= len(digit)
			result = append(result, string(digit))
		}
		if start >= end {
			break
		}
		if isEnglish(ssrune[end-1]) {
			en := parserRLetter(ssrune, end-1, start)
			end -= len(en)
			result = append(result, string(en))
		}
		if start >= end {
			break
		}
		// Longest-suffix scan: grow tmp rightward until ssrune[tmp:end]
		// is a dictionary word or only one rune remains.
		tmp := start
		for tmp < end {
			s := ssrune[tmp:end]
			_, err := self.search(s)
			if err == nil {
				result = append(result, string(s))
				end -= len(s)
			} else if end == tmp+1 {
				// Single unmatched rune: emit it as its own segment.
				result = append(result, string(s))
				end--
			} else {
				tmp++
			}
		}
	}
	// Reverse in place: segments were appended right-to-left.
	for i := 0; i < len(result)/2; i++ {
		tmp := result[i]
		result[i] = result[len(result)-1-i]
		result[len(result)-1-i] = tmp
	}
	return result
}
//loadDictionary reads the dictionary configured at SegConfig["mmDictPath"]
//into the trie, one word per line, then marks the dictionary as loaded.
//The process exits when the file cannot be opened.
func (self *Trie) loadDictionary() {
	dictName := SegConfig["mmDictPath"]
	dict, err := os.Open(dictName)
	if err != nil {
		log.Println("bingo:load dictionary failed.")
		os.Exit(1)
	}
	defer dict.Close()
	rd := bufio.NewReader(dict)
	for {
		line, readErr := rd.ReadString('\n')
		// Strip the trailing '\n' (and a '\r' left by CRLF files).
		for len(line) > 0 && (line[len(line)-1] == '\n' || line[len(line)-1] == '\r') {
			line = line[:len(line)-1]
		}
		// BUG fix: a final line without a trailing newline is returned
		// together with io.EOF; the previous code broke out before adding
		// it, silently dropping the last dictionary word.
		if len(line) > 0 {
			self.Add([]rune(line))
		}
		if readErr != nil {
			if readErr != io.EOF {
				log.Println("bingo:read dictionary failed.")
			}
			break
		}
	}
	isLoadDictFlag = true
}
//NewMMSeg returns a Trie whose root is an empty node, ready for Add/Cut.
func NewMMSeg() *Trie {
	return &Trie{
		Root: &TrieNode{
			Count: 0,
			Son:   map[rune]*TrieNode{},
		},
	}
}
|
package factory
import (
"errors"
"fmt"
"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp"
"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp/sw"
)
const (
SoftwareBasedFactoryName = "SW"
)
// SWFactory produces software-based (SW) BCCSP instances.
type SWFactory struct{}

// Name returns the name of this factory ("SW").
func (f *SWFactory) Name() string {
	return SoftwareBasedFactoryName
}
// Get returns a software-based BCCSP configured by config.SwOpts: an
// ephemeral (in-memory) keystore when Ephemeral is set, a file-based
// keystore when FileKeystore is configured, and a dummy keystore otherwise.
func (f *SWFactory) Get(config *FactoryOpts) (bccsp.BCCSP, error) {
	if config == nil || config.SwOpts == nil {
		return nil, errors.New("Invalid config. It must not be nil.")
	}
	swOpts := config.SwOpts

	var ks bccsp.KeyStore
	switch {
	case swOpts.Ephemeral:
		ks = sw.NewDummyKeyStore()
	case swOpts.FileKeystore != nil:
		fks, err := sw.NewFileBasedKeyStore(nil, swOpts.FileKeystore.KeyStorePath, false)
		if err != nil {
			return nil, fmt.Errorf("Failed to initialize software key store: %s", err)
		}
		ks = fks
	default:
		ks = sw.NewDummyKeyStore()
	}
	return sw.New(swOpts.SecLevel, swOpts.HashFamily, ks)
}
// SwOpts configures the software-based BCCSP: security level, hash family,
// and which keystore backing to use.
type SwOpts struct {
	SecLevel int `mapstructure:"security" json:"security" yaml:"Security"`
	HashFamily string `mapstructure:"hash" json:"hash" yaml:"Hash"`
	Ephemeral bool `mapstructure:"tempkeys,omitempty" json:"tempkeys,omitempty"`
	FileKeystore *FileKeystoreOpts `mapstructure:"filekeystore,omitempty" json:"filekeystore,omitempty" yaml:"FileKeyStore"`
	// NOTE(review): DummyKeystore is never consulted by SWFactory.Get —
	// confirm whether it is read elsewhere or is dead configuration.
	DummyKeystore *DummyKeystoreOpts `mapstructure:"dummykeystore,omitempty" json:"dummykeystore,omitempty"`
}

// FileKeystoreOpts points the file-based keystore at its directory.
type FileKeystoreOpts struct {
	KeyStorePath string `mapstructure:"keystore" yaml:"KeyStore"`
}

// DummyKeystoreOpts carries no options for the in-memory keystore.
type DummyKeystoreOpts struct{}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package match
import (
"strings"
)
import (
"github.com/apache/dubbo-go/common"
)
// IsMatchGlobalPattern Match value to param content by pattern.
// A pattern beginning with "$" is first resolved against the URL's raw
// parameters (when param is non-nil) before matching.
func IsMatchGlobalPattern(pattern string, value string, param *common.URL) bool {
	resolved := pattern
	if param != nil && strings.HasPrefix(resolved, "$") {
		resolved = param.GetRawParam(resolved[1:])
	}
	return isMatchInternalPattern(resolved, value)
}
// isMatchInternalPattern matches value against pattern, where a single '*'
// (at most one is honored — the last occurrence) matches any substring.
// "*" alone matches everything; an empty pattern matches only an empty value.
func isMatchInternalPattern(pattern string, value string) bool {
	if "*" == pattern {
		return true
	}
	if len(pattern) == 0 && len(value) == 0 {
		return true
	}
	if len(pattern) == 0 || len(value) == 0 {
		return false
	}
	i := strings.LastIndex(pattern, "*")
	switch i {
	case -1:
		// doesn't find "*"
		return value == pattern
	case len(pattern) - 1:
		// "*" is at the end
		return strings.HasPrefix(value, pattern[0:i])
	case 0:
		// "*" is at the beginning
		return strings.HasSuffix(value, pattern[i+1:])
	default:
		// "*" is in the middle.
		// BUG fix: the prefix is everything before the '*', not just the
		// first character (was pattern[0:1], so "ab*cd" wrongly matched
		// any value starting with "a").
		prefix := pattern[0:i]
		suffix := pattern[i+1:]
		return strings.HasPrefix(value, prefix) && strings.HasSuffix(value, suffix)
	}
}
|
package scoring
import (
"github.com/luuphu25/data-sidecar/stat"
"github.com/luuphu25/data-sidecar/util"
)
// HighwayVal is the kind of value a highway can hold
type HighwayVal struct {
	High float64 // upper bound (mean + 3*stddev as computed in Highway)
	Low float64 // lower bound (mean - 3*stddev as computed in Highway)
}

// HighwayExits is the kind of value that exits can be/hold
type HighwayExits struct {
	High bool // current point is above the high bound
	Low bool // current point is below the low bound
}
// Highway adds green highway data based on a histogram: it derives
// mean±3σ bounds from the trailing window `data`, records the bounds, and
// records whether the current point falls outside them.
func Highway(curr util.DataPoint, data []util.DataPoint, kvs map[string]string,
	record util.Recorder, storage util.StorageEngine) {
	// Too little history to estimate meaningful bounds.
	if len(data) < 20 {
		return
	}
	// put your favorite math here!
	tempSS := stat.NewSuffStat()
	for xx := range data {
		tempSS.Insert(data[xx].Val)
	}
	mean, std := tempSS.MeanStdDev()
	hwy := HighwayVal{High: mean + 3.*std, Low: mean - 3.*std}
	// replace the above calculation with whatever you like
	// to generate upper and lower bounds
	// anything will do, you can even break it out by series
	// names or characteristics or whatever!
	// NOTE(review): the storage parameter is unused here — confirm whether
	// it is required by a shared scorer signature.
	hwy.Record(curr, kvs, record)
	exits := HighwayExits{High: curr.Val > hwy.High,
		Low: curr.Val < hwy.Low}
	exits.Record(curr, kvs, record)
}
// Record emits the high, low, and combined "outside" exit indicators for
// the current data point.
func (e HighwayExits) Record(curr util.DataPoint, kvs map[string]string, record util.Recorder) {
	RecordExit(e.High, curr.Time, kvs, "high", record)
	RecordExit(e.Low, curr.Time, kvs, "low", record)
	RecordExit(e.High || e.Low, curr.Time, kvs, "outside", record)
}
// Record emits the highway's high and low thresholds at the current
// point's timestamp.
func (h HighwayVal) Record(curr util.DataPoint, kvs map[string]string, record util.Recorder) {
	thresholds := []struct {
		label string
		value float64
	}{{"high", h.High}, {"low", h.Low}}
	for _, th := range thresholds {
		RecordThreshold(util.DataPoint{Val: th.value, Time: curr.Time}, kvs, th.label, record)
	}
}
|
// +build disgord_removeDiscordMutex

package disgord
// Lockable is removed on compile time since it holds no content. This allows the removal of mutexes if desired by the
// developer.
type Lockable struct{}

// RLock is a no-op so code written against an RWMutex-style API still compiles.
func (l *Lockable) RLock() {}

// RUnlock is a no-op counterpart to RLock.
func (l *Lockable) RUnlock() {}

// Lock is a no-op so code written against a Mutex-style API still compiles.
func (l *Lockable) Lock() {}

// Unlock is a no-op counterpart to Lock.
func (l *Lockable) Unlock() {}
|
// jeff@archlinux.org
package main
import (
"bufio"
"crypto/sha1"
"encoding/json"
"encoding/xml"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"sync/atomic"
)
// CacheFile is the path of the JSON file recording which feed entries the
// user has already confirmed as seen.
var CacheFile string

// init resolves CacheFile under $XDG_CACHE_HOME, falling back to
// $HOME/.cache when XDG_CACHE_HOME is unset.
func init() {
	xch := os.Getenv("XDG_CACHE_HOME")
	if xch == "" {
		home := os.Getenv("HOME")
		xch = filepath.Join(home, ".cache")
	}
	CacheFile = filepath.Join(xch, "yosumiru", "feeds_seen.json")
}
// main shows unseen Arch Linux news entries and, if the user confirms,
// runs a full system update.
func main() {
	if LastFeeds() {
		RunUpdate()
	}
}
// RunUpdate runs `sudo pacman -Syu` interactively, wired to this
// process's stdin/stdout/stderr.
func RunUpdate() {
	cmd := exec.Command("sudo", "pacman", "-Syu")
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	// NOTE(review): the error from Run is ignored — pacman's own output is
	// visible to the user; confirm this best-effort behavior is intended.
	cmd.Run()
}
// rssFeedXml is the root <rss> document wrapper.
type rssFeedXml struct {
	XMLName xml.Name `xml:"rss"`
	Channel *RssFeed
}

// RssFeed is the <channel> element holding the news items.
type RssFeed struct {
	XMLName xml.Name `xml:"channel"`
	Items []*RssItem `xml:"item"`
}

// RssItem is a single <item> news entry.
type RssItem struct {
	XMLName xml.Name `xml:"item"`
	Title string `xml:"title"`
	Description string `xml:"description"`
	PubDate string `xml:"pubDate,omitempty"`
}

// RssEnclosure models an <enclosure> element.
// NOTE(review): not referenced elsewhere in this file — possibly dead code.
type RssEnclosure struct {
	XMLName xml.Name `xml:"enclosure"`
	Url string `xml:"url,attr"`
	Length string `xml:"length,attr"`
	Type string `xml:"type,attr"`
}

// ARCH_NEWS_FEED is the Arch Linux news RSS endpoint.
const ARCH_NEWS_FEED = "https://www.archlinux.org/feeds/news/"

// FeedsSeen is the on-disk cache: the set of entry hashes already shown
// and confirmed by the user.
type FeedsSeen struct {
	SeenMap map[string]struct{} `json:"seen_map"`
}

// Entry is one news item plus the SHA-1 hash identifying it in the cache.
type Entry struct {
	Title string
	PubDate string
	Description string
	Hash string
}
// HashIt fills e.Hash with the hex-encoded SHA-1 of the concatenation of
// Title, PubDate, and Description.
func (e *Entry) HashIt() {
	payload := []byte(e.Title + e.PubDate + e.Description)
	e.Hash = fmt.Sprintf("%x", sha1.Sum(payload))
}
// Print writes the entry to stdout: title, publication date, and the
// description converted to org format via fixDesc.
func (e *Entry) Print() {
	fmt.Printf(`* %s
[%s]
%s
-----
`, e.Title, e.PubDate, fixDesc(e.Description))
}
// checkUser asks a yes/no question on stdin; an empty answer, "y", or "Y"
// counts as yes. Read errors are ignored (an empty read means no).
func checkUser(question string) bool {
	fmt.Print(question + " [Y/n]: ")
	answer, _ := bufio.NewReader(os.Stdin).ReadString('\n')
	switch answer {
	case "\n", "Y\n", "y\n":
		return true
	}
	return false
}
// GetFeedsSeen loads the seen-entries cache from CacheFile and sends it on
// fsc. A missing cache file yields an empty set (first run); any other
// read or decode error panics.
func GetFeedsSeen(fsc chan *FeedsSeen) {
	var fs FeedsSeen
	cache, err := ioutil.ReadFile(CacheFile)
	if err != nil {
		// Only a path error (missing/unreadable file) is tolerated.
		_, ok := err.(*os.PathError)
		if !ok {
			panic(err)
		}
	} else {
		err = json.Unmarshal(cache, &fs)
		if err != nil {
			panic(err)
		}
	}
	// Guarantee a usable (non-nil) map for callers that insert into it.
	if fs.SeenMap == nil {
		fs.SeenMap = make(map[string]struct{}, 0)
	}
	fsc <- &fs
}
// GetArchFeed downloads and parses the Arch news RSS feed, sending the
// result on rc. Network/read errors panic; XML errors are only printed,
// so the receiver may get a partially filled document.
// NOTE(review): resp.StatusCode is never checked — an error page body
// would be fed to the XML parser.
func GetArchFeed(rc chan *rssFeedXml) {
	resp, err := http.Get(ARCH_NEWS_FEED)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	bod, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	var res rssFeedXml
	err = xml.Unmarshal(bod, &res)
	if err != nil {
		fmt.Printf("%v\n%#v\n", err, err)
	}
	rc <- &res
}
// SaveCache persists f as JSON at CacheFile (mode 0600), creating parent
// directories (mode 0700) as needed. Any failure panics.
func SaveCache(f *FeedsSeen) {
	payload, err := json.Marshal(*f)
	if err != nil {
		panic(err)
	}
	if err := os.MkdirAll(filepath.Dir(CacheFile), 0700); err != nil {
		panic(err)
	}
	if err := ioutil.WriteFile(CacheFile, payload, 0600); err != nil {
		panic(err)
	}
}
// LastFeeds fetches the Arch news feed and the local seen-cache
// concurrently, shows every entry the user has not yet confirmed, and asks
// whether to continue with the update. The cache is saved only when the
// user chooses to continue; that choice is returned.
func LastFeeds() bool {
	feeds := make(chan *rssFeedXml, 0)
	caches := make(chan *FeedsSeen, 0)
	go GetFeedsSeen(caches)
	go GetArchFeed(feeds)
	feed := <-feeds
	cache := <-caches
	entries := make([]*Entry, 0)
	// copy records from the feed
	for _, v := range feed.Channel.Items {
		entries = append(entries, &Entry{
			Title:       v.Title,
			PubDate:     v.PubDate,
			Description: v.Description,
		})
	}
	// Hash entries concurrently; the goroutine that completes the last
	// entry signals done.
	count := new(int64)
	done := make(chan struct{}, 0)
	for _, v := range entries {
		// BUG fix: pass the loop variable as an argument. The previous
		// closure captured v directly, so (before Go 1.22 loop-variable
		// semantics) every goroutine could hash the same final entry.
		go func(e *Entry) {
			e.HashIt()
			if atomic.AddInt64(count, 1) == int64(len(entries)) {
				done <- struct{}{}
			}
		}(v)
	}
	// BUG fix: an empty feed spawned no goroutines, so the receive below
	// would deadlock forever.
	if len(entries) > 0 {
		<-done
	}
	// print unseen records
	for _, v := range entries {
		_, ok := cache.SeenMap[v.Hash]
		if ok {
			continue
		}
		v.Print()
		if checkUser("Confirm seen?") {
			cache.SeenMap[v.Hash] = struct{}{}
		}
	}
	c := checkUser("Continue with update?")
	if c {
		SaveCache(cache)
	}
	return c
}
// fixDesc converts the HTML description to Emacs org format by piping it
// through an external `pandoc` process; it panics if pandoc is missing,
// the pipe cannot be written, or pandoc exits with an error.
func fixDesc(desc string) string {
	cmd := exec.Command("pandoc", "-f", "html", "-t", "org")
	sip, err := cmd.StdinPipe()
	if err != nil {
		panic(err)
	}
	// Feed the HTML on a separate goroutine so CombinedOutput can drain
	// pandoc's output without the pipe deadlocking.
	go func() {
		_, err := sip.Write([]byte(desc))
		if err != nil {
			panic(err)
		}
		sip.Close()
	}()
	res, err := cmd.CombinedOutput()
	if err != nil {
		panic(err)
	}
	return string(res)
}
|
package huobi_websocket
import (
. "exchange_websocket/common"
"strings"
)
// HuobiSymbol holds Huobi trading symbols grouped by quote currency
// (USDT / BTC / ETH) plus the combined list.
type HuobiSymbol struct {
	HuobiUsdtSymbol []string
	HuobiBtcSymbol []string
	HuobiEthSymbol []string
	HuobiSymbols []string
}

// NewHuobiSymbol builds a HuobiSymbol with all symbol lists populated.
func NewHuobiSymbol() *HuobiSymbol {
	hb := new(HuobiSymbol)
	return hb.huobiSymbolInit()
}
// huobiSymbolInit fills the per-quote symbol lists — lower-cased with the
// "_" separator removed (e.g. "BTC_USDT" -> "btcusdt") — and the combined
// HuobiSymbols list, then returns o for chaining.
func (o *HuobiSymbol) huobiSymbolInit() *HuobiSymbol {
	normalize := func(symbol string) string {
		return strings.ToLower(strings.Replace(symbol, "_", "", -1))
	}
	for _, symbol := range append(CommonUsdt, HuobiUsdt...) {
		o.HuobiUsdtSymbol = append(o.HuobiUsdtSymbol, normalize(symbol))
	}
	for _, symbol := range append(CommonBtc, HuobiBtc...) {
		o.HuobiBtcSymbol = append(o.HuobiBtcSymbol, normalize(symbol))
	}
	for _, symbol := range append(CommonEth, HuobiEth...) {
		o.HuobiEthSymbol = append(o.HuobiEthSymbol, normalize(symbol))
	}
	// BUG fix: the combined list previously appended HuobiBtcSymbol twice
	// and omitted HuobiEthSymbol entirely. Build it into a fresh slice so
	// the USDT list's backing array cannot be clobbered by the append.
	total := len(o.HuobiUsdtSymbol) + len(o.HuobiBtcSymbol) + len(o.HuobiEthSymbol)
	o.HuobiSymbols = make([]string, 0, total)
	o.HuobiSymbols = append(o.HuobiSymbols, o.HuobiUsdtSymbol...)
	o.HuobiSymbols = append(o.HuobiSymbols, o.HuobiBtcSymbol...)
	o.HuobiSymbols = append(o.HuobiSymbols, o.HuobiEthSymbol...)
	return o
}
// huobiSymbolTransfer converts a Huobi-style symbol (e.g. "btcusdt") back
// to the upper-cased underscore form (e.g. "BTC_USDT"); it returns "" when
// the symbol is not in any known list.
// NOTE(review): strings.Replace with -1 replaces every occurrence of the
// quote substring, so a base currency containing "usdt"/"btc"/"eth" as a
// substring would be mangled — confirm the symbol lists cannot collide.
func (o *HuobiSymbol) huobiSymbolTransfer(symbol string) string {
	isExist1, _ := Contain(symbol, o.HuobiUsdtSymbol)
	if isExist1 {
		return strings.ToUpper(strings.Replace(symbol, "usdt", "_usdt", -1))
	}
	isExist2, _ := Contain(symbol, o.HuobiBtcSymbol)
	if isExist2 {
		return strings.ToUpper(strings.Replace(symbol, "btc", "_btc", -1))
	}
	isExist3, _ := Contain(symbol, o.HuobiEthSymbol)
	if isExist3 {
		return strings.ToUpper(strings.Replace(symbol, "eth", "_eth", -1))
	}
	return ""
}
|
package 摩尔投票
// majorityElement returns the element occurring more than len(nums)/2
// times, or -1 when no such majority element exists.
func majorityElement(nums []int) int {
	candidate := getMajorElementCandidate(nums)
	// Boyer-Moore only yields a *candidate*; verify it is a true majority.
	if getCountOfNum(nums, candidate) > len(nums)/2 {
		return candidate
	}
	return -1
}
// getMajorElementCandidate runs Boyer-Moore voting over nums and returns
// the surviving candidate (which is the majority element if one exists).
func getMajorElementCandidate(nums []int) int {
	candidate, votes := 0, 0
	for _, n := range nums {
		switch {
		case votes == 0:
			candidate, votes = n, 1
		case n == candidate:
			votes++
		default:
			votes--
		}
	}
	return candidate
}
// getCountOfNum returns how many elements of arr equal num.
func getCountOfNum(arr []int, num int) int {
	total := 0
	for _, v := range arr {
		if v == num {
			total++
		}
	}
	return total
}
/*
Summary:
1. Boyer-Moore voting assumes the array definitely contains a majority
   element, so after obtaining the "majority element candidate" this problem
   must still validate the candidate and check whether it really is the
   majority element.
*/
|
package easypost
import (
"context"
"net/http"
"net/url"
)
// PickupRate contains data about the cost of a pickup.
type PickupRate struct {
	ID string `json:"id,omitempty"`
	Object string `json:"object,omitempty"`
	Mode string `json:"mode,omitempty"`
	CreatedAt *DateTime `json:"created_at,omitempty"`
	UpdatedAt *DateTime `json:"updated_at,omitempty"`
	Service string `json:"service,omitempty"`
	Carrier string `json:"carrier,omitempty"`
	Rate string `json:"rate,omitempty"`
	Currency string `json:"currency,omitempty"`
	PickupID string `json:"pickup_id,omitempty"`
}

// A Pickup object represents a pickup from a carrier at a customer's residence
// or place of business.
type Pickup struct {
	ID string `json:"id,omitempty"`
	Object string `json:"object,omitempty"`
	Reference string `json:"reference,omitempty"`
	Mode string `json:"mode,omitempty"`
	CreatedAt *DateTime `json:"created_at,omitempty"`
	UpdatedAt *DateTime `json:"updated_at,omitempty"`
	Status string `json:"status,omitempty"`
	MinDatetime *DateTime `json:"min_datetime,omitempty"`
	MaxDatetime *DateTime `json:"max_datetime,omitempty"`
	IsAccountAddress bool `json:"is_account_address,omitempty"`
	Instructions string `json:"instructions,omitempty"`
	Messages []*CarrierMessage `json:"messages,omitempty"`
	Confirmation string `json:"confirmation,omitempty"`
	Shipment *Shipment `json:"shipment,omitempty"`
	Address *Address `json:"address,omitempty"`
	Batch *Batch `json:"batch,omitempty"`
	CarrierAccounts []*CarrierAccount `json:"carrier_accounts,omitempty"`
	PickupRates []*PickupRate `json:"pickup_rates,omitempty"`
}

// ListPickupResult holds the results from the list Pickup API.
type ListPickupResult struct {
	Pickups []*Pickup `json:"pickups,omitempty"`
	PaginatedCollection
}

// createPickupRequest is the JSON envelope the API expects when creating
// a pickup.
type createPickupRequest struct {
	Pickup *Pickup `json:"pickup,omitempty"`
}
// CreatePickup creates a new Pickup object, and automatically fetches rates
// for the given time and location.
//
//	c := easypost.New(MyEasyPostAPIKey)
//	out, err := c.CreatePickup(
//		&easypost.Pickup{
//			Reference:        "my-first-pickup",
//			MinDatetime:      time.Date(2014, 10, 21, 0, 10, 0, 0, time.UTC),
//			MaxDatetime:      time.Date(2014, 10, 21, 15, 30, 0, 0, time.UTC),
//			Shipment:         &easypost.Shipment{ID: "shp_1"},
//			Address:          &easypost.Address{ID: "ad_1001"},
//			IsAccountAddress: false,
//			Instructions:     "Special pickup instructions",
//		},
//	)
func (c *Client) CreatePickup(in *Pickup) (out *Pickup, err error) {
	return c.CreatePickupWithContext(context.Background(), in)
}

// CreatePickupWithContext performs the same operation as CreatePickup, but
// allows specifying a context that can interrupt the request.
func (c *Client) CreatePickupWithContext(ctx context.Context, in *Pickup) (out *Pickup, err error) {
	req := &createPickupRequest{Pickup: in}
	err = c.post(ctx, "pickups", req, &out)
	return
}
// GetPickup retrieves an existing Pickup object by ID.
func (c *Client) GetPickup(pickupID string) (out *Pickup, err error) {
	return c.GetPickupWithContext(context.Background(), pickupID)
}

// GetPickupWithContext performs the same operation as GetPickup, but allows
// specifying a context that can interrupt the request.
func (c *Client) GetPickupWithContext(ctx context.Context, pickupID string) (out *Pickup, err error) {
	path := "pickups/" + pickupID
	err = c.get(ctx, path, &out)
	return
}
// BuyPickup purchases and schedules a pickup.
//
//	c := easypost.New(MyEasyPostAPIKey)
//	rate := &PickupRate{Carrier: "UPS", Service: "Same-Day Pickup"}
//	out, err := c.BuyPickup("pck_1", rate)
func (c *Client) BuyPickup(pickupID string, rate *PickupRate) (out *Pickup, err error) {
	return c.BuyPickupWithContext(context.Background(), pickupID, rate)
}

// BuyPickupWithContext performs the same operation as BuyPickup, but allows
// specifying a context that can interrupt the request.
func (c *Client) BuyPickupWithContext(ctx context.Context, pickupID string, rate *PickupRate) (out *Pickup, err error) {
	params := url.Values{}
	params.Set("carrier", rate.Carrier)
	params.Set("service", rate.Service)
	err = c.post(ctx, "pickups/"+pickupID+"/buy", params, &out)
	return
}
// CancelPickup cancels a scheduled pickup.
func (c *Client) CancelPickup(pickupID string) (out *Pickup, err error) {
	return c.CancelPickupWithContext(context.Background(), pickupID)
}

// CancelPickupWithContext performs the same operation as CancelPickup, but
// allows specifying a context that can interrupt the request.
func (c *Client) CancelPickupWithContext(ctx context.Context, pickupID string) (out *Pickup, err error) {
	path := "pickups/" + pickupID + "/cancel"
	err = c.post(ctx, path, nil, &out)
	return
}
// LowestPickupRate gets the lowest rate of a pickup
func (c *Client) LowestPickupRate(pickup *Pickup) (out PickupRate, err error) {
	return c.LowestPickupRateWithCarrier(pickup, nil)
}

// LowestPickupRateWithCarrier performs the same operation as LowestPickupRate,
// but allows specifying a list of carriers for the lowest rate
func (c *Client) LowestPickupRateWithCarrier(pickup *Pickup, carriers []string) (out PickupRate, err error) {
	return c.LowestPickupRateWithCarrierAndService(pickup, carriers, nil)
}

// LowestPickupRateWithCarrierAndService performs the same operation as LowestPickupRate,
// but allows specifying a list of carriers and service for the lowest rate.
// All three variants delegate to the client's internal lowestPickupRate
// over pickup.PickupRates.
func (c *Client) LowestPickupRateWithCarrierAndService(pickup *Pickup, carriers []string, services []string) (out PickupRate, err error) {
	return c.lowestPickupRate(pickup.PickupRates, carriers, services)
}
// ListPickups provides a paginated result of Pickup objects.
func (c *Client) ListPickups(opts *ListOptions) (*ListPickupResult, error) {
	return c.ListPickupsWithContext(context.Background(), opts)
}
// ListPickupsWithContext performs the same operation as ListPickups, but
// allows specifying a context that can interrupt the request.
func (c *Client) ListPickupsWithContext(ctx context.Context, opts *ListOptions) (*ListPickupResult, error) {
	var result *ListPickupResult
	err := c.do(ctx, http.MethodGet, "pickups", c.convertOptsToURLValues(opts), &result)
	return result, err
}
// GetNextPickupPage returns the next page of pickups.
func (c *Client) GetNextPickupPage(collection *ListPickupResult) (*ListPickupResult, error) {
	return c.GetNextPickupPageWithContext(context.Background(), collection)
}
// GetNextPickupPageWithPageSize returns the next page of pickups with a
// specific page size.
func (c *Client) GetNextPickupPageWithPageSize(collection *ListPickupResult, pageSize int) (*ListPickupResult, error) {
	return c.GetNextPickupPageWithPageSizeWithContext(context.Background(), collection, pageSize)
}
// GetNextPickupPageWithContext performs the same operation as
// GetNextPickupPage, but allows specifying a context that can interrupt
// the request. A pageSize of zero means "use the server default".
func (c *Client) GetNextPickupPageWithContext(ctx context.Context, collection *ListPickupResult) (*ListPickupResult, error) {
	return c.GetNextPickupPageWithPageSizeWithContext(ctx, collection, 0)
}
// GetNextPickupPageWithPageSizeWithContext performs the same operation as
// GetNextPickupPageWithPageSize, but allows specifying a context that can
// interrupt the request.
func (c *Client) GetNextPickupPageWithPageSizeWithContext(ctx context.Context, collection *ListPickupResult, pageSize int) (*ListPickupResult, error) {
	pickups := collection.Pickups
	if len(pickups) == 0 {
		// An empty page means there is nothing further to paginate.
		return nil, EndOfPaginationError
	}
	// Pagination cursors off the ID of the last element on the current page.
	lastID := pickups[len(pickups)-1].ID
	params, err := nextPageParameters(collection.HasMore, lastID, pageSize)
	if err != nil {
		return nil, err
	}
	return c.ListPickupsWithContext(ctx, params)
}
|
package transform
import (
"fmt"
"github.com/sparkymat/webdsl/css"
"github.com/sparkymat/webdsl/css/size"
)
// TranslateX builds a CSS "transform" property whose value is
// translateX(<distance>).
func TranslateX(distance size.Size) css.Property {
	value := fmt.Sprintf("translateX(%v)", distance)
	return css.Property{}.WithPropertyType("transform").WithValues(value)
}
|
package main
import (
"fmt"
"log"
"net/http"
"time"
"github.com/julienschmidt/httprouter"
)
// Event is a named item that is considered active between TimeToShow and
// TimeEndShow (inclusive, at one-second granularity — see main's ticker loop).
type Event struct {
	TimeToShow time.Time // instant at which the event starts being shown
	TimeEndShow time.Time // instant at which the event stops being shown
	Name string // label printed while the event is active
}
// main serves a /healthz endpoint on :1337 and, on a one-second ticker,
// prints the name of the currently-active event (or the current time, or
// "Nothing" once all events have passed). Events are assumed ordered by
// start time; only the head of the slice is ever inspected.
func main() {
	router := httprouter.New()
	router.GET("/healthz", Healthz)
	log.Println("Listening at port 1337")
	go func() {
		log.Fatal(http.ListenAndServe(":1337", router))
	}()
	now := time.Now()
	// Element type elided per gofmt -s; schedule is relative to startup.
	events := []Event{
		{TimeToShow: now.Add(2 * time.Second), TimeEndShow: now.Add(4 * time.Second), Name: "First"},
		{TimeToShow: now.Add(6 * time.Second), TimeEndShow: now.Add(9 * time.Second), Name: "Second"},
		{TimeToShow: now.Add(10 * time.Second), TimeEndShow: now.Add(11 * time.Second), Name: "Third"},
		{TimeToShow: now.Add(15 * time.Second), TimeEndShow: now.Add(18 * time.Second), Name: "Fourth"},
		{TimeToShow: now.Add(20 * time.Second), TimeEndShow: now.Add(22 * time.Second), Name: "Last"},
	}
	ticker := time.NewTicker(time.Second)
	go func() {
		for t := range ticker.C {
			// Renamed from "now" — the original shadowed the outer schedule
			// base, which was confusing even though behavior was correct.
			loopStart := time.Now()
			if len(events) > 0 {
				// Truncate both sides to whole seconds so a tick landing
				// exactly on a boundary counts as inside the window.
				if t.Truncate(time.Second).Sub(events[0].TimeToShow.Truncate(time.Second)) >= 0 && t.Truncate(time.Second).Sub(events[0].TimeEndShow.Truncate(time.Second)) <= 0 {
					fmt.Println(events[0].Name)
				} else {
					fmt.Println("Time: ", t.Truncate(time.Second))
				}
				// Drop the head once its window has fully elapsed.
				if t.Truncate(time.Second).Sub(events[0].TimeEndShow.Truncate(time.Second)) >= 0 {
					events = events[1:]
				}
			} else {
				fmt.Println("Nothing")
			}
			// time.Since replaces the old time.Now().Sub(...) idiom.
			fmt.Println(time.Since(loopStart))
		}
	}()
	// Block forever; all work happens in the goroutines above.
	select {}
}
// Healthz is a liveness probe: it unconditionally answers "ok" with a JSON
// content type. The request and route params are unused.
func Healthz(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
	w.Header().Set("Content-Type", "application/json")
	fmt.Fprintln(w, "ok")
}
|
package main
import (
"golang/helper"
"testing"
)
// TestLetterCasePermutation checks letterCasePermutation against the
// classic LeetCode 784 examples, including the all-digit edge cases.
func TestLetterCasePermutation(t *testing.T) {
	cases := []struct {
		in   string
		want []string
	}{
		{"a1b2", []string{"a1b2", "a1B2", "A1b2", "A1B2"}},
		{"3z4", []string{"3z4", "3Z4"}},
		{"12345", []string{"12345"}},
		{"0", []string{"0"}},
	}
	for _, tc := range cases {
		helper.AssertStringArr(letterCasePermutation(tc.in), tc.want, t)
	}
}
|
package main
import (
"zhiyuan/scaffold/internal/server/http"
)
//var log = logrus.New()
//var isConnected bool
// main boots the HTTP server constructed by internal/server/http.
func main() {
	http.New()
}
|
package ca
import (
"crypto"
"crypto/rand"
"crypto/sha1"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/hex"
"encoding/json"
"errors"
"math/big"
"time"
"github.com/cloudflare/cfssl/certdb"
"github.com/cloudflare/cfssl/config"
"github.com/cloudflare/cfssl/csr"
"github.com/cloudflare/cfssl/helpers"
"github.com/cloudflare/cfssl/signer"
"github.com/cloudflare/cfssl/signer/local"
)
// StorageProvider persists CA state: small metadata blobs keyed by name
// (this package uses "csr", "cert", and "policy") plus a cfssl certdb
// accessor for issued/revoked certificates.
type StorageProvider interface {
	// GetMetadata returns the blob stored under key.
	GetMetadata(key []byte) ([]byte, error)
	// SetMetadata stores value under key, overwriting any previous value.
	SetMetadata(key, value []byte) error
	// Accessor exposes the certificate database.
	Accessor() certdb.Accessor
}
// KeyProvider generates and looks up the CA's asymmetric key pairs.
type KeyProvider interface {
	// GenerateKeyPair creates a new key pair under label with the given
	// algorithm and size, returning the private half as a crypto.Signer.
	GenerateKeyPair(label string, algo string, size int) (crypto.Signer, error)
	// FindKeyPair returns the signer whose public half matches key.
	FindKeyPair(key crypto.PublicKey) (crypto.Signer, error)
}
// CertificationAuthority represents a certification authority.
type CertificationAuthority struct {
	sp StorageProvider // persisted state: csr/cert/policy blobs + cert DB
	kp KeyProvider // holds the CA's private key material
	signer *local.Signer // cfssl signer; nil until initSigner succeeds
}
// Init creates a CA with given config: it opens (or creates) the backing
// store at caFile, generates a key pair and CSR, persists the signing
// policy, and — when cfg.SelfSign is set — issues a self-signed
// certificate for the CA itself.
func Init(cfg *Config, caFile string, kp KeyProvider) (*CertificationAuthority, error) {
	db, err := openDB(caFile)
	if err != nil {
		return nil, err
	}
	// Keyed literal (was positional {db, kp, nil}) so a future field
	// reordering cannot silently mis-assign these values.
	ca := &CertificationAuthority{sp: db, kp: kp}
	req := cfg.CertificateRequest()
	policy, err := cfg.Signing()
	if err != nil {
		return nil, err
	}
	if err := ca.init(req, policy); err != nil {
		return nil, err
	}
	if cfg.SelfSign {
		if err := ca.selfSign(); err != nil {
			return nil, err
		}
	}
	return ca, nil
}
// Open opens an existing CA stored in caFile and restores its signer from
// the persisted CSR, certificate, and policy.
func Open(caFile string, kp KeyProvider) (*CertificationAuthority, error) {
	db, err := openDB(caFile)
	if err != nil {
		return nil, err
	}
	// Keyed literal (was positional {db, kp, nil}) — robust against field
	// reordering in the struct definition.
	ca := &CertificationAuthority{sp: db, kp: kp}
	if err := ca.initSigner(); err != nil {
		return nil, err
	}
	return ca, nil
}
// init generates the CA's key pair, stores a CSR built from req, persists
// the signing policy, and brings up the signer. It refuses to run when a
// CSR already exists, making initialization one-shot.
func (ca *CertificationAuthority) init(req *csr.CertificateRequest, policy *config.Signing) error {
	if existing, _ := ca.CertificateRequestPEM(); existing != nil {
		return errors.New("ca csr exists")
	}
	// The key is labelled with the CA's common name.
	key, err := ca.kp.GenerateKeyPair(req.CN, req.KeyRequest.Algo(), req.KeyRequest.Size())
	if err != nil {
		return err
	}
	csrPEM, err := csr.Generate(key, req)
	if err != nil {
		return err
	}
	if err := ca.sp.SetMetadata([]byte("csr"), csrPEM); err != nil {
		return err
	}
	if err := ca.SetPolicy(policy); err != nil {
		return err
	}
	return ca.initSigner()
}
// initSigner initializes a new signer for the CA from its private key,
// certificate (if any), and stored policy.
//
// The certificate lookup error is deliberately ignored: before selfSign or
// ImportCertificate has run there is legitimately no certificate yet.
func (ca *CertificationAuthority) initSigner() error {
	key, err := ca.privateKey()
	if err != nil {
		return err
	}
	cert, _ := ca.Certificate() // may be nil; local.NewSigner accepts that
	policy, err := ca.Policy()
	if err != nil {
		return err
	}
	// Named "s" rather than "signer": the original shadowed the cfssl
	// "signer" package and only compiled because := scoping evaluates the
	// RHS before the new name exists.
	s, err := local.NewSigner(key, cert, signer.DefaultSigAlgo(key), policy)
	if err != nil {
		return err
	}
	s.SetDBAccessor(ca.sp.Accessor())
	ca.signer = s
	return nil
}
// Certificate returns the parsed CA certificate from the store.
func (ca *CertificationAuthority) Certificate() (*x509.Certificate, error) {
	raw, err := ca.CertificatePEM()
	if err != nil {
		return nil, err
	}
	return helpers.ParseCertificatePEM(raw)
}
// CertificatePEM returns the certificate of the CA in PEM encoding.
// (The comment previously named the wrong function, "Certificate".)
func (ca *CertificationAuthority) CertificatePEM() ([]byte, error) {
	return ca.sp.GetMetadata([]byte("cert"))
}
// selfSign creates a self-signed certificate for the CA by issuing against
// its own stored CSR. The Authority Key Identifier extension is marshalled
// by hand so that the AKI of the resulting certificate equals the CA's own
// key identifier.
func (ca *CertificationAuthority) selfSign() error {
	csrPEM, err := ca.CertificateRequestPEM()
	if err != nil {
		return err
	}
	keyID, err := ca.KeyID()
	if err != nil {
		return err
	}
	// AuthorityKeyIdentifier with only the keyIdentifier field (context tag 0).
	type authKeyId struct {
		KeyIdentifier []byte `asn1:"tag:0"`
	}
	aki, err := asn1.Marshal(authKeyId{keyID})
	if err != nil {
		return err
	}
	// id-ce-authorityKeyIdentifier
	oidAuthorityKeyId := config.OID([]int{2, 5, 29, 35})
	akiExt := signer.Extension{
		ID: oidAuthorityKeyId,
		Critical: false,
		Value: hex.EncodeToString(aki),
	}
	certPEM, err := ca.Issue(csrPEM, akiExt)
	if err != nil {
		return err
	}
	return ca.ImportCertificate(certPEM)
}
// ImportCertificate imports the given certificate if the CA does not
// already have one; the PEM must parse as a certificate. On success the
// signer is re-initialized with the new certificate.
func (ca *CertificationAuthority) ImportCertificate(certPEM []byte) error {
	if existing, _ := ca.Certificate(); existing != nil {
		return errors.New("ca cert exists")
	}
	if _, err := helpers.ParseCertificatePEM(certPEM); err != nil {
		return err
	}
	// TODO: Check signature and compare with original CSR.
	if err := ca.sp.SetMetadata([]byte("cert"), certPEM); err != nil {
		return err
	}
	return ca.initSigner()
}
// CertificateRequest returns the parsed certificate signing request of the CA.
func (ca *CertificationAuthority) CertificateRequest() (*x509.CertificateRequest, error) {
	raw, err := ca.CertificateRequestPEM()
	if err != nil {
		return nil, err
	}
	return helpers.ParseCSRPEM(raw)
}
// CertificateRequestPEM returns the certificate signing request of the
// CA in PEM encoding, as persisted under the "csr" metadata key by init.
func (ca *CertificationAuthority) CertificateRequestPEM() ([]byte, error) {
	return ca.sp.GetMetadata([]byte("csr"))
}
// PublicKey returns the public key from the CA certificate or, if none
// exists yet, from the CSR. The certificate takes precedence.
func (ca *CertificationAuthority) PublicKey() (crypto.PublicKey, error) {
	if cert, _ := ca.Certificate(); cert != nil {
		return cert.PublicKey, nil
	}
	request, _ := ca.CertificateRequest()
	if request == nil {
		return nil, errors.New("no valid csr in db")
	}
	return request.PublicKey, nil
}
// privateKey resolves the CA's signing key by asking the key provider for
// the pair whose public half matches the CA's public key.
func (ca *CertificationAuthority) privateKey() (crypto.Signer, error) {
	public, err := ca.PublicKey()
	if err != nil {
		return nil, err
	}
	return ca.kp.FindKeyPair(public)
}
// KeyID returns the identifier of the signing key, which will also be
// the Authority Key Identifier (AKI) for issued certificates.
//
// With a certificate present its SubjectKeyId is authoritative. Otherwise
// the ID is derived as the SHA-1 digest of the subjectPublicKey BIT STRING
// (the common RFC 5280 4.2.1.2 method (1) construction).
func (ca *CertificationAuthority) KeyID() ([]byte, error) {
	if cert, _ := ca.Certificate(); cert != nil {
		return cert.SubjectKeyId, nil
	}
	pub, err := ca.PublicKey()
	if err != nil {
		return nil, err
	}
	pkixPub, err := x509.MarshalPKIXPublicKey(pub)
	if err != nil {
		return nil, err
	}
	// Unwrap the SubjectPublicKeyInfo to reach the raw key bits.
	var pubKeyInfo struct {
		Algo pkix.AlgorithmIdentifier
		BitString asn1.BitString
	}
	if _, err := asn1.Unmarshal(pkixPub, &pubKeyInfo); err != nil {
		return nil, err
	}
	// sha1.Sum replaces the manual New/Write/Sum(nil) sequence.
	sum := sha1.Sum(pubKeyInfo.BitString.Bytes)
	return sum[:], nil
}
// Policy returns the signing policy of the CA, preferring the live
// signer's copy over the JSON persisted in the store.
func (ca *CertificationAuthority) Policy() (*config.Signing, error) {
	if ca.signer != nil {
		return ca.signer.Policy(), nil
	}
	raw, err := ca.sp.GetMetadata([]byte("policy"))
	if err != nil {
		return nil, err
	}
	// Decode through a pointer so JSON "null" yields a nil policy.
	var policy *config.Signing
	if err := json.Unmarshal(raw, &policy); err != nil {
		return nil, err
	}
	return policy, nil
}
// SetPolicy validates and persists the signing policy, and applies it to
// the live signer when one exists.
func (ca *CertificationAuthority) SetPolicy(policy *config.Signing) error {
	if !policy.Valid() {
		return errors.New("invalid policy")
	}
	raw, err := json.Marshal(policy)
	if err != nil {
		return err
	}
	if err := ca.sp.SetMetadata([]byte("policy"), raw); err != nil {
		return err
	}
	if ca.signer != nil {
		ca.signer.SetPolicy(policy)
	}
	return nil
}
// Issue signs a PEM-encoded CSR with the CA's signer, applying any extra
// extensions, and returns the resulting certificate in PEM.
func (ca *CertificationAuthority) Issue(csrPEM []byte, exts ...signer.Extension) ([]byte, error) {
	return ca.signer.Sign(signer.SignRequest{
		Request: string(csrPEM),
		Extensions: exts,
	})
}
// Revoke marks the certificate identified by its serial number revoked.
// The reasonCode is defined in RFC 5280 5.3.1. The cert DB keys
// revocations by (serial, hex-encoded AKI), so the CA's key ID is
// resolved first.
func (ca *CertificationAuthority) Revoke(serial string, reasonCode int) error {
	keyID, err := ca.KeyID()
	if err != nil {
		return err
	}
	return ca.sp.Accessor().RevokeCertificate(serial, hex.EncodeToString(keyID), reasonCode)
}
// CRL returns a DER-encoded Certificate Revocation List covering all
// revoked, unexpired certificates, signed by the CA and valid for ttl
// from now.
func (ca *CertificationAuthority) CRL(ttl time.Duration) ([]byte, error) {
	records, err := ca.sp.Accessor().GetRevokedAndUnexpiredCertificates()
	if err != nil {
		return nil, err
	}
	revoked := make([]pkix.RevokedCertificate, 0, len(records))
	for _, record := range records {
		serial := new(big.Int)
		// NOTE(review): SetString's ok result is ignored — a malformed
		// serial silently becomes an unset big.Int. Confirm serials are
		// always stored as base-10 strings.
		serial.SetString(record.Serial, 10)
		revoked = append(revoked, pkix.RevokedCertificate{
			SerialNumber: serial,
			RevocationTime: record.RevokedAt,
		})
	}
	cert, err := ca.Certificate()
	if err != nil {
		return nil, err
	}
	key, err := ca.privateKey()
	if err != nil {
		return nil, err
	}
	return cert.CreateCRL(rand.Reader, key, revoked, time.Now(), time.Now().Add(ttl))
}
|
package command
import (
"bufio"
"bytes"
"flag"
"fmt"
_ "github.com/codegangsta/cli"
"golang.org/x/text/unicode/norm"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
pt "path"
)
// PathSeparator is the OS-specific path separator as a string, convenient
// for building paths by plain concatenation.
const PathSeparator = string(filepath.Separator)

// ValidFilenameFunc reports whether a filename should be accepted.
type ValidFilenameFunc func(string) bool
// IsFile reports whether path exists and is a regular file.
func IsFile(path string) bool {
	info, err := os.Stat(path)
	if err != nil {
		return false
	}
	return info.Mode().IsRegular()
}
// IsDir reports whether path exists and is a directory.
func IsDir(path string) bool {
	info, err := os.Stat(path)
	if err != nil {
		return false
	}
	return info.IsDir()
}
// IdenticalFilenames compares two filenames after Unicode NFC
// normalization, so precomposed and decomposed spellings of the same name
// (common across macOS vs. other filesystems) compare equal.
func IdenticalFilenames(filename0, filename1 string) bool {
	a := norm.NFC.Bytes([]byte(filename0))
	b := norm.NFC.Bytes([]byte(filename1))
	return bytes.Equal(a, b)
}
// GetFileBasename returns the final path element with its extension
// stripped, e.g. "/tmp/foo.txt" -> "foo".
func GetFileBasename(path string) string {
	base := filepath.Base(path)
	ext := filepath.Ext(path)
	return strings.TrimSuffix(base, ext)
}
// GetDirContent lists path's immediate children, returning subdirectory
// names and regular-file names separately. A read error yields two empty
// slices (the error is intentionally discarded, matching the package's
// best-effort style).
func GetDirContent(path string) ([]string, []string) {
	entries, _ := ioutil.ReadDir(path)
	subdirs := make([]string, 0, len(entries))
	filenames := make([]string, 0, len(entries))
	for _, entry := range entries {
		switch {
		case entry.IsDir():
			subdirs = append(subdirs, entry.Name())
		case entry.Mode().IsRegular():
			filenames = append(filenames, entry.Name())
		}
	}
	return subdirs, filenames
}
// GetDirFilenames lists the regular files directly under path whose names
// are accepted by isValid. Read errors yield an empty slice.
func GetDirFilenames(path string, isValid ValidFilenameFunc) []string {
	entries, _ := ioutil.ReadDir(path)
	filenames := make([]string, 0, len(entries))
	for _, entry := range entries {
		if !entry.Mode().IsRegular() {
			continue
		}
		if isValid(entry.Name()) {
			filenames = append(filenames, entry.Name())
		}
	}
	return filenames
}
// GetDirRawFilenames lists every regular file directly under path, with no
// filtering. Read errors yield an empty slice.
func GetDirRawFilenames(path string) []string {
	entries, _ := ioutil.ReadDir(path)
	filenames := make([]string, 0, len(entries))
	for _, entry := range entries {
		if entry.Mode().IsRegular() {
			filenames = append(filenames, entry.Name())
		}
	}
	return filenames
}
// resolutionPattern matches names embedding a 4-digit by 4-digit
// resolution such as "1920x1080". Compiled once at package init instead of
// on every call.
var resolutionPattern = regexp.MustCompile("[[:digit:]]{4}x[[:digit:]]{4}")

// IsResolutionDir reports whether str names a resolution directory
// (contains e.g. "1920x1080"), excluding macOS "DS_Store" artifacts.
func IsResolutionDir(str string) bool {
	// strings.Contains replaces regexp.MatchString("DS_Store", str), whose
	// error was silently ignored; the semantics are identical.
	if strings.Contains(str, "DS_Store") {
		return false
	}
	return resolutionPattern.MatchString(str)
}
// GetDirSubDir returns the names of path's immediate subdirectories whose
// names contain a 4x4-digit resolution (e.g. "1920x1080"). Read errors
// yield an empty slice.
//
// Fixes: the regexp is now compiled once per call instead of once per
// directory entry (the old closure ran MustCompile on every invocation),
// and the discarded second make([]string, ...) allocation is gone.
func GetDirSubDir(path string) []string {
	entries, _ := ioutil.ReadDir(path) // error intentionally ignored: best-effort listing
	resPattern := regexp.MustCompile("[[:digit:]]{4}x[[:digit:]]{4}")
	subdirs := make([]string, 0, len(entries))
	for _, entry := range entries {
		if entry.IsDir() && resPattern.MatchString(entry.Name()) {
			subdirs = append(subdirs, entry.Name())
		}
	}
	return subdirs
}
// GetDirSubDirRoot returns cleaned slash-joined paths of path's immediate
// subdirectories. Read errors yield an empty slice.
//
// Fixes: the discarded second make([]string, ...) allocation is removed.
// The "path" package (pt) is kept — it preserves forward slashes in the
// output, which filepath.Join would localize on Windows.
func GetDirSubDirRoot(path string) []string {
	entries, _ := ioutil.ReadDir(path)
	subdirs := make([]string, 0, len(entries))
	for _, entry := range entries {
		if entry.IsDir() {
			subdirs = append(subdirs, pt.Clean(path+"/"+entry.Name()))
		}
	}
	return subdirs
}
// GetCurDirFilenames lists the working directory's regular files accepted
// by isValid.
// NOTE(review): the Getwd error is discarded — confirm callers tolerate an
// empty result when the working directory is unavailable.
func GetCurDirFilenames(isValid ValidFilenameFunc) []string {
	cwd, _ := os.Getwd()
	return GetDirFilenames(cwd, isValid)
}
// GetOutputFilename derives the output path for input:
//   - output empty: input with its extension replaced by ext
//   - output is an existing directory: input's basename placed there, with ext
//   - otherwise: output used as a filename stem, with ext appended
func GetOutputFilename(input, output, ext string) string {
	switch {
	case output == "":
		return strings.TrimSuffix(input, filepath.Ext(input)) + ext
	case IsDir(output):
		return output + PathSeparator + GetFileBasename(input) + ext
	default:
		return output + ext
	}
}
// GetInputOutput expands input into the list of files to process (current
// directory when empty, directory contents when a directory, otherwise the
// single named file) and pairs each with its computed output path.
func GetInputOutput(input, output string, isValid ValidFilenameFunc, ext string) ([]string, []string) {
	var inputs []string
	switch {
	case input == "":
		inputs = GetCurDirFilenames(isValid)
	case IsDir(input):
		inputs = GetDirFilenames(input, isValid)
	default:
		inputs = []string{input}
	}
	var outputs []string
	for _, in := range inputs {
		outputs = append(outputs, GetOutputFilename(in, output, ext))
	}
	return inputs, outputs
}
// GetDirsFromFlagSetArgs interprets the flag set's positional arguments as
// directories, returning them cleaned. With no arguments it falls back to
// the working directory. Non-directory arguments are skipped; the returned
// error describes the last one encountered, alongside whatever valid
// directories were collected.
func GetDirsFromFlagSetArgs(flags *flag.FlagSet) ([]string, error) {
	if flags.NArg() == 0 {
		wd, err := os.Getwd()
		return []string{wd}, err
	}
	dirs := make([]string, 0, flags.NArg())
	var lastErr error
	for _, arg := range flags.Args() {
		if !IsDir(arg) {
			lastErr = fmt.Errorf("%s is not a directory", arg)
			continue
		}
		dirs = append(dirs, filepath.Clean(arg))
	}
	return dirs, lastErr
}
// NewInOut builds an InOut holding the given input and output paths; call
// Open on the result to materialize the reader.
func NewInOut(input, output string) *InOut {
	return &InOut{In: input, Out: output}
}
// CreateDir ensures a directory named folder exists under the current
// working directory, prints its path, and returns the path with a trailing
// slash. It panics when the working directory cannot be determined or the
// directory cannot be created.
func CreateDir(folder string) string {
	wd, err := os.Getwd()
	if err != nil {
		panic("cannot determine working directory")
	}
	dir := wd + "/" + folder
	fmt.Println(dir)
	if _, err := os.Stat(dir); err == nil {
		// Already present — nothing to create.
		return dir + "/"
	}
	// Panic messages fixed ("acces" typo) and the else-after-return removed.
	if err := os.MkdirAll(dir, 0777); err != nil {
		panic("MkdirAll error: please verify your access rights")
	}
	return dir + "/"
}
// Open wires up inout.Reader: stdin when no input path is configured,
// otherwise the named file (whose handle is kept in inout.in). The output
// side is intentionally still a stub, preserved below as commented code.
func (inout *InOut) Open() error {
	if inout.In == "" {
		inout.Reader = bufio.NewReader(os.Stdin)
	} else {
		f, err := os.Open(inout.In)
		if err != nil {
			return err
		}
		inout.in = f
		inout.Reader = bufio.NewReader(f)
	}
	// Output handling not implemented yet:
	//os.MkdirAll("DefaulFolderConformation", 777)
	// if false != IsDir(inout.Out) {
	// 	os.MkdirAll(inout.Out, 777)
	// }
	return nil
}
|
package main
import (
"bytes"
"fmt"
"io"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
// TestDumpCollectionTo_Ok covers the happy path: three iterator batches of
// "data" are concatenated into the writer and no error is returned.
// NOTE: overlapping "Next" expectations are order-sensitive in testify —
// the Times(3) expectation must be registered before the terminal one.
func TestDumpCollectionTo_Ok(t *testing.T) {
	mockedMongoLib := new(mockMongoLib)
	mongoService := newMongoService("127.0.0.1:27010,127.0.0.2:27010", mockedMongoLib, nil, 0, 250*time.Millisecond, 15000000)
	stringWriter := bytes.NewBufferString("")
	mockedMongoSession := new(mockMongoSession)
	mockedMongoLib.On("DialWithTimeout", "127.0.0.1:27010,127.0.0.2:27010", 0*time.Millisecond).Return(mockedMongoSession, nil)
	mockedMongoSession.On("SetPrefetch", 1.0).Return()
	mockedMongoSession.On("Close").Return()
	mockedMongoIter := new(mockMongoIter)
	mockedMongoSession.On("SnapshotIter", "database1", "collection1", nil).Return(mockedMongoIter)
	// Three data batches, then exhaustion with no iterator error.
	mockedMongoIter.On("Next").Times(3).Return([]byte("data"), true)
	mockedMongoIter.On("Next").Return([]byte{}, false)
	mockedMongoIter.On("Err").Return(nil)
	err := mongoService.DumpCollectionTo("database1", "collection1", stringWriter)
	assert.NoError(t, err, "Error wasn't expected during dump.")
	assert.Equal(t, "datadatadata", stringWriter.String())
}
// TestDumpCollectionTo_SessionErr verifies that a dial failure is wrapped
// and surfaced.
// NOTE(review): the asserted message contains a typo ("Coulnd't") that
// mirrors the production error string — fix both together if ever changed.
func TestDumpCollectionTo_SessionErr(t *testing.T) {
	mockedMongoLib := new(mockMongoLib)
	mongoService := newMongoService("127.0.0.1:27010,127.0.0.2:27010", mockedMongoLib, nil, 0, 250*time.Millisecond, 15000000)
	stringWriter := bytes.NewBufferString("")
	mockedMongoLib.On("DialWithTimeout", "127.0.0.1:27010,127.0.0.2:27010", 0*time.Millisecond).Return(&labixSession{}, fmt.Errorf("oops"))
	err := mongoService.DumpCollectionTo("database1", "collection1", stringWriter)
	assert.Error(t, err, "Error was expected during dial.")
	assert.Equal(t, "Coulnd't dial mongo session: oops", err.Error())
}
// TestDumpCollectionTo_WriterErr forces the destination writer to overflow
// (capped at 11 bytes while the iterator keeps yielding 4-byte batches)
// and expects the write error to propagate.
func TestDumpCollectionTo_WriterErr(t *testing.T) {
	mockedMongoLib := new(mockMongoLib)
	mongoService := newMongoService("127.0.0.1:27010,127.0.0.2:27010", mockedMongoLib, nil, 0, 250*time.Millisecond, 15000000)
	cappedStringWriter := newCappedBuffer(make([]byte, 0, 4), 11)
	mockedMongoSession := new(mockMongoSession)
	mockedMongoLib.On("DialWithTimeout", "127.0.0.1:27010,127.0.0.2:27010", 0*time.Millisecond).Return(mockedMongoSession, nil)
	mockedMongoSession.On("SetPrefetch", 1.0).Return()
	mockedMongoSession.On("Close").Return()
	mockedMongoIter := new(mockMongoIter)
	mockedMongoSession.On("SnapshotIter", "database1", "collection1", nil).Return(mockedMongoIter)
	// Unbounded "data" batches: the capped writer must fail first.
	mockedMongoIter.On("Next").Return([]byte("data"), true)
	mockedMongoIter.On("Err").Return(nil)
	err := mongoService.DumpCollectionTo("database1", "collection1", cappedStringWriter)
	assert.Error(t, err, "Error expected during write.")
	assert.Equal(t, "buffer overflow", err.Error())
}
// TestDumpCollectionTo_IterationErr makes the iterator report an error
// after exhaustion and expects the dump to fail with nothing written.
func TestDumpCollectionTo_IterationErr(t *testing.T) {
	mockedMongoLib := new(mockMongoLib)
	mongoService := newMongoService("127.0.0.1:27010,127.0.0.2:27010", mockedMongoLib, nil, 0, 250*time.Millisecond, 15000000)
	stringWriter := bytes.NewBufferString("")
	mockedMongoSession := new(mockMongoSession)
	mockedMongoLib.On("DialWithTimeout", "127.0.0.1:27010,127.0.0.2:27010", 0*time.Millisecond).Return(mockedMongoSession, nil)
	mockedMongoSession.On("SetPrefetch", 1.0).Return()
	mockedMongoSession.On("Close").Return()
	mockedMongoIter := new(mockMongoIter)
	mockedMongoSession.On("SnapshotIter", "database1", "collection1", nil).Return(mockedMongoIter)
	mockedMongoIter.On("Next").Times(3).Return([]byte("data"), true)
	mockedMongoIter.On("Next").Return([]byte{}, false)
	// Err() is consulted after iteration; a non-nil result fails the dump.
	mockedMongoIter.On("Err").Return(fmt.Errorf("iteration error"))
	err := mongoService.DumpCollectionTo("database1", "collection1", stringWriter)
	assert.Error(t, err, "Error expected for iterator.")
	assert.Equal(t, "", stringWriter.String())
	assert.Equal(t, "Couldn't obtain iterator over collection=database1/collection1: iteration error", err.Error())
}
// TestRestoreCollectionFrom_Ok covers the restore happy path: the
// collection is cleared, three BSON documents are bulk-inserted, and the
// bulk is run without error. Inserted payloads are captured via the mock's
// Run callback and compared at the end.
func TestRestoreCollectionFrom_Ok(t *testing.T) {
	mockedMongoLib := new(mockMongoLib)
	mockedBsonService := new(mockBsonService)
	mongoService := newMongoService("127.0.0.1:27010,127.0.0.2:27010", mockedMongoLib, mockedBsonService, 0, 250*time.Millisecond, 15000000)
	mockedMongoSession := new(mockMongoSession)
	mockedMongoLib.On("DialWithTimeout", "127.0.0.1:27010,127.0.0.2:27010", 0*time.Millisecond).Return(mockedMongoSession, nil)
	mockedMongoSession.On("RemoveAll", "database1", "collection1", nil).Return(nil)
	mockedMongoSession.On("Close").Return()
	mockedMongoBulk := new(mockMongoBulk)
	mockedMongoSession.On("Bulk", "database1", "collection1").Return(mockedMongoBulk)
	// Three documents, then a nil slice signalling end-of-stream.
	mockedBsonService.On("ReadNextBSON", mock.MatchedBy(func(reader io.Reader) bool { return true })).Times(3).Return([]byte("bson"), nil)
	var end []byte
	mockedBsonService.On("ReadNextBSON", mock.MatchedBy(func(reader io.Reader) bool { return true })).Return(end, nil)
	insertedData := make([]byte, 0, 8)
	mockedMongoBulk.On("Insert", []byte("bson")).Return().Run(func(args mock.Arguments) {
		insertedData = append(insertedData, args.Get(0).([]byte)...)
	})
	mockedMongoBulk.On("Run").Return(nil)
	err := mongoService.RestoreCollectionFrom("database1", "collection1", strings.NewReader("nothing"))
	assert.NoError(t, err, "Error wasn't expected during restore.")
	assert.Equal(t, []byte("bsonbsonbson"), insertedData)
}
// TestRestoreCollectionFrom_DialErr verifies that a dial failure aborts the
// restore with a wrapped error.
func TestRestoreCollectionFrom_DialErr(t *testing.T) {
	mockedMongoLib := new(mockMongoLib)
	mockedBsonService := new(mockBsonService)
	mongoService := newMongoService("127.0.0.1:27010,127.0.0.2:27010", mockedMongoLib, mockedBsonService, 0, 250*time.Millisecond, 15000000)
	mockedMongoSession := new(mockMongoSession)
	mockedMongoLib.On("DialWithTimeout", "127.0.0.1:27010,127.0.0.2:27010", 0*time.Millisecond).Return(mockedMongoSession, fmt.Errorf("couldn't dial"))
	err := mongoService.RestoreCollectionFrom("database1", "collection1", strings.NewReader("nothing"))
	assert.Error(t, err, "Error was expected during restore.")
	assert.Equal(t, "error while dialing mongo session: couldn't dial", err.Error())
}
// TestRestoreCollectionFrom_ErrOnClean verifies that a failure clearing the
// target collection (RemoveAll) aborts the restore with a wrapped error.
func TestRestoreCollectionFrom_ErrOnClean(t *testing.T) {
	mockedMongoLib := new(mockMongoLib)
	mockedBsonService := new(mockBsonService)
	mongoService := newMongoService("127.0.0.1:27010,127.0.0.2:27010", mockedMongoLib, mockedBsonService, 0, 250*time.Millisecond, 15000000)
	mockedMongoSession := new(mockMongoSession)
	mockedMongoLib.On("DialWithTimeout", "127.0.0.1:27010,127.0.0.2:27010", 0*time.Millisecond).Return(mockedMongoSession, nil)
	mockedMongoSession.On("RemoveAll", "database1", "collection1", nil).Return(fmt.Errorf("couldn't clean"))
	mockedMongoSession.On("Close").Return()
	err := mongoService.RestoreCollectionFrom("database1", "collection1", strings.NewReader("nothing"))
	assert.Error(t, err, "Error was expected during restore.")
	assert.Equal(t, "error while clearing collection=database1/collection1: couldn't clean", err.Error())
}
// TestRestoreCollectionFrom_ErrOnRead makes the BSON reader fail after
// three successful documents and expects the read error to be wrapped and
// returned.
func TestRestoreCollectionFrom_ErrOnRead(t *testing.T) {
	mockedMongoLib := new(mockMongoLib)
	mockedBsonService := new(mockBsonService)
	mongoService := newMongoService("127.0.0.1:27010,127.0.0.2:27010", mockedMongoLib, mockedBsonService, 0, 250*time.Millisecond, 15000000)
	mockedMongoSession := new(mockMongoSession)
	mockedMongoLib.On("DialWithTimeout", "127.0.0.1:27010,127.0.0.2:27010", 0*time.Millisecond).Return(mockedMongoSession, nil)
	mockedMongoSession.On("RemoveAll", "database1", "collection1", nil).Return(nil)
	mockedMongoSession.On("Close").Return()
	mockedMongoBulk := new(mockMongoBulk)
	mockedMongoSession.On("Bulk", "database1", "collection1").Return(mockedMongoBulk)
	mockedBsonService.On("ReadNextBSON", mock.MatchedBy(func(reader io.Reader) bool { return true })).Times(3).Return([]byte("bson"), nil)
	var end []byte
	// Fourth read fails instead of signalling clean end-of-stream.
	mockedBsonService.On("ReadNextBSON", mock.MatchedBy(func(reader io.Reader) bool { return true })).Return(end, fmt.Errorf("error on read from unit test"))
	mockedMongoBulk.On("Insert", []byte("bson")).Return()
	mockedMongoBulk.On("Run").Return(nil)
	err := mongoService.RestoreCollectionFrom("database1", "collection1", strings.NewReader("nothing"))
	assert.Error(t, err, "Error was expected during restore.")
	assert.Equal(t, "error while reading bson: error on read from unit test", err.Error())
}
// TestRestoreCollectionFrom_ErrorOnWrite makes the final bulk Run fail and
// expects the restore to report the write error.
func TestRestoreCollectionFrom_ErrorOnWrite(t *testing.T) {
	mockedMongoLib := new(mockMongoLib)
	mockedBsonService := new(mockBsonService)
	mongoService := newMongoService("127.0.0.1:27010,127.0.0.2:27010", mockedMongoLib, mockedBsonService, 0, 250*time.Millisecond, 15000000)
	mockedMongoSession := new(mockMongoSession)
	mockedMongoLib.On("DialWithTimeout", "127.0.0.1:27010,127.0.0.2:27010", 0*time.Millisecond).Return(mockedMongoSession, nil)
	mockedMongoSession.On("RemoveAll", "database1", "collection1", nil).Return(nil)
	mockedMongoSession.On("Close").Return()
	mockedMongoBulk := new(mockMongoBulk)
	mockedMongoSession.On("Bulk", "database1", "collection1").Return(mockedMongoBulk)
	mockedBsonService.On("ReadNextBSON", mock.MatchedBy(func(reader io.Reader) bool { return true })).Times(3).Return([]byte("bson"), nil)
	var end []byte
	mockedBsonService.On("ReadNextBSON", mock.MatchedBy(func(reader io.Reader) bool { return true })).Return(end, nil)
	mockedMongoBulk.On("Insert", []byte("bson")).Return()
	mockedMongoBulk.On("Run").Return(fmt.Errorf("error writing to db from test"))
	err := mongoService.RestoreCollectionFrom("database1", "collection1", strings.NewReader("nothing"))
	assert.Error(t, err, "error writing to db from test")
}
// TestRestoreCollectionFrom_ErrorAfterOneBulkBatching feeds enough 10 KB
// documents (1500 x 10000 bytes = 15 MB, matching the 15000000 batch
// limit) to force one successful intermediate bulk flush, then fails the
// second Run and expects that error to surface.
func TestRestoreCollectionFrom_ErrorAfterOneBulkBatching(t *testing.T) {
	mockedMongoLib := new(mockMongoLib)
	mockedBsonService := new(mockBsonService)
	mongoService := newMongoService("127.0.0.1:27010,127.0.0.2:27010", mockedMongoLib, mockedBsonService, 0, 250*time.Millisecond, 15000000)
	mockedMongoSession := new(mockMongoSession)
	mockedMongoLib.On("DialWithTimeout", "127.0.0.1:27010,127.0.0.2:27010", 0*time.Millisecond).Return(mockedMongoSession, nil)
	mockedMongoSession.On("RemoveAll", "database1", "collection1", nil).Return(nil)
	mockedMongoSession.On("Close").Return()
	mockedMongoBulk := new(mockMongoBulk)
	mockedMongoSession.On("Bulk", "database1", "collection1").Return(mockedMongoBulk)
	// A 10000-byte zero-filled document reused for every batched read.
	b := make([]byte, 0, 10000)
	for i := 0; i < 10000; i++ {
		b = append(b, 0)
	}
	mockedBsonService.On("ReadNextBSON", mock.MatchedBy(func(reader io.Reader) bool { return true })).Times(1500).Return(b, nil)
	mockedBsonService.On("ReadNextBSON", mock.MatchedBy(func(reader io.Reader) bool { return true })).Times(1).Return([]byte{1}, nil)
	var end []byte
	mockedBsonService.On("ReadNextBSON", mock.MatchedBy(func(reader io.Reader) bool { return true })).Return(end, nil)
	mockedMongoBulk.On("Insert", b).Return()
	mockedMongoBulk.On("Insert", []byte{1}).Return()
	// First flush succeeds; the final one fails.
	mockedMongoBulk.On("Run").Times(1).Return(nil)
	mockedMongoBulk.On("Run").Return(fmt.Errorf("error writing to db from test"))
	err := mongoService.RestoreCollectionFrom("database1", "collection1", strings.NewReader("nothing"))
	assert.Error(t, err, "error writing to db from test")
}
|
package common
// Permission levels used for access control.
const (
	PermissionVisitor = 0 // read-only access
	PermissionAdmin = 1 // full administrative access
)
|
package main
import (
"agenda"
"os"
log "util/logger"
cmd "github.com/Binly42/agenda-go/cmd"
)
// var logln = util.Log
// var logf = util.Logf
// main loads persisted agenda state, dispatches the CLI, and saves state
// before exiting — including on command failure. The previous version used
// `defer agenda.SaveAll()`, which os.Exit(1) silently skipped on the error
// path (defers do not run across os.Exit); SaveAll is now called
// explicitly before deciding the exit status. The empty init() was removed.
func main() {
	agenda.LoadAll()
	err := cmd.RootCmd.Execute()
	agenda.SaveAll()
	if err != nil {
		log.Println(err)
		os.Exit(1) // FIXME: consider distinct exit codes per failure kind
	}
}
|
package pxf_test
import (
"errors"
"github.com/greenplum-db/gp-common-go-libs/operating"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"os"
"pxf-cli/pxf"
)
// Specs for pxf.RemoteCommandToRunOnSegments: the command string built for
// each pxf operation (Init/Start/Stop/Sync) and how missing or blank
// GPHOME / PXF_CONF environment variables are reported.
var _ = Describe("RemoteCommandToRunOnSegments", func() {
	// Each spec starts from a known-good environment.
	BeforeEach(func() {
		_ = os.Setenv("GPHOME", "/test/gphome")
		_ = os.Setenv("PXF_CONF", "/test/gphome/pxf_conf")
	})
	It("Is successful when GPHOME and PXF_CONF are set and init is called", func() {
		command, err := pxf.RemoteCommandToRunOnSegments(pxf.Init)
		Expect(err).To(BeNil())
		// Init is the only operation that prefixes PXF_CONF.
		expected := "PXF_CONF=/test/gphome/pxf_conf /test/gphome/pxf/bin/pxf init"
		Expect(command).To(Equal(expected))
	})
	It("Is successful when GPHOME is set and start/stop are called", func() {
		command, err := pxf.RemoteCommandToRunOnSegments(pxf.Start)
		Expect(err).To(BeNil())
		Expect(command).To(Equal("/test/gphome/pxf/bin/pxf start"))
		command, err = pxf.RemoteCommandToRunOnSegments(pxf.Stop)
		Expect(err).To(BeNil())
		Expect(command).To(Equal("/test/gphome/pxf/bin/pxf stop"))
	})
	It("Fails to init when PXF_CONF is not set", func() {
		_ = os.Unsetenv("PXF_CONF")
		command, err := pxf.RemoteCommandToRunOnSegments(pxf.Init)
		Expect(command).To(Equal(""))
		Expect(err).To(Equal(errors.New("PXF_CONF must be set")))
	})
	It("Fails to init when PXF_CONF is blank", func() {
		// Set-but-empty is reported differently from unset.
		_ = os.Setenv("PXF_CONF", "")
		command, err := pxf.RemoteCommandToRunOnSegments(pxf.Init)
		Expect(command).To(Equal(""))
		Expect(err).To(Equal(errors.New("PXF_CONF cannot be blank")))
	})
	It("Fails to init, start, sync, or stop when GPHOME is not set", func() {
		_ = os.Unsetenv("GPHOME")
		command, err := pxf.RemoteCommandToRunOnSegments(pxf.Init)
		Expect(command).To(Equal(""))
		Expect(err).To(Equal(errors.New("GPHOME must be set")))
		command, err = pxf.RemoteCommandToRunOnSegments(pxf.Start)
		Expect(command).To(Equal(""))
		Expect(err).To(Equal(errors.New("GPHOME must be set")))
		command, err = pxf.RemoteCommandToRunOnSegments(pxf.Sync)
		Expect(command).To(Equal(""))
		Expect(err).To(Equal(errors.New("GPHOME must be set")))
		command, err = pxf.RemoteCommandToRunOnSegments(pxf.Stop)
		Expect(command).To(Equal(""))
		Expect(err).To(Equal(errors.New("GPHOME must be set")))
	})
	It("Fails to init, start, sync, or stop when GPHOME is blank", func() {
		_ = os.Setenv("GPHOME", "")
		command, err := pxf.RemoteCommandToRunOnSegments(pxf.Init)
		Expect(command).To(Equal(""))
		Expect(err).To(Equal(errors.New("GPHOME cannot be blank")))
		command, err = pxf.RemoteCommandToRunOnSegments(pxf.Start)
		Expect(command).To(Equal(""))
		Expect(err).To(Equal(errors.New("GPHOME cannot be blank")))
		command, err = pxf.RemoteCommandToRunOnSegments(pxf.Sync)
		Expect(command).To(Equal(""))
		Expect(err).To(Equal(errors.New("GPHOME cannot be blank")))
		command, err = pxf.RemoteCommandToRunOnSegments(pxf.Stop)
		Expect(command).To(Equal(""))
		Expect(err).To(Equal(errors.New("GPHOME cannot be blank")))
	})
	It("Appends the master hostname when syncing", func() {
		// Stub the hostname lookup so the suffix is deterministic.
		operating.System.Hostname = func() (string, error) {
			return "fake-host", nil
		}
		command, err := pxf.RemoteCommandToRunOnSegments(pxf.Sync)
		Expect(err).To(BeNil())
		Expect(command).To(Equal("/test/gphome/pxf/bin/pxf sync fake-host"))
	})
})
|
package tests
import (
"testing"
"time"
"github.com/GoAdminGroup/go-admin/modules/config"
"github.com/GoAdminGroup/go-admin/tests/frameworks/gin"
)
func TestBlackBoxTestSuitOfBuiltInTables(t *testing.T) {
BlackBoxTestSuitOfBuiltInTables(t, gin.NewHandler, config.DatabaseList{
"default": {
Host: "127.0.0.1",
Port: "3306",
User: "root",
Pwd: "root",
Name: "go-admin-test",
MaxIdleConns: 50,
MaxOpenConns: 150,
ConnMaxLifetime: time.Hour,
ConnMaxIdleTime: 0,
Driver: config.DriverMysql,
},
})
}
|
package tea
// Encrypt applies the TEA (Tiny Encryption Algorithm) block cipher: 32 rounds
// over the 64-bit plaintext block (v0, v1) using the 128-bit key
// (k0, k1, k2, k3). It returns the two halves of the ciphertext block.
func Encrypt(v0, v1, k0, k1, k2, k3 uint32) (uint32, uint32) {
	var sum uint32 = 0x0
	const delta uint32 = 0x9e3779b9 // key schedule constant (2^32 / golden ratio)
	for i := 0; i < 32; i++ {
		sum += delta
		v0 += ((v1 << 4) + k0) ^ (v1 + sum) ^ ((v1 >> 5) + k1)
		v1 += ((v0 << 4) + k2) ^ (v0 + sum) ^ ((v0 >> 5) + k3)
	}
	return v0, v1
}

// Decrypt inverts Encrypt: it runs the 32 TEA rounds in reverse, starting
// from the final key-schedule sum 0xC6EF3720 (delta * 32), and returns the
// recovered plaintext block halves. This replaces the C reference code that
// was previously left here as a comment.
func Decrypt(v0, v1, k0, k1, k2, k3 uint32) (uint32, uint32) {
	var sum uint32 = 0xC6EF3720 // value sum reaches after 32 encryption rounds
	const delta uint32 = 0x9e3779b9
	for i := 0; i < 32; i++ {
		v1 -= ((v0 << 4) + k2) ^ (v0 + sum) ^ ((v0 >> 5) + k3)
		v0 -= ((v1 << 4) + k0) ^ (v1 + sum) ^ ((v1 >> 5) + k1)
		sum -= delta
	}
	return v0, v1
}
|
package file
import "os"
// exist reports whether the file or directory at path exists. A missing path
// yields (false, nil); any other stat failure is returned as the error.
func exist(path string) (bool, error) {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}
	return true, nil
}
|
package main
import (
"fmt"
"math/rand"
"time"
)
// f prints its tag n alongside a counter three times, pausing for a random
// interval (0-249 ms) between prints so concurrent callers interleave.
func f(n int) {
	for count := 0; count < 3; count++ {
		fmt.Println(n, ":", count)
		pause := time.Millisecond * time.Duration(rand.Intn(250))
		time.Sleep(pause)
	}
}
// Go supports concurrency.
// main demonstrates goroutines, buffered vs. unbuffered channels, and select.
// It blocks at the end on Scanln, so it runs until the user presses Enter.
func main() {
	for i := 0; i < 3; i++ {
		go f(i)
		// The `go` keyword forces the compiler to move on, whether
		// f(0) has returned yet or not. It's a bit like `async`
		// in Swift. Both line 21 and our fave, main(), are examples
		// of Goroutines.
	}
	c1 := make(chan string)    // Synchronous unbuffered channel
	c2 := make(chan string, 2) // Asynch buffered channel
	// From https://golang.org/pkg/builtin/#make
	//
	// The `make` built-in function allocates and initializes an object of type
	// slice, map, or chan (only). Like `new`, the first argument is a type, not a
	// value. Unlike `new`, `make's` return type is the same as the type of its
	// argument, not a pointer to it.
	// Producer goroutines: each pushes a fixed message on its own cadence.
	go func() {
		for {
			c1 <- "from 1"
			time.Sleep(time.Second * 2)
		}
	}()
	go func() {
		for {
			c2 <- "from 2"
			time.Sleep(time.Second * 3)
		}
	}()
	// Go also has a `select` keyword that allows you to hop channels
	// in an intelligent way. Channels are like named threads. See the
	// functions outside of `main()` for more on channel syntax.
	// Consumer goroutine: prints whichever channel is ready first.
	go func() {
		for {
			select {
			case msg1 := <-c1:
				fmt.Println(msg1)
			case msg2 := <-c2:
				fmt.Println(msg2)
			}
		}
	}()
	var input string
	fmt.Scanln(&input)
	// The call to Scanln is required to keep the script from finishing
	// before the goroutine on line 21 has returned anything.
}
// Go has channels, which allow 2 goroutines to sync up.
// The functions signature below contain a channel named `c`, which
// transmits strings.
// pinger endlessly sends the string "ping" over c, blocking on each send
// until a receiver is ready.
func pinger(c chan string) {
	for {
		c <- "ping"
	}
}
// printer forever receives a message from c, prints it, then idles for one
// second before waiting on the next message.
func printer(c chan string) {
	for {
		fmt.Println(<-c)
		time.Sleep(time.Second * 1)
	}
}
/*
From https://www.golang-book.com/books/intro/10
Using a channel like this synchronizes the two goroutines. When pinger
attempts to send a message on the channel it will wait until printer is
ready to receive the message. (this is known as blocking)
*/
// ponger endlessly sends "pong" on d. The chan<- direction in the signature
// means this function may only send on the channel, never receive.
func ponger(d chan<- string) {
	for {
		d <- "pong"
	}
}
// pronter forever receives from d and prints each message, sleeping one
// second between iterations. The <-chan direction means it may only receive.
func pronter(d <-chan string) {
	for {
		fmt.Println(<-d)
		time.Sleep(time.Second * 1)
	}
}
|
package main
import (
"flag"
"fmt"
"io/ioutil"
"strings"
)
// main reads the problem file named by the first command-line argument,
// parses it into test cases (one per non-empty line), and prints the
// solution for each case on its own line.
func main() {
	flag.Parse()
	// Previously flag.Args()[0] would panic when no argument was given,
	// and the ReadFile error was silently discarded.
	if flag.NArg() < 1 {
		fmt.Println("usage: program <input-file>")
		return
	}
	fileBytes, err := ioutil.ReadFile(flag.Arg(0))
	if err != nil {
		fmt.Println("reading input:", err)
		return
	}
	for _, testCase := range parseProblem(string(fileBytes)) {
		fmt.Println(solve(testCase))
	}
}
// parseProblem splits the raw input into one test case per non-empty line,
// where each line holds semicolon-separated segments.
func parseProblem(input string) [][]string {
	nonEmpty := removeEmptyLines(strings.Split(input, "\n"))
	cases := make([][]string, 0, len(nonEmpty))
	for _, line := range nonEmpty {
		cases = append(cases, strings.Split(line, ";"))
	}
	return cases
}
// removeEmptyLines returns a copy of lines with all zero-length entries dropped.
func removeEmptyLines(lines []string) []string {
	kept := make([]string, 0, len(lines))
	for _, l := range lines {
		if l != "" {
			kept = append(kept, l)
		}
	}
	return kept
}
// solve greedily merges segments until a single string remains: each pass
// finds the pair with the longest overlap (via largestOverlap), replaces that
// pair with their merged form, and repeats. It returns the final combined
// string; an empty input now yields "" instead of panicking.
func solve(segments []string) string {
	// Guard: segments[0] below would panic on an empty slice.
	if len(segments) == 0 {
		return ""
	}
	for len(segments) > 1 {
		maxCombo, maxLength := "", 0
		iMax, jMax := 0, 0
		for i := 0; i < len(segments)-1; i++ {
			for j := i + 1; j < len(segments); j++ {
				combo, length := largestOverlap(segments[i], segments[j])
				if length > maxLength {
					maxCombo, maxLength = combo, length
					iMax, jMax = i, j
				}
			}
		}
		// NOTE(review): if no pair overlaps at all, iMax == jMax == 0 and
		// this replaces segments[0] with "" — confirm the problem input
		// guarantees every case chains together.
		segments = remove(segments, iMax, jMax)
		segments = append(segments, maxCombo)
	}
	return segments[0]
}
// largestOverlap slides s2 across every offset i relative to s1 and checks
// whether the overlapping regions match. It returns the merged string with
// the longest matching overlap and that overlap's length ("" and 0 when no
// offset matches).
func largestOverlap(s1, s2 string) (combination string, length int) {
	// Offsets run from s2 hanging off the left edge of s1 with a single
	// overlapping character (i = -len(s2)+1) to s2 hanging off the right
	// edge with a single overlapping character (i = len(s1)-1). The previous
	// bound `i < len(s1)-1` skipped that last right-edge case, so a
	// one-character suffix/prefix overlap was never found.
	for i := -len(s2) + 1; i < len(s1); i++ {
		// Overlapping window within s1 (bounds inlined; was intMax/intMin).
		lowerBound := 0
		if i > 0 {
			lowerBound = i
		}
		upperBound := len(s1)
		if len(s2)+i < upperBound {
			upperBound = len(s2) + i
		}
		if s1[lowerBound:upperBound] == s2[lowerBound-i:upperBound-i] &&
			upperBound-lowerBound > length {
			length = upperBound - lowerBound
			combination = ""
			// Leading part contributed by whichever string starts first.
			if i < 0 {
				combination += s2[:-i]
			} else if i > 0 {
				combination += s1[:i]
			}
			combination += s1[lowerBound:upperBound]
			// Trailing part contributed by whichever string ends last.
			if i+len(s2) > len(s1) {
				combination += s2[len(s1)-i:]
			} else if i+len(s2) < len(s1) {
				combination += s1[len(s2)+i:]
			}
		}
	}
	return
}
// remove returns a new slice holding every element of input whose position
// is not listed in index.
func remove(input []string, index ...int) []string {
	drop := make(map[int]struct{}, len(index))
	for _, idx := range index {
		drop[idx] = struct{}{}
	}
	kept := make([]string, 0, len(input)-len(index))
	for pos, val := range input {
		if _, skip := drop[pos]; !skip {
			kept = append(kept, val)
		}
	}
	return kept
}
// contains reports whether element occurs anywhere in input.
func contains(input []int, element int) bool {
	for i := range input {
		if input[i] == element {
			return true
		}
	}
	return false
}
// intMax returns the larger of i1 and i2.
func intMax(i1, i2 int) int {
	if i2 > i1 {
		return i2
	}
	return i1
}
// intMin returns the smaller of i1 and i2.
func intMin(i1, i2 int) int {
	if i2 < i1 {
		return i2
	}
	return i1
}
|
package src
import (
"fmt"
"time"
)
// Publisher implements the subject side of an observer pattern: it keeps a
// list of subscriber callbacks and invokes each one when notified.
type Publisher struct {
	subscriptions []func(string) // registered callbacks, in subscription order
}
// Start simulates a long-running job by sleeping for five seconds, then
// tells all subscribers (via end) that the work is finished.
func (p *Publisher) Start() {
	fmt.Println("Publisher: Wasting time...")
	const workDuration = time.Second * 5
	time.Sleep(workDuration)
	p.end()
}
// end announces completion by broadcasting the "Done" event to every subscriber.
func (p *Publisher) end() {
	fmt.Println("Publisher: Notifying subscribers that I am done wasting time...")
	p.notify("Done")
}
// notify invokes every registered subscriber callback with the payload d,
// in subscription order.
func (p *Publisher) notify(d string) {
	for i := range p.subscriptions {
		p.subscriptions[i](d)
	}
}
// Subscribe registers f as a subscriber callback and returns its index,
// which can later be passed to Unsubscribe.
func (p *Publisher) Subscribe(f func(string)) int {
	fmt.Println("Publisher: Subscribing a subscriber...")
	idx := len(p.subscriptions)
	p.subscriptions = append(p.subscriptions, f)
	return idx
}
// Unsubscribe removes the subscriber registered at index i. Removal shifts
// later subscribers down, so indices returned earlier by Subscribe for
// later entries become stale.
func (p *Publisher) Unsubscribe(i int) {
	fmt.Println("Publisher: Unsubscribing subscriber...")
	remaining := p.subscriptions[:i]
	remaining = append(remaining, p.subscriptions[i+1:]...)
	p.subscriptions = remaining
}
|
package pbkdf2_test
import (
"testing"
"github.com/stretchr/testify/assert"
"cpl.li/go/cryptor/internal/crypt/pbkdf2"
"cpl.li/go/cryptor/internal/crypt/ppk"
)
// TestPBKDF2 derives a private key from the package-level test password and
// salt, then checks the key length, key bytes, and matching public key
// against known-good hex values. It also verifies that a nil salt falls back
// to the package's default salt (same key as the explicit salt).
func TestPBKDF2(t *testing.T) {
	t.Parallel()
	const (
		expected    = "28df0b93627d5b50ed4fef574e774a00ac634cbd3395d0a57e769581e806f82f"
		expectedPub = "a5f686a01f0327c2a1bce2d2ae01c4174d1637fd31a5a065d0b235ea37cc3d74"
	)
	var (
		key ppk.PrivateKey
		pub ppk.PublicKey
	)
	key = pbkdf2.Key([]byte(password), []byte(salt))
	key.PublicKey(&pub)
	assert.Equal(t, len(key), ppk.KeySize, "invalid derived key length")
	assert.Equal(t, key.ToHex(), expected, "derived key does not match")
	assert.Equal(t, pub.ToHex(), expectedPub,
		"derived key public does not match")
	dKey := pbkdf2.Key([]byte(password), nil)
	if !key.Equals(dKey) {
		t.Fatal("default salt failed, keys don't match")
	}
}
|
package writers
import (
"fmt"
"io"
"strings"
"text/template"
"github.com/rightscale/rsc/gen"
)
// ClientWriter struct exposes methods to generate the go API client code.
type ClientWriter struct {
	headerTmpl   *template.Template // renders the file header (package, imports, Href helpers)
	resourceTmpl *template.Template // renders one resource: struct, locator, and action methods
}
// NewClientWriter is the client writer factory. It parses the header and
// resource code templates with the shared helper function map and returns a
// writer ready to generate client code, or the first template parse error.
func NewClientWriter() (*ClientWriter, error) {
	funcs := template.FuncMap{
		"comment":           comment,
		"commandLine":       commandLine,
		"parameters":        parameters,
		"paramsInitializer": paramsInitializer,
		"blankCondition":    blankCondition,
		"stripStar":         stripStar,
	}
	header, err := template.New("header-client").Funcs(funcs).Parse(headerTmpl)
	if err != nil {
		return nil, err
	}
	resource, err := template.New("resource-client").Funcs(funcs).Parse(resourceTmpl)
	if err != nil {
		return nil, err
	}
	w := &ClientWriter{headerTmpl: header, resourceTmpl: resource}
	return w, nil
}
// WriteHeader writes the generated file's header to w: package clause,
// conditional imports (json/time), API version constant, and Href helpers.
func (c *ClientWriter) WriteHeader(pkg, version string, needTime, needJSON bool, w io.Writer) error {
	data := map[string]interface{}{
		"Pkg":        pkg,
		"APIVersion": version,
		"NeedTime":   needTime,
		"NeedJSON":   needJSON,
	}
	return c.headerTmpl.Execute(w, data)
}
// WriteResourceHeader writes a banner comment naming the resource section.
func (c *ClientWriter) WriteResourceHeader(name string, w io.Writer) {
	header := fmt.Sprintf("/****** %s ******/\n\n", name)
	io.WriteString(w, header)
}
// WriteTypeSectionHeader writes the separator banner between the resource
// definitions and the data type declarations.
func (c *ClientWriter) WriteTypeSectionHeader(w io.Writer) {
	io.WriteString(w, "\n/****** Data Types ******/\n\n\n")
}
// WriteType writes the Go struct declaration for a resource action's
// arguments object to w.
func (c *ClientWriter) WriteType(o *gen.ObjectDataType, w io.Writer) {
	fields := make([]string, len(o.Fields))
	for i, f := range o.Fields {
		// Exported field name, its Go type signature, and a JSON tag that
		// round-trips the wire name while omitting empty values.
		fields[i] = fmt.Sprintf("%s %s `json:\"%s,omitempty\"`",
			strings.Title(f.VarName), f.Signature(), f.Name)
	}
	fmt.Fprintf(w, "type %s struct {\n%s\n}\n\n", o.TypeName,
		strings.Join(fields, "\n\t"))
}
// WriteResource writes the generated client code for a single resource by
// executing the resource template against it.
func (c *ClientWriter) WriteResource(resource *gen.Resource, w io.Writer) error {
	return c.resourceTmpl.Execute(w, resource)
}
// Inline templates

// headerTmpl renders the top of each generated client file: the
// generated-code banner, the package clause, imports (encoding/json and time
// included conditionally), the APIVersion constant, and the Href type with
// its ActionPath helper.
const headerTmpl = `
//************************************************************************//
// RightScale API client
//
// Generated with:
{{comment commandLine}}
//
// The content of this file is auto-generated, DO NOT MODIFY
//************************************************************************//
package {{.Pkg}}
import (
{{if .NeedJSON}}"encoding/json"
{{end}}"fmt"
"io/ioutil"
{{if .NeedTime}}"time"
{{end}}
"github.com/rightscale/rsc/metadata"
"github.com/rightscale/rsc/rsapi"
)
// API Version
const APIVersion = "{{.APIVersion}}"
// An Href contains the relative path to a resource or resource collection,
// e.g. "/api/servers/123" or "/api/servers".
type Href string
// ActionPath computes the path to the given resource action. For example given the href
// "/api/servers/123" calling ActionPath with resource "servers" and action "clone" returns the path
// "/api/servers/123/clone" and verb POST.
// The algorithm consists of extracting the variables from the href by looking up a matching
// pattern from the resource metadata. The variables are then substituted in the action path.
// If there are more than one pattern that match the href then the algorithm picks the one that can
// substitute the most variables.
func (r *Href) ActionPath(rName, aName string) (*metadata.ActionPath, error) {
res, ok := GenMetadata[rName]
if !ok {
return nil, fmt.Errorf("No resource with name '%s'", rName)
}
var action *metadata.Action
for _, a := range res.Actions {
if a.Name == aName {
action = a
break
}
}
if action == nil {
return nil, fmt.Errorf("No action with name '%s' on %s", aName, rName)
}
vars, err := res.ExtractVariables(string(*r))
if err != nil {
return nil, err
}
return action.URL(vars)
}
`
// resourceTmpl renders one resource: its struct definition, an optional
// Locator constructor, the locator type, and one method per action (method
// bodies come from actionBodyTmpl via the embedded "ActionBody" template).
const resourceTmpl = `{{$resource := .}}{{define "ActionBody"}}` + actionBodyTmpl + `{{end}}
{{comment .Description}}
type {{.Name}} struct { {{range .Attributes}}
{{.FieldName}} {{.FieldType}} ` + "`" + `json:"{{.Name}},omitempty"` + "`" + `{{end}}
}
{{if .LocatorFunc}}
// Locator returns a locator for the given resource
func (r *{{.Name}}) Locator(api *API) *{{.Name}}Locator {
{{.LocatorFunc}}
}
{{end}}
{{if .Actions}}
//===== Locator
// {{.Name}}Locator exposes the {{.Name}} resource actions.
type {{.Name}}Locator struct {
Href
api *{{.ClientName}}
}
// {{.Name}}Locator builds a locator from the given href.
func (api *{{.ClientName}}) {{.Name}}Locator(href string) *{{.Name}}Locator {
return &{{.Name}}Locator{Href(href), api}
}
//===== Actions
{{end}}{{range .Actions}}{{range .PathPatterns}}
// {{.HTTPMethod}} {{.Path}}{{end}}
//
{{comment .Description}}
func (loc *{{$resource.Name}}Locator) {{.MethodName}}({{parameters .}}){{if .Return}} ({{.Return}},{{end}} error{{if .Return}}){{end}} {
{{template "ActionBody" . }}
}
{{end}}
`
// actionBodyTmpl renders one action method body: mandatory-parameter checks,
// API parameter maps, request construction/execution, HTTP status handling,
// and result decoding (Location header for create-style actions, JSON or raw
// string otherwise).
const actionBodyTmpl = `{{$action := .}}{{if .Return}}var res {{.Return}}
{{end}}{{range .Params}}{{if and .Mandatory (blankCondition .VarName .Type)}}{{blankCondition .VarName .Type}}
return {{if $action.Return}}res, {{end}}fmt.Errorf("{{.VarName}} is required")
}
{{end}}{{end}}{{/* end range .Params */}}var params rsapi.APIParams{{paramsInitializer . 1 "params"}}
var p rsapi.APIParams{{paramsInitializer . 2 "p"}}
uri, err := loc.ActionPath("{{$action.ResourceName}}", "{{$action.Name}}")
if err != nil {
return {{if $action.Return}}res, {{end}}err
}
req, err := loc.api.BuildHTTPRequest(uri.HTTPMethod, uri.Path, APIVersion, params, p)
if err != nil {
return {{if $action.Return}}res, {{end}}err
}
resp, err := loc.api.PerformRequest(req)
if err != nil {
return {{if $action.Return}}res, {{end}}err
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode > 299 {
respBody, _ := ioutil.ReadAll(resp.Body)
sr := string(respBody)
if sr != "" {
sr = ": " + sr
}
return {{if $action.Return}}res, {{end}}fmt.Errorf("invalid response %s%s", resp.Status, sr)
}
{{if .ReturnLocation}}location := resp.Header.Get("Location")
if len(location) == 0 {
return res, fmt.Errorf("Missing location header in response")
} else {
return &{{stripStar .Return}}{Href(location), loc.api}, nil
}{{else if .Return}}defer resp.Body.Close()
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return res, err
}
{{if eq .Return "string"}}res = string(respBody)
{{else}}err = json.Unmarshal(respBody, &res)
{{end}}return res, err{{else}}return nil{{end}}`
|
package main
import (
"TskSch/msgQ"
"TskSch/logger"
"TskSch/resultDB"
"TskSch/mailer"
"fmt"
"github.com/garyburd/redigo/redis"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"bytes"
"os/exec"
"strconv"
"time"
"encoding/json"
"net/http"
"io/ioutil"
"code.google.com/p/goconf/conf"
"os"
"sync"
"strings"
)
// task holds only a task identifier, decoded from a result-DB document
// (see the col.Find(...).Select(bson.M{"task_id": 1}) queries in main).
// NOTE(review): the underscore name Task_id presumably relies on mgo's
// default lowercased bson mapping matching the stored "task_id" key;
// renaming it would require an explicit bson tag — confirm before changing.
type task struct {
	Task_id string
}

// taskp holds a task identifier plus the pid of the executor process that
// picked the task up (queried with "task_id" and "pid" fields in main).
type taskp struct {
	Task_id string
	Pid     int
}
// y caches the config file's ModTime from the previous poll cycle; the
// config is re-parsed only when the mtime changes (see main).
var y time.Time

// Endpoints parsed from ../server.conf. schedulerHost, ManagerHost, and
// taskAgent are "host:port" strings; host1 (resultDB), host2, and port
// (msgQ) are kept separate for the DB and queue client constructors.
var schedulerHost string
var ManagerHost string
var taskAgent string
var host1 string
var host2 string
var port string
// main polls every 50 seconds: it re-reads endpoint settings from
// ../server.conf when the file's mtime changes, then runs four concurrent
// health checks — scheduler, manager, task agents, and a msgQ/resultDB
// reconciliation that re-queues lost tasks — waiting for all four before
// the next tick.
func main() {
	for _ = range time.Tick(time.Second * 50) {
		// Fresh log handles each cycle.
		logfile := logger.LogValInit()
		LogInfo := logger.Info(logfile)
		LogErr := logger.Failure(logfile)
		var wg sync.WaitGroup
		wg.Add(4) // one per health-check goroutine below
		//Extracting conf
		// Re-parse only when the file's modification time differs from the
		// previous cycle (y is updated at the bottom of the loop).
		Finfo, _ := os.Stat("../server.conf")
		v := Finfo.ModTime()
		if !v.Equal(y) {
			c, err := conf.ReadConfigFile("../server.conf")
			if err != nil {
				fmt.Println("CAN'T READ CONF FIILE", err)
			}
			w, _ := c.GetString("scheduler", "host")
			x, _ := c.GetString("scheduler", "port")
			schedulerHost = w + ":" + x
			a, _ := c.GetString("manager", "host")
			b, _ := c.GetString("manager", "port")
			ManagerHost = a + ":" + b
			p, _ := c.GetString("taskagent", "host")
			z, _ := c.GetString("taskagent", "port")
			taskAgent = p + ":" + z
			host1, _ = c.GetString("resultDB", "host")
			host2, _ = c.GetString("msgQ", "host")
			port, _ = c.GetString("msgQ", "port")
		}
		//Checking liveliness of Scheduler
		// Expects a JSON body like {"status":"alive"} from /ping.
		schedulerPath := "http://" + schedulerHost + "/ping"
		go func(schedulerPath string, wg *sync.WaitGroup) {
			res, err := http.Get(schedulerPath)
			if err == nil {
				body, _ := ioutil.ReadAll(res.Body)
				if string(body) != "" {
					var status interface{}
					err := json.Unmarshal([]byte(body), &status)
					if err == nil {
						if status.(map[string]interface{})["status"].(string) == "alive" {
							LogInfo.Println("Scheduer is alive")
						} else {
							mailer.Mail("GOSERVE: Regarding Scheduler Status", "Scheduer is not alive")
							LogErr.Println("Scheduer is not alive")
						}
					}
				}
			} else {
				fmt.Println("Cannot connect to Scheduler", err)
				LogErr.Println("Cannot connect to Scheduler", err)
			}
			wg.Done()
		}(schedulerPath, &wg)
		//Checking liveliness of Manager
		ManagerPath := "http://" + ManagerHost + "/ping"
		go func(ManagerPath string, wg *sync.WaitGroup) {
			res, err := http.Get(ManagerPath)
			if err == nil {
				body, _ := ioutil.ReadAll(res.Body)
				if string(body) != "" {
					var status interface{}
					err := json.Unmarshal([]byte(body), &status)
					if err == nil {
						if status.(map[string]interface{})["status"].(string) == "alive" {
							LogInfo.Println("Manager is alive")
						} else {
							// NOTE(review): these mail/log messages still say
							// "Scheduler"/"Scheduer" — copy-pasted from the
							// scheduler check; they should refer to the Manager.
							mailer.Mail("GOSERVE: Regarding Scheduler Status", "Scheduer is not alive")
							LogErr.Println("Scheduer is not alive")
						}
					}
				}
			} else {
				// NOTE(review): same copy-paste — this branch is the Manager check.
				fmt.Println("Cannot connect to Scheduler", err)
				LogErr.Println("Cannot connect to Scheduler", err)
			}
			wg.Done()
		}(ManagerPath, &wg)
		//Checking liveliness of Task Agents
		go func(taskAgent string, wg *sync.WaitGroup) {
			// taskAgent is "h1,h2,...:p1,p2,..."; hosts and ports are paired
			// positionally, so both comma lists must be the same length.
			TaskHost := strings.Split(strings.Split(taskAgent, ":")[0], ",")
			TaskPort := strings.Split(strings.Split(taskAgent, ":")[1], ",")
			if len(TaskHost) == len(TaskPort) {
				for i, _ := range TaskHost {
					taskagentPath := "http://" + TaskHost[i] + ":" + TaskPort[i] + "/ping"
					res, err := http.Get(taskagentPath)
					if err == nil {
						body, _ := ioutil.ReadAll(res.Body)
						if string(body) != "" {
							var status interface{}
							err := json.Unmarshal([]byte(body), &status)
							if err == nil {
								if status.(map[string]interface{})["status"].(string) == "alive" {
									fmt.Println("Task Agent is alive")
								} else {
									// NOTE(review): log message says "Scheduer"
									// but this is a task-agent failure.
									mailer.Mail("GOSERVE: Regarding Task Aegent : "+taskagentPath+" Status", taskagentPath+" : is not alive")
									LogErr.Println("Scheduer is not alive")
								}
							}
						}
					} else {
						fmt.Println("Cannot connect to task agent", err)
						LogErr.Println("Cannot connect to Task Aegent : "+taskagentPath, err)
					}
				}
			} else {
				fmt.Println("ERROR IN CONFIG FILE")
				LogErr.Println("ERROR IN CONFIG FILE")
			}
			wg.Done()
		}(taskAgent, &wg)
		// Reconciliation: re-queue tasks recorded as pending in resultDB but
		// missing from the msgQ, plus tasks whose executor process has died.
		go func(wg *sync.WaitGroup, host1 string, host2 string, port string) {
			var ids []string
			res1 := []task{}
			res2 := []taskp{}
			//Connecting to mongodb
			session := resultDB.ResultdbInit(host1)
			session.SetMode(mgo.Monotonic, true)
			col := session.DB("TskSch").C("Result")
			//Connecting to msgQ
			Conn := msgQ.RedisInit(host2, port)
			//Checking liveness of msgQ to get the list of taskids in msgQ
			// NOTE(review): the branch below does the queue work when
			// msgQ.Ping returns a NON-nil error and merely logs otherwise —
			// this polarity looks inverted; confirm Ping's return convention.
			err := msgQ.Ping(Conn)
			if err != nil {
				Ids, Err := redis.Values(Conn.Do("LRANGE", "task", "0", "-1"))
				if Err != nil {
					fmt.Println("Could not able to connect to msgQ", Err)
					mailer.Mail("GOSERVE: Regarding 'msgQ' Status", "Could not able to connect to 'msgQ'")
					LogErr.Println("Could not able to connect to msgQ", Err)
				} else {
					for _, val := range Ids {
						ids = append(ids, string(val.([]byte)))
					}
				}
			} else {
				LogErr.Println(err)
			}
			//Checking liveliness of mongodb
			// NOTE(review): same suspicious polarity as the msgQ check above.
			err = resultDB.Ping(session)
			if err != nil {
				//collecting all the taskids from resultDB which are not executed and not in execution state to check whether they are in msgQ
				Err := col.Find(bson.M{"executed": false, "exec_stat": false}).Select(bson.M{"task_id": 1}).All(&res1)
				if Err != nil {
					fmt.Println("Could not able to connect to mongodb", Err)
					mailer.Mail("GOSERVE: Regarding 'mongodb' Status", "Could not able to connect to 'mongodb'")
					LogErr.Println("Could not able to connect to mongodb", Err)
				}
				// Re-queue any pending task that is missing from the queue.
				for _, val := range res1 {
					flag := In(val.Task_id, ids)
					if flag != true {
						x, err := Conn.Do("RPUSH", "task", val.Task_id)
						if err != nil {
							fmt.Println(x, err)
							LogErr.Println(x, err)
						}
						LogInfo.Println("PUSHED" + val.Task_id + "TASK TO msgQ")
					}
				}
				//collecting all the taskids from resultDB which are not executed , not in execution state and which were poped from executer but not executed
				Err = col.Find(bson.M{"executed": false, "exec_stat": false, "pid": bson.M{"$gt": 0}}).Select(bson.M{"task_id": 1, "pid": 1}).All(&res2)
				if Err != nil {
					fmt.Println("Could not able to connect to mongodb", Err)
					mailer.Mail("GOSERVE: Regarding 'mongodb' Status", "Could not able to connect to 'mongodb'")
					LogErr.Println("Could not able to connect to mongodb", Err)
				}
				// Re-queue tasks whose executor process is no longer alive.
				for _, val := range res2 {
					flag := Isalive(val.Pid)
					if flag != true {
						flag1 := In(val.Task_id, ids)
						if flag1 != true {
							x, err := Conn.Do("RPUSH", "task", val.Task_id)
							if err != nil {
								fmt.Println(x, err)
								LogErr.Println(x, err)
							}
							fmt.Println("PUSHED" + val.Task_id + "TASK TO msgQ")
							LogInfo.Println("PUSHED" + val.Task_id + "TASK TO msgQ")
						}
					}
				}
			} else {
				resultDB.Restart(session)
				LogInfo.Println("RESULT DB RESTARTED")
			}
			wg.Done()
			fmt.Println("X!")
		}(&wg, host1, host2, port)
		wg.Wait()
		// Remember this cycle's config mtime so the next cycle only re-reads
		// the file if it changed.
		y = v
	}
}
// Isalive reports whether the executor process with the given pid is still
// running. It probes the process with `kill -0`, which sends no signal but
// fails (non-zero exit, stderr diagnostic) when the pid does not exist or
// cannot be signalled.
func Isalive(pid int) bool {
	var stderr bytes.Buffer
	cmd := exec.Command("sh", "-c", "kill -0 "+strconv.Itoa(pid))
	cmd.Stderr = &stderr
	// The previous version ignored Run's error entirely, so a failure to
	// launch the shell at all (empty stderr) was misreported as "alive".
	// Treat any run error OR any diagnostic output as "not alive".
	if err := cmd.Run(); err != nil {
		return false
	}
	return stderr.Len() == 0
}
// In reports whether the string a occurs anywhere in list.
func In(a string, list []string) bool {
	for i := range list {
		if list[i] == a {
			return true
		}
	}
	return false
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.