text stringlengths 11 4.05M |
|---|
package secrets
import (
"errors"
"strings"
"testing"
"github.com/10gen/realm-cli/internal/cli"
"github.com/10gen/realm-cli/internal/cloud/realm"
"github.com/10gen/realm-cli/internal/utils/test/assert"
"github.com/10gen/realm-cli/internal/utils/test/mock"
)
// TestSecretsListHandler exercises CommandList.Handler. A table drives the
// success cases (no secrets vs. a populated, column-aligned listing that
// includes duplicate names), and a nested sub-test table drives the failure
// cases (app resolution failing, secrets lookup failing).
func TestSecretsListHandler(t *testing.T) {
	projectID := "projectID"
	appID := "appID"
	app := realm.App{
		ID:          appID,
		GroupID:     projectID,
		ClientAppID: "eggcorn-abcde",
		Name:        "eggcorn",
	}
	// Two secrets share the name "dup" to confirm listing is by ID, not name.
	testSecrets := []realm.Secret{
		{ID: "secret1", Name: "test1"},
		{ID: "secret2", Name: "test2"},
		{ID: "secret3", Name: "dup"},
		{ID: "secret4", Name: "dup"},
	}
	for _, tc := range []struct {
		description    string
		secrets        []realm.Secret
		expectedOutput string
	}{
		{
			description:    "should list no secrets with no app secrets found",
			expectedOutput: "No available secrets to show\n",
		},
		{
			description: "should list the secrets found for the app",
			secrets:     testSecrets,
			// Expected table output, including trailing padding in each cell.
			expectedOutput: strings.Join(
				[]string{
					"Found 4 secrets",
					"  ID       Name ",
					"  -------  -----",
					"  secret1  test1",
					"  secret2  test2",
					"  secret3  dup  ",
					"  secret4  dup  ",
					"",
				},
				"\n",
			),
		},
	} {
		t.Run(tc.description, func(t *testing.T) {
			out, ui := mock.NewUI()
			// Stub the Realm client: app resolution succeeds and secrets
			// come from the test case.
			realmClient := mock.RealmClient{}
			realmClient.FindAppsFn = func(filter realm.AppFilter) ([]realm.App, error) {
				return []realm.App{app}, nil
			}
			realmClient.SecretsFn = func(groupID, appID string) ([]realm.Secret, error) {
				return tc.secrets, nil
			}
			cmd := &CommandList{listInputs{cli.ProjectInputs{
				Project: projectID,
				App:     appID,
			}}}
			assert.Nil(t, cmd.Handler(nil, ui, cli.Clients{Realm: realmClient}))
			assert.Equal(t, tc.expectedOutput, out.String())
		})
	}
	t.Run("should return an error", func(t *testing.T) {
		for _, tc := range []struct {
			description string
			setupClient func() realm.Client
			expectedErr error
		}{
			{
				description: "when resolving the app fails",
				setupClient: func() realm.Client {
					realmClient := mock.RealmClient{}
					realmClient.FindAppsFn = func(filter realm.AppFilter) ([]realm.App, error) {
						return nil, errors.New("something bad happened")
					}
					return realmClient
				},
				expectedErr: errors.New("something bad happened"),
			},
			{
				description: "when finding the secrets fails",
				setupClient: func() realm.Client {
					realmClient := mock.RealmClient{}
					realmClient.FindAppsFn = func(filter realm.AppFilter) ([]realm.App, error) {
						return []realm.App{app}, nil
					}
					realmClient.SecretsFn = func(groupID, appID string) ([]realm.Secret, error) {
						return nil, errors.New("something bad happened")
					}
					return realmClient
				},
				expectedErr: errors.New("something bad happened"),
			},
		} {
			t.Run(tc.description, func(t *testing.T) {
				realmClient := tc.setupClient()
				cmd := &CommandList{}
				err := cmd.Handler(nil, nil, cli.Clients{Realm: realmClient})
				assert.Equal(t, tc.expectedErr, err)
			})
		}
	})
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cpuprofile
import (
"bytes"
"context"
"io"
"net"
"net/http"
"runtime/pprof"
"sync"
"testing"
"time"
"github.com/google/pprof/profile"
"github.com/pingcap/tidb/testkit/testsetup"
"github.com/pingcap/tidb/util/cpuprofile/testutil"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
)
// TestMain configures the common test environment, shortens the default
// profile window so the suite runs quickly, and verifies no goroutines leak.
func TestMain(m *testing.M) {
	testsetup.SetupForCommonTest()
	// To speed up testing
	DefProfileDuration = time.Millisecond * 200
	goleak.VerifyTestMain(m)
}
// TestBasicAPI checks the start/stop lifecycle of the global CPU profiler:
// a second start fails, repeated stops are harmless, and a freshly replaced
// profiler instance behaves the same way.
func TestBasicAPI(t *testing.T) {
	err := StartCPUProfiler()
	require.NoError(t, err)
	defer StopCPUProfiler()
	// Starting a second time must report the profiler is already running.
	err = StartCPUProfiler()
	require.Equal(t, err, errProfilerAlreadyStarted)
	// Test for close multiple times.
	StopCPUProfiler()
	StopCPUProfiler()
	// Replace the global instance; stopping before starting must be safe too.
	globalCPUProfiler = newParallelCPUProfiler()
	StopCPUProfiler()
	err = StartCPUProfiler()
	require.NoError(t, err)
	err = StartCPUProfiler()
	require.Equal(t, err, errProfilerAlreadyStarted)
}
// TestParallelCPUProfiler drives consumer registration edge cases against the
// global profiler: nil and duplicate registration, delivery of profiling
// errors, closed consumers, successful data delivery, and that the profiler
// releases pprof when the last consumer unregisters. The steps mutate shared
// global state and must run in this order.
func TestParallelCPUProfiler(t *testing.T) {
	err := StartCPUProfiler()
	require.NoError(t, err)
	defer StopCPUProfiler()
	// Test register/unregister nil
	Register(nil)
	require.Equal(t, 0, globalCPUProfiler.consumersCount())
	Unregister(nil)
	require.Equal(t, 0, globalCPUProfiler.consumersCount())
	// Test profile error and duplicate register.
	dataCh := make(ProfileConsumer, 10)
	// Occupy pprof so the profiler's own StartCPUProfile call fails.
	err = pprof.StartCPUProfile(bytes.NewBuffer(nil))
	require.NoError(t, err)
	// Test for duplicate register.
	Register(dataCh)
	Register(dataCh)
	require.Equal(t, 1, globalCPUProfiler.consumersCount())
	// Test profile error
	data := <-dataCh
	require.Equal(t, "cpu profiling already in use", data.Error.Error())
	Unregister(dataCh)
	require.Equal(t, 0, globalCPUProfiler.consumersCount())
	// shouldn't receive data from a unregistered consumer.
	data = nil
	select {
	case data = <-dataCh:
	default:
	}
	require.Nil(t, data)
	// unregister not exist consumer
	Unregister(dataCh)
	require.Equal(t, 0, globalCPUProfiler.consumersCount())
	// Test register a closed consumer
	dataCh = make(ProfileConsumer, 10)
	close(dataCh)
	Register(dataCh)
	require.Equal(t, 1, globalCPUProfiler.consumersCount())
	// Receiving from the closed channel yields the zero value immediately.
	data, ok := <-dataCh
	require.Nil(t, data)
	require.False(t, ok)
	Unregister(dataCh)
	require.Equal(t, 0, globalCPUProfiler.consumersCount())
	pprof.StopCPUProfile()
	// Test successfully get profile data.
	dataCh = make(ProfileConsumer, 10)
	Register(dataCh)
	data = <-dataCh
	require.NoError(t, data.Error)
	profileData, err := profile.ParseData(data.Data.Bytes())
	require.NoError(t, err)
	require.NotNil(t, profileData)
	Unregister(dataCh)
	require.Equal(t, 0, globalCPUProfiler.consumersCount())
	// Test stop profiling when no consumer.
	Register(dataCh)
	for {
		// wait for parallelCPUProfiler do profiling successfully
		err = pprof.StartCPUProfile(bytes.NewBuffer(nil))
		if err != nil {
			break
		}
		pprof.StopCPUProfile()
		time.Sleep(time.Millisecond)
	}
	Unregister(dataCh)
	require.Equal(t, 0, globalCPUProfiler.consumersCount())
	// wait for parallelCPUProfiler stop profiling
	start := time.Now()
	for {
		// pprof becomes available again once the profiler has released it;
		// give it up to two seconds.
		err = pprof.StartCPUProfile(bytes.NewBuffer(nil))
		if err == nil || time.Since(start) >= time.Second*2 {
			break
		}
		time.Sleep(time.Millisecond)
	}
	require.NoError(t, err)
	pprof.StopCPUProfile()
}
// getCPUProfile collects a CPU profile into w for the duration d, returning
// the first error from starting or stopping the collector.
func getCPUProfile(d time.Duration, w io.Writer) error {
	collector := NewCollector()
	if err := collector.StartCPUProfile(w); err != nil {
		return err
	}
	time.Sleep(d)
	return collector.StopCPUProfile()
}
// TestGetCPUProfile checks that profile collection fails while pprof is
// otherwise occupied, then collects ten profiles in parallel under mocked
// CPU load and verifies every sample carries only the "sql" label.
//
// NOTE(review): require.* inside the goroutines calls t.FailNow from a
// non-test goroutine, which the testing package does not support — a failure
// here may not be reported correctly; consider t.Error-style assertions.
func TestGetCPUProfile(t *testing.T) {
	err := StartCPUProfiler()
	require.NoError(t, err)
	defer StopCPUProfiler()
	// Test profile error
	err = pprof.StartCPUProfile(bytes.NewBuffer(nil))
	require.NoError(t, err)
	err = getCPUProfile(time.Millisecond*50, bytes.NewBuffer(nil))
	require.Error(t, err)
	require.Equal(t, "cpu profiling already in use", err.Error())
	pprof.StopCPUProfile()
	// test parallel get CPU profile.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	testutil.MockCPULoad(ctx, "sql", "sql_digest", "plan_digest")
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			var err error
			buf := bytes.NewBuffer(nil)
			err = getCPUProfile(time.Millisecond*1000, buf)
			require.NoError(t, err)
			profileData, err := profile.Parse(buf)
			require.NoError(t, err)
			labelCnt := 0
			for _, s := range profileData.Sample {
				for k := range s.Label {
					require.Equal(t, "sql", k)
					labelCnt++
				}
			}
			// At least one labeled sample must have been captured.
			require.True(t, labelCnt > 0)
		}()
	}
	wg.Wait()
}
// TestProfileHTTPHandler serves ProfileHTTPHandler on an ephemeral port and
// checks both the success path (seconds=1 yields a parseable profile) and
// the rejection path (a duration longer than the server's WriteTimeout
// returns 400 with an explanatory body).
func TestProfileHTTPHandler(t *testing.T) {
	err := StartCPUProfiler()
	require.NoError(t, err)
	defer StopCPUProfiler()
	// setup http server
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)
	router := http.NewServeMux()
	router.HandleFunc("/debug/pprof/profile", ProfileHTTPHandler)
	httpServer := &http.Server{Handler: router, WriteTimeout: time.Second * 60}
	go func() {
		if err := httpServer.Serve(listener); err != nil && err != http.ErrServerClosed {
			require.NoError(t, err)
		}
	}()
	defer func() {
		err := httpServer.Close()
		require.NoError(t, err)
	}()
	address := listener.Addr().String()
	// Test for get profile success.
	resp, err := http.Get("http://" + address + "/debug/pprof/profile?seconds=1")
	require.NoError(t, err)
	require.Equal(t, 200, resp.StatusCode)
	profileData, err := profile.Parse(resp.Body)
	require.NoError(t, err)
	require.NotNil(t, profileData)
	require.NoError(t, resp.Body.Close())
	// Test for get profile failed.
	resp, err = http.Get("http://" + address + "/debug/pprof/profile?seconds=100000")
	require.NoError(t, err)
	require.Equal(t, 400, resp.StatusCode)
	body, err := io.ReadAll(resp.Body)
	require.NoError(t, err)
	require.Equal(t, "profile duration exceeds server's WriteTimeout\n", string(body))
	require.NoError(t, resp.Body.Close())
}
|
package atomix
import (
"strconv"
"sync/atomic"
)
// Bool is an atomic boolean.
type Bool struct {
	atomicType
	// value holds 0 for false, 1 for true; read/written only via the
	// sync/atomic functions.
	value uint32
}

// NewBool creates a Bool initialized to the given value.
func NewBool(value bool) *Bool {
	return &Bool{value: b2i(value)}
}
// String implements fmt.Stringer, rendering the current value as
// "true" or "false".
func (b *Bool) String() string {
	v := b.Load()
	return strconv.FormatBool(v)
}
// Load atomically loads and returns the value.
func (b *Bool) Load() bool {
	return isTrue(atomic.LoadUint32(&b.value))
}
// Store atomically stores the given value.
func (b *Bool) Store(new bool) {
	atomic.StoreUint32(&b.value, b2i(new))
}
// Swap sets the given value and returns the previous value.
func (b *Bool) Swap(new bool) bool {
	prev := atomic.SwapUint32(&b.value, b2i(new))
	return isTrue(prev)
}
// Toggle atomically negates the boolean and returns the previous value.
//
// It uses a CAS loop so the stored word always stays 0 or 1. The previous
// AddUint32-based implementation let the counter grow past 1 on repeated
// toggles, which broke CAS: CAS compares the raw word against b2i(old),
// i.e. against exactly 0 or 1.
func (b *Bool) Toggle() bool {
	for {
		old := atomic.LoadUint32(&b.value)
		if atomic.CompareAndSwapUint32(&b.value, old, b2i(!isTrue(old))) {
			return isTrue(old)
		}
	}
}
// CAS is an atomic Compare-And-Swap operation.
//
// NOTE(review): this compares the raw stored word against b2i(old), so it
// assumes the word is always exactly 0 or 1; Toggle as written with AddUint32
// can push the word past 1, after which CAS(false, ...) on an even word > 0
// always fails — confirm the invariant or fix Toggle.
func (b *Bool) CAS(old, new bool) bool {
	return atomic.CompareAndSwapUint32(&b.value, b2i(old), b2i(new))
}
// isTrue reports whether the stored word represents true (odd parity).
func isTrue(n uint32) bool {
	return n%2 == 1
}
// b2i converts a bool to its stored word form: 1 for true, 0 for false.
func b2i(b bool) uint32 {
	var n uint32
	if b {
		n = 1
	}
	return n
}
|
package leetcode
import "testing"
// TestRecentCounter verifies RecentCounter.Ping on the canonical sequence:
// pings at t=1,100,3001,3002 should report 1,2,3,3 recent requests.
//
// The previous version failed with a bare t.Fatal(), which reported nothing
// about which input diverged or what value was returned.
func TestRecentCounter(t *testing.T) {
	obj := Constructor()
	inputs := []int{1, 100, 3001, 3002}
	expected := []int{1, 2, 3, 3}
	for i, in := range inputs {
		if got := obj.Ping(in); got != expected[i] {
			t.Fatalf("Ping(%d) = %d, want %d", in, got, expected[i])
		}
	}
}
|
package main
import (
"fmt"
"net/http"
"strings"
)
// main performs a simple smoke test against the player endpoint and prints
// the response body.
func main() {
	//general testing
	resp := sendRequest("player", "", "43db704e10b140b3a38dce059de35a59")
	// sendRequest returns nil on failure; the previous version dereferenced
	// resp.Body unconditionally (nil-pointer panic) and printed the
	// io.ReadCloser value itself rather than the body's contents.
	if resp == nil {
		fmt.Println("request failed")
		return
	}
	defer resp.Body.Close()
	buf := make([]byte, 4096)
	for {
		n, err := resp.Body.Read(buf)
		if n > 0 {
			fmt.Print(string(buf[:n]))
		}
		if err != nil {
			break
		}
	}
	fmt.Println()
}
// sendRequest calls the Hypixel API endpoint named by apiType ("player",
// "guild", "keys", "boosters" or "friends") and returns the HTTP response.
// It returns nil if the key is invalid, the endpoint is unknown, or the
// request fails.
//
// The previous version discarded every strings.Replace result (Go strings
// are immutable; Replace returns a new string), substituted into apiKey
// instead of the URL, built the URL path from apiKey instead of apiType,
// duplicated the identical body across five cases, and returned resp even
// when http.Get had failed.
func sendRequest(apiType string, apiKey string, uuid string) *http.Response {
	// Hypixel API keys are 36-character UUIDs; anything longer is invalid.
	if len(apiKey) > 36 {
		fmt.Println("Your API key is invalid, please check it! (Exiting)")
		return nil
	}
	switch apiType {
	case "player", "guild", "keys", "boosters", "friends":
		// Known endpoint; fall through to the request below.
	default:
		return nil
	}
	// NOTE(review): assumes the intended URL shape is
	// /<endpoint>?key=<apiKey>&uuid=<uuid> per the public Hypixel API; the
	// original template ("?key=[#type]&byUuid=[#uuid]") was garbled — confirm
	// the parameter names against the API documentation.
	apiURL := strings.NewReplacer(
		"[#type]", apiType,
		"[#key]", apiKey,
		"[#uuid]", uuid,
	).Replace("https://api.hypixel.net/[#type]?key=[#key]&uuid=[#uuid]")
	resp, err := http.Get(apiURL)
	if err != nil {
		fmt.Println("Could not send API request, found error: ", err)
		return nil
	}
	return resp
}
|
package cache
import (
"log"
"time"
"github.com/dgraph-io/badger"
)
// BadgerDBCache is a cache backed by an on-disk Badger key-value store.
type BadgerDBCache struct {
	db *badger.DB
}
// Set stores value v under key k.
func (b *BadgerDBCache) Set(k []byte, v []byte) error {
	return b.db.Update(func(txn *badger.Txn) error {
		return txn.Set(k, v)
	})
}
// Setex stores value v under key k with the given time-to-live.
func (b *BadgerDBCache) Setex(k []byte, ttl time.Duration, v []byte) error {
	return b.db.Update(func(txn *badger.Txn) error {
		return txn.SetWithTTL(k, v, ttl)
	})
}
// Get returns a copy of the value stored under key k.
//
// Badger only guarantees the slice passed to Item.Value for the lifetime of
// the transaction, so the value is copied before being returned; the previous
// version returned the transaction-owned slice directly and also ignored
// Item.Value's error return.
func (b *BadgerDBCache) Get(k []byte) ([]byte, error) {
	var v []byte
	err := b.db.View(func(txn *badger.Txn) error {
		item, err := txn.Get(k)
		if err != nil {
			return err
		}
		return item.Value(func(value []byte) error {
			v = append([]byte(nil), value...)
			return nil
		})
	})
	if err != nil {
		return nil, err
	}
	return v, nil
}
// Spop removes and returns up to amount keys that start with prefix.
func (b *BadgerDBCache) Spop(prefix []byte, amount uint) ([][]byte, error) {
	var results [][]byte
	// Populate: collect up to amount matching keys in a read-only pass.
	var populated [][]byte
	err := b.db.View(func(txn *badger.Txn) error {
		opts := badger.DefaultIteratorOptions
		opts.PrefetchValues = false
		it := txn.NewIterator(opts)
		defer it.Close()
		// The previous version used <=, which collected amount+1 keys.
		for it.Seek(prefix); len(populated) < int(amount) && it.ValidForPrefix(prefix); it.Next() {
			// Item.Key's slice is only valid until Next; take a copy.
			k := append([]byte(nil), it.Item().Key()...)
			if len(k) > 0 {
				populated = append(populated, k)
			}
		}
		return nil
	})
	if err != nil {
		// Bail out before mutating anything; the previous version carried
		// this error silently while still running the deletes below.
		return results, err
	}
	// Delete the collected keys; results holds only successfully deleted ones.
	for _, k := range populated {
		if err := b.db.Update(func(txn *badger.Txn) error {
			return txn.Delete(k)
		}); err != nil {
			log.Print("Badger: Spop: Unable to delete keys")
			return results, err
		}
		results = append(results, k)
	}
	return results, nil
}
// Delete removes the entry stored under key k.
func (b *BadgerDBCache) Delete(k []byte) error {
	return b.db.Update(func(txn *badger.Txn) error {
		return txn.Delete(k)
	})
}
// Close shuts down the underlying Badger database. Any close error is
// discarded because the interface method has no error return.
func (b *BadgerDBCache) Close() {
	_ = b.db.Close()
}
// newBadgerDBCache opens (or creates) the on-disk cache in ./cache.
func newBadgerDBCache() (Cacher, error) {
	opts := badger.DefaultOptions
	opts.Dir = "./cache"
	opts.ValueDir = "./cache"
	db, err := badger.Open(opts)
	if err != nil {
		// Propagate instead of log.Fatal: the signature already declares an
		// error return, and Fatal made that return path unreachable while
		// terminating the whole process on a recoverable condition.
		return nil, err
	}
	return &BadgerDBCache{db: db}, nil
}
|
// Assembly symbol map.
package main
import (
"fmt"
"reflect"
"sort"
"strings"
)
// Symbol is a single entry in a SymMap: a value together with a flag saying
// whether it may be redefined.
type Symbol struct {
	Constant bool // Constness of the stored value.
	Val      asmVal
}
// String renders the symbol's value, prefixed with "(const) " when it is
// constant, terminated by a newline.
func (s Symbol) String() string {
	prefix := ""
	if s.Constant {
		prefix = "(const) "
	}
	return prefix + s.Val.String() + "\n"
}
// MemoryModel is a bit set of flags describing the assembler memory model.
type MemoryModel uint8

const (
	// Primitive flags.
	FarCode    MemoryModel = (1 << iota)
	FarData                = (1 << iota)
	HugeData               = (1 << iota)
	CSInDGroup             = (1 << iota)
	Turbo                  = (1 << iota) // Indicates the TPASCAL model
	Flat                   = (1 << iota)
	// Named models composed from the flags above.
	Tiny    = CSInDGroup
	Small   = 0
	Compact = FarData
	Medium  = FarCode
	Large   = FarData | FarCode
	Huge    = HugeData | FarCode
	TPascal = Turbo | Compact
	TCHuge  = Flat | Huge
)
// InternalSyms contains all internal symbols that can't be overwritten
// through the normal symbol map. Pointer values are undefined at first.
type InternalSyms struct {
	FileName   asmExpression
	FileName8  asmString
	StackGroup *asmExpression
	ThirtyTwo  *uint8 // @32Bit; nil until defined.
	Model      *MemoryModel
	Interface  *uint8
	CPU        cpuFlag
	WordSize   uint8
	// We keep those in addition to the MemoryModel value. Auto-generating
	// them from Model is not worth the hassle, especially because of the
	// different value for FLAT in TASM and MASM.
	SymModel    *uint8
	SymCodeSize *uint8
	SymDataSize *uint8
}
// Lookup maps the members of s to their symbol names and returns their values
// as asmVal types. The second return value reports whether name is a known
// internal symbol at all; a (nil, true) result means the symbol exists but
// has no value defined yet.
func (s *InternalSyms) Lookup(name string) (asmVal, bool) {
	if s == nil {
		return nil, false
	}
	// num is set by the switch to point at one of the optional *uint8
	// fields, and dereferenced twice below.
	var num **uint8
	// This isn't actually what either TASM or JWasm do, but accepting both
	// real and uppercase seems the most sensible option that still allows
	// custom spellings to be used for user-defined symbols together with
	// OPTION CASEMAP:NONE.
	switch name {
	case "??filename", "??FILENAME":
		return s.FileName8, true
	case "@32Bit", "@32BIT":
		num = &s.ThirtyTwo
	case "@CodeSize", "@CODESIZE":
		num = &s.SymCodeSize
	case "@Cpu", "@CPU":
		return asmInt{n: int64(s.CPU), base: 2}, true
	case "@DataSize", "@DATASIZE":
		num = &s.SymDataSize
	case "@FileName", "@FILENAME":
		return s.FileName, true
	case "@Interface", "@INTERFACE":
		num = &s.Interface
	case "@Model", "@MODEL":
		num = &s.SymModel
	case "@stack", "@STACK":
		if s.StackGroup == nil {
			return nil, true
		}
		return *s.StackGroup, true
	case "@WordSize", "@WORDSIZE":
		return asmInt{n: int64(s.WordSize)}, true
	}
	if num == nil {
		// Not an internal symbol.
		return nil, false
	}
	if *num == nil {
		// Known symbol, but its value is still undefined.
		return nil, true
	}
	return asmInt{n: int64(**num)}, true
}
// SegmentWordSize returns the word size to use for segments: 2 or 4 bytes
// depending on @32BIT when it is set, the global word size otherwise.
func (s InternalSyms) SegmentWordSize() uint8 {
	if s.ThirtyTwo == nil {
		return s.WordSize
	}
	// @32BIT is only set in TASM mode, which can't be used to compile 64-bit
	// code anyway, so I guess this is fine?
	return 2 + (*s.ThirtyTwo * 2)
}
// SymMap is the user-visible symbol map, backed by the immutable internal
// symbols and a shared case-sensitivity switch.
type SymMap struct {
	Map           map[string]Symbol
	Internals     *InternalSyms
	CaseSensitive *bool
}
// Dump returns a string listing all symbols in s in alphabetical order,
// together with their values, indented with the given number of tabs.
func (s SymMap) Dump(indent int) string {
	if len(s.Map) == 0 {
		return ""
	}
	keys := make([]string, 0, len(s.Map))
	for k := range s.Map {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	prefix := strings.Repeat("\t", indent)
	var b strings.Builder
	for _, k := range keys {
		fmt.Fprintf(&b, "%s• %s: %s", prefix, k, s.Map[k])
	}
	out := b.String()
	// Each Symbol renders with a trailing newline; drop the final one.
	return out[:len(out)-1]
}
// String implements fmt.Stringer as an unindented Dump.
func (s SymMap) String() string {
	return s.Dump(0)
}
// ToSymCase normalizes str for use as a map key: unchanged when the map is
// case sensitive, uppercased otherwise.
func (s *SymMap) ToSymCase(str string) string {
	if *s.CaseSensitive {
		return str
	}
	return strings.ToUpper(str)
}
// Equal returns whether s1 and s2 are equal according to the case sensitivity
// setting of s.
func (s *SymMap) Equal(s1 string, s2 string) bool {
	if *s.CaseSensitive {
		return s1 == s2
	}
	return strings.EqualFold(s1, s2)
}
// Lookup wraps Go's own map lookup using the case sensitivity setting of s.
// It returns the value of the symbol or nil if it doesn't exist in s,
// together with a possible error. Internal symbols take precedence over
// user-defined ones.
func (s *SymMap) Lookup(name string) (asmVal, ErrorList) {
	realName := s.ToSymCase(name)
	if ret, ok := s.Internals.Lookup(realName); ok {
		return ret, nil
	} else if ret, ok := s.Map[realName]; ok {
		var err ErrorList
		// If case mapping folded name onto realName but the original
		// spelling also exists as its own key, the lookup is ambiguous;
		// warn, but still return the case-folded entry.
		if !(*s.CaseSensitive) && name != realName {
			if _, ok := s.Map[name]; ok {
				err = ErrorListF(ESWarning,
					"symbol name is ambiguous due to reactivated case mapping; picking %s, not %s",
					realName, name,
				)
			}
		}
		return ret.Val, err
	}
	return nil, nil
}
// Get returns the value of a symbol that is meant to exist in s, or an error
// if it doesn't.
//
// NOTE(review): when Lookup returns (nil, err), that err is discarded and
// replaced by the "unknown symbol" error — confirm dropping it is intended.
func (s *SymMap) Get(name string) (asmVal, ErrorList) {
	if ret, err := s.Lookup(name); ret != nil {
		return ret, err
	}
	return nil, ErrorListF(ESError, "unknown symbol: %s", name)
}
// Set tries to add a new symbol with the given name and value to s, while
// taking the constness of a possible existing value with the same name into
// account. If name is empty, the function does nothing.
func (s *SymMap) Set(name string, val asmVal, constant bool) ErrorList {
	if name == "" {
		return nil
	}
	// Maybe the asmVal interface should have received a Equal()
	// method, but given the fact that most types are constant anyway…
	//
	// redefinable reports whether b may replace a: it compares the values
	// structurally for the types where redefinition with an equal value is
	// legal, recursing member-wise for structures.
	redefinable := func(a, b asmVal) bool {
		redefinableVal := func(a, b asmVal) bool {
			switch a.(type) {
			case asmInt:
				a, b := a.(asmInt), b.(asmInt)
				return a.n == b.n && a.ptr == b.ptr
			case asmDataPtr:
				a, b := a.(asmDataPtr), b.(asmDataPtr)
				// TODO: Temporary kludge to keep pointers working while we're
				// migrating to a smarter pass system.
				if a.off == 0 {
					return true
				}
				return a.et.Name() == b.et.Name() &&
					a.chunk == b.chunk &&
					a.off == b.off &&
					a.ptr.unit.Width() == b.ptr.unit.Width()
			}
			// Any other type is never considered redefinable-with-equal-value.
			return false
		}
		switch a.(type) {
		case asmStruc:
			a, b := a.(asmStruc), b.(asmStruc)
			// Structures match when flags, sizes, and all members (by name
			// and type, and value except for nested structures) agree.
			ret := a.flag == b.flag && len(a.data) == len(b.data)
			for i, valB := range b.members.Map {
				valA, ok := a.members.Map[i]
				ret = ret && ok &&
					(reflect.TypeOf(valA.Val) == reflect.TypeOf(valB.Val))
				switch valA.Val.(type) {
				case asmStruc: // do nothing
				default:
					ret = ret && redefinableVal(valA.Val, valB.Val)
				}
			}
			return ret
		}
		return redefinableVal(a, b)
	}
	realName := s.ToSymCase(name)
	if _, ok := s.Internals.Lookup(realName); ok {
		return ErrorListF(ESError,
			"can't overwrite internal symbol: %s", realName,
		)
	} else if existing := s.Map[realName]; existing.Val != nil {
		// A previous definition exists: only allow replacement if the type
		// matches and either the old value wasn't constant or the new value
		// counts as a legal redefinition.
		fail := func() (err ErrorList) {
			err = err.AddF(ESError,
				"symbol already defined as %s: %s",
				existing.Val.Thing(), realName,
			)
			return err.AddF(ESError,
				"\t(previous value: %s)", existing.Val.String(),
			)
		}
		if reflect.TypeOf(existing.Val) != reflect.TypeOf(val) {
			return fail()
		} else if existing.Constant && !redefinable(existing.Val, val) {
			return fail()
		}
	}
	s.Map[realName] = Symbol{Val: val, Constant: constant}
	return nil
}
// NewSymMap creates a new symbol map whose case sensitivity can be controlled
// through the given pointer.
func NewSymMap(caseSensitive *bool, internals *InternalSyms) *SymMap {
	ret := &SymMap{Map: make(map[string]Symbol)}
	ret.CaseSensitive = caseSensitive
	ret.Internals = internals
	return ret
}
|
package queue_test
import (
"bytes"
"github.com/hx/queue"
"reflect"
"sync"
"testing"
"time"
)
// Assert fails the test immediately with the formatted message when cond is
// false.
func Assert(tb testing.TB, cond bool, msg string, v ...interface{}) {
	tb.Helper()
	if cond {
		return
	}
	tb.Logf(msg, v...)
	tb.FailNow()
}
// Equal fails the test when exp and act are not deeply equal.
func Equal(tb testing.TB, exp interface{}, act interface{}) {
	tb.Helper()
	same := reflect.DeepEqual(exp, act)
	Assert(tb, same, "Expected %v to be equal to %v", act, exp)
}
// NotEqual fails the test when exp and act are deeply equal.
func NotEqual(tb testing.TB, exp interface{}, act interface{}) {
	tb.Helper()
	same := reflect.DeepEqual(exp, act)
	Assert(tb, !same, "Expected %v not to be equal to %v", act, exp)
}
// TestQueue_Inline verifies that an Inline queue performs jobs synchronously
// inside Add.
func TestQueue_Inline(t *testing.T) {
	performed := false
	q := queue.Queue{Inline: true}
	q.Add(&queue.Job{Perform: func() { performed = true }})
	Assert(t, performed, "Job should run inline")
}
// TestQueue_Add verifies that a job added with AddFunc has run once WaitAll
// returns.
func TestQueue_Add(t *testing.T) {
	performed := false
	q := new(queue.Queue)
	q.AddFunc("", func() { performed = true })
	q.WaitAll()
	Assert(t, performed, "Job should have run")
}
// TestQueue_RunDelayed verifies delayed jobs run ordered by their delays
// (20ms, 40ms, 60ms → "bac"), not insertion order.
func TestQueue_RunDelayed(t *testing.T) {
	seq := ""
	new(queue.Queue).
		Add(&queue.Job{Key: "a", Perform: func() { seq = seq + "a" }, Delay: 40 * time.Millisecond}).
		Add(&queue.Job{Key: "b", Perform: func() { seq = seq + "b" }, Delay: 20 * time.Millisecond}).
		Add(&queue.Job{Key: "c", Perform: func() { seq = seq + "c" }, Delay: 60 * time.Millisecond}).
		WaitAll()
	Equal(t, "bac", seq)
}
// TestQueue_Override verifies that re-adding a job with the same key replaces
// the pending one: only the second job's Perform runs.
func TestQueue_Override(t *testing.T) {
	seq := ""
	new(queue.Queue).
		Add(&queue.Job{Key: "a", Perform: func() { seq = seq + "a" }, Delay: 40 * time.Millisecond}).
		Add(&queue.Job{Key: "a", Perform: func() { seq = seq + "b" }, Delay: 20 * time.Millisecond}).
		WaitAll()
	Equal(t, "b", seq)
}
// TestQueue_AutoKey verifies that keyless jobs receive distinct generated
// keys once added to a queue.
func TestQueue_AutoKey(t *testing.T) {
	first := queue.Job{Perform: func() {}}
	second := queue.Job{Perform: func() {}}
	// Before adding, both keys are the zero value.
	Equal(t, first.Key, second.Key)
	new(queue.Queue).Add(&first).Add(&second)
	NotEqual(t, first.Key, second.Key)
}
// TestQueue_Wait verifies Wait and WaitAll return immediately on an empty
// queue instead of blocking.
func TestQueue_Wait(t *testing.T) {
	q := new(queue.Queue)
	q.Wait()
	q.WaitAll()
}
// TestQueue_Repetition verifies a Repeat job fires periodically until the
// queue is shut down, then checks it ran exactly four times.
func TestQueue_Repetition(t *testing.T) {
	var (
		count = 0
		done  = make(chan struct{})
		q     = new(queue.Queue)
	)
	q.Add(&queue.Job{Repeat: 10 * time.Millisecond, Perform: func() {
		count++
		if count == 4 {
			done <- struct{}{}
		}
	}})
	<-done
	// Stop the queue before asserting on the final count.
	q.Shutdown()
	Equal(t, 4, count)
}
// TestQueue_Debounce verifies that re-adding a delayed job within its delay
// window coalesces the runs: four adds in two bursts yield two executions.
func TestQueue_Debounce(t *testing.T) {
	count := 0
	job := &queue.Job{
		Key:     "do me!",
		Perform: func() { count++ },
		Delay:   10 * time.Millisecond,
	}
	q := new(queue.Queue)
	// First burst: two adds inside the 10ms window → one run.
	q.Add(job)
	time.Sleep(2 * time.Millisecond)
	q.Add(job)
	// Second burst after the first has fired → one more run.
	time.Sleep(20 * time.Millisecond)
	q.Add(job)
	time.Sleep(2 * time.Millisecond)
	q.Add(job)
	q.WaitAll()
	Equal(t, 2, count)
}
// TestQueue_OnPanic verifies that a panicking job invokes the OnPanic hook
// with the job, the panic value, and a stack that includes the caller.
func TestQueue_OnPanic(t *testing.T) {
	var actual *queue.Panic
	(&queue.Queue{
		OnPanic: func(p *queue.Panic) { actual = p },
	}).
		AddFunc("", func() { panic("abc") }).
		WaitAll()
	Equal(t, "Anonymous job #1", actual.Job.Key)
	Equal(t, "abc", actual.Error)
	Assert(t, bytes.Contains(actual.FormattedStack, []byte("TestQueue_OnPanic")),
		"Stack should include the calling test function")
}
// TestQueue_Exclusive verifies that same-key jobs serialize by default
// ("az" pairs run back to back) but interleave once Simultaneous is set
// ("aazz"), producing the combined sequence "azazaazz".
func TestQueue_Exclusive(t *testing.T) {
	var (
		seq  = ""
		lock = sync.Mutex{}
		q    = new(queue.Queue)
		add  = func(s string) {
			lock.Lock()
			seq += s
			lock.Unlock()
		}
		job = &queue.Job{
			Key: "stuff",
			Perform: func() {
				add("a")
				time.Sleep(20 * time.Millisecond)
				add("z")
			},
		}
	)
	// Exclusive (default): the second run waits for the first → "azaz".
	q.Add(job)
	time.Sleep(10 * time.Millisecond)
	q.Add(job).WaitAll()
	// Simultaneous: the runs overlap → "aazz".
	job.Simultaneous = true
	q.Add(job)
	time.Sleep(10 * time.Millisecond)
	q.Add(job).WaitAll()
	Equal(t, "azazaazz", seq)
}
// TestQueue_Conflicts verifies HasConflicts behavior: DiscardOnConflict drops
// a conflicting job entirely, while without it the job waits until the
// conflicting job has finished before running.
func TestQueue_Conflicts(t *testing.T) {
	q := new(queue.Queue)
	count := 0
	// Always-conflicting + DiscardOnConflict → Perform never runs.
	q.Add(&queue.Job{
		HasConflicts:      func(_ []string) bool { return true },
		DiscardOnConflict: true,
		Perform:           func() { count++ },
	}).WaitAll()
	Equal(t, 0, count)
	count = 0
	// A long-running job sets count to 5; the conflicting doubler must wait
	// for it, so the result is 10 rather than 0.
	q.AddFunc("", func() {
		time.Sleep(50 * time.Millisecond)
		count = 5
	})
	time.Sleep(20 * time.Millisecond)
	q.Add(&queue.Job{
		HasConflicts: func(k []string) bool { return len(k) > 0 },
		Perform:      func() { count *= 2 },
	}).WaitAll()
	Equal(t, 10, count)
}
// TestQueue_Conflict verifies the single-key HasConflict callback: it is
// consulted with the running job's key, and DiscardOnConflict drops the
// second job so only the first increments the counter.
func TestQueue_Conflict(t *testing.T) {
	var (
		count  = 0
		tested []string
	)
	new(queue.Queue).
		Add(&queue.Job{
			Key: "thorn",
			Perform: func() {
				time.Sleep(50 * time.Millisecond)
				count += 1
			},
		}).
		Add(&queue.Job{
			Delay: 20 * time.Millisecond,
			HasConflict: func(key string) bool {
				tested = append(tested, key)
				return true
			},
			Perform:           func() { count += 1 },
			DiscardOnConflict: true,
		}).WaitAll()
	Equal(t, []string{"thorn"}, tested)
	Equal(t, 1, count)
}
// TestQueue_Remove verifies a pending delayed job can be removed by key
// before it fires; the Perform would fail the test if it ever ran.
func TestQueue_Remove(t *testing.T) {
	q := new(queue.Queue).Add(&queue.Job{
		Key:     "fail",
		Delay:   50 * time.Millisecond,
		Perform: func() { t.FailNow() },
	})
	Equal(t, uint(1), q.Remove("fail"))
	q.WaitAll()
}
// TestQueue_CanReplace verifies that a job whose CanReplace accepts another
// job's key supplants that job: "c" replaces "a", so only "c" and "b" run.
func TestQueue_CanReplace(t *testing.T) {
	var (
		seq  = ""
		lock = sync.Mutex{}
		add  = func(s string) {
			lock.Lock()
			seq += s
			lock.Unlock()
		}
	)
	new(queue.Queue).
		Add(&queue.Job{
			Key:     "a",
			Delay:   10 * time.Millisecond,
			Perform: func() { add("a") },
		}).
		Add(&queue.Job{
			Key:     "b",
			Delay:   20 * time.Millisecond,
			Perform: func() { add("b") },
		}).
		Add(&queue.Job{
			Key:        "c",
			Delay:      10 * time.Millisecond,
			Perform:    func() { add("c") },
			CanReplace: func(key string) bool { return key == "a" },
		}).
		WaitAll()
	Equal(t, "cb", seq)
}
// TestQueue_Clear verifies Clear on a paused queue returns the pending jobs
// and leaves the queue empty.
func TestQueue_Clear(t *testing.T) {
	q := new(queue.Queue).Pause()
	q.
		AddFunc("foo", func() {}).
		AddFunc("baz", func() {}).
		AddFunc("bar", func() {})
	Equal(t, 3, len(q.Waiting()))
	Equal(t, 3, len(q.Clear()))
	Equal(t, 0, len(q.Waiting()))
}
// TestQueue_Force verifies Force on a paused queue runs exactly one pending
// job per call — the soonest-due job first ("1" with no delay before "2"
// with a one-second delay) — regardless of remaining delay.
func TestQueue_Force(t *testing.T) {
	var (
		run bool
		q   = (&queue.Queue{}).Pause()
	)
	q.Add(&queue.Job{
		Key:     "2",
		Perform: func() { run = true },
		Delay:   1 * time.Second,
	}).Add(&queue.Job{
		Key:     "1",
		Perform: func() {},
	})
	Equal(t, 2, len(q.Waiting()))
	Equal(t, "1", q.Force().Key)
	Equal(t, 1, len(q.Waiting()))
	Assert(t, !run, "")
	Equal(t, "2", q.Force().Key)
	Equal(t, 0, len(q.Waiting()))
	Assert(t, run, "")
}
// TestQueue_Drain verifies Drain on a paused queue runs every pending job,
// returns them in order, and leaves nothing waiting.
func TestQueue_Drain(t *testing.T) {
	var (
		count int
		q     = new(queue.Queue).Pause()
		inc   = func() { count += 1 }
	)
	q.AddFunc("a", inc).AddFunc("b", inc).AddFunc("c", inc)
	Equal(t, 3, len(q.Waiting()))
	Equal(t, "c", q.Drain()[2].Key)
	Equal(t, 0, len(q.Waiting()))
	// All three jobs were performed by Drain.
	Equal(t, 3, count)
}
// TestQueue_Waiting verifies Waiting reflects the pending jobs of a paused
// queue and becomes an empty slice after Drain.
func TestQueue_Waiting(t *testing.T) {
	var (
		q = new(queue.Queue).Pause()
		j = &queue.Job{Key: "123", Perform: func() {}}
	)
	q.Add(j)
	Equal(t, []*queue.Job{j}, q.Waiting())
	q.Drain()
	Equal(t, []*queue.Job{}, q.Waiting())
}
// TestQueue_ForcedJobQueuesAnotherJob is a regression test: a forced job that
// itself adds a job to the same queue must not deadlock Force.
func TestQueue_ForcedJobQueuesAnotherJob(t *testing.T) {
	q := new(queue.Queue)
	q.AddFunc("outer", func() {
		q.AddFunc("inner", func() {})
	})
	q.Force() // Should not deadlock
}
// TestQueue_Resume verifies a paused queue holds jobs back until Resume.
func TestQueue_Resume(t *testing.T) {
	var (
		run bool
		q   = (&queue.Queue{}).Pause()
	)
	q.AddFunc("run", func() { run = true })
	time.Sleep(20 * time.Millisecond)
	Assert(t, !run, "should not have run")
	q.Resume()
	time.Sleep(20 * time.Millisecond)
	Assert(t, run, "should have run")
}
|
package handlers
import (
"fmt"
"net/url"
"github.com/go-webauthn/webauthn/protocol"
"github.com/go-webauthn/webauthn/webauthn"
"github.com/authelia/authelia/v4/internal/middlewares"
"github.com/authelia/authelia/v4/internal/model"
"github.com/authelia/authelia/v4/internal/session"
)
// getWebAuthnUser builds the WebAuthn user model for the current session,
// loading the user's registered devices and opaque user handle from storage.
// The display name falls back to the username when the session has none.
func getWebAuthnUser(ctx *middlewares.AutheliaCtx, userSession session.UserSession) (user *model.WebAuthnUser, err error) {
	displayName := userSession.DisplayName
	if displayName == "" {
		displayName = userSession.Username
	}
	user = &model.WebAuthnUser{
		Username:    userSession.Username,
		DisplayName: displayName,
	}
	if user.Devices, err = ctx.Providers.StorageProvider.LoadWebAuthnDevicesByUsername(ctx, userSession.Username); err != nil {
		return nil, err
	}
	opaqueID, err := getWebAuthnUserOpaqueID(ctx, user.Username)
	if err != nil {
		return nil, err
	}
	user.UserID = opaqueID.Identifier.String()
	return user, nil
}
// getWebAuthnUserOpaqueID returns the user's opaque WebAuthn identifier,
// creating and persisting a new one when none exists yet.
func getWebAuthnUserOpaqueID(ctx *middlewares.AutheliaCtx, username string) (opaqueID *model.UserOpaqueIdentifier, err error) {
	opaqueID, err = ctx.Providers.StorageProvider.LoadUserOpaqueIdentifierBySignature(ctx, "webauthn", "pre", username)
	if err != nil {
		return nil, err
	}
	if opaqueID != nil {
		return opaqueID, nil
	}
	// No identifier yet: mint one and persist it before returning.
	if opaqueID, err = model.NewUserOpaqueIdentifier("webauthn", "pre", username); err != nil {
		return nil, err
	}
	if err = ctx.Providers.StorageProvider.SaveUserOpaqueIdentifier(ctx, *opaqueID); err != nil {
		return nil, err
	}
	return opaqueID, nil
}
// newWebAuthn creates a WebAuthn relying-party instance whose RP ID is the
// hostname of the original request URL and whose origin is its scheme://host,
// with the remaining settings taken from the WebAuthn configuration.
func newWebAuthn(ctx *middlewares.AutheliaCtx) (w *webauthn.WebAuthn, err error) {
	var (
		u *url.URL
	)
	if u, err = ctx.GetXOriginalURLOrXForwardedURL(); err != nil {
		return nil, err
	}
	rpID := u.Hostname()
	origin := fmt.Sprintf("%s://%s", u.Scheme, u.Host)
	config := &webauthn.Config{
		RPDisplayName:          ctx.Configuration.WebAuthn.DisplayName,
		RPID:                   rpID,
		RPOrigin:               origin,
		RPIcon:                 "",
		AttestationPreference:  ctx.Configuration.WebAuthn.ConveyancePreference,
		AuthenticatorSelection: protocol.AuthenticatorSelection{
			// Cross-platform authenticators (e.g. security keys) only.
			AuthenticatorAttachment: protocol.CrossPlatform,
			UserVerification:        ctx.Configuration.WebAuthn.UserVerification,
			RequireResidentKey:      protocol.ResidentKeyNotRequired(),
		},
		Timeout: int(ctx.Configuration.WebAuthn.Timeout.Milliseconds()),
	}
	ctx.Logger.Tracef("Creating new WebAuthn RP instance with ID %s and Origins %s", config.RPID, config.RPOrigin)
	return webauthn.New(config)
}
|
package hasedbuffer
import (
"bytes"
"encoding/hex"
"github.com/AppImageCrafters/libzsync-go/rollinghash"
"github.com/glycerine/rbuf"
"golang.org/x/crypto/md4"
"io"
)
// HashedRingBuffer couples a fixed-size ring buffer with a rolling hash that
// is kept in sync with the buffer's contents as bytes enter and leave.
type HashedRingBuffer struct {
	hash *rollinghash.RollingHash
	rBuf *rbuf.FixedSizeRingBuf
}
// NewHashedBuffer creates a HashedRingBuffer holding size bytes, configuring
// the rolling hash with the smallest power-of-two exponent covering size.
func NewHashedBuffer(size int) *HashedRingBuffer {
	/* Calculate bit-shift for blocksize: smallest i in [0,32) with
	size <= 1<<i (left at 0 when none fits). */
	blockShift := uint16(0)
	for i := uint16(0); i < 32; i++ {
		if size <= (1 << i) {
			blockShift = i
			break
		}
	}
	return &HashedRingBuffer{
		hash: rollinghash.NewRollingHash(blockShift),
		rBuf: rbuf.NewFixedSizeRingBuf(size),
	}
}
// Write appends p to the ring buffer, feeding every new byte into the rolling
// hash. When the buffer would overflow, the oldest bytes are advanced past
// and rolled out of the hash as the new ones are rolled in.
func (h *HashedRingBuffer) Write(p []byte) (n int, err error) {
	pSize := len(p)
	// Number of existing bytes that must be evicted to make room for p.
	evictedSize := (h.rBuf.Readable + pSize) - h.rBuf.N
	if evictedSize < 0 {
		evictedSize = 0
	}
	for i := 0; i < pSize; i++ {
		if i < evictedSize {
			// Oldest byte sits at rBuf.Beg; roll it out of the hash and
			// advance the buffer past it.
			evicted := uint16(h.rBuf.A[h.rBuf.Use][h.rBuf.Beg])
			h.hash.Update(uint16(p[i]), evicted)
			h.rBuf.Advance(1)
		} else {
			// Still room: nothing leaves, so the evicted byte is 0.
			h.hash.Update(uint16(p[i]), 0)
		}
	}
	return h.rBuf.Write(p)
}
// Bytes returns the current contents of the ring buffer.
func (h HashedRingBuffer) Bytes() []byte {
	return h.rBuf.Bytes()
}
// RollingSumHex returns the rolling checksum as a hex string.
func (h HashedRingBuffer) RollingSumHex() string {
	return hex.EncodeToString(h.RollingSum())
}
// RollingSum returns the 4-byte rolling checksum of the buffer contents.
func (h HashedRingBuffer) RollingSum() []byte {
	sum := make([]byte, 4)
	h.hash.PutSum(sum)
	return sum
}
// CheckSum returns the MD4 digest of the buffer contents, hashing the two
// contiguous spans of the ring buffer in order.
func (h HashedRingBuffer) CheckSum() []byte {
	hasher := md4.New()
	first, second := h.rBuf.BytesTwo(false)
	hasher.Write(first)
	hasher.Write(second)
	return hasher.Sum(nil)
}
// CheckSumHex returns the MD4 digest of the buffer contents as a hex string.
func (h HashedRingBuffer) CheckSumHex() string {
	return hex.EncodeToString(h.CheckSum())
}
// ReadNFrom reads up to bytes bytes from input into the ring buffer, rolling
// each newly read byte into the hash and rolling evicted bytes out. It
// returns the number of bytes read and any read error.
func (h *HashedRingBuffer) ReadNFrom(input io.Reader, bytes int64) (int64, error) {
	// Position of the first byte the ReadFrom below will have written.
	newBytesIdx := h.rBuf.Last() + 1
	n, readErr := h.rBuf.ReadFrom(io.LimitReader(input, bytes))
	// Bytes that overflowed the buffer and must be rolled out of the hash.
	evictedSize := h.rBuf.Readable - h.rBuf.N
	if evictedSize < 0 {
		evictedSize = 0
	}
	for i := 0; i < int(n); i++ {
		newChar := uint16(h.rBuf.A[h.rBuf.Use][newBytesIdx])
		if i < evictedSize {
			// Roll the oldest byte out and advance past it.
			evicted := uint16(h.rBuf.A[h.rBuf.Use][h.rBuf.Beg])
			h.hash.Update(newChar, evicted)
			h.rBuf.Advance(1)
		} else {
			h.hash.Update(newChar, 0)
		}
		newBytesIdx = h.rBuf.Nextpos(newBytesIdx)
	}
	return n, readErr
}
// ReadByte reads one byte from input into the buffer, rolling it into the
// hash and rolling the evicted byte (or 0 when the buffer was empty) out.
// It returns the byte read, or 0 together with the read error when no byte
// was available.
func (h *HashedRingBuffer) ReadByte(input io.Reader) (byte, error) {
	evicted := uint16(0)
	if h.rBuf.First() != -1 {
		// Buffer non-empty: drop the oldest byte to make room.
		evicted = uint16(h.rBuf.A[h.rBuf.Use][h.rBuf.First()])
		h.rBuf.Advance(1)
	}
	n, err := h.rBuf.ReadFrom(input)
	if n > 0 {
		newChar := uint16(h.rBuf.A[h.rBuf.Use][h.rBuf.Last()])
		h.hash.Update(newChar, evicted)
		return byte(newChar), err
	} else {
		// Nothing read; still roll the evicted byte out against a zero.
		h.hash.Update(0, evicted)
		return 0, err
	}
}
// ReadFull resets the buffer and hash, then fills the complete buffer from
// input; missing bytes are replaced by '0' (zero bytes). It returns the
// number of bytes actually read from input and any read error.
func (h *HashedRingBuffer) ReadFull(input io.Reader) (int64, error) {
	h.rBuf.Reset()
	h.hash.Reset()
	newCharIdx := h.rBuf.Beg + h.rBuf.Readable
	n, err := h.rBuf.ReadFrom(input)
	// Pad the remainder of the buffer with zero bytes.
	missingChars := uint16(h.rBuf.N) - uint16(n)
	_, _ = h.rBuf.ReadFrom(bytes.NewBuffer(make([]byte, missingChars)))
	// Rebuild the hash from the full buffer contents, weighting each byte
	// by its remaining distance i.
	for i := uint16(h.rBuf.N); i > 0; i-- {
		newChar := uint16(h.rBuf.A[h.rBuf.Use][newCharIdx])
		newCharIdx = h.rBuf.Nextpos(newCharIdx)
		h.hash.Append(newChar, i)
	}
	return n, err
}
|
package storage
import (
"context"
"database/sql"
"time"
"github.com/google/uuid"
"github.com/ory/fosite/storage"
"github.com/authelia/authelia/v4/internal/model"
)
// Provider is an interface providing storage capabilities for persisting any kind of data related to Authelia.
type Provider interface {
	model.StartupCheck

	RegulatorProvider

	storage.Transactional

	// Second-factor method preference and user profile lookups.
	SavePreferred2FAMethod(ctx context.Context, username string, method string) (err error)
	LoadPreferred2FAMethod(ctx context.Context, username string) (method string, err error)
	LoadUserInfo(ctx context.Context, username string) (info model.UserInfo, err error)

	// Opaque user identifier storage.
	SaveUserOpaqueIdentifier(ctx context.Context, subject model.UserOpaqueIdentifier) (err error)
	LoadUserOpaqueIdentifier(ctx context.Context, identifier uuid.UUID) (subject *model.UserOpaqueIdentifier, err error)
	LoadUserOpaqueIdentifiers(ctx context.Context) (identifiers []model.UserOpaqueIdentifier, err error)
	LoadUserOpaqueIdentifierBySignature(ctx context.Context, service, sectorID, username string) (subject *model.UserOpaqueIdentifier, err error)

	// Identity verification token storage (save / consume-once / lookup by JTI).
	SaveIdentityVerification(ctx context.Context, verification model.IdentityVerification) (err error)
	ConsumeIdentityVerification(ctx context.Context, jti string, ip model.NullIP) (err error)
	FindIdentityVerification(ctx context.Context, jti string) (found bool, err error)

	// TOTP configuration storage.
	SaveTOTPConfiguration(ctx context.Context, config model.TOTPConfiguration) (err error)
	UpdateTOTPConfigurationSignIn(ctx context.Context, id int, lastUsedAt sql.NullTime) (err error)
	DeleteTOTPConfiguration(ctx context.Context, username string) (err error)
	LoadTOTPConfiguration(ctx context.Context, username string) (config *model.TOTPConfiguration, err error)
	LoadTOTPConfigurations(ctx context.Context, limit, page int) (configs []model.TOTPConfiguration, err error)

	// WebAuthn device storage.
	SaveWebAuthnDevice(ctx context.Context, device model.WebAuthnDevice) (err error)
	UpdateWebAuthnDeviceSignIn(ctx context.Context, id int, rpid string, lastUsedAt sql.NullTime, signCount uint32, cloneWarning bool) (err error)
	DeleteWebAuthnDevice(ctx context.Context, kid string) (err error)
	DeleteWebAuthnDeviceByUsername(ctx context.Context, username, description string) (err error)
	LoadWebAuthnDevices(ctx context.Context, limit, page int) (devices []model.WebAuthnDevice, err error)
	LoadWebAuthnDevicesByUsername(ctx context.Context, username string) (devices []model.WebAuthnDevice, err error)

	// Duo device preference storage.
	SavePreferredDuoDevice(ctx context.Context, device model.DuoDevice) (err error)
	DeletePreferredDuoDevice(ctx context.Context, username string) (err error)
	LoadPreferredDuoDevice(ctx context.Context, username string) (device *model.DuoDevice, err error)

	// OAuth 2.0 consent session and pre-configuration storage.
	SaveOAuth2ConsentPreConfiguration(ctx context.Context, config model.OAuth2ConsentPreConfig) (insertedID int64, err error)
	LoadOAuth2ConsentPreConfigurations(ctx context.Context, clientID string, subject uuid.UUID) (rows *ConsentPreConfigRows, err error)
	SaveOAuth2ConsentSession(ctx context.Context, consent model.OAuth2ConsentSession) (err error)
	SaveOAuth2ConsentSessionSubject(ctx context.Context, consent model.OAuth2ConsentSession) (err error)
	SaveOAuth2ConsentSessionResponse(ctx context.Context, consent model.OAuth2ConsentSession, rejection bool) (err error)
	SaveOAuth2ConsentSessionGranted(ctx context.Context, id int) (err error)
	LoadOAuth2ConsentSessionByChallengeID(ctx context.Context, challengeID uuid.UUID) (consent *model.OAuth2ConsentSession, err error)

	// OAuth 2.0 session storage keyed by session type and signature.
	SaveOAuth2Session(ctx context.Context, sessionType OAuth2SessionType, session model.OAuth2Session) (err error)
	RevokeOAuth2Session(ctx context.Context, sessionType OAuth2SessionType, signature string) (err error)
	RevokeOAuth2SessionByRequestID(ctx context.Context, sessionType OAuth2SessionType, requestID string) (err error)
	DeactivateOAuth2Session(ctx context.Context, sessionType OAuth2SessionType, signature string) (err error)
	DeactivateOAuth2SessionByRequestID(ctx context.Context, sessionType OAuth2SessionType, requestID string) (err error)
	LoadOAuth2Session(ctx context.Context, sessionType OAuth2SessionType, signature string) (session *model.OAuth2Session, err error)

	// OAuth 2.0 Pushed Authorization Request context storage.
	SaveOAuth2PARContext(ctx context.Context, par model.OAuth2PARContext) (err error)
	LoadOAuth2PARContext(ctx context.Context, signature string) (par *model.OAuth2PARContext, err error)
	RevokeOAuth2PARContext(ctx context.Context, signature string) (err error)
	UpdateOAuth2PARContext(ctx context.Context, par model.OAuth2PARContext) (err error)

	// OAuth 2.0 blacklisted JTI storage.
	SaveOAuth2BlacklistedJTI(ctx context.Context, blacklistedJTI model.OAuth2BlacklistedJTI) (err error)
	LoadOAuth2BlacklistedJTI(ctx context.Context, signature string) (blacklistedJTI *model.OAuth2BlacklistedJTI, err error)

	// Schema introspection, migration, and encryption-key management.
	SchemaTables(ctx context.Context) (tables []string, err error)
	SchemaVersion(ctx context.Context) (version int, err error)
	SchemaLatestVersion() (version int, err error)
	SchemaMigrate(ctx context.Context, up bool, version int) (err error)
	SchemaMigrationHistory(ctx context.Context) (migrations []model.Migration, err error)
	SchemaMigrationsUp(ctx context.Context, version int) (migrations []model.SchemaMigration, err error)
	SchemaMigrationsDown(ctx context.Context, version int) (migrations []model.SchemaMigration, err error)
	SchemaEncryptionChangeKey(ctx context.Context, key string) (err error)
	SchemaEncryptionCheckKey(ctx context.Context, verbose bool) (result EncryptionValidationResult, err error)

	// Close releases the underlying storage resources.
	Close() (err error)
}
// RegulatorProvider is an interface providing storage capabilities for persisting any kind of data related to the regulator.
type RegulatorProvider interface {
	// AppendAuthenticationLog records a single authentication attempt.
	AppendAuthenticationLog(ctx context.Context, attempt model.AuthenticationAttempt) (err error)
	// LoadAuthenticationLogs returns a page of attempts for username newer than fromDate.
	LoadAuthenticationLogs(ctx context.Context, username string, fromDate time.Time, limit, page int) (attempts []model.AuthenticationAttempt, err error)
}
|
/*
Copyright IBM Corporation 2020
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package move2kube
import (
"os"
customize "github.com/konveyor/move2kube/internal/customizer"
"github.com/konveyor/move2kube/internal/metadata"
optimize "github.com/konveyor/move2kube/internal/optimizer"
parameterize "github.com/konveyor/move2kube/internal/parameterizer"
"github.com/konveyor/move2kube/internal/source"
transform "github.com/konveyor/move2kube/internal/transformer"
plantypes "github.com/konveyor/move2kube/types/plan"
log "github.com/sirupsen/logrus"
)
// Translate translates the artifacts and writes output.
// Pipeline: source -> metadata loading -> optimization -> docker-compose
// transform -> customization -> (optional) parameterization -> final
// transform. Fatal errors terminate the process via log.Fatalf.
func Translate(p plantypes.Plan, outpath string) {
	sourceir, err := source.Translate(p)
	if err != nil {
		// Fatalf exits the process; nothing below runs on failure here.
		log.Fatalf("Failed to translate the plan to intermediate representation. Error: %q", err)
	}
	log.Debugf("Total storages loaded : %d", len(sourceir.Storages))
	log.Infoln("Begin Metadata loading")
	metadataPlanners := metadata.GetLoaders()
	for _, l := range metadataPlanners {
		log.Debugf("[%T] Begin metadata loading", l)
		err := l.LoadToIR(p, &sourceir)
		if err != nil {
			// Metadata loaders are best-effort: a failing loader is only warned about.
			log.Warnf("[%T] Failed : %s", l, err.Error())
		} else {
			log.Debugf("[%T] Done", l)
		}
	}
	log.Infoln("Metadata loading done")
	log.Debugf("Total services loaded : %d", len(sourceir.Services))
	log.Debugf("Total containers loaded : %d", len(sourceir.Containers))
	optimizedir, err := optimize.Optimize(sourceir)
	if err != nil {
		// Optimization is optional: fall back to the unoptimized IR.
		log.Warnf("Failed to optimize the intermediate representation. Error: %q", err)
		optimizedir = sourceir
	}
	log.Debugf("Total services optimized : %d", len(optimizedir.Services))
	// Start from a clean output directory; the RemoveAll error is ignored.
	os.RemoveAll(outpath)
	dct := transform.ComposeTransformer{}
	if err := dct.Transform(optimizedir); err != nil {
		log.Errorf("Error during translate docker compose file : %s", err)
	} else if err = dct.WriteObjects(outpath); err != nil {
		log.Errorf("Unable to write docker compose objects : %s", err)
	}
	// NOTE(review): the Customize error is silently discarded — confirm this
	// is intentional.
	ir, _ := customize.Customize(optimizedir)
	log.Debugf("Total storages customized : %d", len(optimizedir.Storages))
	if p.Spec.Outputs.Kubernetes.ArtifactType != plantypes.Yamls && p.Spec.Outputs.Kubernetes.ArtifactType != plantypes.Knative {
		ir, _ = parameterize.Parameterize(ir)
	}
	t := transform.GetTransformer(ir)
	if err := t.Transform(ir); err != nil {
		log.Fatalf("Error during translate. Error: %q", err)
	} else if err := t.WriteObjects(outpath); err != nil {
		log.Fatalf("Unable to write objects Error: %q", err)
	}
	log.Info("Execution completed")
}
|
// Package handlers contains the full set of handler functions and routes
// supported by the web api.
package handlers
import (
"log"
"net/http"
"os"
"github.com/dimfeld/httptreemux"
)
// API constructs an http.Handler with all application routes defined.
func API(build string, shutdown chan os.Signal, log *log.Logger, server, port string) *httptreemux.ContextMux {
	matrix := New(server, port)

	router := httptreemux.NewContextMux()
	router.Handle(http.MethodGet, "/test", readiness)
	router.Handle(http.MethodGet, "/.well-known/matrix/server", matrix.delegate)

	return router
}
|
package payment
import (
"errors"
"fmt"
"regexp"
"time"
)
// CreditAccount is an Account backed by a credit card. All card details are
// unexported and accessed via the getter/setter methods below.
type CreditAccount struct {
	Account
	ownerName       string // cardholder name
	cardNumber      string // formatted card number (see cardNumberPattern)
	expirationMonth int    // 1-12
	expirationYear  int    // four-digit year
	securityCode    int    // CVV, validated as three digits by SetSecurityCode
	availableCredit int    // NOTE(review): currently unused; AvailableCredit() returns a constant
}
// cardNumberPattern matches card numbers formatted as four groups of four
// digits separated by dashes, e.g. "1234-5678-9012-3456".
//
// The previous pattern `^[\d{4}-]{3}\d{4}$` used a character class instead
// of a group: it matched any 3 characters from the set {digit, '{', '4',
// '}', '-'} followed by 4 digits, so every correctly formatted card number
// was rejected.
var cardNumberPattern = regexp.MustCompile(`^(\d{4}-){3}\d{4}$`)
// CreateCreditAccount builds a CreditAccount from the supplied card details
// and starts a background goroutine that processes charge amounts received
// on chargeCh.
//
// NOTE(review): the goroutine exits only when chargeCh is closed by the
// caller; otherwise it lives for the life of the process.
func CreateCreditAccount(ownerName, cardNumber string,
	expirationMonth, expirationYear, securityCode int,
	chargeCh chan float32) *CreditAccount {
	creditAccount := &CreditAccount{
		ownerName:       ownerName,
		cardNumber:      cardNumber,
		expirationMonth: expirationMonth,
		expirationYear:  expirationYear,
		securityCode:    securityCode,
	}

	// Consume charges until the channel is closed.
	go func(chargeCh chan float32) {
		for amount := range chargeCh {
			creditAccount.processPayment(amount)
		}
	}(chargeCh)

	return creditAccount
}
// processPayment handles one charge received on the account's charge
// channel. Currently it only logs the activity; amount is unused.
func (c *CreditAccount) processPayment(amount float32) {
	fmt.Println("Processing a credit card payment, using channel...")
}
// OwnerName reports the name of the account holder.
func (c *CreditAccount) OwnerName() string {
	name := c.ownerName
	return name
}
// SetOwnerName stores the cardholder name, rejecting an empty string.
func (c *CreditAccount) SetOwnerName(name string) error {
	if name == "" {
		return errors.New("invalid name provided")
	}

	c.ownerName = name
	return nil
}
// CardNumber reports the stored, formatted card number.
func (c *CreditAccount) CardNumber() string {
	number := c.cardNumber
	return number
}
// SetCardNumber validates number against cardNumberPattern and stores it.
// It returns an error when the format does not match.
func (c *CreditAccount) SetCardNumber(number string) error {
	// MatchString avoids the needless []byte copy of Match([]byte(number)).
	if !cardNumberPattern.MatchString(number) {
		return errors.New("invalid credit card number format")
	}
	c.cardNumber = number
	return nil
}
// ExpirationDate returns the card's expiry as a (month, year) pair.
func (c *CreditAccount) ExpirationDate() (month, year int) {
	month, year = c.expirationMonth, c.expirationYear
	return month, year
}
// SetExpirationDate stores the expiry month and year after verifying that
// the pair is not already in the past relative to the current clock.
func (c *CreditAccount) SetExpirationDate(month, year int) error {
	now := time.Now()
	expired := year < now.Year() ||
		(year == now.Year() && time.Month(month) < now.Month())
	if expired {
		return errors.New("expiration date must be in the future")
	}

	c.expirationMonth = month
	c.expirationYear = year
	return nil
}
// SecurityCode reports the stored CVV.
func (c *CreditAccount) SecurityCode() int {
	code := c.securityCode
	return code
}
// SetSecurityCode stores the CVV when it lies in the accepted range
// 100-999 (three digits, no leading zero).
func (c *CreditAccount) SetSecurityCode(code int) error {
	if code >= 100 && code <= 999 {
		c.securityCode = code
		return nil
	}
	return errors.New("invalid security code")
}
// AvailableCredit reports the credit currently available on the card.
// The value is a placeholder: a real implementation would query a remote
// service, and the caller neither knows nor cares where it comes from.
func (c *CreditAccount) AvailableCredit() float32 {
	const placeholderLimit float32 = 5000
	return placeholderLimit
}
|
package seev
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01100101 is the XML document wrapper for the seev.011.001.01
// AgentCANotificationStatusAdvice message.
type Document01100101 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:seev.011.001.01 Document"`
	// Message is the single message payload carried by this document.
	Message *AgentCANotificationStatusAdviceV01 `xml:"AgtCANtfctnStsAdvc"`
}
// AddMessage allocates the document's message payload and returns it so the
// caller can populate its fields.
func (d *Document01100101) AddMessage() *AgentCANotificationStatusAdviceV01 {
	msg := &AgentCANotificationStatusAdviceV01{}
	d.Message = msg
	return msg
}
// Scope
// This message is sent by a CSD to an issuer (or its agent) to report the status, or change in status, of a notification advice or notification cancellation request.
// Usage
// When this message is used to report the status of a notification advice then the building block Agent Corporate Action Notification Advice Identification must be present.
// When this message is used to provide the status of a notification cancellation request then the building block Notification Cancellation Request Identification must be present.
type AgentCANotificationStatusAdviceV01 struct {

	// Identification assigned by the Sender to unambiguously identify the status advice.
	Identification *iso20022.DocumentIdentification8 `xml:"Id"`

	// Identification of the linked Agent CA Notification Advice for which a status is given.
	AgentCANotificationAdviceIdentification *iso20022.DocumentIdentification8 `xml:"AgtCANtfctnAdvcId"`

	// Identification of the linked Agent CA Notification Cancellation Request for which a status is given.
	AgentCANotificationCancellationRequestIdentification *iso20022.DocumentIdentification8 `xml:"AgtCANtfctnCxlReqId"`

	// General information about the corporate action event.
	CorporateActionGeneralInformation *iso20022.CorporateActionInformation2 `xml:"CorpActnGnlInf"`

	// Status of the Notification Cancellation Request sent by the issuer (agent).
	NotificationCancellationRequestStatus *iso20022.NotificationCancellationRequestStatus1Choice `xml:"NtfctnCxlReqSts"`

	// Status of the notification advice sent by the issuer (agent).
	NotificationAdviceStatus *iso20022.NotificationAdviceStatus1Choice `xml:"NtfctnAdvcSts"`
}
// AddIdentification allocates the status advice identification block and
// returns it for population.
func (a *AgentCANotificationStatusAdviceV01) AddIdentification() *iso20022.DocumentIdentification8 {
	id := &iso20022.DocumentIdentification8{}
	a.Identification = id
	return id
}
// AddAgentCANotificationAdviceIdentification allocates the linked
// notification advice identification block and returns it for population.
func (a *AgentCANotificationStatusAdviceV01) AddAgentCANotificationAdviceIdentification() *iso20022.DocumentIdentification8 {
	id := &iso20022.DocumentIdentification8{}
	a.AgentCANotificationAdviceIdentification = id
	return id
}
// AddAgentCANotificationCancellationRequestIdentification allocates the
// linked cancellation request identification block and returns it.
func (a *AgentCANotificationStatusAdviceV01) AddAgentCANotificationCancellationRequestIdentification() *iso20022.DocumentIdentification8 {
	id := &iso20022.DocumentIdentification8{}
	a.AgentCANotificationCancellationRequestIdentification = id
	return id
}
// AddCorporateActionGeneralInformation allocates the corporate action
// general information block and returns it for population.
func (a *AgentCANotificationStatusAdviceV01) AddCorporateActionGeneralInformation() *iso20022.CorporateActionInformation2 {
	info := &iso20022.CorporateActionInformation2{}
	a.CorporateActionGeneralInformation = info
	return info
}
// AddNotificationCancellationRequestStatus allocates the cancellation
// request status choice block and returns it for population.
func (a *AgentCANotificationStatusAdviceV01) AddNotificationCancellationRequestStatus() *iso20022.NotificationCancellationRequestStatus1Choice {
	status := &iso20022.NotificationCancellationRequestStatus1Choice{}
	a.NotificationCancellationRequestStatus = status
	return status
}
// AddNotificationAdviceStatus allocates the notification advice status
// choice block and returns it for population.
func (a *AgentCANotificationStatusAdviceV01) AddNotificationAdviceStatus() *iso20022.NotificationAdviceStatus1Choice {
	status := &iso20022.NotificationAdviceStatus1Choice{}
	a.NotificationAdviceStatus = status
	return status
}
|
package main
import (
"fmt"
// "learn10/interface_demo"
// "learn10/empty_interface"
"logger"
)
// InitLogger configures the global logger of the given backend name with
// the supplied output path, file name, and minimum level. Failures are
// reported to stdout.
func InitLogger(name, filePath, fileName, level string) {
	config := map[string]string{
		"log_level": level,
		"log_path":  filePath,
		"log_name":  fileName,
	}
	if err := logger.InitLogger(name, config); err != nil {
		// Include the underlying cause and terminate the line; the original
		// message printed neither.
		fmt.Printf("init logger failed: %v\n", err)
	}
}
// main initializes the file logger and exercises every log level once.
func main() {
	// demo.Test()
	InitLogger("file", "E:/gospace/src/logger", "myLogger1", "debug")
	logger.Debug("this is debug log...")
	logger.Trace("this is trace log...") // fixed "tihs" typo in the message
	logger.Info("this is info log...")
	logger.Warn("this is warn log...")
	logger.Error("this is error log...")
	logger.Fatal("this is fatal log...")
}
|
/*
* outer: outer product
*
* input:
* vector: a vector of (x, y) points
* nelts: the number of points
*
* output:
* Outer_matrix: a real matrix, whose values are filled with inter-point
* distances
* Outer_vector: a real vector, whose values are filled with origin-to-point
* distances
*/
package all
import (
"math"
)
// Double is the floating-point type used throughout the outer-product code.
type Double float64;
// Outer_matrix receives the pairwise inter-point distances (see file header).
var Outer_matrix [][]Double;
// Outer_vector receives the origin-to-point distances.
var Outer_vector []Double;
// max returns the larger of a and b.
func max(a, b Double) Double {
	if a > b {
		return a
	}
	return b
}
// sqr returns x squared.
func sqr(x Double) Double {
	return x * x
}
// distance returns the Euclidean distance between points a and b.
func distance(a, b Point) Double {
	di := Double(a.i - b.i)
	dj := Double(a.j - b.j)
	return Double(math.Sqrt(float64(sqr(di) + sqr(dj))))
}
// fill_matrix_impl fills rows [begin, end) of Outer_matrix by recursive
// bisection: the left half is processed in a fresh goroutine while the
// right half reuses the current one. Exactly one value is sent on done per
// completed row.
func fill_matrix_impl(begin, end, ncols int, done chan bool) {
	if (begin + 1 == end) {
		// Base case: a single row. Track the largest off-diagonal distance
		// seen on this row.
		var nmax Double = -1;
		for j := 0; j < ncols; j++ {
			if (begin != j) {
				Outer_matrix[begin][j] = distance(Winnow_points[begin], Winnow_points[j]);
				nmax = max(nmax, Outer_matrix[begin][j]);
			}
		}
		// Diagonal entry dominates the rest of the row (ncols * row max).
		Outer_matrix[begin][begin] = Double(ncols) * nmax;
		Outer_vector[begin] = distance(Point{i: 0, j: 0}, Winnow_points[begin]);
		done <- true
	} else {
		middle := begin + (end - begin) / 2;
		go fill_matrix_impl(begin, middle, ncols, done);
		fill_matrix_impl(middle, end, ncols, done);
	}
}
// fill_matrix launches the parallel row fill and blocks until every one of
// the nrows rows has reported completion on the done channel.
func fill_matrix(nrows, ncols int) {
	done := make(chan bool)
	// parallel for on rows
	go fill_matrix_impl(0, nrows, ncols, done)
	for remaining := nrows; remaining > 0; remaining-- {
		<-done
	}
}
// Outer allocates the nelts x nelts distance matrix and the nelts-length
// origin-distance vector, then fills both in parallel.
func Outer(nelts int) {
	Outer_vector = make([]Double, nelts)
	Outer_matrix = make([][]Double, nelts)
	for row := range Outer_matrix {
		Outer_matrix[row] = make([]Double, nelts)
	}
	fill_matrix(nelts, nelts)
}
|
package main
// GamePlay models one play: the board entities, the winning result
// entities, and the total credit payout.
//
// The previous struct tags (`json:Entities, omitempty`) were malformed:
// struct tag values must be quoted and the option must follow the name with
// no space, otherwise encoding/json ignores the tag entirely.
type GamePlay struct {
	Entities          Entities       `json:"Entities,omitempty"`
	ResultEntities    ResultEntities `json:"ResultEntities,omitempty"`
	TotalCreditPayout int            `json:"TotalCreditPayout,omitempty"`
}
// GetEntities returns the fixed set of board entities for this play; each
// Entity carries four symbol codes, one per row.
// NOTE(review): the data is hard-coded — presumably demo/placeholder values.
func (status *GamePlay) GetEntities() Entities {
	entities := Entities{
		Entity{
			Row1: "N002",
			Row2: "N001",
			Row3: "N009",
			Row4: "N007",
		},
		Entity{
			Row1: "N001",
			Row2: "N006",
			Row3: "N009",
			Row4: "N007",
		},
		Entity{
			Row1: "S001",
			Row2: "N002",
			Row3: "S001",
			Row4: "N010",
		},
		Entity{
			Row1: "N005",
			Row2: "N009",
			Row3: "N003",
			Row4: "N004",
		},
	}
	return entities
}
// GetResultEntities returns the fixed result set for this play.
// NOTE(review): like GetEntities, the data is hard-coded.
func (status *GamePlay) GetResultEntities() ResultEntities {
	results := ResultEntities{
		ResultEntity{
			CreditPayout:      0,
			Multiplier:        1,
			Paylines:          2,
			Entity:            "S001",
			TotalCreditPayout: 0,
		},
	}
	return results
}
|
package main
import "testing"
// TestRemoveHTMLTag verifies that removeHTMLTag strips tags, attributes,
// and comments while keeping text content.
func TestRemoveHTMLTag(t *testing.T) {
	cases := []struct {
		input string
		want  string
	}{
		{"<div>test</div>", "test"},
		{"<div class='test'>test</div>", "test"},
		{"<div class='test'><!-- test -->test</div>", "test"},
		{"<div class='test'><a href='test' alt='test'><img src='./test' /></a></div>", ""},
		{"<script src='test'></script>", ""},
	}
	for _, tc := range cases {
		if got := removeHTMLTag(tc.input); got != tc.want {
			t.Fatalf("want %v, but %v", tc.want, got)
		}
	}
}
// TestRemoveNewlineTag verifies that removeNewlineTag drops \r\n, \n, and
// bare \r characters.
func TestRemoveNewlineTag(t *testing.T) {
	cases := []struct {
		input string
		want  string
	}{
		{"t\r\nest", "test"},
		{"t\nest", "test"},
		{"t\rest", "test"},
	}
	for _, tc := range cases {
		if got := removeNewlineTag(tc.input); got != tc.want {
			t.Fatalf("want %v, but %v", tc.want, got)
		}
	}
}
|
package codec
import (
"image"
"github.com/edaniels/golog"
)
// DefaultKeyFrameInterval is the default interval chosen
// in order to produce high enough quality results at a low
// latency.
const DefaultKeyFrameInterval = 30
// An Encoder is anything that can encode images into bytes. This means that
// the encoder must follow some type of format dictated by a type (see EncoderFactory.MIMEType).
// An encoder that produces bytes of different encoding formats per call is invalid.
type Encoder interface {
	// Encode converts one image into the encoder's byte format.
	Encode(img image.Image) ([]byte, error)
}
// An EncoderFactory produces Encoders and provides information about the underlying encoder itself.
type EncoderFactory interface {
	// New returns an Encoder for frames of the given dimensions and key frame interval.
	New(height, width, keyFrameInterval int, logger golog.Logger) (Encoder, error)
	// MIMEType reports the MIME type of the bytes every produced Encoder emits.
	MIMEType() string
}
|
package main
import (
"encoding/json"
"fmt"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
"github.com/aws/aws-sdk-go/service/kms"
"github.com/aws/aws-sdk-go/service/kms/kmsiface"
"github.com/satori/go.uuid"
"log"
"net/http"
"os"
)
// Environment variable names the Lambda reads its configuration from.
// NOTE(review): Go convention favors MixedCaps (kmsKey, tableName, ...) over
// ALL_CAPS, but renaming would touch every reference in this file.
const (
	KMS_KEY    = "kmsKey"            // env var holding the KMS key id used to encrypt passwords
	TABLE_NAME = "accountsTableName" // env var holding the DynamoDB table name
	INDEX_NAME = "emailIndexName"    // env var holding the email GSI name
	REGION     = "region"            // env var holding the AWS region
)
// Account is the JSON request payload for account creation.
type Account struct {
	Id       string `json:"id"`       // assigned server-side; ignored on input
	Email    string `json:"email"`    // unique key (checked via the email index)
	Password string `json:"password"` // plaintext on input; stored KMS-encrypted
	Name     string `json:"name"`
}
// Shared AWS clients, created once in init and reused across invocations.
var ddbClient dynamodbiface.DynamoDBAPI
var kmsClient kmsiface.KMSAPI
// init DynamoDb: creates the shared AWS session and the DynamoDB and KMS
// clients from the region configured in the environment.
// NOTE(review): on session failure the error is only logged and the clients
// remain nil; later calls will fail — confirm this is intended.
func init() {
	region := os.Getenv(REGION)
	awsSession, err := session.NewSession(&aws.Config{
		Region: &region,
	})
	if err != nil {
		log.Println(fmt.Sprintf("Failed to create new AWS session: %s", err.Error()))
	} else {
		ddbClient = dynamodbiface.DynamoDBAPI(dynamodb.New(awsSession))
		kmsClient = kmsiface.KMSAPI(kms.New(awsSession))
	}
}
// returns true if account doesn't exist in db.
// It queries the email GSI; on a query error or an existing account it
// returns false plus a ready-to-send error response (500 or 409).
func checkIfAccountInNew(email string) (bool, events.APIGatewayProxyResponse) {
	// db query against the email index, exact match on the email key.
	input := &dynamodb.QueryInput{
		TableName: aws.String(os.Getenv(TABLE_NAME)),
		IndexName: aws.String(os.Getenv(INDEX_NAME)),
		KeyConditions: map[string]*dynamodb.Condition{
			"email": {
				ComparisonOperator: aws.String("EQ"),
				AttributeValueList: []*dynamodb.AttributeValue{
					{
						S: aws.String(email),
					},
				},
			},
		},
	}
	queryResult, err := ddbClient.Query(input)
	// check if there is an error
	if err != nil {
		return false, errorOccurred(http.StatusInternalServerError,
			fmt.Errorf("error occurred while querying the db: %s", err.Error()),
		)
	}
	// Any match means the email is already taken.
	if *queryResult.Count > int64(0) {
		return false, errorOccurred(http.StatusConflict,
			fmt.Errorf("account with email = '%s' already exists", email),
		)
	}
	return true, events.APIGatewayProxyResponse{}
}
// returns encrypted password.
// The plaintext is encrypted with the configured KMS key; on failure a
// 500 response is returned alongside a nil ciphertext.
func encryptPassword(password string) ([]byte, events.APIGatewayProxyResponse) {
	passwordInput := &kms.EncryptInput{
		KeyId:     aws.String(os.Getenv(KMS_KEY)),
		Plaintext: []byte(password),
	}
	// encrypt the password with KMS key
	encryptedPassword, err := kmsClient.Encrypt(passwordInput)
	if err != nil {
		return nil, errorOccurred(http.StatusInternalServerError, err)
	}
	return encryptedPassword.CiphertextBlob, events.APIGatewayProxyResponse{}
}
// creates account in db: encrypts the password via KMS, generates an id,
// and writes the item to DynamoDB. Returns (false, response) on failure or
// (true, empty response) on success.
func createAccountInDb(account Account) (bool, events.APIGatewayProxyResponse) {
	encryptedPassword, response := encryptPassword(account.Password)
	// A non-zero status together with a body marks a failure response from
	// encryptPassword (errorOccurred always sets both fields).
	if response.StatusCode != 0 && response.Body != "" {
		return false, response
	}
	// First 8 characters of a v4 UUID.
	// NOTE(review): truncating the UUID raises collision probability —
	// confirm this is acceptable for the expected account volume.
	id := uuid.NewV4().String()[0:8]
	input := &dynamodb.PutItemInput{
		TableName: aws.String(os.Getenv(TABLE_NAME)),
		Item: map[string]*dynamodb.AttributeValue{
			"id": {
				S: aws.String(id),
			},
			"email": {
				S: aws.String(account.Email),
			},
			"password": {
				B: encryptedPassword,
			},
			"name": {
				S: aws.String(account.Name),
			},
		},
	}
	if _, err := ddbClient.PutItem(input); err != nil {
		return false, errorOccurred(http.StatusInternalServerError,
			fmt.Errorf("error occurred while querying the db: %s", err.Error()),
		)
	}
	return true, events.APIGatewayProxyResponse{}
}
// errorOccurred logs every supplied error and builds an API Gateway
// response whose JSON body carries the generic status text for statusCode
// (internal error details are never exposed to the caller).
func errorOccurred(statusCode int, errs ...error) events.APIGatewayProxyResponse {
	for _, e := range errs {
		log.Println(e.Error())
	}
	payload, _ := json.Marshal(map[string]string{
		"errorMessage": http.StatusText(statusCode),
	})
	return events.APIGatewayProxyResponse{
		StatusCode: statusCode,
		Body:       string(payload),
	}
}
// HandleRequest is the Lambda entry point: it parses the Account payload,
// rejects duplicate emails, persists the new account, and returns 200 with
// an empty body on success. Application failures are returned as responses,
// never as a non-nil error (which would mark the invocation failed).
func HandleRequest(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
	log.Println("received body: ", request.Body)
	// parse account JSON
	var account Account
	err := json.Unmarshal([]byte(request.Body), &account)
	if err != nil {
		return errorOccurred(http.StatusBadRequest,
			fmt.Errorf("failed to parse body: %s", err.Error()),
		), nil
	}
	// check if account doesn't exist in db
	if isNew, response := checkIfAccountInNew(account.Email); !isNew {
		return response, nil
	}
	// create account in db
	if created, response := createAccountInDb(account); !created {
		return response, nil
	}
	return events.APIGatewayProxyResponse{
		StatusCode: 200,
		Body:       "",
	}, nil
}
// main wires the Lambda runtime to the account-creation handler.
func main() {
	lambda.Start(HandleRequest)
}
|
package datastruct
import (
"fmt"
)
// Set is a collection of unique elements supporting insertion, removal,
// membership testing, and size queries.
type Set interface {
	// Insert adds x; inserting an existing element is a no-op.
	Insert(x interface{})
	// Erase removes x if present.
	Erase(x interface{})
	// Contains reports whether x is a member.
	Contains(x interface{}) bool
	// Size returns the number of elements.
	Size() int
}
// SetImpl is a hash-set implementation of Set backed by a map whose values
// carry no data.
type SetImpl struct {
	items map[interface{}]struct{}
}

// NewSet returns an empty, ready-to-use set.
func NewSet() *SetImpl {
	return &SetImpl{items: map[interface{}]struct{}{}}
}

// Insert adds x to the set; inserting an existing element is a no-op.
func (s *SetImpl) Insert(x interface{}) {
	s.items[x] = struct{}{}
}

// Erase removes x from the set if present.
func (s *SetImpl) Erase(x interface{}) {
	delete(s.items, x)
}

// Contains reports whether x is a member of the set.
func (s *SetImpl) Contains(x interface{}) bool {
	_, present := s.items[x]
	return present
}

// Size returns the number of elements currently stored.
func (s *SetImpl) Size() int {
	return len(s.items)
}

// String renders the elements as a bracketed list; element order is
// unspecified because map iteration order is random.
func (s *SetImpl) String() string {
	elems := make([]interface{}, 0, len(s.items))
	for e := range s.items {
		elems = append(elems, e)
	}
	return fmt.Sprint(elems)
}
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
package main
import (
"math"
"sync"
"github.com/facebookexperimental/GOAR/endpoints"
"github.com/golang/glog"
"github.com/streadway/amqp"
"github.com/facebookexperimental/GOAR/confighandler"
"github.com/facebookexperimental/GOAR/lib"
)
// Number of processors to run concurrently.
const defaultProcessorsNum = 10
// InputEndpoint represents all required parts
// to maintain connection with remote queue (RabbitMQ in this case).
type InputEndpoint struct {
	// DeliveryChannel receives consumed messages from the log queue.
	DeliveryChannel <-chan amqp.Delivery
	AmqpQueue       amqp.Queue
	endpoints.RabbitMQEndpoint
}
// Connect establishes and maintains all the elements of the connection
// with the remote queue. It sets a per-consumer prefetch of one message and
// starts consuming the configured log queue with manual acknowledgements.
func (endpoint *InputEndpoint) Connect(conf confighandler.Config) error {
	var err error
	if err = endpoint.RabbitMQEndpoint.Connect(conf); err != nil {
		return err
	}
	// Prefetch one message at a time so unacked work is bounded.
	if err = endpoint.RabbitMQEndpoint.Channel.Qos(
		1,     // prefetch count
		0,     // prefetch size
		false, // global
	); err != nil {
		return err
	}
	endpoint.DeliveryChannel, err = endpoint.RabbitMQEndpoint.Channel.Consume(
		conf.QueueLog, // queue
		"",            // consumer
		false,         // auto-ack
		false,         // exclusive
		false,         // no-local
		false,         // no-wait
		nil,           // args
	)
	return err
}
// OutputEndpoint represents endpoint
// maintaining connection with the output external queue.
type OutputEndpoint struct {
	// AmqpQueue is the declared incident queue incidents are published to.
	AmqpQueue amqp.Queue
	endpoints.RabbitMQEndpoint
}
// Connect establishes and maintains all the elements of the connection
// with the remote queue, declaring the durable incident queue.
func (endpoint *OutputEndpoint) Connect(conf confighandler.Config) error {
	var err error
	if err = endpoint.RabbitMQEndpoint.Connect(conf); err != nil {
		return err
	}
	endpoint.AmqpQueue, err = endpoint.Channel.QueueDeclare(
		conf.QueueIncident, // name
		true,               // durable
		false,              // delete when unused
		false,              // exclusive
		false,              // no-wait
		nil,                // arguments
	)
	return err
}
// Processor listens to incoming events coming from the
// input queue (typically Rabbit), matches execution with
// precompiled regex and pushes Incident objects to executors.
type Processor struct {
	InputEndpoint  InputEndpoint
	OutputEndpoint OutputEndpoint
	// RawLogChannel carries raw message bodies from the consumer to the workers.
	RawLogChannel chan []byte
	// IncidentChannel carries matched incidents to the publisher goroutine.
	IncidentChannel chan lib.Incident
	// eventProcessors is the number of concurrent matching workers.
	eventProcessors int
}
// Connect maintains connections for both input and output of the Processor.
func (processor *Processor) Connect(conf confighandler.Config) error {
	err := processor.InputEndpoint.Connect(conf)
	if err != nil {
		return err
	}
	// Output Endpoint
	return processor.OutputEndpoint.Connect(conf)
}
// NewProcessor configures and sets Processor object with the default
// worker count and fresh unbuffered channels.
func NewProcessor() *Processor {
	p := &Processor{}
	p.eventProcessors = defaultProcessorsNum
	p.RawLogChannel = make(chan []byte)
	p.IncidentChannel = make(chan lib.Incident)
	return p
}
// Run runs all the pieces, listening for logs in the input queue, applying
// rules passed as an argument and publishing incidents to the output
// external queue. With blocking=false the matching workers run in the
// background and Run returns immediately.
func (processor *Processor) Run(rules []confighandler.Rule, blocking bool) {
	processor.tailInput()
	processor.publishIncidents()
	if !blocking {
		go processor.processEvents(rules)
		return
	}
	processor.processEvents(rules)
}
// tailInput starts a goroutine that drains the AMQP delivery channel into
// RawLogChannel, acknowledging each message after it has been handed off.
// The goroutine exits when the delivery channel closes.
func (processor *Processor) tailInput() {
	glog.Info("Entering tailInput() goroutine")
	go func(in <-chan amqp.Delivery, out chan []byte) {
		for message := range in {
			glog.V(2).Infof("Received a message: %s", message.Body)
			out <- message.Body
			// Ack only this delivery (multiple=false) once forwarded.
			message.Ack(false)
		}
		glog.Info("Exiting tailInput() goroutine")
	}(processor.InputEndpoint.DeliveryChannel, processor.RawLogChannel)
}
// processEvents compiles the rule regexes once, then runs eventProcessors
// worker goroutines that match each raw log line against every rule and
// emit at most one incident per line. It blocks until all workers finish,
// which happens only when RawLogChannel is closed.
func (processor *Processor) processEvents(rules []confighandler.Rule) {
	regexList := compileRegexRules(rules)
	var wg sync.WaitGroup
	wg.Add(processor.eventProcessors)
	for i := 0; i < processor.eventProcessors; i++ {
		go func(processor *Processor) {
			defer wg.Done()
			for msg := range processor.RawLogChannel {
				msgStr := string(msg)
				for id, reg := range regexList {
					if reg.MatchString(msgStr) {
						// Collect named capture groups as event parameters.
						eventParameters := make(map[string]string)
						values := reg.FindStringSubmatch(msgStr)
						regexNames := reg.SubexpNames()
						length := int(math.Min(float64(len(values)), float64(len(regexNames))))
						// skip first element in both arrays as these are full lines, not split arguments/parameters
						for i := 1; i < length; i++ {
							eventParameters[regexNames[i]] = values[i]
						}
						processor.IncidentChannel <- FormatIncident(rules[id], eventParameters, msgStr, "SYSLOGPROC")
						// As soon as we manage to create an incident from matching
						// any of the rules we skip to the next possible regex.
						// that way we avoid too much processing and also creating multiple
						// incidents from a single syslog line.
						break
					}
				}
			}
		}(processor)
	}
	glog.Info("Processing events...")
	wg.Wait()
}
// publishIncidents starts a goroutine that serializes each incident from
// IncidentChannel to JSON and publishes it to the executor's queue.
// NOTE(review): the receive loop has no exit condition, so the goroutine
// lives for the remainder of the process.
func (processor *Processor) publishIncidents() {
	go func(incidentChannel chan lib.Incident, queueName string) {
		var body []byte
		var err error
		for {
			msg := <-incidentChannel
			if body, err = msg.IncidentToJSON(); err != nil {
				// Drop incidents that fail to serialize; keep the loop alive.
				glog.Errorf("Error marshaling incident to JSON: %s", err)
				continue
			}
			if err = processor.OutputEndpoint.Channel.Publish(
				"",        // exchange
				queueName, // routing key
				false,     // mandatory
				false,     // immediate
				amqp.Publishing{
					ContentType: "text/plain",
					Body:        body,
				}); err != nil {
				glog.Errorf("Error publishing incident to the Executor's queue, %s", err)
			}
		}
	}(processor.IncidentChannel, processor.OutputEndpoint.AmqpQueue.Name)
}
|
package mutex
import (
"runtime"
"sync/atomic"
)
// Mutex is a spinlock-style mutual-exclusion lock built on one atomically
// updated flag word. The zero value is an unlocked Mutex; it must not be
// copied after first use.
type Mutex struct {
	state int32 // 0 = unlocked, 1 = locked
}

// TryLock attempts to take the lock without blocking and reports whether it
// succeeded.
func (m *Mutex) TryLock() bool {
	return atomic.CompareAndSwapInt32(&m.state, 0, 1)
}

// Lock spins, yielding the processor between attempts, until the lock is
// acquired.
func (m *Mutex) Lock() {
	for {
		if m.TryLock() {
			return
		}
		runtime.Gosched()
	}
}

// Unlock releases the lock by atomically clearing the flag.
func (m *Mutex) Unlock() {
	atomic.StoreInt32(&m.state, 0)
}
|
package main
import (
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/julienschmidt/httprouter"
)
// Index handles the root route: it prints timing information to stdout and
// writes a hard-coded BTC price prediction as JSON.
// NOTE(review): "Welcome!\n" with Println emits a blank line after the
// greeting, and the price values are fixed — presumably placeholders.
func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	fmt.Println("Welcome!\n")
	t := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.Local)
	fmt.Println("Go launched at \n", t.Local())
	now := time.Now()
	fmt.Println("The time is now \n", now.Local())
	priceResponse := new(PriceResponse)
	priceResponse.CurrentDate = t
	priceResponse.PredictionDate = now
	priceResponse.Currency = "BTC"
	priceResponse.Value = 12390
	out, err := json.Marshal(priceResponse)
	if err != nil {
		// Nothing useful to send the client here; log and bail.
		fmt.Println("error marshalling json: ", err)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(out)
}
// Plot generates plots from a fixed CSV data set and reports the file used.
// Alternative data sets:
//   bitStampUsd-full.csv
//   bitStampUsd-2017.csv
//   bitStampUsd-Oct2017.csv
func Plot(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	const filename = "bitStampUsd-Dec2017.csv"
	CsvPlot(filename)
	fmt.Fprintf(w, "plots generated from %s..\n", filename)
}
// Split generates training/testing data sets from a fixed CSV data set
// and reports the file used back to the client.
func Split(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	// Other available data sets:
	//   bitStampUsd-full.csv
	//   bitStampUsd-2017.csv
	//   bitStampUsd-Oct2017.csv
	const filename = "bitStampUsd-Dec2017.csv"
	CsvSplit(filename)
	fmt.Fprintf(w, "training/testing data sets generated from %s..\n", filename)
}
// Train triggers model training. Nothing is written to the response;
// progress is only logged to stdout.
func Train(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	fmt.Println("training model..")
	TrainModel()
}
// ExecuteNN triggers a neural-network run. Nothing is written to the
// response; progress is only logged to stdout.
func ExecuteNN(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	fmt.Println("executing neural network..")
	RunNeuralNet()
}
|
package constant
const (
	// APPNAME is the application name used wherever the app identifies itself.
	APPNAME = "QueueMan"
	// APPVERSION is the released application version string.
	APPVERSION = "V1.0.9"
)
|
package Crypto
import (
"bytes"
"crypto/cipher"
"crypto/des"
"encoding/hex"
"fmt"
)
// main demonstrates DES encrypt/decrypt round trips in CBC and ECB mode
// with a fixed 8-byte key.
//
// NOTE(review): single DES with the key reused as IV is demo-quality
// only and must not be used to protect real data.
func main() {
	// The DES key must be exactly 8 bytes long.
	key := "12345678"
	plain := "asdfgasdfgasdfgasdfgasdfg"

	encrypted := EncryptDES_CBC(plain, key)
	fmt.Println(encrypted)
	fmt.Println(DecryptDES_CBC(encrypted, key))

	encrypted = EncryptDES_ECB(plain, key)
	fmt.Println(encrypted)
	fmt.Println(DecryptDES_ECB(encrypted, key))
}
// EncryptDES_CBC encrypts src with single DES in CBC mode and returns
// the ciphertext as an upper-case hex string.
//
// The key must be exactly 8 bytes; the same bytes are reused as the IV
// (kept for compatibility, but cryptographically unsound — an IV should
// be random and unique per message). The input is padded with PKCS#5.
// Panics on an invalid key.
func EncryptDES_CBC(src, key string) string {
	keyBytes := []byte(key)
	block, err := des.NewCipher(keyBytes)
	if err != nil {
		panic(err)
	}
	plain := PKCS5Padding([]byte(src), block.BlockSize())
	// CBC mode with the key doubling as IV (not recommended).
	encrypter := cipher.NewCBCEncrypter(block, keyBytes)
	ciphertext := make([]byte, len(plain))
	encrypter.CryptBlocks(ciphertext, plain)
	return fmt.Sprintf("%X", ciphertext)
}
// DecryptDES_CBC decrypts a hex-encoded DES-CBC ciphertext produced by
// EncryptDES_CBC and returns the plaintext string.
//
// The key must be 8 bytes and is also used as the IV, mirroring the
// encryption side. PKCS#5 padding is stripped from the result.
// Panics on invalid hex input or an invalid key.
func DecryptDES_CBC(src, key string) string {
	ciphertext, err := hex.DecodeString(src)
	if err != nil {
		panic(err)
	}
	keyBytes := []byte(key)
	block, err := des.NewCipher(keyBytes)
	if err != nil {
		panic(err)
	}
	// Key doubles as IV to match EncryptDES_CBC (not recommended).
	decrypter := cipher.NewCBCDecrypter(block, keyBytes)
	plain := make([]byte, len(ciphertext))
	decrypter.CryptBlocks(plain, ciphertext)
	return string(PKCS5UnPadding(plain))
}
// EncryptDES_ECB encrypts src with single DES in ECB mode and returns
// the ciphertext as an upper-case hex string.
//
// ECB encrypts each 8-byte block independently, so identical plaintext
// blocks yield identical ciphertext blocks — avoid for real data.
// The input is padded with PKCS#5. Panics on an invalid key.
func EncryptDES_ECB(src, key string) string {
	block, err := des.NewCipher([]byte(key))
	if err != nil {
		panic(err)
	}
	bs := block.BlockSize()
	plain := PKCS5Padding([]byte(src), bs)
	if len(plain)%bs != 0 {
		panic("Need a multiple of the blocksize")
	}
	ciphertext := make([]byte, len(plain))
	// Encrypt block by block; ECB has no chaining between blocks.
	for i := 0; i < len(plain); i += bs {
		block.Encrypt(ciphertext[i:i+bs], plain[i:i+bs])
	}
	return fmt.Sprintf("%X", ciphertext)
}
// DecryptDES_ECB decrypts a hex-encoded DES-ECB ciphertext produced by
// EncryptDES_ECB and returns the plaintext string.
//
// Panics on invalid hex input, an invalid key, or ciphertext whose
// length is not a multiple of the DES block size. PKCS#5 padding is
// stripped from the result.
func DecryptDES_ECB(src, key string) string {
	ciphertext, err := hex.DecodeString(src)
	if err != nil {
		panic(err)
	}
	block, err := des.NewCipher([]byte(key))
	if err != nil {
		panic(err)
	}
	bs := block.BlockSize()
	if len(ciphertext)%bs != 0 {
		panic("crypto/cipher: input not full blocks")
	}
	plain := make([]byte, len(ciphertext))
	// Decrypt block by block; ECB blocks are independent.
	for i := 0; i < len(ciphertext); i += bs {
		block.Decrypt(plain[i:i+bs], ciphertext[i:i+bs])
	}
	return string(PKCS5UnPadding(plain))
}
// PKCS5Padding pads ciphertext up to a multiple of blockSize by
// appending N copies of the byte N, where N is the number of bytes added
// (1..blockSize). An already-aligned input gains a full block of padding
// so the scheme is always reversible.
func PKCS5Padding(ciphertext []byte, blockSize int) []byte {
	n := blockSize - len(ciphertext)%blockSize
	pad := bytes.Repeat([]byte{byte(n)}, n)
	return append(ciphertext, pad...)
}
// PKCS5UnPadding removes PKCS#5 padding appended by PKCS5Padding: the
// last byte encodes how many padding bytes to strip.
//
// Unlike a strict PKCS#5 implementation it does not verify that every
// pad byte matches; it only guards against input that cannot carry
// padding at all. Empty input or an out-of-range pad count (> len)
// previously caused an index/slice panic; such input is now returned
// unchanged.
func PKCS5UnPadding(origData []byte) []byte {
	length := len(origData)
	if length == 0 {
		return origData
	}
	unpadding := int(origData[length-1])
	if unpadding > length {
		// Not valid padding; hand the data back rather than panicking.
		return origData
	}
	return origData[:length-unpadding]
}
|
package registry
import (
"bytes"
"fmt"
"github.com/iotaledger/wasp/packages/dbprovider"
"io"
"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance"
"github.com/iotaledger/hive.go/kvstore"
"github.com/iotaledger/wasp/packages/coretypes"
"github.com/iotaledger/wasp/packages/publisher"
"github.com/iotaledger/wasp/packages/util"
"github.com/iotaledger/wasp/plugins/database"
"github.com/mr-tron/base58"
)
// ChainRecord is the minimum data needed to load a committee for a chain.
// It is up to the node (not the smart contract) to check authorizations
// to create or update this record.
type ChainRecord struct {
	ChainID        coretypes.ChainID
	Color          balance.Color // origin tx hash
	CommitteeNodes []string      // committee peers as "host_addr:port"
	Active         bool          // whether the chain is activated on this node
}
// dbkeyChainRecord builds the registry-partition key under which the
// record for the given chain is stored.
func dbkeyChainRecord(chainID *coretypes.ChainID) []byte {
	return dbprovider.MakeKey(dbprovider.ObjectTypeChainRecord, chainID[:])
}
// SaveChainRecord validates and persists the chain record in the
// registry partition, then publishes a "chainrec" event with the chain
// ID and color. It rejects the nil chain ID and the reserved IOTA/New
// colors.
func SaveChainRecord(bd *ChainRecord) error {
	if bd.ChainID == coretypes.NilChainID {
		// Was "can be empty chain id", which said the opposite of what is meant.
		return fmt.Errorf("can't be empty chain id")
	}
	if bd.Color == balance.ColorNew || bd.Color == balance.ColorIOTA {
		return fmt.Errorf("can't be IOTA or New color")
	}
	var buf bytes.Buffer
	if err := bd.Write(&buf); err != nil {
		return err
	}
	if err := database.GetRegistryPartition().Set(dbkeyChainRecord(&bd.ChainID), buf.Bytes()); err != nil {
		return err
	}
	publisher.Publish("chainrec", bd.ChainID.String(), bd.Color.String())
	return nil
}
// GetChainRecord loads the chain record for chainID from the registry
// partition. It returns (nil, nil) when no record exists, and a non-nil
// error only for storage or decoding failures.
func GetChainRecord(chainID *coretypes.ChainID) (*ChainRecord, error) {
	data, err := database.GetRegistryPartition().Get(dbkeyChainRecord(chainID))
	switch {
	case err == kvstore.ErrKeyNotFound:
		return nil, nil
	case err != nil:
		return nil, err
	}
	rec := &ChainRecord{}
	if err := rec.Read(bytes.NewReader(data)); err != nil {
		return nil, err
	}
	return rec, nil
}
// UpdateChainRecord loads the record for chainID, applies f to it, and
// persists the result only when f returns true. It returns the
// (possibly modified) record, or an error when the record is missing or
// storage fails.
func UpdateChainRecord(chainID *coretypes.ChainID, f func(*ChainRecord) bool) (*ChainRecord, error) {
	rec, err := GetChainRecord(chainID)
	if err != nil {
		return nil, err
	}
	if rec == nil {
		return nil, fmt.Errorf("no chain record found for address %s", chainID.String())
	}
	if !f(rec) {
		// No modification requested; nothing to persist.
		return rec, nil
	}
	if err := SaveChainRecord(rec); err != nil {
		return nil, err
	}
	return rec, nil
}
// ActivateChainRecord marks the chain as active, persisting the change
// only when the record was previously inactive.
func ActivateChainRecord(chainID *coretypes.ChainID) (*ChainRecord, error) {
	return UpdateChainRecord(chainID, func(rec *ChainRecord) bool {
		if rec.Active {
			return false // already active; nothing to save
		}
		rec.Active = true
		return true
	})
}

// DeactivateChainRecord marks the chain as inactive, persisting the
// change only when the record was previously active.
func DeactivateChainRecord(chainID *coretypes.ChainID) (*ChainRecord, error) {
	return UpdateChainRecord(chainID, func(rec *ChainRecord) bool {
		if !rec.Active {
			return false // already inactive; nothing to save
		}
		rec.Active = false
		return true
	})
}
// GetChainRecords returns all chain records stored in the registry
// partition. Records that fail to decode are skipped with a warning
// rather than aborting the whole scan.
func GetChainRecords() ([]*ChainRecord, error) {
	db := database.GetRegistryPartition()
	ret := make([]*ChainRecord, 0)
	// Iterate every key carrying the chain-record object-type prefix.
	err := db.Iterate([]byte{dbprovider.ObjectTypeChainRecord}, func(key kvstore.Key, value kvstore.Value) bool {
		bd := new(ChainRecord)
		if err := bd.Read(bytes.NewReader(value)); err == nil {
			ret = append(ret, bd)
		} else {
			log.Warnf("corrupted chain record with key %s", base58.Encode(key))
		}
		return true // keep iterating even after a corrupted record
	})
	return ret, err
}
// Write serializes the record to w in its fixed binary layout:
// chain ID, color bytes, committee node list, active flag.
func (bd *ChainRecord) Write(w io.Writer) error {
	if err := bd.ChainID.Write(w); err != nil {
		return err
	}
	if _, err := w.Write(bd.Color[:]); err != nil {
		return err
	}
	if err := util.WriteStrings16(w, bd.CommitteeNodes); err != nil {
		return err
	}
	return util.WriteBoolByte(w, bd.Active)
}
// Read deserializes a record from r, expecting the same field order that
// Write produces. On error the record may be partially populated.
func (bd *ChainRecord) Read(r io.Reader) error {
	if err := bd.ChainID.Read(r); err != nil {
		return err
	}
	if err := util.ReadColor(r, &bd.Color); err != nil {
		return err
	}
	nodes, err := util.ReadStrings16(r)
	if err != nil {
		return err
	}
	bd.CommitteeNodes = nodes
	return util.ReadBoolByte(r, &bd.Active)
}
// String renders a human-readable, multi-line summary of the record:
// target chain, color, and committee nodes (the Active flag is omitted).
func (bd *ChainRecord) String() string {
	return " Target: " + bd.ChainID.String() + "\n" +
		" Color: " + bd.Color.String() + "\n" +
		fmt.Sprintf(" Committee nodes: %+v\n", bd.CommitteeNodes)
}
|
// +build spi,!i2c
package main
import (
// Modules
_ "github.com/djthorpe/gopi-hw/sys/spi"
_ "github.com/djthorpe/sensors/sys/rfm69"
)
const (
	// MODULE_NAME identifies the RFM69-over-SPI module variant selected
	// by this file's "spi,!i2c" build tags.
	MODULE_NAME = "sensors/rfm69/spi"
)
|
/*
Copyright 2021 CodeNotary, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"os"
"os/user"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
// TestWriteFileToUserHomeDir verifies that a relative path is written
// under the current user's home directory.
func TestWriteFileToUserHomeDir(t *testing.T) {
	hds := NewHomedirService()
	content := []byte(`t`)
	pathToFile := "testfile"
	// Renamed from "user": the variable shadowed the imported os/user package.
	usr, _ := user.Current()
	err := hds.WriteFileToUserHomeDir(content, pathToFile)
	// Check the write error before asserting on the file it should have created.
	assert.Nil(t, err)
	assert.FileExists(t, filepath.Join(usr.HomeDir, pathToFile))
	os.RemoveAll(filepath.Join(usr.HomeDir, pathToFile))
}
// TestFileExistsInUserHomeDir checks existence reporting before and
// after a file is written into the user's home directory.
func TestFileExistsInUserHomeDir(t *testing.T) {
	hds := NewHomedirService()
	content := []byte(`t`)
	pathToFile := "testfile"
	usr, _ := user.Current()
	target := filepath.Join(usr.HomeDir, pathToFile)

	exists, err := hds.FileExistsInUserHomeDir(target)
	assert.False(t, exists)
	assert.Nil(t, err)

	err = hds.WriteFileToUserHomeDir(content, pathToFile)
	assert.Nil(t, err)

	exists, err = hds.FileExistsInUserHomeDir(pathToFile)
	assert.True(t, exists)
	assert.Nil(t, err)

	os.RemoveAll(target)
}
// TestReadFileFromUserHomeDir verifies reading fails for a missing file
// and succeeds after the file is written.
func TestReadFileFromUserHomeDir(t *testing.T) {
	hds := NewHomedirService()
	content := []byte(`t`)
	pathToFile := "testfile"
	usr, _ := user.Current()

	_, err := hds.ReadFileFromUserHomeDir(pathToFile)
	assert.Error(t, err)

	err = hds.WriteFileToUserHomeDir(content, pathToFile)
	// The write error was previously discarded; a failed write would make
	// the assertions below misleading.
	assert.Nil(t, err)

	strcontent, err := hds.ReadFileFromUserHomeDir(pathToFile)
	assert.NotEmpty(t, strcontent)
	assert.Nil(t, err)

	os.RemoveAll(filepath.Join(usr.HomeDir, pathToFile))
}
// TestDeleteFileFromUserHomeDir verifies delete fails for a missing file
// and succeeds (removing the file) after it is written.
func TestDeleteFileFromUserHomeDir(t *testing.T) {
	hds := NewHomedirService()
	content := []byte(`t`)
	pathToFile := "testfile"
	usr, _ := user.Current()

	err := hds.DeleteFileFromUserHomeDir(pathToFile)
	assert.Error(t, err)

	err = hds.WriteFileToUserHomeDir(content, pathToFile)
	assert.Nil(t, err)

	err = hds.DeleteFileFromUserHomeDir(pathToFile)
	assert.Nil(t, err)
	assert.NoFileExists(t, filepath.Join(usr.HomeDir, pathToFile))
}
// TestWriteDirFileToUserHomeDir verifies that an explicitly relative
// ("./"-prefixed) path is written at that path rather than under the
// home directory.
func TestWriteDirFileToUserHomeDir(t *testing.T) {
	hds := NewHomedirService()
	content := []byte(`t`)
	pathToFile := "./testfile"

	err := hds.WriteFileToUserHomeDir(content, pathToFile)
	assert.FileExists(t, pathToFile)
	assert.Nil(t, err)

	os.RemoveAll(pathToFile)
}

// TestDirFileExistsInUserHomeDir checks existence reporting for a
// "./"-prefixed path before and after it is written.
func TestDirFileExistsInUserHomeDir(t *testing.T) {
	hds := NewHomedirService()
	content := []byte(`t`)
	pathToFile := "./testfile"

	exists, err := hds.FileExistsInUserHomeDir(pathToFile)
	assert.False(t, exists)
	assert.Nil(t, err)

	err = hds.WriteFileToUserHomeDir(content, pathToFile)
	assert.Nil(t, err)

	exists, err = hds.FileExistsInUserHomeDir(pathToFile)
	assert.True(t, exists)
	assert.Nil(t, err)

	os.RemoveAll(pathToFile)
}
// TestDirFileFileFromUserHomeDir verifies reading a "./"-prefixed path
// fails before the file exists and succeeds after writing it.
func TestDirFileFileFromUserHomeDir(t *testing.T) {
	hds := NewHomedirService()
	content := []byte(`t`)
	pathToFile := "./testfile"

	_, err := hds.ReadFileFromUserHomeDir(pathToFile)
	assert.Error(t, err)

	err = hds.WriteFileToUserHomeDir(content, pathToFile)
	assert.Nil(t, err)

	strcontent, err := hds.ReadFileFromUserHomeDir(pathToFile)
	assert.NotEmpty(t, strcontent)
	assert.Nil(t, err)

	os.RemoveAll(pathToFile)
}

// TestDeleteDirFileFromUserHomeDir verifies deleting a "./"-prefixed
// path fails before the file exists and removes it after writing.
func TestDeleteDirFileFromUserHomeDir(t *testing.T) {
	hds := NewHomedirService()
	content := []byte(`t`)
	pathToFile := "./testfile"

	err := hds.DeleteFileFromUserHomeDir(pathToFile)
	assert.Error(t, err)

	err = hds.WriteFileToUserHomeDir(content, pathToFile)
	assert.Nil(t, err)

	err = hds.DeleteFileFromUserHomeDir(pathToFile)
	assert.Nil(t, err)
	assert.NoFileExists(t, pathToFile)
}
|
package labels
import (
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"sync"
"testing"
"time"
. "github.com/anthonybishopric/gotcha"
"github.com/square/p2/pkg/logging"
"k8s.io/kubernetes/pkg/labels"
)
// endpointSuffix is the select endpoint the label server is expected to hit.
const endpointSuffix = "/api/select"

// getMatches spins up a fake label server that validates the query the
// applicator sends (path, selector, type, cachedMatch flag), responds
// with httpResponse, and returns the applicator's parsed matches.
func getMatches(t *testing.T, httpResponse string) ([]Labeled, error) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		q := r.URL.Query()
		Assert(t).AreEqual(r.URL.Path, endpointSuffix, "Unexpected path requested")
		Assert(t).AreEqual(q.Get("selector"), "r1=v1,r2=v2", "Unexpected selector requested")
		Assert(t).AreEqual(q.Get("type"), NODE.String(), "Unexpected type requested")
		Assert(t).AreEqual(q.Get("cachedMatch"), "true", "Expected a cachedMatch query to be sent")
		fmt.Fprintln(w, httpResponse)
	}
	server := httptest.NewServer(http.HandlerFunc(handler))
	defer server.Close()

	serverURL, err := url.Parse(server.URL)
	Assert(t).IsNil(err, "expected no error parsing url")
	applicator, err := NewHTTPApplicator(nil, serverURL)
	Assert(t).IsNil(err, "expected no error creating HTTP applicator")

	selector := labels.Everything().
		Add("r1", labels.EqualsOperator, []string{"v1"}).
		Add("r2", labels.EqualsOperator, []string{"v2"})
	return applicator.GetCachedMatches(selector, NODE, 0)
}
// TestGetMatches expects two matches parsed from a bare JSON string array.
func TestGetMatches(t *testing.T) {
	matches, err := getMatches(t, `["a","b"]`)
	Assert(t).IsNil(err, "expected no error getting matches")
	Assert(t).AreEqual(len(matches), 2, "Expected two matches")
	Assert(t).AreEqual(matches[0].ID, "a", "Unexpected ID of first match")
	Assert(t).AreEqual(matches[1].ID, "b", "Unexpected ID of second match")
}

// TestGetMatchesEmpty expects an empty JSON array to yield no matches.
func TestGetMatchesEmpty(t *testing.T) {
	matches, err := getMatches(t, `[]`)
	Assert(t).IsNil(err, "expected no error getting matches")
	Assert(t).AreEqual(len(matches), 0, "Expected no matches")
}

// TestGetMatchesTypeError expects a JSON array of non-strings to fail decoding.
func TestGetMatchesTypeError(t *testing.T) {
	_, err := getMatches(t, `[1]`)
	Assert(t).IsNotNil(err, "expected error getting matches")
}

// TestGetMatchesNoJson expects malformed JSON to produce an error.
func TestGetMatchesNoJson(t *testing.T) {
	_, err := getMatches(t, `[`)
	Assert(t).IsNotNil(err, "expected error getting matches")
}
// TestGetMatchesFullFormat parses the full object response format
// (id, type, labels) and checks IDs and label counts survive decoding.
func TestGetMatchesFullFormat(t *testing.T) {
	matches, err := getMatches(t, `[
	{
		"id": "red-rocket-10",
		"type": "node",
		"labels": {
			"r1": "v1",
			"r2": "v2",
			"r3": "red"
		}
	},
	{
		"id": "blue-blaster-20",
		"type": "node",
		"labels": {
			"r1": "v1",
			"r2": "v2",
			"r3": "blue"
		}
	}
	]`)
	Assert(t).IsNil(err, "Should not have erred getting a result")
	Assert(t).AreEqual(len(matches), 2, "Should have two results")
	Assert(t).AreEqual(matches[0].ID, "red-rocket-10", "should have seen correct label")
	Assert(t).AreEqual(matches[1].ID, "blue-blaster-20", "should have seen correct label")
	Assert(t).AreEqual(len(matches[0].Labels), 3, "Should have seen 3 labels for red-rocket-10")
}
// TestBatchRequests runs six selector queries concurrently against a
// batching label server and checks each returns exactly the expected set
// of pod IDs. The whole set must finish within 500ms or the test fails.
func TestBatchRequests(t *testing.T) {
	server, applicator := fakeServerAndApplicator(t, 100*time.Millisecond)
	defer server.Close()
	Assert(t).IsNil(applicator.SetLabels(POD, "abc", labels.Set{"color": "green", "state": "experimental"}), "Should not err setting labels")
	Assert(t).IsNil(applicator.SetLabels(POD, "def", labels.Set{"color": "green", "state": "production"}), "Should not err setting labels")
	Assert(t).IsNil(applicator.SetLabels(POD, "f98", labels.Set{"color": "blue", "state": "production"}), "Should not err setting labels")
	Assert(t).IsNil(applicator.SetLabels(POD, "c56", labels.Set{"color": "blue", "state": "experimental"}), "Should not err setting labels")
	// Each selector maps to the exact set of pod IDs it must match.
	queryToResults := map[string][]string{
		"color = green":                      []string{"abc", "def"},
		"state = production":                 []string{"def", "f98"},
		"color = blue, state = production":   []string{"f98"},
		"color = blue":                       []string{"f98", "c56"},
		"state = experimental":               []string{"c56", "abc"},
		"color = blue, state = experimental": []string{"c56"},
	}
	var tests sync.WaitGroup
	for q, expect := range queryToResults {
		tests.Add(1)
		// q/expect are passed as arguments so each goroutine gets its own copy.
		go func(query string, expect []string) {
			defer tests.Done()
			selector, err := labels.Parse(query)
			if err != nil {
				t.Errorf("Test setup error: %v", err)
				return
			}
			res, err := applicator.GetMatches(selector, POD)
			if err != nil {
				t.Errorf("Could not run applicator query: %v", err)
				return
			}
			if len(expect) != len(res) {
				t.Errorf("Incorrect number of query results for %v", query)
				return
			}
			// Every returned ID must be in the expected set.
			for _, labeled := range res {
				var found bool
				for _, id := range expect {
					if id == labeled.ID {
						found = true
					}
				}
				if !found {
					t.Errorf("Found %v but shouldn't have found it", labeled.ID)
				}
			}
		}(q, expect)
	}
	// Fail rather than hang forever if the batching server never responds.
	doneCh := make(chan struct{})
	go func() {
		tests.Wait()
		close(doneCh)
	}()
	select {
	case <-doneCh:
		return
	case <-time.After(500 * time.Millisecond):
		t.Fatalf("Tests timed out")
	}
}
// fakeServerAndApplicator starts an HTTP label server backed by a fake
// applicator (batching requests for batchTime) and returns it together
// with an HTTP applicator pointed at it. The caller must Close the server.
func fakeServerAndApplicator(t *testing.T, batchTime time.Duration) (*httptest.Server, *httpApplicator) {
	labelServer := NewHTTPLabelServer(NewFakeApplicator(), batchTime, logging.DefaultLogger)
	server := httptest.NewServer(labelServer.Handler())
	serverURL, err := url.Parse(server.URL)
	Assert(t).IsNil(err, "expected no error parsing url")
	applicator, err := NewHTTPApplicator(nil, serverURL)
	Assert(t).IsNil(err, "expected no error creating HTTP applicator")
	return server, applicator
}
|
package cartao_credito
import "fmt"
// Cartao represents a credit-card purchase entry.
type Cartao struct {
	ID        int     `json:"id"`
	Valor     float64 `json:"valor"`
	Descricao string  `json:"descricao"`
	Local     string  `json:"local"`
	Usuario   Usuario `json:"usuario"`
}

// Usuario identifies the user that made the purchase.
type Usuario struct {
	ID    int    `json:"id"`
	Login string `json:"login"`
	Email string `json:"email"`
}

// New builds and validates a Cartao. It returns an error describing the
// first validation failure, or the populated value on success.
func New(id int, valor float64, descricao string, local string, idUsuario int, login string, email string) (*Cartao, error) {
	cartao := &Cartao{
		ID:        id,
		Valor:     valor,
		Descricao: descricao,
		Local:     local,
		Usuario: Usuario{
			// BUG FIX: the user ID was previously populated with the
			// card's "id" argument, silently dropping idUsuario.
			ID:    idUsuario,
			Login: login,
			Email: email,
		},
	}
	if isValid, err := cartao.isValid(); !isValid {
		return nil, err
	}
	return cartao, nil
}

// isValid checks all required fields and value ranges, returning false
// plus a (Portuguese) message for the first problem found.
func (cartao Cartao) isValid() (bool, error) {
	if cartao.ID == 0 {
		return false, fmt.Errorf("Preencha o id do cartão")
	}
	if cartao.Valor == 0.0 {
		return false, fmt.Errorf("Preencha o valor do cartão")
	}
	if cartao.Valor < 0.0 {
		return false, fmt.Errorf("O valor da compra não pode ser menor que zero")
	}
	if cartao.Descricao == "" {
		return false, fmt.Errorf("Preencha a descrição")
	}
	if cartao.Local == "" {
		return false, fmt.Errorf("Preencha o local")
	}
	if cartao.Usuario.ID == 0 {
		return false, fmt.Errorf("Preencha o id do usuário")
	}
	return true, nil
}
|
package linkedlists
import (
"testing"
)
// kthTests drives TestKth. Each case provides an input list, the node
// expected k positions from the end (k = 0 is the last element), and k.
// Expected results are compared via ListNodeEquals.
var kthTests = []struct {
	l      *ListNode
	result *ListNode
	k      int
}{
	{
		// single node, last element
		&ListNode{1, nil},
		&ListNode{1, nil},
		0,
	},
	{
		// duplicate values, one from the end
		&ListNode{1, &ListNode{1, nil}},
		&ListNode{1, nil},
		1,
	},
	{
		&ListNode{1, &ListNode{1, &ListNode{8, &ListNode{1, nil}}}},
		&ListNode{8, nil},
		1,
	},
	{
		// k pointing at the head
		&ListNode{1, &ListNode{21, &ListNode{8, &ListNode{9, nil}}}},
		&ListNode{1, nil},
		3,
	},
}
// TestKth runs every kthTests case against the three implementations of
// the kth-from-the-end lookup: kth, kth3, and kthrec.
func TestKth(t *testing.T) {
	for _, tc := range kthTests {
		if got := kth(tc.l, tc.k); !ListNodeEquals(got, tc.result) {
			t.Errorf("error")
		}
		if got := kth3(tc.l, tc.k); !ListNodeEquals(got, tc.result) {
			t.Errorf("error with kth3")
		}
		if got := kthrec(tc.l, tc.k); !ListNodeEquals(got, tc.result) {
			t.Errorf("error: received %v, expect: %v", got, tc.result)
		}
	}
}
|
package helpers
import (
"fmt"
"io/ioutil"
"strconv"
"strings"
)
// GetInputValues reads the file at the specified input path, strips the last line (if needed) and returns the content as string array
func GetInputValues(absFilePath string) (values []string) {
txt, err := ioutil.ReadFile("input")
if err != nil {
panic(fmt.Sprintf("Input file '%s' not found...", absFilePath))
}
strValues := strings.Split(string(txt), "\n")
// Remove empty last line
if len(strValues[len(strValues)-1]) == 0 {
strValues = strValues[:len(strValues)-1]
}
return strValues
}
// MustAtoi converts a string that is known to contain an integer.
// The conversion error is deliberately discarded; malformed input
// yields 0 (strconv.Atoi's zero result).
func MustAtoi(input string) int {
	n, _ := strconv.Atoi(input)
	return n
}
|
package main
import (
"context"
"fmt"
"os"
slackbot "github.com/lusis/go-slackbot"
slack "github.com/nlopes/slack"
)
// helloFunc replies to a direct-mention greeting without a typing indicator.
func helloFunc(ctx context.Context, bot *slackbot.Bot, evt *slack.MessageEvent) {
	bot.Reply(evt, "hi there to you too!", slackbot.WithoutTyping)
}

// globalMessageHandler replies whenever the "global message" pattern
// matches in any conversation.
func globalMessageHandler(ctx context.Context, bot *slackbot.Bot, evt *slack.MessageEvent) {
	bot.Reply(evt, "I see your global message", slackbot.WithoutTyping)
}

// directMessageHandler answers any direct message with a canned refusal.
func directMessageHandler(ctx context.Context, bot *slackbot.Bot, evt *slack.MessageEvent) {
	bot.Reply(evt, "sorry I can't do direct messages", slackbot.WithoutTyping)
}
// postMessageHandler posts a message through the Slack web API (as the
// bot user) instead of replying over RTM; a failure is only logged.
func postMessageHandler(ctx context.Context, bot *slackbot.Bot, evt *slack.MessageEvent) {
	if _, _, err := bot.Client.PostMessage(evt.Channel, "posting to a channel via api", slack.PostMessageParameters{AsUser: true}); err != nil {
		fmt.Printf("Got an error making an api call to post a message: %s\n", err.Error())
	}
}
// withAttachmentHandler posts a message consisting solely of one
// attachment carrying a title, text, and a single short field; a failed
// post is only logged.
func withAttachmentHandler(ctx context.Context, bot *slackbot.Bot, evt *slack.MessageEvent) {
	field := slack.AttachmentField{
		Title: "field title",
		Value: "field value",
		Short: true,
	}
	attachment := slack.Attachment{
		Fallback:   "this is the fallback text",
		AuthorName: "Message Author Name",
		Title:      "message title",
		Text:       "message text",
		Fields:     []slack.AttachmentField{field},
	}
	params := slack.PostMessageParameters{
		AsUser:      true,
		Attachments: []slack.Attachment{attachment},
	}
	if _, _, err := bot.Client.PostMessage(evt.Channel, "", params); err != nil {
		fmt.Printf("got an error making an api call to post a message with an attachment: %s\n", err.Error())
	}
}
// channelJoinHandler thanks the channel via the web API when the bot is
// invited; a failed post is only logged.
func channelJoinHandler(ctx context.Context, bot *slackbot.Bot, channel *slack.Channel) {
	if _, _, err := bot.Client.PostMessage(channel.ID, "thanks for the invite", slack.PostMessageParameters{}); err != nil {
		fmt.Printf("error handling channel join event: %s", err.Error())
	}
}
// configureBot wires up all event handlers: channel joins, message
// pattern matchers, a direct-mention subrouter, and a catch-all
// direct-message subrouter.
func configureBot(bot *slackbot.Bot) {
	bot.OnChannelJoin(channelJoinHandler)
	bot.Hear("send an attachment").MessageHandler(withAttachmentHandler)
	bot.Hear("send to api").MessageHandler(postMessageHandler)
	bot.Hear("global message").MessageHandler(globalMessageHandler)
	// Messages that directly mention the bot.
	toMe := bot.Messages(slackbot.DirectMention).Subrouter()
	toMe.Hear("greetings and salutations").MessageHandler(helloFunc)
	// Direct messages: match everything.
	dms := bot.Messages(slackbot.DirectMessage).Subrouter()
	dms.Hear("^.*$").MessageHandler(directMessageHandler)
}

// main reads the bot token from the SLACK_TOKEN environment variable,
// configures all handlers, and starts the bot.
func main() {
	bot := slackbot.New(os.Getenv("SLACK_TOKEN"))
	configureBot(bot)
	bot.Run()
}
|
package base
import (
"sync"
)
// SentinelEntry represents one pass of a resource through the slot chain.
type SentinelEntry struct {
	res *ResourceWrapper
	// one entry with one context
	ctx *EntryContext
	// each entry holds a slot chain.
	// it means this entry will go through the sc
	sc *SlotChain
	// exitCtl guarantees the Exit logic runs at most once per entry.
	exitCtl sync.Once
}
// NewSentinelEntry bundles the resource, its per-entry context, and the
// slot chain the entry will traverse.
func NewSentinelEntry(ctx *EntryContext, rw *ResourceWrapper, sc *SlotChain) *SentinelEntry {
	entry := new(SentinelEntry)
	entry.ctx = ctx
	entry.res = rw
	entry.sc = sc
	return entry
}
// SetError records err on the entry's context, if it has one.
func (e *SentinelEntry) SetError(err error) {
	if ctx := e.ctx; ctx != nil {
		ctx.SetError(err)
	}
}

// Context returns the entry's context (may be nil).
func (e *SentinelEntry) Context() *EntryContext {
	return e.ctx
}

// Resource returns the wrapped resource this entry refers to.
func (e *SentinelEntry) Resource() *ResourceWrapper {
	return e.res
}
// ExitOptions collects optional settings applied when an entry exits.
type ExitOptions struct {
	// err, when non-nil, is recorded on the entry context at exit time.
	err error
}

// ExitOption mutates an ExitOptions value (functional-options pattern).
type ExitOption func(*ExitOptions)

// WithError returns an option that attaches err to the exiting entry.
func WithError(err error) ExitOption {
	return func(opts *ExitOptions) {
		opts.err = err
	}
}
// Exit completes the entry: it records an optional error on the context,
// then — exactly once, guarded by exitCtl — runs the slot chain's exit
// hooks and hands the context back to the chain (RefurbishContext).
func (e *SentinelEntry) Exit(exitOps ...ExitOption) {
	var options = ExitOptions{
		err: nil,
	}
	for _, opt := range exitOps {
		opt(&options)
	}
	ctx := e.ctx
	// Mirror SetError's nil-context guard instead of assuming ctx is
	// non-nil when WithError is supplied.
	if options.err != nil && ctx != nil {
		ctx.SetError(options.err)
	}
	e.exitCtl.Do(func() {
		if e.sc != nil {
			e.sc.exit(ctx)
			e.sc.RefurbishContext(ctx)
		}
	})
}
|
// Copyright 2016 IBM Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"strings"
"time"
"fmt"
"github.com/codegangsta/cli"
)
// Names of the command-line flags (and environment-variable stems, see
// envVar) used by the Tenant CLI configuration below.
const (
	register        = "register"
	proxy           = "proxy"
	log             = "log"
	supervise       = "supervise"
	tenantToken     = "tenant_token"
	kafkaToken      = "kafka_token"
	kafkaUsername   = "kafka_user"
	kafkaPassword   = "kafka_pass"
	kafkaBrokers    = "kafka_broker"
	kafkaRestURL    = "kafka_rest_url"
	kafkaAdminURL   = "kafka_admin_url"
	kafkaSASL       = "kafka_sasl"
	registryToken   = "registry_token"
	registryURL     = "registry_url"
	nginxPort       = "nginx_port"
	controllerURL   = "controller_url"
	controllerPoll  = "controller_poll"
	tenantTTL       = "tenant_ttl"
	tenantHeartbeat = "tenant_heartbeat"
	endpointHost    = "endpoint_host"
	endpointPort    = "endpoint_port"
	endpointType    = "endpoint_type"
	serviceName     = "service"
	logLevel        = "log_level"
	logstashServer  = "logstash_server"
	forceUpdate     = "force_update"
)
// TenantFlags defines all expected args for Tenant.
var TenantFlags = []cli.Flag{
	cli.StringFlag{
		Name:   logLevel,
		EnvVar: envVar(logLevel),
		Value:  "info",
		Usage:  "Logging level (debug, info, warn, error, fatal, panic)",
	},
	cli.BoolFlag{
		Name:   forceUpdate,
		EnvVar: envVar(forceUpdate),
		Usage:  "Update Registry and Kafka credentials on startup",
	},
	cli.StringFlag{
		Name:   serviceName,
		EnvVar: envVar(serviceName),
		Usage:  "Service name to register with",
	},
	cli.StringFlag{
		Name:   endpointHost,
		EnvVar: envVar(endpointHost),
		Usage:  "Service endpoint host name (local IP is used if none specified)",
	},
	cli.IntFlag{
		Name:   endpointPort,
		EnvVar: envVar(endpointPort),
		Usage:  "Service endpoint port",
	},
	cli.StringFlag{
		Name:   endpointType,
		EnvVar: envVar(endpointType),
		Usage:  "Service endpoint type (http, https, tcp, udp, user)",
		Value:  "http",
	},
	cli.BoolTFlag{
		Name:   register,
		EnvVar: envVar(register),
		Usage:  "Enable automatic service registration and heartbeat",
	},
	cli.BoolTFlag{
		Name:   proxy,
		EnvVar: envVar(proxy),
		Usage:  "Enable automatic service discovery and load balancing across services using NGINX",
	},
	cli.BoolTFlag{
		Name:   log,
		EnvVar: envVar(log),
		Usage:  "Enable logging of outgoing requests through proxy using FileBeat",
	},
	cli.BoolFlag{
		Name:   supervise,
		EnvVar: envVar(supervise),
		Usage:  "Enable monitoring of application process. If application dies, container is killed as well. This has to be the last flag. All arguments provided after this flag will considered as part of the application invocation",
	},
	// Tenant
	cli.StringFlag{
		Name:   tenantToken,
		EnvVar: envVar(tenantToken),
		Usage:  "Token for Service Proxy instance",
	},
	cli.DurationFlag{
		Name:   tenantTTL,
		EnvVar: envVar(tenantTTL),
		Value:  time.Duration(time.Minute),
		Usage:  "Tenant TTL for Registry",
	},
	cli.DurationFlag{
		Name:   tenantHeartbeat,
		EnvVar: envVar(tenantHeartbeat),
		Value:  time.Duration(time.Second * 45),
		Usage:  "Tenant heartbeat interval to Registry",
	},
	// Registry
	cli.StringFlag{
		Name:   registryURL,
		EnvVar: envVar(registryURL),
		Usage:  "URL for Registry",
	},
	cli.StringFlag{
		Name:   registryToken,
		EnvVar: envVar(registryToken),
		// Typo fix: was "API token for Regsitry".
		Usage: "API token for Registry",
	},
	// NGINX
	cli.IntFlag{
		Name:   nginxPort,
		EnvVar: envVar(nginxPort),
		// NOTE(review): 6379 is conventionally the Redis port; confirm
		// this default is intentional for NGINX.
		Value: 6379,
		Usage: "Port for NGINX",
	},
	// Controller
	cli.StringFlag{
		Name:   controllerURL,
		EnvVar: envVar(controllerURL),
		Usage:  "URL for Controller service",
	},
	cli.DurationFlag{
		Name:   controllerPoll,
		EnvVar: envVar(controllerPoll),
		Value:  time.Duration(15 * time.Second),
		Usage:  "Interval for polling Controller",
	},
	// Logserver
	cli.StringFlag{
		Name:   logstashServer,
		EnvVar: envVar(logstashServer),
		Usage:  "Logstash target for nginx logs",
	},
	// Kafka
	cli.StringFlag{
		Name:   kafkaUsername,
		EnvVar: envVar(kafkaUsername),
		Usage:  "Username for Kafka service",
	},
	cli.StringFlag{
		Name:   kafkaPassword,
		EnvVar: envVar(kafkaPassword),
		Usage:  "Password for Kafka service",
	},
	cli.StringFlag{
		Name:   kafkaToken,
		EnvVar: envVar(kafkaToken),
		Usage:  "Token for Kafka service",
	},
	cli.StringFlag{
		Name:   kafkaAdminURL,
		EnvVar: envVar(kafkaAdminURL),
		Usage:  "Admin URL for Kafka service",
	},
	cli.StringFlag{
		Name:   kafkaRestURL,
		EnvVar: envVar(kafkaRestURL),
		Usage:  "REST URL for Kafka service",
	},
	cli.BoolFlag{
		Name:   kafkaSASL,
		EnvVar: envVar(kafkaSASL),
		Usage:  "Use SASL/PLAIN authentication for Kafka",
	},
	cli.StringSliceFlag{
		Name:   kafkaBrokers,
		EnvVar: envVar(kafkaBrokers),
		Usage:  "Kafka broker",
	},
}
// envVar maps a flag name to its environment variable: upper-cased and
// prefixed with "A8_" (e.g. "log_level" -> "A8_LOG_LEVEL").
func envVar(name string) string {
	prefixed := fmt.Sprintf("%v%v", "A8_", name)
	return strings.ToUpper(prefixed)
}
|
package tlsconfig
import (
"crypto/tls"
)
func Secure(certificates []tls.Certificate) *tls.Config {
tlsConfig := &tls.Config{
// Causes servers to use Go's default ciphersuite preferences,
// which are tuned to avoid attacks. Does nothing on clients.
PreferServerCipherSuites: true,
// Only use curves which have assembly implementations
CurvePreferences: []tls.CurveID{
tls.CurveP256,
tls.X25519, // Go 1.8 only
},
MinVersion: tls.VersionTLS12,
CipherSuites: []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, // Go 1.8 only
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, // Go 1.8 only
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
// Best disabled, as they don't provide Forward Secrecy,
// but might be necessary for some clients
// tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
// tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
},
Certificates: certificates,
}
tlsConfig.BuildNameToCertificate() // Multiple domains on the same server with certs requires this
return tlsConfig
}
|
package fakes
import "github.com/cloudfoundry-incubator/notifications/postal"
// TemplatesLoader is a test double for the postal templates loader. It
// records the content suffix it was asked for and returns canned values.
type TemplatesLoader struct {
	// ContentSuffix captures the contentSuffix argument of the last call.
	ContentSuffix string
	// Templates is returned verbatim from LoadTemplates.
	Templates postal.Templates
	// LoadError is returned verbatim from LoadTemplates.
	LoadError error
}

// LoadTemplates records contentSuffix (the other arguments are ignored)
// and returns the pre-configured Templates and LoadError.
func (fake *TemplatesLoader) LoadTemplates(subjectSuffix, contentSuffix, clientID, kindID string) (postal.Templates, error) {
	fake.ContentSuffix = contentSuffix
	return fake.Templates, fake.LoadError
}
|
// Copyright (c) 2014 Dataence, LLC. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// 实现TLV数据的解码功能
package tlv
import (
"errors"
"fmt"
)
// Decoder incrementally parses TLV objects out of a network byte stream.
// Partial input is buffered across calls to Parse.
type Decoder struct {
	buf          []byte // accumulation buffer for not-yet-parsed bytes
	bufLen       int    // number of valid bytes in buf
	beforeCursor int    // position where the previous field (tag/length) ended
	curCursor    int    // current scan position within buf
	isFindTag    bool   // tag field fully read for the current object
	isFindLen    bool   // length field fully read for the current object
	valueLen     int    // decoded length of the value segment
}
// Parse consumes requestLen bytes of network data and returns any
// complete TLV objects found. Incomplete trailing data is buffered for
// the next call. A panic while parsing is recovered and returned as an
// error.
//
// Tag and length fields are encoded base-128: every byte except the
// field's last has its high bit (0x80) set.
func (this *Decoder) Parse(request []byte, requestLen int) (tlvArray []TLVObject, err error) {
	defer func() {
		if errPanic := recover(); errPanic != nil {
			err = errors.New(fmt.Sprintf("tlv parse panic: %v", errPanic))
		}
	}()
	this.buf = append(this.buf, request[:requestLen]...)
	this.bufLen += requestLen
	for ; this.curCursor < this.bufLen; this.curCursor++ {
		// Locate the end of the tag field (first byte with the high bit clear).
		if this.isFindTag == false {
			if this.buf[this.curCursor]&0x80 == 0 {
				this.isFindTag = true
				this.beforeCursor = this.curCursor + 1
			}
			continue
		}
		// Locate the end of the length field and decode the value length.
		if this.isFindLen == false {
			if this.buf[this.curCursor]&0x80 == 0 {
				this.isFindLen = true
				this.valueLen = parseLength(this.buf[this.beforeCursor : this.curCursor+1])
				this.beforeCursor = this.curCursor
				// A zero-length value means the object is already complete.
				if this.valueLen == 0 {
					tlvArray = this.addParsedObj(tlvArray)
				}
			}
			continue
		}
		// The value segment is complete: extract the whole TLV object.
		if this.curCursor-this.beforeCursor == this.valueLen {
			tlvArray = this.addParsedObj(tlvArray)
		}
	}
	return tlvArray, nil
}
// addParsedObj decodes the complete TLV object currently spanning
// buf[0 : curCursor+1], appends it to tlvArray, and resets the decoder
// state for the next object.
func (this *Decoder) addParsedObj(tlvArray []TLVObject) (retArray []TLVObject) {
	tlvObject := TLVObject{}
	tlvObject.FromBytes(this.buf[:this.curCursor+1])
	retArray = append(tlvArray, tlvObject)
	this.reset()
	return retArray
}
// reset discards the bytes of the object that was just parsed and
// rewinds the cursors so Parse's loop resumes at the new buffer head.
func (this *Decoder) reset() {
	// Drop the consumed portion of the buffer.
	this.buf = this.buf[this.curCursor+1:]
	this.bufLen = this.bufLen - this.curCursor - 1
	this.isFindLen = false
	this.isFindTag = false
	this.beforeCursor = 0
	// -1 so the loop's post-increment lands on index 0.
	this.curCursor = -1
}
// findTagByteCount returns how many leading bytes of tlvBytes form the
// tag field: every tag byte except the last has its high bit set.
func findTagByteCount(tlvBytes []byte) (tagByteCount int) {
	for _, b := range tlvBytes {
		tagByteCount++
		if b&0x80 == 0 {
			return tagByteCount
		}
	}
	return tagByteCount
}

// findLenByteCount returns how many bytes starting at lenStartPos form
// the length field, using the same high-bit continuation rule as the tag.
func findLenByteCount(tlvBytes []byte, lenStartPos int) (lenByteCount int) {
	for i := lenStartPos; i < len(tlvBytes); i++ {
		lenByteCount++
		if tlvBytes[i]&0x80 == 0 {
			return lenByteCount
		}
	}
	return lenByteCount
}
// parseTag decodes a tag field: byte 0 carries the frame-type and
// data-type bit masks, and any following bytes encode the tag value in
// little-endian base-128 (low 7 bits per byte, least significant
// first). A single-byte tag stores its value in the low five bits.
func parseTag(tagBytes []byte) (frameType byte, dataType byte, tagValue int) {
	frameType = tagBytes[0] & FarmeTypePrivate
	dataType = tagBytes[0] & DataTypeStruct
	if len(tagBytes) == 1 {
		return frameType, dataType, int(tagBytes[0] & 0x1f)
	}
	power := 1
	for _, b := range tagBytes[1:] {
		tagValue += int(b&0x7f) * power
		power *= 128
	}
	return frameType, dataType, tagValue
}
// parseLength decodes the length field as little-endian base-128:
// each byte contributes its low seven bits, least significant first.
func parseLength(lenBytes []byte) (length int) {
	power := 1
	for _, b := range lenBytes {
		length += int(b&0x7f) * power
		power *= 128
	}
	return length
}
|
package example
// Order is the minimal order record used by this example.
type Order struct {
	ID string // order identifier
}
// processRequest creates an example order and panics on failure.
//
// Fixes over the previous version: the result `o` was declared but
// never used, which is a compile error in Go; and the bare `true`
// argument (which the original comment itself questioned) is replaced
// with a named constant so the call site documents what it means.
func processRequest() {
	const isPromotion = true
	o, err := CreateOrder("product id", "customer id", "shipment id", isPromotion)
	if err != nil {
		panic(err)
	}
	_ = o // result intentionally unused in this example
}
// CreateOrder builds an Order from the given identifiers; isPromotion
// flags a promotional order. This stub ignores all inputs and returns
// an empty Order with no error.
func CreateOrder(productID, customerID, shipmentID string, isPromotion bool) (Order, error) {
	return Order{}, nil
}
|
package main
import "fmt"
// main exercises firstMissingPositive with a duplicate-heavy input.
func main() {
	nums := []int{1, 1}
	fmt.Println(firstMissingPositive(nums))
}
// firstMissingPositive returns the smallest positive integer absent
// from nums. It cycle-sorts each value v in [1, len(nums)] into slot
// v-1, then scans for the first slot whose value does not match.
//
// Fixes over the previous version:
//   - values greater than len(nums) are now skipped (previously they
//     indexed out of range and panicked, e.g. on []int{5});
//   - a swap is skipped when the destination slot already holds the
//     same value (previously looped forever on duplicates such as
//     []int{1, 1} — the exact input main feeds this function).
func firstMissingPositive(nums []int) int {
	for i := 0; i < len(nums); i++ {
		for nums[i] != i+1 && nums[i] >= 1 && nums[i] <= len(nums) && nums[nums[i]-1] != nums[i] {
			nums[nums[i]-1], nums[i] = nums[i], nums[nums[i]-1]
		}
	}
	for i, v := range nums {
		if v != i+1 {
			return i + 1
		}
	}
	return len(nums) + 1
}
// firstMissingPositive2 returns the smallest missing positive using a
// set: it records every value seen and, whenever the current candidate
// answer appears, advances the candidate past all recorded values.
func firstMissingPositive2(nums []int) int {
	seen := make(map[int]bool, len(nums))
	answer := 1
	for _, v := range nums {
		seen[v] = true
		if v != answer {
			continue
		}
		answer++
		for seen[answer] {
			answer++
		}
	}
	return answer
}
|
package forum
import (
"time"
"github.com/kapmahc/axe/plugins/nut"
)
// Article is a forum article row (table forum_articles, per the
// tableName sql tag), with its author, tags and comments.
type Article struct {
	tableName struct{}  `sql:"forum_articles"`
	ID        uint      `json:"id"`
	Title     string    `json:"title"`
	Body      string    `json:"body"`
	Type      string    `json:"type"`
	User      nut.User  `json:"user"`   // author
	UserID    uint      `json:"userId"` // author's id
	Tags      []Tag     `json:"tags" pg:",many2many:forum_articles_tags"`
	Comments  []Comment `json:"comments"`
	UpdatedAt time.Time `json:"updatedAt"`
	CreatedAt time.Time `json:"createdAt"`
}
// ArticleTag is the articles<->tags join row (table
// forum_articles_tags) backing the many2many relations declared on
// Article.Tags and Tag.Articles.
type ArticleTag struct {
	tableName struct{} `sql:"forum_articles_tags"`
	ArticleID uint
	TagID     uint
}
// Tag is a forum tag row (table forum_tags) with its tagged articles.
type Tag struct {
	tableName struct{}  `sql:"forum_tags"`
	ID        uint      `json:"id"`
	Name      string    `json:"name"`
	Articles  []Article `json:"articles" pg:",many2many:forum_articles_tags"`
	UpdatedAt time.Time `json:"updatedAt"`
	CreatedAt time.Time `json:"createdAt"`
}
// Comment is a comment row (table forum_comments) on an Article.
type Comment struct {
	tableName struct{}  `sql:"forum_comments"`
	ID        uint      `json:"id"`
	Body      string    `json:"body"`
	Type      string    `json:"type"`
	User      nut.User  `json:"user"`   // comment author
	UserID    uint      `json:"userId"` // author's id
	Article   Article   `json:"article"`
	ArticleID uint      `json:"articleId"`
	UpdatedAt time.Time `json:"updatedAt"`
	CreatedAt time.Time `json:"createdAt"`
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package fasthttp_test
import (
"testing"
"github.com/elastic/go-elasticsearch/v8"
"github.com/elastic/go-elasticsearch/v8/_examples/fasthttp"
)
// BenchmarkHTTPClient measures Info() round-trips through the default
// net/http transport. Requires a reachable Elasticsearch instance.
func BenchmarkHTTPClient(b *testing.B) {
	b.ReportAllocs()
	client, err := elasticsearch.NewDefaultClient()
	if err != nil {
		b.Fatalf("ERROR: %s", err)
	}
	b.Run("Info()", func(b *testing.B) {
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			res, err := client.Info()
			if err != nil {
				b.Errorf("Unexpected error when getting a response: %s", err)
				continue
			}
			res.Body.Close()
		}
	})
}
// BenchmarkFastHTTPClient measures Info() round-trips through the
// fasthttp-backed transport, for comparison with BenchmarkHTTPClient.
func BenchmarkFastHTTPClient(b *testing.B) {
	b.ReportAllocs()
	cfg := elasticsearch.Config{Transport: &fasthttp.Transport{}}
	client, err := elasticsearch.NewClient(cfg)
	if err != nil {
		b.Fatalf("ERROR: %s", err)
	}
	b.Run("Info()", func(b *testing.B) {
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			res, err := client.Info()
			if err != nil {
				b.Errorf("Unexpected error when getting a response: %s", err)
				continue
			}
			res.Body.Close()
		}
	})
}
|
package main
import (
"testing"
"image/color"
"fmt"
)
// TestTransfColor prints the result of transfColor for a sample RGBA
// value in hex and decimal for manual inspection (no assertions).
func TestTransfColor(t *testing.T) {
	in := color.RGBA{R: 218, G: 210, B: 137, A: 0xff}
	c := transfColor(in)
	fmt.Printf("R:0x%x G:0x%x B:0x%x A:0x%x\n", c.R, c.G, c.B, c.A)
	fmt.Printf("R:%d G:%d B:%d A:%d\n", c.R, c.G, c.B, c.A)
}
|
// min returns the smaller of x and y.
func min(x int, y int) int {
	if x < y {
		return x
	}
	return y
}
func minDepth(root *TreeNode) int {
if root == nil {
return 0
} else {
var l = minDepth(root.Left)
var r = minDepth(root.Right)
if l == 0 {
return r + 1
}
if r == 0 {
return l + 1
}
return min(l, r) + 1
}
} |
package utils
import (
"github.com/go-ini/ini"
. "github.com/smartystreets/goconvey/convey"
"testing"
)
// Test_Config round-trips unpacker settings through the INI config
// file and verifies the persisted values.
func Test_Config(t *testing.T) {
	Convey("Test Set", t, func() {
		Convey("initest", func() {
			// note: you can change where ts.ini lives
			conf := &Config{" ", "/tmp/ts.ini"}
			conf.SaveDefaultUnpackerArgs(40*1024, 21)
			conf.SaveFileSlice(true)
			loaded, _ := ini.LooseLoad(conf.cfgfile)
			fileSlice := loaded.Section("pkg").Key("file_slice").MustBool()
			minSegSize := loaded.Section("default_unpacker").Key("min_seg_size").MustUint64(0)
			maxSegCnt := loaded.Section("default_unpacker").Key("max_seg_cnt").MustUint(0)
			So(fileSlice, ShouldBeTrue)
			So(minSegSize, ShouldEqual, 40*1024)
			So(maxSegCnt, ShouldEqual, 21)
		})
	})
}
|
package storage
import (
"testing"
"github.com/magiconair/properties/assert"
)
// TestGetFile checks S3.GetFile URL construction for every table case.
func TestGetFile(t *testing.T) {
	for _, tc := range getFileTestCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			got := tc.s3.GetFile(tc.cid)
			assert.Equal(t, got, tc.result)
		})
	}
}
// getFileData describes one GetFile table-test case.
type getFileData struct {
	name   string // subtest name
	s3     S3     // storage client under test
	cid    string // content id appended to the base URL
	result string // expected URL
}
// getFileTestCases covers base URLs both with and without a trailing
// slash; both must produce the same joined URL.
var getFileTestCases = []getFileData{
	{
		name: "Base url without a '/' at the end",
		s3: S3{
			URL: "https://s3bucket.com",
		},
		cid:    "theCid",
		result: "https://s3bucket.com/theCid",
	}, {
		name: "Base url with a '/' at the end",
		s3: S3{
			URL: "https://s3bucket.com/",
		},
		cid:    "theCid",
		result: "https://s3bucket.com/theCid",
	},
}
|
package main
import (
"fmt"
"net/http"
"os"
_ "github.com/go-sql-driver/mysql"
"github.com/gorilla/mux"
"github.com/holly-graham/scheduleapi/db"
"github.com/holly-graham/scheduleapi/schedule"
"github.com/holly-graham/scheduleapi/server"
)
// port is the TCP address the HTTP server listens on.
const port = ":8000"
// main connects to the activities database, wires the schedule service
// into an HTTP router, and serves it on port.
//
// Fixes over the previous version: the local variable no longer
// shadows the imported db package, and the error returned by
// http.ListenAndServe (always non-nil when the server stops) is no
// longer silently discarded.
func main() {
	database, err := db.ConnectDatabase("activities_db.config")
	if err != nil {
		fmt.Println("Error connecting to database:", err)
		os.Exit(1)
	}
	scheduleService := schedule.NewService(database)
	scheduleServer := server.NewServer(scheduleService)
	router := mux.NewRouter()
	router.HandleFunc("/day/{chosenDay}/activities", scheduleServer.ListActivitiesHandler).Methods("GET")
	router.HandleFunc("/day/{chosenDay}/activities", scheduleServer.AddActivityHandler).Methods("POST")
	http.Handle("/", router)
	fmt.Println("Waiting for requests on port:", port)
	if err := http.ListenAndServe(port, nil); err != nil {
		fmt.Println("Server stopped:", err)
		os.Exit(1)
	}
}
|
// Package server provides HTTP/2 gRPC server functionality.
package server
|
package database
import (
"github.com/jinzhu/gorm"
)
// Mysql wraps a gorm database handle.
type Mysql struct {
	db *gorm.DB // underlying gorm connection
}
// New opens the MySQL connection, caches it on the receiver, and
// returns it.
//
// Fixes over the previous version: the receiver is now a pointer, so
// the assignment to the db field is actually retained (with a value
// receiver it was silently discarded), and the error from gorm.Open is
// no longer ignored — a bad DSN previously surfaced later as a
// nil-pointer panic far from its cause.
func (m *Mysql) New() *gorm.DB {
	db, err := gorm.Open("mysql", "root:@/learnsong?charset=utf8&parseTime=True&loc=Local")
	if err != nil {
		// Fail fast: the application cannot operate without its database.
		panic(err)
	}
	m.db = db
	return m.db
}
|
package ewallet_test
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/xendit/xendit-go/ewallet"
v1 "github.com/imrenagi/go-payment/gateway/xendit/ewallet/v1"
"github.com/imrenagi/go-payment/invoice"
)
// TestNewOvo verifies that v1.NewOVO maps an invoice onto the Xendit
// OVO CreatePaymentParams, and that an invoice with a malformed phone
// number is rejected with the expected error.
//
// NOTE(review): dummyInv, incorrectPhoneDummyInv and fakeDueDate are
// fixtures defined elsewhere in this package; the expected params
// below mirror their contents — TODO confirm against those fixtures.
func TestNewOvo(t *testing.T) {
	tests := []struct {
		name string
		inv  *invoice.Invoice
		req  *ewallet.CreatePaymentParams
		// callbackURL string
		// redirectURL string
		wantErr error
	}{
		{
			name: "should create correct params",
			inv:  dummyInv,
			// callbackURL: "http://example.com/callback",
			// redirectURL: "http://example.com/success",
			wantErr: nil,
			req: &ewallet.CreatePaymentParams{
				XApiVersion:    "2020-02-01",
				EWalletType:    "OVO",
				ExternalID:     "a-random-invoice-number",
				Amount:         15000,
				Phone:          "08111231234",
				ExpirationDate: &fakeDueDate,
				// CallbackURL: "http://example.com/callback",
				// RedirectURL: "http://example.com/success",
				Items: []ewallet.Item{
					{
						ID:       "HOME",
						Name:     "random-item",
						Price:    15000,
						Quantity: 1,
					},
				},
			},
		},
		{
			name:    "should return error if phone number is not using correct format",
			inv:     incorrectPhoneDummyInv,
			wantErr: fmt.Errorf("invalid phone number. must be in 08xxxx format"),
			req:     nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			params, err := v1.NewOVO(tt.inv)
			assert.Equal(t, tt.wantErr, err)
			assert.EqualValues(t, tt.req, params)
		})
	}
}
|
// Copyright © 2018 Inanc Gumus
// Learn Go Programming Course
// License: https://creativecommons.org/licenses/by-nc-sa/4.0/
//
// For more tutorials : https://learngoprogramming.com
// In-person training : https://www.linkedin.com/in/inancgumus/
// Follow me on twitter: https://twitter.com/inancgumus
package main
import (
"fmt"
"time"
)
// main demonstrates that an untyped numeric constant can multiply a
// time.Duration directly: `later` has no fixed type, so at the use
// site it adapts to time.Duration (whose underlying type is int64),
// making hours*later legal.
//
// Improvement: the error from time.ParseDuration is no longer
// discarded — "1h" is valid today, but a future edit to the literal
// would otherwise silently print a zero duration.
func main() {
	const later = 10
	hours, err := time.ParseDuration("1h")
	if err != nil {
		fmt.Println("parse duration:", err)
		return
	}
	fmt.Printf("%s later...\n", hours*later)
}
|
package git
import (
"errors"
"testing"
"github.com/abhinav/git-pr/gateway"
"github.com/abhinav/git-pr/gateway/gatewaytest"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestBulkRebaser drives BulkRebaser through tables of Onto/Rebase
// calls against a mocked git gateway, checking the rebase requests it
// issues, the branch names it reports, the cleanup deletions it
// performs (in order), and the errors it accumulates.
//
// NOTE(review): deletions are asserted strictly in order via
// gomock.InOrder, so ExpectDeletions must list cleanup in the reverse
// order branches were created — confirm against BulkRebaser.Cleanup.
func TestBulkRebaser(t *testing.T) {
	// deletion is one expected Checkout(parent) + DeleteBranch(branch) pair.
	type deletion struct {
		Checkout string
		Delete   string
	}
	type rebaseCall struct {
		From string
		To   string
		// Base() and Err() expected on the returned RebaseHandle.
		WantBase string
		WantErr  string
	}
	type ontoCall struct {
		Onto    string
		Rebases []rebaseCall
	}
	tests := []struct {
		Desc         string
		Do           []ontoCall
		SetupGateway func(*gatewaytest.MockGit)
		// ExpectRebases is a convenience option for setting up Rebase
		// requests on the gateway that never fail. This may be omitted or
		// partial if the test has more complex rebase setup in SetupGateway.
		ExpectRebases []*gateway.RebaseRequest
		// ExpectDeletions is a convenience option for setting up
		// Checkout(parent), Delete(branch) in-order without any errors. This
		// may be omitted or partial if the test has a more complex deletion
		// setup in SetupGateway.
		ExpectDeletions []deletion
		WantErrors      []string
	}{
		{
			Desc: "single rebase",
			Do: []ontoCall{
				{
					Onto: "master",
					Rebases: []rebaseCall{
						{
							From:     "feature-1",
							To:       "feature-2",
							WantBase: "git-pr/rebase/feature-2",
						},
					},
				},
			},
			ExpectRebases: []*gateway.RebaseRequest{
				{
					Onto:   "master",
					From:   "feature-1",
					Branch: "git-pr/rebase/feature-2",
				},
			},
			ExpectDeletions: []deletion{
				{Checkout: "master", Delete: "git-pr/rebase/feature-2"},
			},
		},
		{
			// Each rebase in a stack is based on the temporary branch
			// produced by the previous one; cleanup runs innermost-first.
			Desc: "rebase stack",
			Do: []ontoCall{
				{
					Onto: "origin/dev",
					Rebases: []rebaseCall{
						{
							From:     "dev",
							To:       "feature-1",
							WantBase: "git-pr/rebase/feature-1",
						},
						{
							From:     "feature-1",
							To:       "feature-2",
							WantBase: "git-pr/rebase/feature-2",
						},
						{
							From:     "feature-2",
							To:       "feature-3",
							WantBase: "git-pr/rebase/feature-3",
						},
						{
							From:     "feature-3",
							To:       "feature-4",
							WantBase: "git-pr/rebase/feature-4",
						},
					},
				},
			},
			ExpectRebases: []*gateway.RebaseRequest{
				{
					Onto:   "origin/dev",
					From:   "dev",
					Branch: "git-pr/rebase/feature-1",
				},
				{
					Onto:   "git-pr/rebase/feature-1",
					From:   "feature-1",
					Branch: "git-pr/rebase/feature-2",
				},
				{
					Onto:   "git-pr/rebase/feature-2",
					From:   "feature-2",
					Branch: "git-pr/rebase/feature-3",
				},
				{
					Onto:   "git-pr/rebase/feature-3",
					From:   "feature-3",
					Branch: "git-pr/rebase/feature-4",
				},
			},
			ExpectDeletions: []deletion{
				{
					Checkout: "git-pr/rebase/feature-3",
					Delete:   "git-pr/rebase/feature-4",
				},
				{
					Checkout: "git-pr/rebase/feature-2",
					Delete:   "git-pr/rebase/feature-3",
				},
				{
					Checkout: "git-pr/rebase/feature-1",
					Delete:   "git-pr/rebase/feature-2",
				},
				{
					Checkout: "origin/dev",
					Delete:   "git-pr/rebase/feature-1",
				},
			},
		},
		{
			// A failing rebase poisons the rest of its chain (same error
			// expected on every later handle) but not an independent chain.
			Desc: "rebase failure",
			Do: []ontoCall{
				{
					Onto: "origin/master",
					Rebases: []rebaseCall{
						{
							From:    "master",
							To:      "feature-1",
							WantErr: "great sadness",
						},
						{
							From:    "feature-1",
							To:      "feature-2",
							WantErr: "great sadness",
						},
						{
							From:    "feature-2",
							To:      "feature-3",
							WantErr: "great sadness",
						},
					},
				},
				{
					Onto: "origin/master",
					Rebases: []rebaseCall{
						{
							From:     "feature-3",
							To:       "feature-4",
							WantBase: "git-pr/rebase/feature-4",
						},
					},
				},
			},
			ExpectRebases: []*gateway.RebaseRequest{
				{
					Onto:   "origin/master",
					From:   "feature-3",
					Branch: "git-pr/rebase/feature-4",
				},
			},
			SetupGateway: func(git *gatewaytest.MockGit) {
				git.EXPECT().
					Rebase(&gateway.RebaseRequest{
						Onto:   "origin/master",
						From:   "master",
						Branch: "git-pr/rebase/feature-1",
					}).
					Return(errors.New("great sadness"))
			},
			ExpectDeletions: []deletion{
				{Checkout: "origin/master", Delete: "git-pr/rebase/feature-4"},
				{Checkout: "origin/master", Delete: "git-pr/rebase/feature-1"},
			},
			WantErrors: []string{"great sadness"},
		},
		{
			Desc: "multiple rebase failures",
			Do: []ontoCall{
				{
					Onto: "origin/master",
					Rebases: []rebaseCall{
						{
							From:    "master",
							To:      "feature-1",
							WantErr: "feature 1 failed",
						},
					},
				},
				{
					Onto: "origin/master",
					Rebases: []rebaseCall{
						{
							From:    "feature-1",
							To:      "feature-2",
							WantErr: "feature 2 failed",
						},
					},
				},
				{
					Onto: "origin/master",
					Rebases: []rebaseCall{
						{
							From:    "feature-2",
							To:      "feature-3",
							WantErr: "feature 3 failed",
						},
					},
				},
			},
			SetupGateway: func(git *gatewaytest.MockGit) {
				git.EXPECT().
					Rebase(&gateway.RebaseRequest{
						Onto:   "origin/master",
						From:   "master",
						Branch: "git-pr/rebase/feature-1",
					}).
					Return(errors.New("feature 1 failed"))
				git.EXPECT().
					Rebase(&gateway.RebaseRequest{
						Onto:   "origin/master",
						From:   "feature-1",
						Branch: "git-pr/rebase/feature-2",
					}).
					Return(errors.New("feature 2 failed"))
				git.EXPECT().
					Rebase(&gateway.RebaseRequest{
						Onto:   "origin/master",
						From:   "feature-2",
						Branch: "git-pr/rebase/feature-3",
					}).
					Return(errors.New("feature 3 failed"))
			},
			ExpectDeletions: []deletion{
				{Checkout: "origin/master", Delete: "git-pr/rebase/feature-3"},
				{Checkout: "origin/master", Delete: "git-pr/rebase/feature-2"},
				{Checkout: "origin/master", Delete: "git-pr/rebase/feature-1"},
			},
			WantErrors: []string{
				"feature 1 failed",
				"feature 2 failed",
				"feature 3 failed",
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.Desc, func(t *testing.T) {
			mockCtrl := gomock.NewController(t)
			defer mockCtrl.Finish()
			gw := gatewaytest.NewMockGit(mockCtrl)
			if tt.SetupGateway != nil {
				tt.SetupGateway(gw)
			}
			for _, req := range tt.ExpectRebases {
				gw.EXPECT().Rebase(req).Return(nil)
			}
			var deletions []*gomock.Call
			for _, x := range tt.ExpectDeletions {
				deletions = append(deletions,
					gw.EXPECT().Checkout(x.Checkout).Return(nil),
					gw.EXPECT().DeleteBranch(x.Delete).Return(nil),
				)
			}
			gomock.InOrder(deletions...)
			rebaser := NewBulkRebaser(gw)
			// Make temporary-branch names deterministic for the table above.
			rebaser.checkoutUniqueBranch = checkoutUniqueBranchAlwaysSuccessful
			defer func() {
				assert.NoError(t, rebaser.Cleanup(),
					"cleanup failed")
			}()
			for _, ontoCall := range tt.Do {
				h := rebaser.Onto(ontoCall.Onto)
				for _, rebaseCall := range ontoCall.Rebases {
					h = h.Rebase(rebaseCall.From, rebaseCall.To)
					assert.Equal(t, rebaseCall.WantBase, h.Base())
					if rebaseCall.WantErr != "" {
						err := h.Err()
						if assert.Error(t, err) {
							assert.Contains(t, err.Error(), rebaseCall.WantErr)
						}
					}
				}
			}
			if len(tt.WantErrors) > 0 {
				err := rebaser.Err()
				require.Error(t, err, "expected failure")
				for _, msg := range tt.WantErrors {
					assert.Contains(t, err.Error(), msg)
				}
				return
			}
			require.NoError(t, rebaser.Err(), "expected success")
		})
	}
}
// checkoutUniqueBranchAlwaysSuccessful is a test stub for
// BulkRebaser.checkoutUniqueBranch: it pretends a branch named exactly
// `prefix` was created, keeping expected branch names deterministic.
func checkoutUniqueBranchAlwaysSuccessful(
	_ gateway.Git, prefix string, _ string,
) (string, error) {
	return prefix, nil
}
|
package delete
import (
"fmt"
"github.com/MakeNowJust/heredoc"
"github.com/cli/cli/pkg/iostreams"
"github.com/heaths/gh-label/internal/github"
"github.com/heaths/gh-label/internal/options"
"github.com/spf13/cobra"
)
// deleteOptions carries the resolved inputs of the delete command.
type deleteOptions struct {
	name string // label name to delete
	// client and io are injectable for tests; nil means "use defaults"
	// (see the delete function).
	client *github.Client
	io     *iostreams.IOStreams
}
// DeleteCmd builds the `gh label delete <name>` cobra command.
func DeleteCmd(globalOpts *options.GlobalOptions) *cobra.Command {
	opts := new(deleteOptions)
	return &cobra.Command{
		Use:   "delete <name>",
		Short: "Delete the label <name> from the repository",
		Example: heredoc.Doc(`
		$ gh label delete p1
		`),
		Args: cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.name = args[0]
			return delete(globalOpts, opts)
		},
	}
}
// delete removes the label through the GitHub client, constructing
// default client/io dependencies when none were injected via opts.
func delete(globalOpts *options.GlobalOptions, opts *deleteOptions) error {
	if opts.client == nil {
		owner, repo := globalOpts.Repo()
		opts.client = github.New(&github.Cli{
			Owner: owner,
			Repo:  repo,
		})
	}
	if opts.io == nil {
		opts.io = iostreams.System()
	}
	err := opts.client.DeleteLabel(opts.name)
	if err != nil {
		return fmt.Errorf("failed to delete label: %w", err)
	}
	// Only chat on an interactive terminal.
	if opts.io.IsStdoutTTY() {
		fmt.Fprintf(opts.io.Out, "Deleted label '%s'\n", opts.name)
	}
	return nil
}
|
package config
import (
"fmt"
"github.com/BurntSushi/toml"
"github.com/pkg/errors"
)
// Options holds the microservice settings.
type Options struct {
	HTTPServer HTTPServer // HTTP server section of the config
}
// Init loads the microservice configuration from the TOML file at
// configPath.
//
// NOTE(review): options is a nil *Options named return, so &options is
// a **Options; BurntSushi/toml appears to allocate through pointer
// indirection during decode, but confirm the double pointer is
// intended rather than decoding into a value. The wrapped error text
// is user-facing Russian ("failed to load microservice configs") and
// is deliberately left unchanged.
func Init(configPath string) (options *Options, err error) {
	if _, err = toml.DecodeFile(configPath, &options); err != nil {
		return nil, errors.Wrap(err, "не удалось загрузить конфиги микросервиса")
	}
	return
}
// HTTPServer holds the HTTP server settings.
type HTTPServer struct {
	Host string // listen host
	Port int    // listen port
}

// GetDomain returns the server address formatted as "host:port".
func (s HTTPServer) GetDomain() string {
	return fmt.Sprintf("%s:%d", s.Host, s.Port)
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2019-05-04 08:49
# @File : validation.go
# @Description :
*/
package utils
import (
"math/rand"
"regexp"
"strconv"
"time"
)
// FIXME
// ValidatePhone reports whether phone looks like a Chinese mobile or
// landline number. The input is accepted if ANY of the three patterns
// matches; regExp3 in particular is unanchored and very permissive, so
// many non-phone strings pass — tighten before relying on this.
func ValidatePhone(phone string) bool {
	// Mobile numbers: whitelisted prefixes followed by 8 digits.
	regExp := "^((13[0-9])|(14[5,7])|(15[0-3,5-9])|(17[0,3,5-8])|(18[0-9])|166|198|199|(147))\\d{8}$"
	// Landline numbers (optional area code); the stricter variant below
	// was left commented out by the original author.
	// regExp2 := "^((13[0-9])|(14[5|7])|(15([0-3]|[5-9]))|(18[0,5-9]))\\d{8}$|^0\\d{2,3}-?\\d{7,8}$"
	regExp2 := "^(0\\d{2,3}-?)d?\\d{7,8}$|\\d{7,8}"
	regExp3 := "(\\d{2,5}-\\d{7,8}(-\\d{1,})?)|(13\\d{9})|(159\\d{8})"
	m1, _ := regexp.MatchString(regExp, phone)
	m2, _ := regexp.MatchString(regExp2, phone)
	m3, _ := regexp.MatchString(regExp3, phone)
	return m1 || m2 || m3
}
// GenValidateCode returns a random numeric SMS verification code of
// the given width (e.g. width 6 -> "042917").
//
// Improvements over the previous version: the digit lookup table is
// replaced with direct '0'+rand.Intn(10) arithmetic, and the code is
// assembled in a single []byte instead of repeated string
// concatenation (which allocated a fresh string per digit).
//
// NOTE(review): rand.Seed on every call is kept from the original, but
// it is deprecated since Go 1.20 and two calls within the same
// nanosecond produce identical codes — consider seeding once at
// startup, or crypto/rand for security-sensitive codes.
func GenValidateCode(width int) string {
	rand.Seed(time.Now().UnixNano())
	buf := make([]byte, width)
	for i := range buf {
		buf[i] = byte('0' + rand.Intn(10))
	}
	return string(buf)
}
|
// Copyright 2017 Łukasz Pankowski <lukpank at o2 dot pl>. All rights
// reserved. This source code is licensed under the terms of the MIT
// license. See LICENSE file for details.
package jsonlexer_test
import (
"bytes"
"fmt"
"io"
"strings"
"testing"
"github.com/lukpank/jsonlexer"
)
// TestLexerEmptyArray checks that an empty array surrounded by
// whitespace lexes as '[' then ']' for every reader-split variant
// (both normal and early-EOF halves of the readers matrix).
func TestLexerEmptyArray(t *testing.T) {
	r := &readers{S: " [\r\n\t]"}
	for i := 0; i < 2*r.Len(); i++ {
		l := jsonlexer.New(r.Get(i))
		if err := l.Delim('['); err != nil {
			t.Fatalf("expected '[' but got error: %v", err)
		}
		if err := l.Delim(']'); err != nil {
			t.Fatalf("expected ']' but got error: %v", err)
		}
	}
}
// TestLexerEmptyArrayMore additionally checks More() reports no
// elements in an empty array and that EOF() succeeds afterwards, with
// and without trailing whitespace, across all reader-split variants.
func TestLexerEmptyArrayMore(t *testing.T) {
	for _, s := range []string{" [\r\n\t]", " [\r\n\t]\r\n"} {
		r := &readers{S: s}
		for i := 0; i < 2*r.Len(); i++ {
			l := jsonlexer.New(r.Get(i))
			if err := l.Delim('['); err != nil {
				t.Fatalf("expected '[' but got error: %v", err)
			}
			more, err := l.More()
			if err != nil {
				t.Fatalf("expected ',' but got error: %v", err)
			}
			if more {
				t.Fatal("expected false but got true")
			}
			if err := l.Delim(']'); err != nil {
				t.Fatalf("expected ']' but got error: %v", err)
			}
			if err := l.EOF(); err != nil {
				t.Fatalf("expected EOF but got error: %v", err)
			}
		}
	}
}
// TestLexerInt64 checks lexing a negative integer with and without a
// trailing space, across all reader-split variants.
func TestLexerInt64(t *testing.T) {
	for i, s := range []string{" \r\n-123", " \r\n-123 "} {
		r := &readers{S: s}
		for j := 0; j < 2*r.Len(); j++ {
			t.Run(fmt.Sprintf("s%d/%d", i, j), func(t *testing.T) {
				l := jsonlexer.New(r.Get(j))
				expectedInt64(t, l, -123)
			})
		}
	}
}
// TestLexerArrayInt64 checks lexing an array of two integers,
// interleaving More() between elements, across all reader variants.
func TestLexerArrayInt64(t *testing.T) {
	r := &readers{S: " [\r\n123, -84\t]"}
	for i := 0; i < 2*r.Len(); i++ {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			l := jsonlexer.New(r.Get(i))
			if err := l.Delim('['); err != nil {
				t.Fatalf("expected '[' but got error: %v", err)
			}
			expectedMore(t, l)
			expectedInt64(t, l, 123)
			expectedMore(t, l)
			expectedInt64(t, l, -84)
			if err := l.Delim(']'); err != nil {
				t.Fatalf("expected ']' but got error: %v", err)
			}
		})
	}
}
// TestLexerFloat64 checks lexing a negative float with and without a
// trailing space, across all reader-split variants.
func TestLexerFloat64(t *testing.T) {
	for i, s := range []string{" \r\n-1.5", " \r\n-1.5 "} {
		r := &readers{S: s}
		for j := 0; j < 2*r.Len(); j++ {
			t.Run(fmt.Sprintf("s%d/%d", i, j), func(t *testing.T) {
				l := jsonlexer.New(r.Get(j))
				expectedFloat64(t, l, -1.5)
			})
		}
	}
}
// TestLexerArrayFloat64 checks lexing exponent-form floats inside an
// array (1e3 -> 1000, -3.25e2 -> -325), across all reader variants.
func TestLexerArrayFloat64(t *testing.T) {
	r := &readers{S: " [\r\n1e3, -3.25e2\t]"}
	for i := 0; i < 2*r.Len(); i++ {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			l := jsonlexer.New(r.Get(i))
			if err := l.Delim('['); err != nil {
				t.Fatalf("expected '[' but got error: %v", err)
			}
			expectedMore(t, l)
			expectedFloat64(t, l, 1000)
			expectedMore(t, l)
			expectedFloat64(t, l, -325)
			if err := l.Delim(']'); err != nil {
				t.Fatalf("expected ']' but got error: %v", err)
			}
		})
	}
}
// TestLexerBool checks lexing of true/false (the first two inputs are
// true, hence `i < 2` as the expected value), with and without a
// trailing space, across all reader-split variants.
func TestLexerBool(t *testing.T) {
	for i, s := range []string{" \r\ntrue", " \r\ntrue ", " \r\nfalse", " \r\nfalse "} {
		r := &readers{S: s}
		for j := 0; j < 2*r.Len(); j++ {
			t.Run(fmt.Sprintf("s%d/%d", i, j), func(t *testing.T) {
				l := jsonlexer.New(r.Get(j))
				expectedBool(t, l, i < 2)
			})
		}
	}
}
// TestLexerArrayBool checks lexing an array of booleans across all
// reader-split variants.
func TestLexerArrayBool(t *testing.T) {
	r := &readers{S: " [\r\ntrue, false\t]"}
	for i := 0; i < 2*r.Len(); i++ {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			l := jsonlexer.New(r.Get(i))
			if err := l.Delim('['); err != nil {
				t.Fatalf("expected '[' but got error: %v", err)
			}
			expectedMore(t, l)
			expectedBool(t, l, true)
			expectedMore(t, l)
			expectedBool(t, l, false)
			if err := l.Delim(']'); err != nil {
				t.Fatalf("expected ']' but got error: %v", err)
			}
		})
	}
}
// TestLexerString checks string lexing — escapes, \uXXXX sequences,
// and invalid UTF-8 bytes replaced by U+FFFD — via both String() and
// StringWriteTo(), across all reader-split variants. The tail of the
// test also runs StringWriteTo on a 48 KiB string to exercise
// buffer-boundary handling at two specific split positions.
func TestLexerString(t *testing.T) {
	cases := []struct{ input, output string }{
		{" \r\n\"test\"", "test"},
		{" \r\n\"test\" ", "test"},
		{` "test\b\u0105ę\f\n\r\t"`, "test\bąę\f\n\r\t"},
		{` "test\b\u0105ę\f\n\r\t" `, "test\bąę\f\n\r\t"},
		{` "test\b\u0105` + "\x80" + `ę\f\n\r\t"`, "test\bą\uFFFDę\f\n\r\t"},
		{` "test\b\u0105ę` + "\x80" + `\f\n\r\t" `, "test\bąę\uFFFD\f\n\r\t"},
	}
	for i, c := range cases {
		r := &readers{S: c.input}
		for j := 0; j < 2*r.Len(); j++ {
			t.Run(fmt.Sprintf("s%d/%s", i, r.Name(j)), func(t *testing.T) {
				l := jsonlexer.New(r.Get(j))
				expectedString(t, l, c.output)
			})
			t.Run(fmt.Sprintf("s%d/%s/w", i, r.Name(j)), func(t *testing.T) {
				l := jsonlexer.New(r.Get(j))
				expectedStringWriteTo(t, l, c.output)
			})
		}
	}
	output := strings.Repeat("abc", 16384)
	s := `"` + output + `"`
	r := &readers{S: s}
	l := jsonlexer.New(r.Get(101))
	expectedStringWriteTo(t, l, output)
	l = jsonlexer.New(r.Get(5 + r.Len()))
	expectedStringWriteTo(t, l, output)
}
// TestLexerArrayString checks lexing an array of strings across all
// reader-split variants.
func TestLexerArrayString(t *testing.T) {
	r := &readers{S: " [\r\n\"test\", \"123\"\t]"}
	for i := 0; i < 2*r.Len(); i++ {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			l := jsonlexer.New(r.Get(i))
			if err := l.Delim('['); err != nil {
				t.Fatalf("expected '[' but got error: %v", err)
			}
			expectedMore(t, l)
			expectedString(t, l, "test")
			expectedMore(t, l)
			expectedString(t, l, "123")
			if err := l.Delim(']'); err != nil {
				t.Fatalf("expected ']' but got error: %v", err)
			}
		})
	}
}
// TestLexerStringValue checks StringValue both positively (the exact
// expected string matches) and negatively (a truncated expectation,
// c.output[1:], must produce an error), across all reader variants.
func TestLexerStringValue(t *testing.T) {
	cases := []struct{ input, output string }{
		{" \r\n\"test\"", "test"},
		{" \r\n\"test\" ", "test"},
		{` "test\b\u0105ę\f\n\r\t"`, "test\bąę\f\n\r\t"},
		{` "test\b\u0105ę\f\n\r\t" `, "test\bąę\f\n\r\t"},
		{` "test\b\u0105` + "\x80" + `ę\f\n\r\t"`, "test\bą\uFFFDę\f\n\r\t"},
		{` "test\b\u0105ę` + "\x80" + `\f\n\r\t" `, "test\bąę\uFFFD\f\n\r\t"},
	}
	for i, c := range cases {
		r := &readers{S: c.input}
		for j := 0; j < 2*r.Len(); j++ {
			t.Run(fmt.Sprintf("s%d/%s", i, r.Name(j)), func(t *testing.T) {
				l := jsonlexer.New(r.Get(j))
				expectedStringValue(t, l, c.output)
				l = jsonlexer.New(r.Get(j))
				err := l.StringValue(c.output[1:])
				if err == nil {
					t.Fatal("expected error")
				}
			})
		}
	}
}
// expectedMore asserts that the lexer reports another array element.
func expectedMore(t *testing.T, l *jsonlexer.Lexer) {
	switch more, err := l.More(); {
	case err != nil:
		t.Fatalf("expected more but got error: %v", err)
	case !more:
		t.Fatal("expected more but got false")
	}
}
// expectedInt64 asserts the next token is the given int64.
func expectedInt64(t *testing.T, l *jsonlexer.Lexer, expected int64) {
	switch got, err := l.Int64(); {
	case err != nil:
		t.Fatalf("expected %d but got error: %v", expected, err)
	case got != expected:
		t.Errorf("expected %d but got: %d", expected, got)
	}
}
// expectedFloat64 asserts the next token is the given float64.
func expectedFloat64(t *testing.T, l *jsonlexer.Lexer, expected float64) {
	switch got, err := l.Float64(); {
	case err != nil:
		t.Fatalf("expected %g but got error: %v", expected, err)
	case got != expected:
		t.Errorf("expected %g but got: %g", expected, got)
	}
}
// expectedBool asserts the next token is the given boolean.
func expectedBool(t *testing.T, l *jsonlexer.Lexer, expected bool) {
	switch got, err := l.Bool(); {
	case err != nil:
		t.Fatalf("expected %t but got error: %v", expected, err)
	case got != expected:
		t.Errorf("expected %t but got: %t", expected, got)
	}
}
// expectedString asserts the next token is the given string.
func expectedString(t *testing.T, l *jsonlexer.Lexer, expected string) {
	switch got, err := l.String(); {
	case err != nil:
		t.Fatalf("expected %q but got error: %v", expected, err)
	case got != expected:
		t.Errorf("expected %q but got: %q", expected, got)
	}
}
// expectedStringWriteTo asserts that StringWriteTo produces the
// expected string and reports the matching byte count. The content
// and count checks are independent so a failure reports both.
func expectedStringWriteTo(t *testing.T, l *jsonlexer.Lexer, expected string) {
	var buf bytes.Buffer
	n, err := l.StringWriteTo(&buf)
	if err != nil {
		t.Fatalf("expected %q but got error: %v", expected, err)
	}
	got := buf.String()
	if got != expected {
		t.Errorf("expected %q but got: %q", expected, got)
	}
	if n != int64(len(got)) {
		t.Errorf("expected n=%d but got: n=%d", len(got), n)
	}
}
// expectedStringValue asserts the next JSON string equals expected.
func expectedStringValue(t *testing.T, l *jsonlexer.Lexer, expected string) {
	if err := l.StringValue(expected); err != nil {
		t.Fatalf("expected %q but got error: %v", expected, err)
	}
}
// TestSplit1StringsReader verifies split1StringReader yields "test1"
// in exactly two chunks ("te", "st1") and then io.EOF. The second
// check tolerates io.EOF arriving with the final chunk.
func TestSplit1StringsReader(t *testing.T) {
	r := &split1StringReader{s: "test1", split: 2}
	b := make([]byte, 10)
	n, err := r.Read(b)
	if err != nil || n != 2 || string(b[:n]) != "te" {
		t.Fatalf(`expected "te" but got %q (%v, %q)`, b[:n], n, err)
	}
	n, err = r.Read(b)
	if err != nil && err != io.EOF || n != 3 || string(b[:n]) != "st1" {
		t.Fatalf(`expected "st1" but got %q (%d, %v)`, b[:n], n, err)
	}
	n, err = r.Read(b)
	if err != io.EOF || n != 0 {
		t.Fatalf(`expected (0, io.EOF) but got %q (%d, %v)`, b[:n], n, err)
	}
}
// TestSplitNStringsReader verifies splitNStringReader yields "test1"
// in fixed-size chunks ("te", "st", "1") and then io.EOF. The later
// checks tolerate io.EOF arriving with the final chunk.
//
// Fix: the failure messages for the second and third reads were
// copy-pasted from a different case and claimed "st1"; they now state
// the values these reads actually expect ("st" and "1").
func TestSplitNStringsReader(t *testing.T) {
	r := &splitNStringReader{s: "test1", split: 2}
	b := make([]byte, 10)
	n, err := r.Read(b)
	if err != nil || n != 2 || string(b[:n]) != "te" {
		t.Fatalf(`expected "te" but got %q (%v, %q)`, b[:n], n, err)
	}
	n, err = r.Read(b)
	if err != nil || n != 2 || string(b[:n]) != "st" {
		t.Fatalf(`expected "st" but got %q (%d, %v)`, b[:n], n, err)
	}
	n, err = r.Read(b)
	if err != nil && err != io.EOF || n != 1 || string(b[:n]) != "1" {
		t.Fatalf(`expected "1" but got %q (%d, %v)`, b[:n], n, err)
	}
	n, err = r.Read(b)
	if err != io.EOF || n != 0 {
		t.Fatalf(`expected (0, io.EOF) but got %q (%d, %v)`, b[:n], n, err)
	}
}
// readers generates a family of io.Reader variants over the string S,
// each splitting S into chunks differently (see Get), to exercise the
// lexer across read-boundary positions.
type readers struct {
	S string
	i int // NOTE(review): appears unused by the visible methods
}
// Len returns the number of distinct reader variants for S:
// (3*len(S)-1)/2, which per Get covers len(S) single-split variants
// plus the fixed-chunk-size variants. Callers iterate 2*Len() to also
// cover the early-EOF half of the matrix.
func (r *readers) Len() int {
	return (3*len(r.S) - 1) / 2
}
// Get returns the i-th reader variant over S. Indexes [0, Len()) use
// normal EOF delivery; indexes [Len(), 2*Len()) map to the same
// variants with earlyEOF set (io.EOF returned together with the final
// bytes). Within a half, the first len(S) indexes split once after
// byte i+1 (split1StringReader); the remaining indexes read in fixed
// chunks of size i (splitNStringReader).
func (r *readers) Get(i int) io.Reader {
	rLen := r.Len()
	earlyEOF := i > rLen
	if i > rLen {
		i -= rLen
	}
	if i < len(r.S) {
		return &split1StringReader{s: r.S, split: i + 1, earlyEOF: earlyEOF}
	}
	i -= len(r.S) - 1
	return &splitNStringReader{s: r.S, split: i, earlyEOF: earlyEOF}
}
// Name returns a subtest label for variant i, mirroring Get's mapping:
// "1_<pos>" for single-split readers and "N_<size>" for fixed-chunk
// readers.
func (r *readers) Name(i int) string {
	if i < len(r.S) {
		return fmt.Sprintf("1_%d", i+1)
	}
	i -= len(r.S) - 1
	return fmt.Sprintf("N_%d", i)
}
type split1StringReader struct {
s string
split int
pos int
earlyEOF bool
}
func (r *split1StringReader) Read(b []byte) (int, error) {
if r.pos == len(r.s) {
return 0, io.EOF
}
if r.pos < r.split {
n := copy(b, r.s[r.pos:r.split])
r.pos += n
return n, nil
}
n := copy(b, r.s[r.pos:])
r.pos += n
if r.pos == len(r.s) { // only if earlyEOF
return n, io.EOF
}
return n, nil
}
type splitNStringReader struct {
s string
split int
pos int
earlyEOF bool
}
func (r *splitNStringReader) Read(b []byte) (int, error) {
if r.pos == len(r.s) {
return 0, io.EOF
}
end := r.pos + r.split
if end > len(r.s) {
end = len(r.s)
}
n := copy(b, r.s[r.pos:end])
r.pos += n
if r.pos == len(r.s) { // only if earlyEOF
return n, io.EOF
}
return n, nil
}
|
package main
import (
"fmt"
"io/ioutil"
"net/http"
"os"
)
// df is the directory downloaded pages are written into.
const df = "downloaded"
// download fetches page indexN.html of the PTT movie board and writes
// it under the download directory. Failures are reported and the page
// skipped, so one bad page does not abort the whole crawl.
//
// Fixes over the previous version: the http.Get error was ignored (a
// network failure left resp nil and panicked on resp.Body), the
// response body was never closed (leaking connections across the
// loop), and read/write errors were silently dropped.
func download(i int) {
	fn := fmt.Sprintf("index%d.html", i)
	resp, err := http.Get("https://www.ptt.cc/bbs/movie/" + fn)
	if err != nil {
		fmt.Println("download", fn, "failed:", err)
		return
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("read", fn, "failed:", err)
		return
	}
	if err := ioutil.WriteFile(df+"/"+fn, body, 0644); err != nil {
		fmt.Println("write", fn, "failed:", err)
	}
}
// main downloads index pages 1 through 10 of the PTT movie board.
func main() {
	// Create the output directory. The error is deliberately ignored:
	// the common failure is "already exists", and a genuinely missing
	// directory will surface when the downloaded files are written.
	os.Mkdir(df, 0700)
	for i := 1; i <= 10; i++ {
		download(i)
	}
}
|
package filelist
import (
"bytes"
"errors"
)
var (
	// errInvalidEscape reports a '%' that is not followed by at least
	// two more bytes in the input.
	errInvalidEscape = errors.New("invalid escape sequence")
)
// hexToInt converts a single ASCII hex digit to its value (0-15);
// any non-hex byte yields 0.
func hexToInt(in byte) uint8 {
	switch {
	case in >= '0' && in <= '9':
		return in - '0'
	case in >= 'a' && in <= 'f':
		return in - 'a' + 10
	case in >= 'A' && in <= 'F':
		return in - 'A' + 10
	default:
		return 0
	}
}
// appendUnescapeUrl appends in to dst while decoding %XX escape
// sequences, returning errInvalidEscape when a '%' is not followed by
// two more bytes. Invalid hex digits decode as 0 (see hexToInt).
func appendUnescapeUrl(dst, in []byte) ([]byte, error) {
	for len(in) > 0 {
		pct := bytes.IndexByte(in, '%')
		if pct < 0 {
			break
		}
		dst = append(dst, in[:pct]...)
		if pct+2 >= len(in) {
			return dst, errInvalidEscape
		}
		dst = append(dst, hexToInt(in[pct+1])<<4|hexToInt(in[pct+2]))
		in = in[pct+3:]
	}
	return append(dst, in...), nil
}
|
package coupons
import (
"context"
"database/sql"
"time"
"cinemo.com/shoping-cart/internal/discounts"
"cinemo.com/shoping-cart/internal/products"
)
// Service is the interface to expose coupons functions: creating a
// coupon and retrieving one by product association, name, or id.
type Service interface {
	CreateCoupon(ctx context.Context, now time.Time) (*Coupon, error)
	RetrieveCouponProduct(ctx context.Context, couponName string, productID int64, timestamp time.Time) (*Coupon, error)
	RetrieveCouponByName(ctx context.Context, couponName string) (*Coupon, error)
	RetrieveCouponByID(ctx context.Context, ID int64) (*Coupon, error)
}
// couponService is the database-backed implementation of Service; it
// delegates product and discount lookups to the injected services.
type couponService struct {
	db              *sql.DB
	productService  products.Service
	discountService discounts.Service
}
// NewCouponService creates new coupon service backed by db and the
// given product and discount services.
func NewCouponService(db *sql.DB, productService products.Service, discountService discounts.Service) Service {
	svc := couponService{
		db:              db,
		productService:  productService,
		discountService: discountService,
	}
	return &svc
}
|
package client
import (
"context"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"fmt"
"github.com/Azure/go-autorest/autorest/to"
"github.com/pkg/errors"
az "github.com/ydye/personal-az-sdk-practise/pkg/azure"
"io/ioutil"
"net/http"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2017-03-01/apimanagement"
"github.com/Azure/azure-sdk-for-go/services/authorization/mgmt/2015-07-01/authorization"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-08-01/network"
"github.com/Azure/azure-sdk-for-go/services/preview/operationalinsights/mgmt/2015-11-01-preview/operationalinsights"
"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-06-01/subscriptions"
"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-02-01/storage"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
log "github.com/sirupsen/logrus"
)
const (
	// ApplicationDir is the per-user directory name used by this example.
	ApplicationDir = ".azexample"
)
var (
	// RequiredResourceProviders is the list of Azure Resource Providers needed for AKS Engine to function
	// NOTE(review): comment inherited from AKS Engine; here these are the
	// providers this client registers/expects — confirm the wording.
	RequiredResourceProviders = []string{"Microsoft.Compute", "Microsoft.Storage", "Microsoft.Network"}
)
// AzureClient implements the `AKSEngineClient` interface.
// This client is backed by real Azure clients talking to an ARM endpoint.
// It bundles the per-service SDK clients for one subscription, all
// configured with the same environment, authorizers, and languages.
type AzureClient struct {
	acceptLanguages []string
	auxiliaryTokens []string
	environment     azure.Environment
	subscriptionID  string

	authorizationClient             authorization.RoleAssignmentsClient
	deploymentsClient               resources.DeploymentsClient
	deploymentOperationsClient      resources.DeploymentOperationsClient
	resourcesClient                 apimanagement.GroupClient
	resourceSkusClient              compute.ResourceSkusClient
	storageAccountsClient           storage.AccountsClient
	interfacesClient                network.InterfacesClient
	groupsClient                    resources.GroupsClient
	subscriptionsClient             subscriptions.Client
	providersClient                 resources.ProvidersClient
	virtualMachinesClient           compute.VirtualMachinesClient
	virtualMachineScaleSetsClient   compute.VirtualMachineScaleSetsClient
	virtualMachineScaleSetVMsClient compute.VirtualMachineScaleSetVMsClient
	virtualMachineExtensionsClient  compute.VirtualMachineExtensionsClient
	availabilitySetsClient          compute.AvailabilitySetsClient
	workspacesClient                operationalinsights.WorkspacesClient
	virtualMachineImagesClient      compute.VirtualMachineImagesClient
	// Azure AD Graph clients use a separate (graph) token.
	applicationsClient      graphrbac.ApplicationsClient
	servicePrincipalsClient graphrbac.ServicePrincipalsClient
}
// NewAzureClientWithClientSecret returns an AzureClient authenticated with a
// service principal's client_id/client_secret pair. One token is acquired per
// audience: the ARM management plane and the AAD graph endpoint.
func NewAzureClientWithClientSecret(env azure.Environment, subscriptionID, clientID, clientSecret string) (*AzureClient, error) {
	oauthConfig, tenantID, err := getOAuthConfig(env, subscriptionID)
	if err != nil {
		return nil, err
	}
	armToken, err := adal.NewServicePrincipalToken(*oauthConfig, clientID, clientSecret, env.ServiceManagementEndpoint)
	if err != nil {
		return nil, err
	}
	graphToken, err := adal.NewServicePrincipalToken(*oauthConfig, clientID, clientSecret, env.GraphEndpoint)
	if err != nil {
		return nil, err
	}
	// A failed graph refresh is only logged, not treated as fatal.
	if err = graphToken.Refresh(); err != nil {
		log.Error(err)
	}
	armAuth := autorest.NewBearerAuthorizer(armToken)
	graphAuth := autorest.NewBearerAuthorizer(graphToken)
	return getClient(env, subscriptionID, tenantID, armAuth, graphAuth), nil
}
// NewAzureClientWithClientCertificateFile returns an AzureClient that
// authenticates via a JWT certificate assertion, loading the certificate and
// RSA private key from the given PEM files.
func NewAzureClientWithClientCertificateFile(env azure.Environment, subscriptionID, clientID, certificatePath, privateKeyPath string) (*AzureClient, error) {
	pemBytes, err := ioutil.ReadFile(certificatePath)
	if err != nil {
		return nil, errors.Wrap(err, "Failed to read certificate")
	}
	pemBlock, _ := pem.Decode(pemBytes)
	if pemBlock == nil {
		return nil, errors.New("Failed to decode pem block from certificate")
	}
	cert, err := x509.ParseCertificate(pemBlock.Bytes)
	if err != nil {
		return nil, errors.Wrap(err, "Failed to parse certificate")
	}
	key, err := parseRsaPrivateKey(privateKeyPath)
	if err != nil {
		return nil, errors.Wrap(err, "Failed to parse rsa private key")
	}
	return NewAzureClientWithClientCertificate(env, subscriptionID, clientID, cert, key)
}
// NewAzureClientWithClientCertificate returns an AzureClient that
// authenticates via a JWT certificate assertion, using an already-parsed
// certificate and RSA private key. It resolves the subscription's tenant and
// delegates to newAzureClientWithCertificate.
func NewAzureClientWithClientCertificate(env azure.Environment, subscriptionID, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey) (*AzureClient, error) {
	oauthConfig, tenantID, err := getOAuthConfig(env, subscriptionID)
	if err != nil {
		return nil, err
	}
	return newAzureClientWithCertificate(env, oauthConfig, subscriptionID, clientID, tenantID, certificate, privateKey)
}
// newAzureClientWithCertificate builds an AzureClient from certificate
// credentials, acquiring one token per audience (ARM and AAD graph).
// Both certificate and privateKey must be non-nil.
func newAzureClientWithCertificate(env azure.Environment, oauthConfig *adal.OAuthConfig, subscriptionID, clientID, tenantID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey) (*AzureClient, error) {
	switch {
	case certificate == nil:
		return nil, errors.New("certificate should not be nil")
	case privateKey == nil:
		return nil, errors.New("privateKey should not be nil")
	}
	armToken, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, clientID, certificate, privateKey, env.ServiceManagementEndpoint)
	if err != nil {
		return nil, err
	}
	graphToken, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, clientID, certificate, privateKey, env.GraphEndpoint)
	if err != nil {
		return nil, err
	}
	// A failed graph refresh is only logged, not treated as fatal.
	if err = graphToken.Refresh(); err != nil {
		log.Error(err)
	}
	armAuth := autorest.NewBearerAuthorizer(armToken)
	graphAuth := autorest.NewBearerAuthorizer(graphToken)
	return getClient(env, subscriptionID, tenantID, armAuth, graphAuth), nil
}
// getOAuthConfig resolves the AAD tenant owning subscriptionID and builds the
// OAuth configuration for that tenant, returning the config and tenant ID.
func getOAuthConfig(env azure.Environment, subscriptionID string) (*adal.OAuthConfig, string, error) {
	tenantID, err := az.GetTenantID(env.ResourceManagerEndpoint, subscriptionID)
	if err != nil {
		return nil, "", err
	}
	cfg, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, tenantID)
	if err != nil {
		return nil, "", err
	}
	return cfg, tenantID, nil
}
// getClient assembles an AzureClient: it constructs every service client
// against the environment's endpoints, wires the ARM authorizer into the
// management-plane clients and the graph authorizer into the AAD clients,
// then applies polling delay/timeout defaults.
func getClient(env azure.Environment, subscriptionID, tenantID string, armAuthorizer autorest.Authorizer, graphAuthorizer autorest.Authorizer) *AzureClient {
	c := &AzureClient{
		environment:                     env,
		subscriptionID:                  subscriptionID,
		authorizationClient:             authorization.NewRoleAssignmentsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
		deploymentsClient:               resources.NewDeploymentsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
		deploymentOperationsClient:      resources.NewDeploymentOperationsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
		resourcesClient:                 apimanagement.NewGroupClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
		resourceSkusClient:              compute.NewResourceSkusClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
		storageAccountsClient:           storage.NewAccountsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
		interfacesClient:                network.NewInterfacesClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
		groupsClient:                    resources.NewGroupsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
		subscriptionsClient:             subscriptions.NewClientWithBaseURI(env.ResourceManagerEndpoint),
		providersClient:                 resources.NewProvidersClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
		virtualMachinesClient:           compute.NewVirtualMachinesClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
		virtualMachineScaleSetsClient:   compute.NewVirtualMachineScaleSetsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
		virtualMachineScaleSetVMsClient: compute.NewVirtualMachineScaleSetVMsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
		virtualMachineExtensionsClient:  compute.NewVirtualMachineExtensionsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
		availabilitySetsClient:          compute.NewAvailabilitySetsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
		workspacesClient:                operationalinsights.NewWorkspacesClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
		virtualMachineImagesClient:      compute.NewVirtualMachineImagesClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
		// The AAD graph clients are scoped to the tenant, not the subscription.
		applicationsClient:      graphrbac.NewApplicationsClientWithBaseURI(env.GraphEndpoint, tenantID),
		servicePrincipalsClient: graphrbac.NewServicePrincipalsClientWithBaseURI(env.GraphEndpoint, tenantID),
	}
	// Management-plane clients authenticate against ARM.
	c.authorizationClient.Authorizer = armAuthorizer
	c.availabilitySetsClient.Authorizer = armAuthorizer
	c.deploymentOperationsClient.Authorizer = armAuthorizer
	c.deploymentsClient.Authorizer = armAuthorizer
	c.groupsClient.Authorizer = armAuthorizer
	c.interfacesClient.Authorizer = armAuthorizer
	c.providersClient.Authorizer = armAuthorizer
	c.resourcesClient.Authorizer = armAuthorizer
	c.resourceSkusClient.Authorizer = armAuthorizer
	c.storageAccountsClient.Authorizer = armAuthorizer
	c.subscriptionsClient.Authorizer = armAuthorizer
	c.virtualMachineExtensionsClient.Authorizer = armAuthorizer
	c.virtualMachineImagesClient.Authorizer = armAuthorizer
	c.virtualMachineScaleSetsClient.Authorizer = armAuthorizer
	c.virtualMachineScaleSetVMsClient.Authorizer = armAuthorizer
	c.virtualMachinesClient.Authorizer = armAuthorizer
	c.workspacesClient.Authorizer = armAuthorizer
	// Graph clients authenticate against the AAD graph endpoint.
	c.applicationsClient.Authorizer = graphAuthorizer
	c.servicePrincipalsClient.Authorizer = graphAuthorizer
	c.deploymentsClient.PollingDelay = time.Second * 5
	c.resourcesClient.PollingDelay = time.Second * 5
	// Set permissive timeouts to accommodate long-running operations
	c.applicationsClient.PollingDuration = DefaultARMOperationTimeout
	c.authorizationClient.PollingDuration = DefaultARMOperationTimeout
	c.availabilitySetsClient.PollingDuration = DefaultARMOperationTimeout
	c.deploymentOperationsClient.PollingDuration = DefaultARMOperationTimeout
	c.deploymentsClient.PollingDuration = DefaultARMOperationTimeout
	c.groupsClient.PollingDuration = DefaultARMOperationTimeout
	c.subscriptionsClient.PollingDuration = DefaultARMOperationTimeout
	c.interfacesClient.PollingDuration = DefaultARMOperationTimeout
	c.providersClient.PollingDuration = DefaultARMOperationTimeout
	c.resourcesClient.PollingDuration = DefaultARMOperationTimeout
	c.resourceSkusClient.PollingDuration = DefaultARMOperationTimeout
	c.servicePrincipalsClient.PollingDuration = DefaultARMOperationTimeout
	c.storageAccountsClient.PollingDuration = DefaultARMOperationTimeout
	c.virtualMachineExtensionsClient.PollingDuration = DefaultARMOperationTimeout
	c.virtualMachineImagesClient.PollingDuration = DefaultARMOperationTimeout
	c.virtualMachineScaleSetsClient.PollingDuration = DefaultARMOperationTimeout
	c.virtualMachineScaleSetVMsClient.PollingDuration = DefaultARMOperationTimeout
	c.virtualMachinesClient.PollingDuration = DefaultARMOperationTimeout
	c.workspacesClient.PollingDuration = DefaultARMOperationTimeout
	return c
}
// EnsureProvidersRegistered checks whether the subscription is registered to
// every provider in RequiredResourceProviders and registers it to any that
// are missing.
//
// NOTE(review): only the first page (up to 100) of providers is inspected;
// no paging over registeredProviders is performed — confirm that 100 always
// covers the required namespaces.
func (this *AzureClient) EnsureProvidersRegistered(subscriptionsID string) error {
	ctx, cancel := context.WithTimeout(context.Background(), DefaultARMOperationTimeout)
	defer cancel()
	registeredProviders, err := this.providersClient.List(ctx, to.Int32Ptr(100), "")
	if err != nil {
		return err
	}
	if registeredProviders.Values() == nil {
		return errors.Errorf("Providers list was nil. Subscription=%q", subscriptionsID)
	}
	// Map from lower-cased provider namespace to "already registered?".
	m := make(map[string]bool)
	for _, provider := range registeredProviders.Values() {
		m[strings.ToLower(to.String(provider.Namespace))] = to.String(provider.RegistrationState) == "Registered"
	}
	for _, provider := range RequiredResourceProviders {
		registered, ok := m[strings.ToLower(provider)]
		if !ok {
			// Fixed typo: was "Unknow resource provider".
			return errors.Errorf("Unknown resource provider %q", provider)
		}
		if registered {
			log.Debugf("Already registered for %q", provider)
			continue
		}
		log.Infof("Registering subscription to resource provider. provider=%q subscription=%q", provider, subscriptionsID)
		if _, err := this.providersClient.Register(ctx, provider); err != nil {
			return err
		}
	}
	return nil
}
// AddAcceptLanguages sets the list of languages to accept on this request and
// installs a request inspector adding them as Accept-Language headers on
// every owned service client.
//
// Consistency fix: build the decorator once and share it across all clients
// (mirroring AddAuxiliaryTokens) instead of constructing 19 identical
// closures. Note: this overwrites any inspector previously installed by
// AddAuxiliaryTokens, and vice versa.
func (this *AzureClient) AddAcceptLanguages(languages []string) {
	this.acceptLanguages = languages
	withLanguages := this.addAcceptLanguages()
	this.applicationsClient.Client.RequestInspector = withLanguages
	this.authorizationClient.Client.RequestInspector = withLanguages
	this.availabilitySetsClient.Client.RequestInspector = withLanguages
	this.deploymentOperationsClient.Client.RequestInspector = withLanguages
	this.deploymentsClient.Client.RequestInspector = withLanguages
	this.groupsClient.Client.RequestInspector = withLanguages
	this.interfacesClient.Client.RequestInspector = withLanguages
	this.providersClient.Client.RequestInspector = withLanguages
	this.resourcesClient.Client.RequestInspector = withLanguages
	this.resourceSkusClient.Client.RequestInspector = withLanguages
	this.servicePrincipalsClient.Client.RequestInspector = withLanguages
	this.storageAccountsClient.Client.RequestInspector = withLanguages
	this.subscriptionsClient.Client.RequestInspector = withLanguages
	this.virtualMachineExtensionsClient.Client.RequestInspector = withLanguages
	this.virtualMachineImagesClient.Client.RequestInspector = withLanguages
	this.virtualMachineScaleSetsClient.Client.RequestInspector = withLanguages
	this.virtualMachineScaleSetVMsClient.Client.RequestInspector = withLanguages
	this.virtualMachinesClient.Client.RequestInspector = withLanguages
	this.workspacesClient.Client.RequestInspector = withLanguages
}
// addAcceptLanguages builds a PrepareDecorator that appends each configured
// language as an Accept-Language header on every outgoing request.
func (this *AzureClient) addAcceptLanguages() autorest.PrepareDecorator {
	return func(p autorest.Preparer) autorest.Preparer {
		return autorest.PreparerFunc(func(req *http.Request) (*http.Request, error) {
			req, err := p.Prepare(req)
			if err != nil {
				return req, err
			}
			// Ranging over a nil slice is a no-op, so no explicit nil check
			// is required.
			for _, lang := range this.acceptLanguages {
				req.Header.Add("Accept-Language", lang)
			}
			return req, nil
		})
	}
}
// setAuxiliaryTokens builds a PrepareDecorator that stamps each non-empty
// auxiliary token into the x-ms-authorization-auxiliary header.
// Note: Header.Set overwrites, so only the last non-empty token survives.
func (this *AzureClient) setAuxiliaryTokens() autorest.PrepareDecorator {
	return func(p autorest.Preparer) autorest.Preparer {
		return autorest.PreparerFunc(func(req *http.Request) (*http.Request, error) {
			req, err := p.Prepare(req)
			if err != nil {
				return req, err
			}
			if req.Header == nil {
				req.Header = make(http.Header)
			}
			for _, token := range this.auxiliaryTokens {
				if token != "" {
					req.Header.Set("x-ms-authorization-auxiliary", fmt.Sprintf("Bearer %s", token))
				}
			}
			return req, nil
		})
	}
}
// AddAuxiliaryTokens sets the list of aux tokens to accept on this request
// and installs the token-stamping inspector on every owned service client.
//
// Bug fix: virtualMachineImagesClient was missing from this list even though
// AddAcceptLanguages covers it; it is now included so the two installers stay
// consistent. Note: this overwrites any inspector previously installed by
// AddAcceptLanguages, and vice versa.
func (this *AzureClient) AddAuxiliaryTokens(tokens []string) {
	this.auxiliaryTokens = tokens
	requestWithTokens := this.setAuxiliaryTokens()
	this.applicationsClient.Client.RequestInspector = requestWithTokens
	this.authorizationClient.Client.RequestInspector = requestWithTokens
	this.availabilitySetsClient.Client.RequestInspector = requestWithTokens
	this.deploymentOperationsClient.Client.RequestInspector = requestWithTokens
	this.deploymentsClient.Client.RequestInspector = requestWithTokens
	this.groupsClient.Client.RequestInspector = requestWithTokens
	this.interfacesClient.Client.RequestInspector = requestWithTokens
	this.providersClient.Client.RequestInspector = requestWithTokens
	this.resourcesClient.Client.RequestInspector = requestWithTokens
	this.resourceSkusClient.Client.RequestInspector = requestWithTokens
	this.servicePrincipalsClient.Client.RequestInspector = requestWithTokens
	this.storageAccountsClient.Client.RequestInspector = requestWithTokens
	this.subscriptionsClient.Client.RequestInspector = requestWithTokens
	this.virtualMachineExtensionsClient.Client.RequestInspector = requestWithTokens
	this.virtualMachineImagesClient.Client.RequestInspector = requestWithTokens
	this.virtualMachineScaleSetsClient.Client.RequestInspector = requestWithTokens
	this.virtualMachineScaleSetVMsClient.Client.RequestInspector = requestWithTokens
	this.virtualMachinesClient.Client.RequestInspector = requestWithTokens
	this.workspacesClient.Client.RequestInspector = requestWithTokens
}
// parseRsaPrivateKey loads a PEM-encoded RSA private key from path,
// accepting either PKCS#1 or PKCS#8 encodings (PKCS#1 is tried first).
func parseRsaPrivateKey(path string) (*rsa.PrivateKey, error) {
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	block, _ := pem.Decode(raw)
	if block == nil {
		return nil, errors.New("Failed to decode a pem block from private key")
	}
	pkcs1Key, errPkcs1 := x509.ParsePKCS1PrivateKey(block.Bytes)
	if errPkcs1 == nil {
		return pkcs1Key, nil
	}
	// Fall back to PKCS#8; keep both parse errors for the failure message.
	anyKey, errPkcs8 := x509.ParsePKCS8PrivateKey(block.Bytes)
	if errPkcs8 != nil {
		return nil, errors.Errorf("failed to parse private key as Pkcs#1 or Pkcs#8. (%s). (%s)", errPkcs1, errPkcs8)
	}
	rsaKey, ok := anyKey.(*rsa.PrivateKey)
	if !ok {
		return nil, errors.New("pkcs8 contained non-RSA key. Expected RSA key")
	}
	return rsaKey, nil
}
|
// primo2.go
// verifica se um número é primo
// arataca89@gmail.com
// 20210413
package main
import "fmt"
// isprime reports whether nr is a prime number.
//
// Bug fix: the original returned true for 0, 1, and negative numbers, which
// are not prime. Trial division is also bounded at √nr instead of nr: any
// composite number has a divisor no larger than its square root.
func isprime(nr int) bool {
	if nr < 2 {
		return false
	}
	for divisor := 2; divisor*divisor <= nr; divisor++ {
		if nr%divisor == 0 {
			return false
		}
	}
	return true
}
// main reads an integer from stdin and reports whether it is prime.
func main() {
	var candidate int
	fmt.Print("Entre com o número: ")
	fmt.Scanf("%d", &candidate)
	verdict := "não é primo"
	if isprime(candidate) {
		verdict = "é primo"
	}
	fmt.Println("O número", candidate, verdict)
}
|
package rpcd
import (
"github.com/Cloud-Foundations/Dominator/lib/errors"
"github.com/Cloud-Foundations/Dominator/lib/srpc"
"github.com/Cloud-Foundations/Dominator/proto/hypervisor"
)
// ChangeVmVolumeSize handles the ChangeVmVolumeSize SRPC call: it delegates
// to the manager and encodes any failure as a string in the response (the
// SRPC call itself always succeeds).
func (t *srpcType) ChangeVmVolumeSize(conn *srpc.Conn,
	request hypervisor.ChangeVmVolumeSizeRequest,
	reply *hypervisor.ChangeVmVolumeSizeResponse) error {
	err := t.manager.ChangeVmVolumeSize(request.IpAddress,
		conn.GetAuthInformation(), request.VolumeIndex, request.VolumeSize)
	*reply = hypervisor.ChangeVmVolumeSizeResponse{errors.ErrorToString(err)}
	return nil
}
|
package main
// ColorStringer is implemented by values that can render themselves as a
// colored string via ColorString.
type ColorStringer interface {
	ColorString() string
}
|
package logs
import (
"fmt"
"os"
)
// NewStdLogger creates a new stdout-backed logger.
//
// Bug fix: if opening /dev/stdout fails (e.g. on platforms without it), the
// original left fd nil, so the first Println/Printf panicked with a nil
// dereference. We now log the error and fall back to os.Stdout so the
// returned logger is always usable.
func NewStdLogger() *StdLogger {
	var stdlogger StdLogger
	file, err := os.OpenFile("/dev/stdout", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		fmt.Println(err)
		file = os.Stdout // fall back so writes never hit a nil fd
	}
	stdlogger.fd = file
	return &stdlogger
}
// Println writes v (space-separated, newline-terminated, as fmt.Sprintln)
// to the logger's fd, prefixed with the prefix and current date and time.
func (stdlog *StdLogger) Println(v ...interface{}) {
	date, time := getDateTime()
	line := stdlog.prefix + " " + date + " " + time + " " + fmt.Sprintln(v...)
	stdlog.fd.Write([]byte(line))
}
// Printf formats v per format (as fmt.Sprintf) and writes the result to the
// logger's fd, prefixed with the prefix and current date and time.
func (stdlog *StdLogger) Printf(format string, v ...interface{}) {
	date, time := getDateTime()
	line := stdlog.prefix + " " + date + " " + time + " " + fmt.Sprintf(format, v...)
	stdlog.fd.Write([]byte(line))
}
// Close closes the underlying file descriptor (/dev/stdout). The logger must
// not be used after Close.
func (stdlog *StdLogger) Close() {
	stdlog.fd.Close()
}
// SetPrefix sets the prefix prepended to every line this logger writes.
func (stdlog *StdLogger) SetPrefix(pref string) {
	stdlog.prefix = pref
}
|
package main
import (
"balansir/internal/configutil"
"balansir/internal/limitutil"
"balansir/internal/listenutil"
"balansir/internal/logutil"
"balansir/internal/poolutil"
"balansir/internal/rateutil"
"balansir/internal/watchutil"
"fmt"
"io/ioutil"
"os"
)
// main boots balansir: it loads and validates config.yml, starts the
// background pool-health and config-watch loops, optionally enables rate
// limiting, and serves HTTP or HTTPS according to the configuration.
func main() {
	logutil.Init()
	logutil.Info("Booting up...")

	raw, err := ioutil.ReadFile("config.yml")
	if err != nil {
		logutil.Fatal(fmt.Sprintf("Error reading configuration file: %v", err))
		logutil.Fatal("Balansir stopped!")
		os.Exit(1)
	}

	if errs := watchutil.FillConfiguration(raw); errs != nil {
		logutil.Fatal("Configuration errors:")
		for i, e := range errs {
			logutil.Fatal(e)
			// Exit only after the last error has been reported.
			if i == len(errs)-1 {
				logutil.Fatal("Balansir stopped!")
				os.Exit(1)
			}
		}
	}

	go poolutil.PoolCheck()
	go watchutil.WatchConfig()

	cfg := configutil.GetConfig()
	if cfg.RateLimit {
		go limitutil.GetLimiter().CleanOldVisitors()
	}
	rateutil.GetRateCounter()

	switch {
	case cfg.Protocol == "https" && cfg.Autocert:
		listenutil.ServeTLSWithAutocert()
	case cfg.Protocol == "https":
		listenutil.ServeTLSWithSelfSignedCerts()
	default:
		listenutil.Serve()
	}
}
|
package elasticrecipes
import (
"context"
"io/ioutil"
"log"
elastic "github.com/olivere/elastic"
)
// SetMap creates the given index, configuring it with the mapping/settings
// JSON read from mapjsonfile. It returns whether the index was created and
// any error encountered.
//
// Bug fixes: (1) the original dereferenced createIndex.Acknowledged even
// when CreateIndex returned an error, in which case createIndex may be nil —
// a panic; we now return on error. (2) log.Fatal exits the process, making
// the `return` after it dead code and hijacking the caller's error handling;
// a library helper should log and return instead.
func SetMap(client *elastic.Client, index string, mapjsonfile string) (created bool, e error) {
	log.Println("setting ", mapjsonfile, " for index ", index)
	buf, err := ioutil.ReadFile(mapjsonfile)
	if err != nil {
		log.Println(err)
		return false, err
	}
	createIndex, err := client.CreateIndex(index).BodyString(string(buf)).Do(context.Background())
	if err != nil {
		log.Println(err.Error())
		return false, err
	}
	if !createIndex.Acknowledged {
		// The cluster did not acknowledge the creation request.
		log.Println("Index could not be created", index)
		return false, nil
	}
	return true, nil
}
|
package neo
import "github.com/jmcvetta/neoism"
// Count returns the number of nodes with the given label, or of all nodes
// when modelType is ALL. It panics if the Cypher query fails.
func Count(modelType _Type) int {
	statement := `MATCH (:` + string(modelType) + `) RETURN count(*) as count`
	if modelType == ALL {
		statement = `MATCH (n) RETURN count(n) as count`
	}
	var result interface{}
	query := neoism.CypherQuery{Statement: statement, Result: &result}
	if err := db.Cypher(&query); err != nil {
		panic(err)
	}
	// Neo4j returns numbers as float64; +0.5 rounds to the nearest int.
	row := result.([]interface{})[0].(map[string]interface{})
	return int(row["count"].(float64) + 0.5)
}
// CheckExists reports whether a node with the given label and id property
// exists. It panics if the Cypher query fails.
func CheckExists(modelType _Type, id int64) bool {
	var result []interface{}
	query := neoism.CypherQuery{
		Statement:  `MATCH (e:` + string(modelType) + ` { id: {id} }) RETURN count(*) as count`,
		Parameters: neoism.Props{"id": id},
		Result:     &result,
	}
	if err := db.Cypher(&query); err != nil {
		panic(err)
	}
	// Neo4j returns numbers as float64; +0.5 rounds to the nearest int.
	count := int(result[0].(map[string]interface{})["count"].(float64) + 0.5)
	return count != 0
}
// Clear removes data of the given model type. When modelType is ALL it
// deletes nodes together with their relationships; otherwise it deletes only
// the labelled nodes. NOTE(review): the ALL query matches only nodes that
// have at least one relationship, so isolated nodes survive — confirm
// whether that is intended.
func Clear(modelType _Type) error {
	statement := `MATCH (e:` + string(modelType) + `) DELETE e`
	if modelType == ALL {
		statement = `MATCH (n)-[r]-() DELETE n, r`
	}
	return db.Cypher(&neoism.CypherQuery{Statement: statement})
}
|
package network
// IServer is the contract for a network server that can listen for incoming
// connections and shut down.
type IServer interface {
	// Listen starts the server with the given packet implementation, using
	// ports in [startPort, endPort]; isAllowConnFunc filters incoming
	// connections. The meaning of the returned int (bound port or status
	// code — confirm with implementations) is not visible from here.
	Listen(packet IPacket, startPort int, endPort int, isAllowConnFunc func(conn interface{}) bool) int
	// Close shuts the server down.
	Close()
}
|
package parameters
import (
"github.com/iotaledger/wasp/plugins/config"
flag "github.com/spf13/pflag"
)
// Configuration keys looked up through the config plugin.
const (
	// Logger settings.
	LoggerLevel             = "logger.level"
	LoggerDisableCaller     = "logger.disableCaller"
	LoggerDisableStacktrace = "logger.disableStacktrace"
	LoggerEncoding          = "logger.encoding"
	LoggerOutputPaths       = "logger.outputPaths"
	LoggerDisableEvents     = "logger.disableEvents"
	// Database settings.
	DatabaseDir      = "database.directory"
	DatabaseInMemory = "database.inMemory"
	// Web API settings.
	WebAPIBindAddress    = "webapi.bindAddress"
	WebAPIAdminWhitelist = "webapi.adminWhitelist"
	WebAPIAuth           = "webapi.auth"
	// Dashboard settings.
	DashboardBindAddress       = "dashboard.bindAddress"
	DashboardExploreAddressUrl = "dashboard.exploreAddressUrl"
	DashboardAuth              = "dashboard.auth"
	// Node connection, peering and nanomsg settings.
	NodeAddress          = "nodeconn.address"
	PeeringMyNetId       = "peering.netid"
	PeeringPort          = "peering.port"
	NanomsgPublisherPort = "nanomsg.port"
)
// InitFlags registers every configuration flag, with its default value and
// help text, on the pflag command line.
//
// Fixed typos in user-facing help strings: "wndpoints" -> "endpoints" and
// "even publisher" -> "event publisher".
func InitFlags() {
	flag.String(LoggerLevel, "info", "log level")
	flag.Bool(LoggerDisableCaller, false, "disable caller info in log")
	flag.Bool(LoggerDisableStacktrace, false, "disable stack trace in log")
	flag.String(LoggerEncoding, "console", "log encoding")
	flag.StringSlice(LoggerOutputPaths, []string{"stdout", "goshimmer.log"}, "log output paths")
	flag.Bool(LoggerDisableEvents, true, "disable logger events")
	flag.String(DatabaseDir, "waspdb", "path to the database folder")
	flag.Bool(DatabaseInMemory, false, "whether the database is only kept in memory and not persisted")
	flag.String(WebAPIBindAddress, "127.0.0.1:8080", "the bind address for the web API")
	flag.StringSlice(WebAPIAdminWhitelist, []string{}, "IP whitelist for /adm endpoints")
	flag.StringToString(WebAPIAuth, nil, "authentication scheme for web API")
	flag.String(DashboardBindAddress, "127.0.0.1:7000", "the bind address for the node dashboard")
	flag.String(DashboardExploreAddressUrl, "", "URL to add as href to addresses in the dashboard [default: <nodeconn.address>:8081/explorer/address]")
	flag.StringToString(DashboardAuth, nil, "authentication scheme for the node dashboard")
	flag.String(NodeAddress, "127.0.0.1:5000", "node host address")
	flag.Int(PeeringPort, 4000, "port for Wasp committee connection/peering")
	flag.String(PeeringMyNetId, "127.0.0.1:4000", "node host address as it is recognized by other peers")
	flag.Int(NanomsgPublisherPort, 5550, "the port for nanomsg event publisher")
}
// GetBool returns the bool value of the named configuration parameter.
func GetBool(name string) bool {
	return config.Node.Bool(name)
}

// GetString returns the string value of the named configuration parameter.
func GetString(name string) string {
	return config.Node.String(name)
}

// GetStringSlice returns the string-slice value of the named configuration
// parameter.
func GetStringSlice(name string) []string {
	return config.Node.Strings(name)
}

// GetInt returns the int value of the named configuration parameter.
func GetInt(name string) int {
	return config.Node.Int(name)
}

// GetStringToString returns the string-map value of the named configuration
// parameter.
func GetStringToString(name string) map[string]string {
	return config.Node.StringMap(name)
}
|
package controller
import (
"encoding/json"
"math/rand"
"strings"
"sync"
"time"
"github.com/reechou/holmes"
"github.com/reechou/robot-manager/config"
"github.com/reechou/robot-manager/models"
)
const (
	GROUP_MASS_TYPE_ALL           = 1    // target every group of the robot (filtered by name prefix)
	GROUP_MASS_TYPE_SELECT_GROUPS = 2    // target an explicit list of group names
	GROUP_MASS_WORKER             = 1024 // number of worker goroutines in the pool
)

var (
	// RANDOM_MSG_ADD holds decorative suffixes appended to text messages so
	// repeated mass sends are not byte-identical (see sendMsgs).
	RANDOM_MSG_ADD = []string{
		".", "..", "↭",
		"★", "✔", "↧",
		"↩", "⇤", "⇜",
		"↞", "↜", "┄",
		"-", "--", "^", "^_^",
		"!", "!!", "↮",
		"!", "•", "“",
		"[机智]", "[机智][机智]",
		"♥", "♥♥", "♥♥♥",
		"─", "↕↕", "↕",
		"☈", "✓", "☑",
		"⊰", "⊱", "†",
		"↓", "ˉ", "﹀",
		"﹏", "˜", "ˆ",
		"﹡", "≑", "≐",
		"≍", "≎", "≏",
		"≖", "≗", "≡",
	}
)
// RobotMsgInfo is one message payload: its type (e.g. MSG_TYPE_TEXT) and
// content.
type RobotMsgInfo struct {
	MsgType string `json:"msgType"`
	Msg     string `json:"msg"`
}

// GroupMassInfo describes one group-mass job.
type GroupMassInfo struct {
	RobotWx         string       `json:"robotWx"`                   // sending robot's wechat id
	Msg             RobotMsgInfo `json:"msg"`                       // message to broadcast
	GroupMassType   int          `json:"groupMassType"`             // GROUP_MASS_TYPE_ALL or GROUP_MASS_TYPE_SELECT_GROUPS
	GroupNamePrefix string       `json:"groupNamePrefix,omitempty"` // group-name filter used in ALL mode
	GroupList       []string     `json:"groupList,omitempty"`       // explicit targets used in SELECT_GROUPS mode
	Interval        int          `json:"interval"`                  // seconds between sends; 0 means use the config default
}

// RobotGroupMass fans group-mass jobs out to a pool of worker goroutines.
type RobotGroupMass struct {
	cfg      *config.Config
	robotExt *RobotExt
	wg       sync.WaitGroup      // tracks live workers; waited on in Stop
	gmChan   chan *GroupMassInfo // buffered job queue
	stop     chan struct{}       // closed by Stop to terminate workers
	done     chan struct{}       // NOTE(review): never closed or read in this file
}
// NewRobotGroupMass builds a group-mass controller with a buffered job
// channel and immediately starts its worker pool.
func NewRobotGroupMass(cfg *config.Config, robotExt *RobotExt) *RobotGroupMass {
	mass := &RobotGroupMass{
		cfg:      cfg,
		robotExt: robotExt,
		gmChan:   make(chan *GroupMassInfo, 1024),
		stop:     make(chan struct{}),
		done:     make(chan struct{}),
	}
	mass.initWorkers()
	return mass
}
// Stop signals every worker to exit (by closing the stop channel) and blocks
// until all worker goroutines have returned.
func (self *RobotGroupMass) Stop() {
	close(self.stop)
	self.wg.Wait()
}
// DoGroupMass enqueues a group-mass job for the worker pool. If the
// controller is shutting down, the job is dropped instead of blocking.
func (self *RobotGroupMass) DoGroupMass(gm *GroupMassInfo) {
	select {
	case <-self.stop:
		// Shutting down; discard the job.
	case self.gmChan <- gm:
	}
}
// initWorkers launches GROUP_MASS_WORKER worker goroutines, registering them
// all with the wait group so Stop can wait for their exit.
func (self *RobotGroupMass) initWorkers() {
	holmes.Info("group mass init workers: %d", GROUP_MASS_WORKER)
	self.wg.Add(GROUP_MASS_WORKER)
	for i := 0; i < GROUP_MASS_WORKER; i++ {
		go self.runWorker(self.stop)
	}
}
// runWorker is the worker loop: it processes jobs from gmChan until the stop
// channel is closed, then marks itself done on the wait group and returns.
func (self *RobotGroupMass) runWorker(stop chan struct{}) {
	for {
		select {
		case gm := <-self.gmChan:
			self.handleGroupMass(gm)
		case <-stop:
			self.wg.Done()
			return
		}
	}
}
// handleGroupMass executes one group-mass job: it records the job, resolves
// the target groups according to GroupMassType, sends the message to each
// group with a pause of `interval` seconds between sends, and finally marks
// the recorded job as finished.
//
// NOTE(review): the error paths in the ALL branch return early, leaving the
// recorded job in START status — confirm whether that is intended.
func (self *RobotGroupMass) handleGroupMass(gm *GroupMassInfo) {
	interval := gm.Interval
	if interval == 0 {
		// Fall back to the globally configured send interval.
		interval = self.cfg.GroupMassInterval
	}
	holmes.Debug("handle group mass[%v] start.", gm)
	massRecord := self.recordGroupMass(gm)
	switch gm.GroupMassType {
	case GROUP_MASS_TYPE_ALL:
		// Send to every group of the robot whose nickname matches the prefix.
		robot := &models.Robot{
			RobotWx: gm.RobotWx,
		}
		has, err := models.GetRobot(robot)
		if err != nil {
			holmes.Error("get robot error: %v", err)
			return
		}
		if !has {
			holmes.Error("cannot found robot[%s]", gm.RobotWx)
			return
		}
		groupList, err := models.GetAllRobotGroupList(robot.ID)
		if err != nil {
			holmes.Error("get all robot group list error: %v", err)
			return
		}
		for _, v := range groupList {
			if strings.HasPrefix(v.GroupNickName, gm.GroupNamePrefix) {
				gsmi := &GroupSendMsgInfo{
					RobotWx:       robot.RobotWx,
					GroupUserName: v.UserName,
					GroupNickName: v.GroupNickName,
					Msg:           &gm.Msg,
				}
				status := models.MSG_CHAT_SEND_OK
				ok := self.sendMsgs(gsmi)
				if !ok {
					status = models.MSG_CHAT_SEND_FAILED
				}
				// Persist a per-group chat record with the send outcome.
				rgc := &models.RobotGroupChat{
					RobotId:       robot.ID,
					RobotWx:       robot.RobotWx,
					GroupId:       v.ID,
					GroupName:     v.GroupNickName,
					GroupUserName: v.UserName,
					FromName:      robot.RobotWx,
					Status:        status,
					MsgType:       gm.Msg.MsgType,
					Content:       gm.Msg.Msg,
					Source:        models.ROBOT_CHAT_SOURCE_FROM_WEB_MASS,
				}
				err = models.CreateRobotGroupChat(rgc)
				if err != nil {
					holmes.Error("create robot group chat error: %v", err)
				}
				// Throttle between groups.
				time.Sleep(time.Duration(interval) * time.Second)
			}
		}
	case GROUP_MASS_TYPE_SELECT_GROUPS:
		// Send only to the explicitly listed group nicknames. Unlike the ALL
		// branch, outcomes are not persisted as per-group chat records.
		for _, v := range gm.GroupList {
			gsmi := &GroupSendMsgInfo{
				RobotWx:       gm.RobotWx,
				GroupNickName: v,
				Msg:           &gm.Msg,
			}
			self.sendMsgs(gsmi)
			time.Sleep(time.Duration(interval) * time.Second)
		}
	}
	holmes.Debug("handle group mass[%v] success.", gm)
	if massRecord.ID != 0 {
		massRecord.Status = models.ROBOT_GROUP_MASS_STATUS_END
		models.UpdateRobotGroupMassStatus(massRecord)
	}
}
// recordGroupMass persists the job (serialized as JSON) with START status
// and returns the record. handleGroupMass skips the final status update when
// record.ID stays zero (presumably ID is set by the DB layer on a successful
// insert — confirm against models.CreateRobotGroupMass).
// The json.Marshal error is ignored: gm is a plain struct of serializable
// fields.
func (self *RobotGroupMass) recordGroupMass(gm *GroupMassInfo) *models.RobotGroupMass {
	groupMass, _ := json.Marshal(gm)
	record := &models.RobotGroupMass{
		RobotWx:          gm.RobotWx,
		GroupMassContent: string(groupMass),
		Status:           models.ROBOT_GROUP_MASS_STATUS_START,
	}
	models.CreateRobotGroupMass(record)
	return record
}
// GroupSendMsgInfo is everything sendMsgs needs to deliver one message to
// one group on behalf of a robot.
type GroupSendMsgInfo struct {
	RobotWx       string        // sending robot's wechat id
	GroupUserName string        // group's internal user name (may be empty in SELECT_GROUPS mode)
	GroupNickName string        // group's display name
	Msg           *RobotMsgInfo // message to send
}
// sendMsgs delivers one message to one group via the robot extension and
// reports whether the send succeeded. Text messages get a random decorative
// suffix from RANDOM_MSG_ADD so repeated mass sends are not identical.
func (self *RobotGroupMass) sendMsgs(msg *GroupSendMsgInfo) bool {
	body := msg.Msg.Msg
	if msg.Msg.MsgType == MSG_TYPE_TEXT {
		body += RANDOM_MSG_ADD[rand.Intn(len(RANDOM_MSG_ADD))]
	}
	// NOTE(review): "\u0026" in Go source *is* "&", so this call replaces
	// "&" with itself — a no-op. If the intent was to unescape the literal
	// six-character sequence \u0026, the pattern should be "\\u0026".
	body = strings.Replace(body, "\u0026", "&", -1)
	var req SendMsgInfo
	req.SendMsgs = append(req.SendMsgs, SendBaseInfo{
		WechatNick: msg.RobotWx,
		ChatType:   CHAT_TYPE_GROUP,
		UserName:   msg.GroupUserName,
		NickName:   msg.GroupNickName,
		MsgType:    msg.Msg.MsgType,
		Msg:        body,
	})
	if err := self.robotExt.SendMsgs(msg.RobotWx, &req); err != nil {
		holmes.Error("group mass send msg error: %v", err)
		return false
	}
	return true
}
// init seeds the global PRNG so the RANDOM_MSG_ADD picks in sendMsgs differ
// across runs. (rand.Seed is deprecated as of Go 1.20; newer code would use
// math/rand/v2 instead.)
func init() {
	rand.Seed(time.Now().UnixNano())
}
|
package fateRPGtest
import (
"testing"
"github.com/faterpg"
)
// Lily’s character, Cynere, has the
// aspect Tempted by Shiny Things
// on her sheet, which describes her
// general tendency to overvalue
// material goods and make bad
// decisions when gems and coin are
// involved. This adds an interesting,
// fun element to the character that
// gets her into a great deal of trou-
// ble, bringing a lot of personality to
// the game.
// TestPage5 exercises the aspect example from page 5 of the rulebook: a
// named PC given one aspect should surface both the name and the aspect on
// its character sheet.
//
// Fix: the failure messages were uninformative/ungrammatical ("Name no
// Cynere"); they now report got/want, and t.Errorf lets both checks run.
func TestPage5(t *testing.T) {
	cynere := faterpg.NewNamedPC("Cynere")
	aspect := faterpg.NewAspect()
	aspect.Name = "Tempted by Shiny Things"
	aspect.Description = `general tendency to overvalue
material goods and make bad
decisions when gems and coin are
involved`
	cynere.AddAspect(aspect)
	characterSheet := faterpg.NewCharacterSheet(cynere)
	if characterSheet.Name != "Cynere" {
		t.Errorf("character sheet name = %q, want %q", characterSheet.Name, "Cynere")
	}
	if characterSheet.Aspects[0] != aspect {
		t.Errorf("Aspects[0] = %v, want %v", characterSheet.Aspects[0], aspect)
	}
}
|
package realm
// set of supported data source types
const (
	// ServiceTypeCluster identifies an Atlas cluster data source.
	ServiceTypeCluster = "mongodb-atlas"
	// ServiceTypeDatalake identifies a data lake data source.
	ServiceTypeDatalake = "datalake"
)

// default names for data source types
const (
	// DefaultServiceNameCluster is the default service name given to cluster
	// data sources.
	DefaultServiceNameCluster = "mongodb-atlas"
)
|
package main
import "fmt"
import "io/ioutil"
import "regexp"
import "flag"
// re matches mode strings whose final three characters are <any>w<any> —
// i.e. entries writable by "other" (the last permission triplet of
// os.FileMode's string form).
var re = regexp.MustCompile(".w.$")

// readDirectory recursively walks dir down to the given depth, printing and
// returning "path mode" entries for every file whose mode string matches re.
// A negative depth or an unreadable directory yields an empty result.
//
// Fix: the original formatted the identical "path mode" string twice (once
// for the slice, once for printing); it is now built once. file.Mode().String()
// replaces the redundant fmt.Sprintf("%s", file.Mode()).
func readDirectory(dir string, depth int) []string {
	if depth < 0 {
		return []string{}
	}
	files, err := ioutil.ReadDir(dir)
	if err != nil {
		// Unreadable or missing directory: skip silently.
		return []string{}
	}
	out := []string{}
	for _, file := range files {
		if re.MatchString(file.Mode().String()) {
			entry := fmt.Sprintf("%s %s", dir+"/"+file.Name(), file.Mode())
			out = append(out, entry)
			fmt.Println(entry)
		}
	}
	// Recurse into subdirectories with one less level of depth.
	for _, file := range files {
		if file.IsDir() {
			out = append(out, readDirectory(dir+"/"+file.Name(), depth-1)...)
		}
	}
	return out
}
// main parses the -dir and -depth flags and starts the recursive search.
func main() {
	fmt.Println("Global File Finder")
	dir := flag.String("dir", ".", "directory to start searching from")
	depth := flag.Int("depth", 3, "depth of directories to search")
	flag.Parse()
	readDirectory(*dir, *depth)
}
|
package login
import (
"encoding/json"
"fmt"
"net/http"
"github.com/Tedyst/gotest/util"
)
// Handler serves the login endpoint: it authenticates the request and writes
// either the issued JWT, a JSON-encoded error, or a JSON success response.
//
// Fixes: (1) fmt.Fprintf was called with runtime data (the JWT / JSON) as
// the format string, so any '%' in it would be interpreted as a formatting
// verb (go vet flags this); fmt.Fprint is used instead. (2) the unchecked
// err.(*util.ErrorString) assertion panicked whenever Authenticate returned
// any other error type; it is now a comma-ok assertion with a plain 500
// fallback.
func Handler(w http.ResponseWriter, r *http.Request) {
	jwt, err := Authenticate(r)
	if jwt != "" {
		fmt.Fprint(w, jwt)
		return
	}
	if err != nil {
		if serr, ok := err.(*util.ErrorString); ok {
			str, _ := util.ErrorJSONstring(serr)
			fmt.Fprint(w, string(str))
			return
		}
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	resp := &util.Response{Message: "Logged in", Success: true}
	str, _ := json.Marshal(resp)
	fmt.Fprint(w, string(str))
}
|
package hot100
// grayCode returns the n-bit Gray-code sequence: 2^n values in which every
// pair of consecutive entries differs in exactly one bit.
// Entry i is the standard binary-to-Gray conversion i XOR (i >> 1).
func grayCode(n int) []int {
	size := 1 << n
	codes := make([]int, size)
	for i := 0; i < size; i++ {
		codes[i] = i ^ (i >> 1)
	}
	return codes
}
|
package smallNet
// ringBuffer is a byte ring buffer used to assemble network packets.
type ringBuffer struct {
	_data          []byte
	_allocSize     int // actual allocated size of _data (maxSize + maxPacketSize)
	_maxSize       int // maximum logical size of the buffer
	_maxPacketSize int // largest single packet the buffer must hold contiguously
	_writeCursor   int // next write position in _data
	_readCursor    int // next read position in _data
}
// When wrapping around the ring, it does not check whether the data in front
// has been fully consumed — it is optimistic.
// Therefore the buffer size (maxSize) should be quite generous.
//
// newRingBuffer allocates maxSize+maxPacketSize bytes so that a contiguous
// region for one full packet is always available at the tail. It returns nil
// when maxSize is non-positive or smaller than maxPacketSize.
func newRingBuffer(maxSize int, maxPacketSize int) *ringBuffer {
	if maxSize <= 0 || maxSize < maxPacketSize {
		return nil
	}
	b := &ringBuffer{
		_maxSize:       maxSize,
		_maxPacketSize: maxPacketSize,
		_allocSize:     maxSize + maxPacketSize,
		_data:          make([]byte, maxSize+maxPacketSize),
		_writeCursor:   0,
		_readCursor:    0,
	}
	return b
}
// reset discards all buffered data by rewinding both cursors to the start.
func (b *ringBuffer) reset() {
	b._writeCursor = 0
	b._readCursor = 0
}
// getBuffer returns a writable slice starting at the write cursor. If the
// tail of the allocation is smaller than requiredSize, any unread bytes are
// first compacted to the front of _data and both cursors are rebased.
func (b *ringBuffer) getBuffer(requiredSize int) []byte {
	remain := b._allocSize - b._writeCursor
	if remain < requiredSize {
		readsize := b._writeCursor - b._readCursor // bytes written but not yet consumed
		if readsize > 0 {
			// Move the pending bytes to the start and rebase the cursors.
			copy(b._data[0:], b._data[b._readCursor:b._writeCursor])
			b._writeCursor = readsize
			b._readCursor = 0
		} else {
			// Nothing pending: simply rewind.
			b._writeCursor = 0
			b._readCursor = 0
		}
	}
	return b._data[b._writeCursor:]
}
// aheadWriteCursor advances the write cursor after size bytes were written.
func (b *ringBuffer) aheadWriteCursor(size int) {
	b._writeCursor += size
}

// aheadReadCursor advances the read cursor after size bytes were consumed.
func (b *ringBuffer) aheadReadCursor(size int) {
	b._readCursor += size
}

// aheadWRCursor advances both cursors by size at once.
func (b *ringBuffer) aheadWRCursor(size int) {
	b._writeCursor += size
	b._readCursor += size
}
func (b *ringBuffer) readAbleBuffer() ([]byte, int) {
readAblesize := b._writeCursor - b._readCursor
return b._data[b._readCursor:b._writeCursor], readAblesize
} |
package main
import (
"bytes"
"encoding/json"
"fmt"
"os/exec"
"strings"
"time"
"github.com/fatih/color"
"github.com/pkg/errors"
)
var (
	// SEPARATOR marks lines produced by the custom git --format template;
	// the JSON commit header follows it on the same line.
	SEPARATOR = []byte("<<SEP")
)

// Change is one name-status entry from git log output.
type Change struct {
	Name    string   // status code, e.g. "A", "M", "D"
	File    string   // first path field of the entry
	Details []string // all fields after the status code, including File
}

// CommitDetails is the JSON header emitted for each commit.
type CommitDetails struct {
	Author string
	Commit string
	Date   time.Time
}

// Commit couples a commit header with its parsed file changes.
type Commit struct {
	Changes []Change
	CommitDetails
}

var (
	// ErrBadGitCommitStatus reports a malformed name-status line.
	ErrBadGitCommitStatus = fmt.Errorf("Bad git commit status")
)
// Parse converts `git log --name-status` output (with SEPARATOR-prefixed
// JSON commit headers) into a slice of Commits. On a malformed line it
// returns the commits parsed so far alongside the error.
func Parse(out []byte) ([]Commit, error) {
	commits := []Commit{}
	lines := bytes.Split(out, []byte("\n"))
	for _, l := range lines {
		if bytes.HasPrefix(l, SEPARATOR) {
			// Commit header: JSON follows the separator on the same line.
			var commit Commit
			raw := bytes.TrimPrefix(l, SEPARATOR)
			if err := json.Unmarshal(raw, &commit); err != nil {
				return commits, errors.Wrap(err, fmt.Sprintf("while parsing %s", raw))
			}
			commits = append(commits, commit)
			continue
		}
		changeStr := string(l)
		if len(changeStr) == 0 {
			continue
		}
		changeDesc := strings.Fields(changeStr)
		if len(changeDesc) < 2 {
			return commits, errors.Wrap(ErrBadGitCommitStatus, fmt.Sprintf("while parsing %s", changeStr))
		}
		// Bug fix: a change line appearing before any commit header used to
		// index commits[-1] and panic; report it as malformed input instead.
		if len(commits) == 0 {
			return commits, errors.Wrap(ErrBadGitCommitStatus, fmt.Sprintf("change before any commit: %s", changeStr))
		}
		change := Change{
			Name:    changeDesc[0],
			File:    changeDesc[1],
			Details: changeDesc[1:],
		}
		last := len(commits) - 1
		commits[last].Changes = append(commits[last].Changes, change)
	}
	return commits, nil
}
// FileChange pairs a change with the details of the commit that made it.
type FileChange struct {
	Commit CommitDetails
	Change Change
}

// FileGroup maps a file path to the first change encountered for it.
type FileGroup map[string]FileChange
// GroupByFile indexes commits by file path, keeping only the first change
// encountered for each file. The error result is always nil.
func GroupByFile(commits []Commit) (FileGroup, error) {
	grouped := FileGroup{}
	for _, c := range commits {
		for _, ch := range c.Changes {
			if _, seen := grouped[ch.File]; seen {
				continue
			}
			grouped[ch.File] = FileChange{
				Commit: c.CommitDetails,
				Change: ch,
			}
		}
	}
	return grouped, nil
}
// FilterCurrentDir returns the names in fileGroup that contain no path
// separator, i.e. files living directly in the current directory.
// The error result is always nil.
func FilterCurrentDir(fileGroup FileGroup) ([]string, error) {
	out := make([]string, 0, len(fileGroup))
	// gofmt/vet fix: drop the redundant blank identifier from the range clause.
	for name := range fileGroup {
		if !strings.Contains(name, "/") {
			out = append(out, name)
		}
	}
	return out, nil
}
// Terminal color helpers used when rendering history lines.
var (
	red         = color.New(color.FgRed).SprintFunc()    // deleted files
	green       = color.New(color.FgGreen).SprintFunc()  // added files
	yellow      = color.New(color.FgYellow).SprintFunc() // all other changes
	commitColor = color.New(color.FgCyan).SprintFunc()
	authorColor = color.New(color.FgMagenta).SprintFunc()
	dateColor   = color.New(color.FgBlue).SprintFunc()
)
// Print renders one file's latest change as a colored line: commit hash,
// author, date, status letter, and the color-coded file name.
func Print(file FileChange) string {
	fileName := file.Change.File
	changeName := file.Change.Name
	var colored string
	switch {
	case strings.HasPrefix(changeName, "A"):
		colored = fmt.Sprintln(green(fileName))
	case strings.HasPrefix(changeName, "D"):
		colored = fmt.Sprintln(red(fileName))
	default:
		colored = fmt.Sprintln(yellow(fileName))
	}
	return fmt.Sprintf("%s %s %s %s %s",
		commitColor(file.Commit.Commit),
		authorColor(file.Commit.Author),
		dateColor(file.Commit.Date.UTC().Format("2006-01-02")),
		changeName[:1],
		colored)
}
// main runs `git log --name-status` with a JSON header per commit, parses
// the output, groups changes by file, and prints one colored line per file
// in the current directory.
func main() {
	cmd := exec.Command("git", "log",
		"--name-status",
		// Each commit header line starts with SEPARATOR followed by JSON.
		fmt.Sprintf("--format=%s{ \"Author\": \"%%an\", \"Commit\": \"%%h\", \"Date\": \"%%aI\"}", SEPARATOR),
		"-p",
		".")
	out, err := cmd.Output()
	if err != nil {
		panic(err)
	}
	commits, err := Parse(out)
	if err != nil {
		panic(err)
	}
	fileHist, err := GroupByFile(commits)
	if err != nil {
		panic(err)
	}
	files, err := FilterCurrentDir(fileHist)
	if err != nil {
		panic(err)
	}
	for _, file := range files {
		str := Print(fileHist[file])
		fmt.Print(str)
	}
}
|
package main
import "fmt"
// cliente describes a customer record.
type cliente struct {
	nome      string // first name
	sobrenome string // surname
	fumante   bool   // whether the customer is a smoker
}
// main creates two customers and prints them.
func main() {
	primeiro := cliente{
		nome:      "João",
		sobrenome: "da Silva",
		fumante:   false,
	}
	segundo := cliente{
		nome:      "Joana",
		sobrenome: "Pereira",
		fumante:   true,
	}
	fmt.Println(primeiro)
	fmt.Println(segundo)
}
|
package main
import (
"flag"
"fmt"
"goChat/Server/db"
"goChat/Server/inMemoryDatabase"
"goChat/Server/mongo"
"goChat/Server/services"
"log"
"net/http"
"github.com/gorilla/mux"
)
// main wires up the goChat server: repositories (in-memory or MongoDB,
// chosen by the -inmemory flag), the auth/user/message/conversation
// services, the HTTP routes, and the websocket message router, then
// listens on the port given by -port.
func main() {
	router := mux.NewRouter()
	port := flag.Int("port", 5020, "Port number for the server to use")
	// NOTE(review): the coded default is true, yet the usage text says
	// "Default is false" — confirm which is intended.
	inMemoryDb := flag.Bool("inmemory", true, "Flag to use In-Memory database. Default is false")
	flag.Parse()
	log.Printf("Using inmemory db: %v", *inMemoryDb)
	var userRepo db.IUserRepository
	var messageRepo db.IMessageRepository
	var convRepo db.IConversationRepository
	if *inMemoryDb {
		userRepo = inMemoryDatabase.NewUserRepository()
		messageRepo = inMemoryDatabase.NewMessageRepository()
		convRepo = inMemoryDatabase.NewConversationRepository()
	} else {
		// NOTE(review): an empty connection string is passed here —
		// presumably the mongo package supplies a default; verify.
		userRepo = mongo.NewUserRepository("", "goChat")
		messageRepo = mongo.NewMessageRepository("", "goChat")
		convRepo = mongo.NewConversationRepository("", "goChat")
	}
	authService := services.NewAuthService(userRepo)
	userService := services.NewUserService(userRepo)
	messageService := services.NewMessageService(messageRepo)
	convService := services.NewConversationService(convRepo, userService)
	mRouter := services.NewMessageRouter(userService, convService, messageService)
	// router.Handle("/", http.FileServer(http.Dir("../Client/dist")))
	router.HandleFunc("/api/login", authService.AuthenticateHandler).Methods(http.MethodPost)
	router.HandleFunc("/api/register", userService.SignupHandlerWithNext(authService.AuthenticateHandler)).Methods(http.MethodPost)
	router.HandleFunc("/api/conversations", authService.AuthenticationMiddleware(convService.GetConversationHandler)).Methods(http.MethodGet)
	router.HandleFunc("/api/getmessages/{conversationID}/{skip}/{count}", authService.AuthenticationMiddleware(messageService.GetMessageHandler)).Methods(http.MethodGet)
	router.HandleFunc("/api/updatemessageasread/{messageId}", authService.AuthenticationMiddleware(messageService.UpdateMessageAsRead)).Methods(http.MethodPost)
	router.HandleFunc("/api/ws", authService.AuthenticationMiddleware(func(w http.ResponseWriter, r *http.Request) {
		services.AddClient(mRouter, w, r)
	}))
	// The websocket router runs concurrently with the HTTP server.
	go mRouter.Run()
	serverAddr := fmt.Sprintf(":%d", *port)
	log.Printf("Server running at %s", serverAddr)
	log.Fatal(http.ListenAndServe(serverAddr, router))
}
|
package ttlib
import (
"github.com/johnnylee/util"
)
// ClientConfig holds a client's JSON-serialized configuration.
type ClientConfig struct {
	Host   string // The host address: <address>:<port>.
	User   string // The username for the client.
	Pwd    []byte // The user's password.
	CaCert []byte // The CA certificate.
}
// LoadClientConfig reads the client's configuration from the JSON file at
// path. The returned config is non-nil even when unmarshaling fails.
func LoadClientConfig(path string) (*ClientConfig, error) {
	cfg := new(ClientConfig)
	err := util.JsonUnmarshal(path, cfg)
	return cfg, err
}
// Save writes the client's configuration as JSON to the given path.
func (cc ClientConfig) Save(path string) error {
	err := util.JsonMarshal(path, &cc)
	return err
}
|
package kafkaflow
import "github.com/trustmaster/goflow"
// NewUpperApp wires together the components of the upper-casing flow graph:
// an Upper process feeding a Printer, with the graph's "In" port mapped to
// Upper's "Val" input.
func NewUpperApp() *goflow.Graph {
	u := goflow.NewGraph()
	u.Add("upper", new(Upper))
	u.Add("printer", new(Printer))
	// Upper's "Res" output streams into Printer's "Line" input.
	u.Connect("upper", "Res", "printer", "Line")
	u.MapInPort("In", "upper", "Val")
	return u
}
|
/*
Write a program or function that listens for incoming TCP traffic on port N. It offers a simple service: it calculates sum of IP address fields of incoming connection and returns.
Program or function reads integer N from arguments or stdin. It listens to incoming TCP connections on port N.
When someone connects to that port, the program calculates sum of its IP address fields and sends it back to the client with trailing newline and closes connection.
Port number N is a valid port, and 2^10 < N < 2^15
Trailing newline can be either \n or \r\n
You can use either IPv4 or IPv6. Since IPv6 addresses are written in hexadecimal form, you must also provide result in same format, for example 2001:0db8:0000:0042:0000:8a2e:0370:7334 => 12ecd.
This is code-golf. Standard rules and loopholes apply.
Example
You run your server with ./server 1234. The server is now running and waiting for connections on port 1234. Then a client from 127.0.0.1 connects to your server.
Your server performs a simple calculation: 127+0+0+1 => 128 and sends the result to the client (with trailing newline): 128\n. Then server closes connection and waits for next client.
*/
package main
import (
"flag"
"fmt"
"log"
"net"
"os"
"strconv"
"strings"
)
// main reads the port number from the single command-line argument and
// starts the TCP sum-of-IP-fields server on it.
func main() {
	flag.Parse()
	if flag.NArg() != 1 {
		usage()
	}
	// Bug fix: the Atoi error was silently discarded, so a non-numeric
	// argument fell through as port 0. Treat it as a usage error instead.
	port, err := strconv.Atoi(flag.Arg(0))
	if err != nil {
		usage()
	}
	if err := server(port); err != nil {
		log.Fatal(err)
	}
}
// usage prints invocation help to stderr and exits with status 2.
func usage() {
	fmt.Fprintln(os.Stderr, "usage: <port>")
	flag.PrintDefaults()
	os.Exit(2)
}
// server listens for TCP connections on the given port and handles each
// one on its own goroutine. It returns only on a listen error; accept
// errors are logged and skipped.
func server(port int) error {
	listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
	if err != nil {
		return err
	}
	defer listener.Close()
	for {
		conn, acceptErr := listener.Accept()
		if acceptErr != nil {
			log.Println(acceptErr)
			continue
		}
		fmt.Println("Connection", conn.LocalAddr(), conn.RemoteAddr())
		go serve(conn)
	}
}
func parseip(conn net.Conn) net.IP {
addr := conn.LocalAddr()
str := addr.String()
index := strings.LastIndexByte(str, ':')
if index >= 0 {
str = str[:index]
}
str = strings.TrimLeft(str, "[")
str = strings.TrimRight(str, "]")
return net.ParseIP(str)
}
func sumip(ip net.IP) string {
v4 := ip.To4()
v6 := ip.To16()
data := v4
if data == nil {
data = v6
}
sum := 0
for i := range data {
sum += int(data[i])
}
if v4 != nil {
return fmt.Sprintf("%d", sum)
}
return fmt.Sprintf("%x", sum)
}
func serve(conn net.Conn) {
ip := parseip(conn)
fmt.Fprintf(conn, "%s\n", sumip(ip))
conn.Close()
}
|
package main
import "github.com/bbrowning/ocf/cmd"
// main delegates to the ocf command-line interface's root command.
func main() {
	cmd.Execute()
}
|
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
//go:build e2e_test
// +build e2e_test
package e2e
import (
"bytes"
"context"
"fmt"
"net"
"os"
"strconv"
"testing"
"text/template"
"time"
"github.com/aws/amazon-vpc-cni-plugins/network/netns"
"github.com/aws/amazon-vpc-cni-plugins/network/vpc"
"github.com/containernetworking/cni/pkg/invoke"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/vishvananda/netlink"
)
// NetconfFieldsMap holds the values substituted into the netconf JSON
// template (netConfJsonFmt) for one test scenario.
type NetconfFieldsMap struct {
	Trunk              string // trunk interface name, e.g. "eth1"
	BranchVlanID       uint16
	BranchMACAddress   string
	BranchIPv4Address  string // CIDR notation
	BranchIPv6Address  string // CIDR notation
	GatewayIPv4Address string
	GatewayIPv6Address string
	BlockIMDS          bool // whether to block the instance metadata endpoint
}
// netconfFields returns the template values for the standard scenario,
// in which the instance metadata endpoint remains reachable.
func netconfFields() NetconfFieldsMap {
	var fields NetconfFieldsMap
	fields.Trunk = "eth1"
	fields.BranchVlanID = 101
	fields.BranchMACAddress = "02:e1:48:75:86:a4"
	fields.BranchIPv4Address = "172.31.19.6/20"
	fields.BranchIPv6Address = "2600:1f13:4d9:e602:6aea:cdb1:2b2b:8d62/64"
	fields.GatewayIPv4Address = "172.31.16.1"
	fields.GatewayIPv6Address = "2600:1f13:4d9:e602::1234"
	fields.BlockIMDS = false
	return fields
}
// netconfFieldsBlockIMDS returns the template values for the scenario in
// which access to the instance metadata endpoint is blocked.
func netconfFieldsBlockIMDS() NetconfFieldsMap {
	var fields NetconfFieldsMap
	fields.Trunk = "eth2"
	fields.BranchVlanID = 102
	fields.BranchMACAddress = "02:e1:48:75:88:a4"
	fields.BranchIPv4Address = "172.32.19.6/20"
	fields.BranchIPv6Address = "2600:1f13:4d9:e604:6aea:cdb1:2b2b:8d62/64"
	fields.GatewayIPv4Address = "172.32.16.1"
	fields.GatewayIPv6Address = "2600:1f13:4d9:e604::1234"
	fields.BlockIMDS = true
	return fields
}
const (
	// containerID identifies the test container in CNI invocations.
	containerID = "container_1"
	// netConfJsonFmt is the netconf template; fields are filled in from a
	// NetconfFieldsMap via text/template.
	netConfJsonFmt = `
{
"type": "vpc-branch-eni",
"name": "vpc-branch-eni-test-network",
"cniVersion":"0.3.0",
"trunkName": "{{.Trunk}}",
"branchVlanID": "{{.BranchVlanID}}",
"branchMACAddress": "{{.BranchMACAddress}}",
"ipAddresses": ["{{.BranchIPv4Address}}","{{.BranchIPv6Address}}"],
"gatewayIPAddresses": ["{{.GatewayIPv4Address}}","{{.GatewayIPv6Address}}"],
"blockInstanceMetadata": {{.BlockIMDS}},
"interfaceType": "vlan"
}
`
	// constants for TestAddDel
	ifName = "testIf"
	nsName = "vpcBranchEniTestNS"
	// constants for TestAddDelBlockIMDS
	ifNameBlockIMDS = "blockImdsTestIf"
	nsNameBlockIMDS = "vpcBranchEniBlockImdsTestNS"
)
// TestAddDelBlockIMDS runs the ADD/DEL lifecycle with a netconf that
// blocks the instance metadata endpoint and verifies IMDS is unreachable.
func TestAddDelBlockIMDS(t *testing.T) {
	testAddDel(
		t,
		netconfFieldsBlockIMDS(),
		nsNameBlockIMDS,
		ifNameBlockIMDS,
		validateAfterAddBlockIMDS,
		validateAfterDel,
	)
}
// TestAddDel runs the standard ADD/DEL lifecycle. The trunk interface is
// brought down first so the test proves the plugin brings it up itself.
func TestAddDel(t *testing.T) {
	var err error
	// Bring down the trunk interface so that we can ensure the plugin is not assuming the trunk interface
	// is already brought up.
	la := netlink.NewLinkAttrs()
	la.Name = netconfFields().Trunk
	link := &netlink.Dummy{LinkAttrs: la}
	err = netlink.LinkSetDown(link)
	require.NoError(t, err)
	testAddDel(
		t,
		netconfFields(),
		nsName,
		ifName,
		validateAfterAdd,
		validateAfterDel,
	)
}
// testAddDel drives one full ADD/DEL lifecycle of the vpc-branch-eni CNI
// plugin against a fresh network namespace, invoking the supplied
// validation callbacks inside that namespace after each command.
func testAddDel(
	t *testing.T,
	inputNetconfFields NetconfFieldsMap,
	netNsName string,
	interfaceName string,
	validateAfterAddFunc,
	validateAfterDelFunc func(*testing.T, string, NetconfFieldsMap),
) {
	// Ensure that the cni plugin exists.
	pluginPath, err := invoke.FindInPath("vpc-branch-eni", []string{os.Getenv("CNI_PATH")})
	require.NoError(t, err, "Unable to find vpc-branch-eni plugin in path")
	// Create a directory for storing test logs.
	// Bug fix: the MkdirTemp error used to be clobbered by the following
	// Chmod call before it was ever checked; check each error separately.
	testLogDir, err := os.MkdirTemp("", "vpc-branch-eni-cni-e2eTests-test-")
	require.NoError(t, err, "Unable to create directory for storing test logs")
	err = os.Chmod(testLogDir, 0755)
	require.NoError(t, err, "Unable to set permissions on the test log directory")
	// Configure the env var to use the test logs directory.
	os.Setenv("VPC_CNI_LOG_FILE", fmt.Sprintf("%s/vpc-branch-eni.log", testLogDir))
	t.Logf("Using %s for test logs", testLogDir)
	defer os.Unsetenv("VPC_CNI_LOG_FILE")
	// Configure logs at debug level.
	os.Setenv("VPC_CNI_LOG_LEVEL", "debug")
	defer os.Unsetenv("VPC_CNI_LOG_LEVEL")
	// Handle deletion of test logs at the end of the test execution if specified.
	ok, err := strconv.ParseBool(getEnvOrDefault("ECS_PRESERVE_E2E_TEST_LOGS", "false"))
	assert.NoError(t, err, "Unable to parse ECS_PRESERVE_E2E_TEST_LOGS env var")
	defer func(preserve bool) {
		if !t.Failed() && !preserve {
			t.Logf("Removing test logs at %s", testLogDir)
			os.RemoveAll(testLogDir)
		} else {
			t.Logf("Preserving test logs at %s", testLogDir)
		}
	}(ok)
	// Create a network namespace to mimic the container's network namespace.
	// Fix: check the error (and register the close) before announcing
	// success; the message used to print even when creation failed.
	targetNS, err := netns.NewNetNS(netNsName)
	require.NoError(t, err,
		"Unable to create the network namespace that represents the network namespace of the container")
	defer targetNS.Close()
	fmt.Println("Created target namespace")
	// Construct args to invoke the CNI plugin with.
	execInvokeArgs := &invoke.Args{
		ContainerID: containerID,
		NetNS:       targetNS.GetPath(),
		IfName:      interfaceName,
		Path:        os.Getenv("CNI_PATH"),
	}
	var netConfBytes bytes.Buffer
	netConfJsonTpl := template.Must(template.New("testAddDel").Parse(netConfJsonFmt))
	tplExecErr := netConfJsonTpl.Execute(&netConfBytes, inputNetconfFields)
	require.NoErrorf(t, tplExecErr, "Unable to fill in the netconf template using %+v", inputNetconfFields)
	netConf := netConfBytes.Bytes()
	// Execute the "ADD" command for the plugin.
	execInvokeArgs.Command = "ADD"
	err = invoke.ExecPluginWithoutResult(
		context.Background(),
		pluginPath,
		netConf,
		execInvokeArgs,
		nil)
	require.NoError(t, err, "Unable to execute ADD command for vpc-branch-eni cni plugin")
	targetNS.Run(func() error {
		validateAfterAddFunc(t, interfaceName, inputNetconfFields)
		return nil
	})
	// Execute the "DEL" command for the plugin.
	execInvokeArgs.Command = "DEL"
	err = invoke.ExecPluginWithoutResult(
		context.Background(),
		pluginPath,
		netConf,
		execInvokeArgs,
		nil)
	require.NoError(t, err, "Unable to execute DEL command for vpc-branch-eni cni plugin")
	targetNS.Run(func() error {
		validateAfterDelFunc(t, interfaceName, inputNetconfFields)
		return nil
	})
}
// validateAfterAddBlockIMDS performs the standard post-ADD checks and then
// verifies no route exists to any instance metadata endpoint.
func validateAfterAddBlockIMDS(
	t *testing.T,
	interfaceName string,
	expectedFields NetconfFieldsMap,
) {
	validateAfterAdd(t, interfaceName, expectedFields)
	// Check that there's no route to go to IMDS endpoint.
	for _, ep := range vpc.InstanceMetadataEndpoints {
		imdsIP := net.ParseIP(ep)
		_, err := netlink.RouteGet(imdsIP)
		assert.Error(t, err)
	}
}
// validateAfterAdd checks the state inside the container namespace after
// ADD: the branch vlan link exists, is up, and carries the expected MAC,
// IP addresses, and default routes.
func validateAfterAdd(
	t *testing.T,
	interfaceName string,
	expectedFields NetconfFieldsMap,
) {
	// Give some time for the link to come up, we just initialized it, if this time is
	// too short, the link status will be `unknown` instead of `up` even though
	// everything is actually set up properly.
	time.Sleep(2 * time.Second)
	// Check that branch link exists and is up.
	branch, err := netlink.LinkByName(interfaceName)
	require.NoError(t, err)
	assert.Equal(t, "vlan", branch.Type())
	branchAttrs := branch.Attrs()
	assert.NotNil(t, branch.Attrs())
	assert.Equal(t, "up", branchAttrs.OperState.String())
	assert.Equal(t, expectedFields.BranchMACAddress, branchAttrs.HardwareAddr.String())
	// Check IP addresses.
	validateIPAddress(t, branch, netlink.FAMILY_V4, expectedFields.BranchIPv4Address)
	validateIPAddress(t, branch, netlink.FAMILY_V6, expectedFields.BranchIPv6Address)
	// Check default routes.
	validateDefaultRoute(t, branch, netlink.FAMILY_V4, expectedFields.GatewayIPv4Address)
	validateDefaultRoute(t, branch, netlink.FAMILY_V6, expectedFields.GatewayIPv6Address)
}
// validateAfterDel asserts the branch link no longer exists. expectedFields
// is unused but keeps the signature compatible with the validate callbacks.
func validateAfterDel(
	t *testing.T,
	interfaceName string,
	expectedFields NetconfFieldsMap,
) {
	// Check branch link is deleted.
	_, err := netlink.LinkByName(interfaceName)
	assert.Error(t, err)
}
// getEnvOrDefault gets the value of an env var. It returns the default value
// if the env var is not set.
func getEnvOrDefault(name string, defaultValue string) string {
val := os.Getenv(name)
if val == "" {
return defaultValue
}
return val
}
// validateIPAddress fails the test when the link does not carry the given
// IP address (CIDR form) in the requested address family.
func validateIPAddress(t *testing.T, link netlink.Link, family int, ipAddress string) {
	addrs, err := netlink.AddrList(link, family)
	require.NoError(t, err)
	found := false
	for _, addr := range addrs {
		if addr.IPNet.String() == ipAddress {
			found = true
			break
		}
	}
	if !found {
		assert.NoError(t, fmt.Errorf("IP address %s not found", ipAddress))
	}
}
// validateDefaultRoute validates that the link has a default route to the given gateway IP address.
// Only the first route with a nil destination and a non-nil gateway is
// compared; any later default routes are not inspected.
func validateDefaultRoute(t *testing.T, link netlink.Link, family int, gatewayIPAddress string) {
	routes, err := netlink.RouteList(link, family)
	require.NoError(t, err)
	for _, r := range routes {
		if r.Dst == nil && r.Gw != nil {
			assert.Equal(t, gatewayIPAddress, r.Gw.String())
			return
		}
	}
	// No default route at all: report the failure.
	assert.NoError(t, fmt.Errorf("Default route to gateway %s not found", gatewayIPAddress))
}
|
package leetcode
import (
"reflect"
"testing"
)
// TestRemoveElement exercises removeElement (LeetCode 27): it must move
// the kept elements to the front of nums and return their count.
func TestRemoveElement(t *testing.T) {
	tests := []struct {
		nums    []int
		val     int
		results []int // expected prefix of nums after removal
	}{
		{
			nums:    []int{},
			val:     0,
			results: []int{},
		},
		{
			nums:    []int{1},
			val:     1,
			results: []int{},
		},
		{
			nums:    []int{1, 1},
			val:     1,
			results: []int{},
		},
		{
			nums:    []int{3, 2, 2, 3},
			val:     3,
			results: []int{2, 2},
		},
		{
			nums:    []int{0, 1, 2, 2, 3, 0, 4, 2},
			val:     2,
			results: []int{0, 1, 3, 0, 4},
		},
	}
	for i, tt := range tests {
		n := removeElement(tt.nums, tt.val)
		// Only the first n elements are significant after removal.
		if got, want := tt.nums[:n], tt.results; !reflect.DeepEqual(got, want) {
			t.Fatalf("%d: got %v, want %v", i, got, want)
		} else {
			t.Logf("%d: got %v", i, got)
		}
	}
}
|
package databroker
import (
"context"
"fmt"
"io"
"github.com/pomerium/pomerium/config"
"github.com/pomerium/pomerium/internal/log"
"github.com/pomerium/pomerium/internal/registry"
"github.com/pomerium/pomerium/internal/registry/inmemory"
"github.com/pomerium/pomerium/internal/registry/redis"
"github.com/pomerium/pomerium/internal/telemetry/trace"
registrypb "github.com/pomerium/pomerium/pkg/grpc/registry"
"github.com/pomerium/pomerium/pkg/storage"
)
// registryWatchServer wraps a Registry_WatchServer so that Context()
// returns the caller-supplied (span-annotated) context instead of the
// stream's original one.
type registryWatchServer struct {
	registrypb.Registry_WatchServer
	ctx context.Context
}

// Context returns the overriding context carried by the wrapper.
func (stream registryWatchServer) Context() context.Context {
	return stream.ctx
}
// Report calls the registry Report method.
func (srv *Server) Report(ctx context.Context, req *registrypb.RegisterRequest) (*registrypb.RegisterResponse, error) {
	ctx, span := trace.StartSpan(ctx, "databroker.grpc.Report")
	defer span.End()
	reg, regErr := srv.getRegistry()
	if regErr != nil {
		return nil, regErr
	}
	return reg.Report(ctx, req)
}
// List calls the registry List method.
func (srv *Server) List(ctx context.Context, req *registrypb.ListRequest) (*registrypb.ServiceList, error) {
	ctx, span := trace.StartSpan(ctx, "databroker.grpc.List")
	defer span.End()
	reg, regErr := srv.getRegistry()
	if regErr != nil {
		return nil, regErr
	}
	return reg.List(ctx, req)
}
// Watch calls the registry Watch method, handing it a wrapped stream whose
// Context() carries the tracing span.
func (srv *Server) Watch(req *registrypb.ListRequest, stream registrypb.Registry_WatchServer) error {
	ctx, span := trace.StartSpan(stream.Context(), "databroker.grpc.Watch")
	defer span.End()
	reg, err := srv.getRegistry()
	if err != nil {
		return err
	}
	wrapped := registryWatchServer{
		Registry_WatchServer: stream,
		ctx:                  ctx,
	}
	return reg.Watch(req, wrapped)
}
// getRegistry lazily creates the registry: a read lock covers the fast
// path, then a write lock guards one-time initialization.
func (srv *Server) getRegistry() (registry.Interface, error) {
	backend, err := srv.getBackend()
	if err != nil {
		return nil, err
	}
	// double-checked locking
	srv.mu.RLock()
	r := srv.registry
	srv.mu.RUnlock()
	if r == nil {
		srv.mu.Lock()
		// Re-check under the write lock: another goroutine may have won.
		r = srv.registry
		var err error
		if r == nil {
			r, err = srv.newRegistryLocked(backend)
			// NOTE(review): on failure this stores the nil registry, so a
			// later call will retry initialization.
			srv.registry = r
		}
		srv.mu.Unlock()
		if err != nil {
			return nil, err
		}
	}
	return r, nil
}
// newRegistryLocked constructs the registry backing srv. Preference order:
// a backend that itself provides a RegistryServer, then the configured
// storage type (in-memory or redis). Callers must hold srv.mu.
func (srv *Server) newRegistryLocked(backend storage.Backend) (registry.Interface, error) {
	ctx := context.Background()
	if hasRegistryServer, ok := backend.(interface {
		RegistryServer() registrypb.RegistryServer
	}); ok {
		log.Info(ctx).Msg("using registry via storage")
		// Compose the backend's Closer with its RegistryServer so the
		// result satisfies registry.Interface.
		return struct {
			io.Closer
			registrypb.RegistryServer
		}{backend, hasRegistryServer.RegistryServer()}, nil
	}
	switch srv.cfg.storageType {
	case config.StorageInMemoryName:
		log.Info(ctx).Msg("using in-memory registry")
		return inmemory.New(ctx, srv.cfg.registryTTL), nil
	case config.StorageRedisName:
		log.Info(ctx).Msg("using redis registry")
		r, err := redis.New(
			srv.cfg.storageConnectionString,
			redis.WithTLSConfig(srv.getTLSConfigLocked(ctx)),
		)
		if err != nil {
			return nil, fmt.Errorf("failed to create new redis registry: %w", err)
		}
		return r, nil
	}
	return nil, fmt.Errorf("unsupported registry type: %s", srv.cfg.storageType)
}
|
package pgsql
import (
"testing"
)
// TestInt2VectorArray round-trips PostgreSQL int2vector[] values through
// every supported Go slice representation (signed/unsigned integer widths
// and floats) plus raw string/[]byte forms, using the testlist2 harness.
func TestInt2VectorArray(t *testing.T) {
	testlist2{{
		valuer:  Int2VectorArrayFromIntSliceSlice,
		scanner: Int2VectorArrayToIntSliceSlice,
		data: []testdata{
			{
				input:  [][]int{{-32768, 32767}, {0, 1, 2, 3}},
				output: [][]int{{-32768, 32767}, {0, 1, 2, 3}}},
		},
	}, {
		valuer:  Int2VectorArrayFromInt8SliceSlice,
		scanner: Int2VectorArrayToInt8SliceSlice,
		data: []testdata{
			{
				input:  [][]int8{{-128, 127}, {0, 1, 2, 3}},
				output: [][]int8{{-128, 127}, {0, 1, 2, 3}}},
		},
	}, {
		valuer:  Int2VectorArrayFromInt16SliceSlice,
		scanner: Int2VectorArrayToInt16SliceSlice,
		data: []testdata{
			{
				input:  [][]int16{{-32768, 32767}, {0, 1, 2, 3}},
				output: [][]int16{{-32768, 32767}, {0, 1, 2, 3}}},
		},
	}, {
		valuer:  Int2VectorArrayFromInt32SliceSlice,
		scanner: Int2VectorArrayToInt32SliceSlice,
		data: []testdata{
			{
				input:  [][]int32{{-32768, 32767}, {0, 1, 2, 3}},
				output: [][]int32{{-32768, 32767}, {0, 1, 2, 3}}},
		},
	}, {
		valuer:  Int2VectorArrayFromInt64SliceSlice,
		scanner: Int2VectorArrayToInt64SliceSlice,
		data: []testdata{
			{
				input:  [][]int64{{-32768, 32767}, {0, 1, 2, 3}},
				output: [][]int64{{-32768, 32767}, {0, 1, 2, 3}}},
		},
	}, {
		valuer:  Int2VectorArrayFromUintSliceSlice,
		scanner: Int2VectorArrayToUintSliceSlice,
		data: []testdata{
			{
				input:  [][]uint{{0, 32767}, {0, 1, 2, 3}},
				output: [][]uint{{0, 32767}, {0, 1, 2, 3}}},
		},
	}, {
		valuer:  Int2VectorArrayFromUint8SliceSlice,
		scanner: Int2VectorArrayToUint8SliceSlice,
		data: []testdata{
			{
				input:  [][]uint8{{0, 255}, {0, 1, 2, 3}},
				output: [][]uint8{{0, 255}, {0, 1, 2, 3}}},
		},
	}, {
		valuer:  Int2VectorArrayFromUint16SliceSlice,
		scanner: Int2VectorArrayToUint16SliceSlice,
		data: []testdata{
			{
				input:  [][]uint16{{0, 32767}, {0, 1, 2, 3}},
				output: [][]uint16{{0, 32767}, {0, 1, 2, 3}}},
		},
	}, {
		valuer:  Int2VectorArrayFromUint32SliceSlice,
		scanner: Int2VectorArrayToUint32SliceSlice,
		data: []testdata{
			{
				input:  [][]uint32{{0, 32767}, {0, 1, 2, 3}},
				output: [][]uint32{{0, 32767}, {0, 1, 2, 3}}},
		},
	}, {
		valuer:  Int2VectorArrayFromUint64SliceSlice,
		scanner: Int2VectorArrayToUint64SliceSlice,
		data: []testdata{
			{
				input:  [][]uint64{{0, 32767}, {0, 1, 2, 3}},
				output: [][]uint64{{0, 32767}, {0, 1, 2, 3}}},
		},
	}, {
		valuer:  Int2VectorArrayFromFloat32SliceSlice,
		scanner: Int2VectorArrayToFloat32SliceSlice,
		data: []testdata{
			{
				input:  [][]float32{{-32768.0, 32767.0}, {0.0, 1.0, 2.0, 3.0}},
				output: [][]float32{{-32768.0, 32767.0}, {0.0, 1.0, 2.0, 3.0}}},
		},
	}, {
		valuer:  Int2VectorArrayFromFloat64SliceSlice,
		scanner: Int2VectorArrayToFloat64SliceSlice,
		data: []testdata{
			{
				input:  [][]float64{{-32768.0, 32767.0}, {0.0, 1.0, 2.0, 3.0}},
				output: [][]float64{{-32768.0, 32767.0}, {0.0, 1.0, 2.0, 3.0}}},
		},
	}, {
		// No valuer/scanner: raw string form passes through unchanged.
		data: []testdata{
			{
				input:  string(`{"-32768 32767","0 1 2 3"}`),
				output: string(`{"-32768 32767","0 1 2 3"}`)},
		},
	}, {
		// No valuer/scanner: raw []byte form passes through unchanged.
		data: []testdata{
			{
				input:  []byte(`{"-32768 32767","0 1 2 3"}`),
				output: []byte(`{"-32768 32767","0 1 2 3"}`)},
		},
	}}.execute(t, "int2vectorarr")
}
|
/*
* @lc app=leetcode.cn id=236 lang=golang
*
* [236] 二叉树的最近公共祖先
*/
package main
import "fmt"
// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// @lc code=start
// lowestCommonAncestor returns the lowest common ancestor of p and q in
// the tree rooted at root: the deepest node having both as descendants
// (a node counts as its own descendant).
func lowestCommonAncestor(root, p, q *TreeNode) *TreeNode {
	// A nil subtree, or one rooted at p or q, answers for itself.
	if root == nil || root == p || root == q {
		return root
	}
	inLeft := lowestCommonAncestor(root.Left, p, q)
	inRight := lowestCommonAncestor(root.Right, p, q)
	switch {
	case inLeft != nil && inRight != nil:
		// p and q are split across the two subtrees, so root is the LCA.
		return root
	case inLeft != nil:
		return inLeft
	default:
		return inRight
	}
}
// @lc code=end
// main exercises lowestCommonAncestor with nil inputs and prints the result.
func main() {
	result := lowestCommonAncestor(nil, nil, nil)
	fmt.Println(result)
}
|
package devicesearch
import (
"github.com/rakyll/portmidi"
"github.com/telyn/midi"
"github.com/telyn/midi/portbidi"
"github.com/telyn/midi/stream"
)
// SearchResult describes a located MIDI device pair: its portmidi input
// and output streams plus the midi stream wrapper.
type SearchResult struct {
	In     *portmidi.Stream
	Out    *portmidi.Stream
	Stream stream.Stream
	// Channel is presumably the MIDI channel for this device — it is not
	// used in this file's visible code; verify against callers.
	Channel byte
}

// Processor builds a midi.Processor over the result's in/out streams,
// dispatching events through the given Dispatcher.
func (res SearchResult) Processor(dispatch midi.Dispatcher) (p *midi.Processor) {
	return midi.NewProcessorWithStream(&portbidi.Stream{
		In:  res.In,
		Out: res.Out,
	}, dispatch, res.Stream)
}
|
// Copyright © 2020 Attestant Limited.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package first
import (
"context"
"time"
eth2client "github.com/attestantio/go-eth2-client"
"github.com/attestantio/go-eth2-client/spec"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/attestantio/vouch/services/metrics"
"github.com/pkg/errors"
"github.com/rs/zerolog"
zerologger "github.com/rs/zerolog/log"
)
// Service is the provider for beacon block proposals.
type Service struct {
	clientMonitor                metrics.ClientMonitor
	beaconBlockProposalProviders map[string]eth2client.BeaconBlockProposalProvider
	timeout                      time.Duration // per-request timeout for proposal fetches
}

// module-wide log.
var log zerolog.Logger
// New creates a new beacon block proposal strategy that returns the first
// response received from any configured provider.
func New(ctx context.Context, params ...Parameter) (*Service, error) {
	parameters, err := parseAndCheckParameters(params...)
	if err != nil {
		return nil, errors.Wrap(err, "problem with parameters")
	}
	// Set logging.
	log = zerologger.With().Str("strategy", "beaconblockproposal").Str("impl", "first").Logger()
	if parameters.logLevel != log.GetLevel() {
		log = log.Level(parameters.logLevel)
	}
	s := &Service{
		beaconBlockProposalProviders: parameters.beaconBlockProposalProviders,
		timeout:                      parameters.timeout,
		clientMonitor:                parameters.clientMonitor,
	}
	return s, nil
}
// BeaconBlockProposal provides the first beacon block proposal obtained
// from any of the configured beacon nodes, or an error if none responds
// before the service timeout.
func (s *Service) BeaconBlockProposal(ctx context.Context, slot phase0.Slot, randaoReveal phase0.BLSSignature, graffiti []byte) (*spec.VersionedBeaconBlock, error) {
	// We create a cancelable context with a timeout. As soon as the first
	// provider has responded we cancel the context to cancel the other requests.
	ctx, cancel := context.WithTimeout(ctx, s.timeout)
	defer cancel()
	proposalCh := make(chan *spec.VersionedBeaconBlock, 1)
	for name, provider := range s.beaconBlockProposalProviders {
		go func(ctx context.Context, name string, provider eth2client.BeaconBlockProposalProvider, ch chan *spec.VersionedBeaconBlock) {
			log := log.With().Str("provider", name).Uint64("slot", uint64(slot)).Logger()
			started := time.Now()
			proposal, err := provider.BeaconBlockProposal(ctx, slot, randaoReveal, graffiti)
			s.clientMonitor.ClientOperation(name, "beacon block proposal", err == nil, time.Since(started))
			if err != nil {
				log.Warn().Err(err).Msg("Failed to obtain beacon block proposal")
				return
			}
			if proposal == nil {
				// Bug fix: err is nil on this path, so don't log it.
				log.Warn().Msg("Returned empty beacon block proposal")
				return
			}
			log.Trace().Dur("elapsed", time.Since(started)).Msg("Obtained beacon block proposal")
			// Bug fix: extra successful providers used to block forever on
			// the size-1 channel, leaking a goroutine per slow winner.
			// Drop the proposal if one has already been delivered.
			select {
			case ch <- proposal:
			default:
			}
		}(ctx, name, provider, proposalCh)
	}
	select {
	case <-ctx.Done():
		log.Warn().Msg("Failed to obtain beacon block proposal before timeout")
		return nil, errors.New("failed to obtain beacon block proposal before timeout")
	case proposal := <-proposalCh:
		return proposal, nil
	}
}
|
package main
import (
"github.com/gin-gonic/gin"
"gitlab.com/pragmaticreviews/golang-gin-poc/service"
"gitlab.com/pragmaticreviews/golang-gin-poc/controller"
)
// Package-level service and controller singletons wired at startup.
var (
	videoService    service.VideoService       = service.New()
	videoController controller.VideoController = controller.New(videoService)
)
// main configures a gin engine with recovery and logging middleware,
// registers the /videos GET and POST endpoints, and serves on :8091.
func main() {
	server := gin.New()
	server.Use(gin.Recovery())
	server.Use(gin.Logger())
	server.GET("/videos", func(ctx *gin.Context) {
		ctx.JSON(200, videoController.FindAll())
	})
	server.POST("/videos", func(ctx *gin.Context) {
		ctx.JSON(200, videoController.Save(ctx))
	})
	// NOTE(review): Run's error return is ignored — startup failures are silent.
	server.Run(":8091")
}
package goticker
import (
"sync"
"time"
)
// Ticker periodically invokes a callback until stopped.
type Ticker struct {
	fn       func(arg interface{}) // callback invoked on every tick
	ch       chan bool             // stop signal for the tick loop
	wg       sync.WaitGroup        // lets Stop wait for the loop to exit
	interval int                   // tick interval in seconds
}
// New returns a Ticker that will call fn every interval seconds once
// Tick is started.
func New(interval int, fn func(arg interface{})) *Ticker {
	t := &Ticker{}
	t.fn = fn
	t.ch = make(chan bool, 1)
	t.interval = interval
	return t
}
// Tick runs the tick loop, invoking the callback with arg every interval
// seconds until Stop is called. It blocks the calling goroutine.
// Idiom fix: receiver renamed from the non-Go "this" to "t".
func (t *Ticker) Tick(arg interface{}) {
	t.wg.Add(1)
	ticker := time.NewTicker(time.Second * time.Duration(t.interval))
	for {
		select {
		case <-ticker.C:
			t.fn(arg)
		case <-t.ch:
			// Stop requested: release the ticker and signal completion.
			ticker.Stop()
			t.wg.Done()
			return
		}
	}
}
// Stop signals the tick loop to exit and waits until it has done so.
// Idiom fix: receiver renamed from the non-Go "this" to "t".
func (t *Ticker) Stop() {
	t.ch <- true
	t.wg.Wait()
}
|
package issue
import (
"fmt"
"time"
"williamfeng323/mooncake-duty/src/domains/project"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"williamfeng323/mooncake-duty/src/infrastructure/db"
repoimpl "williamfeng323/mooncake-duty/src/infrastructure/db/repo_impl"
validatorimpl "williamfeng323/mooncake-duty/src/infrastructure/db/validator_impl"
"williamfeng323/mooncake-duty/src/utils"
)
// Tier represents the notification tier.
type Tier int

// IssueStatus represents the possible issue statuses.
type IssueStatus int

const (
	// T1 maps to T1 members in shift
	T1 Tier = iota + 1
	// T2 maps to T2 members in shift
	T2
	// T3 maps to T3 members in shift
	T3
)

const (
	// Init issue just created
	Init IssueStatus = iota
	// Acknowledged issue acknowledged. Cannot change back from Resolved
	Acknowledged
	// Resolved issue handled. Issue can skip acknowledged status and goes to resolved directly.
	Resolved
)

// String renders the status name. Robustness fix: an out-of-range status
// previously caused an index-out-of-range panic; it now yields a
// diagnostic placeholder instead (Valid already existed for this check).
func (iStatus IssueStatus) String() string {
	if !iStatus.Valid() {
		return fmt.Sprintf("IssueStatus(%d)", int(iStatus))
	}
	statusArray := []string{"Init", "Acknowledged", "Resolved"}
	return statusArray[iStatus]
}

// Valid verifies that the value is one of the declared statuses.
func (iStatus IssueStatus) Valid() bool {
	return iStatus >= 0 && iStatus <= 2
}
// Issue describes an issue created by a triggered alert, tracking its
// lifecycle (Init -> Acknowledged -> Resolved) and per-tier notifications.
type Issue struct {
	repo          *repoimpl.IssueRepo
	db.BaseModel  `json:",inline" bson:",inline"`
	ProjectID     primitive.ObjectID `json:"projectId" bson:"projectId" required:"true"`
	IssueKey      string             `json:"issueKey" bson:"issueKey" required:"true"`
	Status        IssueStatus        `json:"status" bson:"status"`
	AcknowledgedAt *time.Time        `json:"acknowledgedAt,omitempty" bson:"acknowledgedAt,omitempty"`
	AcknowledgedBy string            `json:"acknowledgedBy,omitempty" bson:"acknowledgedBy,omitempty"`
	ResolvedAt    *time.Time         `json:"resolvedAt,omitempty" bson:"resolvedAt,omitempty"`
	ResolvedBy    string             `json:"resolvedBy,omitempty" bson:"resolvedBy,omitempty"`
	// NOTE(review): the json names below say "NotifiedAt" while the bson
	// names say "LastNotifiedAt" — confirm the mismatch is intentional.
	T1NotifiedAt        []time.Time `json:"t1NotifiedAt,omitempty" bson:"t1LastNotifiedAt,omitempty"`
	T1NotificationCount int         `json:"t1NotificationCount,omitempty" bson:"t1NotificationCount,omitempty"`
	T2NotifiedAt        []time.Time `json:"t2NotifiedAt,omitempty" bson:"t2LastNotifiedAt,omitempty"`
	T2NotificationCount int         `json:"t2NotificationCount,omitempty" bson:"t2NotificationCount,omitempty"`
	T3NotifiedAt        []time.Time `json:"t3NotifiedAt,omitempty" bson:"t3LastNotifiedAt,omitempty"`
	T3NotificationCount int         `json:"t3NotificationCount,omitempty" bson:"t3NotificationCount,omitempty"`
}
// GetNotificationTier returns the proper notifier base on the current alert
// status: once T1 has been notified proj.CallsPerTier times escalation moves
// to T2, and once T2 reaches the same threshold it moves to T3.
// Returns project.NotFoundError if the owning project cannot be found.
func (i *Issue) GetNotificationTier() (Tier, error) {
	projRepo := repoimpl.GetProjectRepo()
	findProjCtx, findProjCancel := utils.GetDefaultCtx()
	defer findProjCancel()
	// Load the owning project; its CallsPerTier setting drives escalation.
	projRst := projRepo.FindOne(findProjCtx, bson.M{"_id": i.ProjectID})
	if projRst.Err() != nil {
		return 0, project.NotFoundError{}
	}
	proj := &project.Project{}
	err := projRst.Decode(proj)
	if err != nil {
		return 0, err
	}
	callPerTiers := proj.CallsPerTier
	// Check the higher tiers first: T2 exhausted -> T3, T1 exhausted -> T2.
	if i.T2NotificationCount >= callPerTiers {
		return T3, nil
	} else if i.T1NotificationCount >= callPerTiers {
		return T2, nil
	}
	return T1, nil
}
// Create verifies the issue with the default validator and, when it passes,
// inserts it into the database. Validation failures are reported as a single
// aggregated error.
func (i *Issue) Create() error {
	if errs := validatorimpl.NewDefaultValidator().Verify(i); len(errs) > 0 {
		return fmt.Errorf("Save the issue failed due to: %v", errs)
	}
	ctx, cancel := utils.GetDefaultCtx()
	defer cancel()
	if _, err := repoimpl.GetIssueRepo().InsertOne(ctx, i); err != nil {
		return err
	}
	return nil
}
// UpdateStatus update the issue status and persists the change.
//
// Rules: the actor ("by") must be non-empty and the status may only move
// forward (Init -> Acknowledged -> Resolved). Resolving straight from Init
// also records the acknowledgement fields with the same timestamp.
func (i *Issue) UpdateStatus(status IssueStatus, by string) error {
	if len(by) == 0 {
		return fmt.Errorf("The one who update the status cannot be empty")
	}
	if i.Status > status {
		return fmt.Errorf("The status cannot set back")
	}
	timeNow := time.Now()
	switch status {
	case Acknowledged:
		i.Status = status
		i.AcknowledgedAt = &timeNow
		i.AcknowledgedBy = by
	case Resolved:
		// Resolving directly from Init implicitly acknowledges the issue.
		if i.Status == Init {
			i.AcknowledgedAt = &timeNow
			i.AcknowledgedBy = by
		}
		i.Status = status
		i.ResolvedAt = &timeNow
		i.ResolvedBy = by
	}
	i.UpdatedAt = &timeNow
	updCtx, updCtxCancel := utils.GetDefaultCtx()
	defer updCtxCancel()
	// Round-trip through BSON to build a flat $set document. These errors
	// were previously discarded; surface them to the caller instead.
	inrec, err := bson.Marshal(i)
	if err != nil {
		return err
	}
	var inInterface bson.M
	if err := bson.Unmarshal(inrec, &inInterface); err != nil {
		return err
	}
	// NOTE(review): the result of UpdateOne is still ignored here, matching
	// the original behavior — confirm whether its outcome should be checked.
	i.repo.UpdateOne(updCtx, bson.M{"_id": i.ID}, bson.M{"$set": inInterface})
	return nil
}
// IsDuplicate distinguishes if 2 issues are the same. Identity is based
// solely on IssueKey; all other fields are ignored.
func (i *Issue) IsDuplicate(iss *Issue) bool {
	return i.IssueKey == iss.IssueKey
}
// NewIssue validate projectID existence and returns issue. The returned
// issue starts in the Init status with a freshly generated ObjectID and a
// creation timestamp of now; project.NotFoundError is returned when the
// project does not exist.
func NewIssue(projectID primitive.ObjectID, key string) (*Issue, error) {
	ctx, cancel := utils.GetDefaultCtx()
	defer cancel()
	if repoimpl.GetProjectRepo().FindOne(ctx, bson.M{"_id": projectID}).Err() != nil {
		return nil, project.NotFoundError{}
	}
	issue := &Issue{
		ProjectID: projectID,
		IssueKey:  key,
		Status:    Init,
		repo:      repoimpl.GetIssueRepo(),
	}
	issue.ID = primitive.NewObjectID()
	now := time.Now()
	issue.CreatedAt = &now
	return issue, nil
}
|
package main
// ProjectIssueCreateAction handles the "project issue create" CLI action.
// NOTE(review): the body is empty — this appears to be an unimplemented
// stub; confirm whether that is intentional.
func (this *Application) ProjectIssueCreateAction(args []string) {
}
|
package dynamo
import (
"github.com/aws/aws-sdk-go/service/dynamodb"
"testing"
)
func TestIsConditionalCheckFailedError(t *testing.T) {
testException := &dynamodb.ConditionalCheckFailedException{}
result := isConditionalCheckFailedError(testException)
if !result {
t.Fail()
}
testTransactionalExceptionReasonCode := "ConditionalCheckFailed"
testTransactionalException := &dynamodb.TransactionCanceledException{
CancellationReasons: []*dynamodb.CancellationReason{
{
Code: &testTransactionalExceptionReasonCode,
},
},
}
result = isConditionalCheckFailedError(testTransactionalException)
if !result {
t.Fail()
}
}
|
package main
import (
"fmt"
"net"
)
// main is a small UDP client: it dials 127.0.0.1:8889 from 127.0.0.1:8888,
// sends one greeting and then prints every datagram it receives, forever.
func main() {
	var msg = make([]byte, 1000)
	localAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:8888")
	if err != nil {
		fmt.Println(err)
		return // address unusable; continuing would pass nil to DialUDP
	}
	remoterAddr, err1 := net.ResolveUDPAddr("udp", "127.0.0.1:8889")
	if err1 != nil {
		fmt.Println(err1)
		return
	}
	fmt.Println("wait dialudp")
	con, err2 := net.DialUDP("udp", localAddr, remoterAddr)
	if err2 != nil {
		fmt.Printf("dial failed, err2 is %v", err2)
		return // con is nil here; Write/ReadFromUDP would panic
	}
	defer con.Close()
	fmt.Printf("dialudp success \r\n")
	nWrite, err := con.Write([]byte("hello,server"))
	if err != nil {
		// The original call was fmt.Printf("write failed.", nWrite): no verb
		// for the argument and the error itself was dropped (go vet error).
		fmt.Printf("write failed after %d bytes: %v\n", nWrite, err)
		return
	}
	for {
		nRead, addr, err3 := con.ReadFromUDP(msg)
		if err3 != nil {
			fmt.Println("read failed.", addr, err3)
			continue // do not process a failed read; msg content is stale
		}
		var Readmsg = make([]byte, nRead)
		copy(Readmsg, msg)
		fmt.Println(Readmsg)
		fmt.Println(string(Readmsg))
	}
}
|
package metricRouter
import (
"sync"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
agg "github.com/ClusterCockpit/cc-metric-collector/internal/metricAggregator"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
mct "github.com/ClusterCockpit/cc-metric-collector/pkg/multiChanTicker"
)
// metricCachePeriod holds the metrics collected during one cache interval,
// together with the interval's time bounds and bookkeeping counters.
type metricCachePeriod struct {
	startstamp time.Time // set when this period becomes current (rotate in Start)
	stopstamp time.Time // updated on rotation and on every Add
	numMetrics int // logical count of metrics in this period
	sizeMetrics int // capacity already materialized in the metrics slice
	metrics []lp.CCMetric
}

// Metric cache data structure
type metricCache struct {
	numPeriods int // number of addressable periods (Init allocates numPeriods+1 slots)
	curPeriod int // index of the period currently receiving metrics
	lock sync.Mutex // guards curPeriod and the interval slots
	intervals []*metricCachePeriod
	wg *sync.WaitGroup // shared WaitGroup tracking the Start goroutine
	ticker mct.MultiChanTicker
	tickchan chan time.Time // receives rotation ticks from the shared ticker
	done chan bool // closed/signaled to stop the Start goroutine
	output chan lp.CCMetric // sink handed to the aggregation engine
	aggEngine agg.MetricAggregator
}

// MetricCache is the public interface of the rotating metric cache.
type MetricCache interface {
	Init(output chan lp.CCMetric, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) error
	Start()
	Add(metric lp.CCMetric)
	GetPeriod(index int) (time.Time, time.Time, []lp.CCMetric)
	AddAggregation(name, function, condition string, tags, meta map[string]string) error
	DeleteAggregation(name string) error
	Close()
}
// Init wires the cache to its collaborators, allocates numPeriods+1 empty
// interval slots and creates the aggregation engine. Returns the engine's
// construction error, if any.
func (c *metricCache) Init(output chan lp.CCMetric, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) error {
	c.done = make(chan bool)
	c.wg = wg
	c.ticker = ticker
	c.numPeriods = numPeriods
	c.output = output
	c.intervals = make([]*metricCachePeriod, 0, numPeriods+1)
	for i := 0; i <= numPeriods; i++ {
		// Counters start at zero (Go zero values); only the slice needs setup.
		c.intervals = append(c.intervals, &metricCachePeriod{
			metrics: make([]lp.CCMetric, 0),
		})
	}
	// Create a new aggregation engine. No separate goroutine at the moment;
	// the code is executed by the MetricCache goroutine.
	var err error
	c.aggEngine, err = agg.NewAggregator(c.output)
	if err != nil {
		cclog.ComponentError("MetricCache", "Cannot create aggregator")
		return err
	}
	return nil
}
// Start starts the metric cache: it registers a tick channel with the shared
// ticker and launches a goroutine that rotates the cache on every tick and
// feeds the just-closed period into the aggregation engine.
func (c *metricCache) Start() {
	c.tickchan = make(chan time.Time)
	c.ticker.AddChannel(c.tickchan)
	// Router cache is done
	done := func() {
		cclog.ComponentDebug("MetricCache", "DONE")
		close(c.done)
	}
	// Rotate cache interval: advance curPeriod (wrapping at numPeriods),
	// reset the closed period's counter and stamp both periods' boundaries.
	// NOTE(review): Init allocates numPeriods+1 slots but rotation wraps at
	// numPeriods, so the last slot is never selected here — confirm intent.
	rotate := func(timestamp time.Time) int {
		oldPeriod := c.curPeriod
		c.curPeriod = oldPeriod + 1
		if c.curPeriod >= c.numPeriods {
			c.curPeriod = 0
		}
		c.intervals[oldPeriod].numMetrics = 0
		c.intervals[oldPeriod].stopstamp = timestamp
		c.intervals[c.curPeriod].startstamp = timestamp
		c.intervals[c.curPeriod].stopstamp = timestamp
		return oldPeriod
	}
	c.wg.Add(1)
	go func() {
		defer c.wg.Done()
		for {
			select {
			case <-c.done:
				done()
				return
			case tick := <-c.tickchan:
				// Rotate under the lock, then evaluate outside it so the
				// aggregation engine does not block concurrent Add calls.
				c.lock.Lock()
				old := rotate(tick)
				// Get the last period and evaluate aggregation metrics.
				// NOTE(review): GetPeriod documents its parameter as an
				// offset from the current period (0=current, 1=previous),
				// but `old` is the absolute slot index from rotate —
				// confirm this yields the intended period.
				starttime, endtime, metrics := c.GetPeriod(old)
				c.lock.Unlock()
				if len(metrics) > 0 {
					// NOTE(review): Eval's return value is discarded — confirm
					// errors from the aggregation engine can be ignored here.
					c.aggEngine.Eval(starttime, endtime, metrics)
				} else {
					// This message is also printed in the first interval after startup
					cclog.ComponentDebug("MetricCache", "EMPTY INTERVAL?")
				}
			}
		}
	}()
	cclog.ComponentDebug("MetricCache", "START")
}
// Add a metric to the cache. The interval is defined by the global timer
// (rotate() in Start()). The intervals list is used as round-robin buffer and
// the metric list grows dynamically to avoid reallocations.
//
// Fix: the original read c.curPeriod before acquiring c.lock, racing with
// rotate() in Start's goroutine, which mutates curPeriod under the lock.
// The bounds check and all slot accesses now happen inside the lock.
func (c *metricCache) Add(metric lp.CCMetric) {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.curPeriod < 0 || c.curPeriod >= c.numPeriods {
		return
	}
	p := c.intervals[c.curPeriod]
	if p.numMetrics < p.sizeMetrics {
		// Reuse an already-materialized slot.
		p.metrics[p.numMetrics] = metric
	} else {
		// Grow the slice; track the new capacity in sizeMetrics.
		p.metrics = append(p.metrics, metric)
		p.sizeMetrics = p.sizeMetrics + 1
	}
	p.numMetrics = p.numMetrics + 1
	p.stopstamp = metric.Time()
}
// AddAggregation registers a named aggregation (function plus condition, with
// output tags/meta) in the underlying aggregation engine.
func (c *metricCache) AddAggregation(name, function, condition string, tags, meta map[string]string) error {
	return c.aggEngine.AddAggregation(name, function, condition, tags, meta)
}

// DeleteAggregation removes a previously registered aggregation by name.
func (c *metricCache) DeleteAggregation(name string) error {
	return c.aggEngine.DeleteAggregation(name)
}
// Get all metrics of a interval. The index is the difference to the current interval, so index=0
// is the current one, index=1 the last interval and so on. Returns an empty array if a wrong index
// is given (negative index, index larger than configured number of total intervals, ...)
func (c *metricCache) GetPeriod(index int) (time.Time, time.Time, []lp.CCMetric) {
	var start time.Time = time.Now()
	var stop time.Time = time.Now()
	var metrics []lp.CCMetric
	if index >= 0 && index < c.numPeriods {
		pindex := c.curPeriod - index
		if pindex < 0 {
			// Wrap around the ring buffer: pindex is negative here, so the
			// wrapped slot is numPeriods + pindex. The previous expression
			// (numPeriods - pindex) always produced a value >= numPeriods+1,
			// failing the bounds check below and returning an empty result
			// for every wrapped lookup.
			pindex = c.numPeriods + pindex
		}
		if pindex >= 0 && pindex < c.numPeriods {
			start = c.intervals[pindex].startstamp
			stop = c.intervals[pindex].stopstamp
			metrics = c.intervals[pindex].metrics
		} else {
			metrics = make([]lp.CCMetric, 0)
		}
	} else {
		metrics = make([]lp.CCMetric, 0)
	}
	return start, stop, metrics
}
// Close finishes / stops the metric cache. It signals the Start goroutine,
// which logs, closes c.done and exits; Close itself does not wait for the
// goroutine (callers can wait on the shared WaitGroup).
func (c *metricCache) Close() {
	cclog.ComponentDebug("MetricCache", "CLOSE")
	c.done <- true
}
// NewCache builds a metric cache with numPeriods rotating intervals and
// returns it behind the MetricCache interface. Initialization failures are
// returned with a nil cache.
func NewCache(output chan lp.CCMetric, ticker mct.MultiChanTicker, wg *sync.WaitGroup, numPeriods int) (MetricCache, error) {
	cache := new(metricCache)
	if err := cache.Init(output, ticker, wg, numPeriods); err != nil {
		return nil, err
	}
	return cache, nil
}
|
// Copyright 2022 PingCAP, Inc. Licensed under Apache-2.0.
package spans
import (
"bytes"
"fmt"
"github.com/google/btree"
"github.com/pingcap/tidb/br/pkg/logutil"
"github.com/pingcap/tidb/br/pkg/utils"
"github.com/pingcap/tidb/kv"
)
// Value is the value type of stored in the span tree.
type Value = uint64

// join finds the upper bound of two values.
func join(a, b Value) Value {
	if b > a {
		return b
	}
	return a
}
// Span is the type of an adjacent sub key space.
type Span = kv.KeyRange

// Valued is span binding to a value, which is the entry type of span tree.
type Valued struct {
	Key Span
	Value Value
}

// String renders the range (via logutil.StringifyRange) together with its value.
func (r Valued) String() string {
	return fmt.Sprintf("(%s, %d)", logutil.StringifyRange(r.Key), r.Value)
}

// Less orders Valued entries by their start key; this is the ordering that
// keeps the btree sorted over the non-overlapping ranges.
func (r Valued) Less(other btree.Item) bool {
	return bytes.Compare(r.Key.StartKey, other.(Valued).Key.StartKey) < 0
}

// ValuedFull represents a set of valued ranges, which doesn't overlap and union of them all is the full key space.
type ValuedFull struct {
	inner *btree.BTree
}
// NewFullWith creates a set of a subset of spans. The input spans are first
// collapsed into non-overlapping ranges, each of which is stored with the
// initial value.
func NewFullWith(initSpans []Span, init Value) *ValuedFull {
	tree := btree.New(16)
	collapsed := Collapse(len(initSpans), func(i int) Span { return initSpans[i] })
	for _, sp := range collapsed {
		tree.ReplaceOrInsert(Valued{Key: sp, Value: init})
	}
	return &ValuedFull{inner: tree}
}
// Merge merges a new interval into the span set. The value of overlapped
// part with other spans would be "merged" by the `join` function.
// An example:
/*
|___________________________________________________________________________|
^-----------------^-----------------^-----------------^---------------------^
| c = 42 | c = 43 | c = 45 | c = 41 |
^--------------------------^
merge(| c = 44 |)
Would Give:
|___________________________________________________________________________|
^-----------------^----^------------^-------------^---^---------------------^
| c = 42 | 43 | c = 44 | c = 45 | c = 41 |
|-------------|
Unchanged, because 44 < 45.
*/
// Merge merges a new interval into the span set. The value of every part that
// overlaps an existing span is combined with that span's value via `join`;
// non-overlapping remainders keep their original values.
func (f *ValuedFull) Merge(val Valued) {
	hits := make([]Valued, 0, 16)
	f.overlapped(val.Key, &hits)
	f.mergeWithOverlap(val, hits, nil)
}
// Traverse traverses all ranges by order, calling m for each one; traversal
// stops early when m returns false.
func (f *ValuedFull) Traverse(m func(Valued) bool) {
	visit := func(item btree.Item) bool {
		return m(item.(Valued))
	}
	f.inner.Ascend(visit)
}
// mergeWithOverlap re-inserts the given overlapped ranges after merging val
// into them: the portions that overlap val take join(val.Value, old value),
// while the non-overlapping left/right remainders keep their original
// values. Adjacent results with equal values are coalesced before insertion.
// When newItems is non-nil, every inserted range is also appended to it.
func (f *ValuedFull) mergeWithOverlap(val Valued, overlapped []Valued, newItems *[]Valued) {
	// There isn't any range overlaps with the input range, perhaps the input range is empty.
	// do nothing for this case.
	if len(overlapped) == 0 {
		return
	}
	// Remove all overlapped ranges; they are re-inserted in merged form below.
	for _, r := range overlapped {
		f.inner.Delete(r)
		// Assert All overlapped ranges are deleted.
	}
	var (
		// collected accumulates the range currently being coalesced;
		// initialized tracks whether it holds anything yet.
		initialized = false
		collected Valued
		// rightTrail is the part of the rightmost overlapped range that
		// extends past val and must keep its original value.
		rightTrail *Valued
		// flushCollected inserts the accumulated range into the tree (and
		// records it in newItems when requested).
		flushCollected = func() {
			if initialized {
				f.inner.ReplaceOrInsert(collected)
				if newItems != nil {
					*newItems = append(*newItems, collected)
				}
			}
		}
		// emitToCollected feeds one range into the coalescer. standalone
		// ranges keep their value; others are joined with val.Value.
		emitToCollected = func(rng Valued, standalone bool) {
			merged := rng.Value
			if !standalone {
				merged = join(val.Value, rng.Value)
			}
			if !initialized {
				collected = rng
				collected.Value = merged
				initialized = true
				return
			}
			// Extend the current run when the value matches and the ranges
			// are exactly adjacent; otherwise flush and start a new run.
			if merged == collected.Value && utils.CompareBytesExt(collected.Key.EndKey, true, rng.Key.StartKey, false) == 0 {
				collected.Key.EndKey = rng.Key.EndKey
			} else {
				flushCollected()
				collected = Valued{
					Key: rng.Key,
					Value: merged,
				}
			}
		}
	)
	// Split off the part of the leftmost range that precedes val; it keeps
	// its original value (standalone), and the overlap is trimmed to val.
	leftmost := overlapped[0]
	if bytes.Compare(leftmost.Key.StartKey, val.Key.StartKey) < 0 {
		emitToCollected(Valued{
			Key: Span{StartKey: leftmost.Key.StartKey, EndKey: val.Key.StartKey},
			Value: leftmost.Value,
		}, true)
		overlapped[0].Key.StartKey = val.Key.StartKey
	}
	// Likewise split off the part of the rightmost range past val's end, but
	// emit it only after the merged middle to preserve ordering.
	rightmost := overlapped[len(overlapped)-1]
	if utils.CompareBytesExt(rightmost.Key.EndKey, true, val.Key.EndKey, true) > 0 {
		rightTrail = &Valued{
			Key: Span{StartKey: val.Key.EndKey, EndKey: rightmost.Key.EndKey},
			Value: rightmost.Value,
		}
		overlapped[len(overlapped)-1].Key.EndKey = val.Key.EndKey
	}
	// The (now trimmed) overlapped ranges all intersect val: join values.
	for _, rng := range overlapped {
		emitToCollected(rng, false)
	}
	if rightTrail != nil {
		emitToCollected(*rightTrail, true)
	}
	flushCollected()
}
// overlapped inserts the overlapped ranges of the span into the `result`
// slice, in ascending start-key order.
func (f *ValuedFull) overlapped(k Span, result *[]Valued) {
	var (
		first Span
		hasFirst bool
	)
	// Firstly, let's find whether there is a overlapped region with less start key.
	f.inner.DescendLessOrEqual(Valued{Key: k}, func(item btree.Item) bool {
		first = item.(Valued).Key
		hasFirst = true
		return false // only the immediate predecessor is needed
	})
	// If the predecessor doesn't overlap k, start scanning from k itself.
	if !hasFirst || !Overlaps(first, k) {
		first = k
	}
	// Collect ascending ranges until the first one that no longer overlaps k.
	f.inner.AscendGreaterOrEqual(Valued{Key: first}, func(item btree.Item) bool {
		r := item.(Valued)
		if !Overlaps(r.Key, k) {
			return false
		}
		*result = append(*result, r)
		return true
	})
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.