text stringlengths 11 4.05M |
|---|
package messenger
const (
	// PersonasPath is the Messenger Platform API path segment for the
	// personas endpoint. Declared untyped so it is assignable to any
	// string-kinded type (the explicit `string` type was redundant).
	PersonasPath = "personas"
)
// Persona represents a Messenger persona: a display name and an avatar
// shown to users when this persona sends messages.
type Persona struct {
	// Name is the persona's display name.
	Name string `json:"name"`
	// ProfilePictureURL is the URL of the persona's avatar image.
	ProfilePictureURL string `json:"profile_picture_url"`
}
// PersonaResponse represents the response returned when creating a persona.
type PersonaResponse struct {
	// ID is the identifier assigned to the newly created persona.
	ID string `json:"id"`
}
// ListOfPersonaResponse represents the response returned when listing personas.
type ListOfPersonaResponse struct {
	// Data holds the personas returned by the list call.
	Data []Persona `json:"data"`
}
// DeletePersonaResponse represents the response returned when deleting a persona.
type DeletePersonaResponse struct {
	// Success reports whether the deletion was carried out.
	Success bool `json:"success"`
}
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package codec
import (
"encoding/binary"
"math"
"github.com/pingcap/errors"
)
// signMask selects the sign bit of a 64-bit value. Flipping this bit maps
// int64 values onto uint64 so that unsigned comparison of the results
// matches signed comparison of the originals.
const signMask uint64 = 0x8000000000000000

// EncodeIntToCmpUint converts v into a uint64 whose unsigned ordering is
// the same as the signed ordering of v.
func EncodeIntToCmpUint(v int64) uint64 {
	u := uint64(v)
	return u ^ signMask
}

// DecodeCmpUintToInt reverses EncodeIntToCmpUint, recovering the original
// signed value from u.
func DecodeCmpUintToInt(u uint64) int64 {
	return int64(u ^ signMask)
}
// EncodeInt appends the 8-byte, sign-adjusted big-endian encoding of v to b
// and returns the extended slice. The encoding preserves signed ordering
// under bytewise comparison.
func EncodeInt(b []byte, v int64) []byte {
	var scratch [8]byte
	binary.BigEndian.PutUint64(scratch[:], EncodeIntToCmpUint(v))
	return append(b, scratch[:]...)
}
// EncodeIntDesc appends the complemented, sign-adjusted big-endian encoding
// of v to b and returns the extended slice. Bytewise comparison of the
// result yields descending signed order.
func EncodeIntDesc(b []byte, v int64) []byte {
	var scratch [8]byte
	binary.BigEndian.PutUint64(scratch[:], ^EncodeIntToCmpUint(v))
	return append(b, scratch[:]...)
}
// DecodeInt decodes a value written by EncodeInt, returning the remaining
// bytes and the decoded value. It fails if fewer than 8 bytes are present.
func DecodeInt(b []byte) ([]byte, int64, error) {
	if len(b) < 8 {
		return nil, 0, errors.New("insufficient bytes to decode value")
	}
	v := DecodeCmpUintToInt(binary.BigEndian.Uint64(b))
	return b[8:], v, nil
}
// DecodeIntDesc decodes a value written by EncodeIntDesc, returning the
// remaining bytes and the decoded value. It fails if fewer than 8 bytes
// are present.
func DecodeIntDesc(b []byte) ([]byte, int64, error) {
	if len(b) < 8 {
		return nil, 0, errors.New("insufficient bytes to decode value")
	}
	v := DecodeCmpUintToInt(^binary.BigEndian.Uint64(b))
	return b[8:], v, nil
}
// EncodeUint appends the 8-byte big-endian encoding of v to b and returns
// the extended slice. Big-endian layout makes the bytes sort in the same
// (ascending) order as the values.
func EncodeUint(b []byte, v uint64) []byte {
	var scratch [8]byte
	binary.BigEndian.PutUint64(scratch[:], v)
	return append(b, scratch[:]...)
}
// EncodeUintDesc appends the bitwise complement of v, big-endian, to b and
// returns the extended slice. Bytewise comparison of the result yields
// descending order of the original values.
func EncodeUintDesc(b []byte, v uint64) []byte {
	var scratch [8]byte
	binary.BigEndian.PutUint64(scratch[:], ^v)
	return append(b, scratch[:]...)
}
// DecodeUint decodes a value written by EncodeUint, returning the remaining
// bytes and the decoded value. It fails if fewer than 8 bytes are present.
func DecodeUint(b []byte) ([]byte, uint64, error) {
	if len(b) < 8 {
		return nil, 0, errors.New("insufficient bytes to decode value")
	}
	return b[8:], binary.BigEndian.Uint64(b), nil
}
// DecodeUintDesc decodes a value written by EncodeUintDesc, returning the
// remaining bytes and the decoded value. It fails if fewer than 8 bytes
// are present.
func DecodeUintDesc(b []byte) ([]byte, uint64, error) {
	if len(b) < 8 {
		return nil, 0, errors.New("insufficient bytes to decode value")
	}
	// Complement undoes the descending-order transform applied on encode.
	return b[8:], ^binary.BigEndian.Uint64(b), nil
}
// EncodeVarint appends the zig-zag varint encoding of v to b and returns
// the extended slice. The result is compact but NOT memcomparable.
func EncodeVarint(b []byte, v int64) []byte {
	var scratch [binary.MaxVarintLen64]byte
	n := binary.PutVarint(scratch[:], v)
	return append(b, scratch[:n]...)
}
// DecodeVarint decodes a value written by EncodeVarint, returning the
// remaining bytes and the decoded value. An overlong encoding or a
// truncated buffer yields an error.
func DecodeVarint(b []byte) ([]byte, int64, error) {
	v, n := binary.Varint(b)
	switch {
	case n > 0:
		return b[n:], v, nil
	case n < 0:
		return nil, 0, errors.New("value larger than 64 bits")
	default:
		return nil, 0, errors.New("insufficient bytes to decode value")
	}
}
// EncodeUvarint appends the unsigned varint encoding of v to b and returns
// the extended slice. The result is compact but NOT memcomparable.
func EncodeUvarint(b []byte, v uint64) []byte {
	var scratch [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(scratch[:], v)
	return append(b, scratch[:n]...)
}
// DecodeUvarint decodes a value written by EncodeUvarint, returning the
// remaining bytes and the decoded value. An overlong encoding or a
// truncated buffer yields an error.
func DecodeUvarint(b []byte) ([]byte, uint64, error) {
	v, n := binary.Uvarint(b)
	switch {
	case n > 0:
		return b[n:], v, nil
	case n < 0:
		return nil, 0, errors.New("value larger than 64 bits")
	default:
		return nil, 0, errors.New("insufficient bytes to decode value")
	}
}
const (
	// negativeTagEnd bounds the tag bytes for negative values: a negative
	// value of byte-length L is tagged (negativeTagEnd - L), so longer
	// (more negative) values get smaller tags and sort first.
	negativeTagEnd = 8 // negative tag is (negativeTagEnd - length).
	// positiveTagStart bounds the tag bytes for multi-byte positive values:
	// a positive value of byte-length L is tagged (positiveTagStart + L),
	// so longer (larger) values get bigger tags and sort last.
	positiveTagStart = 0xff - 8 // Positive tag is (positiveTagStart + length).
)
// EncodeComparableVarint encodes an int64 to a mem-comparable bytes.
func EncodeComparableVarint(b []byte, v int64) []byte {
if v < 0 {
// All negative value has a tag byte prefix (negativeTagEnd - length).
// Smaller negative value encodes to more bytes, has smaller tag.
if v >= -0xff {
return append(b, negativeTagEnd-1, byte(v))
} else if v >= -0xffff {
return append(b, negativeTagEnd-2, byte(v>>8), byte(v))
} else if v >= -0xffffff {
return append(b, negativeTagEnd-3, byte(v>>16), byte(v>>8), byte(v))
} else if v >= -0xffffffff {
return append(b, negativeTagEnd-4, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
} else if v >= -0xffffffffff {
return append(b, negativeTagEnd-5, byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
} else if v >= -0xffffffffffff {
return append(b, negativeTagEnd-6, byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8),
byte(v))
} else if v >= -0xffffffffffffff {
return append(b, negativeTagEnd-7, byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16),
byte(v>>8), byte(v))
}
return append(b, negativeTagEnd-8, byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24),
byte(v>>16), byte(v>>8), byte(v))
}
return EncodeComparableUvarint(b, uint64(v))
}
// EncodeComparableUvarint appends a mem-comparable variable-length encoding
// of v to b and returns the extended slice.
//
// First-byte layout: [0, negativeTagEnd) is reserved for negative tags and
// (positiveTagStart, 0xff] for multi-byte positive tags, leaving
// [negativeTagEnd, positiveTagStart] to hold small values (0..239) inline.
// Larger values are written as a tag byte (positiveTagStart + length)
// followed by the significant bytes, big-endian, so longer values sort last.
func EncodeComparableUvarint(b []byte, v uint64) []byte {
	switch {
	case v <= positiveTagStart-negativeTagEnd:
		// Small enough to live in the tag byte itself.
		return append(b, byte(v)+negativeTagEnd)
	case v <= 0xff:
		return append(b, positiveTagStart+1, byte(v))
	case v <= 0xffff:
		return append(b, positiveTagStart+2, byte(v>>8), byte(v))
	case v <= 0xffffff:
		return append(b, positiveTagStart+3, byte(v>>16), byte(v>>8), byte(v))
	case v <= 0xffffffff:
		return append(b, positiveTagStart+4, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
	case v <= 0xffffffffff:
		return append(b, positiveTagStart+5, byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
	case v <= 0xffffffffffff:
		return append(b, positiveTagStart+6, byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8),
			byte(v))
	case v <= 0xffffffffffffff:
		return append(b, positiveTagStart+7, byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16),
			byte(v>>8), byte(v))
	default:
		return append(b, positiveTagStart+8, byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24),
			byte(v>>16), byte(v>>8), byte(v))
	}
}
var (
	// errDecodeInsufficient is returned when the input buffer is shorter
	// than the encoding requires.
	errDecodeInsufficient = errors.New("insufficient bytes to decode value")
	// errDecodeInvalid is returned when the input cannot be a valid
	// encoding for the requested type.
	errDecodeInvalid = errors.New("invalid bytes to decode value")
)
// DecodeComparableUvarint decodes a value written by EncodeComparableUvarint,
// returning the remaining bytes and the decoded value. A negative-range tag
// byte is invalid here; a truncated buffer is an error.
func DecodeComparableUvarint(b []byte) ([]byte, uint64, error) {
	if len(b) == 0 {
		return nil, 0, errDecodeInsufficient
	}
	tag, rest := b[0], b[1:]
	if tag < negativeTagEnd {
		// Negative tags cannot appear in an unsigned encoding.
		return nil, 0, errors.Trace(errDecodeInvalid)
	}
	if tag <= positiveTagStart {
		// Small value stored inline in the tag byte.
		return rest, uint64(tag) - negativeTagEnd, nil
	}
	n := int(tag) - positiveTagStart
	if len(rest) < n {
		return nil, 0, errors.Trace(errDecodeInsufficient)
	}
	var v uint64
	for _, c := range rest[:n] {
		v = v<<8 | uint64(c)
	}
	return rest[n:], v, nil
}
// DecodeComparableVarint decodes a value written by EncodeComparableVarint,
// returning the remaining bytes and the decoded value. It validates that
// multi-byte encodings actually fall outside int64's single-byte/sign
// boundaries, returning errDecodeInvalid otherwise.
func DecodeComparableVarint(b []byte) ([]byte, int64, error) {
	if len(b) == 0 {
		return nil, 0, errors.Trace(errDecodeInsufficient)
	}
	first := b[0]
	if first >= negativeTagEnd && first <= positiveTagStart {
		// Small non-negative value stored inline in the tag byte.
		// BUG FIX: the original returned b unchanged here, leaving the
		// consumed tag byte in the "leftover" slice; advance past it as
		// DecodeComparableUvarint does.
		return b[1:], int64(first) - negativeTagEnd, nil
	}
	b = b[1:]
	var length int
	var v uint64
	if first < negativeTagEnd {
		length = negativeTagEnd - int(first)
		v = math.MaxUint64 // negative value has all bits on by default.
	} else {
		length = int(first) - positiveTagStart
	}
	if len(b) < length {
		return nil, 0, errors.Trace(errDecodeInsufficient)
	}
	for _, c := range b[:length] {
		v = (v << 8) | uint64(c)
	}
	if first > positiveTagStart && v > math.MaxInt64 {
		// Positive encoding overflowing int64.
		return nil, 0, errors.Trace(errDecodeInvalid)
	} else if first < negativeTagEnd && v <= math.MaxInt64 {
		// Negative encoding whose bits do not describe a negative int64.
		return nil, 0, errors.Trace(errDecodeInvalid)
	}
	return b[length:], int64(v), nil
}
|
package api
import (
"errors"
"fmt"
"sync"
"github.com/gholt/store"
"github.com/pandemicsyn/ftls"
pb "github.com/pandemicsyn/oort/api/groupproto"
"github.com/pandemicsyn/oort/api/proto"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
// groupStore is a grpc-backed client implementation of store.GroupStore.
// Each operation kind (lookup, read, write, delete, lookup-group,
// read-group) has a pending-request channel serviced by a dedicated
// stream-handler goroutine, plus free-lists of reusable request and
// response objects whose capacity bounds concurrency.
type groupStore struct {
	lock sync.Mutex // guards conn/client setup and teardown
	addr string
	ftlsc *ftls.Config
	opts []grpc.DialOption
	conn *grpc.ClientConn
	client pb.GroupStoreClient
	// handlersDoneChan is closed by Close to stop all handler goroutines.
	handlersDoneChan chan struct{}
	pendingLookupReqChan chan *asyncGroupLookupRequest
	freeLookupReqChan chan *asyncGroupLookupRequest
	freeLookupResChan chan *asyncGroupLookupResponse
	pendingReadReqChan chan *asyncGroupReadRequest
	freeReadReqChan chan *asyncGroupReadRequest
	freeReadResChan chan *asyncGroupReadResponse
	pendingWriteReqChan chan *asyncGroupWriteRequest
	freeWriteReqChan chan *asyncGroupWriteRequest
	freeWriteResChan chan *asyncGroupWriteResponse
	pendingDeleteReqChan chan *asyncGroupDeleteRequest
	freeDeleteReqChan chan *asyncGroupDeleteRequest
	freeDeleteResChan chan *asyncGroupDeleteResponse
	pendingLookupGroupReqChan chan *asyncGroupLookupGroupRequest
	freeLookupGroupReqChan chan *asyncGroupLookupGroupRequest
	freeLookupGroupResChan chan *asyncGroupLookupGroupResponse
	pendingReadGroupReqChan chan *asyncGroupReadGroupRequest
	freeReadGroupReqChan chan *asyncGroupReadGroupRequest
	freeReadGroupResChan chan *asyncGroupReadGroupResponse
}
// NewGroupStore creates a GroupStore connection via grpc to the given
// address. concurrency sizes every channel and free-list, bounding the
// number of in-flight requests per operation kind. The grpc connection
// itself is established lazily (on first use or via Startup), but the
// six stream-handler goroutines are started here.
func NewGroupStore(addr string, concurrency int, ftlsConfig *ftls.Config, opts ...grpc.DialOption) (store.GroupStore, error) {
	stor := &groupStore{
		addr: addr,
		ftlsc: ftlsConfig,
		opts: opts,
		handlersDoneChan: make(chan struct{}),
	}
	// Lookup: pre-fill the free-lists so requests never allocate per call.
	stor.pendingLookupReqChan = make(chan *asyncGroupLookupRequest, concurrency)
	stor.freeLookupReqChan = make(chan *asyncGroupLookupRequest, concurrency)
	stor.freeLookupResChan = make(chan *asyncGroupLookupResponse, concurrency)
	for i := 0; i < cap(stor.freeLookupReqChan); i++ {
		stor.freeLookupReqChan <- &asyncGroupLookupRequest{resChan: make(chan *asyncGroupLookupResponse, 1)}
	}
	for i := 0; i < cap(stor.freeLookupResChan); i++ {
		stor.freeLookupResChan <- &asyncGroupLookupResponse{}
	}
	go stor.handleLookupStream()
	// Read.
	stor.pendingReadReqChan = make(chan *asyncGroupReadRequest, concurrency)
	stor.freeReadReqChan = make(chan *asyncGroupReadRequest, concurrency)
	stor.freeReadResChan = make(chan *asyncGroupReadResponse, concurrency)
	for i := 0; i < cap(stor.freeReadReqChan); i++ {
		stor.freeReadReqChan <- &asyncGroupReadRequest{resChan: make(chan *asyncGroupReadResponse, 1)}
	}
	for i := 0; i < cap(stor.freeReadResChan); i++ {
		stor.freeReadResChan <- &asyncGroupReadResponse{}
	}
	go stor.handleReadStream()
	// Write.
	stor.pendingWriteReqChan = make(chan *asyncGroupWriteRequest, concurrency)
	stor.freeWriteReqChan = make(chan *asyncGroupWriteRequest, concurrency)
	stor.freeWriteResChan = make(chan *asyncGroupWriteResponse, concurrency)
	for i := 0; i < cap(stor.freeWriteReqChan); i++ {
		stor.freeWriteReqChan <- &asyncGroupWriteRequest{resChan: make(chan *asyncGroupWriteResponse, 1)}
	}
	for i := 0; i < cap(stor.freeWriteResChan); i++ {
		stor.freeWriteResChan <- &asyncGroupWriteResponse{}
	}
	go stor.handleWriteStream()
	// Delete.
	stor.pendingDeleteReqChan = make(chan *asyncGroupDeleteRequest, concurrency)
	stor.freeDeleteReqChan = make(chan *asyncGroupDeleteRequest, concurrency)
	stor.freeDeleteResChan = make(chan *asyncGroupDeleteResponse, concurrency)
	for i := 0; i < cap(stor.freeDeleteReqChan); i++ {
		stor.freeDeleteReqChan <- &asyncGroupDeleteRequest{resChan: make(chan *asyncGroupDeleteResponse, 1)}
	}
	for i := 0; i < cap(stor.freeDeleteResChan); i++ {
		stor.freeDeleteResChan <- &asyncGroupDeleteResponse{}
	}
	go stor.handleDeleteStream()
	// LookupGroup.
	stor.pendingLookupGroupReqChan = make(chan *asyncGroupLookupGroupRequest, concurrency)
	stor.freeLookupGroupReqChan = make(chan *asyncGroupLookupGroupRequest, concurrency)
	stor.freeLookupGroupResChan = make(chan *asyncGroupLookupGroupResponse, concurrency)
	for i := 0; i < cap(stor.freeLookupGroupReqChan); i++ {
		stor.freeLookupGroupReqChan <- &asyncGroupLookupGroupRequest{resChan: make(chan *asyncGroupLookupGroupResponse, 1)}
	}
	for i := 0; i < cap(stor.freeLookupGroupResChan); i++ {
		stor.freeLookupGroupResChan <- &asyncGroupLookupGroupResponse{}
	}
	go stor.handleLookupGroupStream()
	// ReadGroup.
	stor.pendingReadGroupReqChan = make(chan *asyncGroupReadGroupRequest, concurrency)
	stor.freeReadGroupReqChan = make(chan *asyncGroupReadGroupRequest, concurrency)
	stor.freeReadGroupResChan = make(chan *asyncGroupReadGroupResponse, concurrency)
	for i := 0; i < cap(stor.freeReadGroupReqChan); i++ {
		stor.freeReadGroupReqChan <- &asyncGroupReadGroupRequest{resChan: make(chan *asyncGroupReadGroupResponse, 1)}
	}
	for i := 0; i < cap(stor.freeReadGroupResChan); i++ {
		stor.freeReadGroupResChan <- &asyncGroupReadGroupResponse{}
	}
	go stor.handleReadGroupStream()
	return stor, nil
}
// Startup establishes the grpc connection (a no-op if already connected).
func (stor *groupStore) Startup(ctx context.Context) error {
	stor.lock.Lock()
	defer stor.lock.Unlock()
	return stor.startup()
}
// startup dials the grpc connection and creates the client stub. It is a
// no-op if a connection already exists. Callers must hold stor.lock.
// (The redundant `var err error` preceding the `:=` declaration below was
// removed; `creds, err := …` already declares err.)
func (stor *groupStore) startup() error {
	if stor.conn != nil {
		return nil
	}
	creds, err := ftls.NewGRPCClientDialOpt(stor.ftlsc)
	if err != nil {
		stor.conn = nil
		return err
	}
	// Copy the caller-supplied options so the stored slice is never mutated.
	opts := make([]grpc.DialOption, len(stor.opts))
	copy(opts, stor.opts)
	opts = append(opts, creds)
	stor.conn, err = grpc.Dial(stor.addr, opts...)
	if err != nil {
		stor.conn = nil
		return err
	}
	stor.client = pb.NewGroupStoreClient(stor.conn)
	return nil
}
// Shutdown will close any existing connections; note that Startup may
// automatically get called with any further activity, but it will use a new
// connection. To ensure the groupStore has no further activity, use Close.
func (stor *groupStore) Shutdown(ctx context.Context) error {
	stor.lock.Lock()
	defer stor.lock.Unlock()
	return stor.shutdown()
}
// shutdown tears down the connection and client stub. It is a no-op when
// not connected and always returns nil. Callers must hold stor.lock.
func (stor *groupStore) shutdown() error {
	if stor.conn == nil {
		return nil
	}
	// The Close error is deliberately ignored; the connection is being
	// discarded either way.
	_ = stor.conn.Close()
	stor.conn = nil
	stor.client = nil
	return nil
}
// Close will shutdown outgoing connectivity and stop all background
// goroutines; note that the groupStore is no longer usable after a call to
// Close, including using Startup.
func (stor *groupStore) Close() {
	stor.lock.Lock()
	defer stor.lock.Unlock()
	stor.shutdown()
	// Closing handlersDoneChan signals every handler goroutine to exit.
	close(stor.handlersDoneChan)
}
// EnableWrites is a no-op for this client; writes are always enabled.
func (stor *groupStore) EnableWrites(ctx context.Context) error {
	return nil
}
// DisableWrites is not supported by this client and always returns an error.
func (stor *groupStore) DisableWrites(ctx context.Context) error {
	// TODO: I suppose we could implement toggling writes from this client;
	// I'll leave that for later.
	return errors.New("cannot disable writes with this client at this time")
}
// Flush is a no-op: this client caches nothing locally.
func (stor *groupStore) Flush(ctx context.Context) error {
	// Nothing cached on this end, so nothing to flush.
	return nil
}
// AuditPass is not supported by this client and always returns an error.
func (stor *groupStore) AuditPass(ctx context.Context) error {
	return errors.New("audit passes not available with this client at this time")
}
// Stats returns a placeholder stats value; no real statistics are gathered
// by this client.
func (stor *groupStore) Stats(ctx context.Context, debug bool) (fmt.Stringer, error) {
	return noStats, nil
}
// ValueCap returns the maximum value size; currently hard-coded to the
// uint32 maximum rather than queried from the server.
func (stor *groupStore) ValueCap(ctx context.Context) (uint32, error) {
	// TODO: This should be a (cached) value from the server. Servers don't
	// change their value caps on the fly, so the cache can be kept until
	// disconnect.
	return 0xffffffff, nil
}
// asyncGroupLookupRequest pairs a lookup request with the channel its
// response is delivered on. canceled is set under canceledLock when the
// caller's context expires before the response arrives, letting the stream
// handler recycle the objects instead of delivering to a departed caller.
type asyncGroupLookupRequest struct {
	req pb.LookupRequest
	resChan chan *asyncGroupLookupResponse
	canceledLock sync.Mutex
	canceled bool
}
// asyncGroupLookupResponse carries either a lookup response or a transport
// error back to the waiting caller.
type asyncGroupLookupResponse struct {
	res *pb.LookupResponse
	err error
}
// handleLookupStream multiplexes pending lookup requests onto a single
// grpc stream. Each in-flight request is parked in a slot of `waiting`,
// and its slot index is stamped into the request's Rpcid so the matching
// response can be routed back. Runs until handlersDoneChan is closed.
//
// Fix: Rpcid is unsigned, so the original `res.res.Rpcid < 0` bounds check
// was always false (staticcheck SA4003) and has been dropped.
func (stor *groupStore) handleLookupStream() {
	resChan := make(chan *asyncGroupLookupResponse, cap(stor.freeLookupReqChan))
	// resFunc drains stream responses into resChan until the stream errors
	// or shutdown is signaled.
	resFunc := func(stream pb.GroupStore_StreamLookupClient) {
		var err error
		var res *asyncGroupLookupResponse
		for {
			select {
			case res = <-stor.freeLookupResChan:
			case <-stor.handlersDoneChan:
				return
			}
			res.res, res.err = stream.Recv()
			err = res.err
			if err != nil {
				res.res = nil
			}
			select {
			case resChan <- res:
			case <-stor.handlersDoneChan:
				return
			}
			if err != nil {
				return
			}
		}
	}
	var err error
	var stream pb.GroupStore_StreamLookupClient
	waitingMax := uint32(cap(stor.freeLookupReqChan)) - 1
	waiting := make([]*asyncGroupLookupRequest, waitingMax+1)
	waitingIndex := uint32(0)
	for {
		select {
		case req := <-stor.pendingLookupReqChan:
			// Find a free waiting slot; its index becomes the rpc id.
			j := waitingIndex
			for waiting[waitingIndex] != nil {
				waitingIndex++
				if waitingIndex > waitingMax {
					waitingIndex = 0
				}
				if waitingIndex == j {
					panic("coding error: got more concurrent requests from pendingLookupReqChan than should be available")
				}
			}
			req.req.Rpcid = waitingIndex
			waiting[waitingIndex] = req
			waitingIndex++
			if waitingIndex > waitingMax {
				waitingIndex = 0
			}
			if stream == nil {
				// Lazily (re)establish the connection and stream.
				stor.lock.Lock()
				if stor.client == nil {
					if err = stor.startup(); err != nil {
						stor.lock.Unlock()
						res := <-stor.freeLookupResChan
						res.err = err
						res.res = &pb.LookupResponse{Rpcid: req.req.Rpcid}
						resChan <- res
						break
					}
				}
				stream, err = stor.client.StreamLookup(context.Background())
				stor.lock.Unlock()
				if err != nil {
					res := <-stor.freeLookupResChan
					res.err = err
					res.res = &pb.LookupResponse{Rpcid: req.req.Rpcid}
					resChan <- res
					break
				}
				go resFunc(stream)
			}
			if err = stream.Send(&req.req); err != nil {
				stream = nil
				res := <-stor.freeLookupResChan
				res.err = err
				res.res = &pb.LookupResponse{Rpcid: req.req.Rpcid}
				resChan <- res
			}
		case res := <-resChan:
			if res.res == nil {
				// Receiver got unrecoverable error, so we'll have to
				// respond with errors to all waiting requests.
				wereWaiting := make([]*asyncGroupLookupRequest, len(waiting))
				for i, v := range waiting {
					wereWaiting[i] = v
				}
				err := res.err
				if err == nil {
					err = errors.New("receiver had error, had to close any other waiting requests")
				}
				go func(reqs []*asyncGroupLookupRequest, err error) {
					for _, req := range reqs {
						if req == nil {
							continue
						}
						res := <-stor.freeLookupResChan
						res.err = err
						res.res = &pb.LookupResponse{Rpcid: req.req.Rpcid}
						resChan <- res
					}
				}(wereWaiting, err)
				break
			}
			if res.res.Rpcid > waitingMax {
				// TODO: Debug log error?
				break
			}
			req := waiting[res.res.Rpcid]
			if req == nil {
				// TODO: Debug log error?
				break
			}
			waiting[res.res.Rpcid] = nil
			req.canceledLock.Lock()
			if !req.canceled {
				req.resChan <- res
			} else {
				// Caller gave up; recycle both objects.
				stor.freeLookupReqChan <- req
				stor.freeLookupResChan <- res
			}
			req.canceledLock.Unlock()
		case <-stor.handlersDoneChan:
			return
		}
	}
}
// Lookup retrieves the timestamp and value length for the given key pair
// via the shared lookup stream. Each blocking step honors ctx
// cancellation; if canceled after submission, the request is flagged so
// the stream handler recycles it instead of delivering to a departed
// caller.
func (stor *groupStore) Lookup(ctx context.Context, keyA, keyB uint64, childKeyA, childKeyB uint64) (timestampMicro int64, length uint32, err error) {
	var req *asyncGroupLookupRequest
	// Acquiring a free request object doubles as the concurrency limit.
	select {
	case req = <-stor.freeLookupReqChan:
	case <-ctx.Done():
		return 0, 0, ctx.Err()
	}
	req.canceled = false
	req.req.KeyA = keyA
	req.req.KeyB = keyB
	req.req.ChildKeyA = childKeyA
	req.req.ChildKeyB = childKeyB
	select {
	case stor.pendingLookupReqChan <- req:
	case <-ctx.Done():
		stor.freeLookupReqChan <- req
		return 0, 0, ctx.Err()
	}
	var res *asyncGroupLookupResponse
	select {
	case res = <-req.resChan:
	case <-ctx.Done():
		req.canceledLock.Lock()
		select {
		case <-req.resChan:
			// Response raced in just as we were canceling; drain it so
			// the request object can be reused.
		default:
			req.canceled = true
		}
		req.canceledLock.Unlock()
		return 0, 0, ctx.Err()
	}
	stor.freeLookupReqChan <- req
	if res.err != nil {
		err = res.err
		stor.freeLookupResChan <- res
		return 0, 0, err
	}
	timestampMicro = res.res.TimestampMicro
	length = res.res.Length
	// Server-side errors arrive as strings; translate non-empty ones.
	if res.res.Err == "" {
		err = nil
	} else {
		err = proto.TranslateErrorString(res.res.Err)
	}
	stor.freeLookupResChan <- res
	return timestampMicro, length, err
}
// asyncGroupReadRequest pairs a read request with the channel its response
// is delivered on; canceled is set under canceledLock when the caller's
// context expires first.
type asyncGroupReadRequest struct {
	req pb.ReadRequest
	resChan chan *asyncGroupReadResponse
	canceledLock sync.Mutex
	canceled bool
}
// asyncGroupReadResponse carries either a read response or a transport
// error back to the waiting caller.
type asyncGroupReadResponse struct {
	res *pb.ReadResponse
	err error
}
// handleReadStream multiplexes pending read requests onto a single grpc
// stream, matching responses to requests through the Rpcid slot index.
// Runs until handlersDoneChan is closed.
//
// Fix: Rpcid is unsigned, so the original `res.res.Rpcid < 0` bounds check
// was always false (staticcheck SA4003) and has been dropped.
func (stor *groupStore) handleReadStream() {
	resChan := make(chan *asyncGroupReadResponse, cap(stor.freeReadReqChan))
	// resFunc drains stream responses into resChan until the stream errors
	// or shutdown is signaled.
	resFunc := func(stream pb.GroupStore_StreamReadClient) {
		var err error
		var res *asyncGroupReadResponse
		for {
			select {
			case res = <-stor.freeReadResChan:
			case <-stor.handlersDoneChan:
				return
			}
			res.res, res.err = stream.Recv()
			err = res.err
			if err != nil {
				res.res = nil
			}
			select {
			case resChan <- res:
			case <-stor.handlersDoneChan:
				return
			}
			if err != nil {
				return
			}
		}
	}
	var err error
	var stream pb.GroupStore_StreamReadClient
	waitingMax := uint32(cap(stor.freeReadReqChan)) - 1
	waiting := make([]*asyncGroupReadRequest, waitingMax+1)
	waitingIndex := uint32(0)
	for {
		select {
		case req := <-stor.pendingReadReqChan:
			// Find a free waiting slot; its index becomes the rpc id.
			j := waitingIndex
			for waiting[waitingIndex] != nil {
				waitingIndex++
				if waitingIndex > waitingMax {
					waitingIndex = 0
				}
				if waitingIndex == j {
					panic("coding error: got more concurrent requests from pendingReadReqChan than should be available")
				}
			}
			req.req.Rpcid = waitingIndex
			waiting[waitingIndex] = req
			waitingIndex++
			if waitingIndex > waitingMax {
				waitingIndex = 0
			}
			if stream == nil {
				// Lazily (re)establish the connection and stream.
				stor.lock.Lock()
				if stor.client == nil {
					if err = stor.startup(); err != nil {
						stor.lock.Unlock()
						res := <-stor.freeReadResChan
						res.err = err
						res.res = &pb.ReadResponse{Rpcid: req.req.Rpcid}
						resChan <- res
						break
					}
				}
				stream, err = stor.client.StreamRead(context.Background())
				stor.lock.Unlock()
				if err != nil {
					res := <-stor.freeReadResChan
					res.err = err
					res.res = &pb.ReadResponse{Rpcid: req.req.Rpcid}
					resChan <- res
					break
				}
				go resFunc(stream)
			}
			if err = stream.Send(&req.req); err != nil {
				stream = nil
				res := <-stor.freeReadResChan
				res.err = err
				res.res = &pb.ReadResponse{Rpcid: req.req.Rpcid}
				resChan <- res
			}
		case res := <-resChan:
			if res.res == nil {
				// Receiver got unrecoverable error, so we'll have to
				// respond with errors to all waiting requests.
				wereWaiting := make([]*asyncGroupReadRequest, len(waiting))
				for i, v := range waiting {
					wereWaiting[i] = v
				}
				err := res.err
				if err == nil {
					err = errors.New("receiver had error, had to close any other waiting requests")
				}
				go func(reqs []*asyncGroupReadRequest, err error) {
					for _, req := range reqs {
						if req == nil {
							continue
						}
						res := <-stor.freeReadResChan
						res.err = err
						res.res = &pb.ReadResponse{Rpcid: req.req.Rpcid}
						resChan <- res
					}
				}(wereWaiting, err)
				break
			}
			if res.res.Rpcid > waitingMax {
				// TODO: Debug log error?
				break
			}
			req := waiting[res.res.Rpcid]
			if req == nil {
				// TODO: Debug log error?
				break
			}
			waiting[res.res.Rpcid] = nil
			req.canceledLock.Lock()
			if !req.canceled {
				req.resChan <- res
			} else {
				// Caller gave up; recycle both objects.
				stor.freeReadReqChan <- req
				stor.freeReadResChan <- res
			}
			req.canceledLock.Unlock()
		case <-stor.handlersDoneChan:
			return
		}
	}
}
// Read retrieves the timestamp and value for the given key pair via the
// shared read stream; the value bytes are appended to the supplied value
// slice. Each blocking step honors ctx cancellation; if canceled after
// submission, the request is flagged so the stream handler recycles it.
func (stor *groupStore) Read(ctx context.Context, keyA, keyB uint64, childKeyA, childKeyB uint64, value []byte) (timestampMicro int64, rvalue []byte, err error) {
	var req *asyncGroupReadRequest
	// Acquiring a free request object doubles as the concurrency limit.
	select {
	case req = <-stor.freeReadReqChan:
	case <-ctx.Done():
		return 0, rvalue, ctx.Err()
	}
	req.canceled = false
	req.req.KeyA = keyA
	req.req.KeyB = keyB
	req.req.ChildKeyA = childKeyA
	req.req.ChildKeyB = childKeyB
	select {
	case stor.pendingReadReqChan <- req:
	case <-ctx.Done():
		stor.freeReadReqChan <- req
		return 0, rvalue, ctx.Err()
	}
	var res *asyncGroupReadResponse
	select {
	case res = <-req.resChan:
	case <-ctx.Done():
		req.canceledLock.Lock()
		select {
		case <-req.resChan:
			// Response raced in just as we were canceling; drain it so
			// the request object can be reused.
		default:
			req.canceled = true
		}
		req.canceledLock.Unlock()
		return 0, rvalue, ctx.Err()
	}
	stor.freeReadReqChan <- req
	if res.err != nil {
		err = res.err
		stor.freeReadResChan <- res
		return 0, rvalue, err
	}
	timestampMicro = res.res.TimestampMicro
	// Copy out of the pooled response before it is recycled below.
	rvalue = append(rvalue, res.res.Value...)
	// Server-side errors arrive as strings; translate non-empty ones.
	if res.res.Err == "" {
		err = nil
	} else {
		err = proto.TranslateErrorString(res.res.Err)
	}
	stor.freeReadResChan <- res
	return timestampMicro, rvalue, err
}
// asyncGroupWriteRequest pairs a write request with the channel its
// response is delivered on; canceled is set under canceledLock when the
// caller's context expires first.
type asyncGroupWriteRequest struct {
	req pb.WriteRequest
	resChan chan *asyncGroupWriteResponse
	canceledLock sync.Mutex
	canceled bool
}
// asyncGroupWriteResponse carries either a write response or a transport
// error back to the waiting caller.
type asyncGroupWriteResponse struct {
	res *pb.WriteResponse
	err error
}
// handleWriteStream multiplexes pending write requests onto a single grpc
// stream, matching responses to requests through the Rpcid slot index.
// Runs until handlersDoneChan is closed.
//
// Fix: Rpcid is unsigned, so the original `res.res.Rpcid < 0` bounds check
// was always false (staticcheck SA4003) and has been dropped.
func (stor *groupStore) handleWriteStream() {
	resChan := make(chan *asyncGroupWriteResponse, cap(stor.freeWriteReqChan))
	// resFunc drains stream responses into resChan until the stream errors
	// or shutdown is signaled.
	resFunc := func(stream pb.GroupStore_StreamWriteClient) {
		var err error
		var res *asyncGroupWriteResponse
		for {
			select {
			case res = <-stor.freeWriteResChan:
			case <-stor.handlersDoneChan:
				return
			}
			res.res, res.err = stream.Recv()
			err = res.err
			if err != nil {
				res.res = nil
			}
			select {
			case resChan <- res:
			case <-stor.handlersDoneChan:
				return
			}
			if err != nil {
				return
			}
		}
	}
	var err error
	var stream pb.GroupStore_StreamWriteClient
	waitingMax := uint32(cap(stor.freeWriteReqChan)) - 1
	waiting := make([]*asyncGroupWriteRequest, waitingMax+1)
	waitingIndex := uint32(0)
	for {
		select {
		case req := <-stor.pendingWriteReqChan:
			// Find a free waiting slot; its index becomes the rpc id.
			j := waitingIndex
			for waiting[waitingIndex] != nil {
				waitingIndex++
				if waitingIndex > waitingMax {
					waitingIndex = 0
				}
				if waitingIndex == j {
					panic("coding error: got more concurrent requests from pendingWriteReqChan than should be available")
				}
			}
			req.req.Rpcid = waitingIndex
			waiting[waitingIndex] = req
			waitingIndex++
			if waitingIndex > waitingMax {
				waitingIndex = 0
			}
			if stream == nil {
				// Lazily (re)establish the connection and stream.
				stor.lock.Lock()
				if stor.client == nil {
					if err = stor.startup(); err != nil {
						stor.lock.Unlock()
						res := <-stor.freeWriteResChan
						res.err = err
						res.res = &pb.WriteResponse{Rpcid: req.req.Rpcid}
						resChan <- res
						break
					}
				}
				stream, err = stor.client.StreamWrite(context.Background())
				stor.lock.Unlock()
				if err != nil {
					res := <-stor.freeWriteResChan
					res.err = err
					res.res = &pb.WriteResponse{Rpcid: req.req.Rpcid}
					resChan <- res
					break
				}
				go resFunc(stream)
			}
			if err = stream.Send(&req.req); err != nil {
				stream = nil
				res := <-stor.freeWriteResChan
				res.err = err
				res.res = &pb.WriteResponse{Rpcid: req.req.Rpcid}
				resChan <- res
			}
		case res := <-resChan:
			if res.res == nil {
				// Receiver got unrecoverable error, so we'll have to
				// respond with errors to all waiting requests.
				wereWaiting := make([]*asyncGroupWriteRequest, len(waiting))
				for i, v := range waiting {
					wereWaiting[i] = v
				}
				err := res.err
				if err == nil {
					err = errors.New("receiver had error, had to close any other waiting requests")
				}
				go func(reqs []*asyncGroupWriteRequest, err error) {
					for _, req := range reqs {
						if req == nil {
							continue
						}
						res := <-stor.freeWriteResChan
						res.err = err
						res.res = &pb.WriteResponse{Rpcid: req.req.Rpcid}
						resChan <- res
					}
				}(wereWaiting, err)
				break
			}
			if res.res.Rpcid > waitingMax {
				// TODO: Debug log error?
				break
			}
			req := waiting[res.res.Rpcid]
			if req == nil {
				// TODO: Debug log error?
				break
			}
			waiting[res.res.Rpcid] = nil
			req.canceledLock.Lock()
			if !req.canceled {
				req.resChan <- res
			} else {
				// Caller gave up; recycle both objects.
				stor.freeWriteReqChan <- req
				stor.freeWriteResChan <- res
			}
			req.canceledLock.Unlock()
		case <-stor.handlersDoneChan:
			return
		}
	}
}
// Write stores value for the given key pair at timestampMicro via the
// shared write stream, returning the timestamp of any previously stored
// value. Each blocking step honors ctx cancellation; if canceled after
// submission, the request is flagged so the stream handler recycles it.
//
// Fix: removed the leftover debug panic (explicitly marked "REMOVEME")
// that crashed the process on zero-length values; empty values are now
// sent to the server like any other.
func (stor *groupStore) Write(ctx context.Context, keyA, keyB uint64, childKeyA, childKeyB uint64, timestampMicro int64, value []byte) (oldTimestampMicro int64, err error) {
	var req *asyncGroupWriteRequest
	// Acquiring a free request object doubles as the concurrency limit.
	select {
	case req = <-stor.freeWriteReqChan:
	case <-ctx.Done():
		return 0, ctx.Err()
	}
	req.canceled = false
	req.req.KeyA = keyA
	req.req.KeyB = keyB
	req.req.ChildKeyA = childKeyA
	req.req.ChildKeyB = childKeyB
	req.req.TimestampMicro = timestampMicro
	req.req.Value = value
	select {
	case stor.pendingWriteReqChan <- req:
	case <-ctx.Done():
		stor.freeWriteReqChan <- req
		return 0, ctx.Err()
	}
	var res *asyncGroupWriteResponse
	select {
	case res = <-req.resChan:
	case <-ctx.Done():
		req.canceledLock.Lock()
		select {
		case <-req.resChan:
			// Response raced in just as we were canceling; drain it so
			// the request object can be reused.
		default:
			req.canceled = true
		}
		req.canceledLock.Unlock()
		return 0, ctx.Err()
	}
	stor.freeWriteReqChan <- req
	if res.err != nil {
		err = res.err
		stor.freeWriteResChan <- res
		return 0, err
	}
	oldTimestampMicro = res.res.TimestampMicro
	// Server-side errors arrive as strings; translate non-empty ones.
	if res.res.Err == "" {
		err = nil
	} else {
		err = proto.TranslateErrorString(res.res.Err)
	}
	stor.freeWriteResChan <- res
	return oldTimestampMicro, err
}
// asyncGroupDeleteRequest pairs a delete request with the channel its
// response is delivered on; canceled is set under canceledLock when the
// caller's context expires first.
type asyncGroupDeleteRequest struct {
	req pb.DeleteRequest
	resChan chan *asyncGroupDeleteResponse
	canceledLock sync.Mutex
	canceled bool
}
// asyncGroupDeleteResponse carries either a delete response or a transport
// error back to the waiting caller.
type asyncGroupDeleteResponse struct {
	res *pb.DeleteResponse
	err error
}
// handleDeleteStream multiplexes pending delete requests onto a single
// grpc stream, matching responses to requests through the Rpcid slot
// index. Runs until handlersDoneChan is closed.
//
// Fix: Rpcid is unsigned, so the original `res.res.Rpcid < 0` bounds check
// was always false (staticcheck SA4003) and has been dropped.
func (stor *groupStore) handleDeleteStream() {
	resChan := make(chan *asyncGroupDeleteResponse, cap(stor.freeDeleteReqChan))
	// resFunc drains stream responses into resChan until the stream errors
	// or shutdown is signaled.
	resFunc := func(stream pb.GroupStore_StreamDeleteClient) {
		var err error
		var res *asyncGroupDeleteResponse
		for {
			select {
			case res = <-stor.freeDeleteResChan:
			case <-stor.handlersDoneChan:
				return
			}
			res.res, res.err = stream.Recv()
			err = res.err
			if err != nil {
				res.res = nil
			}
			select {
			case resChan <- res:
			case <-stor.handlersDoneChan:
				return
			}
			if err != nil {
				return
			}
		}
	}
	var err error
	var stream pb.GroupStore_StreamDeleteClient
	waitingMax := uint32(cap(stor.freeDeleteReqChan)) - 1
	waiting := make([]*asyncGroupDeleteRequest, waitingMax+1)
	waitingIndex := uint32(0)
	for {
		select {
		case req := <-stor.pendingDeleteReqChan:
			// Find a free waiting slot; its index becomes the rpc id.
			j := waitingIndex
			for waiting[waitingIndex] != nil {
				waitingIndex++
				if waitingIndex > waitingMax {
					waitingIndex = 0
				}
				if waitingIndex == j {
					panic("coding error: got more concurrent requests from pendingDeleteReqChan than should be available")
				}
			}
			req.req.Rpcid = waitingIndex
			waiting[waitingIndex] = req
			waitingIndex++
			if waitingIndex > waitingMax {
				waitingIndex = 0
			}
			if stream == nil {
				// Lazily (re)establish the connection and stream.
				stor.lock.Lock()
				if stor.client == nil {
					if err = stor.startup(); err != nil {
						stor.lock.Unlock()
						res := <-stor.freeDeleteResChan
						res.err = err
						res.res = &pb.DeleteResponse{Rpcid: req.req.Rpcid}
						resChan <- res
						break
					}
				}
				stream, err = stor.client.StreamDelete(context.Background())
				stor.lock.Unlock()
				if err != nil {
					res := <-stor.freeDeleteResChan
					res.err = err
					res.res = &pb.DeleteResponse{Rpcid: req.req.Rpcid}
					resChan <- res
					break
				}
				go resFunc(stream)
			}
			if err = stream.Send(&req.req); err != nil {
				stream = nil
				res := <-stor.freeDeleteResChan
				res.err = err
				res.res = &pb.DeleteResponse{Rpcid: req.req.Rpcid}
				resChan <- res
			}
		case res := <-resChan:
			if res.res == nil {
				// Receiver got unrecoverable error, so we'll have to
				// respond with errors to all waiting requests.
				wereWaiting := make([]*asyncGroupDeleteRequest, len(waiting))
				for i, v := range waiting {
					wereWaiting[i] = v
				}
				err := res.err
				if err == nil {
					err = errors.New("receiver had error, had to close any other waiting requests")
				}
				go func(reqs []*asyncGroupDeleteRequest, err error) {
					for _, req := range reqs {
						if req == nil {
							continue
						}
						res := <-stor.freeDeleteResChan
						res.err = err
						res.res = &pb.DeleteResponse{Rpcid: req.req.Rpcid}
						resChan <- res
					}
				}(wereWaiting, err)
				break
			}
			if res.res.Rpcid > waitingMax {
				// TODO: Debug log error?
				break
			}
			req := waiting[res.res.Rpcid]
			if req == nil {
				// TODO: Debug log error?
				break
			}
			waiting[res.res.Rpcid] = nil
			req.canceledLock.Lock()
			if !req.canceled {
				req.resChan <- res
			} else {
				// Caller gave up; recycle both objects.
				stor.freeDeleteReqChan <- req
				stor.freeDeleteResChan <- res
			}
			req.canceledLock.Unlock()
		case <-stor.handlersDoneChan:
			return
		}
	}
}
// Delete issues a delete for the given key/child-key pair at timestampMicro
// and returns the timestamp previously stored for the item (when the server
// reports one). Request and response structs are drawn from free pools; on
// context cancelation the request is flagged canceled so the stream handler
// recycles the structs instead of delivering to a caller that has gone away.
func (stor *groupStore) Delete(ctx context.Context, keyA, keyB uint64, childKeyA, childKeyB uint64, timestampMicro int64) (oldTimestampMicro int64, err error) {
	// Acquire a pooled request, or bail if the caller's context ends first.
	var req *asyncGroupDeleteRequest
	select {
	case req = <-stor.freeDeleteReqChan:
	case <-ctx.Done():
		return 0, ctx.Err()
	}
	req.canceled = false
	req.req.KeyA = keyA
	req.req.KeyB = keyB
	req.req.ChildKeyA = childKeyA
	req.req.ChildKeyB = childKeyB
	req.req.TimestampMicro = timestampMicro
	// Hand the request to the stream handler goroutine.
	select {
	case stor.pendingDeleteReqChan <- req:
	case <-ctx.Done():
		stor.freeDeleteReqChan <- req
		return 0, ctx.Err()
	}
	// Wait for the response. On cancelation, take canceledLock and only mark
	// the request canceled if no response has been delivered yet.
	// NOTE(review): when a response arrives in that race window it is drained
	// here but not returned to freeDeleteResChan — confirm whether that can
	// slowly deplete the response pool.
	var res *asyncGroupDeleteResponse
	select {
	case res = <-req.resChan:
	case <-ctx.Done():
		req.canceledLock.Lock()
		select {
		case <-req.resChan:
		default:
			req.canceled = true
		}
		req.canceledLock.Unlock()
		return 0, ctx.Err()
	}
	stor.freeDeleteReqChan <- req
	if res.err != nil {
		err = res.err
		stor.freeDeleteResChan <- res
		return 0, err
	}
	oldTimestampMicro = res.res.TimestampMicro
	// Server-side errors arrive as strings; translate them to typed errors.
	if res.res.Err == "" {
		err = nil
	} else {
		err = proto.TranslateErrorString(res.res.Err)
	}
	stor.freeDeleteResChan <- res
	return oldTimestampMicro, err
}
// asyncGroupLookupGroupRequest pairs a LookupGroup RPC request with the
// channel its response is delivered on, plus cancelation state shared between
// the caller and the stream handler.
type asyncGroupLookupGroupRequest struct {
	req          pb.LookupGroupRequest
	resChan      chan *asyncGroupLookupGroupResponse
	canceledLock sync.Mutex
	canceled     bool // set when the caller's context ended before delivery
}
// asyncGroupLookupGroupResponse carries either a LookupGroup RPC response or
// the error that prevented one.
type asyncGroupLookupGroupResponse struct {
	res *pb.LookupGroupResponse
	err error
}
// handleLookupGroupStream pumps queued LookupGroup requests onto a single
// gRPC stream and routes responses back to the matching waiting request.
// Requests and responses are correlated via the Rpcid field, which indexes
// into the waiting slice. Runs until stor.handlersDoneChan is closed.
func (stor *groupStore) handleLookupGroupStream() {
	resChan := make(chan *asyncGroupLookupGroupResponse, cap(stor.freeLookupGroupReqChan))
	// resFunc receives from the stream and forwards each response to resChan
	// until the stream errors or the handlers shut down.
	resFunc := func(stream pb.GroupStore_StreamLookupGroupClient) {
		var err error
		var res *asyncGroupLookupGroupResponse
		for {
			select {
			case res = <-stor.freeLookupGroupResChan:
			case <-stor.handlersDoneChan:
				return
			}
			res.res, res.err = stream.Recv()
			err = res.err
			if err != nil {
				res.res = nil
			}
			select {
			case resChan <- res:
			case <-stor.handlersDoneChan:
				return
			}
			if err != nil {
				return
			}
		}
	}
	var err error
	var stream pb.GroupStore_StreamLookupGroupClient
	waitingMax := uint32(cap(stor.freeLookupGroupReqChan)) - 1
	waiting := make([]*asyncGroupLookupGroupRequest, waitingMax+1)
	waitingIndex := uint32(0)
	for {
		select {
		case req := <-stor.pendingLookupGroupReqChan:
			// Find a free slot in waiting, scanning circularly from
			// waitingIndex; the pool sizes guarantee one exists.
			j := waitingIndex
			for waiting[waitingIndex] != nil {
				waitingIndex++
				if waitingIndex > waitingMax {
					waitingIndex = 0
				}
				if waitingIndex == j {
					panic("coding error: got more concurrent requests from pendingLookupGroupReqChan than should be available")
				}
			}
			req.req.Rpcid = waitingIndex
			waiting[waitingIndex] = req
			waitingIndex++
			if waitingIndex > waitingMax {
				waitingIndex = 0
			}
			// Lazily (re)establish the client connection and the stream.
			if stream == nil {
				stor.lock.Lock()
				if stor.client == nil {
					if err = stor.startup(); err != nil {
						stor.lock.Unlock()
						res := <-stor.freeLookupGroupResChan
						res.err = err
						res.res = &pb.LookupGroupResponse{Rpcid: req.req.Rpcid}
						resChan <- res
						break
					}
				}
				stream, err = stor.client.StreamLookupGroup(context.Background())
				stor.lock.Unlock()
				if err != nil {
					res := <-stor.freeLookupGroupResChan
					res.err = err
					res.res = &pb.LookupGroupResponse{Rpcid: req.req.Rpcid}
					resChan <- res
					break
				}
				go resFunc(stream)
			}
			if err = stream.Send(&req.req); err != nil {
				stream = nil
				res := <-stor.freeLookupGroupResChan
				res.err = err
				res.res = &pb.LookupGroupResponse{Rpcid: req.req.Rpcid}
				resChan <- res
			}
		case res := <-resChan:
			if res.res == nil {
				// Receiver got unrecoverable error, so we'll have to
				// respond with errors to all waiting requests.
				wereWaiting := make([]*asyncGroupLookupGroupRequest, len(waiting))
				for i, v := range waiting {
					wereWaiting[i] = v
				}
				err := res.err
				if err == nil {
					err = errors.New("receiver had error, had to close any other waiting requests")
				}
				go func(reqs []*asyncGroupLookupGroupRequest, err error) {
					for _, req := range reqs {
						if req == nil {
							continue
						}
						res := <-stor.freeLookupGroupResChan
						res.err = err
						res.res = &pb.LookupGroupResponse{Rpcid: req.req.Rpcid}
						resChan <- res
					}
				}(wereWaiting, err)
				break
			}
			// Rpcid is unsigned, so only the upper bound needs checking
			// (the previous "< 0" comparison was always false).
			if res.res.Rpcid > waitingMax {
				// TODO: Debug log error?
				break
			}
			req := waiting[res.res.Rpcid]
			if req == nil {
				// TODO: Debug log error?
				break
			}
			waiting[res.res.Rpcid] = nil
			// Deliver unless the caller canceled; in that case recycle the
			// request and response structs back to their pools.
			req.canceledLock.Lock()
			if !req.canceled {
				req.resChan <- res
			} else {
				stor.freeLookupGroupReqChan <- req
				stor.freeLookupGroupResChan <- res
			}
			req.canceledLock.Unlock()
		case <-stor.handlersDoneChan:
			return
		}
	}
}
// LookupGroup returns the lookup items (child keys, timestamps, lengths) for
// the group identified by parentKeyA/parentKeyB. It follows the same pooled
// request/response and cancelation protocol as Delete.
func (stor *groupStore) LookupGroup(ctx context.Context, parentKeyA, parentKeyB uint64) (items []store.LookupGroupItem, err error) {
	// Acquire a pooled request, or bail if the caller's context ends first.
	var req *asyncGroupLookupGroupRequest
	select {
	case req = <-stor.freeLookupGroupReqChan:
	case <-ctx.Done():
		return nil, ctx.Err()
	}
	req.canceled = false
	req.req.KeyA = parentKeyA
	req.req.KeyB = parentKeyB
	// Hand the request to the stream handler goroutine.
	select {
	case stor.pendingLookupGroupReqChan <- req:
	case <-ctx.Done():
		stor.freeLookupGroupReqChan <- req
		return nil, ctx.Err()
	}
	// Wait for the response; on cancelation, mark the request canceled under
	// the lock unless a response already arrived.
	var res *asyncGroupLookupGroupResponse
	select {
	case res = <-req.resChan:
	case <-ctx.Done():
		req.canceledLock.Lock()
		select {
		case <-req.resChan:
		default:
			req.canceled = true
		}
		req.canceledLock.Unlock()
		return nil, ctx.Err()
	}
	stor.freeLookupGroupReqChan <- req
	if res.err != nil {
		err = res.err
		stor.freeLookupGroupResChan <- res
		return nil, err
	}
	// Copy the protobuf items into the store's public item type.
	items = make([]store.LookupGroupItem, len(res.res.Items))
	for i, v := range res.res.Items {
		items[i].ChildKeyA = v.ChildKeyA
		items[i].ChildKeyB = v.ChildKeyB
		items[i].TimestampMicro = v.TimestampMicro
		items[i].Length = v.Length
	}
	// Server-side errors arrive as strings; translate them to typed errors.
	if res.res.Err == "" {
		err = nil
	} else {
		err = proto.TranslateErrorString(res.res.Err)
	}
	stor.freeLookupGroupResChan <- res
	return items, err
}
// asyncGroupReadGroupRequest pairs a ReadGroup RPC request with the channel
// its response is delivered on, plus cancelation state shared between the
// caller and the stream handler.
type asyncGroupReadGroupRequest struct {
	req          pb.ReadGroupRequest
	resChan      chan *asyncGroupReadGroupResponse
	canceledLock sync.Mutex
	canceled     bool // set when the caller's context ended before delivery
}
// asyncGroupReadGroupResponse carries either a ReadGroup RPC response or the
// error that prevented one.
type asyncGroupReadGroupResponse struct {
	res *pb.ReadGroupResponse
	err error
}
// handleReadGroupStream pumps queued ReadGroup requests onto a single gRPC
// stream and routes responses back to the matching waiting request.
// Requests and responses are correlated via the Rpcid field, which indexes
// into the waiting slice. Runs until stor.handlersDoneChan is closed.
func (stor *groupStore) handleReadGroupStream() {
	resChan := make(chan *asyncGroupReadGroupResponse, cap(stor.freeReadGroupReqChan))
	// resFunc receives from the stream and forwards each response to resChan
	// until the stream errors or the handlers shut down.
	resFunc := func(stream pb.GroupStore_StreamReadGroupClient) {
		var err error
		var res *asyncGroupReadGroupResponse
		for {
			select {
			case res = <-stor.freeReadGroupResChan:
			case <-stor.handlersDoneChan:
				return
			}
			res.res, res.err = stream.Recv()
			err = res.err
			if err != nil {
				res.res = nil
			}
			select {
			case resChan <- res:
			case <-stor.handlersDoneChan:
				return
			}
			if err != nil {
				return
			}
		}
	}
	var err error
	var stream pb.GroupStore_StreamReadGroupClient
	waitingMax := uint32(cap(stor.freeReadGroupReqChan)) - 1
	waiting := make([]*asyncGroupReadGroupRequest, waitingMax+1)
	waitingIndex := uint32(0)
	for {
		select {
		case req := <-stor.pendingReadGroupReqChan:
			// Find a free slot in waiting, scanning circularly from
			// waitingIndex; the pool sizes guarantee one exists.
			j := waitingIndex
			for waiting[waitingIndex] != nil {
				waitingIndex++
				if waitingIndex > waitingMax {
					waitingIndex = 0
				}
				if waitingIndex == j {
					panic("coding error: got more concurrent requests from pendingReadGroupReqChan than should be available")
				}
			}
			req.req.Rpcid = waitingIndex
			waiting[waitingIndex] = req
			waitingIndex++
			if waitingIndex > waitingMax {
				waitingIndex = 0
			}
			// Lazily (re)establish the client connection and the stream.
			if stream == nil {
				stor.lock.Lock()
				if stor.client == nil {
					if err = stor.startup(); err != nil {
						stor.lock.Unlock()
						res := <-stor.freeReadGroupResChan
						res.err = err
						res.res = &pb.ReadGroupResponse{Rpcid: req.req.Rpcid}
						resChan <- res
						break
					}
				}
				stream, err = stor.client.StreamReadGroup(context.Background())
				stor.lock.Unlock()
				if err != nil {
					res := <-stor.freeReadGroupResChan
					res.err = err
					res.res = &pb.ReadGroupResponse{Rpcid: req.req.Rpcid}
					resChan <- res
					break
				}
				go resFunc(stream)
			}
			if err = stream.Send(&req.req); err != nil {
				stream = nil
				res := <-stor.freeReadGroupResChan
				res.err = err
				res.res = &pb.ReadGroupResponse{Rpcid: req.req.Rpcid}
				resChan <- res
			}
		case res := <-resChan:
			if res.res == nil {
				// Receiver got unrecoverable error, so we'll have to
				// respond with errors to all waiting requests.
				wereWaiting := make([]*asyncGroupReadGroupRequest, len(waiting))
				for i, v := range waiting {
					wereWaiting[i] = v
				}
				err := res.err
				if err == nil {
					err = errors.New("receiver had error, had to close any other waiting requests")
				}
				go func(reqs []*asyncGroupReadGroupRequest, err error) {
					for _, req := range reqs {
						if req == nil {
							continue
						}
						res := <-stor.freeReadGroupResChan
						res.err = err
						res.res = &pb.ReadGroupResponse{Rpcid: req.req.Rpcid}
						resChan <- res
					}
				}(wereWaiting, err)
				break
			}
			// Rpcid is unsigned, so only the upper bound needs checking
			// (the previous "< 0" comparison was always false).
			if res.res.Rpcid > waitingMax {
				// TODO: Debug log error?
				break
			}
			req := waiting[res.res.Rpcid]
			if req == nil {
				// TODO: Debug log error?
				break
			}
			waiting[res.res.Rpcid] = nil
			// Deliver unless the caller canceled; in that case recycle the
			// request and response structs back to their pools.
			req.canceledLock.Lock()
			if !req.canceled {
				req.resChan <- res
			} else {
				stor.freeReadGroupReqChan <- req
				stor.freeReadGroupResChan <- res
			}
			req.canceledLock.Unlock()
		case <-stor.handlersDoneChan:
			return
		}
	}
}
// ReadGroup returns the items (child keys, timestamps, values) for the group
// identified by parentKeyA/parentKeyB. It follows the same pooled
// request/response and cancelation protocol as Delete and LookupGroup.
func (stor *groupStore) ReadGroup(ctx context.Context, parentKeyA, parentKeyB uint64) (items []store.ReadGroupItem, err error) {
	// Acquire a pooled request, or bail if the caller's context ends first.
	var req *asyncGroupReadGroupRequest
	select {
	case req = <-stor.freeReadGroupReqChan:
	case <-ctx.Done():
		return nil, ctx.Err()
	}
	req.canceled = false
	req.req.KeyA = parentKeyA
	req.req.KeyB = parentKeyB
	// Hand the request to the stream handler goroutine.
	select {
	case stor.pendingReadGroupReqChan <- req:
	case <-ctx.Done():
		stor.freeReadGroupReqChan <- req
		return nil, ctx.Err()
	}
	// Wait for the response; on cancelation, mark the request canceled under
	// the lock unless a response already arrived.
	var res *asyncGroupReadGroupResponse
	select {
	case res = <-req.resChan:
	case <-ctx.Done():
		req.canceledLock.Lock()
		select {
		case <-req.resChan:
		default:
			req.canceled = true
		}
		req.canceledLock.Unlock()
		return nil, ctx.Err()
	}
	stor.freeReadGroupReqChan <- req
	if res.err != nil {
		err = res.err
		stor.freeReadGroupResChan <- res
		return nil, err
	}
	// Copy the protobuf items into the store's public item type.
	items = make([]store.ReadGroupItem, len(res.res.Items))
	for i, v := range res.res.Items {
		items[i].ChildKeyA = v.ChildKeyA
		items[i].ChildKeyB = v.ChildKeyB
		items[i].TimestampMicro = v.TimestampMicro
		items[i].Value = v.Value
	}
	// Server-side errors arrive as strings; translate them to typed errors.
	if res.res.Err == "" {
		err = nil
	} else {
		err = proto.TranslateErrorString(res.res.Err)
	}
	stor.freeReadGroupResChan <- res
	return items, err
}
|
package filters
import (
"bytes"
"fmt"
"log"
"net"
"os"
"strings"
"github.com/bonjourmalware/melody/internal/logging"
)
// IPRanges abstracts a slice of IPRange values.
type IPRanges []IPRange
// IPRules groups the whitelisted and blacklisted IP rules.
type IPRules struct {
	WhitelistedIPs IPRanges
	BlacklistedIPs IPRanges
}
// IPRange is a range of IP represented by a lower and an upper bound
type IPRange struct {
Lower net.IP
Upper net.IP
}
// NewIPRange created a new ip range from a lower and an upper bound
func NewIPRange(lower net.IP, upper net.IP) IPRange {
return IPRange{
Lower: lower,
Upper: upper,
}
}
//
//func (iprl *IPRules) ParseRules(rules []string) {
// for _, rawRule := range rules {
// rule := strings.Replace(rawRule, " ", "", -1)
//
// if strings.HasPrefix(rawRule, "not") {
// rule = strings.TrimPrefix(rule, "not")
//
// if strings.Contains(rule, "-") {
// err := iprl.BlacklistRange(rule)
// if err != nil {
// log.Println(fmt.Sprintf("Failed to parse the IP rule [%s]:", rule))
// log.Println(err)
// os.Exit(1)
// }
// continue
// } else if strings.Contains(rule, "/") {
// err := iprl.BlacklistCIDR(rule)
// if err != nil {
// log.Println(fmt.Sprintf("Failed to parse the IP rule [%s]:", rule))
// log.Println(err)
// os.Exit(1)
// }
// continue
// }
//
// err := iprl.Blacklist(rule)
// if err != nil {
// log.Println(fmt.Sprintf("Failed to parse the IP rule [%s]:", rule))
// //log.Println(err)
// os.Exit(1)
// }
// continue
// }
//
// if strings.Contains(rule, "-") {
// err := iprl.WhitelistRange(rule)
// if err != nil {
// log.Println(fmt.Sprintf("Failed to parse the IP rule [%s]:", rule))
// log.Println(err)
// os.Exit(1)
// }
// } else if strings.Contains(rule, "/") {
// err := iprl.WhitelistCIDR(rule)
// if err != nil {
// log.Println(fmt.Sprintf("Failed to parse the IP rule [%s]:", rule))
// //log.Println(err)
// os.Exit(1)
// }
// continue
// } else {
// err := iprl.Whitelist(rule)
// if err != nil {
// log.Println(fmt.Sprintf("Failed to parse the IP rule [%s]:", rule))
// log.Println(err)
// os.Exit(1)
// }
// }
// }
//
// iprl.BlacklistedIPs.MergeOverlapping()
// iprl.WhitelistedIPs.MergeOverlapping()
//}
// ParseRules loads a whitelist and a blacklist into a set of IPRules.
// Each rule may be an IP range ("a-b"), a CIDR block ("a/n") or a single IP;
// spaces are stripped first. Any rule that fails to parse is logged and the
// program exits. Overlapping ranges are merged once both lists are loaded.
func (iprl *IPRules) ParseRules(whitelist []string, blacklist []string) {
	fail := func(rule string, err error) {
		logging.Errors.Println(fmt.Sprintf("Failed to parse the IP rule [%s]:", rule))
		logging.Errors.Println(err)
		os.Exit(1)
	}
	for _, rawRule := range whitelist {
		rule := strings.Replace(rawRule, " ", "", -1)
		var err error
		switch {
		case strings.Contains(rule, "-"):
			err = iprl.WhitelistRange(rule)
		case strings.Contains(rule, "/"):
			err = iprl.WhitelistCIDR(rule)
		default:
			err = iprl.Whitelist(rule)
		}
		if err != nil {
			fail(rule, err)
		}
	}
	for _, rawRule := range blacklist {
		rule := strings.Replace(rawRule, " ", "", -1)
		var err error
		switch {
		case strings.Contains(rule, "-"):
			err = iprl.BlacklistRange(rule)
		case strings.Contains(rule, "/"):
			err = iprl.BlacklistCIDR(rule)
		default:
			err = iprl.Blacklist(rule)
		}
		if err != nil {
			fail(rule, err)
		}
	}
	iprl.BlacklistedIPs.MergeOverlapping()
	iprl.WhitelistedIPs.MergeOverlapping()
}
//
// IPRanges methods
//
// MergeOverlapping optimizes the parsed IPRanges by keeping only
// non-overlapping ranges: contained ranges are dropped and ranges that touch
// at a boundary are fused into one.
// NOTE(review): after every merge the code intends to "restart" the outer
// loop by setting i = 0 and breaking, but the for-post statement then
// increments i to 1, so index 0 is only revisited as `candidate` in the inner
// range — confirm this still converges to the intended fixed point.
func (iprgs *IPRanges) MergeOverlapping() {
	// Work on a copy so in-progress removals don't disturb *iprgs until done.
	workSlice := make(IPRanges, len(*iprgs))
	copy(workSlice, *iprgs)
	for i := 0; i < len(workSlice); i++ {
		for idx, candidate := range workSlice {
			if candidate.Equals(workSlice[i]) {
				// Skip
				continue
			}
			// Current range fully inside candidate: drop the current one.
			if candidate.ContainsIPRange(workSlice[i]) {
				workSlice.RemoveAt(i)
				i = 0 // Restart upper loop
				break
			}
			// Candidate fully inside current range: drop the candidate.
			if workSlice[i].ContainsIPRange(candidate) {
				workSlice.RemoveAt(idx)
				i = 0 // Restart upper loop
				break
			}
			// Ranges touch at the current range's upper bound: extend the
			// candidate upward and drop the current range.
			if !candidate.IsUpperOrLowerBoundary(workSlice[i].Lower) && candidate.IsUpperOrLowerBoundary(workSlice[i].Upper) {
				// Replace the candidate's upper with the current's upper
				workSlice[idx].Upper = workSlice[i].Upper
				workSlice.RemoveAt(i)
				i = 0 // Restart upper loop
				break
			}
			// Ranges touch at the current range's lower bound: extend the
			// candidate downward and drop the current range.
			if !candidate.IsUpperOrLowerBoundary(workSlice[i].Upper) && candidate.IsUpperOrLowerBoundary(workSlice[i].Lower) {
				// Replace the candidate's lower with the current's lower
				workSlice[idx].Lower = workSlice[i].Lower
				workSlice.RemoveAt(i)
				i = 0 // Restart upper loop
				break
			}
		}
	}
	*iprgs = workSlice
}
// RemoveAt is a helper that removes the range at the given index.
// The slice is rebuilt on a fresh backing array so callers still holding the
// previous backing array are unaffected by the in-place append.
func (iprgs *IPRanges) RemoveAt(index int) {
	fresh := make(IPRanges, len(*iprgs))
	copy(fresh, *iprgs)
	*iprgs = append(fresh[:index], fresh[index+1:]...)
}
// Add is a helper that appends a range consisting of a single IP.
func (iprgs *IPRanges) Add(ip net.IP) {
	*iprgs = append(*iprgs, NewIPRange(ip, ip))
}
// AddString is a helper that parses an IP string and adds it as a
// single-address range; it returns an error for an unparsable IP.
// NOTE(review): To4 yields nil for non-IPv4 addresses, so presumably only
// IPv4 rules are expected here — confirm before feeding IPv6 strings.
func (iprgs *IPRanges) AddString(ipstr string) error {
	ip := net.ParseIP(ipstr)
	if ip == nil {
		return fmt.Errorf("invalid IP [%s]", ipstr)
	}
	iprgs.Add(ip.To4())
	return nil
}
// AddRange is a helper that appends a range delimited by two IPs.
func (iprgs *IPRanges) AddRange(lower net.IP, upper net.IP) {
	*iprgs = append(*iprgs, NewIPRange(lower, upper))
}
//
// IPRange methods
//
// ContainsIPString is a helper that reports whether the range contains the
// IP given as a string; unparsable strings are never contained.
func (iprg IPRange) ContainsIPString(ipstr string) bool {
	ip := net.ParseIP(ipstr)
	if ip == nil {
		return false
	}
	return iprg.ContainsIP(ip)
}
// ContainsIP is a helper that reports whether the range contains the given
// IP (bounds inclusive).
// Bounds are normalized to the 16-byte form before comparing: elsewhere in
// this file AddString stores 4-byte To4 forms while AddRange stores the
// 16-byte forms returned by net.ParseIP, and bytes.Compare is
// length-sensitive, so comparing mixed representations directly gives wrong
// answers. To16 maps both forms to a common, correctly ordered encoding.
func (iprg IPRange) ContainsIP(ip net.IP) bool {
	v := ip.To16()
	return bytes.Compare(v, iprg.Lower.To16()) >= 0 && bytes.Compare(v, iprg.Upper.To16()) <= 0
}
// ContainsIPRange is a helper that reports whether iprg fully contains the
// given IP range, i.e. both bounds of iprange fall within iprg.
// Bug fix: the upper bound must be checked against the receiver (iprg); the
// original checked iprange against its own upper bound, which is trivially
// true.
func (iprg IPRange) ContainsIPRange(iprange IPRange) bool {
	return iprg.ContainsIP(iprange.Lower.To4()) && iprg.ContainsIP(iprange.Upper.To4())
}
// IsUpperOrLowerBoundary is a helper that reports whether the given IP is
// either the lower or the upper bound of the range.
// Simplified from the "if !a && !b { return false }; return true" form
// (staticcheck S1008) to a direct boolean expression.
func (iprg IPRange) IsUpperOrLowerBoundary(ip net.IP) bool {
	return iprg.Lower.Equal(ip.To4()) || iprg.Upper.Equal(ip.To4())
}
// Equals is a helper that reports whether two IPRanges have identical
// lower and upper bounds.
func (iprg *IPRange) Equals(iprange IPRange) bool {
	upperMatch := iprg.Upper.Equal(iprange.Upper.To4())
	lowerMatch := iprg.Lower.Equal(iprange.Lower.To4())
	return upperMatch && lowerMatch
}
//
// CIDR
//
// WhitelistCIDR parses a CIDR string and adds its full address range to the
// IPRules' whitelist.
func (iprl *IPRules) WhitelistCIDR(rawIPCIDR string) error {
	_, ipnet, err := net.ParseCIDR(rawIPCIDR)
	if err != nil {
		return err
	}
	lower, upper, err := addressRange(ipnet)
	if err != nil {
		return err
	}
	iprl.WhitelistedIPs.AddRange(lower, upper)
	return nil
}
// BlacklistCIDR parses a CIDR string and adds its full address range to the
// IPRules' blacklist.
func (iprl *IPRules) BlacklistCIDR(rawIPCIDR string) error {
	_, ipnet, err := net.ParseCIDR(rawIPCIDR)
	if err != nil {
		return err
	}
	lower, upper, err := addressRange(ipnet)
	if err != nil {
		return err
	}
	iprl.BlacklistedIPs.AddRange(lower, upper)
	return nil
}
//
// Ranges
//
// WhitelistRange parses an "a.b.c.d-e.f.g.h" range string and adds it to the
// IPRules' whitelist. checkValidIPRange exits the process when the bounds
// are out of order.
func (iprl *IPRules) WhitelistRange(rawIPRange string) error {
	hostRange := strings.Split(rawIPRange, "-")
	lower, higher := hostRange[0], hostRange[1]
	ipFrom := net.ParseIP(lower)
	ipTo := net.ParseIP(higher)
	if ipFrom == nil || ipTo == nil {
		return fmt.Errorf("[%s-%s] is not a valid IP address range", lower, higher)
	}
	checkValidIPRange(ipFrom, ipTo)
	iprl.WhitelistedIPs.AddRange(ipFrom, ipTo)
	return nil
}
// BlacklistRange parses an "a.b.c.d-e.f.g.h" range string and adds it to the
// IPRules' blacklist. checkValidIPRange exits the process when the bounds
// are out of order.
func (iprl *IPRules) BlacklistRange(rawIPRange string) error {
	hostRange := strings.Split(rawIPRange, "-")
	lower, higher := hostRange[0], hostRange[1]
	ipFrom := net.ParseIP(lower)
	ipTo := net.ParseIP(higher)
	if ipFrom == nil || ipTo == nil {
		return fmt.Errorf("[%s-%s] is not a valid IP address range", lower, higher)
	}
	checkValidIPRange(ipFrom, ipTo)
	iprl.BlacklistedIPs.AddRange(ipFrom, ipTo)
	return nil
}
//
// Single IPs
//
// Whitelist checks the validity of an IP string (checkValidIP exits the
// process on failure) and adds it to the IPRules' whitelist.
func (iprl *IPRules) Whitelist(ip string) error {
	checkValidIP(ip)
	return iprl.WhitelistedIPs.AddString(ip)
}
// Blacklist checks the validity of an IP string (checkValidIP exits the
// process on failure) and adds it to the IPRules' blacklist.
func (iprl *IPRules) Blacklist(ip string) error {
	checkValidIP(ip)
	return iprl.BlacklistedIPs.AddString(ip)
}
//
// Checks
//
// checkValidIP logs and aborts the program when ipstr is not a valid IP.
func checkValidIP(ipstr string) {
	if isValidIPString(ipstr) {
		return
	}
	log.Println(fmt.Sprintf("[%s] is not a valid IP address", ipstr))
	os.Exit(1)
}
// checkValidIPRange logs and aborts the program when lower does not sort at
// or before upper.
func checkValidIPRange(lower net.IP, upper net.IP) {
	if isValidIPRange(lower, upper) {
		return
	}
	log.Println(fmt.Sprintf("[%s-%s] is not a valid host range", lower.String(), upper.String()))
	os.Exit(1)
}
// isValidIPString reports whether ipstr parses as a valid IPv4 or IPv6
// address. Simplified from the "if nil { return false }; return true" form
// (staticcheck S1008).
func isValidIPString(ipstr string) bool {
	return net.ParseIP(ipstr) != nil
}
// isValidIPRange reports whether lower sorts at or before upper.
// NOTE(review): bytes.Compare is length-sensitive, so both IPs are assumed
// to use the same representation length; the callers in this file pass
// net.ParseIP results (always 16-byte), which satisfies this — confirm for
// any new callers.
func isValidIPRange(lower net.IP, upper net.IP) bool {
	return bytes.Compare(lower, upper) <= 0
}
|
package charge
import (
"fmt"
"sync"
"testing"
lua "github.com/yuin/gopher-lua"
)
// Two alternative Lua sources used by the tests: both define c1, returning 1
// in the first version and 2 in the second, so a pool reload is observable.
var (
	s1 = `
function c1()
	return 1
end
`
	s2 = `
function c1()
	return 2
end
`
)
// TestPrecompiled exercises a pooled Lua state under heavy concurrency, then
// reloads the pool with new source and exercises it again.
func TestPrecompiled(t *testing.T) {
	p := &lStatePool{
		script: s1,
		saved:  make([]*lua.LState, 0, 4),
	}
	do(p, 1000)
	// Swap in the second script; subsequent Gets should compile s2.
	p.Reload(s2)
	do(p, 1000)
}
// do invokes the pooled Lua function "c1" concurrently `times` times,
// printing each result, and returns once every goroutine has finished.
func do(p *lStatePool, times int) {
	var wg sync.WaitGroup
	for i := 0; i < times; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			L := p.Get()
			params := lua.P{
				Fn:      L.GetGlobal("c1"),
				NRet:    1,
				Protect: true,
			}
			if err := L.CallByParam(params, lua.LNumber(1000000000*i), lua.LNumber(10)); err != nil {
				panic(err)
			}
			fmt.Println(L.Get(-1).String())
		}(i)
	}
	wg.Wait()
}
|
package utils
import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"encoding/base64"
	"errors"
)
// AesEncrypt encrypts src with AES in CBC mode, using the key itself (its
// first block) as the IV, pads the plaintext with PKCS#7, and returns the
// ciphertext base64-encoded. The key length must be 16, 24 or 32 bytes.
func AesEncrypt(src string, key string) (string, error) {
	k := []byte(key)
	block, err := aes.NewCipher(k)
	if err != nil {
		return "", err
	}
	bs := block.BlockSize()
	// PKCS#7: pad with `pad` copies of the byte value `pad` (always 1..bs).
	plain := []byte(src)
	pad := bs - len(plain)%bs
	plain = append(plain, bytes.Repeat([]byte{byte(pad)}, pad)...)
	out := make([]byte, len(plain))
	cipher.NewCBCEncrypter(block, k[:bs]).CryptBlocks(out, plain)
	return base64.StdEncoding.EncodeToString(out), nil
}
//aec解密
func AesDecrypt(src string, key string) (string, error) {
// 转成字节数组
crytedByte, dErr := base64.StdEncoding.DecodeString(src)
if dErr != nil {
return "", dErr
}
k := []byte(key)
// 分组秘钥
block, err := aes.NewCipher(k)
if err != nil {
return "", err
}
// 获取秘钥块的长度
blockSize := block.BlockSize()
// 加密模式
blockMode := cipher.NewCBCDecrypter(block, k[:blockSize])
// 创建数组
orig := make([]byte, len(crytedByte))
// 解密
blockMode.CryptBlocks(orig, crytedByte)
// 去补全码
orig = PKCS7UnPadding(orig)
return string(orig), nil
}
// PKCS7Padding appends PKCS#7 padding to ciphertext: `pad` copies of the
// byte value `pad`, where pad is 1..blocksize, so the result length is a
// whole multiple of blocksize.
func PKCS7Padding(ciphertext []byte, blocksize int) []byte {
	pad := blocksize - len(ciphertext)%blocksize
	for i := 0; i < pad; i++ {
		ciphertext = append(ciphertext, byte(pad))
	}
	return ciphertext
}
// PKCS7UnPadding strips PKCS#7 padding from origData. The last byte states
// how many padding bytes to remove. For empty input it returns an empty
// slice; for a corrupt padding count larger than the data it returns the
// input unchanged — the original code panicked with a negative slice bound
// in that case.
func PKCS7UnPadding(origData []byte) []byte {
	length := len(origData)
	if length == 0 {
		return []byte{}
	}
	unpadding := int(origData[length-1])
	if unpadding > length {
		// Corrupt padding; returning the input avoids an out-of-range slice.
		return origData
	}
	return origData[:(length - unpadding)]
}
|
// Copyright © 2017 NAME HERE <EMAIL ADDRESS>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"github.com/spf13/cobra"
"github.com/OlegKuleba/goCobra/utils"
"fmt"
"strings"
"github.com/OlegKuleba/goCobra/models"
)
// editContactCmd represents the editContact command. It requires at least
// five positional arguments: lastName, phone, city, street, building (with
// an optional sixth, the apartment).
var editContactCmd = &cobra.Command{
	Use:   "editContact",
	Short: "A brief description of your command",
	Long: `A longer description that spans multiple lines and likely contains examples
and usage of using your command. For example:
Cobra is a CLI library for Go that empowers applications.
This application is a tool to generate the needed files
to quickly create a Cobra application.`,
	// gofmt: space after the key (was "Args:cobra.MinimumNArgs(5)").
	Args: cobra.MinimumNArgs(5),
	Run:  editContact,
}
// editContact validates the CLI arguments and updates an existing contact.
// Expected args: lastName, phone, city, street, building[, apartment].
func editContact(cmd *cobra.Command, args []string) {
	// If the file does not exist yet, IsFileExist reports this and notes that
	// the file is created when a new contact is written.
	if !utils.IsFileExist() {
		return
	}
	// If the arguments fail validation (all except the apartment, which is
	// optional), the helper prints the details and we bail out.
	if !utils.CheckParamsExceptApartment(args[0], args[1], args[2], args[3], args[4]) {
		return
	}
	// Collect the address parts from the command line.
	address := []string{args[2], args[3], args[4]}
	if len(args) > 5 { // An apartment was supplied, so
		address = append(address, args[5]) // add it to the address
		if !utils.Validate(args[5], utils.BuildingOrApartmentFlag) { // and validate it
			utils.PrintValidationMessages()
			return
		}
	}
	// Assemble the contact and hand it over for editing (the address is a
	// single string; its parts are joined with "-" for file readability).
	contact := models.NewContact(args[0], args[1], strings.Join(address, "-"))
	if utils.EditContact(contact) {
		fmt.Println("Запись успешно изменена")
	} else {
		fmt.Println("Запись не изменена")
	}
}
// init registers the editContact command with the root command.
func init() {
	rootCmd.AddCommand(editContactCmd)
	// Here you will define your flags and configuration settings.
	// Cobra supports Persistent Flags which will work for this command
	// and all subcommands, e.g.:
	// editContactCmd.PersistentFlags().String("foo", "", "A help for foo")
	// Cobra supports local flags which will only run when this command
	// is called directly, e.g.:
	// editContactCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aggfuncs
import (
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/util/chunk"
)
// percentRank calculates the percentage of partition values less than the
// value in the current row, excluding the highest value. It can be
// calculated as `(rank - 1) / (total_rows_in_set - 1)`.
type percentRank struct {
	baseAggFunc
	rowComparer
}
// AllocPartialResult allocates an empty rank partial result and reports its
// memory footprint.
func (*percentRank) AllocPartialResult() (partial PartialResult, memDelta int64) {
	return PartialResult(&partialResult4Rank{}), DefPartialResult4RankSize
}
// ResetPartialResult clears the rank state so the partial result can be
// reused for a new partition; the rows slice keeps its capacity.
func (*percentRank) ResetPartialResult(partial PartialResult) {
	p := (*partialResult4Rank)(partial)
	p.curIdx = 0
	p.lastRank = 0
	p.rows = p.rows[:0]
}
// UpdatePartialResult buffers the group's rows; the actual percent-rank
// computation happens row by row in AppendFinalResult2Chunk.
func (*percentRank) UpdatePartialResult(_ sessionctx.Context, rowsInGroup []chunk.Row, partial PartialResult) (memDelta int64, err error) {
	p := (*partialResult4Rank)(partial)
	p.rows = append(p.rows, rowsInGroup...)
	memDelta += int64(len(rowsInGroup)) * DefRowSize
	return memDelta, nil
}
// AppendFinalResult2Chunk appends the PERCENT_RANK() value for the next row
// of the partition to chk. It is called once per buffered row, in order;
// p.curIdx counts the rows emitted so far and p.lastRank tracks the rank of
// the current peer group.
func (pr *percentRank) AppendFinalResult2Chunk(_ sessionctx.Context, partial PartialResult, chk *chunk.Chunk) error {
	p := (*partialResult4Rank)(partial)
	numRows := int64(len(p.rows))
	p.curIdx++
	// The first row always has rank 1 and percent rank 0.
	if p.curIdx == 1 {
		p.lastRank = 1
		chk.AppendFloat64(pr.ordinal, 0)
		return nil
	}
	// Peer of the previous row (equal ordering key): reuse the previous rank.
	if pr.compareRows(p.rows[p.curIdx-2], p.rows[p.curIdx-1]) == 0 {
		chk.AppendFloat64(pr.ordinal, float64(p.lastRank-1)/float64(numRows-1))
		return nil
	}
	// New distinct value: the rank jumps to the current row number.
	p.lastRank = p.curIdx
	chk.AppendFloat64(pr.ordinal, float64(p.lastRank-1)/float64(numRows-1))
	return nil
}
|
package main
import (
ers "errors"
// "log"
"fmt"
"net"
//"strings"
"time"
"github.com/soniah/gosnmp"
"strconv"
)
// pduVal2str returns the PDU value as a string for OctetString PDUs and an
// empty string for any other PDU type. (Dropped the redundant else-after-
// return branch.)
func pduVal2str(pdu gosnmp.SnmpPDU) string {
	if pdu.Type == gosnmp.OctetString {
		return string(pdu.Value.([]byte))
	}
	return ""
}
// pduVal2Int64 converts a numeric (or numeric-string) PDU value to int64.
// Unsupported types yield 0. Note that uint64 values above MaxInt64 wrap.
func pduVal2Int64(pdu gosnmp.SnmpPDU) int64 {
	switch value := pdu.Value.(type) { // shadow
	case int:
		return int64(value)
	case int8:
		return int64(value)
	case int16:
		return int64(value)
	case int32:
		return int64(value)
	case int64:
		return value
	case uint:
		return int64(value)
	case uint8:
		return int64(value)
	case uint16:
		return int64(value)
	case uint32:
		return int64(value)
	case uint64:
		return int64(value)
	case string:
		// For testing and other apps numbers may appear as strings. The
		// original code returned val on BOTH the error and success paths
		// (the err check was dead), which is equivalent to ignoring the
		// error: a parse failure yields ParseInt's zero result.
		val, _ := strconv.ParseInt(value, 10, 64)
		return val
	default:
		return 0
	}
}
// pduVal2Hwaddr converts a string or []byte PDU value to a formatted
// hardware (MAC) address string.
// Bug fix: the original stored the formatted string back into the interface
// variable and then asserted `value.([]byte)` on it, which always panicked
// (the dynamic type was string). Return the string directly instead.
func pduVal2Hwaddr(pdu gosnmp.SnmpPDU) (string, error) {
	switch vt := pdu.Value.(type) {
	case string:
		return net.HardwareAddr(vt).String(), nil
	case []byte:
		return net.HardwareAddr(vt).String(), nil
	default:
		return "", fmt.Errorf("invalid type (%T) for hwaddr conversion", pdu.Value)
	}
}
// pduVal2IPaddr converts a 4- or 16-byte string/[]byte PDU value to a
// formatted IP address string.
// Bug fix: the original stored the formatted string back into the interface
// variable and then asserted `value.([]byte)` on it, which always panicked
// (the dynamic type was string). Return the string directly instead.
func pduVal2IPaddr(pdu gosnmp.SnmpPDU) (string, error) {
	var ipbs []byte
	switch vt := pdu.Value.(type) {
	case string:
		ipbs = []byte(vt)
	case []byte:
		ipbs = vt
	default:
		return "", fmt.Errorf("invalid type (%T) for ipaddr conversion", pdu.Value)
	}
	switch len(ipbs) {
	case 4, 16:
		return net.IP(ipbs).String(), nil
	default:
		return "", fmt.Errorf("invalid length (%d) for ipaddr conversion", len(ipbs))
	}
}
const (
	// maxOids mirrors gosnmp's per-request OID limit.
	maxOids = 60 // const in gosnmp
)
// snmpClient builds and connects a gosnmp client for the device's configured
// SNMP version (1, 2c or 3), resolves the host name first, and performs an
// initial connection plus a basic system-info query. Connection/query
// failures after a successful build are logged but the client is still
// returned along with the last error.
func snmpClient(d *SnmpDevice) (*gosnmp.GoSNMP, error) {
	s := d.cfg
	var client *gosnmp.GoSNMP
	hostIPs, err := net.LookupHost(s.Host)
	if err != nil {
		// NOTE(review): these two early failures use the package-level log
		// while the rest of the function uses d.log — confirm whether d.log
		// is usable this early or if the inconsistency is intentional.
		log.Errorf("Error on Name Lookup for host: %s ERROR: %s", s.Host, err)
		return nil, err
	}
	if len(hostIPs) == 0 {
		log.Errorf("Error on Name Lookup for host: %s ", s.Host)
		return nil, ers.New("Error on Name Lookup for host :" + s.Host)
	}
	if len(hostIPs) > 1 {
		d.log.Warnf("Lookup for %s host has more than one IP: %v => Finally used first IP %s", s.Host, hostIPs, hostIPs[0])
	}
	switch s.SnmpVersion {
	case "1":
		client = &gosnmp.GoSNMP{
			Target:  hostIPs[0],
			Port:    uint16(s.Port),
			Version: gosnmp.Version1,
			Timeout: time.Duration(s.Timeout) * time.Second,
			Retries: s.Retries,
		}
	case "2c":
		//validate community
		if len(s.Community) < 1 {
			d.log.Errorf("Error no community found %s in host %s", s.Community, s.Host)
			return nil, ers.New("Error on snmp community")
		}
		client = &gosnmp.GoSNMP{
			Target:    hostIPs[0],
			Port:      uint16(s.Port),
			Community: s.Community,
			Version:   gosnmp.Version2c,
			Timeout:   time.Duration(s.Timeout) * time.Second,
			Retries:   s.Retries,
		}
	case "3":
		// Translation tables from config strings to gosnmp v3 constants.
		seclpmap := map[string]gosnmp.SnmpV3MsgFlags{
			"NoAuthNoPriv": gosnmp.NoAuthNoPriv,
			"AuthNoPriv":   gosnmp.AuthNoPriv,
			"AuthPriv":     gosnmp.AuthPriv,
		}
		authpmap := map[string]gosnmp.SnmpV3AuthProtocol{
			"NoAuth": gosnmp.NoAuth,
			"MD5":    gosnmp.MD5,
			"SHA":    gosnmp.SHA,
		}
		privpmap := map[string]gosnmp.SnmpV3PrivProtocol{
			"NoPriv": gosnmp.NoPriv,
			"DES":    gosnmp.DES,
			"AES":    gosnmp.AES,
		}
		UsmParams := new(gosnmp.UsmSecurityParameters)
		if len(s.V3AuthUser) < 1 {
			d.log.Errorf("Error username not found in snmpv3 %s in host %s", s.V3AuthUser, s.Host)
			return nil, ers.New("Error on snmp v3 user")
		}
		// Build USM parameters according to the configured security level,
		// validating only the credentials that level actually requires.
		switch s.V3SecLevel {
		case "NoAuthNoPriv":
			UsmParams = &gosnmp.UsmSecurityParameters{
				UserName:               s.V3AuthUser,
				AuthenticationProtocol: gosnmp.NoAuth,
				PrivacyProtocol:        gosnmp.NoPriv,
			}
		case "AuthNoPriv":
			if len(s.V3AuthPass) < 1 {
				d.log.Errorf("Error password not found in snmpv3 %s in host %s", s.V3AuthUser, s.Host)
				return nil, ers.New("Error on snmp v3 AuthPass")
			}
			//validate correct s.authuser
			if val, ok := authpmap[s.V3AuthProt]; !ok {
				d.log.Errorf("Error in Auth Protocol %v | %v in host %s", s.V3AuthProt, val, s.Host)
				return nil, ers.New("Error on snmp v3 AuthProt")
			}
			//validate s.authpass s.authprot
			UsmParams = &gosnmp.UsmSecurityParameters{
				UserName:                 s.V3AuthUser,
				AuthenticationProtocol:   authpmap[s.V3AuthProt],
				AuthenticationPassphrase: s.V3AuthPass,
				PrivacyProtocol:          gosnmp.NoPriv,
			}
		case "AuthPriv":
			//validate s.authpass s.authprot
			if len(s.V3AuthPass) < 1 {
				d.log.Errorf("Error password not found in snmpv3 %s in host %s", s.V3AuthUser, s.Host)
				return nil, ers.New("Error on snmp v3 AuthPass")
			}
			if val, ok := authpmap[s.V3AuthProt]; !ok {
				d.log.Errorf("Error in Auth Protocol %v | %v in host %s", s.V3AuthProt, val, s.Host)
				return nil, ers.New("Error on snmp v3 AuthProt")
			}
			//validate s.privpass s.privprot
			if len(s.V3PrivPass) < 1 {
				d.log.Errorf("Error privPass not found in snmpv3 %s in host %s", s.V3AuthUser, s.Host)
				// log.Printf("DEBUG SNMP: %+v", *s)
				return nil, ers.New("Error on snmp v3 PrivPAss")
			}
			if val, ok := privpmap[s.V3PrivProt]; !ok {
				d.log.Errorf("Error in Priv Protocol %v | %v in host %s", s.V3PrivProt, val, s.Host)
				return nil, ers.New("Error on snmp v3 AuthPass")
			}
			UsmParams = &gosnmp.UsmSecurityParameters{
				UserName:                 s.V3AuthUser,
				AuthenticationProtocol:   authpmap[s.V3AuthProt],
				AuthenticationPassphrase: s.V3AuthPass,
				PrivacyProtocol:          privpmap[s.V3PrivProt],
				PrivacyPassphrase:        s.V3PrivPass,
			}
		default:
			d.log.Errorf("Error no Security Level found %s in host %s", s.V3SecLevel, s.Host)
			return nil, ers.New("Error on snmp Security Level")
		}
		client = &gosnmp.GoSNMP{
			Target:             hostIPs[0],
			Port:               uint16(s.Port),
			Version:            gosnmp.Version3,
			Timeout:            time.Duration(s.Timeout) * time.Second,
			Retries:            s.Retries,
			SecurityModel:      gosnmp.UserSecurityModel,
			MsgFlags:           seclpmap[s.V3SecLevel],
			SecurityParameters: UsmParams,
		}
	default:
		d.log.Errorf("Error no snmpversion found %s in host %s", s.SnmpVersion, s.Host)
		return nil, ers.New("Error on snmp Version")
	}
	if s.SnmpDebug {
		client.Logger = d.DebugLog()
	}
	//first connect
	err = client.Connect()
	if err != nil {
		d.log.Errorf("error on first connect %s", err)
	} else {
		d.log.Infof("First SNMP connection to host %s stablished", s.Host)
	}
	//first snmp query
	d.SysInfo, err = d.GetSysInfo(client)
	if err != nil {
		d.log.Errorf("error on get System Info %s", err)
	} else {
		d.log.Infof("Got basic system info %#v ", d.SysInfo)
	}
	// Note: the client is returned even when the connect/first-query err is
	// non-nil, so callers receive both the client and the last error.
	return client, err
}
|
package main
import (
"image/png"
"os"
"image"
"io/ioutil"
"bytes"
"fmt"
"github.com/liyue201/goqr"
"github.com/boombuler/barcode"
"github.com/boombuler/barcode/qr"
)
// qrCodeGen encodes t as a QR code (error-correction level M), scales it to
// 2000x2000 pixels and writes it to filename as a PNG.
// It returns the first error encountered; the previous version ignored the
// encode/scale/png errors and deferred Close before checking os.Create's
// error (nil-pointer risk).
func qrCodeGen(t string, filename string) error {
	qrCode, err := qr.Encode(t, qr.M, qr.Auto)
	if err != nil {
		return err
	}
	qrCode, err = barcode.Scale(qrCode, 2000, 2000)
	if err != nil {
		return err
	}
	file, err := os.Create(filename)
	if err != nil {
		return err
	}
	defer file.Close()
	return png.Encode(file, qrCode)
}
// scanQRCode reads the image at path, decodes it and prints the payload of
// every QR code recognized in it. Errors are printed and abort the scan.
func scanQRCode(path string) {
	imgdata, err := ioutil.ReadFile(path)
	if err != nil {
		fmt.Printf("%v\n", err)
		return
	}
	img, _, err := image.Decode(bytes.NewReader(imgdata))
	if err != nil {
		fmt.Printf("image.Decode error: %v\n", err)
		return
	}
	qrCodes, err := goqr.Recognize(img)
	if err != nil {
		fmt.Printf("Recognize failed: %v\n", err)
		return
	}
	for _, qrCode := range qrCodes {
		fmt.Printf("qrCode text: %s\n", qrCode.Payload)
	}
	// The redundant trailing `return` of the previous version was removed.
}
// main generates a QR code PNG from a sample text and then scans the same
// file back, printing the decoded payload. The previous version discarded
// qrCodeGen's error and scanned a possibly missing/corrupt file.
func main() {
	const text = "This is a text"
	const filename = "qrcode.png"
	if err := qrCodeGen(text, filename); err != nil {
		fmt.Printf("qrCodeGen error: %v\n", err)
		return
	}
	scanQRCode(filename)
}
|
package goil
import (
"bytes"
"errors"
"fmt"
"io"
"mime/multipart"
"net/http"
"time"
)
// TODO Check if when post with no group "official" field exists
// CreatePublication builds a Publication with the given message and
// category, initially attached to no group.
func CreatePublication(message string, category Category) *Publication {
	p := Publication{
		Message:  message,
		Category: category,
		Group:    NoGroup,
	}
	return &p
}
// PublishAs marks the publication as posted on behalf of the given group,
// optionally as an official group post.
func (p *Publication) PublishAs(group Group, official bool) {
	p.Official = official
	p.Group = group
}
// AddEvent attaches the given event to the publication.
func (p *Publication) AddEvent(event Event) { p.Event = event }
// CreateSurvey creates a survey with the given question, closing time,
// multiple-choice flag and list of possible answers.
func CreateSurvey(question string, end time.Time, multiple bool, answers ...string) Survey {
	var s Survey
	s.Question = question
	s.End = end
	s.Multiple = multiple
	s.Answers = answers
	return s
}
// AddSurvey attaches the given survey to the publication.
func (p *Publication) AddSurvey(survey Survey) { p.Survey = survey }
// bts converts a bool to the "1"/"0" string form the API expects.
func bts(from bool) string {
	if from {
		return "1"
	}
	return "0"
}
// write serializes the publication into w as multipart/form-data and
// returns the content type (carrying the multipart boundary) to use for
// the request.
func (publication *Publication) write(w io.Writer) (string, error) {
	writer := multipart.NewWriter(w)
	// Base parameters common to every publication.
	params := map[string]string{
		"message":  publication.Message,
		"category": publication.Category.format(),
		"group":    publication.Group.format(),
		// NOTE(review): "private" is filled from the Dislike flag, which
		// looks like a copy-paste slip — confirm against the API and the
		// Publication struct before changing it.
		"private": bts(publication.Dislike),
		"dislike": bts(publication.Dislike),
	}
	// If there is no group indicated, do not take publication.Official into account.
	if publication.Group != 0 {
		params["official"] = bts(publication.Official)
	}
	// Optional event fields.
	if publication.Event.populated() {
		params["event_title"] = publication.Event.Name
		params["event_start"] = publication.Event.Start.Format(timeLayout)
		params["event_end"] = publication.Event.End.Format(timeLayout)
	}
	// Optional survey fields. Answers are written separately below since
	// there can be several of them under the same key.
	if publication.Survey.populated() {
		params["survey_question"] = publication.Survey.Question
		params["survey_end"] = publication.Survey.End.Format(timeLayout)
		params["survey_multiple"] = bts(publication.Survey.Multiple)
	}
	// Add the key/value pairs to the multipart request.
	for key, val := range params {
		if err := writer.WriteField(key, val); err != nil {
			return "", err
		}
	}
	// Add the survey answers. The previous version iterated over the
	// params map here, sending the base parameters as answers and never
	// sending the actual survey answers.
	if publication.Survey.populated() {
		for _, answer := range publication.Survey.Answers {
			if err := writer.WriteField("survey_answer[]", answer); err != nil {
				return "", err
			}
		}
	}
	// Add the attachments; this error was previously overwritten by the
	// writer.Close result and silently lost.
	if publication.Attachments.Populated() {
		if err := publication.Attachments.writeToMultipart(writer); err != nil {
			return "", err
		}
	}
	contentType := writer.FormDataContentType()
	// Close the writer to flush the terminating boundary.
	return contentType, writer.Close()
}
// postPublicationURI is the endpoint used by PostPublication to submit a
// new publication.
const postPublicationURI string = BaseURLString + "post/add"
// PostPublication submits the given publication through the session's HTTP
// client. It returns an error if the session is nil, if the request could
// not be built or executed, or if the server does not answer 200.
func (s *Session) PostPublication(publication *Publication) error {
	if s == nil {
		return errors.New("given session pointer is nil")
	}
	// Serialize the publication into the request body.
	body := &bytes.Buffer{}
	contentType, err := publication.write(body)
	if err != nil {
		return err
	}
	req, err := http.NewRequest("POST", postPublicationURI, body)
	if err != nil {
		return err
	}
	// The multipart boundary is carried by the content type.
	req.Header.Set("Content-Type", contentType)
	resp, err := s.Client.Do(req)
	if err != nil {
		return err
	}
	// The body is not inspected; close it so the connection can be reused.
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// fmt.Errorf replaces the previous errors.New(fmt.Sprintf(...))
		// (staticcheck S1028).
		return fmt.Errorf("status code of response isn't right: %d instead of 200", resp.StatusCode)
	}
	return nil
}
|
package main
import "github.com/makishi00/go-vue-bbs/model"
func main() {
db := model.GetDBConn()
db.DropTableIfExists(&model.User{})
db.DropTableIfExists(&model.Token{})
db.DropTableIfExists(&model.Article{})
db.AutoMigrate(&model.User{})
db.AutoMigrate(&model.Token{})
db.AutoMigrate(&model.Article{})
}
|
package chapter8
import (
"fmt"
"testing"
)
// TestBSTSortedOrder builds a small binary search tree and prints its
// values via BSTSortedOrder (an in-order traversal smoke test; output is
// inspected manually, not asserted).
func TestBSTSortedOrder(t *testing.T) {
	leftSubtree := &BST{7, &BST{Value: 3}, &BST{Value: 11}}
	rightSubtree := &BST{43, &BST{Value: 23}, nil}
	tree := BST{19, leftSubtree, rightSubtree}
	fmt.Println("TestBSTSortedOrder:")
	BSTSortedOrder(&tree)
	fmt.Println()
}
|
package upstream_notify
import (
"context"
"encoding/json"
"errors"
"fmt"
"strconv"
"tpay_backend/model"
"tpay_backend/payapi/internal/logic"
"tpay_backend/upstream"
"github.com/tal-tech/go-zero/core/logx"
"tpay_backend/payapi/internal/svc"
)
// ThreeSevenPayTransferLogic handles transfer (payout) notifications coming
// from the ThreeSevenPay upstream.
type ThreeSevenPayTransferLogic struct {
	logx.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}

// NewThreeSevenPayTransferLogic builds a ThreeSevenPayTransferLogic bound to
// the given request context and service context.
func NewThreeSevenPayTransferLogic(ctx context.Context, svcCtx *svc.ServiceContext) ThreeSevenPayTransferLogic {
	l := ThreeSevenPayTransferLogic{ctx: ctx, svcCtx: svcCtx}
	l.Logger = logx.WithContext(ctx)
	return l
}
// ThreeSevenPayTransfer processes a transfer (payout) notification body
// from the ThreeSevenPay upstream: it parses and validates the payload,
// verifies the signature, checks the order state and synchronizes the
// order status. Returning nil acknowledges the notification.
func (l *ThreeSevenPayTransferLogic) ThreeSevenPayTransfer(body []byte) error {
	var reqData struct {
		Success   int64   `json:"success"`   // request outcome: 1 ok, 0 failed
		Message   string  `json:"message"`   // error message, present only on failure
		Ticket    string  `json:"ticket"`    // access ticket
		IsPay     int64   `json:"ispay"`     // paid flag: 0 unpaid, 1 paid
		PayCode   string  `json:"paycode"`   // payment code returned by the gateway
		PayAmount float64 `json:"payamount"` // actual paid amount from the gateway; business logic must use this, not the order amount
		// NOTE(review): the json tags "msg"/"status" below do not match the
		// field meanings (pay time / pay user) — confirm against the
		// upstream's documented payload before relying on these fields.
		PayTime   string `json:"msg"`       // payment time, "2000-01-01 23:34:56"
		PayUser   string `json:"status"`    // paying user
		Sign      string `json:"sign"`      // signature
		Amount    int64  `json:"amount"`    // amount at order creation, echoed back
		Note      string `json:"note"`      // note at order creation, echoed back
		UserId    string `json:"userid"`    // merchant number
		OrderId   string `json:"orderid"`   // merchant order number
		PayType   string `json:"type"`      // payment type
		SerialNo  string `json:"serialno"`  // payment remark
		BMount    string `json:"bmount"`    // trailing amount
		IsCancel  int64  `json:"iscancel"`  // cancelled flag: 0 not cancelled, 1 cancelled
		OrderType int64  `json:"ordertype"` // order type: 1 = pay-in order, 2 = payout order
		Mark      string `json:"mark"`      // cancellation reason
	}
	// 1. Parse the notification payload.
	if err := json.Unmarshal(body, &reqData); err != nil {
		return fmt.Errorf("解析json参数失败:%v, body:%v", err, string(body))
	}
	// 2. Validate required parameters.
	if reqData.UserId == "" || reqData.Sign == "" || reqData.OrderId == "" || reqData.Ticket == "" {
		return fmt.Errorf("缺少必须参数,reqData:%+v", reqData)
	}
	// 3. Look up the upstream by its merchant number.
	up, err := model.NewUpstreamModel(l.svcCtx.DbEngine).FindOneByUpstreamMerchantNo(reqData.UserId)
	if err != nil {
		if err == model.ErrRecordNotFound {
			return fmt.Errorf("未找到对应的上游:UpstreamMerchantNo:%v", reqData.UserId)
		}
		return fmt.Errorf("查询上游信息失败:err:%v,UpstreamMerchantNo:%v", err, reqData.UserId)
	}
	logx.Infof("上游信息:%+v", up)
	upObj, err := logic.NewFuncLogic(l.svcCtx).GetUpstreamObject(up)
	if err != nil {
		logx.Errorf("获取上游对象失败err:%v,upstream:%+v", err, up)
		return errors.New("获取上游对象失败")
	}
	// 4. Verify the signature. Cancelled orders are signed over the
	// original integer amount; otherwise the actual paid amount is used.
	dataMap := map[string]interface{}{
		"orderid": reqData.OrderId,
		"sign":    reqData.Sign,
	}
	if reqData.IsCancel == upstream.ThreeSevenPayCancelled {
		dataMap["amount"] = strconv.FormatInt(reqData.Amount, 10)
	} else {
		dataMap["amount"] = strconv.FormatFloat(reqData.PayAmount, 'f', -1, 64)
	}
	if err := upObj.CheckSign(dataMap); err != nil {
		logx.Errorf("校验签名失败err:%v,dataMap:%+v", err, dataMap)
		return errors.New("校验签名失败")
	}
	// 5. Load the transfer order and check its state.
	order, err := model.NewTransferOrderModel(l.svcCtx.DbEngine).FindByOrderNo(reqData.OrderId)
	if err != nil {
		if err == model.ErrRecordNotFound {
			l.Errorf("订单[%v]不存在", reqData.OrderId)
			return fmt.Errorf("找不到订单[%v]", reqData.OrderId)
		}
		return errors.New("查询订单失败")
	}
	l.Infof("订单信息:%+v", order)
	if order.OrderStatus == model.TransferOrderStatusPaid {
		// Duplicate notification for an already-paid order: acknowledge it.
		l.Errorf("代付订单已支付,重复通知, order.OrderNo:%v", order.OrderNo)
		return nil
	}
	if order.OrderStatus != model.TransferOrderStatusPending {
		l.Errorf("代付订单不是待支付订单, order.OrderNo:%v, order.OrderStatus:%v", order.OrderNo, order.OrderStatus)
		return errors.New("订单状态不允许")
	}
	if order.ReqAmount != reqData.Amount {
		l.Errorf("订单[%v]金额不对, order.reqAmount:%v, reqData.Amount:%v", reqData.OrderId, order.ReqAmount, reqData.Amount)
		return errors.New("订单金额不对")
	}
	// 6. Map the upstream state to our order status and sync it.
	// NOTE(review): this is a transfer (payout) order but the constants
	// used are model.PayOrderStatus* — confirm whether
	// model.TransferOrderStatus* values were intended here.
	var orderStatus int64
	var failReason string
	if reqData.IsPay == upstream.ThreeSevenPayPaid {
		orderStatus = model.PayOrderStatusPaid
	} else if reqData.IsCancel == upstream.ThreeSevenPayCancelled {
		orderStatus = model.PayOrderStatusFail
		failReason = reqData.Mark
	} else {
		// The previous format string was missing the verb for IsCancel.
		l.Errorf("上游通知的是一个未知的订单状态, reqData.IsPay:%v, reqData.IsCancel:%v", reqData.IsPay, reqData.IsCancel)
		return errors.New("订单状态不对")
	}
	if err := NewSyncOrder(context.TODO(), l.svcCtx).SyncTransferOrder(order, orderStatus, failReason); err != nil {
		l.Errorf("同步订单信息, orderNo:%v, MerchantNo:%v, err:%v", order.OrderNo, order.MerchantNo, err)
		return err
	}
	return nil
}
|
package test2json
import (
"context"
"io"
"testing"
"github.com/stretchr/testify/require"
"go.skia.org/infra/go/deepequal/assertdeep"
"go.skia.org/infra/go/exec"
"go.skia.org/infra/go/sklog"
"go.skia.org/infra/go/sktest"
"go.skia.org/infra/go/testutils"
"go.skia.org/infra/go/testutils/unittest"
"go.skia.org/infra/go/util"
)
// runTest runs `go test -json ./...` in a temporary directory populated
// from content, streaming stdout to w.
func runTest(t sktest.TestingT, w io.Writer, content TestContent) {
	testDir, cleanup, err := SetupTest(content)
	require.NoError(t, err)
	defer cleanup()
	cmd := &exec.Command{
		Name:   "go",
		Args:   []string{"test", "-json", "./..."},
		Dir:    testDir,
		Stdout: w,
	}
	// Ignore the error, since some cases expect the tests to fail.
	_, _ = exec.RunCommand(context.Background(), cmd)
}
// runTestAndCompare runs the tests for content and asserts that the events
// emitted by EventStream match expectEvents one-to-one, after normalizing
// the fields that vary from run to run (timestamps, durations).
func runTestAndCompare(t sktest.TestingT, expectEvents []*Event, content TestContent) {
	r, w := io.Pipe()
	go func() {
		defer testutils.AssertCloses(t, w)
		runTest(t, w, content)
	}()
	i := 0
	for actual := range EventStream(r) {
		// Guard against more events than expected; the previous version
		// indexed out of range here.
		require.True(t, i < len(expectEvents), "unexpected extra event %d", i)
		expect := expectEvents[i]
		// Fake out some fields.
		require.False(t, util.TimeIsZero(actual.Time))
		actual.Time = expect.Time
		actual.Output = tsRegex.ReplaceAllString(actual.Output, "0.00s")
		actual.Elapsed = 0.0
		// Compare to the expected event.
		sklog.Errorf("Event %d", i)
		assertdeep.Equal(t, expect, actual)
		i++
	}
	// All expected events must actually have been seen; missing events
	// previously went undetected.
	require.Equal(t, len(expectEvents), i)
}
// TestEventStreamFail verifies event parsing for a failing test run.
func TestEventStreamFail(t *testing.T) {
	unittest.MediumTest(t)
	runTestAndCompare(t, EVENTS_FAIL, CONTENT_FAIL)
}
// TestEventStreamPass verifies event parsing for a passing test run.
func TestEventStreamPass(t *testing.T) {
	unittest.MediumTest(t)
	runTestAndCompare(t, EVENTS_PASS, CONTENT_PASS)
}
// TestEventStreamSkip verifies event parsing for a skipped test run.
func TestEventStreamSkip(t *testing.T) {
	unittest.MediumTest(t)
	runTestAndCompare(t, EVENTS_SKIP, CONTENT_SKIP)
}
|
package main
import (
"fmt"
)
// https://leetcode-cn.com/problems/minimum-deletions-to-make-string-balanced/
// 1653. 使字符串平衡的最少删除次数 | Minimum Deletions to Make String Balanced
//------------------------------------------------------------------------------
// minimumDeletions returns the minimum number of deletions so that every
// 'a' in s precedes every 'b'. It delegates to the O(1)-space solution.
func minimumDeletions(s string) int {
	return minimumDeletions0(s)
}
//------------------------------------------------------------------------------
// Solution 1
//
// Prefix/suffix counting: a balanced result keeps a prefix of 'a's and a
// suffix of 'b's, so the longest kept string at split point i has length
// P[i] + S[i], where P[i] counts 'a' in s[:i] and S[i] counts 'b' in s[i:].
// The answer is len(s) minus the maximum of that sum. Only the prefix array
// is materialized; the suffix count is folded into one variable while
// scanning right-to-left.
//
// Complexity:
// * time: O(N)
// * space: O(N)
func minimumDeletions1(s string) int {
	n := len(s)
	prefixA := make([]int, n+1)
	for i := 0; i < n; i++ {
		prefixA[i+1] = prefixA[i]
		if s[i] == 'a' {
			prefixA[i+1]++
		}
	}
	best, suffixB := 0, 0
	for i := n - 1; i >= 0; i-- {
		if s[i] == 'b' {
			suffixB++
		}
		if kept := prefixA[i+1] + suffixB; kept > best {
			best = kept
		}
	}
	return n - best
}
//------------------------------------------------------------------------------
// Solution 0
//
// Counter-as-stack: track how many 'b' characters are pending deletion. On
// each non-'b' character, if a 'b' is pending, cancel one pending 'b' and
// count one deletion. The total number of cancellations is the minimum
// number of deletions.
//
// Complexity:
// * time: O(N)
// * space: O(1) — a counter stands in for the stack.
func minimumDeletions0(s string) int {
	pending, deleted := 0, 0
	for _, c := range s {
		switch {
		case c == 'b':
			pending++
		case pending > 0:
			pending--
			deleted++
		}
	}
	return deleted
}
//------------------------------------------------------------------------------
// main
//
// Runs both solutions over the sample cases and prints their answers.
func main() {
	cases := []string{
		"aababbab", // expected: 2
		"ababaaaabbbbbaaababbbbbbaaabbaababbabbbbaabbbbaabbabbabaabbbababaa", // expected: 25
	}
	for i, c := range cases {
		fmt.Println("## case", i)
		fmt.Println(minimumDeletions0(c))
		fmt.Println(minimumDeletions1(c))
	}
}
|
package hostsfile_test
import (
"bytes"
"fmt"
"net/netip"
"strings"
"github.com/AdguardTeam/golibs/errors"
"github.com/AdguardTeam/golibs/hostsfile"
"github.com/AdguardTeam/golibs/netutil"
)
// ExampleFuncSet demonstrates using a FuncSet callback to index hosts-file
// records both by address and by hostname.
func ExampleFuncSet() {
	const content = "# comment\n" +
		"1.2.3.4 host1 host2\n" +
		"4.3.2.1 host3\n" +
		"1.2.3.4 host4 host5 # repeating address\n" +
		"2.3.4.5 host3 # repeating hostname"

	byName := map[string][]netip.Addr{}
	byAddr := map[netip.Addr][]string{}
	set := hostsfile.FuncSet(func(r *hostsfile.Record) {
		byAddr[r.Addr] = append(byAddr[r.Addr], r.Names...)
		for _, name := range r.Names {
			byName[name] = append(byName[name], r.Addr)
		}
	})

	// Parse the hosts file.
	err := hostsfile.Parse(set, strings.NewReader(content), nil)
	fmt.Printf("error: %s\n", err)
	fmt.Printf("records for 1.2.3.4: %q\n", byAddr[netip.MustParseAddr("1.2.3.4")])
	fmt.Printf("records for host3: %s\n", byName["host3"])
	// Output:
	// error: parsing: line 1: line is empty
	// records for 1.2.3.4: ["host1" "host2" "host4" "host5"]
	// records for host3: [4.3.2.1 2.3.4.5]
}
// invalidSet is a [HandleSet] implementation that collects invalid records.
type invalidSet []hostsfile.Record

// Add implements the [Set] interface for invalidSet.
func (s *invalidSet) Add(r *hostsfile.Record) { *s = append(*s, *r) }

// HandleInvalid implements the [HandleSet] interface for invalidSet. It
// keeps only records whose error is a [netutil.AddrError], reconstructing
// the record's names from the raw line starting at the offending address.
// (The previous comment misnamed this method "AddInvalid".)
func (s *invalidSet) HandleInvalid(srcName string, data []byte, err error) {
	addrErr := &netutil.AddrError{}
	if !errors.As(err, &addrErr) {
		return
	}
	rec := &hostsfile.Record{Source: srcName}
	// Best-effort parse; the record is known to be invalid.
	_ = rec.UnmarshalText(data)
	// Strip any trailing comment before extracting names.
	if commIdx := bytes.IndexByte(data, '#'); commIdx >= 0 {
		data = bytes.TrimRight(data[:commIdx], " \t")
	}
	invIdx := bytes.Index(data, []byte(addrErr.Addr))
	for _, name := range bytes.Fields(data[invIdx:]) {
		rec.Names = append(rec.Names, string(name))
	}
	s.Add(rec)
}
// ExampleHandleSet shows how hosts-file lines are surfaced through the
// invalidSet HandleSet implementation defined above.
func ExampleHandleSet() {
	const content = "\n" +
		"# comment\n" +
		"4.3.2.1 invalid.-host valid.host # comment\n" +
		"1.2.3.4 another.valid.host\n"
	set := invalidSet{}
	err := hostsfile.Parse(&set, strings.NewReader(content), nil)
	fmt.Printf("error: %v\n", err)
	for _, r := range set {
		fmt.Printf("%q\n", r.Names)
	}
	// Output:
	// error: <nil>
	// ["invalid.-host" "valid.host"]
	// ["another.valid.host"]
}
|
package easyorm_test
//type Account struct {
// Id int64 `easyorm:id,primary_key`
// Name string `easyorm:name`
// Passowrd string `easyorm:passowrd`
// Status int8 `easyorm:status`
//}
//
//func ExampleEasyORM() {
// db, _ := easyorm.Open("root:123456@tcp(localhost:3306)/test")
// tb, err := db.BindModel("tb_account", &Account{})
// if err != nil {
// log.Fatalf("db.BindModel: %v", err)
// }
//
// tb.Count()
//
// //单条记录接口
// tb.Add(id, Account{})
// tb.Set(id, Account{})
// tb.Get(id)
// tb.Remove(id)
//
// tb.Set(id, map[string]interface{})
//
// //批量接口
// tb.Insert(&easyorm.Condition{}, []*Account)
// tb.Update(&easyorm.Condition{}, []*Account)
// tb.Select(&easyorm.Condition{})
// tb.Delete(&easyorm.Condition{})
//}
|
// Copyright 2014 The Sporting Exchange Limited. All rights reserved.
// Use of this source code is governed by a free license that can be
// found in the LICENSE file.
// Package relay implements fan-out to remote relays.
package relay
import (
"bytes"
"expvar"
"fmt"
"log"
"net"
"os"
"sync"
"opentsp.org/internal/tsdb"
)
const (
	// Queue-full policies: drop silently, or drop and log each lost point.
	oqfDrop       = "Drop"
	oqfDropAndLog = "DropAndLog"
)

var (
	// Per-relay expvar metrics: established connections, error counters,
	// and current queue length.
	statRelayCurrEstab = expvar.NewMap("relay.CurrEstab")
	statRelayErrors    = expvar.NewMap("relay.Errors")
	statRelayQueue     = expvar.NewMap("relay.Queue")
)

// Config configures a single relay target.
type Config struct {
	// DropRepeats enables suppression of repeated points.
	DropRepeats bool
	// Host is the relay's address; required.
	Host string
	// MaxConnsPerHost bounds concurrent connections to the host.
	// Defaults to 1 when unset; must be in [1, 15].
	MaxConnsPerHost *int
	// OnQueueFull selects the drop policy; empty defaults to Drop.
	OnQueueFull string
}

// Validate checks the config and fills in the defaults for OnQueueFull and
// MaxConnsPerHost.
func (c *Config) Validate() error {
	if c.Host == "" {
		return fmt.Errorf("invalid relay: missing Host")
	}
	switch c.OnQueueFull {
	default:
		return fmt.Errorf("invalid OnQueueFull: %q", c.OnQueueFull)
	case "":
		c.OnQueueFull = oqfDrop
	case oqfDrop, oqfDropAndLog:
		// ok
	}
	switch max, defaultMax := c.MaxConnsPerHost, 1; {
	default:
		return fmt.Errorf("MaxConnsPerHost out of range: %d", *max)
	case max == nil:
		c.MaxConnsPerHost = &defaultMax
	case 1 <= *max && *max < 16:
		// ok. The previous check (*max < 16) also accepted zero and
		// negative values, which make no sense for a connection count.
	}
	return nil
}
// Relay forwards points to a single remote relay host through a tsdb
// client; drop is invoked with the raw bytes of points discarded when the
// queue is full.
type Relay struct {
	name   string
	host   string
	drop   func([]byte)
	client *tsdb.Client
}
// NewRelay returns a new relay. It validates config (filling defaults),
// wires the drop callback (optionally logging each lost point), creates
// the tsdb client and registers an expvar metric for the relay's queue
// length.
func NewRelay(name string, config *Config) (*Relay, error) {
	if err := config.Validate(); err != nil {
		return nil, fmt.Errorf("relay %s: %v", name, err)
	}
	r := &Relay{
		name: name,
		host: config.Host,
	}
	// Count dropped points; with DropAndLog, also log each lost point.
	r.drop = drop(name)
	if config.OnQueueFull == oqfDropAndLog {
		r.drop = logLost(name, r.drop)
	}
	r.client = tsdb.NewClient(config.Host, &tsdb.ClientConfig{
		DropRepeats:     config.DropRepeats,
		MaxConnsPerHost: *config.MaxConnsPerHost,
		Drop:            r.drop,
	})
	// Wrap dialing with error accounting and logging.
	r.client.Dial = dial(name, r.client.Dial)
	queue := r.client.Queue()
	// Expose the current queue length as an expvar metric.
	statRelayQueue.Set("relay="+name, expvar.Func(func() interface{} {
		return len(queue)
	}))
	return r, nil
}
// Submit submits the given point to the relay. It does not block in network calls
// to the relay host. Not safe for concurrent use.
func (r *Relay) Submit(point *tsdb.Point) {
	// Enqueue on the tsdb client; delivery happens asynchronously.
	r.client.Put(point)
}
type dialFunc func(string) (net.Conn, error)
func dial(name string, fn dialFunc) dialFunc {
return func(addr string) (net.Conn, error) {
conn, err := fn(addr)
if err != nil {
statRelayErrors.Add("type=Dial relay="+name, 1)
log.Printf("relay %s: %v", name, err)
return nil, err
}
statRelayCurrEstab.Add("relay="+name, 1)
return connMonitor{conn, name}, nil
}
}
// dropFunc consumes the raw bytes of points that were dropped.
type dropFunc func([]byte)

// drop returns a dropFunc that counts how many points (newline-terminated
// lines) were discarded for the named relay.
func drop(name string) dropFunc {
	key := "type=Drop relay=" + name
	return func(buf []byte) {
		statRelayErrors.Add(key, int64(bytes.Count(buf, []byte{'\n'})))
	}
}
// logLost wraps fn so that, in addition to fn's accounting, every lost
// point is written to stdout, one line per point. The mutex keeps output
// from interleaving under concurrent drops.
func logLost(name string, fn dropFunc) dropFunc {
	var mu sync.Mutex
	return func(buf []byte) {
		fn(buf)
		mu.Lock()
		defer mu.Unlock()
		for len(buf) > 0 {
			i := bytes.IndexByte(buf, '\n')
			if i < 0 {
				// No trailing newline: log the remainder as one point.
				// The previous version sliced with the -1 index here and
				// panicked.
				fmt.Fprintf(os.Stdout, "relay %s: lost: %s\n", name, buf)
				break
			}
			point := buf[:i]
			buf = buf[i+1:]
			fmt.Fprintf(os.Stdout, "relay %s: lost: %s\n", name, point)
		}
	}
}
// connMonitor wraps a net.Conn, logging I/O errors for the given relay and
// decrementing the established-connection gauge on clean close.
type connMonitor struct {
	net.Conn
	Relay string
}

// Read delegates to the wrapped connection, logging any error.
func (cm connMonitor) Read(p []byte) (int, error) {
	n, err := cm.Conn.Read(p)
	if err != nil {
		log.Printf("relay %s: %v", cm.Relay, err)
	}
	return n, err
}

// Write delegates to the wrapped connection, logging any error.
func (cm connMonitor) Write(p []byte) (int, error) {
	n, err := cm.Conn.Write(p)
	if err != nil {
		log.Printf("relay %s: %v", cm.Relay, err)
	}
	return n, err
}

// Close closes the wrapped connection. A clean close decrements the
// established-connection gauge; a failed close is logged instead.
func (cm connMonitor) Close() error {
	err := cm.Conn.Close()
	if err == nil {
		statRelayCurrEstab.Add("relay="+cm.Relay, -1)
		return nil
	}
	log.Printf("relay %s: %v", cm.Relay, err)
	return err
}
|
package clientModel
import (
"bytes"
"encoding/json"
"errors"
)
// Client represents a registered client application.
type Client struct {
	// Unique identifier for Client
	ClientID string `json:"clientID"`
	// Like password to matching with client id
	ClientSecret string `json:"clientSecret"`
	// The base domain url
	HomepageURI string `json:"homepageURI"`
	// The uri that client when the client register for callback
	RegisteredCallback string `json:"registeredCallback"`
}

// MarshalJSON serializes the client, emitting all four (required) fields in
// a fixed order, preserving the original `"key": value` spacing.
func (strct *Client) MarshalJSON() ([]byte, error) {
	fields := []struct {
		name  string
		value string
	}{
		{"clientID", strct.ClientID},
		{"clientSecret", strct.ClientSecret},
		{"homepageURI", strct.HomepageURI},
		{"registeredCallback", strct.RegisteredCallback},
	}
	var buf bytes.Buffer
	buf.WriteString("{")
	for i, f := range fields {
		if i > 0 {
			buf.WriteString(",")
		}
		buf.WriteString("\"" + f.name + "\": ")
		encoded, err := json.Marshal(f.value)
		if err != nil {
			return nil, err
		}
		buf.Write(encoded)
	}
	buf.WriteString("}")
	return buf.Bytes(), nil
}
// UnmarshalJSON parses b into the client. All four fields are required;
// unknown properties are ignored.
func (strct *Client) UnmarshalJSON(b []byte) error {
	var raw map[string]json.RawMessage
	if err := json.Unmarshal(b, &raw); err != nil {
		return err
	}
	// Map each known property to its destination field.
	targets := map[string]*string{
		"clientID":           &strct.ClientID,
		"clientSecret":       &strct.ClientSecret,
		"homepageURI":        &strct.HomepageURI,
		"registeredCallback": &strct.RegisteredCallback,
	}
	seen := map[string]bool{}
	for k, v := range raw {
		dst, known := targets[k]
		if !known {
			continue
		}
		if err := json.Unmarshal([]byte(v), dst); err != nil {
			return err
		}
		seen[k] = true
	}
	// Enforce the required properties, in the original order.
	for _, required := range []string{"clientID", "clientSecret", "homepageURI", "registeredCallback"} {
		if !seen[required] {
			return errors.New("\"" + required + "\" is required but was not present")
		}
	}
	return nil
}
|
package api
import (
"encoding/json"
"github.com/gorilla/mux"
"github.com/sirsean/packhunter/model"
"github.com/sirsean/packhunter/mongo"
"github.com/sirsean/packhunter/ph"
"github.com/sirsean/packhunter/service"
"github.com/sirsean/packhunter/web"
"net/http"
"strings"
)
// ListMyUsers responds with the JSON-encoded list of users on the current
// user's "Following" tag.
func ListMyUsers(w http.ResponseWriter, r *http.Request) {
	session := mongo.Session()
	defer session.Close()
	// NOTE(review): the lookup errors below are still ignored, matching
	// the rest of this file — confirm whether they should produce a 500.
	currentUser, _ := web.CurrentUser(r, session)
	t, _ := currentUser.Tag("Following")
	tag, _ := service.GetTagByIdHex(session, t.Id)
	// Previously the Marshal error was discarded, silently writing "null"
	// or nothing on failure.
	response, err := json.Marshal(tag.Users)
	if err != nil {
		http.Error(w, "failed to encode users", http.StatusInternalServerError)
		return
	}
	w.Write(response)
}
// ShowUser responds with the named user's profile, augmented with the tags
// the current user has them on and the user's public tags.
func ShowUser(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	username := vars["username"]
	session := mongo.Session()
	defer session.Close()
	// NOTE(review): errors from these lookups are ignored, as elsewhere in
	// this file — confirm whether they should produce error responses.
	currentUser, _ := web.CurrentUser(r, session)
	user := ph.GetUserByUsername(currentUser.AccessToken, username)
	tags, _ := service.TagsUserIsOn(session, currentUser, user)
	publicTags, _ := service.UserPublicTags(session, currentUser, user)
	// UserResponse embeds the raw user and augments it with tag info.
	type UserResponse struct {
		ph.User
		Tags       []model.BasicTag           `json:"tags"`
		PublicTags []model.BasicTagSubscribed `json:"public_tags"`
	}
	response, err := json.Marshal(UserResponse{
		User:       user,
		Tags:       tags,
		PublicTags: publicTags,
	})
	if err != nil {
		// Previously discarded; a failed Marshal wrote a bogus body.
		http.Error(w, "failed to encode user", http.StatusInternalServerError)
		return
	}
	w.Write(response)
}
// SetUserTags synchronizes which of the current user's tags the target
// user appears on, driven by the comma-separated tag_ids form field: the
// user is added to every listed tag and removed from the rest.
func SetUserTags(w http.ResponseWriter, r *http.Request) {
	type TagsForm struct {
		TagIds string `schema:"tag_ids"`
	}
	vars := mux.Vars(r)
	username := vars["username"]
	session := mongo.Session()
	defer session.Close()
	currentUser, _ := web.CurrentUser(r, session)
	user := ph.GetUserByUsername(currentUser.AccessToken, username)
	if err := r.ParseForm(); err != nil {
		// Previously ignored, which silently proceeded with an empty form.
		http.Error(w, "invalid form", http.StatusBadRequest)
		return
	}
	form := new(TagsForm)
	// NOTE(review): decode and save errors below are still ignored,
	// matching the rest of this file — confirm intended handling.
	postDecoder.Decode(form, r.PostForm)
	tagIds := strings.Split(form.TagIds, ",")
	for _, t := range currentUser.Tags {
		tag, _ := service.GetTagByIdHex(session, t.Id)
		if tagsContains(tagIds, t.Id) {
			tag.AddUser(user)
		} else {
			tag.RemoveUser(user)
		}
		service.SaveTag(session, &tag)
	}
}
// tagsContains reports whether tagId appears in tagIds.
func tagsContains(tagIds []string, tagId string) bool {
	for _, candidate := range tagIds {
		if candidate == tagId {
			return true
		}
	}
	return false
}
// UserLogout ends the current user's session.
func UserLogout(w http.ResponseWriter, r *http.Request) {
	web.Logout(w, r)
}
|
package main
import "fmt"
// taskList holds an ordered collection of tasks (backed by a slice).
type taskList struct {
	tasks []*task
}

// add_task appends ts to the list.
func (t *taskList) add_task(ts *task) {
	t.tasks = append(t.tasks, ts)
}

// delete_task removes the task at index, preserving order.
func (t *taskList) delete_task(index int) {
	t.tasks = append(t.tasks[:index], t.tasks[index+1:]...)
}

// print_list prints the name and description of every task.
func (t *taskList) print_list() {
	for _, item := range t.tasks {
		fmt.Println("Name", item.name)
		fmt.Println("Description", item.description)
	}
}

// print_list_completed prints only the tasks marked complete.
func (t *taskList) print_list_completed() {
	for _, item := range t.tasks {
		if !item.complete {
			continue
		}
		fmt.Println("Name", item.name)
		fmt.Println("Description", item.description)
	}
}

// task is a single to-do item.
type task struct {
	name        string
	description string
	complete    bool
}

// mark_complete flags the task as done.
func (t *task) mark_complete() {
	t.complete = true
}

// update_description replaces the task's description.
func (t *task) update_description(new_description string) {
	t.description = new_description
}

// update_name replaces the task's name.
func (t *task) update_name(new_name string) {
	t.name = new_name
}
func main() {
t1 := &task{
name: "Complete go course",
description: "Complete go course this week",
}
t2 := &task{
name: "Complete python course",
description: "Complete python course this week",
}
t3 := &task{
name: "Complete react course",
description: "Complete react course this week",
}
list := &taskList{
tasks: []*task{
t1, t2,
},
}
list.add_task(t3)
list.print_list_completed()
// for i := 0; i < len(list.tasks); i++{
// fmt.Println("Index", i, "Task", list.tasks[i].name)
// }
// for index, task := range list.tasks{
// fmt.Println("Index", index, "Task", task.name)
// }
task_map := make(map[string]*taskList)
task_map["Kevin"] = list
fmt.Println("Kevin Task")
task_map["Kevin"].print_list()
t4 := &task{
name: "Complete java course",
description: "Complete java course this week",
}
t5 := &task{
name: "Complete c# course",
description: "Complete c# course this week",
}
list2 := &taskList{
tasks: []*task{
t4, t5,
},
}
task_map["Maria"] = list2
fmt.Println("Maria Task")
task_map["Maria"].print_list()
} |
package provisioner
import (
"crypto/rand"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"regexp"
"strconv"
"github.com/resin-os/resin-provisioner/util"
)
// apiKeyRegexp matches a run of ASCII alphanumerics. Note it is unanchored,
// so Match reports true for any string merely containing such a run.
var apiKeyRegexp = regexp.MustCompile("[a-zA-Z0-9]+")
func checkSocket(path string) error {
// The socket file not existing means we can create it.
if _, err := os.Stat(path); os.IsNotExist(err) {
return nil
}
// Otherwise, remove the socket ready for re-creation. If we fail here,
// just pass on the error.
return os.Remove(path)
}
func readerToString(r io.Reader) (ret string, err error) {
var bytes []byte
if bytes, err = ioutil.ReadAll(r); err == nil {
ret = string(bytes)
}
return
}
func reportError(status int, writer http.ResponseWriter, req *http.Request,
err error, userErr string) {
log.Printf("ERROR: %s %s: %s (%s)\n", req.Method, req.URL.Path, err,
userErr)
writer.WriteHeader(status)
fmt.Fprintf(writer, "ERROR: %s", userErr)
}
// readPostBodyReportErr returns the request body as a string, or "" after
// reporting a 500 if the body could not be read. Note that a genuinely
// empty body and a failed read are indistinguishable to the caller.
func readPostBodyReportErr(writer http.ResponseWriter, req *http.Request) string {
	// req.Body doesn't need to be closed by us.
	str, err := readerToString(req.Body)
	if err != nil {
		reportError(500, writer, req, err, "Can't convert read to string")
		return ""
	}
	return str
}
// isInteger reports whether str parses as a base-10 integer.
func isInteger(str string) bool {
	if _, err := strconv.Atoi(str); err != nil {
		return false
	}
	return true
}
// isValidApiKey reports whether str is a non-empty, wholly alphanumeric
// API key. The previous implementation used the unanchored apiKeyRegexp
// ("[a-zA-Z0-9]+"), whose Match reports true for ANY string containing at
// least one alphanumeric character (e.g. "bad key!"), so it validated
// nothing.
func isValidApiKey(str string) bool {
	if str == "" {
		return false
	}
	for i := 0; i < len(str); i++ {
		c := str[i]
		switch {
		case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z', '0' <= c && c <= '9':
			// ok
		default:
			return false
		}
	}
	return true
}
// supervisorDbusRunning reports whether the supervisor service is running,
// as seen over D-Bus. The D-Bus connection is closed before returning.
// (Restructured to avoid the else-after-return shape.)
func supervisorDbusRunning() (bool, error) {
	dbus, err := NewDbus()
	if err != nil {
		return false, err
	}
	defer dbus.Close()
	return dbus.SupervisorRunning()
}
// setSupervisorTag rewrites the supervisor env file so that SUPERVISOR_TAG
// points at the updater's pinned tag.
// (Restructured to avoid the else-after-return shape.)
func setSupervisorTag() error {
	fields, err := util.GetEnvFileFields(SUPERVISOR_CONF_PATH)
	if err != nil {
		return err
	}
	fields["SUPERVISOR_TAG"] = INIT_UPDATER_SUPERVISOR_TAG
	return util.SetEnvFileFields(SUPERVISOR_CONF_PATH, fields)
}
func randomHexString(byteLength uint32) (str string, err error) {
slice := make([]byte, byteLength)
if _, err = rand.Read(slice); err == nil {
str = fmt.Sprintf("%x", slice)
}
return
}
|
package main
import "fmt"
// main prints a centered pyramid of asterisks with 10 rows: row i carries
// 2*i+1 stars, left-padded with spaces to center it.
func main() {
	const rows = 10
	for row := 0; row < rows; row++ {
		pad := rows - row - 1
		for s := 0; s < pad; s++ {
			fmt.Printf(" ")
		}
		stars := 2*row + 1
		for s := 0; s < stars; s++ {
			fmt.Printf("*")
		}
		fmt.Println()
	}
}
|
package tools
import (
"github.com/astaxie/beego"
"io/ioutil"
"os"
"golangapi/models"
// "log"
)
var (
	// filecache is the directory (read from the beego app config) where
	// cached files are written to and read from.
	filecache string = beego.AppConfig.String("filecache")
)

// Filehelper groups the file-cache and chunked-upload helper methods.
type Filehelper struct{}
// func checkDirectoryIsExist(directoryname string, iscreate bool) bool {
// var exist = true
// if _ err := os.Stat(cache + "/" + directoryname); os.IsNotExist(err) {
// exist = false
// }
// return exist
// }
func checkFileIsExist(dirpath string, filename string) (bool) {
var exist = true;
if _, err := os.Stat(dirpath); os.IsNotExist(err) {
_ = os.MkdirAll(dirpath, 0777)
}
if _, err := os.Stat(filename); os.IsNotExist(err) {
exist = false;
}
return exist;
}
// WriteFile stores file under filename inside the cache directory. It
// refuses to overwrite: an error is returned when the cached file already
// exists.
func (f *Filehelper) WriteFile(filename string, file []byte) (string, error) {
	path := filecache + "/" + filename
	// The existence check must look at the cached path; the previous
	// version stat'ed the bare filename relative to the working directory,
	// so the overwrite guard never matched the file actually written.
	if checkFileIsExist(filecache, path) {
		return "", (&ResultHelp{}).NewErr("file is exists")
	}
	err := ioutil.WriteFile(path, file, 0777)
	return filename, err
}
// ReadFile returns the contents of filename from the cache directory, or
// an error if it does not exist or cannot be read.
func (f *Filehelper) ReadFile(filename string) ([]byte, error) {
	path := filecache + "/" + filename
	// The previous version returned an error when the file DID exist
	// (inverted check, and it stat'ed the bare filename rather than the
	// cached path), so every successful lookup failed.
	if !checkFileIsExist(filecache, path) {
		return nil, (&ResultHelp{}).NewErr("file not exists")
	}
	file, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	return file, nil
}
// UploadFileToMongo handles a (possibly chunked) upload into Mongo GridFS.
// Intermediate chunks are accumulated in Redis under the (possibly
// generated) filename with a 60-second TTL; when the final chunk
// (Currentchunk == Maxchunks-1) arrives, the assembled data is written to
// GridFS. Returns the filename used, or an error.
func (f *Filehelper) UploadFileToMongo(filemode models.Filemodel) (string, error) {
	var filename string
	mongogridfshelper := &MongoGridFSHelper{}
	redishelper := &RedisHelper{}
	// Generate a name when the client did not supply one.
	if filemode.Filename == "" {
		filename = GetGuid()
	} else {
		filename = filemode.Filename
	}
	switch {
	// Single-chunk upload (Maxchunks == 1): write straight to GridFS.
	case filemode.Currentchunk == 0 && filemode.Currentchunk == filemode.Maxchunks - 1 :
		return mongogridfshelper.UploadFile(filemode)
	case filemode.Currentchunk < filemode.Maxchunks:
		// For non-first chunks, prepend any previously buffered data.
		// NOTE(review): filechunkdata.([]byte) will panic if Redis returns
		// a different dynamic type — confirm GetVByK's contract.
		if filemode.Currentchunk != 0 {
			if filechunkdata ,err := redishelper.GetVByK(filename, "bytes"); err == nil {
				filemode.Filedata = append(filechunkdata.([]byte), filemode.Filedata...)
			}
		}
		if filemode.Currentchunk == filemode.Maxchunks - 1 {
			// Last chunk: flush the assembled file to GridFS.
			return mongogridfshelper.UploadFile(filemode)
		} else {
			// Intermediate chunk: buffer in Redis with a 60s expiry.
			if err := redishelper.SetKVBySETEX(filename, filemode.Filedata, 60); err == nil {
				return filename, nil
			}
		}
	}
	// Fallthrough: unexpected chunk numbering or a Redis failure.
	return "", (&ResultHelp{}).NewErr("server err")
}
|
package main
import (
"fmt"
//"strconv"
"strings"
"unicode/utf8"
)
// main exercises the string/rune helpers in this file; the commented-out
// lines are earlier experiments kept for reference. Currently it prints
// the base name (directory and extension stripped) of a sample path.
func main() {
	/*s := "Hello, 世界"
	fmt.Printf("len(s)=%d\n", len(s))
	fmt.Println(s[7:])*/
	//fmt.Println("HasSuffix(abcdef, ef)=" + strconv.FormatBool(HasSuffix("abcdef", "efg")))
	//DecodeRuneToString(s)
	//fmt.Printf("CountCharInString(%s)=%d\n", s, CountCharInString(s))
	//fmt.Printf("RuneCountInString(%s)=%d", s, utf8.RuneCountInString(s))
	//StrToRuneSlice()
	//fmt.Println(basename("D:/目录1/目录2/文件1.txt"))
	fmt.Println(lastIndex("D:/目录1/目录2/文件12.txt"))
}
// HasSuffix reports whether s ends with suffix. The empty suffix matches
// every string.
func HasSuffix(s string, suffix string) bool {
	if len(suffix) > len(s) {
		return false
	}
	return s[len(s)-len(suffix):] == suffix
}
// DecodeRuneToString prints each rune of s, one per line, preceded by the
// rune's starting byte offset.
func DecodeRuneToString(s string) {
	pos := 0
	for pos < len(s) {
		r, width := utf8.DecodeRuneInString(s[pos:])
		fmt.Printf("%d\t%c\n", pos, r)
		pos += width
	}
}
// CountCharInString prints every rune of s with its byte offset and
// returns the number of runes (not bytes) in s.
func CountCharInString(s string) int {
	count := 0
	for idx, ch := range s {
		fmt.Printf("%d\t%c\n", idx, ch)
		count++
	}
	return count
}
// StrToRuneSlice demonstrates byte vs rune views of a UTF-8 string: it
// prints the raw bytes in hex, the code points in hex, and the round-trip
// back to a string.
func StrToRuneSlice() {
	const s = "hello,世界"
	fmt.Printf("% x\n", s)
	runes := []rune(s)
	fmt.Printf("%x\n", runes)
	fmt.Println(string(runes))
}
// basename strips any directory prefix (through the final '/') and then
// any extension (from the final '.') from s.
func basename(s string) string {
	if slash := strings.LastIndexByte(s, '/'); slash >= 0 {
		s = s[slash+1:]
	}
	if dot := strings.LastIndexByte(s, '.'); dot >= 0 {
		s = s[:dot]
	}
	return s
}
// lastIndex returns the file-name portion of s without its extension:
// everything after the final '/' and before the final '.'.
func lastIndex(s string) string {
	name := s
	if slash := strings.LastIndex(s, "/"); slash >= 0 {
		name = s[slash+1:]
	}
	dot := strings.LastIndex(name, ".")
	if dot < 0 {
		return name
	}
	return name[:dot]
}
|
package main
// ----------------------------- 方法1: 暴力法 -----------------------------
// numMagicSquaresInside counts the 3x3 magic squares contained in grid by
// brute-forcing every 3x3 window (indexed by its bottom-right corner).
func numMagicSquaresInside(grid [][]int) int {
	rows, cols := getRowsAndCols(grid)
	count := 0
	for r := 2; r < rows; r++ {
		for c := 2; c < cols; c++ {
			if isMagicSquare(grid, r, c) {
				count++
			}
		}
	}
	return count
}
func isMagicSquare(matrix [][]int, x, y int) bool {
countOfNum := getCountOfNumOfSquare(matrix, x, y)
rowSum, colSum, mainDiagonalSum, paraDiagonalSum := getAllSumsOfSquare(matrix, x, y)
return isAllDigitDifferentAndInRangeOneToNine(countOfNum) && isAllSumEqual(rowSum, colSum, mainDiagonalSum, paraDiagonalSum)
}
// getCountOfNumOfSquare tallies how many times each value occurs in the
// 3x3 window whose bottom-right corner is (x, y).
func getCountOfNumOfSquare(matrix [][]int, x, y int) map[int]int {
	counts := make(map[int]int)
	for row := x - 2; row <= x; row++ {
		for col := y - 2; col <= y; col++ {
			counts[matrix[row][col]]++
		}
	}
	return counts
}
// getAllSumsOfSquare computes, for the 3x3 window with bottom-right corner
// (x, y): the three row sums, the three column sums, the main-diagonal
// sum, and the anti-diagonal sum.
func getAllSumsOfSquare(matrix [][]int, x, y int) ([]int, []int, int, int) {
	rowSums := make([]int, 3)
	colSums := make([]int, 3)
	mainDiag, antiDiag := 0, 0
	for dr := 0; dr < 3; dr++ {
		for dc := 0; dc < 3; dc++ {
			v := matrix[x-2+dr][y-2+dc]
			rowSums[dr] += v
			colSums[dc] += v
			if dr == dc {
				mainDiag += v
			}
			if dr+dc == 2 {
				antiDiag += v
			}
		}
	}
	return rowSums, colSums, mainDiag, antiDiag
}
// isAllDigitDifferentAndInRangeOneToNine reports whether countOfNum maps
// each of the digits 1 through 9 to exactly one occurrence (which also
// rules out any value outside that range in a 9-cell window).
func isAllDigitDifferentAndInRangeOneToNine(countOfNum map[int]int) bool {
	for digit := 1; digit <= 9; digit++ {
		if countOfNum[digit] != 1 {
			return false
		}
	}
	return true
}
// isAllSumEqual reports whether every row sum, every column sum, and both
// diagonal sums share one common value (the main-diagonal sum is used as
// the reference).
func isAllSumEqual(rowSum, colSum []int, mainDiagonalSum, paraDiagonalSum int) bool {
	want := mainDiagonalSum
	if paraDiagonalSum != want {
		return false
	}
	for _, sum := range rowSum {
		if sum != want {
			return false
		}
	}
	for _, sum := range colSum {
		if sum != want {
			return false
		}
	}
	return true
}
// getRowsAndCols returns matrix's dimensions as (rows, columns). It
// assumes matrix has at least one row.
func getRowsAndCols(matrix [][]int) (rows, cols int) {
	rows = len(matrix)
	cols = len(matrix[0])
	return
}
// ----------------------------- 方法2: 暴力法优化方向(无代码) -----------------------------
// 1. 暴力法的求取总和,可以使用前缀和进行优化。
// 2. 上面的 countOfNum 类型是 map[int]int, 其实可以替换为长度为 9 的数组。
// ----------------------------- 方法3: 搜索 + 字典树(无代码) -----------------------------
// 1. 可以通过搜索的方式,得出所有 3 阶幻方。 (看评论区说只有 8 个)
// 2. 将 3 阶幻方矩阵转换为数组,那这个数组就相当于字符串。于是可以以这 8 个幻方为基础,建立字典树。
// 3. 回到题目,暴力遍历 grid 时,通过字典树,就能判断 grid 中每一个 3X3 的区域是否为幻方了。
/*
题目链接:
总结;
1. 这题其实可以求出所有幻方,将其构建为一颗字典树,这样判断是否是幻方就很简单了。
2. 这题我采用了暴力。
3. 可以用前缀和优化上面的代码。
*/
|
package main
import "fmt"
// main demonstrates composite container types: a slice whose elements are
// maps, and a map whose values are slices.
func main() {
	// Combining map and slice.
	// Slice with map elements: each element must be initialized with make
	// before it can be written to (a nil map panics on assignment).
	var s1 []map[int]string
	s1 = make([]map[int]string, 2, 5)
	s1[0] = make(map[int]string, 2)
	s1[0][2] = "test"
	s1[0][22] = "tt"
	fmt.Println(s1)
	// Map with slice values: a slice literal can be assigned directly.
	var testmap map[string][]int
	testmap = make(map[string][]int, 3)
	// testmap["武汉"] = make([]int, 2, 2)
	// testmap["武汉"][0] = 10
	testmap["武汉"] = []int{20, 30}
	fmt.Println(testmap)
}
|
package main
import (
"github.com/therecipe/qt/core"
"github.com/therecipe/qt/gui"
"github.com/therecipe/qt/quick"
)
// init registers PieChart with the QML type system as "PieChart" in the
// "Charts" module, version 1.0. (PieChart_QmlRegisterType2 is presumably
// generated by the therecipe/qt tooling — confirm against qtmoc output.)
func init() {
	PieChart_QmlRegisterType2("Charts", 1, 0, "PieChart")
}
// PieChart is a custom QML paintable item. The blank (_) fields are
// therecipe/qt meta declarations: a constructor hook named "init" plus
// "name" and "color" properties exposed to QML.
type PieChart struct {
	quick.QQuickPaintedItem
	_ func() `constructor:"init"`
	_ string `property:"name"`
	_ *gui.QColor `property:"color"`
}
// init is the constructor hook declared on the struct above; it wires the
// item's Paint event to the paint method below.
func (p *PieChart) init() {
	p.ConnectPaint(p.paint)
}
// paint draws the chart: an antialiased pie slice in the item's "color"
// property, inset by one pixel on every side. Qt pie angles are given in
// sixteenths of a degree, so the slice starts at 90° and spans 290°.
func (p *PieChart) paint(painter *gui.QPainter) {
	pen := gui.NewQPen3(p.Color())
	pen.SetWidth(2)
	painter.SetPen(pen)
	painter.SetRenderHints(gui.QPainter__Antialiasing, true)
	painter.DrawPie3(core.NewQRect4(0, 0, int(p.Width()), int(p.Height())).Adjusted(1, 1, -1, -1), 90*16, 290*16)
}
|
// DO NOT EDIT. This file was generated by "github.com/frk/gosql".
package testdata
import (
"github.com/frk/gosql"
"github.com/frk/gosql/internal/testdata/common"
)
// Exec runs a fixed single-row SELECT against "test_nested" and scans the
// four selected columns into freshly allocated nested records.
func (q *SelectWithRecordNestedSingleQuery) Exec(c gosql.Conn) error {
	const queryString = `SELECT
	n."foo_bar_baz_val"
	, n."foo_baz_val"
	, n."foo2_bar_baz_val"
	, n."foo2_baz_val"
	FROM "test_nested" AS n
	LIMIT 1` // `
	row := c.QueryRow(queryString)
	// Allocate the destination structs before scanning.
	// NOTE(review): Nested carries both a FOO and a Foo field, and neither
	// FOO.Bar nor Foo.Bar is allocated here — Bar is presumably a value
	// (non-pointer) struct; confirm in the common package before editing.
	q.Nested = new(common.Nested)
	q.Nested.FOO = new(common.Foo)
	q.Nested.FOO.Baz = new(common.Baz)
	q.Nested.Foo.Baz = new(common.Baz)
	return row.Scan(
		&q.Nested.FOO.Bar.Baz.Val,
		&q.Nested.FOO.Baz.Val,
		&q.Nested.Foo.Bar.Baz.Val,
		&q.Nested.Foo.Baz.Val,
	)
}
|
package handlers
import (
"github.com/go-chi/chi"
)
// setupEndpoints mounts the versioned API routes:
//
//	POST   /api/v1/users/register, /api/v1/users/login   (public)
//	POST   /api/v1/todos/create                          (authenticated)
//	PATCH  /api/v1/todos/{id}                            (authenticated owner)
//	DELETE /api/v1/todos/{id}                            (authenticated owner)
func (s *Server) setupEndpoints(r *chi.Mux) {
	r.Route("/api/v1", func(r chi.Router) {
		r.Route("/users", func(r chi.Router) {
			r.Post("/register", s.registerUser())
			r.Post("/login", s.loginUser())
		})
		r.Route("/todos", func(r chi.Router) {
			// Everything under /todos requires an authenticated user.
			r.Use(s.withUser)
			r.Post("/create", s.createTodo())
			r.Route("/{id}", func(r chi.Router) {
				// Resolve the todo into the request context, then require
				// that the requester owns it.
				r.Use(s.todoCTX)
				r.Use(s.withOwner("todo"))
				r.Patch("/", s.updateTodo())
				r.Delete("/", s.deleteTodo())
			})
		})
	})
}
|
/*
dokugen is a simple command line utility that exposes many of the basic functions of the
sudoku package. It's able to generate puzzles (with difficulty) and solve provided puzzles.
Run with -h to see help on how to use it.
*/
package main
import (
"bytes"
"encoding/csv"
"encoding/json"
"flag"
"fmt"
"github.com/boltdb/bolt"
"github.com/gosuri/uiprogress"
"github.com/jkomoros/sudoku"
"github.com/jkomoros/sudoku/sdkconverter"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"runtime"
"strconv"
"strings"
)
//TODO: let people pass in a filename to export to.

// _STORED_PUZZLES_DB is the file name of the local bolt database used to
// cache generated-but-rejected puzzles for later vending.
const _STORED_PUZZLES_DB = ".puzzle_cache"

//Used as the grid to pass back when FAKE-GENERATE is true.
const TEST_GRID = `6|1|2|.|.|.|4|.|3
.|3|.|4|9|.|.|7|2
.|.|7|.|.|.|.|6|5
.|.|.|.|6|1|.|8|.
1|.|3|.|4|.|2|.|6
.|6|.|5|2|.|.|.|.
.|9|.|.|.|.|5|.|.
7|2|.|.|8|5|.|3|.
5|.|1|.|.|.|9|4|7`
// appOptions holds every command-line option after parsing, plus derived
// values (SYMMETRY, CONVERTER, and the numeric difficulty bounds) that
// fixUp computes from the RAW_* string flags.
type appOptions struct {
	GENERATE            bool
	HELP                bool
	PUZZLE_TO_SOLVE     string // file (or CSV of puzzles) to solve
	NUM                 int
	PRINT_STATS         bool
	WALKTHROUGH         bool
	RAW_SYMMETRY        string // user-typed; parsed into SYMMETRY by fixUp
	RAW_DIFFICULTY      string // user-typed; parsed into MIN/MAX_DIFFICULTY by fixUp
	SYMMETRY            sudoku.SymmetryType
	SYMMETRY_PROPORTION float64
	MIN_FILLED_CELLS    int
	MIN_DIFFICULTY      float64
	MAX_DIFFICULTY      float64
	NO_CACHE            bool
	PUZZLE_FORMAT       string
	NO_PROGRESS         bool
	CSV                 bool
	CONVERTER           sdkconverter.SudokuPuzzleConverter
	//Only used in testing.
	FAKE_GENERATE bool
	flagSet       *flag.FlagSet
	progress      *uiprogress.Progress
}
// outputWriter abstracts plain-text vs CSV output: when csvWriter is nil,
// values are printed directly to output; otherwise they accumulate in
// tempRec until EndOfRec flushes them as one CSV record.
type outputWriter struct {
	output    io.ReadWriter
	csvWriter *csv.Writer
	tempRec   []string // fields of the CSV record currently being built
}
// difficultyRanges maps the user-facing difficulty names (-d flag) to
// their numeric [low, high] bounds; populated in init.
var difficultyRanges map[string]struct {
	low, high float64
}
// init configures scheduler parallelism and defines the named difficulty
// bands used by the -d flag.
func init() {
	//grid.Difficulty can make use of a number of processes simultaneously.
	runtime.GOMAXPROCS(6)
	difficultyRanges = map[string]struct{ low, high float64 }{
		"gentle": {0.0, 0.3},
		"easy":   {0.3, 0.6},
		"medium": {0.6, 0.7},
		"tough":  {0.7, 1.0},
	}
}
// Write emits output (and extra, when non-empty) directly in plain-text
// mode. In CSV mode it instead appends output as one field of the current
// record and silently drops extra.
func (o *outputWriter) Write(output string, extra string) {
	if o.csvWriter != nil {
		o.tempRec = append(o.tempRec, output)
		return
	}
	fmt.Fprintln(o.output, output)
	if extra != "" {
		fmt.Fprintln(o.output, extra)
	}
}
// EndOfRec flushes the fields accumulated so far as one CSV record and
// resets the accumulator; it is a no-op in plain-text mode.
func (o *outputWriter) EndOfRec() {
	if w := o.csvWriter; w != nil {
		w.Write(o.tempRec)
		o.tempRec = nil
	}
}
// Done flushes any buffered CSV output; it is a no-op in plain-text mode.
func (o *outputWriter) Done() {
	if o.csvWriter != nil {
		o.csvWriter.Flush()
	}
}
// NewOutputWriter builds an outputWriter over output, attaching a CSV
// writer when options.CSV is set.
func NewOutputWriter(options *appOptions, output io.ReadWriter) *outputWriter {
	w := &outputWriter{output: output}
	if options.CSV {
		w.csvWriter = csv.NewWriter(output)
	}
	return w
}
func defineFlags(options *appOptions) {
options.flagSet.BoolVar(&options.GENERATE, "g", false, "if true, will generate a puzzle.")
options.flagSet.BoolVar(&options.HELP, "h", false, "If provided, will print help and exit.")
options.flagSet.IntVar(&options.NUM, "n", 1, "Number of things to generate")
options.flagSet.BoolVar(&options.PRINT_STATS, "p", false, "If provided, will print stats.")
options.flagSet.StringVar(&options.PUZZLE_TO_SOLVE, "s", "", "If provided, will solve the puzzle at the given filename and print solution. If -csv is provided, will expect the file to be a csv where the first column of each row is a puzzle in the specified puzzle format.")
options.flagSet.BoolVar(&options.WALKTHROUGH, "w", false, "If provided, will print out a walkthrough to solve the provided puzzle.")
options.flagSet.StringVar(&options.RAW_SYMMETRY, "y", "vertical", "Valid values: 'none', 'both', 'horizontal', 'vertical")
options.flagSet.Float64Var(&options.SYMMETRY_PROPORTION, "r", 0.7, "What proportion of cells should be filled according to symmetry")
options.flagSet.IntVar(&options.MIN_FILLED_CELLS, "min-filled-cells", 0, "The minimum number of cells that should be filled in the generated puzzles.")
options.flagSet.Float64Var(&options.MIN_DIFFICULTY, "min", 0.0, "Minimum difficulty for generated puzzle")
options.flagSet.Float64Var(&options.MAX_DIFFICULTY, "max", 1.0, "Maximum difficulty for generated puzzle")
options.flagSet.BoolVar(&options.NO_CACHE, "no-cache", false, "If provided, will not vend generated puzzles from the cache of previously generated puzzles.")
//TODO: the format should also be how we interpret loads, too.
options.flagSet.StringVar(&options.PUZZLE_FORMAT, "format", "sdk", "Which format to export puzzles from. Defaults to 'sdk'")
options.flagSet.BoolVar(&options.CSV, "csv", false, "Export CSV, and expect inbound puzzle files to be a CSV with a puzzle per row.")
options.flagSet.StringVar(&options.RAW_DIFFICULTY, "d", "", "difficulty, one of {gentle, easy, medium, tough}")
options.flagSet.BoolVar(&options.NO_PROGRESS, "no-progress", false, "If provided, will not print a progress bar")
}
//If it returns true, the program should quit.

// fixUp post-processes the raw flag values: it resolves RAW_SYMMETRY into
// SYMMETRY, RAW_DIFFICULTY into the MIN/MAX_DIFFICULTY bounds, and
// PUZZLE_FORMAT into CONVERTER, logging any invalid value to errOutput
// (os.Stderr when errOutput is nil).
func (o *appOptions) fixUp(errOutput io.ReadWriter) bool {
	if errOutput == nil {
		errOutput = os.Stderr
	}
	logger := log.New(errOutput, "", log.LstdFlags)
	o.RAW_SYMMETRY = strings.ToLower(o.RAW_SYMMETRY)
	switch o.RAW_SYMMETRY {
	case "none":
		o.SYMMETRY = sudoku.SYMMETRY_NONE
	case "both":
		o.SYMMETRY = sudoku.SYMMETRY_BOTH
	case "horizontal":
		o.SYMMETRY = sudoku.SYMMETRY_HORIZONTAL
	case "vertical":
		o.SYMMETRY = sudoku.SYMMETRY_VERTICAL
	default:
		logger.Println("Unknown symmetry flag: ", o.RAW_SYMMETRY)
		return true
	}
	o.RAW_DIFFICULTY = strings.ToLower(o.RAW_DIFFICULTY)
	if o.RAW_DIFFICULTY != "" {
		// A named difficulty overrides any explicit -min/-max values.
		vals, ok := difficultyRanges[o.RAW_DIFFICULTY]
		if !ok {
			logger.Println("Invalid difficulty option:", o.RAW_DIFFICULTY)
			return true
		}
		o.MIN_DIFFICULTY = vals.low
		o.MAX_DIFFICULTY = vals.high
		logger.Println("Using difficulty max:", strconv.FormatFloat(vals.high, 'f', -1, 64), "min:", strconv.FormatFloat(vals.low, 'f', -1, 64))
	}
	o.CONVERTER = sdkconverter.Converters[sdkconverter.Format(o.PUZZLE_FORMAT)]
	if o.CONVERTER == nil {
		logger.Println("Invalid format option:", o.PUZZLE_FORMAT)
		return true
	}
	return false
}
// getOptions parses flagArguments into a fresh appOptions and exits the
// process when post-processing (fixUp) reports a fatal configuration
// problem.
func getOptions(flagSet *flag.FlagSet, flagArguments []string, errOutput io.ReadWriter) *appOptions {
	o := &appOptions{flagSet: flagSet}
	defineFlags(o)
	flagSet.Parse(flagArguments)
	if o.fixUp(errOutput) {
		os.Exit(1)
	}
	return o
}
// main wires the real command line, stdout and stderr into process.
func main() {
	flagSet := flag.CommandLine
	process(getOptions(flagSet, os.Args[1:], nil), os.Stdout, os.Stderr)
}
// process is the program's main loop, separated from main for testability.
// Depending on options it generates options.NUM puzzles and/or loads the
// puzzle(s) named by -s, then for each grid optionally prints a solve
// walkthrough, difficulty stats, and the solved grid, via writer (plain
// text or CSV).
func process(options *appOptions, output io.ReadWriter, errOutput io.ReadWriter) {
	options.flagSet.SetOutput(errOutput)
	if options.HELP {
		options.flagSet.PrintDefaults()
		return
	}
	logger := log.New(errOutput, "", log.LstdFlags)
	var grid sudoku.MutableGrid
	writer := NewOutputWriter(options, output)
	var bar *uiprogress.Bar
	//TODO: do more useful / explanatory printing here.
	if options.NUM > 1 && !options.NO_PROGRESS {
		// Only bother with a progress bar for multi-puzzle runs.
		options.progress = uiprogress.New()
		options.progress.Out = errOutput
		options.progress.Start()
		bar = options.progress.AddBar(options.NUM).PrependElapsed().AppendCompleted()
	}
	var incomingPuzzles []sudoku.MutableGrid
	if options.PUZZLE_TO_SOLVE != "" {
		//There are puzzles to load up.
		data, err := ioutil.ReadFile(options.PUZZLE_TO_SOLVE)
		if err != nil {
			logger.Fatalln("Read error for specified file:", err)
		}
		var tempGrid sudoku.MutableGrid
		var puzzleData []string
		if options.CSV {
			//Load up multiple: one puzzle per row, first column.
			csvReader := csv.NewReader(bytes.NewReader(data))
			rows, err := csvReader.ReadAll()
			if err != nil {
				logger.Fatalln("The provided input CSV was not a valid CSV:", err)
			}
			for _, row := range rows {
				puzzleData = append(puzzleData, row[0])
			}
		} else {
			//Just load up a single file worth.
			puzzleData = []string{string(data)}
		}
		for _, puzz := range puzzleData {
			tempGrid = sudoku.NewGrid()
			//TODO: shouldn't a load method have a way to say the string provided is invalid?
			options.CONVERTER.Load(tempGrid, string(puzz))
			incomingPuzzles = append(incomingPuzzles, tempGrid)
		}
		//Tell the main loop how many puzzles to expect.
		//TODO: this feels a bit like a hack, doesn't it? options.NUM is normally a user input value.
		options.NUM = len(incomingPuzzles)
	}
	for i := 0; i < options.NUM; i++ {
		//TODO: allow the type of symmetry to be configured.
		if options.GENERATE {
			if options.FAKE_GENERATE {
				// Test-only shortcut: skip expensive generation.
				grid = sudoku.NewGrid()
				grid.LoadSDK(TEST_GRID)
			} else {
				gOptions := &sudoku.GenerationOptions{
					Symmetry:           options.SYMMETRY,
					SymmetryPercentage: options.SYMMETRY_PROPORTION,
					MinFilledCells:     options.MIN_FILLED_CELLS,
				}
				grid = generatePuzzle(options.MIN_DIFFICULTY, options.MAX_DIFFICULTY, gOptions, options.NO_CACHE, logger)
			}
			writer.Write(options.CONVERTER.DataString(grid), "")
		} else if len(incomingPuzzles)-1 >= i {
			//Load up an inbound puzzle
			grid = incomingPuzzles[i]
		}
		if grid == nil {
			//No grid to do anything with.
			logger.Fatalln("No grid loaded.")
		}
		//TODO: use of this option leads to a busy loop somewhere... Is it related to the generate-multiple-and-difficulty hang?
		var directions *sudoku.SolveDirections
		if options.WALKTHROUGH || options.PRINT_STATS {
			directions = grid.HumanSolution(nil)
			if directions == nil || len(directions.CompoundSteps) == 0 {
				//We couldn't solve it. Let's check and see if the puzzle is well formed.
				if grid.HasMultipleSolutions() {
					//TODO: figure out why guesses wouldn't be used here effectively.
					logger.Println("The puzzle had multiple solutions; that means it's not well-formed")
				}
			}
		}
		if options.WALKTHROUGH {
			writer.Write(directions.Walkthrough(), "")
		}
		if options.PRINT_STATS {
			writer.Write(strconv.FormatFloat(grid.Difficulty(), 'f', -1, 64),
				strings.Join(directions.Stats(), "\n"))
		}
		//TODO: using the existence of options.PUZZLE_TO_SOLVE as the way to detect that
		//we are working on an inbound puzzle seems a bit hackish.
		if options.PUZZLE_TO_SOLVE != "" {
			grid.Solve()
			writer.Write(options.CONVERTER.DataString(grid), "")
		}
		writer.EndOfRec()
		if bar != nil {
			bar.Incr()
		}
	}
	writer.Done()
}
// StoredPuzzle is the JSON record cached in the bolt database for each
// generated-but-rejected puzzle, so a later run can vend it.
type StoredPuzzle struct {
	Options    *sudoku.GenerationOptions // the options the puzzle was generated with
	Difficulty float64
	//In DOKU format
	PuzzleData string
}
//TODO: take a sudoku.GenerationOptions to simplify signature

// storePuzzle persists grid (in doku format) together with its measured
// difficulty and generation options to the bolt cache at dbName, under a
// bucket named for the current difficulty model hash. Returns true on
// success. Note: most failure paths call logger.Fatalln, which exits the
// process.
//
// Fixes: "Transacation" typo in the failure message, and dropped the
// redundant []byte conversion of jsonBlob (json.MarshalIndent already
// returns []byte).
func storePuzzle(dbName string, grid sudoku.Grid, difficulty float64, options *sudoku.GenerationOptions, logger *log.Logger) bool {
	db, err := bolt.Open(dbName, 0600, nil)
	if err != nil {
		logger.Fatalln("Couldn't open DB file", err)
		return false
	}
	defer db.Close()
	converter := sdkconverter.Converters["doku"]
	if converter == nil {
		logger.Fatalln("Couldn't find doku converter")
	}
	puzzleData := converter.DataString(grid)
	if puzzleData == "" {
		logger.Fatalln("Puzzle didn't convert to doku format cleanly")
	}
	puzzleObj := &StoredPuzzle{
		Options:    options,
		Difficulty: difficulty,
		PuzzleData: puzzleData,
	}
	jsonBlob, err := json.MarshalIndent(puzzleObj, "", " ")
	if err != nil {
		logger.Fatalln("Json couldn't be marshalled", err)
	}
	err = db.Update(func(tx *bolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists([]byte(sudoku.DifficultyModelHash()))
		if err != nil {
			return err
		}
		// The per-bucket sequence provides a unique monotonically
		// increasing key.
		id, err := bucket.NextSequence()
		if err != nil {
			return err
		}
		return bucket.Put([]byte(strconv.Itoa(int(id))), jsonBlob)
	})
	if err != nil {
		logger.Fatalln("Transaction failed: ", err)
		return false
	}
	return true
}
//TODO: take a sudoku.GenerationOptions to simplify signature

// vendPuzzle looks in the bolt cache at dbName for a puzzle whose stored
// generation options match options exactly and whose difficulty lies in
// [min, max]. If one is found it is removed from the cache, loaded into a
// fresh grid, and returned; otherwise vendPuzzle returns nil.
func vendPuzzle(dbName string, min float64, max float64, options *sudoku.GenerationOptions) sudoku.MutableGrid {
	db, err := bolt.Open(dbName, 0600, nil)
	if err != nil {
		//TODO: pass in logger
		log.Fatalln("Couldn't open DB file", err)
		return nil
	}
	defer db.Close()
	converter := sdkconverter.Converters["doku"]
	if converter == nil {
		log.Fatalln("Couldn't find doku converter")
	}
	var finalPuzzle string
	// Update (not View) because a vended puzzle is deleted from the bucket.
	err = db.Update(func(tx *bolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists([]byte(sudoku.DifficultyModelHash()))
		if err != nil {
			log.Println(err)
			return err
		}
		matchingPuzzles := make(map[string]string)
		// NOTE(review): ForEach's error return is discarded, so a corrupt
		// record silently truncates the scan — consider propagating it.
		bucket.ForEach(func(key, value []byte) error {
			puzzleInfo := &StoredPuzzle{}
			err := json.Unmarshal(value, puzzleInfo)
			if err != nil {
				return err
			}
			if puzzleInfo.Options.MinFilledCells != options.MinFilledCells {
				//Doesn't match
				return nil
			}
			if puzzleInfo.Options.SymmetryPercentage != options.SymmetryPercentage {
				//Doesn't match
				return nil
			}
			if puzzleInfo.Options.Symmetry != options.Symmetry {
				//Doesn't match
				return nil
			}
			if puzzleInfo.Difficulty > max || puzzleInfo.Difficulty < min {
				//Doesn't match
				return nil
			}
			//Does match!
			matchingPuzzles[string(key)] = puzzleInfo.PuzzleData
			return nil
		})
		//Select one at random
		if len(matchingPuzzles) == 0 {
			//No puzzles matched.
			return nil
		}
		var keys []string
		for key, _ := range matchingPuzzles {
			keys = append(keys, key)
		}
		key := keys[rand.Intn(len(keys))]
		finalPuzzle = matchingPuzzles[key]
		//Doesn't matter that much if we can't delete the key.
		err = bucket.Delete([]byte(key))
		if err != nil {
			//TODO: shouldn't we ahve a logger here?
			log.Println("Couldn't delete the key we picked:", err)
		}
		return nil
	})
	if finalPuzzle == "" {
		return nil
	}
	grid := sudoku.NewGrid()
	converter.Load(grid, finalPuzzle)
	return grid
}
// generatePuzzle returns a puzzle whose difficulty lies within [min, max],
// honoring options. Unless skipCache is set, it first tries to vend a
// matching puzzle from the on-disk cache; otherwise it generates grids
// repeatedly until one lands in range, caching each rejected grid for
// future runs. Note: generation loops indefinitely if the range is
// unsatisfiable.
//
// Cleanup: removed the unreachable trailing `return nil` after the
// infinite loop (flagged by go vet as unreachable) and fixed comment typos.
func generatePuzzle(min float64, max float64, options *sudoku.GenerationOptions, skipCache bool, logger *log.Logger) sudoku.MutableGrid {
	if !skipCache {
		if result := vendPuzzle(_STORED_PUZZLES_DB, min, max, options); result != nil {
			logger.Println("Vending a puzzle from the cache.")
			return result
		}
	}
	//We'll have to generate one ourselves.
	for count := 0; ; count++ {
		//The first attempt is silent: if the first run is likely to
		//generate a usable puzzle, announcing it would just be noise.
		if count != 0 {
			logger.Println("Attempt", count, "at generating puzzle.")
		}
		result := sudoku.GenerateGrid(options)
		difficulty := result.Difficulty()
		if difficulty >= min && difficulty <= max {
			return result
		}
		logger.Println("Rejecting grid of difficulty", difficulty)
		if storePuzzle(_STORED_PUZZLES_DB, result, difficulty, options, logger) {
			logger.Println("Stored the puzzle for future use.")
		}
	}
}
|
package main
import (
"fmt"
)
func solution(s string) []int {
round := 0
zero := 0
curZero := 0
for s != "1" {
fmt.Printf("t1: %T\n", s)
curZero, s = rounds(s)
zero += curZero
round++
}
return []int{zero, round}
}
// rounds performs one transformation step: it returns the number of '0'
// characters in s together with the binary representation of the number
// of '1' characters. The intermediate binary string is printed.
func rounds(s string) (int, string) {
	zeros := countZero(s)
	ones := len(s) - zeros
	binary := toBinary(ones)
	fmt.Println(binary)
	return zeros, binary
}
// countZero returns how many '0' characters appear in num.
func countZero(num string) int {
	count := 0
	for _, ch := range num {
		if ch == '0' {
			count++
		}
	}
	return count
}
// toBinary returns the binary representation of n (assumes n >= 0).
//
// Bug fix: the previous version appended the least-significant bit first
// and never reversed, so it produced the digits backwards (6 -> "011"
// instead of "110") and returned "" for 0. Digits are now emitted
// most-significant first and 0 yields "0". The in-file callers consume
// only the digit *counts* of the result, which are unaffected by digit
// order, so solution's return values are unchanged.
func toBinary(n int) string {
	if n == 0 {
		return "0"
	}
	var digits []byte
	for n > 0 {
		digits = append(digits, byte('0'+n%2))
		n /= 2
	}
	for i, j := 0, len(digits)-1; i < j; i, j = i+1, j-1 {
		digits[i], digits[j] = digits[j], digits[i]
	}
	return string(digits)
}
|
package oidc_test
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/ory/fosite"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/authelia/authelia/v4/internal/authorization"
"github.com/authelia/authelia/v4/internal/configuration/schema"
"github.com/authelia/authelia/v4/internal/mocks"
"github.com/authelia/authelia/v4/internal/model"
"github.com/authelia/authelia/v4/internal/oidc"
"github.com/authelia/authelia/v4/internal/storage"
)
// TestOpenIDConnectStore_GetInternalClient verifies GetClient: an unknown
// client id yields the "invalid_client" error, and a registered id yields
// a client with that id.
func TestOpenIDConnectStore_GetInternalClient(t *testing.T) {
	s := oidc.NewStore(&schema.IdentityProvidersOpenIDConnect{
		IssuerCertificateChain: schema.X509CertificateChain{},
		IssuerPrivateKey:       keyRSA2048,
		Clients: []schema.IdentityProvidersOpenIDConnectClient{
			{
				ID:                  myclient,
				Description:         myclientdesc,
				AuthorizationPolicy: onefactor,
				Scopes:              []string{oidc.ScopeOpenID, oidc.ScopeProfile},
				Secret:              tOpenIDConnectPlainTextClientSecret,
			},
		},
	}, nil)
	client, err := s.GetClient(context.Background(), "myinvalidclient")
	assert.EqualError(t, err, "invalid_client")
	assert.Nil(t, client)
	client, err = s.GetClient(context.Background(), myclient)
	require.NoError(t, err)
	require.NotNil(t, client)
	assert.Equal(t, myclient, client.GetID())
}
// TestOpenIDConnectStore_GetInternalClient_ValidClient checks that
// GetFullClient returns a client whose accessors reflect the configured
// values plus the expected defaults (auth-code grant/response types, no
// redirect URIs, plaintext secret encoding).
func TestOpenIDConnectStore_GetInternalClient_ValidClient(t *testing.T) {
	ctx := context.Background()
	id := myclient
	c1 := schema.IdentityProvidersOpenIDConnectClient{
		ID:                  id,
		Description:         myclientdesc,
		AuthorizationPolicy: onefactor,
		Scopes:              []string{oidc.ScopeOpenID, oidc.ScopeProfile},
		Secret:              tOpenIDConnectPlainTextClientSecret,
	}
	s := oidc.NewStore(&schema.IdentityProvidersOpenIDConnect{
		IssuerCertificateChain: schema.X509CertificateChain{},
		IssuerPrivateKey:       keyRSA2048,
		Clients:                []schema.IdentityProvidersOpenIDConnectClient{c1},
	}, nil)
	client, err := s.GetFullClient(ctx, id)
	require.NoError(t, err)
	require.NotNil(t, client)
	assert.Equal(t, id, client.GetID())
	assert.Equal(t, myclientdesc, client.GetDescription())
	assert.Equal(t, fosite.Arguments(c1.Scopes), client.GetScopes())
	assert.Equal(t, fosite.Arguments([]string{oidc.GrantTypeAuthorizationCode}), client.GetGrantTypes())
	assert.Equal(t, fosite.Arguments([]string{oidc.ResponseTypeAuthorizationCodeFlow}), client.GetResponseTypes())
	assert.Equal(t, []string(nil), client.GetRedirectURIs())
	assert.Equal(t, authorization.OneFactor, client.GetAuthorizationPolicyRequiredLevel(authorization.Subject{}))
	assert.Equal(t, "$plaintext$client-secret", client.GetSecret().Encode())
}
// TestOpenIDConnectStore_GetInternalClient_InvalidClient checks that
// GetFullClient returns nil and the "invalid_client" error for an id that
// is not registered.
func TestOpenIDConnectStore_GetInternalClient_InvalidClient(t *testing.T) {
	ctx := context.Background()
	c1 := schema.IdentityProvidersOpenIDConnectClient{
		ID:                  myclient,
		Description:         myclientdesc,
		AuthorizationPolicy: onefactor,
		Scopes:              []string{oidc.ScopeOpenID, oidc.ScopeProfile},
		Secret:              tOpenIDConnectPlainTextClientSecret,
	}
	s := oidc.NewStore(&schema.IdentityProvidersOpenIDConnect{
		IssuerCertificateChain: schema.X509CertificateChain{},
		IssuerPrivateKey:       keyRSA2048,
		Clients:                []schema.IdentityProvidersOpenIDConnectClient{c1},
	}, nil)
	client, err := s.GetFullClient(ctx, "another-client")
	assert.Nil(t, client)
	assert.EqualError(t, err, "invalid_client")
}
// TestOpenIDConnectStore_IsValidClientID checks that IsValidClientID
// reports true only for registered client ids.
func TestOpenIDConnectStore_IsValidClientID(t *testing.T) {
	ctx := context.Background()
	s := oidc.NewStore(&schema.IdentityProvidersOpenIDConnect{
		IssuerCertificateChain: schema.X509CertificateChain{},
		IssuerPrivateKey:       keyRSA2048,
		Clients: []schema.IdentityProvidersOpenIDConnectClient{
			{
				ID:                  myclient,
				Description:         myclientdesc,
				AuthorizationPolicy: onefactor,
				Scopes:              []string{oidc.ScopeOpenID, oidc.ScopeProfile},
				Secret:              tOpenIDConnectPlainTextClientSecret,
			},
		},
	}, nil)
	validClient := s.IsValidClientID(ctx, myclient)
	invalidClient := s.IsValidClientID(ctx, "myinvalidclient")
	assert.True(t, validClient)
	assert.False(t, invalidClient)
}
// TestStoreSuite runs the StoreSuite testify suite below.
func TestStoreSuite(t *testing.T) {
	suite.Run(t, &StoreSuite{})
}
// StoreSuite exercises oidc.Store against a gomock storage backend; all
// fields are re-created per test in SetupTest.
type StoreSuite struct {
	suite.Suite
	ctx   context.Context
	ctrl  *gomock.Controller
	mock  *mocks.MockStorage
	store *oidc.Store
}
// SetupTest builds a fresh mock storage and an oidc.Store configured with
// a single "hs256" client using client_secret_jwt (HS256) authentication.
func (s *StoreSuite) SetupTest() {
	s.ctx = context.Background()
	s.ctrl = gomock.NewController(s.T())
	s.mock = mocks.NewMockStorage(s.ctrl)
	s.store = oidc.NewStore(&schema.IdentityProvidersOpenIDConnect{
		Clients: []schema.IdentityProvidersOpenIDConnectClient{
			{
				ID:                  "hs256",
				Secret:              tOpenIDConnectPBKDF2ClientSecret,
				AuthorizationPolicy: authorization.OneFactor.String(),
				RedirectURIs: []string{
					"https://client.example.com",
				},
				TokenEndpointAuthMethod:     oidc.ClientAuthMethodClientSecretJWT,
				TokenEndpointAuthSigningAlg: oidc.SigningAlgHMACUsingSHA256,
			},
		}}, s.mock)
}
// TestGetSubject covers GetSubject's three paths: generating and saving a
// new opaque identifier when none is stored, and propagating storage
// errors from both the load and the save.
func (s *StoreSuite) TestGetSubject() {
	s.T().Run("GenerateNew", func(t *testing.T) {
		s.mock.
			EXPECT().
			LoadUserOpaqueIdentifierBySignature(s.ctx, "openid", "", "john").
			Return(nil, nil)
		s.mock.
			EXPECT().
			SaveUserOpaqueIdentifier(s.ctx, gomock.Any()).
			Return(nil)
		opaqueID, err := s.store.GetSubject(s.ctx, "", "john")
		assert.NoError(t, err)
		assert.NotEqual(t, uint32(0), opaqueID)
	})
	s.T().Run("ReturnDatabaseErrorOnLoad", func(t *testing.T) {
		s.mock.
			EXPECT().
			LoadUserOpaqueIdentifierBySignature(s.ctx, "openid", "", "john").
			Return(nil, fmt.Errorf("failed to load"))
		opaqueID, err := s.store.GetSubject(s.ctx, "", "john")
		assert.EqualError(t, err, "failed to load")
		assert.Equal(t, uint32(0), opaqueID.ID())
	})
	s.T().Run("ReturnDatabaseErrorOnSave", func(t *testing.T) {
		s.mock.
			EXPECT().
			LoadUserOpaqueIdentifierBySignature(s.ctx, "openid", "", "john").
			Return(nil, nil)
		s.mock.
			EXPECT().
			SaveUserOpaqueIdentifier(s.ctx, gomock.Any()).
			Return(fmt.Errorf("failed to save"))
		opaqueID, err := s.store.GetSubject(s.ctx, "", "john")
		assert.EqualError(t, err, "failed to save")
		assert.Equal(t, uint32(0), opaqueID.ID())
	})
}
// TestTx checks that BeginTX, Commit and Rollback delegate straight to
// the storage layer, passing through both success and error results.
func (s *StoreSuite) TestTx() {
	gomock.InOrder(
		s.mock.EXPECT().BeginTX(s.ctx).Return(s.ctx, nil),
		s.mock.EXPECT().Commit(s.ctx).Return(nil),
		s.mock.EXPECT().Rollback(s.ctx).Return(nil),
		s.mock.EXPECT().BeginTX(s.ctx).Return(nil, fmt.Errorf("failed to begin")),
		s.mock.EXPECT().Commit(s.ctx).Return(fmt.Errorf("failed to commit")),
		s.mock.EXPECT().Rollback(s.ctx).Return(fmt.Errorf("failed to rollback")),
	)
	x, err := s.store.BeginTX(s.ctx)
	s.Equal(s.ctx, x)
	s.NoError(err)
	s.NoError(s.store.Commit(s.ctx))
	s.NoError(s.store.Rollback(s.ctx))
	x, err = s.store.BeginTX(s.ctx)
	s.Equal(nil, x)
	s.EqualError(err, "failed to begin")
	s.EqualError(s.store.Commit(s.ctx), "failed to commit")
	s.EqualError(s.store.Rollback(s.ctx), "failed to rollback")
}
// TestClientAssertionJWTValid checks JTI replay protection: a blacklisted
// JTI that has not yet expired is rejected with "jti_known", an expired
// blacklist entry is accepted, and a storage failure is propagated. The
// string arguments to the mock are the hex signatures of the JTI values
// passed to ClientAssertionJWTValid below.
func (s *StoreSuite) TestClientAssertionJWTValid() {
	gomock.InOrder(
		s.mock.
			EXPECT().
			LoadOAuth2BlacklistedJTI(s.ctx, "3a240379e8286a7a8ff5e99d68567e0e5e34e80168b8feffa89d3d33dea95b63").
			Return(&model.OAuth2BlacklistedJTI{
				ID:        1,
				Signature: "3a240379e8286a7a8ff5e99d68567e0e5e34e80168b8feffa89d3d33dea95b63",
				ExpiresAt: time.Now().Add(time.Hour),
			}, nil),
		s.mock.
			EXPECT().
			LoadOAuth2BlacklistedJTI(s.ctx, "e7f67ad76c80d57d34b19598462817932aec21d2806a08a786a8d4b9dd476068").
			Return(&model.OAuth2BlacklistedJTI{
				ID:        1,
				Signature: "e7f67ad76c80d57d34b19598462817932aec21d2806a08a786a8d4b9dd476068",
				ExpiresAt: time.Now().Add(-time.Hour),
			}, nil),
		s.mock.
			EXPECT().
			LoadOAuth2BlacklistedJTI(s.ctx, "f29ef0d85303a09411b76001c579980f1b1b7fc9deb1fa647875a724f4f231c6").
			Return(nil, fmt.Errorf("failed to load")),
	)
	s.EqualError(s.store.ClientAssertionJWTValid(s.ctx, "066ee771-e156-4886-b99f-ee09b0d3edf4"), "jti_known")
	s.NoError(s.store.ClientAssertionJWTValid(s.ctx, "5dad3ff7-e4f2-41b6-98a3-b73d872076ce"))
	s.EqualError(s.store.ClientAssertionJWTValid(s.ctx, "65471ccb-d650-4006-a95f-cb4f4e3d7202"), "failed to load")
}
// TestCreateSessions checks each Create*Session method against the mock:
// every session type (authorize code, access token, refresh token, OIDC,
// PKCE, PAR) is serialized into the expected model.OAuth2Session /
// OAuth2PARContext row, storage errors are propagated, and a nil fosite
// session produces the documented conversion error.
func (s *StoreSuite) TestCreateSessions() {
	challenge := model.MustNullUUID(model.NewRandomNullUUID())
	session := &oidc.Session{
		ChallengeID: challenge,
	}
	// The JSON-marshaled session is what the store persists in each row.
	sessionData, _ := json.Marshal(session)
	gomock.InOrder(
		s.mock.
			EXPECT().
			SaveOAuth2Session(s.ctx, storage.OAuth2SessionTypeAuthorizeCode, model.OAuth2Session{ChallengeID: challenge, RequestID: "abc", ClientID: "example", Signature: "abc", Active: true, Session: sessionData, RequestedScopes: model.StringSlicePipeDelimited{}, GrantedScopes: model.StringSlicePipeDelimited{}}).
			Return(nil),
		s.mock.
			EXPECT().
			SaveOAuth2Session(s.ctx, storage.OAuth2SessionTypeAuthorizeCode, model.OAuth2Session{ChallengeID: challenge, RequestID: "abc", ClientID: "example", Signature: "abc", Active: true, Session: sessionData, RequestedScopes: model.StringSlicePipeDelimited{}, GrantedScopes: model.StringSlicePipeDelimited{}}).
			Return(fmt.Errorf("duplicate key")),
		s.mock.
			EXPECT().
			SaveOAuth2Session(s.ctx, storage.OAuth2SessionTypeAccessToken, model.OAuth2Session{ChallengeID: challenge, RequestID: "abc", ClientID: "example", Signature: "abc", Active: true, Session: sessionData, RequestedScopes: model.StringSlicePipeDelimited{}, GrantedScopes: model.StringSlicePipeDelimited{}}).
			Return(nil),
		s.mock.
			EXPECT().
			SaveOAuth2Session(s.ctx, storage.OAuth2SessionTypeRefreshToken, model.OAuth2Session{ChallengeID: challenge, RequestID: "abc", ClientID: "example", Signature: "abc", Active: true, Session: sessionData, RequestedScopes: model.StringSlicePipeDelimited{}, GrantedScopes: model.StringSlicePipeDelimited{}}).
			Return(nil),
		s.mock.
			EXPECT().
			SaveOAuth2Session(s.ctx, storage.OAuth2SessionTypeOpenIDConnect, model.OAuth2Session{ChallengeID: challenge, RequestID: "abc", ClientID: "example", Signature: "abc", Active: true, Session: sessionData, RequestedScopes: model.StringSlicePipeDelimited{}, GrantedScopes: model.StringSlicePipeDelimited{}}).
			Return(nil),
		s.mock.
			EXPECT().
			SaveOAuth2Session(s.ctx, storage.OAuth2SessionTypePKCEChallenge, model.OAuth2Session{ChallengeID: challenge, RequestID: "abc", ClientID: "example", Signature: "abc", Active: true, Session: sessionData, RequestedScopes: model.StringSlicePipeDelimited{}, GrantedScopes: model.StringSlicePipeDelimited{}}).
			Return(nil),
		s.mock.
			EXPECT().
			SaveOAuth2PARContext(s.ctx, model.OAuth2PARContext{Signature: "abc", RequestID: "abc", ClientID: "example", Session: sessionData}).
			Return(nil),
	)
	s.NoError(s.store.CreateAuthorizeCodeSession(s.ctx, "abc", &fosite.Request{
		ID: "abc",
		Client: &oidc.BaseClient{
			ID: "example",
		},
		Session: session,
	}))
	s.EqualError(s.store.CreateAuthorizeCodeSession(s.ctx, "abc", &fosite.Request{
		ID: "abc",
		Client: &oidc.BaseClient{
			ID: "example",
		},
		Session: session,
	}), "duplicate key")
	s.EqualError(s.store.CreateAuthorizeCodeSession(s.ctx, "abc", &fosite.Request{
		ID: "abc",
		Client: &oidc.BaseClient{
			ID: "example",
		},
		Session: nil,
	}), "failed to create new *model.OAuth2Session: the session type OpenIDSession was expected but the type '<nil>' was used")
	s.NoError(s.store.CreateAccessTokenSession(s.ctx, "abc", &fosite.Request{
		ID: "abc",
		Client: &oidc.BaseClient{
			ID: "example",
		},
		Session: session,
	}))
	s.NoError(s.store.CreateRefreshTokenSession(s.ctx, "abc", &fosite.Request{
		ID: "abc",
		Client: &oidc.BaseClient{
			ID: "example",
		},
		Session: session,
	}))
	s.NoError(s.store.CreateOpenIDConnectSession(s.ctx, "abc", &fosite.Request{
		ID: "abc",
		Client: &oidc.BaseClient{
			ID: "example",
		},
		Session: session,
	}))
	s.NoError(s.store.CreatePKCERequestSession(s.ctx, "abc", &fosite.Request{
		ID: "abc",
		Client: &oidc.BaseClient{
			ID: "example",
		},
		Session: session,
	}))
	s.NoError(s.store.CreatePARSession(s.ctx, "abc", &fosite.AuthorizeRequest{
		Request: fosite.Request{
			ID: "abc",
			Client: &oidc.BaseClient{
				ID: "example",
			},
			Session: session,
		}}))
	s.EqualError(s.store.CreatePARSession(s.ctx, "abc", &fosite.AuthorizeRequest{
		Request: fosite.Request{
			ID: "abc",
			Client: &oidc.BaseClient{
				ID: "example",
			},
			Session: nil,
		}}), "failed to create new PAR context: can't assert type '<nil>' to an *OAuth2Session")
}
// TestRevokeSessions exercises every revocation/invalidation path of the
// store against a mocked storage provider. The expectations are declared with
// gomock.InOrder, so the assertions below must stay in exactly this order.
func (s *StoreSuite) TestRevokeSessions() {
	gomock.InOrder(
		// Authorization code invalidation: success, then backend failure.
		s.mock.
			EXPECT().
			DeactivateOAuth2Session(s.ctx, storage.OAuth2SessionTypeAuthorizeCode, "abc1").
			Return(nil),
		s.mock.
			EXPECT().
			DeactivateOAuth2Session(s.ctx, storage.OAuth2SessionTypeAuthorizeCode, "abc2").
			Return(fmt.Errorf("not found")),
		// Access token deletion by signature: success, then failure.
		s.mock.
			EXPECT().
			RevokeOAuth2Session(s.ctx, storage.OAuth2SessionTypeAccessToken, "at_example1").
			Return(nil),
		s.mock.
			EXPECT().
			RevokeOAuth2Session(s.ctx, storage.OAuth2SessionTypeAccessToken, "at_example2").
			Return(fmt.Errorf("not found")),
		// Access token revocation by request id: success, generic failure,
		// and sql.ErrNoRows (which the store maps to a fosite error).
		s.mock.
			EXPECT().
			RevokeOAuth2SessionByRequestID(s.ctx, storage.OAuth2SessionTypeAccessToken, "65471ccb-d650-4006-a95f-cb4f4e3d7200").
			Return(nil),
		s.mock.
			EXPECT().
			RevokeOAuth2SessionByRequestID(s.ctx, storage.OAuth2SessionTypeAccessToken, "65471ccb-d650-4006-a95f-cb4f4e3d7201").
			Return(fmt.Errorf("not found")),
		s.mock.
			EXPECT().
			RevokeOAuth2SessionByRequestID(s.ctx, storage.OAuth2SessionTypeAccessToken, "65471ccb-d650-4006-a95f-cb4f4e3d7202").
			Return(sql.ErrNoRows),
		// Refresh token deletion by signature.
		s.mock.
			EXPECT().
			RevokeOAuth2Session(s.ctx, storage.OAuth2SessionTypeRefreshToken, "rt_example1").
			Return(nil),
		s.mock.
			EXPECT().
			RevokeOAuth2Session(s.ctx, storage.OAuth2SessionTypeRefreshToken, "rt_example2").
			Return(fmt.Errorf("not found")),
		// Refresh token revocation by request id (RevokeRefreshToken).
		s.mock.
			EXPECT().
			DeactivateOAuth2SessionByRequestID(s.ctx, storage.OAuth2SessionTypeRefreshToken, "65471ccb-d650-4006-a95f-cb4f4e3d7200").
			Return(nil),
		s.mock.
			EXPECT().
			DeactivateOAuth2SessionByRequestID(s.ctx, storage.OAuth2SessionTypeRefreshToken, "65471ccb-d650-4006-a95f-cb4f4e3d7201").
			Return(fmt.Errorf("not found")),
		s.mock.
			EXPECT().
			DeactivateOAuth2SessionByRequestID(s.ctx, storage.OAuth2SessionTypeRefreshToken, "65471ccb-d650-4006-a95f-cb4f4e3d7202").
			Return(sql.ErrNoRows),
		// Same three outcomes again via RevokeRefreshTokenMaybeGracePeriod.
		s.mock.
			EXPECT().
			DeactivateOAuth2SessionByRequestID(s.ctx, storage.OAuth2SessionTypeRefreshToken, "65471ccb-d650-4006-a95f-cb4f4e3d7200").
			Return(nil),
		s.mock.
			EXPECT().
			DeactivateOAuth2SessionByRequestID(s.ctx, storage.OAuth2SessionTypeRefreshToken, "65471ccb-d650-4006-a95f-cb4f4e3d7201").
			Return(fmt.Errorf("not found")),
		s.mock.
			EXPECT().
			DeactivateOAuth2SessionByRequestID(s.ctx, storage.OAuth2SessionTypeRefreshToken, "65471ccb-d650-4006-a95f-cb4f4e3d7202").
			Return(sql.ErrNoRows),
		// PKCE challenge deletion.
		s.mock.
			EXPECT().
			RevokeOAuth2Session(s.ctx, storage.OAuth2SessionTypePKCEChallenge, "pkce1").
			Return(nil),
		s.mock.
			EXPECT().
			RevokeOAuth2Session(s.ctx, storage.OAuth2SessionTypePKCEChallenge, "pkce2").
			Return(fmt.Errorf("not found")),
		// OpenID Connect session deletion.
		s.mock.
			EXPECT().
			RevokeOAuth2Session(s.ctx, storage.OAuth2SessionTypeOpenIDConnect, "ac_1").
			Return(nil),
		s.mock.
			EXPECT().
			RevokeOAuth2Session(s.ctx, storage.OAuth2SessionTypeOpenIDConnect, "ac_2").
			Return(fmt.Errorf("not found")),
		// Pushed authorization request context deletion.
		s.mock.
			EXPECT().
			RevokeOAuth2PARContext(s.ctx, "urn:par1").
			Return(nil),
		s.mock.
			EXPECT().
			RevokeOAuth2PARContext(s.ctx, "urn:par2").
			Return(fmt.Errorf("not found")),
	)

	s.NoError(s.store.InvalidateAuthorizeCodeSession(s.ctx, "abc1"))
	s.EqualError(s.store.InvalidateAuthorizeCodeSession(s.ctx, "abc2"), "not found")
	s.NoError(s.store.DeleteAccessTokenSession(s.ctx, "at_example1"))
	s.EqualError(s.store.DeleteAccessTokenSession(s.ctx, "at_example2"), "not found")
	s.NoError(s.store.RevokeAccessToken(s.ctx, "65471ccb-d650-4006-a95f-cb4f4e3d7200"))
	s.EqualError(s.store.RevokeAccessToken(s.ctx, "65471ccb-d650-4006-a95f-cb4f4e3d7201"), "not found")
	// NOTE(review): sql.ErrNoRows surfaces as "not_found" here — presumably
	// the store translates missing rows into fosite's not_found error.
	s.EqualError(s.store.RevokeAccessToken(s.ctx, "65471ccb-d650-4006-a95f-cb4f4e3d7202"), "not_found")
	s.NoError(s.store.DeleteRefreshTokenSession(s.ctx, "rt_example1"))
	s.EqualError(s.store.DeleteRefreshTokenSession(s.ctx, "rt_example2"), "not found")
	s.NoError(s.store.RevokeRefreshToken(s.ctx, "65471ccb-d650-4006-a95f-cb4f4e3d7200"))
	s.EqualError(s.store.RevokeRefreshToken(s.ctx, "65471ccb-d650-4006-a95f-cb4f4e3d7201"), "not found")
	s.EqualError(s.store.RevokeRefreshToken(s.ctx, "65471ccb-d650-4006-a95f-cb4f4e3d7202"), "sql: no rows in result set")
	s.NoError(s.store.RevokeRefreshTokenMaybeGracePeriod(s.ctx, "65471ccb-d650-4006-a95f-cb4f4e3d7200", "1"))
	s.EqualError(s.store.RevokeRefreshTokenMaybeGracePeriod(s.ctx, "65471ccb-d650-4006-a95f-cb4f4e3d7201", "2"), "not found")
	s.EqualError(s.store.RevokeRefreshTokenMaybeGracePeriod(s.ctx, "65471ccb-d650-4006-a95f-cb4f4e3d7202", "3"), "sql: no rows in result set")
	s.NoError(s.store.DeletePKCERequestSession(s.ctx, "pkce1"))
	s.EqualError(s.store.DeletePKCERequestSession(s.ctx, "pkce2"), "not found")
	s.NoError(s.store.DeleteOpenIDConnectSession(s.ctx, "ac_1"))
	s.EqualError(s.store.DeleteOpenIDConnectSession(s.ctx, "ac_2"), "not found")
	s.NoError(s.store.DeletePARSession(s.ctx, "urn:par1"))
	s.EqualError(s.store.DeletePARSession(s.ctx, "urn:par2"), "not found")
}
// TestGetSessions exercises the session lookup paths of the store: active,
// inactive, missing, backend-failure and unknown-client variants. Mock loads
// are strictly ordered, so the lookups below must keep this order.
func (s *StoreSuite) TestGetSessions() {
	challenge := model.MustNullUUID(model.NewRandomNullUUID())

	session := &oidc.Session{
		ChallengeID: challenge,
		ClientID:    "hs256",
	}

	sessionData, _ := json.Marshal(session)

	// Separate payload for the lookup whose stored client id ("no-client")
	// does not resolve to a registered client.
	sessionb := &oidc.Session{
		ChallengeID: challenge,
		ClientID:    "hs256",
	}

	sessionDatab, _ := json.Marshal(sessionb)

	gomock.InOrder(
		s.mock.EXPECT().LoadOAuth2Session(s.ctx, storage.OAuth2SessionTypeAuthorizeCode, "ac_123").
			Return(&model.OAuth2Session{ClientID: "hs256", Session: sessionData, Active: true}, nil),
		s.mock.EXPECT().LoadOAuth2Session(s.ctx, storage.OAuth2SessionTypeAuthorizeCode, "ac_456").
			Return(&model.OAuth2Session{ClientID: "hs256", Session: sessionData, Active: false}, nil),
		s.mock.EXPECT().LoadOAuth2Session(s.ctx, storage.OAuth2SessionTypeAuthorizeCode, "ac_aaa").
			Return(nil, sql.ErrNoRows),
		s.mock.EXPECT().LoadOAuth2Session(s.ctx, storage.OAuth2SessionTypeAuthorizeCode, "ac_130").
			Return(nil, fmt.Errorf("timeout")),
		s.mock.EXPECT().LoadOAuth2Session(s.ctx, storage.OAuth2SessionTypeAuthorizeCode, "ac_badclient").
			Return(&model.OAuth2Session{ClientID: "no-client", Session: sessionDatab, Active: true}, nil),
		s.mock.EXPECT().LoadOAuth2Session(s.ctx, storage.OAuth2SessionTypeAccessToken, "at").
			Return(&model.OAuth2Session{ClientID: "hs256", Session: sessionData, Active: true}, nil),
		s.mock.EXPECT().LoadOAuth2Session(s.ctx, storage.OAuth2SessionTypeRefreshToken, "rt").
			Return(&model.OAuth2Session{ClientID: "hs256", Session: sessionData, Active: true}, nil),
		s.mock.EXPECT().LoadOAuth2Session(s.ctx, storage.OAuth2SessionTypePKCEChallenge, "pkce").
			Return(&model.OAuth2Session{ClientID: "hs256", Session: sessionData, Active: true}, nil),
		s.mock.EXPECT().LoadOAuth2Session(s.ctx, storage.OAuth2SessionTypeOpenIDConnect, "ot").
			Return(&model.OAuth2Session{ClientID: "hs256", Session: sessionData, Active: true}, nil),
		s.mock.
			EXPECT().
			LoadOAuth2PARContext(s.ctx, "urn:par").
			Return(&model.OAuth2PARContext{Signature: "abc", RequestID: "abc", ClientID: "hs256", Session: sessionData}, nil),
		s.mock.
			EXPECT().
			LoadOAuth2PARContext(s.ctx, "urn:par").
			Return(nil, sql.ErrNoRows),
	)

	var (
		r   fosite.Requester
		err error
	)

	r, err = s.store.GetAuthorizeCodeSession(s.ctx, "ac_123", &oidc.Session{})
	s.NotNil(r)
	s.NoError(err)

	r, err = s.store.GetAuthorizeCodeSession(s.ctx, "ac_456", &oidc.Session{})
	s.NotNil(r)
	// NOTE(review): "ben" presumably mirrors the upstream fosite error text
	// exactly — do not "fix" the spelling here without checking fosite.
	s.EqualError(err, "Authorization code has ben invalidated")

	r, err = s.store.GetAuthorizeCodeSession(s.ctx, "ac_aaa", &oidc.Session{})
	s.Nil(r)
	s.EqualError(err, "not_found")

	r, err = s.store.GetAuthorizeCodeSession(s.ctx, "ac_130", &oidc.Session{})
	s.Nil(r)
	s.EqualError(err, "timeout")

	r, err = s.store.GetAuthorizeCodeSession(s.ctx, "ac_badclient", &oidc.Session{})
	s.Nil(r)
	s.EqualError(err, "error occurred while mapping OAuth 2.0 Session back to a Request while trying to lookup the registered client: invalid_client")

	r, err = s.store.GetAccessTokenSession(s.ctx, "at", &oidc.Session{})
	s.NotNil(r)
	s.NoError(err)

	r, err = s.store.GetRefreshTokenSession(s.ctx, "rt", &oidc.Session{})
	s.NotNil(r)
	s.NoError(err)

	r, err = s.store.GetPKCERequestSession(s.ctx, "pkce", &oidc.Session{})
	s.NotNil(r)
	s.NoError(err)

	r, err = s.store.GetOpenIDConnectSession(s.ctx, "ot", &fosite.Request{
		ID: "abc",
		Client: &oidc.BaseClient{
			ID: "example",
		},
		Session: session,
	})
	s.NotNil(r)
	s.NoError(err)

	r, err = s.store.GetPARSession(s.ctx, "urn:par")
	s.NotNil(r)
	s.NoError(err)

	r, err = s.store.GetPARSession(s.ctx, "urn:par")
	s.Nil(r)
	s.EqualError(err, "sql: no rows in result set")
}
// TestIsJWTUsed verifies the JTI replay check: a blacklisted, unexpired JTI
// is reported as used; an expired entry means the JTI may be reused; a load
// failure is reported as used together with the underlying error.
// NOTE(review): the lookup arguments are presumably the SHA-256 hex digests
// of the JTI values passed below — confirm against the store implementation.
func (s *StoreSuite) TestIsJWTUsed() {
	gomock.InOrder(
		s.mock.
			EXPECT().
			LoadOAuth2BlacklistedJTI(s.ctx, "3a240379e8286a7a8ff5e99d68567e0e5e34e80168b8feffa89d3d33dea95b63").
			Return(&model.OAuth2BlacklistedJTI{
				ID:        1,
				Signature: "3a240379e8286a7a8ff5e99d68567e0e5e34e80168b8feffa89d3d33dea95b63",
				ExpiresAt: time.Now().Add(time.Hour), // still valid -> JTI is known/used
			}, nil),
		s.mock.
			EXPECT().
			LoadOAuth2BlacklistedJTI(s.ctx, "e7f67ad76c80d57d34b19598462817932aec21d2806a08a786a8d4b9dd476068").
			Return(&model.OAuth2BlacklistedJTI{
				ID:        1,
				Signature: "e7f67ad76c80d57d34b19598462817932aec21d2806a08a786a8d4b9dd476068",
				ExpiresAt: time.Now().Add(-time.Hour), // expired -> JTI may be reused
			}, nil),
		s.mock.
			EXPECT().
			LoadOAuth2BlacklistedJTI(s.ctx, "f29ef0d85303a09411b76001c579980f1b1b7fc9deb1fa647875a724f4f231c6").
			Return(nil, fmt.Errorf("failed to load")),
	)

	used, err := s.store.IsJWTUsed(s.ctx, "066ee771-e156-4886-b99f-ee09b0d3edf4")
	s.True(used)
	s.EqualError(err, "jti_known")

	used, err = s.store.IsJWTUsed(s.ctx, "5dad3ff7-e4f2-41b6-98a3-b73d872076ce")
	s.False(used)
	s.NoError(err)

	// On a storage error the store conservatively reports the JTI as used.
	used, err = s.store.IsJWTUsed(s.ctx, "65471ccb-d650-4006-a95f-cb4f4e3d7202")
	s.True(used)
	s.EqualError(err, "failed to load")
}
// TestMarkJWTUsedForTime verifies that marking a JTI as used persists a
// blacklist entry (keyed by the JTI's hashed signature) with the supplied
// expiry, and that storage errors are propagated unchanged.
func (s *StoreSuite) TestMarkJWTUsedForTime() {
	gomock.InOrder(
		s.mock.EXPECT().
			SaveOAuth2BlacklistedJTI(s.ctx, model.OAuth2BlacklistedJTI{Signature: "f29ef0d85303a09411b76001c579980f1b1b7fc9deb1fa647875a724f4f231c6", ExpiresAt: time.Unix(160000000, 0)}).
			Return(nil),
		s.mock.EXPECT().SaveOAuth2BlacklistedJTI(s.ctx, model.OAuth2BlacklistedJTI{Signature: "0dab0de97ed4e05da82763497448daf4f6b555c99218100e3ef5a81f36232940", ExpiresAt: time.Unix(160000000, 0)}).
			Return(fmt.Errorf("already marked")),
	)

	s.NoError(s.store.MarkJWTUsedForTime(s.ctx, "65471ccb-d650-4006-a95f-cb4f4e3d7202", time.Unix(160000000, 0)))
	s.EqualError(s.store.MarkJWTUsedForTime(s.ctx, "65471ccb-d650-4006-a95f-cb4f4e3d7201", time.Unix(160000000, 0)), "already marked")
}
|
/*
Tencent is pleased to support the open source community by making Basic Service Configuration Platform available.
Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under
the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific language governing permissions and
limitations under the License.
*/
package dao
import (
"errors"
"fmt"
rawgen "gorm.io/gen"
"gorm.io/gorm"
"bscp.io/pkg/dal/gen"
"bscp.io/pkg/dal/table"
"bscp.io/pkg/kit"
"bscp.io/pkg/search"
"bscp.io/pkg/types"
)
// TemplateRevision supplies all the template revision related operations.
type TemplateRevision interface {
	// Create one template revision instance.
	Create(kit *kit.Kit, templateRevision *table.TemplateRevision) (uint32, error)
	// CreateWithTx create one template revision instance with transaction.
	CreateWithTx(kit *kit.Kit, tx *gen.QueryTx, template *table.TemplateRevision) (uint32, error)
	// List templates with options. Returns the matching page and the total count.
	List(kit *kit.Kit, bizID, templateID uint32, s search.Searcher, opt *types.BasePage) ([]*table.TemplateRevision, int64, error)
	// Delete one template revision instance.
	Delete(kit *kit.Kit, templateRevision *table.TemplateRevision) error
	// GetByUniqueKey get template revision by unique key (bizID + templateID + revisionName).
	GetByUniqueKey(kit *kit.Kit, bizID, templateID uint32, revisionName string) (*table.TemplateRevision, error)
	// ListByIDs list template revisions by template revision ids.
	ListByIDs(kit *kit.Kit, ids []uint32) ([]*table.TemplateRevision, error)
	// ListByIDsWithTx list template revisions by template revision ids with transaction.
	ListByIDsWithTx(kit *kit.Kit, tx *gen.QueryTx, ids []uint32) ([]*table.TemplateRevision, error)
	// ListByTemplateIDs list template revisions by template ids.
	ListByTemplateIDs(kit *kit.Kit, bizID uint32, templateIDs []uint32) ([]*table.TemplateRevision, error)
	// DeleteForTmplWithTx delete all template revisions of one template with transaction.
	DeleteForTmplWithTx(kit *kit.Kit, tx *gen.QueryTx, bizID, templateID uint32) error
}
// Compile-time assertion that templateRevisionDao implements TemplateRevision.
var _ TemplateRevision = new(templateRevisionDao)

// templateRevisionDao is the gorm/gen backed implementation of TemplateRevision.
type templateRevisionDao struct {
	genQ     *gen.Query     // generated query object used for all DB access
	idGen    IDGenInterface // allocator for new primary ids
	auditDao AuditDao       // records audit entries alongside writes
}
// Create one template revision instance.
// It validates the revision and its attachment, allocates a new primary id,
// then inserts the row and its audit record in one transaction.
func (dao *templateRevisionDao) Create(kit *kit.Kit, g *table.TemplateRevision) (uint32, error) {
	if err := g.ValidateCreate(); err != nil {
		return 0, err
	}

	// Ensure the attached template actually exists before inserting.
	if err := dao.validateAttachmentExist(kit, g.Attachment); err != nil {
		return 0, err
	}

	// generate a TemplateRevision id and update to TemplateRevision.
	id, err := dao.idGen.One(kit, table.Name(g.TableName()))
	if err != nil {
		return 0, err
	}
	g.ID = id

	ad := dao.auditDao.DecoratorV2(kit, g.Attachment.BizID).PrepareCreate(g)

	// Run the insert and the audit write in a single transaction.
	createTx := func(tx *gen.Query) error {
		q := tx.TemplateRevision.WithContext(kit.Ctx)
		if err := q.Create(g); err != nil {
			return err
		}

		if err := ad.Do(tx); err != nil {
			return err
		}

		return nil
	}
	if err := dao.genQ.Transaction(createTx); err != nil {
		return 0, err
	}

	return g.ID, nil
}
// CreateWithTx create one template revision instance with transaction.
// The caller owns the transaction and is responsible for commit/rollback;
// the insert and its audit entry are both recorded inside tx.
func (dao *templateRevisionDao) CreateWithTx(kit *kit.Kit, tx *gen.QueryTx, g *table.TemplateRevision) (uint32, error) {
	if err := g.ValidateCreate(); err != nil {
		return 0, err
	}

	// Allocate a fresh primary id for the revision before inserting.
	newID, err := dao.idGen.One(kit, table.Name(g.TableName()))
	if err != nil {
		return 0, err
	}
	g.ID = newID

	if err = tx.TemplateRevision.WithContext(kit.Ctx).Create(g); err != nil {
		return 0, err
	}

	// Record the audit entry within the same transaction.
	auditor := dao.auditDao.DecoratorV2(kit, g.Attachment.BizID).PrepareCreate(g)
	if err = auditor.Do(tx.Query); err != nil {
		return 0, err
	}

	return g.ID, nil
}
// List template revisions with options.
// It filters by bizID/templateID, OR-combines any search expressions, and
// returns either the full result set (opt.All) or one page plus the total.
func (dao *templateRevisionDao) List(kit *kit.Kit, bizID, templateID uint32, s search.Searcher, opt *types.BasePage) (
	[]*table.TemplateRevision, int64, error) {
	m := dao.genQ.TemplateRevision
	q := dao.genQ.TemplateRevision.WithContext(kit.Ctx)

	var conds []rawgen.Condition
	// add search condition: combine all expressions with OR.
	// The original loop lacked a continue and applied exprs[0] twice
	// (WHERE e0 OR e0 OR ...), generating redundant SQL.
	if s != nil {
		exprs := s.SearchExprs(dao.genQ)
		if len(exprs) > 0 {
			do := q.Where(exprs[0])
			for _, expr := range exprs[1:] {
				do = do.Or(expr)
			}
			conds = append(conds, do)
		}
	}

	d := q.Where(m.BizID.Eq(bizID), m.TemplateID.Eq(templateID)).Where(conds...)
	// opt.All bypasses pagination and returns everything.
	if opt.All {
		result, err := d.Find()
		if err != nil {
			return nil, 0, err
		}

		return result, int64(len(result)), nil
	}

	return d.FindByPage(opt.Offset(), opt.LimitInt())
}
// Delete one template revision instance.
// The current row is fetched first so the audit entry captures its state,
// then the delete and the audit write run in one transaction.
func (dao *templateRevisionDao) Delete(kit *kit.Kit, g *table.TemplateRevision) error {
	// Validate the delete parameters.
	if err := g.ValidateDelete(); err != nil {
		return err
	}

	// Load the current record to include it in the audit trail.
	m := dao.genQ.TemplateRevision
	q := dao.genQ.TemplateRevision.WithContext(kit.Ctx)
	oldOne, err := q.Where(m.ID.Eq(g.ID), m.BizID.Eq(g.Attachment.BizID)).Take()
	if err != nil {
		return err
	}
	ad := dao.auditDao.DecoratorV2(kit, g.Attachment.BizID).PrepareDelete(oldOne)

	// Run the delete and the audit write in a single transaction.
	// NOTE(review): the Where clause only constrains BizID; presumably gen's
	// Delete(g) also applies g's primary key to target the row — confirm
	// against the generated query code.
	deleteTx := func(tx *gen.Query) error {
		q = tx.TemplateRevision.WithContext(kit.Ctx)
		if _, err := q.Where(m.BizID.Eq(g.Attachment.BizID)).Delete(g); err != nil {
			return err
		}

		if err := ad.Do(tx); err != nil {
			return err
		}

		return nil
	}
	if err := dao.genQ.Transaction(deleteTx); err != nil {
		return err
	}

	return nil
}
// GetByUniqueKey get template revision by unique key
// (bizID + templateID + revisionName).
func (dao *templateRevisionDao) GetByUniqueKey(kit *kit.Kit, bizID, templateID uint32, revisionName string) (
	*table.TemplateRevision, error) {
	m := dao.genQ.TemplateRevision
	q := dao.genQ.TemplateRevision.WithContext(kit.Ctx)

	templateRevision, err := q.Where(m.BizID.Eq(bizID), m.TemplateID.Eq(templateID),
		m.RevisionName.Eq(revisionName)).Take()
	if err != nil {
		// Wrap with %w so callers can still match the underlying error
		// (e.g. gorm.ErrRecordNotFound) with errors.Is.
		return nil, fmt.Errorf("get template revision failed, err: %w", err)
	}

	return templateRevision, nil
}
// ListByIDs list template revisions by template revision ids.
func (dao *templateRevisionDao) ListByIDs(kit *kit.Kit, ids []uint32) ([]*table.TemplateRevision, error) {
	tr := dao.genQ.TemplateRevision
	return tr.WithContext(kit.Ctx).Where(tr.ID.In(ids...)).Find()
}
// ListByIDsWithTx list template revisions by template revision ids with
// transaction; reads happen through the caller-supplied tx.
func (dao *templateRevisionDao) ListByIDsWithTx(kit *kit.Kit, tx *gen.QueryTx, ids []uint32) (
	[]*table.TemplateRevision, error) {
	tr := tx.TemplateRevision
	return tr.WithContext(kit.Ctx).Where(tr.ID.In(ids...)).Find()
}
// ListByTemplateIDs list template revisions by template ids, scoped to one biz.
func (dao *templateRevisionDao) ListByTemplateIDs(kit *kit.Kit, bizID uint32, templateIDs []uint32) ([]*table.TemplateRevision,
	error) {
	tr := dao.genQ.TemplateRevision
	return tr.WithContext(kit.Ctx).Where(tr.BizID.Eq(bizID), tr.TemplateID.In(templateIDs...)).Find()
}
// DeleteForTmplWithTx delete all template revisions of one template with
// transaction; the caller owns commit/rollback.
func (dao *templateRevisionDao) DeleteForTmplWithTx(kit *kit.Kit, tx *gen.QueryTx, bizID, templateID uint32) error {
	tr := tx.TemplateRevision
	_, err := tr.WithContext(kit.Ctx).Where(tr.BizID.Eq(bizID), tr.TemplateID.Eq(templateID)).Delete()
	return err
}
// validateAttachmentExist validate if attachment resource exists before
// operating template revision: the attached template must be present.
func (dao *templateRevisionDao) validateAttachmentExist(kit *kit.Kit, am *table.TemplateRevisionAttachment) error {
	m := dao.genQ.Template
	q := dao.genQ.Template.WithContext(kit.Ctx)

	if _, err := q.Where(m.ID.Eq(am.TemplateID)).Take(); err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return fmt.Errorf("template revision attached template %d is not exist", am.TemplateID)
		}
		// Wrap with %w so callers can inspect the underlying DB error.
		return fmt.Errorf("get template revision attached template failed, err: %w", err)
	}

	return nil
}
|
package chessboard
// Rank stores if a square is occupied by a piece
type Rank []bool

// Chessboard contains eight Ranks, accessed with values from 'A' to 'H'
type Chessboard map[byte]Rank

// CountInRank returns how many squares are occupied in the chessboard,
// within the given rank. An unknown rank yields zero (ranging a nil Rank).
func (cb Chessboard) CountInRank(rank byte) (ret int) {
	for _, occupied := range cb[rank] {
		if occupied {
			ret++
		}
	}
	return ret
}

// CountInFile returns how many squares are occupied in the chessboard,
// within the given file. Files outside 1..8 count as zero, and ranks
// shorter than the requested file are skipped (the original indexed
// f[file-1] unconditionally and paniced on ragged boards).
func (cb Chessboard) CountInFile(file int) (ret int) {
	if file < 1 || file > 8 {
		return
	}
	for _, f := range cb {
		if file-1 < len(f) && f[file-1] {
			ret++
		}
	}
	return ret
}

// CountAll counts how many squares are present in the chessboard.
func (cb Chessboard) CountAll() (ret int) {
	// Summing rank lengths replaces the original's pointless per-square loop.
	for _, rank := range cb {
		ret += len(rank)
	}
	return ret
}

// CountOccupied returns how many squares are occupied in the chessboard.
func (cb Chessboard) CountOccupied() (ret int) {
	for rank := range cb {
		ret += cb.CountInRank(rank)
	}
	return ret
}
|
package server
import (
"FPproject/Backend/log"
"FPproject/Backend/models"
"net/http"
"github.com/gin-gonic/gin"
)
// InsertUH stores the user-health payload from the request body for the
// authenticated user and responds with the id of the new record.
func (h *Handler) InsertUH(c *gin.Context) {
	var payload models.UserHealth
	if err := c.BindJSON(&payload); err != nil {
		log.Info.Println(err)
		c.JSON(http.StatusBadRequest, gin.H{"status": "bad request"})
		return
	}

	// NOTE(review): assumes the auth middleware always stores a string user
	// id under Keys["ID"]; the assertion panics otherwise — confirm upstream.
	userID := c.Keys["ID"].(string)

	insertedID, err := h.db.InsertUH(userID, payload)
	if err != nil {
		log.Warning.Println(err)
		c.JSON(http.StatusInternalServerError, gin.H{"status": "internal server error"})
		return
	}

	c.JSON(http.StatusOK, gin.H{"status": "OK", "id": insertedID})
}
// DelUH removes the authenticated user's health record and echoes the
// affected record id.
func (h *Handler) DelUH(c *gin.Context) {
	userID := c.Keys["ID"].(string)

	deletedID, err := h.db.DelUH(userID)
	if err != nil {
		log.Warning.Println(err)
		c.JSON(http.StatusInternalServerError, gin.H{"status": "internal server error"})
		return
	}

	c.JSON(http.StatusOK, gin.H{"status": "OK", "id": deletedID})
}
// GetUH returns the authenticated user's health record as JSON.
func (h *Handler) GetUH(c *gin.Context) {
	// NOTE(review): assumes the auth middleware always stores a string user
	// id under Keys["ID"]; the assertion panics otherwise — confirm upstream.
	userid := c.Keys["ID"].(string)

	// The redundant zero-value `var body models.UserHealth` declaration was
	// removed; := declares body directly from the query result.
	body, err := h.db.GetUH(userid)
	if err != nil {
		log.Warning.Println(err)
		c.JSON(http.StatusInternalServerError, gin.H{
			"status": "internal server error",
		})
		return
	}

	c.JSON(http.StatusOK, body)
}
// UpdateUH overwrites the authenticated user's health record with the JSON
// payload from the request body and responds with the affected id.
func (h *Handler) UpdateUH(c *gin.Context) {
	var payload models.UserHealth
	if err := c.BindJSON(&payload); err != nil {
		log.Info.Println(err)
		c.JSON(http.StatusBadRequest, gin.H{"status": "bad request"})
		return
	}

	// The record id always comes from the auth context, never from the body.
	payload.ID = c.Keys["ID"].(string)

	updatedID, err := h.db.UpdateUH(payload)
	if err != nil {
		log.Warning.Println(err)
		c.JSON(http.StatusInternalServerError, gin.H{"status": "internal server error"})
		return
	}

	c.JSON(http.StatusOK, gin.H{"status": "OK", "id": updatedID})
}
|
package main
import "fmt"
// main demonstrates Go array basics: the length being part of the type,
// the different initialization forms, value vs reference semantics, and
// two-dimensional arrays. (Original comments translated from Chinese.)
func main() {
	// The length of an array is part of its type.
	var arr1 [3]int
	var arr2 [4]string
	fmt.Printf("%T, %T \n", arr1, arr2)

	// Initialization, method 1: assign each element individually.
	var arr3 [3]int
	arr3[0] = 1
	arr3[1] = 2
	arr3[2] = 3
	fmt.Println(arr3)

	// Method 2: initialize with an array literal.
	var arr4 = [4]int{10, 20, 30, 40}
	fmt.Println(arr4)

	// Method 3: let the compiler infer the length with `...`.
	var arr5 = [...]int{1, 2}
	fmt.Println(arr5)

	// Method 4: initialize selected indices; the rest stay zero.
	a := [...]int{1: 1, 3: 5}
	fmt.Println(a)

	for i := 0; i < len(a); i++ {
		fmt.Print(a[i], " ")
	}

	for _, value := range a {
		fmt.Print(value, " ")
	}
	fmt.Println()

	// Value types vs reference types.
	// Basic data types and arrays are value types: assignment copies.
	var aa = 10
	bb := aa
	aa = 20
	fmt.Println(aa, bb)

	// Arrays: array2 is an independent copy, so array1 is unchanged.
	var array1 = [...]int{1, 2, 3}
	array2 := array1
	array2[0] = 3
	fmt.Println(array1, array2)

	// Slices share the underlying array: mutating array4 changes array3 too.
	var array3 = []int{1, 2, 3}
	array4 := array3
	array4[0] = 3
	fmt.Println(array3, array4)

	// Two-dimensional array, iterated by index and by range.
	var array5 = [...][2]int{{1, 2}, {2, 3}}
	for i := 0; i < len(array5); i++ {
		for j := 0; j < len(array5[0]); j++ {
			fmt.Println(array5[i][j])
		}
	}

	for _, item := range array5 {
		for _, item2 := range item {
			fmt.Println(item2)
		}
	}
}
|
package models
import (
"gopkg.in/mgo.v2"
"net/http"
"strings"
)
// MongoDB connection settings and the collection name for each equipment
// category tracked by the inventory database.
const (
	MONGO_ADDRESS                     = "127.0.0.1:27017"
	MONGO_DB_NAME                     = "InventorDB"
	MONGO_COL_MO_NAME                 = "monitors"
	MONGO_COL_SU_NAME                 = "systemUnit"
	MONGO_COL_NB_NAME                 = "notebook"
	MONGO_COL_NH_NAME                 = "networkHardware"
	MONGO_COL_PR_NAME                 = "printer"
	MONGO_COL_SR_NAME                 = "server"
	MONGO_COL_TP_NAME                 = "telephone"
	MONGO_COL_UP_NAME                 = "ups"
	MONGO_COL_USER_ADMIN_NAME         = "user"
	MONGO_COL_ROLE_NAME               = "role"
	MONGO_COL_LOCATION_NAME           = "location"
	MONGO_COL_MATRIX_NAME             = "matrixtype"
	MONGO_COL_USER_NAME               = "simpleuser"
	MONGO_COL_UNDEFINED_COMPUTER_NAME = "undefinedComputer"
)

// Display names for the equipment types exposed by the API.
const (
	NOTEBOOK         = "Notebook"
	SYSTEM_UNIT      = "SystemUnit"
	MONITOR          = "Monitor"
	NETWORK_HARDWARE = "NetworkHardware"
	PRINTER          = "Printer"
	SERVER           = "Server"
	TELEPHONE        = "Telephone"
	UPS_CONST        = "UPS"
)
// InitDB dials the configured MongoDB address and returns the resulting
// session together with any connection error.
func InitDB() (*mgo.Session, error) {
	return mgo.Dial(MONGO_ADDRESS)
}
// getCollection switches the session into safe (acknowledged-write) mode and
// returns the named collection from the inventory database. The error result
// is always nil; it exists only to fit callers' signatures.
func getCollection(session *mgo.Session, typeEquipment string) (*mgo.Collection, error) {
	session.SetSafe(&mgo.Safe{})
	return session.DB(MONGO_DB_NAME).C(typeEquipment), nil
}
// make4digits left-pads the string pointed to by strN with zeros so it is at
// least four characters long (e.g. "7" -> "0007", "" -> "0000").
// Strings already four or more characters long are returned unchanged; the
// original implementation indexed with a negative offset and paniced on them.
func make4digits(strN *string) {
	const amountDigitsInID = 4
	if len(*strN) >= amountDigitsInID {
		return
	}
	*strN = strings.Repeat("0", amountDigitsInID-len(*strN)) + *strN
}
// Equipment abstracts CRUD plus HTTP dispatch over one equipment collection
// (monitor, notebook, server, ...).
type Equipment interface {
	// Get returns the item with the given id.
	Get(id string) (interface{}, error)
	// GetAll returns every item in the collection.
	GetAll() ([]interface{}, error)
	// Save persists a new item and reports whether it succeeded.
	Save(interface{}) (bool, error)
	// Update modifies an existing item using data from the request.
	Update(interface{}, *http.Request) (bool, error)
	// Handle routes an HTTP request concerning the item with the given id.
	Handle(args map[string]interface{}, id string, w http.ResponseWriter, r *http.Request)
}

// GetterNextID is implemented by collections that can report the id of the
// most recently stored item.
type GetterNextID interface {
	GetLastID() string
}
|
package parcels
import "sort"
// Ref is one posting-list entry: an ngram occurrence within a document.
// Lists of Ref are kept sorted ascending by Doc.
type Ref struct {
	Doc    int64   // Document identifier
	Pos    int16   // Position index
	Weight float64 // Weight of the ngram
}
// refsContains reports whether r is present in the Doc-sorted list rs.
// The comparison is full struct equality, so Pos and Weight must match too.
func refsContains(rs []Ref, r Ref) bool {
	if len(rs) == 0 {
		return false
	}
	idx := sort.Search(len(rs), func(n int) bool { return rs[n].Doc >= r.Doc })
	return idx < len(rs) && rs[idx] == r
}
// refsInclude returns a Doc-sorted list containing r. If an entry with the
// same Doc already exists, the original list is returned untouched; an
// insertion in the middle allocates a fresh backing array.
func refsInclude(rs []Ref, r Ref) []Ref {
	if len(rs) == 0 {
		return []Ref{r}
	}
	idx := sort.Search(len(rs), func(n int) bool { return rs[n].Doc >= r.Doc })
	if idx == len(rs) {
		// r sorts after everything: append at the tail.
		return append(rs, r)
	}
	if rs[idx].Doc == r.Doc {
		// Already present; the existing entry wins.
		return rs
	}
	out := make([]Ref, 0, len(rs)+1)
	out = append(out, rs[:idx]...)
	out = append(out, r)
	return append(out, rs[idx:]...)
}
// refsExclude returns rs without the entry whose Doc equals id; when id is
// absent rs is returned as-is. Removal reuses rs's backing array, exactly
// like the original implementation.
func refsExclude(rs []Ref, id int64) []Ref {
	if len(rs) == 0 {
		return nil
	}
	idx := sort.Search(len(rs), func(n int) bool { return rs[n].Doc >= id })
	if idx == len(rs) || rs[idx].Doc != id {
		return rs
	}
	if len(rs) == 1 {
		return nil
	}
	return append(rs[:idx], rs[idx+1:]...)
}
// refsAdd merges two Doc-sorted lists into a new sorted list. When both
// lists contain the same Doc, the entry from as wins. Either input is
// returned directly when the other is empty.
func refsAdd(as, bs []Ref) []Ref {
	if len(as) == 0 {
		return bs
	}
	if len(bs) == 0 {
		return as
	}

	merged := make([]Ref, 0, len(as)+len(bs))
	i, j := 0, 0
	for i < len(as) && j < len(bs) {
		switch {
		case as[i].Doc < bs[j].Doc:
			merged = append(merged, as[i])
			i++
		case as[i].Doc > bs[j].Doc:
			merged = append(merged, bs[j])
			j++
		default:
			// Same Doc on both sides: keep the entry from as.
			merged = append(merged, as[i])
			i++
			j++
		}
	}
	// At most one of these tails is non-empty.
	merged = append(merged, as[i:]...)
	merged = append(merged, bs[j:]...)
	return merged
}
// refsSub returns the entries of as whose Doc does not appear in bs.
// Both inputs must be Doc-sorted; the result is a new list (or as itself
// when bs is empty).
func refsSub(as, bs []Ref) []Ref {
	if len(as) == 0 {
		return nil
	}
	if len(bs) == 0 {
		return as
	}

	kept := make([]Ref, 0, len(as))
	i, j := 0, 0
	for i < len(as) && j < len(bs) {
		switch {
		case as[i].Doc < bs[j].Doc:
			kept = append(kept, as[i])
			i++
		case as[i].Doc > bs[j].Doc:
			j++
		default:
			// Doc present in bs: drop it.
			i++
			j++
		}
	}
	// Everything left in as once bs is exhausted survives.
	return append(kept, as[i:]...)
}
|
package defines
import (
"fmt"
"net/http"
)
//const(
// CODE_IS_MISSING = 1
// CODE_IS_INVALID = 2
// CLINET_ID_MISSING = 3
//)
// Error catalogue for the OAuth2 endpoints. 1xxx codes are request/
// authorization failures, 2xxx codes are token handling failures. Each value
// renders itself as JSON via ErrCode.Error.
//
// NOTE(review): codes "2001" and "2002" are each used twice (generate vs
// save) — presumably unintentional duplication; confirm before any caller
// relies on code uniqueness.
var INTERNAL_ERROR *ErrCode = NewErrCodeWithHttpStatus("1000", "internal error", http.StatusInternalServerError)
var SAVE_DATA_ERROR *ErrCode = NewErrCodeWithHttpStatus("1001", "save data error", http.StatusInternalServerError)
var CODE_IS_MISSING *ErrCode = NewErrCodeWithHttpStatus("1101", "code is missing", http.StatusUnauthorized)
var CODE_IS_INVALID *ErrCode = NewErrCodeWithHttpStatus("1102", "check code error", http.StatusUnauthorized)
var CLINET_ID_MISSING *ErrCode = NewErrCodeWithHttpStatus("1103", "client_id is missing", http.StatusUnauthorized)
var CLINET_ID_NOT_MATCH *ErrCode = NewErrCodeWithHttpStatus("1104", "client_id is Not match", http.StatusUnauthorized)
var CLIENT_SECRET_MISSING *ErrCode = NewErrCodeWithHttpStatus("1105", "client_secret is missing", http.StatusUnauthorized)
var CHECK_CLIENT_ID_ERROR *ErrCode = NewErrCodeWithHttpStatus("1106", "Check client_id error", http.StatusUnauthorized)
var CLINET_SECRET_NOT_MATCH *ErrCode = NewErrCodeWithHttpStatus("1107", "client_secret is Not match", http.StatusUnauthorized)
var REDIRECT_URI_MISSING *ErrCode = NewErrCodeWithHttpStatus("1108", "redirect_uri is missing", http.StatusUnauthorized)
var SCOPE_MISSING *ErrCode = NewErrCodeWithHttpStatus("1109", "scope is missing", http.StatusUnauthorized)
var SCOPE_ERROR *ErrCode = NewErrCodeWithHttpStatus("1110", "scope error", http.StatusUnauthorized)
var PASSWORD_CREDENTIALS_HEAD_MISSING *ErrCode = NewErrCodeWithHttpStatus("1111", "Password Credentials Header: Authorization missing", http.StatusUnauthorized)
var USERNAME_MISSING *ErrCode = NewErrCodeWithHttpStatus("1112", "username is missing", http.StatusUnauthorized)
var PASSWORD_MISSING *ErrCode = NewErrCodeWithHttpStatus("1113", "password is missing", http.StatusUnauthorized)
var PASSWORD_NOT_MATCH *ErrCode = NewErrCodeWithHttpStatus("1114", "password is not match", http.StatusUnauthorized)
var AUTHORIZATION_BASIC_ERROR *ErrCode = NewErrCodeWithHttpStatus("1115", "authorization basic error", http.StatusUnauthorized)
var REFRESH_TOKEN_MISSING *ErrCode = NewErrCodeWithHttpStatus("1116", "refresh token is missing", http.StatusUnauthorized)
var REFRESH_TOKEN_NOT_FOUND *ErrCode = NewErrCodeWithHttpStatus("1117", "refresh token not found", http.StatusUnauthorized)
var USERAUTHORIZE_CHECK_ERROR *ErrCode = NewErrCodeWithHttpStatus("1118", "check user authorize error", http.StatusUnauthorized)
var RESPONSE_TYPE_NOT_SUPPORT *ErrCode = NewErrCodeWithHttpStatus("1201", "response type not support", http.StatusBadRequest)
var ACCESSTOKEN_MISSING *ErrCode = NewErrCodeWithHttpStatus("2000", "Access token: Authorization missing", http.StatusUnauthorized)
var GENERATE_ACCESSTOKEN_ERROR *ErrCode = NewErrCodeWithHttpStatus("2001", "generate access token error", http.StatusInternalServerError)
var GENERATE_REFRESHTOKEN_ERROR *ErrCode = NewErrCodeWithHttpStatus("2002", "generate refresh token error", http.StatusInternalServerError)
var SAVE_ACCESSTOKEN_ERROR *ErrCode = NewErrCodeWithHttpStatus("2001", "save access token error", http.StatusInternalServerError)
var SAVE_REFRESHTOKEN_ERROR *ErrCode = NewErrCodeWithHttpStatus("2002", "save refresh token error", http.StatusInternalServerError)
var AUTHENTICATE_ACCESSTOKEN_ERROR *ErrCode = NewErrCodeWithHttpStatus("2010", "authenticate access token error", http.StatusUnauthorized)
var TOKEN_ERROR *ErrCode = NewErrCodeWithHttpStatus("2003", "解析Token发生错误", http.StatusUnauthorized)
// ErrCode is an API error carrying a code, a human readable message and the
// HTTP status to respond with. It implements the error interface, rendering
// itself as a JSON object.
type ErrCode struct {
	Code string `json:"code"`
	Msg  string `json:"msg"`
	// HttpStatus is the HTTP response status associated with this error.
	HttpStatus int
	// jsonStr caches the rendered JSON representation.
	jsonStr string
}

// NewErrCode builds an ErrCode defaulting the HTTP status to 401 Unauthorized.
func NewErrCode(code string, msg string) *ErrCode {
	return NewErrCodeWithHttpStatus(code, msg, http.StatusUnauthorized)
}

// NewErrCodeWithHttpStatus builds an ErrCode with an explicit HTTP status.
// The values are rendered with %q so the cached string is valid JSON; the
// original used %s, producing unquoted (unparseable) values.
func NewErrCodeWithHttpStatus(code string, msg string, httpstatus int) *ErrCode {
	return &ErrCode{code, msg, httpstatus, fmt.Sprintf("{ \"code\" : %q, \"msg\" : %q }", code, msg)}
}

// format lazily (re)builds the cached JSON string when it is empty, e.g. for
// zero-value ErrCode instances not created through a constructor.
func (errcode *ErrCode) format() *ErrCode {
	if errcode.jsonStr == "" {
		errcode.jsonStr = fmt.Sprintf("{ \"code\" : %q, \"msg\" : %q }", errcode.Code, errcode.Msg)
	}
	return errcode
}

// Error implements the error interface, returning the JSON representation.
func (errcode *ErrCode) Error() string {
	return errcode.format().jsonStr
}
|
package main
import (
"log"
"os"
"github.com/square/p2/pkg/hooks"
"github.com/square/p2/pkg/logging"
"github.com/square/p2/pkg/manifest"
"github.com/square/p2/pkg/pods"
"github.com/square/p2/pkg/types"
"github.com/square/p2/pkg/version"
"gopkg.in/alecthomas/kingpin.v2"
)
// Command-line definition for the hook runner; descriptions surface in
// kingpin's --help output.
var (
	podDir       = kingpin.Arg("pod", "A path to a pod that exists on disk already.").Required().String()
	hookType     = kingpin.Arg("hook-type", "Execute one of the given hook types").Required().String()
	hookDir      = kingpin.Flag("hook-dir", "The root of the hooks").Default(hooks.DefaultPath).String()
	manifestPath = kingpin.Flag("manifest", "The manifest to use (this is useful when we are in the before_install phase)").ExistingFile()
	nodeName     = kingpin.Flag("node-name", "The name of this node (default: hostname)").String()
	podRoot      = kingpin.Flag("pod-root", "The system root for pods").Default(pods.DefaultPath).String()
	sqlitePath   = kingpin.Flag("sqlite", "Path to SQLite database to use as an audit logger.").String()
)
// main executes a single hook type against an existing pod, optionally using
// an explicit manifest (useful during before_install when the pod has no
// current manifest yet) and optionally recording audit logs to SQLite.
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()

	// Default the node name to the local hostname.
	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("error getting node name: %v", err)
		}
		*nodeName = hostname
	}

	// Audit to a file logger by default; upgrade to the SQLite logger only
	// when the database actually opens. BUG FIX: the original installed the
	// SQLite logger and deferred Close even when construction failed, so a
	// broken logger was used (and Close deferred on it) after the error.
	var auditLogger hooks.AuditLogger = hooks.NewFileAuditLogger(&logging.DefaultLogger)
	if *sqlitePath != "" {
		al, err := hooks.NewSQLiteAuditLogger(*sqlitePath, &logging.DefaultLogger)
		if err != nil {
			logging.DefaultLogger.Errorf("Unable to connect sqlite database at %s, printing audit logs to STDOUT %v", *sqlitePath, err)
		} else {
			defer al.Close()
			auditLogger = al
		}
	}

	dir := hooks.NewContext(*hookDir, *podRoot, &logging.DefaultLogger, auditLogger)

	// Shadow the raw flag with its parsed, validated hook type.
	hookType, err := hooks.AsHookType(*hookType)
	if err != nil {
		log.Fatalln(err)
	}

	pod, err := pods.PodFromPodHome(types.NodeName(*nodeName), *podDir)
	if err != nil {
		log.Fatalln(err)
	}

	// Prefer an explicitly supplied manifest; fall back to the pod's
	// current manifest.
	var podManifest manifest.Manifest
	if *manifestPath != "" {
		podManifest, err = manifest.FromPath(*manifestPath)
		if err != nil {
			log.Fatalln(err)
		}
	} else {
		podManifest, err = pod.CurrentManifest()
		if err != nil {
			log.Fatalln(err)
		}
	}

	hooksRequired := []string{}

	log.Printf("About to run %s hooks for pod %s\n", hookType, pod.Home())
	err = dir.RunHookType(hookType, pod, podManifest, hooksRequired)
	if err != nil {
		log.Fatalln(err)
	}
}
|
package gui
import (
"math"
"math/rand"
"github.com/gopherjs/gopherjs/js"
"github.com/nequilich/gocto"
)
// CanvasBody groups the document body element with the named canvases and
// their 2D drawing contexts used by the GUI layers.
type CanvasBody struct {
	body       *gocto.Body              // the <body> element canvases are appended to.
	canvasMap  map[string]*gocto.Canvas // canvases indexed by layer name.
	contextMap map[string]*js.Object    // 2D contexts indexed by layer name.
}
// resizeAllCanvas stretches every registered canvas to fill the current
// browser window dimensions.
func (cb CanvasBody) resizeAllCanvas() {
	for _, c := range cb.canvasMap {
		c.Set("width", gocto.GetWindowInnerWidth())
		c.Set("height", gocto.GetWindowInnerHeight())
	}
}
// refreshUi redraws the grid and menu layers onto their 2D contexts.
func (cb CanvasBody) refreshUi() {
	gridCtx := cb.contextMap["gridContext"]
	menuCtx := cb.contextMap["menuContext"]
	drawGrid(gridCtx)
	drawMenu(menuCtx)
}
// SetUpInterface builds the three stacked canvases (background, grid, menu),
// attaches them to the document body, wires a window resize handler and
// performs the initial draw.
func SetUpInterface() {
	cb := CanvasBody{
		&gocto.Body{gocto.GetElementById("canvasBody")},
		make(map[string]*gocto.Canvas),
		make(map[string]*js.Object),
	}

	background := &gocto.Canvas{gocto.CreateElement("canvas")}
	grid := &gocto.Canvas{gocto.CreateElement("canvas")}
	menu := &gocto.Canvas{gocto.CreateElement("canvas")}

	cb.canvasMap["backgroundCanvas"] = background
	cb.canvasMap["gridCanvas"] = grid
	cb.canvasMap["menuCanvas"] = menu

	cb.body.AppendChild(background)
	cb.body.AppendChild(grid)
	cb.body.AppendChild(menu)

	cb.contextMap["backgroundContext"] = background.GetContext2d()
	cb.contextMap["gridContext"] = grid.GetContext2d()
	cb.contextMap["menuContext"] = menu.GetContext2d()

	// Keep the layers sized to the window and redraw on every resize.
	gocto.AddEventListener("resize", func() {
		cb.resizeAllCanvas()
		cb.refreshUi()
	})

	cb.resizeAllCanvas()
	cb.refreshUi()
}
// drawGrid tiles the context with a field of hexagons sized from
// polygonRadius, covering the window plus one extra row/column.
func drawGrid(context *js.Object) {
	var polygonSides float64 = 6
	var polygonRadius float64 = 32
	// Horizontal/vertical spacing between polygon centres, derived from the
	// circumradius.
	var xAdjust float64 = polygonRadius * math.Cos(math.Pi/polygonSides)
	var yAdjust float64 = polygonRadius * math.Sin(math.Pi/polygonSides) * (polygonSides / 2)
	// +1 so partially visible polygons at the edges are still drawn.
	heightInPolygons := (gocto.GetWindowInnerHeight() / int(polygonRadius*1.5)) + 1
	widthInPolygons := (gocto.GetWindowInnerWidth() / (int(xAdjust) * 2)) + 1
	context.Set("strokeStyle", "black")
	context.Set("lineWidth", 2)
	// Each row is shifted right by xAdjust, so start the columns negative to
	// keep the left edge covered.
	xStart := -(heightInPolygons / 2)
	for y := 0; y < heightInPolygons; y++ {
		for x := xStart; x < widthInPolygons; x++ {
			var xPixel float64 = float64(x)*xAdjust*2 + (float64(y) * xAdjust)
			var yPixel float64 = float64(y) * yAdjust
			drawPolygon(context, xPixel, yPixel, polygonSides, polygonRadius)
		}
	}
}
// drawPolygon fills and strokes a regular polygon with the given number of
// sides and circumradius, anchored at (xPixel, yPixel), using a random fill
// colour. Vertices are offset by pi/sides to orient the polygon.
func drawPolygon(context *js.Object, xPixel float64, yPixel float64, sides float64, radius float64) {
	context.Set("fillStyle", getRandomColour())
	context.Call("beginPath")
	context.Call("moveTo",
		xPixel+radius*math.Cos(math.Pi/sides),
		yPixel+radius*math.Sin(math.Pi/sides))
	// sides+1 iterations so the path returns to the starting vertex.
	for i := 0; float64(i) < sides+1; i++ {
		context.Call("lineTo",
			xPixel+radius*math.Cos(float64(i)*2*math.Pi/sides+math.Pi/sides),
			yPixel+radius*math.Sin(float64(i)*2*math.Pi/sides+math.Pi/sides))
	}
	context.Call("fill")
	context.Call("stroke")
	context.Call("closePath")
}
// getRandomColour picks one of three fill colours at random using the
// default math/rand source.
func getRandomColour() string {
	switch rand.Intn(3) {
	case 1:
		return "green"
	case 2:
		return "blue"
	default:
		return "yellow"
	}
}
// drawMenu draws the left and right menu images anchored to the bottom
// corners of the window.
func drawMenu(context *js.Object) {
	gocto.DrawImage(context, "gui/menu-elements/menu_test_L.png", 0, gocto.GetWindowInnerHeight()-320)
	gocto.DrawImage(context, "gui/menu-elements/menu_test_R.png", gocto.GetWindowInnerWidth()-500, gocto.GetWindowInnerHeight()-125)
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"os"
"sort"
"strings"
"github.com/BurntSushi/toml"
"github.com/codegangsta/cli"
"github.com/natefinch/atomic"
"github.com/pborman/getopt"
)
// Version is the build version string; presumably overridden at build time
// via -ldflags — confirm against the build scripts.
var Version = "No Version Provided"

// Config holds the emoji configuration.
type Config struct {
	Words map[string]string `toml:"commitKinds"`
}

// pwd caches the working directory, resolved once in init.
var pwd string

// hook is the shell snippet appended to .git/hooks/commit-msg; it pipes the
// commit message file through lipstick in place.
var hook = "\n# simplifies emoji usage \nlipstick \"`cat $1`\" > \"$1\""
// init resolves the current working directory; the program cannot operate
// without it, so failure is fatal.
func init() {
	var err error
	pwd, err = os.Getwd()
	if err != nil {
		log.Fatal(err)
	}
}
// install adds the hook for this program to the local git repo by appending
// the hook snippet to .git/hooks/commit-msg (creating it if necessary).
func install() {
	if _, err := os.Stat(pwd + "/.git"); err != nil {
		log.Fatal("fatal: Not a git repository (or any of the parent directories): .git")
	}

	f, err := os.OpenFile(pwd+"/.git/hooks/commit-msg", os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0777)
	// FIX: check the open error before deferring Close or writing; the
	// original deferred Close and wrote through a nil file on failure.
	if err != nil {
		log.Fatal("fatal: unable to create the commit-msg hook!", err)
	}
	defer f.Close()

	if _, werr := f.WriteString(hook); werr != nil {
		log.Fatal("fatal: unable to create the commit-msg hook!", werr)
	}

	log.Println("created hook for ", pwd)
}
// uninstall removes the lipstick snippet from the commit-msg hook, leaving
// any other hook content in place.
func uninstall() {
	if _, err := os.Stat(pwd + "/.git"); err != nil {
		log.Fatal("fatal: Not a git repository (or any of the parent directories): .git")
	}

	f, err := os.Open(pwd + "/.git/hooks/commit-msg")
	// FIX: check the open error before reading; the original read from a
	// potentially nil file and only checked errors afterwards.
	if err != nil {
		log.Fatal("fatal: unable to remove commit-msg hook", err)
	}

	d, rerr := ioutil.ReadAll(f)
	f.Close()

	if rerr != nil {
		log.Fatal("fatal: unable to remove commit-msg hook", rerr)
	}

	// Strip exactly the two lines install() appended. (Renamed from `new`,
	// which shadowed the builtin.)
	cleaned := strings.Replace(string(d), "\n# simplifies emoji usage", "", -1)
	cleaned = strings.Replace(cleaned, "\nlipstick \"`cat $1`\" > \"$1\"", "", -1)

	// Atomic write avoids leaving a half-written hook behind.
	if err := atomic.WriteFile(pwd+"/.git/hooks/commit-msg", strings.NewReader(cleaned)); err != nil {
		log.Fatal("fatal: unable to remove commit-msg hook", err)
	}
}
// Run is our main function: it joins the CLI arguments into a single
// message, applies the configured emoji replacements and prints the result.
func Run(c *cli.Context) {
	getopt.Parse()
	args := getopt.Args()
	msg := strings.Join(args, " ")
	cfg, err := loadEmojiMap()
	if err != nil {
		log.Fatal("fatal: could not load config", err)
	}
	if msg != "" {
		fmt.Println(replace(cfg, msg))
	} else {
		log.Fatal("fatal: no message given")
	}
}
// loadEmojiMap loads the emoji configuration, preferring a local .lipstickrc
// and falling back to the built-in default config.
func loadEmojiMap() (*Config, error) {
	cfg := &Config{}

	_, localErr := loadLocalConfig(cfg)
	if localErr == nil {
		return cfg, nil
	}

	if _, err := loadDefaultConfig(cfg); err != nil {
		return nil, err
	}

	return cfg, nil
}
// loadLocalConfig attempts to load the local .lipstickrc config file from
// the working directory into cfg.
func loadLocalConfig(cfg *Config) (*Config, error) {
	// Pass cfg directly: it is already a pointer, so taking its address gave
	// toml a redundant **Config indirection.
	if _, err := toml.DecodeFile(pwd+"/.lipstickrc", cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}
// loadDefaultConfig attempts to load the builtin config file from the
// bindata-generated Asset into cfg.
func loadDefaultConfig(cfg *Config) (*Config, error) {
	data, err := Asset("config/lipstickrc.toml")
	if err != nil {
		return nil, err
	}
	// Pass cfg directly: it is already a pointer, so taking its address gave
	// toml a redundant **Config indirection.
	if _, err := toml.Decode(string(data), cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}
// replace substitutes every ":key:" token found in msg with the value
// configured for that key.
func replace(cfg *Config, msg string) string {
	for word, emoji := range cfg.Words {
		token := ":" + word + ":"
		msg = strings.Replace(msg, token, emoji, -1)
	}
	return msg
}
// createConfig writes the default .lipstickrc to a file, refusing to
// overwrite an existing one.
func createConfig() {
	if _, err := os.Stat(".lipstickrc"); !os.IsNotExist(err) {
		log.Fatal("fatal: .lipstickrc exists")
	}
	// Asset is generated by bindata and returns the embedded default config.
	data, err := Asset("config/lipstickrc.toml")
	if err != nil {
		log.Fatal("fatal: could not load default .lipstickrc", err)
	}
	r := strings.NewReader(string(data))
	// Atomic write avoids leaving a partially written config behind.
	if err := atomic.WriteFile(".lipstickrc", r); err != nil {
		log.Fatal("fatal: could not generate .lipstickrc", err)
	}
}
// listAvailable prints every configured mapping in alphabetical order, with
// the keys padded to a common column width.
func listAvailable() {
	cfg, err := loadEmojiMap()
	if err != nil {
		log.Fatal("fatal: could not load config", err)
	}

	fmt.Println()

	// Collect the keys while tracking the widest one for alignment.
	longest := 0
	names := make([]string, 0, len(cfg.Words))
	for name := range cfg.Words {
		names = append(names, name)
		if len(name) > longest {
			longest = len(name)
		}
	}

	sort.Strings(names)

	for _, name := range names {
		// Pad so every value starts in the same column, plus a 2-space gap.
		padding := (longest - len(name)) + 2
		fmt.Println(rightPad(":"+name+":", " ", padding), cfg.Words[name])
	}

	fmt.Println()
}
// rightPad returns s followed by pLen copies of padStr. A non-positive pLen
// returns s unchanged; the original would panic inside strings.Repeat for a
// negative count.
func rightPad(s string, padStr string, pLen int) string {
	if pLen <= 0 {
		return s
	}
	return s + strings.Repeat(padStr, pLen)
}
// main wires up the CLI: the default action rewrites a commit message, with
// subcommands to install/uninstall the git hook, generate a config file and
// list the available mappings.
func main() {
	app := cli.NewApp()
	app.Name = "lipstick"
	app.Usage = "Make your git commits more expressive"
	app.Action = Run
	app.Version = Version
	app.Commands = []cli.Command{
		{
			Name:    "install",
			Aliases: []string{"i"},
			Usage:   "initialize the git hook",
			Action: func(c *cli.Context) {
				install()
			},
		}, {
			Name:    "uninstall",
			Aliases: []string{"u"},
			Usage:   "remove the git hook",
			Action: func(c *cli.Context) {
				uninstall()
			},
		}, {
			Name:    "initialize",
			Aliases: []string{"init"},
			Usage:   "creates a .lipstickrc file if one does not exist",
			Action: func(c *cli.Context) {
				createConfig()
			},
		}, {
			Name:    "list",
			Aliases: []string{"l"},
			Usage:   "lists the available lipstick mappings",
			Action: func(c *cli.Context) {
				listAvailable()
			},
		},
	}
	app.Run(os.Args)
}
|
// Copyright 2021 Akamai Technologies, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collectors
import (
"fmt"
client "github.com/akamai/AkamaiOPEN-edgegrid-golang/client-v1"
gtm "github.com/akamai/AkamaiOPEN-edgegrid-golang/reportsgtm-v1" // Note: imports ./configgtm-v1_3
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"strconv"
"time"
)
var (
	// gtmLivenessTrafficExporter is the package-level exporter instance,
	// (re)initialised by NewLivenessTrafficCollector.
	gtmLivenessTrafficExporter GTMLivenessTrafficExporter
	// durationBuckets are the histogram buckets for datacenter error
	// durations. NOTE(review): units presumed seconds — confirm against the
	// GTM report's Duration field.
	durationBuckets = []float64{60, 1800, 3600, 7200, 14400}
)
// GTMLivenessTrafficExporter exports Akamai GTM property liveness error
// metrics to Prometheus.
type GTMLivenessTrafficExporter struct {
	GTMConfig                GTMMetricsConfig
	LivenessMetricPrefix     string                          // prefix/namespace for all liveness metric names.
	LivenessLookbackDuration time.Duration                   // MaxAge used for the error summaries.
	LastTimestamp            map[string]map[string]time.Time // index by domain, liveness
	LivenessRegistry         *prometheus.Registry            // registry for histograms/summaries created lazily.
}
func NewLivenessTrafficCollector(r *prometheus.Registry, gtmMetricsConfig GTMMetricsConfig, gtmMetricPrefix string, tstart time.Time, lookbackDuration time.Duration) *GTMLivenessTrafficExporter {
gtmLivenessTrafficExporter = GTMLivenessTrafficExporter{GTMConfig: gtmMetricsConfig, LivenessLookbackDuration: lookbackDuration}
gtmLivenessTrafficExporter.LivenessMetricPrefix = gtmMetricPrefix + "property_liveness_errors"
gtmLivenessTrafficExporter.LivenessLookbackDuration = lookbackDuration
gtmLivenessTrafficExporter.LivenessRegistry = r
// Populate LastTimestamp per domain, liveness. Start time applies to all.
domainMap := make(map[string]map[string]time.Time)
for _, domain := range gtmMetricsConfig.Domains {
tStampMap := make(map[string]time.Time) // index by property name
livenessDurationHistogramMap[domain.Name] = make(map[string]map[int]prometheus.Histogram)
livenessErrorsSummaryMap[domain.Name] = make(map[string]map[int]prometheus.Summary)
for _, prop := range domain.Liveness {
livenessDurationHistogramMap[domain.Name][prop.PropertyName] = make(map[int]prometheus.Histogram)
livenessErrorsSummaryMap[domain.Name][prop.PropertyName] = make(map[int]prometheus.Summary)
tStampMap[prop.PropertyName] = tstart
}
domainMap[domain.Name] = tStampMap
}
gtmLivenessTrafficExporter.LastTimestamp = domainMap
return >mLivenessTrafficExporter
}
// Metric maps indexed by domain, then property, then datacenter id; entries
// are created lazily by getDatacenterHistogramMetrics.
var livenessDurationHistogramMap = make(map[string]map[string]map[int]prometheus.Histogram)
var livenessErrorsSummaryMap = make(map[string]map[string]map[int]prometheus.Summary)
// getDatacenterHistogramMetrics returns (creating and registering on first
// use) the duration histogram and error summary for a given domain, property
// and datacenter, keyed "duration" and "errors" in the returned map.
func (l *GTMLivenessTrafficExporter) getDatacenterHistogramMetrics(domain, property string, dcid int) map[string]interface{} {
	histMap := make(map[string]interface{})
	if histo, ok := livenessDurationHistogramMap[domain][property][dcid]; ok {
		histMap["duration"] = histo
	} else {
		// doesn't exist. need to create
		labels := prometheus.Labels{"domain": domain, "property": property, "datacenter": strconv.Itoa(dcid)}
		livenessDurationHistogramMap[domain][property][dcid] = prometheus.NewHistogram(
			prometheus.HistogramOpts{
				Namespace:   gtmLivenessTrafficExporter.LivenessMetricPrefix,
				Name:        "duration_per_datacenter_histogram",
				Help:        "Histogram of datacenter error duration (per domain and property)",
				ConstLabels: labels,
				Buckets:     durationBuckets,
			})
		l.LivenessRegistry.MustRegister(livenessDurationHistogramMap[domain][property][dcid])
		histMap["duration"] = livenessDurationHistogramMap[domain][property][dcid]
	}
	if esum, ok := livenessErrorsSummaryMap[domain][property][dcid]; ok {
		histMap["errors"] = esum
	} else {
		// doesn't exist. need to create
		labels := prometheus.Labels{"domain": domain, "property": property, "datacenter": strconv.Itoa(dcid)}
		livenessErrorsSummaryMap[domain][property][dcid] = prometheus.NewSummary(
			prometheus.SummaryOpts{
				Namespace:   gtmLivenessTrafficExporter.LivenessMetricPrefix,
				Name:        "errors_per_datacenter_summary",
				Help:        "Summary of datacenter errors (per domain and property)",
				ConstLabels: labels,
				// Summaries age out after the configured lookback window.
				MaxAge: gtmLivenessTrafficExporter.LivenessLookbackDuration,
				BufCap: prometheus.DefBufCap * 2,
			})
		l.LivenessRegistry.MustRegister(livenessErrorsSummaryMap[domain][property][dcid])
		histMap["errors"] = livenessErrorsSummaryMap[domain][property][dcid]
	}
	return histMap
}
// Describe function. Implements prometheus.Collector by emitting a single
// descriptor for the exporter's metric family.
func (l *GTMLivenessTrafficExporter) Describe(ch chan<- *prometheus.Desc) {
	ch <- prometheus.NewDesc(l.LivenessMetricPrefix, "Akamai GTM Property Liveness Errors", nil, nil)
}
// Collect function. Fetches the liveness errors report for every configured
// domain/property since the last collected timestamp, emits per-datacenter
// failure counter and duration gauge metrics, and feeds the long-lived
// histogram/summary metrics. Implements prometheus.Collector.
func (l *GTMLivenessTrafficExporter) Collect(ch chan<- prometheus.Metric) {
	log.Debugf("Entering GTM Property Liveness Errors Collect")
	endtime := time.Now().UTC() // Use same current time for all zones
	// Collect metrics for each domain and liveness
	for _, domain := range l.GTMConfig.Domains {
		log.Debugf("Processing domain %s", domain.Name)
		for _, prop := range domain.Liveness {
			// get last timestamp recorded. make sure diff > 5 mins.
			// NOTE(review): the code advances by one minute, not five —
			// confirm whether the comment or the increment is stale.
			lasttime := l.LastTimestamp[domain.Name][prop.PropertyName].Add(time.Minute)
			log.Debugf("Fetching liveness errors Report for property %s in domain %s.", prop.PropertyName, domain.Name)
			livenessTrafficReport, err := retrieveLivenessTraffic(domain.Name, prop.PropertyName, prop.AgentIP, prop.TargetIP, lasttime)
			if err != nil {
				apierr, ok := err.(client.APIError)
				if ok && apierr.Status == 500 {
					log.Warnf("Unable to get liveness errors report for property %s. Internal error ... Skipping.", prop.PropertyName)
					continue
				}
				log.Errorf("Unable to get liveness report for property %s ... Skipping. Error: %s", prop.PropertyName, err.Error())
				continue
			}
			if len(livenessTrafficReport.DataRows) < 1 && endtime.Day() != lasttime.Day() {
				// We've probably crossed a day boundary. Bump last time
				lasttime = lasttime.Add(23*time.Hour + 59*time.Minute + 59*time.Second)
				// get updated report
				livenessTrafficReport, err = retrieveLivenessTraffic(domain.Name, prop.PropertyName, prop.AgentIP, prop.TargetIP, lasttime)
				if err != nil {
					apierr, ok := err.(client.APIError)
					if ok && apierr.Status == 500 {
						log.Warnf("Unable to get liveness errors report for property %s. Internal error ... Skipping.", prop.PropertyName)
						continue
					}
					if ok && apierr.Status == 400 {
						log.Warnf("Unable to get liveness errors report for property %s. ... Skipping.", prop.PropertyName)
						log.Errorf("%s", err.Error())
						continue
					}
					log.Errorf("Unable to get liveness report for property %s ... Skipping. Error: %s", prop.PropertyName, err.Error())
					continue
				}
			}
			log.Debugf("Traffic Metadata: [%v]", livenessTrafficReport.Metadata)
			for _, reportInstance := range livenessTrafficReport.DataRows {
				instanceTimestamp, err := parseTimeString(reportInstance.Timestamp, GTMTrafficLongTimeFormat)
				if err != nil {
					log.Errorf("Instance timestamp invalid ... Skipping. Error: %s", err.Error())
					continue
				}
				// Skip rows already processed in a previous collection cycle.
				if !instanceTimestamp.After(l.LastTimestamp[domain.Name][prop.PropertyName]) {
					log.Debugf("Instance timestamp: [%v]. Last timestamp: [%v]", instanceTimestamp, l.LastTimestamp[domain.Name][prop.PropertyName])
					log.Warnf("Attempting to re process report instance: [%v]. Skipping.", reportInstance)
					continue
				}
				// See if we missed an interval. Log warning for low
				log.Debugf("Instance timestamp: [%v]. Last timestamp: [%v]", instanceTimestamp, l.LastTimestamp[domain.Name][prop.PropertyName])
				var baseLabels = []string{"domain", "property", "datacenter"}
				for _, instanceDC := range reportInstance.Datacenters {
					// create metrics for datacenters in property per timestamp
					var tsLabels = baseLabels
					labelVals := []string{domain.Name, prop.PropertyName, strconv.Itoa(instanceDC.DatacenterID)}
					// Optional labels are only added when configured on the
					// property and matching the row.
					if prop.AgentIP == instanceDC.AgentIP {
						tsLabels = append(tsLabels, "agentip")
						labelVals = append(labelVals, instanceDC.AgentIP)
					}
					if prop.TargetIP == instanceDC.TargetIP {
						tsLabels = append(tsLabels, "targetip")
						labelVals = append(labelVals, instanceDC.TargetIP)
					}
					if prop.ErrorCode {
						tsLabels = append(tsLabels, "errorcode")
						codestring := fmt.Sprintf("%v", instanceDC.ErrorCode)
						labelVals = append(labelVals, codestring)
					}
					ts := instanceTimestamp.Format(time.RFC3339)
					if l.GTMConfig.TSLabel {
						tsLabels = append(tsLabels, "interval_timestamp")
						labelVals = append(labelVals, ts)
					}
					desc := prometheus.NewDesc(prometheus.BuildFQName(l.LivenessMetricPrefix, "", "datacenter_failures"), "Number of datacenter failures (per domain, property, datacenter)", tsLabels, nil)
					log.Debugf("Creating error failures counter metric. Domain: %s, Property: %s, Datacenter: %d, Timestamp: %v", domain.Name, prop.PropertyName, instanceDC.DatacenterID, ts)
					var errorsmetric, durmetric prometheus.Metric
					errorsmetric = prometheus.MustNewConstMetric(
						desc, prometheus.CounterValue, 1, labelVals...)
					// Attach the interval timestamp unless explicitly
					// disabled via config.
					if l.GTMConfig.UseTimestamp != nil && !*l.GTMConfig.UseTimestamp {
						ch <- errorsmetric
					} else {
						ch <- prometheus.NewMetricWithTimestamp(instanceTimestamp, errorsmetric)
					}
					desc = prometheus.NewDesc(prometheus.BuildFQName(l.LivenessMetricPrefix, "", "datacenter_failure_duration"), "Datacenter falure duration (per domain, property, datacenter)", tsLabels, nil)
					log.Debugf("Creating failure duration gauge metric. Domain: %s, Property: %s, Datacenter: %d, Timestamp: %v", domain.Name, prop.PropertyName, instanceDC.DatacenterID, ts)
					durmetric = prometheus.MustNewConstMetric(
						desc, prometheus.GaugeValue, float64(instanceDC.Duration), labelVals...)
					if l.GTMConfig.UseTimestamp != nil && !*l.GTMConfig.UseTimestamp {
						ch <- durmetric
					} else {
						ch <- prometheus.NewMetricWithTimestamp(instanceTimestamp, durmetric)
					}
					// Feed the lazily-created histogram/summary pair.
					maps := l.getDatacenterHistogramMetrics(domain.Name, prop.PropertyName, instanceDC.DatacenterID)
					maps["duration"].(prometheus.Histogram).Observe(float64(instanceDC.Duration))
					maps["errors"].(prometheus.Summary).Observe(float64(1))
				} // datacenter end
				// Update last timestamp processed
				if instanceTimestamp.After(l.LastTimestamp[domain.Name][prop.PropertyName]) {
					log.Debugf("Updating Last Timestamp from %v TO %v", l.LastTimestamp[domain.Name][prop.PropertyName], instanceTimestamp)
					l.LastTimestamp[domain.Name][prop.PropertyName] = instanceTimestamp
				}
				// only process one each interval!
				break
			} // interval end
		} // liveness end
	} // domain end
}
// retrieveLivenessTraffic fetches the liveness errors report for a property,
// optionally filtered by target IP (takes priority) or agent IP, clamping
// the requested start time into the API's valid liveness test window. Rows
// are returned sorted by timestamp.
func retrieveLivenessTraffic(domain, prop, agentID, targetID string, start time.Time) (*LivenessErrorsResponse, error) {
	qargs := make(map[string]string)
	if len(targetID) > 0 {
		qargs["targetId"] = targetID // Takes priority
	}
	if len(agentID) > 0 {
		if len(targetID) > 0 {
			log.Warnf("Both agentId and targetId filters set. Using targetId ONLY")
		} else {
			qargs["agentId"] = agentID
		}
	}
	// Get valid Traffic Window
	var err error
	livenessTrafficWindow, err := gtm.GetLivenessTestsWindow()
	if err != nil {
		return nil, err
	}
	// Make sure provided start and end are in range
	if livenessTrafficWindow.StartTime.Before(start) {
		if livenessTrafficWindow.EndTime.After(start) {
			qargs["date"], err = convertTimeFormat(start, GTMTrafficDateFormat)
		} else {
			// Requested start is past the window; clamp to the window end.
			qargs["date"], err = convertTimeFormat(livenessTrafficWindow.EndTime, GTMTrafficDateFormat)
		}
	} else {
		// Requested start predates the window; clamp to the window start.
		qargs["date"], err = convertTimeFormat(livenessTrafficWindow.StartTime, GTMTrafficDateFormat)
	}
	if err != nil {
		return nil, err
	}
	resp, err := GetLivenessErrorsReport(domain, prop, qargs)
	if err != nil {
		return nil, err
	}
	/*
		// DEBUG
		meta := &LivenessTMeta{
			Date:     "2016-11-23",
			Domain:   "example.akadns.net",
			Property: "www",
			Uri:      "https://akab-xxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx.luna.akamaiapis.net/gtm-api/v1/reports/liveness-tests/domains/example.akadns.net/properties/www?date=2016-11-23"}
		dcptr := &LivenessDRow{
			DatacenterID:      3201,
			AgentIP:           "204.1.136.239",
			TestName:          "Our defences",
			ErrorCode:         3101,
			Duration:          0,
			Nickname:          "Winterfell",
			TrafficTargetName: "Winterfell - 1.2.3.4",
			TargetIP:          "1.2.3.4"}
		ldrows := []*LivenessDRow{dcptr}
		ldrowptr := &LivenessTData{Timestamp: time.Now().UTC().Add(-10*time.Minute).Format(time.RFC3339), Datacenters: ldrows}
		resp = &LivenessErrorsResponse{
			Metadata: meta,
			DataRows: []*LivenessTData{ldrowptr},
		}
		// END DEBUG
	*/
	//DataRows is list of pointers
	sortLivenessDataRowsByTimestamp(resp.DataRows)
	return resp, nil
}
|
package order
import (
"time"
)
// Order is the definition of purchase table in database.
type Order struct {
	ID         uint `gorm:"primary_key" json:"id" valid:"-"`
	NumberSold int  `gorm:"not null" json:"number_sold" valid:"numeric,required"`
	SellPrice  int  `gorm:"not null" json:"sell_price" valid:"numeric,required"`
	TotalPrice int  `gorm:"not null" json:"total_price" valid:"numeric"`
	// NOTE(review): the json tag "receipt_number" does not match the field
	// name Notes — confirm whether this is intentional.
	Notes          string     `gorm:"type:varchar(100)" json:"receipt_number" valid:"-"`
	ProductID      uint       `json:"product_id" valid:"numeric,required" sql:"type:uint REFERENCES products(id)"`
	ProductStockID uint       `json:"product_stock_id" valid:"-" sql:"type:uint REFERENCES product_stocks(id)"`
	CreatedAt      time.Time  `json:"created_at" valid:"-"`
	UpdatedAt      time.Time  `json:"updated_at" valid:"-"`
	DeletedAt      *time.Time `json:"deleted_at" valid:"-"` // soft-delete marker (gorm convention).
}
|
//
// Copyright 2020 The AVFS authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package memidm
import (
"sync"
)
// Minimum id boundaries, matching the common Unix convention of starting
// regular accounts at 1000.
const (
	// minUid is the minimum uid for a user.
	minUid = 1000

	// minGid is the minimum gid for a group.
	minGid = 1000
)
// MemIdm implements an in memory identity manager using the avfs.IdentityMgr interface.
type MemIdm struct {
	groupsByName groupsByName // groups indexed by name.
	groupsById   groupsById   // groups indexed by gid.
	usersByName  usersByName  // users indexed by name.
	usersById    usersById    // users indexed by uid.
	// maxGid, maxUid: NOTE(review): presumably the highest ids issued so
	// far — the mutating methods are not visible here; confirm.
	maxGid, maxUid int
	// grpMu, usrMu: NOTE(review): presumably guard the group and user maps
	// respectively — confirm against the methods.
	grpMu, usrMu sync.RWMutex
}
// groupsByName is the map of groups by group name.
type groupsByName map[string]*Group

// groupsById is the map of the groups by group id.
type groupsById map[int]*Group

// usersByName is the map of the users by user name.
type usersByName map[string]*User

// usersById is the map of the users by user id.
type usersById map[int]*User

// User is the implementation of avfs.UserReader.
type User struct {
	name string // user name.
	uid  int    // user id.
	gid  int    // group id (presumably the primary group — confirm).
}

// Group is the implementation of avfs.GroupReader.
type Group struct {
	name string // group name.
	gid  int    // group id.
}
|
package storage
import (
"context"
"errors"
"fmt"
"strconv"
"time"
"github.com/jmoiron/sqlx"
"github.com/authelia/authelia/v4/internal/model"
"github.com/authelia/authelia/v4/internal/utils"
)
// SchemaTables returns a list of tables present in the database, scoped to
// p.schema when one is configured.
func (p *SQLProvider) SchemaTables(ctx context.Context) (tables []string, err error) {
	var rows *sqlx.Rows

	switch p.schema {
	case "":
		rows, err = p.db.QueryxContext(ctx, p.sqlSelectExistingTables)
	default:
		rows, err = p.db.QueryxContext(ctx, p.sqlSelectExistingTables, p.schema)
	}

	if err != nil {
		return tables, err
	}

	// Close errors are logged rather than returned; the row data has already
	// been consumed by then.
	defer func() {
		if err := rows.Close(); err != nil {
			p.log.Errorf(logFmtErrClosingConn, err)
		}
	}()

	var table string

	for rows.Next() {
		err = rows.Scan(&table)
		if err != nil {
			return []string{}, err
		}

		tables = append(tables, table)
	}

	return tables, nil
}
// SchemaVersion returns the version of the schema: -2 on error, -1 for the
// legacy pre1 schema, 0 for an empty/unrecognised database, otherwise the
// target version of the latest applied migration.
func (p *SQLProvider) SchemaVersion(ctx context.Context) (version int, err error) {
	tables, err := p.SchemaTables(ctx)
	if err != nil {
		return -2, err
	}

	if len(tables) == 0 {
		return 0, nil
	}

	if utils.IsStringInSlice(tableMigrations, tables) {
		migration, err := p.schemaLatestMigration(ctx)
		if err != nil {
			return -2, err
		}

		return migration.After, nil
	}

	// No migrations table: distinguish a pre1 schema from anything else.
	var tablesV1 = []string{tableDuoDevices, tableEncryption, tableIdentityVerification, tableMigrations, tableTOTPConfigurations}

	if utils.IsStringSliceContainsAll(tablesPre1, tables) {
		if utils.IsStringSliceContainsAny(tablesV1, tables) {
			return -2, errors.New("pre1 schema contains v1 tables it shouldn't contain")
		}

		return -1, nil
	}

	return 0, nil
}
// SchemaLatestVersion returns the latest schema version available for
// migration with this provider.
func (p *SQLProvider) SchemaLatestVersion() (version int, err error) {
	return latestMigrationVersion(p.name)
}
// SchemaMigrationsUp returns a list of migrations up available between the
// current version and the provided version. A version of 0 means the latest.
func (p *SQLProvider) SchemaMigrationsUp(ctx context.Context, version int) (migrations []model.SchemaMigration, err error) {
	current, err := p.SchemaVersion(ctx)
	if err != nil {
		return migrations, err
	}

	// Treat 0 as "migrate to the latest known schema".
	target := version
	if target == 0 {
		target = SchemaLatest
	}

	if current >= target {
		return migrations, ErrNoAvailableMigrations
	}

	return loadMigrations(p.name, current, target)
}
// SchemaMigrationsDown returns a list of migrations down available between
// the current version and the provided version.
func (p *SQLProvider) SchemaMigrationsDown(ctx context.Context, version int) (migrations []model.SchemaMigration, err error) {
	current, err := p.SchemaVersion(ctx)
	if err != nil {
		return migrations, err
	}

	// Only a strictly lower target has migrations to apply.
	if current > version {
		return loadMigrations(p.name, current, version)
	}

	return migrations, ErrNoAvailableMigrations
}
// SchemaMigrationHistory returns migration history rows recorded in the
// migrations table.
func (p *SQLProvider) SchemaMigrationHistory(ctx context.Context) (migrations []model.Migration, err error) {
	rows, err := p.db.QueryxContext(ctx, p.sqlSelectMigrations)
	if err != nil {
		return nil, err
	}

	// Close errors are logged rather than returned.
	defer func() {
		if err := rows.Close(); err != nil {
			p.log.Errorf(logFmtErrClosingConn, err)
		}
	}()

	var migration model.Migration

	for rows.Next() {
		err = rows.StructScan(&migration)
		if err != nil {
			return nil, err
		}

		migrations = append(migrations, migration)
	}

	return migrations, nil
}
// SchemaMigrate migrates from the current version to the provided version.
// All providers except MySQL run the migrations inside a transaction
// (presumably because MySQL DDL commits implicitly — confirm).
func (p *SQLProvider) SchemaMigrate(ctx context.Context, up bool, version int) (err error) {
	var (
		tx   *sqlx.Tx
		conn SQLXConnection
	)

	if p.name != providerMySQL {
		if tx, err = p.db.BeginTxx(ctx, nil); err != nil {
			return fmt.Errorf("failed to begin transaction: %w", err)
		}

		conn = tx
	} else {
		conn = p.db
	}

	// rollback discards the transaction (when there is one) on early exits.
	rollback := func() {
		if tx != nil {
			_ = tx.Rollback()
		}
	}

	currentVersion, err := p.SchemaVersion(ctx)
	if err != nil {
		// FIX: previously the open transaction leaked on this error path.
		rollback()

		return err
	}

	if currentVersion != 0 {
		if err = p.schemaMigrateLock(ctx, conn); err != nil {
			// FIX: previously the open transaction leaked on this error path.
			rollback()

			return err
		}
	}

	if err = schemaMigrateChecks(p.name, up, version, currentVersion); err != nil {
		rollback()

		return err
	}

	if err = p.schemaMigrate(ctx, conn, currentVersion, version); err != nil {
		// Other failures inside schemaMigrate perform their own rollback.
		if tx != nil && err == ErrNoMigrationsFound {
			_ = tx.Rollback()
		}

		return err
	}

	if tx != nil {
		if err = tx.Commit(); err != nil {
			if rerr := tx.Rollback(); rerr != nil {
				return fmt.Errorf("failed to commit the transaction with: commit error: %w, rollback error: %+v", err, rerr)
			}

			return fmt.Errorf("failed to commit the transaction but it has been rolled back: commit error: %w", err)
		}
	}

	return nil
}
// schemaMigrate applies all migrations between prior and target against
// conn, delegating to schemaMigrateRollback on any failure.
func (p *SQLProvider) schemaMigrate(ctx context.Context, conn SQLXConnection, prior, target int) (err error) {
	migrations, err := loadMigrations(p.name, prior, target)
	if err != nil {
		return err
	}
	if len(migrations) == 0 {
		return ErrNoMigrationsFound
	}
	p.log.Infof(logFmtMigrationFromTo, strconv.Itoa(prior), strconv.Itoa(migrations[len(migrations)-1].After()))
	for i, migration := range migrations {
		// NOTE(review): when starting from an empty schema the lock is taken
		// before the second migration (i == 1), presumably because the
		// migrations table only exists after the first one — confirm.
		if migration.Up && prior == 0 && i == 1 {
			if err = p.schemaMigrateLock(ctx, conn); err != nil {
				return err
			}
		}
		if err = p.schemaMigrateApply(ctx, conn, migration); err != nil {
			return p.schemaMigrateRollback(ctx, conn, prior, migration.After(), err)
		}
	}
	p.log.Infof(logFmtMigrationComplete, strconv.Itoa(prior), strconv.Itoa(migrations[len(migrations)-1].After()))
	return nil
}
// schemaMigrateLock takes an ACCESS EXCLUSIVE lock on the migrations table.
// Only the PostgreSQL provider performs this; others are a no-op.
func (p *SQLProvider) schemaMigrateLock(ctx context.Context, conn SQLXConnection) (err error) {
	if p.name != providerPostgres {
		return nil
	}
	if _, err = conn.ExecContext(ctx, fmt.Sprintf(queryFmtPostgreSQLLockTable, tableMigrations, "ACCESS EXCLUSIVE")); err != nil {
		return fmt.Errorf("failed to lock tables: %w", err)
	}
	return nil
}
// schemaMigrateApply executes a single migration query (when non-empty),
// seeds the encryption check value when first upgrading to v1, then records
// the migration via schemaMigrateFinalize.
func (p *SQLProvider) schemaMigrateApply(ctx context.Context, conn SQLXConnection, migration model.SchemaMigration) (err error) {
	if migration.NotEmpty() {
		if _, err = conn.ExecContext(ctx, migration.Query); err != nil {
			return fmt.Errorf(errFmtFailedMigration, migration.Version, migration.Name, err)
		}
		if migration.Version == 1 && migration.Up {
			// Add the schema encryption value if upgrading to v1.
			if err = p.setNewEncryptionCheckValue(ctx, conn, &p.key); err != nil {
				return err
			}
		}
	}
	if err = p.schemaMigrateFinalize(ctx, conn, migration); err != nil {
		return err
	}
	return nil
}
// schemaMigrateFinalize records the applied migration in the migrations
// table. The v1 down migration is skipped (presumably because that
// migration removes the table itself — confirm).
func (p *SQLProvider) schemaMigrateFinalize(ctx context.Context, conn SQLXConnection, migration model.SchemaMigration) (err error) {
	if migration.Version == 1 && !migration.Up {
		return nil
	}
	if _, err = conn.ExecContext(ctx, p.sqlInsertMigration, time.Now(), migration.Before(), migration.After(), utils.Version()); err != nil {
		return fmt.Errorf("failed inserting migration record: %w", err)
	}
	p.log.Debugf("Storage schema migrated from version %d to %d", migration.Before(), migration.After())
	return nil
}
// schemaMigrateRollback undoes a failed migration run: via transaction
// rollback when conn is a *sqlx.Tx, otherwise by applying the reverse
// migrations directly.
func (p *SQLProvider) schemaMigrateRollback(ctx context.Context, conn SQLXConnection, prior, after int, merr error) (err error) {
	switch tx := conn.(type) {
	case *sqlx.Tx:
		return p.schemaMigrateRollbackWithTx(ctx, tx, merr)
	default:
		return p.schemaMigrateRollbackWithoutTx(ctx, prior, after, merr)
	}
}
// schemaMigrateRollbackWithTx rolls the transaction back, wrapping the
// original migration error merr into the returned error either way.
func (p *SQLProvider) schemaMigrateRollbackWithTx(_ context.Context, tx *sqlx.Tx, merr error) (err error) {
	if err = tx.Rollback(); err != nil {
		return fmt.Errorf("error applying rollback %+v. rollback caused by: %w", err, merr)
	}
	return fmt.Errorf("migration rollback complete. rollback caused by: %w", merr)
}
// schemaMigrateRollbackWithoutTx rolls back by loading and applying the
// migrations from 'after' back down to 'prior'; used when no transaction is
// available. The original migration error merr is wrapped into every return.
func (p *SQLProvider) schemaMigrateRollbackWithoutTx(ctx context.Context, prior, after int, merr error) (err error) {
	migrations, err := loadMigrations(p.name, after, prior)
	if err != nil {
		return fmt.Errorf("error loading migrations from version %d to version %d for rollback: %+v. rollback caused by: %w", prior, after, err, merr)
	}
	for _, migration := range migrations {
		if err = p.schemaMigrateApply(ctx, p.db, migration); err != nil {
			return fmt.Errorf("error applying migration version %d to version %d for rollback: %+v. rollback caused by: %w", migration.Before(), migration.After(), err, merr)
		}
	}
	return fmt.Errorf("migration rollback complete. rollback caused by: %w", merr)
}
// schemaLatestMigration fetches the most recent row from the migrations
// table.
func (p *SQLProvider) schemaLatestMigration(ctx context.Context) (migration *model.Migration, err error) {
	migration = &model.Migration{}
	if err = p.db.QueryRowxContext(ctx, p.sqlSelectLatestMigration).StructScan(migration); err != nil {
		return nil, err
	}
	return migration, nil
}
// schemaMigrateChecks validates a requested migration: pre1 schemas cannot
// be migrated by this version, the target must differ from the current
// version, stay within the latest known version and respect the requested
// up/down direction.
func schemaMigrateChecks(providerName string, up bool, targetVersion, currentVersion int) (err error) {
	switch {
	case currentVersion == -1:
		return fmt.Errorf(errFmtMigrationPre1, "up from", errFmtMigrationPre1SuggestedVersion)
	case targetVersion == -1:
		return fmt.Errorf(errFmtMigrationPre1, "down to", fmt.Sprintf("you should downgrade to schema version 1 using the current authelia version then use the suggested authelia version to downgrade to pre1: %s", errFmtMigrationPre1SuggestedVersion))
	}
	if targetVersion == currentVersion {
		return fmt.Errorf(ErrFmtMigrateAlreadyOnTargetVersion, targetVersion, currentVersion)
	}
	latest, err := latestMigrationVersion(providerName)
	if err != nil {
		return err
	}
	if currentVersion > latest {
		return fmt.Errorf(errFmtSchemaCurrentGreaterThanLatestKnown, latest)
	}
	if up {
		if targetVersion < currentVersion {
			return fmt.Errorf(ErrFmtMigrateUpTargetLessThanCurrent, targetVersion, currentVersion)
		}
		if targetVersion == SchemaLatest && latest == currentVersion {
			return ErrSchemaAlreadyUpToDate
		}
		if targetVersion != SchemaLatest && latest < targetVersion {
			return fmt.Errorf(ErrFmtMigrateUpTargetGreaterThanLatest, targetVersion, latest)
		}
	} else {
		if targetVersion < 0 {
			return fmt.Errorf(ErrFmtMigrateDownTargetLessThanMinimum, targetVersion)
		}
		if targetVersion > currentVersion {
			return fmt.Errorf(ErrFmtMigrateDownTargetGreaterThanCurrent, targetVersion, currentVersion)
		}
	}
	return nil
}
// SchemaVersionToString returns a version string given a version number:
// -2 renders as "unknown", -1 as "pre1", 0 as the na placeholder, and any
// other value as its decimal representation.
func SchemaVersionToString(version int) (versionStr string) {
	if version == -2 {
		return "unknown"
	}

	if version == -1 {
		return "pre1"
	}

	if version == 0 {
		return na
	}

	return strconv.Itoa(version)
}
|
package auth
import (
"fmt"
"managIncident/controllers/admin"
"managIncident/models"
"time"
"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
"github.com/astaxie/beego/validation"
)
// RegisterController serves the public registration / password-request pages.
// It embeds beego.Controller for request parsing, sessions and rendering.
type RegisterController struct {
	beego.Controller
}
// Register renders the registration form and, on POST, validates it, stores
// the request together with the requester's IP, and notifies the admins by
// mail. On success the user is redirected to "/"; otherwise the page is
// re-rendered with flash messages.
func (this *RegisterController) Register() {
	o := orm.NewOrm()
	o.Using("default")
	register := models.Register{}
	flash := beego.NewFlash()
	// this.Data["Form"] = &register
	if err := this.ParseForm(&register); err != nil {
		beego.Error("Couldn't parse the form. Reason: ", err)
	} else {
		// Only the mail field is mandatory.
		valid := validation.Validation{}
		valid.Required(register.Mail, "mail")
		isValid, _ := valid.Valid(register)
		if this.Ctx.Input.Method() == "POST" {
			if !isValid {
				// Expose validation errors to the template and log them.
				this.Data["errors"] = valid.ErrorsMap
				for _, err := range valid.Errors {
					beego.Error(err.Key, err.Message)
				}
			} else {
				// Record the requester's IP alongside the request.
				r := this.Ctx.Input
				register.IP = r.IP()
				fmt.Println(register.IP)
				_, err := o.Insert(&register)
				// res, err := o.Raw("INSERT INTO `incident` (`cat`, `title`, `description`,`date_request`, `priority`, `user_id`) VALUES (?,?,?,?,?,?)", register.Cat, register.Title, register.Description, date, register.Priority, this.GetSession("uid")).Exec()
				if err == nil {
					// Mail failure is only logged: the request itself is
					// already persisted at this point.
					err := admin.SendMailAdmin()
					if err != nil {
						fmt.Println(err)
					}
					flash.Success(register.Mail + " : votre demande a bien été envoyé ")
					flash.Store(&this.Controller)
					this.Redirect("/", 302)
				} else {
					// NOTE(review): every insert error is reported to the user
					// as a duplicate address — confirm a unique index on mail
					// is the only realistic failure mode here.
					flash.Warning("Attention car cette adresse mail : " + register.Mail + " est déjà dans les demandes. ")
					flash.Store(&this.Controller)
					beego.Debug("Couldn't insert in tableName Register. Reason: ", err)
				}
			}
		}
	}
	Template(this, "user", "register", "Demande de Connexion / Nouveau mot de passe")
}
// Password lets a user identified by the :mail URL parameter (an md5 of their
// address) set an initial password. It only proceeds when the user exists and
// has no password yet; on success it hashes the password, updates the user,
// opens a session and redirects into the application.
func (this *RegisterController) Password() {
	flash := beego.NewFlash()
	o := orm.NewOrm()
	v := this.GetSession("uid")
	if v != nil {
		flash.Error("Une session existe déjà sur cette Ordinateur. Déconnectes toi afin d'éviter tout problème")
		flash.Store(&this.Controller)
		// NOTE(review): beego's Redirect does not abort the handler — the code
		// below still runs after this call. Confirm whether a return or
		// StopRun() is intended here.
		this.Redirect("/", 302)
	}
	o.Using("default")
	mail := this.Ctx.Input.Param(":mail")
	user := models.User{Md5Mail: mail}
	// Look the user up by the Md5Mail column.
	err := o.Read(&user, "Md5Mail")
	this.Data["mail"] = user.Mail
	this.Data["md5Mail"] = mail
	// Three return values:Is Created,Object Id,Error
	if err == nil && user.Pass == "" {
		if this.Ctx.Input.Method() == "POST" {
			this.Ctx.Request.ParseForm()
			password := this.Ctx.Request.Form.Get("password")
			repassword := this.Ctx.Request.Form.Get("repassword")
			// Both password fields must match; otherwise the form is shown again.
			if repassword == password {
				newPass := admin.Md5Pass(password)
				// NOTE(review): this Update writes the whole struct, so any
				// User field not copied here is reset to its zero value —
				// confirm that is intended.
				user := models.User{Id: user.Id, Mail: user.Mail, Role: user.Role, Pass: newPass, Created: time.Now()}
				if _, err := o.Update(&user); err == nil {
					flash.Success("Bienvenue " + user.Mail)
					flash.Store(&this.Controller)
					// Initialise the per-session incident counter and identity.
					v := this.GetSession("IncidentManager")
					if v == nil {
						this.SetSession("IncidentID", int(1))
						this.SetSession("uid", user.Id)
						this.SetSession("mail", user.Mail)
						this.SetSession("role", user.Role)
						this.Data["num"] = 0
					} else {
						this.SetSession("IncidentID", v.(int)+1)
						this.Data["num"] = v.(int)
					}
					this.Redirect("/incident-manager/", 302)
				} else {
					fmt.Println("update", err)
				}
			}
		}
	} else {
		// Unknown user or a password already set: deny access.
		flash.Error("Dommage mais tu ne peux accéder à cette page")
		flash.Store(&this.Controller)
		this.Redirect("/", 302)
	}
	Template(this, "user", "password", "Enregistre ton mot de passe")
}
// Template wires the common page layout onto a RegisterController response:
// dossier/tpl selects the template file and titre becomes the page title.
func Template(this *RegisterController, dossier string, tpl string, titre string) {
	this.Data["dateRequest"] = time.Now()
	this.Data["title2"] = titre
	this.Layout = "layout.tpl"
	this.TplNames = dossier + "/" + tpl + ".tpl"
	this.LayoutSections = map[string]string{
		"navbar": "index/navbar.tpl",
		"footer": "index/footer.tpl",
	}
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gcutil
import (
"context"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/tikv/client-go/v2/oracle"
"github.com/tikv/client-go/v2/util"
)
const (
	// selectVariableValueSQL reads a single bookkeeping variable's value from
	// the mysql.tidb table by its name.
	selectVariableValueSQL = `SELECT HIGH_PRIORITY variable_value FROM mysql.tidb WHERE variable_name=%?`
)
// CheckGCEnable reports whether the global TiDB GC enable variable is on.
func CheckGCEnable(ctx sessionctx.Context) (enable bool, err error) {
	accessor := ctx.GetSessionVars().GlobalVarsAccessor

	val, err := accessor.GetGlobalSysVar(variable.TiDBGCEnable)
	if err != nil {
		return false, errors.Trace(err)
	}

	return variable.TiDBOptOn(val), nil
}
// DisableGC turns the global TiDB GC enable variable off.
func DisableGC(ctx sessionctx.Context) error {
	accessor := ctx.GetSessionVars().GlobalVarsAccessor
	return accessor.SetGlobalSysVar(context.Background(), variable.TiDBGCEnable, variable.Off)
}
// EnableGC turns the global TiDB GC enable variable on.
func EnableGC(ctx sessionctx.Context) error {
	accessor := ctx.GetSessionVars().GlobalVarsAccessor
	return accessor.SetGlobalSysVar(context.Background(), variable.TiDBGCEnable, variable.On)
}
// ValidateSnapshot checks that the newly set snapshot time is not older than
// the current GC safe point.
func ValidateSnapshot(ctx sessionctx.Context, snapshotTS uint64) error {
	safePointTS, err := GetGCSafePoint(ctx)
	if err != nil {
		return errors.Trace(err)
	}

	if snapshotTS < safePointTS {
		return variable.ErrSnapshotTooOld.GenWithStackByArgs(model.TSConvert2Time(safePointTS).String())
	}

	return nil
}
// ValidateSnapshotWithGCSafePoint checks that the newly set snapshot time is
// not older than the supplied GC safe point.
func ValidateSnapshotWithGCSafePoint(snapshotTS, safePointTS uint64) error {
	if snapshotTS < safePointTS {
		return variable.ErrSnapshotTooOld.GenWithStackByArgs(model.TSConvert2Time(safePointTS).String())
	}

	return nil
}
// GetGCSafePoint loads the GC safe point time from mysql.tidb and returns it
// as a TSO timestamp.
func GetGCSafePoint(sctx sessionctx.Context) (uint64, error) {
	executor := sctx.(sqlexec.RestrictedSQLExecutor)
	internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnGC)

	rows, _, err := executor.ExecRestrictedSQL(internalCtx, nil, selectVariableValueSQL, "tikv_gc_safe_point")
	if err != nil {
		return 0, errors.Trace(err)
	}

	// Exactly one row is expected for the variable.
	if len(rows) != 1 {
		return 0, errors.New("can not get 'tikv_gc_safe_point'")
	}

	safePointTime, err := util.CompatibleParseGCTime(rows[0].GetString(0))
	if err != nil {
		return 0, errors.Trace(err)
	}

	return oracle.GoTimeToTS(safePointTime), nil
}
|
package main
import (
"fmt"
"io"
"log"
"net"
)
// SetRequestHeader builds the raw HTTP/1.1 GET request sent to www.baidu.com,
// terminated by the blank line that ends the header section.
func SetRequestHeader() string {
	const crlf = "\r\n"

	return "GET / HTTP/1.1" + crlf +
		"Host:www.baidu.com" + crlf +
		"Connection:close" + crlf +
		crlf
}
// main opens a TCP connection to www.baidu.com, sends a raw HTTP GET request
// and prints every chunk of the response until the server closes the socket.
func main() {
	conn, err := net.Dial("tcp", "www.baidu.com:80")
	if err != nil {
		log.Fatalf("Dial error:%v\n", err)
	}
	defer conn.Close()

	if _, err = io.WriteString(conn, SetRequestHeader()); err != nil {
		log.Fatalf("WriteString error:%v\n", err)
	}

	chunk := make([]byte, 4096)
	for {
		n, readErr := conn.Read(chunk)
		if readErr != nil {
			// Includes io.EOF once the server closes the connection.
			log.Printf("Read error:%v\n", readErr)
			return
		}
		fmt.Println(string(chunk[:n]))
	}
}
|
package dbstore
import (
"testing"
"github.com/driftprogramming/pgxpoolmock"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// constraintColumns mirrors the column order of the constraints table used by
// the mocked queries below.
var constraintColumns = []string{"id", "namespace", "name", "selector", "allowed_processes", "allowed_files"}
// TestGetAll verifies that DbConstraintStore.GetAll maps every row of the
// constraints table into a constraint whose selector, allowed processes and
// allowed files are decoded from their JSON columns.
func TestGetAll(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	// setup: a mocked pgx pool that returns two fixture rows for the query.
	mockPool := pgxpoolmock.NewMockPgxPool(ctrl)
	pgxRows := pgxpoolmock.NewRows(constraintColumns).
		AddRow(1, "default", "nginx", `{"match_labels": [{"key": "app", "value": "nginx"}]}`, `[{"regex": "(.*)nginx(.*)"}]`, `[{"name": "/etc/nginx/nginx.conf", "sha256sum": "c01b39c7a35ccc3b081a3e83d2c71fa9a767ebfeb45c69f08e17dfe3ef375a7b"}]`).
		AddRow(2, "default2", "worker", `{"match_labels": [{"key": "app", "value": "worker"}]}`, `[{"regex": "(.*)worker(.*)"}]`, `[{"name": "/etc/worker/worker.yaml", "sha256sum": "c01b39c7a35ccc3b081a3e83d2c71fa9a767ebfeb45c69f08e17dfe3ef375a7b"}]`).
		ToPgxRows()
	mockPool.EXPECT().Query(gomock.Any(), "SELECT * FROM constraints ORDER BY id ASC", gomock.Any()).Return(pgxRows, nil)
	s := DbConstraintStore{pool: mockPool}
	constraints, err := s.GetAll()
	if err != nil {
		t.Error(err)
	}
	require.Nil(t, err)
	require.Len(t, constraints, 2)
	// First fixture row maps to the nginx constraint.
	constraint := constraints[0]
	assert.Equal(t, "default", constraint.GetNamespace())
	assert.Equal(t, "nginx", constraint.GetName())
	assert.Equal(t, "app", constraint.GetSelector().GetMatchLabels()[0].GetKey())
	assert.Equal(t, "nginx", constraint.GetSelector().GetMatchLabels()[0].GetValue())
	assert.Equal(t, "(.*)nginx(.*)", constraint.GetAllowedProcesses()[0].GetRegex())
	assert.Equal(t, "/etc/nginx/nginx.conf", constraint.GetAllowedFiles()[0].GetName())
	// Second fixture row maps to the worker constraint.
	constraint = constraints[1]
	assert.Equal(t, "default2", constraint.GetNamespace())
	assert.Equal(t, "worker", constraint.GetName())
	assert.Equal(t, "app", constraint.GetSelector().GetMatchLabels()[0].GetKey())
	assert.Equal(t, "worker", constraint.GetSelector().GetMatchLabels()[0].GetValue())
	assert.Equal(t, "(.*)worker(.*)", constraint.GetAllowedProcesses()[0].GetRegex())
	assert.Equal(t, "/etc/worker/worker.yaml", constraint.GetAllowedFiles()[0].GetName())
}
// TestFindByNamespace verifies that DbConstraintStore.FindByNamespace issues
// the parameterised namespace query and decodes the single matching row.
func TestFindByNamespace(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	// setup: a mocked pgx pool returning one fixture row for namespace "default".
	mockPool := pgxpoolmock.NewMockPgxPool(ctrl)
	pgxRows := pgxpoolmock.NewRows(constraintColumns).
		AddRow(1, "default", "nginx", `{"match_labels": [{"key": "app", "value": "nginx"}]}`, `[{"regex": "(.*)nginx(.*)"}]`, `[{"name": "/etc/nginx/nginx.conf", "sha256sum": "c01b39c7a35ccc3b081a3e83d2c71fa9a767ebfeb45c69f08e17dfe3ef375a7b"}]`).
		ToPgxRows()
	mockPool.EXPECT().Query(gomock.Any(), "SELECT * FROM constraints WHERE namespace = $1 ORDER BY id ASC", "default").Return(pgxRows, nil)
	s := DbConstraintStore{pool: mockPool}
	constraints, err := s.FindByNamespace("default")
	if err != nil {
		t.Error(err)
	}
	require.Nil(t, err)
	require.Len(t, constraints, 1)
	// The single row maps to the nginx constraint.
	constraint := constraints[0]
	assert.Equal(t, "default", constraint.GetNamespace())
	assert.Equal(t, "nginx", constraint.GetName())
	assert.Equal(t, "app", constraint.GetSelector().GetMatchLabels()[0].GetKey())
	assert.Equal(t, "nginx", constraint.GetSelector().GetMatchLabels()[0].GetValue())
	assert.Equal(t, "(.*)nginx(.*)", constraint.GetAllowedProcesses()[0].GetRegex())
	assert.Equal(t, "/etc/nginx/nginx.conf", constraint.GetAllowedFiles()[0].GetName())
}
|
/////////////////////////////////////////////////////////////////////
// arataca89@gmail.com
// 20210420
//
// Implementação de uma pilha de strings usando o tipo slice
//
// Referência:
// (DONOVAN e KERNIGHAN, 2017)
//
package main
import (
"fmt"
"os"
)
// stack is the backing store for the string stack; it starts empty.
//
// Bug fix: the previous declaration, make([]string, 1), seeded the stack with
// one phantom empty string — a fresh program reported a non-empty stack and
// printed a blank bottom element, and the len(stack) == 0 "empty" checks in
// main never fired for the first pop.
var stack []string
// push places item on top of the stack.
func push(item string) {
	stack = append(stack, item)
}
// pop discards the top element; it is a no-op on an empty stack.
func pop() {
	if n := len(stack); n > 0 {
		stack = stack[:n-1]
	}
}
// printStack prints the stack contents from top to bottom, one item per line.
func printStack() {
	last := len(stack) - 1
	for offset := range stack {
		fmt.Println(stack[last-offset])
	}
}
// main runs the interactive menu loop: 1 pushes, 2 pops, 3 prints the stack,
// and 0 terminates the program.
func main() {
	var choice string
	for {
		fmt.Println("<< 1 >> Inserir item na pilha")
		fmt.Println("<< 2 >> Retirar item da pilha")
		fmt.Println("<< 3 >> Exibir pilha")
		fmt.Println("<< 0 >> Sair")
		fmt.Print("Entre com sua opção: ")
		fmt.Scanf("%s\r", &choice)

		switch choice {
		case "0":
			os.Exit(0)
		case "1":
			fmt.Print("Entre com a string a ser inserida: ")
			fmt.Scanf("%s\r", &choice)
			push(choice)
			fmt.Println()
		case "2":
			if len(stack) == 0 {
				fmt.Printf("\nPilha vazia\n\n")
			} else {
				top := stack[len(stack)-1]
				pop()
				fmt.Println("\nItem retirado", top)
				fmt.Println()
			}
		case "3":
			fmt.Println("\nExibindo a pilha")
			fmt.Println("----------------")
			printStack()
			fmt.Println()
		default:
			fmt.Printf("\nOpção inválida!\n\n")
		}
	}
}
|
package router
import (
"encoding/json"
"github.com/golang/glog"
"qipai/dao"
"qipai/domain"
"qipai/enum"
"qipai/game"
"qipai/model"
"qipai/srv"
"qipai/utils"
"zero"
)
// init registers the room-related message handlers; every one of them
// requires an authenticated session.
func init() {
	game.AddAuthHandler(game.ReqCreateRoom, createRoom)
	game.AddAuthHandler(game.ReqRoomList, roomList)
	game.AddAuthHandler(game.ReqRoom, room)         // request room information
	game.AddAuthHandler(game.ReqJoinRoom, joinRoom) // request to join a room
	game.AddAuthHandler(game.ReqSit, sit)
	game.AddAuthHandler(game.ReqLeaveRoom, leaveRoom)
	game.AddAuthHandler(game.ReqDeleteRoom, deleteRoom)
}
// deleteRoom handles ReqDeleteRoom: it parses the room id from the payload,
// resolves the requesting player from the session, and deletes the room via
// the room service on that player's behalf.
func deleteRoom(s *zero.Session, msg *zero.Message) {
	// reqData is the expected JSON payload.
	type reqData struct {
		Id uint `json:"id"`
	}
	// res is inspected by the deferred sender below: nil means "send nothing".
	// It is reset to nil immediately so the success path stays silent; every
	// error path assigns a message that the defer then delivers.
	res := utils.Msg("")
	res = nil
	defer func() {
		if res == nil {
			return
		}
		res.Send(game.ResDeleteRoom, s)
	}()
	var data reqData
	err := json.Unmarshal(msg.GetData(), &data)
	if err != nil {
		res = utils.Msg(err.Error()).Code(-1)
		return
	}
	p, e := game.GetPlayerFromSession(s)
	if e != nil {
		glog.Error(e)
		res = utils.Msg(e.Error()).Code(-1)
		return
	}
	err = srv.Room.Delete(data.Id, uint(p.Uid))
	if err != nil {
		res = utils.Msg(err.Error()).Code(-1)
		return
	}
}
// leaveRoom handles ReqLeaveRoom: it removes the requesting player from the
// room and, for tea-house rooms, broadcasts the departure to the club's
// online players.
func leaveRoom(s *zero.Session, msg *zero.Message) {
	// reqData is the expected JSON payload.
	type reqData struct {
		RoomId uint `json:"roomId"`
	}
	// res is inspected by the deferred sender: nil means "send nothing".
	res := utils.Msg("")
	defer func() {
		if res == nil {
			return
		}
		res.Send(game.ResLeaveRoom, s)
	}()
	var data reqData
	err := json.Unmarshal(msg.GetData(), &data)
	if err != nil {
		res = utils.Msg(err.Error()).Code(-1)
		return
	}
	p, e := game.GetPlayerFromSession(s)
	if e != nil {
		glog.Error(e)
		res = utils.Msg(e.Error()).Code(-1)
		return
	}
	// Capture the player's seat information before leaving the room.
	var player model.Player
	ret := dao.Db().Where("desk_id>0 and uid=?", p.Uid).First(&player)
	if ret.RowsAffected == 0 {
		res = utils.Msg("当前玩家不在该房间,无须退出")
		return
	}
	err = srv.Room.Exit(data.RoomId, uint(p.Uid))
	if err != nil {
		res = utils.Msg(err.Error()).Code(-1)
		return
	}
	// Suppress the deferred reply: the notification below is the response.
	res = nil
	// For ordinary rooms this is the end; what follows is tea-house room logic.
	room, _ := dao.Room.Get(data.RoomId)
	// Notify all online tea-house players that somebody left the room.
	// NOTE(review): unlike sit, there is no room.ClubId == 0 guard here —
	// confirm NotifyClubPlayers tolerates non-club rooms.
	game.NotifyClubPlayers(
		game.ResLeaveRoom,
		data.RoomId,
		utils.Msg("").
			AddData("tableId", room.TableId).
			AddData("uid", p.Uid).
			AddData("deskId", player.DeskId),
	)
}
// sit handles ReqSit: it seats the requesting player in the room, replies
// with the seat assignment plus the list of seated players, notifies the
// other seated players, and — for tea-house rooms — broadcasts the event to
// the club's online players.
func sit(s *zero.Session, msg *zero.Message) {
	// reqData is the expected JSON payload.
	type reqData struct {
		RoomId uint `json:"roomId"`
	}
	// res is inspected by the deferred sender: nil means "send nothing".
	res := utils.Msg("")
	defer func() {
		if res == nil {
			return
		}
		res.Send(game.ResSit, s)
	}()
	var data reqData
	err := json.Unmarshal(msg.GetData(), &data)
	if err != nil {
		res = utils.Msg(err.Error()).Code(-1)
		return
	}
	p, e := game.GetPlayerFromSession(s)
	if e != nil {
		glog.Error(e)
		res = utils.Msg(e.Error()).Code(-1)
		return
	}
	roomId, deskId, e := srv.Room.SitDown(data.RoomId, uint(p.Uid))
	if e != nil {
		res = utils.Msg(e.Error()).Code(-1).AddData("roomId", roomId)
		return
	}
	res = utils.Msg("").AddData("deskId", deskId)
	// Fetch all players currently seated in this room.
	// playerV is the wire representation of a seated player.
	type playerV struct {
		Uid        uint `json:"uid"`        // user id
		DeskId     int  `json:"deskId"`     // seat number
		TotalScore int  `json:"totalScore"` // player's total score
	}
	players := dao.Room.PlayersSitDown(data.RoomId)
	var pvs []playerV
	for _, v := range players {
		var pv playerV
		if !utils.Copy(v, &pv) {
			res = utils.Msg("玩家数组赋值出错,请联系管理员").Code(-1)
			return
		}
		pvs = append(pvs, pv)
	}
	// Tell the other seated players in the room that we sat down.
	for _, v := range pvs {
		// No need to notify ourselves.
		if v.Uid == uint(p.Uid) {
			continue
		}
		otherPlayer := game.GetPlayer(v.Uid)
		if otherPlayer == nil {
			glog.Errorln("通知其他用户有用户坐下失败")
			continue
		}
		utils.Msg("").
			AddData("roomId", data.RoomId).
			AddData("uid", p.Uid).
			AddData("deskId", deskId).Send(game.BroadcastSitRoom, otherPlayer.Session)
	}
	res.AddData("uid", p.Uid).AddData("players", pvs)
	// Notify online tea-house players that someone sat down in this room.
	room, _ := dao.Room.Get(data.RoomId)
	if room.ClubId == 0 {
		// Not a tea-house (club) room: nothing further to broadcast.
		return
	}
	user, _ := dao.User.Get(p.Uid)
	game.NotifyClubPlayers(
		game.BroadcastSitRoom,
		data.RoomId,
		utils.Msg("").
			AddData("tableId", room.TableId).
			AddData("uid", p.Uid).
			AddData("nick", p.Nick).
			AddData("deskId", deskId).
			AddData("avatar", user.Avatar),
	)
}
// joinRoom handles ReqJoinRoom: it adds the requesting player to the room and
// replies with the list of players already seated. A room that has expired is
// reported through a ResDeleteRoom message instead of an error reply.
func joinRoom(s *zero.Session, msg *zero.Message) {
	// reqData is the expected JSON payload.
	type reqData struct {
		RoomId uint `json:"roomId"`
	}
	// res is inspected by the deferred sender: nil means "send nothing".
	res := utils.Msg("")
	defer func() {
		if res == nil {
			return
		}
		res.Send(game.ResJoinRoom, s)
	}()
	var data reqData
	err := json.Unmarshal(msg.GetData(), &data)
	if err != nil {
		res = utils.Msg(err.Error()).Code(-1)
		return
	}
	p, e := game.GetPlayerFromSession(s)
	if e != nil {
		glog.Error(e)
		res = utils.Msg(e.Error()).Code(-1)
		return
	}
	err = srv.Room.Join(data.RoomId, uint(p.Uid))
	if err != nil {
		// NOTE(review): matching the error by its message string is brittle —
		// a sentinel error would be safer. Behavior kept as-is.
		if err.Error() == "该房间不存在,或已解散" {
			res = nil
			utils.Msg("房间超过10分钟未开始或已经结束,自动解散").AddData("roomId", data.RoomId).Send(game.ResDeleteRoom, s)
			return
		}
		res = utils.Msg(err.Error()).Code(-1)
		return
	}
	// Fetch all players currently seated in this room.
	// playerV is the wire representation of a seated player.
	type playerV struct {
		Uid    uint `json:"uid"`    // user id
		DeskId int  `json:"deskId"` // seat number
	}
	players := dao.Room.PlayersSitDown(data.RoomId)
	var pvs []playerV
	for _, v := range players {
		var pv playerV
		if !utils.Copy(v, &pv) {
			res = utils.Msg("玩家数组赋值出错,请联系管理员").Code(-1)
			return
		}
		pvs = append(pvs, pv)
	}
	res.AddData("players", pvs)
}
// room handles ReqRoom: it loads the requested room and replies with its view
// representation. An expired room is reported through a ResDeleteRoom message
// instead of an error reply.
func room(s *zero.Session, msg *zero.Message) {
	// reqRoom is the expected JSON payload.
	type reqRoom struct {
		RoomId uint `json:"roomId"`
	}
	// res is inspected by the deferred sender: nil means "send nothing".
	res := utils.Msg("")
	defer func() {
		if res == nil {
			return
		}
		res.Send(game.ResRoom, s)
	}()
	var data reqRoom
	err := json.Unmarshal(msg.GetData(), &data)
	if err != nil {
		res = utils.Msg(err.Error()).Code(-1)
		return
	}
	room, err := dao.Room.Get(data.RoomId)
	if err != nil {
		// NOTE(review): error matched by message string — brittle; behavior
		// kept as-is.
		if err.Error() == "该房间不存在,或游戏已结束" {
			res = nil
			utils.Msg("房间超过10分钟未开始或已经结束,自动解散").AddData("id", data.RoomId).Send(game.ResDeleteRoom, s)
			return
		}
		res = utils.Msg(err.Error()).Code(-1)
		return
	}
	var rv domain.ResRoomV
	if !utils.Copy(room, &rv) {
		res = utils.Msg("复制房间信息出错,请联系管理员").Code(-1)
		return
	}
	res = utils.Msg("").AddData("room", rv)
}
// createRoom handles ReqCreateRoom: it validates the room settings, creates
// the room owned by the requesting player and joins them to it, replying with
// the new room id.
func createRoom(s *zero.Session, msg *zero.Message) {
	// Unlike the other handlers, a reply is always sent here.
	res := utils.Msg("")
	defer func() {
		res.Send(game.ResCreateRoom, s)
	}()
	var form domain.ReqCreateRoom
	err := json.Unmarshal(msg.GetData(), &form)
	if err != nil {
		res = utils.Msg(err.Error()).Code(-1)
		return
	}
	// Only 10, 20 or 30 rounds are allowed.
	if form.Count != 10 && form.Count != 20 && form.Count != 30 {
		res = utils.Msg("局数[count]只能是10/20/30").Code(-2)
		return
	}
	// Restrict the game start mode.
	if form.StartType != 0 && form.StartType != 1 {
		res = utils.Msg("开始方式[start]只能是0或1").Code(-3)
		return
	}
	// Restrict the payment mode.
	if form.Pay != 0 && form.Pay != 1 {
		res = utils.Msg("支付方式[pay]只能是0或1").Code(-4)
		return
	}
	// Restrict the doubling rule.
	if form.Times < 0 || form.Times > 4 {
		res = utils.Msg("翻倍规则[times]取值不合法,只能在0-4之间").Code(-7)
		return
	}
	// Base-score type must be within range.
	if form.Score < 0 || form.Score > 5 {
		res = utils.Msg("底分类型取值只能在0-5之间").Code(-7)
		return
	}
	var room model.Room
	p, e := game.GetPlayerFromSession(s)
	if e != nil {
		glog.Error(e)
		res = utils.Msg(e.Error()).Code(-1)
		return
	}
	room.Uid = uint(p.Uid)
	if ok := utils.Copy(form, &room); !ok {
		res = utils.Msg("房间信息赋值失败,请联系管理员").Code(-8)
		return
	}
	if err := srv.Room.Create(&room); err != nil {
		res = utils.Msg(err.Error()).Code(-9)
		return
	}
	// The creator joins their own room immediately.
	err = srv.Room.Join(room.ID, room.Uid)
	if err != nil {
		res = utils.Msg(err.Error()).Code(-10)
		return
	}
	res = utils.Msg("创建成功").AddData("roomId", room.ID)
}
// roomList handles ReqRoomList: it returns the rooms the requesting player
// participates in, reduced to the roomV view representation.
func roomList(s *zero.Session, msg *zero.Message) {
	// roomV is the wire representation of a single room entry.
	type roomV struct {
		ID      uint           `json:"id"`
		Score   enum.ScoreType `json:"score"`   // base-score type
		Pay     enum.PayType   `json:"pay"`     // payment mode
		Current int            `json:"current"` // current round number
		Count   int            `json:"count"`   // total rounds playable
		Uid     uint           `json:"uid"`     // room owner's user id
		Players int            `json:"players"` // number of players
	}
	// A reply is always sent for this request.
	res := utils.Msg("")
	defer func() {
		res.Send(game.ResRoomList, s)
	}()
	p, e := game.GetPlayerFromSession(s)
	if e != nil {
		glog.Error(e)
		res = utils.Msg(e.Error()).Code(-1)
		// Bug fix: previously execution fell through here with an invalid
		// player and dereferenced p.Uid below; bail out like every other
		// handler in this file does.
		return
	}
	rooms := dao.Room.MyRooms(uint(p.Uid))
	var roomsV []roomV
	for _, v := range rooms {
		var r roomV
		if !utils.Copy(v, &r) {
			res = utils.Msg("内容转换出错").Code(-1)
			return
		}
		roomsV = append(roomsV, r)
	}
	res = utils.Msg("获取房间列表成功").AddData("rooms", roomsV)
}
|
package main
import (
"flag"
"log"
"net"
"os"
"os/signal"
"time"
"github.com/valyala/fasthttp"
)
// guard is a high performance circuit breaker written in Go.
var (
	// proxyAddr is the listen address of the reverse proxy itself.
	proxyAddr = flag.String("proxyAddr", ":80", "proxy server listen at")
	// configAddr is the listen address of the configuration API server.
	configAddr = flag.String("configAddr", ":8080", "config server listen at")
	// configPath is where the configuration is synced to on disk.
	configPath = flag.String("configPath", "/tmp/guard.json", "configuration sync path")
	// breaker is the process-wide circuit breaker that serves all proxied traffic.
	breaker = NewBreaker()
)
// main starts the guard proxy: a config-manager goroutine, a gracefully
// closable TCP listener, a SIGINT handler that closes that listener, and the
// fasthttp serve loop routed through the circuit breaker.
func main() {
	flag.Parse()
	log.Printf("running with pid: %d\n", os.Getpid())

	// config manager
	go configManager()

	// proxy listener
	ln, err := net.Listen("tcp", *proxyAddr)
	if err != nil {
		log.Fatalf("error while listen at %s: %s", *proxyAddr, err)
	}
	// Give in-flight connections up to 10s to drain on shutdown.
	gln := newGracefulListener(ln, time.Second*10)

	// signal handler
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		for {
			<-c
			log.Printf("graceful shutdown...")
			gln.Close()
		}
	}()

	// proxy server; Serve returns once the graceful listener is closed.
	if err := fasthttp.Serve(gln, breaker.ServeHTTP); err != nil {
		log.Fatalf("error in fasthttp server: %s", err)
	}
}
|
package kubemq_queue
import (
"context"
"encoding/json"
"time"
queuesStream "github.com/kubemq-io/kubemq-go/queues_stream"
"github.com/pkg/errors"
uuid "github.com/satori/go.uuid"
"github.com/batchcorp/plumber-schemas/build/go/protos/opts"
"github.com/batchcorp/plumber-schemas/build/go/protos/records"
"github.com/batchcorp/plumber/validate"
)
// Read polls the configured KubeMQ queue in batches of one message, converts
// each message into a ReadRecord pushed onto resultsChan, and acknowledges
// each batch after delivery. It loops until readOpts.Continuous is false, the
// context is cancelled, or an error occurs.
func (k *KubeMQ) Read(ctx context.Context, readOpts *opts.ReadOptions, resultsChan chan *records.ReadRecord, errorChan chan *records.ErrorRecord) error {
	if err := validateReadOptions(readOpts); err != nil {
		return errors.Wrap(err, "unable to validate read options")
	}

	k.log.Info("Listening for message(s) ...")

	// count numbers the records delivered across all poll iterations.
	var count int64

	for {
		// Bug fix: poll with the caller's ctx instead of context.Background()
		// so cancellation actually stops the read loop.
		response, err := k.client.Poll(ctx,
			queuesStream.NewPollRequest().
				SetChannel(readOpts.KubemqQueue.Args.QueueName).
				SetMaxItems(1). // TODO: flag?
				SetAutoAck(false).
				SetWaitTimeout(DefaultReadTimeout))
		if err != nil {
			return err
		}

		if !response.HasMessages() {
			continue
		}

		// Single receive timestamp shared by every message in the batch.
		t := time.Now().UTC().Unix()

		for _, msg := range response.Messages {
			count++

			serializedMsg, err := json.Marshal(msg)
			if err != nil {
				return errors.Wrap(err, "unable to serialize message to JSON")
			}

			rec := &records.ReadRecord{
				MessageId:           uuid.NewV4().String(),
				Num:                 count,
				Metadata:            nil,
				ReceivedAtUnixTsUtc: t,
				Payload:             msg.Body,
				XRaw:                serializedMsg,
				Record: &records.ReadRecord_Kubemq{
					Kubemq: &records.KubeMQ{
						Id:        msg.MessageID,
						ClientId:  msg.ClientID,
						Channel:   msg.Channel,
						Value:     msg.Body,
						Timestamp: 0,
						Sequence:  0,
					},
				},
			}

			// Sequence/timestamp are only present when attributes are set.
			if msg.Attributes != nil {
				rec.GetKubemq().Sequence = int64(msg.Attributes.Sequence)
				rec.GetKubemq().Timestamp = msg.Attributes.Timestamp
			}

			resultsChan <- rec
		}

		if err := response.AckAll(); err != nil {
			return errors.Wrap(err, "unable to acknowledge message(s)")
		}

		// Bug fix: a stray count++ previously ran here, double-counting and
		// skipping one record number per poll batch; it has been removed.

		if !readOpts.Continuous {
			return nil
		}
	}
}
// validateReadOptions ensures the KubeMQ queue read options are fully
// populated before a read is attempted.
func validateReadOptions(readOpts *opts.ReadOptions) error {
	switch {
	case readOpts == nil:
		return errors.New("read options cannot be nil")
	case readOpts.KubemqQueue == nil:
		return validate.ErrEmptyBackendGroup
	case readOpts.KubemqQueue.Args == nil:
		return validate.ErrEmptyBackendArgs
	case readOpts.KubemqQueue.Args.QueueName == "":
		return errors.New("queue name cannot be empty")
	}

	return nil
}
|
package main
import "fmt"
// foo returns the constant greeting "hello world".
func foo() string {
	const greeting = "hello world"
	return greeting
}
// main prints the greeting to stdout.
func main() {
	msg := "hello world"
	fmt.Println(msg)
}
|
package api
import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net"
"os"
"runtime"
"strings"
"sync"
"time"
)
// CheckSSH looks for SSH exposure related to the current image: first it
// inspects the image's /etc/rc.local for an auto-start entry, then it lists
// the running containers of the image via the Docker remote API and probes
// each container's port 22 concurrently for an SSH banner.
func CheckSSH() {
	// check from image: rc.local under the image's top layer.
	filePath := fmt.Sprintf("imagesTemp/%s/layer/etc/rc.local", TopLayerID)
	fmt.Println(filePath)
	if _, err := os.Stat(filePath); os.IsNotExist(err) {
		log.Println("ssh config file not exist")
	} else {
		r, err := ioutil.ReadFile(filePath)
		errorPanic(err)
		// NOTE(review): any "start" substring matches here, not specifically
		// an sshd start line — confirm this heuristic is intended.
		if strings.Contains(string(r), "start") {
			fmt.Println("[+]image SSH Auto Start")
		}
	}
	// check from container: enumerate containers of this image and collect
	// their bridge IPs with port 22 appended.
	URL := fmt.Sprintf("%s:%s/containers/json", DockerRemoteAddress, DockerRemotePort)
	req := sendHTTPReq(URL, "GET")
	var containListData ContainersList
	err := json.Unmarshal(req, &containListData)
	errorPanic(err)
	iplist := make(map[string]string, 0)
	rawImageID := "sha256:" + DockerID
	for i := range containListData {
		if containListData[i].ImageID == rawImageID {
			id := containListData[i].ID
			iplist[id] = fmt.Sprintf("%s:22", containListData[i].NetworkSettings.Networks.Bridge.IPAddress)
		}
	}
	runtime.GOMAXPROCS(runtime.NumCPU())
	//var portScanIn, portScanOut chan string
	var wg sync.WaitGroup
	SSHopenList := make(map[string]string, 0) // docker contain id : contain ip
	// portDetach probes one ip:port with a 3s dial timeout and records it in
	// SSHopenList when the first line of the banner contains "SSH".
	// lockWrapper is deferred so wg.Done always runs, even on dial failure.
	portDetach := func(lockWrapper func(), containID string, ip string) {
		conn, err := net.DialTimeout("tcp", ip, time.Millisecond*3000)
		defer lockWrapper()
		if err != nil {
			return
		}
		defer conn.Close()
		// NOTE(review): the ReadString error is ignored here; a read failure
		// yields an empty result and the container is silently skipped.
		// Also, SSHopenList is written from multiple goroutines without a
		// mutex — confirm this map write is actually synchronized.
		result, err := bufio.NewReader(conn).ReadString('\n')
		if strings.Contains(result, "SSH") {
			SSHopenList[containID] = ip
			log.Println("[+]detach ssh open on " + ip)
			return
		}
	}
	// Probe every candidate container concurrently and wait for completion.
	for id, ip := range iplist {
		wg.Add(1)
		go portDetach(func() {
			wg.Done()
		}, id, ip)
	}
	wg.Wait()
}
|
package main
import (
"reflect"
validator "github.com/syssam/go-validator"
)
// CustomValidator is a custom type validation rule matching the go-validator
// callback signature; it rejects every value unconditionally.
func CustomValidator(v reflect.Value, o reflect.Value, validTag *validator.ValidTag) bool {
	return false
}
// main registers two custom validation rules: one as a named function and one
// as an inline closure, both always returning false.
func main() {
	validator.CustomTypeRuleMap.Set("customValidator", CustomValidator)
	validator.CustomTypeRuleMap.Set("customValidator2", func(v reflect.Value, o reflect.Value, validTag *validator.ValidTag) bool {
		return false
	})
}
|
package dockertestspike_test
import (
"database/sql"
"fmt"
"io/ioutil"
"log"
"os"
"testing"
"time"
"github.com/google/uuid"
_ "github.com/lib/pq"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
dts "github.com/ayubmalik/dockertestspike"
)
// pool is the shared dockertest pool created in TestMain and used by every
// test to run throwaway postgres containers.
var pool *dockertest.Pool
// TestMain initialises the shared dockertest pool before running the suite;
// it panics when the Docker daemon is unreachable.
func TestMain(m *testing.M) {
	p, err := dockertest.NewPool("")
	if err != nil {
		panic(err)
	}
	pool = p
	os.Exit(m.Run())
}
// TestAdRepoInsert verifies that AdRepository.Insert persists an ad that can
// be read back by id from a throwaway postgres container.
func TestAdRepoInsert(t *testing.T) {
	t.Parallel()
	db := openDB(t)
	repo := dts.NewAdRepository(db)
	// An ad scheduled to run tomorrow for one day.
	now := time.Now()
	start := now.AddDate(0, 0, 1)
	end := start.AddDate(0, 0, 1)
	ad := dts.NewAd("my ad content", start, end)
	err := repo.Insert(ad)
	must(t, err)
	// Read the row back directly and compare the id.
	var (
		id              uuid.UUID
		content         string
		startAt, endAt  time.Time
	)
	row := db.QueryRow("select id, content, start_at, end_at from ad where id = $1", ad.ID)
	err = row.Scan(&id, &content, &startAt, &endAt)
	must(t, err)
	assert(t, id, ad.ID)
}
// TestAdRepoGet verifies that AdRepository.Get returns a row previously
// inserted with raw SQL, field by field (timestamps compared at RFC3339
// precision).
func TestAdRepoGet(t *testing.T) {
	t.Parallel()
	db := openDB(t)
	repo := dts.NewAdRepository(db)
	// Seed one row directly, bypassing the repository.
	id := uuid.New()
	now := time.Now()
	_, err := db.Exec(
		"insert into ad(id, content, start_at, end_at, created) values($1, $2, $3, $4, $5)",
		id,
		"hello",
		now,
		now,
		now,
	)
	must(t, err)
	ad, _ := repo.Get(id)
	// must(t, err)
	assert(t, ad.ID, id)
	assert(t, ad.Content, "hello")
	assert(t, ad.StartAt.Format(time.RFC3339), now.Format(time.RFC3339))
	assert(t, ad.EndAt.Format(time.RFC3339), now.Format(time.RFC3339))
	assert(t, ad.Created.Format(time.RFC3339), now.Format(time.RFC3339))
}
// TestAdRepoFindAll verifies that AdRepository.FindAll returns every row
// after 100 rows are seeded with raw SQL.
func TestAdRepoFindAll(t *testing.T) {
	t.Parallel()
	db := openDB(t)
	repo := dts.NewAdRepository(db)
	// Seed 100 rows directly, bypassing the repository.
	for i := 0; i < 100; i++ {
		id := uuid.New()
		now := time.Now()
		_, err := db.Exec(
			"insert into ad(id, content, start_at, end_at, created) values($1, $2, $3, $4, $5)",
			id,
			"hello",
			now,
			now,
			now,
		)
		must(t, err)
	}
	ads := repo.FindAll()
	assert(t, len(ads), 100)
}
// openDB starts a disposable postgres 13.2 container, waits until it accepts
// connections, applies the schema migration, and returns an open *sql.DB.
// The container is purged automatically when the test finishes.
func openDB(t *testing.T) *sql.DB {
	port := "5432"
	resource, err := pool.RunWithOptions(&dockertest.RunOptions{
		Repository: "postgres",
		Tag:        "13.2-alpine",
		Env: []string{
			"POSTGRES_USER=postgres",
			"POSTGRES_PASSWORD=password",
			"POSTGRES_DB=dockertest",
		},
		ExposedPorts: []string{port},
	}, func(config *docker.HostConfig) {
		// Remove the container on exit; never restart it.
		config.AutoRemove = true
		config.RestartPolicy = docker.RestartPolicy{
			Name: "no",
		}
	})
	if err != nil {
		t.Fatalf("Could not start resource: %s", err)
	}
	t.Cleanup(func() {
		err := pool.Purge(resource)
		if err != nil {
			t.Logf("Could not purge resource: %s", err)
		}
	})
	// Resolve the host port Docker mapped onto the container's 5432.
	port = resource.GetPort(fmt.Sprintf("%s/tcp", port))
	// Retry until postgres inside the container answers a Ping.
	if err := pool.Retry(func() error {
		_db, _err := sql.Open("postgres", dsn(port))
		if _err != nil {
			return _err
		}
		defer _db.Close()
		return _db.Ping()
	}); err != nil {
		log.Fatalf("Could not connect to docker/postgres after retry %s", err)
	}
	fmt.Println("postgres ready on port:", port)
	db, err := sql.Open("postgres", dsn(port))
	if err != nil {
		t.Fatalf("Could not connect DB: %s", err)
	}
	dbUp(t, db)
	return db
}
// dsn builds the postgres connection string for the given host port. The
// target host comes from DOCKERTESTSPIKE_HOST, defaulting to localhost.
func dsn(port string) string {
	host := os.Getenv("DOCKERTESTSPIKE_HOST")
	if host == "" {
		host = "localhost"
	}

	const format = "host=%s port=%s user=postgres password=password dbname=dockertest sslmode=disable"
	return fmt.Sprintf(format, host, port)
}
// dbUp applies the initial schema migration script to db, recording any
// failure on t via must.
func dbUp(t *testing.T, db *sql.DB) {
	f, err := os.Open("migrations/001-create-db.sql")
	must(t, err)
	defer f.Close()
	buf, err := ioutil.ReadAll(f)
	must(t, err)
	_, err = db.Exec(string(buf))
	must(t, err)
}
// must records err as a test failure when it is non-nil.
func must(t *testing.T, err error) {
	if err == nil {
		return
	}
	t.Error(err)
}
// assert fails the test when got differs from want.
func assert(t *testing.T, got, want interface{}) {
	if got == want {
		return
	}
	t.Errorf("got %v wanted %v", got, want)
}
|
package mongodb_test
import (
"context"
"fmt"
"reflect"
"testing"
"go.mongodb.org/mongo-driver/mongo/options"
"tagallery.com/api/config"
"tagallery.com/api/logger"
"tagallery.com/api/model"
"tagallery.com/api/mongodb"
"tagallery.com/api/testutil"
"tagallery.com/api/util"
)
// init enables verbose logging for the whole test package.
func init() {
	logger.Setup(true)
}
// imageFixtures is the fixed set of images seeded by createImageFixtures; it
// covers the combinations of assigned, proposed and starred categories that
// the query tests below exercise.
var imageFixtures = []model.Image{
	{File: "test1.jpg"},
	{File: "test2.jpg", AssignedCategories: []string{"Category 1", "Category 2"}},
	{File: "test3.jpg", ProposedCategories: []string{"Category 2"}},
	{File: "test4.jpg", StarredCategory: util.StringPtr("Category 1")},
	{File: "test5.jpg", AssignedCategories: []string{"Category 2"}, ProposedCategories: []string{"Category 1", "Category 3"}},
	{File: "test6.jpg", ProposedCategories: []string{"Category 1"}, StarredCategory: util.StringPtr("Category 2")},
}
// createImageFixtures inserts the image fixtures into the "image" collection
// of the given database.
func createImageFixtures(ctx context.Context, db string) error {
	collection := mongodb.Client().Database(db).Collection("image")
	// InsertMany requires []interface{}, so re-box the typed fixture slice.
	images := make([]interface{}, len(imageFixtures))
	for k, v := range imageFixtures {
		images[k] = v
	}
	_, err := collection.InsertMany(ctx, images, options.InsertMany())
	return err
}
func TestGetImages(t *testing.T) {
var dbImages []model.Image
var expectedImages []model.Image
configuration := config.Load()
mongodb.Connect(context.Background(), fmt.Sprintf(`mongodb://%s`, configuration.DatabaseHost))
defer testutil.CleanCollection(t, configuration.Database, "image")
if err := createImageFixtures(context.Background(), configuration.Database); err != nil {
format, args := testutil.FormatTestError(
"Unable to create image fixtures in the database.",
map[string]interface{}{
"error": err,
})
t.Fatalf(format, args...)
}
expectedImages = imageFixtures
dbImages, _ = mongodb.GetImages(
model.ImageOptions{Count: util.IntPtr(10)},
&model.CategoryMap{},
)
if !reflect.DeepEqual(dbImages, expectedImages) {
format, args := testutil.FormatTestError(
"Expected images from database to equal the fixture.", map[string]interface{}{
"dbImages": dbImages,
"expectedImages": expectedImages,
})
t.Errorf(format, args...)
}
expectedImages = []model.Image{
imageFixtures[0],
imageFixtures[1],
imageFixtures[2],
}
dbImages, _ = mongodb.GetImages(
model.ImageOptions{Count: util.IntPtr(3)},
&model.CategoryMap{},
)
if !reflect.DeepEqual(dbImages, expectedImages) {
format, args := testutil.FormatTestError(
"Expected images from database to equal the fixture.", map[string]interface{}{
"dbImages": dbImages,
"expectedImages": expectedImages,
})
t.Errorf(format, args...)
}
expectedImages = []model.Image{
imageFixtures[3],
imageFixtures[4],
imageFixtures[5],
}
dbImages, _ = mongodb.GetImages(
model.ImageOptions{
Count: util.IntPtr(10),
LastImage: util.StringPtr(imageFixtures[2].File),
},
&model.CategoryMap{},
)
if !reflect.DeepEqual(dbImages, expectedImages) {
format, args := testutil.FormatTestError(
"Expected images from database to equal the fixture.", map[string]interface{}{
"dbImages": dbImages,
"expectedImages": expectedImages,
})
t.Errorf(format, args...)
}
expectedImages = []model.Image{imageFixtures[0]}
dbImages, _ = mongodb.GetImages(
model.ImageOptions{Count: util.IntPtr(10)},
nil,
)
if !reflect.DeepEqual(dbImages, expectedImages) {
format, args := testutil.FormatTestError(
"Expected images from database to equal the fixture.", map[string]interface{}{
"dbImages": dbImages,
"expectedImages": expectedImages,
})
t.Errorf(format, args...)
}
expectedImages = []model.Image{
imageFixtures[1],
imageFixtures[4],
}
dbImages, _ = mongodb.GetImages(
model.ImageOptions{Count: util.IntPtr(10)},
&model.CategoryMap{
Assigned: []string{"Category 2"},
},
)
if !reflect.DeepEqual(dbImages, expectedImages) {
format, args := testutil.FormatTestError(
"Expected images from database to equal the fixture.", map[string]interface{}{
"dbImages": dbImages,
"expectedImages": expectedImages,
})
t.Errorf(format, args...)
}
expectedImages = []model.Image{
imageFixtures[1],
imageFixtures[4],
}
dbImages, _ = mongodb.GetImages(
model.ImageOptions{Count: util.IntPtr(10)},
&model.CategoryMap{
Assigned: []string{},
},
)
if !reflect.DeepEqual(dbImages, expectedImages) {
format, args := testutil.FormatTestError(
"Expected images from database to equal the fixture.", map[string]interface{}{
"dbImages": dbImages,
"expectedImages": expectedImages,
})
t.Errorf(format, args...)
}
expectedImages = []model.Image{
imageFixtures[4],
imageFixtures[5],
}
dbImages, _ = mongodb.GetImages(
model.ImageOptions{Count: util.IntPtr(10)},
&model.CategoryMap{
Proposed: []string{"Category 1"},
},
)
if !reflect.DeepEqual(dbImages, expectedImages) {
format, args := testutil.FormatTestError(
"Expected images from database to equal the fixture.", map[string]interface{}{
"dbImages": dbImages,
"expectedImages": expectedImages,
})
t.Errorf(format, args...)
}
expectedImages = []model.Image{
imageFixtures[2],
imageFixtures[4],
imageFixtures[5],
}
dbImages, _ = mongodb.GetImages(
model.ImageOptions{Count: util.IntPtr(10)},
&model.CategoryMap{
Proposed: []string{},
},
)
if !reflect.DeepEqual(dbImages, expectedImages) {
format, args := testutil.FormatTestError(
"Expected images from database to equal the fixture.", map[string]interface{}{
"dbImages": dbImages,
"expectedImages": expectedImages,
})
t.Errorf(format, args...)
}
expectedImages = []model.Image{imageFixtures[3]}
dbImages, _ = mongodb.GetImages(
model.ImageOptions{Count: util.IntPtr(10)},
&model.CategoryMap{
Starred: util.StringPtr("Category 1"),
},
)
if !reflect.DeepEqual(dbImages, expectedImages) {
format, args := testutil.FormatTestError(
"Expected images from database to equal the fixture.", map[string]interface{}{
"dbImages": dbImages,
"expectedImages": expectedImages,
})
t.Errorf(format, args...)
}
}
// TestUpsertImage verifies that a new image document can be inserted via
// UpsertImage without error.
func TestUpsertImage(t *testing.T) {
	configuration := config.Load()
	mongodb.Connect(context.Background(), fmt.Sprintf(`mongodb://%s`, configuration.DatabaseHost))
	defer testutil.CleanCollection(t, configuration.Database, "image")
	img := model.Image{File: "test"}
	if err := mongodb.UpsertImage(img); err != nil {
		format, args := testutil.FormatTestError(
			"Expected image to be inserted.",
			map[string]interface{}{
				"error": err,
			})
		t.Errorf(format, args...)
	}
}
|
package antlr
import "sort"
// DFA is a deterministic finite automaton built lazily for a single
// parser decision point.
type DFA struct {
	// atnStartState is the ATN decision state this DFA was created from.
	atnStartState DecisionState
	// decision is the decision number this DFA covers.
	decision int
	// states holds all DFA states; a map (rather than a set) so an
	// existing equivalent state can be retrieved by key.
	states map[string]*DFAState
	// s0 is the start state; for a precedence DFA its edges slice holds
	// the per-precedence start states.
	s0 *DFAState
	// precedenceDfa reports whether this DFA is for a precedence decision.
	precedenceDfa bool
}
// NewDFA returns an empty DFA for the given decision, recording the ATN
// decision state it was created from.
func NewDFA(atnStartState DecisionState, decision int) *DFA {
	return &DFA{
		atnStartState: atnStartState,
		decision:      decision,
		// A map (not a set) so an existing equivalent state can be
		// retrieved by key.
		states: make(map[string]*DFAState),
		s0:     nil,
		// Becomes true via setPrecedenceDfa for precedence decisions.
		precedenceDfa: false,
	}
}
// getPrecedenceStartState returns the start state recorded for the given
// precedence value, or nil when none exists. It panics when this DFA is
// not a precedence DFA.
//
// @see //isPrecedenceDfa()
func (d *DFA) getPrecedenceStartState(precedence int) *DFAState {
	if !d.precedenceDfa {
		panic("Only precedence DFAs may contain a precedence start state.")
	}
	// s0.edges is never nil for a precedence DFA.
	if precedence >= 0 && precedence < len(d.s0.edges) {
		return d.s0.edges[precedence]
	}
	return nil
}
// setPrecedenceStartState records startState as the start state for the
// given precedence value. Negative precedences are silently ignored. It
// panics when this DFA is not a precedence DFA.
//
// @see //isPrecedenceDfa()
func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
	if !d.precedenceDfa {
		panic("Only precedence DFAs may contain a precedence start state.")
	}
	if precedence < 0 {
		return
	}
	// Synchronization on s0 is OK here: once the DFA becomes a precedence
	// DFA, s0 is initialized once and never replaced, and s0.edges is
	// never nil for a precedence DFA.
	if grow := precedence + 1 - len(d.s0.edges); grow > 0 {
		// Enlarge the slice so index `precedence` is addressable.
		d.s0.edges = append(d.s0.edges, make([]*DFAState, grow)...)
	}
	d.s0.edges[precedence] = startState
}
// setPrecedenceDfa switches this DFA between precedence and
// non-precedence mode. When the requested mode differs from the current
// one, the states map is cleared and s0 is re-initialized: in precedence
// mode to a fresh DFAState whose empty edges slice will hold the
// per-precedence start states, otherwise to nil. When the mode is
// unchanged, nothing happens.
func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
	if d.precedenceDfa == precedenceDfa {
		return
	}
	d.states = make(map[string]*DFAState)
	if precedenceDfa {
		startState := NewDFAState(-1, NewBaseATNConfigSet(false))
		startState.edges = make([]*DFAState, 0)
		startState.isAcceptState = false
		startState.requiresFullContext = false
		d.s0 = startState
	} else {
		d.s0 = nil
	}
	d.precedenceDfa = precedenceDfa
}
// GetStates returns the map of all states in this DFA, keyed by state key.
func (d *DFA) GetStates() map[string]*DFAState {
	return d.states
}
// DFAStateList implements sort.Interface, ordering DFA states by their
// state number (used by sortedStates).
type DFAStateList []*DFAState
func (a DFAStateList) Len() int { return len(a) }
func (a DFAStateList) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a DFAStateList) Less(i, j int) bool { return a[i].stateNumber < a[j].stateNumber }
// sortedStates returns all states of this DFA, ordered by state number.
func (d *DFA) sortedStates() []*DFAState {
	states := make([]*DFAState, 0, len(d.states))
	for _, s := range d.states {
		states = append(states, s)
	}
	sort.Sort(DFAStateList(states))
	return states
}
// String renders this DFA through a DFASerializer using the given token
// name tables; it returns "" when the DFA has no start state yet.
func (d *DFA) String(literalNames []string, symbolicNames []string) string {
	if d.s0 == nil {
		return ""
	}
	var serializer = NewDFASerializer(d, literalNames, symbolicNames)
	return serializer.String()
}
// ToLexerString renders this DFA through a LexerDFASerializer; it returns
// "" when the DFA has no start state yet.
func (d *DFA) ToLexerString() string {
	if d.s0 == nil {
		return ""
	}
	var serializer = NewLexerDFASerializer(d)
	return serializer.String()
}
|
package mcservice
import (
"log"
)
// publish validates a publish JSON-RPC request, encrypts the payload
// (params[2]) for the native entity, and forwards the rewritten request
// to the platform API.
//
// All three parameters must be strings; params[2] carries the plaintext
// payload. (The semantics of params[0] and params[1] are not visible
// here — presumably routing/topic information consumed by platformAPI;
// confirm against callers.)
func (s *MCService) publish(req *JSONRequest) (*JSONResponse, error) {
	if len(req.Params) < 3 {
		return nil, errNumParameter
	}
	if _, ok := req.Params[0].(string); !ok {
		return nil, errParameter
	}
	if _, ok := req.Params[1].(string); !ok {
		return nil, errParameter
	}
	data, ok := req.Params[2].(string)
	if !ok {
		return nil, errParameter
	}
	hexstr, err := s.boxer.Box([]byte(data), s.cfg.NativeEntity)
	if err != nil {
		log.Printf("could not encode: %s", err)
		return nil, errInternal
	}
	// Replace the plaintext payload with its boxed form before forwarding.
	req.Params[2] = hexstr
	return s.platformAPI(req)
}
|
package context
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestContext verifies that a value stored in either parent context is
// visible through the merged context.
func TestContext(t *testing.T) {
	// A locally-declared key type avoids the staticcheck SA1029 warning:
	// string-typed context keys can collide across packages.
	type ctxKey string
	ctx1 := context.WithValue(context.Background(), ctxKey("go-kratos"), "https://github.com/go-kratos/")
	ctx2 := context.WithValue(context.Background(), ctxKey("kratos"), "https://go-kratos.dev/")
	ctx, cancel := Merge(ctx1, ctx2)
	defer cancel()
	got := ctx.Value(ctxKey("go-kratos"))
	value1, ok := got.(string)
	assert.Equal(t, ok, true)
	assert.Equal(t, value1, "https://github.com/go-kratos/")
	got2 := ctx.Value(ctxKey("kratos"))
	value2, ok := got2.(string)
	assert.Equal(t, ok, true)
	assert.Equal(t, value2, "https://go-kratos.dev/")
	t.Log(value1)
	t.Log(value2)
}
// TestErr verifies that a deadline exceeded in one parent surfaces as the
// merged context's error.
func TestErr(t *testing.T) {
	parent, parentCancel := context.WithTimeout(context.Background(), time.Microsecond)
	defer parentCancel()
	// Let the parent's deadline expire before merging.
	time.Sleep(time.Millisecond)
	merged, mergedCancel := Merge(parent, context.Background())
	defer mergedCancel()
	assert.Equal(t, merged.Err(), context.DeadlineExceeded)
}
// TestDone verifies that cancelling the merged context closes its Done
// channel (the receive yields the zero struct once closed).
func TestDone(t *testing.T) {
	parent, parentCancel := context.WithCancel(context.Background())
	defer parentCancel()
	merged, mergedCancel := Merge(parent, context.Background())
	go func() {
		time.Sleep(time.Millisecond * 50)
		mergedCancel()
	}()
	assert.Equal(t, <-merged.Done(), struct{}{})
}
// TestFinish checks that finish returns the triggering error, records the
// done mark, and closes the done channel (so the receive below yields the
// zero struct).
func TestFinish(t *testing.T) {
	mc := &mergeCtx{
		parent1: context.Background(),
		parent2: context.Background(),
		done: make(chan struct{}),
		cancelCh: make(chan struct{}),
	}
	err := mc.finish(context.DeadlineExceeded)
	assert.Equal(t, err, context.DeadlineExceeded)
	assert.Equal(t, mc.doneMark, uint32(1))
	assert.Equal(t, <-mc.done, struct{}{})
}
// TestWait checks that wait blocks until a parent context is cancelled and
// then records the parent's error (context.Canceled) as doneErr.
func TestWait(t *testing.T) {
	ctx1, cancel := context.WithCancel(context.Background())
	mc := &mergeCtx{
		parent1: ctx1,
		parent2: context.Background(),
		done: make(chan struct{}),
		cancelCh: make(chan struct{}),
	}
	// Cancel the parent shortly after wait starts blocking.
	go func() {
		time.Sleep(time.Millisecond * 50)
		cancel()
	}()
	mc.wait()
	t.Log(mc.doneErr)
	assert.Equal(t, mc.doneErr, context.Canceled)
}
// TestCancel verifies that cancel signals (closes) the cancelCh channel.
func TestCancel(t *testing.T) {
	m := &mergeCtx{
		parent1:  context.Background(),
		parent2:  context.Background(),
		done:     make(chan struct{}),
		cancelCh: make(chan struct{}),
	}
	m.cancel()
	assert.Equal(t, <-m.cancelCh, struct{}{})
}
|
package receiver
import (
"github.com/luno/moonbeam/address"
)
// Directory provides access to the set of targets.
// For example, a hosted wallet will have a list of targets corresponding to
// user accounts.
type Directory struct {
	// domain is the address domain this directory answers for.
	domain string
}
// NewDirectory returns a Directory that accepts targets in the given domain.
func NewDirectory(domain string) *Directory {
	return &Directory{domain}
}
// HasTarget reports whether target decodes to a valid address belonging
// to this directory's domain. The error result is always nil here; it
// exists so the method can satisfy a directory-lookup interface.
func (d *Directory) HasTarget(target string) (bool, error) {
	_, domain, valid := address.Decode(target)
	return valid && domain == d.domain, nil
}
|
package ds
/***
*
*
Given an array nums with n integers, your task is to check if it could become non-decreasing by modifying at most 1 element.
We define an array is non-decreasing if nums[i] <= nums[i + 1] holds for every i (0-based) such that (0 <= i <= n - 2).
Example 1:
Input: nums = [4,2,3]
Output: true
Explanation: You could modify the first 4 to 1 to get a non-decreasing array.
Example 2:
Input: nums = [4,2,1]
Output: false
Explanation: You can't get a non-decreasing array by modify at most one element.
Constraints:
1 <= n <= 10 ^ 4
- 10 ^ 5 <= nums[i] <= 10 ^ 5
*
*
*
*/
// checkPossibility reports whether nums can become non-decreasing by
// modifying at most one element (LeetCode 665). It returns 1 when
// possible and 0 when not; the int return type is kept for backward
// compatibility with existing callers even though the problem statement
// phrases the answer as a boolean. The input slice is not mutated.
func checkPossibility(nums []int) int {
	// Work on a copy so the caller's slice is left untouched by the
	// in-place repairs below.
	a := append([]int(nil), nums...)
	modified := 0
	for i := 1; i < len(a); i++ {
		if a[i-1] <= a[i] {
			continue
		}
		modified++
		if modified > 1 {
			return 0
		}
		if i < 2 || a[i-2] <= a[i] {
			// Lowering a[i-1] keeps the prefix as small as possible.
			a[i-1] = a[i]
		} else {
			// Lowering a[i-1] would break the previous pair; raise a[i].
			a[i] = a[i-1]
		}
	}
	return 1
}
|
package api
import (
"github.com/jackc/pgtype"
"time"
)
// Game models a match between two teams, with final scores and aggregate
// attack/assist counts. Database UUID fields are hidden from the JSON
// representation (`json:"-"`); the embedded Team values are serialized
// instead.
type Game struct {
	ID pgtype.UUID `json:"-"`
	Location string `json:"location"`
	TeamAID pgtype.UUID `json:"-"`
	TeamBID pgtype.UUID `json:"-"`
	TeamA Team `json:"teamA"`
	TeamB Team `json:"teamB"`
	ScoreA int `json:"scoreA"`
	ScoreB int `json:"scoreB"`
	Attacks int `json:"attacks"`
	Assists int `json:"assists"`
	// StartsAt/FinishedAt are optional timestamps, omitted when nil.
	StartsAt *time.Time `json:"startsAt,omitempty"`
	FinishedAt *time.Time `json:"finishedAt,omitempty"`
}
|
package main
import (
"encoding/binary"
"flag"
"fmt"
"net"
"os"
"strconv"
"time"
)
// main runs a simple TCP latency measurement client: every 100ms it sends
// the current time (8 big-endian bytes) to the server, reads the echoed
// timestamp back, and records the round-trip latency in nanoseconds to
// latencyMeasurements.txt as well as stdout.
func main() {
	// Renamed from logfileFlag: the flag actually holds the server IP.
	serverIP := flag.String("IP", "127.0.0.1", "Server IP to dial (default:127.0.0.1)")
	flag.Parse()
	fmt.Println("Writing output to latencyMeasurements.txt")
	f, err := os.Create("latencyMeasurements.txt")
	if err != nil {
		fmt.Fprintln(os.Stderr, "could not create output file:", err)
		os.Exit(1)
	}
	defer f.Close()
	// connect to this socket
	fmt.Println("Launching Latency measurement client")
	conn, err := net.Dial("tcp", *serverIP+":8080")
	if err != nil {
		fmt.Fprintln(os.Stderr, "could not connect:", err)
		os.Exit(1)
	}
	defer conn.Close()
	count := 0
	for {
		// Send the current time as 8 big-endian bytes.
		timeStart := uint64(time.Now().UnixNano())
		payload := make([]byte, 8)
		binary.BigEndian.PutUint64(payload, timeStart)
		if _, err := conn.Write(payload); err != nil {
			fmt.Fprintln(os.Stderr, "write failed:", err)
			return
		}
		// Listen for the echoed timestamp.
		buf := make([]byte, 1024)
		nbyte, err := conn.Read(buf)
		if err != nil {
			fmt.Fprintln(os.Stderr, "read failed:", err)
			return
		}
		latency := time.Now().UnixNano() - int64(binary.BigEndian.Uint64(buf[:nbyte]))
		fmt.Println(count, ": Latency : ", latency, "ns")
		f.WriteString(strconv.FormatInt(latency, 10) + "\n")
		count++
		time.Sleep(100 * time.Millisecond)
	}
}
|
package main
import (
"mygolang/zhaoyu-json-rest/rest/trie"
)
// main exercises the route trie: it registers two GET routes, prints the
// trie, compresses it, and prints it again so the compression can be
// inspected by eye.
func main() {
	// Named "routes" so the variable does not shadow the trie package.
	routes := trie.New()
	// routes.AddRoute("GET", "/r/:id/property.*format", "property_format")
	// routes.AddRoute("GET", "/user/#username/property", "user_property")
	routes.AddRoute("GET", "/user/", "property_format")
	routes.AddRoute("GET", "/a/", "user_property")
	routes.PrintDebug()
	routes.Compress()
	routes.PrintDebug()
}
|
package main
import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"os"
"github.com/contiamo/oku"
)
// Config holds the command-line options for the oku CLI.
type Config struct {
	// Detect: detect the input encoding, report it, and exit.
	Detect bool
	// Encoding forces the input encoding, skipping detection.
	Encoding string
	// Output is the output file path; empty means stdout.
	Output string
}
// config is populated from flags in init and read by main.
var config Config
// init registers the command-line flags into the package-level config.
func init() {
	flag.BoolVar(&config.Detect, "d", false, "detect encoding and exit")
	flag.StringVar(&config.Encoding, "f", "", "from encoding: specify encoding of input (no detection)")
	flag.StringVar(&config.Output, "o", "", "output file")
}
// main reads input (the last positional argument, or stdin when piped),
// determines its character encoding (detected, or forced via -f), and
// writes the UTF-8 conversion to -o or stdout. With -d it only reports the
// detected encoding.
func main() {
	flag.Parse()
	// The input file, when given, is the last positional argument.
	var fileName string
	if args := flag.Args(); len(args) > 0 {
		fileName = args[len(args)-1]
	}
	var b []byte
	var err error
	if fileName == "" {
		// No file argument: read from stdin, but only when stdin is piped
		// (not an interactive terminal).
		stat, statErr := os.Stdin.Stat()
		if statErr != nil {
			panic(statErr)
		}
		if (stat.Mode() & os.ModeCharDevice) != 0 {
			flag.Usage()
			return
		}
		b, err = ioutil.ReadAll(os.Stdin)
	} else {
		b, err = ioutil.ReadFile(fileName)
	}
	if err != nil {
		panic(err)
	}
	var charset string
	if config.Encoding == "" {
		res, err := oku.DetectEncoding(b)
		if err != nil {
			// write err to stderr and exit
			panic(err)
		}
		out := fmt.Sprintf("oku detected: %s, confidence: %d%%\n", res.Charset, res.Confidence)
		if config.Detect {
			// Detection-only mode: report on stdout and stop.
			fmt.Print(out)
			return
		}
		// Otherwise report the detection on stderr and keep converting.
		fmt.Fprint(os.Stderr, out)
		charset = res.Charset
	} else {
		charset = config.Encoding
	}
	reader := bytes.NewReader(b)
	utf8Reader, err := oku.NewUTF8ReadCloser(reader, charset)
	if err != nil {
		panic(err)
	}
	// TODO: make buffered writes
	text, err := ioutil.ReadAll(utf8Reader)
	if err != nil {
		panic(err)
	}
	if config.Output == "" {
		fmt.Printf("%s", text)
	} else if err := ioutil.WriteFile(config.Output, text, os.FileMode(0664)); err != nil {
		panic(err)
	}
}
|
package app
import (
"fmt"
"os"
"github.com/gin-gonic/gin"
"github.com/google/wire"
swaggerFiles "github.com/swaggo/files"
ginSwagger "github.com/swaggo/gin-swagger"
"github.com/thoohv5/template/api/docs"
"github.com/thoohv5/template/internal/pkg/config"
"github.com/thoohv5/template/pkg/app"
"github.com/thoohv5/template/pkg/hpx"
)
// ProviderSet is app providers.
// It exposes the application constructor to google/wire dependency
// injection.
var ProviderSet = wire.NewSet(
	New,
)
// application implements app.IApp, pairing the loaded configuration with
// the HTTP router-registration callback.
type application struct {
	cf config.IConfig
	registerRouter hpx.RegisterRouter
}
// New constructs the application from its configuration and the router
// registration callback.
func New(cf config.IConfig, registerRouter hpx.RegisterRouter) app.IApp {
	return &application{
		cf: cf,
		registerRouter: registerRouter,
	}
}
// GetConfig returns the application's configuration.
func (p *application) GetConfig() config.IConfig {
	return p.cf
}
// Run builds the HTTP engine from the registered router, prints the local
// address, and starts serving on the given address(es). Setup errors are
// returned (previously this panicked, even though Run already declares an
// error result for callers to handle).
func (p *application) Run(addr ...string) error {
	gen, err := hpx.New().Handle(p.registerRouter)
	if err != nil {
		return err
	}
	_, _ = fmt.Fprintf(os.Stdout, "http://%s\n", p.cf.GetHttp().LocalAddr)
	return gen.Run(addr...)
}
// InitSwagRouter configures the generated Swagger metadata and mounts the
// swagger UI handler at /swagger/*any on the given engine. localAddr is
// the host:port used both as the docs host and in the printed URL.
func InitSwagRouter(r *gin.Engine, localAddr string) {
	// programmatically set swagger info
	docs.SwaggerInfo.Title = "文档"
	docs.SwaggerInfo.Description = "开发文档"
	docs.SwaggerInfo.Version = "1.0"
	docs.SwaggerInfo.Host = localAddr
	docs.SwaggerInfo.BasePath = "/user"
	docs.SwaggerInfo.Schemes = []string{"http", "https"}
	// Mount the documentation UI route.
	r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler))
	_, _ = fmt.Fprintf(os.Stdout, "http://%s/swagger/index.html\n", localAddr)
}
// Copyright © 2020 Attestant Limited.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package standard
import (
eth2client "github.com/attestantio/go-eth2-client"
"github.com/attestantio/vouch/services/accountmanager"
"github.com/attestantio/vouch/services/metrics"
"github.com/attestantio/vouch/services/signer"
"github.com/attestantio/vouch/services/submitter"
"github.com/pkg/errors"
"github.com/rs/zerolog"
)
// parameters holds the assembled configuration for the attestation
// service; it is populated by the With* options and validated by
// parseAndCheckParameters.
type parameters struct {
	logLevel zerolog.Level
	processConcurrency int64
	monitor metrics.AttestationMonitor
	slotsPerEpochProvider eth2client.SlotsPerEpochProvider
	attestationDataProvider eth2client.AttestationDataProvider
	attestationsSubmitter submitter.AttestationsSubmitter
	validatingAccountsProvider accountmanager.ValidatingAccountsProvider
	beaconAttestationsSigner signer.BeaconAttestationsSigner
}
// Parameter is the interface for service parameters.
type Parameter interface {
	// apply writes this parameter's value into p.
	apply(*parameters)
}
// parameterFunc adapts a plain function to the Parameter interface.
type parameterFunc func(*parameters)
// apply implements Parameter.
func (f parameterFunc) apply(p *parameters) {
	f(p)
}
// WithLogLevel sets the log level for the module (optional; defaults to
// the global zerolog level).
func WithLogLevel(logLevel zerolog.Level) Parameter {
	return parameterFunc(func(p *parameters) {
		p.logLevel = logLevel
	})
}
// WithProcessConcurrency sets the concurrency for the service; a non-zero
// value is required.
func WithProcessConcurrency(concurrency int64) Parameter {
	return parameterFunc(func(p *parameters) {
		p.processConcurrency = concurrency
	})
}
// WithSlotsPerEpochProvider sets the slots per epoch provider (required).
func WithSlotsPerEpochProvider(provider eth2client.SlotsPerEpochProvider) Parameter {
	return parameterFunc(func(p *parameters) {
		p.slotsPerEpochProvider = provider
	})
}
// WithAttestationDataProvider sets the attestation data provider (required).
func WithAttestationDataProvider(provider eth2client.AttestationDataProvider) Parameter {
	return parameterFunc(func(p *parameters) {
		p.attestationDataProvider = provider
	})
}
// WithAttestationsSubmitter sets the attestations submitter (required).
func WithAttestationsSubmitter(submitter submitter.AttestationsSubmitter) Parameter {
	return parameterFunc(func(p *parameters) {
		p.attestationsSubmitter = submitter
	})
}
// WithMonitor sets the monitor for this module (required).
func WithMonitor(monitor metrics.AttestationMonitor) Parameter {
	return parameterFunc(func(p *parameters) {
		p.monitor = monitor
	})
}
// WithValidatingAccountsProvider sets the account manager that supplies
// validating accounts (required).
func WithValidatingAccountsProvider(provider accountmanager.ValidatingAccountsProvider) Parameter {
	return parameterFunc(func(p *parameters) {
		p.validatingAccountsProvider = provider
	})
}
// WithBeaconAttestationsSigner sets the beacon attestations signer (required).
func WithBeaconAttestationsSigner(signer signer.BeaconAttestationsSigner) Parameter {
	return parameterFunc(func(p *parameters) {
		p.beaconAttestationsSigner = signer
	})
}
// parseAndCheckParameters parses and checks parameters to ensure that mandatory parameters are present and correct.
func parseAndCheckParameters(params ...Parameter) (*parameters, error) {
parameters := parameters{
logLevel: zerolog.GlobalLevel(),
}
for _, p := range params {
if params != nil {
p.apply(¶meters)
}
}
if parameters.processConcurrency == 0 {
return nil, errors.New("no process concurrency specified")
}
if parameters.slotsPerEpochProvider == nil {
return nil, errors.New("no slots per epoch provider specified")
}
if parameters.attestationDataProvider == nil {
return nil, errors.New("no attestation data provider specified")
}
if parameters.attestationsSubmitter == nil {
return nil, errors.New("no attestations submitter specified")
}
if parameters.monitor == nil {
return nil, errors.New("no monitor specified")
}
if parameters.validatingAccountsProvider == nil {
return nil, errors.New("no validating accounts provider specified")
}
if parameters.beaconAttestationsSigner == nil {
return nil, errors.New("no beacon attestations signer specified")
}
return ¶meters, nil
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package alpha
import (
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
)
// DCLServiceBindingSchema returns the declarative resource schema for the
// NetworkServices ServiceBinding resource: its Get/Apply/Delete/DeleteAll/
// List paths and the typed properties of the ServiceBinding component.
// NOTE: this is generated-style schema-literal code; keep edits mechanical.
func DCLServiceBindingSchema() *dcl.Schema {
	return &dcl.Schema{
		Info: &dcl.Info{
			Title: "NetworkServices/ServiceBinding",
			Description: "The NetworkServices ServiceBinding resource",
			StructName: "ServiceBinding",
		},
		Paths: &dcl.Paths{
			Get: &dcl.Path{
				Description: "The function used to get information about a ServiceBinding",
				Parameters: []dcl.PathParameters{
					dcl.PathParameters{
						Name: "serviceBinding",
						Required: true,
						Description: "A full instance of a ServiceBinding",
					},
				},
			},
			Apply: &dcl.Path{
				Description: "The function used to apply information about a ServiceBinding",
				Parameters: []dcl.PathParameters{
					dcl.PathParameters{
						Name: "serviceBinding",
						Required: true,
						Description: "A full instance of a ServiceBinding",
					},
				},
			},
			Delete: &dcl.Path{
				Description: "The function used to delete a ServiceBinding",
				Parameters: []dcl.PathParameters{
					dcl.PathParameters{
						Name: "serviceBinding",
						Required: true,
						Description: "A full instance of a ServiceBinding",
					},
				},
			},
			DeleteAll: &dcl.Path{
				Description: "The function used to delete all ServiceBinding",
				Parameters: []dcl.PathParameters{
					dcl.PathParameters{
						Name: "project",
						Required: true,
						Schema: &dcl.PathParametersSchema{
							Type: "string",
						},
					},
					dcl.PathParameters{
						Name: "location",
						Required: true,
						Schema: &dcl.PathParametersSchema{
							Type: "string",
						},
					},
				},
			},
			List: &dcl.Path{
				Description: "The function used to list information about many ServiceBinding",
				Parameters: []dcl.PathParameters{
					dcl.PathParameters{
						Name: "project",
						Required: true,
						Schema: &dcl.PathParametersSchema{
							Type: "string",
						},
					},
					dcl.PathParameters{
						Name: "location",
						Required: true,
						Schema: &dcl.PathParametersSchema{
							Type: "string",
						},
					},
				},
			},
		},
		Components: &dcl.Components{
			Schemas: map[string]*dcl.Component{
				"ServiceBinding": &dcl.Component{
					Title: "ServiceBinding",
					ID: "projects/{{project}}/locations/{{location}}/serviceBindings/{{name}}",
					ParentContainer: "project",
					LabelsField: "labels",
					HasCreate: true,
					SchemaProperty: dcl.Property{
						Type: "object",
						Required: []string{
							"name",
							"service",
							"project",
							"location",
						},
						Properties: map[string]*dcl.Property{
							"createTime": &dcl.Property{
								Type: "string",
								Format: "date-time",
								GoName: "CreateTime",
								ReadOnly: true,
								Description: "Output only. The timestamp when the resource was created.",
								Immutable: true,
							},
							"description": &dcl.Property{
								Type: "string",
								GoName: "Description",
								Description: "Optional. A free-text description of the resource. Max length 1024 characters.",
								Immutable: true,
							},
							"labels": &dcl.Property{
								Type: "object",
								AdditionalProperties: &dcl.Property{
									Type: "string",
								},
								GoName: "Labels",
								Description: "Optional. Set of label tags associated with the ServiceBinding resource.",
								Immutable: true,
							},
							"location": &dcl.Property{
								Type: "string",
								GoName: "Location",
								Description: "The location for the resource",
								Immutable: true,
							},
							"name": &dcl.Property{
								Type: "string",
								GoName: "Name",
								Description: "Required. Name of the ServiceBinding resource. It matches pattern `projects/*/locations/global/serviceBindings/service_binding_name>`.",
								Immutable: true,
							},
							"project": &dcl.Property{
								Type: "string",
								GoName: "Project",
								Description: "The project for the resource",
								Immutable: true,
								ResourceReferences: []*dcl.PropertyResourceReference{
									&dcl.PropertyResourceReference{
										Resource: "Cloudresourcemanager/Project",
										Field: "name",
										Parent: true,
									},
								},
							},
							"service": &dcl.Property{
								Type: "string",
								GoName: "Service",
								Description: "Required. The full service directory service name of the format projects/*/locations/*/namespaces/*/services/*",
								Immutable: true,
								ResourceReferences: []*dcl.PropertyResourceReference{
									&dcl.PropertyResourceReference{
										Resource: "Servicedirectory/Service",
										Field: "name",
									},
								},
							},
							"updateTime": &dcl.Property{
								Type: "string",
								Format: "date-time",
								GoName: "UpdateTime",
								ReadOnly: true,
								Description: "Output only. The timestamp when the resource was updated.",
								Immutable: true,
							},
						},
					},
				},
			},
		},
	}
}
|
package lcd
import (
"net/http"
"github.com/gorilla/mux"
"github.com/irisnet/irishub/client/context"
"github.com/irisnet/irishub/codec"
)
// registerQueryRoutes registers the coinswap query endpoints on the REST
// router.
func registerQueryRoutes(cliCtx context.CLIContext, r *mux.Router, cdc *codec.Codec) {
	// Query liquidity
	r.HandleFunc(
		"/coinswap/liquidities/{id}",
		queryLiquidityHandlerFn(cliCtx, cdc),
	).Methods("GET")
}
// queryLiquidityHandlerFn performs liquidity information query
// NOTE(review): the "{id}" segment is passed literally in the query path;
// presumably queryLiquidity substitutes the mux path variable from the
// request — confirm against queryLiquidity's implementation.
func queryLiquidityHandlerFn(cliCtx context.CLIContext, cdc *codec.Codec) http.HandlerFunc {
	return queryLiquidity(cliCtx, cdc, "custom/coinswap/liquidities/{id}")
}
|
package kala
// A Minter provides methods for minting unique IDs
type Minter interface {
	// Mint returns a newly minted unique ID, or an error when one could
	// not be generated.
	Mint() (string, error)
}
|
package wire
import (
"bytes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"gx/ipfs/QmU44KWVkSHno7sNDTeUcL4FBgxgoidkFuTUyTXWJPXXFJ/quic-go/internal/protocol"
"gx/ipfs/QmU44KWVkSHno7sNDTeUcL4FBgxgoidkFuTUyTXWJPXXFJ/quic-go/internal/utils"
)
// Ginkgo spec covering parsing and serialization of the MAX_STREAM_ID
// frame (type byte 0x6, followed by a varint-encoded stream ID).
var _ = Describe("MAX_STREAM_ID frame", func() {
	Context("parsing", func() {
		It("accepts sample frame", func() {
			data := []byte{0x6}
			data = append(data, encodeVarInt(0xdecafbad)...)
			b := bytes.NewReader(data)
			f, err := parseMaxStreamIDFrame(b, protocol.VersionWhatever)
			Expect(err).ToNot(HaveOccurred())
			Expect(f.StreamID).To(Equal(protocol.StreamID(0xdecafbad)))
			Expect(b.Len()).To(BeZero())
		})
		It("errors on EOFs", func() {
			data := []byte{0x06}
			data = append(data, encodeVarInt(0xdeadbeefcafe13)...)
			// The complete frame parses cleanly...
			_, err := parseMaxStreamIDFrame(bytes.NewReader(data), protocol.VersionWhatever)
			Expect(err).NotTo(HaveOccurred())
			// ...but every strict prefix must fail with an error.
			for i := range data {
				_, err := parseMaxStreamIDFrame(bytes.NewReader(data[0:i]), protocol.VersionWhatever)
				Expect(err).To(HaveOccurred())
			}
		})
	})
	Context("writing", func() {
		It("writes a sample frame", func() {
			b := &bytes.Buffer{}
			frame := MaxStreamIDFrame{StreamID: 0x12345678}
			frame.Write(b, protocol.VersionWhatever)
			expected := []byte{0x6}
			expected = append(expected, encodeVarInt(0x12345678)...)
			Expect(b.Bytes()).To(Equal(expected))
		})
		It("has the correct min length", func() {
			frame := MaxStreamIDFrame{StreamID: 0x1337}
			// 1 byte frame type + varint length of the stream ID.
			Expect(frame.Length(protocol.VersionWhatever)).To(Equal(1 + utils.VarIntLen(0x1337)))
		})
	})
})
|
package logon
import (
"github.com/quickfixgo/quickfix"
"github.com/quickfixgo/quickfix/enum"
"github.com/quickfixgo/quickfix/field"
"github.com/quickfixgo/quickfix/fix41"
"github.com/quickfixgo/quickfix/tag"
)
//Logon is the fix41 Logon type, MsgType = A
type Logon struct {
	fix41.Header
	*quickfix.Body
	fix41.Trailer
	//Message is the underlying quickfix.Message this Logon wraps
	Message *quickfix.Message
}
//FromMessage creates a Logon wrapping the given quickfix.Message instance;
//the message is referenced, not copied
func FromMessage(m *quickfix.Message) Logon {
	return Logon{
		Header: fix41.Header{&m.Header},
		Body: &m.Body,
		Trailer: fix41.Trailer{&m.Trailer},
		Message: m,
	}
}
//ToMessage returns the underlying quickfix.Message instance
func (m Logon) ToMessage() *quickfix.Message {
	return m.Message
}
//New returns a Logon initialized with the required fields for Logon:
//MsgType "A", EncryptMethod (Tag 98) and HeartBtInt (Tag 108)
func New(encryptmethod field.EncryptMethodField, heartbtint field.HeartBtIntField) (m Logon) {
	m.Message = quickfix.NewMessage()
	m.Header = fix41.NewHeader(&m.Message.Header)
	m.Body = &m.Message.Body
	m.Trailer.Trailer = &m.Message.Trailer
	m.Header.Set(field.NewMsgType("A"))
	m.Set(encryptmethod)
	m.Set(heartbtint)
	return
}
//A RouteOut is the callback type that should be implemented for routing
//Logon messages to application code
type RouteOut func(msg Logon, sessionID quickfix.SessionID) quickfix.MessageRejectError
//Route returns the beginstring ("FIX.4.1"), message type ("A"), and
//MessageRoute adapter for this Message type
func Route(router RouteOut) (string, string, quickfix.MessageRoute) {
	r := func(msg *quickfix.Message, sessionID quickfix.SessionID) quickfix.MessageRejectError {
		return router(FromMessage(msg), sessionID)
	}
	return "FIX.4.1", "A", r
}
//SetRawDataLength sets the RawDataLength field (Tag 95) on the message body
func (m Logon) SetRawDataLength(v int) {
	m.Set(field.NewRawDataLength(v))
}
//SetRawData sets the RawData field (Tag 96) on the message body
func (m Logon) SetRawData(v string) {
	m.Set(field.NewRawData(v))
}
//SetEncryptMethod sets the EncryptMethod field (Tag 98) on the message body
func (m Logon) SetEncryptMethod(v enum.EncryptMethod) {
	m.Set(field.NewEncryptMethod(v))
}
//SetHeartBtInt sets the HeartBtInt field (Tag 108) on the message body
func (m Logon) SetHeartBtInt(v int) {
	m.Set(field.NewHeartBtInt(v))
}
//SetResetSeqNumFlag sets the ResetSeqNumFlag field (Tag 141) on the message body
func (m Logon) SetResetSeqNumFlag(v bool) {
	m.Set(field.NewResetSeqNumFlag(v))
}
//GetRawDataLength gets RawDataLength (Tag 95); err is non-nil when the
//field is missing or malformed
func (m Logon) GetRawDataLength() (v int, err quickfix.MessageRejectError) {
	var f field.RawDataLengthField
	if err = m.Get(&f); err == nil {
		v = f.Value()
	}
	return
}
//GetRawData gets RawData (Tag 96); err is non-nil when the field is
//missing or malformed
func (m Logon) GetRawData() (v string, err quickfix.MessageRejectError) {
	var f field.RawDataField
	if err = m.Get(&f); err == nil {
		v = f.Value()
	}
	return
}
//GetEncryptMethod gets EncryptMethod (Tag 98); err is non-nil when the
//field is missing or malformed
func (m Logon) GetEncryptMethod() (v enum.EncryptMethod, err quickfix.MessageRejectError) {
	var f field.EncryptMethodField
	if err = m.Get(&f); err == nil {
		v = f.Value()
	}
	return
}
//GetHeartBtInt gets HeartBtInt (Tag 108); err is non-nil when the field is
//missing or malformed
func (m Logon) GetHeartBtInt() (v int, err quickfix.MessageRejectError) {
	var f field.HeartBtIntField
	if err = m.Get(&f); err == nil {
		v = f.Value()
	}
	return
}
//GetResetSeqNumFlag gets ResetSeqNumFlag, Tag 141
func (m Logon) GetResetSeqNumFlag() (v bool, err quickfix.MessageRejectError) {
var f field.ResetSeqNumFlagField
if err = m.Get(&f); err == nil {
v = f.Value()
}
return
}
// HasRawDataLength returns true if RawDataLength is present, Tag 95.
func (m Logon) HasRawDataLength() bool {
	return m.Has(tag.RawDataLength)
}

// HasRawData returns true if RawData is present, Tag 96.
func (m Logon) HasRawData() bool {
	return m.Has(tag.RawData)
}

// HasEncryptMethod returns true if EncryptMethod is present, Tag 98.
func (m Logon) HasEncryptMethod() bool {
	return m.Has(tag.EncryptMethod)
}

// HasHeartBtInt returns true if HeartBtInt is present, Tag 108.
func (m Logon) HasHeartBtInt() bool {
	return m.Has(tag.HeartBtInt)
}

// HasResetSeqNumFlag returns true if ResetSeqNumFlag is present, Tag 141.
func (m Logon) HasResetSeqNumFlag() bool {
	return m.Has(tag.ResetSeqNumFlag)
}
|
package gosnowth
import (
"bytes"
"encoding/json"
"fmt"
"math"
"strconv"
)
// DF4Response values represent time series data in the DF4 format.
type DF4Response struct {
	Ver string `json:"version,omitempty"` // DF4 format version string, if reported
	Head DF4Head `json:"head"` // time range and diagnostics for the data
	Meta []DF4Meta `json:"meta"` // per-series metadata; presumably parallel to Data — verify
	Data []DF4Data `json:"data"` // one slice of datapoints per series
	Query string `json:"-"` // originating query, kept locally; not part of the wire format
}
// DF4Meta values contain information and metadata about the metrics in a DF4
// time series data response.
type DF4Meta struct {
	Kind string `json:"kind"` // metric kind identifier
	Label string `json:"label"` // display label for the metric
	Tags []string `json:"tags,omitempty"` // stream tags attached to the metric
}
// DF4Head values contain information about the time range of the data elements
// in a DF4 time series data response.
type DF4Head struct {
	Count int64 `json:"count"` // number of data elements per series
	Start int64 `json:"start"` // start of the time range; presumably epoch seconds — verify
	Period int64 `json:"period"` // spacing between data elements
	Error []string `json:"error,omitempty"` // server-reported errors (string or array on the wire)
	Warning []string `json:"warning,omitempty"` // server-reported warnings (string or array on the wire)
	Explain json.RawMessage `json:"explain,omitempty"` // raw query explain output, passed through undecoded
}
// MarshalJSON encodes a DF4Head value into a JSON format byte slice.
// A single-element Error or Warning slice is encoded as a bare JSON string;
// multiple elements are encoded as a JSON array, mirroring the two wire
// representations accepted by UnmarshalJSON. Empty slices are omitted.
func (h *DF4Head) MarshalJSON() ([]byte, error) {
	// marshalList collapses a one-element slice to its sole element so the
	// output round-trips through UnmarshalJSON. The name parameter is used
	// only for the error message.
	marshalList := func(name string, ss []string) (json.RawMessage, error) {
		var src interface{} = ss
		if len(ss) == 1 {
			src = ss[0]
		}
		b, err := json.Marshal(src)
		if err != nil {
			return nil, fmt.Errorf(
				"unable to marshal df4 head %s value into JSON data: %w",
				name, err)
		}
		return b, nil
	}
	v := struct {
		Count int64 `json:"count"`
		Start int64 `json:"start"`
		Period int64 `json:"period"`
		Error json.RawMessage `json:"error,omitempty"`
		Warning json.RawMessage `json:"warning,omitempty"`
		Explain json.RawMessage `json:"explain,omitempty"`
	}{
		Count: h.Count,
		Start: h.Start,
		Period: h.Period,
		Explain: h.Explain,
	}
	if len(h.Error) > 0 {
		b, err := marshalList("error", h.Error)
		if err != nil {
			return nil, err
		}
		v.Error = b
	}
	if len(h.Warning) > 0 {
		b, err := marshalList("warning", h.Warning)
		if err != nil {
			return nil, err
		}
		v.Warning = b
	}
	return json.Marshal(v)
}
// UnmarshalJSON decodes a DF4Head value from a JSON format byte slice.
// The server is lenient about representations: numeric fields may arrive as
// JSON numbers or numeric strings, and error/warning fields may arrive as a
// single string or an array of strings. Unknown keys are ignored.
func (h *DF4Head) UnmarshalJSON(b []byte) error { //nolint:gocyclo
	m := map[string]interface{}{}
	if err := json.Unmarshal(b, &m); err != nil {
		return fmt.Errorf(
			"unable to unmarshal df4 head value from JSON data: %w", err)
	}
	for k, v := range m {
		switch k {
		case "count":
			// Accept a JSON number (float64 after generic decode) or a
			// base-10 numeric string.
			switch vt := v.(type) {
			case float64:
				h.Count = int64(vt)
			case string:
				i, err := strconv.ParseInt(vt, 10, 64)
				if err != nil {
					return fmt.Errorf(
						"unable to parse %s value from JSON data: %w",
						k, err)
				}
				h.Count = i
			default:
				return fmt.Errorf("unable to parse %s value from JSON data", k)
			}
		case "start":
			switch vt := v.(type) {
			case float64:
				h.Start = int64(vt)
			case string:
				i, err := strconv.ParseInt(vt, 10, 64)
				if err != nil {
					return fmt.Errorf(
						"unable to parse %s value from JSON data: %w",
						k, err)
				}
				h.Start = i
			default:
				return fmt.Errorf("unable to parse %s value from JSON data", k)
			}
		case "period":
			switch vt := v.(type) {
			case float64:
				h.Period = int64(vt)
			case string:
				i, err := strconv.ParseInt(vt, 10, 64)
				if err != nil {
					return fmt.Errorf(
						"unable to parse %s value from JSON data: %w",
						k, err)
				}
				h.Period = i
			default:
				return fmt.Errorf("unable to parse %s value from JSON data", k)
			}
		case "error":
			// A bare string becomes a one-element slice. Note: the
			// []string case cannot arise from json.Unmarshal into
			// interface{}; presumably kept for maps built in code — verify.
			// An empty JSON array intentionally leaves h.Error untouched.
			switch vt := v.(type) {
			case string:
				h.Error = []string{vt}
			case []string:
				h.Error = vt
			case []interface{}:
				if len(vt) > 0 {
					ss := make([]string, len(vt))
					for i, vs := range vt {
						s, ok := vs.(string)
						if !ok {
							return fmt.Errorf(
								"unable to parse %s value from JSON data", k)
						}
						ss[i] = s
					}
					h.Error = ss
				}
			default:
				return fmt.Errorf("unable to parse %s value from JSON data", k)
			}
		case "warning":
			// Same representations as "error" above.
			switch vt := v.(type) {
			case string:
				h.Warning = []string{vt}
			case []string:
				h.Warning = vt
			case []interface{}:
				if len(vt) > 0 {
					ss := make([]string, len(vt))
					for i, vs := range vt {
						s, ok := vs.(string)
						if !ok {
							return fmt.Errorf(
								"unable to parse %s value from JSON data", k)
						}
						ss[i] = s
					}
					h.Warning = ss
				}
			default:
				return fmt.Errorf("unable to parse %s value from JSON data", k)
			}
		case "explain":
			// Re-marshal whatever shape arrived and keep it raw.
			b, err := json.Marshal(v)
			if err != nil {
				return fmt.Errorf(
					"unable to parse %s value from JSON data: %w", k, err)
			}
			h.Explain = b
		}
	}
	return nil
}
// DF4Data values contain slices of data points of DF4 format time series
// data. Element types vary by metric kind: numbers, strings, nested arrays,
// or histogram maps (see Numeric, Text, and Histogram).
type DF4Data []interface{}
// NullEmpty sets values within a DF4Data value equal to an empty array to nil.
func (d *DF4Data) NullEmpty() {
	if d == nil {
		return
	}
	data := *d
	for idx := range data {
		arr, isSlice := data[idx].([]interface{})
		if isSlice && len(arr) == 0 {
			data[idx] = nil
		}
	}
}
// Numeric retrieves the data in this value as a slice of float64 values.
// Entries that are not numeric are left as nil pointers.
func (dd *DF4Data) Numeric() []*float64 {
	if dd == nil {
		return nil
	}
	out := make([]*float64, len(*dd))
	for idx, raw := range *dd {
		var f float64
		switch num := raw.(type) {
		case float64:
			f = num
		case int64:
			f = float64(num)
		case int:
			f = float64(num)
		case float32:
			f = float64(num)
		default:
			continue
		}
		val := f
		out[idx] = &val
	}
	return out
}
// Text retrieves the data in this value as a slice of string values.
// A plain string entry is used directly; a nested array entry contributes
// the second element of its first inner array when that element is a
// string. All other entries are left as nil pointers.
func (dd *DF4Data) Text() []*string {
	if dd == nil {
		return nil
	}
	out := make([]*string, len(*dd))
	for idx, raw := range *dd {
		if s, ok := raw.(string); ok {
			sv := s
			out[idx] = &sv
			continue
		}
		outer, ok := raw.([]interface{})
		if !ok || len(outer) == 0 {
			continue
		}
		inner, ok := outer[0].([]interface{})
		if !ok || len(inner) < 2 {
			continue
		}
		if s, ok := inner[1].(string); ok {
			sv := s
			out[idx] = &sv
		}
	}
	return out
}
// Histogram retrieves the data in this value as a slice of map[string]int64
// values. Generic JSON maps have their numeric values converted to int64;
// already-typed maps are used as-is. Other entries remain nil.
func (dd *DF4Data) Histogram() []*map[string]int64 {
	if dd == nil {
		return nil
	}
	out := make([]*map[string]int64, len(*dd))
	for idx, raw := range *dd {
		switch hist := raw.(type) {
		case map[string]interface{}:
			converted := make(map[string]int64, len(hist))
			for key, val := range hist {
				switch n := val.(type) {
				case int64:
					converted[key] = n
				case int:
					converted[key] = int64(n)
				case float64:
					converted[key] = int64(n)
				case float32:
					converted[key] = int64(n)
				}
			}
			out[idx] = &converted
		case map[string]int64:
			typed := hist
			out[idx] = &typed
		}
	}
	return out
}
// Copy returns a deep copy of the base DF4 response.
// Note: the Query field is not carried over, matching the previous
// behavior — presumably intentional since it is not wire data; verify.
func (dr *DF4Response) Copy() *DF4Response {
	b := &DF4Response{
		Data: make([]DF4Data, len(dr.Data)),
		Meta: make([]DF4Meta, len(dr.Meta)),
		Ver: dr.Ver,
		Head: DF4Head{
			Count: dr.Head.Count,
			Start: dr.Head.Start,
			Period: dr.Head.Period,
			// Clone the slice-typed fields so mutating the copy cannot
			// affect the original; previously these shared backing
			// arrays, which violated the deep-copy contract. The
			// append-to-nil form preserves nil slices as nil.
			Error: append([]string(nil), dr.Head.Error...),
			Warning: append([]string(nil), dr.Head.Warning...),
			Explain: append(json.RawMessage(nil), dr.Head.Explain...),
		},
	}
	copy(b.Meta, dr.Meta)
	for i, v := range dr.Data {
		b.Data[i] = make(DF4Data, len(v))
		copy(b.Data[i], v)
	}
	return b
}
// replaceInf is used to remove infinity and NaN values from DF4 JSON strings
// prior to attempting to parse them into DF4Response values.
func replaceInf(b []byte) []byte {
v := make([]byte, len(b))
copy(v, b)
maxFloat := strconv.FormatFloat(math.MaxFloat64, 'g', -1, 64)
negMaxFloat := strconv.FormatFloat(-math.MaxFloat64, 'g', -1, 64)
v = bytes.ReplaceAll(v, []byte("+inf,"), []byte(maxFloat+","))
v = bytes.ReplaceAll(v, []byte("+inf]"), []byte(maxFloat+"]"))
v = bytes.ReplaceAll(v, []byte("+inf\n"), []byte(maxFloat+"\n"))
v = bytes.ReplaceAll(v, []byte("-inf,"), []byte(negMaxFloat+","))
v = bytes.ReplaceAll(v, []byte("-inf]"), []byte(negMaxFloat+"]"))
v = bytes.ReplaceAll(v, []byte("-inf\n"), []byte(negMaxFloat+"\n"))
v = bytes.ReplaceAll(v, []byte("inf,"), []byte(maxFloat+","))
v = bytes.ReplaceAll(v, []byte("inf]"), []byte(maxFloat+"]"))
v = bytes.ReplaceAll(v, []byte("inf\n"), []byte(maxFloat+"\n"))
v = bytes.ReplaceAll(v, []byte("NaN,"), []byte(maxFloat+","))
v = bytes.ReplaceAll(v, []byte("NaN]"), []byte(maxFloat+"]"))
v = bytes.ReplaceAll(v, []byte("NaN\n"), []byte(maxFloat+"\n"))
v = bytes.ReplaceAll(v, []byte("nan,"), []byte(maxFloat+","))
v = bytes.ReplaceAll(v, []byte("nan]"), []byte(maxFloat+"]"))
v = bytes.ReplaceAll(v, []byte("nan\n"), []byte(maxFloat+"\n"))
return v
}
|
package xattrsyscall
import (
"syscall"
"unsafe"
)
var _zero uintptr
// Getxattr retrieves the extended attribute attr of the file at path into
// dest, returning the attribute value's size in bytes. Passing a nil or
// empty dest asks the kernel for the required size without copying data.
func Getxattr(path string, attr string, dest []byte) (int, error) {
	// BytePtrFromString reports embedded NULs as an error instead of
	// panicking like the deprecated StringBytePtr.
	pathPtr, err := syscall.BytePtrFromString(path)
	if err != nil {
		return 0, err
	}
	attrPtr, err := syscall.BytePtrFromString(attr)
	if err != nil {
		return 0, err
	}
	var destPtr *byte
	var size int
	if len(dest) > 0 {
		destPtr = &dest[0]
		size = len(dest)
	}
	r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(pathPtr)), uintptr(unsafe.Pointer(attrPtr)), uintptr(unsafe.Pointer(destPtr)), uintptr(size), 0, 0)
	// e1 is a syscall.Errno; returning it unconditionally would yield a
	// non-nil error interface even on success (Errno(0)).
	if e1 != 0 {
		return int(r0), e1
	}
	return int(r0), nil
}
// Listxattr retrieves the NUL-separated list of extended attribute names of
// the file at path into dest, returning the list's size in bytes. Passing a
// nil or empty dest asks the kernel for the required size without copying.
func Listxattr(path string, dest []byte) (int, error) {
	pathPtr, err := syscall.BytePtrFromString(path)
	if err != nil {
		return 0, err
	}
	var destPtr *byte
	var size int
	if len(dest) > 0 {
		destPtr = &dest[0]
		size = len(dest)
	}
	r0, _, e1 := syscall.Syscall6(syscall.SYS_LISTXATTR, uintptr(unsafe.Pointer(pathPtr)), uintptr(unsafe.Pointer(destPtr)), uintptr(size), 0, 0, 0)
	// Only surface a real errno; previously Errno(0) was returned as a
	// non-nil error on success.
	if e1 != 0 {
		return int(r0), e1
	}
	return int(r0), nil
}
// Setxattr sets the extended attribute attr of the file at path to data.
// An empty data slice sets a zero-length attribute value; the previous
// implementation panicked on empty input (&data[0] on a zero-length slice).
func Setxattr(path string, attr string, data []byte) error {
	pathPtr, err := syscall.BytePtrFromString(path)
	if err != nil {
		return err
	}
	attrPtr, err := syscall.BytePtrFromString(attr)
	if err != nil {
		return err
	}
	var dataPtr *byte
	if len(data) > 0 {
		dataPtr = &data[0]
	}
	_, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(pathPtr)), uintptr(unsafe.Pointer(attrPtr)), uintptr(unsafe.Pointer(dataPtr)), uintptr(len(data)), 0, 0)
	// Previously Errno(0) was returned as a non-nil error on success.
	if e1 != 0 {
		return e1
	}
	return nil
}
// Removexattr removes the extended attribute attr from the file at path.
func Removexattr(path string, attr string) error {
	pathPtr, err := syscall.BytePtrFromString(path)
	if err != nil {
		return err
	}
	attrPtr, err := syscall.BytePtrFromString(attr)
	if err != nil {
		return err
	}
	_, _, e1 := syscall.Syscall(syscall.SYS_REMOVEXATTR, uintptr(unsafe.Pointer(pathPtr)), uintptr(unsafe.Pointer(attrPtr)), 0)
	// Previously Errno(0) was returned as a non-nil error on success.
	if e1 != 0 {
		return e1
	}
	return nil
}
|
package util
import (
"context"
k8scontrollerclient "sigs.k8s.io/controller-runtime/pkg/client"
. "github.com/onsi/gomega"
)
// DeterminedE2EClient wraps E2eClient calls in an Eventually assertion to keep
// trying or bail after some time if unsuccessful.
type DeterminedE2EClient struct {
	*E2EKubeClient
}

// NewDeterminedClient returns a DeterminedE2EClient wrapping the given
// E2EKubeClient.
func NewDeterminedClient(e2eKubeClient *E2EKubeClient) *DeterminedE2EClient {
	return &DeterminedE2EClient{
		e2eKubeClient,
	}
}
// Create wraps E2EKubeClient.Create, retrying until it succeeds. The returned
// error is always nil: failures surface as Gomega assertion failures inside
// keepTrying. The context parameter is renamed ctx so it no longer shadows
// the imported context package.
func (m *DeterminedE2EClient) Create(ctx context.Context, obj k8scontrollerclient.Object, options ...k8scontrollerclient.CreateOption) error {
	m.keepTrying(func() error {
		return m.E2EKubeClient.Create(ctx, obj, options...)
	})
	return nil
}

// Update wraps E2EKubeClient.Update, retrying until it succeeds.
func (m *DeterminedE2EClient) Update(ctx context.Context, obj k8scontrollerclient.Object, options ...k8scontrollerclient.UpdateOption) error {
	m.keepTrying(func() error {
		return m.E2EKubeClient.Update(ctx, obj, options...)
	})
	return nil
}

// Delete wraps E2EKubeClient.Delete, retrying until it succeeds.
func (m *DeterminedE2EClient) Delete(ctx context.Context, obj k8scontrollerclient.Object, options ...k8scontrollerclient.DeleteOption) error {
	m.keepTrying(func() error {
		return m.E2EKubeClient.Delete(ctx, obj, options...)
	})
	return nil
}

// Patch wraps E2EKubeClient.Patch, retrying until it succeeds.
func (m *DeterminedE2EClient) Patch(ctx context.Context, obj k8scontrollerclient.Object, patch k8scontrollerclient.Patch, options ...k8scontrollerclient.PatchOption) error {
	m.keepTrying(func() error {
		return m.E2EKubeClient.Patch(ctx, obj, patch, options...)
	})
	return nil
}
// keepTrying polls fn via Gomega's Eventually until it returns nil, failing
// the enclosing test if it never succeeds within Eventually's default
// timeout.
func (m *DeterminedE2EClient) keepTrying(fn func() error) {
	Eventually(fn).Should(Succeed())
}
|
package apis
import (
"net/http"
"log"
"fmt"
"github.com/gin-gonic/gin"
. "farmer/autocs/models"
"strconv"
"github.com/mssola/user_agent"
)
// Reback is the common JSON response envelope for this API package.
type Reback struct {
	Status int `json:"status"` // application status code
	Msg string `json:"msg"` // human-readable message
	Data interface{} `json:"data"` // payload, shape depends on the endpoint
}
// IndexApi renders the landing page, choosing the mobile or desktop
// template based on the request's User-Agent header.
func IndexApi(c *gin.Context) {
	// Parse the client User-Agent to detect mobile browsers.
	agent := user_agent.New(c.Request.UserAgent())
	tpl := "web/index.html"
	if agent.Mobile() {
		tpl = "wap/index.html"
	}
	c.HTML(http.StatusOK, tpl, gin.H{"msg": "Hello World!", "title": "熊猫快收自助客服系统"})
}
// ChatApi renders the desktop chat page.
func ChatApi(c *gin.Context) {
	c.HTML(http.StatusOK, "web/chat.html", nil)
}
// AddPersonApi creates a Person record from the posted form fields
// first_name and last_name and reports the number of affected rows.
func AddPersonApi(c *gin.Context) {
	firstName := c.Request.FormValue("first_name")
	lastName := c.Request.FormValue("last_name")
	p := Person{FirstName: firstName, LastName: lastName}
	ra, err := p.AddPerson()
	if err != nil {
		// A failed insert must not terminate the whole server: the
		// previous log.Fatalln call exits the process. Log and report
		// the failure to the client instead.
		log.Println(err)
		c.JSON(http.StatusInternalServerError, gin.H{
			"msg": "insert failed",
		})
		return
	}
	msg := fmt.Sprintf("insert successful %d", ra)
	c.JSON(http.StatusOK, gin.H{
		"msg": msg,
	})
}
// GetPersonsApi returns the full list of Person records wrapped in the
// Reback envelope; on lookup failure it returns an empty envelope with an
// explanatory message.
func GetPersonsApi(c *gin.Context) {
	var R Reback
	p := Person{}
	ra, err := p.GetPersons()
	if err != nil {
		// log.Fatalln here would kill the server on a single failed
		// query; log and answer gracefully instead.
		log.Println(err)
		R.Msg = "暂无数据"
	} else {
		R.Data = ra
	}
	// Debug fmt.Println of the result set removed.
	c.JSON(http.StatusOK, R)
}
// GetPersonApi returns the user identified by the :id path parameter,
// wrapped in the Reback envelope.
func GetPersonApi(c *gin.Context) {
	var R Reback
	cid := c.Param("id")
	id, err := strconv.Atoi(cid)
	if err != nil {
		// Previously the conversion error was discarded and a malformed
		// id silently became 0; reject it explicitly instead.
		R.Msg = "用户不存在"
		c.JSON(http.StatusOK, R)
		return
	}
	R.Data = GetUserInfoById(id)
	c.JSON(http.StatusOK, R)
}
func DelPersonApi(c *gin.Context) {
cid := c.Param("id")
id, _ := strconv.Atoi(cid)
p := Person{Id:id}
pid, _ := p.DelPerson()
msg := fmt.Sprintf("Query successful %d", pid)
c.JSON(http.StatusOK, gin.H{
"msg": msg,
"data":pid,
})
} |
// Copyright 2016 Google Inc. All Rights Reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
// Kubernetes API server location and core v1 REST paths used by the
// scheduler. apiHost points at a local proxy-style listener (presumably
// `kubectl proxy` — verify); endpoints are scoped to the default namespace.
var (
	apiHost = "127.0.0.1:8001"
	bindingsEndpoint = "/api/v1/namespaces/default/pods/%s/binding/" // %s is the pod name
	eventsEndpoint = "/api/v1/namespaces/default/events"
	nodesEndpoint = "/api/v1/nodes"
	podsEndpoint = "/api/v1/pods"
	watchPodsEndpoint = "/api/v1/watch/pods"
)
// postEvent POSTs the given Event to the Kubernetes events endpoint,
// returning an error unless the API answers 201 Created.
func postEvent(event Event) error {
	var b []byte
	body := bytes.NewBuffer(b)
	err := json.NewEncoder(body).Encode(event)
	if err != nil {
		return err
	}
	request := &http.Request{
		Body: ioutil.NopCloser(body),
		ContentLength: int64(body.Len()),
		Header: make(http.Header),
		Method: http.MethodPost,
		URL: &url.URL{
			Host: apiHost,
			Path: eventsEndpoint,
			Scheme: "http",
		},
	}
	request.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(request)
	if err != nil {
		return err
	}
	// The response body was previously never closed, leaking the
	// underlying connection on every event posted.
	defer resp.Body.Close()
	if resp.StatusCode != 201 {
		return errors.New("Event: Unexpected HTTP status code: " + resp.Status)
	}
	return nil
}
// getNodes fetches the cluster's node list from the Kubernetes API.
func getNodes() (*NodeList, error) {
	var nodeList NodeList
	request := &http.Request{
		Header: make(http.Header),
		Method: http.MethodGet,
		URL: &url.URL{
			Host: apiHost,
			Path: nodesEndpoint,
			Scheme: "http",
		},
	}
	request.Header.Set("Accept", "application/json, */*")
	resp, err := http.DefaultClient.Do(request)
	if err != nil {
		return nil, err
	}
	// Close the body so the HTTP connection can be reused (previously
	// leaked).
	defer resp.Body.Close()
	if err := json.NewDecoder(resp.Body).Decode(&nodeList); err != nil {
		return nil, err
	}
	return &nodeList, nil
}
// watchUnscheduledPods streams pods with no assigned node. It returns a
// channel of newly ADDED unscheduled pods and a channel of watch errors.
// The watch goroutine runs for the life of the process, pausing five
// seconds before re-establishing a failed watch.
func watchUnscheduledPods() (<-chan Pod, <-chan error) {
	pods := make(chan Pod)
	errc := make(chan error, 1)
	v := url.Values{}
	// Empty nodeName selects pods that have not been scheduled yet.
	v.Set("fieldSelector", "spec.nodeName=")
	request := &http.Request{
		Header: make(http.Header),
		Method: http.MethodGet,
		URL: &url.URL{
			Host: apiHost,
			Path: watchPodsEndpoint,
			RawQuery: v.Encode(),
			Scheme: "http",
		},
	}
	request.Header.Set("Accept", "application/json, */*")
	go func() {
		for {
			resp, err := http.DefaultClient.Do(request)
			if err != nil {
				errc <- err
				time.Sleep(5 * time.Second)
				continue
			}
			if resp.StatusCode != 200 {
				// Release the connection before retrying (previously
				// leaked on every bad status).
				resp.Body.Close()
				errc <- errors.New("Invalid status code: " + resp.Status)
				time.Sleep(5 * time.Second)
				continue
			}
			decoder := json.NewDecoder(resp.Body)
			for {
				var event PodWatchEvent
				err = decoder.Decode(&event)
				if err != nil {
					errc <- err
					break
				}
				if event.Type == "ADDED" {
					pods <- event.Object
				}
			}
			// The decode loop only exits on error; close the body before
			// opening a new watch (previously leaked every iteration).
			resp.Body.Close()
		}
	}()
	return pods, errc
}
// getUnscheduledPods lists pods with no assigned node that are annotated
// for this scheduler (schedulerName).
func getUnscheduledPods() ([]*Pod, error) {
	var podList PodList
	unscheduledPods := make([]*Pod, 0)
	v := url.Values{}
	v.Set("fieldSelector", "spec.nodeName=")
	request := &http.Request{
		Header: make(http.Header),
		Method: http.MethodGet,
		URL: &url.URL{
			Host: apiHost,
			Path: podsEndpoint,
			RawQuery: v.Encode(),
			Scheme: "http",
		},
	}
	request.Header.Set("Accept", "application/json, */*")
	resp, err := http.DefaultClient.Do(request)
	if err != nil {
		return unscheduledPods, err
	}
	// Close the body so the connection is not leaked.
	defer resp.Body.Close()
	if err := json.NewDecoder(resp.Body).Decode(&podList); err != nil {
		return unscheduledPods, err
	}
	for i := range podList.Items {
		// Take the address of the slice element, not of the range
		// variable: `&pod` aliased a single iteration variable, so every
		// returned pointer referred to the last pod in the list.
		pod := &podList.Items[i]
		if pod.Metadata.Annotations["scheduler.alpha.kubernetes.io/name"] == schedulerName {
			unscheduledPods = append(unscheduledPods, pod)
		}
	}
	return unscheduledPods, nil
}
// getPods lists Running and Pending pods cluster-wide.
func getPods() (*PodList, error) {
	var podList PodList
	v := url.Values{}
	v.Add("fieldSelector", "status.phase=Running")
	v.Add("fieldSelector", "status.phase=Pending")
	request := &http.Request{
		Header: make(http.Header),
		Method: http.MethodGet,
		URL: &url.URL{
			Host: apiHost,
			Path: podsEndpoint,
			RawQuery: v.Encode(),
			Scheme: "http",
		},
	}
	request.Header.Set("Accept", "application/json, */*")
	resp, err := http.DefaultClient.Do(request)
	if err != nil {
		return nil, err
	}
	// Close the body so the connection is not leaked (previously missing).
	defer resp.Body.Close()
	if err := json.NewDecoder(resp.Body).Decode(&podList); err != nil {
		return nil, err
	}
	return &podList, nil
}
// ResourceUsage accumulates per-node resource consumption during fit
// calculations. CPU is counted in millicores.
type ResourceUsage struct {
	CPU int
}
// fit returns the nodes with enough free CPU (millicores) to run pod. It
// sums the CPU requests of running pods per node, compares each node's
// allocatable CPU against the pod's total request, and posts a
// FailedScheduling event when no node fits.
func fit(pod *Pod) ([]Node, error) {
	nodeList, err := getNodes()
	if err != nil {
		return nil, err
	}
	podList, err := getPods()
	if err != nil {
		return nil, err
	}
	// Tally requested CPU per node for pods already bound to a node.
	// Only "...m" (millicore) requests are counted here; plain-core pod
	// requests are skipped — presumably a known simplification, verify.
	resourceUsage := make(map[string]*ResourceUsage)
	for _, node := range nodeList.Items {
		resourceUsage[node.Metadata.Name] = &ResourceUsage{}
	}
	for _, p := range podList.Items {
		if p.Spec.NodeName == "" {
			continue
		}
		for _, c := range p.Spec.Containers {
			if strings.HasSuffix(c.Resources.Requests["cpu"], "m") {
				milliCores := strings.TrimSuffix(c.Resources.Requests["cpu"], "m")
				cores, err := strconv.Atoi(milliCores)
				if err != nil {
					return nil, err
				}
				ru := resourceUsage[p.Spec.NodeName]
				ru.CPU += cores
			}
		}
	}
	var nodes []Node
	fitFailures := make([]string, 0)
	// Total millicores requested by the candidate pod.
	var spaceRequired int
	for _, c := range pod.Spec.Containers {
		if strings.HasSuffix(c.Resources.Requests["cpu"], "m") {
			milliCores := strings.TrimSuffix(c.Resources.Requests["cpu"], "m")
			cores, err := strconv.Atoi(milliCores)
			if err != nil {
				return nil, err
			}
			spaceRequired += cores
		}
	}
	for _, node := range nodeList.Items {
		var allocatableCores int
		var err error
		if strings.HasSuffix(node.Status.Allocatable["cpu"], "m") {
			milliCores := strings.TrimSuffix(node.Status.Allocatable["cpu"], "m")
			allocatableCores, err = strconv.Atoi(milliCores)
			if err != nil {
				return nil, err
			}
		} else {
			// Allocatable expressed in whole cores; convert to millicores.
			cpu := node.Status.Allocatable["cpu"]
			cpuFloat, err := strconv.ParseFloat(cpu, 32)
			if err != nil {
				return nil, err
			}
			allocatableCores = int(cpuFloat * 1000)
		}
		freeSpace := (allocatableCores - resourceUsage[node.Metadata.Name].CPU)
		if freeSpace < spaceRequired {
			m := fmt.Sprintf("fit failure on node (%s): Insufficient CPU", node.Metadata.Name)
			fitFailures = append(fitFailures, m)
			continue
		}
		nodes = append(nodes, node)
	}
	if len(nodes) == 0 {
		// Emit a Kubernetes event recording that the Pod failed to fit on
		// any node. (The previous comment wrongly said "scheduled
		// successfully".)
		timestamp := time.Now().UTC().Format(time.RFC3339)
		event := Event{
			Count: 1,
			Message: fmt.Sprintf("pod (%s) failed to fit in any node\n%s", pod.Metadata.Name, strings.Join(fitFailures, "\n")),
			Metadata: Metadata{GenerateName: pod.Metadata.Name + "-"},
			Reason: "FailedScheduling",
			LastTimestamp: timestamp,
			FirstTimestamp: timestamp,
			Type: "Warning",
			Source: EventSource{Component: "hightower-scheduler"},
			InvolvedObject: ObjectReference{
				Kind: "Pod",
				Name: pod.Metadata.Name,
				Namespace: "default",
				Uid: pod.Metadata.Uid,
			},
		}
		// NOTE(review): the postEvent error is silently discarded here —
		// presumably best-effort reporting; confirm.
		postEvent(event)
	}
	return nodes, nil
}
// bind assigns pod to node via the Kubernetes bindings subresource and, on
// success, emits a Scheduled event.
func bind(pod *Pod, node Node) error {
	binding := Binding{
		ApiVersion: "v1",
		Kind: "Binding",
		Metadata: Metadata{Name: pod.Metadata.Name},
		Target: Target{
			ApiVersion: "v1",
			Kind: "Node",
			Name: node.Metadata.Name,
		},
	}
	var b []byte
	body := bytes.NewBuffer(b)
	err := json.NewEncoder(body).Encode(binding)
	if err != nil {
		return err
	}
	request := &http.Request{
		Body: ioutil.NopCloser(body),
		ContentLength: int64(body.Len()),
		Header: make(http.Header),
		Method: http.MethodPost,
		URL: &url.URL{
			Host: apiHost,
			Path: fmt.Sprintf(bindingsEndpoint, pod.Metadata.Name),
			Scheme: "http",
		},
	}
	request.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(request)
	if err != nil {
		return err
	}
	// The response body was previously never closed, leaking the
	// connection on every binding.
	defer resp.Body.Close()
	if resp.StatusCode != 201 {
		return errors.New("Binding: Unexpected HTTP status code: " + resp.Status)
	}
	// Emit a Kubernetes event that the Pod was scheduled successfully.
	message := fmt.Sprintf("Successfully assigned %s to %s", pod.Metadata.Name, node.Metadata.Name)
	timestamp := time.Now().UTC().Format(time.RFC3339)
	event := Event{
		Count: 1,
		Message: message,
		Metadata: Metadata{GenerateName: pod.Metadata.Name + "-"},
		Reason: "Scheduled",
		LastTimestamp: timestamp,
		FirstTimestamp: timestamp,
		Type: "Normal",
		Source: EventSource{Component: "hightower-scheduler"},
		InvolvedObject: ObjectReference{
			Kind: "Pod",
			Name: pod.Metadata.Name,
			Namespace: "default",
			Uid: pod.Metadata.Uid,
		},
	}
	log.Println(message)
	return postEvent(event)
}
|
/*
Implement PKCS#7 padding
A block cipher transforms a fixed-sized block (usually 8 or 16 bytes) of plaintext into ciphertext. But we almost never want to transform a single block; we encrypt irregularly-sized messages.
One way we account for irregularly-sized messages is by padding, creating a plaintext that is an even multiple of the blocksize. The most popular padding scheme is called PKCS#7.
So: pad any block to a specific block length, by appending the number of bytes of padding to the end of the block. For instance,
"YELLOW SUBMARINE"
... padded to 20 bytes would be:
"YELLOW SUBMARINE\x04\x04\x04\x04"
*/
package main
import (
"bufio"
"crypto/aes"
"crypto/cipher"
"flag"
"fmt"
"io"
"log"
"os"
)
// main parses the command line, wires stdin/stdout or the given file
// arguments, and runs AES-ECB encryption or decryption with PKCS#7 padding.
func main() {
	log.SetFlags(0)
	log.SetPrefix("aes-pkcs7-ecb: ")
	skey := "YELLOW SUBMARINE"
	// -e and -d are mode markers; their boolean values are unused — the
	// last one of the two that appears on the command line wins (see the
	// flag.Visit walk below).
	flag.Bool("e", true, "encrypt mode")
	flag.Bool("d", false, "decrypt mode")
	flag.StringVar(&skey, "k", skey, "key")
	flag.Usage = usage
	flag.Parse()
	key := []byte(skey)
	op := "e"
	flag.Visit(func(f *flag.Flag) {
		switch f.Name {
		case "e", "d":
			op = f.Name
		}
	})
	var (
		in = os.Stdin
		out = os.Stdout
		err error
	)
	// Positional args: [input] [output]; the fallthrough lets the
	// two-argument case also open the input file.
	switch n := flag.NArg(); {
	case n >= 2:
		out, err = os.Create(flag.Arg(1))
		ck(err)
		fallthrough
	case n >= 1:
		in, err = os.Open(flag.Arg(0))
		ck(err)
	}
	defer in.Close()
	r := bufio.NewReader(in)
	w := bufio.NewWriter(out)
	cipher, err := aes.NewCipher(key)
	ck(err)
	switch op {
	case "e":
		encrypt(r, w, cipher, key)
	case "d":
		decrypt(r, w, cipher, key)
	default:
		log.Fatalf("unknown op %q", op)
	}
	// Flush buffered output and surface close errors explicitly (out may
	// be a real file whose close can fail).
	ck(w.Flush())
	ck(out.Close())
}
// ck aborts the program with the error message if err is non-nil.
func ck(err error) {
	if err != nil {
		log.Fatal(err)
	}
}
// usage prints command-line help and exits with status 2.
func usage() {
	fmt.Fprintln(os.Stderr, "usage: aes-pkcs7-ecb [options] [input] [output]")
	flag.PrintDefaults()
	os.Exit(2)
}
func encrypt(r io.Reader, w io.Writer, cipher cipher.Block, key []byte) {
var dst, src [aes.BlockSize]byte
for run := true; run; {
n, err := io.ReadFull(r, src[:])
if err == io.EOF {
for i := range src {
src[i] = byte(len(src))
}
run = false
} else if err == io.ErrUnexpectedEOF {
for i := n; i < len(src); i++ {
src[i] = byte(len(src) - n)
}
run = false
} else if err != nil {
ck(err)
}
cipher.Encrypt(dst[:], src[:])
w.Write(dst[:])
}
}
func decrypt(r io.Reader, w io.Writer, cipher cipher.Block, key []byte) {
var dst, src, tmp [aes.BlockSize]byte
var drain bool
for {
_, err := io.ReadFull(r, tmp[:])
if err == io.EOF {
break
} else if err != nil {
log.Fatalf("%v", err)
}
if drain {
cipher.Decrypt(dst[:], src[:])
w.Write(dst[:])
} else {
copy(src[:], tmp[:])
}
drain = !drain
}
cipher.Decrypt(dst[:], tmp[:])
v := dst[len(dst)-1]
if v == 0 || v > aes.BlockSize {
log.Fatalf("invalid pkcs7 padding value %v", v)
}
for i := 0; i < int(v); i++ {
n := len(dst)-1-i
if dst[n] != v {
log.Fatalf("invalid pkcs7 padding, expected %#x got %#x at pos %d", v, dst[n], n)
}
}
w.Write(dst[:len(dst)-int(v)])
}
|
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grumpy
import (
"bytes"
"fmt"
"math"
"math/big"
"strings"
"unicode"
)
var (
	// Builtins contains all of the Python built-in identifiers.
	Builtins = NewDict()
	// builtinStr is the module name under which builtins are exposed.
	builtinStr = NewStr("__builtin__")
	// ExceptionTypes contains all builtin exception types.
	ExceptionTypes []*Type
	// EllipsisType is the object representing the Python 'ellipsis' type.
	EllipsisType = newSimpleType("ellipsis", ObjectType)
	// Ellipsis is the singleton ellipsis object representing the Python
	// 'Ellipsis' object.
	Ellipsis = &Object{typ: EllipsisType}
	// NoneType is the object representing the Python 'NoneType' type.
	NoneType = newSimpleType("NoneType", ObjectType)
	// None is the singleton NoneType object representing the Python 'None'
	// object.
	None = &Object{typ: NoneType}
	// NotImplementedType is the object representing the Python
	// 'NotImplementedType' object.
	NotImplementedType = newSimpleType("NotImplementedType", ObjectType)
	// NotImplemented is the singleton NotImplementedType object
	// representing the Python 'NotImplemented' object.
	NotImplemented = newObject(NotImplementedType)
	unboundLocalType = newSimpleType("UnboundLocalType", ObjectType)
	// UnboundLocal is a singleton held by local variables in generated
	// code before they are bound.
	UnboundLocal = newObject(unboundLocalType)
)
// ellipsisRepr implements repr() for the Ellipsis singleton.
func ellipsisRepr(*Frame, *Object) (*Object, *BaseException) {
	return NewStr("Ellipsis").ToObject(), nil
}

// noneRepr implements repr() for the None singleton.
func noneRepr(*Frame, *Object) (*Object, *BaseException) {
	return NewStr("None").ToObject(), nil
}

// notImplementedRepr implements repr() for the NotImplemented singleton.
func notImplementedRepr(*Frame, *Object) (*Object, *BaseException) {
	return NewStr("NotImplemented").ToObject(), nil
}

// initEllipsisType marks ellipsis neither instantiable nor usable as a base
// class and installs its repr slot.
func initEllipsisType(map[string]*Object) {
	EllipsisType.flags &= ^(typeFlagInstantiable | typeFlagBasetype)
	EllipsisType.slots.Repr = &unaryOpSlot{ellipsisRepr}
}

// initNoneType configures NoneType the same way as ellipsis.
func initNoneType(map[string]*Object) {
	NoneType.flags &= ^(typeFlagInstantiable | typeFlagBasetype)
	NoneType.slots.Repr = &unaryOpSlot{noneRepr}
}

// initNotImplementedType configures NotImplementedType.
func initNotImplementedType(map[string]*Object) {
	NotImplementedType.flags &= ^(typeFlagInstantiable | typeFlagBasetype)
	NotImplementedType.slots.Repr = &unaryOpSlot{notImplementedRepr}
}

// initUnboundLocalType configures the internal unbound-local sentinel type.
func initUnboundLocalType(map[string]*Object) {
	unboundLocalType.flags &= ^(typeFlagInstantiable | typeFlagBasetype)
}

// typeState tracks the initialization lifecycle of a builtin type.
type typeState int

const (
	// typeStateNotReady means initialization has not begun.
	typeStateNotReady typeState = iota
	// typeStateInitializing means initialization is underway; observing
	// it again indicates a dependency cycle (see initBuiltinType).
	typeStateInitializing
	// typeStateReady means the type is fully initialized.
	typeStateReady
)

// builtinTypeInit is a per-type hook run while preparing a builtin type.
type builtinTypeInit func(map[string]*Object)

// builtinTypeInfo records the initialization state and hook for one builtin
// type. global marks types that are exposed as builtin identifiers —
// presumably via the Builtins dict; verify.
type builtinTypeInfo struct {
	state typeState
	init builtinTypeInit
	global bool
}
// builtinTypes registers every builtin type with its optional init hook and
// whether it is globally visible. initBuiltinType consults this map to
// initialize bases before subtypes.
var builtinTypes = map[*Type]*builtinTypeInfo{
	ArithmeticErrorType: {global: true},
	AssertionErrorType: {global: true},
	AttributeErrorType: {global: true},
	BaseExceptionType: {init: initBaseExceptionType, global: true},
	BaseStringType: {init: initBaseStringType, global: true},
	BoolType: {init: initBoolType, global: true},
	ByteArrayType: {init: initByteArrayType, global: true},
	BytesWarningType: {global: true},
	CodeType: {},
	ComplexType: {init: initComplexType, global: true},
	ClassMethodType: {init: initClassMethodType, global: true},
	DeprecationWarningType: {global: true},
	dictItemIteratorType: {init: initDictItemIteratorType},
	dictKeyIteratorType: {init: initDictKeyIteratorType},
	dictValueIteratorType: {init: initDictValueIteratorType},
	DictType: {init: initDictType, global: true},
	EllipsisType: {init: initEllipsisType, global: true},
	enumerateType: {init: initEnumerateType, global: true},
	EnvironmentErrorType: {global: true},
	EOFErrorType: {global: true},
	ExceptionType: {global: true},
	FileType: {init: initFileType, global: true},
	FloatType: {init: initFloatType, global: true},
	FrameType: {init: initFrameType},
	FrozenSetType: {init: initFrozenSetType, global: true},
	FunctionType: {init: initFunctionType},
	FutureWarningType: {global: true},
	GeneratorType: {init: initGeneratorType},
	ImportErrorType: {global: true},
	ImportWarningType: {global: true},
	IndexErrorType: {global: true},
	IntType: {init: initIntType, global: true},
	IOErrorType: {global: true},
	KeyboardInterruptType: {global: true},
	KeyErrorType: {global: true},
	listIteratorType: {init: initListIteratorType},
	ListType: {init: initListType, global: true},
	LongType: {init: initLongType, global: true},
	LookupErrorType: {global: true},
	MemoryErrorType: {global: true},
	MethodType: {init: initMethodType},
	ModuleType: {init: initModuleType},
	NameErrorType: {global: true},
	nativeBoolMetaclassType: {init: initNativeBoolMetaclassType},
	nativeFuncType: {init: initNativeFuncType},
	nativeMetaclassType: {init: initNativeMetaclassType},
	nativeSliceType: {init: initNativeSliceType},
	nativeType: {init: initNativeType},
	NoneType: {init: initNoneType, global: true},
	NotImplementedErrorType: {global: true},
	NotImplementedType: {init: initNotImplementedType, global: true},
	ObjectType: {init: initObjectType, global: true},
	OSErrorType: {global: true},
	OverflowErrorType: {global: true},
	PendingDeprecationWarningType: {global: true},
	PropertyType: {init: initPropertyType, global: true},
	rangeIteratorType: {init: initRangeIteratorType, global: true},
	ReferenceErrorType: {global: true},
	RuntimeErrorType: {global: true},
	RuntimeWarningType: {global: true},
	seqIteratorType: {init: initSeqIteratorType},
	SetType: {init: initSetType, global: true},
	sliceIteratorType: {init: initSliceIteratorType},
	SliceType: {init: initSliceType, global: true},
	StandardErrorType: {global: true},
	StaticMethodType: {init: initStaticMethodType, global: true},
	StopIterationType: {global: true},
	StrType: {init: initStrType, global: true},
	superType: {init: initSuperType, global: true},
	SyntaxErrorType: {global: true},
	SyntaxWarningType: {global: true},
	SystemErrorType: {global: true},
	SystemExitType: {global: true, init: initSystemExitType},
	TracebackType: {init: initTracebackType},
	TupleType: {init: initTupleType, global: true},
	TypeErrorType: {global: true},
	TypeType: {init: initTypeType, global: true},
	UnboundLocalErrorType: {global: true},
	unboundLocalType: {init: initUnboundLocalType},
	UnicodeDecodeErrorType: {global: true},
	UnicodeEncodeErrorType: {global: true},
	UnicodeErrorType: {global: true},
	UnicodeType: {init: initUnicodeType, global: true},
	UnicodeWarningType: {global: true},
	UserWarningType: {global: true},
	ValueErrorType: {global: true},
	WarningType: {global: true},
	WeakRefType: {init: initWeakRefType},
	xrangeType: {init: initXRangeType, global: true},
	ZeroDivisionErrorType: {global: true},
}
// initBuiltinType initializes typ, first recursively initializing its base
// types. It is idempotent, aborts the process on an initialization cycle or
// an unregistered base, and appends exception types to ExceptionTypes.
func initBuiltinType(typ *Type, info *builtinTypeInfo) {
	if info.state == typeStateReady {
		return
	}
	// Re-entering a type that is mid-initialization means the base-type
	// graph is cyclic.
	if info.state == typeStateInitializing {
		logFatal(fmt.Sprintf("cycle in type initialization for: %s", typ.name))
	}
	info.state = typeStateInitializing
	for _, base := range typ.bases {
		baseInfo, ok := builtinTypes[base]
		if !ok {
			logFatal(fmt.Sprintf("base type not registered for: %s", typ.name))
		}
		initBuiltinType(base, baseInfo)
	}
	prepareBuiltinType(typ, info.init)
	info.state = typeStateReady
	if typ.isSubclass(BaseExceptionType) {
		ExceptionTypes = append(ExceptionTypes, typ)
	}
}
func builtinAbs(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
if raised := checkFunctionArgs(f, "abs", args, ObjectType); raised != nil {
return nil, raised
}
return Abs(f, args[0])
}
// builtinMapFn implements the builtin map() function. It applies args[0] to
// the elements of the remaining iterables (zipped longest-first via
// zipLongest, so shorter iterables are padded with None) and returns a list.
// When the function is None, map() behaves like zip() (tuples) or the
// identity (single iterable).
func builtinMapFn(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	argc := len(args)
	if argc < 2 {
		return nil, f.RaiseType(TypeErrorType, "map() requires at least two args")
	}
	result := make([]*Object, 0, 2)
	z, raised := zipLongest(f, args[1:])
	if raised != nil {
		return nil, raised
	}
	for _, tuple := range z {
		if args[0] == None {
			if argc == 2 {
				// map(None, it) is the identity over it.
				result = append(result, tuple[0])
			} else {
				// map(None, it1, it2, ...) behaves like zip with None padding.
				result = append(result, NewTuple(tuple...).ToObject())
			}
		} else {
			ret, raised := args[0].Call(f, tuple, nil)
			if raised != nil {
				return nil, raised
			}
			result = append(result, ret)
		}
	}
	return NewList(result...).ToObject(), nil
}
// builtinFilter implements the builtin filter() function. It returns the
// elements of args[1] for which args[0] is truthy (or which are themselves
// truthy when args[0] is None). Mirroring CPython, the result is a tuple,
// str or unicode when the input has that type, and a list otherwise.
// NOTE(review): this uses checkMethodArgs where sibling builtins use
// checkFunctionArgs — presumably only the error wording differs; confirm.
func builtinFilter(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	if raised := checkMethodArgs(f, "filter", args, ObjectType, ObjectType); raised != nil {
		return nil, raised
	}
	fn := args[0]
	l := args[1]
	// filterFunc decides whether an element is kept.
	filterFunc := IsTrue
	if fn != None {
		filterFunc = func(f *Frame, o *Object) (bool, *BaseException) {
			result, raised := fn.Call(f, Args{o}, nil)
			if raised != nil {
				return false, raised
			}
			return IsTrue(f, result)
		}
	}
	switch {
	// CPython will return the same type if the second type is tuple or string, else return a list.
	case l.isInstance(TupleType):
		result := make([]*Object, 0)
		for _, item := range toTupleUnsafe(l).elems {
			ret, raised := filterFunc(f, item)
			if raised != nil {
				return nil, raised
			}
			if ret {
				result = append(result, item)
			}
		}
		return NewTuple(result...).ToObject(), nil
	case l.isInstance(StrType):
		if fn == None {
			// Every non-empty byte "element" of a str is truthy, so the
			// input can be returned unchanged.
			return l, nil
		}
		var result bytes.Buffer
		// Filter byte-by-byte; each element is a one-character str.
		for _, item := range []byte(toStrUnsafe(l).Value()) {
			ret, raised := filterFunc(f, NewStr(string(item)).ToObject())
			if raised != nil {
				return nil, raised
			}
			if ret {
				result.WriteByte(item)
			}
		}
		return NewStr(result.String()).ToObject(), nil
	case l.isInstance(UnicodeType):
		if fn == None {
			return l, nil
		}
		var result []rune
		// Filter rune-by-rune; each element is a one-character unicode.
		for _, item := range toUnicodeUnsafe(l).Value() {
			ret, raised := filterFunc(f, NewUnicodeFromRunes([]rune{item}).ToObject())
			if raised != nil {
				return nil, raised
			}
			if ret {
				result = append(result, item)
			}
		}
		return NewUnicodeFromRunes(result).ToObject(), nil
	default:
		// Generic iterables produce a list.
		result := make([]*Object, 0)
		raised := seqForEach(f, l, func(item *Object) (raised *BaseException) {
			ret, raised := filterFunc(f, item)
			if raised != nil {
				return raised
			}
			if ret {
				result = append(result, item)
			}
			return nil
		})
		if raised != nil {
			return nil, raised
		}
		return NewList(result...).ToObject(), nil
	}
}
// builtinAll implements the builtin all() function: True iff no element of
// the iterable is falsy.
func builtinAll(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	if raised := checkFunctionArgs(f, "all", args, ObjectType); raised != nil {
		return nil, raised
	}
	// Short-circuit on the first falsy element.
	isFalsy := func(o *Object) (bool, *BaseException) {
		truthy, raised := IsTrue(f, o)
		if raised != nil {
			return false, raised
		}
		return !truthy, nil
	}
	foundFalsy, raised := seqFindFirst(f, args[0], isFalsy)
	if raised != nil {
		return nil, raised
	}
	return GetBool(!foundFalsy).ToObject(), nil
}
// builtinAny implements the builtin any() function: True iff some element of
// the iterable is truthy.
func builtinAny(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	if raised := checkFunctionArgs(f, "any", args, ObjectType); raised != nil {
		return nil, raised
	}
	// Short-circuit on the first truthy element.
	isTruthy := func(o *Object) (bool, *BaseException) {
		truthy, raised := IsTrue(f, o)
		if raised != nil {
			return false, raised
		}
		return truthy, nil
	}
	foundTruthy, raised := seqFindFirst(f, args[0], isTruthy)
	if raised != nil {
		return nil, raised
	}
	return GetBool(foundTruthy).ToObject(), nil
}
// builtinBin implements the builtin bin() function: the binary string
// representation ("0b...") of an integer-like object, obtained via __index__.
func builtinBin(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	if raised := checkFunctionArgs(f, "bin", args, ObjectType); raised != nil {
		return nil, raised
	}
	index, raised := Index(f, args[0])
	if raised != nil {
		return nil, raised
	}
	// A nil index with no exception means the object has no __index__.
	if index == nil {
		format := "%s object cannot be interpreted as an index"
		return nil, f.RaiseType(TypeErrorType, fmt.Sprintf(format, args[0].typ.Name()))
	}
	return NewStr(numberToBase("0b", 2, index)).ToObject(), nil
}
// builtinCallable implements the builtin callable() function by checking
// whether the argument's type fills in the Call slot.
func builtinCallable(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	if raised := checkFunctionArgs(f, "callable", args, ObjectType); raised != nil {
		return nil, raised
	}
	if args[0].Type().slots.Call != nil {
		return True.ToObject(), nil
	}
	return False.ToObject(), nil
}
// builtinChr implements the builtin chr() function: the one-byte string for
// an integer in [0, 256).
func builtinChr(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	if raised := checkFunctionArgs(f, "chr", args, IntType); raised != nil {
		return nil, raised
	}
	c := toIntUnsafe(args[0]).Value()
	if c >= 0 && c <= 255 {
		return NewStr(string([]byte{byte(c)})).ToObject(), nil
	}
	return nil, f.RaiseType(ValueErrorType, "chr() arg not in range(256)")
}
// builtinCmp implements the builtin cmp() function.
func builtinCmp(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	raised := checkFunctionArgs(f, "cmp", args, ObjectType, ObjectType)
	if raised != nil {
		return nil, raised
	}
	return Compare(f, args[0], args[1])
}
// builtinDelAttr implements the builtin delattr() function.
func builtinDelAttr(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	raised := checkFunctionArgs(f, "delattr", args, ObjectType, StrType)
	if raised != nil {
		return nil, raised
	}
	return None, DelAttr(f, args[0], toStrUnsafe(args[1]))
}
// builtinDir implements the builtin dir() function: a sorted list of
// attribute names gathered from the object's dict and its type's MRO (or,
// for types and modules, from their own dicts).
func builtinDir(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	// TODO: Support __dir__.
	if raised := checkFunctionArgs(f, "dir", args, ObjectType); raised != nil {
		return nil, raised
	}
	d := NewDict()
	o := args[0]
	switch {
	case o.isInstance(TypeType):
		for _, t := range toTypeUnsafe(o).mro {
			if raised := d.Update(f, t.Dict().ToObject()); raised != nil {
				return nil, raised
			}
		}
	case o.isInstance(ModuleType):
		// Propagate a failed update instead of silently ignoring it.
		if raised := d.Update(f, o.Dict().ToObject()); raised != nil {
			return nil, raised
		}
	default:
		// d is already a fresh dict; collect the instance dict (if any)
		// followed by every dict in the type's MRO.
		if dict := o.Dict(); dict != nil {
			if raised := d.Update(f, dict.ToObject()); raised != nil {
				return nil, raised
			}
		}
		for _, t := range o.typ.mro {
			if raised := d.Update(f, t.Dict().ToObject()); raised != nil {
				return nil, raised
			}
		}
	}
	l := d.Keys(f)
	if raised := l.Sort(f); raised != nil {
		return nil, raised
	}
	return l.ToObject(), nil
}
// builtinDivMod implements the builtin divmod() function.
func builtinDivMod(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	raised := checkFunctionArgs(f, "divmod", args, ObjectType, ObjectType)
	if raised != nil {
		return nil, raised
	}
	return DivMod(f, args[0], args[1])
}
// builtinFrame implements the internal __frame__() builtin, exposing the
// current frame object to Python code.
func builtinFrame(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	if raised := checkFunctionArgs(f, "__frame__", args); raised != nil {
		return nil, raised
	}
	// Mark the frame as escaping — presumably so it is not recycled while
	// Python code holds a reference; confirm against frame pooling logic.
	f.taken = true
	return f.ToObject(), nil
}
// builtinGetAttr implements the builtin getattr() function. With two args it
// raises on a missing attribute; the optional third arg is returned as a
// default instead.
func builtinGetAttr(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	expectedTypes := []*Type{ObjectType, StrType, ObjectType}
	argc := len(args)
	if argc == 2 {
		// The default is optional; validate only the first two args.
		expectedTypes = expectedTypes[:2]
	}
	if raised := checkFunctionArgs(f, "getattr", args, expectedTypes...); raised != nil {
		return nil, raised
	}
	// def stays nil when no default was supplied, which makes GetAttr raise.
	var def *Object
	if argc == 3 {
		def = args[2]
	}
	return GetAttr(f, args[0], toStrUnsafe(args[1]), def)
}
// builtinGlobals implements the builtin globals() function, returning the
// calling frame's global namespace.
func builtinGlobals(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	raised := checkFunctionArgs(f, "globals", args)
	if raised != nil {
		return nil, raised
	}
	return f.globals.ToObject(), nil
}
// builtinHasAttr implements the builtin hasattr() function. An
// AttributeError from the lookup means False; any other exception
// propagates.
func builtinHasAttr(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	if raised := checkFunctionArgs(f, "hasattr", args, ObjectType, StrType); raised != nil {
		return nil, raised
	}
	if _, raised := GetAttr(f, args[0], toStrUnsafe(args[1]), nil); raised != nil {
		if raised.isInstance(AttributeErrorType) {
			// Swallow the AttributeError and clear the frame's exception state.
			f.RestoreExc(nil, nil)
			return False.ToObject(), nil
		}
		return nil, raised
	}
	return True.ToObject(), nil
}
// builtinHash implements the builtin hash() function.
func builtinHash(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	raised := checkFunctionArgs(f, "hash", args, ObjectType)
	if raised != nil {
		return nil, raised
	}
	h, raised := Hash(f, args[0])
	if raised != nil {
		return nil, raised
	}
	return h.ToObject(), nil
}
// builtinHex implements the builtin hex() function.
// In Python3 we would call __index__ similarly to builtinBin().
func builtinHex(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	raised := checkFunctionArgs(f, "hex", args, ObjectType)
	if raised != nil {
		return nil, raised
	}
	return Hex(f, args[0])
}
// builtinID implements the builtin id() function, returning the object's
// address as an int.
func builtinID(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	if raised := checkFunctionArgs(f, "id", args, ObjectType); raised != nil {
		return nil, raised
	}
	return NewInt(int(uintptr(args[0].toPointer()))).ToObject(), nil
}
// builtinIsInstance implements the builtin isinstance() function.
func builtinIsInstance(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	raised := checkFunctionArgs(f, "isinstance", args, ObjectType, ObjectType)
	if raised != nil {
		return nil, raised
	}
	ok, raised := IsInstance(f, args[0], args[1])
	if raised != nil {
		return nil, raised
	}
	return GetBool(ok).ToObject(), nil
}
// builtinIsSubclass implements the builtin issubclass() function.
func builtinIsSubclass(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	raised := checkFunctionArgs(f, "issubclass", args, ObjectType, ObjectType)
	if raised != nil {
		return nil, raised
	}
	ok, raised := IsSubclass(f, args[0], args[1])
	if raised != nil {
		return nil, raised
	}
	return GetBool(ok).ToObject(), nil
}
// builtinIter implements the builtin iter() function.
func builtinIter(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	raised := checkFunctionArgs(f, "iter", args, ObjectType)
	if raised != nil {
		return nil, raised
	}
	return Iter(f, args[0])
}
// builtinLen implements the builtin len() function.
func builtinLen(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	raised := checkFunctionArgs(f, "len", args, ObjectType)
	if raised != nil {
		return nil, raised
	}
	length, raised := Len(f, args[0])
	if raised != nil {
		return nil, raised
	}
	return length.ToObject(), nil
}
// builtinMax implements the builtin max() function. See builtinMinMax.
func builtinMax(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	return builtinMinMax(f, true, args, kwargs)
}
// builtinMin implements the builtin min() function. See builtinMinMax.
func builtinMin(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	return builtinMinMax(f, false, args, kwargs)
}
// builtinNext implements the builtin next() function. A nil result with no
// exception from Next signals exhaustion and is converted to StopIteration.
func builtinNext(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	if raised := checkFunctionArgs(f, "next", args, ObjectType); raised != nil {
		return nil, raised
	}
	ret, raised := Next(f, args[0])
	if raised != nil {
		return nil, raised
	}
	if ret != nil {
		return ret, nil
	}
	return nil, f.Raise(StopIterationType.ToObject(), nil, nil)
}
// builtinOct implements the builtin oct() function.
// In Python3 we would call __index__ similarly to builtinBin().
func builtinOct(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	raised := checkFunctionArgs(f, "oct", args, ObjectType)
	if raised != nil {
		return nil, raised
	}
	return Oct(f, args[0])
}
// builtinOpen implements the builtin open() function by delegating to the
// file type's constructor.
func builtinOpen(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	return FileType.Call(f, args, kwargs)
}
// builtinOrd implements the builtin ord() function: the integer code of a
// one-character str (byte value) or unicode (code point).
func builtinOrd(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	const lenMsg = "ord() expected a character, but string of length %d found"
	if raised := checkFunctionArgs(f, "ord", args, BaseStringType); raised != nil {
		return nil, raised
	}
	o := args[0]
	var result int
	if o.isInstance(StrType) {
		s := toStrUnsafe(o).Value()
		if numChars := len(s); numChars != 1 {
			return nil, f.RaiseType(ValueErrorType, fmt.Sprintf(lenMsg, numChars))
		}
		result = int(([]byte(s))[0])
	} else {
		// checkFunctionArgs accepted BaseStringType, so this is unicode.
		s := toUnicodeUnsafe(o).Value()
		if numChars := len(s); numChars != 1 {
			return nil, f.RaiseType(ValueErrorType, fmt.Sprintf(lenMsg, numChars))
		}
		result = int(s[0])
	}
	return NewInt(result).ToObject(), nil
}
// builtinPrint implements the builtin print() function. The "sep" and "end"
// keyword arguments are honored (after str() conversion); "file" is parsed
// but currently ignored and output always goes to Stdout.
func builtinPrint(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	sep := " "
	end := "\n"
	file := Stdout
	for _, kwarg := range kwargs {
		switch kwarg.Name {
		case "sep":
			kwsep, raised := ToStr(f, kwarg.Value)
			if raised != nil {
				return nil, raised
			}
			sep = kwsep.Value()
		case "end":
			kwend, raised := ToStr(f, kwarg.Value)
			if raised != nil {
				return nil, raised
			}
			end = kwend.Value()
		case "file":
			// TODO: need to map Python sys.stdout, sys.stderr etc. to os.Stdout,
			// os.Stderr, but for other file-like objects would need to recover
			// to the file descriptor probably
		}
	}
	return nil, pyPrint(f, args, sep, end, file)
}
// builtinRange implements the builtin range() function by materializing an
// xrange into a list.
func builtinRange(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	xr, raised := xrangeType.Call(f, args, nil)
	if raised != nil {
		return nil, raised
	}
	return ListType.Call(f, []*Object{xr}, nil)
}
// builtinRawInput implements the builtin raw_input() function: it prints the
// optional prompt argument to stdout (no separator, no newline) and returns
// one line read from stdin with the trailing newline stripped. EOF raises
// EOFError.
func builtinRawInput(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	if len(args) > 1 {
		// Fixed typo: "expcted" -> "expected".
		msg := fmt.Sprintf("[raw_]input expected at most 1 arguments, got %d", len(args))
		return nil, f.RaiseType(TypeErrorType, msg)
	}
	// Constant messages need no Sprintf.
	if Stdin == nil {
		return nil, f.RaiseType(RuntimeErrorType, "[raw_]input: lost sys.stdin")
	}
	if Stdout == nil {
		return nil, f.RaiseType(RuntimeErrorType, "[raw_]input: lost sys.stdout")
	}
	if len(args) == 1 {
		if err := pyPrint(f, args, "", "", Stdout); err != nil {
			return nil, err
		}
	}
	line, err := Stdin.reader.ReadString('\n')
	if err != nil {
		return nil, f.RaiseType(EOFErrorType, "EOF when reading a line")
	}
	return NewStr(strings.TrimRight(line, "\n")).ToObject(), nil
}
// builtinRepr implements the builtin repr() function.
func builtinRepr(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	raised := checkFunctionArgs(f, "repr", args, ObjectType)
	if raised != nil {
		return nil, raised
	}
	s, raised := Repr(f, args[0])
	if raised != nil {
		return nil, raised
	}
	return s.ToObject(), nil
}
// builtinRound implements the builtin round() function: rounds a float to
// ndigits decimal places, half away from zero. NaN, infinities and 0.0 are
// returned unchanged.
func builtinRound(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	argc := len(args)
	expectedTypes := []*Type{ObjectType, ObjectType}
	if argc == 1 {
		expectedTypes = expectedTypes[:1]
	}
	if raised := checkFunctionArgs(f, "round", args, expectedTypes...); raised != nil {
		return nil, raised
	}
	ndigits := 0
	if argc > 1 {
		var raised *BaseException
		if ndigits, raised = IndexInt(f, args[1]); raised != nil {
			return nil, raised
		}
	}
	number, isFloat := floatCoerce(args[0])
	if !isFloat {
		return nil, f.RaiseType(TypeErrorType, "a float is required")
	}
	if math.IsNaN(number) || math.IsInf(number, 0) || number == 0.0 {
		return NewFloat(number).ToObject(), nil
	}
	// Round the magnitude, then restore the sign, so halves round away
	// from zero for negative inputs too.
	neg := false
	if number < 0 {
		neg = true
		number = -number
	}
	pow := math.Pow(10.0, float64(ndigits))
	result := math.Floor(number*pow+0.5) / pow
	if neg {
		result = -result
	}
	return NewFloat(result).ToObject(), nil
}
// builtinSetAttr implements the builtin setattr() function.
func builtinSetAttr(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	raised := checkFunctionArgs(f, "setattr", args, ObjectType, StrType, ObjectType)
	if raised != nil {
		return nil, raised
	}
	return None, SetAttr(f, args[0], toStrUnsafe(args[1]), args[2])
}
// builtinSorted implements the builtin sorted() function: a new sorted list
// built from the iterable argument. Only the "reverse" keyword is supported.
func builtinSorted(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
	// TODO: Support (cmp=None, key=None, reverse=False)
	if raised := checkFunctionArgs(f, "sorted", args, ObjectType); raised != nil {
		return nil, raised
	}
	result, raised := ListType.Call(f, Args{args[0]}, nil)
	if raised != nil {
		return nil, raised
	}
	// Sort can raise (e.g. a comparison failure); propagate instead of
	// silently dropping the exception.
	if raised := toListUnsafe(result).Sort(f); raised != nil {
		return nil, raised
	}
	// Implement reverse.
	reverse, raised := IsTrue(f, kwargs.get("reverse", None))
	if raised != nil {
		return nil, raised
	}
	if reverse {
		toListUnsafe(result).Reverse()
	}
	return result, nil
}
// builtinSum implements the builtin sum() function: folds Add over the
// iterable starting from the optional start value (default 0). Summing with
// a str start value is rejected, mirroring CPython.
func builtinSum(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	argc := len(args)
	expectedTypes := []*Type{ObjectType, ObjectType}
	if argc == 1 {
		expectedTypes = expectedTypes[:1]
	}
	if raised := checkFunctionArgs(f, "sum", args, expectedTypes...); raised != nil {
		return nil, raised
	}
	var result *Object
	if argc > 1 {
		if args[1].typ == StrType {
			return nil, f.RaiseType(TypeErrorType, "sum() can't sum strings [use ''.join(seq) instead]")
		}
		result = args[1]
	} else {
		result = NewInt(0).ToObject()
	}
	// Accumulate into result; any Add failure aborts the fold.
	raised := seqForEach(f, args[0], func(o *Object) (raised *BaseException) {
		result, raised = Add(f, result, o)
		return raised
	})
	if raised != nil {
		return nil, raised
	}
	return result, nil
}
// builtinUniChr implements the builtin unichr() function: the one-character
// unicode string for a code point in [0, unicode.MaxRune].
func builtinUniChr(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	if raised := checkFunctionArgs(f, "unichr", args, IntType); raised != nil {
		return nil, raised
	}
	cp := toIntUnsafe(args[0]).Value()
	if cp >= 0 && cp <= unicode.MaxRune {
		return NewUnicodeFromRunes([]rune{rune(cp)}).ToObject(), nil
	}
	return nil, f.RaiseType(ValueErrorType, fmt.Sprintf("unichr() arg not in range(0x%x)", unicode.MaxRune))
}
// builtinZip implements the builtin zip() function: a list of tuples where
// the i'th tuple holds the i'th element of each argument. Iteration stops as
// soon as any iterator is exhausted.
func builtinZip(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
	argc := len(args)
	if argc == 0 {
		return NewList().ToObject(), nil
	}
	result := make([]*Object, 0, 2)
	iters, raised := initIters(f, args)
	if raised != nil {
		return nil, raised
	}
Outer:
	for {
		elems := make([]*Object, argc)
		for i, iter := range iters {
			elem, raised := Next(f, iter)
			if raised != nil {
				if raised.isInstance(StopIterationType) {
					// First exhausted iterator ends the whole zip; clear
					// the swallowed StopIteration from the frame.
					f.RestoreExc(nil, nil)
					break Outer
				}
				return nil, raised
			}
			elems[i] = elem
		}
		result = append(result, NewTuple(elems...).ToObject())
	}
	return NewList(result...).ToObject(), nil
}
// init wires up the builtin namespace: it registers the builtin functions
// and constants, initializes every builtin type, exposes the global ones,
// interns all builtin names and publishes the result as Builtins.
func init() {
	builtinMap := map[string]*Object{
		"__debug__":      False.ToObject(),
		"__frame__":      newBuiltinFunction("__frame__", builtinFrame).ToObject(),
		"abs":            newBuiltinFunction("abs", builtinAbs).ToObject(),
		"all":            newBuiltinFunction("all", builtinAll).ToObject(),
		"any":            newBuiltinFunction("any", builtinAny).ToObject(),
		"bin":            newBuiltinFunction("bin", builtinBin).ToObject(),
		"callable":       newBuiltinFunction("callable", builtinCallable).ToObject(),
		"chr":            newBuiltinFunction("chr", builtinChr).ToObject(),
		"cmp":            newBuiltinFunction("cmp", builtinCmp).ToObject(),
		"delattr":        newBuiltinFunction("delattr", builtinDelAttr).ToObject(),
		"dir":            newBuiltinFunction("dir", builtinDir).ToObject(),
		"divmod":         newBuiltinFunction("divmod", builtinDivMod).ToObject(),
		"Ellipsis":       Ellipsis,
		"False":          False.ToObject(),
		"filter":         newBuiltinFunction("filter", builtinFilter).ToObject(),
		"getattr":        newBuiltinFunction("getattr", builtinGetAttr).ToObject(),
		"globals":        newBuiltinFunction("globals", builtinGlobals).ToObject(),
		"hasattr":        newBuiltinFunction("hasattr", builtinHasAttr).ToObject(),
		"hash":           newBuiltinFunction("hash", builtinHash).ToObject(),
		"hex":            newBuiltinFunction("hex", builtinHex).ToObject(),
		"id":             newBuiltinFunction("id", builtinID).ToObject(),
		"isinstance":     newBuiltinFunction("isinstance", builtinIsInstance).ToObject(),
		"issubclass":     newBuiltinFunction("issubclass", builtinIsSubclass).ToObject(),
		"iter":           newBuiltinFunction("iter", builtinIter).ToObject(),
		"len":            newBuiltinFunction("len", builtinLen).ToObject(),
		"map":            newBuiltinFunction("map", builtinMapFn).ToObject(),
		"max":            newBuiltinFunction("max", builtinMax).ToObject(),
		"min":            newBuiltinFunction("min", builtinMin).ToObject(),
		"next":           newBuiltinFunction("next", builtinNext).ToObject(),
		"None":           None,
		"NotImplemented": NotImplemented,
		"oct":            newBuiltinFunction("oct", builtinOct).ToObject(),
		"open":           newBuiltinFunction("open", builtinOpen).ToObject(),
		"ord":            newBuiltinFunction("ord", builtinOrd).ToObject(),
		"print":          newBuiltinFunction("print", builtinPrint).ToObject(),
		"range":          newBuiltinFunction("range", builtinRange).ToObject(),
		"raw_input":      newBuiltinFunction("raw_input", builtinRawInput).ToObject(),
		"repr":           newBuiltinFunction("repr", builtinRepr).ToObject(),
		"round":          newBuiltinFunction("round", builtinRound).ToObject(),
		"setattr":        newBuiltinFunction("setattr", builtinSetAttr).ToObject(),
		"sorted":         newBuiltinFunction("sorted", builtinSorted).ToObject(),
		"sum":            newBuiltinFunction("sum", builtinSum).ToObject(),
		"True":           True.ToObject(),
		"unichr":         newBuiltinFunction("unichr", builtinUniChr).ToObject(),
		"zip":            newBuiltinFunction("zip", builtinZip).ToObject(),
	}
	// Do type initialization in two phases so that we don't have to think
	// about hard-to-understand cycles.
	for typ, info := range builtinTypes {
		initBuiltinType(typ, info)
		if info.global {
			builtinMap[typ.name] = typ.ToObject()
		}
	}
	// Intern every builtin name so attribute lookups can use fast paths.
	for name := range builtinMap {
		InternStr(name)
	}
	Builtins = newStringDict(builtinMap)
}
// builtinMinMax implements the builtin min/max() functions. When doMax is
// true, the max is found, otherwise the min is found. There are two forms of
// the builtins. The first takes a single iterable argument and the result is
// the min/max of the elements of that sequence. The second form takes two or
// more args and returns the min/max of those. A "key" kwarg, when given, is
// applied to each element before comparison. For more details see:
// https://docs.python.org/2/library/functions.html#min
func builtinMinMax(f *Frame, doMax bool, args Args, kwargs KWArgs) (*Object, *BaseException) {
	name := "min"
	if doMax {
		name = "max"
	}
	if raised := checkFunctionVarArgs(f, name, args, ObjectType); raised != nil {
		return nil, raised
	}
	keyFunc := kwargs.get("key", nil)
	// selected is the min/max element found so far; selectedKey is its
	// comparison key (the element itself when no key function is given).
	var selected, selectedKey *Object
	partialFunc := func(o *Object) (raised *BaseException) {
		oKey := o
		if keyFunc != nil {
			oKey, raised = keyFunc.Call(f, Args{o}, nil)
			if raised != nil {
				return raised
			}
		}
		// sel dictates whether o is the new min/max. It defaults to
		// true when selected == nil (we don't yet have a selection).
		sel := true
		if selected != nil {
			result, raised := LT(f, selectedKey, oKey)
			if raised != nil {
				return raised
			}
			lt, raised := IsTrue(f, result)
			if raised != nil {
				return raised
			}
			// Select o when looking for max and selection < o, or
			// when looking for min and o < selection.
			sel = doMax && lt || !doMax && !lt
		}
		if sel {
			selected = o
			selectedKey = oKey
		}
		return nil
	}
	if len(args) == 1 {
		// Take min/max of the single iterable arg passed.
		if raised := seqForEach(f, args[0], partialFunc); raised != nil {
			return nil, raised
		}
		if selected == nil {
			return nil, f.RaiseType(ValueErrorType, fmt.Sprintf("%s() arg is an empty sequence", name))
		}
	} else {
		// Take min/max of the passed args.
		for _, arg := range args {
			if raised := partialFunc(arg); raised != nil {
				return nil, raised
			}
		}
	}
	return selected, nil
}
// numberToBase implements the builtins "bin", "hex", and "oct".
// base must be between 2 and 36, and o must be an instance of
// IntType or LongType. The returned string carries the given prefix,
// with a negative sign (if any) moved in front of the prefix,
// e.g. ("-0b", "101") rather than ("0b", "-101").
func numberToBase(prefix string, base int, o *Object) string {
	z := big.Int{}
	switch {
	case o.isInstance(LongType):
		z = toLongUnsafe(o).value
	case o.isInstance(IntType):
		z.SetInt64(int64(toIntUnsafe(o).Value()))
	default:
		panic("numberToBase requires an Int or Long argument")
	}
	s := z.Text(base)
	if s[0] == '-' {
		// Move the negative sign before the prefix.
		return "-" + prefix + s[1:]
	}
	return prefix + s
}
// initIters returns an iterator for each of the given iterables, failing
// fast on the first object that is not iterable.
func initIters(f *Frame, items []*Object) ([]*Object, *BaseException) {
	iters := make([]*Object, len(items))
	for i, item := range items {
		it, raised := Iter(f, item)
		if raised != nil {
			return nil, raised
		}
		iters[i] = it
	}
	return iters, nil
}
// zipLongest returns the list of aggregated elements from each of the
// iterables. If the iterables are of uneven length, missing values are
// filled in with None.
func zipLongest(f *Frame, args Args) ([][]*Object, *BaseException) {
	argc := len(args)
	result := make([][]*Object, 0, 2)
	iters, raised := initIters(f, args)
	if raised != nil {
		return nil, raised
	}
	for {
		noItems := true
		elems := make([]*Object, argc)
		for i, iter := range iters {
			if iter == nil {
				// This iterator was exhausted on an earlier pass. Keep
				// padding its column with None; leaving it as nil would
				// produce rows containing nil object pointers.
				elems[i] = None
				continue
			}
			elem, raised := Next(f, iter)
			if raised != nil {
				if raised.isInstance(StopIterationType) {
					// Mark exhaustion, pad with None, clear the exception.
					iters[i] = nil
					elems[i] = None
					f.RestoreExc(nil, nil)
					continue
				}
				return nil, raised
			}
			noItems = false
			elems[i] = elem
		}
		// All iterators were exhausted this round: done.
		if noItems {
			break
		}
		result = append(result, elems)
	}
	return result, nil
}
|
package field_test
import (
"bytes"
"encoding/hex"
"io"
"testing"
"github.com/tombell/go-serato/serato/field"
)
// XXX: Location field appears to always be empty in session files.
// TestNewLocationField checks that a well-formed location field (identifier
// 0x03, correct length, NUL-terminated UTF-16 payload) parses without error.
func TestNewLocationField(t *testing.T) {
	data, _ := hex.DecodeString("0000000300000072002F00550073006500720073002F0074006F006D00620065006C006C002F004D0075007300690063002F005F005F0020004E006500770020005F005F002F0043006C0061007300730069006300200048006F007500730065002000530075006D006D006500720020002700310038002F0000")
	buf := bytes.NewBuffer(data)
	hdr, err := field.NewHeader(buf)
	if err != nil {
		t.Fatalf("expected NewHeader err to be nil, got %v", err)
	}
	location, err := field.NewLocationField(hdr, buf)
	if err != nil {
		t.Fatalf("expected NewLocationField err to be nil, got %v", err)
	}
	if location == nil {
		t.Fatal("expected location to not be nil")
	}
}
// TestNewLocationFieldUnexpectedEOF checks that a payload shorter than the
// length declared in the header yields io.ErrUnexpectedEOF.
func TestNewLocationFieldUnexpectedEOF(t *testing.T) {
	// Same fixture as the happy path but with the trailing bytes removed.
	data, _ := hex.DecodeString("0000000300000072002F00550073006500720073002F0074006F006D00620065006C006C002F004D0075007300690063002F005F005F0020004E006500770020005F005F002F0043006C0061007300730069006300200048006F007500730065002000530075006D006D006500720020002700310038002F")
	buf := bytes.NewBuffer(data)
	hdr, err := field.NewHeader(buf)
	if err != nil {
		t.Fatalf("expected NewHeader err to be nil, got %v", err)
	}
	_, err = field.NewLocationField(hdr, buf)
	if err != io.ErrUnexpectedEOF {
		t.Fatalf("expected NewLocationField err to be io.ErrUnexpectedEOF, got %v", err)
	}
}
// TestNewLocationFieldUnexpectedIdentifier checks that a header whose
// identifier is not the location field's (0x04 instead of 0x03) yields
// field.ErrUnexpectedIdentifier.
func TestNewLocationFieldUnexpectedIdentifier(t *testing.T) {
	data, _ := hex.DecodeString("0000000400000072002F00550073006500720073002F0074006F006D00620065006C006C002F004D0075007300690063002F005F005F0020004E006500770020005F005F002F0043006C0061007300730069006300200048006F007500730065002000530075006D006D006500720020002700310038002F")
	buf := bytes.NewBuffer(data)
	hdr, err := field.NewHeader(buf)
	if err != nil {
		t.Fatalf("expected NewHeader err to be nil, got %v", err)
	}
	_, err = field.NewLocationField(hdr, buf)
	if err != field.ErrUnexpectedIdentifier {
		t.Fatalf("expected NewLocationField err to be field.ErrUnexpectedIdentifier, got %v", err)
	}
}
// TestLocationValue checks that Value() decodes the UTF-16 payload into the
// expected path string.
func TestLocationValue(t *testing.T) {
	data, _ := hex.DecodeString("0000000300000072002F00550073006500720073002F0074006F006D00620065006C006C002F004D0075007300690063002F005F005F0020004E006500770020005F005F002F0043006C0061007300730069006300200048006F007500730065002000530075006D006D006500720020002700310038002F0000")
	buf := bytes.NewBuffer(data)
	hdr, err := field.NewHeader(buf)
	if err != nil {
		t.Fatalf("expected NewHeader err to be nil, got %v", err)
	}
	location, err := field.NewLocationField(hdr, buf)
	if err != nil {
		t.Fatalf("expected NewLocationField err to be nil, got %v", err)
	}
	actual := location.Value()
	expected := "/Users/tombell/Music/__ New __/Classic House Summer '18/"
	if actual != expected {
		t.Fatalf("expected value to be %v, got %v", expected, actual)
	}
}
// TestLocationString checks that String() matches Value() for a parsed
// location field.
func TestLocationString(t *testing.T) {
	data, _ := hex.DecodeString("0000000300000072002F00550073006500720073002F0074006F006D00620065006C006C002F004D0075007300690063002F005F005F0020004E006500770020005F005F002F0043006C0061007300730069006300200048006F007500730065002000530075006D006D006500720020002700310038002F0000")
	buf := bytes.NewBuffer(data)
	hdr, err := field.NewHeader(buf)
	if err != nil {
		t.Fatalf("expected NewHeader err to be nil, got %v", err)
	}
	location, err := field.NewLocationField(hdr, buf)
	if err != nil {
		t.Fatalf("expected NewLocationField err to be nil, got %v", err)
	}
	actual := location.String()
	expected := "/Users/tombell/Music/__ New __/Classic House Summer '18/"
	if actual != expected {
		t.Fatalf("expected value to be %v, got %v", expected, actual)
	}
}
|
package metrics
import "github.com/prometheus/client_golang/prometheus"
const (
	// Controller names, used as keys into reconcileMetrics.
	operatorController                   = "operator"
	adoptionCSVController                = "adoption_csv"
	adoptionSubscriptionController       = "adoption_subscription"
	operatorConditionController          = "operator_condition"
	operatorConditionGeneratorController = "operator_condition_generator"
)
var (
	// reconcileMetrics maps a controller name to its reconcile counter.
	// Controllers without an entry here are silently skipped by
	// emitReconcile. NOTE(review): no registration code is visible in this
	// chunk — confirm where entries are added.
	reconcileMetrics = map[string]*prometheus.CounterVec{}
)
// EmitOperatorReconcile counts a reconcile of the operator controller.
func EmitOperatorReconcile(namespace, name string) {
	emitReconcile(operatorController, namespace, name)
}
// EmitAdoptionCSVReconcile counts a reconcile of the adoption CSV controller.
func EmitAdoptionCSVReconcile(namespace, name string) {
	emitReconcile(adoptionCSVController, namespace, name)
}
// EmitAdoptionSubscriptionReconcile counts a reconcile of the adoption
// subscription controller.
func EmitAdoptionSubscriptionReconcile(namespace, name string) {
	emitReconcile(adoptionSubscriptionController, namespace, name)
}
// EmitOperatorConditionReconcile counts a reconcile of the operator
// condition controller.
func EmitOperatorConditionReconcile(namespace, name string) {
	emitReconcile(operatorConditionController, namespace, name)
}
// EmitOperatorConditionGeneratorReconcile counts a reconcile of the operator
// condition generator controller.
func EmitOperatorConditionGeneratorReconcile(namespace, name string) {
	emitReconcile(operatorConditionGeneratorController, namespace, name)
}
// emitReconcile increments the reconcile counter registered for
// controllerName, labeled by namespace and name. Unregistered controllers
// are a no-op by design.
func emitReconcile(controllerName, namespace, name string) {
	if counter, ok := reconcileMetrics[controllerName]; ok {
		counter.WithLabelValues(namespace, name).Inc()
	}
}
|
package filesystem
import (
"fmt"
"github.com/pkg/errors"
"got/internal/objects"
)
// Commit records the working tree as a new commit with the given message.
// When HEAD points at a ref the commit extends that branch; otherwise this
// is the repository's first commit.
func (g *Got) Commit(message string) error {
	headType, err := g.HeadType()
	if err != nil {
		return errors.Wrap(err, "couldn't perform commit")
	}
	if headType != HeadTypeRef {
		return g.firstCommit(message)
	}
	return g.commitAtRef(message)
}
func (g *Got) commitAtRef(message string) error {
ref, err := g.HeadAsRef()
if err != nil {
return errors.Wrap(err, "couldn't perform commit")
}
currentCommitID, err := g.Refs.IDFromRef(ref)
if err != nil {
return errors.Wrap(err, "couldn't perform commit")
}
treeID, err := g.WriteTree()
if err != nil {
return errors.Wrap(err, "couldn't perform commit")
}
// Update branch head if it exists
newCommitID, err := g.CommitTree(message, treeID, ¤tCommitID)
err = g.Refs.UpdateRef(ref, newCommitID)
if err != nil {
return errors.Wrap(err, "couldn't perform commit")
}
return nil
}
// firstCommit writes the current tree as a parentless commit, creates the
// "master" branch pointing at it, and points HEAD at that branch.
func (g *Got) firstCommit(message string) error {
	treeID, err := g.WriteTree()
	if err != nil {
		return errors.Wrap(err, "couldn't perform commit")
	}
	// nil parent: this is the root commit.
	newCommitID, err := g.CommitTree(message, treeID, nil)
	if err != nil {
		return errors.Wrap(err, "couldn't perform commit")
	}
	ref, err := g.Refs.CreateBranchAt("master", newCommitID)
	if err != nil {
		return errors.Wrap(err, "couldn't perform commit")
	}
	err = g.updateHeadWithRef(ref)
	if err != nil {
		return errors.Wrap(err, "couldn't perform commit")
	}
	return nil
}
// CommitTree creates a commit object for treeID (with the optional parent)
// using a fixed placeholder author, stores it, and returns its ID. Progress
// is logged to stdout.
func (g *Got) CommitTree(msg string, treeID objects.ID, parentID *objects.ID) (objects.ID, error) {
	commit := objects.NewCommit(treeID, parentID, "John Doe <john@doe.com> 0123456789 +0000", msg)
	fmt.Printf("Committing %s", treeID)
	if parentID != nil {
		fmt.Printf(" with parent %s", *parentID)
	}
	fmt.Println("...")
	// The hand-built textual serialization previously assembled here was
	// never used (the commit object serializes itself via Store), so it has
	// been removed as dead code.
	return commit.ID(), g.Objects.Store(commit)
}
|
package db
import (
"core"
"encoding/json"
"entity"
)
// Replica describes one replica group: the PDA ids that belong to it and the
// PDA configuration they share.
type Replica struct {
	// GroupMembers holds the PDA ids in this group.
	GroupMembers []string
	// PdaConf is the configuration shared by all members.
	PdaConf entity.PDAConf
}
// InMemoryStore keeps PDA processors and replica groups in process memory.
// It performs no locking; callers must serialize concurrent access.
type InMemoryStore struct {
	// PdaProcessors maps PDA id to its processor.
	PdaProcessors map[string]core.PdaProcessor
	// ReplicaMembers maps replica group id to its membership record.
	ReplicaMembers map[int]Replica
}
// InitStore resets both maps to fresh, empty state. It must be called
// before any other method.
func (inMemoryStore *InMemoryStore) InitStore() {
	inMemoryStore.PdaProcessors = map[string]core.PdaProcessor{}
	inMemoryStore.ReplicaMembers = map[int]Replica{}
}
// Save stores processor under pdaId, overwriting any existing entry.
func (inMemoryStore *InMemoryStore) Save(pdaId string, processor core.PdaProcessor) {
	inMemoryStore.PdaProcessors[pdaId] = processor
}
// Update replaces the processor stored under pdaId (same semantics as Save).
func (inMemoryStore *InMemoryStore) Update(pdaId string, processor core.PdaProcessor) {
	inMemoryStore.PdaProcessors[pdaId] = processor
}
// Get returns the processor stored under pdaId serialized as JSON, or an
// error when no such PDA exists or serialization fails.
func (inMemoryStore *InMemoryStore) Get(pdaId string) (string, error) {
	if inMemoryStore.idExists(pdaId) {
		// Propagate marshal failures instead of silently returning "".
		jsonVal, err := json.Marshal(inMemoryStore.PdaProcessors[pdaId])
		if err != nil {
			return "", err
		}
		return string(jsonVal), nil
	}
	return "", &core.PDARuntimeError{Message: "No PDA found with id " + pdaId}
}
// idExists reports whether a processor is registered under pdaId.
func (inMemoryStore *InMemoryStore) idExists(pdaId string) bool {
	_, found := inMemoryStore.PdaProcessors[pdaId]
	return found
}
// GetAllPDA returns every stored processor serialized as JSON, in map
// iteration (i.e. unspecified) order. Marshal errors are ignored here —
// a failed entry is appended as an empty string.
func (inMemoryStore *InMemoryStore) GetAllPDA() []string {
	pdaStr := make([]string, 0)
	for _, value := range inMemoryStore.PdaProcessors {
		jsonVal, _ := json.Marshal(value)
		pdaStr = append(pdaStr, string(jsonVal))
	}
	return pdaStr
}
// Delete removes the processor stored under pdaId; deleting a missing id is
// a no-op.
func (inMemoryStore *InMemoryStore) Delete(pdaId string) {
	delete(inMemoryStore.PdaProcessors, pdaId)
}
// SaveReplica registers the same processor under every id in group_members
// and records the group (members plus shared config) under gid, overwriting
// any existing group with that id.
func (inMemoryStore *InMemoryStore) SaveReplica(gid int, processor core.PdaProcessor, group_members []string) {
	for index := range group_members {
		inMemoryStore.PdaProcessors[group_members[index]] = processor
	}
	inMemoryStore.ReplicaMembers[gid] = Replica{
		GroupMembers: group_members,
		PdaConf:      processor.PdaConf,
	}
}
// JoinReplicaGroup appends pdaid to group gid's membership and records
// pdaConf as the group's configuration. Note it does not register a
// processor for pdaid in PdaProcessors.
func (inMemoryStore *InMemoryStore) JoinReplicaGroup(gid int, pdaid string, pdaConf entity.PDAConf) {
	members := append(inMemoryStore.ReplicaMembers[gid].GroupMembers, pdaid)
	inMemoryStore.ReplicaMembers[gid] = Replica{
		GroupMembers: members,
		PdaConf:      pdaConf,
	}
}
// GetAllReplicaIds returns the ids of all replica groups in unspecified
// order; the result is nil when there are none.
func (inMemoryStore *InMemoryStore) GetAllReplicaIds() []int {
	var keys []int
	// Ranging over a nil map is a no-op, so no explicit nil check is
	// needed; also use the idiomatic key-only range form.
	for key := range inMemoryStore.ReplicaMembers {
		keys = append(keys, key)
	}
	return keys
}
// GetAllMembers returns the member ids of replica group id (nil when the
// group does not exist).
func (inMemoryStore *InMemoryStore) GetAllMembers(id int) []string {
	return inMemoryStore.ReplicaMembers[id].GroupMembers
}
// GetPDA returns the processor for pdaId if it belongs to replica group gid;
// otherwise it returns a zero-value processor.
func (inMemoryStore *InMemoryStore) GetPDA(gid int, pdaId string) core.PdaProcessor {
	if !contains(inMemoryStore.ReplicaMembers[gid].GroupMembers, pdaId) {
		return core.PdaProcessor{}
	}
	return inMemoryStore.PdaProcessors[pdaId]
}
// contains reports whether e is present in s.
func contains(s []string, e string) bool {
	for i := range s {
		if s[i] == e {
			return true
		}
	}
	return false
}
// DeleteReplicaGrp removes replica group gid; deleting a missing group is a no-op.
// Note: it does not remove the members' entries from PdaProcessors.
func (inMemoryStore *InMemoryStore) DeleteReplicaGrp(gid int) {
	delete(inMemoryStore.ReplicaMembers, gid)
}
// GetReplicaConf returns the PDA configuration of replica group gid;
// a zero-value config when the group does not exist.
func (inMemoryStore *InMemoryStore) GetReplicaConf(gid int) entity.PDAConf {
	return inMemoryStore.ReplicaMembers[gid].PdaConf
}
|
package main
import "fmt"
// breakcase1 prints the values 10 through 15, then terminates the loop
// early with break even though the loop condition would allow up to 19.
func breakcase1() {
	for a := 10; a < 20; a++ {
		fmt.Printf("value of a: %d\n", a)
		// Stop once 15 has been printed (same output as the original
		// post-increment check a > 15).
		if a >= 15 {
			break
		}
	}
}
// breakCase2 demonstrates that break exits only the innermost loop:
// the inner loop is cut short at 2 on every pass, and the outer loop
// is cut short at 3.
func breakCase2() {
	for outer := 0; outer < 5; outer++ {
		if outer == 3 {
			fmt.Println("Breaking out of outer loop")
			break // leaves only the outer loop
		}
		fmt.Println("The value of outer is", outer)

		// Inner loop written in unconditional form; the break is the
		// sole exit and fires when inner reaches 2.
		inner := 0
		for {
			if inner == 2 {
				fmt.Println("Breaking out of inner loop")
				break // leaves only the inner loop
			}
			fmt.Println("The value of inner is", inner)
			inner++
		}
	}
	fmt.Println("Exiting program")
}
// continuecase prints the values 0 through 9 except 5; at i == 5 it prints
// a notice instead, demonstrating continue.
func continuecase() {
	for i := 0; i < 10; i++ {
		if i != 5 {
			fmt.Println("The value of i is", i)
			continue
		}
		fmt.Println("Continuing loop")
	}
	fmt.Println("Exiting program")
}
|
package backends
import "errors"
var (
	// ErrAuthorizationFailed is the sentinel error returned when client
	// authorization fails. Go error strings are lowercase and unpunctuated
	// (staticcheck ST1005), since they are usually wrapped mid-sentence.
	ErrAuthorizationFailed = errors.New("authorization failed")
)
|
package websockets
import (
"encoding/json"
"github.com/janwiemers/up/database"
)
var HubInstance *Hub
// Hub maintains the set of active clients and broadcasts messages to the
// clients. All fields are owned by the Run goroutine; other goroutines
// interact with the hub only through its channels.
type Hub struct {
	// Registered clients.
	clients map[*Client]bool
	// Inbound messages from the clients. Exported, so code outside this
	// package can push broadcasts.
	Broadcast chan []byte
	// Register requests from the clients.
	register chan *Client
	// Unregister requests from clients.
	unregister chan *Client
}
// NewHub constructs a Hub with all channels unbuffered and an empty
// client set. Call Run to start the event loop.
func NewHub() *Hub {
	h := &Hub{
		clients:    make(map[*Client]bool),
		Broadcast:  make(chan []byte),
		register:   make(chan *Client),
		unregister: make(chan *Client),
	}
	return h
}
// Run is the hub's event loop. It serializes all access to h.clients by
// handling register, unregister and broadcast events one at a time.
// It never returns; run it in its own goroutine.
func (h *Hub) Run() {
	for {
		select {
		case client := <-h.register:
			h.clients[client] = true
			// Send the new client an initial snapshot: first the list of
			// applications, then the checks of each application.
			apps := database.Applications()
			d, _ := json.Marshal(NewBroadcastDataForMonitor("monitors", apps))
			client.send <- d
			for i := range apps {
				app := apps[i]
				checks, _ := database.Checks(app.ID)
				d, _ := json.Marshal(&BroadcastData{
					Type:   "addChecks",
					Checks: checks,
				})
				client.send <- d
			}
		case client := <-h.unregister:
			// Guard against double-unregister before closing the channel.
			if _, ok := h.clients[client]; ok {
				delete(h.clients, client)
				close(client.send)
			}
		case message := <-h.Broadcast:
			// Fan the message out; a client whose send buffer is not
			// ready is assumed dead and is dropped immediately.
			for client := range h.clients {
				select {
				case client.send <- message:
				default:
					close(client.send)
					delete(h.clients, client)
				}
			}
		}
	}
}
|
package main
import "fmt"
// main demonstrates fixed-size arrays: per-element assignment, a literal
// initializer, and len. (Fixed gofmt spacing: `var names [3]string`.)
func main() {
	var names [3]string
	names[0] = "Nabil"
	names[1] = "Fawwaz"
	names[2] = "Elqayyim"
	fmt.Println(names[0])
	fmt.Println(names[1])
	fmt.Println(names[2])

	// Only three of the four elements are initialized; the last keeps the
	// zero value, so this prints [1 2 3 0] followed by a length of 4.
	values := [4]int{
		1,
		2,
		3,
	}
	fmt.Println(values)
	fmt.Println(len(values))
}
|
package models
// User is the persisted user record; ID is the GORM primary key.
type User struct {
	ID    uint   `json:"id" gorm:"primary_key"`
	Name  string `json:"name"`
	Email string `json:"email"`
}
// CreateUser is the request payload for creating a user; both fields are
// mandatory per the binding tags.
type CreateUser struct {
	Name  string `json:"name" binding:"required"`
	Email string `json:"email" binding:"required"`
}
// UpdateUser is the request payload for updating a user; both fields are
// optional (no binding:"required"), allowing partial updates.
type UpdateUser struct {
	Name  string `json:"name"`
	Email string `json:"email"`
}
|
package main
import (
"context"
"encoding/json"
"fmt"
"github.com/scjalliance/drivestream"
"github.com/scjalliance/drivestream/collection"
"github.com/scjalliance/drivestream/commit"
kingpin "gopkg.in/alecthomas/kingpin.v2"
)
// dump prints the requested record kinds for each drive in the repository.
//
// kinds selects what to dump per drive ("collections"/"commits" plus their
// abbreviations); wanted filters drives by id or name (empty means all).
// Any repository access error is fatal via app.Fatalf; ctx cancellation is
// checked between units of work and causes an early, silent return.
func dump(ctx context.Context, app *kingpin.Application, repo drivestream.Repository, kinds []string, wanted []string) {
	if ctx.Err() != nil {
		return
	}
	ids, err := repo.Drives().List()
	if err != nil {
		app.Fatalf("failed to enumerate drivestream database: %v", err)
	}
	for _, driveID := range ids {
		drv := repo.Drive(driveID)
		prefix := fmt.Sprintf("DRIVE %s", driveID)
		if data, ok := driveData(drv); ok {
			// Drive metadata is available: filter by id or name.
			if !isWanted(wanted, string(driveID), data.Name) {
				continue
			}
			b, err := json.Marshal(data)
			if err != nil {
				fmt.Printf("%s: DATA: parse error: %v\n", prefix, err)
			} else {
				fmt.Printf("%s: DATA: %v\n", prefix, string(b))
			}
		} else {
			// No metadata: filtering can only match on the drive id.
			if !isWanted(wanted, string(driveID)) {
				continue
			}
		}
		for _, kind := range kinds {
			if ctx.Err() != nil {
				return
			}
			switch kind {
			case "collections", "collection", "cols", "col":
				// Walk every collection: data, states, then page changes.
				cursor, err := collection.NewCursor(drv.Collections())
				if err != nil {
					app.Fatalf("failed to create collection cursor for repository %s: %v", driveID, err)
				}
				for cursor.First(); cursor.Valid(); cursor.Next() {
					reader, err := cursor.Reader()
					if err != nil {
						app.Fatalf("failed to create collection reader for repository %s: %v", driveID, err)
					}
					data, err := reader.Data()
					if err != nil {
						app.Fatalf("failed to read collection data from repository %s: %v", driveID, err)
					}
					{
						b, err := json.Marshal(data)
						if err != nil {
							fmt.Printf("%s: COLLECTION %d: DATA: parse error: %v\n", prefix, cursor.SeqNum(), err)
						} else {
							fmt.Printf("%s: COLLECTION %d: DATA: %s\n", prefix, cursor.SeqNum(), string(b))
						}
					}
					states, err := reader.States()
					if err != nil {
						app.Fatalf("failed to read collection states from repository %s: %v", driveID, err)
					}
					for i, state := range states {
						b, err := json.Marshal(state)
						if err != nil {
							fmt.Printf("%s: COLLECTION %d: STATE %d: parse error: %v\n", prefix, cursor.SeqNum(), i, err)
						} else {
							fmt.Printf("%s: COLLECTION %d: STATE %d: %v\n", prefix, cursor.SeqNum(), i, string(b))
						}
					}
					pages, err := reader.Pages()
					if err != nil {
						app.Fatalf("failed to read pages from repository %s: %v", driveID, err)
					}
					for i, pg := range pages {
						if ctx.Err() != nil {
							return
						}
						for c, change := range pg.Changes {
							b, err := json.Marshal(change)
							if err != nil {
								fmt.Printf("%s: COLLECTION %d: PAGE %d: CHANGE %d: parse error: %v\n", prefix, cursor.SeqNum(), i, c, err)
							} else {
								fmt.Printf("%s: COLLECTION %d: PAGE %d: CHANGE %d: %v\n", prefix, cursor.SeqNum(), i, c, string(b))
							}
						}
					}
				}
			case "commits", "commit", "com":
				// Walk every commit: data then states (commits have no pages).
				cursor, err := commit.NewCursor(drv.Commits())
				if err != nil {
					app.Fatalf("failed to create commit cursor for repository %s: %v", driveID, err)
				}
				for cursor.First(); cursor.Valid(); cursor.Next() {
					reader, err := cursor.Reader()
					if err != nil {
						app.Fatalf("failed to create commit reader for repository %s: %v", driveID, err)
					}
					data, err := reader.Data()
					if err != nil {
						app.Fatalf("failed to read commit data from repository %s: %v", driveID, err)
					}
					{
						b, err := json.Marshal(data)
						if err != nil {
							fmt.Printf("%s: COMMIT %d: DATA: parse error: %v\n", prefix, cursor.SeqNum(), err)
						} else {
							fmt.Printf("%s: COMMIT %d: DATA: %s\n", prefix, cursor.SeqNum(), string(b))
						}
					}
					states, err := reader.States()
					if err != nil {
						app.Fatalf("failed to read commit states from repository %s: %v", driveID, err)
					}
					for i, state := range states {
						b, err := json.Marshal(state)
						if err != nil {
							fmt.Printf("%s: COMMIT %d: STATE %d: parse error: %v\n", prefix, cursor.SeqNum(), i, err)
						} else {
							fmt.Printf("%s: COMMIT %d: STATE %d: %v\n", prefix, cursor.SeqNum(), i, string(b))
						}
					}
				}
			}
		}
		//fmt.Printf("%s: %s\n", prefix, teamDrive.Name)
	}
	printMemUsage()
}
|
package main
import (
"complie/src/craft"
"strconv"
"strings"
)
// main runs the craft demo script.
func main() {
	simpleScript := craft.NewSimpleScript()
	// NOTE(review): "Strat" looks like a typo for "Start" in the craft
	// package API — fix it there, not here.
	simpleScript.Strat()
}
// decode expands every "[count|chars]" segment in code. Each pass splits
// the string at the last '[' seen before the first ']' (i.e. the innermost
// segment), expands that segment with decodeSingle, and repeats until
// havaEncode finds no further encoded segment.
func decode(code string) string {
	for havaEncode(code) {
		var head, body, tail string
		open := 0
		for i := 0; i < len(code); i++ {
			if code[i] == '[' {
				open = i // remember the most recent '['
			} else if code[i] == ']' {
				head = code[:open]
				body = code[open : i+1]
				tail = code[i+1:]
				break
			}
		}
		code = strings.Join([]string{head, decodeSingle(body), tail}, "")
	}
	return code
}
// decodeSingle expands one "[count|chars]" segment: it strips the brackets,
// parses the decimal count before the '|', and returns chars repeated
// count times.
//
// Bug fix: the old version kept scanning for digits after the '|', so a
// segment like "[2|b3]" produced a count of 23 instead of 2. Digit
// collection now stops at the first '|'.
func decodeSingle(code string) string {
	code = code[1 : len(code)-1]
	var num string
	var chars string
	for i := 0; i < len(code); i++ {
		if code[i] == '|' {
			chars = code[i+1:]
			break
		}
		if code[i] >= '0' && code[i] <= '9' {
			num += string(code[i])
		}
	}
	count, _ := strconv.Atoi(num)
	return strings.Repeat(chars, count)
}
// havaEncode reports whether s still contains an encoded segment: a '['
// immediately followed by a digit, then (later) a '|', then a ']'.
//
// Bug fix: the old version read s[i+1] without a bounds check, so a string
// ending in '[' caused an index-out-of-range panic.
func havaEncode(s string) bool {
	sawOpen := false
	sawPipe := false
	for i := 0; i < len(s); i++ {
		// Guard i+1 so a trailing '[' cannot index past the end of s.
		if s[i] == '[' && i+1 < len(s) && s[i+1] >= '0' && s[i+1] <= '9' {
			sawOpen = true
		}
		if sawOpen && s[i] == '|' {
			sawPipe = true
		}
		if sawPipe && s[i] == ']' {
			return true
		}
	}
	return false
}
|
package main
import (
"github.com/severedsea/go-web-boilerplate/cmd/serverd/banner"
"github.com/severedsea/go-web-boilerplate/cmd/serverd/server"
)
// main prints the startup banner and then starts the web server
// (server.New().Start() is expected to block for the process lifetime).
func main() {
	// Print banner
	banner.Print()
	// Start server
	server.New().Start()
}
|
package cluster
// Role identifies a node's role in the cluster.
type Role int

const (
	// JoinElection: the node is participating in a leader election.
	JoinElection Role = iota
	// AsLeader: the node acts as the leader.
	AsLeader
	// AsFollower: the node follows the current leader.
	AsFollower
	// AsWatcher: the node only observes the cluster.
	AsWatcher
)
|
package popgun
import (
"fmt"
"strconv"
"strings"
)
// Executable is a POP3 command handler: Run executes the command for the
// client with the given arguments and returns the client's next state.
type Executable interface {
	Run(c *Client, args []string) (int, error)
}
// QuitCommand implements the POP3 QUIT command.
type QuitCommand struct{}

// Run ends the session. From the TRANSACTION state it first commits
// pending changes (Update) and releases the maildrop lock (Unlock) before
// moving to the UPDATE state; from any other state it just says goodbye.
// The update-then-unlock order matters: changes must be flushed while the
// maildrop is still held.
func (cmd QuitCommand) Run(c *Client, args []string) (int, error) {
	newState := c.currentState
	if c.currentState == STATE_TRANSACTION {
		err := c.backend.Update(c.user)
		if err != nil {
			return 0, fmt.Errorf("Error updating maildrop for user %s: %v", c.user, err)
		}
		err = c.backend.Unlock(c.user)
		if err != nil {
			c.printer.Err("Server was unable to unlock maildrop")
			return 0, fmt.Errorf("Error unlocking maildrop for user %s: %v", c.user, err)
		}
		newState = STATE_UPDATE
	}
	// Mark the connection for teardown regardless of prior state.
	c.isAlive = false
	c.printer.Ok("Goodbye")
	return newState, nil
}
// UserCommand implements the POP3 USER command.
type UserCommand struct{}

// Run records the supplied username on the client and stays in the
// AUTHORIZATION state. Valid only in that state with exactly one argument.
func (cmd UserCommand) Run(c *Client, args []string) (int, error) {
	switch {
	case c.currentState != STATE_AUTHORIZATION:
		return 0, ErrInvalidState
	case len(args) != 1:
		return 0, fmt.Errorf("Invalid arguments count: %d", len(args))
	}
	c.user = args[0]
	c.printer.Ok("")
	return STATE_AUTHORIZATION, nil
}
// PassCommand implements the POP3 PASS command.
type PassCommand struct{}

// Run authenticates the user set by the preceding USER command and, on
// success, locks the maildrop and moves to the TRANSACTION state. PASS is
// valid only in the AUTHORIZATION state, only directly after USER, and
// takes exactly one argument (the password).
func (cmd PassCommand) Run(c *Client, args []string) (int, error) {
	if c.currentState != STATE_AUTHORIZATION {
		return 0, ErrInvalidState
	}
	if c.lastCommand != "USER" {
		c.printer.Err("PASS can be executed only directly after USER command")
		return STATE_AUTHORIZATION, nil
	}
	if len(args) != 1 {
		return 0, fmt.Errorf("Invalid arguments count: %d", len(args))
	}
	c.pass = args[0]
	if !c.authorizator.Authorize(c.user, c.pass) {
		// Bad credentials keep the session alive in AUTHORIZATION.
		c.printer.Err("Invalid username or password")
		return STATE_AUTHORIZATION, nil
	}
	// Acquire the exclusive maildrop lock before entering TRANSACTION.
	err := c.backend.Lock(c.user)
	if err != nil {
		c.printer.Err("Server was unable to lock maildrop")
		return 0, fmt.Errorf("Error locking maildrop for user %s: %v", c.user, err)
	}
	c.printer.Ok("User Successfully Logged on")
	return STATE_TRANSACTION, nil
}
// StatCommand implements the POP3 STAT command.
type StatCommand struct{}

// Run reports the message count and total size (in octets) of the user's
// maildrop. Valid only in the TRANSACTION state.
func (cmd StatCommand) Run(c *Client, args []string) (int, error) {
	if c.currentState != STATE_TRANSACTION {
		return 0, ErrInvalidState
	}
	messages, octets, err := c.backend.Stat(c.user)
	if err == nil {
		c.printer.Ok("%d %d", messages, octets)
		return STATE_TRANSACTION, nil
	}
	return 0, fmt.Errorf("Error calling Stat for user %s: %v", c.user, err)
}
// ListCommand implements the POP3 LIST command.
type ListCommand struct{}

// Run lists message sizes. With a numeric argument it reports that one
// message's size (or "no such message"); without arguments it prints a
// summary line followed by a multi-line size listing. Valid only in the
// TRANSACTION state.
func (cmd ListCommand) Run(c *Client, args []string) (int, error) {
	if c.currentState != STATE_TRANSACTION {
		return 0, ErrInvalidState
	}
	if len(args) > 0 {
		// Single-message form: LIST <msgId>.
		msgId, err := strconv.Atoi(args[0])
		if err != nil {
			c.printer.Err("Invalid argument: %s", args[0])
			return 0, fmt.Errorf("Invalid argument for LIST given by user %s: %v", c.user, err)
		}
		exists, octets, err := c.backend.ListMessage(c.user, msgId)
		if err != nil {
			return 0, fmt.Errorf("Error calling 'LIST %d' for user %s: %v", msgId, c.user, err)
		}
		if !exists {
			c.printer.Err("no such message")
			return STATE_TRANSACTION, nil
		}
		c.printer.Ok("%d %d", msgId, octets)
	} else {
		// Full listing: one "<index> <octets>" line per message.
		octets, err := c.backend.List(c.user)
		if err != nil {
			return 0, fmt.Errorf("Error calling LIST for user %s: %v", c.user, err)
		}
		c.printer.Ok("%d messages", len(octets))
		messagesList := make([]string, len(octets))
		for i, octet := range octets {
			// NOTE(review): indices here start at 0; POP3 message numbers
			// are conventionally 1-based — confirm against the backend.
			messagesList[i] = fmt.Sprintf("%d %d", i, octet)
		}
		c.printer.MultiLine(messagesList)
	}
	return STATE_TRANSACTION, nil
}
// RetrCommand implements the POP3 RETR command.
type RetrCommand struct{}

// Run retrieves the message identified by the numeric argument and sends
// it line by line after an OK response. Valid only in the TRANSACTION state.
func (cmd RetrCommand) Run(c *Client, args []string) (int, error) {
	if c.currentState != STATE_TRANSACTION {
		return 0, ErrInvalidState
	}
	if len(args) < 1 {
		c.printer.Err("Missing argument for RETR command")
		return 0, fmt.Errorf("Missing argument for RETR called by user %s", c.user)
	}
	msgId, convErr := strconv.Atoi(args[0])
	if convErr != nil {
		c.printer.Err("Invalid argument: %s", args[0])
		return 0, fmt.Errorf("Invalid argument for RETR given by user %s: %v", c.user, convErr)
	}
	message, retrErr := c.backend.Retr(c.user, msgId)
	if retrErr != nil {
		return 0, fmt.Errorf("Error calling 'RETR %d' for user %s: %v", msgId, c.user, retrErr)
	}
	c.printer.Ok("")
	c.printer.MultiLine(strings.Split(message, "\n"))
	return STATE_TRANSACTION, nil
}
// DeleCommand implements the POP3 DELE command.
type DeleCommand struct{}

// Run marks the message identified by the numeric argument as deleted.
// Valid only in the TRANSACTION state.
func (cmd DeleCommand) Run(c *Client, args []string) (int, error) {
	if c.currentState != STATE_TRANSACTION {
		return 0, ErrInvalidState
	}
	if len(args) < 1 {
		c.printer.Err("Missing argument for DELE command")
		return 0, fmt.Errorf("Missing argument for DELE called by user %s", c.user)
	}
	msgId, convErr := strconv.Atoi(args[0])
	if convErr != nil {
		c.printer.Err("Invalid argument: %s", args[0])
		return 0, fmt.Errorf("Invalid argument for DELE given by user %s: %v", c.user, convErr)
	}
	if deleErr := c.backend.Dele(c.user, msgId); deleErr != nil {
		return 0, fmt.Errorf("Error calling 'DELE %d' for user %s: %v", msgId, c.user, deleErr)
	}
	c.printer.Ok("Message %d deleted", msgId)
	return STATE_TRANSACTION, nil
}
// NoopCommand implements the POP3 NOOP command.
type NoopCommand struct{}

// Run does nothing beyond acknowledging the command with an OK.
// Valid only in the TRANSACTION state.
func (cmd NoopCommand) Run(c *Client, args []string) (int, error) {
	if c.currentState == STATE_TRANSACTION {
		c.printer.Ok("")
		return STATE_TRANSACTION, nil
	}
	return 0, ErrInvalidState
}
// RsetCommand implements the POP3 RSET command.
type RsetCommand struct{}

// Run asks the backend to unmark any messages marked as deleted in this
// session. Valid only in the TRANSACTION state.
func (cmd RsetCommand) Run(c *Client, args []string) (int, error) {
	if c.currentState != STATE_TRANSACTION {
		return 0, ErrInvalidState
	}
	if err := c.backend.Rset(c.user); err != nil {
		return 0, fmt.Errorf("Error calling 'RSET' for user %s: %v", c.user, err)
	}
	c.printer.Ok("")
	return STATE_TRANSACTION, nil
}
// UidlCommand implements the POP3 UIDL command.
type UidlCommand struct{}

// Run lists unique-id listings. With a numeric argument it reports that
// one message's uid (or "no such message"); without arguments it prints a
// summary line followed by a multi-line uid listing. Valid only in the
// TRANSACTION state.
func (cmd UidlCommand) Run(c *Client, args []string) (int, error) {
	if c.currentState != STATE_TRANSACTION {
		return 0, ErrInvalidState
	}
	if len(args) > 0 {
		// Single-message form: UIDL <msgId>.
		msgId, err := strconv.Atoi(args[0])
		if err != nil {
			c.printer.Err("Invalid argument: %s", args[0])
			return 0, fmt.Errorf("Invalid argument for UIDL given by user %s: %v", c.user, err)
		}
		exists, uid, err := c.backend.UidlMessage(c.user, msgId)
		if err != nil {
			return 0, fmt.Errorf("Error calling 'UIDL %d' for user %s: %v", msgId, c.user, err)
		}
		if !exists {
			c.printer.Err("no such message")
			return STATE_TRANSACTION, nil
		}
		c.printer.Ok("%d %s", msgId, uid)
	} else {
		// Full listing: one "<index> <uid>" line per message.
		uids, err := c.backend.Uidl(c.user)
		if err != nil {
			return 0, fmt.Errorf("Error calling UIDL for user %s: %v", c.user, err)
		}
		c.printer.Ok("%d messages", len(uids))
		uidsList := make([]string, len(uids))
		for i, uid := range uids {
			// NOTE(review): indices here start at 0; POP3 message numbers
			// are conventionally 1-based — confirm against the backend.
			uidsList[i] = fmt.Sprintf("%d %s", i, uid)
		}
		c.printer.MultiLine(uidsList)
	}
	return STATE_TRANSACTION, nil
}
// CapaCommand implements the POP3 CAPA command.
type CapaCommand struct{}

// Run advertises the server's capabilities (USER and UIDL) and leaves the
// session state unchanged. CAPA is valid in any state.
func (cmd CapaCommand) Run(c *Client, args []string) (int, error) {
	c.printer.Ok("")
	c.printer.MultiLine([]string{"USER", "UIDL"})
	return c.currentState, nil
}
|
package qiwi
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
)
// ApplePayToken is the decoded Apple Pay payment token wrapper.
type ApplePayToken struct {
	// Type string `json:"type"`
	PaymentData ApplePayTokenData `json:"paymentData"`
}
// ApplePayTokenData carries the encrypted payment payload of an Apple Pay
// token: its version, the ciphertext, the key header, and the signature.
type ApplePayTokenData struct {
	Version   string   `json:"version"`
	Data      string   `json:"data"`
	Header    APHeader `json:"header"`
	Signature string   `json:"signature"`
}
// APHeader is the key header of an Apple Pay token: the ephemeral public
// key, the hash of the merchant public key, and the transaction id.
type APHeader struct {
	PubKey        string `json:"ephemeralPublicKey"`
	PubKeyHash    string `json:"publicKeyHash"`
	TransactionID string `json:"transactionId"`
}
// decodeBase64 decodes a standard-alphabet base64 string; used internally
// to "unpack" the Apple Pay token before JSON parsing.
func decodeBase64(enc string) ([]byte, error) {
	return base64.StdEncoding.DecodeString(enc)
}
// ApplePay submits an Apple Pay payment: it base64-decodes the token,
// unmarshals it into the payment method, sets the payment type and the
// amount (given in rubles), then PUTs the payment to the QIWI API and
// returns any API-level error via checkErrors.
func (p *Payment) ApplePay(ctx context.Context, amount int, token string) (err error) {
	// Decode token from base64
	data, err := decodeBase64(token)
	if err != nil {
		return fmt.Errorf("[QIWI] %w: %s", ErrBadJSON, err)
	}
	// Parse the JSON token payload into the payment method.
	err = json.Unmarshal(data, &p.PaymentMethod.Token)
	if err != nil {
		return fmt.Errorf("[QIWI] %w: %s", ErrBadJSON, err)
	}
	p.PaymentMethod.Type = ApplePayPayment
	p.Amount = NewAmountInRubles(amount)
	// Make request link
	requestLink := fmt.Sprintf("/payin/v1/sites/%s/payments/%s", p.SiteID, p.PaymentID)
	// Send request
	err = proceedRequest(ctx, "PUT", requestLink, p)
	return p.checkErrors(err)
}
|
package cmd
import (
"fmt"
"io/ioutil"
"github.com/jpillora/opts"
antlr "github.com/wxio/goantlr"
"github.com/wxio/tron-go/adl"
)
// BuildAdlAst returns the "build_ast" CLI sub-command, which parses an ADL
// file and reports any lex/parse/syntax errors.
func BuildAdlAst() opts.Opts {
	return opts.New(&buildAdlAst{}).Name("build_ast")
}
// buildAdlAst holds the arguments of the build_ast sub-command.
type buildAdlAst struct {
	// File is the path of the ADL file to parse (positional argument).
	File string `type:"arg" help:"adl file" predict:"files"`
}
// Run reads the ADL file, builds its AST, and prints every category of
// error (lex, parse, syntax, error nodes) found during the build; it then
// walks the tree and reports any walk error. Returns nil on full success.
func (cm *buildAdlAst) Run() error {
	by, err := ioutil.ReadFile(cm.File)
	if err != nil {
		return err
	}
	tr, atr, bl, ts, err1 := adl.BuildAdlAST(string(by))
	_, _, _, _ = tr, atr, bl, ts
	if err1.Error() != nil {
		// Build failed: collect error nodes from the tree and dump every
		// error category before returning.
		// fmt.Printf("%v\n", tr.TreeString())
		errColl := &errColl{}
		antlr.ParseTreeWalkerDefault.Walk(errColl, atr)
		fmt.Printf("Lex Errors\n")
		for i, er := range err1.LexErr {
			fmt.Printf("  %d %v\n", i, er)
		}
		fmt.Printf("Parse Errors\n")
		for i, er := range err1.ParseErr {
			fmt.Printf("  %d %v\n", i, er)
		}
		fmt.Printf("Syntax Errors\n")
		for i, er := range err1.SyntaxErr {
			fmt.Printf("  %d %v\n", i, er)
		}
		fmt.Printf("Error Nodes\n")
		// Print at most 10 error nodes, then a summary count.
		for i, er := range errColl.errs {
			fmt.Printf("  %d %v\n", i, er.GetSymbol())
			if i > 9 {
				fmt.Printf("  ... total errs %d\n", len(errColl.errs))
				break
			}
		}
		return fmt.Errorf("build err '%v' errorNodes'%v' ", err1.Error(), errColl.errs)
	}
	// fmt.Printf("%v\n", tr)
	// if err != nil {
	// 	return err
	// }
	// fmt.Printf("%v\n", tr.TreeString())
	_, err2 := adl.WalkADLWi(tr, &AdlWiListener{})
	// fmt.Printf("%v\n", bl)
	// fmt.Printf("%v\n", ts)
	if err2.Error() != nil {
		return fmt.Errorf("walk err '%v'", err2.Error())
	}
	return nil
}
// errColl is a parse-tree walk listener that collects every error node
// encountered during the walk.
type errColl struct {
	errs []antlr.ErrorNode
}

// VisitTerminal is a no-op; only error nodes are of interest.
func (s *errColl) VisitTerminal(node antlr.TerminalNode) {}

// EnterEveryRule is a no-op.
func (s *errColl) EnterEveryRule(ctx antlr.ParserRuleContext) {}

// ExitEveryRule is a no-op.
func (s *errColl) ExitEveryRule(ctx antlr.ParserRuleContext) {}

// VisitErrorNode records the error node for later reporting.
func (s *errColl) VisitErrorNode(node antlr.ErrorNode) {
	s.errs = append(s.errs, node)
}
// AdlWiListener is a parse-tree walk listener that counts every visited
// node; the commented-out prints produce an indented trace when enabled.
// (Private field renamed from the misspelled "intend" to "indent".)
type AdlWiListener struct {
	indent   string
	tokCount int
}

// VisitTerminal counts a terminal token.
func (s *AdlWiListener) VisitTerminal(node antlr.TerminalNode) {
	s.tokCount++
	// fmt.Printf("%d%s >>%v\n", s.tokCount, s.indent, node)
}

// VisitErrorNode counts an error node.
func (s *AdlWiListener) VisitErrorNode(node antlr.ErrorNode) {
	s.tokCount++
	// fmt.Printf("%d Error %v\n", s.tokCount, node)
}

// EnterEveryRule counts the rule and deepens the trace indent.
func (s *AdlWiListener) EnterEveryRule(ctx antlr.ParserRuleContext) {
	s.tokCount++
	// fmt.Printf("%d%s>>%T\n", s.tokCount, s.indent, ctx)
	s.indent += "\t"
}

// ExitEveryRule shallows the trace indent again.
func (s *AdlWiListener) ExitEveryRule(ctx antlr.ParserRuleContext) {
	s.indent = s.indent[1:]
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.