text
stringlengths 11
4.05M
|
|---|
package passwordcombiner
import "github.com/cloudfoundry-incubator/cloud-service-broker/internal/encryption/gcmencryptor"
// CombinedPassword pairs a labeled password with its salt and a ready-made
// GCM encryptor, plus private flags recording primary status.
type CombinedPassword struct {
	Label     string
	Secret    string
	Salt      []byte
	Encryptor gcmencryptor.GCMEncryptor
	configuredPrimary bool // set when this password is marked primary in configuration
	storedPrimary     bool // set when this password is recorded as primary in storage
}

// CombinedPasswords is a list of CombinedPassword values searchable by
// their primary flags.
type CombinedPasswords []CombinedPassword
// ConfiguredPrimary returns the password flagged as the configured primary,
// together with true. When no entry carries the flag it returns the zero
// value and false.
func (c CombinedPasswords) ConfiguredPrimary() (CombinedPassword, bool) {
	for _, candidate := range c {
		if !candidate.configuredPrimary {
			continue
		}
		return candidate, true
	}
	var none CombinedPassword
	return none, false
}
// StoredPrimary returns the password flagged as the stored primary,
// together with true. When no entry carries the flag it returns the zero
// value and false.
func (c CombinedPasswords) StoredPrimary() (CombinedPassword, bool) {
	for _, candidate := range c {
		if !candidate.storedPrimary {
			continue
		}
		return candidate, true
	}
	var none CombinedPassword
	return none, false
}
|
package sqlutil
// TestDB describes a database target used by the test suite: the
// database/sql driver name and the connection string to open it with.
type TestDB struct {
	DriverName string // driver name as registered with database/sql (e.g. "mysql")
	ConnStr    string // driver-specific DSN / connection string
}
// Predefined database targets for tests. Hosts, ports and credentials
// correspond to the local development environment the test suite expects;
// sqlite runs fully in memory and needs no external service.
var TestDB_Sqlite3 = TestDB{DriverName: "sqlite3", ConnStr: ":memory:"}
var TestDB_Mysql = TestDB{DriverName: "mysql", ConnStr: "logdisplayplatform:password@tcp(localhost:3306)/logdisplayplatform_tests?collation=utf8mb4_unicode_ci"}
var TestDB_Postgres = TestDB{DriverName: "postgres", ConnStr: "user=logdisplayplatformtest password=logdisplayplatformtest host=localhost port=5432 dbname=logdisplayplatformtest sslmode=disable"}
var TestDB_Mssql = TestDB{DriverName: "mssql", ConnStr: "server=localhost;port=1433;database=logdisplayplatformtest;user id=logdisplayplatform;password=Password!"}
|
package main
import (
"fmt"
"github.com/akamensky/argparse"
"os"
"log"
"github.com/dariusbakunas/govirt-api/models"
"github.com/dariusbakunas/govirt-api/server"
)
// main parses command-line arguments, connects to libvirt and starts the
// REST API server.
//
// Flags:
//   -u/--uri  (required) libvirt connection URI
//   -p/--port (optional) listening port; defaults to 8000 when omitted
func main() {
	parser := argparse.NewParser("govirt-api", "Libvirt Rest API")
	uri := parser.String("u", "uri", &argparse.Options{Required: true, Help: "Libvirt URI"})
	port := parser.Int("p", "port", &argparse.Options{Required: false, Help: "Listening port"})
	// Early return on bad arguments keeps the happy path un-nested
	// (original wrapped the whole body in an else branch).
	if err := parser.Parse(os.Args); err != nil {
		fmt.Print(parser.Usage(err))
		return
	}
	if *port == 0 {
		// argparse leaves an optional int at zero; apply the default port.
		*port = 8000
	}
	conn, err := models.InitLibvirt(*uri)
	if err != nil {
		log.Fatalf("failed to connect: %v", err)
	}
	defer conn.Close()
	server.Init(*port)
}
|
/*
* Go Library (C) 2017 Inc.
*
* @project Project Globo / avaliacao.com
* @author @jeffotoni
* @size 01/03/2018
*/
package handler
import (
"github.com/jeffotoni/gmongocrud/lib/context"
"github.com/jeffotoni/gmongocrud/repo"
"log"
"net/http"
)
// Ping is a health-check handler: it logs the local working path obtained
// from the repo package and answers with a JSON "pong" payload and HTTP 200.
func Ping(ctx *context.Context) {
	pathNewOrg := repo.GetWdLocal(0)
	// Fix: log prefix typo "pring" -> "ping".
	log.Println("ping path: ", pathNewOrg)
	msgJson := `{"msg":"pong..."}`
	ctx.JSON(http.StatusOK, msgJson)
}
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"bytes"
"context"
"fmt"
"runtime/trace"
"github.com/pingcap/tidb/executor/internal/exec"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/execdetails"
"github.com/pingcap/tidb/util/memory"
"github.com/tikv/client-go/v2/txnkv/txnsnapshot"
)
// UpdateExec is the executor for the UPDATE statement.
type UpdateExec struct {
	exec.BaseExecutor

	// OrderedList holds the SET assignments in evaluation order;
	// generated-column assignments start at virtualAssignmentsOffset.
	OrderedList []*expression.Assignment
	// updatedRowKeys is a map for unique (TableAlias, handle) pair.
	// The value is true if the row is changed, or false otherwise
	updatedRowKeys map[int]*kv.MemAwareHandleMap[bool]
	tblID2table    map[int64]table.Table
	// mergedRowData is a map for unique (Table, handle) pair.
	// The value is cached table row
	mergedRowData          map[int64]*kv.MemAwareHandleMap[[]types.Datum]
	multiUpdateOnSameTable map[int64]bool
	matched                uint64 // a counter of matched rows during update
	// tblColPosInfos stores relationship between column ordinal to its table handle.
	// the columns ordinals is present in ordinal range format, @see plannercore.TblColPosInfos
	tblColPosInfos            plannercore.TblColPosInfoSlice
	assignFlag                []int
	evalBuffer                chunk.MutRow
	allAssignmentsAreConstant bool
	virtualAssignmentsOffset  int
	drained                   bool
	memTracker                *memory.Tracker
	stats                     *updateRuntimeStats
	// Per-input-row scratch state, refreshed by prepare() to avoid
	// re-computation across merge()/exec().
	handles        []kv.Handle
	tableUpdatable []bool
	changed        []bool
	matches        []bool
	// fkChecks contains the foreign key checkers. the map is tableID -> []*FKCheckExec
	fkChecks map[int64][]*FKCheckExec
	// fkCascades contains the foreign key cascade. the map is tableID -> []*FKCascadeExec
	fkCascades map[int64][]*FKCascadeExec
}
// prepare `handles`, `tableUpdatable`, `changed` to avoid re-computations.
// It fills one entry per table/alias in tblColPosInfos for the given row.
func (e *UpdateExec) prepare(row []types.Datum) (err error) {
	if e.updatedRowKeys == nil {
		e.updatedRowKeys = make(map[int]*kv.MemAwareHandleMap[bool])
	}
	// Reset scratch slices, keeping their backing storage.
	e.handles = e.handles[:0]
	e.tableUpdatable = e.tableUpdatable[:0]
	e.changed = e.changed[:0]
	e.matches = e.matches[:0]
	for _, content := range e.tblColPosInfos {
		if e.updatedRowKeys[content.Start] == nil {
			e.updatedRowKeys[content.Start] = kv.NewMemAwareHandleMap[bool]()
		}
		handle, err := content.HandleCols.BuildHandleByDatums(row)
		if err != nil {
			return err
		}
		e.handles = append(e.handles, handle)
		// flag >= 0 means at least one assignment targets this table's columns.
		updatable := false
		flags := e.assignFlag[content.Start:content.End]
		for _, flag := range flags {
			if flag >= 0 {
				updatable = true
				break
			}
		}
		// A NULL handle marks an unmatched outer-join row; never update it.
		if unmatchedOuterRow(content, row) {
			updatable = false
		}
		e.tableUpdatable = append(e.tableUpdatable, updatable)
		changed, ok := e.updatedRowKeys[content.Start].Get(handle)
		if ok {
			// Handle seen before: reuse recorded changed status, not a new match.
			e.changed = append(e.changed, changed)
			e.matches = append(e.matches, false)
		} else {
			e.changed = append(e.changed, false)
			e.matches = append(e.matches, true)
		}
	}
	return nil
}
// merge folds this row's new values into mergedRowData for tables that are
// updated more than once in the same statement, so later assignments see
// earlier ones. mergeGenerated selects whether generated or non-generated
// columns are merged in this pass.
func (e *UpdateExec) merge(row, newData []types.Datum, mergeGenerated bool) error {
	if e.mergedRowData == nil {
		e.mergedRowData = make(map[int64]*kv.MemAwareHandleMap[[]types.Datum])
	}
	var mergedData []types.Datum
	// merge updates from and into mergedRowData
	for i, content := range e.tblColPosInfos {
		if !e.multiUpdateOnSameTable[content.TblID] {
			// No need to merge if not multi-updated
			continue
		}
		if !e.tableUpdatable[i] {
			// If there's nothing to update, we can just skip current row
			continue
		}
		if e.changed[i] {
			// Each matched row is updated once, even if it matches the conditions multiple times.
			continue
		}
		handle := e.handles[i]
		flags := e.assignFlag[content.Start:content.End]
		if e.mergedRowData[content.TblID] == nil {
			e.mergedRowData[content.TblID] = kv.NewMemAwareHandleMap[[]types.Datum]()
		}
		tbl := e.tblID2table[content.TblID]
		oldData := row[content.Start:content.End]
		newTableData := newData[content.Start:content.End]
		if v, ok := e.mergedRowData[content.TblID].Get(handle); ok {
			mergedData = v
			for i, flag := range flags {
				// Only touch columns of the kind selected by mergeGenerated.
				if tbl.WritableCols()[i].IsGenerated() != mergeGenerated {
					continue
				}
				mergedData[i].Copy(&oldData[i])
				if flag >= 0 {
					// Assigned column: propagate the merged value out.
					newTableData[i].Copy(&mergedData[i])
				} else {
					// Unassigned column: adopt the new value into the merge cache.
					mergedData[i].Copy(&newTableData[i])
				}
			}
		} else {
			mergedData = append([]types.Datum{}, newTableData...)
		}
		memDelta := e.mergedRowData[content.TblID].Set(handle, mergedData)
		memDelta += types.EstimatedMemUsage(mergedData, 1) + int64(handle.ExtraMemSize())
		e.memTracker.Consume(memDelta)
	}
	return nil
}
// exec writes the prepared new values of one input row to the tables.
// It skips non-updatable and already-changed entries, bumps the matched
// counter, and tolerates duplicate-key / check-constraint errors when the
// statement context downgrades them to warnings.
func (e *UpdateExec) exec(ctx context.Context, _ *expression.Schema, row, newData []types.Datum) error {
	defer trace.StartRegion(ctx, "UpdateExec").End()
	// Convert assignFlag (>= 0 means assigned) into booleans for updateRecord.
	bAssignFlag := make([]bool, len(e.assignFlag))
	for i, flag := range e.assignFlag {
		bAssignFlag[i] = flag >= 0
	}
	for i, content := range e.tblColPosInfos {
		if !e.tableUpdatable[i] {
			// If there's nothing to update, we can just skip current row
			continue
		}
		if e.changed[i] {
			// Each matched row is updated once, even if it matches the conditions multiple times.
			continue
		}
		if e.matches[i] {
			// Row is matched for the first time, increment `matched` counter
			e.matched++
		}
		tbl := e.tblID2table[content.TblID]
		handle := e.handles[i]
		oldData := row[content.Start:content.End]
		newTableData := newData[content.Start:content.End]
		flags := bAssignFlag[content.Start:content.End]
		// Update row
		fkChecks := e.fkChecks[content.TblID]
		fkCascades := e.fkCascades[content.TblID]
		changed, err1 := updateRecord(ctx, e.Ctx(), handle, oldData, newTableData, flags, tbl, false, e.memTracker, fkChecks, fkCascades)
		if err1 == nil {
			// Record whether this handle actually changed; charge the tracker
			// for new handles only.
			_, exist := e.updatedRowKeys[content.Start].Get(handle)
			memDelta := e.updatedRowKeys[content.Start].Set(handle, changed)
			if !exist {
				memDelta += int64(handle.ExtraMemSize())
			}
			e.memTracker.Consume(memDelta)
			continue
		}
		sc := e.Ctx().GetSessionVars().StmtCtx
		if (kv.ErrKeyExists.Equal(err1) || table.ErrCheckConstraintViolated.Equal(err1)) && sc.DupKeyAsWarning {
			sc.AppendWarning(err1)
			continue
		}
		return err1
	}
	return nil
}
// unmatchedOuterRow checks the tableCols of a record to decide whether that record
// can not be updated. The handle is NULL only when it is the inner side of an
// outer join: the outer row can not match any inner rows, and in this scenario
// the inner handle field is filled with a NULL value.
//
// This fixes: https://github.com/pingcap/tidb/issues/7176.
func unmatchedOuterRow(tblPos plannercore.TblColPosInfo, waitUpdateRow []types.Datum) bool {
	// Checking the first handle column suffices: it is NULL iff the row is unmatched.
	firstHandleIdx := tblPos.HandleCols.GetCol(0)
	return waitUpdateRow[firstHandleIdx.Index].IsNull()
}
// Next implements the Executor Next interface.
// UPDATE produces no result rows; the first call performs the whole update
// and records the affected-row count, subsequent calls return immediately.
func (e *UpdateExec) Next(ctx context.Context, req *chunk.Chunk) error {
	req.Reset()
	if !e.drained {
		if e.collectRuntimeStatsEnabled() {
			// Make allocator runtime stats reachable by downstream allocators.
			ctx = context.WithValue(ctx, autoid.AllocatorRuntimeStatsCtxKey, e.stats.AllocatorRuntimeStats)
		}
		numRows, err := e.updateRows(ctx)
		if err != nil {
			return err
		}
		e.drained = true
		e.Ctx().GetSessionVars().StmtCtx.AddRecordRows(uint64(numRows))
	}
	return nil
}
// updateRows drains the child executor chunk by chunk, and for every row:
// prepares per-table bookkeeping, composes the new row (constant fast path
// or full expression evaluation), merges multi-updates, evaluates generated
// columns, and writes the result. It returns the number of rows processed.
func (e *UpdateExec) updateRows(ctx context.Context) (int, error) {
	fields := retTypes(e.Children(0))
	colsInfo := plannercore.GetUpdateColumnsInfo(e.tblID2table, e.tblColPosInfos, len(fields))
	globalRowIdx := 0
	chk := tryNewCacheChunk(e.Children(0))
	if !e.allAssignmentsAreConstant {
		e.evalBuffer = chunk.MutRowFromTypes(fields)
	}
	// Constant assignments can skip per-row expression evaluation.
	composeFunc := e.fastComposeNewRow
	if !e.allAssignmentsAreConstant {
		composeFunc = e.composeNewRow
	}
	memUsageOfChk := int64(0)
	totalNumRows := 0
	for {
		// Release the previous chunk's memory before fetching the next one.
		e.memTracker.Consume(-memUsageOfChk)
		err := Next(ctx, e.Children(0), chk)
		if err != nil {
			return 0, err
		}
		if chk.NumRows() == 0 {
			break
		}
		memUsageOfChk = chk.MemoryUsage()
		e.memTracker.Consume(memUsageOfChk)
		if e.collectRuntimeStatsEnabled() {
			txn, err := e.Ctx().Txn(true)
			if err == nil && txn.GetSnapshot() != nil {
				txn.GetSnapshot().SetOption(kv.CollectRuntimeStats, e.stats.SnapshotRuntimeStats)
			}
		}
		txn, err := e.Ctx().Txn(true)
		if err == nil {
			sc := e.Ctx().GetSessionVars().StmtCtx
			txn.SetOption(kv.ResourceGroupTagger, sc.GetResourceGroupTagger())
			if sc.KvExecCounter != nil {
				// Bind an interceptor for client-go to count the number of SQL executions of each TiKV.
				txn.SetOption(kv.RPCInterceptor, sc.KvExecCounter.RPCInterceptor())
			}
		}
		for rowIdx := 0; rowIdx < chk.NumRows(); rowIdx++ {
			chunkRow := chk.GetRow(rowIdx)
			datumRow := chunkRow.GetDatumRow(fields)
			// precomputes handles
			if err := e.prepare(datumRow); err != nil {
				return 0, err
			}
			// compose non-generated columns
			newRow, err := composeFunc(globalRowIdx, datumRow, colsInfo)
			if err != nil {
				return 0, err
			}
			// merge non-generated columns
			if err := e.merge(datumRow, newRow, false); err != nil {
				return 0, err
			}
			if e.virtualAssignmentsOffset < len(e.OrderedList) {
				// compose generated columns
				newRow, err = e.composeGeneratedColumns(globalRowIdx, newRow, colsInfo)
				if err != nil {
					return 0, err
				}
				// merge generated columns
				if err := e.merge(datumRow, newRow, true); err != nil {
					return 0, err
				}
			}
			// write to table
			if err := e.exec(ctx, e.Children(0).Schema(), datumRow, newRow); err != nil {
				return 0, err
			}
		}
		totalNumRows += chk.NumRows()
		chk = chunk.Renew(chk, e.MaxChunkSize())
	}
	return totalNumRows, nil
}
// handleErr attaches column/row context to data-conversion errors:
// "data too long" and numeric-overflow errors are rewritten to mention the
// column name and 1-based row number; any other error passes through.
func (*UpdateExec) handleErr(colName model.CIStr, rowIdx int, err error) error {
	if err == nil {
		return nil
	}
	if types.ErrDataTooLong.Equal(err) {
		return resetErrDataTooLong(colName.O, rowIdx+1, err)
	}
	if types.ErrOverflow.Equal(err) {
		return types.ErrWarnDataOutOfRange.GenWithStackByArgs(colName.O, rowIdx+1)
	}
	return err
}
// fastComposeNewRow builds the new row when every assignment is a constant:
// no expression evaluation against the old row is needed, each constant is
// cast to the target column type and copied into a clone of oldRow.
func (e *UpdateExec) fastComposeNewRow(rowIdx int, oldRow []types.Datum, cols []*table.Column) ([]types.Datum, error) {
	newRowData := types.CloneRow(oldRow)
	for _, assign := range e.OrderedList {
		tblIdx := e.assignFlag[assign.Col.Index]
		// Skip assignments that target a table marked non-updatable for this row.
		if tblIdx >= 0 && !e.tableUpdatable[tblIdx] {
			continue
		}
		con := assign.Expr.(*expression.Constant)
		val, err := con.Eval(emptyRow)
		if err = e.handleErr(assign.ColName, rowIdx, err); err != nil {
			return nil, err
		}
		// info of `_tidb_rowid` column is nil.
		// No need to cast `_tidb_rowid` column value.
		if cols[assign.Col.Index] != nil {
			val, err = table.CastValue(e.Ctx(), val, cols[assign.Col.Index].ColumnInfo, false, false)
			if err = e.handleErr(assign.ColName, rowIdx, err); err != nil {
				return nil, err
			}
		}
		val.Copy(&newRowData[assign.Col.Index])
	}
	return newRowData, nil
}
// composeNewRow builds the new row by evaluating the non-generated
// assignments (those before virtualAssignmentsOffset) against evalBuffer,
// which holds a copy of the old row during evaluation.
func (e *UpdateExec) composeNewRow(rowIdx int, oldRow []types.Datum, cols []*table.Column) ([]types.Datum, error) {
	newRowData := types.CloneRow(oldRow)
	e.evalBuffer.SetDatums(newRowData...)
	for _, assign := range e.OrderedList[:e.virtualAssignmentsOffset] {
		tblIdx := e.assignFlag[assign.Col.Index]
		// Skip assignments that target a table marked non-updatable for this row.
		if tblIdx >= 0 && !e.tableUpdatable[tblIdx] {
			continue
		}
		val, err := assign.Expr.Eval(e.evalBuffer.ToRow())
		if err != nil {
			return nil, err
		}
		// info of `_tidb_rowid` column is nil.
		// No need to cast `_tidb_rowid` column value.
		if cols[assign.Col.Index] != nil {
			val, err = table.CastValue(e.Ctx(), val, cols[assign.Col.Index].ColumnInfo, false, false)
			if err = e.handleErr(assign.ColName, rowIdx, err); err != nil {
				return nil, err
			}
		}
		val.Copy(&newRowData[assign.Col.Index])
	}
	return newRowData, nil
}
// composeGeneratedColumns evaluates the generated-column assignments (those
// at and after virtualAssignmentsOffset) on top of the already-composed row.
// Unlike composeNewRow it also writes each result back into evalBuffer so
// that later generated columns can depend on earlier ones.
func (e *UpdateExec) composeGeneratedColumns(rowIdx int, newRowData []types.Datum, cols []*table.Column) ([]types.Datum, error) {
	if e.allAssignmentsAreConstant {
		return newRowData, nil
	}
	e.evalBuffer.SetDatums(newRowData...)
	for _, assign := range e.OrderedList[e.virtualAssignmentsOffset:] {
		tblIdx := e.assignFlag[assign.Col.Index]
		// Skip assignments that target a table marked non-updatable for this row.
		if tblIdx >= 0 && !e.tableUpdatable[tblIdx] {
			continue
		}
		val, err := assign.Expr.Eval(e.evalBuffer.ToRow())
		if err = e.handleErr(assign.ColName, rowIdx, err); err != nil {
			return nil, err
		}
		// info of `_tidb_rowid` column is nil.
		// No need to cast `_tidb_rowid` column value.
		if cols[assign.Col.Index] != nil {
			val, err = table.CastValue(e.Ctx(), val, cols[assign.Col.Index].ColumnInfo, false, false)
			if err = e.handleErr(assign.ColName, rowIdx, err); err != nil {
				return nil, err
			}
		}
		val.Copy(&newRowData[assign.Col.Index])
		e.evalBuffer.SetDatum(assign.Col.Index, val)
	}
	return newRowData, nil
}
// Close implements the Executor Close interface.
// It publishes the statement message, detaches snapshot runtime-stats
// collection, registers collected stats, releases tracked memory, and
// closes the child executor.
func (e *UpdateExec) Close() error {
	defer e.memTracker.ReplaceBytesUsed(0)
	e.setMessage()
	if e.RuntimeStats() != nil && e.stats != nil {
		// Txn(false) never creates a new transaction here.
		txn, err := e.Ctx().Txn(false)
		if err == nil && txn.Valid() && txn.GetSnapshot() != nil {
			txn.GetSnapshot().SetOption(kv.CollectRuntimeStats, nil)
		}
		defer e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), e.stats)
	}
	return e.Children(0).Close()
}
// Open implements the Executor Open interface.
// It sets up a memory tracker attached to the statement tracker and opens
// the child executor.
func (e *UpdateExec) Open(ctx context.Context) error {
	e.memTracker = memory.NewTracker(e.ID(), -1)
	e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker)
	return e.Children(0).Open(ctx)
}
// setMessage sets info message(ERR_UPDATE_INFO) generated by UPDATE statement:
// "Rows matched: %d  Changed: %d  Warnings: %d".
func (e *UpdateExec) setMessage() {
	stmtCtx := e.Ctx().GetSessionVars().StmtCtx
	numMatched := e.matched
	numChanged := stmtCtx.UpdatedRows()
	numWarnings := stmtCtx.WarningCount()
	msg := fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUpdateInfo].Raw, numMatched, numChanged, numWarnings)
	stmtCtx.SetMessage(msg)
}
// collectRuntimeStatsEnabled reports whether runtime statistics collection
// is on for this executor, lazily allocating e.stats on first use.
func (e *UpdateExec) collectRuntimeStatsEnabled() bool {
	if e.RuntimeStats() == nil {
		return false
	}
	if e.stats == nil {
		e.stats = &updateRuntimeStats{
			SnapshotRuntimeStats:  &txnsnapshot.SnapshotRuntimeStats{},
			AllocatorRuntimeStats: autoid.NewAllocatorRuntimeStats(),
		}
	}
	return true
}
// updateRuntimeStats is the execution stats about update statements.
// It embeds snapshot-level and autoid-allocator stats; either may be nil.
type updateRuntimeStats struct {
	*txnsnapshot.SnapshotRuntimeStats
	*autoid.AllocatorRuntimeStats
}
// String renders the non-empty embedded stats, separated by ", ".
// It returns "" when neither component is present.
func (e *updateRuntimeStats) String() string {
	if e.SnapshotRuntimeStats == nil && e.AllocatorRuntimeStats == nil {
		return ""
	}
	out := bytes.NewBuffer(make([]byte, 0, 16))
	if e.SnapshotRuntimeStats != nil {
		if s := e.SnapshotRuntimeStats.String(); s != "" {
			out.WriteString(s)
		}
	}
	if e.AllocatorRuntimeStats != nil {
		if s := e.AllocatorRuntimeStats.String(); s != "" {
			if out.Len() > 0 {
				out.WriteString(", ")
			}
			out.WriteString(s)
		}
	}
	return out.String()
}
// Clone implements the RuntimeStats interface.
// Each non-nil embedded component is deep-cloned into the copy.
func (e *updateRuntimeStats) Clone() execdetails.RuntimeStats {
	cloned := &updateRuntimeStats{}
	if e.SnapshotRuntimeStats != nil {
		cloned.SnapshotRuntimeStats = e.SnapshotRuntimeStats.Clone()
	}
	if e.AllocatorRuntimeStats != nil {
		cloned.AllocatorRuntimeStats = e.AllocatorRuntimeStats.Clone()
	}
	return cloned
}
// Merge implements the RuntimeStats interface.
// Snapshot stats are merged (or cloned when absent locally).
// NOTE(review): allocator stats are only adopted when the receiver has
// none; when both sides are non-nil the other's allocator stats are
// dropped — confirm this asymmetry is intentional.
func (e *updateRuntimeStats) Merge(other execdetails.RuntimeStats) {
	tmp, ok := other.(*updateRuntimeStats)
	if !ok {
		return
	}
	if tmp.SnapshotRuntimeStats != nil {
		if e.SnapshotRuntimeStats == nil {
			snapshotStats := tmp.SnapshotRuntimeStats.Clone()
			e.SnapshotRuntimeStats = snapshotStats
		} else {
			e.SnapshotRuntimeStats.Merge(tmp.SnapshotRuntimeStats)
		}
	}
	if tmp.AllocatorRuntimeStats != nil {
		if e.AllocatorRuntimeStats == nil {
			e.AllocatorRuntimeStats = tmp.AllocatorRuntimeStats.Clone()
		}
	}
}
// Tp implements the RuntimeStats interface, identifying this stats object
// as update-statement runtime stats.
func (*updateRuntimeStats) Tp() int {
	return execdetails.TpUpdateRuntimeStats
}
// GetFKChecks implements WithForeignKeyTrigger interface.
// It flattens the per-table checker lists into one slice.
func (e *UpdateExec) GetFKChecks() []*FKCheckExec {
	all := make([]*FKCheckExec, 0, len(e.fkChecks))
	for _, checks := range e.fkChecks {
		all = append(all, checks...)
	}
	return all
}
// GetFKCascades implements WithForeignKeyTrigger interface.
// It flattens the per-table cascade lists into one slice.
func (e *UpdateExec) GetFKCascades() []*FKCascadeExec {
	// Fix: capacity was taken from len(e.fkChecks) — a copy-paste from
	// GetFKChecks; size the slice hint from e.fkCascades instead.
	fkCascades := make([]*FKCascadeExec, 0, len(e.fkCascades))
	for _, fkc := range e.fkCascades {
		fkCascades = append(fkCascades, fkc...)
	}
	return fkCascades
}
// HasFKCascades implements WithForeignKeyTrigger interface.
// It reports whether any table in this update has foreign-key cascades.
func (e *UpdateExec) HasFKCascades() bool {
	return len(e.fkCascades) > 0
}
|
package grains
import (
"errors"
"math"
"sync"
)
// NUMBER_OF_SQUARE is the number of squares on the chessboard.
const NUMBER_OF_SQUARE = 64

// ZERO is the value returned alongside an error.
const ZERO = 0

// BASE_TWO is the doubling base of the grain progression
// (kept for backward compatibility; Square now uses an exact bit shift).
const BASE_TWO = 2.0

// Square returns the number of grains on square num of the chessboard,
// i.e. 2^(num-1), or an error when num is outside [1, NUMBER_OF_SQUARE].
//
// Improvement: the original computed math.Pow(2, num-1) through float64;
// a bit shift is exact across the full uint64 range and avoids the
// float round-trip.
func Square(num int) (uint64, error) {
	if num > NUMBER_OF_SQUARE || num <= ZERO {
		return ZERO, errors.New("error")
	}
	return uint64(1) << uint(num-1), nil
}
// Total returns the number of grains on the whole board: the sum of
// 2^i for every square i in [0, NUMBER_OF_SQUARE), accumulated by one
// goroutine per square under a mutex.
func Total() uint64 {
	var (
		mu    sync.Mutex
		total uint64
		wg    sync.WaitGroup
	)
	for square := 0; square < NUMBER_OF_SQUARE; square++ {
		wg.Add(1)
		go func(exp float64) {
			defer wg.Done()
			grains := uint64(math.Pow(BASE_TWO, exp))
			mu.Lock()
			total += grains
			mu.Unlock()
		}(float64(square))
	}
	wg.Wait()
	return total
}
|
package el
//Handle packet morphing
import (
"math/rand"
"time"
)
// ElMorpher produces a stream of packet sizes used to morph (reshape)
// outgoing traffic.
type ElMorpher interface {
	// return next packet size
	NextPackSize() int
	// TODO: take interarival time into account
	// NextSizeAndInterval() (int, int)
	// Close()
}
// randMorpher is the most naive morpher: it draws packet sizes uniformly
// at random below the MTU.
type randMorpher struct {
	// channel to get next packet size, fed by a background goroutine
	token chan int
	mtu   int
}
// newRandMorpher returns a randMorpher whose background goroutine keeps a
// buffered channel (capacity 64) filled with uniform random sizes in
// [0, mtu) — note rand.Intn excludes mtu itself and can yield 0.
//
// NOTE(review): the generator goroutine loops forever and has no stop
// signal, so each morpher leaks one goroutine; the interface's commented
// Close() hints this is known but unimplemented.
func newRandMorpher(mtu int) *randMorpher {
	morpher := new(randMorpher)
	morpher.token = make(chan int, 64)
	morpher.mtu = mtu
	go func() {
		// Seed a dedicated source so morphers do not share global state.
		t := time.Now().UnixNano()
		r := rand.New(rand.NewSource(t))
		for {
			morpher.token <- r.Intn(morpher.mtu)
		}
	}()
	return morpher
}
// NextPackSize returns the next pre-generated random packet size,
// blocking until the background goroutine has produced one.
func (m *randMorpher) NextPackSize() int {
	return <-m.token
}
|
package repository
import (
"github.com/GoGroup/Movie-and-events/booking"
"github.com/GoGroup/Movie-and-events/model"
"github.com/jinzhu/gorm"
)
// BookingGormRepo implements booking.BookingRepository backed by a gorm
// database connection. (Comment fixed: it previously described an
// unrelated CommentGormRepo.)
type BookingGormRepo struct {
	conn *gorm.DB // shared gorm connection handle
}
// NewBookingGormRepo returns a new BookingGormRepo wrapping the given gorm
// connection. (Comment fixed: it previously named NewHALLGormRepo.)
func NewBookingGormRepo(db *gorm.DB) booking.BookingRepository {
	return &BookingGormRepo{conn: db}
}
// Bookings returns every booking belonging to the user identified by uid,
// or the accumulated database errors when the query fails.
func (bkkRepo *BookingGormRepo) Bookings(uid uint) ([]model.Booking, []error) {
	bookings := []model.Booking{}
	errs := bkkRepo.conn.Where("user_id = ?", uid).Find(&bookings).GetErrors()
	if len(errs) != 0 {
		return nil, errs
	}
	return bookings, errs
}
//Hall retrieves a Hall from the database by its id
// func (hllRepo *HallGormRepo) Hall(id uint) (*model.Hall, []error) {
// hll := model.Hall{}
// errs := hllRepo.conn.First(&hll, id).GetErrors()
// if len(errs) > 0 {
// return nil, errs
// }
// return &hll, errs
// }
// // UpdateHall
// func (hllRepo *HallGormRepo) UpdateHall(hall *model.Hall) (*model.Hall, []error) {
// hll := hall
// errs := hllRepo.conn.Save(hll).GetErrors()
// if len(errs) > 0 {
// return nil, errs
// }
// return hll, errs
// }
// // DeleteHall
// func (hllRepo *HallGormRepo) DeleteHall(id uint) (*model.Hall, []error) {
// hll, errs := hllRepo.Hall(id)
// if len(errs) > 0 {
// return nil, errs
// }
// errs = hllRepo.conn.Delete(hll, id).GetErrors()
// if len(errs) > 0 {
// return nil, errs
// }
// return hll, errs
// }
// StoreBooking persists the given booking record and returns it, or the
// accumulated database errors when the insert fails.
func (bkkRepo *BookingGormRepo) StoreBooking(booking *model.Booking) (*model.Booking, []error) {
	errs := bkkRepo.conn.Create(booking).GetErrors()
	if len(errs) != 0 {
		return nil, errs
	}
	return booking, errs
}
|
package pathfileops
import "testing"
// TestDirMgr_MoveDirectoryTree_01 exercises the happy path of
// DirMgr.MoveDirectoryTree: a tree copied from ../logTest into a scratch
// source directory is moved to a target directory; afterwards the source
// must be gone and the reported move statistics must match the file and
// directory counts captured before the move.
//
// Fix: the clean-up error format string used "\b" (backspace escape)
// where "\n" was intended.
func TestDirMgr_MoveDirectoryTree_01(t *testing.T) {
	baseDir := "../dirmgrtests/TestDirMgr_MoveDirectoryTree_01"
	srcDir := baseDir + "/source"
	targetDir := baseDir + "/target"
	fh := FileHelper{}
	// Setup: start from a clean scratch area.
	err := fh.DeleteDirPathAll(baseDir)
	if err != nil {
		t.Errorf("Test Setup Error returned by fh.DeleteDirPathAll(baseDir).\n"+
			"baseDir='%v'\nError='%v'\n", baseDir, err.Error())
		return
	}
	targetDMgr, err := DirMgr{}.New(targetDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(targetDMgr).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr, err.Error())
		return
	}
	srcDirMgr, err := DirMgr{}.New(srcDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(srcDir).\n"+
			"srcDir='%v'\nError='%v'\n", srcDir, err.Error())
		return
	}
	origSrcDir := "../logTest"
	origSrcDMgr, err := DirMgr{}.New(origSrcDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(origSrcDir).\n"+
			"origSrcDir='%v'\nError='%v'\n", origSrcDir, err.Error())
		return
	}
	// Populate the scratch source directory from the fixture tree.
	fsc := FileSelectionCriteria{}
	_, errs := origSrcDMgr.CopyDirectoryTree(srcDirMgr, true, fsc)
	if len(errs) > 0 {
		t.Errorf("Test Setup Error returned from origSrcDMgr."+
			"CopyDirectoryTree(srcDirMgr, fsc)\n"+
			"srcDirMgr='%v'\nErrors Follow:\n\n'%v'",
			srcDirMgr.GetAbsolutePath(),
			srcDirMgr.ConsolidateErrors(errs))
		_ = fh.DeleteDirPathAll(baseDir)
		return
	}
	// Capture the expected file/directory counts before the move.
	fsc = FileSelectionCriteria{}
	origDtreeInfo, err := origSrcDMgr.FindWalkDirFiles(fsc)
	if err != nil {
		t.Errorf("Test Setup Error returned by origSrcDMgr.FindWalkDirFiles(fsc).\n"+
			"origSrcDMgr='%v'\nError='%v'\n", origSrcDMgr.GetAbsolutePath(), err.Error())
		_ = fh.DeleteDirPathAll(baseDir)
		return
	}
	// Operation under test.
	dirMoveStats, errs := srcDirMgr.MoveDirectoryTree(targetDMgr)
	if len(errs) > 0 {
		t.Errorf("Error returned from srcDirMgr.MoveDirectoryTree(targetDMgr)\n"+
			"srcDirMgr='%v'\ntargetDir='%v'\nErrors Follow:\n\n%v",
			srcDirMgr.GetAbsolutePath(),
			targetDMgr.GetAbsolutePath(),
			srcDirMgr.ConsolidateErrors(errs))
		_ = fh.DeleteDirPathAll(baseDir)
		return
	}
	fsc = FileSelectionCriteria{}
	targetDtreeInfo, err := targetDMgr.FindWalkDirFiles(fsc)
	if err != nil {
		t.Errorf("Test Setup Error returned by targetDMgr.FindWalkDirFiles(fsc).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr.GetAbsolutePath(), err.Error())
		_ = fh.DeleteDirPathAll(baseDir)
		return
	}
	// Verify the moved tree matches the original snapshot.
	if origDtreeInfo.FoundFiles.GetNumOfFileMgrs() != targetDtreeInfo.FoundFiles.GetNumOfFileMgrs() {
		t.Errorf("Expected the target directory would contain %v-files.\n"+
			"Error: The target directory tree has %v-files.\n",
			origDtreeInfo.FoundFiles.GetNumOfFileMgrs(), targetDtreeInfo.FoundFiles.GetNumOfFileMgrs())
		_ = fh.DeleteDirPathAll(baseDir)
		return
	}
	if origDtreeInfo.Directories.GetNumOfDirs() != targetDtreeInfo.Directories.GetNumOfDirs() {
		t.Errorf("Expected the target directory would contain %v-directories.\n"+
			"Error: The target directory tree has %v-directories.\n",
			origDtreeInfo.Directories.GetNumOfDirs(), targetDtreeInfo.Directories.GetNumOfDirs())
		_ = fh.DeleteDirPathAll(baseDir)
		return
	}
	if srcDirMgr.DoesAbsolutePathExist() {
		t.Errorf("Error: Expected that 'sourceDir' would NOT exist because all files were moved.\n"+
			"Instead, the source directory DOES EXIST!\n"+
			"Source Dir='%v'", srcDirMgr.GetAbsolutePath())
	}
	// Verify the reported move statistics.
	expectedNumFilesMoved := uint64(origDtreeInfo.FoundFiles.GetNumOfFileMgrs())
	if expectedNumFilesMoved != dirMoveStats.SourceFilesMoved {
		t.Errorf("ERROR: Expected dirMoveStats.SourceFilesMoved='%v'\n"+
			"Instead, dirMoveStats.SourceFilesMoved='%v'\n",
			expectedNumFilesMoved,
			dirMoveStats.SourceFilesMoved)
	}
	expectedNumFileBytesMoved := origDtreeInfo.FoundFiles.GetTotalFileBytes()
	if expectedNumFileBytesMoved != dirMoveStats.SourceFileBytesMoved {
		t.Errorf("ERROR: Expected dirMoveStats.SourceFileBytesMoved='%v'\n"+
			"Instead, dirMoveStats.SourceFileBytesMoved='%v'\n",
			expectedNumFileBytesMoved,
			dirMoveStats.SourceFileBytesMoved)
	}
	if expectedNumFilesMoved != dirMoveStats.TotalSrcFilesProcessed {
		t.Errorf("ERROR: Expected dirMoveStats.TotalSrcFilesProcessed='%v'\n"+
			"Instead, dirMoveStats.TotalSrcFilesProcessed='%v'\n",
			expectedNumFilesMoved,
			dirMoveStats.TotalSrcFilesProcessed)
	}
	expectedNumDirsMoved := uint64(origDtreeInfo.Directories.GetNumOfDirs())
	if expectedNumDirsMoved != dirMoveStats.TotalDirsProcessed {
		t.Errorf("ERROR: Expected dirMoveStats.TotalDirsProcessed='%v'\n"+
			"Instead, dirMoveStats.TotalDirsProcessed='%v'\n",
			expectedNumDirsMoved,
			dirMoveStats.TotalDirsProcessed)
	}
	if expectedNumDirsMoved != dirMoveStats.DirsCreated {
		t.Errorf("ERROR: Expected dirMoveStats.DirsCreated='%v'\n"+
			"Instead, dirMoveStats.DirsCreated='%v'\n",
			expectedNumDirsMoved,
			dirMoveStats.DirsCreated)
	}
	// Subdirectories exclude the tree's top directory.
	expectedNumDirsMoved--
	if expectedNumDirsMoved != dirMoveStats.NumOfSubDirectories {
		t.Errorf("ERROR: Expected dirMoveStats.NumOfSubDirectories='%v'\n"+
			"Instead, dirMoveStats.NumOfSubDirectories='%v'\n",
			expectedNumDirsMoved,
			dirMoveStats.NumOfSubDirectories)
	}
	if !dirMoveStats.SourceDirWasDeleted {
		t.Error("ERROR: Expected dirMoveStats.SourceDirWasDeleted='true'.\n" +
			"Instead, dirMoveStats.SourceDirWasDeleted='false'\n")
	}
	// Clean-up.
	err = fh.DeleteDirPathAll(baseDir)
	if err != nil {
		t.Errorf("Test Clean-Up Error returned by "+
			"fh.DeleteDirPathAll(baseDir)\nbaseDir='%v'\n"+
			"Error='%v'\n", baseDir, err.Error())
	}
}
// TestDirMgr_MoveDirectoryTree_02 verifies that MoveDirectoryTree returns
// errors when the source DirMgr is invalid (isInitialized == false).
//
// Fix: the clean-up error format string used "\b" (backspace escape)
// where "\n" was intended.
func TestDirMgr_MoveDirectoryTree_02(t *testing.T) {
	baseDir := "../dirmgrtests/TestDirMgr_MoveDirectoryTree_02"
	srcDir := baseDir + "/source"
	targetDir := baseDir + "/target"
	fh := FileHelper{}
	err := fh.DeleteDirPathAll(baseDir)
	if err != nil {
		t.Errorf("Test Setup Error returned by fh.DeleteDirPathAll(baseDir).\n"+
			"baseDir='%v'\nError='%v'\n", baseDir, err.Error())
		return
	}
	targetDMgr, err := DirMgr{}.New(targetDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(targetDMgr).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr, err.Error())
		return
	}
	srcDirMgr, err := DirMgr{}.New(srcDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(srcDir).\n"+
			"srcDir='%v'\nError='%v'\n", srcDir, err.Error())
		return
	}
	origSrcDir := "../logTest"
	origSrcDMgr, err := DirMgr{}.New(origSrcDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(origSrcDir).\n"+
			"origSrcDir='%v'\nError='%v'\n", origSrcDir, err.Error())
		return
	}
	fsc := FileSelectionCriteria{}
	_, errs := origSrcDMgr.CopyDirectoryTree(srcDirMgr, true, fsc)
	if len(errs) > 0 {
		t.Errorf("Test Setup Error returned from origSrcDMgr."+
			"CopyDirectoryTree(srcDirMgr, fsc)\n"+
			"srcDirMgr='%v'\nErrors Follow:\n\n'%v'",
			srcDirMgr.GetAbsolutePath(),
			srcDirMgr.ConsolidateErrors(errs))
		_ = fh.DeleteDirPathAll(baseDir)
		return
	}
	// Invalidate the source manager to force the expected error path.
	srcDirMgr.isInitialized = false
	_, errs = srcDirMgr.MoveDirectoryTree(targetDMgr)
	if len(errs) == 0 {
		t.Error("Expected an error return by srcDirMgr.MoveDirectoryTree(targetDMgr) because\n" +
			"'srcDirMgr' is INVALID! However NO ERROR WAS RETURNED!!!\n")
	}
	srcDirMgr.isInitialized = true
	err = fh.DeleteDirPathAll(baseDir)
	if err != nil {
		t.Errorf("Test Clean-Up Error returned by "+
			"fh.DeleteDirPathAll(baseDir)\nbaseDir='%v'\n"+
			"Error='%v'\n", baseDir, err.Error())
	}
}
// TestDirMgr_MoveDirectoryTree_03 verifies that MoveDirectoryTree returns
// errors when the target DirMgr is invalid (isInitialized == false).
//
// Fix: the clean-up error format string used "\b" (backspace escape)
// where "\n" was intended.
func TestDirMgr_MoveDirectoryTree_03(t *testing.T) {
	baseDir := "../dirmgrtests/TestDirMgr_MoveDirectoryTree_03"
	srcDir := baseDir + "/source"
	targetDir := baseDir + "/target"
	fh := FileHelper{}
	err := fh.DeleteDirPathAll(baseDir)
	if err != nil {
		t.Errorf("Test Setup Error returned by fh.DeleteDirPathAll(baseDir).\n"+
			"baseDir='%v'\nError='%v'\n", baseDir, err.Error())
		return
	}
	targetDMgr, err := DirMgr{}.New(targetDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(targetDMgr).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr, err.Error())
		return
	}
	srcDirMgr, err := DirMgr{}.New(srcDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(srcDir).\n"+
			"srcDir='%v'\nError='%v'\n", srcDir, err.Error())
		return
	}
	origSrcDir := "../logTest"
	origSrcDMgr, err := DirMgr{}.New(origSrcDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(origSrcDir).\n"+
			"origSrcDir='%v'\nError='%v'\n", origSrcDir, err.Error())
		return
	}
	fsc := FileSelectionCriteria{}
	_, errs := origSrcDMgr.CopyDirectoryTree(srcDirMgr, true, fsc)
	if len(errs) > 0 {
		t.Errorf("Test Setup Error returned from origSrcDMgr."+
			"CopyDirectoryTree(srcDirMgr, fsc)\n"+
			"srcDirMgr='%v'\nErrors Follow:\n\n'%v'",
			srcDirMgr.GetAbsolutePath(),
			srcDirMgr.ConsolidateErrors(errs))
		_ = fh.DeleteDirPathAll(baseDir)
		return
	}
	// Invalidate the target manager to force the expected error path.
	targetDMgr.isInitialized = false
	_, errs = srcDirMgr.MoveDirectoryTree(targetDMgr)
	if len(errs) == 0 {
		t.Error("Expected an error return by srcDirMgr.MoveDirectoryTree(targetDMgr) because\n" +
			"'targetDMgr' is INVALID! However NO ERROR WAS RETURNED!!!\n")
	}
	targetDMgr.isInitialized = true
	err = fh.DeleteDirPathAll(baseDir)
	if err != nil {
		t.Errorf("Test Clean-Up Error returned by "+
			"fh.DeleteDirPathAll(baseDir)\nbaseDir='%v'\n"+
			"Error='%v'\n", baseDir, err.Error())
	}
}
// TestDirMgr_MoveDirectoryTree_04 verifies that MoveDirectoryTree returns
// errors when the source directory does not exist on disk.
//
// Fix: the clean-up error format string used "\b" (backspace escape)
// where "\n" was intended.
func TestDirMgr_MoveDirectoryTree_04(t *testing.T) {
	baseDir := "../dirmgrtests/TestDirMgr_MoveDirectoryTree_04"
	srcDir := baseDir + "/source"
	targetDir := baseDir + "/target"
	fh := FileHelper{}
	err := fh.DeleteDirPathAll(baseDir)
	if err != nil {
		t.Errorf("Test Setup Error returned by fh.DeleteDirPathAll(baseDir).\n"+
			"baseDir='%v'\nError='%v'\n", baseDir, err.Error())
		return
	}
	targetDMgr, err := DirMgr{}.New(targetDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(targetDMgr).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr, err.Error())
		return
	}
	srcDirMgr, err := DirMgr{}.New(srcDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(srcDir).\n"+
			"srcDir='%v'\nError='%v'\n", srcDir, err.Error())
		return
	}
	// No files are copied in: the source path is valid but absent on disk.
	_, errs := srcDirMgr.MoveDirectoryTree(targetDMgr)
	if len(errs) == 0 {
		t.Error("Expected an error return by srcDirMgr.MoveDirectoryTree(targetDMgr) because\n" +
			"'srcDirMgr' DOES NOT EXIST! However NO ERROR WAS RETURNED!!!\n")
	}
	err = fh.DeleteDirPathAll(baseDir)
	if err != nil {
		t.Errorf("Test Clean-Up Error returned by "+
			"fh.DeleteDirPathAll(baseDir)\nbaseDir='%v'\n"+
			"Error='%v'\n", baseDir, err.Error())
	}
}
// TestDirMgr_MoveSubDirectoryTree_01 copies a reference tree into a scratch
// source directory, moves only its sub-directories into a target directory,
// then verifies the resulting file/directory counts and the statistics
// reported in dirMoveStats.
func TestDirMgr_MoveSubDirectoryTree_01(t *testing.T) {
	baseDir := "../dirmgrtests/TestDirMgr_MoveSubDirectoryTree_01"
	srcDir := baseDir + "/source"
	targetDir := baseDir + "/target"
	fh := FileHelper{}
	err := fh.DeleteDirPathAll(baseDir)
	if err != nil {
		t.Errorf("Test Setup Error returned by fh.DeleteDirPathAll(baseDir).\n"+
			"baseDir='%v'\nError='%v'\n", baseDir, err.Error())
		return
	}
	targetDMgr, err := DirMgr{}.New(targetDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(targetDMgr).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr, err.Error())
		return
	}
	srcDirMgr, err := DirMgr{}.New(srcDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(srcDir).\n"+
			"srcDir='%v'\nError='%v'\n", srcDir, err.Error())
		return
	}
	origSrcDir := "../logTest"
	origSrcDMgr, err := DirMgr{}.New(origSrcDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(origSrcDir).\n"+
			"origSrcDir='%v'\nError='%v'\n", origSrcDir, err.Error())
		return
	}
	// Populate the scratch source directory with the reference tree.
	fsc := FileSelectionCriteria{}
	_, errs := origSrcDMgr.CopyDirectoryTree(srcDirMgr, true, fsc)
	if len(errs) > 0 {
		t.Errorf("Test Setup Error returned from origSrcDMgr."+
			"CopyDirectoryTree(srcDirMgr, fsc)\n"+
			"srcDirMgr='%v'\nErrors Follow:\n\n'%v'",
			srcDirMgr.GetAbsolutePath(),
			srcDirMgr.ConsolidateErrors(errs))
		_ = fh.DeleteDirPathAll(baseDir)
		return
	}
	// Snapshot the original tree for later comparison.
	fsc = FileSelectionCriteria{}
	origDtreeInfo, err := origSrcDMgr.FindWalkDirFiles(fsc)
	if err != nil {
		t.Errorf("Test Setup Error returned by origSrcDMgr.FindWalkDirFiles(fsc).\n"+
			"origSrcDMgr='%v'\nError='%v'\n", origSrcDMgr.GetAbsolutePath(), err.Error())
		_ = fh.DeleteDirPathAll(baseDir)
		return
	}
	dirMoveStats,
		errs := srcDirMgr.MoveSubDirectoryTree(targetDMgr)
	if len(errs) > 0 {
		t.Errorf("Error returned from srcDirMgr.MoveSubDirectoryTree(targetDMgr)\n"+
			"srcDirMgr='%v'\ntargetDir='%v'\nErrors Follow:\n\n'%v'",
			srcDirMgr.GetAbsolutePath(),
			targetDMgr.GetAbsolutePath(),
			srcDirMgr.ConsolidateErrors(errs))
		_ = fh.DeleteDirPathAll(baseDir)
		return
	}
	fsc = FileSelectionCriteria{}
	targetDtreeInfo, err := targetDMgr.FindWalkDirFiles(fsc)
	if err != nil {
		t.Errorf("Test Setup Error returned by targetDMgr.FindWalkDirFiles(fsc).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr.GetAbsolutePath(), err.Error())
		_ = fh.DeleteDirPathAll(baseDir)
		return
	}
	origDtreeFiles := origDtreeInfo.FoundFiles.GetNumOfFileMgrs()
	// NOTE(review): the '- 4' presumably accounts for the files that live in
	// the top-level source directory itself (which are not moved) — confirm
	// against the ../logTest fixture.
	expectedNumOfMovedFiles := uint64(origDtreeFiles - 4)
	if expectedNumOfMovedFiles != uint64(targetDtreeInfo.FoundFiles.GetNumOfFileMgrs()) {
		// Bug fix: the original printed origDtreeFiles here although the
		// comparison above uses expectedNumOfMovedFiles.
		t.Errorf("Expected the target directory would contain %v-files.\n"+
			"Error: The target directory tree has %v-files.\n",
			expectedNumOfMovedFiles, targetDtreeInfo.FoundFiles.GetNumOfFileMgrs())
		_ = fh.DeleteDirPathAll(baseDir)
		return
	}
	if origDtreeInfo.Directories.GetNumOfDirs() != targetDtreeInfo.Directories.GetNumOfDirs() {
		t.Errorf("Expected the target directory would contain %v-directories.\n"+
			"Error: The target directory tree has %v-directories.\n",
			origDtreeInfo.Directories.GetNumOfDirs(), targetDtreeInfo.Directories.GetNumOfDirs())
		_ = fh.DeleteDirPathAll(baseDir)
		return
	}
	if !srcDirMgr.DoesAbsolutePathExist() {
		t.Errorf("Error: Expected that 'sourceDir' would still exist because only sub-directories\n"+
			"should have been moved.\nInstead, the source directory was deleted and DOES NOT EXIST!\n"+
			"Source Dir='%v'", srcDirMgr.GetAbsolutePath())
		_ = fh.DeleteDirPathAll(baseDir)
		return
	}
	fsc = FileSelectionCriteria{}
	sourceDtreeInfo, err := srcDirMgr.FindWalkDirFiles(fsc)
	if err != nil {
		t.Errorf("Error returned by srcDirMgr.FindWalkDirFiles(fsc).\n"+
			"srcDirMgr='%v'\nError='%v'\n", srcDirMgr.GetAbsolutePath(), err.Error())
		_ = fh.DeleteDirPathAll(baseDir)
		return
	}
	// Only the source directory itself should remain.
	if sourceDtreeInfo.Directories.GetNumOfDirs() != 1 {
		t.Errorf("Expected the number of directories remaining in source directories\n"+
			"would equal '1'.\nInstead, the number directories is %v'.",
			sourceDtreeInfo.Directories.GetNumOfDirs())
		_ = fh.DeleteDirPathAll(baseDir)
		return
	}
	fsc = FileSelectionCriteria{}
	origSrcFInfo, err := origSrcDMgr.FindFilesBySelectCriteria(fsc)
	if err != nil {
		t.Errorf("Test Setup Error returned by origSrcDMgr.FindFilesBySelectCriteria(fsc).\n"+
			"origSrcDMgr='%v'\nError='%v'\n",
			origSrcDMgr.GetAbsolutePath(), err.Error())
		_ = fh.DeleteDirPathAll(baseDir)
		return
	}
	if origSrcFInfo.GetNumOfFileMgrs() != sourceDtreeInfo.FoundFiles.GetNumOfFileMgrs() {
		t.Errorf("Expected number of files in source directory='%v'.\n"+
			"Instead, the number of files remaining in source directory='%v'.\n",
			origSrcFInfo.GetNumOfFileMgrs(), sourceDtreeInfo.FoundFiles.GetNumOfFileMgrs())
	}
	// Verify the statistics reported by MoveSubDirectoryTree.
	if expectedNumOfMovedFiles != dirMoveStats.SourceFilesMoved {
		t.Errorf("ERROR: Expected dirMoveStats.SourceFilesMoved='%v'\n"+
			"Instead, dirMoveStats.SourceFilesMoved='%v'\n",
			expectedNumOfMovedFiles,
			dirMoveStats.SourceFilesMoved)
	}
	expectedNumOfMovedFileBytes :=
		targetDtreeInfo.FoundFiles.GetTotalFileBytes()
	if expectedNumOfMovedFileBytes != dirMoveStats.SourceFileBytesMoved {
		t.Errorf("ERROR: Expected dirMoveStats.SourceFileBytesMoved='%v'\n"+
			"Instead, dirMoveStats.SourceFileBytesMoved='%v'\n",
			expectedNumOfMovedFileBytes,
			dirMoveStats.SourceFileBytesMoved)
	}
	if dirMoveStats.SourceFilesRemaining != 0 {
		t.Errorf("ERROR: Expected dirMoveStats.SourceFilesRemaining='0'\n"+
			"Instead, dirMoveStats.SourceFilesRemaining='%v'\n",
			dirMoveStats.SourceFilesRemaining)
	}
	if dirMoveStats.SourceFileBytesRemaining != 0 {
		t.Errorf("ERROR: Expected dirMoveStats.SourceFileBytesRemaining='0'\n"+
			"Instead, dirMoveStats.SourceFileBytesRemaining='%v'\n",
			dirMoveStats.SourceFileBytesRemaining)
	}
	expectedNumOfSubDirs :=
		uint64(targetDtreeInfo.Directories.GetNumOfDirs() - 1)
	if expectedNumOfSubDirs != dirMoveStats.NumOfSubDirectories {
		t.Errorf("ERROR: Expected dirMoveStats.NumOfSubDirectories='%v'\n"+
			"Instead, dirMoveStats.NumOfSubDirectories='%v'\n",
			expectedNumOfSubDirs,
			dirMoveStats.NumOfSubDirectories)
	}
	// Clean-up ('\n' fix — original used a '\b' backspace escape).
	err = fh.DeleteDirPathAll(baseDir)
	if err != nil {
		t.Errorf("Test Clean-Up Error returned by "+
			"fh.DeleteDirPathAll(baseDir)\nbaseDir='%v'\n"+
			"Error='%v'\n", baseDir, err.Error())
	}
	return
}
// TestDirMgr_MoveSubDirectoryTree_02 verifies that MoveSubDirectoryTree
// returns an error when the source DirMgr is invalid.
func TestDirMgr_MoveSubDirectoryTree_02(t *testing.T) {
	baseDir := "../dirmgrtests/TestDirMgr_MoveSubDirectoryTree_02"
	srcDir := baseDir + "/source"
	targetDir := baseDir + "/target"
	fh := FileHelper{}
	err := fh.DeleteDirPathAll(baseDir)
	if err != nil {
		t.Errorf("Test Setup Error returned by fh.DeleteDirPathAll(baseDir).\n"+
			"baseDir='%v'\nError='%v'\n", baseDir, err.Error())
		return
	}
	targetDMgr, err := DirMgr{}.New(targetDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(targetDMgr).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr, err.Error())
		return
	}
	srcDirMgr, err := DirMgr{}.New(srcDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(srcDir).\n"+
			"srcDir='%v'\nError='%v'\n", srcDir, err.Error())
		return
	}
	origSrcDir := "../logTest"
	origSrcDMgr, err := DirMgr{}.New(origSrcDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(origSrcDir).\n"+
			"origSrcDir='%v'\nError='%v'\n", origSrcDir, err.Error())
		return
	}
	fsc := FileSelectionCriteria{}
	_, errs := origSrcDMgr.CopyDirectoryTree(srcDirMgr, true, fsc)
	if len(errs) > 0 {
		// ConsolidateErrors already folds every error into one message; the
		// original printed that identical message len(errs) times.
		t.Errorf("Test Setup Error returned from origSrcDMgr."+
			"CopyDirectoryTree(srcDirMgr, fsc)\n"+
			"srcDirMgr='%v'\nErrors Follow:\n\n'%v'",
			srcDirMgr.GetAbsolutePath(),
			srcDirMgr.ConsolidateErrors(errs))
		_ = fh.DeleteDirPathAll(baseDir)
		return
	}
	// Invalidate the source DirMgr; the move must now fail.
	srcDirMgr.isInitialized = false
	_, errs = srcDirMgr.MoveSubDirectoryTree(targetDMgr)
	if len(errs) == 0 {
		t.Errorf("Expected an error return from srcDirMgr.MoveSubDirectoryTree(targetDMgr)\n" +
			"because 'srcDirMgr' is INVALID!\nHowever, NO ERROR WAS RETURNED!!!\n")
	}
	srcDirMgr.isInitialized = true
	// Clean-up ('\n' fix — original used a '\b' backspace escape).
	err = fh.DeleteDirPathAll(baseDir)
	if err != nil {
		t.Errorf("Test Clean-Up Error returned by "+
			"fh.DeleteDirPathAll(baseDir)\nbaseDir='%v'\n"+
			"Error='%v'\n", baseDir, err.Error())
	}
	return
}
// TestDirMgr_MoveSubDirectoryTree_03 verifies that MoveSubDirectoryTree
// returns an error when the target DirMgr is invalid.
func TestDirMgr_MoveSubDirectoryTree_03(t *testing.T) {
	baseDir := "../dirmgrtests/TestDirMgr_MoveSubDirectoryTree_03"
	srcDir := baseDir + "/source"
	targetDir := baseDir + "/target"
	fh := FileHelper{}
	err := fh.DeleteDirPathAll(baseDir)
	if err != nil {
		t.Errorf("Test Setup Error returned by fh.DeleteDirPathAll(baseDir).\n"+
			"baseDir='%v'\nError='%v'\n", baseDir, err.Error())
		return
	}
	targetDMgr, err := DirMgr{}.New(targetDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(targetDMgr).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr, err.Error())
		return
	}
	srcDirMgr, err := DirMgr{}.New(srcDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(srcDir).\n"+
			"srcDir='%v'\nError='%v'\n", srcDir, err.Error())
		return
	}
	origSrcDir := "../logTest"
	origSrcDMgr, err := DirMgr{}.New(origSrcDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(origSrcDir).\n"+
			"origSrcDir='%v'\nError='%v'\n", origSrcDir, err.Error())
		return
	}
	fsc := FileSelectionCriteria{}
	_, errs := origSrcDMgr.CopyDirectoryTree(srcDirMgr, true, fsc)
	if len(errs) > 0 {
		// ConsolidateErrors already folds every error into one message; the
		// original printed that identical message len(errs) times.
		t.Errorf("Test Setup Error returned from origSrcDMgr."+
			"CopyDirectoryTree(srcDirMgr, fsc)\n"+
			"srcDirMgr='%v'\nErrors Follow:\n\n'%v'",
			srcDirMgr.GetAbsolutePath(),
			srcDirMgr.ConsolidateErrors(errs))
		_ = fh.DeleteDirPathAll(baseDir)
		return
	}
	// Invalidate the target DirMgr; the move must now fail.
	targetDMgr.isInitialized = false
	_, errs = srcDirMgr.MoveSubDirectoryTree(targetDMgr)
	if len(errs) == 0 {
		t.Errorf("Expected an error return from srcDirMgr.MoveSubDirectoryTree(targetDMgr)\n" +
			"because 'targetDMgr' is INVALID!\nHowever, NO ERROR WAS RETURNED!!!\n")
	}
	targetDMgr.isInitialized = true
	// Clean-up ('\n' fix — original used a '\b' backspace escape).
	err = fh.DeleteDirPathAll(baseDir)
	if err != nil {
		t.Errorf("Test Clean-Up Error returned by "+
			"fh.DeleteDirPathAll(baseDir)\nbaseDir='%v'\n"+
			"Error='%v'\n", baseDir, err.Error())
	}
	return
}
// TestDirMgr_MoveSubDirectoryTree_04 verifies that MoveSubDirectoryTree
// returns an error when the source directory does not exist on disk.
func TestDirMgr_MoveSubDirectoryTree_04(t *testing.T) {
	// Bug fix: the scratch directory name was copy-pasted as "..._03", which
	// collides with TestDirMgr_MoveSubDirectoryTree_03's scratch directory.
	baseDir := "../dirmgrtests/TestDirMgr_MoveSubDirectoryTree_04"
	srcDir := baseDir + "/source"
	targetDir := baseDir + "/target"
	fh := FileHelper{}
	err := fh.DeleteDirPathAll(baseDir)
	if err != nil {
		t.Errorf("Test Setup Error returned by fh.DeleteDirPathAll(baseDir).\n"+
			"baseDir='%v'\nError='%v'\n", baseDir, err.Error())
		return
	}
	targetDMgr, err := DirMgr{}.New(targetDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(targetDMgr).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr, err.Error())
		return
	}
	srcDirMgr, err := DirMgr{}.New(srcDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(srcDir).\n"+
			"srcDir='%v'\nError='%v'\n", srcDir, err.Error())
		return
	}
	// 'srcDir' was never created on disk, so the move must fail.
	_, errs := srcDirMgr.MoveSubDirectoryTree(targetDMgr)
	if len(errs) == 0 {
		t.Errorf("Expected an error return from srcDirMgr.MoveSubDirectoryTree(targetDMgr)\n" +
			"because 'srcDirMgr' Does Not Exist!\nHowever, NO ERROR WAS RETURNED!!!\n")
	}
	// Clean-up ('\n' fix — original used a '\b' backspace escape).
	err = fh.DeleteDirPathAll(baseDir)
	if err != nil {
		t.Errorf("Test Clean-Up Error returned by "+
			"fh.DeleteDirPathAll(baseDir)\nbaseDir='%v'\n"+
			"Error='%v'\n", baseDir, err.Error())
	}
	return
}
|
//go:build demo
package main
func init() {
globals.ReleaseMode = "demo"
}
|
//author xinbing
//time 2018/9/3 16:56
package sms
import (
"common-utilities/http_utils"
"common-utilities/utilities"
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"net/url"
"sort"
"strings"
"time"
)
// AliCloudSMSClient holds the credentials and request defaults used to call
// the Alibaba Cloud (Aliyun) SMS HTTP API.
type AliCloudSMSClient struct {
	AccessKeyId string //required
	AccessKeySecret string //required
	RegionId string //region identifier
	Version string //API version
}
// GetAliCloudSMSClient builds an SMS client from an access-key pair, using
// the package defaults for region and API version.
func GetAliCloudSMSClient(accessKeyId, accessKeySecret string) *AliCloudSMSClient {
	return &AliCloudSMSClient{
		AccessKeyId:     accessKeyId,
		AccessKeySecret: accessKeySecret,
		RegionId:        defaultRegion,
		Version:         version,
	}
}
// GetAliCloudSMSClientByProfile is not implemented yet; it always returns nil.
func GetAliCloudSMSClientByProfile() *AliCloudSMSClient {
	return nil
}
// SendMessage sends an SMS through the Aliyun "SendSms" action: it marshals
// the template parameters, signs the complete parameter set and issues a GET
// request, returning the decoded API response.
func (p *AliCloudSMSClient) SendMessage(req *AliCloudSMSSendReq) (*AliCloudSMSSendResp,error) {
	params := p.getBaseParams()
	// TemplateParam travels to the API as a JSON object string.
	content,err := json.Marshal(req.TemplateParam)
	if err != nil {
		return nil, errors.New("parse template param error:"+err.Error())
	}
	params["Action"] = sendMessageAction
	params["PhoneNumbers"] = req.PhoneNumbers
	params["SignName"] = req.SignName
	params["TemplateCode"] = req.TemplateCode
	params["TemplateParam"] = string(content)
	params["SmsUpExtendCode"] = req.SmsUpExtendCode
	params["OutId"] = req.OutId
	// queryStr starts with '&', so it concatenates directly after Signature.
	queryStr,signature := p.generateQueryStringAndSignature(params)
	reqUrl := smsProductDomain + "?Signature=" + signature + queryStr
	// NOTE(review): this prints the fully signed request URL (signature
	// included) to stdout — looks like leftover debugging; consider removing
	// it together with the then-unused "fmt" import.
	fmt.Println("reqUrl:", reqUrl)
	// 'content' is reused here to hold the raw HTTP response body.
	content, err = http_utils.Get(reqUrl, nil)
	if err != nil {
		return nil, errors.New("req to send message error:"+err.Error())
	}
	//fmt.Println(string(content))
	var resp AliCloudSMSSendResp
	err = json.Unmarshal(content, &resp)
	if err != nil {
		return nil, errors.New("unmarshal resp error:"+err.Error())
	}
	return &resp, nil
}
// getBaseParams assembles the common request parameters every Aliyun SMS
// call must carry before the request is signed.
func (p *AliCloudSMSClient) getBaseParams() map[string]string {
	timeStamp, nonce := getTimestampAndNonce()
	return map[string]string{
		"SignatureMethod":  "HMAC-SHA1", // fixed value required by the API
		"SignatureNonce":   nonce,
		"AccessKeyId":      p.AccessKeyId,
		"SignatureVersion": "1.0",
		"Timestamp":        timeStamp,
		"Format":           "JSON", // the API also supports XML
		"Version":          p.Version,
		"RegionId":         p.RegionId,
	}
}
// generateQueryStringAndSignature sorts the non-empty request parameters,
// builds the canonical query string and computes the Aliyun POP signature.
//
// It returns the query string with a leading '&' (the caller appends it
// directly after "?Signature=<sig>") and the URL-encoded signature.
func (p *AliCloudSMSClient) generateQueryStringAndSignature(params map[string]string) (string, string) {
	// The signature itself must never be part of the signed string.
	delete(params, "Signature")
	keys := make([]string, 0, len(params))
	for key, val := range params {
		if val != "" { // empty values are omitted from the canonical string
			keys = append(keys, key)
		}
	}
	sort.Strings(keys)
	// Build "&k1=v1&k2=v2..." once; strings.Builder avoids the quadratic
	// cost of repeated string concatenation the original had.
	var b strings.Builder
	for _, key := range keys {
		b.WriteByte('&')
		b.WriteString(specialUrlEncode(key))
		b.WriteByte('=')
		b.WriteString(specialUrlEncode(params[key]))
	}
	queryWithAmp := b.String()
	// The string-to-sign uses the canonical string without the leading '&'.
	sortQueryString := strings.TrimPrefix(queryWithAmp, "&")
	stringToSign := "GET" + "&" + specialUrlEncode("/") + "&" + specialUrlEncode(sortQueryString)
	// Per the Aliyun spec the signing key is the secret plus a trailing '&'.
	// (Also no longer shadows the package-level 'sign' function.)
	signature := specialUrlEncode(sign(p.AccessKeySecret+"&", stringToSign))
	return queryWithAmp, signature
}
// aliEscaper converts Go's query escaping into the form the Aliyun signature
// spec requires: spaces as %20 (not '+'), '*' as %2A and '~' left unescaped.
var aliEscaper = strings.NewReplacer("+", "%20", "*", "%2A", "%7E", "~")

// specialUrlEncode percent-encodes value following Aliyun's RFC 3986-style
// signature rules rather than Go's default query encoding.
func specialUrlEncode(value string) string {
	return aliEscaper.Replace(url.QueryEscape(value))
}
// sign computes the base64-encoded HMAC-SHA1 of sortQueryStr keyed with
// accessKeySecret, as required by the Aliyun POP signature protocol.
func sign(accessKeySecret, sortQueryStr string) string {
	mac := hmac.New(sha1.New, []byte(accessKeySecret))
	mac.Write([]byte(sortQueryStr)) // hash.Hash.Write never returns an error
	digest := mac.Sum(nil)
	return base64.StdEncoding.EncodeToString(digest)
}
// getTimestampAndNonce returns the current UTC time in the ISO-8601 layout
// required by the Aliyun API ("2006-01-02T15:04:05Z", the trailing 'Z' is a
// literal UTC designator) plus a 32-character random nonce that de-duplicates
// requests.
//
// Fix: the previous package-level time.LoadLocation("GMT") silently
// discarded its error, and a nil location would have panicked here; UTC is
// the same wall clock and needs no lookup.
func getTimestampAndNonce() (string, string) {
	nonce := utilities.GetRandomStr(32)
	timeStamp := time.Now().UTC().Format("2006-01-02T15:04:05Z")
	return timeStamp, nonce
}
|
package middleware
import (
"context"
"crypto/hmac"
"encoding/hex"
"hash"
"io/ioutil"
"net/http"
"strings"
// this is only used for signature decryption, not for any sensitive content encryption
// nolint: gosec
"crypto/sha1"
"github.com/krostar/logger/logmid"
"github.com/pkg/errors"
)
// ctxEnsureGithubOrigin is a private context-key type; a dedicated type
// prevents collisions with context keys defined by other packages.
type ctxEnsureGithubOrigin string
// ctxEnsureGithubOriginKey is the context key that holds the fact that the origin has been ensured.
var ctxEnsureGithubOriginKey = ctxEnsureGithubOrigin("github-ensure-origin") // nolint: gochecknoglobals
// EnsureGithubOrigin returns a middleware that rejects, with 403 Forbidden,
// any request whose HMAC signature does not validate against secret — i.e.
// any request that does not genuinely come from GitHub.
func EnsureGithubOrigin(secret string) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		fn := func(w http.ResponseWriter, r *http.Request) {
			ctx := r.Context()
			if err := ensureGithubOrigin(r, secret); err != nil {
				logmid.AddErrorInContext(ctx, err)
				w.WriteHeader(http.StatusForbidden)
				return
			}
			// Record the successful check so downstream handlers can rely on it.
			ctx = context.WithValue(ctx, ctxEnsureGithubOriginKey, true)
			next.ServeHTTP(w, r.WithContext(ctx))
		}
		return http.HandlerFunc(fn)
	}
}
// ensureGithubOrigin validates that request r genuinely originates from
// GitHub: it checks the GitHub-specific headers, then compares the MAC
// GitHub sent (X-Hub-Signature) with one computed locally from the request
// body and the shared secret.
func ensureGithubOrigin(r *http.Request, secret string) error {
	if err := checkGithubHeaders(r); err != nil {
		return errors.Wrap(err, "header does not contain what we expect them to")
	}
	// Fix: the local variable was named "hash", shadowing the imported
	// "hash" package within this function.
	macReceived, hashFunc, err := getGithubSideGeneratedMAC(r)
	if err != nil {
		return errors.Wrap(err, "MAC given by github cannot be retrieved")
	}
	macGenerated, err := computeGithubRequestMAC(r, hashFunc, secret)
	if err != nil {
		return errors.Wrap(err, "unable to generate request MAC")
	}
	// hmac.Equal compares in constant time, avoiding timing side channels.
	if !hmac.Equal(macGenerated, macReceived) {
		return errors.New("received and generated MAC are not the same")
	}
	return nil
}
// checkGithubHeaders verifies the three headers GitHub always sets on push
// webhooks: a GitHub-Hookshot user agent, a "push" event type and a JSON
// content type.
func checkGithubHeaders(r *http.Request) error {
	userAgent := r.Header.Get("User-Agent")
	if !strings.HasPrefix(userAgent, "GitHub-Hookshot/") {
		return errors.Errorf("wrong user-agent prefix: %s", userAgent)
	}
	event := r.Header.Get("X-GitHub-Event")
	if event != "push" {
		return errors.Errorf("wrong event: %s", event)
	}
	// NOTE(review): exact match — a value such as
	// "application/json; charset=utf-8" would be rejected; confirm GitHub
	// always sends the bare media type.
	contentType := r.Header.Get("Content-Type")
	if contentType != "application/json" {
		return errors.Errorf("wrong content type: %s", contentType)
	}
	return nil
}
// getGithubSideGeneratedMAC extracts the MAC GitHub computed for the request
// from the X-Hub-Signature header ("alg=hexsig") and returns it together
// with the constructor of the hash function to use locally.
func getGithubSideGeneratedMAC(r *http.Request) ([]byte, func() hash.Hash, error) {
	// X-Hub-Signature should look like alg=sig
	signatureReceived := strings.SplitN(r.Header.Get("X-Hub-Signature"), "=", 2)
	if len(signatureReceived) != 2 {
		return nil, nil, errors.Errorf("signature should look like alg=sig")
	}
	// get the hash function from the signature
	// and the expected length of the output of the hash function
	var hashFunc func() hash.Hash
	var hashLength int
	switch alg := signatureReceived[0]; alg {
	case "sha1":
		hashFunc = sha1.New
		hashLength = 20
	default:
		return nil, nil, errors.Errorf("unhandled alg %s", alg)
	}
	// Fix: the previous hex.Decode into a fixed 20-byte buffer panicked on
	// an over-long signature and silently zero-padded a truncated one.
	// Decode fully, then enforce the exact digest length.
	macReceived, err := hex.DecodeString(signatureReceived[1])
	if err != nil {
		return nil, nil, errors.Wrap(err, "unable to hex decode received mac")
	}
	if len(macReceived) != hashLength {
		return nil, nil, errors.Errorf("signature has wrong length for alg %s", signatureReceived[0])
	}
	return macReceived, hashFunc, nil
}
// computeGithubRequestMAC computes the HMAC of the request body using
// hashFunc keyed with secret.
//
// Fix: on server-side (incoming) requests r.GetBody is nil — it is only
// populated on outgoing client requests — so calling it unconditionally
// panicked. When GetBody is unavailable the body is read directly and then
// restored on r.Body so downstream handlers can still read it.
func computeGithubRequestMAC(r *http.Request, hashFunc func() hash.Hash, secret string) ([]byte, error) {
	var body []byte
	if r.GetBody != nil {
		// get a copy of the body to let handlers read it later
		bodyReader, err := r.GetBody()
		if err != nil {
			return nil, errors.Wrap(err, "unable to get a copy of the body")
		}
		body, err = ioutil.ReadAll(bodyReader)
		if err != nil {
			return nil, errors.Wrap(err, "unable to read body")
		}
	} else {
		var err error
		body, err = ioutil.ReadAll(r.Body)
		if err != nil {
			return nil, errors.Wrap(err, "unable to read body")
		}
		// Put the consumed bytes back so later handlers can read the body.
		r.Body = ioutil.NopCloser(strings.NewReader(string(body)))
	}
	// compute the mac of the body
	hasher := hmac.New(hashFunc, []byte(secret))
	if _, err := hasher.Write(body); err != nil {
		return nil, errors.Wrap(err, "unable to write to hmac")
	}
	return hasher.Sum(nil), nil
}
|
// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package daemon
import (
"fmt"
"io"
"log"
"net/http"
"net/url"
"os"
"path/filepath"
"sync"
"time"
)
// BlobRepo describes a remote blob repository: the base Address blobs are
// fetched from and a polling Interval.
// NOTE(review): Interval is not used anywhere in this file — confirm its
// consumer before removing it.
type BlobRepo struct {
	Address string
	Interval time.Duration
}
// FetchBlob tries each repo in order until blob can be downloaded and
// written into outputDir. muRun serializes concurrent fetches. It returns an
// error only when no repo could supply the blob.
func FetchBlob(repos []BlobRepo, blob string, muRun *sync.Mutex, outputDir string) error {
	muRun.Lock()
	defer muRun.Unlock()
	// NOTE(review): the client has no timeout; a hung repo blocks the fetch
	// (and the mutex) indefinitely — consider a per-request deadline.
	httpC := &http.Client{}
	for i := range repos {
		reader, sz, err := FetchBlobFromRepo(repos[i], blob, httpC)
		if err != nil {
			// Log the cause instead of discarding it, then try the next repo.
			log.Printf("error getting blob %q from %q: %v\n", blob, repos[i].Address, err)
			continue
		}
		err = WriteBlob(filepath.Join(outputDir, blob), sz, reader)
		reader.Close()
		if err == nil {
			return nil
		}
		log.Printf("error writing blob %q: %v\n", blob, err)
	}
	return fmt.Errorf("couldn't fetch blob %q from any repo", blob)
}
// FetchBlobFromRepo requests a single blob from repository r and, on
// success, returns the (unread) response body plus its content length
// (-1 when unknown). The caller owns the returned ReadCloser and must close
// it.
func FetchBlobFromRepo(r BlobRepo, blob string, client *http.Client) (io.ReadCloser, int64, error) {
	u, err := url.Parse(r.Address)
	if err != nil {
		return nil, -1, err
	}
	// NOTE(review): filepath.Join is OS-dependent; on Windows it would put
	// backslashes into a URL path — path.Join is the safer choice here.
	tmp := *u
	tmp.Path = filepath.Join(u.Path, blob)
	srcAddr, err := url.Parse(tmp.String())
	if err != nil {
		return nil, -1, err
	}
	// Early-return structure replaces the nested if/else, and the response
	// no longer shadows the BlobRepo parameter r.
	resp, err := client.Get(srcAddr.String())
	if err != nil {
		return nil, -1, err
	}
	if resp.StatusCode != http.StatusOK {
		resp.Body.Close()
		// Fix: StatusCode is an int; the original "%s" printed "%!s(int=404)".
		return nil, -1, fmt.Errorf("fetch failed with status %d", resp.StatusCode)
	}
	return resp.Body, resp.ContentLength, nil
}
func WriteBlob(name string, sz int64, con io.ReadCloser) error {
f, err := os.Create(name)
if err != nil {
return err
}
defer f.Close()
err = f.Truncate(sz)
if err != nil {
return err
}
_, err = io.Copy(f, con)
return err
}
|
package dushengchen
/**
Submission:
https://leetcode.com/submissions/detail/371249931/
*/
// https://leetcode.com/submissions/detail/371249931/
//O(n^2)的复杂度
// func insert(intervals [][]int, newInterval []int) [][]int {
// intervals = append(intervals, newInterval)
// return merge(intervals)
// }
// insert inserts newInterval into intervals (sorted, non-overlapping) and
// merges any overlaps, in O(n) time.
//
// Fix: the previous implementation mutated the caller's slices (it wrote
// into intervals[i] and newInterval while merging); this version never
// modifies its inputs.
func insert(intervals [][]int, newInterval []int) [][]int {
	n := len(intervals)
	res := make([][]int, 0, n+1)
	i := 0
	// 1) Everything strictly before newInterval passes through unchanged.
	for i < n && intervals[i][1] < newInterval[0] {
		res = append(res, intervals[i])
		i++
	}
	// 2) Merge every interval that overlaps or touches the new one into a
	//    fresh slice (so neither input is mutated).
	merged := []int{newInterval[0], newInterval[1]}
	for i < n && intervals[i][0] <= merged[1] {
		if intervals[i][0] < merged[0] {
			merged[0] = intervals[i][0]
		}
		if intervals[i][1] > merged[1] {
			merged[1] = intervals[i][1]
		}
		i++
	}
	res = append(res, merged)
	// 3) Everything strictly after the merged span passes through unchanged.
	for i < n {
		res = append(res, intervals[i])
		i++
	}
	return res
}
|
package main
import (
"log"
"net/http"
"os"
"sync"
"time"
"github.com/skriptble/gabble/transport/bosh"
"github.com/skriptble/nine/bind"
"github.com/skriptble/nine/element/stanza"
"github.com/skriptble/nine/namespace"
"github.com/skriptble/nine/sasl"
"github.com/skriptble/nine/stream"
)
// dflt holds the default BOSH <body/> attributes used for new sessions
// (timing, request limits and protocol versions — presumably per XEP-0124;
// see the bosh package for the exact field semantics).
var dflt = bosh.Body{
	Wait: 45 * time.Second,
	Requests: 2,
	Polling: 5 * time.Second,
	Inactivity: 75 * time.Second,
	Hold: 3,
	HoldSet: true,
	Ver: bosh.Version{Major: 1, Minor: 6},
	XMPPVer: bosh.Version{Major: 1, Minor: 0},
	RestartLogic: true,
	MaxPause: 120 * time.Second,
	// NOTE(review): MIME charsets are usually spelled "utf-8"; confirm
	// clients accept "utf8" before changing this value.
	Content: "text/xml; charset=utf8",
}
// server is the XMPP domain this demo serves.
var server = "localhost"
func init() {
	// turn on stream-layer trace/debug logging to stderr
	stream.Trace.SetOutput(os.Stderr)
	stream.Debug.SetOutput(os.Stderr)
}
// main wires a BOSH handler to an in-memory session register and serves it,
// plus the working directory under /static/, on :8088.
func main() {
	sessionRegister := NewRegister()
	transformer := bosh.NewBodyTransformer(bosh.Body{})
	handler := bosh.NewHandler(sessionRegister, transformer, dflt, server)
	mux := http.NewServeMux()
	mux.Handle("/", handler)
	// Demo convenience: serve the current working directory as static files.
	static := http.FileServer(http.Dir("."))
	mux.Handle("/static/", http.StripPrefix("/static/", static))
	srv := &http.Server{Addr: ":8088", Handler: mux}
	log.Fatal(srv.ListenAndServe())
}
// register is an in-memory, RWMutex-guarded implementation of bosh.Register
// that maps session ids to sessions.
type register struct {
	sessions map[string]*bosh.Session
	sync.RWMutex
}
// NewRegister returns a new initialized Register backed by an in-memory map.
func NewRegister() bosh.Register {
	return &register{sessions: make(map[string]*bosh.Session)}
}
// Add adds a session to the Register and starts an XMPP stream over a new
// receiving BOSH transport for it.
func (r *register) Add(sid string, s *bosh.Session) {
	r.Lock()
	defer r.Unlock()
	// create a new receiving transport bound to this session
	tp := bosh.NewTransport(stream.Receiving, s)
	runStream(tp)
	// register the session under its stream id
	r.sessions[sid] = s
}
// Remove deletes the session stored under sid, if any.
func (r *register) Remove(sid string) {
	r.Lock()
	delete(r.sessions, sid)
	r.Unlock()
}
// Lookup returns the Session associated with the given sid. If the session
// doesn't exist, ErrSessionNotFound is returned. Expired sessions are
// removed lazily here and also reported as not found.
func (r *register) Lookup(sid string) (s *bosh.Session, err error) {
	r.RLock()
	s, ok := r.sessions[sid]
	r.RUnlock()
	if !ok {
		err = bosh.ErrSessionNotFound
		return
	}
	if s.Expired() {
		// NOTE(review): between the RUnlock above and this Remove another
		// goroutine could Add a fresh session under the same sid, which this
		// call would then delete — confirm sids are never reused.
		r.Remove(sid)
		err = bosh.ErrSessionNotFound
		s = nil
	}
	return
}
// runStream assembles the SASL/bind/session handler chain for a single
// transport and runs the resulting XMPP stream in its own goroutine.
func runStream(tp stream.Transport) {
	saslHandler := sasl.NewHandler(map[string]sasl.Mechanism{
		"PLAIN": sasl.NewPlainMechanism(sasl.FakePlain{}),
	})
	bindHandler := bind.NewHandler()
	sessionHandler := bind.NewSessionHandler()
	iqHandler := stream.NewIQMux().
		Handle(namespace.Bind, "bind", string(stanza.IQSet), bindHandler).
		Handle(namespace.Session, "session", string(stanza.IQSet), sessionHandler)
	if iqHandler.Err() != nil {
		log.Fatal(iqHandler.Err())
	}
	elHandler := stream.NewElementMux().
		Handle(namespace.SASL, "auth", saslHandler).
		Handle(namespace.SASL, "response", saslHandler).
		Handle(namespace.Client, "iq", iqHandler).
		Handle(namespace.Client, "presence", stream.Blackhole{}).
		Handle(namespace.Client, "message", stream.Blackhole{})
	if elHandler.Err() != nil {
		// Bug fix: this branch previously logged iqHandler.Err() (copy/paste),
		// hiding the actual element-mux error.
		log.Fatal(elHandler.Err())
	}
	fhs := []stream.FeatureGenerator{
		saslHandler,
		bindHandler,
		// sessionHandler,
	}
	props := stream.NewProperties()
	props.Domain = "localhost"
	s := stream.New(tp, elHandler, stream.Receiving).
		AddFeatureHandlers(fhs...).
		SetProperties(props)
	go s.Run()
}
|
package main
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/square/p2/pkg/grpc/podstore/client"
"github.com/square/p2/pkg/logging"
"github.com/square/p2/pkg/manifest"
"github.com/square/p2/pkg/types"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"gopkg.in/alecthomas/kingpin.v2"
)
// Subcommand names and the default pod store server address.
const (
	cmdScheduleText = "schedule"
	cmdWatchStatusText = "watch-status"
	defaultAddress = "localhost:3000"
)
// Command-line definition (kingpin): global flags plus the per-subcommand
// flags for "schedule" and "watch-status".
var (
	address = kingpin.Flag("address", "Address of the pod store server to talk to.").Default(defaultAddress).String()
	caCert = kingpin.Flag("cacert", "Certificate file to use to verify server").ExistingFile()
	cmdSchedule = kingpin.Command(cmdScheduleText, "Schedules a pod (as a UUID pod)")
	manifestFile = cmdSchedule.Flag("manifest", "Path to pod manifest file to schedule").Required().ExistingFile()
	node = cmdSchedule.Flag("node", "Node to schedule pod manifest to").Required().String()
	cmdWatchStatus = kingpin.Command(cmdWatchStatusText, "Watch the status for a pod")
	podUniqueKey = cmdWatchStatus.Flag("pod-unique-key", "Pod unique key (uuid) to watch status for").Short('k').Required().String()
	numIterations = cmdWatchStatus.Flag("num-iterations", "Number of status updates to wait for before stopping").Short('n').Default("1").Int()
)
// main parses the command line, establishes an (optionally TLS-secured)
// gRPC connection to the pod store server and dispatches to the requested
// subcommand.
func main() {
	cmd := kingpin.Parse()
	logger := logging.DefaultLogger
	var creds credentials.TransportCredentials
	var err error
	if *caCert != "" {
		creds, err = credentials.NewClientTLSFromFile(*caCert, "")
		if err != nil {
			logger.Fatal(err)
		}
	}
	dialOptions := []grpc.DialOption{grpc.WithBlock(), grpc.WithTimeout(5 * time.Second)}
	if creds != nil {
		dialOptions = append(dialOptions, grpc.WithTransportCredentials(creds))
	} else {
		dialOptions = append(dialOptions, grpc.WithInsecure())
	}
	conn, err := grpc.Dial(*address, dialOptions...)
	if err != nil {
		logger.Fatal(err)
	}
	// Fix: renamed from "client", which shadowed the imported client package.
	podStoreClient, err := client.New(conn, logger)
	if err != nil {
		logger.Fatalf("Could not set up grpc client: %s", err)
	}
	switch cmd {
	case cmdScheduleText:
		schedule(podStoreClient, logger)
	case cmdWatchStatusText:
		watchStatus(podStoreClient, logger)
	}
}
// schedule reads the manifest named on the command line, schedules it on the
// requested node as a UUID pod, and prints the resulting pod id and unique
// key as JSON (falling back to a plain log line if JSON encoding fails).
func schedule(podClient client.Client, logger logging.Logger) {
	m, err := manifest.FromPath(*manifestFile)
	if err != nil {
		logger.Fatalf("Could not read manifest: %s", err)
	}
	uniqueKey, err := podClient.Schedule(m, types.NodeName(*node))
	if err != nil {
		logger.Fatalf("Could not schedule: %s", err)
	}
	type scheduleOutput struct {
		PodID        types.PodID        `json:"pod_id"`
		PodUniqueKey types.PodUniqueKey `json:"pod_unique_key"`
	}
	out := scheduleOutput{PodID: m.ID(), PodUniqueKey: uniqueKey}
	outBytes, err := json.Marshal(out)
	if err != nil {
		// Best effort: still report the key even if JSON encoding failed.
		logger.Infof("Scheduled pod with key: %s", uniqueKey)
		return
	}
	fmt.Println(string(outBytes))
}
// watchStatus subscribes to status updates for the pod named by the
// --pod-unique-key flag and prints --num-iterations JSON-encoded updates.
func watchStatus(podClient client.Client, logger logging.Logger) {
	key, err := types.ToPodUniqueKey(*podUniqueKey)
	if err != nil {
		logger.Fatalf("Could not parse passed pod unique key %q as uuid: %s", *podUniqueKey, err)
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	outCh, err := podClient.WatchStatus(ctx, key, true)
	if err != nil {
		logger.Fatal(err)
	}
	for remaining := *numIterations; remaining > 0; remaining-- {
		val, ok := <-outCh
		if !ok {
			logger.Fatal("Channel closed unexpectedly")
		}
		if val.Error != nil {
			logger.WithError(val.Error).Infoln("status watcher encountered an error")
		}
		encoded, err := json.Marshal(val)
		if err != nil {
			logger.Fatal(err)
		}
		fmt.Println(string(encoded))
	}
}
|
package main
import (
"fmt"
"math"
"math/rand"
"strings"
)
// calcQuality picks a quality level in [min, max] by a weighted random draw.
// Base weights come from jewelQualityMap (the equip's own quality level also
// adds continueMatNumb); each level then accumulates a bonus proportional to
// the running sum of base weights and its distance below max (factor 0.5),
// which biases the draw toward lower qualities.
// NOTE(review): draws from the global math/rand source without seeding —
// confirm that is intended (output is fixed-seed deterministic on Go <1.20).
func calcQuality(min, max, equipQuality, continueMatNumb int32, jewelQualityMap map[int32]int32) int32 {
	// Base weight per quality level.
	base := make(map[int32]float32)
	for i := min; i <= max; i++ {
		if i == equipQuality {
			base[i] = float32(jewelQualityMap[i]) + float32(continueMatNumb)
		} else {
			base[i] = float32(jewelQualityMap[i])
		}
	}
	var reduce float32 = 0.5
	var sum float32 = 0
	var sumWeight float32 = 0.0
	// tem holds the distance-weighted bonus added to each level's weight.
	tem := make(map[int32]float32)
	for i := min; i <= max; i++ {
		sum += base[i]
		tem[i] = sum * float32(max-i) * reduce
		sumWeight += base[i] + tem[i]
		base[i] = base[i] + tem[i]
	}
	// Draw a point in [0, sumWeight) and find the level whose cumulative
	// weight range contains it.
	randRatio := rand.Float32() * sumWeight
	var temRation float32 = 0.0
	fmt.Printf("allllll :%v\n", sumWeight)
	for i := min; i <= max; i++ {
		fmt.Printf("oneeeee : %v %v ** %v \n", i, base[i], base[i]/sumWeight)
	}
	for i := min; i <= max; i++ {
		temRation += base[i]
		if randRatio < temRation {
			return i
		}
	}
	// Fallback (e.g. all weights zero): return the minimum quality.
	return min
}
// CeilToInt32 rounds v up to the nearest integer and returns it as an int32.
// Values outside the int32 range overflow silently.
func CeilToInt32(v float32) int32 {
	ceiled := math.Ceil(float64(v))
	return int32(ceiled)
}
// main is an ad-hoc playground exercising calcQuality plus assorted
// float/map/slice behaviors; all output is debug prints.
func main() {
	// Weighted draw over qualities 1..4 with equip quality 2 and 4 bonus materials.
	q := calcQuality(1, 4, 2, 4, map[int32]int32{1: 1, 3: 1, 4: 1})
	fmt.Printf("get eeeeeeeeeee %v", q)
	// Integer cost rounding check.
	lastCost := CeilToInt32(float32(500 * 100.0 / (100 + 1)))
	println("IMY********************", lastCost)
	fmt.Printf("IMY******************** %v \n", strings.Split("5:6:7", ":"))
	fmt.Printf("IMY******************** %v \n", strings.Split("8", ":"))
	fmt.Printf("IMY******************** %v \n", int64(math.Ceil(float64(50)*float64(7197/3600.0))))
	var duration int32 = 7197
	A := float64(float64(duration) / 3600.0)
	fmt.Printf("IMY********************%v \n", A)
	// math.Ceil on map values, including a missing key (zero value).
	fmap := make(map[int32]float64)
	fmap[2] = 2.00
	fmap[3] = 2.01
	fmt.Printf("IMY*********%v %v \n", math.Ceil(fmap[1]), math.Ceil(fmap[1]))
	fmt.Printf("IMY*********%v %v \n", math.Ceil(fmap[2]), math.Ceil(fmap[2]))
	fmt.Printf("IMY*********%v %v \n", math.Ceil(fmap[3]), math.Ceil(fmap[3]))
	// Nested maps must be initialized level by level before writing.
	vvv := make(map[int]map[int]int)
	if _, ok := vvv[1]; !ok {
		vvv[1] = make(map[int]int)
	}
	vvv[1][1] = 1
	fmt.Printf("IMY*********%v %v \n", math.Ceil(fmap[3]), math.Ceil(fmap[3]))
	// append works on a nil slice.
	var nonInitSlice []int
	nonInitSlice = append(nonInitSlice, 111)
	nonInitSlice = append(nonInitSlice, 2222)
	fmt.Printf("IMY************** %v", nonInitSlice)
	// 0x.Fp0 is a hexadecimal float literal (15/16 = 0.9375, Go 1.13+).
	fmt.Printf("IMY**************float %v", 0x.Fp0)
}
|
package server
import (
"net"
"strings"
"sync"
"../log"
)
/*
Internal Communication Structure
*/
// CommUnit tracks, per device MAC address, the active network connection and
// the channel used to deliver RPC responses. Two independent mutexes guard
// the two maps.
type CommUnit struct {
	ConnMap map[string]net.Conn
	RpcCh map[string]chan interface{}
	cl sync.Mutex // guards ConnMap
	rl sync.Mutex // guards RpcCh
}
// Comm is the process-wide communication unit shared by the server.
var Comm *CommUnit

// NewCommUnit allocates a CommUnit with empty connection and RPC tables.
func NewCommUnit() *CommUnit {
	return &CommUnit{
		ConnMap: make(map[string]net.Conn),
		RpcCh:   make(map[string]chan interface{}),
	}
}
// AddConn records (or replaces) the connection for the device identified by mac.
func (c *CommUnit) AddConn(mac string, conn net.Conn) {
	c.cl.Lock()
	c.ConnMap[mac] = conn
	c.cl.Unlock()
}
// AddRpc records (or replaces) the RPC response channel for the device
// identified by mac.
func (c *CommUnit) AddRpc(mac string, rpc chan interface{}) {
	c.rl.Lock()
	c.RpcCh[mac] = rpc
	c.rl.Unlock()
}
// SendRpcResponse delivers msg to the RPC channel registered for mac.
// NOTE(review): a direct lookup (c.RpcCh[mac]) would replace this O(n) scan,
// but the per-key debug log below would be lost — confirm and simplify.
func (c *CommUnit) SendRpcResponse(mac string, msg interface{}) {
	c.rl.Lock()
	defer c.rl.Unlock()
	for k, ch := range c.RpcCh {
		log.Debug("k=%s, mac=%s", k, mac)
		if strings.Compare(k, mac) == 0 {
			ch <- msg
		}
	}
}
// RetriveConn returns the connection registered for mac (nil if absent).
// NOTE(review): "Retrive" is a typo for "Retrieve" but the exported name is
// kept — renaming would break external callers.
func (c *CommUnit) RetriveConn(mac string) net.Conn {
	c.cl.Lock()
	defer c.cl.Unlock()
	return c.ConnMap[mac]
}
// RetriveMacByConn performs a reverse lookup: it returns the MAC whose
// registered connection equals conn, or "" when no entry matches.
func (c *CommUnit) RetriveMacByConn(conn net.Conn) string {
	c.cl.Lock()
	defer c.cl.Unlock()
	for mac, candidate := range c.ConnMap {
		if candidate == conn {
			return mac
		}
	}
	log.Debug("Not found mac by conn")
	return ""
}
// CommInit initializes the package-wide communication unit.
func CommInit() {
	Comm = NewCommUnit()
}
|
package spatial
import (
"bytes"
"encoding/binary"
"errors"
)
// GeometryType identifies the geometry kind encoded in a payload header.
type GeometryType uint32
// Srid is a spatial reference system identifier.
type Srid uint32
const (
	// NOTE(review): ALL_CAPS constant names are non-idiomatic Go, but they
	// are exported and renaming would break callers.
	GEOMETRY_TYPE_GENERIC GeometryType = iota
	GEOMETRY_TYPE_POINT
	GEOMETRY_TYPE_LINE_STRING
	GEOMETRY_TYPE_POLYGON
	GEOMETRY_TYPE_MULTI_POINT
	GEOMETRY_TYPE_MULTI_LINE_STRING
	GEOMETRY_TYPE_MULTI_POLYGON
	GEOMETRY_TYPE_COLLECTION
)
var (
	// Encoded payloads in this package are little-endian.
	byteOrder = binary.LittleEndian
	// The instantiable geometry range spans POINT..COLLECTION; GENERIC is a
	// wildcard used only as an expectedType.
	geometryInstantiableMinType = GEOMETRY_TYPE_POINT
	geometryInstantiableMaxType = GEOMETRY_TYPE_COLLECTION
)
// baseGeometry carries the state shared by all geometry kinds: the SRID.
type baseGeometry struct {
	srid Srid
}
// Srid returns the geometry's spatial reference system identifier.
func (g baseGeometry) Srid() Srid {
	return g.srid
}
// decodeHeaderFrom reads the (optional SRID +) byte-order marker + geometry
// type header from data. The decoded type must lie within the instantiable
// range and, when expectedType is not GENERIC, must equal expectedType.
// Returns the decoded type, or an error on short input / unexpected type.
func (g *baseGeometry) decodeHeaderFrom(data *bytes.Reader,
	decodeSrid bool, expectedType GeometryType) (GeometryType, error) {
	if decodeSrid {
		if err := binary.Read(data, byteOrder, &g.srid); nil != err {
			return 0, err
		}
	}
	// Skip the byte-order marker (encodeHeaderTo always writes 0x01).
	if _, err := data.ReadByte(); nil != err {
		return 0, err
	}
	var typ GeometryType
	if err := binary.Read(data, byteOrder, &typ); nil != err {
		return 0, err
	}
	// BUG FIX: the original tested `typ < min && typ > max`, which can never
	// be true, so out-of-range geometry types were silently accepted.
	if (typ < geometryInstantiableMinType || typ > geometryInstantiableMaxType) ||
		(GEOMETRY_TYPE_GENERIC != expectedType && typ != expectedType) {
		return 0, errors.New("unexpected geometry type")
	}
	return typ, nil
}
// encodeHeaderTo appends the (optional SRID +) little-endian marker and the
// geometry type code to data. Write errors are ignored: bytes.Buffer writes
// cannot fail.
func (g *baseGeometry) encodeHeaderTo(data *bytes.Buffer, encodeSrid bool, typ GeometryType) {
	if encodeSrid {
		_ = binary.Write(data, byteOrder, g.Srid())
	}
	data.WriteByte(0x01) // little-endian marker
	_ = binary.Write(data, byteOrder, typ)
}
// newEncodeBuffer returns an empty buffer pre-sized (25 bytes) for a typical
// encoded geometry header plus a point payload.
func newEncodeBuffer() *bytes.Buffer {
	backing := make([]byte, 0, 25)
	return bytes.NewBuffer(backing)
}
|
// Package irc contains a Bot implementation that works over the IRC protocol
package irc
|
package main
import (
"bytes"
"compress/zlib"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
flag "github.com/spf13/pflag"
"golang.org/x/text/encoding/simplifiedchinese"
"golang.org/x/text/transform"
)
// Remote endpoints and the User-Agent string expected by the cz88 update server.
const (
	userAgent    = "Mozilla/3.0 (compatible; Indy Library)"
	urlCopywrite = "http://update.cz88.net/ip/copywrite.rar"
	urlQqwryDat  = "http://update.cz88.net/ip/qqwry.rar"
)

var (
	outputFile  string // destination path (first positional argument)
	quiet, help bool   // command-line switches
	fileSize    uint64 // expected size of qqwry.rar, read from copywrite.rar
	fileSizeW   int    // display width of fileSize, for progress alignment
)
// FetchWriter counts the bytes written through it and renders a progress line.
type FetchWriter struct {
	n uint64 // total bytes observed so far
}
// Write implements io.Writer: it records len(p), refreshes the progress
// display, and always reports the full length written without error.
func (w *FetchWriter) Write(p []byte) (int, error) {
	size := len(p)
	w.n += uint64(size)
	w.progress()
	return size, nil
}
// progress redraws the single-line fetch progress indicator.
func (w *FetchWriter) progress() {
	percent := float64(w.n) / float64(fileSize) * 100
	printf("\rfetch: %6.2f%% %*d/%d bytes", percent, fileSizeW, w.n, fileSize)
}
// fetch downloads qqwry.rar, XOR-decodes its first 512 bytes with the key
// derived from copywrite.rar, zlib-inflates the result and writes it to
// outputFile.
func fetch(key uint32) error {
	resp, err := newRequest(urlQqwryDat)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if uint64(resp.ContentLength) != fileSize {
		return fmt.Errorf("the file size of the agreement is different")
	}
	var buffer bytes.Buffer
	_, err = io.Copy(&buffer, io.TeeReader(resp.Body, &FetchWriter{}))
	printf("\n")
	if err != nil {
		return err
	}
	b := buffer.Bytes()
	// Guard: the decode loop below indexes b[0:0x200] unconditionally.
	if len(b) < 0x200 {
		return fmt.Errorf("response too short: %d bytes", len(b))
	}
	// Decode the first 512 bytes with the rolling XOR key.
	for i := 0; i < 0x200; i++ {
		key *= 0x805
		key++
		key &= 0xFF
		b[i] = b[i] ^ byte(key)
	}
	r, err := zlib.NewReader(bytes.NewReader(b))
	if err != nil {
		return err
	}
	defer r.Close()
	f, err := os.Create(outputFile)
	if err != nil {
		return err
	}
	// BUG FIX: the original never closed f, leaking the file descriptor.
	defer f.Close()
	_, err = io.Copy(f, r)
	return err
}
// init parses the command line. flag.Usage is wired BEFORE Parse so that
// parse errors print the correct help text (the original assigned Usage
// after Parse, so it was never in effect during parsing).
func init() {
	flag.BoolVarP(&quiet, "quiet", "q", false, "only output error")
	flag.BoolVarP(&help, "help", "h", false, "this help")
	flag.CommandLine.SortFlags = false
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "iploc-fetch: fetch qqwry.dat.\nUsage: iploc-fetch [output filename] [arguments]\nOptions:\n")
		flag.PrintDefaults()
	}
	flag.Parse()
	outputFile = flag.Arg(0)
	if outputFile == "" || help {
		flag.Usage()
		if help {
			os.Exit(0)
		}
		os.Exit(1)
	}
}
// main fetches copywrite.rar to learn the data file's version, size and XOR
// key, then downloads and decodes qqwry.dat via fetch.
func main() {
	resp, err := newRequest(urlCopywrite)
	if err != nil {
		// BUG FIX: err.Error() was previously passed as the Printf format
		// string; a '%' in the message would be misinterpreted (govet printf).
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	b, err := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	fatal(err)
	s := readVersion(b)
	if s == nil {
		fatal(fmt.Errorf("invalid file description"))
	}
	// readVersion returned non-nil, so len(b) > 24 and the fixed offsets
	// below are in range.
	fileSize = uint64(binary.LittleEndian.Uint32(b[12:]))
	fileSizeW = len(fmt.Sprint(fileSize))
	key := binary.LittleEndian.Uint32(b[20:])
	printf("version: %s\n", toUTF8(s))
	printf("fetch: ...")
	fatal(fetch(key))
}
// newRequest issues a GET to urls with the legacy User-Agent the update
// server expects, returning the raw response.
func newRequest(urls string) (*http.Response, error) {
	req, err := http.NewRequest(http.MethodGet, urls, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", userAgent)
	return (&http.Client{}).Do(req)
}
// readVersion extracts the NUL-terminated version string that follows the
// first ASCII space after byte offset 24 of copywrite.rar. Returns nil when
// the payload is too short or contains no space.
//
// BUG FIX: both scan loops previously ran without bounds checks and could
// index past the end of p, panicking on truncated or corrupt input.
func readVersion(p []byte) []byte {
	start := 24
	if start >= len(p) {
		return nil
	}
	// 0x20 ASCII space
	for start < len(p) && p[start] != 0x20 {
		start++
	}
	start++
	if start >= len(p) {
		return nil
	}
	end := start
	for end < len(p) && p[end] != 0x00 {
		end++
	}
	// Without a NUL terminator the remainder of p is returned (the original
	// panicked in that case).
	return p[start:end]
}
func toUTF8(s []byte) (b []byte) {
r := transform.NewReader(bytes.NewReader(s), simplifiedchinese.GBK.NewDecoder())
b, _ = ioutil.ReadAll(r)
return
}
// fatal prints err to stderr and exits with status 1; it is a no-op for nil.
func fatal(err error) {
	if err != nil {
		// BUG FIX: err.Error()+"\n" was previously used as the Printf format
		// string, so '%' characters in the message would be mangled.
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
// printf forwards to fmt.Printf unless quiet mode is enabled.
func printf(format string, args ...interface{}) {
	if !quiet {
		fmt.Printf(format, args...)
	}
}
|
package pie
import (
"golang.org/x/exp/constraints"
)
// Min returns the minimum value in ss, or the zero value for an empty slice.
func Min[T constraints.Ordered](ss []T) (min T) {
	if len(ss) == 0 {
		return
	}
	min = ss[0]
	for _, v := range ss[1:] {
		if v < min {
			min = v
		}
	}
	return
}
|
package counter
import "sync/atomic"
// Compile-time check that gaugeCounter satisfies the Counter interface.
var _ Counter = new(gaugeCounter)

// A gaugeCounter is a thread-safe counter backed by an atomically-accessed int64.
type gaugeCounter int64
// NewGauge returns a gauge counter starting at zero.
func NewGauge() Counter {
	var g gaugeCounter
	return &g
}
// Add increments the counter by val (val may be negative to decrement).
func (g *gaugeCounter) Add(val int64) {
	atomic.AddInt64((*int64)(g), val)
}
// Value returns the counter's current value.
func (g *gaugeCounter) Value() int64 {
	return atomic.LoadInt64((*int64)(g))
}
// Reset sets the counter back to zero.
func (g *gaugeCounter) Reset() {
	atomic.StoreInt64((*int64)(g), 0)
}
|
package redisz
// "fmt"
// "testing"
// "github.com/futurez/litego/util"
const (
	// PostsPerPage is the page size used when slicing the posts list.
	PostsPerPage = 10
)
//func TestListCommon(t *testing.T) {
// redisPool := NewRedisPool("hash", "192.168.1.141:6379", "", 1)
// curPage := int(util.RandRange(0, 10))
// start := (curPage - 1) * PostsPerPage
// end := curPage*PostsPerPage - 1
// postsIds := redisPool.LrangeInt("posts:list", start, end)
// for _, postsId := range postsIds {
// key := fmt.Sprintf("post:%d", postsId)
// post, _ := redisPool.Hgetall(key)
// t.Log("doc titel = ", post["title"])
// }
//}
|
package main
import (
"fmt"
)
// main demonstrates ranging over a fixed-size array and printing its type.
func main() {
	values := [5]int{10, 20, 30, 40, 50}
	for idx, val := range values {
		fmt.Println(idx, val)
	}
	fmt.Printf("%T", values)
}
|
package catalog
import (
"encoding/json"
"fmt"
"sort"
"github.com/pkg/errors"
"github.com/spf13/afero"
)
// CatalogFileName is the file where coback stores the catalog in json format
const CatalogFileName = "coback.catalog"

// catalogState tracks the lifecycle state of a catalog; persisted as "state".
type catalogState int

// Checksum is a string type that helps avoiding confusion between files identified by path and by checksum
type Checksum string

// Catalog stores information about the contents of a folder
type Catalog interface {
	// Add adds a new item to the Catalog
	Add(item Item) error
	// Set ensures that the item in the catalog with the given path contains the given data.
	// If an item with the same path already exists it is replaced with the new item.
	// Simply adds the item if the path is not yet in the Catalog.
	Set(item Item) error
	// ForgetPath completely removes an item with the given path from the catalog. Its hash will not be stored as deleted,
	// and other items will not be removed even if they have the same hash.
	ForgetPath(path string)
	// DeletePath removes the path from the Catalog. If there are no other items stored with the same hash, the hash is stored as deleted
	DeletePath(path string)
	// Item returns the Item with the given path. Returns error if the path doesn't exist.
	Item(path string) (Item, error)
	// ItemsByChecksum returns the Items that have the given checksum. Returns error if no such Item exists.
	// As a copy of the same file can be stored more than once with different paths, a slice of Items is returned,
	// sorted by the path of the items
	ItemsByChecksum(sum Checksum) ([]Item, error)
	// AllItems returns a channel with all the items currently in the Catalog in alphabetical order of the path
	AllItems() <-chan Item
	// DeletedChecksums returns a channel with all the deleted checksums in alphabetical order
	DeletedChecksums() <-chan Checksum
	// Count returns the number of items stored in the Catalog
	Count() int
	// DeletedCount returns the number of items stored in the Catalog which are marked as deleted
	DeletedCount() int
	// DeleteChecksum stores the checksum as deleted and removes all items from the Catalog that have the given checksum (if any)
	DeleteChecksum(sum Checksum)
	// UnDeleteChecksum removes the checksum from the deleted checksums. It does not affect the stored items.
	UnDeleteChecksum(sum Checksum)
	// IsDeletedChecksum returns true if all the items with the given checksum are marked as deleted.
	// Returns error if the path doesn't exist or some items are marked as deleted, but not all of them.
	IsDeletedChecksum(sum Checksum) bool
	// IsKnownChecksum returns true if the checksum is in the Catalog, either as an actual item or as a checksum marked as deleted
	IsKnownChecksum(sum Checksum) bool
	// WriteAs writes the Catalog as a file at the given path and file system
	WriteAs(fs afero.Fs, path string) error
	// Write writes the Catalog as 'coback.catalog' in the root of the file system
	Write(fs afero.Fs) error
	// Clone creates a deep copy of the Catalog
	Clone() Catalog
	// FilterNew returns a catalog that contains all items that are present in this Catalog, but not in the other
	// (either as regular items or deleted hashes)
	FilterNew(other Catalog) Catalog
}

// catalog is the json-serializable implementation of Catalog.
type catalog struct {
	State   catalogState      `json:"state"`
	Items   map[string]Item   `json:"content"`
	Deleted map[Checksum]bool `json:"deleted_checksums"`
	// checksumToPaths is a derived index (checksum -> paths of items carrying
	// it); rebuilt by Read, never serialized.
	checksumToPaths map[Checksum][]string
}
// newcatalog builds an empty catalog with all of its maps initialized.
func newcatalog() *catalog {
	c := &catalog{}
	c.Items = make(map[string]Item)
	c.checksumToPaths = make(map[Checksum][]string)
	c.Deleted = make(map[Checksum]bool)
	return c
}
// NewCatalog creates a new empty Catalog.
func NewCatalog() Catalog {
	c := newcatalog()
	return c
}
// Clone creates a deep copy of the Catalog.
//
// BUG FIX: the original copied the checksumToPaths slice headers only, so
// clone and original shared backing arrays; removeItem compacts slices in
// place, letting a mutation of one catalog corrupt the other. Each path
// slice is now copied.
func (c *catalog) Clone() Catalog {
	clone := newcatalog()
	for k, v := range c.Items {
		clone.Items[k] = v
	}
	for k, v := range c.checksumToPaths {
		paths := make([]string, len(v))
		copy(paths, v)
		clone.checksumToPaths[k] = paths
	}
	for k, v := range c.Deleted {
		clone.Deleted[k] = v
	}
	return clone
}
// Add registers item under its path. It fails when the path is already
// present; adding a checksum previously marked deleted un-deletes it.
func (c *catalog) Add(item Item) error {
	if _, exists := c.Items[item.Path]; exists {
		return fmt.Errorf("File is already in the catalog: '%v'", item.Path)
	}
	delete(c.Deleted, item.Md5Sum)
	c.Items[item.Path] = item
	paths := c.checksumToPaths[item.Md5Sum]
	c.checksumToPaths[item.Md5Sum] = append(paths, item.Path)
	return nil
}
// removeItem removes the first occurrence of v from slice. The backing array
// is compacted in place, so callers must use the returned slice.
func removeItem(slice []string, v string) []string {
	for i := range slice {
		if slice[i] != v {
			continue
		}
		return append(slice[:i], slice[i+1:]...)
	}
	return slice
}
// Set replaces any existing item stored under newItem.Path, then adds newItem.
func (c *catalog) Set(newItem Item) error {
	if old, exists := c.Items[newItem.Path]; exists {
		delete(c.Items, old.Path)
		c.removeChecksumToPathMapping(old.Md5Sum, old.Path)
	}
	return c.Add(newItem)
}
// DeletePath removes the item at path; when it was the last item carrying its
// checksum, the checksum is recorded as deleted.
func (c *catalog) DeletePath(path string) {
	item, found := c.Items[path]
	if !found {
		return
	}
	if len(c.checksumToPaths[item.Md5Sum]) == 1 {
		c.Deleted[item.Md5Sum] = true
	}
	c.removeChecksumToPathMapping(item.Md5Sum, item.Path)
	delete(c.Items, path)
}
// DeleteChecksum stores sum as deleted and removes all items carrying it.
//
// BUG FIX: the original ranged over the live checksumToPaths slice while
// removeChecksumToPathMapping compacted it in place; with three or more paths
// sharing a checksum, some items were skipped and left stale in the catalog.
// Iterate over a detached snapshot instead.
func (c *catalog) DeleteChecksum(sum Checksum) {
	if paths, ok := c.checksumToPaths[sum]; ok {
		snapshot := make([]string, len(paths))
		copy(snapshot, paths)
		for _, p := range snapshot {
			item := c.Items[p]
			c.removeChecksumToPathMapping(item.Md5Sum, item.Path)
			delete(c.Items, p)
		}
	}
	c.Deleted[sum] = true
}
// UnDeleteChecksum drops sum from the deleted set; stored items are untouched.
func (c *catalog) UnDeleteChecksum(sum Checksum) {
	if _, deleted := c.Deleted[sum]; deleted {
		delete(c.Deleted, sum)
	}
}
// Item returns the item stored under path, or an error when absent.
func (c *catalog) Item(path string) (Item, error) {
	if item, ok := c.Items[path]; ok {
		return item, nil
	}
	return Item{}, errors.Errorf("No such file: %v", path)
}
// Count returns the number of items stored in the catalog.
func (c *catalog) Count() int {
	return len(c.Items)
}
// DeletedCount returns the number of checksums recorded as deleted.
func (c *catalog) DeletedCount() int {
	return len(c.Deleted)
}
// ItemsByChecksum returns every item carrying sum, sorted by path; an error
// is returned when the checksum is unknown.
func (c *catalog) ItemsByChecksum(sum Checksum) ([]Item, error) {
	paths, known := c.checksumToPaths[sum]
	if !known {
		return []Item{}, errors.Errorf("No such file: %v", sum)
	}
	items := make([]Item, 0, len(paths))
	for _, p := range paths {
		items = append(items, c.Items[p])
	}
	sort.Slice(items, func(a, b int) bool {
		return items[a].Path < items[b].Path
	})
	return items, nil
}
// IsDeletedChecksum reports whether sum is recorded as deleted.
// (A missing key yields the zero value false, matching the explicit check.)
func (c *catalog) IsDeletedChecksum(sum Checksum) bool {
	return c.Deleted[sum]
}
// Write stores the catalog as CatalogFileName in the root of fs.
func (c *catalog) Write(fs afero.Fs) error {
	return c.WriteAs(fs, CatalogFileName)
}
// WriteAs serializes the catalog as json and writes it to path on fs.
//
// BUG FIX: the json.Marshal error was silently discarded (and its result
// variable shadowed the json package).
func (c *catalog) WriteAs(fs afero.Fs, path string) error {
	data, err := json.Marshal(c)
	if err != nil {
		return errors.Wrapf(err, "Cannot serialize catalog: '%v'", path)
	}
	err = afero.WriteFile(fs, path, data, 0644)
	return errors.Wrapf(err, "Cannot save catalog to file: '%v'", path)
}
// FilterNew returns the subset of this catalog unknown to other: items whose
// checksum is neither stored nor marked deleted there.
func (c *catalog) FilterNew(other Catalog) Catalog {
	result := NewCatalog()
	for _, item := range c.Items {
		if other.IsDeletedChecksum(item.Md5Sum) {
			continue
		}
		_, err := other.ItemsByChecksum(item.Md5Sum)
		if err == nil {
			continue
		}
		// Paths in c.Items are unique, so Add cannot fail here.
		result.Add(item)
	}
	return result
}
// IsKnownChecksum reports whether sum exists either as a stored item's
// checksum or as a checksum marked deleted.
func (c *catalog) IsKnownChecksum(sum Checksum) bool {
	if _, stored := c.checksumToPaths[sum]; stored {
		return true
	}
	_, deleted := c.Deleted[sum]
	return deleted
}
// removeChecksumToPathMapping drops path from the checksum index entry for
// sum, removing the entry entirely when path was its only member.
// NOTE: removeItem compacts the stored slice in place, so any alias of that
// slice observes the mutation.
func (c *catalog) removeChecksumToPathMapping(sum Checksum, path string) {
	if len(c.checksumToPaths[sum]) == 1 {
		delete(c.checksumToPaths, sum)
	} else {
		c.checksumToPaths[sum] = removeItem(c.checksumToPaths[sum], path)
	}
}
// ForgetPath removes the item at path without recording its checksum as
// deleted; other items with the same hash are untouched.
func (c *catalog) ForgetPath(path string) {
	if item, found := c.Items[path]; found {
		c.removeChecksumToPathMapping(item.Md5Sum, item.Path)
		delete(c.Items, item.Path)
	}
}
// AllItems returns a channel yielding every item in alphabetical path order.
// NOTE(review): a zero-value Item{} is sent just before the channel is
// closed — this looks like a deliberate end-of-stream sentinel for consumers;
// confirm before relying on it or removing it.
func (c *catalog) AllItems() <-chan Item {
	ret := make(chan Item, 100)
	go func() {
		defer func() {
			ret <- Item{}
			close(ret)
		}()
		if len(c.Items) == 0 {
			return
		}
		// Collect and sort the paths so emission order is deterministic.
		keys := make([]string, 0, len(c.Items))
		for _, item := range c.Items {
			keys = append(keys, item.Path)
		}
		sort.Strings(keys)
		for _, k := range keys {
			ret <- c.Items[k]
		}
	}()
	return ret
}
// DeletedChecksums returns a channel yielding every deleted checksum in
// ascending order. The key snapshot is taken synchronously before returning;
// sorting and emission happen in a background goroutine.
func (c *catalog) DeletedChecksums() <-chan Checksum {
	out := make(chan Checksum, 100)
	sums := make([]Checksum, 0, len(c.Deleted))
	for sum := range c.Deleted {
		sums = append(sums, sum)
	}
	go func() {
		defer close(out)
		if len(sums) == 0 {
			return
		}
		sort.Slice(sums, func(a, b int) bool {
			return sums[a] < sums[b]
		})
		for _, sum := range sums {
			out <- sum
		}
	}()
	return out
}
// Read loads a catalog from the json file at path on fs, rebuilding the
// in-memory checksum index from the decoded items.
func Read(fs afero.Fs, path string) (Catalog, error) {
	buf, err := afero.ReadFile(fs, path)
	if err != nil {
		return nil, errors.Wrapf(err, "Cannot read catalog: '%v'", path)
	}
	c := newcatalog()
	if err := json.Unmarshal(buf, c); err != nil {
		return nil, errors.Wrapf(err, "Cannot parse catalog json: '%v'", path)
	}
	for _, item := range c.Items {
		paths := c.checksumToPaths[item.Md5Sum]
		c.checksumToPaths[item.Md5Sum] = append(paths, item.Path)
	}
	return c, nil
}
|
package downcase
// Downcase lowercases the ASCII letters A-Z in s, leaving every other byte
// untouched. The error result is always nil (kept for interface parity).
func Downcase(s string) (string, error) {
	out := []byte(s)
	for i, c := range out {
		if c >= 'A' && c <= 'Z' {
			out[i] = c + ('a' - 'A')
		}
	}
	return string(out), nil
}
|
// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
package export
import (
tcontext "github.com/pingcap/tidb/dumpling/context"
"go.uber.org/zap"
)
// filterDatabases returns the databases accepted by conf.TableFilter,
// logging the ones that are skipped.
func filterDatabases(tctx *tcontext.Context, conf *Config, databases []string) []string {
	tctx.L().Debug("start to filter databases")
	kept := make([]string, 0, len(databases))
	skipped := make([]string, 0, len(databases))
	for _, db := range databases {
		if conf.TableFilter.MatchSchema(db) {
			kept = append(kept, db)
		} else {
			skipped = append(skipped, db)
		}
	}
	if len(skipped) > 0 {
		tctx.L().Debug("ignore database", zap.Strings("databases", skipped))
	}
	return kept
}
// filterTables drops from conf.Tables every table rejected by conf.TableFilter.
func filterTables(tctx *tcontext.Context, conf *Config) {
	filterTablesFunc(tctx, conf, conf.TableFilter.MatchTable)
}
// filterTablesFunc rebuilds conf.Tables keeping only tables accepted by
// matchTable; rejected tables are logged. With DumpEmptyDatabase set,
// databases whose schema matches but end up with no tables are kept as
// empty entries.
func filterTablesFunc(tctx *tcontext.Context, conf *Config, matchTable func(string, string) bool) {
	tctx.L().Debug("start to filter tables")
	dbTables := DatabaseTables{}
	ignoredDBTable := DatabaseTables{}
	for dbName, tables := range conf.Tables {
		for _, table := range tables {
			if matchTable(dbName, table.Name) {
				dbTables.AppendTable(dbName, table)
			} else {
				ignoredDBTable.AppendTable(dbName, table)
			}
		}
		// 1. this dbName doesn't match block allow list, don't add
		// 2. this dbName matches block allow list, but there is no table in this database, add
		if conf.DumpEmptyDatabase {
			if _, ok := dbTables[dbName]; !ok && conf.TableFilter.MatchSchema(dbName) {
				dbTables[dbName] = make([]*TableInfo, 0)
			}
		}
	}
	if len(ignoredDBTable) > 0 {
		tctx.L().Debug("ignore table", zap.String("tables", ignoredDBTable.Literal()))
	}
	conf.Tables = dbTables
}
|
package task
import (
"bitbucket.org/inehealth/idonia-pacs/configuration"
"bitbucket.org/inehealth/idonia-pacs/model"
"bitbucket.org/inehealth/idonia-pacs/repository"
"bitbucket.org/inehealth/idonia-pacs/service/idonia/idonia-core"
"database/sql"
"encoding/json"
"errors"
"fmt"
"github.com/google/uuid"
"github.com/sirupsen/logrus"
"gopkg.in/oleiade/lane.v1"
"io/ioutil"
"net/http"
"strconv"
"sync"
"time"
)
// Engine is the background task processor: tasks are persisted to the
// database, queued, and executed by a pool of worker goroutines (see Start).
type Engine struct {
	db           *sql.DB
	queue        *lane.Queue       // FIFO of *Task awaiting execution
	stopThreads  map[int]chan bool // per-worker stop signal channels
	stop         chan bool         // signals Start to shut the pool down
	logger       *logrus.Logger
	config       *configuration.Configuration
	studies      map[string]string
	CreateTask   *sync.Mutex
	TasksWaiting map[string]chan interface{}
}
// NewEngine builds an Engine bound to the given database, logger and config.
func NewEngine(db *sql.DB, logger *logrus.Logger, config *configuration.Configuration) (engine *Engine) {
	engine = &Engine{
		db:           db,
		queue:        lane.NewQueue(),
		stopThreads:  make(map[int]chan bool),
		stop:         make(chan bool),
		logger:       logger,
		config:       config,
		studies:      make(map[string]string),
		CreateTask:   &sync.Mutex{},
		TasksWaiting: make(map[string]chan interface{}),
	}
	return
}
// Add persists task to the database and enqueues it for execution.
// NOTE(review): the Debugf below assumes engine.logger is non-nil while the
// Infof further down guards against nil — confirm the logger is always set.
func (engine *Engine) Add(task *Task) (err error) {
	err = insertTaskToDB(engine.db, engine.logger, task)
	if err != nil {
		engine.logger.Debugf("[ engine ] Add: engine.insertTaskToDB(task) err %v", err)
		return
	}
	engine.queue.Enqueue(task)
	if engine.logger != nil {
		engine.logger.WithFields(logrus.Fields{
			"UUID":         task.UUID,
			"IdoniaAPIKey": task.Authorization.APIKey,
		}).Infof("[ engine ] New task queued")
	}
	return
}
// Start reloads pending tasks from the database, then runs `threads` worker
// goroutines that drain the queue until Stop is called. Blocks until stopped.
func (engine *Engine) Start(threads int) {
	pendingTasks, err := readPendingTasksFromDB(engine.db, engine.logger)
	if err != nil {
		engine.logger.Debugf("[ engine ] Start: engine.readPendingTasksFromDB() err %v", err)
		panic(err)
	}
	for _, v := range pendingTasks {
		engine.queue.Enqueue(v)
		if engine.logger != nil {
			engine.logger.WithFields(logrus.Fields{
				"UUID":         v.UUID,
				"IdoniaAPIKey": v.Authorization.APIKey,
			}).Infof("[ engine ] New task queued")
		}
	}
	for thread := 0; thread < threads; thread++ {
		// BUG FIX: the per-thread stop channels were never created, so both
		// the worker's receive and the shutdown send below operated on nil
		// channels and blocked forever. Buffered so the shutdown send cannot
		// block even if a worker is mid-task.
		stopCh := make(chan bool, 1)
		engine.stopThreads[thread] = stopCh
		go func(thread int, stopCh chan bool) {
			for {
				select {
				case <-stopCh:
					if engine.logger != nil {
						engine.logger.WithFields(logrus.Fields{
							"Thread": thread,
						}).Debugf("[ engine ] Stopping thread")
					}
					// BUG FIX: the original `break` only exited the select;
					// return actually terminates the worker goroutine.
					return
				default:
					if taskElement := engine.queue.Dequeue(); taskElement != nil {
						task := taskElement.(*Task)
						// BUG FIX: use a goroutine-local err instead of the
						// outer err variable shared (raced) across workers.
						if err := engine.executeTask(task); err != nil {
							engine.logger.WithError(err).WithFields(logrus.Fields{
								"Step": task.Step,
							}).Error("[ engine ] Error executing task!")
						}
					} else {
						time.Sleep(5 * time.Second)
					}
				}
			}
		}(thread, stopCh)
	}
	<-engine.stop
	for thread := 0; thread < threads; thread++ {
		engine.stopThreads[thread] <- true
	}
}
// Stop signals Start to shut down its worker pool; blocks until Start
// receives the signal.
func (engine *Engine) Stop() {
	if engine.logger != nil {
		engine.logger.Debugf("[ engine ] Stopping task engine")
	}
	engine.stop <- true
}
// insertTaskToDB writes task and its related rows (DICOM files, additional
// fields, then the task row itself) to the database.
// NOTE(review): when task.UUID is empty a fresh UUID is generated locally but
// never written back to task.UUID — confirm callers always set the UUID.
func insertTaskToDB(db *sql.DB, logger *logrus.Logger, task *Task) (err error) {
	taskUUID := task.UUID
	if len(taskUUID) == 0 {
		taskUUID = uuid.New().String()
	}
	for _, v := range task.DICOMFiles {
		err = repository.InsertTaskDicom(db, &repository.TaskDicom{
			UUID:              v.UUID,
			TaskUUID:          taskUUID,
			SeriesInstanceUID: v.SeriesInstanceUID,
			StudyUID:          v.StudyUID,
			ContainerID:       0,
			IsPending:         v.IsPending,
			SOPInstanceUID:    v.SOPInstanceUID,
			Status:            string(New),
		})
		if err != nil {
			logger.Debugf("[ engine ] insertTaskToDB: repository.InsertTaskDicom(engine.db, &repository.TaskDicom...) err %v", err)
			return err
		}
	}
	for k, v := range task.AdditionalFields {
		err = repository.InsertTaskAdditionalField(db, &repository.TaskAdditionalField{
			TaskUUID: taskUUID,
			Key:      k,
			Value:    v,
		})
		if err != nil {
			logger.Debugf("[ engine ] insertTaskToDB: repository.InsertTaskAdditionalField(engine.db, &repository.TaskAdditionalField...) err %v", err)
			return
		}
	}
	// Nested structures are serialized to JSON strings for storage.
	authorization, err := json.Marshal(task.Authorization)
	if err != nil {
		logger.Debugf("[ engine ] insertTaskToDB: json.Marshal(task.Destination) err %v", err)
		return
	}
	steps, err := json.Marshal(task.Steps)
	if err != nil {
		logger.Debugf("[ engine ] insertTaskToDB: json.Marshal(task.Destination) err %v", err)
		return
	}
	err = repository.InsertTask(db, &repository.Task{
		UUID:          taskUUID,
		Authorization: string(authorization),
		Steps:         string(steps),
		Status:        string(New),
		Error:         model.Error{},
		StudyID:       task.StudyID,
		StudyUID:      task.StudyUID,
		ContainerID:   task.ContainerID,
	})
	if err != nil {
		logger.Debugf("[ engine ] insertTaskToDB: repository.InsertTask(engine.db, &repository.Task... err %v", err)
	}
	return
}
// updateTaskInDB rewrites the persisted state of task: the task row itself,
// an upsert of each DICOM file row, and a delete-then-reinsert of the
// additional fields.
func updateTaskInDB(db *sql.DB, logger *logrus.Logger, task *Task) (err error) {
	// Nested structures are serialized to JSON strings for storage.
	steps, err := json.Marshal(task.Steps)
	if err != nil {
		logger.Debugf("[ engine ] updateTaskInDB: json.Marshal(task.Destination) err %v", err)
		return
	}
	auth, err := json.Marshal(task.Authorization)
	if err != nil {
		logger.Debugf("[ engine ] updateTaskInDB: json.Marshal(v.Description) err %v", err)
		return err
	}
	err = repository.EditTask(db, task.UUID, &repository.Task{
		UUID:          task.UUID,
		Authorization: string(auth),
		Steps:         string(steps),
		StudyID:       task.StudyID,
		IsReported:    task.IsReported,
		Step:          task.Step,
		Status:        string(task.Status),
		Error:         task.Error,
		StudyUID:      task.StudyUID,
		ContainerID:   task.ContainerID,
	})
	if err != nil {
		logger.Debugf("[ engine ] updateTaskInDB: repository.EditTask(engine.db, task.UUID, &repository.Task... err %v", err)
		return
	}
	for _, v := range task.DICOMFiles { // UPSERT: insert when the row is missing, edit otherwise
		_, err := repository.GetTaskDicom(db, v.UUID)
		if err != nil {
			err = nil
			err = repository.InsertTaskDicom(db, &repository.TaskDicom{
				UUID:              v.UUID,
				TaskUUID:          task.UUID,
				SeriesInstanceUID: v.SeriesInstanceUID,
				StudyUID:          v.StudyUID,
				SOPInstanceUID:    v.SOPInstanceUID,
				Status:            string(v.Status),
				Error:             v.Error,
			})
		} else {
			err = repository.EditTaskDicom(db, v.UUID, &repository.TaskDicom{
				UUID:              v.UUID,
				TaskUUID:          task.UUID,
				SeriesInstanceUID: v.SeriesInstanceUID,
				StudyUID:          v.StudyUID,
				SOPInstanceUID:    v.SOPInstanceUID,
				Status:            string(v.Status),
				Error:             v.Error,
			})
		}
		if err != nil {
			logger.Debugf("[ engine ] updateTaskInDB: repository.EditTaskDicom(engine.db, v.UUID, &repository.TaskDicom... err %v", err)
			return err
		}
	}
	// Additional fields are replaced wholesale: delete all, then reinsert.
	err = repository.DeleteTaskAdditionalFields(db, task.UUID)
	if err != nil {
		logger.Debugf("[ engine ] updateTaskInDB: repository.DeleteTaskAdditionalFields(engine.db, task.UUID) err %v", err)
		return
	}
	for k, v := range task.AdditionalFields {
		err = repository.InsertTaskAdditionalField(db, &repository.TaskAdditionalField{
			TaskUUID: task.UUID,
			Key:      k,
			Value:    v,
		})
		if err != nil {
			logger.Debugf("[ engine ] updateTaskInDB: InsertTaskAdditionalField(engine.db, &repository.TaskAdditionalField... err %v", err)
			return
		}
	}
	return
}
// readTaskFromDB loads the task identified by uuid, including its steps,
// authorization, DICOM files and additional fields.
// NOTE(review): the bare fmt.Println calls look like debug leftovers.
func readTaskFromDB(db *sql.DB, logger *logrus.Logger, uuid string) (task *Task, err error) {
	fmt.Println(uuid)
	simpleTask, err := repository.GetTask(db, uuid)
	if err != nil {
		fmt.Println(1)
		logger.Debugf("[ engine ] readTaskFromDB: repository.GetTask(engine.db, uuid) err %v", err)
		return
	}
	task = &Task{
		UUID:        simpleTask.UUID,
		Status:      Status(simpleTask.Status),
		Step:        simpleTask.Step,
		Error:       simpleTask.Error,
		StudyUID:    simpleTask.StudyUID,
		StudyID:     simpleTask.StudyID,
		IsReported:  simpleTask.IsReported,
		ContainerID: simpleTask.ContainerID,
	}
	// Steps and Authorization are stored as JSON strings; decode them back.
	err = json.Unmarshal([]byte(simpleTask.Steps), &task.Steps)
	if err != nil {
		fmt.Println(2)
		logger.Debugf("[ engine ] readTaskFromDB: json.Unmarshal([]byte(simpleTask.Destination), &task.Destination) err %v", err)
		return nil, err
	}
	err = json.Unmarshal([]byte(simpleTask.Authorization), &task.Authorization)
	if err != nil {
		fmt.Println(3)
		logger.Debugf("[ engine ] readTaskFromDB: json.Unmarshal([]byte(v.Description), &description) err %v", err)
		return nil, err
	}
	task.AdditionalFields = make(map[string]string)
	DICOMFiles, err := repository.GetTaskDicoms(db, simpleTask.UUID)
	if err != nil {
		fmt.Println(4)
		logger.Debugf("[ engine ] readTaskFromDB: repository.GetTaskDicoms(engine.db, simpleTask.UUID) err %v", err)
		return nil, err
	}
	for _, v := range DICOMFiles {
		task.DICOMFiles = append(task.DICOMFiles, DICOMFile{
			UUID:              v.UUID,
			SeriesInstanceUID: v.SeriesInstanceUID,
			StudyUID:          v.StudyUID,
			ContainerID:       v.ContainerID,
			SOPInstanceUID:    v.SOPInstanceUID,
			Status:            Status(v.Status),
			Error:             v.Error,
		})
	}
	AdditionalFields, err := repository.GetTaskAdditionalFields(db, simpleTask.UUID)
	if err != nil {
		fmt.Println(5)
		logger.Debugf("[ engine ] readTaskFromDB: repository.GetTaskAdditionalFields(engine.db, simpleTask.UUID) err %v", err)
		return nil, err
	}
	for _, v := range AdditionalFields {
		task.AdditionalFields[v.Key] = v.Value
	}
	return
}
// readPendingTasksFromDB loads every task still marked pending, fully
// hydrated via readTaskFromDB. Used by Engine.Start to resume after restart.
func readPendingTasksFromDB(db *sql.DB, logger *logrus.Logger) (tasks []*Task, err error) {
	repoTasks, err := repository.GetPendingTasks(db)
	if err != nil {
		fmt.Println("pending")
		logger.Debugf("[ engine ] readPendingTasksFromDB: repository.GetPendingTasks(engine.db) err %v", err)
		return nil, err
	}
	for _, v := range repoTasks {
		task, err := readTaskFromDB(db, logger, v.UUID)
		if err != nil {
			fmt.Println("read task")
			logger.Debugf("[ engine ] readPendingTasksFromDB: engine.readTaskFromDB(v.UUID) err %v", err)
			return nil, err
		}
		tasks = append(tasks, task)
	}
	return
}
// readAffectedTasksByUpdateFromDB loads every task associated with studyUID,
// fully hydrated via readTaskFromDB.
func readAffectedTasksByUpdateFromDB(db *sql.DB, logger *logrus.Logger, studyUID string) (tasks []*Task, err error) {
	repoTasks, err := repository.GetTasksByStudyUID(db, studyUID)
	if err != nil {
		logger.Debugf("[ engine ] readAffectedTasksByUpdateFromDB: repository.GetTasksByStudyUID(engine.db) err %v", err)
		return nil, err
	}
	for _, repoTask := range repoTasks {
		var task *Task
		task, err = readTaskFromDB(db, logger, repoTask.UUID)
		if err != nil {
			logger.Debugf("[ engine ] readAffectedTasksByUpdateFromDB: engine.readTaskFromDB(v.UUID) err %v", err)
			return nil, err
		}
		tasks = append(tasks, task)
	}
	return tasks, nil
}
// updateDICOMFiles is a placeholder for re-synchronizing a task's DICOM file
// list after PACS-side changes; not yet implemented (always returns nil).
func (engine *Engine) updateDICOMFiles(task *Task) (err error) {
	//TODO RESYNC FOR NEW changes
	return
}
// retrieveDICOM synchronizes the task's DICOM file list with the PACS and
// with the study's state in Idonia, queuing upload steps as needed, and
// persists the updated task.
func (engine *Engine) retrieveDICOM(task *Task) (err error) {
	if len(task.StudyUID) > 0 {
		var InstancesInPacs []DICOMInstance
		url := GetStudyInstancesURL(task.StudyUID, *engine.config)
		response, err := http.Get(url)
		if err != nil {
			fmt.Println("Error 8")
			return err
		}
		if response == nil {
			// BUG FIX: this path previously returned a nil error, so the
			// caller treated a missing response as success.
			fmt.Println("Error 9")
			return fmt.Errorf("empty response from PACS")
		}
		// BUG FIX: the response body was never closed, leaking connections.
		defer response.Body.Close()
		fmt.Println(response.Body)
		fmt.Println(url)
		body, err := ioutil.ReadAll(response.Body)
		if err != nil {
			// BUG FIX: the ReadAll error was previously ignored.
			return err
		}
		err = json.Unmarshal(body, &InstancesInPacs)
		if err != nil {
			fmt.Println("Error 10")
			return err
		}
		DICOMsInPacs := GetDicomFilesFromInstance(InstancesInPacs)
		study, err := repository.GetStudy(engine.db, task.StudyUID)
		if err != nil {
			if err != sql.ErrNoRows {
				fmt.Println("Error 1")
				return err
			}
			// Study unknown locally: upload everything to Idonia.
			task.AddUploadAllToIdoniaSteps()
			task.DICOMFiles = DICOMsInPacs
			err = updateTaskInDB(engine.db, engine.logger, task)
			fmt.Println("Error 2")
			return err
		}
		container, err := idonia_core.GetContainer(&idonia_core.GetContainerReq{
			ContainerID: &study.ContainerID}, task.Authorization)
		if err != nil {
			// Assume the study was removed on the Idonia side: forget the
			// local study record and re-upload everything.
			engine.logger.Error(err.Error())
			task.AddUploadAllToIdoniaSteps()
			task.DICOMFiles = DICOMsInPacs
			err = repository.DeleteStudy(engine.db, task.StudyUID)
			if err != nil {
				fmt.Println("Error 3")
				return err
			}
			err = updateTaskInDB(engine.db, engine.logger, task)
			if err != nil {
				fmt.Println("Error 4")
				return err
			}
			fmt.Println("Error 5")
			return err
		}
		var studyDICOMs []DICOMFile
		err = json.Unmarshal([]byte(study.DICOMFiles), &studyDICOMs)
		if err != nil {
			// BUG FIX: collapsed the original's duplicated dead error checks.
			fmt.Println("Error 6")
			return err
		}
		DICOMsInIdonia := GetNumberOfDicomInstances(container)
		if DICOMsInIdonia != len(studyDICOMs) {
			//TODO
		}
		task.StudyID = study.StudyID
		task.ContainerID = study.ContainerID
		task.IsReported = study.IsReported
		if len(DICOMsInPacs) != len(studyDICOMs) {
			// PACS has files we do not know about: upload only the pending ones.
			task.DICOMFiles = DICOMsInPacs
			task.markPendingDICOM(studyDICOMs)
			task.AddUploadPendingStep()
		}
	}
	err = updateTaskInDB(engine.db, engine.logger, task)
	fmt.Println("Error 11")
	return
}
// GetNumberOfDicomInstances totals the instance counts across every
// patient/study/series contained in the Idonia container response.
func GetNumberOfDicomInstances(container *idonia_core.GetContainerRes) (numberOfDICOM int) {
	for _, patient := range container.DicomPatients {
		for _, study := range patient.DicomStudies {
			for _, series := range study.DicomSeries {
				numberOfDICOM += series.InstancesCount
			}
		}
	}
	return
}
// executeTask drives task through its status state machine
// (New → Retrieving → Retrieved → Sending → Sent → Completed), persisting the
// task after every transition. On any error the task is marked Error with
// model.ErrConf and the loop exits.
func (engine *Engine) executeTask(task *Task) (err error) {
	for {
		if engine.logger != nil {
			engine.logger.WithFields(logrus.Fields{
				"UUID":         task.UUID,
				"IdoniaAPIKey": task.Authorization.APIKey,
				"Status":       task.Status,
			}).Infof("[ engine ] Task Processing")
		}
		// An error set by the previous iteration exits the state loop.
		if err != nil {
			engine.logger.Debugf("[ engine ] executeTask: err %v", err)
			break
		}
		switch task.Status {
		default:
			fallthrough
		case New:
			task.Status = Retrieving
			err = updateTaskInDB(engine.db, engine.logger, task)
		case Retrieving:
			err = engine.retrieveDICOM(task)
			if err != nil {
				// Exits only the switch; the loop's top-of-iteration err
				// check then exits the loop.
				break
			}
			task.Status = Retrieved
			err = updateTaskInDB(engine.db, engine.logger, task)
		case Retrieved:
			task.Status = Sending
			err = updateTaskInDB(engine.db, engine.logger, task)
		case Sending:
			var noMoreSteps bool
			if noMoreSteps, err = task.RunNextStep(*engine.config); err != nil || noMoreSteps {
				if err != nil {
					break
				}
				if noMoreSteps {
					task.Status = Sent
				}
			}
			err = updateTaskInDB(engine.db, engine.logger, task)
		case Sent:
			task.Status = Completed
			err = updateTaskInDB(engine.db, engine.logger, task)
		case Error:
			if err == nil {
				return task.Error
			}
			return
		case Completed:
			err = saveStudyInDB(engine.db, engine.logger, task)
			if err != nil {
				fmt.Println(err.Error())
				task.Error = model.Error{Code: 500, Message: err.Error(), AdditionalMessage: "Cant save study as completed"}
				return task.Error
			}
			err = updateTaskInDB(engine.db, engine.logger, task)
			return
		}
	}
	// Error exit: persist the failed state before reporting it.
	if err != nil {
		task.Status = Error
		task.Error = model.ErrConf
		task.Error.AdditionalMessage = err.Error()
		erro := updateTaskInDB(engine.db, engine.logger, task)
		if erro != nil {
			engine.logger.Debugf("[ engine ] executeTask: Error during updateTask (status=Error): err %v", erro)
		}
		engine.logger.WithFields(logrus.Fields{
			"UUID":         task.UUID,
			"IdoniaAPIKey": task.Authorization.APIKey,
			"Status":       task.Status,
		}).WithError(err).Error("[ engine ] Task Error")
	}
	return
}
// saveStudyInDB upserts the study described by task, resolving ContainerID
// and StudyID from task.AdditionalFields when they are not set on the task.
func saveStudyInDB(db *sql.DB, logger *logrus.Logger, task *Task) error {
	if task.ContainerID == 0 {
		fmt.Println(fmt.Sprintf("%+v", task))
		if cID, ok := task.AdditionalFields["ContainerID"]; ok {
			containerId, err := strconv.Atoi(cID)
			if err != nil {
				return errors.New("ContainerID cant be empty")
			}
			task.ContainerID = uint32(containerId)
			if task.ContainerID == 0 {
				return errors.New("ContainerID cant be empty")
			}
		}
	}
	if task.StudyID == "" {
		fmt.Println(fmt.Sprintf("%+v", task))
		if studyID, ok := task.AdditionalFields["StudyID"]; ok {
			task.StudyID = studyID
		}
		// BUG FIX: the original returned "StudyID cant be empty" even after
		// successfully recovering StudyID from AdditionalFields, so such
		// tasks could never complete.
		if task.StudyID == "" {
			return errors.New("StudyID cant be empty")
		}
	}
	study, err := repository.GetStudy(db, task.StudyUID)
	if err != nil {
		if err != sql.ErrNoRows {
			return err
		}
		logger.Info("Study no exists in DB:", task.StudyUID)
	}
	dicom, err := json.Marshal(task.DICOMFiles)
	if err != nil {
		return err
	}
	aStudy := &repository.Study{
		UUID:        uuid.New().String(),
		StudyUID:    task.StudyUID,
		IsReported:  task.IsReported,
		ContainerID: task.ContainerID,
		StudyID:     task.StudyID,
		DICOMFiles:  string(dicom),
	}
	if url, ok := task.AdditionalFields["PublicURL"]; ok {
		aStudy.MagicLink = url
	}
	fmt.Println(study)
	// Insert when the study is new, otherwise update the existing row.
	if study == nil {
		err = repository.InsertStudy(db, *aStudy)
		if err != nil {
			logger.Error("Error inserting study:", err.Error())
		}
	} else {
		err = repository.UpdateStudy(db, *aStudy)
		if err != nil {
			logger.Error("Error updating study:", err.Error())
		}
	}
	return err
}
/*func RunContainerUpdate(db *sql.DB, logger *logrus.Logger, task *Task, datasets map[string]*dicom.DataSet) (updated bool, err error) {
var newDICOMFiles []DICOMFile
for path, dataset := range datasets {
found := false
for _, dicom := range task.DICOMFiles {
if dicom.Path == path {
found = true
}
}
if !found {
namestring := ""
patientNamestring := ""
patientIdstring := ""
modalitystring := ""
modalitiesInStudystring := ""
institutionNamestring := ""
studyInstanceUIDstring := ""
studyIDstring := ""
studyDatestring := ""
studyTimestring := ""
studyDescriptionstring := ""
bodyPartExaminedstring := ""
name, err := dataset.FindElementByTag(dicomtag.SOPInstanceUID)
if err == nil {
namestring, _ = name.GetString()
}
patientName, err := dataset.FindElementByTag(dicomtag.PatientName)
if err == nil {
patientNamestring, _ = patientName.GetString()
}
patientId, err := dataset.FindElementByTag(dicomtag.PatientID)
if err == nil {
patientIdstring, _ = patientId.GetString()
}
modality, err := dataset.FindElementByTag(dicomtag.Modality)
if err == nil {
modalitystring, _ = modality.GetString()
}
modalitiesInStudy, err := dataset.FindElementByTag(dicomtag.ModalitiesInStudy)
if err == nil {
modalitiesInStudystring, _ = modalitiesInStudy.GetString()
}
institutionName, err := dataset.FindElementByTag(dicomtag.InstitutionName)
if err == nil {
institutionNamestring, _ = institutionName.GetString()
}
studyInstanceUID, err := dataset.FindElementByTag(dicomtag.StudyInstanceUID)
if err == nil {
studyInstanceUIDstring, _ = studyInstanceUID.GetString()
}
studyID, err := dataset.FindElementByTag(dicomtag.StudyID)
if err == nil {
studyIDstring, _ = studyID.GetString()
}
studyDate, err := dataset.FindElementByTag(dicomtag.StudyDate)
if err == nil {
studyDatestring, _ = studyDate.GetString()
}
studyTime, err := dataset.FindElementByTag(dicomtag.StudyTime)
if err == nil {
studyTimestring, _ = studyTime.GetString()
}
studyDescription, err := dataset.FindElementByTag(dicomtag.StudyDescription)
if err == nil {
studyDescriptionstring, _ = studyDescription.GetString()
}
bodyPartExamined, err := dataset.FindElementByTag(dicomtag.BodyPartExamined)
if err == nil {
bodyPartExaminedstring, _ = bodyPartExamined.GetString()
}
newDICOMFiles = append(newDICOMFiles, DICOMFile{
UUID: uuid.New().String(),
Name: namestring + ".dcm",
Description: DICOMDescription{
PatientName: patientNamestring,
PatientId: patientIdstring,
Modality: modalitystring,
ModalitiesInStudy: modalitiesInStudystring,
InstitutionName: institutionNamestring,
StudyInstanceUID: studyInstanceUIDstring,
StudyID: studyIDstring,
StudyDate: studyDatestring,
StudyTime: studyTimestring,
StudyDescription: studyDescriptionstring,
BodyPartExamined: bodyPartExaminedstring,
},
Path: path,
StudyUID: studyInstanceUIDstring,
SOPInstanceUID: namestring,
Status: New,
})
}
}
task.DICOMFiles = append(task.DICOMFiles, newDICOMFiles...)
for _, dicom := range newDICOMFiles {
policy, err := idonia_core.GetPolicy(&idonia.Auth{
AccountID: task.IdoniaAccountID,
Token: task.IdoniaToken,
})
if err != nil {
task.Error = model.ErrIdonia
task.Error.AdditionalMessage = err.Error()
task.Status = Error
return false, err
}
err = gcs.UploadFile(dicom.Path, policy)
if err != nil {
task.Error = model.ErrIdonia
task.Error.AdditionalMessage = err.Error()
task.Status = Error
return false, err
}
_, err = idonia_core.PostContainers_ContainerIDDICOM(&idonia_core.PostContainers_ContainerIDDICOMReq{
UUID: &policy.UUID,
}, int(task.ContainerID), &idonia.Auth{
AccountID: task.IdoniaAccountID,
Token: task.IdoniaToken,
})
if err != nil {
task.Error = model.ErrIdonia
task.Error.AdditionalMessage = err.Error()
task.Status = Error
return false, err
}
}
updateTaskInDB(db, logger, task)
return len(newDICOMFiles) > 0, nil
}
*/
/*func (engine *Engine) UpdatedStudy(studyUID string) (affected int) {
tasks, _ := readAffectedTasksByUpdateFromDB(engine.db, engine.logger, studyUID)
affectedTasks := 0
wg := sync.WaitGroup{}
for _, task := range tasks {
wg.Add(1)
go func(task *Task) {
defer wg.Done()
for {
task, _ := readTaskFromDB(engine.db, engine.logger, task.UUID)
switch task.Status {
case Error:
return
case Completed:
paths, _ := engine.dicomManager.GetStudyFiles(studyUID)
files := make(map[string]*dicom.DataSet)
for _, path := range paths {
files[path], _ = engine.dicomManager.GetDCMDataset(path)
}
updated, err := RunContainerUpdate(engine.db, engine.logger, task, files)
if err == nil && updated {
affectedTasks++
}
return
case Retrieving:
if engine.TasksWaiting[task.UUID] != nil {
close(engine.TasksWaiting[task.UUID])
}
affectedTasks++
return
default:
//Wait for the task to finish
time.Sleep(10 * time.Second)
}
}
}(task)
}
wg.Wait()
return affectedTasks
}*/
// ErrorWhileRetrieving forwards err to every task currently waiting on the
// retrieval of the given study.
func (engine *Engine) ErrorWhileRetrieving(studyUID string, err error) {
	affected, _ := readAffectedTasksByUpdateFromDB(engine.db, engine.logger, studyUID)
	for _, t := range affected {
		if t.Status != Retrieving {
			continue
		}
		if ch := engine.TasksWaiting[t.UUID]; ch != nil {
			ch <- err
		}
	}
}
// GetCompletedFiles returns the DICOM files of the given task that have
// reached the Completed status.
func (engine *Engine) GetCompletedFiles(taskUUID string) (dicomFiles []DICOMFile, err error) {
	dicomTask, err := repository.GetTaskDicomWithStatus(engine.db, taskUUID, Completed)
	if err != nil {
		// Bug fix: the query error used to be silently carried alongside an
		// empty (nil) slice; return early instead.
		return nil, err
	}
	for _, dicom := range dicomTask {
		dicomFiles = append(dicomFiles, DICOMFile{
			UUID:              dicom.UUID,
			StudyUID:          dicom.StudyUID,
			SeriesInstanceUID: dicom.SeriesInstanceUID,
			ContainerID:       dicom.ContainerID,
			SOPInstanceUID:    dicom.SOPInstanceUID,
			Status:            Status(dicom.Status),
			Error:             dicom.Error,
		})
	}
	return dicomFiles, nil
}
|
// Copyright (c) 2013-2016 by Michael Dvorkin. All Rights Reserved.
// Use of this source code is governed by a MIT-style license that can
// be found in the LICENSE file.
package mop
// Stock stores quote information for the particular stock ticker. The data
// for all the fields except 'Advancing' is fetched using Yahoo market API.
type Stock struct {
	Ticker string // Stock ticker.
	LastTrade string // l1: last trade.
	Change string // c6: change real time.
	ChangePct string // k2: percent change real time.
	Open string // o: market open price.
	Low string // g: day's low.
	High string // h: day's high.
	Low52 string // j: 52-weeks low.
	High52 string // k: 52-weeks high.
	Volume string // v: volume.
	AvgVolume string // a2: average volume.
	PeRatio string // r2: P/E ratio real time.
	PeRatioX string // r: P/E ratio (fallback when real time is N/A).
	Dividend string // d: dividend.
	Yield string // y: dividend yield.
	MarketCap string // j3: market cap real time.
	MarketCapX string // j1: market cap (fallback when real time is N/A).
	Advancing bool // True when change is >= $0.
}
// Quotes stores relevant pointers as well as the array of stock quotes for
// the tickers we are tracking.
type Quotes struct {
	market *Market // Pointer to Market.
	profile *Profile // Pointer to Profile.
	stocks []Stock // Array of stock quote data.
	errors string // Error string if any; empty when the last fetch succeeded.
}
|
package main
import (
"bufio"
"fmt"
"os"
"strings"
)
// getInput reads input.txt and returns each line split into
// single-character cells.
func getInput() [][]string {
	file, err := os.Open("input.txt")
	if err != nil {
		// Bug fix: the open error used to be discarded, silently yielding an
		// empty grid; fail loudly instead.
		panic(err)
	}
	defer file.Close()

	var lines [][]string
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		lines = append(lines, strings.Split(scanner.Text(), ""))
	}
	if err := scanner.Err(); err != nil {
		panic(err)
	}
	return lines
}
// countTreesOnSlope counts "#" cells hit when descending the grid dy rows
// and dx columns per step, wrapping horizontally over the repeated pattern.
func countTreesOnSlope(input [][]string, dx int, dy int) int {
	width := len(input[0])
	trees := 0
	x := 0
	for y := 0; y < len(input); y += dy {
		if input[y][x%width] == "#" {
			trees++
		}
		x += dx
	}
	return trees
}
// main solves both parts of the toboggan puzzle: the single slope (3,1) and
// the product over all five slopes.
func main() {
	grid := getInput()

	part1 := countTreesOnSlope(grid, 3, 1)
	fmt.Println("Tree count for slope right 3, down 1:\t", part1)

	product := part1
	for _, s := range [4][2]int{{1, 1}, {5, 1}, {7, 1}, {1, 2}} {
		product *= countTreesOnSlope(grid, s[0], s[1])
	}
	fmt.Println("Tree count product for all slopes:\t", product)
}
|
package service
import (
"go/internal/pkg/model"
"gorm.io/gorm"
)
// Users describes the persistence operations available for user records.
type Users interface {
	// Create inserts m using the supplied transaction and returns it.
	Create(m *model.Users, tx *gorm.DB) (*model.Users, error)
	// FindOneBy returns a single user matching criteria.
	FindOneBy(criteria map[string]interface{}) (*model.Users, error)
	// Count returns the number of users matching criteria (0 on error).
	Count(criteria map[string]interface{}) int
}
// userServices is the private gorm-backed implementation of Users.
type userServices struct {
	db *gorm.DB
}
// NewUserServices constructs a Users service bound to the given DB handle.
func NewUserServices(db *gorm.DB) Users {
	return &userServices{db: db}
}
// Create persists m inside the supplied transaction and returns it.
func (svc *userServices) Create(m *model.Users, tx *gorm.DB) (*model.Users, error) {
	if err := tx.Create(&m).Error; err != nil {
		return nil, err
	}
	return m, nil
}
// FindOneBy loads a single Users record matching criteria.
func (svc *userServices) FindOneBy(criteria map[string]interface{}) (*model.Users, error) {
	result := model.Users{}
	if err := svc.db.Where(criteria).Find(&result).Error; err != nil {
		return nil, err
	}
	return &result, nil
}
// Count returns the number of Users rows matching criteria. It returns 0
// when the underlying query fails.
func (svc *userServices) Count(criteria map[string]interface{}) int {
	var result int64
	// Use Model so GORM knows which table to count; a struct passed to
	// Where() only adds conditions and does not set the statement's model.
	err := svc.db.Model(&model.Users{}).Where(criteria).Count(&result).Error
	if err != nil {
		return 0
	}
	return int(result)
}
|
package main
import "testing"
// TestSolve checks Solve against two known input/output pairs.
func TestSolve(t *testing.T) {
	cases := []struct {
		n        int
		arr      []int
		expected int64
	}{
		{n: 4, arr: []int{1, 2, 3}, expected: 4},
		{n: 10, arr: []int{2, 5, 3, 6}, expected: 5},
	}
	for _, tc := range cases {
		if got := Solve(tc.n, tc.arr); got != tc.expected {
			t.Errorf("Solve(%v)=%v, expected %v", tc.arr, got, tc.expected)
		}
	}
}
|
// return先于defer执行,defer对有名返回值或者指针返回值会产生影响。对无名返回值不会产生影响。
package main
// main prints 4, 1, 3 and 4, demonstrating how defer interacts with named,
// unnamed and pointer return values.
func main() {
	println(DeferFunc1(1))
	println(DeferFunc2(1))
	println(DeferFunc3(1))
	println(*DeferFunc4(1))
}
// At return time t=1; the deferred func then runs t += 3. Because t is a
// named return value the mutation is visible to the caller: returns 4.
func DeferFunc1(i int) (t int) {
	t = i
	defer func() {
		t += 3
	}()
	return t
}
// At return time the result slot is set to t=1; the deferred func then runs
// t += 3, but with an unnamed return value only the local changes, so the
// function returns 1 even though t ends up as 4.
func DeferFunc2(i int) int {
	t := i
	defer func() {
		t += 3
	}()
	return t
}
// At return time t=2; the deferred func then runs t += i. Because t is a
// named return value the final result is 3.
func DeferFunc3(i int) (t int) {
	defer func() {
		t += i
	}()
	return 2
}
// The function returns a pointer to t. The deferred t += 3 runs before the
// caller dereferences the pointer, so *DeferFunc4(1) observes 4.
func DeferFunc4(i int) *int {
	t := i
	defer func() {
		t += 3
	}()
	return &t
}
|
package seev
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00400104 is the XML document wrapper for a seev.004.001.04
// MeetingInstruction message.
type Document00400104 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:seev.004.001.04 Document"`
	Message *MeetingInstructionV04 `xml:"MtgInstr"`
}
// AddMessage initialises the wrapped message and returns it for population.
func (d *Document00400104) AddMessage() *MeetingInstructionV04 {
	msg := &MeetingInstructionV04{}
	d.Message = msg
	return msg
}
// Scope
// A party holding the right to vote sends the MeetingInstruction message to an intermediary, the issuer or its agent to request the receiving party to act upon one or several instructions.
// Usage
// The MeetingInstruction message is used to register for a shareholders meeting, request blocking or registration of securities. It is used to assign a proxy, to specify the names of meeting attendees and to relay vote instructions per resolution electronically.
// The MeetingInstruction message may only be sent for one security, though several safekeeping places may be specified.
// Once the message is sent, it cannot be modified. It must be cancelled by a MeetingInstructionCancellationRequest. Only after receipt of a confirmed cancelled status via the MeetingInstructionStatus message, a new MeetingInstruction message can be sent.
type MeetingInstructionV04 struct {
	// Identifies the meeting instruction message.
	Identification *iso20022.MessageIdentification1 `xml:"Id"`
	// Series of elements that identify the meeting.
	MeetingReference *iso20022.MeetingReference4 `xml:"MtgRef"`
	// Party notifying the instructions.
	InstructingParty *iso20022.PartyIdentification9Choice `xml:"InstgPty"`
	// Identifies the security for which the meeting is organised.
	SecurityIdentification *iso20022.SecurityIdentification11 `xml:"SctyId"`
	// Identifies the position of the instructing party and the action that they want to take.
	Instruction []*iso20022.Instruction2 `xml:"Instr"`
	// Additional information that cannot be captured in the structured fields and/or any other specific block.
	Extension []*iso20022.Extension2 `xml:"Xtnsn,omitempty"`
}
// AddIdentification initialises the Id block and returns it for population.
func (m *MeetingInstructionV04) AddIdentification() *iso20022.MessageIdentification1 {
	id := &iso20022.MessageIdentification1{}
	m.Identification = id
	return id
}
// AddMeetingReference initialises the MtgRef block and returns it.
func (m *MeetingInstructionV04) AddMeetingReference() *iso20022.MeetingReference4 {
	ref := &iso20022.MeetingReference4{}
	m.MeetingReference = ref
	return ref
}
// AddInstructingParty initialises the InstgPty block and returns it.
func (m *MeetingInstructionV04) AddInstructingParty() *iso20022.PartyIdentification9Choice {
	party := &iso20022.PartyIdentification9Choice{}
	m.InstructingParty = party
	return party
}
// AddSecurityIdentification initialises the SctyId block and returns it.
func (m *MeetingInstructionV04) AddSecurityIdentification() *iso20022.SecurityIdentification11 {
	sec := &iso20022.SecurityIdentification11{}
	m.SecurityIdentification = sec
	return sec
}
// AddInstruction appends a fresh Instr entry and returns it.
func (m *MeetingInstructionV04) AddInstruction() *iso20022.Instruction2 {
	instr := &iso20022.Instruction2{}
	m.Instruction = append(m.Instruction, instr)
	return instr
}
// AddExtension appends a fresh Xtnsn entry and returns it.
func (m *MeetingInstructionV04) AddExtension() *iso20022.Extension2 {
	ext := &iso20022.Extension2{}
	m.Extension = append(m.Extension, ext)
	return ext
}
|
package main
import "fmt"
import "github.com/roessland/gopkg/mathutil"
// Next maps n to the sum of the factorials of its decimal digits.
func Next(n int64) int64 {
	var sum int64
	for _, digit := range mathutil.ToDigits(n, 10) {
		sum += mathutil.Factorial(digit)
	}
	return sum
}
// ChainLength counts the distinct terms produced before the digit-factorial
// chain starting at n first repeats itself.
func ChainLength(n int64) int64 {
	seen := map[int64]bool{n: true}
	cur := n
	for {
		next := Next(cur)
		if seen[next] {
			// Cycle detected: the chain length is the number of distinct terms.
			return int64(len(seen))
		}
		seen[next] = true
		cur = next
	}
}
// ChainsWithLength counts starting values below maxNumber whose chain has
// exactly chainLength non-repeating terms.
func ChainsWithLength(maxNumber, chainLength int64) int64 {
	var count int64
	for n := int64(0); n < maxNumber; n++ {
		if ChainLength(n) == chainLength {
			count++
		}
	}
	return count
}
// main answers Project Euler 74: how many chains below one million contain
// exactly sixty non-repeating terms.
func main() {
	fmt.Printf("Chains with length 60: %v\n", ChainsWithLength(1000000, 60))
}
|
package graphicgo
import (
"fmt"
"os"
)
const screenSize = screenWidth * screenHeight * pixWidth
var drawBuff [screenSize]byte
var dev *os.File
var bgColor = BLACK
/**
 * @Description: start the module: open the framebuffer device and paint the
 *               initial background
 * @return error non-nil when the device cannot be opened
 */
func GraphInit() error {
	file, err := os.OpenFile(devPath, os.O_RDWR, 0664)
	if err != nil {
		fmt.Println("open file failed!, err:", err)
		return err
	}
	// Idiom fix: dropped the redundant else-branch after the early return.
	dev = file
	fmt.Println("GraphGo start successfully!")
	SetBgColor(LightSkyBlue)
	refreshBg()
	// start building the screen buff
	resetScreen()
	return nil
}
/**
 * @Description: end the module: close and release the device handle
 */
func GraphBye() {
	fmt.Println("GraphGo goodbye!")
	dev.Close()
	dev = nil
}
// GraphWrong is the failure shutdown path: report and release the device.
func GraphWrong() {
	fmt.Println("Wrong")
	dev.Close()
	dev = nil
}
// SetBgColor records the RGBA background color used by refreshBg.
func SetBgColor(color [4]byte) {
	bgColor = color
}
// refreshBg fills the whole draw buffer with the current background color,
// one 4-byte pixel at a time.
func refreshBg() {
	for i := int64(0); i < screenSize; i += 4 {
		copy(drawBuff[i:i+4], bgColor[:])
	}
}
/**
 * @Description: flush the draw buffer (the background) out to the device
 */
func resetScreen() {
	dev.Seek(0, 0)
	if _, err := dev.Write(drawBuff[:]); err != nil {
		fmt.Println(err)
		GraphWrong()
	}
}
|
//go:generate enumer -type K8sState -trimprefix K8s
package types
// This package contains the go code for a enumeration that represents the application
// state for the go runner. This code will be scanned and used by the enumer code generator
// to produce utility methods for the enumeration
// K8sState represents a desired state for the go runner and the lifecycle it has
// for servicing requests.
//
type K8sState int

const (
	// K8sUnknown is the zero value: the desired state has not been determined.
	K8sUnknown K8sState = iota
	K8sRunning // The runner should restart retrieving work and running if it is not doing so
	K8sDrainAndTerminate // The runner should complete its current outstanding work and then exit
	K8sDrainAndSuspend // The runner should complete its current outstanding work and then wait for a K8sResume
)
|
// Copyright 2019 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cc
import (
"testing"
)
// TestLinkerScript checks that a cc_object module carrying a linker_script
// property passes the cc test harness.
func TestLinkerScript(t *testing.T) {
	t.Run("script", func(t *testing.T) {
		testCc(t, `
		cc_object {
			name: "foo",
			srcs: ["baz.o"],
			linker_script: "foo.lds",
		}`)
	})
}
|
package main
import "strings"
/*
* @lc app=leetcode id=140 lang=golang
*
* [140] Word Break II
*/
// Brute-force segmentation; exceeds the time limit on adversarial inputs.
func wordBreak_TLE(s string, wordDict []string) []string {
	dict := make(map[string]struct{}, len(wordDict))
	for _, w := range wordDict {
		dict[w] = struct{}{}
	}
	inDict := func(w string) bool {
		_, ok := dict[w]
		return ok
	}
	res := make([]string, 0)
	segs := make([]string, 0, len(wordDict)*2)
	helper140(s, inDict, &res, segs)
	return res
}
// helper140 backtracks over every dictionary-word prefix of s, accumulating
// the chosen words in segs and appending each complete segmentation (joined
// by spaces) to res.
//
// segs is passed by value; slot t (the current depth) is overwritten before
// each recursive call, so deeper calls only ever read their own prefix of
// the shared backing array.
func helper140(s string, contains func(string) bool, res *[]string, segs []string) {
	if s == "" {
		// Consumed the whole string: emit the segmentation, if any.
		if len(segs) > 0 {
			*res = append(*res, strings.Join(segs, " "))
		}
		return
	}
	t := len(segs)
	segs = append(segs, "")
	for i := 1; i <= len(s); i++ {
		if contains(s[:i]) {
			segs[t] = s[:i]
			helper140(s[i:], contains, res, segs)
		}
	}
}
// wordBreak2 assumes wordDict is small, so each step iterates the dictionary
// instead of slicing the original string; this keeps pathological cases rare.
func wordBreak2(s string, wordDict []string) []string {
	memo := make(map[string][]string)
	return dfs140(s, wordDict, memo)
}
// dfs140 returns every sentence that segments s using words, memoising the
// list of segmentations for each suffix in m.
func dfs140(s string, words []string, m map[string][]string) []string {
	if cached, ok := m[s]; ok {
		return cached
	}
	sentences := make([]string, 0)
	if len(s) == 0 {
		// The empty suffix segments as the empty sentence — this seed is
		// what lets the loop below glue words together.
		sentences = append(sentences, "")
		m[s] = sentences
		return sentences
	}
	for _, w := range words {
		if !strings.HasPrefix(s, w) {
			continue
		}
		for _, tail := range dfs140(s[len(w):], words, m) {
			sentences = append(sentences, w+AorB(len(tail) == 0, "", " ")+tail)
		}
	}
	m[s] = sentences
	return sentences
}
// AorB returns a when val is true and b otherwise (a ternary substitute).
func AorB(val bool, a string, b string) string {
	result := b
	if val {
		result = a
	}
	return result
}
|
package Problem0347
import "sort"
// topKFrequent returns the k most frequent values of nums. Values tied with
// the k-th highest count are all included, as in the original.
func topKFrequent(nums []int, k int) []int {
	// Tally occurrences per value.
	freq := make(map[int]int, len(nums))
	for _, v := range nums {
		freq[v]++
	}
	// Sort the counts to locate the k-th highest one.
	counts := make([]int, 0, len(freq))
	for _, c := range freq {
		counts = append(counts, c)
	}
	sort.Ints(counts)
	// threshold is the minimum count a value needs to be reported.
	threshold := counts[len(counts)-k]
	out := make([]int, 0, k)
	for v, c := range freq {
		if c >= threshold {
			out = append(out, v)
		}
	}
	return out
}
|
/* _ _
*__ _____ __ ___ ___ __ _| |_ ___
*\ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
* \ V V / __/ (_| |\ V /| | (_| | || __/
* \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
*
* Copyright © 2016 - 2019 Weaviate. All rights reserved.
* LICENSE: https://github.com/semi-technologies/weaviate/blob/develop/LICENSE.md
* DESIGN & CONCEPT: Bob van Luijt (@bobvanluijt)
* CONTACT: hello@semi.technology
*/
// Package contextionary provides the toolset to add context to words.
package contextionary
// ItemIndex is an opaque type that models an index number used to identify a
// word.
type ItemIndex int

// IsPresent can be used after retrieving a word index (which does not error on
// its own), to see if the word was actually present in the contextionary.
// Negative indices encode "not found".
func (i *ItemIndex) IsPresent() bool {
	return *i >= 0
}
// Contextionary is the API to decouple the K-nn interface that is needed for
// Weaviate from a concrete implementation.
type Contextionary interface {
	// Return the number of items that is stored in the index.
	GetNumberOfItems() int
	// Returns the length of the used vectors.
	GetVectorLength() int
	// Look up a word, return an index.
	// Check for presence of the index with index.IsPresent()
	WordToItemIndex(word string) ItemIndex
	// Based on an index, return the associated word.
	ItemIndexToWord(item ItemIndex) (string, error)
	// Based on an index, return the occurrence of the associated word.
	ItemIndexToOccurrence(item ItemIndex) (uint64, error)
	// OccurrencePercentile shows the occurrence of the mentioned percentile in ascending order
	OccurrencePercentile(perc int) uint64
	// Get the vector of an item index.
	GetVectorForItemIndex(item ItemIndex) (*Vector, error)
	// Compute the distance between two items.
	GetDistance(a ItemIndex, b ItemIndex) (float32, error)
	// Get the n nearest neighbours of item, examining k trees.
	// Returns an array of indices, and of distances between item and the n-nearest neighbors.
	GetNnsByItem(item ItemIndex, n, k int) ([]ItemIndex, []float32, error)
	// Get the n nearest neighbours of item, examining k trees.
	// Returns an array of indices, and of distances between item and the n-nearest neighbors.
	GetNnsByVector(vector Vector, n, k int) ([]ItemIndex, []float32, error)
	// SafeGetSimilarWords returns n similar words in the contextionary,
	// examining k trees. It is guaranteed to have results, even if the word is
	// not in the contextionary. In this case the list only contains the word
	// itself. It can then still be used for exact match or Levenshtein-based
	// searches against db backends.
	SafeGetSimilarWords(word string, n, k int) ([]string, []float32)
	// SafeGetSimilarWordsWithCertainty returns similar words in the
	// contextionary, if they are close enough to match the required certainty.
	// It is guaranteed to have results, even if the word is not in the
	// contextionary. In this case the list only contains the word itself. It can
	// then still be used for exact match or Levenshtein-based searches against
	// db backends.
	SafeGetSimilarWordsWithCertainty(word string, certainty float32) []string
}
|
package philifence
import (
"fmt"
)
// Coordinate is a latitude/longitude pair in decimal degrees.
type Coordinate struct {
	lat float64
	lon float64
}

// Lon returns the longitude component.
func (co Coordinate) Lon() float64 {
	return co.lon
}

// Lat returns the latitude component.
func (co Coordinate) Lat() float64 {
	return co.lat
}

// String renders the coordinate as "[lat, lon]" with five decimal places.
func (co Coordinate) String() string {
	return fmt.Sprintf("[%.5f, %.5f]", co.lat, co.lon)
}
|
package cache
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"testing"
"github.com/docker/go-units"
)
// benchmarkBackendSequentialReadBuf measures sequential Get() throughput for
// a single cacheline of the given size.
func benchmarkBackendSequentialReadBuf(b *testing.B, c Cacher, size int64) {
	key := make([]byte, 32)
	rand.Read(key)
	// Put a non-empty cacheline in.
	info := make([]byte, size)
	rand.Read(info)
	tx := c.PutTransaction("bench", key)
	tx.Put(int64(len(info)), KIND_INFO, bytes.NewReader(info))
	tx.Commit()
	b.SetBytes(size)
	b.ResetTimer()
	// Read the cacheline back repeatedly.
	for i := 0; i < b.N; i += 1 {
		// Renamed from `size` to avoid shadowing the parameter.
		gotSize, reader, err := c.Get("bench", KIND_INFO, key)
		if err != nil {
			// Fixed message: this calls Get(), not Has().
			b.Fatalf("Unexpected error calling Get(): %s", err)
		}
		if gotSize == 0 {
			// Fixed message: the original claimed 0 was the expected size.
			b.Errorf("Expected Get() to return a non-zero size, got %d", gotSize)
		}
		if reader != nil {
			io.Copy(ioutil.Discard, reader)
			reader.Close()
		}
	}
}
// BenchmarkFSPositive benchmarks cache hits on the FS backend across several
// payload sizes.
func BenchmarkFSPositive(b *testing.B) {
	c, err := NewFS(func(f *FS) {
		f.Basepath = "./testdata"
		f.Quota = 1024 * 1024 * 1024
	})
	if err != nil {
		b.Fatalf("Error creating FS: %s", err)
	}
	defer func() {
		os.RemoveAll(c.Basepath)
	}()
	sizes := []int64{1024, 1024 * 128, 1024 * 1024, 1024 * 1024 * 128}
	for _, size := range sizes {
		name := fmt.Sprintf("streaming,size=%s", units.BytesSize(float64(size)))
		b.Run(name, func(b *testing.B) {
			benchmarkBackendSequentialReadBuf(b, c, size)
		})
	}
}
// BenchmarkFSPositiveStream benchmarks repeated 1 KiB cache hits against an
// FS backend with a tiny quota.
func BenchmarkFSPositiveStream(b *testing.B) {
	c, err := NewFS(func(f *FS) {
		f.Basepath = "./testdata"
		f.Quota = 1024 * 10
	})
	if err != nil {
		b.Fatalf("Error creating FS: %s", err)
	}
	defer func() {
		os.RemoveAll(c.Basepath)
	}()
	key := make([]byte, 32)
	rand.Read(key)
	// Put a non-empty cacheline in.
	info := make([]byte, 1024)
	rand.Read(info)
	tx := c.PutTransaction("bench", key)
	tx.Put(int64(len(info)), KIND_INFO, bytes.NewReader(info))
	tx.Commit()
	b.SetBytes(int64(len(info)))
	b.ResetTimer()
	// Read the cacheline back repeatedly.
	for i := 0; i < b.N; i += 1 {
		size, reader, err := c.Get("bench", KIND_INFO, key)
		if err != nil {
			// Fixed message: this calls Get(), not Has().
			b.Fatalf("Unexpected error calling Get(): %s", err)
		}
		if size == 0 {
			// Fixed message: the original claimed 0 was the expected size.
			b.Errorf("Expected Get() to return a non-zero size, got %d", size)
		}
		// Guard against a nil reader, mirroring benchmarkBackendSequentialReadBuf.
		if reader != nil {
			io.Copy(ioutil.Discard, reader)
			reader.Close()
		}
	}
}
|
package orm
import "database/sql"
// dbConn is the package-wide MySQL connection pool opened in init.
// NOTE(review): no MySQL driver is imported in this file; a blank import of
// e.g. github.com/go-sql-driver/mysql is presumably done elsewhere in the
// package — confirm, otherwise sql.Open fails with "unknown driver".
var dbConn *sql.DB
var err error
// init eagerly opens the shared connection pool and panics if the DSN is
// rejected, making the package unusable only loudly.
func init() {
	// Use a local error instead of the shared package-level `err`: writing
	// package state here invites accidental reuse. Behaviour is unchanged —
	// on failure we panic, so the package var was only ever observed as nil.
	var openErr error
	dbConn, openErr = sql.Open("mysql", "root:root@tcp(127.0.0.1:3306)/soup?charset=utf8")
	if openErr != nil {
		panic(openErr.Error())
	}
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-08-17 09:32
# @File : lt_76_Minimum_Window_Substring.go
# @Description :
# @Attention :
*/
package slide_window
import (
"fmt"
"testing"
)
// Test_minWindow exercises minWindow2 on the classic LeetCode 76 example.
func Test_minWindow(t *testing.T) {
	got := minWindow2("ADOBECODEBANC", "ABC")
	fmt.Println(got)
}
|
/*
Package web handle requests and send HTML pages for general browsers.
*/
package web
import (
"html/template"
"net/http"
"strconv"
"newton449.com/dao"
)
// init registers every HTTP handler exposed by the web UI.
func init() {
	/* NOTE Patterns ending with "/" will be treated as prefix-matching. Others are exact-matching.*/
	// registers handlers
	http.HandleFunc("/", rootHandler)
	http.HandleFunc("/web/", indexHandler)
	http.HandleFunc("/web/products/", productsHandler)
	//http.HandleFunc("/web/products/actions/", productsActionsHandler)
	http.HandleFunc("/web/products/actions/adding", productsAddingHandler)
	http.HandleFunc("/web/products/actions/add", productsAddHandler)
	http.HandleFunc("/web/products/actions/editing", productsEditingHandler)
	http.HandleFunc("/web/products/actions/edit", productsEditHandler)
	http.HandleFunc("/web/products/actions/delete", productsDeleteHandler)
}
// parseTemplate parses the named page template together with the shared
// header, footer and links partials from the default template directory.
func parseTemplate(tmpName string) (*template.Template, error) {
	const dir = "newton449.com/web/"
	return template.ParseFiles(
		dir+tmpName+".html",
		dir+"header.html",
		dir+"footer.html",
		dir+"links.html",
	)
}
// rootHandler redirects exactly "/" to the index page; any other path that
// falls through to this pattern is a 404.
func rootHandler(w http.ResponseWriter, r *http.Request) {
	if r.URL.Path == "/" {
		http.Redirect(w, r, "/web/", 302)
		return
	}
	handleNotFound(w, r)
}
// indexHandler renders the index template for exactly "/web/".
func indexHandler(w http.ResponseWriter, r *http.Request) {
	if r.URL.Path != "/web/" {
		handleNotFound(w, r)
		return
	}
	tmpl, err := parseTemplate("index")
	if err != nil {
		handleError(err, w, r)
		return
	}
	tmpl.Execute(w, nil)
}
// productsHandler routes GET /web/products/ to the product list and
// GET /web/products/<id> to the product detail page.
func productsHandler(w http.ResponseWriter, r *http.Request) {
	// Strip the "/web/products/" prefix (14 characters) to get the id part.
	path := r.URL.Path
	subpath := path[14:]
	if r.Method == "GET" {
		if subpath == "" {
			// show product list
			showProductList(w, r)
		} else {
			id, err := strconv.ParseInt(subpath, 0, 64)
			if err != nil {
				// Bug fix: the parse error used to be silently ignored and
				// the detail page rendered with a zero id.
				handleNotFound(w, r)
				return
			}
			// show one product
			showProductDetail(id, w, r)
		}
	}
}
// showProductList renders every product in the database.
func showProductList(w http.ResponseWriter, r *http.Request) {
	list, err := dao.NewProductDAO(r).Select()
	if err != nil {
		handleError(err, w, r)
		return
	}
	tmpl, err := parseTemplate("productList")
	if err != nil {
		handleError(err, w, r)
		return
	}
	tmpl.Execute(w, map[string]interface{}{"list": list})
}
// showProductDetail renders the detail page of one product by id.
func showProductDetail(id int64, w http.ResponseWriter, r *http.Request) {
	// Fetch the product.
	p, err := dao.NewProductDAO(r).Get(id)
	if err != nil {
		handleError(err, w, r)
		return
	}
	// Render the page.
	tmpl, err := parseTemplate("productDetail")
	if err != nil {
		handleError(err, w, r)
		return
	}
	tmpl.Execute(w, map[string]interface{}{"product": p})
}
// productsAddingHandler shows an empty product form posting to the add action.
func productsAddingHandler(w http.ResponseWriter, r *http.Request) {
	showProductForm("Add A Product", "/web/products/actions/add", nil, "", w, r)
}
// productsAddHandler validates the submitted form and inserts a new product.
func productsAddHandler(w http.ResponseWriter, r *http.Request) {
	// create a product
	p := dao.NewProduct()
	// properties
	if err := r.ParseForm(); err != nil {
		handleError(err, w, r)
		return
	}
	p.Title = r.PostFormValue("title")
	p.Body = (dao.Clob)(r.PostFormValue("body"))
	// validation
	var errMsg string
	if len(p.Title) < 1 || len(p.Title) > 150 {
		errMsg = "Invalid title!"
	} else if len(p.Body) < 1 {
		// Bug fix: this branch re-checked the title (making it unreachable)
		// although its message refers to the body.
		errMsg = "Invalid body!"
	}
	if errMsg != "" {
		showProductForm("Add A Product", "/web/products/actions/add", nil, errMsg, w, r)
		return
	}
	// add to database
	pdao := dao.NewProductDAO(r)
	if err := pdao.Add(p); err != nil {
		handleError(err, w, r)
		return
	}
	// show successful message
	showProductMessage("Added Successful", "A product has been added to databases.", w, r)
}
// productsEditingHandler loads a product and shows the edit form pre-filled
// with its current values.
func productsEditingHandler(w http.ResponseWriter, r *http.Request) {
	if err := r.ParseForm(); err != nil {
		handleError(err, w, r)
		return
	}
	id, err := strconv.ParseInt(r.FormValue("id"), 10, 64)
	if err != nil {
		handleError(err, w, r)
		return
	}
	// Read the product.
	p, err := dao.NewProductDAO(r).Get(id)
	if err != nil {
		handleError(err, w, r)
		return
	}
	// Render the form with the current values.
	fields := map[string]string{
		"id":    strconv.FormatInt(p.Id, 10),
		"title": p.Title,
		"body":  string(p.Body),
	}
	showProductForm("Edit A Product", "/web/products/actions/edit", fields, "", w, r)
}
// productsEditHandler validates the submitted form and updates an existing
// product.
func productsEditHandler(w http.ResponseWriter, r *http.Request) {
	// create a product
	p := dao.NewProduct()
	// properties
	if err := r.ParseForm(); err != nil {
		handleError(err, w, r)
		return
	}
	var err error
	p.Id, err = strconv.ParseInt(r.PostFormValue("id"), 10, 64)
	if err != nil {
		handleError(err, w, r)
		return
	}
	p.Title = r.PostFormValue("title")
	p.Body = (dao.Clob)(r.PostFormValue("body"))
	// validation
	var errMsg string
	if len(p.Title) < 1 || len(p.Title) > 150 {
		errMsg = "Invalid title!"
	} else if len(p.Body) < 1 {
		// Bug fix: this branch re-checked the title although its message
		// refers to the body.
		errMsg = "Invalid body!"
	}
	if errMsg != "" {
		// Bug fix: on validation failure this handler used to show the "Add"
		// form posting to the add action; re-show the edit form instead and
		// keep the submitted values.
		fields := map[string]string{
			"id":    strconv.FormatInt(p.Id, 10),
			"title": p.Title,
			"body":  string(p.Body),
		}
		showProductForm("Edit A Product", "/web/products/actions/edit", fields, errMsg, w, r)
		return
	}
	// put to database
	pdao := dao.NewProductDAO(r)
	err = pdao.Update(p)
	if err != nil {
		handleError(err, w, r)
		return
	}
	// show successful message
	showProductMessage("Edited Successful", "A product has been updated to databases.", w, r)
}
// productsDeleteHandler deletes the product whose id was posted.
func productsDeleteHandler(w http.ResponseWriter, r *http.Request) {
	if err := r.ParseForm(); err != nil {
		handleError(err, w, r)
		return
	}
	id, err := strconv.ParseInt(r.PostFormValue("id"), 10, 64)
	if err != nil {
		handleError(err, w, r)
		return
	}
	if err = dao.NewProductDAO(r).Delete(id); err != nil {
		handleError(err, w, r)
		return
	}
	showProductMessage("Deleted Successful", "A product has been deleted.", w, r)
}
// showProductForm renders the product form template with the given title,
// target action URL, pre-filled fields and optional error message.
func showProductForm(title string, formUrl string, fields map[string]string, errMsg string, w http.ResponseWriter, r *http.Request) {
	tmpl, err := parseTemplate("productForm")
	if err != nil {
		handleError(err, w, r)
		return
	}
	data := map[string]interface{}{
		"title":   title,
		"formUrl": formUrl,
		"fields":  fields,
		"errMsg":  errMsg,
	}
	tmpl.Execute(w, data)
}
// showProductMessage renders a simple titled message page.
func showProductMessage(title string, msg string, w http.ResponseWriter, r *http.Request) {
	tmpl, err := parseTemplate("productMessage")
	if err != nil {
		handleError(err, w, r)
		return
	}
	data := map[string]string{
		"title": title,
		"msg":   msg,
	}
	tmpl.Execute(w, data)
}
|
package middleware
import (
"bytes"
"net/http"
"testing"
"github.com/stretchr/testify/require"
"github.com/root-gg/plik/server/common"
)
// TestLog verifies the access log contains the request line but not the
// request body when only Debug is enabled.
func TestLog(t *testing.T) {
	ctx := newTestingContext(common.NewConfiguration())
	log := ctx.GetLogger()
	ctx.GetConfig().Debug = true
	buffer := &bytes.Buffer{}
	log.SetOutput(buffer)
	req, err := http.NewRequest("GET", "file", bytes.NewBuffer([]byte("request body")))
	require.NoError(t, err, "unable to create new request")
	req.RequestURI = "/file"
	rr := ctx.NewRecorder(req)
	Log(ctx, common.DummyHandler).ServeHTTP(rr, req)
	require.Equal(t, http.StatusOK, rr.Code, "invalid handler response status code")
	// buffer.String() avoids the needless []byte->string round trip (S1030).
	require.Contains(t, buffer.String(), "GET /file", "invalid log message")
	require.NotContains(t, buffer.String(), "request body", "invalid log message")
}
// TestLogDebug verifies that DebugRequests logs the full request line and
// its body.
func TestLogDebug(t *testing.T) {
	ctx := newTestingContext(common.NewConfiguration())
	log := ctx.GetLogger()
	ctx.GetConfig().DebugRequests = true
	buffer := &bytes.Buffer{}
	log.SetOutput(buffer)
	req, err := http.NewRequest("GET", "/version", bytes.NewBuffer([]byte("request body")))
	require.NoError(t, err, "unable to create new request")
	rr := ctx.NewRecorder(req)
	Log(ctx, common.DummyHandler).ServeHTTP(rr, req)
	require.Equal(t, http.StatusOK, rr.Code, "invalid handler response status code")
	// buffer.String() avoids the needless []byte->string round trip (S1030).
	require.Contains(t, buffer.String(), "GET /version HTTP/1.1", "invalid log message")
	require.Contains(t, buffer.String(), "request body", "invalid log message")
}
// TestLogDebugNoBody verifies that file-upload requests never get their
// body dumped into the log, even with DebugRequests enabled.
func TestLogDebugNoBody(t *testing.T) {
	ctx := newTestingContext(common.NewConfiguration())
	log := ctx.GetLogger()
	ctx.GetConfig().DebugRequests = true
	buffer := &bytes.Buffer{}
	log.SetOutput(buffer)
	req, err := http.NewRequest("POST", "/file", bytes.NewBuffer([]byte("request body")))
	require.NoError(t, err, "unable to create new request")
	req.RequestURI = "/file"
	rr := ctx.NewRecorder(req)
	Log(ctx, common.DummyHandler).ServeHTTP(rr, req)
	require.Equal(t, http.StatusOK, rr.Code, "invalid handler response status code")
	// buffer.String() avoids the needless []byte->string round trip (S1030).
	require.Contains(t, buffer.String(), "POST /file", "invalid log message")
	require.NotContains(t, buffer.String(), "request body", "invalid log message")
}
|
package sdk
import (
"testing"
rmTesting "github.com/brigadecore/brigade/sdk/v3/internal/restmachinery/testing" // nolint: lll
"github.com/stretchr/testify/require"
)
// TestNewSystemAuthzClient checks that the constructor returns the concrete
// *systemAuthzClient type and that RoleAssignments() hands back the
// role-assignments client it was built with.
func TestNewSystemAuthzClient(t *testing.T) {
	iface := NewSystemAuthzClient(
		rmTesting.TestAPIAddress,
		rmTesting.TestAPIToken,
		nil,
	)
	concrete, ok := iface.(*systemAuthzClient)
	require.True(t, ok)
	require.Equal(t, concrete.roleAssignmentsClient, concrete.RoleAssignments())
}
|
/**
Create TYPED and UNTYPED constants. Print the values of the constants.
*/
package main
import "fmt"
const (
	// typedConstant carries an explicit type (int) and behaves like any
	// other int value in expressions.
	typedConstant int = 10
	// untypedConstant has no explicit type; it remains an untyped constant
	// and adapts to whatever type its context requires.
	untypedConstant = 20
)
// main prints the values of the typed and the untyped constant.
func main() {
	labels := []string{"Typed Constant :: ", "Untyped Constant :: "}
	values := []int{typedConstant, untypedConstant}
	for i, label := range labels {
		fmt.Println(label, values[i])
	}
}
|
package pie
//go:generate pie Strings.*

// Strings is a slice of strings on which the pie code generator produces
// helper methods (see the go:generate directive above).
type Strings []string
|
/*
* @lc app=leetcode.cn id=1122 lang=golang
*
* [1122] 数组的相对排序
*/
// @lc code=start
package main
import "sort"
import "fmt"
// main runs relativeSortArray on a sample input and prints the result.
func main() {
	// a = []int{2,3,1,3,2,4,6,7,9,2,19}
	// b = []int{2,1,4,3,9,6}
	a := []int{28, 6, 22, 8, 44, 17}
	b := []int{22, 28, 8, 6}
	fmt.Println(relativeSortArray(a, b))
}
// relativeSortArray sorts arr1 so that elements that occur in arr2 come
// first, in the relative order given by arr2, followed by all remaining
// elements in ascending order (LeetCode 1122).
//
// Fixes over the original: leftover debug fmt.Printf calls are removed, the
// hand-rolled binary search is replaced by sort.SearchInts from the standard
// library, and the result slice is preallocated to len(arr1).
func relativeSortArray(arr1 []int, arr2 []int) []int {
	// A sorted copy of arr2 enables binary-search membership tests.
	sorted2 := append([]int(nil), arr2...)
	sort.Ints(sorted2)

	// counts records how often each arr2 element occurs in arr1;
	// rest collects the elements of arr1 that are not in arr2.
	counts := make(map[int]int, len(arr2))
	var rest []int
	for _, v := range arr1 {
		if i := sort.SearchInts(sorted2, v); i < len(sorted2) && sorted2[i] == v {
			counts[v]++
		} else {
			rest = append(rest, v)
		}
	}

	// Emit arr2's elements in their given order, each repeated by its count.
	res := make([]int, 0, len(arr1))
	for _, v := range arr2 {
		for j := 0; j < counts[v]; j++ {
			res = append(res, v)
		}
	}

	// Remaining elements go last, in ascending order.
	sort.Ints(rest)
	return append(res, rest...)
}
// find reports whether target occurs in the ascending-sorted slice a,
// using iterative binary search.
func find(a []int, target int) bool {
	lo, hi := 0, len(a)-1
	for lo <= hi {
		mid := lo + (hi-lo)>>1
		switch {
		case a[mid] == target:
			return true
		case a[mid] < target:
			lo = mid + 1
		default:
			hi = mid - 1
		}
	}
	return false
}
// @lc code=end
|
package meta_test
import (
"fmt"
"reflect"
"testing"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/messagedb/messagedb/sql"
"github.com/messagedb/messagedb/meta"
)
// Ensure a node can be created.
func TestData_CreateNode(t *testing.T) {
	var data meta.Data
	if err := data.CreateNode("host0"); err != nil {
		t.Fatal(err)
	} else if !reflect.DeepEqual(data.Nodes, []meta.NodeInfo{{ID: 1, Host: "host0"}}) {
		t.Fatalf("unexpected node: %#v", data.Nodes[0])
	}
}

// Ensure a node can be removed.
func TestData_DeleteNode(t *testing.T) {
	var data meta.Data
	if err := data.CreateNode("host0"); err != nil {
		t.Fatal(err)
	} else if err = data.CreateNode("host1"); err != nil {
		t.Fatal(err)
	} else if err := data.CreateNode("host2"); err != nil {
		t.Fatal(err)
	}

	// Deleting the first node must leave the remaining nodes, and their
	// sequentially assigned IDs (2 and 3), untouched.
	if err := data.DeleteNode(1); err != nil {
		t.Fatal(err)
	} else if len(data.Nodes) != 2 {
		t.Fatalf("unexpected node count: %d", len(data.Nodes))
	} else if data.Nodes[0] != (meta.NodeInfo{ID: 2, Host: "host1"}) {
		t.Fatalf("unexpected node: %#v", data.Nodes[0])
	} else if data.Nodes[1] != (meta.NodeInfo{ID: 3, Host: "host2"}) {
		t.Fatalf("unexpected node: %#v", data.Nodes[1])
	}
}

// Ensure a database can be created.
func TestData_CreateDatabase(t *testing.T) {
	var data meta.Data
	if err := data.CreateDatabase("db0"); err != nil {
		t.Fatal(err)
	} else if !reflect.DeepEqual(data.Databases, []meta.DatabaseInfo{{Name: "db0"}}) {
		t.Fatalf("unexpected databases: %#v", data.Databases)
	}
}

// Ensure that creating a database without a name returns an error.
func TestData_CreateDatabase_ErrNameRequired(t *testing.T) {
	var data meta.Data
	if err := data.CreateDatabase(""); err != meta.ErrDatabaseNameRequired {
		t.Fatalf("unexpected error: %s", err)
	}
}

// Ensure that creating an already existing database returns an error.
func TestData_CreateDatabase_ErrDatabaseExists(t *testing.T) {
	var data meta.Data
	if err := data.CreateDatabase("db0"); err != nil {
		t.Fatal(err)
	}
	if err := data.CreateDatabase("db0"); err != meta.ErrDatabaseExists {
		t.Fatalf("unexpected error: %s", err)
	}
}

// Ensure a database can be removed.
func TestData_DropDatabase(t *testing.T) {
	var data meta.Data
	for i := 0; i < 3; i++ {
		if err := data.CreateDatabase(fmt.Sprintf("db%d", i)); err != nil {
			t.Fatal(err)
		}
	}

	// Dropping the middle database must preserve the order of the others.
	if err := data.DropDatabase("db1"); err != nil {
		t.Fatal(err)
	} else if !reflect.DeepEqual(data.Databases, []meta.DatabaseInfo{{Name: "db0"}, {Name: "db2"}}) {
		t.Fatalf("unexpected databases: %#v", data.Databases)
	}
}
// Ensure a retention policy can be created.
func TestData_CreateRetentionPolicy(t *testing.T) {
	data := meta.Data{Nodes: []meta.NodeInfo{{ID: 1}, {ID: 2}}}
	if err := data.CreateDatabase("db0"); err != nil {
		t.Fatal(err)
	}

	// Create policy.
	if err := data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{
		Name:     "rp0",
		ReplicaN: 2,
		Duration: 4 * time.Hour,
	}); err != nil {
		t.Fatal(err)
	}

	// Verify policy exists. The 1h ShardGroupDuration is derived by the meta
	// package from the 4h retention duration.
	if !reflect.DeepEqual(data.Databases[0].RetentionPolicies, []meta.RetentionPolicyInfo{
		{
			Name:               "rp0",
			ReplicaN:           2,
			Duration:           4 * time.Hour,
			ShardGroupDuration: 1 * time.Hour,
		},
	}) {
		t.Fatalf("unexpected policies: %#v", data.Databases[0].RetentionPolicies)
	}
}

// Ensure that creating a policy without a name returns an error.
func TestData_CreateRetentionPolicy_ErrNameRequired(t *testing.T) {
	data := meta.Data{Nodes: []meta.NodeInfo{{ID: 1}}}
	if err := data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: ""}); err != meta.ErrRetentionPolicyNameRequired {
		t.Fatalf("unexpected error: %s", err)
	}
}

// Ensure that creating a policy with a replication factor that doesn't match
// the number of nodes in the cluster will return an error. This is a temporary
// restriction until v0.9.1 is released.
func TestData_CreateRetentionPolicy_ErrReplicationFactorMismatch(t *testing.T) {
	data := meta.Data{
		Nodes: []meta.NodeInfo{{ID: 1}, {ID: 2}, {ID: 3}},
	}
	if err := data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0", ReplicaN: 2}); err != meta.ErrReplicationFactorMismatch {
		t.Fatalf("unexpected error: %s", err)
	}
}

// Ensure that creating a retention policy on a non-existent database returns an error.
func TestData_CreateRetentionPolicy_ErrDatabaseNotFound(t *testing.T) {
	data := meta.Data{Nodes: []meta.NodeInfo{{ID: 1}}}
	if err := data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0", ReplicaN: 1}); err != meta.ErrDatabaseNotFound {
		t.Fatalf("unexpected error: %s", err)
	}
}

// Ensure that creating an already existing policy returns an error.
func TestData_CreateRetentionPolicy_ErrRetentionPolicyExists(t *testing.T) {
	var data meta.Data
	if err := data.CreateDatabase("db0"); err != nil {
		t.Fatal(err)
	} else if err = data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0"}); err != nil {
		t.Fatal(err)
	}
	if err := data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0"}); err != meta.ErrRetentionPolicyExists {
		t.Fatalf("unexpected error: %s", err)
	}
}

// Ensure that a retention policy can be updated.
func TestData_UpdateRetentionPolicy(t *testing.T) {
	var data meta.Data
	if err := data.CreateDatabase("db0"); err != nil {
		t.Fatal(err)
	} else if err = data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0"}); err != nil {
		t.Fatal(err)
	}

	// Update the policy.
	var rpu meta.RetentionPolicyUpdate
	rpu.SetName("rp1")
	rpu.SetDuration(10 * time.Hour)
	rpu.SetReplicaN(3)
	if err := data.UpdateRetentionPolicy("db0", "rp0", &rpu); err != nil {
		t.Fatal(err)
	}

	// Verify the policy was changed.
	// NOTE(review): 604800000000000ns == 7 days — presumably the shard group
	// duration the policy was created with; confirm against the meta package.
	if rpi, _ := data.RetentionPolicy("db0", "rp1"); !reflect.DeepEqual(rpi, &meta.RetentionPolicyInfo{
		Name:               "rp1",
		Duration:           10 * time.Hour,
		ShardGroupDuration: 604800000000000,
		ReplicaN:           3,
	}) {
		t.Fatalf("unexpected policy: %#v", rpi)
	}
}

// Ensure a retention policy can be removed.
func TestData_DropRetentionPolicy(t *testing.T) {
	var data meta.Data
	if err := data.CreateDatabase("db0"); err != nil {
		t.Fatal(err)
	} else if err = data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0"}); err != nil {
		t.Fatal(err)
	}

	if err := data.DropRetentionPolicy("db0", "rp0"); err != nil {
		t.Fatal(err)
	} else if len(data.Databases[0].RetentionPolicies) != 0 {
		t.Fatalf("unexpected policies: %#v", data.Databases[0].RetentionPolicies)
	}
}

// Ensure an error is returned when deleting a policy from a non-existent database.
func TestData_DropRetentionPolicy_ErrDatabaseNotFound(t *testing.T) {
	var data meta.Data
	if err := data.DropRetentionPolicy("db0", "rp0"); err != meta.ErrDatabaseNotFound {
		t.Fatal(err)
	}
}

// Ensure an error is returned when deleting a non-existent policy.
func TestData_DropRetentionPolicy_ErrRetentionPolicyNotFound(t *testing.T) {
	var data meta.Data
	if err := data.CreateDatabase("db0"); err != nil {
		t.Fatal(err)
	}
	if err := data.DropRetentionPolicy("db0", "rp0"); err != meta.ErrRetentionPolicyNotFound {
		t.Fatal(err)
	}
}
// Ensure that a retention policy can be retrieved.
func TestData_RetentionPolicy(t *testing.T) {
	var data meta.Data
	if err := data.CreateDatabase("db0"); err != nil {
		t.Fatal(err)
	} else if err = data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0"}); err != nil {
		t.Fatal(err)
	} else if err = data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp1"}); err != nil {
		t.Fatal(err)
	}

	// Looking up "rp0" must not return "rp1".
	if rpi, err := data.RetentionPolicy("db0", "rp0"); err != nil {
		t.Fatal(err)
	} else if !reflect.DeepEqual(rpi, &meta.RetentionPolicyInfo{
		Name:               "rp0",
		ShardGroupDuration: 604800000000000,
	}) {
		t.Fatalf("unexpected value: %#v", rpi)
	}
}

// Ensure that retrieving a policy from a non-existent database returns an error.
func TestData_RetentionPolicy_ErrDatabaseNotFound(t *testing.T) {
	var data meta.Data
	if _, err := data.RetentionPolicy("db0", "rp0"); err != meta.ErrDatabaseNotFound {
		t.Fatal(err)
	}
}

// Ensure that a default retention policy can be set.
func TestData_SetDefaultRetentionPolicy(t *testing.T) {
	var data meta.Data
	if err := data.CreateDatabase("db0"); err != nil {
		t.Fatal(err)
	} else if err = data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0"}); err != nil {
		t.Fatal(err)
	}

	// Verify there is no default policy on the database initially.
	if name := data.Database("db0").DefaultRetentionPolicy; name != "" {
		t.Fatalf("unexpected initial default retention policy: %s", name)
	}

	// Set the default policy.
	if err := data.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
		t.Fatal(err)
	}

	// Verify the default policy is now set.
	if name := data.Database("db0").DefaultRetentionPolicy; name != "rp0" {
		t.Fatalf("unexpected default retention policy: %s", name)
	}
}

// Ensure that a shard group can be created on a database for a given timestamp.
func TestData_CreateShardGroup(t *testing.T) {
	var data meta.Data
	if err := data.CreateNode("node0"); err != nil {
		t.Fatal(err)
	} else if err = data.CreateNode("node1"); err != nil {
		t.Fatal(err)
	} else if err = data.CreateDatabase("db0"); err != nil {
		t.Fatal(err)
	} else if err = data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0", ReplicaN: 2, Duration: 1 * time.Hour}); err != nil {
		t.Fatal(err)
	}

	// Create shard group.
	if err := data.CreateShardGroup("db0", "rp0", time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)); err != nil {
		t.Fatal(err)
	}

	// Verify the shard group was created: ReplicaN is 2, so the single shard
	// is owned by both nodes.
	if sgi, _ := data.ShardGroupByTimestamp("db0", "rp0", time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)); !reflect.DeepEqual(sgi, &meta.ShardGroupInfo{
		ID:        1,
		StartTime: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
		EndTime:   time.Date(2000, time.January, 1, 1, 0, 0, 0, time.UTC),
		Shards: []meta.ShardInfo{
			{ID: 1, OwnerIDs: []uint64{1, 2}},
		},
	}) {
		t.Fatalf("unexpected shard group: %#v", sgi)
	} else if !sgi.Shards[0].OwnedBy(1) || !sgi.Shards[0].OwnedBy(2) || sgi.Shards[0].OwnedBy(3) {
		// Verify shard is correctly owned-by the node.
		t.Fatalf("new shard is not owned by correct node")
	}
}

// Ensure that a shard group is correctly detected as expired.
func TestData_ShardGroupExpiredDeleted(t *testing.T) {
	var data meta.Data
	if err := data.CreateNode("node0"); err != nil {
		t.Fatal(err)
	} else if err = data.CreateNode("node1"); err != nil {
		t.Fatal(err)
	} else if err = data.CreateDatabase("db0"); err != nil {
		t.Fatal(err)
	} else if err = data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0", ReplicaN: 2, Duration: 1 * time.Hour}); err != nil {
		t.Fatal(err)
	}

	// Create shard groups: one in the year 2000 (which should expire by 2001)
	// and one in 2001 (which should not).
	if err := data.CreateShardGroup("db0", "rp0", time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)); err != nil {
		t.Fatal(err)
	}
	if err := data.CreateShardGroup("db0", "rp0", time.Date(2001, time.January, 1, 0, 0, 0, 0, time.UTC)); err != nil {
		t.Fatal(err)
	}

	// Check expiration.
	rp, _ := data.RetentionPolicy("db0", "rp0")
	groups := rp.ExpiredShardGroups(time.Date(2001, time.January, 1, 0, 0, 0, 0, time.UTC))
	if len(groups) != 1 {
		t.Fatalf("wrong number of expired shard groups returned, got %d, exp 1", len(groups))
	}
	if groups[0].StartTime != time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC) {
		t.Fatal("wrong shard group marked as expired")
	}

	// Check deletion.
	if err := data.DeleteShardGroup("db0", "rp0", groups[0].ID); err != nil {
		t.Fatal(err)
	}
	groups = rp.DeletedShardGroups()
	if len(groups) != 1 {
		t.Fatalf("wrong number of deleted shard groups returned, got %d, exp 1", len(groups))
	}
	if groups[0].StartTime != time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC) {
		t.Fatal("wrong shard group marked as expired")
	}
}
// Test shard group selection.
func TestShardGroup_Overlaps(t *testing.T) {
	// Make a shard group 1 hour in duration
	startTime, _ := time.Parse(time.RFC3339, "2000-01-01T00:00:00Z")
	endTime := startTime.Add(time.Hour)
	g := &meta.ShardGroupInfo{StartTime: startTime, EndTime: endTime}

	if !g.Overlaps(g.StartTime.Add(-time.Minute), g.EndTime) {
		t.Fatal("shard group not selected when min before start time")
	}

	if !g.Overlaps(g.StartTime.Add(-time.Minute), g.StartTime) {
		t.Fatal("shard group not selected when min before start time and max equals start time")
	}

	if !g.Overlaps(g.StartTime, g.EndTime.Add(time.Minute)) {
		t.Fatal("shard group not selected when max after after end time")
	}

	if !g.Overlaps(g.StartTime.Add(-time.Minute), g.EndTime.Add(time.Minute)) {
		t.Fatal("shard group not selected when min before start time and when max after end time")
	}

	if !g.Overlaps(g.StartTime.Add(time.Minute), g.EndTime.Add(-time.Minute)) {
		t.Fatal("shard group not selected when min after start time and when max before end time")
	}

	if !g.Overlaps(g.StartTime, g.EndTime) {
		t.Fatal("shard group not selected when min at start time and when max at end time")
	}

	if !g.Overlaps(g.StartTime, g.StartTime) {
		t.Fatal("shard group not selected when min and max set to start time")
	}

	// NOTE(review): this case expects an overlap (min inside the group, max
	// after it), but the failure message reads like the opposite — confirm
	// the intended wording.
	if !g.Overlaps(g.StartTime.Add(1*time.Minute), g.EndTime.Add(24*time.Hour)) {
		t.Fatal("shard group selected when both min in range")
	}

	if g.Overlaps(g.EndTime, g.EndTime) {
		t.Fatal("shard group selected when min and max set to end time")
	}

	if g.Overlaps(g.StartTime.Add(-10*time.Hour), g.EndTime.Add(-9*time.Hour)) {
		t.Fatal("shard group selected when both min and max before shard times")
	}

	if g.Overlaps(g.StartTime.Add(24*time.Hour), g.EndTime.Add(25*time.Hour)) {
		t.Fatal("shard group selected when both min and max after shard times")
	}
}

// Ensure a shard group can be removed by ID.
func TestData_DeleteShardGroup(t *testing.T) {
	var data meta.Data
	if err := data.CreateNode("node0"); err != nil {
		t.Fatal(err)
	} else if err := data.CreateDatabase("db0"); err != nil {
		t.Fatal(err)
	} else if err = data.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0", ReplicaN: 1}); err != nil {
		t.Fatal(err)
	} else if err := data.CreateShardGroup("db0", "rp0", time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)); err != nil {
		t.Fatal(err)
	}

	if err := data.DeleteShardGroup("db0", "rp0", 1); err != nil {
		t.Fatal(err)
	}

	// Deletion is a soft delete: the group remains but is flagged Deleted.
	if sg := data.Databases[0].RetentionPolicies[0].ShardGroups[0]; !sg.Deleted() {
		t.Fatalf("shard group not correctly flagged as deleted")
	}
}
// Ensure a continuous query can be created.
// func TestData_CreateContinuousQuery(t *testing.T) {
// var data meta.Data
// if err := data.CreateDatabase("db0"); err != nil {
// t.Fatal(err)
// } else if err := data.CreateContinuousQuery("db0", "cq0", "SELECT count() FROM foo"); err != nil {
// t.Fatal(err)
// } else if !reflect.DeepEqual(data.Databases[0].ContinuousQueries, []meta.ContinuousQueryInfo{
// {Name: "cq0", Query: "SELECT count() FROM foo"},
// }) {
// t.Fatalf("unexpected queries: %#v", data.Databases[0].ContinuousQueries)
// }
// }
//
// // Ensure a continuous query can be removed.
// func TestData_DropContinuousQuery(t *testing.T) {
// var data meta.Data
// if err := data.CreateDatabase("db0"); err != nil {
// t.Fatal(err)
// } else if err := data.CreateContinuousQuery("db0", "cq0", "SELECT count() FROM foo"); err != nil {
// t.Fatal(err)
// } else if err = data.CreateContinuousQuery("db0", "cq1", "SELECT count() FROM bar"); err != nil {
// t.Fatal(err)
// }
//
// if err := data.DropContinuousQuery("db0", "cq0"); err != nil {
// t.Fatal(err)
// } else if !reflect.DeepEqual(data.Databases[0].ContinuousQueries, []meta.ContinuousQueryInfo{
// {Name: "cq1", Query: "SELECT count() FROM bar"},
// }) {
// t.Fatalf("unexpected queries: %#v", data.Databases[0].ContinuousQueries)
// }
// }
// Ensure a user can be created.
func TestData_CreateUser(t *testing.T) {
	var data meta.Data
	if err := data.CreateUser("susy", "ABC123", true); err != nil {
		t.Fatal(err)
	} else if !reflect.DeepEqual(data.Users, []meta.UserInfo{
		{Name: "susy", Hash: "ABC123", Admin: true},
	}) {
		t.Fatalf("unexpected users: %#v", data.Users)
	}
}

// Ensure that creating a user with no username returns an error.
func TestData_CreateUser_ErrUsernameRequired(t *testing.T) {
	var data meta.Data
	if err := data.CreateUser("", "", false); err != meta.ErrUsernameRequired {
		t.Fatal(err)
	}
}

// Ensure that creating the same user twice returns an error.
func TestData_CreateUser_ErrUserExists(t *testing.T) {
	var data meta.Data
	if err := data.CreateUser("susy", "", false); err != nil {
		t.Fatal(err)
	}
	if err := data.CreateUser("susy", "", false); err != meta.ErrUserExists {
		t.Fatal(err)
	}
}

// Ensure a user can be removed.
func TestData_DropUser(t *testing.T) {
	var data meta.Data
	if err := data.CreateUser("susy", "", false); err != nil {
		t.Fatal(err)
	} else if err := data.CreateUser("bob", "", false); err != nil {
		t.Fatal(err)
	}

	// Dropping "bob" must leave "susy" behind.
	if err := data.DropUser("bob"); err != nil {
		t.Fatal(err)
	} else if !reflect.DeepEqual(data.Users, []meta.UserInfo{
		{Name: "susy"},
	}) {
		t.Fatalf("unexpected users: %#v", data.Users)
	}
}

// Ensure that removing a non-existent user returns an error.
func TestData_DropUser_ErrUserNotFound(t *testing.T) {
	var data meta.Data
	if err := data.DropUser("bob"); err != meta.ErrUserNotFound {
		t.Fatal(err)
	}
}

// Ensure a user can be updated.
func TestData_UpdateUser(t *testing.T) {
	var data meta.Data
	if err := data.CreateUser("susy", "", false); err != nil {
		t.Fatal(err)
	} else if err := data.CreateUser("bob", "", false); err != nil {
		t.Fatal(err)
	}

	// Update password hash.
	if err := data.UpdateUser("bob", "XXX"); err != nil {
		t.Fatal(err)
	} else if !reflect.DeepEqual(data.User("bob"), &meta.UserInfo{Name: "bob", Hash: "XXX"}) {
		t.Fatalf("unexpected user: %#v", data.User("bob"))
	}
}

// Ensure that updating a non-existent user returns an error.
func TestData_UpdateUser_ErrUserNotFound(t *testing.T) {
	var data meta.Data
	if err := data.UpdateUser("bob", "ZZZ"); err != meta.ErrUserNotFound {
		t.Fatal(err)
	}
}
// Ensure the data can be deeply copied.
func TestData_Clone(t *testing.T) {
	data := meta.Data{
		Term:  10,
		Index: 20,
		Nodes: []meta.NodeInfo{
			{ID: 1, Host: "host0"},
			{ID: 2, Host: "host1"},
		},
		Databases: []meta.DatabaseInfo{
			{
				Name: "db0",
				DefaultRetentionPolicy: "default",
				RetentionPolicies: []meta.RetentionPolicyInfo{
					{
						Name:               "rp0",
						ReplicaN:           3,
						Duration:           10 * time.Second,
						ShardGroupDuration: 3 * time.Millisecond,
						ShardGroups: []meta.ShardGroupInfo{
							{
								ID:        100,
								StartTime: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
								EndTime:   time.Date(2000, time.February, 1, 0, 0, 0, 0, time.UTC),
								Shards: []meta.ShardInfo{
									{
										ID:       200,
										OwnerIDs: []uint64{1, 3, 4},
									},
								},
							},
						},
					},
				},
				// ContinuousQueries: []meta.ContinuousQueryInfo{
				// 	{Query: "SELECT count() FROM foo"},
				// },
			},
		},
		Users: []meta.UserInfo{
			{
				Name:       "susy",
				Hash:       "ABC123",
				Admin:      true,
				Privileges: map[string]sql.Privilege{"db0": sql.AllPrivileges},
			},
		},
	}

	// Copy the root structure.
	other := data.Clone()
	if !reflect.DeepEqual(data.Nodes, other.Nodes) {
		t.Fatalf("unexpected cloned nodes: %#v", other.Nodes)
	} else if !reflect.DeepEqual(data.Databases, other.Databases) {
		t.Fatalf("unexpected cloned databases: %#v", other.Databases)
	} else if !reflect.DeepEqual(data.Users, other.Users) {
		t.Fatalf("unexpected cloned users: %#v", other.Users)
	}

	// Ensure that changing data in the clone does not affect the original.
	// Mutating the deepest nested slice is the strongest deep-copy check.
	other.Databases[0].RetentionPolicies[0].ShardGroups[0].Shards[0].OwnerIDs[1] = 9
	if v := data.Databases[0].RetentionPolicies[0].ShardGroups[0].Shards[0].OwnerIDs[1]; v != 3 {
		t.Fatalf("editing clone changed original: %v", v)
	}
}
// Ensure the data can be marshaled and unmarshaled.
func TestData_MarshalBinary(t *testing.T) {
	data := meta.Data{
		Term:  10,
		Index: 20,
		Nodes: []meta.NodeInfo{
			{ID: 1, Host: "host0"},
			{ID: 2, Host: "host1"},
		},
		Databases: []meta.DatabaseInfo{
			{
				Name: "db0",
				DefaultRetentionPolicy: "default",
				RetentionPolicies: []meta.RetentionPolicyInfo{
					{
						Name:               "rp0",
						ReplicaN:           3,
						Duration:           10 * time.Second,
						ShardGroupDuration: 3 * time.Millisecond,
						ShardGroups: []meta.ShardGroupInfo{
							{
								ID:        100,
								StartTime: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
								EndTime:   time.Date(2000, time.February, 1, 0, 0, 0, 0, time.UTC),
								Shards: []meta.ShardInfo{
									{
										ID:       200,
										OwnerIDs: []uint64{1, 3, 4},
									},
								},
							},
						},
					},
				},
				// ContinuousQueries: []meta.ContinuousQueryInfo{
				// 	{Query: "SELECT count() FROM foo"},
				// },
			},
		},
		Users: []meta.UserInfo{
			{
				Name:       "susy",
				Hash:       "ABC123",
				Admin:      true,
				Privileges: map[string]sql.Privilege{"db0": sql.AllPrivileges},
			},
		},
	}

	// Marshal the data structure.
	buf, err := data.MarshalBinary()
	if err != nil {
		t.Fatal(err)
	}

	// Unmarshal into new data and verify the round trip preserved everything.
	var other meta.Data
	if err := other.UnmarshalBinary(buf); err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual(data.Nodes, other.Nodes) {
		t.Fatalf("unexpected nodes: %#v", other.Nodes)
	} else if !reflect.DeepEqual(data.Databases, other.Databases) {
		// Dump both sides to help diagnose which nested field diverged.
		spew.Dump(data.Databases)
		spew.Dump(other.Databases)
		t.Fatalf("unexpected databases: %#v", other.Databases)
	} else if !reflect.DeepEqual(data.Users, other.Users) {
		t.Fatalf("unexpected users: %#v", other.Users)
	}
}
|
package go_ping_sweep
import (
"errors"
"fmt"
"strings"
)
// Table accumulates tabular data (a title, a header row and data rows) and
// can render it as plain text.
type Table struct {
	Title  string     // table title
	Header []string   // table header (column names)
	Data   [][]string // table rows (row-major)
	tail   int        // index one past the last populated data row
}

// SetTitle sets the table title.
func (t *Table) SetTitle(title string) {
	t.Title = title
}

// SetHeader sets the table header (column names).
func (t *Table) SetHeader(header ...string) {
	t.Header = header
}

// AddData appends one data row to the table. Values beyond the number of
// header columns are dropped; missing trailing values are left empty. It
// returns an error if the header has not been set yet.
//
// Fix over the original: rows are appended on demand instead of indexing into
// a fixed 10-row pre-allocated slice, which panicked with an index
// out-of-range once an 11th row was added.
func (t *Table) AddData(data ...string) error {
	// if the header is not set return err
	if t.Header == nil {
		return errors.New("Header is not set")
	}
	// add the data to the table data
	if data != nil {
		row := make([]string, len(t.Header))
		for i := 0; i < len(data) && i < len(t.Header); i++ {
			row[i] = data[i]
		}
		t.Data = append(t.Data, row)
		t.tail++
	}
	return nil
}
// CreateTable prints the table to stdout: an optional title banner, a blank
// line, the header row, then every data row, with columns separated by " | ".
func (t *Table) CreateTable() {
	// Optional title banner.
	if t.Title != "" {
		fmt.Println("=========" + t.Title + "==============")
	}

	// Header row first.
	fmt.Println("")
	for _, h := range t.Header {
		fmt.Print(h + " | ")
	}
	fmt.Println("")

	// Then each populated data row.
	for row := 0; row < t.tail; row++ {
		for col := 0; col < len(t.Header); col++ {
			fmt.Print(t.Data[row][col] + " | ")
		}
		fmt.Println("")
	}
	fmt.Println("")
}
// PrintTable prints every cell of every populated data row to stdout,
// with no separators (cells and rows run together).
func (t *Table) PrintTable() {
	for row := 0; row < t.tail; row++ {
		for col := 0; col < len(t.Header); col++ {
			fmt.Print(t.Data[row][col])
		}
	}
}
// PrintHeader prints each header column name on its own line.
func (t *Table) PrintHeader() {
	for _, h := range t.Header {
		fmt.Println(h)
	}
}
// getCoulumnIndex returns the index of the given header name within
// t.Header, or -1 if no such column exists.
// For a header row "TimePing | DataSize | PacketSize | Status",
// getCoulumnIndex("Status") returns 3.
//
// Fix over the original: the loop now iterates over the header columns
// (len(t.Header)) instead of the number of data rows (t.tail) — previously a
// lookup on a table with zero rows always failed, and a table with more rows
// than columns read t.Header out of range.
func (t *Table) getCoulumnIndex(header string) int {
	if t != nil {
		for i := 0; i < len(t.Header); i++ {
			if strings.Compare(t.Header[i], header) == 0 {
				return i
			}
		}
	}
	return -1
}
// GetColumn returns the values of the named column, one entry per populated
// data row. If the column does not exist an empty slice is returned.
//
// Fix over the original: the result is sized to the number of rows (t.tail)
// instead of a fixed length of 10, which both padded short tables with empty
// strings and panicked with an index out-of-range once more than 10 rows
// were present.
func (t *Table) GetColumn(columHeader string) []string {
	headerIndex := t.getCoulumnIndex(columHeader)
	if headerIndex == -1 {
		return []string{}
	}
	// retrieve all the elements from the column.
	columnData := make([]string, t.tail)
	for i := 0; i < t.tail; i++ {
		columnData[i] = t.Data[i][headerIndex]
	}
	return columnData
}
|
package main
import (
"fmt"
)
// main demonstrates Go array basics: zero-value declaration, element
// assignment, printing the whole array, and reading one element back.
func main() {
	var a [3]int
	a[1], a[2] = 1, 3
	fmt.Println(a)

	v := a[1]
	fmt.Println(v)
}
|
/*
* Create a new server. Does not support creation of bare-metal servers (yet).
*/
package cmd
import (
"encoding/hex"
"log"
"time"
"github.com/grrtrr/clcv2"
"github.com/spf13/cobra"
)
// createFlags wraps the command-line flags used by the Create command
// (registered in init below).
var createFlags struct {
	srcPass    string        // when using a source-server, use this password
	seed       string        // 4-6 character name seed for the server name
	desc       string        // description of the server
	primDNS    string        // primary DNS
	secDNS     string        // secondary DNS
	net        string        // ID or name of the network to use
	password   string        // desired password to use
	serverType string        // server type: standard, hyperscale, or bareMetal
	numCpu     uint8         // number of CPU cores to use
	memGB      uint32        // amount of memory in GB
	extraDrv   uint32        // extra amount of storage in GB
	ttl        time.Duration // time span (counting from time of creation) until server gets deleted
}
// init registers the command-line flags of the Create command and attaches
// it to the root command.
//
// Fix over the original: several user-facing help strings had the Go
// identifier prefix "createFlags." leaked into them by a careless
// search-and-replace (e.g. "use this createFlags.password"); the help text
// now reads normally.
func init() {
	Create.Flags().StringVar(&createFlags.srcPass, "srcPass", "", "When cloning from a source-server, use this password")
	Create.Flags().StringVarP(&createFlags.seed, "seed", "s", "AUTO", "The seed for the server name")
	Create.Flags().StringVar(&createFlags.desc, "desc", "", "Textual description of the server")
	Create.Flags().StringVar(&createFlags.net, "net", "", "ID or name of the Network to use")
	Create.Flags().StringVar(&createFlags.primDNS, "dns1", "8.8.8.8", "Primary DNS to use")
	Create.Flags().StringVar(&createFlags.secDNS, "dns2", "8.8.4.4", "Secondary DNS to use")
	Create.Flags().StringVar(&createFlags.password, "pass", "", "Desired password. Leave blank to auto-generate")
	Create.Flags().StringVar(&createFlags.serverType, "type", "standard", "The type of server to create (standard, hyperscale, or bareMetal)")
	Create.Flags().Uint8Var(&createFlags.numCpu, "cpu", 1, "Number of Cpus to use")
	Create.Flags().Uint32Var(&createFlags.memGB, "memory", 4, "Amount of memory in GB")
	Create.Flags().Uint32Var(&createFlags.extraDrv, "drive", 0, "Extra storage (in GB) to add to server as a raw disk")
	Create.Flags().DurationVar(&createFlags.ttl, "ttl", 0, "Time span (counting from time of creation) until server gets deleted")

	Root.AddCommand(Create)
}
// Create is the cobra command that creates a new server from a source server
// or template and places it into a destination hardware group. Group and
// network arguments may be given either as hex IDs or as names, which are
// resolved via the API before the request is submitted.
var Create = &cobra.Command{
	Use:     "create <source|template name> <destFolder>",
	Short:   "Create server from template/source",
	Long:    "Create a new server from @srcName (server or template) and put it into @dstFolder",
	PreRunE: checkArgs(2, "Need a source (template) name and a destination folder"),
	Run: func(cmd *cobra.Command, args []string) {
		var srcServer, hwGroup = args[0], args[1]

		// hwGroup may be hex uuid or group name; anything that is not valid
		// hex is treated as a name and resolved within conf.Location.
		if _, err := hex.DecodeString(hwGroup); err != nil {
			log.Printf("Resolving ID of Hardware Group %q ...", hwGroup)

			if group, err := client.GetGroupByName(hwGroup, conf.Location); err != nil {
				log.Fatalf("failed to resolve group name %q: %s", hwGroup, err)
			} else if group == nil {
				log.Fatalf("no group named %q was found in %s", hwGroup, conf.Location)
			} else {
				hwGroup = group.Id
			}
		}

		// createFlags.net is supposed to be a (hex) ID, but allow network names, too
		if createFlags.net != "" {
			if _, err := hex.DecodeString(createFlags.net); err == nil {
				/* already looks like a HEX ID */
			} else if conf.Location == "" {
				log.Fatalf("Need a location argument (-l) if not using a network ID (%s)", createFlags.net)
			} else {
				log.Printf("resolving network id of %q ...", createFlags.net)

				if netw, err := client.GetNetworkIdByName(createFlags.net, conf.Location); err != nil {
					log.Fatalf("failed to resolve network name %q: %s", createFlags.net, err)
				} else if netw == nil {
					log.Fatalf("No network named %q was found in %s", createFlags.net, conf.Location)
				} else {
					createFlags.net = netw.Id
				}
			}
		}

		req := clcv2.CreateServerReq{
			// Name of the server to create. Alphanumeric characters and dashes only.
			Name: createFlags.seed,

			// User-defined description of this server
			Description: createFlags.desc,

			// ID of the parent HW group.
			GroupId: hwGroup,

			// ID of the server to use a source. May be the ID of a srcServer, or when cloning, an existing server ID.
			SourceServerId: srcServer,

			// The primary DNS to set on the server
			PrimaryDns: createFlags.primDNS,

			// The secondary DNS to set on the server
			SecondaryDns: createFlags.secDNS,

			// ID of the network to which to deploy the server.
			NetworkId: createFlags.net,

			// Password of administrator or root user on server.
			Password: createFlags.password,

			// Password of the source server, used only when creating a clone from an existing server.
			SourceServerPassword: createFlags.srcPass,

			// Number of processors to configure the server with (1-16)
			Cpu: int(createFlags.numCpu),

			// Number of GB of memory to configure the server with (1-128)
			MemoryGB: int(createFlags.memGB),

			// Whether to create a 'standard', 'hyperscale', or 'bareMetal' server
			Type: createFlags.serverType,

			// FIXME: the following are not populated in this request:
			// - IpAddress
			// - IsManagedOs
			// - IsManagedBackup
			// - AntiAffinityPolicyId
			// - CpuAutoscalePolicyId
			// - CustomFields
			// - Packages
			//
			// The following items relevant specific to bare-metal servers are also ignored:
			// - ConfigurationId
			// - OsType
		}

		// Optional extra raw disk.
		if createFlags.extraDrv != 0 {
			req.AdditionalDisks = append(req.AdditionalDisks,
				clcv2.ServerAdditionalDisk{SizeGB: createFlags.extraDrv, Type: "raw"})
		}

		// Date/time that the server should be deleted.
		if createFlags.ttl != 0 {
			req.Ttl = new(time.Time)
			*req.Ttl = time.Now().Add(createFlags.ttl)
		}

		// The CreateServer request resolves the server name at the end.
		// This second call can fail at the remote end; it does not mean that
		// the server has not been created yet.
		url, reqID, err := client.CreateServer(&req)
		if err != nil {
			log.Fatalf("failed to create server: %s", err)
		}
		log.Printf("Status Id: %s", reqID)
		client.PollStatus(reqID, 5*time.Second)

		// Print details after job completes
		server, err := client.GetServerByURI(url)
		if err != nil {
			log.Fatalf("failed to query server details at %s: %s", url, err)
		}
		showServer(client, server)
	},
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
package types
import (
"bytes"
"encoding/json"
"errors"
"io"
"strconv"
"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator"
"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/zerotermsquery"
)
// MatchQuery type.
//
// https://github.com/elastic/elasticsearch-specification/blob/33e8a1c9cad22a5946ac735c4fba31af2da2cec2/specification/_types/query_dsl/fulltext.ts#L133-L158
type MatchQuery struct {
	// Analyzer converts the query text into tokens; defaults to the field's analyzer.
	Analyzer *string `json:"analyzer,omitempty"`
	AutoGenerateSynonymsPhraseQuery *bool    `json:"auto_generate_synonyms_phrase_query,omitempty"`
	Boost                           *float32 `json:"boost,omitempty"`
	// CutoffFrequency is accepted for compatibility — presumably deprecated upstream; verify against the ES spec.
	CutoffFrequency     *Float64  `json:"cutoff_frequency,omitempty"`
	Fuzziness           Fuzziness `json:"fuzziness,omitempty"`
	FuzzyRewrite        *string   `json:"fuzzy_rewrite,omitempty"`
	FuzzyTranspositions *bool     `json:"fuzzy_transpositions,omitempty"`
	Lenient             *bool     `json:"lenient,omitempty"`
	MaxExpansions       *int      `json:"max_expansions,omitempty"`
	MinimumShouldMatch  MinimumShouldMatch `json:"minimum_should_match,omitempty"`
	Operator            *operator.Operator `json:"operator,omitempty"`
	PrefixLength        *int               `json:"prefix_length,omitempty"`
	// Query is the only required field; a bare JSON string deserializes into it (see UnmarshalJSON).
	Query      string                         `json:"query"`
	QueryName_ *string                        `json:"_name,omitempty"`
	ZeroTermsQuery *zerotermsquery.ZeroTermsQuery `json:"zero_terms_query,omitempty"`
}
// UnmarshalJSON implements custom decoding for MatchQuery.
//
// A MatchQuery may be serialized either as a bare JSON string (shorthand
// for {"query": "..."}) or as a full object; the prefix check below tells
// the two apart. In the object form several fields accept both their
// native JSON type and a string representation, so those cases decode into
// an intermediate value and convert as needed.
//
// Review fix: the intermediate dec.Decode(&tmp) calls previously dropped
// their error, silently skipping malformed values; they now return it.
// (This file is generated — the same fix belongs in the generator.)
func (s *MatchQuery) UnmarshalJSON(data []byte) error {

	// Shorthand form: a bare JSON string is the query text itself.
	if !bytes.HasPrefix(data, []byte(`{`)) {
		err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Query)
		return err
	}

	dec := json.NewDecoder(bytes.NewReader(data))

	for {
		t, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}

		switch t {

		case "analyzer":
			var tmp json.RawMessage
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			// Unquote when the raw value is a JSON string; fall back to the
			// raw bytes otherwise.
			o := string(tmp[:])
			o, err = strconv.Unquote(o)
			if err != nil {
				o = string(tmp[:])
			}
			s.Analyzer = &o

		case "auto_generate_synonyms_phrase_query":
			var tmp interface{}
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseBool(v)
				if err != nil {
					return err
				}
				s.AutoGenerateSynonymsPhraseQuery = &value
			case bool:
				s.AutoGenerateSynonymsPhraseQuery = &v
			}

		case "boost":
			var tmp interface{}
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseFloat(v, 32)
				if err != nil {
					return err
				}
				f := float32(value)
				s.Boost = &f
			case float64:
				f := float32(v)
				s.Boost = &f
			}

		case "cutoff_frequency":
			var tmp interface{}
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseFloat(v, 64)
				if err != nil {
					return err
				}
				f := Float64(value)
				s.CutoffFrequency = &f
			case float64:
				f := Float64(v)
				s.CutoffFrequency = &f
			}

		case "fuzziness":
			if err := dec.Decode(&s.Fuzziness); err != nil {
				return err
			}

		case "fuzzy_rewrite":
			if err := dec.Decode(&s.FuzzyRewrite); err != nil {
				return err
			}

		case "fuzzy_transpositions":
			var tmp interface{}
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseBool(v)
				if err != nil {
					return err
				}
				s.FuzzyTranspositions = &value
			case bool:
				s.FuzzyTranspositions = &v
			}

		case "lenient":
			var tmp interface{}
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseBool(v)
				if err != nil {
					return err
				}
				s.Lenient = &value
			case bool:
				s.Lenient = &v
			}

		case "max_expansions":
			var tmp interface{}
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			switch v := tmp.(type) {
			case string:
				value, err := strconv.Atoi(v)
				if err != nil {
					return err
				}
				s.MaxExpansions = &value
			case float64:
				f := int(v)
				s.MaxExpansions = &f
			}

		case "minimum_should_match":
			if err := dec.Decode(&s.MinimumShouldMatch); err != nil {
				return err
			}

		case "operator":
			if err := dec.Decode(&s.Operator); err != nil {
				return err
			}

		case "prefix_length":
			var tmp interface{}
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			switch v := tmp.(type) {
			case string:
				value, err := strconv.Atoi(v)
				if err != nil {
					return err
				}
				s.PrefixLength = &value
			case float64:
				f := int(v)
				s.PrefixLength = &f
			}

		case "query":
			var tmp json.RawMessage
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			o := string(tmp[:])
			o, err = strconv.Unquote(o)
			if err != nil {
				o = string(tmp[:])
			}
			s.Query = o

		case "_name":
			var tmp json.RawMessage
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			o := string(tmp[:])
			o, err = strconv.Unquote(o)
			if err != nil {
				o = string(tmp[:])
			}
			s.QueryName_ = &o

		case "zero_terms_query":
			if err := dec.Decode(&s.ZeroTermsQuery); err != nil {
				return err
			}

		}
	}
	return nil
}
// NewMatchQuery returns a zero-valued MatchQuery ready for configuration.
func NewMatchQuery() *MatchQuery {
	return &MatchQuery{}
}
|
package main
import (
"fmt"
)
// main prints the two halves of the full-name literal, split at byte
// index 7: first "Sutawee", then " Taenkratok" (leading space included).
func main() {
	const fullName = "Sutawee Taenkratok"
	fmt.Println(fullName[:7])
	fmt.Println(fullName[7:])
}
|
package main
import (
"math/rand"
"time"
"github.com/forestgiant/eff"
"github.com/forestgiant/eff/sdl"
)
const (
windowW = 1024
windowH = 768
pixelSize = 3
)
// sprite is a pixel-art animation: each frame is a list of points in
// sprite-local pixel-grid coordinates that should be filled on screen.
type sprite struct {
	frames [][]eff.Point // one point list per animation frame
	frameIndex int // index into frames of the frame currently drawn
	ticks int // NOTE(review): never read or written in this file
	frameTickCount int // NOTE(review): newSprite drops its frameTickCount argument, so this stays 0 here
	point eff.Point // top-left canvas position of the sprite, in pixels
}
// getRects translates the current frame's points into screen-space
// pixel rectangles, offset by the sprite's position and scaled by
// pixelSize.
func (s *sprite) getRects() []eff.Rect {
	frame := s.frames[s.frameIndex]
	rects := make([]eff.Rect, len(frame))
	for i, p := range frame {
		rects[i] = eff.Rect{
			X: s.point.X + p.X*pixelSize,
			Y: s.point.Y + p.Y*pixelSize,
			W: pixelSize,
			H: pixelSize,
		}
	}
	return rects
}
// newSprite builds a sprite from its animation frames, starting on frame 0.
//
// frameTickCount is the number of update ticks between frame advances.
// Review fix: the parameter was previously ignored, leaving the struct
// field at its zero value; it is now stored on the sprite.
func newSprite(frames [][]eff.Point, frameTickCount int) *sprite {
	return &sprite{
		frames:         frames,
		frameIndex:     0,
		frameTickCount: frameTickCount,
	}
}
// main tiles the window with space-invader sprites and flips all of them
// between two animation frames on a shared tick counter.
func main() {
	canvas := sdl.NewCanvas("Eff Invader Art", windowW, windowH, eff.Color{R: 0x00, B: 0x00, G: 0x00, A: 0xFF}, 60, true)
	canvas.Run(func() {
		canvas.SetPrintFPS(true)
		rand.Seed(time.Now().UnixNano())
		white := eff.Color{R: 0xFF, G: 0xFF, B: 0xFF, A: 0xFF}
		// Sprite cell geometry: an 11x8 pixel-grid sprite scaled by
		// pixelSize, with one scaled pixel of padding on each side.
		sizeX := 11 * pixelSize
		sizeY := 8 * pixelSize
		padding := 1 * pixelSize
		w := sizeX + (padding * 2)
		h := sizeY + (padding * 2)
		// +2 so partially-visible cells at the right/bottom edges are covered.
		cols := (windowW / w) + 2
		rows := (windowH / h) + 2
		invaders := []*sprite{}
		// Lay the invaders out on a regular grid, row by row.
		for j := 0; j < rows; j++ {
			for i := 0; i < cols; i++ {
				invader := newSprite(Invader1, 10)
				invader.point = eff.Point{
					X: i * w,
					Y: j * h,
				}
				invaders = append(invaders, invader)
			}
		}
		// Shared animation clock: every frameTickCount update ticks, all
		// invaders advance to the next of their two frames and the canvas
		// is redrawn. (The per-sprite frameTickCount field is not used here.)
		ticks := 0
		frameTickCount := 10
		frameIndex := 0
		canvas.SetUpdateHandler(func() {
			ticks++
			if ticks == frameTickCount {
				ticks = 0
				frameIndex = (frameIndex + 1) % 2
				canvas.Clear()
				rects := []eff.Rect{}
				for _, inv := range invaders {
					inv.frameIndex = frameIndex
					rects = append(rects, inv.getRects()...)
				}
				canvas.FillRects(rects, white)
			}
		})
		// Initial draw of frame 0 before the first animation tick fires.
		canvas.Clear()
		rects := []eff.Rect{}
		for _, inv := range invaders {
			rects = append(rects, inv.getRects()...)
		}
		canvas.FillRects(rects, white)
	})
}
|
package handshake
import (
"bytes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Specs for cryptoStreamConn, which adapts a crypto stream to a
// connection-like interface: writes are buffered until Flush, reads pass
// through to the underlying stream.
var _ = Describe("Crypto Stream Conn", func() {
	var (
		stream *bytes.Buffer // stands in for the underlying crypto stream
		csc    *cryptoStreamConn // unit under test, recreated per spec
	)

	BeforeEach(func() {
		stream = &bytes.Buffer{}
		csc = newCryptoStreamConn(stream)
	})

	It("buffers writes", func() {
		// Writes must not reach the stream until Flush is called, and
		// Flush must emit them in order as one contiguous byte sequence.
		_, err := csc.Write([]byte("foo"))
		Expect(err).ToNot(HaveOccurred())
		Expect(stream.Len()).To(BeZero())
		_, err = csc.Write([]byte("bar"))
		Expect(err).ToNot(HaveOccurred())
		Expect(stream.Len()).To(BeZero())
		Expect(csc.Flush()).To(Succeed())
		Expect(stream.Bytes()).To(Equal([]byte("foobar")))
	})

	It("reads from the stream", func() {
		// Reads are unbuffered: data placed on the stream is returned as-is.
		stream.Write([]byte("foobar"))
		b := make([]byte, 6)
		n, err := csc.Read(b)
		Expect(err).ToNot(HaveOccurred())
		Expect(n).To(Equal(6))
		Expect(b).To(Equal([]byte("foobar")))
	})
})
|
package jcmd
import (
"github.com/chroblert/jgoutils/jconfig"
"github.com/chroblert/jgoutils/jlog"
"github.com/spf13/cobra"
)
// Persistent flag storage for the root command (bound in init below).
var (
	cfgFile   string // path to the JSON configuration file (--config)
	isVerbose bool   // enable debug-level logging (--verbose / -v)
)
// RootCmd is the entry point of the Z0SecT00ls CLI. Its Run hook sets the
// log level from --verbose, loads the configuration file, and prints usage
// when invoked without arguments.
var RootCmd = &cobra.Command{
	Use:   "Z0SecT00ls",
	Short: "Zer0ne Sec T00ls",
	Long:  "A tools that contains some attack tool....developing",
	Run: func(cmd *cobra.Command, args []string) {
		// Pick the log level from the --verbose flag.
		level := jlog.INFO
		if isVerbose {
			level = jlog.DEBUG
		}
		jlog.SetLevel(level)

		jlog.Debug("初始化配置文件")
		jconfig.InitWithFile(cfgFile)
		jlog.Debug("配置文件加载成功")

		// With no arguments there is nothing to run: show the help text.
		if len(args) == 0 {
			cmd.Help()
			return
		}
	},
}
// init binds the persistent --config and --verbose flags to their
// package-level storage before any command runs.
func init() {
	flags := RootCmd.PersistentFlags()
	flags.StringVar(&cfgFile, "config", "conf/config.json", "config file")
	flags.BoolVarP(&isVerbose, "verbose", "v", false, "verbose msg")
}
|
package series
import (
"fmt"
)
// All returns every contiguous substring of length n in s, in order of
// appearance. When s has no window of length n (n > len(s)), nil is
// returned.
//
// Review fixes: removed a leftover debug fmt.Println of the result length,
// and preallocated the result slice to its exact final capacity.
func All(n int, s string) []string {
	count := len(s) - n + 1
	if count <= 0 {
		return nil
	}
	res := make([]string, 0, count)
	for i := 0; i < count; i++ {
		res = append(res, s[i:i+n])
	}
	return res
}
// UnsafeFirst returns the first n bytes of s. It performs no bounds
// checking; callers must guarantee 0 <= n <= len(s) or it panics.
func UnsafeFirst(n int, s string) string {
	return s[:n]
}
// First returns the first n bytes of s together with an ok flag.
// When n is out of range (negative or longer than s), the original string
// is returned unchanged with ok == false.
func First(n int, s string) (string, bool) {
	inRange := n >= 0 && n <= len(s)
	if !inRange {
		return s, false
	}
	return s[:n], true
}
// series demonstrates UnsafeFirst on a sample digit string.
func series() {
	first := UnsafeFirst(3, "49142")
	fmt.Println(first)
}
|
/*
* Copyright (c) 2014-2015, Yawning Angel <yawning at torproject dot org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
// Go language Tor Pluggable Transport suite. Works only as a managed
// client/server.
package transparent_udp
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"net"
"net/url"
"github.com/OperatorFoundation/shapeshifter-dispatcher/common/log"
"github.com/OperatorFoundation/shapeshifter-dispatcher/common/pt_extras"
"github.com/OperatorFoundation/shapeshifter-dispatcher/modes"
"github.com/kataras/golog"
)
// ClientSetup starts the client side of the transparent UDP proxy mode,
// delegating per-listener handling to clientHandler. The returned bool is
// whatever modes.ClientSetupUDP reports — presumably whether setup
// succeeded; confirm against that package.
func ClientSetup(socksAddr string, ptClientProxy *url.URL, names []string, options string) bool {
	return modes.ClientSetupUDP(socksAddr, ptClientProxy, names, options, clientHandler)
}
// clientHandler pumps UDP datagrams from the local listener into
// length-prefixed writes (2-byte little-endian length, then payload) on
// per-peer transport connections tracked in a ConnTracker. Packets that
// arrive before a peer's transport connection is ready are dropped.
//
// Review fixes over the previous version:
//   - a failed ReadFromUDP now skips the iteration; previously execution
//     continued with a nil addr (panicking on addr.String()) and a stale
//     buffer
//   - after the listener conn is closed the handler returns instead of
//     spinning forever on read errors
//   - leftover debug prints (the whole tracker map and write sizes) were
//     removed
func clientHandler(name string, options string, conn *net.UDPConn, proxyURI *url.URL) {
	var length16 uint16

	tracker := make(modes.ConnTracker)
	buf := make([]byte, 1024)

	// Receive UDP packets and forward them over transport connections forever.
	for {
		numBytes, addr, err := conn.ReadFromUDP(buf)
		if err != nil {
			fmt.Println("Error: ", err)
			continue
		}
		goodBytes := buf[:numBytes]

		state, ok := tracker[addr.String()]
		if !ok {
			// No transport connection and no attempt in progress: start an
			// asynchronous connection attempt and drop this packet.
			modes.OpenConnection(&tracker, addr.String(), name, options, proxyURI, false, "")
			continue
		}
		if state.Waiting {
			// The connection attempt is in progress; drop the packet.
			continue
		}

		// Send the packet through the transport, length prefix first.
		length16 = uint16(numBytes)
		lengthBuf := new(bytes.Buffer)
		if err = binary.Write(lengthBuf, binary.LittleEndian, length16); err != nil {
			fmt.Println("binary.Write failed:", err)
			continue
		}
		if _, writErr := state.Conn.Write(lengthBuf.Bytes()); writErr != nil {
			continue
		}
		if _, writeBufErr := state.Conn.Write(goodBytes); writeBufErr != nil {
			// The transport is broken; tear everything down. With the
			// listener closed no further progress is possible, so return.
			_ = state.Conn.Close()
			_ = conn.Close()
			return
		}
	}
}
// ServerSetup starts the server side of the transparent UDP proxy mode,
// delegating per-connection handling to serverHandler. launched reports
// whatever modes.ServerSetupUDP returns.
func ServerSetup(ptServerInfo pt_extras.ServerInfo, stateDir string, options string) (launched bool) {
	return modes.ServerSetupUDP(ptServerInfo, stateDir, options, serverHandler)
}
// serverHandler drains one inbound transport connection: it reads
// length-prefixed records (2-byte little-endian length, then payload) from
// remote and relays each payload as a UDP datagram to the OR address in
// info. It returns when the remote stream ends or a framing error occurs.
func serverHandler(name string, remote net.Conn, info *pt_extras.ServerInfo) {
	var length16 uint16

	addrStr := log.ElideAddr(remote.RemoteAddr().String())
	fmt.Println("### handling", name)
	golog.Infof("%s(%s) - new connection", name, addrStr)

	// Resolve the upstream (OR) address and bind a local ephemeral UDP port
	// to relay datagrams from. golog.Fatal aborts the process on failure.
	serverAddr, err := net.ResolveUDPAddr("udp", info.OrAddr.String())
	if err != nil {
		golog.Fatal(err)
	}
	localAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0")
	if err != nil {
		golog.Fatal(err)
	}
	dest, err := net.DialUDP("udp", localAddr, serverAddr)
	if err != nil {
		golog.Fatal(err)
	}

	fmt.Println("pumping")
	lengthBuffer := make([]byte, 2)
	for {
		fmt.Println("reading...")
		// Read the 2-byte length prefix. io.ReadFull only returns nil when
		// the buffer is completely filled.
		readLen, err := io.ReadFull(remote, lengthBuffer)
		if err != nil {
			fmt.Println("read error")
			break
		}
		fmt.Println(readLen)

		err = binary.Read(bytes.NewReader(lengthBuffer), binary.LittleEndian, &length16)
		if err != nil {
			fmt.Println("deserialization error")
			_ = dest.Close()
			return
		}

		// Read exactly length16 payload bytes.
		fmt.Println("reading data")
		fmt.Println(length16)
		readBuffer := make([]byte, length16)
		readLen, err = io.ReadFull(remote, readBuffer)
		if err != nil {
			fmt.Println("read error")
			break
		}
		// NOTE(review): io.ReadFull guarantees readLen == len(readBuffer)
		// whenever err is nil, so this short-read branch is effectively dead.
		if readLen != int(length16) {
			println("short read")
			break
		}

		// Best-effort relay: write errors to the UDP socket are ignored.
		_, _ = dest.Write(readBuffer)
	}
	_ = dest.Close()
}
|
// Copyright © 2017 Aeneas Rekkas <aeneas+oss@aeneas.io>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package oauth2
import (
"net/url"
"time"
"github.com/gorilla/sessions"
"github.com/ory/fosite"
"github.com/ory/herodot"
"github.com/ory/hydra/firewall"
"github.com/sirupsen/logrus"
)
// Handler bundles the dependencies of the OAuth2 HTTP endpoints.
type Handler struct {
	OAuth2 fosite.OAuth2Provider // underlying OAuth2 provider (fosite)
	Consent ConsentStrategy // strategy for the consent flow
	H herodot.Writer // HTTP response writer helper
	ForcedHTTP bool // presumably allows plain HTTP when set — confirm at call sites
	ConsentURL url.URL // URL the user is redirected to for consent
	AccessTokenLifespan time.Duration // validity period of issued access tokens
	CookieStore sessions.Store // session cookie storage
	L logrus.FieldLogger // structured logger
	ScopeStrategy fosite.ScopeStrategy // how requested scopes are matched
	Issuer string // token issuer identifier
	W firewall.Firewall // access-control firewall
	ResourcePrefix string // prefix used by PrefixResource; empty means "rn:hydra"
	ClaimsSupported string // advertised in discovery metadata — confirm format (comma-separated?)
	ScopesSupported string // advertised in discovery metadata — confirm format (comma-separated?)
	UserinfoEndpoint string // advertised userinfo endpoint URL
}
// PrefixResource returns resource qualified with the handler's resource
// prefix, e.g. "rn:hydra:oauth2:tokens" for resource "oauth2:tokens".
// An empty ResourcePrefix falls back to "rn:hydra", and a single trailing
// ':' on the prefix is dropped so the result never contains "::".
//
// Review fix: the previous version wrote the normalized prefix back to
// h.ResourcePrefix, mutating shared handler state from a read-only helper —
// a data race when requests are served concurrently. The normalization is
// now purely local; the returned string is unchanged.
func (h *Handler) PrefixResource(resource string) string {
	prefix := h.ResourcePrefix
	if prefix == "" {
		prefix = "rn:hydra"
	}
	if prefix[len(prefix)-1] == ':' {
		prefix = prefix[:len(prefix)-1]
	}
	return prefix + ":" + resource
}
|
package main
import (
"bufio"
"encoding/base64"
"flag"
"fmt"
"os"
"strings"
"time"
"github.com/valyala/fastjson"
)
// Command-line flags. All usage strings are empty in the original tool.
var (
	from = flag.String("from", "/tmp/lb-log", "") // input log file path
	to = flag.String("to", "/tmp/lb-log-new", "") // output (rewritten) log file path
	clear = flag.Bool("clear", false, "") // remove the output file first; NOTE: shadows the Go 1.21 builtin `clear` in this package
	testLine = flag.Int("test-line", 1000, "") // stop after this many lines; <= 0 disables the limit
)
func main() {
flag.Parse()
if *clear {
if err := os.Remove(*to); err != nil {
panic(err)
}
}
// 2019-03-09T23:50:59.144177Z multimedia-proxy-lb 223.72.51.200:29190 10.190.249.220:80 0.000022 0.17247 0.000023 200 200 0 16275 "GET https://cloud-cdn.tantanapp.com:443/v1/images/eyJpZCI6IlZLNFhFQVNZQ08zS1JaQUtLSlZDUUxPR09GQU1BTiIsInciOjkzMSwiaCI6OTMxLCJkIjowLCJtdCI6ImltYWdlL2pwZWciLCJkaCI6MTMyMjEyNjM2MTAzMTQzNzA1MTAsImFiIjowfQ.jpg?format=480x480 HTTP/1.1" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36" ECDHE-RSA-AES128-GCM-SHA256 TLSv1.2
// 2019-03-09T23:50:59.324213Z multimedia-proxy-lb 183.134.9.22:64588 10.190.255.22:80 0.000026 0.000393 0.000024 200 200 0 22272 "GET https://cloud-cdn.tantanapp.com:443/do_not_delete/noc.gif HTTP/1.1" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.130 Safari/537.36 JianKongBao Monitor 1.1" ECDHE-RSA-AES128-GCM-SHA256 TLSv1.2
// 2019-03-09T23:50:59.331016Z multimedia-proxy-lb 183.216.176.90:52904 10.190.249.144:80 0.000026 0.000407 0.000024 200 200 0 22272 "GET https://cloud-cdn.tantanapp.com:443/do_not_delete/noc.gif HTTP/1.1" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.130 Safari/537.36 JianKongBao Monitor 1.1" ECDHE-RSA-AES128-GCM-SHA256 TLSv1.2
// 2019-03-09T23:50:59.342935Z multimedia-proxy-lb 124.225.168.51:46636 10.190.255.22:80 0.000024 0.000405 0.000024 200 200 0 22272 "GET https://cloud-cdn.tantanapp.com:443/do_not_delete/noc.gif HTTP/1.1" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.130 Safari/537.36 JianKongBao Monitor 1.1" ECDHE-RSA-AES128-GCM-SHA256 TLSv1.2
// 2019-03-09T23:50:59.343502Z multimedia-proxy-lb 106.120.178.23:47691 10.190.249.144:80 0.000023 0.000468 0.000026 200 200 0 22272 "GET https://cloud-cdn.tantanapp.com:443/do_not_delete/noc.gif HTTP/1.1" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.130 Safari/537.36 JianKongBao Monitor 1.1" ECDHE-RSA-AES128-GCM-SHA256 TLSv1.2
// 2019-03-09T23:50:59.353701Z multimedia-proxy-lb 123.128.14.179:55886 10.190.255.22:80 0.000023 0.000396 0.000023 200 200 0 22272 "GET https://cloud-cdn.tantanapp.com:443/do_not_delete/noc.gif HTTP/1.1" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.130 Safari/537.36 JianKongBao Monitor 1.1" ECDHE-RSA-AES128-GCM-SHA256 TLSv1.2
// 2019-03-09T23:50:59.334127Z multimedia-proxy-lb 113.7.18.98:53523 10.190.249.220:80 0.000024 0.034196 0.000024 200 200 0 13043 "GET https://cloud-cdn.tantanapp.com:443/v1/images/eyJpZCI6IlhDVlhQNDZCQVZIT0lSSFFNR1pOVVM1VExaTURNVyIsInciOjk4NCwiaCI6OTYxLCJkIjowLCJtdCI6ImltYWdlL2pwZWciLCJkaCI6MTAwMjcxMDUwNDgxMjg3MzA0MjQsImFiIjowfQ.jpg?format=480x480 HTTP/1.1" "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" ECDHE-RSA-AES128-GCM-SHA256 TLSv1.2
// 2019-03-09T23:50:59.369625Z multimedia-proxy-lb 113.59.38.17:57444 10.190.249.144:80 0.000026 0.000541 0.000024 200 200 0 22272 "GET https://cloud-cdn.tantanapp.com:443/do_not_delete/noc.gif HTTP/1.1" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.130 Safari/537.36 JianKongBao Monitor 1.1" ECDHE-RSA-AES128-GCM-SHA256 TLSv1.2
// 2019-03-09T23:50:59.248593Z multimedia-proxy-lb 117.136.0.165:41414 10.190.249.220:80 0.000026 0.125169 0.000022 200 200 16439 303 "POST https://cloud.tantanapp.com:443/v1/upload/audio HTTP/1.1" "Putong/3.4.1 Android/25 OPPO/OPPO+A73" ECDHE-RSA-AES128-GCM-SHA256 TLSv1.2
// 2019-03-09T23:50:59.375420Z multimedia-proxy-lb 121.22.247.208:45206 10.190.255.22:80 0.000024 0.000436 0.000023 200 200 0 22272 "GET https://cloud-cdn.tantanapp.com:443/do_not_delete/noc.gif HTTP/1.1" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.130 Safari/537.36 JianKongBao Monitor 1.1" ECDHE-RSA-AES128-GCM-SHA256 TLSv1.2
// 2019-03-08T23:55:31.319991Z images eyJpZCI6IkZTRk9FRkVKSVZRRTNGVFREVVlVNlVDNVBRTDVEWCIsInciOjk2MCwiaCI6OTYwLCJkIjowLCJtdCI6ImltYWdlL2pwZWciLCJkaCI6NzY5ODU3NjI0NjA4ODI5MjMxM30
// 2019-03-08T23:55:31.321042Z images eyJpZCI6IldMTEJZNlU2QlZFWExUN1czSDY3N1ZDNlZNQlRZUCIsInciOjcyMCwiaCI6NzIwLCJkIjowLCJtdCI6ImltYWdlL2pwZWciLCJkaCI6MTUwMzQ4ODc1ODg3MDcyNDgwNzcsImFiIjowfQ
// 2019-03-08T23:55:31.320376Z images eyJpZCI6Ik5UV0ZOSVgzT1BNU1dJN09FSFNRVUVFNUYzWldXRiIsInciOjE0NDAsImgiOjE0NDAsImQiOjAsIm10IjoiaW1hZ2UvanBlZyIsImRoIjo1MjgxMDA2NjYwMzMyNzcwMDY3LCJhYiI6MH0
// 2019-03-08T23:55:31.304268Z images images
f1, err := os.Open(*from)
if err != nil {
panic(err)
}
defer f1.Close()
f2, err := os.OpenFile(*to, os.O_CREATE|os.O_RDWR, 0666)
if err != nil {
panic(err)
}
defer f2.Close()
scanner := bufio.NewScanner(f1)
counter := 0
for scanner.Scan() {
fields := strings.Split(scanner.Text(), " ")
if len(fields) < 3 {
fmt.Println("not good", fields)
continue
}
t, err := time.Parse(time.RFC3339Nano, fields[0])
if err != nil {
fmt.Println("cannot parse time", err, fields[0])
}
fields[0] = fmt.Sprint(t.Unix())
key := fields[2]
bs, err := base64.RawURLEncoding.DecodeString(strings.TrimSpace(key))
if err != nil {
fmt.Println("failed to base 64 decode", err, key)
goto final
}
if id := fastjson.GetString(bs, "id"); id != "" {
key = id
}
final:
fields[2] = key
// fmt.Println(fields)
_, err = f2.WriteString(strings.Join(fields, " ") + "\n")
if err != nil {
panic(err)
}
counter++
if *testLine > 0 && counter > *testLine {
break
}
}
}
|
package develop
import (
"errors"
"net/http"
"testing"
"github.com/cli/cli/v2/context"
"github.com/cli/cli/v2/git"
"github.com/cli/cli/v2/internal/config"
"github.com/cli/cli/v2/internal/ghrepo"
"github.com/cli/cli/v2/internal/run"
"github.com/cli/cli/v2/pkg/httpmock"
"github.com/cli/cli/v2/pkg/iostreams"
"github.com/cli/cli/v2/pkg/prompt"
"github.com/cli/cli/v2/test"
"github.com/stretchr/testify/assert"
)
// Test_developRun exercises `gh issue develop` end-to-end against stubbed
// HTTP (GraphQL) and git layers, covering both the --list path
// (developRunList) and the branch-creation path (developRunCreate).
func Test_developRun(t *testing.T) {
	// Feature-detection response: a non-null LinkedBranch type in the
	// schema means the API supports linked branches.
	featureEnabledPayload := `{
        "data": {
            "LinkedBranch": {
                "fields": [
                    {
                        "name": "id"
                    },
                    {
                        "name": "ref"
                    }
                ]
            }
        }
    }`
	// A null LinkedBranch type means the feature is unavailable.
	featureDisabledPayload := `{ "data": { "LinkedBranch": null } }`

	// Table-driven cases. NOTE(review): cmdStubs, askStubs and
	// expectedBrowse are declared here but never read by the run loop
	// below — presumably leftovers or reserved for future cases.
	tests := []struct {
		name            string
		setup           func(*DevelopOptions, *testing.T) func()
		cmdStubs        func(*run.CommandStubber)
		runStubs        func(*run.CommandStubber)
		remotes         map[string]string
		askStubs        func(*prompt.AskStubber) // TODO eventually migrate to PrompterMock
		httpStubs       func(*httpmock.Registry, *testing.T)
		expectedOut     string
		expectedErrOut  string
		expectedBrowse  string
		wantErr         string
		tty             bool
	}{
		{name: "list branches for an issue",
			setup: func(opts *DevelopOptions, t *testing.T) func() {
				opts.IssueSelector = "42"
				opts.List = true
				return func() {}
			},
			httpStubs: func(reg *httpmock.Registry, t *testing.T) {
				reg.Register(
					httpmock.GraphQL(`query LinkedBranch_fields\b`),
					httpmock.StringResponse(featureEnabledPayload),
				)
				reg.Register(
					httpmock.GraphQL(`query BranchIssueReferenceListLinkedBranches\b`),
					httpmock.GraphQLQuery(`{
            "data": {
              "repository": {
                "issue": {
                  "linkedBranches": {
                    "edges": [
                      {
                        "node": {
                          "ref": {
                            "name": "foo"
                          }
                        }
                      },
                      {
                        "node": {
                          "ref": {
                            "name": "bar"
                          }
                        }
                      }
                    ]
                  }
                }
              }
            }
          }
          `, func(query string, inputs map[string]interface{}) {
						assert.Equal(t, float64(42), inputs["issueNumber"])
						assert.Equal(t, "OWNER", inputs["repositoryOwner"])
						assert.Equal(t, "REPO", inputs["repositoryName"])
					}))
			},
			expectedOut: "foo\nbar\n",
		},
		{name: "list branches for an issue in tty",
			setup: func(opts *DevelopOptions, t *testing.T) func() {
				opts.IssueSelector = "42"
				opts.List = true
				return func() {}
			},
			tty: true,
			httpStubs: func(reg *httpmock.Registry, t *testing.T) {
				reg.Register(
					httpmock.GraphQL(`query LinkedBranch_fields\b`),
					httpmock.StringResponse(featureEnabledPayload),
				)
				reg.Register(
					httpmock.GraphQL(`query BranchIssueReferenceListLinkedBranches\b`),
					httpmock.GraphQLQuery(`{
            "data": {
              "repository": {
                "issue": {
                  "linkedBranches": {
                    "edges": [
                      {
                        "node": {
                          "ref": {
                            "name": "foo",
                            "repository": {
                              "url": "http://github.localhost/OWNER/REPO"
                            }
                          }
                        }
                      },
                      {
                        "node": {
                          "ref": {
                            "name": "bar",
                            "repository": {
                              "url": "http://github.localhost/OWNER/OTHER-REPO"
                            }
                          }
                        }
                      }
                    ]
                  }
                }
              }
            }
          }
          `, func(query string, inputs map[string]interface{}) {
						assert.Equal(t, float64(42), inputs["issueNumber"])
						assert.Equal(t, "OWNER", inputs["repositoryOwner"])
						assert.Equal(t, "REPO", inputs["repositoryName"])
					}))
			},
			expectedOut: "\nShowing linked branches for OWNER/REPO#42\n\nfoo  http://github.localhost/OWNER/REPO/tree/foo\nbar  http://github.localhost/OWNER/OTHER-REPO/tree/bar\n",
		},
		{name: "list branches for an issue providing an issue url",
			setup: func(opts *DevelopOptions, t *testing.T) func() {
				opts.IssueSelector = "https://github.com/cli/test-repo/issues/42"
				opts.List = true
				return func() {}
			},
			httpStubs: func(reg *httpmock.Registry, t *testing.T) {
				reg.Register(
					httpmock.GraphQL(`query LinkedBranch_fields\b`),
					httpmock.StringResponse(featureEnabledPayload),
				)
				reg.Register(
					httpmock.GraphQL(`query BranchIssueReferenceListLinkedBranches\b`),
					httpmock.GraphQLQuery(`{
            "data": {
              "repository": {
                "issue": {
                  "linkedBranches": {
                    "edges": [
                      {
                        "node": {
                          "ref": {
                            "name": "foo"
                          }
                        }
                      },
                      {
                        "node": {
                          "ref": {
                            "name": "bar"
                          }
                        }
                      }
                    ]
                  }
                }
              }
            }
          }
          `, func(query string, inputs map[string]interface{}) {
						assert.Equal(t, float64(42), inputs["issueNumber"])
						assert.Equal(t, "cli", inputs["repositoryOwner"])
						assert.Equal(t, "test-repo", inputs["repositoryName"])
					}))
			},
			expectedOut: "foo\nbar\n",
		},
		{name: "list branches for an issue providing an issue repo",
			setup: func(opts *DevelopOptions, t *testing.T) func() {
				opts.IssueSelector = "42"
				opts.IssueRepoSelector = "cli/test-repo"
				opts.List = true
				return func() {}
			},
			httpStubs: func(reg *httpmock.Registry, t *testing.T) {
				reg.Register(
					httpmock.GraphQL(`query LinkedBranch_fields\b`),
					httpmock.StringResponse(featureEnabledPayload),
				)
				reg.Register(
					httpmock.GraphQL(`query BranchIssueReferenceListLinkedBranches\b`),
					httpmock.GraphQLQuery(`{
            "data": {
              "repository": {
                "issue": {
                  "linkedBranches": {
                    "edges": [
                      {
                        "node": {
                          "ref": {
                            "name": "foo"
                          }
                        }
                      },
                      {
                        "node": {
                          "ref": {
                            "name": "bar"
                          }
                        }
                      }
                    ]
                  }
                }
              }
            }
          }
          `, func(query string, inputs map[string]interface{}) {
						assert.Equal(t, float64(42), inputs["issueNumber"])
						assert.Equal(t, "cli", inputs["repositoryOwner"])
						assert.Equal(t, "test-repo", inputs["repositoryName"])
					}))
			},
			expectedOut: "foo\nbar\n",
		},
		{name: "list branches for an issue providing an issue url and specifying the same repo works",
			setup: func(opts *DevelopOptions, t *testing.T) func() {
				opts.IssueSelector = "https://github.com/cli/test-repo/issues/42"
				opts.IssueRepoSelector = "cli/test-repo"
				opts.List = true
				return func() {}
			},
			httpStubs: func(reg *httpmock.Registry, t *testing.T) {
				reg.Register(
					httpmock.GraphQL(`query LinkedBranch_fields\b`),
					httpmock.StringResponse(featureEnabledPayload),
				)
				reg.Register(
					httpmock.GraphQL(`query BranchIssueReferenceListLinkedBranches\b`),
					httpmock.GraphQLQuery(`{
            "data": {
              "repository": {
                "issue": {
                  "linkedBranches": {
                    "edges": [
                      {
                        "node": {
                          "ref": {
                            "name": "foo"
                          }
                        }
                      },
                      {
                        "node": {
                          "ref": {
                            "name": "bar"
                          }
                        }
                      }
                    ]
                  }
                }
              }
            }
          }
          `, func(query string, inputs map[string]interface{}) {
						assert.Equal(t, float64(42), inputs["issueNumber"])
						assert.Equal(t, "cli", inputs["repositoryOwner"])
						assert.Equal(t, "test-repo", inputs["repositoryName"])
					}))
			},
			expectedOut: "foo\nbar\n",
		},
		{name: "list branches for an issue providing an issue url and specifying a different repo returns an error",
			setup: func(opts *DevelopOptions, t *testing.T) func() {
				opts.IssueSelector = "https://github.com/cli/test-repo/issues/42"
				opts.IssueRepoSelector = "cli/other"
				opts.List = true
				return func() {}
			},
			httpStubs: func(reg *httpmock.Registry, t *testing.T) {
				reg.Register(
					httpmock.GraphQL(`query LinkedBranch_fields\b`),
					httpmock.StringResponse(featureEnabledPayload),
				)
			},
			wantErr: "issue repo in url cli/test-repo does not match the repo from --issue-repo cli/other",
		},
		{name: "returns an error when the feature isn't enabled in the GraphQL API",
			setup: func(opts *DevelopOptions, t *testing.T) func() {
				opts.IssueSelector = "https://github.com/cli/test-repo/issues/42"
				opts.List = true
				return func() {}
			},
			httpStubs: func(reg *httpmock.Registry, t *testing.T) {
				reg.Register(
					httpmock.GraphQL(`query LinkedBranch_fields\b`),
					httpmock.StringResponse(featureDisabledPayload),
				)
			},
			wantErr: "the `gh issue develop` command is not currently available",
		},
		{name: "develop new branch with a name provided",
			setup: func(opts *DevelopOptions, t *testing.T) func() {
				opts.Name = "my-branch"
				opts.BaseBranch = "main"
				opts.IssueSelector = "123"
				return func() {}
			},
			httpStubs: func(reg *httpmock.Registry, t *testing.T) {
				reg.Register(
					httpmock.GraphQL(`query LinkedBranch_fields\b`),
					httpmock.StringResponse(featureEnabledPayload),
				)
				reg.Register(
					httpmock.GraphQL(`query RepositoryInfo\b`),
					httpmock.StringResponse(`
            { "data": { "repository": {
              "id": "REPOID",
              "hasIssuesEnabled": true
            } } }`),
				)
				reg.Register(
					httpmock.GraphQL(`query IssueByNumber\b`),
					httpmock.StringResponse(`{"data":{"repository":{ "hasIssuesEnabled": true, "issue":{"id": "yar", "number":123, "title":"my issue"} }}}`))
				reg.Register(
					httpmock.GraphQL(`query BranchIssueReferenceFindBaseOid\b`),
					httpmock.StringResponse(`{"data":{"repository":{"ref":{"target":{"oid":"123"}}}}}`))
				reg.Register(
					httpmock.GraphQL(`(?s)mutation CreateLinkedBranch\b.*issueId: \$issueId,\s+name: \$name,\s+oid: \$oid,`),
					httpmock.GraphQLQuery(`{ "data": { "createLinkedBranch": { "linkedBranch": {"id": "2", "ref": {"name": "my-branch"} } } } }`,
						func(query string, inputs map[string]interface{}) {
							assert.Equal(t, "REPOID", inputs["repositoryId"])
							assert.Equal(t, "my-branch", inputs["name"])
							assert.Equal(t, "yar", inputs["issueId"])
						}),
				)
			},
			expectedOut: "github.com/OWNER/REPO/tree/my-branch\n",
		},
		{name: "develop new branch without a name provided omits the param from the mutation",
			setup: func(opts *DevelopOptions, t *testing.T) func() {
				opts.Name = ""
				opts.BaseBranch = "main"
				opts.IssueSelector = "123"
				return func() {}
			},
			httpStubs: func(reg *httpmock.Registry, t *testing.T) {
				reg.Register(
					httpmock.GraphQL(`query LinkedBranch_fields\b`),
					httpmock.StringResponse(featureEnabledPayload),
				)
				reg.Register(
					httpmock.GraphQL(`query RepositoryInfo\b`),
					httpmock.StringResponse(`
            { "data": { "repository": {
              "id": "REPOID",
              "hasIssuesEnabled": true
            } } }`),
				)
				reg.Register(
					httpmock.GraphQL(`query IssueByNumber\b`),
					httpmock.StringResponse(`{"data":{"repository":{ "hasIssuesEnabled": true, "issue":{"id": "yar", "number":123, "title":"my issue"} }}}`))
				reg.Register(
					httpmock.GraphQL(`query BranchIssueReferenceFindBaseOid\b`),
					httpmock.StringResponse(`{"data":{"repository":{"ref":{"target":{"oid":"123"}}}}}`))
				reg.Register(
					httpmock.GraphQL(`(?s)mutation CreateLinkedBranch\b.*\$oid: GitObjectID!, \$repositoryId:.*issueId: \$issueId,\s+oid: \$oid,`),
					httpmock.GraphQLQuery(`{ "data": { "createLinkedBranch": { "linkedBranch": {"id": "2", "ref": {"name": "my-issue-1"} } } } }`,
						func(query string, inputs map[string]interface{}) {
							assert.Equal(t, "REPOID", inputs["repositoryId"])
							assert.Equal(t, "", inputs["name"])
							assert.Equal(t, "yar", inputs["issueId"])
						}),
				)
			},
			expectedOut: "github.com/OWNER/REPO/tree/my-issue-1\n",
		},
		{name: "develop providing an issue url and specifying a different repo returns an error",
			setup: func(opts *DevelopOptions, t *testing.T) func() {
				opts.IssueSelector = "https://github.com/cli/test-repo/issues/42"
				opts.IssueRepoSelector = "cli/other"
				return func() {}
			},
			httpStubs: func(reg *httpmock.Registry, t *testing.T) {
				reg.Register(
					httpmock.GraphQL(`query LinkedBranch_fields\b`),
					httpmock.StringResponse(featureEnabledPayload),
				)
				reg.Register(
					httpmock.GraphQL(`query RepositoryInfo\b`),
					httpmock.StringResponse(`
            { "data": { "repository": {
              "id": "REPOID",
              "hasIssuesEnabled": true
            } } }`),
				)
			},
			wantErr: "issue repo in url cli/test-repo does not match the repo from --issue-repo cli/other",
		},
		{name: "develop new branch with checkout when the branch exists locally",
			setup: func(opts *DevelopOptions, t *testing.T) func() {
				opts.Name = "my-branch"
				opts.BaseBranch = "main"
				opts.IssueSelector = "123"
				opts.Checkout = true
				return func() {}
			},
			remotes: map[string]string{
				"origin": "OWNER/REPO",
			},
			httpStubs: func(reg *httpmock.Registry, t *testing.T) {
				reg.Register(
					httpmock.GraphQL(`query LinkedBranch_fields\b`),
					httpmock.StringResponse(featureEnabledPayload),
				)
				reg.Register(
					httpmock.GraphQL(`query RepositoryInfo\b`),
					httpmock.StringResponse(`
            { "data": { "repository": {
              "id": "REPOID",
              "hasIssuesEnabled": true
            } } }`),
				)
				reg.Register(
					httpmock.GraphQL(`query IssueByNumber\b`),
					httpmock.StringResponse(`{"data":{"repository":{ "hasIssuesEnabled": true, "issue":{"id": "yar", "number":123, "title":"my issue"} }}}`))
				reg.Register(
					httpmock.GraphQL(`query BranchIssueReferenceFindBaseOid\b`),
					httpmock.StringResponse(`{"data":{"repository":{"ref":{"target":{"oid":"123"}}}}}`))
				reg.Register(
					httpmock.GraphQL(`mutation CreateLinkedBranch\b`),
					httpmock.GraphQLQuery(`{ "data": { "createLinkedBranch": { "linkedBranch": {"id": "2", "ref": {"name": "my-branch"} } } } }`,
						func(query string, inputs map[string]interface{}) {
							assert.Equal(t, "REPOID", inputs["repositoryId"])
							assert.Equal(t, "my-branch", inputs["name"])
							assert.Equal(t, "yar", inputs["issueId"])
						}),
				)
			},
			runStubs: func(cs *run.CommandStubber) {
				cs.Register(`git rev-parse --verify refs/heads/my-branch`, 0, "")
				cs.Register(`git checkout my-branch`, 0, "")
				cs.Register(`git pull --ff-only origin my-branch`, 0, "")
			},
			expectedOut: "github.com/OWNER/REPO/tree/my-branch\n",
		},
		{name: "develop new branch with checkout when the branch does not exist locally",
			setup: func(opts *DevelopOptions, t *testing.T) func() {
				opts.Name = "my-branch"
				opts.BaseBranch = "main"
				opts.IssueSelector = "123"
				opts.Checkout = true
				return func() {}
			},
			remotes: map[string]string{
				"origin": "OWNER/REPO",
			},
			httpStubs: func(reg *httpmock.Registry, t *testing.T) {
				reg.Register(
					httpmock.GraphQL(`query LinkedBranch_fields\b`),
					httpmock.StringResponse(featureEnabledPayload),
				)
				reg.Register(
					httpmock.GraphQL(`query RepositoryInfo\b`),
					httpmock.StringResponse(`
            { "data": { "repository": {
              "id": "REPOID",
              "hasIssuesEnabled": true
            } } }`),
				)
				reg.Register(
					httpmock.GraphQL(`query IssueByNumber\b`),
					httpmock.StringResponse(`{"data":{"repository":{ "hasIssuesEnabled": true, "issue":{"id": "yar", "number":123, "title":"my issue"} }}}`))
				reg.Register(
					httpmock.GraphQL(`query BranchIssueReferenceFindBaseOid\b`),
					httpmock.StringResponse(`{"data":{"repository":{"ref":{"target":{"oid":"123"}}}}}`))
				reg.Register(
					httpmock.GraphQL(`mutation CreateLinkedBranch\b`),
					httpmock.GraphQLQuery(`{ "data": { "createLinkedBranch": { "linkedBranch": {"id": "2", "ref": {"name": "my-branch"} } } } }`,
						func(query string, inputs map[string]interface{}) {
							assert.Equal(t, "REPOID", inputs["repositoryId"])
							assert.Equal(t, "my-branch", inputs["name"])
							assert.Equal(t, "yar", inputs["issueId"])
						}),
				)
			},
			runStubs: func(cs *run.CommandStubber) {
				cs.Register(`git rev-parse --verify refs/heads/my-branch`, 1, "")
				cs.Register(`git fetch origin \+refs/heads/my-branch:refs/remotes/origin/my-branch`, 0, "")
				cs.Register(`git checkout -b my-branch --track origin/my-branch`, 0, "")
				cs.Register(`git pull --ff-only origin my-branch`, 0, "")
			},
			expectedOut: "github.com/OWNER/REPO/tree/my-branch\n",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Every registered HTTP stub must be consumed, or Verify fails.
			reg := &httpmock.Registry{}
			defer reg.Verify(t)
			if tt.httpStubs != nil {
				tt.httpStubs(reg, t)
			}

			// Wire DevelopOptions with test doubles: buffered IO streams,
			// the stubbed HTTP transport, a blank config, fake remotes, and
			// a git client pointing at fake binary paths.
			opts := DevelopOptions{}
			ios, _, stdout, stderr := iostreams.Test()
			ios.SetStdoutTTY(tt.tty)
			ios.SetStdinTTY(tt.tty)
			ios.SetStderrTTY(tt.tty)
			opts.IO = ios
			opts.BaseRepo = func() (ghrepo.Interface, error) {
				return ghrepo.New("OWNER", "REPO"), nil
			}
			opts.HttpClient = func() (*http.Client, error) {
				return &http.Client{Transport: reg}, nil
			}
			opts.Config = func() (config.Config, error) {
				return config.NewBlankConfig(), nil
			}
			opts.Remotes = func() (context.Remotes, error) {
				if len(tt.remotes) == 0 {
					return nil, errors.New("no remotes")
				}
				var remotes context.Remotes
				for name, repo := range tt.remotes {
					r, err := ghrepo.FromFullName(repo)
					if err != nil {
						return remotes, err
					}
					remotes = append(remotes, &context.Remote{
						Remote: &git.Remote{Name: name},
						Repo:   r,
					})
				}
				return remotes, nil
			}
			opts.GitClient = &git.Client{
				GhPath:  "some/path/gh",
				GitPath: "some/path/git",
			}

			// Stub external git commands registered via tt.runStubs.
			cmdStubs, cmdTeardown := run.Stub()
			defer cmdTeardown(t)
			if tt.runStubs != nil {
				tt.runStubs(cmdStubs)
			}

			cleanSetup := func() {}
			if tt.setup != nil {
				cleanSetup = tt.setup(&opts, t)
			}
			defer cleanSetup()

			// Dispatch to the code path under test: --list vs create.
			var err error
			if opts.List {
				err = developRunList(&opts)
			} else {
				err = developRunCreate(&opts)
			}
			output := &test.CmdOut{
				OutBuf: stdout,
				ErrBuf: stderr,
			}
			if tt.wantErr != "" {
				assert.EqualError(t, err, tt.wantErr)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, tt.expectedOut, output.String())
				assert.Equal(t, tt.expectedErrOut, output.Stderr())
			}
		})
	}
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-08-14 10:18
# @File : of_Offer_04_二维数组中的查找.go
# @Description :
在一个 n * m 的二维数组中,每一行都按照从左到右递增的顺序排序,
每一列都按照从上到下递增的顺序排序。请完成一个函数,输入这样的一个二维数组和一个整数,判断数组中是否含有该整数。
# @Attention :
*/
package offer
// findNumberIn2DArray2 reports whether target is present in matrix, where
// every row is sorted ascending left-to-right and every column is sorted
// ascending top-to-bottom (the problem described in the file header).
//
// NOTE(review): the previous implementation binary-searched the matrix as a
// single flat sorted array, which is only valid when each row's first
// element exceeds the previous row's last element. That is not guaranteed
// here: [[1,3],[2,4]] with target 2 was reported absent. This version
// binary-searches each candidate row instead (O(rows * log cols)).
func findNumberIn2DArray2(matrix [][]int, target int) bool {
	if len(matrix) == 0 || len(matrix[0]) == 0 {
		return false
	}
	for _, row := range matrix {
		// Skip rows whose value range cannot contain target.
		if len(row) == 0 || row[0] > target || row[len(row)-1] < target {
			continue
		}
		lo, hi := 0, len(row)-1
		for lo <= hi {
			mid := lo + (hi-lo)/2
			switch {
			case row[mid] < target:
				lo = mid + 1
			case row[mid] > target:
				hi = mid - 1
			default:
				return true
			}
		}
	}
	return false
}
// findNumberIn2DArray reports whether target occurs in matrix, where each
// row ascends left-to-right and each column ascends top-to-bottom. It walks
// a "staircase" from the top-right corner: stepping left discards a column
// that is too large, stepping down discards a row that is too small, so the
// scan is O(rows + cols).
func findNumberIn2DArray(matrix [][]int, target int) bool {
	if len(matrix) == 0 || len(matrix[0]) == 0 {
		return false
	}
	row, col := 0, len(matrix[0])-1
	for row < len(matrix) && col >= 0 {
		switch v := matrix[row][col]; {
		case v < target:
			row++
		case v > target:
			col--
		default:
			return true
		}
	}
	return false
}
|
// Copyright 2014 The Sporting Exchange Limited. All rights reserved.
// Use of this source code is governed by a free license that can be
// found in the LICENSE file.
package collect
import (
"log"
"math/rand"
"strings"
"sync"
"time"
"opentsp.org/contrib/collect-netscaler/nitro"
"opentsp.org/internal/tsdb"
)
// init registers the "service" statistic group so that the service
// collector below runs as part of every collection cycle.
func init() {
	registerStatFunc("service", service)
}
// service emits per-service metrics for every service bound to a
// recently-active LBVServer, plus per-vserver aggregate totals under the
// "total.service.*" prefix.
//
// NOTE(review): the nitro counter fields are pointers and are dereferenced
// unconditionally; a counter missing from the API response would panic —
// confirm the nitro client always populates them.
func service(emit emitFn, r *nitro.ResponseStat) {
	for vserver, services := range byVServer(r.Service) {
		// Per-vserver running totals, accumulated across its services.
		var total struct {
			ActiveConn uint64
			ActiveTransactions uint64
			CurClntConnections uint64
			CurReusePool uint64
			CurSrvrConnections uint64
			RequestBytesRate float64
			RequestsRate float64
			ResponseBytesRate float64
			ResponsesRate float64
			StateUp int
			SurgeCount uint64
			SvrEstablishedConn uint64
			SvrNotEstablishedConn uint64
		}
		// Normalize dots in the vserver name before embedding it in metric names.
		vserver = strings.Replace(vserver, ".", "-", -1)
		for _, svc := range services {
			pre := "service." + tsdb.Clean(vserver) + "."
			post := " service=" + tsdb.Clean(svc.Name)
			// Encode the UP/not-UP state as a 1/0 gauge.
			stateUp := 1
			if svc.State != "UP" {
				stateUp = 0
			}
			// Connections accepted by the server but not (yet) established.
			svrNotEstablishedConn := *svc.CurSrvrConnections - *svc.SvrEstablishedConn
			emit(pre+"CurClntConnections"+post, *svc.CurClntConnections)
			emit(pre+"CurSrvrConnections"+post, *svc.CurSrvrConnections)
			emit(pre+"MaxClients"+post, *svc.MaxClients)
			emit(pre+"RequestBytesRate"+post, *svc.RequestBytesRate)
			emit(pre+"ResponseBytesRate"+post, *svc.ResponseBytesRate)
			emit(pre+"StateUp"+post, stateUp)
			emit(pre+"SurgeCount"+post, *svc.SurgeCount)
			emit(pre+"SvrEstablishedConn"+post, *svc.SvrEstablishedConn)
			emit(pre+"SvrNotEstablishedConn"+post, svrNotEstablishedConn)
			// NB: MaxClients deliberately omitted; it's confusing when aggregated.
			total.CurClntConnections += *svc.CurClntConnections
			total.CurSrvrConnections += *svc.CurSrvrConnections
			total.RequestBytesRate += *svc.RequestBytesRate
			total.ResponseBytesRate += *svc.ResponseBytesRate
			total.StateUp += stateUp
			total.SurgeCount += *svc.SurgeCount
			total.SvrEstablishedConn += *svc.SvrEstablishedConn
			total.SvrNotEstablishedConn += svrNotEstablishedConn
			// The following counters are emitted for HTTP services only.
			if svc.ServiceType == "HTTP" {
				activeConn := *svc.SvrEstablishedConn - *svc.CurReusePool
				emit(pre+"ActiveConn"+post, activeConn)
				emit(pre+"ActiveTransactions"+post, *svc.ActiveTransactions)
				emit(pre+"AvgSvrTTFB"+post, *svc.AvgSvrTTFB)
				emit(pre+"CurReusePool"+post, *svc.CurReusePool)
				emit(pre+"RequestsRate"+post, *svc.RequestsRate)
				emit(pre+"ResponsesRate"+post, *svc.ResponsesRate)
				total.ActiveConn += activeConn
				total.ActiveTransactions += *svc.ActiveTransactions
				total.CurReusePool += *svc.CurReusePool
				total.RequestsRate += *svc.RequestsRate
				total.ResponsesRate += *svc.ResponsesRate
			}
		}
		// Per-vserver aggregates.
		post := " vserver=" + tsdb.Clean(vserver)
		emit("total.service.CurClntConnections"+post, total.CurClntConnections)
		emit("total.service.CurSrvrConnections"+post, total.CurSrvrConnections)
		emit("total.service.RequestBytesRate"+post, total.RequestBytesRate)
		emit("total.service.ResponseBytesRate"+post, total.ResponseBytesRate)
		emit("total.service.StateUp"+post, total.StateUp)
		emit("total.service.SurgeCount"+post, total.SurgeCount)
		emit("total.service.SvrEstablishedConn"+post, total.SvrEstablishedConn)
		emit("total.service.SvrNotEstablishedConn"+post, total.SvrNotEstablishedConn)
		// HTTP-only aggregates; assumes all services in the group share the
		// first service's type.
		if len(services) > 0 && services[0].ServiceType == "HTTP" {
			emit("total.service.ActiveConn"+post, total.ActiveConn)
			emit("total.service.ActiveTransactions"+post, total.ActiveTransactions)
			emit("total.service.CurReusePool"+post, total.CurReusePool)
			emit("total.service.RequestsRate"+post, total.RequestsRate)
			emit("total.service.ResponsesRate"+post, total.ResponsesRate)
		}
	}
}
// byVServer partitions service list by the VServer they are bound to.
// It also discards VServers that are idle or bindingless.
func byVServer(all []nitro.Service) map[string][]nitro.Service {
	active := lbvservers.RecentlyActive()
	lookup := serviceMap(all)
	m := make(map[string][]nitro.Service, len(active))
	for vserver, services := range serviceBindings.Map() {
		if !active[vserver] || len(services) == 0 {
			continue
		}
		m[vserver] = make([]nitro.Service, 0, len(services))
		for _, name := range services {
			// NOTE(review): a bound name missing from lookup (e.g. dropped
			// as a duplicate by serviceMap) yields a zero-value Service here
			// — confirm downstream emit handles that.
			m[vserver] = append(m[vserver], lookup[name])
		}
	}
	return m
}
// serviceMap indexes services by name. A name that appears more than once
// is considered corrupt nitro data: the duplicate is logged once and every
// entry carrying that name is excluded from the result.
func serviceMap(services []nitro.Service) map[string]nitro.Service {
	m := make(map[string]nitro.Service, len(services))
	clash := make(map[string]bool)
	for _, svc := range services {
		name := svc.Name
		if clash[name] {
			continue
		}
		if _, seen := m[name]; !seen {
			m[name] = svc
			continue
		}
		log.Printf("nitro data error: duplicate entry for service %s", name)
		clash[name] = true
		delete(m, name)
	}
	return m
}
// serviceBindings caches the LBVServer-to-service-name bindings, kept fresh
// by the background goroutine started in newServiceBindingsPoller.
var serviceBindings = newServiceBindingsPoller()

// currentServiceBindings holds the current bindings map plus the set of
// background jobs that keep it up to date.
type currentServiceBindings struct {
	byLBVServer map[string][]string // vserver name -> bound service names
	byLBVServerMu sync.Mutex // guards byLBVServer
	running map[string]*bindingReaderJob // active reader jobs, touched only by loop
}
func newServiceBindingsPoller() *currentServiceBindings {
sb := ¤tServiceBindings{
byLBVServer: make(map[string][]string),
running: make(map[string]*bindingReaderJob),
}
go sb.loop()
return sb
}
// Map returns a snapshot copy of the bindings map. Only the top-level map
// is duplicated; the []string values are shared with the internal state.
func (sb *currentServiceBindings) Map() map[string][]string {
	sb.byLBVServerMu.Lock()
	defer sb.byLBVServerMu.Unlock()
	snapshot := make(map[string][]string)
	for vserver, bindings := range sb.byLBVServer {
		snapshot[vserver] = bindings
	}
	return snapshot
}
// loop owns the bindings cache: every 10s it reconciles the set of reader
// jobs against the recently-active LBVServers, and it folds incoming
// binding updates into the shared map.
//
// sb.running is accessed only from this goroutine, so it needs no lock;
// byLBVServer is shared with Map and is mutex-guarded.
func (sb *currentServiceBindings) loop() {
	updateChan := make(chan binding)
	tick := time.Tick(10 * time.Second)
	for {
		select {
		case <-tick:
			avail := lbvservers.RecentlyActive()
			// Start jobs for newly-active vservers...
			for id := range avail {
				if sb.running[id] == nil {
					sb.running[id] = newBindingReaderJob(id, updateChan)
				}
			}
			// ...and stop jobs whose vserver went inactive.
			for id := range sb.running {
				if !avail[id] {
					sb.running[id].Exit <- true
					delete(sb.running, id)
				}
			}
		case b := <-updateChan:
			sb.byLBVServerMu.Lock()
			sb.byLBVServer[b.LBID] = b.ServiceList
			sb.byLBVServerMu.Unlock()
		}
	}
}
// binding is one LBVServer's current list of bound service names, as
// published by a bindingReaderJob.
type binding struct {
	LBID string // LBVServer identifier
	ServiceList []string // names of the services bound to it
}

// bindingReaderJob is a background job that keeps LBVServer-Service binding data
// up to date.
type bindingReaderJob struct {
	lbID string // LBVServer this job polls
	out chan<- binding // where readings are published
	Exit chan bool // buffered(1); signals the job to stop
	rate *time.Ticker // per-job pacing; created after the first successful read
}
// newBindingReaderJob starts a background job that periodically reads the
// service bindings of the given LBVServer and publishes them on out.
func newBindingReaderJob(lbID string, out chan<- binding) *bindingReaderJob {
	j := new(bindingReaderJob)
	j.lbID = lbID
	j.out = out
	j.Exit = make(chan bool, 1)
	go j.mainloop()
	return j
}
// getBoundServices returns the names of the services bound to the given
// LBVServer, as reported by the Nitro config API.
func getBoundServices(lbID string) ([]string, error) {
	resp, err := Client.Config.Get("lbvserver_service_binding/" + lbID)
	if err != nil {
		return nil, err
	}
	bindings := resp.LBVServerServiceBinding
	names := make([]string, 0, len(bindings))
	for i := range bindings {
		names = append(names, bindings[i].ServiceName)
	}
	return names, nil
}
// mainloop repeatedly reads the LBVServer's service bindings and publishes
// them on job.out, pacing itself via sleep. It terminates when an Exit
// signal arrives while it is trying to publish.
//
// NOTE(review): on a persistent read error this loops without checking
// Exit, so shutdown is only observed after a successful read — confirm
// that is acceptable.
func (job *bindingReaderJob) mainloop() {
	gotOK := 0 // number of successful reads so far; drives sleep's pacing
	for {
		job.sleep(gotOK)
		svcList, err := getBoundServices(job.lbID)
		if err != nil {
			log.Print(err)
			continue
		}
		gotOK++
		select {
		case job.out <- binding{job.lbID, svcList}:
			// ok
		case <-job.Exit:
			// Stop the per-job ticker (only exists after the first success).
			if job.rate != nil {
				job.rate.Stop()
			}
			return
		}
	}
}
// bindingReadInterval sets a per-LBVServer rate limit on calls to the Nitro API
// requesting the LBVServer's service bindings. Bindings retrieval is a background
// job that enables Loop to make cheaper Nitro calls, and thus meet the performance
// requirement of ~5s collection cycle.
const bindingReadInterval = 10 * time.Minute

// bindingReadRateLimit is an upper bound on the rate of calls to the Nitro API
// requesting the VServer-Service bindings. Without the limit, Nitro API crashes
// and takes up to a minute to recover causing gaps in data.
// The ticker is shared by all bindingReaderJob goroutines, i.e. it is a
// process-wide cap of 10 reads per second.
var bindingReadRateLimit = time.NewTicker(100 * time.Millisecond)
// sleep controls the delay between binding read requests. got is the number
// of successful reads so far: the first read is paced only by the global
// rate limit, the second is splayed to de-synchronize jobs (and creates the
// per-job ticker), and subsequent reads wait on both the per-job ticker and
// the global limit.
func (job *bindingReaderJob) sleep(got int) {
	switch got {
	case 0:
		// No binding data available yet; request it as fast as permitted.
		<-bindingReadRateLimit.C
	case 1:
		// Got first binding. Delay second read to avoid thundering herds.
		splay(bindingReadInterval)
		job.rate = time.NewTicker(bindingReadInterval)
	default:
		// Regular binding request: no special delay needed. Only need to
		// respect the rate limits.
		<-job.rate.C
		<-bindingReadRateLimit.C
	}
}
func splay(n time.Duration) {
time.Sleep(time.Duration(rand.Int63n(n.Nanoseconds())))
}
|
package c12_ecb_decription_simple
import (
"bytes"
"errors"
"github.com/vodafon/cryptopals/set1/c7_aes_ecb"
)
// Enc is an ECB encryption oracle: it appends a secret tail to every
// plaintext before encrypting under a fixed key.
type Enc struct {
	key []byte // fixed AES key
	tail []byte // secret suffix appended to each plaintext
}

// Encryptor abstracts the oracle so the attack functions below can run
// against any implementation.
type Encryptor interface {
	Encrypt([]byte) []byte
}
// NewEnc builds an Enc that encrypts with key and appends tail to every
// plaintext before encryption.
func NewEnc(key, tail []byte) Enc {
	return Enc{key: key, tail: tail}
}
// Encrypt returns ECB(key, src || tail): the attacker-controlled input with
// the secret tail appended, encrypted under the oracle's fixed key.
func (e Enc) Encrypt(src []byte) []byte {
	var buf bytes.Buffer
	buf.Write(src)
	buf.Write(e.tail)
	return c7_aes_ecb.Encrypt(buf.Bytes(), e.key)
}
// BruteForce recovers the oracle's secret tail one byte at a time using the
// classic byte-at-a-time ECB decryption attack.
func (e Enc) BruteForce(blockSize int) []byte {
	recovered := make([]byte, 0, len(e.tail))
	for range e.tail {
		recovered = append(recovered, findByte(recovered, blockSize, e))
	}
	return recovered
}
// findByte recovers the next unknown byte of the secret suffix. It crafts a
// filler prefix that aligns the first unknown byte at the end of a block,
// captures the target ciphertext for that span, and then tries every byte
// value until the ciphertext matches.
//
// It returns 0 when no value matches; callers treat the known tail length
// as authoritative, so a legitimate zero byte is indistinguishable only in
// the failure case.
func findByte(decr []byte, bs int, enc Encryptor) byte {
	size := (len(decr)/bs + 1) * bs
	prefix := bytes.Repeat([]byte("A"), size-len(decr)-1)
	target := enc.Encrypt(prefix)[0:size]
	// Try all 256 values: the original loop stopped at 254 and could never
	// recover a 0xff byte.
	for i := 0; i < 256; i++ {
		input := append(prefix, decr...)
		input = append(input, byte(i))
		output := enc.Encrypt(input)
		if bytes.Equal(output[0:size], target) {
			return byte(i)
		}
	}
	return byte(0)
}
// BlockSizeDetect infers the cipher's block size by growing the plaintext
// one byte at a time and watching for jumps in the ciphertext length: the
// first pass finds where the current padding block rolls over, and the
// second pass measures the distance to the next jump, which is the block
// size.
//
// NOTE(review): src is passed to lenChangeIteration by value, so each call
// starts from an empty buffer — confirm that is intended.
func BlockSizeDetect(enc Encryptor, maxLen int) (int, error) {
	var src bytes.Buffer
	// padding
	i, err := lenChangeIteration(src, enc, maxLen, 1)
	if err != nil {
		return 0, err
	}
	// new block
	i, err = lenChangeIteration(src, enc, maxLen, i+1)
	if err != nil {
		return 0, err
	}
	return i, err
}
// lenChangeIteration writes startPos filler bytes, then keeps appending one
// byte at a time (up to maxLen) until the ciphertext length changes,
// returning the loop counter at the jump. A shrinking ciphertext is
// impossible for a block cipher, so that case is reported as "not ECB".
func lenChangeIteration(src bytes.Buffer, enc Encryptor, maxLen, startPos int) (int, error) {
	src.Write(bytes.Repeat([]byte("A"), startPos))
	l := len(enc.Encrypt(src.Bytes()))
	for i := 2; i < maxLen; i++ {
		src.Write([]byte("A"))
		l1 := len(enc.Encrypt(src.Bytes()))
		if l1 < l {
			return 0, errors.New("not ECB")
		}
		if l1 > l {
			return i, nil
		}
	}
	return 0, errors.New("not found")
}
|
package files
import (
"io"
"mime/multipart"
"testing"
)
// text is the content of the root-level "file.txt" fixture.
var text = "Some text! :)"

// getTestMultiFileReader serializes a small fixture tree (beep.txt,
// boop/{a.txt, b.txt}, file.txt) into a form-data MultiFileReader.
// The tests below rely on entries arriving in sorted name order.
func getTestMultiFileReader(t *testing.T) *MultiFileReader {
	sf := NewMapDirectory(map[string]Node{
		"file.txt": NewBytesFile([]byte(text)),
		"boop": NewMapDirectory(map[string]Node{
			"a.txt": NewBytesFile([]byte("bleep")),
			"b.txt": NewBytesFile([]byte("bloop")),
		}),
		"beep.txt": NewBytesFile([]byte("beep")),
	})
	// testing output by reading it with the go stdlib "mime/multipart" Reader
	return NewMultiFileReader(sf, true)
}
// TestMultiFileReaderToMultiFile round-trips the fixture through the stdlib
// multipart reader and walks the resulting directory tree, checking entry
// names, nesting, and iterator exhaustion (including repeated Next calls on
// an already-finished sub-iterator).
func TestMultiFileReaderToMultiFile(t *testing.T) {
	mfr := getTestMultiFileReader(t)
	mpReader := multipart.NewReader(mfr, mfr.Boundary())
	mf, err := NewFileFromPartReader(mpReader, multipartFormdataType)
	if err != nil {
		t.Fatal(err)
	}
	md, ok := mf.(Directory)
	if !ok {
		t.Fatal("Expected a directory")
	}
	it := md.Entries()
	// Entries are expected in sorted order: beep.txt, boop, file.txt.
	if !it.Next() || it.Name() != "beep.txt" {
		t.Fatal("iterator didn't work as expected")
	}
	if !it.Next() || it.Name() != "boop" || DirFromEntry(it) == nil {
		t.Fatal("iterator didn't work as expected")
	}
	subIt := DirFromEntry(it).Entries()
	if !subIt.Next() || subIt.Name() != "a.txt" || DirFromEntry(subIt) != nil {
		t.Fatal("iterator didn't work as expected")
	}
	if !subIt.Next() || subIt.Name() != "b.txt" || DirFromEntry(subIt) != nil {
		t.Fatal("iterator didn't work as expected")
	}
	if subIt.Next() || it.Err() != nil {
		t.Fatal("iterator didn't work as expected")
	}
	// try to break internal state
	if subIt.Next() || it.Err() != nil {
		t.Fatal("iterator didn't work as expected")
	}
	if !it.Next() || it.Name() != "file.txt" || DirFromEntry(it) != nil || it.Err() != nil {
		t.Fatal("iterator didn't work as expected")
	}
	if it.Next() || it.Err() != nil {
		t.Fatal("iterator didn't work as expected")
	}
}
// TestMultiFileReaderToMultiFileSkip verifies that a subdirectory whose
// entries are never consumed ("boop") is skipped over transparently when
// the parent iterator advances past it.
func TestMultiFileReaderToMultiFileSkip(t *testing.T) {
	mfr := getTestMultiFileReader(t)
	mpReader := multipart.NewReader(mfr, mfr.Boundary())
	mf, err := NewFileFromPartReader(mpReader, multipartFormdataType)
	if err != nil {
		t.Fatal(err)
	}
	md, ok := mf.(Directory)
	if !ok {
		t.Fatal("Expected a directory")
	}
	it := md.Entries()
	if !it.Next() || it.Name() != "beep.txt" {
		t.Fatal("iterator didn't work as expected")
	}
	// "boop" is returned but its children are deliberately not iterated.
	if !it.Next() || it.Name() != "boop" || DirFromEntry(it) == nil {
		t.Fatal("iterator didn't work as expected")
	}
	if !it.Next() || it.Name() != "file.txt" || DirFromEntry(it) != nil || it.Err() != nil {
		t.Fatal("iterator didn't work as expected")
	}
	if it.Next() || it.Err() != nil {
		t.Fatal("iterator didn't work as expected")
	}
}
// TestOutput drives the multipart stream part-by-part via newFileFromPart,
// checking the name, type, and contents of each entry in order.
func TestOutput(t *testing.T) {
	mfr := getTestMultiFileReader(t)
	mpReader := &peekReader{r: multipart.NewReader(mfr, mfr.Boundary())}
	buf := make([]byte, 20)

	// First entry: the regular file beep.txt.
	part, err := mpReader.NextPart()
	if part == nil || err != nil {
		t.Fatal("Expected non-nil part, nil error")
	}
	mpname, mpf, err := newFileFromPart("", part, mpReader)
	if mpf == nil || err != nil {
		t.Fatal("Expected non-nil multipartFile, nil error")
	}
	mpr, ok := mpf.(File)
	if !ok {
		t.Fatal("Expected file to be a regular file")
	}
	if mpname != "beep.txt" {
		// Fixed failure message: this check is for beep.txt, not file.txt.
		t.Fatal("Expected filename to be \"beep.txt\"")
	}
	if n, err := mpr.Read(buf); n != 4 || err != nil {
		t.Fatal("Expected to read from file", n, err)
	}
	if string(buf[:4]) != "beep" {
		t.Fatal("Data read was different than expected")
	}

	// Second entry: the directory boop.
	part, err = mpReader.NextPart()
	if part == nil || err != nil {
		t.Fatal("Expected non-nil part, nil error")
	}
	mpname, mpf, err = newFileFromPart("", part, mpReader)
	if mpf == nil || err != nil {
		t.Fatal("Expected non-nil multipartFile, nil error")
	}
	mpd, ok := mpf.(Directory)
	if !ok {
		t.Fatal("Expected file to be a directory")
	}
	if mpname != "boop" {
		t.Fatal("Expected filename to be \"boop\"")
	}

	// boop's two children, read as parts relative to "boop".
	part, err = mpReader.NextPart()
	if part == nil || err != nil {
		t.Fatal("Expected non-nil part, nil error")
	}
	cname, child, err := newFileFromPart("boop", part, mpReader)
	if child == nil || err != nil {
		t.Fatal("Expected to be able to read a child file")
	}
	if _, ok := child.(File); !ok {
		t.Fatal("Expected file to not be a directory")
	}
	if cname != "a.txt" {
		t.Fatal("Expected filename to be \"a.txt\"")
	}

	part, err = mpReader.NextPart()
	if part == nil || err != nil {
		t.Fatal("Expected non-nil part, nil error")
	}
	cname, child, err = newFileFromPart("boop", part, mpReader)
	if child == nil || err != nil {
		t.Fatal("Expected to be able to read a child file")
	}
	if _, ok := child.(File); !ok {
		t.Fatal("Expected file to not be a directory")
	}
	if cname != "b.txt" {
		t.Fatal("Expected filename to be \"b.txt\"")
	}

	// The directory's own iterator sees no remaining children.
	it := mpd.Entries()
	if it.Next() {
		t.Fatal("Expected to get false")
	}

	// Final entry: file.txt, then clean EOF.
	part, err = mpReader.NextPart()
	if part == nil || err != nil {
		t.Fatal("Expected non-nil part, nil error")
	}
	mpname, mpf, err = newFileFromPart("", part, mpReader)
	if mpf == nil || err != nil {
		t.Fatal("Expected non-nil multipartFile, nil error")
	}
	if mpname != "file.txt" {
		// Fixed failure message: this check is for file.txt, not b.txt.
		t.Fatal("Expected filename to be \"file.txt\"")
	}

	part, err = mpReader.NextPart()
	if part != nil || err != io.EOF {
		t.Fatal("Expected to get (nil, io.EOF)")
	}
}
|
package duck
import "design-patterns-go/strategyPattern/flyingBehavior"
// MallardDuck is a concrete duck with a fixed display name.
type MallardDuck struct {
	name string
}

// NewMallardDuck returns a MallardDuck named "Mallard Duck".
func NewMallardDuck() MallardDuck {
	duck := MallardDuck{}
	duck.name = "Mallard Duck"
	return duck
}

// Swim describes the duck's swimming behavior.
func (md MallardDuck) Swim() string {
	return "I swim"
}

// Display returns the duck's display name.
func (md MallardDuck) Display() string {
	return md.name
}
// Fly delegates flight to the strategy-pattern flying behavior
// (wings-based flight for a mallard).
// NOTE(review): the composite literal uses a positional field; consider a
// keyed literal once the FlyingBehavior field name is confirmed.
func (md MallardDuck) Fly() string {
	fly := flyingbehavior.FlyingBehavior{flyingbehavior.FlyWithWings{}}
	return fly.PerformFly()
}
|
package files
import (
"errors"
"io"
"io/ioutil"
"mime"
"mime/multipart"
"net/url"
"path"
"strings"
)
// Media types used to label parts of an encoded directory stream, plus the
// header key they are read from.
const (
	multipartFormdataType = "multipart/form-data"
	multipartMixedType = "multipart/mixed"
	applicationDirectory = "application/x-directory"
	applicationSymlink = "application/symlink"
	applicationFile = "application/octet-stream"
	contentTypeHeader = "Content-Type"
)

// ErrPartOutsideParent marks a part that does not belong under the current
// directory; ErrPartInChildTree marks one nested below a subdirectory.
var ErrPartOutsideParent = errors.New("file outside parent dir")
var ErrPartInChildTree = errors.New("file in child tree")
// multipartFile implements Node, and is created from a `multipart.Part`.
// Directory-typed instances additionally iterate their child parts via the
// shared reader; the root instance has a reader but no part.
type multipartFile struct {
	Node
	part *multipart.Part // nil for the root wrapper
	reader *peekReader // shared pushback-capable part source
	mediatype string // parsed media type of this part
}
// NewFileFromPartReader wraps a multipart.Reader whose media type denotes a
// directory (multipart/form-data or application/x-directory) in a Directory
// node. Any other media type yields ErrNotDirectory.
func NewFileFromPartReader(reader *multipart.Reader, mediatype string) (Directory, error) {
	if !isDirectory(mediatype) {
		return nil, ErrNotDirectory
	}
	root := &multipartFile{
		mediatype: mediatype,
		reader:    &peekReader{r: reader},
	}
	return root, nil
}
// newFileFromPart turns a multipart part into a named Node, relative to the
// directory named by parent.
//
// It returns ErrPartInChildTree when the part lives below a subdirectory of
// parent, and ErrPartOutsideParent when it does not belong under parent at
// all; iterators use these sentinels to delimit directory boundaries.
func newFileFromPart(parent string, part *multipart.Part, reader *peekReader) (string, Node, error) {
	f := &multipartFile{
		part: part,
		reader: reader,
	}
	dir, base := path.Split(f.fileName())
	dir = path.Clean(dir)
	parent = path.Clean(parent)
	// Normalize "." to "" so root-level comparisons line up.
	if dir == "." {
		dir = ""
	}
	if parent == "." {
		parent = ""
	}
	if dir != parent {
		// NOTE(review): this is a plain prefix check, so "foobar" would be
		// treated as inside "foo" — confirm names cannot collide that way
		// upstream.
		if strings.HasPrefix(dir, parent) {
			return "", nil, ErrPartInChildTree
		}
		return "", nil, ErrPartOutsideParent
	}
	contentType := part.Header.Get(contentTypeHeader)
	switch contentType {
	case applicationSymlink:
		// The symlink target travels as the part body.
		out, err := ioutil.ReadAll(part)
		if err != nil {
			return "", nil, err
		}
		return base, NewLinkFile(string(out), nil), nil
	case "": // default to application/octet-stream
		fallthrough
	case applicationFile:
		return base, &ReaderFile{
			reader: part,
			abspath: part.Header.Get("abspath"),
		}, nil
	}
	// Anything else: parse the media type properly (parameters allowed).
	var err error
	f.mediatype, _, err = mime.ParseMediaType(contentType)
	if err != nil {
		return "", nil, err
	}
	if !isDirectory(f.mediatype) {
		return base, &ReaderFile{
			reader: part,
			abspath: part.Header.Get("abspath"),
		}, nil
	}
	return base, f, nil
}
// isDirectory reports whether the media type denotes a directory part.
func isDirectory(mediatype string) bool {
	switch mediatype {
	case multipartFormdataType, applicationDirectory:
		return true
	}
	return false
}
// multipartIterator iterates over the child parts of one directory-typed
// multipartFile.
type multipartIterator struct {
	f *multipartFile
	curFile Node // entry most recently produced by Next
	curName string // its name, relative to f
	err error // first error encountered, surfaced via Err
}

// Name returns the name of the current entry.
func (it *multipartIterator) Name() string {
	return it.curName
}

// Node returns the current entry.
func (it *multipartIterator) Node() Node {
	return it.curFile
}
// Next advances to the next child entry of this directory part. It returns
// false when the stream is exhausted, when a part belonging outside this
// directory is encountered (that part is pushed back for an enclosing
// iterator), or on error (see Err).
func (it *multipartIterator) Next() bool {
	// A nil reader means this node was built from a part only and has no
	// children to iterate.
	if it.f.reader == nil {
		return false
	}
	var part *multipart.Part
	for {
		var err error
		part, err = it.f.reader.NextPart()
		if err != nil {
			if err == io.EOF {
				return false
			}
			it.err = err
			return false
		}
		name, cf, err := newFileFromPart(it.f.fileName(), part, it.f.reader)
		if err == ErrPartOutsideParent {
			// The part belongs above this directory: stop and push it back
			// so the enclosing iterator can consume it.
			break
		}
		if err != ErrPartInChildTree {
			it.curFile = cf
			it.curName = name
			it.err = err
			return err == nil
		}
		// ErrPartInChildTree: the part sits under an unconsumed
		// subdirectory; keep scanning past it.
	}
	it.err = it.f.reader.put(part)
	return false
}

// Err reports the first error encountered while iterating, if any.
func (it *multipartIterator) Err() error {
	return it.err
}
// Entries returns an iterator over this directory part's children.
func (f *multipartFile) Entries() DirIterator {
	return &multipartIterator{f: f}
}

// fileName returns the part's file name, URL-unescaped when possible.
// The root multipartFile has no part and reports "".
func (f *multipartFile) fileName() string {
	if f == nil || f.part == nil {
		return ""
	}
	filename, err := url.QueryUnescape(f.part.FileName())
	if err != nil {
		// if there is a unescape error, just treat the name as unescaped
		return f.part.FileName()
	}
	return filename
}

// Close closes the underlying part, if any.
func (f *multipartFile) Close() error {
	if f.part != nil {
		return f.part.Close()
	}
	return nil
}

// Size is unsupported for multipart-backed nodes.
func (f *multipartFile) Size() (int64, error) {
	return 0, ErrNotSupported
}
type PartReader interface {
NextPart() (*multipart.Part, error)
}
type peekReader struct {
r PartReader
next *multipart.Part
}
func (pr *peekReader) NextPart() (*multipart.Part, error) {
if pr.next != nil {
p := pr.next
pr.next = nil
return p, nil
}
if pr.r == nil {
return nil, io.EOF
}
p, err := pr.r.NextPart()
if err == io.EOF {
pr.r = nil
}
return p, err
}
func (pr *peekReader) put(p *multipart.Part) error {
if pr.next != nil {
return errors.New("cannot put multiple parts")
}
pr.next = p
return nil
}
// Compile-time check that multipartFile satisfies the Directory interface.
var _ Directory = &multipartFile{}
|
package semt
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01600105 is the XML document wrapper for the semt.016.001.05
// message (IntraPositionMovementPostingReport).
type Document01600105 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:semt.016.001.05 Document"`
	Message *IntraPositionMovementPostingReportV05 `xml:"IntraPosMvmntPstngRpt"`
}
// AddMessage allocates the document's message body and returns it so the
// caller can populate it.
func (d *Document01600105) AddMessage() *IntraPositionMovementPostingReportV05 {
	msg := &IntraPositionMovementPostingReportV05{}
	d.Message = msg
	return msg
}
// Scope
// An account servicer sends an IntraPositionMovementPostingReport to an account owner to provide the details of increases and decreases in securities with a given status within a holding, that is, intra-position transfers, which occurred during a specified period, for all or selected securities in a specified safekeeping account which the account servicer holds for the account owner.
//
// The account servicer/owner relationship may be:
// - a central securities depository or another settlement market infrastructure acting on behalf of their participants
// - an agent (sub-custodian) acting on behalf of their global custodian customer, or
// - a custodian acting on behalf of an investment management institution or a broker/dealer.
//
// Usage:
// The message may also be used to:
// - re-send a message previously sent,
// - provide a third party with a copy of a message for information,
// - re-send to a third party a copy of a message for information
// using the relevant elements in the Business Application Header.
type IntraPositionMovementPostingReportV05 struct {
	// Page number of the message (within a statement) and continuation indicator to indicate that the statement is to continue or that the message is the last page of the statement.
	Pagination *iso20022.Pagination `xml:"Pgntn"`
	// General information related to report.
	StatementGeneralDetails *iso20022.Statement43 `xml:"StmtGnlDtls"`
	// Party that legally owns the account.
	AccountOwner *iso20022.PartyIdentification92Choice `xml:"AcctOwnr,omitempty"`
	// Account to or from which a securities entry is made.
	SafekeepingAccount *iso20022.SecuritiesAccount24 `xml:"SfkpgAcct"`
	// Reporting per financial instrument.
	FinancialInstrument []*iso20022.FinancialInstrumentDetails21 `xml:"FinInstrm,omitempty"`
}
// AddPagination allocates and returns the pagination block.
func (i *IntraPositionMovementPostingReportV05) AddPagination() *iso20022.Pagination {
	i.Pagination = &iso20022.Pagination{}
	return i.Pagination
}

// AddStatementGeneralDetails allocates and returns the statement details block.
func (i *IntraPositionMovementPostingReportV05) AddStatementGeneralDetails() *iso20022.Statement43 {
	i.StatementGeneralDetails = &iso20022.Statement43{}
	return i.StatementGeneralDetails
}

// AddAccountOwner allocates and returns the account owner block.
func (i *IntraPositionMovementPostingReportV05) AddAccountOwner() *iso20022.PartyIdentification92Choice {
	i.AccountOwner = &iso20022.PartyIdentification92Choice{}
	return i.AccountOwner
}

// AddSafekeepingAccount allocates and returns the safekeeping account block.
func (i *IntraPositionMovementPostingReportV05) AddSafekeepingAccount() *iso20022.SecuritiesAccount24 {
	i.SafekeepingAccount = &iso20022.SecuritiesAccount24{}
	return i.SafekeepingAccount
}

// AddFinancialInstrument appends a new financial-instrument entry and
// returns it so the caller can populate it.
func (i *IntraPositionMovementPostingReportV05) AddFinancialInstrument() *iso20022.FinancialInstrumentDetails21 {
	entry := &iso20022.FinancialInstrumentDetails21{}
	i.FinancialInstrument = append(i.FinancialInstrument, entry)
	return entry
}
|
package main
import (
"fmt"
"unsafe"
)
// Untyped package-level constants demonstrating compile-time evaluation.
const (
	a="abc"
	b=len(a) // 3: length of a constant string, computed at compile time
	c=unsafe.Sizeof(a) // size of the string header (platform-dependent), not of "abc"
)
// Books holds a book's title and author (fields are unexported).
type Books struct {
	title string
	author string
}
// Phone is anything that can place a call.
type Phone interface {
	call()
}
// NokiaPhone is a Phone implementation that announces itself when called.
type NokiaPhone struct {
}

// call prints the Nokia greeting to stdout.
func (p NokiaPhone) call() {
	fmt.Println("I am Nokia, I can call you!")
}
// IPhone is a Phone implementation that announces itself when called.
type IPhone struct {
}

// call prints the iPhone greeting to stdout.
func (p IPhone) call() {
	fmt.Println("I am iphone, i can call you!")
}
/*
The built-in error interface, for reference:
type error interface {
	Error() string
}
*/

// Error2 is a demo interface that MyExError satisfies alongside error.
type Error2 interface {
	Call2() string
}
// MyExError is an error carrying a numeric code and a message; it also
// satisfies the Error2 interface via Call2.
type MyExError struct {
	code int
	ermsg string
}

// Error formats the error as "<code><ermsg>5", preserving the original
// "%d%s%d" layout with its trailing literal 5.
func (er MyExError) Error() string {
	return fmt.Sprintf("%d%s5", er.code, er.ermsg)
}

// Call2 returns a fixed demo message.
func (er MyExError) Call2() string {
	return "I am iphone, i can call you!!!!!!!"
}
// main demonstrates map iteration, struct fields, and interface dispatch.
func main() {
	kvs := map[string]string{"a": "apple", "b": "blue"}
	// NB: map iteration order is randomized by the runtime.
	for k, v := range kvs {
		fmt.Printf("%s -> %s\n", k, v)
	}

	book := Books{title: "bbb", author: "zhao"}
	fmt.Println("title:", book.title, "author:", book.author)

	var phone Phone = new(NokiaPhone)
	phone.call()

	var myer Error2 = &MyExError{1001, "this is error 1001"}
	fmt.Println(myer.Call2())
}
|
package proxyMonitor
import (
"github.com/stretchr/testify/assert"
"master/master"
"net/http"
"net/http/httptest"
"testing"
"time"
)
// Test fixtures shared by both tests below. The screaming-snake names
// predate Go's MixedCaps convention and are kept because both tests
// reference them.
const (
	TEST_SLAVE_IP = "10.0.214" // NOTE(review): only three octets — confirm intentional
	TEST_SLAVE_URL = "http://10.0.214:8686"
	TEST_SLAVE_NAME = "yoyo"
)
// TODO: There is a pretty much identical function in slave for SendURLValueMessageToSlave. Refactor into network package.

// TestRequestProxyToAddNewSlaveToIPTables verifies that exactly one request
// is sent to the proxy and that it carries the slave IP in the
// "IPAddressToAdd" form value.
func TestRequestProxyToAddNewSlaveToIPTables(t *testing.T) {
	var numberOfMessagesSent = 0
	var slaveIP = ""
	testProxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, request *http.Request) {
		numberOfMessagesSent++
		slaveIP = request.PostFormValue("IPAddressToAdd")
	}))
	// Shut the test server down so its listener and goroutines are released
	// (the original leaked it).
	defer testProxy.Close()
	err := RequestProxyToAddNewSlaveToIPTables(testProxy.URL, TEST_SLAVE_IP)
	assert.Equal(t, 1, numberOfMessagesSent)
	assert.Equal(t, TEST_SLAVE_IP, slaveIP)
	assert.Nil(t, err)
}
// TestGetSlaveIPAddresses checks that getSlaveIPAddresses reduces each
// slave's URL ("http://10.0.214:8686") to its bare IP ("10.0.214").
func TestGetSlaveIPAddresses(t *testing.T) {
	slaveMap := make(map[string]master.Slave)
	slaveMap[TEST_SLAVE_NAME] = master.Slave{URL: TEST_SLAVE_URL, Heartbeat: time.Now(), PreviouslyDisplayedURL: "http://google.com", DisplayedURL: "http://google.com"}
	IPAddresses := getSlaveIPAddresses(slaveMap)
	assert.Equal(t, []string{TEST_SLAVE_IP}, IPAddresses)
}
|
package server
import (
"log"
"net"
"time"
// "github.com/lexkong/log"
"google.golang.org/grpc"
pb "logstream/pkg/proto"
"logstream/pkg/utils"
)
// NewServer constructs a Server that will listen on hostPort and use
// writeFx as the reader service's write frequency.
func NewServer(hostPort string, writeFx time.Duration) *Server {
	srv := &Server{
		localAddr: hostPort,
		writeFx:   writeFx,
	}
	srv.grpcSrv = grpc.NewServer()
	return srv
}
// Server wraps a gRPC server bound to a fixed local address.
type Server struct {
	grpcSrv *grpc.Server
	localAddr string // host:port to listen on
	writeFx time.Duration // write frequency handed to the reader service
}
// Start registers the reader service and serves gRPC on the configured
// address. It blocks until the server stops, and exits the process if the
// listener cannot be created or serving fails.
func (s *Server) Start() {
	log.Println("starting server on host port", s.localAddr)
	pb.RegisterReaderServer(s.grpcSrv, utils.NewReaderService(s.writeFx))
	lis, err := net.Listen("tcp", s.localAddr)
	if err != nil {
		// Include the underlying error, previously dropped.
		log.Fatalf("failed to listen on hostport %s: %v", s.localAddr, err)
	}
	// Serve blocks; its error was previously discarded entirely.
	if err := s.grpcSrv.Serve(lis); err != nil {
		log.Fatalf("grpc server on %s stopped: %v", s.localAddr, err)
	}
}
|
package helper
import "testing"
// TestPluralize exercises Plural on an already-plural word. It only logs
// the result (visible with -v) and asserts nothing.
// NOTE(review): consider asserting the expected output once the intended
// behavior of Plural("addresses") is pinned down.
func TestPluralize(t *testing.T) {
	t.Log(Plural("addresses"))
}
|
package concur
import (
"fmt"
"time"
"errors"
)
// Runner is an interface describing anything
// that is capable of running Tasks.
// Run returns a non-nil error if any Task fails.
type Runner interface {
	Run(tasks ...Task) error
}
// Concurrent creates a new ConcurrentRunner, which runs Tasks concurrently.
// The zero value has no timeout; see SetTimeout.
func Concurrent() ConcurrentRunner {
	var runner ConcurrentRunner
	return runner
}
// SetTimeout sets the timeout for the current Runner.
// If execution of any Tasks exceeds the timeout, the Runner will return an error.
// Please note that the remaining Tasks will continue to execute in their own
// goroutines.
//
// The receiver is a value, so a modified copy is returned; callers must use
// the return value.
func (runner ConcurrentRunner) SetTimeout(dur time.Duration) ConcurrentRunner {
	runner.timeout = &dur
	return runner
}
// ConcurrentRunner runs Tasks concurrently.
type ConcurrentRunner struct {
	timeout *time.Duration // nil means no timeout
	successChan chan bool // one send per task that completed without error
	errorChan chan error // one send per task that failed or panicked
	timeoutChan <-chan time.Time // fires once when the timeout elapses
}
// Run takes a list of Tasks and runs them concurrently.
// An error is returned if any Tasks return an error.
// Please note that one Task returning an error
// will not halt execution of the remaining Tasks.
//
// Run blocks until execution of all Tasks is complete (or the configured
// timeout elapses).
func (runner ConcurrentRunner) Run(tasks ...Task) (err error) {
	if runner.timeout != nil {
		timer := time.NewTimer(*runner.timeout)
		runner.timeoutChan = timer.C
		defer timer.Stop()
	}
	// Buffered to len(tasks) so that tasks finishing after a timeout can
	// still send their result without blocking forever (or panicking if a
	// caller were to close the channels).
	runner.errorChan = make(chan error, len(tasks))
	runner.successChan = make(chan bool, len(tasks))
	for _, task := range tasks {
		go func(task Task) {
			defer func() {
				if r := recover(); r != nil {
					// Convert the panic into an error. This must be a
					// goroutine-local variable: the original assigned to
					// Run's named return here, which is a data race across
					// task goroutines and with Run's own return.
					var rerr error
					switch x := r.(type) {
					case string:
						rerr = errors.New(x)
					case error:
						rerr = x
					default:
						rerr = fmt.Errorf("%+v", r)
					}
					runner.errorChan <- rerr
				}
			}()
			if taskErr := task.Exec(); taskErr != nil {
				runner.errorChan <- taskErr
			} else {
				runner.successChan <- true
			}
		}(task)
	}
	err = runner.waitOnChannels(len(tasks))
	return err
}
// waitOnChannels collects one success or error signal per task, folding
// errors into a CumulativeError. On timeout it returns immediately with a
// timeout error added to whatever has accumulated.
func (runner ConcurrentRunner) waitOnChannels(num int) error {
	var cumulativeErr CumulativeError
	for i := 0; i < num; {
		select {
		case err := <-runner.errorChan:
			cumulativeErr.add(err)
			i++
		case <-runner.successChan:
			i++
		case <-runner.timeoutChan:
			cumulativeErr.add(errors.New("timed out waiting for task(s) to complete"))
			// Deliberately leave the channels open: tasks may still be
			// running, and the original close() calls here made any late
			// send panic with "send on closed channel".
			return cumulativeErr
		}
	}
	if cumulativeErr.isError() {
		return cumulativeErr
	}
	return nil
}
|
package Collections
// Dict is a trie-backed dictionary of words with frequencies.
type Dict struct {
	Root *MapTrieNode
	Words []*MapTrieNode // pointer to every word-terminal node, for easy traversal
	TotalFrequency int // sum of all inserted word frequencies
}

// NewDict returns an empty dictionary with an initialized root node.
func NewDict() *Dict {
	d := &Dict{Root: newMapTrieNode()}
	return d
}

// MapTrieNode is a trie node whose children are stored in a hash map keyed
// by rune.
type MapTrieNode struct {
	Sons map[rune]*MapTrieNode // child nodes
	IsEnd bool // true if this node terminates a word
	Character rune // the rune stored at this node
	Frequency int // frequency of the word ending here
	Weight float64 // -log(freq/total) = log(total)-log(freq); used as a graph edge weight
}

// newMapTrieNode allocates a node with an empty child map.
func newMapTrieNode() *MapTrieNode {
	return &MapTrieNode{
		Sons: make(map[rune]*MapTrieNode),
	}
}

// loadDictionaryFile loads a dictionary file. (Not implemented.)
func (d *Dict) loadDictionaryFile(filePath string) {
}

// Insert adds a word with the given frequency to the trie. Empty words are
// ignored.
func (d *Dict) Insert(word string, frequency int) {
	if len(word) == 0 {
		return
	}
	cur := d.Root
	for _, letter := range word {
		son, ok := cur.Sons[letter]
		if !ok {
			son = newMapTrieNode()
			son.Character = letter
			cur.Sons[letter] = son
		}
		cur = son
	}
	cur.IsEnd = true
	cur.Frequency = frequency
	d.TotalFrequency += frequency
	d.Words = append(d.Words, cur)
}

// findMatchWord scans text starting at from for dictionary words beginning
// at that position. (Not implemented.)
func (d *Dict) findMatchWord(words []rune, from int) {
}
|
package pkg
import (
b64 "encoding/base64"
"errors"
"io/ioutil"
"log"
"net/http"
)
type ScheduleClientConfig struct {
User string
Password string
Date string
BaseURL string
}
func RequestXML(config ScheduleClientConfig) ([]byte, error) {
requestURL := config.BaseURL + config.Date + ".xml"
httpClient := http.Client{}
request, _ := http.NewRequest("GET", requestURL, nil)
request.Header.Add("authorization", encodeAsBasicAuth(config.User, config.Password))
log.Println("executing request to " + requestURL)
resp, respErr := httpClient.Do(request)
if respErr != nil {
log.Println(respErr.Error())
return nil, errors.New("HTTP Error: " + respErr.Error())
}
data, parseBodyErr := ioutil.ReadAll(resp.Body)
if parseBodyErr == nil {
return data, nil
} else {
return nil, errors.New("Parse Response Body Error: " + parseBodyErr.Error())
}
}
func encodeAsBasicAuth(user string, password string) string {
return "Basic " + b64.URLEncoding.EncodeToString([]byte(user+":"+password))
}
|
package main
import (
"github.com/funkygao/gobench/util"
"sync"
"testing"
)
// nop is the empty baseline function both benchmarks invoke.
var nop = func() {}

// main benchmarks a bare nop call against sync.Once.Do(nop) and prints the
// results via the util helper.
func main() {
	b := testing.Benchmark(benchmarkNop)
	util.ShowBenchResult("nop", b)
	b = testing.Benchmark(benchmarkOnceNop)
	util.ShowBenchResult("once.do(nop)", b)
}
// benchmarkNop measures the cost of calling an empty function directly.
func benchmarkNop(b *testing.B) {
	for i := 0; i < b.N; i++ {
		nop()
	}
}

// benchmarkOnceNop measures sync.Once.Do with an already-fired Once, i.e.
// the fast path taken on every call after the first.
func benchmarkOnceNop(b *testing.B) {
	var once sync.Once
	for i := 0; i < b.N; i++ {
		once.Do(nop)
	}
}
|
package api
import (
"context"
"encoding/json"
"errors"
"log"
"net/http"
"strings"
)
// Handler is the plain handler-func shape produced by Wrapper.
type Handler func(w http.ResponseWriter, r *http.Request)

// Response is the JSON envelope every wrapped endpoint returns.
type Response struct {
	Res  interface{} `json:"response"`
	Base Base        `json:"base"`
}

// Base carries error metadata common to all responses.
// NOTE(review): Error has no json tag, so it serializes as "Error" while the
// envelope fields are lowercase — confirm clients expect that casing.
type Base struct {
	Error string
}

// ErrUnAuthorized signals that the caller lacks valid credentials.
var ErrUnAuthorized = errors.New("unauthorized")
// Wrapper adapts a handler returning (result, error) into a Handler that
// renders the {response, base} JSON envelope. Handler errors are logged in
// full but reported to the client only as a generic message.
func (api *API) Wrapper(hand func(w http.ResponseWriter, r *http.Request) (interface{}, error)) Handler {
	return Handler(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		result, handErr := hand(w, r)
		var resp Response
		if handErr != nil {
			//resp.Base.Error = err.Error()
			log.Println("Error in api=", handErr)
			resp.Base.Error = "Something Went Wrong"
		}
		resp.Res = result
		payload, marshalErr := json.Marshal(resp)
		if marshalErr != nil {
			log.Println("[Error while Marshall]Error in api=", marshalErr)
		}
		if _, writeErr := w.Write(payload); writeErr != nil {
			log.Println("Error while Writing Response=", writeErr)
		}
	})
}
// noListing redirects directory-style paths (empty or ending in "/") to
// /login instead of serving them; all other paths fall through to next.
func noListing(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		path := r.URL.Path
		if path != "" && !strings.HasSuffix(path, "/") {
			next.ServeHTTP(w, r)
			return
		}
		http.Redirect(w, r, "/login", http.StatusSeeOther)
	})
}
func GetUserId(ctx context.Context) int64 {
userId, _ := ctx.Value("user_id").(int64)
return userId
}
func GetUserEmail(ctx context.Context) string {
email, _ := ctx.Value("user_email").(string)
return email
}
|
package tstune
import (
"bufio"
"io"
"os"
)
// exitLabel is the label passed to the printer when the program terminates.
const exitLabel = "exit"

// exitFn is indirected so tests can stub out os.Exit.
var exitFn = os.Exit

// ioHandler manages the reading and writing for a Tuner
type ioHandler struct {
	p      printer       // handles output
	br     *bufio.Reader // handles input
	out    io.Writer
	outErr io.Writer
}
// exit prints a formatted error via the printer and terminates the process
// with errCode through the stubbable exitFn.
func (h *ioHandler) exit(errCode int, format string, args ...interface{}) {
	h.p.Error(exitLabel, format, args...)
	exitFn(errCode)
}
// errorExit terminates the process with exit code 1, reporting err.
func (h *ioHandler) errorExit(err error) {
	// Pass the message as an argument rather than as the format string, so
	// any '%' characters in the error are not misread as formatting verbs.
	h.exit(1, "%s", err.Error())
}
|
package Services
import (
"fmt"
"github.com/james-vaughn/PersonalWebsite/Models"
"github.com/james-vaughn/PersonalWebsite/Repositories"
)
// PagesService exposes page lookups backed by a PagesRepository.
type PagesService struct {
	PagesRepository *Repositories.PagesRepository
}
// NewPagesService wires a PagesService to its backing repository.
func NewPagesService(PagesRepository *Repositories.PagesRepository) *PagesService {
	service := &PagesService{
		PagesRepository: PagesRepository,
	}
	return service
}
// GetPagesFor returns every page registered for the given controller.
func (s *PagesService) GetPagesFor(controller string) []Models.Page {
	pages := s.PagesRepository.GetPagesFor(controller)
	return pages
}
// GetPageFromList returns the page in pages whose ID matches id, or the
// zero-value Page when no match exists.
func (s *PagesService) GetPageFromList(pages []Models.Page, id uint) Models.Page {
	for i := range pages {
		if pages[i].ID == id {
			return pages[i]
		}
	}
	return Models.Page{}
}
// GetUrlFor builds the routing path "/<controller>/<url>" for page.
func (s *PagesService) GetUrlFor(page Models.Page) string {
	url := fmt.Sprintf("/%s/%s", page.Controller, page.Url)
	return url
}
|
package service
import (
"github.com/jinzhu/gorm"
)
//Group model used for all groups
type Group struct {
	gorm.Model
	// Struct tags are space-separated key:"value" pairs; the original
	// `json:"name";gorm:not null` was malformed, so gorm never saw its tag.
	Name    string `json:"name" gorm:"not null"`
	Private bool   `json:"private"`
}
//GroupMember many2many for groups
type GroupMember struct {
	UserID  uint `json:"user_id"`  // member's user id
	GroupID uint `json:"group_id"` // group the user belongs to
}
//GroupAdmin denotes who is an admin on a group
type GroupAdmin struct {
	UserID  uint `json:"user_id"`  // admin's user id
	GroupID uint `json:"group_id"` // group the user administers
}
//Post used for group posts
type Post struct {
	gorm.Model
	GroupID uint `json:"group_id"`
	UserID  uint `json:"user_id"`
	// The original tag `json:"content";gorm:"type:varchar(500)` used a
	// semicolon separator and was missing the closing quote, so the gorm
	// column type was silently ignored.
	Content string `json:"content" gorm:"type:varchar(500)"`
	Title   string `json:"title"`
}
//Comment connects to posts
type Comment struct {
	gorm.Model
	PostID uint `json:"post_id"`
	// Fixed malformed tag (semicolon separator, missing closing quote) so the
	// gorm column type actually applies.
	Content string `json:"content" gorm:"type:varchar(500)"`
	UserID  uint   `json:"user_id"`
}
//Token struct handles authentication
type Token struct {
	Key       string `json:"token"`      // opaque token value
	UserID    uint   `json:"user_id"`    // user the token authenticates
	ExpiresAt int64  `json:"expires_at"` // expiry as a unix timestamp (assumed — confirm with issuer)
}
// Service is a named endpoint reference.
type Service struct {
	Name string `json:"name"`
	URL  string `json:"url"`
}
|
package Models
type Tarefa struct {
ID int `json:NOT NULL AUTO_INCREMENT`
Text string `json:"text"`
Feito bool `json:"feito"`
Status string `json:"status"`
IDUsuario int `json:NOT NULL AUTO_INCREMENT`
}
// TableName maps Tarefa to the "tarefa" database table.
func (b *Tarefa) TableName() string {
	const table = "tarefa"
	return table
}
|
// Copyright 2021 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
alphapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/containeraws/alpha/containeraws_alpha_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/alpha"
)
// AwsClusterServer implements the gRPC interface for AwsCluster.
type AwsClusterServer struct{}
// ProtoToAwsClusterControlPlaneRootVolumeVolumeTypeEnum converts a AwsClusterControlPlaneRootVolumeVolumeTypeEnum enum from its proto representation.
func ProtoToContainerawsAlphaAwsClusterControlPlaneRootVolumeVolumeTypeEnum(e alphapb.ContainerawsAlphaAwsClusterControlPlaneRootVolumeVolumeTypeEnum) *alpha.AwsClusterControlPlaneRootVolumeVolumeTypeEnum {
	if e == 0 {
		return nil
	}
	name, ok := alphapb.ContainerawsAlphaAwsClusterControlPlaneRootVolumeVolumeTypeEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the generated proto prefix to recover the bare DCL enum value.
	value := alpha.AwsClusterControlPlaneRootVolumeVolumeTypeEnum(name[len("ContainerawsAlphaAwsClusterControlPlaneRootVolumeVolumeTypeEnum"):])
	return &value
}
// ProtoToAwsClusterControlPlaneMainVolumeVolumeTypeEnum converts a AwsClusterControlPlaneMainVolumeVolumeTypeEnum enum from its proto representation.
func ProtoToContainerawsAlphaAwsClusterControlPlaneMainVolumeVolumeTypeEnum(e alphapb.ContainerawsAlphaAwsClusterControlPlaneMainVolumeVolumeTypeEnum) *alpha.AwsClusterControlPlaneMainVolumeVolumeTypeEnum {
	if e == 0 {
		return nil
	}
	name, ok := alphapb.ContainerawsAlphaAwsClusterControlPlaneMainVolumeVolumeTypeEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the generated proto prefix to recover the bare DCL enum value.
	value := alpha.AwsClusterControlPlaneMainVolumeVolumeTypeEnum(name[len("ContainerawsAlphaAwsClusterControlPlaneMainVolumeVolumeTypeEnum"):])
	return &value
}
// ProtoToAwsClusterStateEnum converts a AwsClusterStateEnum enum from its proto representation.
func ProtoToContainerawsAlphaAwsClusterStateEnum(e alphapb.ContainerawsAlphaAwsClusterStateEnum) *alpha.AwsClusterStateEnum {
	if e == 0 {
		return nil
	}
	name, ok := alphapb.ContainerawsAlphaAwsClusterStateEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the generated proto prefix to recover the bare DCL enum value.
	value := alpha.AwsClusterStateEnum(name[len("ContainerawsAlphaAwsClusterStateEnum"):])
	return &value
}
// ProtoToAwsClusterNetworking converts a AwsClusterNetworking resource from its proto representation.
func ProtoToContainerawsAlphaAwsClusterNetworking(p *alphapb.ContainerawsAlphaAwsClusterNetworking) *alpha.AwsClusterNetworking {
	if p == nil {
		return nil
	}
	obj := &alpha.AwsClusterNetworking{
		VPCId: dcl.StringOrNil(p.VpcId),
	}
	// Copy each repeated string field with a single variadic append.
	obj.PodAddressCidrBlocks = append(obj.PodAddressCidrBlocks, p.GetPodAddressCidrBlocks()...)
	obj.ServiceAddressCidrBlocks = append(obj.ServiceAddressCidrBlocks, p.GetServiceAddressCidrBlocks()...)
	obj.ServiceLoadBalancerSubnetIds = append(obj.ServiceLoadBalancerSubnetIds, p.GetServiceLoadBalancerSubnetIds()...)
	return obj
}
// ProtoToAwsClusterControlPlane converts a AwsClusterControlPlane resource from its proto representation.
func ProtoToContainerawsAlphaAwsClusterControlPlane(p *alphapb.ContainerawsAlphaAwsClusterControlPlane) *alpha.AwsClusterControlPlane {
	if p == nil {
		return nil
	}
	obj := &alpha.AwsClusterControlPlane{
		Version:                   dcl.StringOrNil(p.Version),
		InstanceType:              dcl.StringOrNil(p.InstanceType),
		SshConfig:                 ProtoToContainerawsAlphaAwsClusterControlPlaneSshConfig(p.GetSshConfig()),
		IamInstanceProfile:        dcl.StringOrNil(p.IamInstanceProfile),
		RootVolume:                ProtoToContainerawsAlphaAwsClusterControlPlaneRootVolume(p.GetRootVolume()),
		MainVolume:                ProtoToContainerawsAlphaAwsClusterControlPlaneMainVolume(p.GetMainVolume()),
		DatabaseEncryption:        ProtoToContainerawsAlphaAwsClusterControlPlaneDatabaseEncryption(p.GetDatabaseEncryption()),
		AwsServicesAuthentication: ProtoToContainerawsAlphaAwsClusterControlPlaneAwsServicesAuthentication(p.GetAwsServicesAuthentication()),
	}
	// Copy each repeated string field with a single variadic append.
	obj.SubnetIds = append(obj.SubnetIds, p.GetSubnetIds()...)
	obj.SecurityGroupIds = append(obj.SecurityGroupIds, p.GetSecurityGroupIds()...)
	return obj
}
// ProtoToAwsClusterControlPlaneSshConfig converts a AwsClusterControlPlaneSshConfig resource from its proto representation.
func ProtoToContainerawsAlphaAwsClusterControlPlaneSshConfig(p *alphapb.ContainerawsAlphaAwsClusterControlPlaneSshConfig) *alpha.AwsClusterControlPlaneSshConfig {
	if p == nil {
		return nil
	}
	return &alpha.AwsClusterControlPlaneSshConfig{
		Ec2KeyPair: dcl.StringOrNil(p.Ec2KeyPair),
	}
}
// ProtoToAwsClusterControlPlaneRootVolume converts a AwsClusterControlPlaneRootVolume resource from its proto representation.
func ProtoToContainerawsAlphaAwsClusterControlPlaneRootVolume(p *alphapb.ContainerawsAlphaAwsClusterControlPlaneRootVolume) *alpha.AwsClusterControlPlaneRootVolume {
	if p == nil {
		return nil
	}
	return &alpha.AwsClusterControlPlaneRootVolume{
		SizeGib:    dcl.Int64OrNil(p.SizeGib),
		VolumeType: ProtoToContainerawsAlphaAwsClusterControlPlaneRootVolumeVolumeTypeEnum(p.GetVolumeType()),
		Iops:       dcl.Int64OrNil(p.Iops),
		KmsKeyArn:  dcl.StringOrNil(p.KmsKeyArn),
	}
}
// ProtoToAwsClusterControlPlaneMainVolume converts a AwsClusterControlPlaneMainVolume resource from its proto representation.
func ProtoToContainerawsAlphaAwsClusterControlPlaneMainVolume(p *alphapb.ContainerawsAlphaAwsClusterControlPlaneMainVolume) *alpha.AwsClusterControlPlaneMainVolume {
	if p == nil {
		return nil
	}
	return &alpha.AwsClusterControlPlaneMainVolume{
		SizeGib:    dcl.Int64OrNil(p.SizeGib),
		VolumeType: ProtoToContainerawsAlphaAwsClusterControlPlaneMainVolumeVolumeTypeEnum(p.GetVolumeType()),
		Iops:       dcl.Int64OrNil(p.Iops),
		KmsKeyArn:  dcl.StringOrNil(p.KmsKeyArn),
	}
}
// ProtoToAwsClusterControlPlaneDatabaseEncryption converts a AwsClusterControlPlaneDatabaseEncryption resource from its proto representation.
func ProtoToContainerawsAlphaAwsClusterControlPlaneDatabaseEncryption(p *alphapb.ContainerawsAlphaAwsClusterControlPlaneDatabaseEncryption) *alpha.AwsClusterControlPlaneDatabaseEncryption {
	if p == nil {
		return nil
	}
	return &alpha.AwsClusterControlPlaneDatabaseEncryption{
		KmsKeyArn: dcl.StringOrNil(p.KmsKeyArn),
	}
}
// ProtoToAwsClusterControlPlaneAwsServicesAuthentication converts a AwsClusterControlPlaneAwsServicesAuthentication resource from its proto representation.
func ProtoToContainerawsAlphaAwsClusterControlPlaneAwsServicesAuthentication(p *alphapb.ContainerawsAlphaAwsClusterControlPlaneAwsServicesAuthentication) *alpha.AwsClusterControlPlaneAwsServicesAuthentication {
	if p == nil {
		return nil
	}
	return &alpha.AwsClusterControlPlaneAwsServicesAuthentication{
		RoleArn:         dcl.StringOrNil(p.RoleArn),
		RoleSessionName: dcl.StringOrNil(p.RoleSessionName),
	}
}
// ProtoToAwsClusterAuthorization converts a AwsClusterAuthorization resource from its proto representation.
func ProtoToContainerawsAlphaAwsClusterAuthorization(p *alphapb.ContainerawsAlphaAwsClusterAuthorization) *alpha.AwsClusterAuthorization {
	if p == nil {
		return nil
	}
	obj := &alpha.AwsClusterAuthorization{}
	for _, user := range p.GetAdminUsers() {
		obj.AdminUsers = append(obj.AdminUsers, *ProtoToContainerawsAlphaAwsClusterAuthorizationAdminUsers(user))
	}
	return obj
}
// ProtoToAwsClusterAuthorizationAdminUsers converts a AwsClusterAuthorizationAdminUsers resource from its proto representation.
func ProtoToContainerawsAlphaAwsClusterAuthorizationAdminUsers(p *alphapb.ContainerawsAlphaAwsClusterAuthorizationAdminUsers) *alpha.AwsClusterAuthorizationAdminUsers {
	if p == nil {
		return nil
	}
	return &alpha.AwsClusterAuthorizationAdminUsers{
		Username: dcl.StringOrNil(p.Username),
	}
}
// ProtoToAwsClusterWorkloadIdentityConfig converts a AwsClusterWorkloadIdentityConfig resource from its proto representation.
func ProtoToContainerawsAlphaAwsClusterWorkloadIdentityConfig(p *alphapb.ContainerawsAlphaAwsClusterWorkloadIdentityConfig) *alpha.AwsClusterWorkloadIdentityConfig {
	if p == nil {
		return nil
	}
	return &alpha.AwsClusterWorkloadIdentityConfig{
		IssuerUri:        dcl.StringOrNil(p.IssuerUri),
		WorkloadPool:     dcl.StringOrNil(p.WorkloadPool),
		IdentityProvider: dcl.StringOrNil(p.IdentityProvider),
	}
}
// ProtoToAwsCluster converts a AwsCluster resource from its proto representation.
func ProtoToAwsCluster(p *alphapb.ContainerawsAlphaAwsCluster) *alpha.AwsCluster {
	// Guard against a nil resource (e.g. an empty request body); every nested
	// ProtoTo* converter in this file already does this, but the top-level
	// converter dereferenced p unconditionally.
	if p == nil {
		return nil
	}
	obj := &alpha.AwsCluster{
		Name:                   dcl.StringOrNil(p.Name),
		Description:            dcl.StringOrNil(p.Description),
		Networking:             ProtoToContainerawsAlphaAwsClusterNetworking(p.GetNetworking()),
		AwsRegion:              dcl.StringOrNil(p.AwsRegion),
		ControlPlane:           ProtoToContainerawsAlphaAwsClusterControlPlane(p.GetControlPlane()),
		Authorization:          ProtoToContainerawsAlphaAwsClusterAuthorization(p.GetAuthorization()),
		State:                  ProtoToContainerawsAlphaAwsClusterStateEnum(p.GetState()),
		Endpoint:               dcl.StringOrNil(p.Endpoint),
		Uid:                    dcl.StringOrNil(p.Uid),
		Reconciling:            dcl.Bool(p.Reconciling),
		CreateTime:             dcl.StringOrNil(p.GetCreateTime()),
		UpdateTime:             dcl.StringOrNil(p.GetUpdateTime()),
		Etag:                   dcl.StringOrNil(p.Etag),
		WorkloadIdentityConfig: ProtoToContainerawsAlphaAwsClusterWorkloadIdentityConfig(p.GetWorkloadIdentityConfig()),
		Project:                dcl.StringOrNil(p.Project),
		Location:               dcl.StringOrNil(p.Location),
	}
	return obj
}
// AwsClusterControlPlaneRootVolumeVolumeTypeEnumToProto converts a AwsClusterControlPlaneRootVolumeVolumeTypeEnum enum to its proto representation.
func ContainerawsAlphaAwsClusterControlPlaneRootVolumeVolumeTypeEnumToProto(e *alpha.AwsClusterControlPlaneRootVolumeVolumeTypeEnum) alphapb.ContainerawsAlphaAwsClusterControlPlaneRootVolumeVolumeTypeEnum {
	// Zero is the proto "unspecified" value, used for nil or unknown input.
	unspecified := alphapb.ContainerawsAlphaAwsClusterControlPlaneRootVolumeVolumeTypeEnum(0)
	if e == nil {
		return unspecified
	}
	if v, ok := alphapb.ContainerawsAlphaAwsClusterControlPlaneRootVolumeVolumeTypeEnum_value["AwsClusterControlPlaneRootVolumeVolumeTypeEnum"+string(*e)]; ok {
		return alphapb.ContainerawsAlphaAwsClusterControlPlaneRootVolumeVolumeTypeEnum(v)
	}
	return unspecified
}
// AwsClusterControlPlaneMainVolumeVolumeTypeEnumToProto converts a AwsClusterControlPlaneMainVolumeVolumeTypeEnum enum to its proto representation.
func ContainerawsAlphaAwsClusterControlPlaneMainVolumeVolumeTypeEnumToProto(e *alpha.AwsClusterControlPlaneMainVolumeVolumeTypeEnum) alphapb.ContainerawsAlphaAwsClusterControlPlaneMainVolumeVolumeTypeEnum {
	// Zero is the proto "unspecified" value, used for nil or unknown input.
	unspecified := alphapb.ContainerawsAlphaAwsClusterControlPlaneMainVolumeVolumeTypeEnum(0)
	if e == nil {
		return unspecified
	}
	if v, ok := alphapb.ContainerawsAlphaAwsClusterControlPlaneMainVolumeVolumeTypeEnum_value["AwsClusterControlPlaneMainVolumeVolumeTypeEnum"+string(*e)]; ok {
		return alphapb.ContainerawsAlphaAwsClusterControlPlaneMainVolumeVolumeTypeEnum(v)
	}
	return unspecified
}
// AwsClusterStateEnumToProto converts a AwsClusterStateEnum enum to its proto representation.
func ContainerawsAlphaAwsClusterStateEnumToProto(e *alpha.AwsClusterStateEnum) alphapb.ContainerawsAlphaAwsClusterStateEnum {
	// Zero is the proto "unspecified" value, used for nil or unknown input.
	unspecified := alphapb.ContainerawsAlphaAwsClusterStateEnum(0)
	if e == nil {
		return unspecified
	}
	if v, ok := alphapb.ContainerawsAlphaAwsClusterStateEnum_value["AwsClusterStateEnum"+string(*e)]; ok {
		return alphapb.ContainerawsAlphaAwsClusterStateEnum(v)
	}
	return unspecified
}
// AwsClusterNetworkingToProto converts a AwsClusterNetworking resource to its proto representation.
func ContainerawsAlphaAwsClusterNetworkingToProto(o *alpha.AwsClusterNetworking) *alphapb.ContainerawsAlphaAwsClusterNetworking {
	if o == nil {
		return nil
	}
	p := &alphapb.ContainerawsAlphaAwsClusterNetworking{
		VpcId: dcl.ValueOrEmptyString(o.VPCId),
	}
	// Copy each repeated string field with a single variadic append.
	p.PodAddressCidrBlocks = append(p.PodAddressCidrBlocks, o.PodAddressCidrBlocks...)
	p.ServiceAddressCidrBlocks = append(p.ServiceAddressCidrBlocks, o.ServiceAddressCidrBlocks...)
	p.ServiceLoadBalancerSubnetIds = append(p.ServiceLoadBalancerSubnetIds, o.ServiceLoadBalancerSubnetIds...)
	return p
}
// AwsClusterControlPlaneToProto converts a AwsClusterControlPlane resource to its proto representation.
func ContainerawsAlphaAwsClusterControlPlaneToProto(o *alpha.AwsClusterControlPlane) *alphapb.ContainerawsAlphaAwsClusterControlPlane {
	if o == nil {
		return nil
	}
	p := &alphapb.ContainerawsAlphaAwsClusterControlPlane{
		Version:                   dcl.ValueOrEmptyString(o.Version),
		InstanceType:              dcl.ValueOrEmptyString(o.InstanceType),
		SshConfig:                 ContainerawsAlphaAwsClusterControlPlaneSshConfigToProto(o.SshConfig),
		IamInstanceProfile:        dcl.ValueOrEmptyString(o.IamInstanceProfile),
		RootVolume:                ContainerawsAlphaAwsClusterControlPlaneRootVolumeToProto(o.RootVolume),
		MainVolume:                ContainerawsAlphaAwsClusterControlPlaneMainVolumeToProto(o.MainVolume),
		DatabaseEncryption:        ContainerawsAlphaAwsClusterControlPlaneDatabaseEncryptionToProto(o.DatabaseEncryption),
		AwsServicesAuthentication: ContainerawsAlphaAwsClusterControlPlaneAwsServicesAuthenticationToProto(o.AwsServicesAuthentication),
	}
	// Copy each repeated string field with a single variadic append.
	p.SubnetIds = append(p.SubnetIds, o.SubnetIds...)
	p.SecurityGroupIds = append(p.SecurityGroupIds, o.SecurityGroupIds...)
	// The proto Tags map is always allocated, even when o.Tags is nil.
	p.Tags = make(map[string]string)
	for key, value := range o.Tags {
		p.Tags[key] = value
	}
	return p
}
// AwsClusterControlPlaneSshConfigToProto converts a AwsClusterControlPlaneSshConfig resource to its proto representation.
func ContainerawsAlphaAwsClusterControlPlaneSshConfigToProto(o *alpha.AwsClusterControlPlaneSshConfig) *alphapb.ContainerawsAlphaAwsClusterControlPlaneSshConfig {
	if o == nil {
		return nil
	}
	return &alphapb.ContainerawsAlphaAwsClusterControlPlaneSshConfig{
		Ec2KeyPair: dcl.ValueOrEmptyString(o.Ec2KeyPair),
	}
}
// AwsClusterControlPlaneRootVolumeToProto converts a AwsClusterControlPlaneRootVolume resource to its proto representation.
func ContainerawsAlphaAwsClusterControlPlaneRootVolumeToProto(o *alpha.AwsClusterControlPlaneRootVolume) *alphapb.ContainerawsAlphaAwsClusterControlPlaneRootVolume {
	if o == nil {
		return nil
	}
	return &alphapb.ContainerawsAlphaAwsClusterControlPlaneRootVolume{
		SizeGib:    dcl.ValueOrEmptyInt64(o.SizeGib),
		VolumeType: ContainerawsAlphaAwsClusterControlPlaneRootVolumeVolumeTypeEnumToProto(o.VolumeType),
		Iops:       dcl.ValueOrEmptyInt64(o.Iops),
		KmsKeyArn:  dcl.ValueOrEmptyString(o.KmsKeyArn),
	}
}
// AwsClusterControlPlaneMainVolumeToProto converts a AwsClusterControlPlaneMainVolume resource to its proto representation.
func ContainerawsAlphaAwsClusterControlPlaneMainVolumeToProto(o *alpha.AwsClusterControlPlaneMainVolume) *alphapb.ContainerawsAlphaAwsClusterControlPlaneMainVolume {
	if o == nil {
		return nil
	}
	return &alphapb.ContainerawsAlphaAwsClusterControlPlaneMainVolume{
		SizeGib:    dcl.ValueOrEmptyInt64(o.SizeGib),
		VolumeType: ContainerawsAlphaAwsClusterControlPlaneMainVolumeVolumeTypeEnumToProto(o.VolumeType),
		Iops:       dcl.ValueOrEmptyInt64(o.Iops),
		KmsKeyArn:  dcl.ValueOrEmptyString(o.KmsKeyArn),
	}
}
// AwsClusterControlPlaneDatabaseEncryptionToProto converts a AwsClusterControlPlaneDatabaseEncryption resource to its proto representation.
func ContainerawsAlphaAwsClusterControlPlaneDatabaseEncryptionToProto(o *alpha.AwsClusterControlPlaneDatabaseEncryption) *alphapb.ContainerawsAlphaAwsClusterControlPlaneDatabaseEncryption {
	if o == nil {
		return nil
	}
	return &alphapb.ContainerawsAlphaAwsClusterControlPlaneDatabaseEncryption{
		KmsKeyArn: dcl.ValueOrEmptyString(o.KmsKeyArn),
	}
}
// AwsClusterControlPlaneAwsServicesAuthenticationToProto converts a AwsClusterControlPlaneAwsServicesAuthentication resource to its proto representation.
func ContainerawsAlphaAwsClusterControlPlaneAwsServicesAuthenticationToProto(o *alpha.AwsClusterControlPlaneAwsServicesAuthentication) *alphapb.ContainerawsAlphaAwsClusterControlPlaneAwsServicesAuthentication {
	if o == nil {
		return nil
	}
	return &alphapb.ContainerawsAlphaAwsClusterControlPlaneAwsServicesAuthentication{
		RoleArn:         dcl.ValueOrEmptyString(o.RoleArn),
		RoleSessionName: dcl.ValueOrEmptyString(o.RoleSessionName),
	}
}
// AwsClusterAuthorizationToProto converts a AwsClusterAuthorization resource to its proto representation.
func ContainerawsAlphaAwsClusterAuthorizationToProto(o *alpha.AwsClusterAuthorization) *alphapb.ContainerawsAlphaAwsClusterAuthorization {
	if o == nil {
		return nil
	}
	p := &alphapb.ContainerawsAlphaAwsClusterAuthorization{}
	// Index into the slice instead of taking the loop variable's address.
	for i := range o.AdminUsers {
		p.AdminUsers = append(p.AdminUsers, ContainerawsAlphaAwsClusterAuthorizationAdminUsersToProto(&o.AdminUsers[i]))
	}
	return p
}
// AwsClusterAuthorizationAdminUsersToProto converts a AwsClusterAuthorizationAdminUsers resource to its proto representation.
func ContainerawsAlphaAwsClusterAuthorizationAdminUsersToProto(o *alpha.AwsClusterAuthorizationAdminUsers) *alphapb.ContainerawsAlphaAwsClusterAuthorizationAdminUsers {
	if o == nil {
		return nil
	}
	return &alphapb.ContainerawsAlphaAwsClusterAuthorizationAdminUsers{
		Username: dcl.ValueOrEmptyString(o.Username),
	}
}
// AwsClusterWorkloadIdentityConfigToProto converts a AwsClusterWorkloadIdentityConfig resource to its proto representation.
func ContainerawsAlphaAwsClusterWorkloadIdentityConfigToProto(o *alpha.AwsClusterWorkloadIdentityConfig) *alphapb.ContainerawsAlphaAwsClusterWorkloadIdentityConfig {
	if o == nil {
		return nil
	}
	return &alphapb.ContainerawsAlphaAwsClusterWorkloadIdentityConfig{
		IssuerUri:        dcl.ValueOrEmptyString(o.IssuerUri),
		WorkloadPool:     dcl.ValueOrEmptyString(o.WorkloadPool),
		IdentityProvider: dcl.ValueOrEmptyString(o.IdentityProvider),
	}
}
// AwsClusterToProto converts a AwsCluster resource to its proto representation.
func AwsClusterToProto(resource *alpha.AwsCluster) *alphapb.ContainerawsAlphaAwsCluster {
	// Guard against a nil resource, mirroring the nested ToProto converters;
	// the original dereferenced resource unconditionally.
	if resource == nil {
		return nil
	}
	p := &alphapb.ContainerawsAlphaAwsCluster{
		Name:                   dcl.ValueOrEmptyString(resource.Name),
		Description:            dcl.ValueOrEmptyString(resource.Description),
		Networking:             ContainerawsAlphaAwsClusterNetworkingToProto(resource.Networking),
		AwsRegion:              dcl.ValueOrEmptyString(resource.AwsRegion),
		ControlPlane:           ContainerawsAlphaAwsClusterControlPlaneToProto(resource.ControlPlane),
		Authorization:          ContainerawsAlphaAwsClusterAuthorizationToProto(resource.Authorization),
		State:                  ContainerawsAlphaAwsClusterStateEnumToProto(resource.State),
		Endpoint:               dcl.ValueOrEmptyString(resource.Endpoint),
		Uid:                    dcl.ValueOrEmptyString(resource.Uid),
		Reconciling:            dcl.ValueOrEmptyBool(resource.Reconciling),
		CreateTime:             dcl.ValueOrEmptyString(resource.CreateTime),
		UpdateTime:             dcl.ValueOrEmptyString(resource.UpdateTime),
		Etag:                   dcl.ValueOrEmptyString(resource.Etag),
		WorkloadIdentityConfig: ContainerawsAlphaAwsClusterWorkloadIdentityConfigToProto(resource.WorkloadIdentityConfig),
		Project:                dcl.ValueOrEmptyString(resource.Project),
		Location:               dcl.ValueOrEmptyString(resource.Location),
	}
	return p
}
// applyAwsCluster handles the gRPC request by passing it to the underlying AwsCluster Apply() method.
func (s *AwsClusterServer) applyAwsCluster(ctx context.Context, c *alpha.Client, request *alphapb.ApplyContainerawsAlphaAwsClusterRequest) (*alphapb.ContainerawsAlphaAwsCluster, error) {
	p := ProtoToAwsCluster(request.GetResource())
	res, err := c.ApplyAwsCluster(ctx, p)
	if err != nil {
		return nil, err
	}
	r := AwsClusterToProto(res)
	return r, nil
}
// ApplyContainerawsAlphaAwsCluster handles the gRPC request by passing it to the underlying AwsCluster Apply() method.
func (s *AwsClusterServer) ApplyContainerawsAlphaAwsCluster(ctx context.Context, request *alphapb.ApplyContainerawsAlphaAwsClusterRequest) (*alphapb.ContainerawsAlphaAwsCluster, error) {
	cl, err := createConfigAwsCluster(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return s.applyAwsCluster(ctx, cl, request)
}
// DeleteContainerawsAlphaAwsCluster handles the gRPC request by passing it to the underlying AwsCluster Delete() method.
func (s *AwsClusterServer) DeleteContainerawsAlphaAwsCluster(ctx context.Context, request *alphapb.DeleteContainerawsAlphaAwsClusterRequest) (*emptypb.Empty, error) {
	cl, err := createConfigAwsCluster(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteAwsCluster(ctx, ProtoToAwsCluster(request.GetResource()))
}
// ListContainerawsAlphaAwsCluster handles the gRPC request by passing it to the underlying AwsClusterList() method.
func (s *AwsClusterServer) ListContainerawsAlphaAwsCluster(ctx context.Context, request *alphapb.ListContainerawsAlphaAwsClusterRequest) (*alphapb.ListContainerawsAlphaAwsClusterResponse, error) {
	cl, err := createConfigAwsCluster(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListAwsCluster(ctx, ProtoToAwsCluster(request.GetResource()))
	if err != nil {
		return nil, err
	}
	var protos []*alphapb.ContainerawsAlphaAwsCluster
	for _, item := range resources.Items {
		protos = append(protos, AwsClusterToProto(item))
	}
	return &alphapb.ListContainerawsAlphaAwsClusterResponse{Items: protos}, nil
}
// createConfigAwsCluster builds an alpha.Client authenticated with the given
// service account file.
func createConfigAwsCluster(ctx context.Context, serviceAccountFile string) (*alpha.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return alpha.NewClient(conf), nil
}
|
package constants
// Context keys used to stash request-scoped values.
// NOTE(review): ALL_CAPS names are non-idiomatic Go (MixedCaps preferred),
// but renaming would break existing callers.
const (
	CTX_FIXER_CLIENT = "CTX_FIXER_CLIENT"
	CTX_PROJECT      = "CTX_PROJECT"
	CTX_PROJECTS     = "CTX_PROJECTS"
)
|
package main
import "testing"
// TestGreeting verifies that greeting wraps the message in <b> tags.
func TestGreeting(t *testing.T) {
	mensagem := "Code.education Rocks!"
	esperado := "<b>" + mensagem + "</b>"
	retorno := greeting(mensagem)
	if retorno == "" {
		t.Error("greeting function returned empty string")
	}
	if retorno != esperado {
		t.Errorf("greeting function returned %v, expected %v", retorno, esperado)
	}
}
// TestSqrtCalc pins the exact value sqrtCalc produces for 0.0001.
// NOTE(review): exact float equality is fragile; assumes sqrtCalc is
// bit-for-bit deterministic on the target platform.
func TestSqrtCalc(t *testing.T) {
	const expected = 249996287100.738586
	if val := sqrtCalc(0.0001); val != expected {
		t.Errorf("sqrtcalc function returned %v, expected %v", val, expected)
	}
}
|
package main
// Name is the provisioner's identifier.
const Name string = "aliyundisk-provisioner"

// Version is the release version string.
const Version string = "1.1.0"

// GitCommit describes latest commit hash.
// This value is extracted by git command when building.
var GitCommit string
|
package main
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestSetupConfigDefaults checks the fallback values used when no JSON
// configuration is supplied.
func TestSetupConfigDefaults(t *testing.T) {
	config := parseConfig("")
	// testify's assert.Equal signature is (t, expected, actual, msg); the
	// original passed the arguments swapped, producing misleading failures.
	assert.Equal(t, "_source", config.SourceDir, "Is source directory set?")
	assert.Equal(t, "_layout", config.LayoutDir, "Is layout directory set?")
	assert.Equal(t, "public", config.PublishDir, "Is publish directory set?")
	assert.Equal(t, true, config.UseMarkdown, "Is default use markdown?")
}
// TestSetupConfigJson checks that explicit JSON values override the defaults.
func TestSetupConfigJson(t *testing.T) {
	jsonConfig := `{"SourceDir":"pages","LayoutDir":"../themes/docs/","PublishDir":"../docs/"}`
	config := parseConfig(jsonConfig)
	// Expected value first, actual second (assert.Equal convention).
	assert.Equal(t, "pages", config.SourceDir, "Is source directory set?")
	assert.Equal(t, "../themes/docs/", config.LayoutDir, "Is layout directory set?")
	assert.Equal(t, true, config.UseMarkdown, "Is default use markdown?")
}
// TestSetupConfigParams checks that free-form Params entries are parsed.
func TestSetupConfigParams(t *testing.T) {
	jsonConfig := `{"SourceDir":"pages","Params":{"SiteName":"Hastie"}}`
	config := parseConfig(jsonConfig)
	// Expected value first, actual second (assert.Equal convention).
	assert.Equal(t, "pages", config.SourceDir, "Is source directory set?")
	assert.Equal(t, "Hastie", config.Params["SiteName"], "Is custom parameter set?")
}
|
package blocksutil
import "testing"
// TestBlocksAreDifferent checks that the generator never produces two
// identical blocks in a batch of 100.
func TestBlocksAreDifferent(t *testing.T) {
	gen := NewBlockGenerator()
	blocks := gen.Blocks(100)
	// Track previously seen keys in a set: O(n) instead of the original
	// O(n²) all-pairs comparison.
	seen := make(map[string]struct{}, len(blocks))
	for _, block := range blocks {
		key := block.String()
		if _, dup := seen[key]; dup {
			t.Error("Found duplicate blocks")
		}
		seen[key] = struct{}{}
	}
}
|
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"log"
"net/url"
"os"
"time"
"github.com/gorilla/websocket"
)
// TODO: reconnect functionality
// instead of ending the program in a dead end with error
// Failed to receive pong: write tcp 127.0.0.1:58146->127.0.0.1:8000: write: broken pipe2021/10/01 23:16:45 Conn 0 sending message
var (
	// ip is the frontend host to dial (ws scheme, port 8000 appended in main).
	ip = flag.String("ip", "frontend", "Frontend server IP")
	// connections is the number of websocket connections to open.
	connections = flag.Int("conn", 1, "number of websocket connections")
)
// main opens the requested number of websocket connections to the frontend
// and then loops forever, pinging each connection and sending a fixed JSON
// message, throttled per send.
func main() {
	flag.Usage = func() {
		io.WriteString(os.Stderr, `Websockets client generator
Example usage: ./client -ip=172.17.0.1 -conn=10
`)
		flag.PrintDefaults()
	}
	flag.Parse()
	u := url.URL{Scheme: "ws", Host: *ip + ":8000", Path: "/"}
	log.Printf("Connecting to %s", u.String())
	var conns []*websocket.Conn
	for i := 0; i < *connections; i++ {
		c, _, err := websocket.DefaultDialer.Dial(u.String(), nil)
		if err != nil {
			// Stop dialing on the first failure; keep whatever connected.
			fmt.Println("Failed to connect", i, err)
			break
		}
		conns = append(conns, c)
		// NOTE(review): defer inside a loop fires only at main's return, and
		// the send loop below never returns, so this cleanup is unreachable
		// in practice — consider explicit shutdown handling.
		defer func() {
			c.WriteControl(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""), time.Now().Add(time.Second))
			time.Sleep(time.Second)
			c.Close()
		}()
	}
	log.Printf("Finished initializing %d connections", len(conns))
	// Throttle harder when many connections are in play.
	tts := time.Second
	if *connections > 100 {
		tts = time.Millisecond * 5
	}
	// Infinite send loop: ping then publish one message per connection.
	for {
		for i := 0; i < len(conns); i++ {
			time.Sleep(tts)
			conn := conns[i]
			log.Printf("Conn %d sending message", i+1)
			if err := conn.WriteControl(websocket.PingMessage, nil, time.Now().Add(time.Second*5)); err != nil {
				fmt.Printf("Failed to receive pong: %v", err)
			}
			msg := Message{
				"observable": "NBA",
				"data": Message{
					"team":   "Chicago Bulls",
					"result": "win",
					"score":  "111-108",
					"home":   true,
				},
			}
			data, err := serialize(msg)
			if err != nil {
				fmt.Printf("Failed to serialize %v", msg)
			}
			// NOTE(review): WriteMessage error is ignored; a broken pipe here
			// goes unnoticed until the next ping.
			conn.WriteMessage(websocket.TextMessage, data)
		}
	}
}
// Message is a free-form JSON payload sent over the websocket.
type Message map[string]interface{}

// serialize JSON-encodes msg and returns the bytes. Note the output carries
// the trailing newline that json.Encoder appends.
func serialize(msg Message) ([]byte, error) {
	var buf bytes.Buffer
	err := json.NewEncoder(&buf).Encode(msg)
	return buf.Bytes(), err
}
|
package models
import (
"github.com/jinzhu/gorm"
)
// Cursor is an R&D / promotion entry (name, image and target link).
type Cursor struct {
	BaseModel
	Name  string `json:"name" form:"name"` // display name
	Image string `json:"image" form:"image"`
	Desc  string `json:"desc" form:"desc"`
	Alt   string `json:"alt" form:"alt"`
	Url   string `json:"url" form:"url"`
}
// Create inserts csor and returns the freshly-loaded row.
func (csor *Cursor) Create(db *gorm.DB) (*Cursor, error) {
	var model Cursor
	// csor is already a *Cursor; the original passed &csor (**Cursor), which
	// gorm's reflection does not handle reliably.
	err := db.Create(csor).Error
	if err == nil {
		// Re-read so database-populated fields (id, timestamps) are included.
		db.Where("id = ?", csor.ID).First(&model)
	}
	return &model, err
}
// Update persists csor's non-zero fields and reloads the row.
// NOTE(review): the error from db.First is discarded — a failed reload goes
// unnoticed by the caller.
func (csor *Cursor) Update(db *gorm.DB) (*Cursor, error) {
	err := db.Model(csor).Updates(csor).Error
	db.First(csor)
	return csor, err
}
|
package httpmanager
import (
"encoding/json"
"net/http"
)
// ListHandler replies with the JSON-encoded list of registered functions.
func (m *Manager) ListHandler(w http.ResponseWriter, r *http.Request) {
	ret, err := m.platformManager.ListFunction()
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	jsonRet, err := json.Marshal(ret)
	if err != nil {
		// Encoding failure is a server-side problem, not a client error, so
		// report 500 instead of the original 400.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Add("Content-Type", "application/json;charset=utf-8")
	w.Write(jsonRet)
}
|
package main
import (
"fmt"
"os"
"strconv"
)
// main converts the command-line values from celsius to fahrenheit or from
// km to miles, depending on the trailing unit argument.
func main() {
	if len(os.Args) < 3 {
		fmt.Println("You should pass [values] and \"unit\".")
		os.Exit(1)
	}
	args := os.Args[1:]
	originalUnit := args[len(args)-1]
	originalValues := args[:len(args)-1]
	var finalUnit string
	switch originalUnit {
	case "celsius":
		finalUnit = "fahrenheit"
	case "km":
		finalUnit = "miles"
	default:
		fmt.Printf("%s isn't a known unit!\n", originalUnit)
		os.Exit(1)
	}
	for i, raw := range originalValues {
		value, err := strconv.ParseFloat(raw, 64)
		if err != nil {
			fmt.Printf(
				"The value %s in the position %d isn't a valid number!\n",
				raw, i,
			)
			os.Exit(1)
		}
		var converted float64
		if originalUnit == "celsius" {
			converted = value*1.8 + 32
		} else {
			converted = value / 1.60934
		}
		fmt.Printf(
			"%.2f %s = %.2f %s\n",
			value, originalUnit,
			converted, finalUnit,
		)
	}
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package addindextest
import (
"strconv"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/util/logutil"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
// compCtx is the shared, package-level compatibility test context,
// reset per run by initCompCtxParams.
var compCtx = CompatibilityContext{}

// testType enumerates the kinds of add-index DDL the compatibility
// tests exercise.
type testType int8
const (
	// TestNonUnique test type of create none unique index.
	TestNonUnique testType = iota
	// TestUnique test type of create unique index.
	TestUnique
	// TestPK test type of create Primary key.
	TestPK
	// TestGenIndex test type of create generated col index.
	TestGenIndex
	// TestMultiCols test type of multi columns in one index.
	TestMultiCols
)
// CompatibilityContext is context of compatibility test.
type CompatibilityContext struct {
	isMultiSchemaChange bool
	isConcurrentDDL bool
	isPiTR bool
	executor []*executor // one executor per test table
	colIIDs [][]int // first-column ids per table
	colJIDs [][]int // second-column ids per table (multi-col tests)
	tType testType // which DDL scenario to run
}
// paraDDLChan carries one executor's terminal status (error plus a
// finished flag) back to the coordinating goroutine.
type paraDDLChan struct {
	err error
	finished bool
}
// executor runs one table's DDL workload on its own TestKit and
// reports completion over PDChan.
type executor struct {
	id int
	tk *testkit.TestKit
	PDChan chan *paraDDLChan
}
// newExecutor builds an executor for table tableID. The status channel
// is buffered (capacity 1) so run() can report without blocking.
func newExecutor(tableID int) *executor {
	return &executor{
		id:     tableID,
		PDChan: make(chan *paraDDLChan, 1),
	}
}
// initCompCtxParams resets the shared compatibility context's feature
// flags to their defaults and attaches it to ctx.
func initCompCtxParams(ctx *suiteContext) {
	compCtx.isConcurrentDDL = false
	compCtx.isMultiSchemaChange = false
	compCtx.isPiTR = false
	ctx.CompCtx = &compCtx
}
// start launches three executor goroutines, one per test table, each
// with a TestKit borrowed from ctx. Results are collected by stop.
func (cCtx *CompatibilityContext) start(ctx *suiteContext) {
	// Reset length but keep the slice's backing storage across runs.
	cCtx.executor = cCtx.executor[:0]
	for i := 0; i < 3; i++ {
		er := newExecutor(i)
		er.tk = ctx.getTestKit()
		cCtx.executor = append(cCtx.executor, er)
		go cCtx.executor[i].run(ctx)
	}
}
// stop waits for the three executors launched by start, returning the
// first reported error and releasing each finished executor's TestKit
// back to the suite's pool.
func (cCtx *CompatibilityContext) stop(ctx *suiteContext) error {
	count := 3
	for i := 0; i < 3; i++ {
		// Block until executor i publishes its terminal status.
		pdChan := <-cCtx.executor[i].PDChan
		if pdChan.err != nil {
			// require fails the test; the return is for the caller.
			require.NoError(ctx.t, pdChan.err)
			return pdChan.err
		}
		if pdChan.finished {
			count--
			logutil.BgLogger().Info("xlc test worker", zap.Int("count", count), zap.Int("er id", i))
			ctx.putTestKit(ctx.CompCtx.executor[i].tk)
		}
		if count == 0 {
			break
		}
	}
	return nil
}
// run executes the DDL workload selected by the context's test type
// against this executor's table, then reports the outcome on PDChan.
func (e *executor) run(ctx *suiteContext) {
	var (
		err error
		erChan paraDDLChan
	)
	switch ctx.CompCtx.tType {
	case TestNonUnique:
		err = testOneColFramePara(ctx, e.id, ctx.CompCtx.colIIDs, addIndexNonUnique)
	case TestUnique:
		err = testOneColFramePara(ctx, e.id, ctx.CompCtx.colIIDs, addIndexUnique)
	case TestPK:
		err = testOneIndexFramePara(ctx, e.id, 0, addIndexPK)
	case TestGenIndex:
		// 29: hard-coded column id — presumably the generated column in
		// the test schema; confirm against the table definition.
		err = testOneIndexFramePara(ctx, e.id, 29, addIndexGenCol)
	case TestMultiCols:
		err = testTwoColsFramePara(ctx, e.id, ctx.CompCtx.colIIDs, ctx.CompCtx.colJIDs, addIndexMultiCols)
	default:
	}
	erChan.err = err
	erChan.finished = true
	// PDChan is buffered (capacity 1), so this send cannot block.
	e.PDChan <- &erChan
}
// testOneColFramePara adds an index via f on each column id listed for
// tableID. For unique/PK tests, duplicate-entry errors are expected and
// skipped; any other error fails the test. Each successful DDL is
// verified with checkResult.
func testOneColFramePara(ctx *suiteContext, tableID int, colIDs [][]int, f func(*suiteContext, int, string, int) error) (err error) {
	tableName := "addindex.t" + strconv.Itoa(tableID)
	for _, i := range colIDs[tableID] {
		err = f(ctx, tableID, tableName, i)
		if err != nil {
			if ctx.isUnique || ctx.isPK {
				// Duplicate data is the expected failure mode for
				// unique/PK indexes; tolerate it and move on.
				require.Contains(ctx.t, err.Error(), "Duplicate entry")
				err = nil
				continue
			}
			logutil.BgLogger().Error("add index failed", zap.String("category", "add index test"), zap.Error(err))
			require.NoError(ctx.t, err)
			break
		}
		checkResult(ctx, tableName, i, tableID)
	}
	return err
}
// testTwoColsFramePara adds a two-column index via f for every (i, j)
// column pair of tableID, verifying results for each distinct pair.
// NOTE(review): unlike testOneColFramePara, duplicate-entry errors are
// not tolerated here — confirm that is intentional.
func testTwoColsFramePara(ctx *suiteContext, tableID int, iIDs [][]int, jIDs [][]int, f func(*suiteContext, int, string, int, int, int) error) (err error) {
	tableName := "addindex.t" + strconv.Itoa(tableID)
	indexID := 0
	for _, i := range iIDs[tableID] {
		for _, j := range jIDs[tableID] {
			err = f(ctx, tableID, tableName, indexID, i, j)
			if err != nil {
				logutil.BgLogger().Error("add index failed", zap.String("category", "add index test"), zap.Error(err))
			}
			require.NoError(ctx.t, err)
			// Skip verification for degenerate pairs (i == j).
			if err == nil && i != j {
				checkResult(ctx, tableName, indexID, tableID)
			}
			indexID++
			if err != nil {
				return err
			}
		}
	}
	return err
}
// testOneIndexFramePara creates a single index (or primary key) on
// column colID of tableID via f and verifies the outcome: whole-table
// check for PK tests, per-index check otherwise.
func testOneIndexFramePara(ctx *suiteContext, tableID int, colID int, f func(*suiteContext, int, string, int) error) (err error) {
	tableName := "addindex.t" + strconv.Itoa(tableID)
	err = f(ctx, tableID, tableName, colID)
	if err != nil {
		logutil.BgLogger().Error("add index failed", zap.String("category", "add index test"), zap.Error(err))
	}
	require.NoError(ctx.t, err)
	if err == nil {
		if ctx.isPK {
			checkTableResult(ctx, tableName, tableID)
		} else {
			checkResult(ctx, tableName, colID, tableID)
		}
	}
	return err
}
|
package middleware
import (
"bytes"
"context"
"crypto/md5"
"fmt"
"io/ioutil"
"net/http"
model "github.com/cloudreve/Cloudreve/v3/models"
"github.com/cloudreve/Cloudreve/v3/pkg/auth"
"github.com/cloudreve/Cloudreve/v3/pkg/cache"
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/onedrive"
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/oss"
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/upyun"
"github.com/cloudreve/Cloudreve/v3/pkg/serializer"
"github.com/cloudreve/Cloudreve/v3/pkg/util"
"github.com/gin-contrib/sessions"
"github.com/gin-gonic/gin"
"github.com/qiniu/api.v7/v7/auth/qbox"
)
// SignRequired validates the request signature: body-carrying methods
// (PUT/POST) are checked over the full request, everything else over
// the URI only. Unsigned requests are rejected and the chain aborted.
func SignRequired() gin.HandlerFunc {
	return func(c *gin.Context) {
		var err error
		switch c.Request.Method {
		case "PUT", "POST":
			err = auth.CheckRequest(auth.General, c.Request)
			// TODO remove the next line in production
			//err = nil
		default:
			err = auth.CheckURI(auth.General, c.Request.URL)
		}
		if err != nil {
			c.JSON(200, serializer.Err(serializer.CodeCheckLogin, err.Error(), err))
			c.Abort()
			return
		}
		c.Next()
	}
}
// CurrentUser loads the logged-in user referenced by the session's
// "user_id" (if any) and stores a *model.User under the "user" context
// key. It never aborts the chain.
func CurrentUser() gin.HandlerFunc {
	return func(c *gin.Context) {
		session := sessions.Default(c)
		uid := session.Get("user_id")
		if uid != nil {
			user, err := model.GetActiveUserByID(uid)
			// Lookup failures are ignored; the request proceeds as anonymous.
			if err == nil {
				c.Set("user", &user)
			}
		}
		c.Next()
	}
}
// AuthRequired aborts the request unless a *model.User has been stored
// in the context (i.e. the requester is logged in, typically via
// CurrentUser running earlier in the chain).
func AuthRequired() gin.HandlerFunc {
	return func(c *gin.Context) {
		if user, _ := c.Get("user"); user != nil {
			if _, ok := user.(*model.User); ok {
				c.Next()
				return
			}
		}
		c.JSON(200, serializer.CheckLogin())
		c.Abort()
	}
}
// WebDAVAuth performs HTTP Basic authentication for WebDAV requests and
// verifies the account's group has WebDAV enabled. On success the user
// and webdav records are stored in the request context.
func WebDAVAuth() gin.HandlerFunc {
	return func(c *gin.Context) {
		// OPTIONS requests are not authenticated, otherwise Windows 10
		// cannot save documents.
		if c.Request.Method == "OPTIONS" {
			c.Next()
			return
		}
		username, password, ok := c.Request.BasicAuth()
		if !ok {
			// Challenge the client for credentials.
			c.Writer.Header()["WWW-Authenticate"] = []string{`Basic realm="cloudreve"`}
			c.Status(http.StatusUnauthorized)
			c.Abort()
			return
		}
		expectedUser, err := model.GetUserByEmail(username)
		if err != nil {
			c.Status(http.StatusUnauthorized)
			c.Abort()
			return
		}
		// Is the password correct?
		webdav, err := model.GetWebdavByPassword(password, expectedUser.ID)
		if err != nil {
			c.Status(http.StatusUnauthorized)
			c.Abort()
			return
		}
		// Does the user's group have WebDAV enabled?
		if !expectedUser.Group.WebDAVEnabled {
			c.Status(http.StatusForbidden)
			c.Abort()
			return
		}
		c.Set("user", &expectedUser)
		c.Set("webdav", webdav)
		c.Next()
	}
}
// uploadCallbackCheck validates the callback key of an upload callback
// request. On success it stores the upload session and user in the
// context and returns the user; a non-zero Code in the returned
// Response signals failure.
func uploadCallbackCheck(c *gin.Context) (serializer.Response, *model.User) {
	// Validate the callback key.
	callbackKey := c.Param("key")
	if callbackKey == "" {
		return serializer.ParamErr("Callback Key 不能为空", nil), nil
	}
	callbackSessionRaw, exist := cache.Get("callback_" + callbackKey)
	if !exist {
		return serializer.ParamErr("回调会话不存在或已过期", nil), nil
	}
	callbackSession := callbackSessionRaw.(serializer.UploadSession)
	c.Set("callbackSession", &callbackSession)
	// Remove the callback session so the key cannot be replayed.
	_ = cache.Deletes([]string{callbackKey}, "callback_")
	// Look up the user who initiated the upload.
	user, err := model.GetActiveUserByID(callbackSession.UID)
	if err != nil {
		return serializer.Err(serializer.CodeCheckLogin, "找不到用户", err), nil
	}
	c.Set("user", &user)
	return serializer.Response{}, &user
}
// RemoteCallbackAuth verifies the HMAC signature of a remote-storage
// upload callback using the uploading user's policy secret key.
func RemoteCallbackAuth() gin.HandlerFunc {
	return func(c *gin.Context) {
		// Validate the callback key and locate the user.
		resp, user := uploadCallbackCheck(c)
		if resp.Code != 0 {
			c.JSON(200, resp)
			c.Abort()
			return
		}
		// Verify the request signature.
		authInstance := auth.HMACAuth{SecretKey: []byte(user.Policy.SecretKey)}
		if err := auth.CheckRequest(authInstance, c.Request); err != nil {
			c.JSON(200, serializer.Err(serializer.CodeCheckLogin, err.Error(), err))
			c.Abort()
			return
		}
		c.Next()
	}
}
// QiniuCallbackAuth verifies that an upload callback genuinely comes
// from Qiniu by checking its signature against the policy's key pair.
func QiniuCallbackAuth() gin.HandlerFunc {
	return func(c *gin.Context) {
		// Validate the callback key and locate the user.
		resp, user := uploadCallbackCheck(c)
		if resp.Code != 0 {
			c.JSON(401, serializer.GeneralUploadCallbackFailed{Error: resp.Msg})
			c.Abort()
			return
		}
		// Verify that the callback originated from Qiniu.
		mac := qbox.NewMac(user.Policy.AccessKey, user.Policy.SecretKey)
		ok, err := mac.VerifyCallback(c.Request)
		if err != nil {
			util.Log().Debug("无法验证回调请求,%s", err)
			c.JSON(401, serializer.GeneralUploadCallbackFailed{Error: "无法验证回调请求"})
			c.Abort()
			return
		}
		if !ok {
			c.JSON(401, serializer.GeneralUploadCallbackFailed{Error: "回调签名无效"})
			c.Abort()
			return
		}
		c.Next()
	}
}
// OSSCallbackAuth verifies the signature of an Alibaba Cloud OSS upload
// callback request.
func OSSCallbackAuth() gin.HandlerFunc {
	return func(c *gin.Context) {
		// Validate the callback key and locate the user.
		resp, _ := uploadCallbackCheck(c)
		if resp.Code != 0 {
			c.JSON(401, serializer.GeneralUploadCallbackFailed{Error: resp.Msg})
			c.Abort()
			return
		}
		err := oss.VerifyCallbackSignature(c.Request)
		if err != nil {
			util.Log().Debug("回调签名验证失败,%s", err)
			c.JSON(401, serializer.GeneralUploadCallbackFailed{Error: "回调签名验证失败"})
			c.Abort()
			return
		}
		c.Next()
	}
}
// UpyunCallbackAuth verifies an Upyun upload callback: the request body
// MD5 must match the Content-Md5 header and the Authorization header
// must equal the signature recomputed from the user's policy.
func UpyunCallbackAuth() gin.HandlerFunc {
	return func(c *gin.Context) {
		// Validate the callback key and locate the user.
		resp, user := uploadCallbackCheck(c)
		if resp.Code != 0 {
			c.JSON(401, serializer.GeneralUploadCallbackFailed{Error: resp.Msg})
			c.Abort()
			return
		}
		// Read the request body.
		body, err := ioutil.ReadAll(c.Request.Body)
		c.Request.Body.Close()
		if err != nil {
			c.JSON(401, serializer.GeneralUploadCallbackFailed{Error: err.Error()})
			c.Abort()
			return
		}
		// Restore the body so downstream handlers can read it again.
		c.Request.Body = ioutil.NopCloser(bytes.NewReader(body))
		// Prepare to verify the Upyun callback signature.
		handler := upyun.Driver{Policy: &user.Policy}
		contentMD5 := c.Request.Header.Get("Content-Md5")
		date := c.Request.Header.Get("Date")
		actualSignature := c.Request.Header.Get("Authorization")
		// Compute the MD5 of the body.
		actualContentMD5 := fmt.Sprintf("%x", md5.Sum(body))
		if actualContentMD5 != contentMD5 {
			c.JSON(401, serializer.GeneralUploadCallbackFailed{Error: "MD5不一致"})
			c.Abort()
			return
		}
		// Compute the expected signature.
		signature := handler.Sign(context.Background(), []string{
			"POST",
			c.Request.URL.Path,
			date,
			contentMD5,
		})
		// Compare signatures.
		if signature != actualSignature {
			c.JSON(401, serializer.GeneralUploadCallbackFailed{Error: "鉴权失败"})
			c.Abort()
			return
		}
		c.Next()
	}
}
// OneDriveCallbackAuth validates a OneDrive upload callback's key and
// signals callback completion for that key.
// TODO decouple
func OneDriveCallbackAuth() gin.HandlerFunc {
	return func(c *gin.Context) {
		// Validate the callback key and locate the user.
		resp, _ := uploadCallbackCheck(c)
		if resp.Code != 0 {
			c.JSON(401, serializer.GeneralUploadCallbackFailed{Error: resp.Msg})
			c.Abort()
			return
		}
		// Signal that the callback has finished.
		onedrive.FinishCallback(c.Param("key"))
		c.Next()
	}
}
// COSCallbackAuth validates a Tencent COS upload callback. Only the
// callback key is checked; no signature verification happens here.
// TODO decouple and test
func COSCallbackAuth() gin.HandlerFunc {
	return func(c *gin.Context) {
		// Validate the callback key and locate the user.
		resp, _ := uploadCallbackCheck(c)
		if resp.Code != 0 {
			c.JSON(401, serializer.GeneralUploadCallbackFailed{Error: resp.Msg})
			c.Abort()
			return
		}
		c.Next()
	}
}
// S3CallbackAuth validates an Amazon S3 upload callback. Only the
// callback key is checked; no signature verification happens here.
func S3CallbackAuth() gin.HandlerFunc {
	return func(c *gin.Context) {
		// Validate the callback key and locate the user.
		resp, _ := uploadCallbackCheck(c)
		if resp.Code != 0 {
			c.JSON(401, serializer.GeneralUploadCallbackFailed{Error: resp.Msg})
			c.Abort()
			return
		}
		c.Next()
	}
}
// UfileCallbackAuth validates a UCloud UFile upload callback. Only the
// callback key is checked; no signature verification happens here.
func UfileCallbackAuth() gin.HandlerFunc {
	return func(c *gin.Context) {
		// Validate the callback key and locate the user.
		resp, _ := uploadCallbackCheck(c)
		if resp.Code != 0 {
			c.JSON(401, serializer.GeneralUploadCallbackFailed{Error: resp.Msg})
			c.Abort()
			return
		}
		c.Next()
	}
}
// IsAdmin rejects the request unless the context user belongs to the
// admin group (group ID 1) or is the super user (user ID 1).
func IsAdmin() gin.HandlerFunc {
	return func(c *gin.Context) {
		// The original asserted user.(*model.User) without checking the
		// context value, panicking whenever "user" was absent or of the
		// wrong type; reject such requests instead.
		user, exists := c.Get("user")
		u, ok := user.(*model.User)
		if !exists || !ok || (u.Group.ID != 1 && u.ID != 1) {
			c.JSON(200, serializer.Err(serializer.CodeAdminRequired, "您不是管理组成员", nil))
			c.Abort()
			return
		}
		c.Next()
	}
}
|
package post
import (
"errors"
"github.com/gin-gonic/gin"
"time"
postModel "yj-app/app/model/system/post"
userService "yj-app/app/service/system/user"
"yj-app/app/yjgframe/utils/convert"
"yj-app/app/yjgframe/utils/page"
)
// SelectRecordById looks up a post by primary key.
// NOTE(review): the found/ok flag from FindOne is discarded, so a
// missing row may come back as a zero-valued entity with a nil error —
// verify FindOne's contract (EditSave does check the flag).
func SelectRecordById(id int64) (*postModel.Entity, error) {
	entity := &postModel.Entity{PostId: id}
	_, err := entity.FindOne()
	return entity, err
}
// DeleteRecordById removes the post with the given primary key and
// reports whether at least one row was actually deleted.
func DeleteRecordById(id int64) bool {
	rows, err := (&postModel.Entity{PostId: id}).Delete()
	return err == nil && rows > 0
}
// DeleteRecordByIds deletes the posts whose ids appear in the
// comma-separated list and returns the affected row count (0 on error).
func DeleteRecordByIds(ids string) int64 {
	idList := convert.ToInt64Array(ids, ",")
	affected, err := postModel.DeleteBatch(idList...)
	if err != nil {
		return 0
	}
	return affected
}
// AddSave creates a new post from req, stamping the creation time and
// the current login name (when a profile is available), and returns the
// new post's id.
func AddSave(req *postModel.AddReq, c *gin.Context) (int64, error) {
	var entity postModel.Entity
	entity.PostName = req.PostName
	entity.PostCode = req.PostCode
	entity.Status = req.Status
	entity.PostSort = req.PostSort
	entity.Remark = req.Remark
	entity.CreateTime = time.Now()
	entity.CreateBy = ""
	user := userService.GetProfile(c)
	if user != nil {
		entity.CreateBy = user.LoginName
	}
	_, err := entity.Insert()
	return entity.PostId, err
}
// EditSave loads the post identified by req.PostId, applies the edited
// fields and audit info, and persists the update.
func EditSave(req *postModel.EditReq, c *gin.Context) (int64, error) {
	entity := &postModel.Entity{PostId: req.PostId}
	ok, err := entity.FindOne()
	if err != nil {
		return 0, err
	}
	if !ok {
		return 0, errors.New("数据不存在")
	}
	entity.PostName = req.PostName
	entity.PostCode = req.PostCode
	entity.Status = req.Status
	entity.Remark = req.Remark
	entity.PostSort = req.PostSort
	entity.UpdateTime = time.Now()
	entity.UpdateBy = ""
	user := userService.GetProfile(c)
	// BUG fix: the original tested `user == nil` and then dereferenced
	// user inside the branch, panicking whenever no profile was logged
	// in. Mirror AddSave's `user != nil` check.
	if user != nil {
		entity.UpdateBy = user.LoginName
	}
	return entity.Update()
}
// SelectListAll returns all posts matching params (no paging).
func SelectListAll(params *postModel.SelectPageReq) ([]postModel.EntityFlag, error) {
	return postModel.SelectListAll(params)
}
// SelectListByPage returns the posts matching params together with
// paging metadata.
func SelectListByPage(params *postModel.SelectPageReq) ([]postModel.Entity, *page.Paging, error) {
	return postModel.SelectListByPage(params)
}
// Export writes the posts matching param to an Excel sheet with the
// given headers/columns and returns whatever SelectListExport yields
// (e.g. the file path).
func Export(param *postModel.SelectPageReq) (string, error) {
	head := []string{"岗位序号", "岗位名称", "岗位编码", "岗位排序", "状态"}
	// NOTE(review): "stat" looks like a typo for the status column —
	// confirm against the posts table schema before relying on it.
	col := []string{"post_id", "post_name", "post_code", "post_sort", "stat"}
	return postModel.SelectListExport(param, head, col)
}
// SelectPostsByUserId returns every post, with Flag set to true on the
// posts currently assigned to userId.
func SelectPostsByUserId(userId int64) ([]postModel.EntityFlag, error) {
	// A nil request is passed deliberately — presumably SelectListAll
	// treats nil params as "no filter"; verify against the model.
	var paramsPost *postModel.SelectPageReq
	postAll, err := postModel.SelectListAll(paramsPost)
	if err != nil || postAll == nil {
		return nil, errors.New("未查询到岗位数据")
	}
	userPost, err := postModel.SelectPostsByUserId(userId)
	if err != nil || userPost == nil {
		return nil, errors.New("未查询到用户岗位数据")
	} else {
		// Mark each post that the user holds.
		for i := range postAll {
			for j := range userPost {
				if userPost[j].PostId == postAll[i].PostId {
					postAll[i].Flag = true
					break
				}
			}
		}
	}
	return postAll, nil
}
// CheckPostNameUniqueAll reports whether postName is unused:
// "0" means unique, "1" means taken (or the lookup failed).
func CheckPostNameUniqueAll(postName string) string {
	post, err := postModel.CheckPostNameUniqueAll(postName)
	if err != nil {
		return "1"
	}
	if post != nil && post.PostId > 0 {
		return "1"
	}
	return "0"
}
// CheckPostNameUnique reports whether postName is free for a post other
// than postId: "0" = unique, "1" = taken by another post or error.
func CheckPostNameUnique(postName string, postId int64) string {
	post, err := postModel.CheckPostNameUniqueAll(postName)
	if err != nil {
		return "1"
	}
	if post != nil && post.PostId > 0 && post.PostId != postId {
		return "1"
	}
	return "0"
}
// CheckPostCodeUniqueAll reports whether postCode is unused:
// "0" means unique, "1" means taken (or the lookup failed).
func CheckPostCodeUniqueAll(postCode string) string {
	post, err := postModel.CheckPostCodeUniqueAll(postCode)
	if err != nil {
		return "1"
	}
	if post != nil && post.PostId > 0 {
		return "1"
	}
	return "0"
}
// CheckPostCodeUnique reports whether postCode is free for a post other
// than postId: "0" = unique, "1" = taken by another post or error.
func CheckPostCodeUnique(postCode string, postId int64) string {
	existing, err := postModel.CheckPostCodeUniqueAll(postCode)
	if err != nil {
		return "1"
	}
	// Unique when nothing was found, or the match is the post itself.
	if existing == nil || existing.PostId <= 0 || existing.PostId == postId {
		return "0"
	}
	return "1"
}
|
package fbmessenger
import (
"bytes"
"context"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"net/url"
)
var defaultMessengerEndpoint = &url.URL{
Scheme: "https",
Host: "graph.facebook.com",
Path: "/v2.6/me/messages",
}
// A SenderOption set options on a sender.
type SenderOption func(*Sender) error
// Sender provides the functionality to send messages to Facebook Messenger.
type Sender struct {
accessToken string
client *http.Client
endpoint *url.URL
}
// HTTPClient returns a SenderOption that sets the HTTP client.
func HTTPClient(client *http.Client) SenderOption {
return func(s *Sender) error {
s.client = client
return nil
}
}
// Endpoint returns a SenderOption that sets the endpoint URL.
func Endpoint(u *url.URL) SenderOption {
return func(s *Sender) error {
s.endpoint = u
return nil
}
}
// NewSender creates a new Sender.
func NewSender(accessToken string, opts ...SenderOption) (*Sender, error) {
s := &Sender{
accessToken: accessToken,
}
for _, opt := range opts {
if err := opt(s); err != nil {
return nil, err
}
}
// set default values if not set
if s.client == nil {
s.client = http.DefaultClient
}
if s.endpoint == nil {
s.endpoint = defaultMessengerEndpoint
}
// copy the configured endpoint and
// add the access token as query parameter
endpoint := *s.endpoint
qs := endpoint.Query()
qs.Set("access_token", accessToken)
endpoint.RawQuery = qs.Encode()
s.endpoint = &endpoint
return s, nil
}
// MessageResponse contains information about a sent message.
type MessageResponse struct {
	RecipientID string `json:"recipient_id"` // ID of the message recipient
	MessageID string `json:"message_id"` // ID assigned to the sent message
	AttachmentID string `json:"attachment_id"` // ID of an uploaded attachment, if any
}
// SendMessage sends a message and returns the API's response details.
// NOTE(review): ctx is accepted but never propagated into the HTTP
// request (send builds the request without a context) — consider
// wiring it through with http.NewRequestWithContext.
func (s *Sender) SendMessage(ctx context.Context, msg *Message) (*MessageResponse, error) {
	src, err := msg.Source()
	if err != nil {
		return nil, err
	}
	var resp MessageResponse
	if err := s.send(src, &resp); err != nil {
		return nil, err
	}
	return &resp, nil
}
// SenderAction represents a sender action understood by the Messenger
// Send API (read receipts and typing indicators).
type SenderAction string
const (
	// MarkSeen mark last message as read.
	MarkSeen SenderAction = "mark_seen"
	// TypingOn turn typing indicators on.
	// Typing indicators are automatically turned off after 20 seconds.
	TypingOn SenderAction = "typing_on"
	// TypingOff turn typing indicators off.
	TypingOff SenderAction = "typing_off"
)
// SendAction sends a sender action (mark_seen/typing_on/typing_off) to
// the given recipient.
// NOTE(review): ctx is accepted but not used by the underlying send.
func (s *Sender) SendAction(ctx context.Context, to Recipient, action SenderAction) error {
	recipient, err := to.Source()
	if err != nil {
		return err
	}
	return s.send(map[string]interface{}{
		"recipient": recipient,
		"sender_action": action,
	}, nil)
}
// send POSTs src as JSON to the configured endpoint and, when dst is
// non-nil, decodes the JSON response into it. Non-2xx responses are
// converted to errors by checkResponse.
// NOTE(review): no context is attached to the request, so callers'
// ctx arguments cannot cancel it — see SendMessage/SendAction.
func (s *Sender) send(src interface{}, dst interface{}) error {
	body, err := json.Marshal(src)
	if err != nil {
		return err
	}
	call, err := http.NewRequest("POST", s.endpoint.String(), bytes.NewReader(body))
	if err != nil {
		return err
	}
	call.Header.Set("Content-Type", "application/json")
	resp, err := s.client.Do(call)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if err := s.checkResponse(resp); err != nil {
		return err
	}
	if dst == nil {
		// Drain the body so the underlying connection can be reused.
		io.Copy(ioutil.Discard, resp.Body)
		return nil
	}
	return json.NewDecoder(resp.Body).Decode(dst)
}
// checkResponse returns nil for 2xx responses. Otherwise it reads the
// body and decodes Facebook's error envelope into a *fbError, falling
// back to the read/parse error message tagged with the HTTP status.
func (s *Sender) checkResponse(resp *http.Response) error {
	if resp.StatusCode >= 200 && resp.StatusCode <= 299 {
		return nil
	}
	slurp, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return &fbError{
			Message: err.Error(),
			Code: resp.StatusCode,
		}
	}
	var errResp struct {
		Error fbError `json:"error"`
	}
	if err := json.Unmarshal(slurp, &errResp); err != nil {
		return &fbError{
			Message: err.Error(),
			Code: resp.StatusCode,
		}
	}
	return &errResp.Error
}
// fbError is the error payload returned by the Facebook Graph API.
type fbError struct {
	Message string `json:"message"`
	Type string `json:"type"`
	Code int `json:"code"`
	ErrorSubcode int `json:"error_subcode"`
	FBTraceID string `json:"fbtrace_id"`
}

// Error implements the error interface, returning the API's message.
func (err *fbError) Error() string {
	return err.Message
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.