text stringlengths 11 4.05M |
|---|
/* Copyright (c) 2016 Jason Ish
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package api
import (
"fmt"
"github.com/gorilla/mux"
"github.com/jasonish/evebox/core"
"github.com/jasonish/evebox/eve"
"github.com/jasonish/evebox/log"
"github.com/jasonish/evebox/server/sessions"
"github.com/pkg/errors"
"net/http"
"strconv"
"strings"
"time"
)
// GetEventByIdHandler returns the event identified by the "id" URL path
// variable as JSON. Responds with a 404 when no such event exists.
// Removed: dead commented-out alternatives for the not-found response.
func (c *ApiContext) GetEventByIdHandler(w *ResponseWriter, r *http.Request) error {
	eventId := mux.Vars(r)["id"]
	event, err := c.appContext.DataStore.GetEventById(eventId)
	if err != nil {
		log.Error("%v", err)
		return err
	}
	if event == nil {
		// No matching event: report a 404 rather than a generic error.
		return httpNotFoundResponse(fmt.Sprintf("No event with ID %s", eventId))
	}
	return w.OkJSON(event)
}
// ArchiveEventHandler archives the event identified by the "id" URL path
// variable on behalf of the user attached to the request session.
func (c *ApiContext) ArchiveEventHandler(w *ResponseWriter, r *http.Request) error {
	session := r.Context().Value("session").(*sessions.Session)
	eventId := mux.Vars(r)["id"]
	if err := c.appContext.DataStore.ArchiveEvent(eventId, session.User); err != nil {
		log.Error("Failed to archive event: %v", err)
		return err
	}
	return w.Ok()
}
// EscalateEventHandler escalates the event identified by the "id" URL
// path variable on behalf of the user attached to the request session.
func (c *ApiContext) EscalateEventHandler(w *ResponseWriter, r *http.Request) error {
	session := r.Context().Value("session").(*sessions.Session)
	eventId := mux.Vars(r)["id"]
	err := c.appContext.DataStore.EscalateEvent(eventId, session.User)
	if err != nil {
		// Log message grammar fixed: "escalated" -> "escalate".
		log.Error("Failed to escalate event: %v", err)
		return err
	}
	return w.Ok()
}
// DeEscalateEventHandler removes the escalation from the event identified
// by the "id" URL path variable on behalf of the session user.
func (c *ApiContext) DeEscalateEventHandler(w *ResponseWriter, r *http.Request) error {
	session := r.Context().Value("session").(*sessions.Session)
	eventId := mux.Vars(r)["id"]
	err := c.appContext.DataStore.DeEscalateEvent(eventId, session.User)
	if err != nil {
		// Log message grammar fixed: "de-escalated" -> "de-escalate".
		log.Error("Failed to de-escalate event: %v", err)
		return err
	}
	return w.Ok()
}
// EventQueryHandler services event search requests. All form parameters
// are optional: query_string, min_ts/max_ts (timestamps), sort_by, order,
// time_range, event_type and size. Malformed parameters yield a 400;
// otherwise the matching events are returned as JSON.
func (c *ApiContext) EventQueryHandler(w *ResponseWriter, r *http.Request) error {
	var options core.EventQueryOptions
	if err := r.ParseForm(); err != nil {
		return newHttpErrorResponse(http.StatusBadRequest, err)
	}
	options.QueryString = r.FormValue("query_string")
	maxTs, err := parseFormTimestamp(r, "max_ts")
	if err != nil {
		return errors.Wrap(err, "failed to parse max_ts")
	}
	options.MaxTs = maxTs
	minTs, err := parseFormTimestamp(r, "min_ts")
	if err != nil {
		return errors.Wrap(err, "failed to parse min_ts")
	}
	options.MinTs = minTs
	options.SortBy = r.FormValue("sort_by")
	options.SortOrder = r.FormValue("order")
	options.TimeRange = r.FormValue("time_range")
	options.EventType = r.FormValue("event_type")
	// Previously a parse error was silently ignored, leaving Size at 0.
	// Now an explicitly supplied but unparseable size is rejected.
	if sizeStr := r.FormValue("size"); sizeStr != "" {
		size, err := strconv.ParseInt(sizeStr, 0, 64)
		if err != nil {
			return newHttpErrorResponse(http.StatusBadRequest,
				errors.Wrap(err, "failed to parse size"))
		}
		options.Size = size
	}
	response, err := c.appContext.DataStore.EventQuery(options)
	if err != nil {
		return err
	}
	return w.OkJSON(response)
}
// parseFormTimestamp reads the named form value from the request and
// parses it as an EVE timestamp. An absent or empty value yields the
// zero time.Time with no error.
func parseFormTimestamp(request *http.Request, key string) (time.Time, error) {
	raw := request.FormValue(key)
	if raw == "" {
		return time.Time{}, nil
	}
	// A properly formatted timestamp may contain a "+" as part of the
	// time zone. Angular does not URL-encode the plus sign, so Go decodes
	// it as a space; undo that by turning the first space back into "+".
	fixed := strings.Replace(raw, " ", "+", 1)
	parsed, err := eve.ParseTimestamp(fixed)
	if err != nil {
		return time.Time{}, err
	}
	return parsed, nil
}
|
package utils
import (
"testing"
"github.com/go-errors/errors"
"github.com/stretchr/testify/assert"
)
// TestSplitLines verifies SplitLines against empty, newline-only and
// multi-line inputs.
func TestSplitLines(t *testing.T) {
	cases := []struct {
		input string
		want  []string
	}{
		{input: "", want: []string{}},
		{input: "\n", want: []string{}},
		{
			input: "hello world !\nhello universe !\n",
			want:  []string{"hello world !", "hello universe !"},
		},
	}
	for _, tc := range cases {
		assert.EqualValues(t, tc.want, SplitLines(tc.input))
	}
}
// TestWithPadding verifies WithPadding both when the string already
// exceeds the requested width and when it must be right-padded.
func TestWithPadding(t *testing.T) {
	cases := []struct {
		input   string
		padding int
		want    string
	}{
		{input: "hello world !", padding: 1, want: "hello world !"},
		{input: "hello world !", padding: 14, want: "hello world ! "},
	}
	for _, tc := range cases {
		assert.EqualValues(t, tc.want, WithPadding(tc.input, tc.padding))
	}
}
// TestNormalizeLinefeeds verifies that NormalizeLinefeeds rewrites CRLF
// and lone CR line endings to plain LF.
func TestNormalizeLinefeeds(t *testing.T) {
	cases := []struct {
		input []byte
		want  []byte
	}{
		// "asdf\r\n" -> "asdf\n"
		{input: []byte{97, 115, 100, 102, 13, 10}, want: []byte{97, 115, 100, 102, 10}},
		// "asdf\r\nasdf" -> "asdf\nasdf"
		{
			input: []byte{97, 115, 100, 102, 13, 10, 97, 115, 100, 102},
			want:  []byte{97, 115, 100, 102, 10, 97, 115, 100, 102},
		},
		// "asdf\r" -> "asdf"
		{input: []byte{97, 115, 100, 102, 13}, want: []byte{97, 115, 100, 102}},
		// "asdf\n" is left unchanged
		{input: []byte{97, 115, 100, 102, 10}, want: []byte{97, 115, 100, 102, 10}},
	}
	for _, tc := range cases {
		assert.EqualValues(t, string(tc.want), NormalizeLinefeeds(string(tc.input)))
	}
}
// TestResolvePlaceholderString verifies placeholder substitution,
// including missing arguments, empty values and malformed braces.
func TestResolvePlaceholderString(t *testing.T) {
	cases := []struct {
		template string
		args     map[string]string
		want     string
	}{
		{template: "", args: map[string]string{}, want: ""},
		{template: "hello", args: map[string]string{}, want: "hello"},
		{template: "hello {{arg}}", args: map[string]string{}, want: "hello {{arg}}"},
		{template: "hello {{arg}}", args: map[string]string{"arg": "there"}, want: "hello there"},
		{template: "hello", args: map[string]string{"arg": "there"}, want: "hello"},
		{template: "{{nothing}}", args: map[string]string{"nothing": ""}, want: ""},
		{
			template: "{{}} {{ this }} { should not throw}} an {{{{}}}} error",
			args: map[string]string{
				"blah": "blah",
				"this": "won't match",
			},
			want: "{{}} {{ this }} { should not throw}} an {{{{}}}} error",
		},
	}
	for _, tc := range cases {
		assert.EqualValues(t, tc.want, ResolvePlaceholderString(tc.template, tc.args))
	}
}
// TestDisplayArraysAligned verifies displayArraysAligned for rows of
// equal and unequal column counts.
func TestDisplayArraysAligned(t *testing.T) {
	cases := []struct {
		input [][]string
		want  bool
	}{
		{input: [][]string{{"", ""}, {"", ""}}, want: true},
		{input: [][]string{{""}, {"", ""}}, want: false},
	}
	for _, tc := range cases {
		assert.EqualValues(t, tc.want, displayArraysAligned(tc.input))
	}
}
// TestGetPaddedDisplayStrings verifies that rows are joined with their
// columns padded to the supplied widths.
func TestGetPaddedDisplayStrings(t *testing.T) {
	cases := []struct {
		rows      [][]string
		padWidths []int
		want      []string
	}{
		{
			rows:      [][]string{{"a", "b"}, {"c", "d"}},
			padWidths: []int{1},
			want:      []string{"a b", "c d"},
		},
	}
	for _, tc := range cases {
		assert.EqualValues(t, tc.want, getPaddedDisplayStrings(tc.rows, tc.padWidths))
	}
}
// TestGetPadWidths verifies the pad widths computed from rows of display
// strings, including all-empty and single-column inputs.
func TestGetPadWidths(t *testing.T) {
	cases := []struct {
		rows [][]string
		want []int
	}{
		{rows: [][]string{{""}, {""}}, want: []int{}},
		{rows: [][]string{{"a"}, {""}}, want: []int{}},
		{rows: [][]string{{"aa", "b", "ccc"}, {"c", "d", "e"}}, want: []int{2, 1}},
	}
	for _, tc := range cases {
		assert.EqualValues(t, tc.want, getPadWidths(tc.rows))
	}
}
// TestRenderTable verifies table rendering for aligned rows and the
// error raised when rows have unequal column counts.
func TestRenderTable(t *testing.T) {
	cases := []struct {
		input   [][]string
		want    string
		wantErr error
	}{
		{
			input:   [][]string{{"a", "b"}, {"c", "d"}},
			want:    "a b\nc d",
			wantErr: nil,
		},
		{
			input:   [][]string{{"aaaa", "b"}, {"c", "d"}},
			want:    "aaaa b\nc d",
			wantErr: nil,
		},
		{
			input:   [][]string{{"a"}, {"c", "d"}},
			want:    "",
			wantErr: errors.New("Each item must return the same number of strings to display"),
		},
	}
	for _, tc := range cases {
		got, err := RenderTable(tc.input)
		assert.EqualValues(t, tc.want, got)
		if tc.wantErr != nil {
			assert.EqualError(t, err, tc.wantErr.Error())
		} else {
			assert.NoError(t, err)
		}
	}
}
// TestMarshalIntoFormat verifies marshalIntoFormat against JSON and YAML
// targets, and checks that an unsupported format produces an error.
// NOTE: the raw-string expected values embed significant whitespace and
// newlines; their exact bytes must not be altered.
func TestMarshalIntoFormat(t *testing.T) {
	type innerData struct {
		Foo int    `json:"foo"`
		Bar string `json:"bar"`
		Baz bool   `json:"baz"`
	}
	type data struct {
		Qux  int       `json:"quz"`
		Quux innerData `json:"quux"`
	}
	type scenario struct {
		input       interface{}
		format      string
		expected    []byte
		expectedErr error
	}
	scenarios := []scenario{
		{
			input:  data{1, innerData{2, "foo", true}},
			format: "json",
			expected: []byte(`{
"quz": 1,
"quux": {
"foo": 2,
"bar": "foo",
"baz": true
}
}`),
			expectedErr: nil,
		},
		{
			input:  data{1, innerData{2, "foo", true}},
			format: "yaml",
			expected: []byte(`quz: 1
quux:
bar: foo
baz: true
foo: 2
`),
			expectedErr: nil,
		},
		{
			// Only json and yaml are supported; xml must error.
			input:       data{1, innerData{2, "foo", true}},
			format:      "xml",
			expected:    nil,
			expectedErr: errors.New("Unsupported detailization format: xml"),
		},
	}
	for _, s := range scenarios {
		output, err := marshalIntoFormat(s.input, s.format)
		assert.EqualValues(t, s.expected, output)
		if s.expectedErr != nil {
			assert.EqualError(t, err, s.expectedErr.Error())
		} else {
			assert.NoError(t, err)
		}
	}
}
|
package cache
import (
"github.com/go-redis/redis/v7"
"time"
)
// Service abstracts the Redis cache operations used by the application so
// that a single-node or cluster client can back it interchangeably.
type Service interface {
	// HSet sets field/value pairs on the hash stored at key.
	HSet(key string, values ...interface{}) error
	// HSetNX sets a hash field only if it does not already exist and
	// applies the given expiration; set reports whether the field was set.
	HSetNX(key string, field string, value interface{}, expiration time.Duration) (set bool, err error)
	// Expire sets a time-to-live on key.
	Expire(key string, expiration time.Duration) error
	// HGet returns the value of a single hash field.
	HGet(key string, field string) (string, error)
	// HGetAll returns all field/value pairs of the hash stored at key.
	HGetAll(key string) map[string]string
	// HDel deletes a field from the hash stored at key.
	HDel(key string, fields string) error
	// Del removes key entirely.
	Del(key string) error
	// Pipeline returns a Redis pipeliner for batching commands.
	Pipeline() redis.Pipeliner
}
// NewCacheService builds a cache Service backed by Redis, choosing a
// cluster client when clusterMode is true and a single-node client
// otherwise.
func NewCacheService(clusterMode bool, host, port, password string) (Service, error) {
	if !clusterMode {
		return NewSimpleCacheClient(host, port, password)
	}
	return NewClusterCacheClient(host, port, password)
}
|
package core
import (
"context"
"github.com/borchero/switchboard/core/utils"
"go.borchero.com/typewriter"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// Reconciler serves as the base for all specialized reconcilers, bundling
// the Kubernetes client, scheme and backend cache that they share.
type Reconciler struct {
	client client.Client       // Kubernetes API client from the manager
	scheme *runtime.Scheme     // scheme used for object (de)serialization
	cache  utils.BackendCache  // shared backend cache
}
// NewReconciler initializes a new reconciler that can be shared among
// specialized reconcilers, wiring in the manager's client and scheme plus
// a fresh backend cache.
func NewReconciler(manager ctrl.Manager) *Reconciler {
	reconciler := &Reconciler{}
	reconciler.client = manager.GetClient()
	reconciler.scheme = manager.GetScheme()
	reconciler.cache = utils.NewBackendCache()
	return reconciler
}
// doReconcile implements the shared reconciliation flow: it fetches the
// object named by the request and dispatches to the supplied callbacks.
//
// updateFn runs when the object exists and carries no deletion timestamp,
// deleteFn when it is being deleted, and emptyDeleteFn when it is already
// gone from the API server. Any callback error triggers a requeue.
// The delete callback was renamed deleteFn to stop shadowing the builtin.
func (r *Reconciler) doReconcile(
	request ctrl.Request,
	obj interface {
		runtime.Object
		metav1.Object
	},
	logger typewriter.Logger,
	updateFn func(typewriter.Logger) error,
	deleteFn func(typewriter.Logger) error,
	emptyDeleteFn func(types.NamespacedName, typewriter.Logger) error,
) (ctrl.Result, error) {
	ctx := context.Background()
	// Cluster-scoped objects carry no namespace to log.
	if request.Namespace == "" {
		logger = logger.With(request.Name)
	} else {
		logger = logger.With(request.Namespace).With(request.Name)
	}
	// 1) Get reconciled object
	if err := r.client.Get(ctx, request.NamespacedName, obj); err != nil {
		if apierrs.IsNotFound(err) {
			// 1.1) Object already deleted; give the caller a chance to
			// clean up external state before finishing.
			logger.Info("Object already deleted")
			if err := emptyDeleteFn(request.NamespacedName, logger); err != nil {
				logger.Error("Error occurred while reconciling deleted object", err)
				return ctrl.Result{Requeue: true}, nil
			}
			return ctrl.Result{}, nil
		}
		logger.Error("Failed to get reconciled object", err)
		return ctrl.Result{Requeue: true}, nil
	}
	// 2) Run reconciliation
	if obj.GetDeletionTimestamp().IsZero() {
		logger.Info("Updating")
		if err := updateFn(logger); err != nil {
			logger.Error("Failed updating object", err)
			return ctrl.Result{Requeue: true}, nil
		}
	} else {
		// Capitalized for consistency with the "Updating" message above.
		logger.Info("Deleting")
		if err := deleteFn(logger); err != nil {
			logger.Error("Failed deleting object", err)
			return ctrl.Result{Requeue: true}, nil
		}
	}
	logger.Info("Successfully reconciled")
	return ctrl.Result{}, nil
}
/////////////////////////
/// UTILITY FUNCTIONS ///
/////////////////////////
// noDelete is a no-op delete callback for reconcilers that require no
// cleanup when an object is deleted.
func noDelete(typewriter.Logger) error {
	return nil
}
// noEmptyDelete is a no-op callback for reconcilers that require no
// cleanup when the reconciled object is already gone from the API server.
func noEmptyDelete(types.NamespacedName, typewriter.Logger) error {
	return nil
}
|
package session
import (
"sync"
)
// SessionStorage is a set-like store of session keys.
type SessionStorage interface {
	// Put records the key as present.
	Put(key string)
	// Has reports whether the key is present.
	Has(key string) bool
	// Remove deletes the key if present.
	Remove(key string)
}
// mapSessionStorage implements SessionStorage with an in-memory map
// guarded by a read/write mutex for concurrent use.
type mapSessionStorage struct {
	storage map[string]bool // presence set; Put only ever stores true
	lock    *sync.RWMutex   // guards storage
}
// Put records key in the storage map under the write lock.
func (s *mapSessionStorage) Put(key string) {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.storage[key] = true
}
// Has reports whether key is present, holding the read lock. Put only
// ever stores true, so the map lookup's zero value doubles as the
// absence signal.
func (s *mapSessionStorage) Has(key string) bool {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return s.storage[key]
}
// Remove deletes key from the storage map under the write lock; removing
// an absent key is a harmless no-op.
func (s *mapSessionStorage) Remove(key string) {
	s.lock.Lock()
	defer s.lock.Unlock()
	delete(s.storage, key)
}
func BlogSessionStorage() SessionStorage {
return &mapSessionStorage{
storage: map[string]bool{},
lock: &sync.RWMutex{},
}
} |
package main
import "fmt"
// main demonstrates buffered channel basics: creating a channel, writing
// and reading values, length vs. capacity, type assertions on values read
// from a chan interface{}, and ranging over a closed channel.
// (Comments translated from Chinese; runtime strings left untouched.)
func main() {
	//1. Create a channel that can hold 3 int values.
	var intChan chan int
	intChan = make(chan int, 3)
	//2. Inspect the channel value and the variable's own address.
	fmt.Printf("intChan 的值=%v intChan本身的地址=%p\n", intChan, &intChan)
	//3. Write data into the channel.
	intChan <- 10
	intChan <- 90
	intChan <- 100
	//intChan <- 101 //fatal error: all goroutines are asleep - deadlock!
	// Note: writing more values than the capacity (3) without a reader
	// deadlocks the program.
	//4. Check the channel's length and capacity (cap).
	fmt.Printf("channel len=%v,cap=%v\n", len(intChan), cap(intChan)) //len=3,cap=3
	//5. Reading from the channel shrinks its length (len).
	<-intChan
	fmt.Printf("channel len=%v,cap=%v\n", len(intChan), cap(intChan)) //len=2,cap=3
	//6. Without goroutines, reading from a fully drained channel deadlocks.
	num1 := <-intChan
	num2 := <-intChan
	//num3 := <-intChan//fatal error: all goroutines are asleep - deadlock!
	fmt.Println(num1, num2 /*, num3*/)
	// A type-assertion example.
	var allChan chan interface{} = make(chan interface{}, 5)
	allChan <- 10
	allChan <- "tom"
	cat := Cat{"小黑猫", "黑色"}
	allChan <- cat
	// To reach the third element, drain the first two.
	<-allChan
	<-allChan
	newCat := <-allChan // newCat is an interface{} holding the cat instance
	fmt.Printf("newCat=%T,newCat=%v\n", newCat, newCat)
	//fmt.Println(newCat.Name)//newCat.Name undefined (type interface {} is interface with no methods)
	a := newCat.(Cat)
	fmt.Println("猫名", a.Name)
	// Iterating over a channel.
	intChan2 := make(chan int, 100)
	for i := 0; i < 100; i++ {
		intChan2 <- i
	}
	// Ranging over an unclosed channel deadlocks once it drains:
	// all goroutines are asleep - deadlock!
	close(intChan2) // without close, range would block here forever
	for v := range intChan2 {
		fmt.Println("v=", v)
	}
	//1. Iterating over a channel (repeat of the block above).
	intChan3 := make(chan int, 100)
	for i := 0; i < 100; i++ {
		intChan3 <- i
	}
	// Ranging over an unclosed channel deadlocks once it drains.
	close(intChan3) // without close, range would block here forever
	for v := range intChan3 {
		fmt.Println("v=", v)
	}
}
// Cat models a cat with a name and a fur color.
type Cat struct {
	Name  string
	Color string
}
//channel
// 1)channel本质是一个数据结构-队列
// 2)数据是先进先出【FIFO】
// 3)线程安全,多goroutine访问时,不需要加锁,就是说channel本身就是线程安全的
// 4)channel有类型的,一个string的channel只能存放string类型数据
//声明
// var 变量 chan 数据类型
// var intChan chan int
// var mapChan chan map[int]string
// var perChan chan Person
// var perChan2 chan *Person
// //说明
// 1)channel是引用类型
// 2)channel必须初始化才能写入数据,即make后才能使用
// 3)管道是有类型的,intChan只能写入整数int
//注意事项
// 1)channel中只能存放指定的数据类型
// 2)channel的数据放满后,就不能再放入了
// 3)如果从channel取出数据后,可以继续放入
// 4)在没有使用协程的情况下,如果channel数据取完了,再取,就会报deadlock
|
// Package exec takes a core and config as input and completes one iteration,
// returning any changed game conditions, such as terminated warriors.
package exec
|
package mysqldb
import (
"context"
"time"
)
// UserPreferences holds a user's per-feature display preferences. Each
// Enable* field is an int32 flag toggling one report feature.
// (Comments translated from Chinese.)
type UserPreferences struct {
	UserID                            int32 `gorm:"primary_key;column:user_id"`
	EnableHeartRateChart              int32 // enable heart-rate sector chart
	EnablePulseWaveChart              int32 // enable pulse waveform chart
	EnableWarmPrompt                  int32 // enable friendly reminder prompts
	EnableChooseStatus                int32 // enable status selection
	EnableConstitutionDifferentiation int32 // enable TCM constitution analysis
	EnableSyndromeDifferentiation     int32 // enable TCM zang-fu organ analysis
	EnableWesternMedicineAnalysis     int32 // enable western-medicine analysis
	EnableMeridianBarGraph            int32 // enable meridian bar graph
	EnableComment                     int32 // enable remarks
	EnableHealthTrending              int32 // enable health trend reports
	EnableLocationNotification        int32 // enable local notifications
	CreatedAt                         time.Time  // creation time
	UpdatedAt                         time.Time  // last update time
	DeletedAt                         *time.Time // soft-delete time (nil while active)
}
// TableName returns the database table name backing UserPreferences.
func (u UserPreferences) TableName() string {
	return "user_preferences"
}
// GetUserPreferencesByUserID returns the stored preferences for userID,
// or the underlying lookup error when the query fails or no row exists.
func (db *DbClient) GetUserPreferencesByUserID(ctx context.Context, userID int32) (*UserPreferences, error) {
	var u UserPreferences
	if err := db.GetDB(ctx).First(&u, "user_id = ? ", userID).Error; err != nil {
		return nil, err
	}
	return &u, nil
}
// CreateUserPreferences inserts a fresh preferences row for the given
// user, stamping both creation and update times with the current time.
func (db *DbClient) CreateUserPreferences(ctx context.Context, userID int32) error {
	now := time.Now()
	prefs := UserPreferences{
		CreatedAt: now,
		UpdatedAt: now,
		UserID:    userID,
	}
	return db.GetDB(ctx).Create(&prefs).Error
}
|
package pathfileops
import (
"fmt"
"strings"
"testing"
)
const (
	// logDir is the relative path of the directory used by log-file tests.
	logDir = "../../logTest"
	// commonDir is the relative path of this package's own directory.
	commonDir = "../../pathfileops"
)
// TestCleanDir verifies that CleanPathStr normalizes a path containing
// redundant separators to the same clean form as the canonical path.
func TestCleanDir(t *testing.T) {
	var expected, cleanDir, targetDir string
	fh := FileHelper{}
	targetDir = "../..///pathfileops"
	cleanDir = fh.CleanPathStr(targetDir)
	expected = fh.CleanPathStr(commonDir)
	if cleanDir != expected {
		// t.Errorf replaces the former t.Error(fmt.Sprintf(...)) pattern.
		t.Errorf("Expected Clean Version of %v, got: %v", commonDir, cleanDir)
	}
}
// TestDirMgr_ChangeWorkingDir_01 changes the working directory to the
// log-test directory, verifies the change, then restores and verifies
// the original working directory.
func TestDirMgr_ChangeWorkingDir_01(t *testing.T) {
	var err error
	var startDir, checkDir, targetDir string
	fh := FileHelper{}
	startDir, err = fh.GetAbsCurrDir()
	if err != nil {
		// Message corrected: the call is GetAbsCurrDir, not "GetAnsCurrDir".
		t.Error("GetAbsCurrDir() Failed:", err)
	}
	targetDir, err = fh.MakeAbsolutePath(logDir)
	if err != nil {
		t.Error("MakeAbsolutePath() Failed:", err)
	}
	err = fh.ChangeWorkingDir(targetDir)
	if err != nil {
		t.Error("ChangeWorkingDir() Failed:", err)
	}
	checkDir, err = fh.GetAbsCurrDir()
	if err != nil {
		t.Error("GetAbsCurrDir() 2 Failed:", err)
	}
	if checkDir != targetDir {
		t.Error("Target Dir != CheckDir")
	}
	err = fh.ChangeWorkingDir(startDir)
	if err != nil {
		t.Error("Change To Start Dir Failed:", err)
	}
	checkDir, err = fh.GetAbsCurrDir()
	if err != nil {
		t.Errorf("GetAbsCurrDir() 3 Failed. Error='%v'", err)
	}
	if checkDir != startDir {
		t.Error("Start Dir != CheckDir")
	}
}
// TestDirMgr_ConsolidateErrors_01 consolidates nine distinct errors and
// verifies every individual error text appears in the combined error.
func TestDirMgr_ConsolidateErrors_01(t *testing.T) {
	const maxCnt = 9
	errs := make([]error, 0, 300)
	for i := 1; i <= maxCnt; i++ {
		errs = append(errs, fmt.Errorf("Heder\nError-%v text\n\n", i))
	}
	conSolError := DirMgr{}.ConsolidateErrors(errs)
	if conSolError == nil {
		t.Error("Test Error returned from DirMgr{}.ConsolidateErrors(errs) is 'nil'\n")
		return
	}
	errStr := conSolError.Error()
	if len(errStr) == 0 {
		t.Error("Error string returned from DirMgr{}.ConsolidateErrors(errs) is zero length")
	}
	foundCnt := 0
	for j := 1; j <= maxCnt; j++ {
		if strings.Contains(errStr, fmt.Sprintf("Error-%v text", j)) {
			foundCnt++
		}
	}
	if foundCnt != maxCnt {
		t.Errorf("ERROR: Expected Error String to contain %v Errors.\n"+
			"Instead, found only %v Errors.",
			maxCnt, foundCnt)
	}
}
// TestDirMgr_ConsolidateErrors_02 consolidates nine errors through a
// DirMgr instance and verifies every error text appears in the result.
func TestDirMgr_ConsolidateErrors_02(t *testing.T) {
	testDir := "../../checkfiles"
	testDirMgr, err := DirMgr{}.New(testDir)
	if err != nil {
		t.Errorf("Test Setup Error returned by testDirMgr, err := DirMgr{}.New(testDir).\n"+
			"testDir='%v'\nError='%v'\n",
			testDir, err.Error())
		return
	}
	const maxCnt = 9
	errs := make([]error, 0, 300)
	for i := 1; i <= maxCnt; i++ {
		errs = append(errs, fmt.Errorf("Heder\nError-%v text\n\n", i))
	}
	conSolError := testDirMgr.ConsolidateErrors(errs)
	if conSolError == nil {
		t.Error("Test Error returned from testDirMgr{}.ConsolidateErrors(errs) is 'nil'\n")
		return
	}
	errStr := conSolError.Error()
	if len(errStr) == 0 {
		t.Error("Error string returned from DirMgr{}.ConsolidateErrors(errs) is zero length")
	}
	foundCnt := 0
	for j := 1; j <= maxCnt; j++ {
		if strings.Contains(errStr, fmt.Sprintf("Error-%v text", j)) {
			foundCnt++
		}
	}
	if foundCnt != maxCnt {
		t.Errorf("ERROR: Expected Error String to contain %v Errors.\n"+
			"Instead, found only %v Errors.",
			maxCnt, foundCnt)
	}
}
// TestDirMgr_ConsolidateErrors_03 verifies that consolidating an empty
// error slice yields a nil error.
func TestDirMgr_ConsolidateErrors_03(t *testing.T) {
	testDir := "../../checkfiles"
	testDirMgr, err := DirMgr{}.New(testDir)
	if err != nil {
		t.Errorf("Test Setup Error returned by testDirMgr, err := DirMgr{}.New(testDir).\n"+
			"testDir='%v'\nError='%v'\n",
			testDir, err.Error())
		return
	}
	errs := make([]error, 0, 300)
	conSolError := testDirMgr.ConsolidateErrors(errs)
	if conSolError != nil {
		// Message corrected: errs is an empty slice here, not 'nil'.
		t.Error("ERROR: Expected a 'nil' return from testDirMgr.ConsolidateErrors(errs)\n" +
			"because errs is an empty slice. However, the returned value was NOT 'nil'.")
	}
}
// TestDirMgr_CopyDirectory_01 copies a directory holding five test files
// into a fresh target directory and verifies that all five files arrive.
// Fixes: "\targetDir" format bug (tab + "argetDir") -> "\ntargetDir", and
// the "specfic" typo in the not-found message.
func TestDirMgr_CopyDirectory_01(t *testing.T) {
	targetDir := "../../checkfiles/TestDirMgr_CopyFilesToDirectory_01"
	fh := FileHelper{}
	err := fh.DeleteDirPathAll(targetDir)
	if err != nil {
		t.Errorf("Test Setup Error returned by fh.DeleteDirPathAll(targetDir).\n"+
			"testDir='%v'\nError='%v'\n", targetDir, err.Error())
		return
	}
	targetDMgr, err := DirMgr{}.New(targetDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(targetDMgr).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr, err.Error())
		return
	}
	srcDir1 := "../../filesfortest/levelfilesfortest"
	srcDMgr, err := DirMgr{}.New(srcDir1)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(srcDir1).\n"+
			"srcDir1='%v'\nError='%v'\n", srcDir1, err.Error())
		return
	}
	fsc := FileSelectionCriteria{}
	dirCopyStats,
		errs := srcDMgr.CopyDirectory(targetDMgr, fsc, false)
	if len(errs) > 0 {
		t.Errorf("Error returned from srcDMgr.CopyDirectory(targetDMgr, fsc)\n"+
			"targetDir='%v'\nErrors Follow:\n\n%v",
			targetDMgr.GetAbsolutePath(),
			targetDMgr.ConsolidateErrors(errs))
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}
	// 5 txt src Files
	/*
		"../../filesfortest/levelfilesfortest/level_0_0_test.txt"
		"../../filesfortest/levelfilesfortest/level_0_1_test.txt"
		"../../filesfortest/levelfilesfortest/level_0_2_test.txt"
		"../../filesfortest/levelfilesfortest/level_0_3_test.txt"
		"../../filesfortest/levelfilesfortest/level_0_4_test.txt"
	*/
	fileNames := []string{"level_0_0_test.txt",
		"level_0_1_test.txt",
		"level_0_2_test.txt",
		"level_0_3_test.txt",
		"level_0_4_test.txt"}
	fsc = FileSelectionCriteria{}
	fMgrCollection, err := targetDMgr.FindFilesBySelectCriteria(fsc)
	if err != nil {
		t.Errorf("Test Setup Error returned by targetDMgr.FindFilesBySelectCriteria(fsc).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr.GetAbsolutePath(), err.Error())
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}
	if fMgrCollection.GetNumOfFileMgrs() != 5 {
		t.Errorf("Test Setup Error: Expected to find 5-files in 'targetDir'.\n"+
			"Instead, %v-files were found.", fMgrCollection.GetNumOfFileMgrs())
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}
	if 5 != dirCopyStats.FilesCopied {
		t.Errorf("Test Setup Error: Expected that dirCopyStats.FilesCopied='5'.\n"+
			"Instead, dirCopyStats.FilesCopied='%v'.\n",
			dirCopyStats.FilesCopied)
		return
	}
	for i := 0; i < fMgrCollection.GetNumOfFileMgrs(); i++ {
		fMgr, err := fMgrCollection.GetFileMgrAtIndex(i)
		if err != nil {
			t.Errorf("Error returned by fMgrCollection.GetFileMgrAtIndex(%v)\n"+
				"Error='%v'\n", i, err.Error())
			_ = fh.DeleteDirPathAll(targetDir)
			return
		}
		fileName := fMgr.GetFileNameExt()
		foundFile := false
		for k := 0; k < len(fileNames); k++ {
			if fileNames[k] == fileName {
				foundFile = true
			}
		}
		if foundFile == false {
			t.Errorf("Error: File NOT Found. Expected to find specific file Name.\n"+
				"However, it WAS NOT FOUND!\nFileName='%v'", fileName)
		}
	}
	err = fh.DeleteDirPathAll(targetDir)
	if err != nil {
		t.Errorf("Test Clean-Up Error returned by "+
			"fh.DeleteDirPathAll(targetDir)\ntargetDir='%v'\n"+
			"Error='%v'\n", targetDir, err.Error())
	}
	return
}
// TestDirMgr_CopyDirectory_02 expects CopyDirectory to fail when the
// source directory does not exist.
// Fix: "\targetDir" format bug (tab + "argetDir") -> "\ntargetDir".
func TestDirMgr_CopyDirectory_02(t *testing.T) {
	targetDir := "../../checkfiles/TestDirMgr_CopyFilesToDirectory_02"
	fh := FileHelper{}
	err := fh.DeleteDirPathAll(targetDir)
	if err != nil {
		t.Errorf("Test Setup Error returned by fh.DeleteDirPathAll(targetDir).\n"+
			"testDir='%v'\nError='%v'\n", targetDir, err.Error())
		return
	}
	targetDMgr, err := DirMgr{}.New(targetDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(targetDMgr).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr, err.Error())
		return
	}
	srcDir1 := "../../filesfortest/iDoNotExist"
	srcDMgr, err := DirMgr{}.New(srcDir1)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(srcDir1).\n"+
			"srcDir1='%v'\nError='%v'\n", srcDir1, err.Error())
		return
	}
	fsc := FileSelectionCriteria{}
	_,
		errs := srcDMgr.CopyDirectory(targetDMgr, fsc, false)
	if len(errs) == 0 {
		t.Error("Expected an error return from srcDMgr.CopyDirectory(targetDMgr, fsc)\n" +
			"because 'srcDMgr' path DOES NOT EXIST!\nHowever, NO ERROR WAS RETURNED!")
	}
	err = fh.DeleteDirPathAll(targetDir)
	if err != nil {
		t.Errorf("Test Clean-Up Error returned by "+
			"fh.DeleteDirPathAll(targetDir)\ntargetDir='%v'\n"+
			"Error='%v'\n", targetDir, err.Error())
	}
	return
}
// TestDirMgr_CopyDirectory_03 expects CopyDirectory to fail when the
// source DirMgr is marked uninitialized (invalid).
// Fix: "\targetDir" format bug (tab + "argetDir") -> "\ntargetDir".
func TestDirMgr_CopyDirectory_03(t *testing.T) {
	targetDir := "../../checkfiles/TestDirMgr_CopyFilesToDirectory_03"
	fh := FileHelper{}
	err := fh.DeleteDirPathAll(targetDir)
	if err != nil {
		t.Errorf("Test Setup Error returned by fh.DeleteDirPathAll(targetDir).\n"+
			"testDir='%v'\nError='%v'\n", targetDir, err.Error())
		return
	}
	targetDMgr, err := DirMgr{}.New(targetDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(targetDMgr).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr, err.Error())
		return
	}
	srcDir1 := "../../filesfortest/levelfilesfortest"
	srcDMgr, err := DirMgr{}.New(srcDir1)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(srcDir1).\n"+
			"srcDir1='%v'\nError='%v'\n", srcDir1, err.Error())
		return
	}
	fsc := FileSelectionCriteria{}
	// Force the source manager into an invalid state.
	srcDMgr.isInitialized = false
	_,
		errs := srcDMgr.CopyDirectory(targetDMgr, fsc, false)
	if len(errs) == 0 {
		t.Error("Expected an error return from srcDMgr.CopyDirectory(targetDMgr, fsc)\n" +
			"because 'srcDMgr' is INVALID!\nHowever, NO ERROR WAS RETURNED!")
	}
	srcDMgr.isInitialized = true
	err = fh.DeleteDirPathAll(targetDir)
	if err != nil {
		t.Errorf("Test Clean-Up Error returned by "+
			"fh.DeleteDirPathAll(targetDir)\ntargetDir='%v'\n"+
			"Error='%v'\n", targetDir, err.Error())
	}
	return
}
// TestDirMgr_CopyDirectory_04 expects CopyDirectory to fail when the
// target DirMgr is marked uninitialized (invalid).
// Fix: "\targetDir" format bug (tab + "argetDir") -> "\ntargetDir".
func TestDirMgr_CopyDirectory_04(t *testing.T) {
	targetDir := "../../checkfiles/TestDirMgr_CopyFilesToDirectory_04"
	fh := FileHelper{}
	err := fh.DeleteDirPathAll(targetDir)
	if err != nil {
		t.Errorf("Test Setup Error returned by fh.DeleteDirPathAll(targetDir).\n"+
			"testDir='%v'\nError='%v'\n", targetDir, err.Error())
		return
	}
	targetDMgr, err := DirMgr{}.New(targetDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(targetDMgr).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr, err.Error())
		return
	}
	srcDir1 := "../../filesfortest/levelfilesfortest"
	srcDMgr, err := DirMgr{}.New(srcDir1)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(srcDir1).\n"+
			"srcDir1='%v'\nError='%v'\n", srcDir1, err.Error())
		return
	}
	fsc := FileSelectionCriteria{}
	// Force the target manager into an invalid state.
	targetDMgr.isInitialized = false
	_,
		errs := srcDMgr.CopyDirectory(targetDMgr, fsc, false)
	if len(errs) == 0 {
		t.Error("Expected an error return from srcDMgr.CopyDirectory(targetDMgr, fsc)\n" +
			"because 'targetDMgr' is INVALID!\nHowever, NO ERROR WAS RETURNED!")
	}
	targetDMgr.isInitialized = true
	err = fh.DeleteDirPathAll(targetDir)
	if err != nil {
		t.Errorf("Test Clean-Up Error returned by "+
			"fh.DeleteDirPathAll(targetDir)\ntargetDir='%v'\n"+
			"Error='%v'\n", targetDir, err.Error())
	}
	return
}
// TestDirMgr_CopyDirectory_05 copies with a "*.htm" pattern that matches
// none of the source files and verifies that no target directory is
// created and no files are copied.
// Fix: "\targetDir" format bug (tab + "argetDir") -> "\ntargetDir".
func TestDirMgr_CopyDirectory_05(t *testing.T) {
	targetDir := "../../checkfiles/TestDirMgr_CopyFilesToDirectory_05"
	fh := FileHelper{}
	err := fh.DeleteDirPathAll(targetDir)
	if err != nil {
		t.Errorf("Test Setup Error returned by fh.DeleteDirPathAll(targetDir).\n"+
			"testDir='%v'\nError='%v'\n", targetDir, err.Error())
		return
	}
	targetDMgr, err := DirMgr{}.New(targetDir)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(targetDMgr).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr, err.Error())
		return
	}
	srcDir1 := "../../filesfortest/levelfilesfortest"
	srcDMgr, err := DirMgr{}.New(srcDir1)
	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(srcDir1).\n"+
			"srcDir1='%v'\nError='%v'\n", srcDir1, err.Error())
		return
	}
	fsc := FileSelectionCriteria{}
	// The source directory contains only *.txt files, so nothing matches.
	fsc.FileNamePatterns = []string{"*.htm"}
	dirCopyStats,
		errs := srcDMgr.CopyDirectory(targetDMgr, fsc, false)
	if len(errs) > 0 {
		t.Errorf("Errors returned from srcDMgr.CopyDirectory(targetDMgr, fsc)\n"+
			"targetDir='%v'\nErrors Follow:\n\n%v",
			targetDMgr.GetAbsolutePath(),
			targetDMgr.ConsolidateErrors(errs))
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}
	// 5 txt src Files
	/*
		"../../filesfortest/levelfilesfortest/level_0_0_test.txt"
		"../../filesfortest/levelfilesfortest/level_0_1_test.txt"
		"../../filesfortest/levelfilesfortest/level_0_2_test.txt"
		"../../filesfortest/levelfilesfortest/level_0_3_test.txt"
		"../../filesfortest/levelfilesfortest/level_0_4_test.txt"
	*/
	if targetDMgr.DoesAbsolutePathExist() {
		t.Errorf("Error: Expected that target directory would not exist because\n" +
			"none of the source files matched the search criteria.\n" +
			"However, the target directory DOES EXIST!!!")
	}
	if dirCopyStats.FilesCopied != 0 {
		t.Errorf("Expected that dirCopyStats.FilesCopied='0'.\n"+
			"Instead, dirCopyStats.FilesCopied='%v'", dirCopyStats.FilesCopied)
	}
	err = fh.DeleteDirPathAll(targetDir)
	if err != nil {
		t.Errorf("Test Clean-Up Error returned by "+
			"fh.DeleteDirPathAll(targetDir)\ntargetDir='%v'\n"+
			"Error='%v'\n", targetDir, err.Error())
	}
	return
}
// TestDirMgr_CopyDirectory_06 verifies that CopyDirectory() invoked with an
// empty (match-all) FileSelectionCriteria copies all five source text files
// to the target directory and reports accurate copy statistics.
func TestDirMgr_CopyDirectory_06(t *testing.T) {

	targetDir := "../../checkfiles/TestDirMgr_CopyFilesToDirectory_06"

	fh := FileHelper{}

	// Test setup: make sure the target directory does not exist.
	err := fh.DeleteDirPathAll(targetDir)

	if err != nil {
		t.Errorf("Test Setup Error returned by fh.DeleteDirPathAll(targetDir).\n"+
			"testDir='%v'\nError='%v'\n", targetDir, err.Error())
		return
	}

	targetDMgr, err := DirMgr{}.New(targetDir)

	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(targetDMgr).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr, err.Error())
		return
	}

	srcDir1 := "../../filesfortest/levelfilesfortest"

	srcDMgr, err := DirMgr{}.New(srcDir1)

	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(srcDir1).\n"+
			"srcDir1='%v'\nError='%v'\n", srcDir1, err.Error())
		return
	}

	// An empty FileSelectionCriteria matches every file in the source directory.
	fsc := FileSelectionCriteria{}

	dirCopyStats,
		errs := srcDMgr.CopyDirectory(targetDMgr, fsc, false)

	if len(errs) > 0 {
		t.Errorf("Error returned from srcDMgr.CopyDirectory(targetDMgr, fsc)\n"+
			"targetDir='%v'\nErrors Follow:\n\n%v",
			targetDMgr.GetAbsolutePath(),
			targetDMgr.ConsolidateErrors(errs))
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}

	// 5 txt src Files
	/*
		"../../filesfortest/levelfilesfortest/level_0_0_test.txt"
		"../../filesfortest/levelfilesfortest/level_0_1_test.txt"
		"../../filesfortest/levelfilesfortest/level_0_2_test.txt"
		"../../filesfortest/levelfilesfortest/level_0_3_test.txt"
		"../../filesfortest/levelfilesfortest/level_0_4_test.txt"
	*/

	fileNames := []string{"level_0_0_test.txt",
		"level_0_1_test.txt",
		"level_0_2_test.txt",
		"level_0_3_test.txt",
		"level_0_4_test.txt"}

	fsc = FileSelectionCriteria{}

	fMgrCollection, err := targetDMgr.FindFilesBySelectCriteria(fsc)

	if err != nil {
		t.Errorf("Test Setup Error returned by targetDMgr.FindFilesBySelectCriteria(fsc).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr.GetAbsolutePath(), err.Error())
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}

	if fMgrCollection.GetNumOfFileMgrs() != 5 {
		t.Errorf("Error: Expected to find 5-files in 'targetDir'.\n"+
			"Instead, %v-files were found.", fMgrCollection.GetNumOfFileMgrs())
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}

	if 5 != dirCopyStats.FilesCopied {
		t.Errorf("Error: Expected that dirCopyStats.FilesCopied='5'.\n"+
			"Instead, dirCopyStats.FilesCopied='%v'.\n",
			dirCopyStats.FilesCopied)
		// Clean up on failure so subsequent runs start from a known state.
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}

	if 0 != dirCopyStats.FilesNotCopied {
		t.Errorf("Error: Expected that dirCopyStats.FilesNotCopied='0'.\n"+
			"Instead, dirCopyStats.FilesNotCopied='%v'.\n",
			dirCopyStats.FilesNotCopied)
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}

	if 5 != dirCopyStats.TotalFilesProcessed {
		t.Errorf("Error: Expected that dirCopyStats.TotalFilesProcessed='5'.\n"+
			"Instead, dirCopyStats.TotalFilesProcessed='%v'.\n",
			dirCopyStats.TotalFilesProcessed)
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}

	if 1 != dirCopyStats.DirCreated {
		// Fixed failure message: it previously labeled the printed value
		// with the wrong field name ('TotalFilesProcessed').
		t.Errorf("Error: Expected that dirCopyStats.DirCreated='1'.\n"+
			"Instead, dirCopyStats.DirCreated='%v'.\n",
			dirCopyStats.DirCreated)
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}

	if dirCopyStats.ComputeError != nil {
		t.Errorf("Error: Expected that dirCopyStats.ComputeError='nil'.\n"+
			"Instead, dirCopyStats.ComputeError='%v'.\n",
			dirCopyStats.ComputeError.Error())
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}

	// Verify every file landed in the target directory is one of the five
	// expected source file names.
	for i := 0; i < fMgrCollection.GetNumOfFileMgrs(); i++ {

		fMgr, err := fMgrCollection.GetFileMgrAtIndex(i)

		if err != nil {
			t.Errorf("Error returned by fMgrCollection.GetFileMgrAtIndex(%v)\n"+
				"Error='%v'\n", i, err.Error())
			_ = fh.DeleteDirPathAll(targetDir)
			return
		}

		fileName := fMgr.GetFileNameExt()

		foundFile := false

		for k := 0; k < len(fileNames); k++ {
			if fileNames[k] == fileName {
				foundFile = true
			}
		}

		if foundFile == false {
			t.Errorf("Error: File NOT Found. Expected to find specific file Name.\n"+
				"However, it WAS NOT FOUND!\nFileName='%v'", fileName)
		}
	}

	// Test clean-up: remove the target directory.
	err = fh.DeleteDirPathAll(targetDir)

	if err != nil {
		// Fixed format string: the original contained '\t' (a tab) followed
		// by 'argetDir', mangling the intended 'targetDir' label.
		t.Errorf("Test Clean-Up Error returned by "+
			"fh.DeleteDirPathAll(targetDir)\ntargetDir='%v'\n"+
			"Error='%v'\n", targetDir, err.Error())
	}

	return
}
// TestDirMgr_CopyDirectory_07 verifies that CopyDirectory() called with a
// file pattern matching no source files ('*.xxx') and
// 'copyEmptyDirectory=true' still creates the target directory, copies no
// files, and reports accurate statistics.
func TestDirMgr_CopyDirectory_07(t *testing.T) {

	targetDir := "../../checkfiles/TestDirMgr_CopyFilesToDirectory_07"

	fh := FileHelper{}

	// Test setup: make sure the target directory does not exist.
	err := fh.DeleteDirPathAll(targetDir)

	if err != nil {
		t.Errorf("Test Setup Error returned by fh.DeleteDirPathAll(targetDir).\n"+
			"testDir='%v'\nError='%v'\n", targetDir, err.Error())
		return
	}

	targetDMgr, err := DirMgr{}.New(targetDir)

	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(targetDMgr).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr, err.Error())
		return
	}

	srcDir1 := "../../filesfortest/levelfilesfortest"

	srcDMgr, err := DirMgr{}.New(srcDir1)

	if err != nil {
		t.Errorf("Test Setup Error returned from DirMgr{}.New(srcDir1).\n"+
			"srcDir1='%v'\nError='%v'\n", srcDir1, err.Error())
		return
	}

	// Select only '*.xxx' files — none exist in the source directory.
	fsc := FileSelectionCriteria{}
	fsc.FileNamePatterns = []string{"*.xxx"}

	dirCopyStats,
		errs := srcDMgr.CopyDirectory(targetDMgr, fsc, true)

	if len(errs) > 0 {
		t.Errorf("Error returned from srcDMgr.CopyDirectory(targetDMgr, fsc)\n"+
			"targetDir='%v'\nErrors Follow:\n\n%v",
			targetDMgr.GetAbsolutePath(),
			targetDMgr.ConsolidateErrors(errs))
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}

	if !targetDMgr.DoesPathExist() {
		t.Errorf("Error: CopyDirectory() was called with parameter, 'copyEmptyDirectory' = 'true'.\n"+
			"Therefore, the target directory should have been created even though no files were\n"+
			"copied to the target director. However, the target directory was NOT created and DOES NOT EXIST!\n"+
			"targetDir='%v'\n",
			targetDMgr.GetAbsolutePath())
		return
	}

	fsc = FileSelectionCriteria{}

	fMgrCollection, err := targetDMgr.FindFilesBySelectCriteria(fsc)

	if err != nil {
		t.Errorf("Test Setup Error returned by targetDMgr.FindFilesBySelectCriteria(fsc).\n"+
			"targetDMgr='%v'\nError='%v'\n", targetDMgr.GetAbsolutePath(), err.Error())
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}

	if fMgrCollection.GetNumOfFileMgrs() != 0 {
		t.Errorf("Error: Expected to find 0-files in 'targetDir'.\n"+
			"Instead, %v-files were found.", fMgrCollection.GetNumOfFileMgrs())
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}

	if 0 != dirCopyStats.FilesCopied {
		t.Errorf("Error: Expected that dirCopyStats.FilesCopied='0'.\n"+
			"Instead, dirCopyStats.FilesCopied='%v'.\n",
			dirCopyStats.FilesCopied)
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}

	if 5 != dirCopyStats.TotalFilesProcessed {
		t.Errorf("Error: Expected that dirCopyStats.TotalFilesProcessed='5'.\n"+
			"Instead, dirCopyStats.TotalFilesProcessed='%v'.\n",
			dirCopyStats.TotalFilesProcessed)
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}

	if 5 != dirCopyStats.FilesNotCopied {
		t.Errorf("Error: Expected that dirCopyStats.FilesNotCopied='5'.\n"+
			"Instead, dirCopyStats.FilesNotCopied='%v'.\n",
			dirCopyStats.FilesNotCopied)
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}

	if 1 != dirCopyStats.DirCreated {
		// Fixed failure message: it previously labeled the printed value
		// with the wrong field name ('TotalFilesProcessed').
		t.Errorf("Error: Expected that dirCopyStats.DirCreated='1'.\n"+
			"Instead, dirCopyStats.DirCreated='%v'.\n",
			dirCopyStats.DirCreated)
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}

	if dirCopyStats.ComputeError != nil {
		t.Errorf("Error: Expected that dirCopyStats.ComputeError='nil'.\n"+
			"Instead, dirCopyStats.ComputeError='%v'.\n",
			dirCopyStats.ComputeError.Error())
		_ = fh.DeleteDirPathAll(targetDir)
		return
	}

	// Test clean-up: remove the target directory.
	err = fh.DeleteDirPathAll(targetDir)

	if err != nil {
		// Fixed format string: the original contained '\t' (a tab) followed
		// by 'argetDir', mangling the intended 'targetDir' label.
		t.Errorf("Test Clean-Up Error returned by "+
			"fh.DeleteDirPathAll(targetDir)\ntargetDir='%v'\n"+
			"Error='%v'\n", targetDir, err.Error())
	}

	return
}
|
package resolver
import (
"fmt"
operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
v1listers "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/listers/operators/v1"
"k8s.io/apimachinery/pkg/labels"
)
// IsFailForwardEnabled takes a namespaced operatorGroup lister and returns
// True if an operatorGroup exists in the namespace and its upgradeStrategy
// is set to UnsafeFailForward and false otherwise. An error is returned if
// more than one operatorGroup exists in the namespace.
// No error is returned if no OperatorGroups are found to keep the resolver
// backwards compatible.
func IsFailForwardEnabled(ogLister v1listers.OperatorGroupNamespaceLister) (bool, error) {
	ogs, err := ogLister.List(labels.Everything())
	if err != nil {
		return false, err
	}
	// Zero operatorGroups is not an error (see doc comment above): the
	// original code returned an error for len != 1, contradicting the
	// documented backwards-compatible behavior for the empty case.
	if len(ogs) == 0 {
		return false, nil
	}
	if len(ogs) != 1 {
		return false, fmt.Errorf("found %d operatorGroups, expected 1", len(ogs))
	}
	return ogs[0].UpgradeStrategy() == operatorsv1.UpgradeStrategyUnsafeFailForward, nil
}
type walkOption func(csv *operatorsv1alpha1.ClusterServiceVersion) error
// WithCSVPhase returns a walkOption that errors if the CSV is nil or not in
// the given phase.
func WithCSVPhase(phase operatorsv1alpha1.ClusterServiceVersionPhase) walkOption {
	return func(csv *operatorsv1alpha1.ClusterServiceVersion) error {
		// Handle nil separately: the original combined check built its error
		// message by calling csv.GetNamespace()/GetName() on a nil csv,
		// which panics with a nil pointer dereference.
		if csv == nil {
			return fmt.Errorf("csv cannot be nil")
		}
		if csv.Status.Phase != phase {
			return fmt.Errorf("csv %s/%s in phase %s instead of %s", csv.GetNamespace(), csv.GetName(), csv.Status.Phase, phase)
		}
		return nil
	}
}
// WithUniqueCSVs returns a walkOption that errors when it encounters a CSV
// name it has seen on a previous invocation, i.e. a cycle in the chain.
func WithUniqueCSVs() walkOption {
	seen := map[string]struct{}{}
	return func(csv *operatorsv1alpha1.ClusterServiceVersion) error {
		name := csv.GetName()
		if _, alreadySeen := seen[name]; alreadySeen {
			return fmt.Errorf("csv %s/%s has already been seen", csv.GetNamespace(), name)
		}
		seen[name] = struct{}{}
		return nil
	}
}
// WalkReplacementChain walks along the chain of clusterServiceVersions being
// replaced and returns the last clusterServiceVersion in the replacement
// chain. An error is returned if any of the clusterServiceVersions before the
// last fails a supplied walk option (e.g. wrong phase, or an infinite
// replacement chain detected via WithUniqueCSVs).
func WalkReplacementChain(csv *operatorsv1alpha1.ClusterServiceVersion, csvToReplacement map[string]*operatorsv1alpha1.ClusterServiceVersion, options ...walkOption) (*operatorsv1alpha1.ClusterServiceVersion, error) {
	if csv == nil {
		return nil, fmt.Errorf("csv cannot be nil")
	}
	// Follow the "replaced by" relation until the current CSV has no successor.
	for next, ok := csvToReplacement[csv.GetName()]; ok; next, ok = csvToReplacement[csv.GetName()] {
		// Only CSVs that have a successor are checked against the walk
		// options; the final CSV in the chain is intentionally exempt.
		for _, check := range options {
			if err := check(csv); err != nil {
				return nil, err
			}
		}
		csv = next
	}
	return csv, nil
}
// isReplacementChainThatEndsInFailure returns true if the last CSV in the
// chain is in the failed phase and all other CSVs are in the replacing phase.
func isReplacementChainThatEndsInFailure(csv *operatorsv1alpha1.ClusterServiceVersion, csvToReplacement map[string]*operatorsv1alpha1.ClusterServiceVersion) (bool, error) {
	// Walk to the end of the chain, requiring every intermediate CSV to be
	// replacing and every name to be unique (cycle detection).
	tail, err := WalkReplacementChain(csv, csvToReplacement, WithCSVPhase(operatorsv1alpha1.CSVPhaseReplacing), WithUniqueCSVs())
	if err != nil {
		return false, err
	}
	if tail == nil {
		return false, nil
	}
	return tail.Status.Phase == operatorsv1alpha1.CSVPhaseFailed, nil
}
// ReplacementMapping takes a list of CSVs and returns a map that maps a CSV's
// name to the CSV that replaces it.
func ReplacementMapping(csvs []*operatorsv1alpha1.ClusterServiceVersion) map[string]*operatorsv1alpha1.ClusterServiceVersion {
	replacedBy := make(map[string]*operatorsv1alpha1.ClusterServiceVersion, len(csvs))
	for _, replacement := range csvs {
		replaced := replacement.Spec.Replaces
		if replaced == "" {
			continue
		}
		replacedBy[replaced] = replacement
	}
	return replacedBy
}
|
package main
import "fmt"
// Item is a single node of the doubly linked List. Next and Prev point at
// the neighboring nodes (nil at either end); Value holds the payload.
type Item struct {
	Next  *Item
	Prev  *Item
	Value interface{}
}
// List is a doubly linked list tracking its head (FirstItem), tail
// (LastItem) and element count.
// NOTE(review): "Lenght" is a misspelling of "Length", but the field is
// exported and referenced throughout this file, so it is kept as-is.
type List struct {
	FirstItem *Item
	LastItem  *Item
	Lenght    int
}
// main exercises the List implementation: it builds the list 3,2,1,4,5 via
// PushFront/PushBack, removes the tail item (5), then prints the head
// value, tail value and length — i.e. "3 4 4".
func main() {
	var newList List
	newList.PushFront(1)
	newList.PushFront(2)
	newList.PushBack(4)
	newList.PushFront(3)
	newList.PushBack(5)
	// Remove takes the Item by value, so the tail is dereferenced here.
	newList.Remove(*newList.Last())
	a := newList.First()
	b := newList.Last()
	x := newList.Len()
	fmt.Printf("%d %d %d", a.Value, b.Value, x)
}
// PushFront inserts v as the new head of the list.
func (l *List) PushFront(v int) {
	item := &Item{Value: v}
	if l.FirstItem == nil {
		// Empty list: the new item is both head and tail.
		l.FirstItem = item
		l.LastItem = item
	} else {
		item.Next = l.FirstItem
		l.FirstItem.Prev = item
		l.FirstItem = item
	}
	l.Lenght++
}
// PushBack appends v as the new tail of the list.
func (l *List) PushBack(v int) {
	item := &Item{Value: v}
	if l.FirstItem == nil {
		// Empty list: the new item is both head and tail.
		l.FirstItem = item
		l.LastItem = item
	} else {
		item.Prev = l.LastItem
		l.LastItem.Next = item
		l.LastItem = item
	}
	l.Lenght++
}
// Remove unlinks item i from the list and decrements the length. The Item
// is passed by value; its copied Next/Prev pointers are used to splice the
// neighbors together, and head/tail are updated when i sat at an end.
// NOTE(review): there is no check that i actually belongs to this list —
// Lenght is decremented unconditionally.
func (l *List) Remove(i Item) {
	if i.Prev == nil {
		l.FirstItem = i.Next
	} else {
		i.Prev.Next = i.Next
	}
	if i.Next == nil {
		l.LastItem = i.Prev
	} else {
		i.Next.Prev = i.Prev
	}
	l.Lenght--
}
// Len returns the number of items currently in the list.
func (l *List) Len() int {
	return l.Lenght
}
// First returns the head item of the list (nil when the list is empty).
func (l *List) First() *Item {
	return l.FirstItem
}
// Last returns the tail item of the list (nil when the list is empty).
func (l *List) Last() *Item {
	return l.LastItem
}
|
package health
import (
"github.com/square/p2/pkg/types"
)
// SortOrder sorts the nodes in the list from least to most health.
// Nodes is the slice being sorted; Health maps each node to its health
// Result. Nodes missing from Health are treated as Unknown (see Less).
type SortOrder struct {
	Nodes  []types.NodeName
	Health map[types.NodeName]Result
}
// Len implements sort.Interface; it reports the number of nodes.
func (s SortOrder) Len() int {
	return len(s.Nodes)
}
// Swap implements sort.Interface; it exchanges the nodes at i and j.
func (s SortOrder) Swap(i, j int) {
	s.Nodes[i], s.Nodes[j] = s.Nodes[j], s.Nodes[i]
}
// Less implements sort.Interface; node i sorts before node j when its
// health status compares lower. A node absent from the Health map is
// treated as having Unknown status.
func (s SortOrder) Less(i, j int) bool {
	left := Unknown
	if result, found := s.Health[s.Nodes[i]]; found {
		left = result.Status
	}

	right := Unknown
	if result, found := s.Health[s.Nodes[j]]; found {
		right = result.Status
	}

	return Compare(left, right) < 0
}
|
// +build windows
package process_exterminator
// Init performs no work in the Windows build of this package and always
// returns nil — presumably the non-Windows builds do the real setup
// (TODO confirm against the other platform files).
func Init() error {
	return nil
}
|
package cloud
import (
"github.com/devspace-cloud/devspace/pkg/devspace/cloud/client"
"github.com/devspace-cloud/devspace/pkg/devspace/cloud/config"
"github.com/devspace-cloud/devspace/pkg/devspace/cloud/config/versions/latest"
"github.com/devspace-cloud/devspace/pkg/devspace/docker"
"github.com/devspace-cloud/devspace/pkg/devspace/kubectl"
"github.com/devspace-cloud/devspace/pkg/util/browser"
"github.com/devspace-cloud/devspace/pkg/util/kubeconfig"
"github.com/devspace-cloud/devspace/pkg/util/log"
"github.com/devspace-cloud/devspace/pkg/util/survey"
"github.com/pkg/errors"
)
// Provider interacts with one cloud provider. It covers space caching,
// cluster management, kube-context manipulation, printing helpers,
// persistence of the provider config, and access to the API client.
type Provider interface {
	GetAndUpdateSpaceCache(spaceID int, forceUpdate bool) (*latest.SpaceCache, bool, error)
	CacheSpace(space *latest.Space, serviceAccount *latest.ServiceAccount) error
	ConnectCluster(options *ConnectClusterOptions) error
	ResetKey(clusterName string) error
	UpdateKubeConfig(contextName string, serviceAccount *latest.ServiceAccount, spaceID int, setActive bool) error
	DeleteKubeContext(space *latest.Space) error
	GetClusterKey(cluster *latest.Cluster) (string, error)
	PrintToken(spaceID int) error
	PrintSpaces(cluster, name string, all bool) error
	// Save persists the provider configuration (see provider.Save).
	Save() error
	// Client returns the API client used to talk to the provider.
	Client() client.Client
	// GetConfig returns the underlying provider configuration.
	GetConfig() latest.Provider
}
// DevSpaceKubeContextName is the name used for the kube config context
// managed by DevSpace.
const DevSpaceKubeContextName = "devspace"
// provider is the concrete Provider implementation. It embeds the provider
// configuration (latest.Provider) and holds the collaborators used to talk
// to the cloud: an API client, config and kubeconfig loaders, a browser
// for login flows, and a logger.
type provider struct {
	latest.Provider
	browser    browser.Browser
	client     client.Client
	kubeClient kubectl.Client
	loader     config.Loader
	kubeLoader kubeconfig.Loader
	log        log.Logger
	// Only for testing
	dockerClient docker.Client
}
// GetProvider returns the current specified cloud provider
func GetProvider(useProviderName string, log log.Logger) (Provider, error) {
	// Delegate to GetProviderWithOptions using default settings: no key,
	// no forced re-login, and freshly constructed loaders.
	configLoader := config.NewLoader()
	kubeLoader := kubeconfig.NewLoader()
	return GetProviderWithOptions(useProviderName, "", false, configLoader, kubeLoader, log)
}
// GetProviderWithOptions returns a provider selected by name (or by config
// default / interactive question when useProviderName is empty), logging in
// first when relogin is requested, an access key is supplied, or no key is
// stored yet. Returns an error when the config cannot be loaded, the
// provider is unknown, or authentication fails.
func GetProviderWithOptions(useProviderName, key string, relogin bool, loader config.Loader, kubeLoader kubeconfig.Loader, log log.Logger) (Provider, error) {
	var err error

	// Get config
	providerConfig, err := loader.Load()
	if err != nil {
		return nil, err
	}

	// Resolve the provider name: explicit argument wins, then the configured
	// default, then an interactive question when several are configured.
	providerName := config.DevSpaceCloudProviderName
	if useProviderName == "" {
		// Choose cloud provider
		if providerConfig.Default != "" {
			providerName = providerConfig.Default
		} else if len(providerConfig.Providers) > 1 {
			options := []string{}
			for _, providerHost := range providerConfig.Providers {
				options = append(options, providerHost.Name)
			}

			providerName, err = log.Question(&survey.QuestionOptions{
				Question: "Select cloud provider",
				Options:  options,
			})
			if err != nil {
				return nil, err
			}
		}
	} else {
		providerName = useProviderName
	}

	// Let's check if we are logged in first
	p := config.GetProvider(providerConfig, providerName)
	if p == nil {
		cloudProviders := ""
		for _, p := range providerConfig.Providers {
			cloudProviders += p.Name + " "
		}

		return nil, errors.Errorf("Cloud provider not found! Did you run `devspace add provider [url]`? Existing cloud providers: %s", cloudProviders)
	}

	provider := &provider{
		Provider:   *p,
		browser:    browser.NewBrowser(),
		loader:     loader,
		kubeLoader: kubeLoader,
		log:        log,
	}
	if provider.Provider.ClusterKey == nil {
		provider.Provider.ClusterKey = map[int]string{}
	}

	// Re-authenticate when explicitly requested or when no key is stored.
	// (Idiom fix: 'relogin == true' replaced with plain 'relogin'.)
	if relogin || provider.Key == "" {
		provider.Token = ""
		provider.Key = ""

		if key != "" {
			provider.Key = key
			provider.client = client.NewClient(providerName, p.Host, key, "", loader)

			// Check if we got access
			_, err := provider.client.GetSpaces()
			if err != nil {
				return nil, errors.Errorf("Access denied for key %s: %v", key, err)
			}
		} else {
			err := provider.Login()
			if err != nil {
				return nil, errors.Wrap(err, "Login")
			}
		}

		log.Donef("Successfully logged into %s", provider.Name)

		// We have to save here so that the client when he saves the token will know that the provider exists
		err = provider.Save()
		if err != nil {
			return nil, err
		}

		// Login into registries; failures here are non-fatal.
		err = provider.loginIntoRegistries()
		if err != nil {
			log.Warnf("Error logging into docker registries: %v", err)
		}
	} else {
		provider.client = client.NewClient(providerName, p.Host, p.Key, p.Token, loader)
	}

	// Return provider config
	return provider, nil
}
// Save saves the provider config
func (p *provider) Save() error {
	providerConfig, err := p.loader.Load()
	if err != nil {
		return err
	}

	// Replace the existing entry for this provider if one exists;
	// otherwise append it as a new entry.
	updated := false
	for i := range providerConfig.Providers {
		if providerConfig.Providers[i].Name != p.Name {
			continue
		}
		providerConfig.Providers[i] = &p.Provider
		updated = true
		break
	}
	if !updated {
		providerConfig.Providers = append(providerConfig.Providers, &p.Provider)
	}

	return p.loader.Save(providerConfig)
}
// Client returns the provider's API client.
func (p *provider) Client() client.Client {
	return p.client
}
// GetConfig returns the provider configuration embedded in this provider.
// (The previous comment was copy-pasted from Client.)
func (p *provider) GetConfig() latest.Provider {
	return p.Provider
}
|
package user
import "gorm.io/gorm"
// User is the gorm model for an application user. The unique index on
// Email is partial (only rows where deleted_at is null) so soft-deleted
// users do not block re-registration of the same address.
type User struct {
	gorm.Model
	Email string `gorm:"index:,unique,where: deleted_at is null" json:"email" validate:"email"`
	// Fixed json tag: it previously read `json:"string"` — the field's type
	// pasted where the key belongs — so the field serialized under the key
	// "string" instead of "password".
	Password string `json:"password"`
}
|
package graphql_test
import (
"context"
"reflect"
"testing"
"github.com/graphql-go/graphql"
"github.com/graphql-go/graphql/testutil"
)
// T describes one table-driven GraphQL test case: the Query to run against
// Schema, the Expected result, and optional query Variables.
type T struct {
	Query     string
	Schema    graphql.Schema
	Expected  interface{}
	Variables map[string]interface{}
}
var Tests = []T{}
// init populates Tests with three queries against the shared Star Wars
// test schema: a simple hero lookup, a nested friends query, and a
// variable-driven human-by-id query.
func init() {
	Tests = []T{
		{
			Query: `
        query HeroNameQuery {
          hero {
            name
          }
        }
      `,
			Schema: testutil.StarWarsSchema,
			Expected: &graphql.Result{
				Data: map[string]interface{}{
					"hero": map[string]interface{}{
						"name": "R2-D2",
					},
				},
			},
		},
		{
			Query: `
        query HeroNameAndFriendsQuery {
          hero {
            id
            name
            friends {
              name
            }
          }
        }
      `,
			Schema: testutil.StarWarsSchema,
			Expected: &graphql.Result{
				Data: map[string]interface{}{
					"hero": map[string]interface{}{
						"id":   "2001",
						"name": "R2-D2",
						"friends": []interface{}{
							map[string]interface{}{
								"name": "Luke Skywalker",
							},
							map[string]interface{}{
								"name": "Han Solo",
							},
							map[string]interface{}{
								"name": "Leia Organa",
							},
						},
					},
				},
			},
		},
		{
			Query: `
        query HumanByIdQuery($id: String!) {
          human(id: $id) {
            name
          }
        }
      `,
			Schema: testutil.StarWarsSchema,
			Expected: &graphql.Result{
				Data: map[string]interface{}{
					"human": map[string]interface{}{
						"name": "Darth Vader",
					},
				},
			},
			Variables: map[string]interface{}{
				"id": "1001",
			},
		},
	}
}
// TestQuery executes every entry of the Tests table against its schema and
// checks the result via testGraphql.
func TestQuery(t *testing.T) {
	for _, tc := range Tests {
		testGraphql(tc, graphql.Params{
			Schema:         tc.Schema,
			RequestString:  tc.Query,
			VariableValues: tc.Variables,
		}, t)
	}
}
// testGraphql runs p and fails the test when execution produced errors or
// the result differs from test.Expected.
func testGraphql(test T, p graphql.Params, t *testing.T) {
	result := graphql.Do(p)
	if len(result.Errors) > 0 {
		t.Fatalf("wrong result, unexpected errors: %v", result.Errors)
	}
	if reflect.DeepEqual(result, test.Expected) {
		return
	}
	t.Fatalf("wrong result, query: %v, graphql result diff: %v", test.Query, testutil.Diff(test.Expected, result))
}
// TestBasicGraphQLExample runs the hello-world schema from the `graphql-js`
// README and checks that `{ hello }` resolves to "world".
func TestBasicGraphQLExample(t *testing.T) {
	// taken from `graphql-js` README
	resolveHello := func(p graphql.ResolveParams) (interface{}, error) {
		return "world", nil
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "RootQueryType",
			Fields: graphql.Fields{
				"hello": &graphql.Field{
					Description: "Returns `world`",
					Type:        graphql.String,
					Resolve:     resolveHello,
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("wrong result, unexpected errors: %v", err.Error())
	}

	query := "{ hello }"
	expected := map[string]interface{}{
		"hello": "world",
	}

	result := graphql.Do(graphql.Params{
		Schema:        schema,
		RequestString: query,
	})
	if len(result.Errors) > 0 {
		t.Fatalf("wrong result, unexpected errors: %v", result.Errors)
	}
	if !reflect.DeepEqual(result.Data, expected) {
		t.Fatalf("wrong result, query: %v, graphql result diff: %v", query, testutil.Diff(expected, result))
	}
}
// TestThreadsContextFromParamsThrough verifies that the Context supplied in
// graphql.Params is threaded through to field resolvers via ResolveParams.
func TestThreadsContextFromParamsThrough(t *testing.T) {
	// The resolver reads a value out of the request context, using the
	// query's "key" argument as the context key.
	extractFieldFromContextFn := func(p graphql.ResolveParams) (interface{}, error) {
		return p.Context.Value(p.Args["key"]), nil
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Query",
			Fields: graphql.Fields{
				"value": &graphql.Field{
					Type: graphql.String,
					Args: graphql.FieldConfigArgument{
						"key": &graphql.ArgumentConfig{Type: graphql.String},
					},
					Resolve: extractFieldFromContextFn,
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("wrong result, unexpected errors: %v", err.Error())
	}
	query := `{ value(key:"a") }`
	// NOTE(review): a plain string context key triggers a go vet warning
	// (custom key types are preferred), but the resolver deliberately looks
	// the value up by the string argument from the query, so both sides
	// would have to change together.
	result := graphql.Do(graphql.Params{
		Schema:        schema,
		RequestString: query,
		Context:       context.WithValue(context.TODO(), "a", "xyz"),
	})
	if len(result.Errors) > 0 {
		t.Fatalf("wrong result, unexpected errors: %v", result.Errors)
	}
	expected := map[string]interface{}{"value": "xyz"}
	if !reflect.DeepEqual(result.Data, expected) {
		t.Fatalf("wrong result, query: %v, graphql result diff: %v", query, testutil.Diff(expected, result))
	}
}
// TestNewErrorChecksNilNodes verifies that executing a malformed request
// string yields errors in the result instead of panicking on nil AST nodes.
func TestNewErrorChecksNilNodes(t *testing.T) {
	resolveEmpty := func(p graphql.ResolveParams) (interface{}, error) {
		return "", nil
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Query",
			Fields: graphql.Fields{
				"graphql_is": &graphql.Field{
					Type:    graphql.String,
					Resolve: resolveEmpty,
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("unexpected errors: %v", err.Error())
	}

	// Deliberately invalid request string.
	query := `{graphql_is:great(sort:ByPopularity)}{stars}`
	result := graphql.Do(graphql.Params{
		Schema:        schema,
		RequestString: query,
	})
	if len(result.Errors) == 0 {
		t.Fatalf("expected errors, got: %v", result)
	}
}
// TestEmptyStringIsNotNull verifies that an empty string survives in both
// directions: as an input argument (not coerced to nil) and as a resolver
// result (not serialized as null).
func TestEmptyStringIsNotNull(t *testing.T) {
	// Fails the test unless the "arg" argument arrives as exactly "".
	checkForEmptyString := func(p graphql.ResolveParams) (interface{}, error) {
		arg := p.Args["arg"]
		if arg == nil || arg.(string) != "" {
			t.Errorf("Expected empty string for input arg, got %#v", arg)
		}
		return "yay", nil
	}
	returnEmptyString := func(p graphql.ResolveParams) (interface{}, error) {
		return "", nil
	}
	schema, err := graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Query",
			Fields: graphql.Fields{
				"checkEmptyArg": &graphql.Field{
					Type: graphql.String,
					Args: graphql.FieldConfigArgument{
						"arg": &graphql.ArgumentConfig{Type: graphql.String},
					},
					Resolve: checkForEmptyString,
				},
				"checkEmptyResult": &graphql.Field{
					Type:    graphql.String,
					Resolve: returnEmptyString,
				},
			},
		}),
	})
	if err != nil {
		t.Fatalf("wrong result, unexpected errors: %v", err.Error())
	}
	query := `{ checkEmptyArg(arg:"") checkEmptyResult }`
	result := graphql.Do(graphql.Params{
		Schema:        schema,
		RequestString: query,
	})
	if len(result.Errors) > 0 {
		t.Fatalf("wrong result, unexpected errors: %v", result.Errors)
	}
	expected := map[string]interface{}{"checkEmptyArg": "yay", "checkEmptyResult": ""}
	if !reflect.DeepEqual(result.Data, expected) {
		t.Errorf("wrong result, query: %v, graphql result diff: %v", query, testutil.Diff(expected, result))
	}
}
|
package collectors
import (
"time"
cfclient "github.com/cloudfoundry-community/go-cfclient"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
// ServiceInstancesCollector is a prometheus.Collector that scrapes Cloud
// Foundry service instances through the CF API client and exposes one
// labeled info series per instance plus scrape bookkeeping metrics.
type ServiceInstancesCollector struct {
	namespace   string
	environment string
	deployment  string
	cfClient    *cfclient.Client
	// Info gauge: one labeled series per service instance with value 1.
	serviceInstanceInfoMetric *prometheus.GaugeVec
	// Scrape bookkeeping: totals, error totals, last-error flag,
	// last-scrape timestamp and duration.
	serviceInstancesScrapesTotalMetric              prometheus.Counter
	serviceInstancesScrapeErrorsTotalMetric         prometheus.Counter
	lastServiceInstancesScrapeErrorMetric           prometheus.Gauge
	lastServiceInstancesScrapeTimestampMetric       prometheus.Gauge
	lastServiceInstancesScrapeDurationSecondsMetric prometheus.Gauge
}
// NewServiceInstancesCollector builds a ServiceInstancesCollector with all
// of its metrics pre-declared: a labeled service-instance info gauge plus
// scrape bookkeeping metrics (scrape totals, error totals, last-error flag,
// last-scrape timestamp and duration). Every metric carries constant
// environment/deployment labels.
func NewServiceInstancesCollector(
	namespace string,
	environment string,
	deployment string,
	cfClient *cfclient.Client,
) *ServiceInstancesCollector {
	serviceInstanceInfoMetric := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "service_instance",
			Name:        "info",
			Help:        "Labeled Cloud Foundry Service Instance information with a constant '1' value.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
		[]string{"service_instance_id", "service_instance_name", "service_plan_id", "space_id", "type", "last_operation_type", "last_operation_state"},
	)

	serviceInstancesScrapesTotalMetric := prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace:   namespace,
			Subsystem:   "service_instances_scrapes",
			Name:        "total",
			Help:        "Total number of scrapes for Cloud Foundry Service Instances.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
	)

	serviceInstancesScrapeErrorsTotalMetric := prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace:   namespace,
			Subsystem:   "service_instances_scrape_errors",
			Name:        "total",
			Help:        "Total number of scrape error of Cloud Foundry Service Instances.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
	)

	lastServiceInstancesScrapeErrorMetric := prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "",
			Name:        "last_service_instances_scrape_error",
			Help:        "Whether the last scrape of Service Instances metrics from Cloud Foundry resulted in an error (1 for error, 0 for success).",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
	)

	lastServiceInstancesScrapeTimestampMetric := prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "",
			Name:        "last_service_instances_scrape_timestamp",
			Help:        "Number of seconds since 1970 since last scrape of Service Instances metrics from Cloud Foundry.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
	)

	lastServiceInstancesScrapeDurationSecondsMetric := prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "",
			Name:        "last_service_instances_scrape_duration_seconds",
			Help:        "Duration of the last scrape of Service Instances metrics from Cloud Foundry.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
	)

	return &ServiceInstancesCollector{
		namespace:                               namespace,
		environment:                             environment,
		deployment:                              deployment,
		cfClient:                                cfClient,
		serviceInstanceInfoMetric:               serviceInstanceInfoMetric,
		serviceInstancesScrapesTotalMetric:      serviceInstancesScrapesTotalMetric,
		serviceInstancesScrapeErrorsTotalMetric: serviceInstancesScrapeErrorsTotalMetric,
		lastServiceInstancesScrapeErrorMetric:   lastServiceInstancesScrapeErrorMetric,
		lastServiceInstancesScrapeTimestampMetric:       lastServiceInstancesScrapeTimestampMetric,
		lastServiceInstancesScrapeDurationSecondsMetric: lastServiceInstancesScrapeDurationSecondsMetric,
	}
}
// Collect implements prometheus.Collector: it scrapes all service-instance
// metrics, then records and emits the scrape bookkeeping metrics (error
// totals, scrape totals, last-error flag, timestamp, duration) on ch.
func (c ServiceInstancesCollector) Collect(ch chan<- prometheus.Metric) {
	scrapeStart := time.Now()

	scrapeErred := float64(0)
	if err := c.reportServiceInstancesMetrics(ch); err != nil {
		scrapeErred = float64(1)
		c.serviceInstancesScrapeErrorsTotalMetric.Inc()
	}
	c.serviceInstancesScrapeErrorsTotalMetric.Collect(ch)

	c.serviceInstancesScrapesTotalMetric.Inc()
	c.serviceInstancesScrapesTotalMetric.Collect(ch)

	c.lastServiceInstancesScrapeErrorMetric.Set(scrapeErred)
	c.lastServiceInstancesScrapeErrorMetric.Collect(ch)

	c.lastServiceInstancesScrapeTimestampMetric.Set(float64(time.Now().Unix()))
	c.lastServiceInstancesScrapeTimestampMetric.Collect(ch)

	c.lastServiceInstancesScrapeDurationSecondsMetric.Set(time.Since(scrapeStart).Seconds())
	c.lastServiceInstancesScrapeDurationSecondsMetric.Collect(ch)
}
// Describe implements prometheus.Collector by forwarding the descriptors of
// every metric this collector can emit.
func (c ServiceInstancesCollector) Describe(ch chan<- *prometheus.Desc) {
	c.serviceInstanceInfoMetric.Describe(ch)
	c.serviceInstancesScrapesTotalMetric.Describe(ch)
	c.serviceInstancesScrapeErrorsTotalMetric.Describe(ch)
	c.lastServiceInstancesScrapeErrorMetric.Describe(ch)
	c.lastServiceInstancesScrapeTimestampMetric.Describe(ch)
	c.lastServiceInstancesScrapeDurationSecondsMetric.Describe(ch)
}
// reportServiceInstancesMetrics lists all service instances via the CF API
// client and emits one info series per instance, labeled with its
// identifiers and last-operation type/state. The gauge vector is Reset
// first so series for instances that no longer exist do not linger.
// Returns the listing error, if any.
func (c ServiceInstancesCollector) reportServiceInstancesMetrics(ch chan<- prometheus.Metric) error {
	c.serviceInstanceInfoMetric.Reset()

	serviceInstances, err := c.cfClient.ListServiceInstances()
	if err != nil {
		log.Errorf("Error while listing service instances: %v", err)
		return err
	}

	for _, serviceInstance := range serviceInstances {
		c.serviceInstanceInfoMetric.WithLabelValues(
			serviceInstance.Guid,
			serviceInstance.Name,
			serviceInstance.ServicePlanGuid,
			serviceInstance.SpaceGuid,
			serviceInstance.Type,
			serviceInstance.LastOperation.Type,
			serviceInstance.LastOperation.State,
		).Set(float64(1))
	}

	c.serviceInstanceInfoMetric.Collect(ch)

	return nil
}
|
// Package delay is a CoreDNS plugin that sleeps for a configurable interval before passing to the next plugin
//
package delay
import (
"context"
"time"
"github.com/coredns/coredns/plugin"
clog "github.com/coredns/coredns/plugin/pkg/log"
"github.com/miekg/dns"
)
// Define log to be a logger with the plugin name in it. This way we can just
// use log.Info and friends to log.
var log = clog.NewWithPlugin("delay")
// Delay is the plugin handler. The Delay field holds the pause interval;
// ServeDNS multiplies it by time.Millisecond, so the value is in
// milliseconds. Next is the next plugin in the chain.
type Delay struct {
	Delay uint64
	Next plugin.Handler
}
// ServeDNS implements the plugin.Handler interface. This method gets called when delay is used
// in a Server. It sleeps for the configured number of milliseconds, then
// hands the query to the next plugin in the chain.
func (e *Delay) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
	// Debug log that we've have seen the query. This will only be shown when the debug plugin is loaded.
	log.Debug("Received response")
	// Pause execution for the configured interval. e.Delay is a uint64 and
	// must be converted to time.Duration first: the original expression
	// `e.Delay * time.Millisecond` mixes uint64 and time.Duration operands
	// and does not compile.
	time.Sleep(time.Duration(e.Delay) * time.Millisecond)
	// Call next plugin (if any).
	return plugin.NextOrFailure(e.Name(), e.Next, ctx, w, r)
}
// Name implements the Handler interface; it returns the plugin name "delay".
func (e Delay) Name() string { return "delay" }
|
// Copyright 2020 Ant Group. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0
package rule
import (
"encoding/json"
"fmt"
"reflect"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/dragonflyoss/image-service/contrib/nydusify/pkg/parser"
"github.com/dragonflyoss/image-service/contrib/nydusify/pkg/utils"
)
// ManifestRule validates manifest format of Nydus image
type ManifestRule struct {
	// SourceParsed and TargetParsed hold the parsed source (OCI) and
	// target (Nydus) image descriptions to compare.
	SourceParsed *parser.Parsed
	TargetParsed *parser.Parsed
	// MultiPlatform requires the target to be a manifest list containing
	// both an OCI and a Nydus manifest for linux/ExpectedArch.
	MultiPlatform bool
	// BackendType enables backend-specific checks (e.g. "registry").
	BackendType string
	// ExpectedArch is the required platform architecture (e.g. "amd64").
	ExpectedArch string
}
// Name returns this rule's display name used in validation output.
func (rule *ManifestRule) Name() string {
	return "Manifest"
}
// Validate checks that the target Nydus image manifest is well formed:
// for multi-platform targets the manifest list must contain both an OCI and
// a Nydus manifest for linux/ExpectedArch; the Nydus manifest's final layer
// must be a bootstrap layer whose blob-list annotation matches the
// preceding blob layers (registry backend only); and the Nydus image config
// must equal the source OCI image config.
func (rule *ManifestRule) Validate() error {
	logrus.Infof("Checking Nydus manifest")

	// Ensure the target image represents a manifest list,
	// and it should consist of OCI and Nydus manifest
	if rule.MultiPlatform {
		if rule.TargetParsed.Index == nil {
			return errors.New("not found image manifest list")
		}
		foundNydusDesc := false
		foundOCIDesc := false
		for _, desc := range rule.TargetParsed.Index.Manifests {
			if desc.Platform == nil {
				continue
			}
			if desc.Platform.Architecture == rule.ExpectedArch && desc.Platform.OS == "linux" {
				if utils.IsNydusPlatform(desc.Platform) {
					foundNydusDesc = true
				} else {
					foundOCIDesc = true
				}
			}
		}
		if !foundNydusDesc {
			return errors.Errorf("not found nydus image of specified platform linux/%s", rule.ExpectedArch)
		}
		if !foundOCIDesc {
			return errors.Errorf("not found OCI image of specified platform linux/%s", rule.ExpectedArch)
		}
	}

	// Check manifest of Nydus
	if rule.TargetParsed.NydusImage == nil {
		return errors.New("invalid nydus image manifest")
	}
	layers := rule.TargetParsed.NydusImage.Manifest.Layers
	blobListInAnnotation := []string{}
	blobListInLayer := []string{}
	for i, layer := range layers {
		if i == len(layers)-1 {
			// The final layer must be the bootstrap layer.
			if layer.Annotations[utils.LayerAnnotationNydusBootstrap] != "true" {
				return errors.New("invalid bootstrap layer in nydus image manifest")
			}
			// Check blob list in annotation
			blobListStr, ok := layer.Annotations[utils.LayerAnnotationNydusBlobIDs]
			if !ok {
				return errors.New("invalid blob list in annotation of nydus image manifest")
			}
			if err := json.Unmarshal([]byte(blobListStr), &blobListInAnnotation); err != nil {
				return errors.Wrap(err, "failed to unmarshal blob list in annotation of nydus image manifest")
			}
		} else {
			// All layers before the bootstrap must be Nydus blob layers.
			if layer.MediaType != utils.MediaTypeNydusBlob ||
				layer.Annotations[utils.LayerAnnotationNydusBlob] != "true" {
				return errors.New("invalid blob layer in nydus image manifest")
			}
			blobListInLayer = append(blobListInLayer, layer.Digest.Hex())
		}
	}

	// Compare the blob list differences between bootstrap layer annotation
	// and manifest layers.
	if rule.BackendType == "registry" && !reflect.DeepEqual(blobListInAnnotation, blobListInLayer) {
		return fmt.Errorf(
			"unmatched blob list between in annotation and layers: %v != %v",
			blobListInAnnotation, blobListInLayer,
		)
	}

	// Check Nydus image config with OCI image
	if rule.SourceParsed.OCIImage != nil {
		// Compare the two configs via their JSON serializations.
		ociConfig, err := json.Marshal(rule.SourceParsed.OCIImage.Config.Config)
		if err != nil {
			return errors.New("marshal oci image config")
		}
		nydusConfig, err := json.Marshal(rule.TargetParsed.NydusImage.Config.Config)
		if err != nil {
			return errors.New("marshal nydus image config")
		}
		if !reflect.DeepEqual(ociConfig, nydusConfig) {
			return errors.New("nydus image config should be equal with oci image config")
		}
	}

	return nil
}
|
package repoimpl
import (
"context"
"strings"
"sync"
"williamfeng323/mooncake-duty/src/infrastructure/db"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// AccountRepo is the MongoDB-backed implementation of the Account
// repository; it wraps the "Account" collection.
type AccountRepo struct {
	collection *mongo.Collection // set via SetCollection during db registration
}
// Register the repository with the db package at startup so its
// collection is wired in when the database connection is established.
func init() {
	db.Register(&AccountRepo{})
}
// GetName returns the name of the MongoDB collection backing this
// repository ("Account"); used during registration with the db package.
func (pr *AccountRepo) GetName() string {
	return "Account"
}
// SetCollection sets the collection that communicates with the db on
// this instance.
func (pr *AccountRepo) SetCollection(coll *mongo.Collection) {
	pr.collection = coll
}
// InsertOne inserts a single account document; thin wrapper around
// mongo.Collection.InsertOne.
func (pr *AccountRepo) InsertOne(ctx context.Context, document interface{},
	opts ...*options.InsertOneOptions) (*mongo.InsertOneResult, error) {
	return pr.collection.InsertOne(ctx, document, opts...)
}
// Find returns a cursor over the account documents matching filter;
// thin wrapper around mongo.Collection.Find.
func (pr *AccountRepo) Find(ctx context.Context, filter interface{},
	opts ...*options.FindOptions) (*mongo.Cursor, error) {
	return pr.collection.Find(ctx, filter, opts...)
}
// FindOne returns the single document that meets the criteria; thin
// wrapper around mongo.Collection.FindOne.
func (pr *AccountRepo) FindOne(ctx context.Context, filter interface{},
	opts ...*options.FindOneOptions) *mongo.SingleResult {
	return pr.collection.FindOne(ctx, filter, opts...)
}
// EmailFilter builds a Mongo $or filter that matches the email in its
// fully upper-cased, fully lower-cased, or exact original form.
func (pr *AccountRepo) EmailFilter(email string) bson.M {
	variants := bson.A{}
	for _, candidate := range []string{strings.ToUpper(email), strings.ToLower(email), email} {
		variants = append(variants, bson.M{"email": candidate})
	}
	return bson.M{"$or": variants}
}
// UpdateOne updates at most one document matching the filter; thin
// wrapper around mongo.Collection.UpdateOne.
func (pr *AccountRepo) UpdateOne(ctx context.Context, filter interface{}, update interface{},
	opts ...*options.UpdateOptions) (*mongo.UpdateResult, error) {
	return pr.collection.UpdateOne(ctx, filter, update, opts...)
}
// DeleteOne executes a delete command to delete at most one document
// from the collection; thin wrapper around mongo.Collection.DeleteOne.
func (pr *AccountRepo) DeleteOne(ctx context.Context, filter interface{},
	opts ...*options.DeleteOptions) (*mongo.DeleteResult, error) {
	return pr.collection.DeleteOne(ctx, filter, opts...)
}
// accountRepo is the lazily created process-wide repository instance;
// lock guards its initialization in GetAccountRepo.
var accountRepo *AccountRepo
var lock sync.RWMutex
// GetAccountRepo get the account repository instance,
// Create if not exist. Safe for concurrent use.
// NOTE(review): lock is an RWMutex but only the write lock is ever
// taken; sync.Once would express this lazy init more directly.
func GetAccountRepo() *AccountRepo {
	lock.Lock()
	defer lock.Unlock()
	if accountRepo == nil {
		accountRepo = &AccountRepo{}
		accountRepo.SetCollection(db.GetConnection().GetCollection("Account"))
	}
	return accountRepo
}
|
package streamview
import (
"net"
"time"
)
// netMessages is one datagram received over UDP together with its
// sender address and the time it was read.
type netMessages struct {
	message   string      // raw payload as text
	addr      net.UDPAddr // peer the datagram came from
	timestamp time.Time   // receive time
}
// StreamView couples a UDP ingest port with an HTTP port and the
// channel that carries received messages between them.
type StreamView struct {
	netChan  chan netMessages // received UDP messages; consumer not visible in this file
	udpPort  string           // port to listen on for UDP datagrams
	httpPort string           // port for the HTTP side
}
// SamplePKG is a sample data packet. Only the exported fields are
// visible to encoders; values stays package-private.
type SamplePKG struct {
	Power     bool
	Execution bool
	Rand      float64
	HLimit    int32
	values    []float32
}
|
// Copyright 2016 Kranz. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package base
import "strings"
// IsSliceContainsStr reports whether sl contains str, ignoring case.
// On a match it also returns the element exactly as it appears in the
// slice; otherwise it returns false and the empty string.
func IsSliceContainsStr(sl []string, str string) (bool, string) {
	needle := strings.ToLower(str)
	for _, candidate := range sl {
		if needle == strings.ToLower(candidate) {
			return true, candidate
		}
	}
	return false, ""
}
|
/*
* Copyright © 2018-2022 Software AG, Darmstadt, Germany and/or its licensors
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package adatypes
import (
"bytes"
"fmt"
"regexp"
"strconv"
)
const (
	// LastEntry last N name index for Adabas
	LastEntry = -2
	// noEntry marks an unset range bound (see NewEmptyRange)
	noEntry = -1
	// allEntries sentinel returned by multiplier when the count is unknown
	allEntries = -124
)
// AdaRange Adabas range definition. Bounds are inclusive; the sentinel
// values LastEntry and noEntry carry special meaning (see constants).
type AdaRange struct {
	from int
	to   int
}
// NewEmptyRange creates an empty range with both bounds unset.
func NewEmptyRange() *AdaRange {
	r := &AdaRange{}
	r.from, r.to = noEntry, noEntry
	return r
}
// NewRangeParser new range using string parser. Accepted forms are
// "n", "N", "n-m", "n-N" and "N-N", where N denotes the last entry.
// It returns nil on any syntax or bound error.
func NewRangeParser(r string) *AdaRange {
	// NOTE(review): consider hoisting this compiled regexp to package scope.
	var re = regexp.MustCompile(`(?m)^(N|[0-9]*)-?(N|[0-9]*)?$`)
	match := re.FindStringSubmatch(r)
	if match == nil {
		Central.Log.Debugf("Does not match: %s", r)
		return nil
	}
	if Central.IsDebugLevel() {
		// Fixed: the second capture group used to be logged as match[1].
		Central.Log.Debugf("Got matches %s->%s,%s", r, match[1], match[2])
	}
	from := 0
	to := 0
	var err error
	if len(match) > 1 {
		if match[1] == "N" {
			from = LastEntry
			to = LastEntry
		} else {
			from, err = strconv.Atoi(match[1])
			if err != nil {
				return nil
			}
			to = from
		}
	}
	if len(match) > 2 && match[2] != "" {
		if match[2] == "N" {
			to = LastEntry
		} else {
			// "N-m" is invalid: cannot start at the last entry and end earlier.
			if from == LastEntry {
				return nil
			}
			to, err = strconv.Atoi(match[2])
			if err != nil {
				if Central.IsDebugLevel() {
					Central.Log.Debugf("Integer error: %s -> %s", r, match[2])
				}
				return nil
			}
		}
	}
	// An inverted range is only valid when "to" is the last-entry sentinel.
	if to < from {
		if to != LastEntry {
			if Central.IsDebugLevel() {
				// Fixed: log the actual lower bound instead of the LastEntry constant.
				Central.Log.Debugf("Last entry error: %s -> %d < %d", r, to, from)
			}
			return nil
		}
	}
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Create new range %d-%d", from, to)
	}
	return &AdaRange{from: from, to: to}
}
// NewSingleRange creates a range addressing exactly one index.
func NewSingleRange(index int) *AdaRange {
	r := AdaRange{from: index, to: index}
	return &r
}
// NewPartialRange creates a partial range covering from..to as given,
// without any bound validation.
func NewPartialRange(from, to int) *AdaRange {
	r := AdaRange{from: from, to: to}
	return &r
}
// NewRange creates a range spanning from..to. Inverted bounds yield
// nil, unless to is LastEntry (open-ended "until the last entry").
func NewRange(from, to int) *AdaRange {
	if from > to && to != LastEntry {
		return nil
	}
	return &AdaRange{from: from, to: to}
}
// NewLastRange creates a range addressing only the last entry.
func NewLastRange() *AdaRange {
	r := AdaRange{from: LastEntry, to: LastEntry}
	return &r
}
// FormatBuffer renders the range in Adabas format-buffer notation:
// "" for a nil receiver, "N" for the last entry, "n" for a single
// index and "n-m" / "n-N" for spans.
func (adaRange *AdaRange) FormatBuffer() string {
	if adaRange == nil {
		return ""
	}
	var sb bytes.Buffer
	switch {
	case adaRange.from == LastEntry:
		sb.WriteRune('N')
	case adaRange.from > 0:
		sb.WriteString(fmt.Sprintf("%d", adaRange.from))
	}
	if adaRange.to != 0 && adaRange.from != adaRange.to {
		if adaRange.to == LastEntry {
			sb.WriteString("-N")
		} else {
			sb.WriteString(fmt.Sprintf("-%d", adaRange.to))
		}
	}
	return sb.String()
}
// multiplier returns the number of entries the range spans, or the
// allEntries sentinel when a LastEntry bound makes the count unknown.
func (adaRange *AdaRange) multiplier() int {
	switch {
	case adaRange.from == adaRange.to:
		return 1
	case adaRange.from != LastEntry && adaRange.to != LastEntry:
		return adaRange.to - adaRange.from + 1
	default:
		return allEntries
	}
}
// index maps a 1-based position within the range to an absolute index;
// max is substituted when the range starts at the last entry.
func (adaRange *AdaRange) index(pos uint32, max uint32) uint32 {
	switch {
	case adaRange.from == LastEntry:
		return max
	case adaRange.from > 0:
		return uint32(adaRange.from) + pos - 1
	default:
		return pos
	}
}
// IsSingleIndex reports whether the range addresses exactly one index.
// The zero range (0,0) and an unset lower bound never count as single.
func (adaRange *AdaRange) IsSingleIndex() bool {
	if (adaRange.from == 0 && adaRange.to == 0) || adaRange.from == noEntry {
		return false
	}
	return adaRange.from == adaRange.to
}
|
package auth
import (
"fmt"
"testing"
sdk "github.com/irisnet/irishub/types"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
)
// Test_queryAccount drives the account querier through its error paths
// (undecodable request data, empty address, unknown address) and then
// asserts a successful lookup once the account exists in the keeper.
func Test_queryAccount(t *testing.T) {
	input := setupTestInput()
	req := abci.RequestQuery{
		Path: fmt.Sprintf("custom/%s/%s", "acc", QueryAccount),
		Data: []byte{},
	}
	// Empty request data cannot be decoded into query params.
	res, err := queryAccount(input.ctx, req, input.ak)
	require.NotNil(t, err)
	require.Nil(t, res)
	// An empty address is rejected as well.
	req.Data = input.cdc.MustMarshalJSON(NewQueryAccountParams([]byte("")))
	res, err = queryAccount(input.ctx, req, input.ak)
	require.NotNil(t, err)
	require.Nil(t, res)
	// A well-formed but unknown address yields an error.
	_, _, addr := keyPubAddr()
	req.Data = input.cdc.MustMarshalJSON(NewQueryAccountParams(addr))
	res, err = queryAccount(input.ctx, req, input.ak)
	require.NotNil(t, err)
	require.Nil(t, res)
	// After storing the account the query succeeds and the payload
	// round-trips through the codec.
	input.ak.SetAccount(input.ctx, input.ak.NewAccountWithAddress(input.ctx, addr))
	res, err = queryAccount(input.ctx, req, input.ak)
	require.Nil(t, err)
	require.NotNil(t, res)
	var account Account
	err2 := input.cdc.UnmarshalJSON(res, &account)
	require.Nil(t, err2)
}
// Test_queryTokenStats seeds loose and burned token totals and checks
// that queryTokenStats reports both back through the codec.
func Test_queryTokenStats(t *testing.T) {
	input := setupTestInput()
	req := abci.RequestQuery{
		Path: fmt.Sprintf("custom/%s/%s", "acc", QueryTokenStats),
		Data: []byte{},
	}
	// NOTE(review): this calls queryAccount with a token-stats path —
	// possibly a copy-paste from Test_queryAccount; it still asserts an
	// error, but confirm queryTokenStats' own error path is intended here.
	res, err := queryAccount(input.ctx, req, input.ak)
	require.NotNil(t, err)
	require.Nil(t, res)
	// Seed the keeper with known loose/burned totals.
	loosenToken := sdk.Coins{sdk.NewCoin("iris", sdk.NewInt(100))}
	input.ak.IncreaseTotalLoosenToken(input.ctx, loosenToken)
	burnedToken := sdk.Coins{sdk.NewCoin("iris", sdk.NewInt(50))}
	input.ak.IncreaseBurnedToken(input.ctx, burnedToken)
	res, err = queryTokenStats(input.ctx, input.ak)
	require.Nil(t, err)
	require.NotNil(t, res)
	// The response must round-trip and echo the seeded values.
	var tokenStats TokenStats
	require.Nil(t, input.cdc.UnmarshalJSON(res, &tokenStats))
	require.Equal(t, loosenToken.String(), tokenStats.LooseTokens.String())
	require.Equal(t, burnedToken.String(), tokenStats.BurnedTokens.String())
}
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package blob
import (
"bytes"
"context"
"io"
"testing"
"github.com/stretchr/testify/assert"
)
// getBucket opens the shared integration-test bucket and registers a
// cleanup that closes it when the test ends. It fails the test if the
// backend cannot be initialized.
func getBucket(t *testing.T) *BlobGCP {
	gcs, err := NewBlobGCP("gs://triton-integration")
	if err != nil {
		t.Fatalf("Initializing bucket error: %v", err)
	}
	t.Cleanup(func() { assert.NoError(t, gcs.Close(), "Close should not fail.") })
	return gcs
}
// TestGCS_OpenBucket verifies that NewBlobGCP opens the underlying
// bucket handle.
func TestGCS_OpenBucket(t *testing.T) {
	gcs, err := NewBlobGCP("gs://triton-integration")
	if err != nil {
		t.Fatalf("failed to initialize gcs backend: %v", err)
	}
	// Close the handle when the test finishes; the other tests get this
	// via getBucket, but this one constructs the backend directly and
	// previously leaked it.
	t.Cleanup(func() { assert.NoError(t, gcs.Close(), "Close should not fail.") })
	if gcs.bucket == nil {
		t.Fatalf("initialized gcs bucket but got nil")
	}
}
// TestGCS_PutFile checks that a small object can be written to GCS.
func TestGCS_PutFile(t *testing.T) {
	ctx := context.Background()
	gcs := getBucket(t)
	filePath := "put.txt"
	if err := gcs.Put(ctx, filePath, []byte("hello world")); err != nil {
		t.Fatalf("Put file(%q) in GCS got error: %v", filePath, err)
	}
}
// TestGCS_GetFile writes an object and checks that reading it back
// returns the identical bytes.
func TestGCS_GetFile(t *testing.T) {
	ctx := context.Background()
	gcs := getBucket(t)
	filePath := "get.txt"
	if err := gcs.Put(ctx, filePath, []byte("hello world")); err != nil {
		t.Fatalf("Put file(%q) in GCS got error: %v", filePath, err)
	}
	got, err := gcs.Get(ctx, filePath)
	if err != nil {
		t.Fatalf("Get file(%q) from GCS got error: %v", filePath, err)
	}
	want := []byte("hello world")
	if !bytes.Equal(got, want) {
		t.Fatalf("Get file(%q) from GCS failed\ngot: %s\nwant: %s", filePath, got, want)
	}
}
// TestGCS_Delete writes an object, deletes it, and confirms that a
// subsequent read fails.
func TestGCS_Delete(t *testing.T) {
	ctx := context.Background()
	gcs := getBucket(t)
	filePath := "delete.txt"
	if err := gcs.Put(ctx, filePath, []byte("hello world")); err != nil {
		t.Fatalf("Put file(%q) in GCS got error: %v", filePath, err)
	}
	if err := gcs.Delete(ctx, filePath); err != nil {
		t.Fatalf("Delete file(%q) in GCS got error: %v", filePath, err)
	}
	// Check to see access to this file fails now that it has been deleted.
	if _, err := gcs.Get(ctx, filePath); err == nil {
		t.Fatalf("Get should fail after file has been deleted, got nil")
	}
}
// TestGCS_SimpleStreamTests exercises the streaming API end to end:
// chunked writes through NewWriter, chunked reads through NewReader
// (including EOF behavior), and a bounded read via NewRangeReader.
func TestGCS_SimpleStreamTests(t *testing.T) {
	ctx := context.Background()
	gcs := getBucket(t)
	filePath := "simple-stream-tests.txt"
	testBlob := []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit")
	// Write the blob in two chunks and verify each reported byte count.
	writer, err := gcs.NewWriter(ctx, filePath)
	if err != nil {
		t.Fatalf("NewWriter(%q) in GCS got error: %v", filePath, err)
	}
	assert.NotNil(t, writer)
	n, err := writer.Write(testBlob[:10])
	assert.NoError(t, err)
	assert.Equal(t, 10, n)
	n, err = writer.Write(testBlob[10:])
	assert.NoError(t, err)
	assert.Equal(t, len(testBlob)-10, n)
	if err := writer.Close(); err != nil {
		t.Fatalf("Failed to close writer: %v", err)
	}
	t.Cleanup(func() { assert.NoError(t, gcs.Delete(ctx, filePath)) })
	// Read back: first a single byte, then the remainder, then EOF.
	reader, err := gcs.NewReader(ctx, filePath)
	if err != nil {
		t.Fatalf("NewReader(%q) in GCS got error: %v", filePath, err)
	}
	assert.NotNil(t, reader)
	readBuf := make([]byte, 1)
	n, err = reader.Read(readBuf)
	assert.NoError(t, err)
	assert.Equal(t, 1, n)
	assert.Equal(t, testBlob[:1], readBuf)
	readBuf = make([]byte, 100)
	n, err = reader.Read(readBuf)
	assert.NoError(t, err)
	assert.Equal(t, len(testBlob)-1, n)
	assert.Equal(t, testBlob[1:], readBuf[:n])
	n, err = reader.Read(readBuf)
	assert.Zero(t, n)
	assert.Equal(t, io.EOF, err)
	assert.NoError(t, reader.Close())
	// Range read: 5 bytes starting at offset 3, then EOF.
	rangeReader, err := gcs.NewRangeReader(ctx, filePath, 3, 5)
	if err != nil {
		t.Fatalf("NewRangeReader(%q, %q, %q) in GCS got error: %v", filePath, 3, 5, err)
	}
	n, err = rangeReader.Read(readBuf)
	assert.NoError(t, err)
	assert.Equal(t, 5, n)
	assert.Equal(t, testBlob[3:3+5], readBuf[:5])
	n, err = rangeReader.Read(readBuf)
	assert.Zero(t, n)
	assert.Equal(t, io.EOF, err)
	assert.NoError(t, rangeReader.Close())
}
|
// This file was generated for SObject DuplicateRecordSet, API Version v43.0 at 2018-07-30 03:47:32.341147216 -0400 EDT m=+18.684454745
package sobjects
import (
"fmt"
"strings"
)
// DuplicateRecordSet mirrors the Salesforce DuplicateRecordSet SObject
// (generated for API v43.0); the force tags drive (un)marshalling.
type DuplicateRecordSet struct {
	BaseSObject
	CreatedById        string `force:",omitempty"`
	CreatedDate        string `force:",omitempty"`
	DuplicateRuleId    string `force:",omitempty"`
	Id                 string `force:",omitempty"`
	IsDeleted          bool   `force:",omitempty"`
	LastModifiedById   string `force:",omitempty"`
	LastModifiedDate   string `force:",omitempty"`
	LastReferencedDate string `force:",omitempty"`
	LastViewedDate     string `force:",omitempty"`
	Name               string `force:",omitempty"`
	RecordCount        int    `force:",omitempty"`
	SystemModstamp     string `force:",omitempty"`
}
// ApiName returns the Salesforce API name of this SObject type.
func (t *DuplicateRecordSet) ApiName() string {
	return "DuplicateRecordSet"
}
// String renders the record as a multi-line, human-readable listing of
// all generated fields, one per indented line.
func (t *DuplicateRecordSet) String() string {
	fields := []struct {
		label string
		value interface{}
	}{
		{"CreatedById", t.CreatedById},
		{"CreatedDate", t.CreatedDate},
		{"DuplicateRuleId", t.DuplicateRuleId},
		{"Id", t.Id},
		{"IsDeleted", t.IsDeleted},
		{"LastModifiedById", t.LastModifiedById},
		{"LastModifiedDate", t.LastModifiedDate},
		{"LastReferencedDate", t.LastReferencedDate},
		{"LastViewedDate", t.LastViewedDate},
		{"Name", t.Name},
		{"RecordCount", t.RecordCount},
		{"SystemModstamp", t.SystemModstamp},
	}
	builder := strings.Builder{}
	builder.WriteString(fmt.Sprintf("DuplicateRecordSet #%s - %s\n", t.Id, t.Name))
	for _, f := range fields {
		builder.WriteString(fmt.Sprintf("\t%s: %v\n", f.label, f.value))
	}
	return builder.String()
}
// DuplicateRecordSetQueryResponse is the typed SOQL query-result
// wrapper for DuplicateRecordSet records.
type DuplicateRecordSetQueryResponse struct {
	BaseQuery
	Records []DuplicateRecordSet `json:"Records" force:"records"`
}
|
package expr
import (
"go/ast"
"github.com/sky0621/go-testcode-autogen/inspect/result"
"fmt"
)
// InterfaceTypeInspector inspects *ast.InterfaceType nodes.
type InterfaceTypeInspector struct{}
// IsTarget reports whether node is an *ast.InterfaceType and is
// therefore handled by this inspector.
func (i *InterfaceTypeInspector) IsTarget(node ast.Node) bool {
	_, ok := node.(*ast.InterfaceType)
	return ok
}
// Inspect handles an *ast.InterfaceType node, returning an error for
// any other node kind. The aggregater is currently unused; the method
// only dumps the node for debugging (see FIXME).
func (i *InterfaceTypeInspector) Inspect(node ast.Node, aggregater *result.Aggregater) error {
	it, ok := node.(*ast.InterfaceType)
	if !ok {
		return fmt.Errorf("Not target Node: %#v", node)
	}
	// FIXME
	fmt.Println("===== InterfaceTypeInspector ===================================================================================")
	fmt.Printf("InterfaceType: %#v\n", it)
	return nil
}
|
//
// Copyright (c) 2016-2022 Snowplow Analytics Ltd. All rights reserved.
//
// This program is licensed to you under the Apache License Version 2.0,
// and you may not use this file except in compliance with the Apache License Version 2.0.
// You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the Apache License Version 2.0 is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
//
package main
import (
"encoding/json"
"net/http"
"testing"
"time"
"github.com/jarcoal/httpmock"
gt "github.com/snowplow/snowplow-golang-tracker/v3/tracker"
"github.com/stretchr/testify/assert"
)
// --- CLI
// TestGetSdJSON covers every input combination of getSdJSON: all-empty,
// schema without JSON, JSON without schema, valid schema+JSON, invalid
// JSON, a pre-built self-describing JSON, and an invalid one.
func TestGetSdJSON(t *testing.T) {
	assert := assert.New(t)
	// No inputs at all.
	sdj, err := getSdJSON("", "", "")
	assert.Nil(sdj)
	assert.NotNil(err)
	assert.Equal("fatal: --sdjson or --schema URI plus a --json needs to be specified", err.Error())
	// Schema only.
	sdj, err = getSdJSON("", "iglu:com.acme/event/jsonschema/1-0-0", "")
	assert.Nil(sdj)
	assert.NotNil(err)
	assert.Equal("fatal: --json needs to be specified", err.Error())
	// JSON only.
	sdj, err = getSdJSON("", "", "{\"e\":\"pv\"}")
	assert.Nil(sdj)
	assert.NotNil(err)
	assert.Equal("fatal: --schema URI needs to be specified", err.Error())
	// Schema + JSON builds a self-describing JSON.
	sdj, err = getSdJSON("", "iglu:com.acme/event/jsonschema/1-0-0", "{\"e\":\"pv\"}")
	assert.Nil(err)
	assert.NotNil(sdj)
	assert.Equal("{\"data\":{\"e\":\"pv\"},\"schema\":\"iglu:com.acme/event/jsonschema/1-0-0\"}", sdj.String())
	// Malformed JSON payload.
	sdj, err = getSdJSON("", "iglu:com.acme/event/jsonschema/1-0-0", "{\"e\"}")
	assert.NotNil(err)
	assert.Nil(sdj)
	assert.Equal("invalid character '}' after object key", err.Error())
	// A full self-describing JSON passes straight through.
	sdj, err = getSdJSON("{\"data\":{\"e\":\"pv\"},\"schema\":\"iglu:com.acme/event/jsonschema/1-0-0\"}", "", "")
	assert.Nil(err)
	assert.NotNil(sdj)
	assert.Equal("{\"data\":{\"e\":\"pv\"},\"schema\":\"iglu:com.acme/event/jsonschema/1-0-0\"}", sdj.String())
	// Malformed self-describing JSON.
	sdj, err = getSdJSON("{\"data\":{\"e\"},\"schema\":\"iglu:com.acme/event/jsonschema/1-0-0\"}", "", "")
	assert.NotNil(err)
	assert.Nil(sdj)
	assert.Equal("invalid character '}' after object key", err.Error())
	// Numeric values survive the round trip unchanged.
	sdj, err = getSdJSON("{\"data\":{\"timestamp\":1534429336},\"schema\":\"iglu:com.acme/event/jsonschema/1-0-0\"}", "", "")
	assert.Nil(err)
	assert.NotNil(sdj)
	assert.Equal("{\"data\":{\"timestamp\":1534429336},\"schema\":\"iglu:com.acme/event/jsonschema/1-0-0\"}", sdj.String())
}
// TestGetContexts checks context-array parsing: invalid input errors,
// an empty array yields zero contexts, and a two-element array yields
// both contexts verbatim.
func TestGetContexts(t *testing.T) {
	assert := assert.New(t)
	// Empty string is not valid JSON.
	contexts, err := getContexts("")
	assert.Nil(contexts)
	assert.NotNil(err)
	// Empty array parses to zero contexts.
	contexts, err = getContexts("[]")
	assert.NotNil(contexts)
	assert.Nil(err)
	assert.Equal(0, len(contexts))
	// Two contexts parse in order and round-trip unchanged.
	contexts, err = getContexts("[{\"data\":{\"timestamp\":1534429336},\"schema\":\"iglu:com.acme/context_1/jsonschema/1-0-0\"},{\"data\":{\"timestamp\":1534429336},\"schema\":\"iglu:com.acme/context_1/jsonschema/1-0-0\"}]")
	assert.NotNil(contexts)
	assert.Nil(err)
	assert.Equal(2, len(contexts))
	assert.Equal("{\"data\":{\"timestamp\":1534429336},\"schema\":\"iglu:com.acme/context_1/jsonschema/1-0-0\"}", contexts[0].String())
	assert.Equal("{\"data\":{\"timestamp\":1534429336},\"schema\":\"iglu:com.acme/context_1/jsonschema/1-0-0\"}", contexts[1].String())
}
// --- Tracker
// TestInitTracker verifies that initTracker wires up the emitter and
// subject and records the application id.
func TestInitTracker(t *testing.T) {
	assert := assert.New(t)
	trackerChan := make(chan int, 1)
	tracker := initTracker("com.acme", "myapp", "POST", "https", "", trackerChan, nil)
	assert.NotNil(tracker)
	assert.NotNil(tracker.Emitter)
	assert.NotNil(tracker.Subject)
	assert.Equal("myapp", tracker.AppId)
}
// TestTrackSelfDescribingEventGood tracks an event over a mocked GET
// collector that replies 200, and asserts exactly one request was sent
// and its status code was propagated.
func TestTrackSelfDescribingEventGood(t *testing.T) {
	assert := assert.New(t)
	// Setup HTTPMock
	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	requests := []*http.Request{}
	httpmock.RegisterResponder(
		"GET",
		"http://com.acme/i",
		func(req *http.Request) (*http.Response, error) {
			requests = append(requests, req)
			return httpmock.NewStringResponse(200, ""), nil
		},
	)
	httpClient := &http.Client{
		Timeout:   time.Duration(1 * time.Second),
		Transport: http.DefaultTransport,
	}
	// Setup Tracker
	trackerChan := make(chan int, 1)
	tracker := initTracker("com.acme", "myapp", "GET", "http", "", trackerChan, httpClient)
	assert.NotNil(tracker)
	// Make SDJ
	schemaStr := "iglu:com.acme/event/jsonschema/1-0-0"
	jsonDataMap, _ := stringToMap("{\"hello\":\"world\"}")
	sdj := gt.InitSelfDescribingJson(schemaStr, jsonDataMap)
	// Make contexts
	contexts, _ := getContexts("[{\"data\":{\"timestamp\":1534429336},\"schema\":\"iglu:com.acme/context_1/jsonschema/1-0-0\"},{\"data\":{\"timestamp\":1534429336},\"schema\":\"iglu:com.acme/context_1/jsonschema/1-0-0\"}]")
	// Send an event
	statusCode := trackSelfDescribingEvent(tracker, trackerChan, sdj, contexts)
	assert.Equal(200, statusCode)
	assert.Equal(1, len(requests))
}
// TestTrackSelfDescribingEventBad tracks an event over a mocked POST
// collector that replies 404, and asserts the failure status code is
// propagated after a single request.
func TestTrackSelfDescribingEventBad(t *testing.T) {
	assert := assert.New(t)
	// Setup HTTPMock
	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	requests := []*http.Request{}
	httpmock.RegisterResponder(
		"POST",
		"http://com.acme/com.snowplowanalytics.snowplow/tp2",
		func(req *http.Request) (*http.Response, error) {
			requests = append(requests, req)
			return httpmock.NewStringResponse(404, ""), nil
		},
	)
	httpClient := &http.Client{
		Timeout:   time.Duration(1 * time.Second),
		Transport: http.DefaultTransport,
	}
	// Setup Tracker
	trackerChan := make(chan int, 1)
	tracker := initTracker("com.acme", "myapp", "POST", "http", "", trackerChan, httpClient)
	assert.NotNil(tracker)
	// Make SDJ
	schemaStr := "iglu:com.acme/event/jsonschema/1-0-0"
	jsonDataMap, _ := stringToMap("{\"hello\":\"world\"}")
	sdj := gt.InitSelfDescribingJson(schemaStr, jsonDataMap)
	// Send an event (no contexts this time)
	statusCode := trackSelfDescribingEvent(tracker, trackerChan, sdj, nil)
	assert.Equal(404, statusCode)
	assert.Equal(1, len(requests))
}
// --- Utilities
// TestParseStatusCode pins the status-code bucketing: 2xx/3xx map to 0,
// 4xx to 4, 5xx to 5, and anything else to 1.
func TestParseStatusCode(t *testing.T) {
	assert := assert.New(t)
	result := parseStatusCode(200)
	assert.Equal(0, result)
	result = parseStatusCode(300)
	assert.Equal(0, result)
	result = parseStatusCode(404)
	assert.Equal(4, result)
	result = parseStatusCode(501)
	assert.Equal(5, result)
	result = parseStatusCode(600)
	assert.Equal(1, result)
}
// TestStringToMap checks JSON-object parsing: a valid object, a
// malformed one, and that numbers come back as json.Number rather
// than float64.
func TestStringToMap(t *testing.T) {
	assert := assert.New(t)
	m, err := stringToMap("{\"hello\":\"world\"}")
	assert.Nil(err)
	assert.NotNil(m)
	assert.Equal("world", m["hello"])
	assert.Equal(1, len(m))
	// Malformed object yields an error and a nil map.
	m, err = stringToMap("{\"hello\"}")
	assert.NotNil(err)
	assert.Nil(m)
	// Numeric values are preserved as json.Number.
	m, err = stringToMap("{\"timestamp\":1534429336}")
	assert.Nil(err)
	assert.NotNil(m)
	assert.Equal(json.Number("1534429336"), m["timestamp"])
	assert.Equal(1, len(m))
}
|
package api
import (
"fmt"
"net/http"
"strconv"
)
// listenersCh forwards each incoming ping request's reply channel to
// whoever drains the listeners channel passed to Listen.
var listenersCh chan chan []byte

// Listen wires the pings endpoint to the given listeners channel and
// serves HTTP on the given port; it blocks until the server fails.
func Listen(port int, listeners chan chan []byte) error {
	listenersCh = listeners
	http.HandleFunc("/api/pings", pingsHandler)
	return http.ListenAndServe(":"+strconv.Itoa(port), nil)
}
// pingsHandler handles POST /api/pings: it registers a reply channel
// with the listener hub and echoes back whatever message arrives on it,
// newline-terminated. Any other HTTP method is rejected.
func pingsHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		// NOTE(review): 404 kept for backward compatibility with existing
		// clients; http.StatusMethodNotAllowed (405) would be conventional.
		w.WriteHeader(http.StatusNotFound)
		return
	}
	// Create the reply channel only after the request is accepted.
	message := make(chan []byte)
	listenersCh <- message
	w.Write([]byte(fmt.Sprintf("%s\n", <-message)))
}
|
/*
* Copyright © 2018-2022 Software AG, Darmstadt, Germany and/or its licensors
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package adatypes
import (
"encoding/binary"
)
// Version version of current build
var Version = "v1.6.19"

// FieldType indicate a field type of the field
type FieldType uint

// One constant per Adabas field type; the order here is significant
// because typeName (below) is indexed by these values.
const (
	// FieldTypeUndefined field type undefined
	FieldTypeUndefined FieldType = iota
	// FieldTypeUByte field type unsigned byte
	FieldTypeUByte
	// FieldTypeByte field type signed byte
	FieldTypeByte
	// FieldTypeUInt2 field type unsigned integer of 2 bytes
	FieldTypeUInt2
	// FieldTypeInt2 field type signed integer of 2 bytes
	FieldTypeInt2
	// FieldTypeShort field type signed short
	FieldTypeShort
	// FieldTypeUInt4 field type unsigned integer of 4 bytes
	FieldTypeUInt4
	// FieldTypeUInt4Array field type array unsigned integer of 4 bytes
	FieldTypeUInt4Array
	// FieldTypeInt4 field type signed integer of 4 bytes
	FieldTypeInt4
	// FieldTypeUInt8 field type unsigned integer of 8 bytes
	FieldTypeUInt8
	// FieldTypeInt8 field type signed integer of 8 bytes
	FieldTypeInt8
	// FieldTypeLong field type signed long
	FieldTypeLong
	// FieldTypePacked field type packed
	FieldTypePacked
	// FieldTypeUnpacked field type unpacked
	FieldTypeUnpacked
	// FieldTypeDouble field type double
	FieldTypeDouble
	// FieldTypeFloat field type float
	FieldTypeFloat
	// FieldTypeFiller field type for fill gaps between struct types
	FieldTypeFiller
	// FieldTypeString field type string
	FieldTypeString
	// FieldTypeByteArray field type byte array
	FieldTypeByteArray
	// FieldTypeCharacter field type character
	FieldTypeCharacter
	// FieldTypeLength field type for length definitions
	FieldTypeLength
	// FieldTypeUnicode field type unicode string
	FieldTypeUnicode
	// FieldTypeLAUnicode field type unicode large objects
	FieldTypeLAUnicode
	// FieldTypeLBUnicode field type unicode LOB
	FieldTypeLBUnicode
	// FieldTypeLAString field type string large objects
	FieldTypeLAString
	// FieldTypeLBString field type string LOB
	FieldTypeLBString
	// FieldTypeFieldLength field length
	FieldTypeFieldLength
	// FieldTypePeriodGroup field type period group
	FieldTypePeriodGroup
	// FieldTypeMultiplefield field type multiple fields
	FieldTypeMultiplefield
	// FieldTypeStructure field type of structured types
	FieldTypeStructure
	// FieldTypeGroup field type group
	FieldTypeGroup
	// FieldTypeRedefinition field type redefinition
	FieldTypeRedefinition
	// FieldTypePackedArray field type packed array
	FieldTypePackedArray
	// FieldTypePhonetic field type of phonetic descriptor
	FieldTypePhonetic
	// FieldTypeSuperDesc field type of super descriptors
	FieldTypeSuperDesc
	// FieldTypeLiteral field type of literal data send to database
	FieldTypeLiteral
	// FieldTypeFieldCount field type to defined field count of MU or PE fields
	FieldTypeFieldCount
	// FieldTypeHyperDesc field type of Hyper descriptors
	FieldTypeHyperDesc
	// FieldTypeReferential field type for referential integrity
	FieldTypeReferential
	// FieldTypeCollation field type of collation descriptors
	FieldTypeCollation
	// FieldTypeFunction field type to define functions working on result list
	FieldTypeFunction
)
// typeName holds the printable name of each FieldType, indexed by the
// constant's value; it must list exactly one entry per constant, in
// declaration order.
// Fixed: "Redefinition" was missing between "Group" and "PackedArray",
// which shifted every following name by one and made
// FieldTypeFunction.name() index out of range.
var typeName = []string{"Undefined", "UByte", "Byte", "UInt2", "Int2", "Short", "UInt4", "UInt4Array", "Int4", "UInt8", "Int8",
	"Long", "Packed", "Unpacked", "Double", "Float", "Filler", "String", "ByteArray", "Character", "Length",
	"Unicode", "LAUnicode", "LBUnicode", "LAString", "LBString", "FieldLength", "PeriodGroup", "Multiplefield",
	"Structure", "Group", "Redefinition", "PackedArray", "Phonetic", "SuperDesc", "Literal", "FieldCount", "HyperDesc",
	"Referential", "Collation", "Function"}
// name returns the printable name of the field type. Values outside
// the typeName table fall back to the "Undefined" entry instead of
// panicking with an out-of-range index.
func (fieldType FieldType) name() string {
	if int(fieldType) >= len(typeName) {
		return typeName[0]
	}
	return typeName[fieldType]
}
// FormatCharacter returns the Adabas format character used when
// printing the FDT for this field type; a blank for types that have
// no format character.
func (fieldType FieldType) FormatCharacter() string {
	switch fieldType {
	case FieldTypeCharacter, FieldTypeString, FieldTypeLAString, FieldTypeLBString:
		return "A"
	case FieldTypeUnicode, FieldTypeLAUnicode, FieldTypeLBUnicode:
		return "W"
	case FieldTypeUByte, FieldTypeUInt2, FieldTypeUInt4, FieldTypeUInt8, FieldTypeShort, FieldTypeByteArray:
		return "B"
	case FieldTypePacked:
		return "P"
	case FieldTypeUnpacked:
		return "U"
	case FieldTypeByte, FieldTypeInt2, FieldTypeInt4, FieldTypeInt8:
		return "F"
	case FieldTypeFloat:
		return "G"
	}
	return " "
}
// EvaluateFieldType evaluate field type of format string: maps an
// Adabas format character plus length to the corresponding FieldType.
// Unknown characters yield FieldTypeUndefined.
func EvaluateFieldType(fieldType rune, length int32) FieldType {
	switch fieldType {
	case 'A':
		// NOTE(review): a one-byte alpha maps to the signed-byte type
		// rather than FieldTypeCharacter — confirm this is intended.
		if length == 1 {
			return FieldTypeByte
		}
		return FieldTypeString
	case 'P':
		return FieldTypePacked
	case 'U':
		return FieldTypeUnpacked
	case 'G':
		return FieldTypeFloat
	case 'B':
		// Unsigned integer widths; any other length is a raw byte array.
		switch length {
		case 1:
			return FieldTypeUByte
		case 2:
			return FieldTypeUInt2
		case 4:
			return FieldTypeUInt4
		case 8:
			return FieldTypeUInt8
		}
		return FieldTypeByteArray
	case 'F':
		// Signed integer widths; any other length is a raw byte array.
		switch length {
		case 1:
			return FieldTypeByte
		case 2:
			return FieldTypeInt2
		case 4:
			return FieldTypeInt4
		case 8:
			return FieldTypeInt8
		}
		return FieldTypeByteArray
	default:
	}
	return FieldTypeUndefined
}
// CommonType common data type structure defined for all types; it is
// embedded by the concrete IAdaType implementations.
type CommonType struct {
	fieldType           FieldType      // Adabas field type of this node
	name                string         // long (logical) field name
	shortName           string         // two-character Adabas short name
	length              uint32         // field length in bytes
	level               uint8          // Adabas level within the FDT tree
	flags               uint32         // FlagOption bit set
	parentType          IAdaType       // enclosing structure/group, nil at root
	options             uint32         // FieldOption bit set
	Charset             string         // character set used for conversion
	endian              binary.ByteOrder
	peRange             AdaRange       // period-group occurrence range
	muRange             AdaRange       // multiple-field occurrence range
	partialRange        *AdaRange      // partial (sub-field) range, nil if unused
	FormatTypeCharacter rune
	FormatLength        uint32
	SubTypes            []IAdaType     // children of structured types
	convert             ConvertUnicode // unicode conversion helper
}
// Type returns field type of the field
func (commonType *CommonType) Type() FieldType {
	return commonType.fieldType
}

// Name return the name of the field
func (commonType *CommonType) Name() string {
	return commonType.name
}

// ShortName return the short name of the field
func (commonType *CommonType) ShortName() string {
	return commonType.shortName
}

// SetName set the name of the field
func (commonType *CommonType) SetName(name string) {
	commonType.name = name
}

// Level Type return level of the field
func (commonType *CommonType) Level() uint8 {
	return commonType.level
}

// SetLevel Set Adabas level of the field
func (commonType *CommonType) SetLevel(level uint8) {
	commonType.level = level
}

// Endian Get data endian; lazily defaults to the platform endianness
// on first use.
func (commonType *CommonType) Endian() binary.ByteOrder {
	if commonType.endian == nil {
		commonType.endian = endian()
	}
	return commonType.endian
}

// SetEndian Set data endian
func (commonType *CommonType) SetEndian(endian binary.ByteOrder) {
	commonType.endian = endian
}

// SetRange set Adabas range (copies r into the period-group range)
func (commonType *CommonType) SetRange(r *AdaRange) {
	commonType.peRange = *r
}
// SetParent set the parent of the type. A non-nil parent propagates the
// PE flag downwards (children of a period group are part of it) and the
// atomic-FB flag upwards through every ancestor. Passing nil detaches
// the type from its current parent structure.
func (commonType *CommonType) SetParent(parentType IAdaType) {
	if parentType != nil {
		if Central.IsDebugLevel() {
			Central.Log.Debugf("%s parent is set to %s", commonType.name, parentType.Name())
		}
		if parentType.HasFlagSet(FlagOptionPE) {
			commonType.AddFlag(FlagOptionPE)
		}
		if commonType.HasFlagSet(FlagOptionAtomicFB) {
			// Mark all non-root ancestors so the format buffer treats the
			// whole chain as atomic.
			p := parentType
			for p != nil {
				if p.GetParent() != nil {
					p.AddFlag(FlagOptionAtomicFB)
				}
				p = p.GetParent()
			}
		}
	} else {
		// Detach: remove this field from its previous parent structure.
		if commonType.parentType != nil {
			pType := commonType.parentType.(*StructureType)
			pType.RemoveField(commonType)
		}
	}
	commonType.parentType = parentType
}
// GetParent get the parent defined to this type
func (commonType *CommonType) GetParent() IAdaType {
return commonType.parentType
}
// IsStructure return if the type is of structure types
func (commonType *CommonType) IsStructure() bool {
return false
}
// AddOption add the option to the field
func (commonType *CommonType) AddOption(fieldOption FieldOption) {
commonType.options |= (1 << fieldOption)
}
// ClearOption clear the option to the field
func (commonType *CommonType) ClearOption(fieldOption FieldOption) {
commonType.options &^= (1 << fieldOption)
}
// IsOption reports whether the given option bit is set on the field.
func (commonType *CommonType) IsOption(fieldOption FieldOption) bool {
	return (commonType.options & (1 << fieldOption)) != 0
}
// SetOption replaces the complete option bit set of the field.
func (commonType *CommonType) SetOption(option uint32) {
	commonType.options = option
}
// IsSpecialDescriptor reports whether the field type is one of the
// special descriptor types: collation, phonetic, super, hyper or
// referential descriptor.
func (commonType *CommonType) IsSpecialDescriptor() bool {
	ft := commonType.fieldType
	return ft == FieldTypeCollation || ft == FieldTypePhonetic ||
		ft == FieldTypeSuperDesc || ft == FieldTypeHyperDesc ||
		ft == FieldTypeReferential
}
// FieldOption enumerates Adabas field options; each value is used as a
// bit position in CommonType.options.
type FieldOption uint32

const (
	// FieldOptionUQ field option for unique descriptors
	FieldOptionUQ FieldOption = iota
	// FieldOptionNU field option for null suppression
	FieldOptionNU
	// FieldOptionFI field option for fixed size
	FieldOptionFI
	// FieldOptionDE field option for descriptors
	FieldOptionDE
	// FieldOptionNC field option for sql
	FieldOptionNC
	// FieldOptionNN field option for non null
	FieldOptionNN
	// FieldOptionHF field option for high order fields
	FieldOptionHF
	// FieldOptionNV field option for null value
	FieldOptionNV
	// FieldOptionNB field option NB (purpose not documented here)
	FieldOptionNB
	// FieldOptionHE field option HE (purpose not documented here)
	FieldOptionHE
	// FieldOptionPE field option for period
	FieldOptionPE
	// FieldOptionMU field option for multiple fields
	FieldOptionMU
	// FieldOptionPF field option for Packed F
	FieldOptionPF
	// FieldOptionLA field option for large alpha
	FieldOptionLA
	// FieldOptionLB field option for large objects
	FieldOptionLB
	// FieldOptionColExit field option for collation exit
	FieldOptionColExit
)
// fieldOptions maps FieldOption values (by index) to their two-letter
// Adabas codes.
// NOTE(review): only 13 entries are present but there are 16 FieldOption
// values — indexing with FieldOptionLA, FieldOptionLB or
// FieldOptionColExit would panic. Confirm whether "LA"/"LB"/collation-exit
// codes should be appended.
var fieldOptions = []string{"UQ", "NU", "FI", "DE", "NC", "NN", "HF", "NV", "NB", "HE", "PE", "MU", "PF"}
// FlagOption is an internal flag set used to avoid repeated traversal
// through the type tree (for example to mark MU and PE membership).
// Each value is a bit position; see Bit().
type FlagOption uint32

const (
	// FlagOptionPE indicate tree is part of period group
	FlagOptionPE FlagOption = iota
	// FlagOptionAtomicFB indicate tree contains MU fields
	FlagOptionAtomicFB
	// FlagOptionMUGhost ghost field for MU
	FlagOptionMUGhost
	// FlagOptionToBeRemoved should be removed
	FlagOptionToBeRemoved
	// FlagOptionSecondCall Field will need a second call to get the value
	FlagOptionSecondCall
	// FlagOptionReference Field will skip parsing value
	FlagOptionReference
	// FlagOptionReadOnly read only field
	FlagOptionReadOnly
	// FlagOptionLengthNotIncluded length not include in record buffer
	FlagOptionLengthNotIncluded
	// FlagOptionPart structure is request only in parts
	FlagOptionPart
	// FlagOptionSingleIndex single index query
	FlagOptionSingleIndex
	// FlagOptionLengthPE instead of length use period group count
	FlagOptionLengthPE
)
// Bit returns the single-bit mask corresponding to this flag option.
func (flagOption FlagOption) Bit() uint32 {
	return uint32(1) << uint32(flagOption)
}
// HasFlagSet reports whether the given flag is set on this type.
func (commonType *CommonType) HasFlagSet(flagOption FlagOption) bool {
	//Central.Log.Debugf("Check flag %d set %d=%d -> %v", commonType.flags, flagOption.Bit(), flagOption.Bit(), (commonType.flags & flagOption.Bit()))
	return (commonType.flags & flagOption.Bit()) != 0
}
// AddFlag adds the flag to the type's flag set. For FlagOptionAtomicFB
// and FlagOptionSingleIndex the flag is also propagated upward through
// all named ancestors; FlagOptionAtomicFB is additionally pushed down
// into the direct sub types. A flag that is already set is a no-op.
func (commonType *CommonType) AddFlag(flagOption FlagOption) {
	if commonType.HasFlagSet(flagOption) {
		if Central.IsDebugLevel() {
			Central.Log.Debugf("Flag %s to %d already done", commonType.shortName, flagOption.Bit())
		}
		return
	}
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Set Flag %s to %d", commonType.shortName, flagOption.Bit())
	}
	commonType.flags |= flagOption.Bit()
	if flagOption == FlagOptionAtomicFB || flagOption == FlagOptionSingleIndex {
		// Walk up to named ancestors; stop as soon as one already carries
		// the flag (its ancestors were handled when it was flagged).
		p := commonType.GetParent()
		for p != nil && p.ShortName() != "" {
			if Central.IsDebugLevel() {
				Central.Log.Debugf("Set Parent Flag %s to %d", p.ShortName(), flagOption.Bit())
			}
			if p.HasFlagSet(flagOption) {
				break
			}
			p.AddFlag(flagOption)
			p = p.GetParent()
		}
		if flagOption == FlagOptionAtomicFB {
			// Only work in period group or group
			// if !p.HasFlagSet(flagOption) {
			for _, s := range commonType.SubTypes {
				if Central.IsDebugLevel() {
					Central.Log.Debugf("Set Children Flag %s to %d", s.ShortName(), flagOption.Bit())
				}
				if !s.HasFlagSet(flagOption) {
					s.AddFlag(flagOption)
				}
			}
			// }
		}
	}
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Set Flag %s to %d done", commonType.shortName, flagOption.Bit())
	}
}
// RemoveFlag clears the flag from the type's flag set.
func (commonType *CommonType) RemoveFlag(flagOption FlagOption) {
	commonType.flags &= ^flagOption.Bit()
}
// SetPartialRange sets the partial read/write range of the field.
func (commonType *CommonType) SetPartialRange(partial *AdaRange) {
	commonType.partialRange = partial
}
// PartialRange returns the partial range, or nil if none is set.
func (commonType *CommonType) PartialRange() *AdaRange {
	return commonType.partialRange
}
// PeriodicRange returns a pointer to the period-group (PE) range.
func (commonType *CommonType) PeriodicRange() *AdaRange {
	return &commonType.peRange
}
// MultipleRange returns a pointer to the multiple-field (MU) range.
func (commonType *CommonType) MultipleRange() *AdaRange {
	return &commonType.muRange
}
// Convert returns the Unicode converter used for Alpha/String fields
// (nil if none was configured).
func (commonType *CommonType) Convert() ConvertUnicode {
	return commonType.convert
}
// SetCharset installs a Unicode converter for the named charset.
func (commonType *CommonType) SetCharset(name string) {
	commonType.convert = NewUnicodeConverter(name)
}
// SetConvert installs the Unicode converter c, but only for field types
// that carry character data (string, LA string, LB string). All other
// field types silently ignore the call.
func (commonType *CommonType) SetConvert(c ConvertUnicode) {
	ft := commonType.fieldType
	if ft == FieldTypeString || ft == FieldTypeLAString || ft == FieldTypeLBString {
		commonType.convert = c
	}
}
|
package pool
import (
"crawler/logger"
"testing"
"time"
"gopkg.in/h2non/gock.v1"
)
// init silences the crawler logger so test output stays clean.
func init() {
	logger.Mute()
}
// TestStartTask drives StartTask through the table cases, with all four
// crawled hosts stubbed by gock and throttling effectively disabled.
func TestStartTask(t *testing.T) {
	defer gock.Off()
	for _, hostname := range []string{"a.com", "b.com", "c.com", "e.com"} {
		gock.New("http://" + hostname).Get("/").Reply(200).BodyString("")
	}
	// Effectively disable throttling so the test does not sleep.
	ThrottleRate = time.Nanosecond
	for _, tc := range testCasesStartTask {
		task := StartTask(tc.Body)
		// Wait for the asynchronous crawl to finish before asserting.
		<-task.Done
		if task.Id != tc.Id {
			t.Fatalf("task.Id = %d. Want: %d",
				task.Id, tc.Id)
		}
		if Storage.Len() != tc.StorageSize {
			t.Fatalf("Storage.Len() = %d. Want: %d",
				Storage.Len(), tc.StorageSize)
		}
		if Throttle.Len() != tc.ThrottleSize {
			t.Fatalf("Throttle.Len() = %d. Want: %d",
				Throttle.Len(), tc.ThrottleSize)
		}
	}
}
// TestFindTask checks that FindTask returns the expected error for each
// table case.
func TestFindTask(t *testing.T) {
	for _, tc := range testCasesFindTask {
		tc.Setup()
		_, err := FindTask(tc.Id)
		if err != tc.Error {
			// Fix: the original Fatalf had three verbs but only two
			// arguments and printed err with %d instead of the id.
			t.Fatalf("FindTask(%d) = _, %#v. Want: %#v",
				tc.Id, err, tc.Error)
		}
	}
}
// TestDeleteTask checks DeleteTask's error result and the resulting
// storage size for each table case.
func TestDeleteTask(t *testing.T) {
	for _, tc := range testCasesDeleteTask {
		tc.Setup()
		_, err := DeleteTask(tc.Id)
		if err != tc.Error {
			// Fix: the message named the wrong function (FindTask) and
			// the Fatalf had three verbs but only two arguments.
			t.Fatalf("DeleteTask(%d) = _, %#v. Want: %#v",
				tc.Id, err, tc.Error)
		}
		if Storage.Len() != tc.StorageSize {
			t.Fatalf("Storage.Len() = %d. Want: %d",
				Storage.Len(), tc.StorageSize)
		}
	}
}
|
package main
// Auto generated file, do NOT edit!
import (
"tetra/lib/factory"
"tetra/lib/gui"
)
// factoryRegisted guards FactoryRegister against double registration.
// ("Registed" is a typo for "Registered", kept as-is: generated file.)
var factoryRegisted bool
// FactoryRegister register creator in factory for package main.
// It is idempotent: repeated calls register the creator only once.
func FactoryRegister() {
	if factoryRegisted {
		return
	}
	factoryRegisted = true
	factory.Register(`main.Window`, func() interface{} {
		return NewWindow()
	})
}
// NewWindow create and init new Window object. It wires the embedded
// gui Window's Self back-pointer to the new instance before Init runs.
func NewWindow() *Window {
	p := new(Window)
	p.Window.Window.Self = p
	p.Init()
	return p
}
// Class returns the factory class name for Window.
func (p *Window) Class() string {
	return (`main.Window`)
}
// IWindow is the interface of class Window; it currently just re-exports
// the embedded gui window behavior.
type IWindow interface {
	gui.IWindow
}
|
package jwt
import (
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"strings"
)
// Check validates a JWT-style token ("header.payload.signature") against
// secret and returns the decoded payload. The signature is recomputed as
// HMAC-SHA256 over "header.payload" and compared in constant time.
//
// NOTE(review): standard JWS (RFC 7515) encodes segments with
// base64.RawURLEncoding; this package uses base64.StdEncoding — confirm
// it matches the token producer before changing.
func Check(token, secret string) (jwtPayload, error) {
	var data jwtPayload
	tempSplitToken := strings.Split(token, ".")
	if len(tempSplitToken) != 3 {
		return data, errors.New("Invalid token format")
	}
	// 0: header, 1: payload, 2: signature
	signatureData := fmt.Sprintf("%s.%s", tempSplitToken[0], tempSplitToken[1])
	newSignature := hmacAlgorithm(signatureData, sha256.New, secret)
	// Fix: decode errors were previously discarded, letting a garbage
	// signature/payload fall through to the comparison/unmarshal.
	requestSignature, err := base64.StdEncoding.DecodeString(tempSplitToken[2])
	if err != nil {
		return data, errors.New("Signature not match")
	}
	if !hmac.Equal(requestSignature, newSignature) {
		return data, errors.New("Signature not match")
	}
	payload, err := base64.StdEncoding.DecodeString(tempSplitToken[1])
	if err != nil {
		return data, errors.New("Invalid token format")
	}
	err = json.Unmarshal(payload, &data)
	return data, err
}
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"github.com/nlopes/slack"
"net/http"
"os"
"regexp"
"sort"
"time"
)
// main runs the Slack RTM bot: on a "!gh <user>" message it fetches the
// user's GitHub repositories and posts them back to the channel in
// Slack-sized chunks.
func main() {
	// Matches "!gh <user>" and captures the GitHub user name.
	cmdGitHub, _ := regexp.Compile("^!gh\\s([a-zA-Z0-9_]+)")
	api := slack.New(
		os.Getenv("SLACK_TOKEN"),
		// slack.OptionDebug(true),
		// slack.OptionLog(log.New(os.Stdout, "slack-bot: ", log.Lshortfile|log.LstdFlags)),
	)
	rtm := api.NewRTM()
	go rtm.ManageConnection()
	for msg := range rtm.IncomingEvents {
		fmt.Print("Event Received\n")
		switch ev := msg.Data.(type) {
		case *slack.HelloEvent:
			// Ignore hello
		case *slack.ConnectedEvent:
			fmt.Printf("\tInfos: %+v\n", ev.Info)
			fmt.Printf("\tConnection counter: %d\n", ev.ConnectionCount)
			// Replace C2147483705 with your Channel ID
			// rtm.SendMessage(rtm.NewOutgoingMessage("Hello world", "C2147483705"))
		case *slack.MessageEvent:
			cmd := cmdGitHub.FindStringSubmatch(ev.Text)
			if len(cmd) != 2 {
				continue
			}
			user := cmd[1]
			rtm.SendMessage(rtm.NewOutgoingMessage(fmt.Sprintf("Fetching github repositories for %s...", user), ev.Channel))
			// 4000 runes per message keeps each chunk under Slack's limit;
			// the 1s sleep spaces messages out to avoid rate limiting.
			for _, m := range SplitSubN(getUserRepositories(user), 4000) {
				time.Sleep(time.Second)
				rtm.SendMessage(rtm.NewOutgoingMessage(m, ev.Channel))
			}
		case *slack.PresenceChangeEvent:
			fmt.Printf("\tPresence Change: %v\n", ev)
		case *slack.LatencyReport:
			fmt.Printf("\tCurrent latency: %v\n", ev.Value)
		case *slack.RTMError:
			fmt.Printf("\tError: %s\n", ev.Error())
		case *slack.InvalidAuthEvent:
			// NOTE(review): missing trailing \n in this Printf.
			fmt.Printf("\tInvalid credentials")
			return
		default:
			// Ignore other events..
			fmt.Printf("\tUnexpected: %+v\n", msg.Data)
		}
	}
}
// UserRepository is the subset of the GitHub repository JSON this bot
// cares about: full name and star count.
type UserRepository struct {
	FullName       string `json:"full_name"`
	StargazerCount int    `json:"stargazers_count"`
}
// UserRepositories implements sort.Interface, ordering repositories by
// descending stargazer count.
type UserRepositories []UserRepository

func (a UserRepositories) Len() int           { return len(a) }
func (a UserRepositories) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a UserRepositories) Less(i, j int) bool { return a[i].StargazerCount > a[j].StargazerCount }
// getUserRepositories fetches up to 100 repositories of GitHub user u and
// returns one line per repository, sorted by descending star count.
// Errors are returned as a human-readable message (not an error value) so
// the result can be posted to Slack directly.
func getUserRepositories(u string) string {
	res, err := http.Get("https://api.github.com/users/" + u + "/repos?per_page=100")
	if err != nil {
		return fmt.Sprintf("Error occurred: %s", err)
	}
	defer res.Body.Close()
	// Fix: a non-200 response (rate limit, unknown user) is not a
	// repository list and previously produced a confusing decode error.
	if res.StatusCode != http.StatusOK {
		return fmt.Sprintf("Error occurred: unexpected status %s", res.Status)
	}
	var repositories UserRepositories
	if err := json.NewDecoder(res.Body).Decode(&repositories); err != nil {
		return fmt.Sprintf("Error occurred: %s", err)
	}
	sort.Sort(repositories)
	var out bytes.Buffer
	for _, repository := range repositories {
		fmt.Fprintf(&out, "https://github.com/%s with stars %d\n", repository.FullName, repository.StargazerCount)
	}
	return out.String()
}
// SplitSubN splits s into chunks of at most n runes each; the final chunk
// may be shorter. An empty s, or n <= 0, yields an empty (non-nil) slice.
func SplitSubN(s string, n int) []string {
	subs := []string{}
	// Fix: the original computed (i+1) % n and panicked on n <= 0.
	if n <= 0 {
		return subs
	}
	runes := bytes.Runes([]byte(s))
	// Slice off n runes at a time instead of growing a string rune by
	// rune (the original concatenation was quadratic).
	for len(runes) > 0 {
		end := n
		if end > len(runes) {
			end = len(runes)
		}
		subs = append(subs, string(runes[:end]))
		runes = runes[end:]
	}
	return subs
}
|
package chat
import (
"fmt"
"math/rand"
"strings"
"time"
)
// slackClient is the minimal Slack API surface the chat service needs:
// listing channel members, resolving names, and posting a message.
type slackClient interface {
	GetUserIDsInChannel(channelID string) ([]string, error)
	GetUserName(userID string) (string, error)
	PostMessage(channelID string, message string) error
}
// Service builds and posts the weekly random chat roster for a channel.
type Service struct {
	slack slackClient
}
// NewService returns a Service that talks to Slack through client.
func NewService(client slackClient) *Service {
	svc := new(Service)
	svc.slack = client
	return svc
}
// Process fetches the members of channelID, shuffles them, splits them
// into groups of ppg (persons per group) and posts the resulting roster
// message back to the channel.
func (s *Service) Process(channelID string, ppg int) error {
	userIDs, err := s.slack.GetUserIDsInChannel(channelID)
	if err != nil {
		return err
	}
	var userNames []string
	for _, v := range userIDs {
		name, err := s.slack.GetUserName(v)
		if err != nil {
			return err
		}
		userNames = append(userNames, name)
	}
	// remove bot users (they resolve to an empty name)
	userNames = deleteEmptyStrings(userNames)
	// randomise user name slice
	userNames = shuffle(userNames)
	// split user name slice based on ppg (persons per group)
	groups := splitGroups(userNames, ppg)
	// compose a message
	message := composeMessage(groups)
	// post message
	if err := s.slack.PostMessage(channelID, message); err != nil {
		return err
	}
	return nil
}
// deleteEmptyStrings returns slice with every empty-string entry removed.
// If nothing survives, the result is nil.
func deleteEmptyStrings(slice []string) []string {
	var kept []string
	for _, s := range slice {
		if len(s) == 0 {
			continue
		}
		kept = append(kept, s)
	}
	return kept
}
// shuffle returns a new slice holding the elements of slice in a random
// order. The generator is seeded from the current time.
func shuffle(slice []string) []string {
	rng := rand.New(rand.NewSource(time.Now().Unix()))
	shuffled := make([]string, len(slice))
	for dst, src := range rng.Perm(len(slice)) {
		shuffled[dst] = slice[src]
	}
	return shuffled
}
// splitGroups partitions slice into consecutive groups of ppg (persons
// per group) members. A ppg that is non-positive or larger than the slice
// yields a single group containing everyone. If the final group ends up
// with at most ppg/2 members (integer division), it is merged into the
// previous group so nobody chats in a too-small group.
func splitGroups(slice []string, ppg int) [][]string {
	// Fix: the original guard was ppg < 0, so ppg == 0 looped forever
	// (idx+0 never advances).
	if ppg <= 0 || ppg > len(slice) {
		return [][]string{slice}
	}
	var result [][]string
	for idx := 0; idx < len(slice); {
		end := idx + ppg
		if end > len(slice) {
			end = len(slice)
		}
		result = append(result, slice[idx:end])
		idx = end
	}
	// Merge an undersized trailing group into its predecessor.
	if len(result) > 1 {
		last := result[len(result)-1]
		// NOTE: ppg/2 is integer division, as in the original — a last
		// group of 1 with ppg==3 merges (1 <= 1), but 2 does not.
		if float32(len(last)) <= float32(ppg/2) {
			result[len(result)-2] = append(result[len(result)-2], last...)
			result = result[:len(result)-1]
		}
	}
	return result
}
// composeMessage renders the weekly roster message: a greeting line
// followed by one line per group listing its members as Slack @-mentions.
// Note: the member names inside groups are rewritten in place to
// "<@name>" form.
func composeMessage(groups [][]string) string {
	lines := make([]string, 0, len(groups)+1)
	lines = append(lines, "Good day team:roller_coaster:. The random chat roster of this week :scroll::")
	for idx, members := range groups {
		for k := range members {
			members[k] = fmt.Sprintf("<@%s>", members[k])
		}
		lines = append(lines, fmt.Sprintf("Group %d: %s", idx+1, strings.Join(members, " :blob-wine-gif: ")))
	}
	return strings.Join(lines, "\n")
}
|
/*
* Copyright @ 2020 - present Blackvisor Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package storage
import (
"database/sql"
"errors"
"fmt"
"log"
"math"
"strconv"
"time"
_ "github.com/go-sql-driver/mysql"
)
// MySQLStorage implements the key/value and map storage backends on top
// of a MySQL connection.
type MySQLStorage struct {
	Connection *sql.DB
}
// Setup is a no-op for MySQL; connection parameters are consumed by
// Create instead.
func (mss *MySQLStorage) Setup(credentials map[string]string) error {
	return nil
}
// executeSingleQuery prepares and executes a statement that returns no
// rows, discarding the result.
func (mss *MySQLStorage) executeSingleQuery(query string) error {
	statement, err := mss.Connection.Prepare(query) // Prepare SQL Statement
	if err != nil {
		return err
	}
	// Fix: close the prepared statement so server-side handles are not
	// leaked on every DDL call.
	defer statement.Close()
	_, err = statement.Exec() // Execute SQL Statements
	return err
}
// Init creates the `keys` and maps tables (with one hash column per
// configurable data column) plus their indexes, and resets the TTL
// truncation clock. Index-creation failures are logged but not fatal,
// since the indexes typically already exist on a second run.
func (mss *MySQLStorage) Init() error {
	keysTableSQL := `CREATE TABLE IF NOT EXISTS ` + "`" + `keys` + "`" + `(
		id BIGINT NOT NULL AUTO_INCREMENT,
		` + "`" + `table` + "`" + ` LONGTEXT,
		table_hash BIGINT,
		` + "`" + `key` + "`" + ` LONGTEXT,
		`
	// Build one hash column per data column plus the column list for the
	// uniqueness constraint.
	keysUniqueIndexValues := ""
	for i := 1; i <= NumberOfColumns; i++ {
		keysTableSQL += `column_` + strconv.Itoa(i) + `_hash BIGINT,
		`
		keysUniqueIndexValues = keysUniqueIndexValues + "column_" + strconv.Itoa(i) + "_hash"
		if i != NumberOfColumns {
			keysUniqueIndexValues = keysUniqueIndexValues + ", "
		}
	}
	keysUniqueIndexValues += ", table_hash"
	keysTableSQL += `value LONGTEXT,
		ttl BIGINT,
		PRIMARY KEY (id),
		CONSTRAINT columns_unique_key UNIQUE(` + keysUniqueIndexValues + `));
	` // SQL Statement for Create Table
	log.Println(keysTableSQL)
	err := mss.executeSingleQuery(keysTableSQL)
	if err != nil {
		return err
	}
	// Secondary indexes: failures (e.g. "already exists") are only logged.
	keysTableSQL = `CREATE INDEX table_hash_index ON ` + "`" + `keys` + "`" + `(table_hash);`
	err = mss.executeSingleQuery(keysTableSQL)
	if err != nil {
		log.Println(err.Error())
	}
	for i := 1; i <= NumberOfColumns; i++ {
		keysTableSQL = `CREATE INDEX column_` + strconv.Itoa(i) + `_hash_index ON ` + "`" + `keys` + "`" + `(column_` + strconv.Itoa(i) + `_hash);`
		err = mss.executeSingleQuery(keysTableSQL)
		if err != nil {
			log.Println(err.Error())
		}
	}
	keysTableSQL = `CREATE INDEX ttl_index ON ` + "`" + `keys` + "`" + `(ttl);`
	err = mss.executeSingleQuery(keysTableSQL)
	if err != nil {
		log.Println(err.Error())
	}
	//og.Println("keys table created")
	// The maps table mirrors `keys` but stores per-object entries keyed by
	// an additional object_key (+ its hash in the unique constraint).
	mapsTableSQL := `CREATE TABLE IF NOT EXISTS maps (
		id BIGINT NOT NULL AUTO_INCREMENT,
		` + "`" + `table` + "`" + ` LONGTEXT,
		table_hash BIGINT,
		` + "`" + `key` + "`" + ` LONGTEXT,
		`
	mapKeysUniqueIndexValues := ""
	for i := 1; i <= NumberOfColumns; i++ {
		mapsTableSQL += `column_` + strconv.Itoa(i) + `_hash BIGINT,
		`
		mapKeysUniqueIndexValues = mapKeysUniqueIndexValues + "column_" + strconv.Itoa(i) + "_hash"
		if i != NumberOfColumns {
			mapKeysUniqueIndexValues = mapKeysUniqueIndexValues + ", "
		}
	}
	mapKeysUniqueIndexValues += ", table_hash"
	mapKeysUniqueIndexValues += ", object_key_hash"
	mapsTableSQL += `value LONGTEXT,
		object_key LONGTEXT,
		object_key_hash BIGINT,
		PRIMARY KEY (id),
		CONSTRAINT map_columns_unique_key UNIQUE(` + mapKeysUniqueIndexValues + `));
	` // SQL Statement for Create Table
	err = mss.executeSingleQuery(mapsTableSQL)
	if err != nil {
		return err
	}
	mapsTableSQL = `CREATE INDEX table_hash_index ON maps(table_hash);`
	err = mss.executeSingleQuery(mapsTableSQL)
	if err != nil {
		log.Println(err.Error())
	}
	for i := 1; i <= NumberOfColumns; i++ {
		mapsTableSQL = `CREATE INDEX column_` + strconv.Itoa(i) + `_hash_index ON maps(column_` + strconv.Itoa(i) + `_hash);`
		err = mss.executeSingleQuery(mapsTableSQL)
		if err != nil {
			log.Println(err.Error())
		}
	}
	log.Println("Before creating an index for object key hash")
	mapsTableSQL = `CREATE INDEX object_key_hash_index ON maps(object_key_hash);`
	err = mss.executeSingleQuery(mapsTableSQL)
	if err != nil {
		log.Println(err.Error())
	}
	// Force the next KeysCleanUp to run immediately.
	LastTruncate = 0
	return nil
}
// Create opens the MySQL connection described by credentials
// (url, username, password, dbname).
// NOTE(review): the DSN is username+":"+password+url+"/"+dbname, so the
// "url" credential must already include the "@tcp(host:port)" part —
// confirm with callers. Also note sql.Open does not actually dial; the
// first query (or a Ping) establishes the connection.
func (mss *MySQLStorage) Create(credentials map[string]string) error {
	var err error
	url := credentials["url"]
	username := credentials["username"]
	password := credentials["password"]
	dbname := credentials["dbname"]
	mss.Connection, err = sql.Open("mysql", username+":"+password+url+"/"+dbname)
	if err != nil {
		return err
	}
	return nil
}
// Destroy closes the underlying database connection.
func (mss *MySQLStorage) Destroy() error {
	return mss.Connection.Close() // Defer Closing the database
}
// GetKeys returns all "table/key" identifiers in table matching pattern,
// from both the `keys` table (TTL-filtered client-side) and the maps
// table (which has no TTL). Matching is done on per-column hashes
// computed from the pattern; expired keys are first purged lazily.
// NOTE(review): row.Scan errors are ignored in both loops.
func (mss *MySQLStorage) GetKeys(table string, pattern string) ([]string, error) {
	err := mss.KeysCleanUp()
	if err != nil {
		return nil, err
	}
	keys := []string{}
	columns := SplitToParts(pattern)
	if len(columns) > NumberOfColumns {
		return nil, errors.New("Too many data columns")
	}
	hashes := CalculateHashesOfColumns(columns)
	tableHash := CalculateHash(table)
	clause := CreateWhereClause(columns, hashes)
	// Hash values are numeric, so splicing them into the SQL is safe.
	sql := "SELECT `table`, `key`, ttl FROM `keys` WHERE table_hash = " + fmt.Sprint(tableHash)
	if clause != "" {
		sql += " AND " + clause
	}
	now := time.Now().UnixNano() / int64(time.Millisecond)
	row, err := mss.Connection.Query(sql)
	if err != nil {
		return nil, err
	}
	defer row.Close()
	for row.Next() { // Iterate and fetch the records from result cursor
		var table string
		var key string
		var ttl int64
		row.Scan(&table, &key, &ttl)
		// Keep only rows that have not expired yet.
		if ttl >= now {
			keys = append(keys, table+"/"+key)
		}
	}
	// Map entries have no TTL; include them all.
	sql = "SELECT `table`, `key` FROM maps WHERE table_hash = " + fmt.Sprint(tableHash)
	if clause != "" {
		sql += " AND " + clause
	}
	now = time.Now().UnixNano() / int64(time.Millisecond)
	row, err = mss.Connection.Query(sql)
	if err != nil {
		return nil, err
	}
	defer row.Close()
	for row.Next() { // Iterate and fetch the records from result cursor
		var table string
		var key string
		row.Scan(&table, &key)
		keys = append(keys, table+"/"+key)
	}
	return keys, nil
}
// SetKey inserts or updates the value for table/key in the `keys` table.
// A non-positive expiration means "never expire". Expired keys are purged
// lazily first.
//
// WARNING: table, key and value are spliced directly into the SQL text;
// callers must not pass untrusted input (SQL injection risk).
func (mss *MySQLStorage) SetKey(table string, key string, value string, expiration time.Duration) error {
	err := mss.KeysCleanUp()
	if err != nil {
		return err
	}
	columns := SplitToParts(key)
	if len(columns) > NumberOfColumns {
		return errors.New("Too many data columns")
	}
	hashes := CalculateHashesOfColumns(columns)
	tableHash := CalculateHash(table)
	sql := "INSERT INTO `keys` (`table`, table_hash, `key`,"
	for i := 1; i <= NumberOfColumns; i++ {
		sql += " column_" + strconv.Itoa(i) + "_hash,"
	}
	sql += " value, ttl) VALUES ('" + table + "', " + fmt.Sprint(tableHash) + ", '" + key + "',"
	// Columns beyond the key's actual parts get a 0 hash placeholder.
	// (Fix: removed the unused keysUniqueIndexValues accumulation.)
	hashesLen := len(hashes)
	for i := 1; i <= NumberOfColumns; i++ {
		if i <= hashesLen {
			sql += " " + fmt.Sprint(hashes[i-1]) + ","
		} else {
			sql += " 0,"
		}
	}
	// expiration <= 0 stores the maximum timestamp, i.e. never expires.
	until := time.Now().Add(expiration)
	if expiration.Milliseconds() <= 0 {
		until = time.Unix(0, int64(math.MaxInt64))
	}
	untilMilliseconds := until.UnixNano() / int64(time.Millisecond)
	sql += " '" + value + "', " + strconv.FormatInt(untilMilliseconds, 10) + ")"
	sql += " ON DUPLICATE KEY UPDATE value = VALUES(value), ttl = VALUES(ttl)"
	//log.Println(sql)
	_, err = mss.Connection.Exec(sql)
	return err
}
// GetFullKey looks up a value addressed by a combined
// "table/part1/part2/..." key: the first path component names the table,
// the remaining components joined by "/" form the key.
func (mss *MySQLStorage) GetFullKey(key string) (string, error) {
	parts := SplitToParts(key)
	table := parts[0]
	rest := ""
	for i, part := range parts[1:] {
		if i > 0 {
			rest += "/"
		}
		rest += part
	}
	return mss.GetKey(table, rest)
}
// GetKey returns the value stored for table/key, or "" if the key is
// unknown or expired. Matching is done on per-column hashes; expired
// keys are purged lazily first.
// NOTE(review): row.Scan errors are ignored.
func (mss *MySQLStorage) GetKey(table string, key string) (string, error) {
	err := mss.KeysCleanUp()
	if err != nil {
		return "", err
	}
	columns := SplitToParts(key)
	if len(columns) > NumberOfColumns {
		return "", errors.New("Too many data columns")
	}
	hashes := CalculateHashesOfColumns(columns)
	tableHash := CalculateHash(table)
	clause := CreateWhereClause(columns, hashes)
	sql := "SELECT `value`, `ttl` FROM `keys` WHERE table_hash = " + fmt.Sprint(tableHash)
	if clause != "" {
		sql += " AND " + clause
	}
	now := time.Now().UnixNano() / int64(time.Millisecond)
	row, err := mss.Connection.Query(sql)
	if err != nil {
		return "", err
	}
	defer row.Close()
	for row.Next() { // Iterate and fetch the records from result cursor
		var value string
		var ttl int64
		row.Scan(&value, &ttl)
		// Return the first non-expired match.
		if ttl >= now {
			return value, nil
		}
	}
	return "", nil
}
// KeysCleanUp deletes expired rows from the `keys` table, but at most
// once per TruncateInterval (tracked via the package-level LastTruncate
// timestamp, in milliseconds).
func (mss *MySQLStorage) KeysCleanUp() error {
	now := time.Now().UnixNano() / int64(time.Millisecond)
	if (now - LastTruncate) > TruncateInterval {
		nowStr := fmt.Sprint(now)
		// NOTE(review): debug leftover — logs the raw timestamp.
		log.Println(nowStr)
		sql := "DELETE FROM `keys` WHERE ttl < " + nowStr
		result, err := mss.Connection.Exec(sql)
		if err != nil {
			return err
		}
		truncatedRows, err := result.RowsAffected()
		if err != nil {
			return err
		}
		log.Printf("%d rows truncated\n", truncatedRows)
		LastTruncate = now
	}
	return nil
}
// DelKey deletes the row(s) matching table/key from the `keys` table and
// returns the number of deleted rows.
// NOTE(review): error paths inconsistently return 0 (cleanup failure) or
// -1 (other failures) as the count.
func (mss *MySQLStorage) DelKey(table string, key string) (int64, error) {
	err := mss.KeysCleanUp()
	if err != nil {
		return 0, err
	}
	columns := SplitToParts(key)
	if len(columns) > NumberOfColumns {
		return -1, errors.New("Too many data columns")
	}
	hashes := CalculateHashesOfColumns(columns)
	tableHash := CalculateHash(table)
	clause := CreateWhereClause(columns, hashes)
	sql := "DELETE FROM `keys` WHERE table_hash = " + fmt.Sprint(tableHash)
	if clause != "" {
		sql += " AND " + clause
	}
	res, err := mss.Connection.Exec(sql)
	if err != nil {
		return -1, err
	}
	return res.RowsAffected()
}
// AddToMap inserts or updates one entry (objectKey -> object) of the map
// stored under table/key in the maps table.
//
// WARNING: table, key, objectKey and object are spliced directly into the
// SQL text; callers must not pass untrusted input (SQL injection risk).
func (mss *MySQLStorage) AddToMap(table string, key string, objectKey string, object string) error {
	columns := SplitToParts(key)
	if len(columns) > NumberOfColumns {
		return errors.New("Too many data columns")
	}
	hashes := CalculateHashesOfColumns(columns)
	tableHash := CalculateHash(table)
	objectKeyHash := CalculateHash(objectKey)
	sql := "INSERT INTO maps (`table`, table_hash, `key`, object_key, object_key_hash,"
	for i := 1; i <= NumberOfColumns; i++ {
		sql += " column_" + strconv.Itoa(i) + "_hash,"
	}
	sql += " value) VALUES ('" + table + "', " + fmt.Sprint(tableHash) + ", '" + key + "', '" + objectKey + "', " + fmt.Sprint(objectKeyHash) + ", "
	// Columns beyond the key's actual parts get a 0 hash placeholder.
	// (Fix: removed the unused keysUniqueIndexValues accumulation.)
	hashesLen := len(hashes)
	for i := 1; i <= NumberOfColumns; i++ {
		if i <= hashesLen {
			sql += " " + fmt.Sprint(hashes[i-1]) + ","
		} else {
			sql += " 0,"
		}
	}
	sql += " '" + object + "')"
	sql += " ON DUPLICATE KEY UPDATE value = VALUES(value)"
	//log.Println(sql)
	_, err := mss.Connection.Exec(sql)
	return err
}
// DelFromMap removes a single entry (objectKey) from the map stored under
// table/key.
func (mss *MySQLStorage) DelFromMap(table string, key string, objectKey string) error {
	columns := SplitToParts(key)
	if len(columns) > NumberOfColumns {
		return errors.New("Too many data columns")
	}
	hashes := CalculateHashesOfColumns(columns)
	tableHash := CalculateHash(table)
	objectKeyHash := CalculateHash(objectKey)
	clause := CreateWhereClause(columns, hashes)
	stmt := "DELETE FROM maps WHERE table_hash = " + fmt.Sprint(tableHash) + " AND object_key_hash = " + fmt.Sprint(objectKeyHash)
	if clause != "" {
		stmt += " AND " + clause
	}
	_, err := mss.Connection.Exec(stmt)
	return err
}
// GetFromMap returns the value of a single entry (objectKey) of the map
// stored under table/key, or "" if not found.
// NOTE(review): the row.Scan error is ignored.
func (mss *MySQLStorage) GetFromMap(table string, key string, objectKey string) (string, error) {
	columns := SplitToParts(key)
	if len(columns) > NumberOfColumns {
		return "", errors.New("Too many data columns")
	}
	hashes := CalculateHashesOfColumns(columns)
	tableHash := CalculateHash(table)
	objectKeyHash := CalculateHash(objectKey)
	clause := CreateWhereClause(columns, hashes)
	sql := "SELECT `value` FROM maps WHERE table_hash = " + fmt.Sprint(tableHash) + " AND object_key_hash = " + fmt.Sprint(objectKeyHash)
	if clause != "" {
		sql += " AND " + clause
	}
	row, err := mss.Connection.Query(sql)
	if err != nil {
		return "", err
	}
	defer row.Close()
	for row.Next() { // Iterate and fetch the records from result cursor
		var value string
		row.Scan(&value)
		// Only the first match is returned.
		return value, nil
	}
	return "", nil
}
// GetMap returns the complete map (objectKey -> value) stored under
// table/key. An unknown key yields an empty map.
// NOTE(review): row.Scan errors are ignored.
func (mss *MySQLStorage) GetMap(table string, key string) (map[string]string, error) {
	result := map[string]string{}
	columns := SplitToParts(key)
	if len(columns) > NumberOfColumns {
		return nil, errors.New("Too many data columns")
	}
	hashes := CalculateHashesOfColumns(columns)
	tableHash := CalculateHash(table)
	clause := CreateWhereClause(columns, hashes)
	sql := "SELECT object_key, `value` FROM maps WHERE table_hash = " + fmt.Sprint(tableHash)
	if clause != "" {
		sql += " AND " + clause
	}
	row, err := mss.Connection.Query(sql)
	if err != nil {
		return nil, err
	}
	defer row.Close()
	for row.Next() { // Iterate and fetch the records from result cursor
		var value string
		var objectKey string
		row.Scan(&objectKey, &value)
		result[objectKey] = value
	}
	return result, nil
}
|
package main
import (
	"bufio"
	"errors"
	"fmt"
	"log"
	"net"
)
// main runs a line-oriented TCP echo-greeting server on :8080.
func main() {
	ln, err := net.Listen("tcp", ":8080")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	for {
		conn, err := ln.Accept()
		if err != nil {
			// Stop cleanly once the listener is closed.
			if errors.Is(err, net.ErrClosed) {
				return
			}
			// Fix: a single failed accept previously killed the whole
			// server via log.Fatal; log it and keep serving instead.
			log.Print(err)
			continue
		}
		go handleConnect(conn)
	}
}
// handleConnect echoes each received line to stdout and replies
// "hello,world" for every line until the peer disconnects or an error
// occurs. The connection is always closed on return.
func handleConnect(conn net.Conn) {
	// Fix: the connection was never closed (fd leak per client).
	defer conn.Close()
	scanner := bufio.NewScanner(conn)
	for scanner.Scan() {
		fmt.Println(scanner.Text())
		// Fix: a failed write previously went unnoticed; stop serving
		// this client instead of spinning on a dead connection.
		if _, err := conn.Write([]byte("hello,world\n")); err != nil {
			return
		}
	}
	// Fix: surface read errors (scanner.Err is nil on clean EOF).
	if err := scanner.Err(); err != nil {
		log.Print(err)
	}
}
|
package db
import (
"errors"
"fmt"
"pencil/global"
p_logger "pencil/utils/log"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/mysql"
)
const (
	// DB_User names the user database handled by GetTransaction.
	DB_User = `user`
)
// mysql bundles the gorm handles for the databases this service uses.
type mysql struct {
	DataBaseUser *gorm.DB
	DataBaseVIP  *gorm.DB
}
var (
	// MysqlService is the package-wide database accessor, initialized in
	// init().
	MysqlService *mysql
)
// init eagerly connects all configured databases on package import.
// NOTE(review): connecting in init panics the whole process on failure
// and makes the import order load-bearing.
func init() {
	MysqlService = new(mysql)
	MysqlService.loadDataBase()
}
// GetTransaction begins and returns a new transaction on the named
// database. Only DB_User is currently supported; any other name yields
// an error.
func (o *mysql) GetTransaction(db string) (*gorm.DB, error) {
	if db == DB_User {
		return o.DataBaseUser.Begin(), nil
	}
	return nil, errors.New("db not exist")
}
// loadDataBase connects the VIP and User databases using the settings
// from the global configuration.
func (o *mysql) loadDataBase() {
	o.DataBaseVIP = CreateDB(global.GlobalConfig.Conf.DB.VIP.Username,
		global.GlobalConfig.Conf.DB.VIP.Password,
		global.GlobalConfig.Conf.DB.VIP.Host,
		global.GlobalConfig.Conf.DB.VIP.Port,
		global.GlobalConfig.Conf.DB.VIP.Database,
		global.GlobalConfig.Conf.DB.VIP.LogMode,
		global.GlobalConfig.Conf.DB.VIP.MaxIdleConns,
		global.GlobalConfig.Conf.DB.VIP.MaxOpenConns,
		global.GlobalConfig.Conf.DB.VIP.IsAutoMigrate)
	o.DataBaseUser = CreateDB(global.GlobalConfig.Conf.DB.User.Username,
		global.GlobalConfig.Conf.DB.User.Password,
		global.GlobalConfig.Conf.DB.User.Host,
		global.GlobalConfig.Conf.DB.User.Port,
		global.GlobalConfig.Conf.DB.User.Database,
		global.GlobalConfig.Conf.DB.User.LogMode,
		global.GlobalConfig.Conf.DB.User.MaxIdleConns,
		global.GlobalConfig.Conf.DB.User.MaxOpenConns,
		global.GlobalConfig.Conf.DB.User.IsAutoMigrate)
}
// CreateDB opens a gorm MySQL connection with the given settings and
// configures logging, the connection pool and (optionally) auto
// migration.
// NOTE(review): panicking on a failed connect makes the whole process
// die during package init; consider returning an error instead.
// The snake_case parameter names are also non-idiomatic Go.
func CreateDB(userName string, passWord string, host string, port uint32, dbName string, log_mode bool, max_idle_conns int, max_open_conns int, is_auto_migrate bool) *gorm.DB {
	connStr := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8&parseTime=True&loc=UTC",
		userName,
		passWord,
		host,
		port,
		dbName,
	)
	db, err := gorm.Open("mysql", connStr)
	if err != nil {
		panic("mysql conn fail," + host + ":" + dbName)
	}
	p_logger.Logger.Info(fmt.Sprintf("Connected to MySQL db : %s ,host : %s ,port: %d", dbName, host, port))
	db.LogMode(log_mode)
	db.DB().SetMaxIdleConns(max_idle_conns)
	db.DB().SetMaxOpenConns(max_open_conns)
	//db.DB().SetConnMaxLifetime(time.Duration(config.MaxLifeTime) * time.Second)
	// NOTE(review): AutoMigrate() is called with no models, which is a
	// no-op — confirm whether models were meant to be passed here.
	if is_auto_migrate {
		db.AutoMigrate()
	}
	return db
}
|
package ch04
import "testing"
// 4.2 The Strassen idea is easy to understand but hard to code: split the
// matrices, combine with additions/subtractions, and reduce the eight
// multiplications to seven.
// NOTE(review): despite the name, this test exercises the naive
// squareMatrixMultiply and only logs the result (no assertions).
func TestStrassen(t *testing.T) {
	arr := [][]int{}
	arr = append(arr, []int{1, 2, 3})
	arr = append(arr, []int{1, 2, 3})
	arr = append(arr, []int{1, 2, 3})
	t.Log(squareMatrixMultiply(arr, arr))
}
// squareMatrixMultiply returns the product of the two n×n matrices A and
// B using the straightforward O(n^3) triple loop.
func squareMatrixMultiply(A, B [][]int) [][]int {
	n := len(A)
	product := make([][]int, n)
	for row := range product {
		product[row] = make([]int, n)
	}
	for row := 0; row < n; row++ {
		for col := 0; col < n; col++ {
			sum := 0
			for k := 0; k < n; k++ {
				sum += A[row][k] * B[k][col]
			}
			product[row][col] = sum
		}
	}
	return product
}
|
/*
* @lc app=leetcode.cn id=91 lang=golang
*
* [91] 解码方法
*/
// @lc code=start
package main
import "fmt"
// main demonstrates numDecodings on two sample inputs ("226" -> 3,
// "12" -> 2).
func main() {
	for _, sample := range []string{"226", "12"} {
		fmt.Println(numDecodings(sample))
	}
}
// numDecodings2 is an unfinished DFS sketch for the same problem; the
// commented-out body was never completed and the function always
// returns 0. Kept for reference only.
func numDecodings2(s string) int {
	// res := 0
	// var dfs(int, int)
	// dfs = func(i, j int) {
	// if i == len(s) {
	// return
	// }
	// dfs(0, 1)
	// }
	// dfs(0,1)
	return 0
}
// numDecodings returns the number of ways to decode the digit string s
// where "1"->'A' ... "26"->'Z' (LeetCode 91). Runs in O(n) time and O(1)
// space, keeping only two rolling dynamic-programming values:
// p = ways for the prefix ending one character earlier, q = ways so far.
func numDecodings(s string) int {
	// Fix: the original indexed s[0] unconditionally and panicked on "".
	if len(s) == 0 || s[0] == '0' {
		return 0
	}
	p, q := 1, 1
	for i := 1; i < len(s); i++ {
		cur, prev := s[i], s[i-1]
		switch {
		case cur == '0' && prev != '1' && prev != '2':
			// "x0" with x not in {1,2} cannot be decoded at all.
			return 0
		case cur == '0':
			// '0' must pair with the previous digit; ways collapse to p.
			q = p
		case (cur <= '6' && (prev == '1' || prev == '2')) || (cur > '6' && prev == '1'):
			// Both single-digit and two-digit decodings are possible.
			p, q = q, p+q
		default:
			// Only the single-digit decoding is possible.
			p = q
		}
	}
	return q
}
// @lc code=end
|
package main
import "fmt"
// main demonstrates deleting the elements at indices 2 and 3 from a
// slice by re-appending the tail over the removed region.
func main() {
	nums := []int{12, 31, 6, 9, 102}
	fmt.Println(nums)
	// Delete the two elements at indices 2 and 3.
	nums = append(nums[:2], nums[4:]...)
	fmt.Println(nums)
}
|
package main
import (
"bufio"
"context"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"strings"
"sync"
"time"
"google.golang.org/grpc/metadata"
"github.com/gocql/gocql"
"github.com/webdevgopi/chatApp-gRPC/proto"
"google.golang.org/grpc"
)
// client is the shared gRPC messaging client, created in main.
var client proto.MessagingServiceClient

// wait tracks the background receive goroutine spawned by connect.
var wait *sync.WaitGroup
// init prepares the WaitGroup used to track the receive goroutine.
func init() {
	wait = &sync.WaitGroup{}
}
// connect opens the server stream for user and starts a background
// goroutine that receives chat messages, marks each one SEEN via
// UpdateMessageStatus, and prints it.
// NOTE(review): streamerror is written by the receive goroutine but read
// and returned immediately by this function — that is a data race, and
// in practice connect almost always returns nil before any stream error
// can be observed.
func connect(user *proto.ConnectUserRequest, ctx context.Context, opt grpc.CallOption) error {
	var streamerror error
	stream, err := client.ConnectUser(ctx, user, opt)
	if err != nil {
		return fmt.Errorf("connection failed: %v\n", err)
	}
	wait.Add(1)
	go func(str proto.MessagingService_ConnectUserClient, ctx context.Context, opt grpc.CallOption) {
		defer wait.Done()
		for {
			msg, err2 := str.Recv()
			if err2 != nil {
				// NOTE(review): the EOF message is immediately overwritten
				// by the generic error message below.
				if err2 == io.EOF {
					streamerror = fmt.Errorf("User logged out\n")
				}
				streamerror = fmt.Errorf("Error reading message: %v\n", err2)
				return
			} else {
				// Mark the message as seen; the channel just serializes the
				// status update before printing.
				errChannel := make(chan error)
				go func(msg *proto.ChatMessage, ctx context.Context, opt grpc.CallOption, errChannel chan error) {
					fmt.Printf("updating msg status \n")
					res, err3 := client.UpdateMessageStatus(ctx, &proto.ChatMessageStatus{
						ChatMessage: msg,
						MsgStatus:   proto.ChatMessageStatus_SEEN,
					}, opt)
					errChannel <- err3
					if err3 != nil {
						fmt.Printf("trouble while updating msg status %v\n", err3.Error())
					}
					fmt.Printf("res : %v\n", res)
				}(msg, ctx, opt, errChannel)
				<-errChannel
				close(errChannel)
			}
			fmt.Printf("ChatId : %v,\nFromUser: %v,\nBody: %v,\nTime: %v,\n", msg.GetChatId(), msg.GetFromUser(), msg.GetBody(), msg.GetTimeStamp())
		}
	}(stream, ctx, opt)
	//wait.Wait()
	fmt.Println("exiting connect func")
	//fmt.Printf(streamerror.Error())
	return streamerror
}
// main wires up the gRPC chat client: it fetches and connects the user,
// demonstrates client-streaming file upload and server-streaming file
// download, then enters an interactive chat loop on stdin.
func main() {
	log.SetFlags(log.LstdFlags | log.Lshortfile)
	done := make(chan int)
	var id = flag.String("ID", "", "The id of the user")
	flag.Parse()
	conn, err := grpc.Dial("localhost:8080", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("Couldnt connect to service: %v", err)
	}
	client = proto.NewMessagingServiceClient(conn)
	// Attach a static auth token to every outgoing RPC via metadata.
	md := metadata.Pairs("authorization", "bearer i_am_an_auth_token")
	ctx := metadata.NewOutgoingContext(context.Background(), md)
	var header metadata.MD
	user, err := client.GetUser(ctx, &proto.QueryUser{
		UserId: *id,
	})
	if err != nil {
		fmt.Printf("Trouble while fetching user; %v", err.Error())
		return
	}
	fmt.Printf("Userobj %v\n", user.String())
	userType := proto.ConnectUserRequest_UNKNOWN
	if user.GetType() == proto.User_EXPERT {
		userType = proto.ConnectUserRequest_EXPERT
	}
	if user.GetType() == proto.User_PATIENT {
		userType = proto.ConnectUserRequest_PATIENT
	}
	connectUserObj := &proto.ConnectUserRequest{
		UserId: user.GetUserId(),
		Name:   user.GetName(),
		Device: fmt.Sprintf("%v-laptop", user.GetName()),
		Type:   userType,
	}
	errr := connect(connectUserObj, ctx, grpc.Header(&header))
	if errr != nil {
		return
	}
	fmt.Printf("Client-Streaming\n")
	stream, _ := client.UploadFile(ctx, grpc.Header(&header))
	tempChatId := gocql.TimeUUID().String()
	ts, err3 := time.Now().MarshalText()
	if err3 != nil {
		fmt.Printf("error while marshaling time; %v\n", err3.Error())
	}
	// First chunk carries the file metadata; subsequent chunks carry content.
	err2 := stream.Send(&proto.FileUploadChunk{
		UploadData: &proto.FileUploadChunk_Info{
			Info: &proto.FileUploadChunk_FileUpload{
				FileName: "a.pdf",
				ChatMessage: &proto.ChatMessage{
					ChatId:     tempChatId,
					FromUser:   user,
					ToUser:     user,
					Body:       "file from u to u",
					TimeStamp:  string(ts),
					Attachment: true,
				},
			},
		},
	})
	if err2 != nil {
		fmt.Printf("Trouble while client streaming %v\n", err2.Error())
		return
	}
	const BufferSize = 1024
	file, errt := os.Open("C:\\Users\\gopia\\Desktop\\ERP-IIT BBS-SREEK.pdf")
	if errt != nil {
		fmt.Println(errt)
		return
	}
	defer file.Close()
	r := bufio.NewReader(file)
	for {
		buffer := make([]byte, BufferSize)
		bytesread, erry := r.Read(buffer)
		if erry != nil {
			// Bug fix: this used to test err2 (the last Send error, which is
			// nil here) instead of erry, so CloseSend was never reached when
			// the reader hit EOF.
			if erry == io.EOF {
				errn := stream.CloseSend()
				if errn != nil {
					return
				}
			}
			break
		}
		err2 = stream.Send(&proto.FileUploadChunk{
			UploadData: &proto.FileUploadChunk_Content{
				Content: buffer[0:bytesread],
			},
		})
		if err2 != nil {
			fmt.Printf("Trouble while client streaming %v\n", err2.Error())
			return
		}
		fmt.Println("bytes read: ", bytesread)
	}
	streamRes, erru := stream.CloseAndRecv()
	if erru != nil {
		fmt.Printf("error %v", erru.Error())
	}
	fmt.Printf("Stream Res : %v\n", streamRes)
	// Server-streaming download of the file that was just uploaded.
	fileDownload, err := client.DownloadFile(ctx, &proto.ChatMessage{
		ChatId: tempChatId,
	}, grpc.Header(&header))
	if err != nil {
		fmt.Printf("error1 %v", err.Error())
		return
	}
	recv, errn := fileDownload.Recv()
	if errn != nil {
		if errn == io.EOF {
			fmt.Printf("didn't even get file info")
		}
		fmt.Printf("error2 %v", errn.Error())
		return
	}
	fileName := recv.GetInfo().GetFileName()
	fileCont := make([]byte, 0, 1024)
	for {
		recv, errn = fileDownload.Recv()
		if errn != nil {
			if errn == io.EOF {
				fmt.Println("File downloaded successfully")
			} else {
				fmt.Printf("error3 %v", errn.Error())
				return
			}
			break
		}
		fileCont = append(fileCont, recv.GetContent()...)
	}
	err = ioutil.WriteFile(fileName, fileCont, 0644)
	if err != nil {
		log.Printf("Trouble while writing to file %v\n", err.Error())
	}
	// Interactive chat loop: pick a peer, dump message history, then send
	// each stdin line as a message and poll its delivery status.
	wait.Add(1)
	go func(user *proto.User, ctx context.Context, opt grpc.CallOption) {
		defer wait.Done()
		fmt.Println("Enter the user id (with whom you're willing to chat) :")
		reader := bufio.NewReader(os.Stdin)
		chatUserId, _ := reader.ReadString('\n')
		chatUserId = strings.TrimSpace(chatUserId)
		input := &proto.QueryUser{
			UserId: chatUserId,
		}
		res, err2 := client.GetUser(ctx, input, opt)
		if err2 != nil {
			fmt.Printf("Trouble while fetching user: %v => %v\n", chatUserId, err2.Error())
			return
		}
		fmt.Printf("User status %v\n", res.GetStatus().String())
		fmt.Printf("You can send messages to user %v\n", res.GetName())
		b, err2 := time.Now().MarshalText()
		if err2 != nil {
			fmt.Printf("error while marshaling time; %v\n", err2.Error())
		}
		fmt.Printf("Getting all messages\n")
		msgs, err2 := client.GetMessages(ctx, &proto.QueryMessagesRequest{
			User1:          user,
			User2:          res,
			TimeConstraint: string(b),
			Limit:          20,
		}, opt)
		if err2 != nil {
			fmt.Printf("trouble while getting msgs %v\n", err2.Error())
			return
		}
		for i, m := range msgs.GetMessages() {
			fmt.Printf("msg %v : %v\n", i, m)
		}
		fmt.Println("Type message")
		scanner := bufio.NewScanner(os.Stdin)
		for scanner.Scan() {
			b, err3 := time.Now().MarshalText()
			if err3 != nil {
				fmt.Printf("error while marshaling time; %v\n", err3.Error())
			}
			msg := &proto.ChatMessage{
				ChatId:    gocql.TimeUUID().String(),
				FromUser:  user,
				ToUser:    res,
				Body:      scanner.Text(),
				TimeStamp: string(b),
			}
			msgRes, err4 := client.BroadcastMessage(ctx, msg, opt)
			if err4 != nil {
				fmt.Printf("Error Sending Message: %v\n", err4)
				break
			}
			errChannel := make(chan error)
			go func(msg *proto.ChatMessage, ctx context.Context, opt grpc.CallOption, errChannel chan error) {
				msgres, err5 := client.GetMessageStatus(ctx, msg)
				errChannel <- err5
				if err5 != nil {
					fmt.Printf("trouble while getting msg status %v\n", err5.Error())
				}
				fmt.Printf("res : %v\n", msgres)
			}(msg, ctx, grpc.Header(&header), errChannel)
			<-errChannel
			close(errChannel)
			fmt.Printf("Message Response for chatId: %v => %v\n", msgRes.GetChatMessage().GetChatId(), msgRes.GetMsgStatus())
		}
	}(user, ctx, grpc.Header(&header))
	// Unblock main only once every goroutine registered on wait has finished.
	go func() {
		wait.Wait()
		close(done)
	}()
	<-done
}
|
package execext
import (
"context"
"errors"
"io"
"os"
"path/filepath"
"strings"
"mvdan.cc/sh/expand"
"mvdan.cc/sh/interp"
"mvdan.cc/sh/shell"
"mvdan.cc/sh/syntax"
)
// RunCommandOptions is the options for the RunCommand func
type RunCommandOptions struct {
Command string // shell source to parse and interpret
Dir string // working directory passed to the interpreter
Env []string // environment in KEY=VALUE form; empty falls back to os.Environ()
Stdin io.Reader // standard streams wired into the interpreter
Stdout io.Writer
Stderr io.Writer
}
var (
// ErrNilOptions is returned when RunCommand receives nil options.
ErrNilOptions = errors.New("execext: nil options given")
)
// RunCommand runs a shell command described by opts, returning a parse,
// setup, or execution error. A nil opts yields ErrNilOptions.
func RunCommand(ctx context.Context, opts *RunCommandOptions) error {
	if opts == nil {
		return ErrNilOptions
	}
	// Parse the command string into a shell program.
	prog, err := syntax.NewParser().Parse(strings.NewReader(opts.Command), "")
	if err != nil {
		return err
	}
	// Fall back to the process environment when none was supplied.
	env := opts.Env
	if len(env) == 0 {
		env = os.Environ()
	}
	runner, err := interp.New(
		interp.Dir(opts.Dir),
		interp.Env(expand.ListEnviron(env...)),
		interp.Module(interp.DefaultExec),
		interp.Module(interp.OpenDevImpls(interp.DefaultOpen)),
		interp.StdIO(opts.Stdin, opts.Stdout, opts.Stderr),
	)
	if err != nil {
		return err
	}
	return runner.Run(ctx, prog)
}
// IsExitError reports whether the given error is an exit status error,
// i.e. the shell command ran but terminated with a non-zero status.
func IsExitError(err error) bool {
switch err.(type) {
case interp.ExitStatus, interp.ShellExitStatus:
return true
default:
return false
}
}
// Expand is a helper to mvdan.cc/shell.Fields that returns the first field
// if available.
func Expand(s string) (string, error) {
	// Normalize path separators and escape spaces so the whole input is
	// treated as a single shell word.
	escaped := strings.Replace(filepath.ToSlash(s), " ", `\ `, -1)
	fields, err := shell.Fields(escaped, nil)
	if err != nil {
		return "", err
	}
	if len(fields) == 0 {
		return "", nil
	}
	return fields[0], nil
}
|
package control
import (
"io"
"net/http"
"os"
)
func IndexView(w http.ResponseWriter, r *http.Request) {
f, _ := os.Open("./views/index.html")
io.Copy(w, f)
f.Close()
}
func DetailView(w http.ResponseWriter, r *http.Request) {
f, _ := os.Open("./views/detail.html")
io.Copy(w, f)
f.Close()
}
func EditView(w http.ResponseWriter, r *http.Request) {
f, _ := os.Open("./views/edit.html")
io.Copy(w, f)
f.Close()
}
func ListView(w http.ResponseWriter, r *http.Request) {
f, _ := os.Open("./views/list.html")
io.Copy(w, f)
f.Close()
}
func ArticleAddView(w http.ResponseWriter, r *http.Request) {
f, _ := os.Open("./views/add.html")
io.Copy(w, f)
f.Close()
}
|
package form3
import "time"
// ClientOption defines an optional parameter for creating a form3.NewClient client.
// Each option mutates the *Client it is given (see WithRequestTimeout, WithDebug).
type ClientOption func(cl *Client)
// WithRequestTimeout sets a maximum request timeout on the client for all requests.
// Individual requests can be timed out by context in a shorter time. The timeout
// set here can't be extended with individual timeouts.
func WithRequestTimeout(timeout time.Duration) ClientOption {
return func(cl *Client) {
cl.maxRequestTimeout = timeout
}
}
// WithDebug enables colorful debugging of the communication.
// It sets the client's enableDbg flag; how that flag affects output is
// decided elsewhere in the client.
func WithDebug() ClientOption {
return func(cl *Client) {
cl.enableDbg = true
}
}
|
package codenames
import (
"encoding/json"
"fmt"
"math"
"time"
"github.com/cockroachdb/pebble"
)
// PebbleStore wraps a *pebble.DB with an implementation of the
// Store interface, persisting games under a []byte(`/games/`)
// key prefix.
type PebbleStore struct {
DB *pebble.DB // underlying key-value store; must be non-nil
}
// Restore loads all persisted games from storage, keyed by game ID.
func (ps *PebbleStore) Restore() (map[string]*Game, error) {
	// Scan every key under the /games/ prefix; the upper bound is the
	// largest possible zero-padded timestamp (see mkkey).
	iter := ps.DB.NewIter(&pebble.IterOptions{
		LowerBound: []byte("/games/"),
		UpperBound: []byte(fmt.Sprintf("/games/%019d", math.MaxInt64)),
	})
	defer iter.Close()
	games := make(map[string]*Game)
	// Idiom fix: was "for _ = iter.First()"; the boolean result is
	// deliberately unused because iter.Valid() guards the body.
	for iter.First(); iter.Valid(); iter.Next() {
		var g Game
		if err := json.Unmarshal(iter.Value(), &g); err != nil {
			return nil, fmt.Errorf("Unmarshal game: %w", err)
		}
		games[g.ID] = &g
	}
	if err := iter.Error(); err != nil {
		return nil, fmt.Errorf("restore iter: %w", err)
	}
	return games, nil
}
// DeleteExpired deletes all games created before `expiry.`
// Keys are ordered by zero-padded creation time (see mkkey), so one range
// delete covers every game created between the Unix epoch and expiry.
func (ps *PebbleStore) DeleteExpired(expiry time.Time) error {
return ps.DB.DeleteRange(
mkkey(0, ""),
mkkey(expiry.Unix(), ""),
nil,
)
}
// Save saves the game to persistent storage.
func (ps *PebbleStore) Save(g *Game) error {
	k, v, err := gameKV(g)
	if err != nil {
		// Fix: the old wrap context said "trySave", which matches no
		// function in this file; name the operation that actually failed.
		return fmt.Errorf("gameKV: %w", err)
	}
	// Sync so the write survives a crash before we report success.
	if err := ps.DB.Set(k, v, &pebble.WriteOptions{Sync: true}); err != nil {
		return fmt.Errorf("db.Set: %w", err)
	}
	return nil
}
// gameKV builds the pebble key/value pair for a game: the key encodes the
// creation time and ID (see mkkey), the value is the JSON-encoded game.
func gameKV(g *Game) (key, value []byte, err error) {
	encoded, err := json.Marshal(g)
	if err != nil {
		return nil, nil, fmt.Errorf("marshaling GameState: %w", err)
	}
	key = mkkey(g.CreatedAt.Unix(), g.ID)
	return key, encoded, nil
}
// mkkey builds the storage key "/games/<zero-padded unix secs>/<quoted id>".
// The fixed-width decimal timestamp keeps lexicographic key order equal to
// chronological order, which the range scans and deletes rely on.
func mkkey(unixSecs int64, id string) []byte {
	// Plain-text keys cost a little space but make raw-dump debugging easy.
	k := fmt.Sprintf("/games/%019d/%q", unixSecs, id)
	return []byte(k)
}
// discardStore is a Store implementation that discards every game
// (presumably used when persistence is disabled — confirm at call sites).
type discardStore struct{}
// Save implements the Store interface by dropping the game and returning nil.
func (ds discardStore) Save(*Game) error { return nil }
|
package main
import (
"os"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// init raises the global logrus level so debug output is always emitted.
// NOTE(review): this runs unconditionally, before the --debug CLI flag is
// parsed — confirm whether the flag is meant to gate this instead.
func init() {
logrus.SetLevel(logrus.DebugLevel)
}
// main builds and runs the startup-exporter CLI with its collect and
// export sub-commands.
func main() {
	exporter := cli.NewApp()
	exporter.Name = "startup-exporter"
	exporter.Usage = "A tool to collect and export container startup time"
	exporter.Flags = []cli.Flag{
		cli.BoolFlag{
			Name:  "debug",
			Usage: "enable debug output",
		},
	}
	exporter.Commands = []cli.Command{
		collectCmd,
		exportCmd,
	}
	if err := exporter.Run(os.Args); err != nil {
		logrus.Fatal(err)
	}
}
|
package api
import (
"bytes"
"fmt"
"io/ioutil"
"mime/multipart"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"testing"
"time"
"github.com/azzzak/fakecast/fs"
"github.com/azzzak/fakecast/store"
"github.com/stretchr/testify/assert"
)
// TestSetCoverURL checks that setCoverURL leaves a channel without a cover
// untouched and rewrites a set cover into a full download URL.
func TestSetCoverURL(t *testing.T) {
	cfg := &Cfg{
		Host: "http://host.com",
	}
	type args struct {
		cfg *Cfg
		c   *store.Channel
	}
	cases := []struct {
		name string
		args args
		want *store.Channel
	}{
		{
			name: "empty",
			args: args{cfg: cfg, c: &store.Channel{}},
			want: &store.Channel{},
		},
		{
			name: "with cover",
			args: args{
				cfg: cfg,
				c: &store.Channel{
					Alias: "nope",
					Cover: "image.png",
				},
			},
			want: &store.Channel{
				Alias: "nope",
				Cover: fmt.Sprintf("%s/files/nope/%s/image.png", cfg.Host, fs.CoverDirName),
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			setCoverURL(tc.args.cfg, tc.args.c)
			assert.Equal(t, tc.want, tc.args.c)
		})
	}
}
// TestUploadCover exercises the cover-upload endpoint end to end against a
// throwaway store and filesystem root. The ok case expects the uploaded
// file to land in the channel's cover directory; the error case drops the
// channels table first and expects a 500.
func TestUploadCover(t *testing.T) {
tests := []struct {
name string
wantErr bool
}{
{
name: "ok",
wantErr: false,
}, {
name: "error",
wantErr: true,
},
}
assert := assert.New(t)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Fresh store and fs root per subtest, removed by the deferred cleanup.
testDir := fmt.Sprintf("test_dir_%x", time.Now().Unix())
err := os.MkdirAll(testDir, os.ModePerm)
assert.Nil(err)
s, err := store.NewStore(testDir)
assert.Nil(err)
defer func() {
s.Close()
err = os.RemoveAll(testDir)
assert.Nil(err)
}()
root := fs.NewRoot(testDir)
cfg := &Cfg{
Store: s,
FS: root,
}
// Seed one channel (ID 1, alias "1") that the handler will target.
id, err := s.AddChannel()
assert.Nil(err)
assert.Equal(int64(1), id)
c, err := s.ChannelInfo(id)
assert.Nil(err)
assert.Equal(int64(1), c.ID)
c.Alias = "1"
err = s.UpdateChannel(c)
assert.Nil(err)
err = root.CreateDir(c.ID)
assert.Nil(err)
// Build a multipart body from the fixture image.
path := filepath.Join("..", "_testdata", "tiny.jpg")
file, err := os.Open(path)
assert.Nil(err)
fileContents, err := ioutil.ReadAll(file)
assert.Nil(err)
fi, err := file.Stat()
assert.Nil(err)
file.Close()
body := new(bytes.Buffer)
writer := multipart.NewWriter(body)
part, err := writer.CreateFormFile("file", fi.Name())
assert.Nil(err)
part.Write(fileContents)
err = writer.Close()
assert.Nil(err)
// Simulate a server-side failure by removing the channel the handler
// will look up.
if tt.wantErr {
err := s.DropChannels()
assert.Nil(err)
}
r := httptest.NewRequest("POST", "/api/channel/1/cover/upload", body)
r.Header.Add("Content-Type", writer.FormDataContentType())
w := httptest.NewRecorder()
handler := http.Handler(InitHandlers(cfg))
handler.ServeHTTP(w, r)
resp := w.Result()
defer resp.Body.Close()
if tt.wantErr {
assert.Equal(http.StatusInternalServerError, resp.StatusCode)
return
}
if !assert.Equal(http.StatusOK, resp.StatusCode) {
t.Fatalf("Got status code: %d\n", resp.StatusCode)
}
// Re-open the stored file to confirm the upload was written to disk.
// NOTE(review): fi is re-assigned here but never inspected — consider
// asserting on its size or dropping the Stat call.
path = filepath.Join(testDir, fs.PodcastsDirName, c.Alias, fs.CoverDirName, "tiny.jpg")
file, err = os.Open(path)
assert.Nil(err)
fi, err = file.Stat()
assert.Nil(err)
file.Close()
})
}
}
// TestDeleteCover exercises the cover-delete endpoint end to end against a
// throwaway store and filesystem root. The ok case expects the cover field
// to be cleared and the file removed from disk; the error case drops the
// channels table first and expects a 500.
func TestDeleteCover(t *testing.T) {
tests := []struct {
name string
wantErr bool
}{
{
name: "ok",
wantErr: false,
}, {
name: "error",
wantErr: true,
},
}
assert := assert.New(t)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Fresh store and fs root per subtest, removed by the deferred cleanup.
testDir := fmt.Sprintf("test_dir_%x", time.Now().Unix())
err := os.MkdirAll(testDir, os.ModePerm)
assert.Nil(err)
s, err := store.NewStore(testDir)
assert.Nil(err)
defer func() {
s.Close()
err = os.RemoveAll(testDir)
assert.Nil(err)
}()
root := fs.NewRoot(testDir)
cfg := &Cfg{
Store: s,
FS: root,
}
// Seed one channel (ID 1, alias "1") that already has a cover set.
id, err := s.AddChannel()
assert.Nil(err)
assert.Equal(int64(1), id)
c, err := s.ChannelInfo(id)
assert.Nil(err)
assert.Equal(int64(1), c.ID)
coverName := "image.jpg"
c.Alias = "1"
c.Title = "channel 1"
c.Cover = coverName
err = s.UpdateChannel(c)
assert.Nil(err)
err = root.CreateDir(c.ID)
assert.Nil(err)
// Create the cover file on disk so the handler has something to delete.
coverPath := filepath.Join(testDir, fs.PodcastsDirName, c.Alias, fs.CoverDirName, coverName)
_, err = os.Create(coverPath)
assert.Nil(err)
// Simulate a server-side failure by removing the channel the handler
// will look up.
if tt.wantErr {
err := s.DropChannels()
assert.Nil(err)
}
r := httptest.NewRequest("DELETE", fmt.Sprintf("/api/channel/1/cover/%s", coverName), nil)
w := httptest.NewRecorder()
handler := http.Handler(InitHandlers(cfg))
handler.ServeHTTP(w, r)
resp := w.Result()
defer resp.Body.Close()
if tt.wantErr {
assert.Equal(http.StatusInternalServerError, resp.StatusCode)
return
}
if !assert.Equal(http.StatusOK, resp.StatusCode) {
t.Fatalf("Got status code: %d\n", resp.StatusCode)
}
// The cover field must be cleared and the file gone from disk.
c, err = s.ChannelInfo(id)
assert.Nil(err)
assert.Equal("", c.Cover)
_, err = os.Stat(coverPath)
assert.NotNil(err)
})
}
}
|
package main
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/endpoints"
"github.com/aws/aws-sdk-go-v2/aws/external"
"github.com/aws/aws-sdk-go-v2/service/lexruntimeservice"
"github.com/rmcsoft/hasp/sound"
"golang.org/x/net/context"
//"github.com/tosone/minimp3"
//"github.com/twinj/uuid"
)
// makeAwsSession loads the default AWS config and builds a Lex runtime
// client pinned to us-east-1. Panics if the SDK config cannot be loaded.
// NOTE(review): despite the name this returns a client, not a session, and
// the nil check below is likely dead — confirm whether
// lexruntimeservice.New can ever return nil.
func makeAwsSession() *lexruntimeservice.Client {
cfg, err := external.LoadDefaultAWSConfig()
if err != nil {
panic("unable to load SDK config, " + err.Error())
}
cfg.Region = endpoints.UsEast1RegionID
awsClient := lexruntimeservice.New(cfg)
if awsClient == nil {
log.Fatal("Failed to create AWS Lex client")
}
return awsClient
}
func doTheJob(hwd *sound.HotWordDetector, svc *lexruntimeservice.Client, uid string, player *sound.SoundPlayer) {
soundCapturerEventSource, _ := hwd.StartSoundCapture()
for event := range soundCapturerEventSource.Events() {
if event.Name != "SoundEmpty" {
data, _ := event.Args[0].(sound.SoundCapturedEventData)
samples := data.AudioData.Samples()
reader := bytes.NewReader(samples)
soundCapturerEventSource.Close()
req := svc.PostContentRequest(&lexruntimeservice.PostContentInput{
BotAlias: aws.String("$LATEST"),
BotName: aws.String("HASPBot"),
ContentType: aws.String(data.AudioData.Mime()),
UserId: aws.String(uid),
InputStream: reader,
Accept: aws.String("audio/pcm"),
//Accept: aws.String("audio/mpeg"),
})
fmt.Println("Sending request to runtime.lex")
resp, err := req.Send(context.TODO())
if err != nil {
fmt.Print("Failed to send request to runtime.lex: %v", err)
return
}
log.Println("Response runtime.lex: ", resp)
if resp.InputTranscript != nil {
fmt.Println("InputTranscript: ", *resp.InputTranscript)
}
if resp.Message != nil {
fmt.Println("Message: ", *resp.Message)
}
if resp.AudioStream == nil {
fmt.Print("Response from runtime.lex does not contain AudioStream")
return
}
outSamples, err := ioutil.ReadAll(resp.AudioStream)
if err != nil || len(outSamples) == 0 {
fmt.Print("!!!!! Unable to read audio data from the runtime.lex response")
return
}
// dec, pcm, err := minimp3.DecodeFull(outSamples)
ad := sound.NewMonoS16LE(16000, outSamples)
player.PlaySync(ad)
}
}
}
// main wires the hot-word detector, Lex client, and sound player together
// and runs the capture/respond loop for a fixed test user.
func main() {
	svc := makeAwsSession()
	// Fix: errors from the detector and player constructors were ignored,
	// which would surface later as nil-pointer panics inside doTheJob.
	hwd, err := sound.NewHotWordDetector(
		sound.HotWordDetectorParams{
			DebugSound:        true,
			ModelPath:         "porcupine_params.pv",
			KeywordPath:       "alexa_linux.ppn",
			CaptureDeviceName: "default",
		},
	)
	if err != nil {
		log.Fatalf("Failed to create hot word detector: %v", err)
	}
	player, err := sound.NewSoundPlayer("default")
	if err != nil {
		log.Fatalf("Failed to create sound player: %v", err)
	}
	doTheJob(hwd, svc, "User1", player)
}
|
package client
// EmptyURL is the error reported when no URLs were provided.
type EmptyURL struct{}

// Error implements the error interface.
func (e EmptyURL) Error() string { return "missing urls" }
// InvalidFlag is the error reported when the parallel flag is not positive.
type InvalidFlag struct{}

// Error implements the error interface.
func (i InvalidFlag) Error() string { return "flag parallel must be greater than zero" }
|
package main_test
import (
"container/heap"
Algo "AlgorithmAndDataStructure"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("AlgorithmAndDataStructure", func() {
Describe("non modifying sequence", func() {
var a []int
var b []int
var equal func(int, int) bool
BeforeEach(func() {
a = []int{1, 2, 3}
b = []int{1, 1, 3}
equal = func(i int, j int) bool {
return i == j
}
})
Context("AllOf", func() {
It("should be true", func() {
Expect(Algo.AllOf(a, func(i int) bool {
return i < 4
})).To(BeTrue())
})
It("should be false", func() {
Expect(Algo.AllOf(a, func(i int) bool {
return i < 3
})).To(BeFalse())
})
})
It("AnyOf", func() {
Expect(Algo.AnyOf(a, func(i int) bool {
return i > 1
})).To(BeTrue())
})
It("NoneOf", func() {
Expect(Algo.NoneOf(a, func(i int) bool {
return i > 4
})).To(BeTrue())
})
It("ForEach", func() {
tmp := make([]int, 0)
Algo.ForEach(a, func(i int) {
tmp = append(tmp, i)
})
Expect(tmp).To(Equal(a))
})
It("Count", func() {
Expect(Algo.Count(a)).To(Equal(len(a)))
})
It("CountIf", func() {
Expect(Algo.CountIf(a, func(i int) bool {
return i%2 == 0
})).To(Equal(1))
})
Context("Mismatch", func() {
It("has misMatch", func() {
first, second, hasMisMatch := Algo.Mismatch(a, b,
func(i int, j int) bool {
return i == j
})
Expect(hasMisMatch).To(BeTrue())
Expect(first).To(Equal(2))
Expect(second).To(Equal(1))
})
It("don't has mismatch", func() {
c := make([]int, len(a))
copy(c, a)
first, second, hasMisMatch := Algo.Mismatch(a, c,
func(i int, j int) bool {
return i == j
})
Expect(hasMisMatch).To(BeFalse())
Expect(first).To(Equal(0))
Expect(second).To(Equal(0))
})
})
It("Equal", func() {
same := []int{1, 2, 3}
Expect(Algo.Equal(a, same, func(first int, second int) bool {
return first == second
})).To(BeTrue())
})
It("Find", func() {
Expect(Algo.Find(a, 2)).To(Equal(1))
})
It("FindIf", func() {
Expect(Algo.FindIf(a, func(i int) bool {
return i == 2
})).To(Equal(1))
})
It("FindIfNot", func() {
Expect(Algo.FindIfNot(a, func(i int) bool {
return i == 2
})).To(Equal(0))
})
Context("FindFirstOf", func() {
It("Not existed", func() {
tmp := []int{4, 5, 6}
_, found := Algo.FindFirstOf(tmp, a, equal)
Expect(found).To(BeFalse())
})
It("Only one existed", func() {
tmp := []int{0, 1}
idx, found := Algo.FindFirstOf(tmp, a, equal)
Expect(found).To(BeTrue())
Expect(idx).To(Equal(1))
})
It("existed many times", func() {
tmp := []int{0, 1, 2, 3, 1, 2, 3}
idx, _ := Algo.FindFirstOf(tmp, a, equal)
Expect(idx).To(Equal(1))
})
})
Context("FindEnd", func() {
It("Not Existed", func() {
tmp := []int{1, 2, 0}
_, found := Algo.FindEnd(tmp, a, equal)
Expect(found).To(BeFalse())
})
It("Existed", func() {
tmp := []int{1, 2, 1, 2, 1}
c := []int{1, 2, 1}
idx, found := Algo.FindEnd(tmp, c, equal)
Expect(found).To(BeTrue())
Expect(idx).To(Equal(2))
})
})
Context("AdjacentFind", func() {
It("Not existed", func() {
tmp := []int{1, 2, 3}
_, found := Algo.AdjacentFind(tmp, equal)
Expect(found).To(BeFalse())
})
It("Existed", func() {
tmp := []int{1, 1, 3}
idx, found := Algo.AdjacentFind(tmp, equal)
Expect(found).To(BeTrue())
Expect(idx).To(Equal(0))
})
})
Context("Search", func() {
It("Not existed", func() {
tmp := []int{1, 2, 2, 4}
_, found := Algo.Search(tmp, a, equal)
Expect(found).To(BeFalse())
})
It("Existed", func() {
tmp := []int{0, 1, 2, 3}
idx, found := Algo.Search(tmp, a, equal)
Expect(found).To(BeTrue())
Expect(idx).To(Equal(1))
})
})
Context("SearchN", func() {
tmp := []int{0, 1, 1, 1}
It("Existed", func() {
idx, found := Algo.SearchN(tmp, 3, 1, equal)
Expect(found).To(BeTrue())
Expect(idx).To(Equal(1))
})
It("Not existed", func() {
_, found := Algo.SearchN(tmp, 4, 1, equal)
Expect(found).To(BeFalse())
})
})
})
Describe("modifying sequence", func() {
var src []int
var dest []int
var isOdd func(int) bool
BeforeEach(func() {
src = []int{1, 2, 3}
dest = make([]int, 3)
isOdd = func(i int) bool {
return i%2 != 0
}
})
It("CopyIf", func() {
Expect(Algo.CopyIf(dest, src, isOdd)).To(Equal(2))
Expect(dest[0]).To(Equal(src[0]))
Expect(dest[1]).To(Equal(src[2]))
})
It("CopyN", func() {
Algo.CopyN(dest, src, 1)
Expect(dest[0]).To(Equal(src[0]))
Expect(dest[1]).NotTo(Equal(src[1]))
})
It("CopyBackward", func() {
dest = make([]int, 2)
Expect(Algo.CopyBackward(dest, src)).To(Equal(2))
Expect(dest[0]).To(Equal(src[1]))
Expect(dest[1]).To(Equal(src[2]))
})
It("Fill", func() {
tmp := []int{1, 1, 1}
Algo.Fill(dest, 1)
Expect(dest).To(Equal(tmp))
})
It("FillN", func() {
tmp := []int{1, 1, 1}
Algo.FillN(dest, 3, 1)
Expect(dest).To(Equal(tmp))
Algo.FillN(dest, len(dest)+1, 1)
})
It("Transform", func() {
dest = make([]int, 2)
plusOne := func(prev int) int {
return prev + 1
}
Expect(Algo.Transform(dest, src, plusOne)).
To(Equal([]int{2, 3}))
})
Context("Generate", func() {
allAreOne := func() int {
return 1
}
It("Generate", func() {
Algo.Generate(dest, allAreOne)
Expect(dest).To(Equal([]int{1, 1, 1}))
})
It("GenerateN", func() {
dest[2] = 2
Algo.GenerateN(dest, 2, allAreOne)
Expect(dest).To(Equal([]int{1, 1, 2}))
})
})
It("Remove", func() {
Expect(Algo.Remove(src, 1)).To(Equal([]int{2, 3}))
})
It("RemoveIf", func() {
Expect(Algo.RemoveIf(src, isOdd)).To(Equal([]int{2}))
})
It("RemoveCopy", func() {
Algo.RemoveCopy(dest, src, 3)
Expect(dest[0]).To(Equal(src[0]))
Expect(dest[1]).To(Equal(src[1]))
Expect(dest[2]).NotTo(Equal(src[2]))
})
It("RemoveCopyIf", func() {
Algo.RemoveCopyIf(dest, src, isOdd)
Expect(dest[0]).To(Equal(src[1]))
})
It("Replace", func() {
Algo.Replace(src, 1, 2)
Expect(src).To(Equal([]int{2, 2, 3}))
})
It("ReplaceIf", func() {
Algo.ReplaceIf(src, isOdd, 2)
Expect(src).To(Equal([]int{2, 2, 2}))
})
It("ReplaceCopy", func() {
Algo.ReplaceCopy(dest, src, 3, 1)
Expect(dest).To(Equal([]int{1, 2, 1}))
})
It("ReplaceCopyIf", func() {
Algo.ReplaceCopyIf(dest, src, isOdd, 1)
Expect(dest).To(Equal([]int{1, 2, 1}))
})
It("Swap", func() {
a := 3
b := 1
Algo.Swap(&a, &b)
Expect(a).To(Equal(1))
Expect(b).To(Equal(3))
})
It("SwapRanges", func() {
dest = []int{4, 5, 6}
Algo.SwapRanges(dest, src, 2)
Expect(src).To(Equal([]int{4, 5, 3}))
Expect(dest).To(Equal([]int{1, 2, 6}))
})
Context("Reverse", func() {
It("Odd", func() {
Algo.Reverse(src)
Expect(src).To(Equal([]int{3, 2, 1}))
})
It("Even", func() {
even := []int{1, 2, 3, 4}
Algo.Reverse(even)
Expect(even).To(Equal([]int{4, 3, 2, 1}))
})
})
It("ReverseCopy", func() {
Expect(Algo.ReverseCopy(dest, src)).To(Equal([]int{3, 2, 1}))
})
Context("Rotate", func() {
It("with pivot == 2", func() {
Algo.Rotate(src, 2)
Expect(src).To(Equal([]int{3, 1, 2}))
})
It("with pivot == 1", func() {
Algo.Rotate(src, 1)
Expect(src).To(Equal([]int{2, 3, 1}))
})
It("with wrong pivot", func() {
Algo.Rotate(src, 3)
Expect(src).To(Equal([]int{1, 2, 3}))
})
})
It("RotateCopy", func() {
Expect(Algo.RotateCopy(dest, src, 2)).To(Equal([]int{3, 1, 2}))
})
It("Shuffle", func() {
Algo.Shuffle(src)
})
Context("Unique algorithms", func() {
var duplicate []int
BeforeEach(func() {
duplicate = []int{1, 1, 2, 2}
})
It("Unique", func() {
Expect(Algo.Unique(duplicate)).To(Equal([]int{1, 2}))
Expect(duplicate).NotTo(Equal([]int{1, 1, 2, 2}))
})
It("UniqueCopy", func() {
Expect(Algo.UniqueCopy(duplicate)).To(Equal([]int{1, 2}))
})
})
})
Describe("Heap", func() {
var h *Algo.IntHeap
var hp *Algo.IntHeap
BeforeEach(func() {
h = &Algo.IntHeap{2, 1, 5}
hp = &Algo.IntHeap{2, 1, 5}
heap.Init(hp)
})
It("SortHeap", func() {
Algo.SortHeap(h)
Expect(*h).To(Equal(Algo.IntHeap{1, 2, 5}))
})
It("isHeap", func() {
Expect(Algo.IsHeap(h)).To(BeFalse())
heap.Init(h)
Expect(Algo.IsHeap(h)).To(BeTrue())
})
It("IsHeapUntil", func() {
Expect(Algo.IsHeapUntil(h)).To(Equal(1))
heap.Init(h)
Expect(Algo.IsHeapUntil(h)).To(Equal(h.Len()))
})
It("PushHeap", func() {
x := 3
Algo.PushHeap(hp, x)
Expect(*hp).To(ContainElement(x))
Expect(Algo.IsHeap(hp)).To(BeTrue())
})
It("PopHeap", func() {
Expect(Algo.PopHeap(hp)).To(Equal(1))
})
It("MakeHeap", func() {
Algo.MakeHeap(h)
Expect(h).To(Equal(hp))
})
})
Describe("BinarySearch", func() {
var v1 []int
var v2 []int
BeforeEach(func() {
v1 = []int{1, 2, 4, 8, 16}
v2 = []int{1, 2, 3, 4}
})
Context("LowerBound", func() {
It("v1", func() {
idx, _ := Algo.LowerBound(v1, 4)
Expect(v1[idx]).To(Equal(4))
idx, _ = Algo.LowerBound(v1, 1)
Expect(v1[idx]).To(Equal(1))
_, found := Algo.LowerBound(v1, 17)
Expect(found).To(BeFalse())
})
It("v1 more", func() {
idx, found := Algo.LowerBound(v1, 3)
Expect(v1[idx]).To(Equal(4))
Expect(found).To(BeTrue())
})
It("v2", func() {
idx, _ := Algo.LowerBound(v2, 3)
Expect(v2[idx]).To(Equal(3))
idx, _ = Algo.LowerBound(v2, 2)
Expect(v2[idx]).To(Equal(2))
})
})
Context("UpperBound", func() {
It("v1", func() {
idx, _ := Algo.UpperBound(v1, 4)
Expect(v1[idx]).To(Equal(8))
idx, found := Algo.UpperBound(v1, 5)
Expect(found).To(BeTrue())
Expect(v1[idx]).To(Equal(8))
_, found = Algo.UpperBound(v1, 16)
Expect(found).To(BeFalse())
idx, _ = Algo.UpperBound(v1, 0)
Expect(v1[idx]).To(Equal(1))
})
It("v2", func() {
idx, _ := Algo.UpperBound(v2, 3)
Expect(v2[idx]).To(Equal(4))
idx, _ = Algo.UpperBound(v2, 2)
Expect(v2[idx]).To(Equal(3))
})
})
Context("BinarySearch", func() {
It("found", func() {
Expect(Algo.BinarySearch(v1, 4)).To(BeTrue())
})
It("Not found", func() {
Expect(Algo.BinarySearch(v1, 0)).To(BeFalse())
})
})
It("EqualRange", func() {
from, to := Algo.EqualRange(v1, 4)
Expect(v1[from]).To(Equal(4))
Expect(v1[to]).To(Equal(8))
from, _ = Algo.EqualRange(v1, 0)
Expect(from).To(Equal(0))
from, to = Algo.EqualRange(v1, 17)
Expect(from).To(Equal(len(v1)))
Expect(to).To(Equal(len(v1)))
})
})
Describe("Sorting", func() {
var sorted []int
var v []int
var less func(int, int) bool
BeforeEach(func() {
sorted = []int{1, 2, 3, 4}
v = []int{190, 20, 4, 4, 10}
less = func(a int, b int) bool {
return a < b
}
})
It("isSorted", func() {
Expect(Algo.IsSorted(sorted, less)).To(BeTrue())
Expect(Algo.IsSorted(v, less)).To(BeFalse())
})
It("isSortedUntil", func() {
Expect(Algo.IsSortedUntil(sorted, less)).To(Equal(len(sorted)))
Expect(Algo.IsSortedUntil(v, less)).To(Equal(1))
})
It("Sort", func() {
sortedV := []int{4, 4, 10, 20, 190}
Algo.Sort(v, less)
Expect(v).To(Equal(sortedV))
unsorted := []int{3, 4, 2, 1}
Algo.Sort(unsorted, less)
Expect(unsorted).To(Equal(sorted))
})
It("StableSort", func() {
pairLess := func(x *Algo.Pair, y *Algo.Pair) bool {
return x.A < y.A
}
Pairs := []Algo.Pair{
{8, 20},
{10, 9},
{8, 10},
{20, 10},
{10, 11},
{10, 10},
}
Algo.StableSort(Pairs, pairLess)
Expect(Pairs[0]).To(Equal(Algo.Pair{8, 20}))
Expect(Pairs[2]).To(Equal(Algo.Pair{10, 9}))
Expect(Pairs[3]).To(Equal(Algo.Pair{10, 11}))
Expect(Pairs[5]).To(Equal(Algo.Pair{20, 10}))
})
It("PartialSort", func() {
Algo.PartialSort(v, 2)
Expect(v[0]).To(Equal(4))
Expect(v[1]).To(Equal(4))
})
Context("PartialSortCopy", func() {
It("len(out) >= len(in)", func() {
out := []int{0, 0}
Algo.PartialSortCopy(v, 2, out)
Expect(out).To(Equal([]int{20, 190}))
})
It("len(out) < len(in)", func() {
out := []int{0, 0}
Algo.PartialSortCopy(v, 3, out)
Expect(out).To(Equal([]int{4, 20}))
})
})
It("NthElement", func() {
Algo.NthElement(v, 3)
Expect(v[2]).To(Equal(10))
Expect(v[1] <= v[3]).To(BeTrue())
Algo.NthElement(v, 2)
Expect(v[1]).To(Equal(4))
Expect(v[0] <= v[2]).To(BeTrue())
Algo.NthElement(v, 4)
Expect(v[3]).To(Equal(20))
Expect(v[2] <= v[4]).To(BeTrue())
})
})
Describe("Set", func() {
var a []int
var b []int
BeforeEach(func() {
a = []int{1, 2, 3, 4}
b = []int{3, 4, 5, 6}
})
Context("Merge", func() {
It("empty sets", func() {
empty := []int{}
Expect(Algo.Merge(a, empty)).To(Equal(a))
Expect(Algo.Merge(empty, a)).To(Equal(a))
Expect(Algo.Merge(empty, empty)).To(Equal(empty))
})
It("non-empty sets", func() {
ab := []int{1, 2, 3, 3, 4, 4, 5, 6}
Expect(Algo.Merge(a, b)).To(Equal(ab))
})
})
It("InplaceMerge", func() {
pairLess := func(x *Algo.Pair, y *Algo.Pair) bool {
return x.A < y.A
}
pairs := []Algo.Pair{
{8, 20},
{8, 10},
{10, 9},
{10, 11},
{10, 10},
{20, 10},
}
merged := []Algo.Pair{
{8, 20},
{8, 10},
{10, 9},
{10, 11},
{10, 10},
{20, 10},
}
Algo.InplaceMerge(pairs, 3, pairLess)
Expect(pairs).To(Equal(merged))
})
It("Includes", func() {
Expect(Algo.Includes(a, b)).To(BeFalse())
c := []int{1, 2}
Expect(Algo.Includes(a, c)).To(BeTrue())
d := []int{1, 2, 3, 4, 5}
Expect(Algo.Includes(a, d)).To(BeFalse())
Expect(Algo.Includes(a, a)).To(BeTrue())
})
It("SetDifference", func() {
c := []int{1, 2}
Expect(Algo.SetDifference(a, b)).To(Equal(c))
d := []int{5, 6}
Expect(Algo.SetDifference(b, a)).To(Equal(d))
})
It("SetIntersection", func() {
c := []int{3, 4}
Expect(Algo.SetIntersection(a, b)).To(Equal(c))
})
It("SetSymmetricDifference", func() {
c := []int{1, 2, 5, 6}
Expect(Algo.SetSymmetricDifference(a, b)).To(Equal(c))
})
It("SetUnion", func() {
c := []int{1, 2, 3, 4, 5, 6}
Expect(Algo.SetUnion(a, b)).To(Equal(c))
})
})
// MinMax exercises ordering helpers: scalar min/max, extreme-element
// lookup, lexicographic comparison, and permutation stepping.
Describe("MinMax", func() {
	var v []int
	BeforeEach(func() {
		v = []int{1, 2, 4, 5, -1}
	})
	It("Max", func() {
		Expect(Algo.Max(-1, 1)).To(Equal(1))
	})
	It("Min", func() {
		Expect(Algo.Min(-1, 1)).To(Equal(-1))
	})
	It("MaxElement", func() {
		// MaxElement returns the index of the largest element.
		Expect(v[Algo.MaxElement(v)]).To(Equal(5))
	})
	It("MinElement", func() {
		Expect(v[Algo.MinElement(v)]).To(Equal(-1))
	})
	It("MinMaxElement", func() {
		// Both extreme indices are returned in a single pass.
		minIdx, maxIdx := Algo.MinMaxElement(v)
		Expect(v[minIdx]).To(Equal(-1))
		Expect(v[maxIdx]).To(Equal(5))
	})
	Context("LexicographicalCompare", func() {
		It("different length", func() {
			a := "abc"
			b := "abcd"
			c := "abcde"
			// A proper prefix compares strictly less than its extension.
			Expect(Algo.LexicographicalCompare(a, b)).To(BeTrue())
			Expect(Algo.LexicographicalCompare(b, c)).To(BeTrue())
			Expect(Algo.LexicographicalCompare(c, b)).To(BeFalse())
		})
		It("same length", func() {
			a := "abcd"
			b := "abce"
			// Equal strings are not strictly less.
			Expect(Algo.LexicographicalCompare(a, a)).To(BeFalse())
			Expect(Algo.LexicographicalCompare(a, b)).To(BeTrue())
		})
	})
	It("isPermutation", func() {
		a := []int{1, 2, 5, 4, -1}
		b := []int{1, 2, 5, 5, -1}
		Expect(Algo.IsPermutation(v, a)).To(BeTrue())
		Expect(Algo.IsPermutation(a, b)).To(BeFalse())
	})
	Context("NextPermutation", func() {
		It("No permutation", func() {
			empty := []int{}
			// A fully descending sequence is the last permutation; no successor.
			noPermutation := []int{5, 4, 2, 1, -1}
			Expect(Algo.NextPermutation(empty)).To(BeFalse())
			Expect(Algo.NextPermutation(noPermutation)).To(BeFalse())
		})
		It("permutation", func() {
			Expect(Algo.NextPermutation(v)).To(BeTrue())
			Expect(v).To(Equal([]int{1, 2, 5, -1, 4}))
		})
	})
	It("PrevPermutation", func() {
		// Prev followed by Next must restore the original sequence.
		copyV := append([]int{}, v...)
		Expect(Algo.PrevPermutation(v)).To(BeTrue())
		Algo.NextPermutation(v)
		Expect(v).To(Equal(copyV))
	})
})
// Numeric exercises arithmetic sequence helpers.
Describe("Numeric", func() {
	var v []int
	BeforeEach(func() {
		v = []int{1, 2, 3, 4}
	})
	It("Itoa", func() {
		// Itoa fills v with consecutive values starting at 3.
		res := []int{3, 4, 5, 6}
		Algo.Itoa(v, 3)
		Expect(v).To(Equal(res))
	})
	It("Accumulate", func() {
		// Folding with multiplication over {1,2,3,4} yields 24.
		Expect(Algo.Accumulate(v, 1, func(a int, b int) int {
			return a * b
		})).To(Equal(24))
	})
	It("InnerProduct", func() {
		// 1*1 + 2*2 + 3*3 + 4*4 = 30.
		Expect(Algo.InnerProduct(v, v, 0)).To(Equal(30))
	})
	It("AdjacentDifference", func() {
		Expect(Algo.AdjacentDifference(v)).To(Equal([]int{1, 1, 1, 1}))
		// Degenerate inputs: empty and single-element slices pass through.
		Expect(Algo.AdjacentDifference([]int{})).To(Equal([]int{}))
		Expect(Algo.AdjacentDifference([]int{1})).To(Equal([]int{1}))
	})
	It("PartialSum", func() {
		Expect(Algo.PartialSum(v)).To(Equal([]int{1, 3, 6, 10}))
		Expect(Algo.PartialSum([]int{})).To(Equal([]int{}))
		Expect(Algo.PartialSum([]int{1})).To(Equal([]int{1}))
	})
})
// Partitioning exercises predicate-based partitioning helpers.
Describe("Partitioning", func() {
	var v []int
	var lessThan20 func(int) bool
	var notLessThan20 func(int) bool
	BeforeEach(func() {
		v = []int{10, 20, 5, 4, 3, 190}
		lessThan20 = func(x int) bool {
			return x < 20
		}
		notLessThan20 = func(x int) bool {
			return x >= 20
		}
	})
	It("IsPartitioned", func() {
		Expect(Algo.IsPartitioned(v, func(x int) bool {
			return x < 25
		})).To(BeTrue())
		Expect(Algo.IsPartitioned(v, lessThan20)).To(BeFalse())
	})
	It("Partition", func() {
		// Partition returns the index of the first element failing the predicate.
		falseIdx := Algo.Partition(v, lessThan20)
		Expect(Algo.AllOf(v[:falseIdx], lessThan20)).To(BeTrue())
		Expect(Algo.AllOf(v[falseIdx:], notLessThan20)).To(BeTrue())
	})
	It("PartitionCopy", func() {
		truePart, falsePart := Algo.PartitionCopy(v, lessThan20)
		Expect(Algo.AllOf(truePart, lessThan20)).To(BeTrue())
		Expect(Algo.AllOf(falsePart, notLessThan20)).To(BeTrue())
	})
	It("StablePartition", func() {
		// StablePartition keeps relative order within each group.
		even := func(x int) bool {
			return x%2 == 0
		}
		before := []int{20, 10, 5, 4, 3, 1, 2}
		after := []int{20, 10, 4, 2, 5, 3, 1}
		Algo.StablePartition(before, even)
		Expect(before).To(Equal(after))
		before2 := []int{10, 2, 5, 4, 1, 3, 20}
		after2 := []int{10, 2, 4, 20, 5, 1, 3}
		Algo.StablePartition(before2, even)
		Expect(before2).To(Equal(after2))
	})
	It("PartitionPoint", func() {
		Expect(Algo.PartitionPoint(v, lessThan20)).To(Equal(1))
	})
})
})
|
package models
import (
"fmt"
"github.com/devplayg/ipas-mcs/libs"
"github.com/astaxie/beego/orm"
"github.com/devplayg/ipas-mcs/objs"
"time"
"github.com/devplayg/ipas-server"
)
// GetIpasStatusLog fetches a page of rows from the log_ipas_status table,
// applying the member's visibility rules and the filter's time range,
// organisation, group, tag and equipment criteria.
// It returns the page of rows, the total row count (only populated when
// fast paging is off), and any query error.
func GetIpasStatusLog(filter *objs.IpasFilter, member *objs.Member) ([]objs.IpasLog, int64, error) {
	var where string
	var rows []objs.IpasLog
	// Query arguments, appended in the same order as the placeholders.
	args := make([]interface{}, 0)
	// Time range: widen the inputs to second precision and convert from the
	// member's local time zone to UTC for the query.
	// NOTE(review): ParseInLocation errors are discarded; a malformed date
	// silently becomes the zero time — consider returning the error.
	startDate, _ := time.ParseInLocation(ipasserver.DateDefault, filter.StartDate+":00", member.Location)
	endDate, _ := time.ParseInLocation(ipasserver.DateDefault, filter.EndDate+":59", member.Location)
	args = append(args, startDate.UTC().Format(ipasserver.DateDefault), endDate.UTC().Format(ipasserver.DateDefault))
	// Non-administrators may only see groups assigned to their account.
	if member.Position < objs.Administrator {
		where += " and group_id in (select asset_id from mbr_asset where member_id = ?)"
		args = append(args, member.MemberId)
	}
	// Optional organisation filter (integer IDs joined inline).
	if len(filter.OrgId) > 0 {
		where += fmt.Sprintf(" and org_id in (%s)", libs.JoinInt(filter.OrgId, ","))
	}
	// Optional group filter.
	if len(filter.GroupId) > 0 {
		where += fmt.Sprintf(" and group_id in (%s)", libs.JoinInt(filter.GroupId, ","))
	}
	// Equipment-tag substring search.
	if len(filter.TagPattern) > 0 {
		where += " and (equip_id like ?)"
		cond := "%" + filter.TagPattern + "%"
		args = append(args, cond)
	}
	// Exact equipment-ID match.
	if len(filter.EquipId) > 0 {
		where += " and equip_id = ?"
		cond := filter.EquipId
		args = append(args, cond)
	}
	// Paging mode (fast/normal): normal paging asks MySQL to compute the
	// total result-set size via SQL_CALC_FOUND_ROWS.
	if filter.FastPaging == "off" {
		filter.FoundRows = "SQL_CALC_FOUND_ROWS"
	}
	// Assemble the final statement.
	// NOTE(review): filter.Sort and filter.Order are interpolated directly
	// into ORDER BY — confirm they are whitelisted upstream, otherwise this
	// is an SQL-injection vector.
	query := `
SELECT %s date, org_id, group_id, session_id, equip_id, latitude, longitude, speed
, snr, usim, ip, recv_date
from log_ipas_status
where date >= ? and date <= ? %s
order by %s %s
limit ?, ?
`
	query = fmt.Sprintf(query, filter.FoundRows, where, filter.Sort, filter.Order)
	args = append(args, filter.Offset, filter.Limit)
	o := orm.NewOrm()
	// NOTE(review): the transaction is committed via defer even when the
	// query fails; for a read-only SELECT it may be unnecessary altogether.
	o.Begin()
	defer o.Commit()
	total, err := o.Raw(query, args).QueryRows(&rows)
	// With normal paging, fetch the total computed by SQL_CALC_FOUND_ROWS.
	if filter.FastPaging == "off" {
		if RegexFoundRows.MatchString(query) {
			dbResult := objs.NewDbResult()
			o.Raw("select FOUND_ROWS() total").QueryRow(dbResult)
			total = dbResult.Total
		}
	}
	return rows, total, err
}
|
package leetcodego
// sortedListToBST converts a sorted singly linked list into a
// height-balanced binary search tree.
//
// Bug fix: the original implementation severed the input list in place
// (pre.Next = nil) at every recursion level, destroying the caller's list.
// This version copies the values into a slice first, leaving the list
// intact, and builds the tree in O(n) instead of O(n log n).
func sortedListToBST(head *ListNode) *TreeNode {
	// Collect the sorted values without mutating the list.
	var vals []int
	for n := head; n != nil; n = n.Next {
		vals = append(vals, n.Val)
	}
	return sortedSliceToBST(vals)
}

// sortedSliceToBST builds a height-balanced BST from a sorted slice,
// choosing the upper-middle element as the root — the same midpoint the
// original slow/fast-pointer walk selected, so shapes are identical.
func sortedSliceToBST(vals []int) *TreeNode {
	if len(vals) == 0 {
		return nil
	}
	mid := len(vals) / 2
	return &TreeNode{
		Val:   vals[mid],
		Left:  sortedSliceToBST(vals[:mid]),
		Right: sortedSliceToBST(vals[mid+1:]),
	}
}
|
package pkg
import (
"bytes"
"context"
"encoding/json"
"io/ioutil"
"path"
"github.com/allegro/bigcache"
kit_log "github.com/go-kit/kit/log"
weasel "github.com/revas-hq/weasel/pkg"
)
// cache is a weasel.Service middleware that memoizes objects from the
// wrapped service in an in-memory bigcache store.
type cache struct {
	Logger kit_log.Logger     // hit/miss and error logging
	Cache  *bigcache.BigCache // backing store for bodies and ":meta" entries
	Next   weasel.Service     // upstream service consulted on cache miss
}
// NewCache wraps next with a read-through cache backed by the given
// bigcache instance. Hits, misses, and errors are reported through logger.
//
// Parameter names were lowercased to follow Go convention; capitalized
// locals read like exported identifiers.
func NewCache(logger kit_log.Logger, store *bigcache.BigCache, next weasel.Service) weasel.Service {
	return &cache{
		Logger: logger,
		Cache:  store,
		Next:   next,
	}
}
// getCache loads a cached object: metadata is stored under "<name>:meta"
// and the raw body under "<name>". On success the object's fields are
// populated and its Body is replaced with a reader over the cached bytes.
func (s *cache) getCache(ctx context.Context, name string, object *weasel.Object) error {
	meta, err := s.Cache.Get(name + ":meta")
	if err != nil {
		return err
	}
	body, err := s.Cache.Get(name)
	if err != nil {
		return err
	}
	// object is already a pointer; the original passed &object (a
	// **weasel.Object), which works only because encoding/json chases the
	// extra indirection. Pass the pointer directly.
	err = json.Unmarshal(meta, object)
	if err != nil {
		return err
	}
	object.Body = bytes.NewReader(body)
	return nil
}
// setCache stores an object's metadata under "<name>:meta" and its body
// under "<name>", unless the object opts out of caching. The body reader
// is consumed and replaced with a fresh reader over the same bytes.
func (s *cache) setCache(ctx context.Context, name string, object *weasel.Object) error {
	// Objects that opt out of caching are passed through untouched.
	if object.CacheControl == weasel.DisableCache {
		return nil
	}
	// Serialize the object metadata for the ":meta" entry.
	meta, marshalErr := json.Marshal(object)
	if marshalErr != nil {
		return marshalErr
	}
	// Drain the body so it can be stored and later re-served.
	payload, readErr := ioutil.ReadAll(object.Body)
	if readErr != nil {
		return readErr
	}
	if storeErr := s.Cache.Set(name+":meta", meta); storeErr != nil {
		return storeErr
	}
	if storeErr := s.Cache.Set(name, payload); storeErr != nil {
		return storeErr
	}
	// The original reader was consumed above; hand callers a fresh one.
	object.Body = bytes.NewReader(payload)
	return nil
}
// GetObject serves the object for host/p from the cache when possible,
// otherwise fetches it from the wrapped service and caches the result.
func (s *cache) GetObject(ctx context.Context, host string, p string, object *weasel.Object) error {
	key := path.Join(host, p)
	// Serve straight from the cache when possible.
	if hitErr := s.getCache(ctx, key, object); hitErr == nil {
		_ = s.Logger.Log("object", key, "cache", "hit")
		return nil
	}
	_ = s.Logger.Log("object", key, "cache", "miss")
	// Fall through to the wrapped service on a miss.
	if err := s.Next.GetObject(ctx, host, p, object); err != nil {
		_ = s.Logger.Log("object", key, "err", err)
		return err
	}
	// Populate the cache for subsequent requests.
	if err := s.setCache(ctx, key, object); err != nil {
		_ = s.Logger.Log("object", key, "err", err)
		return err
	}
	return nil
}
|
package main
import "syscall"
import "os/exec"
import "os"
// main replaces the current process image with `ls -l -h` via syscall.Exec.
func main() {
	// Bug fix: the LookPath error was discarded; if "ls" were missing,
	// Exec would be invoked with an empty path and fail confusingly.
	binary, err := exec.LookPath("ls")
	if err != nil {
		panic(err)
	}
	// actually, the first argument of args will always be ignored
	// because Exec think the first argument as the name of program
	args := []string{"xxxx", "-l", "-h"}
	envs := os.Environ()
	// On success Exec never returns; reaching the error check means failure.
	if err := syscall.Exec(binary, args, envs); err != nil {
		panic(err)
	}
}
|
package main
import (
json2 "encoding/json"
"fmt"
"log"
"os"
"strings"
"syscall"
"time"
"github.com/fatih/color"
"github.com/meilisearch/meilisearch-go"
"golang.org/x/term"
)
// MeiliSearch will index the sentences
// on a given MeiliSearch instance.
type MeiliSearch struct {
	client meilisearch.ClientInterface // built lazily by Init
	// host is the raw instance address; Init prefixes "http://" when no
	// scheme is present.
	host, APIKey string
	// APIKeyRequired triggers an interactive key prompt during Init.
	APIKeyRequired bool
}
// totalSentencesToIndexByRow defines how many sentences are sent to
// MeiliSearch in a single bulk request.
const totalSentencesToIndexByRow = 10000
// Init prepares the MeiliSearch client: it normalizes the host URL,
// optionally prompts for an API key, ensures the index exists, and
// configures the searchable attributes.
func (m *MeiliSearch) Init() {
	// Normalize the host so it always carries an explicit scheme.
	host := m.host
	if !strings.HasPrefix(host, "http://") && !strings.HasPrefix(host, "https://") {
		host = "http://" + host
	}
	// Prompt for credentials when the instance requires them.
	if m.APIKeyRequired {
		m.askAPIKey()
	}
	// Build the client used for all subsequent API calls.
	m.client = meilisearch.NewClient(meilisearch.Config{
		Host:   host,
		APIKey: m.APIKey,
	})
	// Create the index on first use.
	if _, err := m.client.Indexes().Get(IndexName); err != nil {
		m.createIndex()
	}
	m.setSearchableAttributes()
	// Report which instance we are indexing against.
	fmt.Printf("Indexing on MeiliSearch on the host \"%s\".\n", host)
}
// createIndex will create the Tatoeba index for Meilisearch.
func (m MeiliSearch) createIndex() {
	// Create an index if the index does not exist.
	// NOTE(review): strings.Title is deprecated since Go 1.18; it is only
	// used here for the cosmetic display name of the index.
	_, err := m.client.Indexes().Create(meilisearch.CreateIndexRequest{
		Name: strings.Title(IndexName),
		UID:  IndexName,
	})
	if err != nil {
		log.Fatal(err)
	}
}
// setSearchableAttributes will set the searchable attributes.
//
// Bug fix: the update error was previously discarded, so a misconfigured
// index failed silently; it is now fatal, consistent with createIndex.
func (m MeiliSearch) setSearchableAttributes() {
	searchableAttributes := []string{"id", "language", "content", "username"}
	if _, err := m.client.Settings(IndexName).UpdateSearchableAttributes(searchableAttributes); err != nil {
		log.Fatal(err)
	}
}
// Index pushes all sentences to the MeiliSearch instance in bulks of
// totalSentencesToIndexByRow documents, waiting for each bulk's update
// to be fully processed before continuing.
func (m MeiliSearch) Index(sentences map[string]Sentence) {
	// Store the total of sentences.
	totalSentences := len(sentences)
	// Get the index documents from the client.
	index := m.client.Documents(IndexName)
	// i counts processed sentences (1-based) so the final partial bulk flushes.
	i := 1
	// documents accumulates the sentences of the current bulk.
	var documents []map[string]interface{}
	for _, sentence := range sentences {
		// Round-trip through JSON to convert the struct into a generic map.
		var sentenceInterface map[string]interface{}
		sentenceAsJSON, _ := json2.Marshal(sentence)
		// Bug fix: the Unmarshal error was previously ignored, which could
		// silently queue an empty document.
		if err := json2.Unmarshal(sentenceAsJSON, &sentenceInterface); err != nil {
			log.Fatal(err)
		}
		documents = append(documents, sentenceInterface)
		// Flush a full bulk, or the final partial one.
		if len(documents) == totalSentencesToIndexByRow || i == totalSentences {
			// Abort if the server stopped responding.
			if err := m.client.Health().Get(); err != nil {
				color.Red("\nThe server isn't responding anymore... Can't index sentences...")
				// Bug fix: exit with a non-zero status; the original exited
				// with 0, signalling success to callers and scripts.
				os.Exit(1)
			}
			// Add documents.
			addResponse, err := index.AddOrReplace(documents)
			if err != nil {
				log.Fatal(err)
			}
			// Reset the bulk buffer.
			documents = make([]map[string]interface{}, 0)
			// Log progress to the terminal.
			fmt.Printf("\rIndexing sentences %d of %d", i, totalSentences)
			// Poll until the bulk update has been processed.
			for {
				// Wait 2 seconds between every update call.
				time.Sleep(2 * time.Second)
				response, _ := m.client.Updates(IndexName).Get(addResponse.UpdateID)
				if response.Status == meilisearch.UpdateStatusProcessed {
					break
				}
			}
		}
		// Increment the counter.
		i++
	}
}
// askAPIKey prompts on the terminal for the API key, reads it without
// echoing, and stores it on the receiver.
func (m *MeiliSearch) askAPIKey() {
	fmt.Print("Please enter the API key: ")
	// Read the key without echoing it to the terminal.
	key, err := term.ReadPassword(int(syscall.Stdin))
	if err != nil {
		log.Fatal(err)
	}
	m.APIKey = string(key)
	fmt.Println()
}
|
package fileloader
import "testing"
// TestLoad verifies that each bundled template file can be loaded.
func TestLoad(t *testing.T) {
	tests := []string{
		"template/aws-api-lambda-golang/.gitignore.tmpl",
		"template/aws-api-lambda-golang/main.go.tmpl",
		"template/aws-api-lambda-golang/Makefile.tmpl",
	}
	for _, tt := range tests {
		tt := tt // capture for the subtest closure (pre-Go 1.22 semantics)
		// Bug fix: every subtest was previously named the constant
		// "TestLoad", making failures indistinguishable; name each subtest
		// after the template it loads.
		t.Run(tt, func(t *testing.T) {
			if _, err := Load(tt); err != nil {
				t.Errorf("Load(%q) error = %v", tt, err)
			}
		})
	}
}
|
package main
import (
"errors"
"fmt"
)
// customError constructs a new error value and prints it.
func customError() {
	fmt.Println(errors.New("bad emotion"))
}
// initPanic prints a marker and then panics.
//
// Bug fix: the trailing fmt.Println("end") in the original was unreachable
// (panic never returns; go vet flags this), so it has been removed.
func initPanic() {
	fmt.Println("start")
	panic("crash")
}
|
package myip
import (
"errors"
"github.com/qjpcpu/common/web"
"github.com/qjpcpu/common/web/json"
"strings"
)
// ip_address caches the most recently resolved public IP so repeated
// lookups avoid extra network calls. Not goroutine-safe.
var ip_address string
// ResolvePublicIP returns the cached public IP address, resolving it over
// the network on first use.
func ResolvePublicIP() string {
	if ip_address != "" {
		return ip_address
	}
	ip_address = resolvePublicIP()
	return ip_address
}
func RefreshIP() string {
ip_address = ResolvePublicIP()
return ip_address
}
// resolvePublicIP tries each upstream lookup service in order and falls
// back to the loopback address when every lookup fails.
func resolvePublicIP() string {
	ip, err := getIPFromTaobao()
	if err == nil {
		return ip
	}
	ip, err = getIPFromIPCN()
	if err == nil {
		return ip
	}
	return "127.0.0.1"
}
// getIPFromTaobao queries Taobao's IP info service and extracts the
// caller's public IP from the JSON response.
func getIPFromTaobao() (string, error) {
	var res struct {
		Data struct {
			Ip string `json:"ip"`
		} `json:"data"`
	}
	if err := json.Get("http://ip.taobao.com/service/getIpInfo2.php?ip=myip", &res); err != nil {
		return "", err
	}
	// An empty ip field means the service answered but gave no result.
	if res.Data.Ip == "" {
		return "", errors.New("can't get ip via taobao")
	}
	return res.Data.Ip, nil
}
// getIPFromIPCN scrapes the public IP from www.ip.cn, which expects a
// curl-like User-Agent header.
func getIPFromIPCN() (string, error) {
	c := web.NewClient()
	c.SetHeaders(web.Header{"User-Agent": "curl/7.54.0"})
	body, err := c.Get("http://www.ip.cn")
	if err != nil {
		return "", err
	}
	// Strip the fixed prefix and keep the first space-separated token.
	trimmed := strings.TrimPrefix(string(body), `当前 IP:`)
	return strings.Split(trimmed, " ")[0], nil
}
|
package cli
const (
	// cliCmdExecFinishDelaySeconds is the delay, in seconds, applied after
	// a CLI command finishes executing.
	cliCmdExecFinishDelaySeconds = 5
)
|
package main
import (
"fmt"
"testing"
)
// Test_checkStraightLine exercises checkStraightLine against fixed
// fixtures, running each case as a parallel subtest.
func Test_checkStraightLine(t *testing.T) {
	cases := []struct {
		input    [][]int
		expected bool
	}{
		{[][]int{{1, 2}, {2, 3}, {3, 4}, {4, 5}, {6, 7}}, true},
		{[][]int{{1, 1}, {2, 2}, {3, 4}, {4, 5}, {5, 6}, {7, 7}}, false},
	}
	for _, tc := range cases {
		tc := tc // per-iteration copy for the parallel closure
		t.Run(fmt.Sprintf("input %v", tc.input), func(t *testing.T) {
			t.Parallel()
			if got := checkStraightLine(tc.input); got != tc.expected {
				t.Errorf("expected: %v <=> actual: %v", tc.expected, got)
			}
		})
	}
}
|
/*
* Copyright 1999-2018 Alibaba Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package core
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"testing"
"github.com/alibaba/Dragonfly/dfget/config"
"github.com/go-check/check"
"github.com/sirupsen/logrus"
)
// Test hooks gocheck into the standard "go test" runner.
func Test(t *testing.T) {
	check.TestingT(t)
}
// CoreTestSuite groups dfget core tests around a shared temporary working
// directory.
type CoreTestSuite struct {
	workHome string // per-suite scratch directory under /tmp
}
// init registers the suite with gocheck so Test picks it up.
func init() {
	check.Suite(&CoreTestSuite{})
}
// SetUpSuite creates the suite's temporary working directory.
// NOTE(review): the TempDir error is discarded; on failure workHome stays
// "" and TearDownSuite skips removal.
func (s *CoreTestSuite) SetUpSuite(c *check.C) {
	s.workHome, _ = ioutil.TempDir("/tmp", "dfget-CoreTestSuite-")
}
// TearDownSuite removes the temporary working directory created in
// SetUpSuite, logging the failure if cleanup does not succeed.
func (s *CoreTestSuite) TearDownSuite(c *check.C) {
	if s.workHome != "" {
		if err := os.RemoveAll(s.workHome); err != nil {
			// Bug fix: the original message dropped the underlying error and
			// the trailing newline.
			fmt.Printf("remove path:%s error:%v\n", s.workHome, err)
		}
	}
}
// TestPrepare runs prepare against a freshly created context and prints
// the captured log output; it asserts nothing beyond not crashing.
func (s *CoreTestSuite) TestPrepare(c *check.C) {
	buf := &bytes.Buffer{}
	ctx := s.createContext(buf)
	ctx.Output = path.Join(s.workHome, "test.output")
	err := prepare(ctx)
	fmt.Printf("%s\nerror:%v", buf.String(), err)
}
// createContext builds a dfget config.Context rooted at the suite's temp
// working directory, routing all log output to writer.
func (s *CoreTestSuite) createContext(writer io.Writer) *config.Context {
	// Default to an in-memory sink so log output never hits the terminal.
	if writer == nil {
		writer = &bytes.Buffer{}
	}
	logrus.StandardLogger().Out = writer
	cfg := config.NewContext()
	cfg.WorkHome = s.workHome
	cfg.MetaPath = path.Join(cfg.WorkHome, "meta", "host.meta")
	cfg.SystemDataDir = path.Join(cfg.WorkHome, "data")
	cfg.ClientLogger = logrus.StandardLogger()
	cfg.ServerLogger = logrus.StandardLogger()
	return cfg
}
|
package 排列组合问题
// 回溯 + 外部变量 解决求数组组合问题 (数组中元素无重复且大于0,可重复选取)
// combinationSequence accumulates every combination found by the current
// combinationSum call. Backtracking with this package-level accumulator
// solves the combination problem (candidates are distinct, positive, and
// may be picked repeatedly).
var combinationSequence [][]int

// combinationSum returns every combination of candidates (with repetition
// allowed) whose elements sum exactly to target.
func combinationSum(candidates []int, target int) [][]int {
	// Pre-size generously so appends rarely trigger a reallocation.
	combinationSequence = make([][]int, 0, 100)
	combinationSumExec(candidates, target, make([]int, 0, 100))
	return combinationSequence
}

// combinationSumExec walks the search tree: sequence holds the picks made
// so far and target is the remaining sum to reach.
func combinationSumExec(candidates []int, target int, sequence []int) {
	switch {
	case target < 0:
		// Overshot: this branch cannot produce a combination.
		return
	case target == 0:
		// Exact hit: record a deep copy so later backtracking cannot
		// mutate the stored combination.
		combinationSequence = append(combinationSequence, newSlice(sequence))
		return
	}
	for idx := range candidates {
		// Recurse on candidates[idx:] so earlier elements cannot be
		// revisited (which would produce duplicate combinations) while the
		// current element itself stays available for repeated selection.
		// Using [idx+1:] instead would limit each element to a single pick.
		combinationSumExec(candidates[idx:], target-candidates[idx], append(sequence, candidates[idx]))
	}
}

// newSlice returns a deep copy of oldSlice.
func newSlice(oldSlice []int) []int {
	dup := make([]int, len(oldSlice))
	copy(dup, oldSlice)
	return dup
}
/*
题目链接:
https://leetcode-cn.com/problems/combination-sum/ 组合求和
https://leetcode-cn.com/problems/coin-change-2/ 零钱兑换2
*/
/*
总结
1. 切片可以初始化容量,这样可以避免当切片长度大于容量时,切片扩容导致的额外代价。
2. 该题和零钱兑换2几乎一样,只是零钱兑换2求的是种数,而这里需要输出所有的组合。 (零钱兑换那使用了动态规划,这里使用了回溯)
3. 该题题意是: 在一个可重复选取、元素大于0的数组中,选出一些数,使它们的值等于target,输出所有的组合。
*/
|
package main
import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)
// main downloads a public English word list and prints a sample of the
// parsed words.
func main() {
	resp, err := http.Get("https://raw.githubusercontent.com/dwyl/english-words/master/words_alpha.txt")
	if err != nil {
		// Bug fix: the original kept going after a failed Get and would
		// dereference a nil response body.
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	// Read the whole word list into memory.
	body, err1 := ioutil.ReadAll(resp.Body)
	if err1 != nil {
		// Bug fix: the original re-checked the Get error (`err`) here
		// instead of the ReadAll error, silently ignoring read failures.
		fmt.Println(err1)
		return
	}
	text := string(body)
	fmt.Println(len(text))
	// Split into words on newlines, dropping carriage returns. The original
	// character loop appended every byte individually and pushed newline
	// bytes twice (once from the ==10 branch and again from the !=13 else
	// branch), so the slice never contained whole words.
	var words []string
	for _, w := range strings.Split(text, "\n") {
		words = append(words, strings.TrimRight(w, "\r"))
	}
	// Print the first 20 words (or fewer if the list is short).
	for j := 0; j < 20 && j < len(words); j++ {
		fmt.Println(j, " --", words[j])
	}
}
/*
for i:=0;i<len(aa);i++ {
if aa[i]==13{
continue
}else if aa[i]==10{
fmt.Println("")
continue
}else{
fmt.Printf("%v",string(aa[i]))
}
}
*/ |
package scommon
import (
"fmt"
"runtime"
"github.com/davecgh/go-spew/spew"
)
// PrintPanicStack recovers from an in-flight panic, logging every frame of
// the call stack plus a dump of any extra values supplied by the caller.
// It must be invoked via defer: recover only has effect inside a deferred
// function.
// NOTE(review): the recovered panic value x itself is not logged (the
// original panicLog call is commented out); consider including it.
func PrintPanicStack(extras ...interface{}) {
	if x := recover(); x != nil {
		//panicLog(fmt.Sprintf("%v", x))
		// Walk the stack frame by frame until Caller reports exhaustion.
		// (Caller's first result is a program counter; the original named
		// it funcName, which was misleading.)
		for i := 0; ; i++ {
			pc, file, line, ok := runtime.Caller(i)
			if !ok {
				break
			}
			msg := fmt.Sprintf("PrintPanicStack. [func]: %s, [file]: %s, [line]: %d\n", runtime.FuncForPC(pc).Name(), file, line)
			LogError(msg)
		}
		// Dump each extra value for post-mortem inspection.
		// Bug fix: the label read "EXRAS" (typo) in the original output.
		for k := range extras {
			msg := fmt.Sprintf("EXTRAS#%v DATA:%v\n", k, spew.Sdump(extras[k]))
			LogError(msg)
		}
	}
}
|
package exoscale
import (
"context"
"testing"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
)
// TestNodeAddresses checks that the instance's external IP is reported for
// a node looked up by name.
func TestNodeAddresses(t *testing.T) {
	p, ts := newMockInstanceAPI()
	defer ts.Close()
	instance := &instances{p: p}
	addrs, err := instance.NodeAddresses(context.Background(), types.NodeName(testInstanceName))
	require.NoError(t, err)
	require.NotNil(t, addrs)
	require.Equal(t, []v1.NodeAddress{{Type: v1.NodeExternalIP, Address: testInstanceIP}}, addrs)
}
// TestNodeAddressesByProviderID checks that the instance's external IP is
// reported for a node looked up by provider ID.
func TestNodeAddressesByProviderID(t *testing.T) {
	p, ts := newMockInstanceAPI()
	defer ts.Close()
	instance := &instances{p: p}
	addrs, err := instance.NodeAddressesByProviderID(context.Background(), testInstanceProviderID)
	require.NoError(t, err)
	require.NotNil(t, addrs)
	require.Equal(t, []v1.NodeAddress{{Type: v1.NodeExternalIP, Address: testInstanceIP}}, addrs)
}
// TestInstanceID checks the instance ID reported for a node name.
func TestInstanceID(t *testing.T) {
	ctx := context.Background()
	p, ts := newMockInstanceAPI()
	instance := &instances{p: p}
	defer ts.Close()
	node, err := instance.InstanceID(ctx, types.NodeName(testInstanceName))
	require.NoError(t, err)
	// require.Equal takes (t, expected, actual); the original reversed the
	// arguments, mislabeling the values in failure output.
	require.Equal(t, testInstanceID, node)
}
// TestInstanceType checks the service offering reported for a node name.
func TestInstanceType(t *testing.T) {
	ctx := context.Background()
	p, ts := newMockInstanceAPI()
	instance := &instances{p: p}
	defer ts.Close()
	nodeType, err := instance.InstanceType(ctx, types.NodeName(testInstanceName))
	require.NoError(t, err)
	// require.Equal takes (t, expected, actual); the original reversed the
	// arguments, mislabeling the values in failure output.
	require.Equal(t, testInstanceServiceOffering, nodeType)
}
// TestInstanceTypeByProviderID checks the service offering reported for a
// provider ID lookup.
func TestInstanceTypeByProviderID(t *testing.T) {
	ctx := context.Background()
	p, ts := newMockInstanceAPI()
	instance := &instances{p: p}
	defer ts.Close()
	nodeType, err := instance.InstanceTypeByProviderID(ctx, testInstanceProviderID)
	require.NoError(t, err)
	// require.Equal takes (t, expected, actual); the original reversed the
	// arguments, mislabeling the values in failure output.
	require.Equal(t, testInstanceServiceOffering, nodeType)
}
// TestCurrentNodeName checks that the hostname maps to the expected
// Kubernetes node name.
func TestCurrentNodeName(t *testing.T) {
	ctx := context.Background()
	p, ts := newMockInstanceAPI()
	instance := &instances{p: p}
	defer ts.Close()
	nodeName, err := instance.CurrentNodeName(ctx, testInstanceName)
	require.NoError(t, err)
	require.NotNil(t, nodeName)
	// Expected value first; the original reversed the arguments.
	require.Equal(t, types.NodeName(testInstanceName), nodeName)
}
// TestInstanceExistsByProviderID checks both the found and not-found paths
// by swapping in a second mock API that reports no instance.
func TestInstanceExistsByProviderID(t *testing.T) {
	ctx := context.Background()
	p, ts := newMockInstanceAPI()
	instance := &instances{p: p}
	nodeExist, err := instance.InstanceExistsByProviderID(ctx, testInstanceProviderID)
	require.NoError(t, err)
	require.True(t, nodeExist)
	// Close the first mock server before standing up the not-found variant.
	ts.Close()
	p, ts = newMockInstanceAPINotFound()
	instance = &instances{p: p}
	defer ts.Close()
	nodeExist, err = instance.InstanceExistsByProviderID(ctx, "exoscale://00113bd2-d6cc-418e-831d-2d4785f6e5b6")
	require.NoError(t, err)
	require.False(t, nodeExist)
}
// TestInstanceShutdownByProviderID checks that a running instance is not
// reported as shut down.
func TestInstanceShutdownByProviderID(t *testing.T) {
	p, ts := newMockInstanceAPI()
	defer ts.Close()
	instance := &instances{p: p}
	shutdown, err := instance.InstanceShutdownByProviderID(context.Background(), testInstanceProviderID)
	require.NoError(t, err)
	require.False(t, shutdown)
}
|
package main
import (
"fmt"
"math/rand"
"sort"
"strings"
"time"
)
// modify1 receives x by value, so the assignment below only changes the
// local copy; the caller's variable is unaffected.
func modify1(x int) {
	x = 100
}
// modify2 receives a pointer and writes through it, so the caller's
// variable is set to 100.
func modify2(x *int) {
	*x = 100
}
// main demonstrates basic Go semantics: value vs. pointer parameters,
// new(), map creation and iteration order, slices of maps, word counting,
// and a rune-based palindrome check.
func main() {
	a := 10
	modify1(a) // passes a copy; a is unchanged
	fmt.Println(a)
	modify2(&a) // passes a pointer; a becomes 100
	fmt.Println(a)
	// A declared-but-unassigned pointer is nil.
	var p *int
	fmt.Println(p)
	// new(int) allocates a zeroed int and returns its address.
	var p2 = new(int)
	fmt.Println(p2)
	*p2 = 200
	fmt.Println(p2)
	fmt.Println(*p2)
	// A nil map must be made before writing to it.
	var m1 map[string]int
	m1 = make(map[string]int, 10)
	m1["理想"] = 18
	fmt.Println(m1)
	rand.Seed(time.Now().UnixNano())
	// Fill a map with 100 random scores.
	var scoreMap = make(map[string]int, 200)
	for i := 0; i < 100; i++ {
		key := fmt.Sprintf("stu%02d", i)
		value := rand.Intn(100)
		scoreMap[key] = value
	}
	fmt.Println(scoreMap)
	// Map iteration order is random; collect and sort the keys for
	// deterministic output.
	var keys = make([]string, 0, 200)
	for key := range scoreMap {
		keys = append(keys, key)
	}
	fmt.Println(keys)
	sort.Strings(keys)
	for _, key := range keys {
		fmt.Println(key, scoreMap[key])
	}
	// A slice of maps: each element must be made individually before use.
	var s1 = make([]map[int]string, 10, 10)
	s1[0] = make(map[int]string, 1)
	s1[0][80] = "周"
	s1[0][70] = "刘"
	fmt.Println(s1)
	// A map whose values are slices.
	var m2 = make(map[string][]int, 10)
	m2["北京"] = []int{10, 20, 30}
	fmt.Println(m2)
	// Count word occurrences in a sentence.
	str1 := "how do you do"
	arr1 := strings.Split(str1, " ")
	fmt.Println(arr1)
	map2 := make(map[string]int, 10)
	for _, v := range arr1 {
		if _, ok := map2[v]; !ok {
			map2[v] = 1
		} else {
			map2[v]++
		}
	}
	fmt.Println(map2)
	for key, value := range map2 {
		fmt.Println(key, value)
	}
	// Palindrome check: copy the string's characters into a []rune so
	// multi-byte characters compare correctly.
	ss := "上海自来水来自海上"
	r := make([]rune, 0)
	for _, c := range ss {
		r = append(r, c)
	}
	fmt.Println(r)
	//for i,_ :=range ss{
	// Compare mirrored positions up to the middle of the rune slice.
	for i := 0; i < len(r)/2; i++ {
		if r[i] != r[len(r)-1-i] {
			fmt.Println("不是回文")
			return
		}
	}
	fmt.Println("是回文")
}
|
package transport
import "github.com/zaynjarvis/fyp/dc/api"
// CollectionService is the transport-facing contract for a data-collection
// backend.
type CollectionService interface {
	// Start begins serving collection traffic.
	Start()
	// Stop shuts the service down.
	Stop()
	// RecvNotification streams collection events as they arrive.
	RecvNotification() <-chan *api.CollectionEvent
	// SendConfig pushes a new collection configuration to the service.
	SendConfig(*api.CollectionConfig)
	// Services returns service identifiers; exact semantics depend on the
	// implementation.
	Services() []string
}
// New constructs a CollectionService listening on port. Only the push
// model is implemented; requesting any other model panics.
func New(port string, push bool) CollectionService {
	if !push {
		panic("not implemented")
	}
	return newPushModel(port)
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package beta
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"strings"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl/operations"
)
// validate checks the client-side required fields of an AuthorizationPolicy
// before any API call is made.
func (r *AuthorizationPolicy) validate() error {
	if err := dcl.Required(r, "name"); err != nil {
		return err
	}
	if err := dcl.Required(r, "action"); err != nil {
		return err
	}
	if err := dcl.RequiredParameter(r.Project, "Project"); err != nil {
		return err
	}
	if err := dcl.RequiredParameter(r.Location, "Location"); err != nil {
		return err
	}
	return nil
}
// validate imposes no constraints on a rule itself.
func (r *AuthorizationPolicyRules) validate() error {
	return nil
}
// validate imposes no constraints on a rule source.
func (r *AuthorizationPolicyRulesSources) validate() error {
	return nil
}
// validate checks the required fields of a rule destination and recursively
// validates its HTTP header match, when present.
func (r *AuthorizationPolicyRulesDestinations) validate() error {
	if err := dcl.Required(r, "hosts"); err != nil {
		return err
	}
	if err := dcl.Required(r, "ports"); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(r.HttpHeaderMatch) {
		if err := r.HttpHeaderMatch.validate(); err != nil {
			return err
		}
	}
	return nil
}
// validate checks the required fields of an HTTP header match.
func (r *AuthorizationPolicyRulesDestinationsHttpHeaderMatch) validate() error {
	if err := dcl.Required(r, "headerName"); err != nil {
		return err
	}
	if err := dcl.Required(r, "regexMatch"); err != nil {
		return err
	}
	return nil
}
// basePath returns the root endpoint of the Network Security v1beta1 API.
func (r *AuthorizationPolicy) basePath() string {
	params := map[string]interface{}{}
	return dcl.Nprintf("https://networksecurity.googleapis.com/v1beta1/", params)
}
// getURL returns the GET endpoint for this policy.
func (r *AuthorizationPolicy) getURL(userBasePath string) (string, error) {
	nr := r.urlNormalized()
	params := map[string]interface{}{
		"project":  dcl.ValueOrEmptyString(nr.Project),
		"location": dcl.ValueOrEmptyString(nr.Location),
		"name":     dcl.ValueOrEmptyString(nr.Name),
	}
	return dcl.URL("projects/{{project}}/locations/{{location}}/authorizationPolicies/{{name}}", nr.basePath(), userBasePath, params), nil
}
// listURL returns the collection endpoint used to list policies.
func (r *AuthorizationPolicy) listURL(userBasePath string) (string, error) {
	nr := r.urlNormalized()
	params := map[string]interface{}{
		"project":  dcl.ValueOrEmptyString(nr.Project),
		"location": dcl.ValueOrEmptyString(nr.Location),
	}
	return dcl.URL("projects/{{project}}/locations/{{location}}/authorizationPolicies", nr.basePath(), userBasePath, params), nil
}
// createURL returns the POST endpoint, carrying the policy ID as a query
// parameter.
func (r *AuthorizationPolicy) createURL(userBasePath string) (string, error) {
	nr := r.urlNormalized()
	params := map[string]interface{}{
		"project":  dcl.ValueOrEmptyString(nr.Project),
		"location": dcl.ValueOrEmptyString(nr.Location),
		"name":     dcl.ValueOrEmptyString(nr.Name),
	}
	return dcl.URL("projects/{{project}}/locations/{{location}}/authorizationPolicies?authorizationPolicyId={{name}}", nr.basePath(), userBasePath, params), nil
}
// deleteURL returns the DELETE endpoint for this policy.
func (r *AuthorizationPolicy) deleteURL(userBasePath string) (string, error) {
	nr := r.urlNormalized()
	params := map[string]interface{}{
		"project":  dcl.ValueOrEmptyString(nr.Project),
		"location": dcl.ValueOrEmptyString(nr.Location),
		"name":     dcl.ValueOrEmptyString(nr.Name),
	}
	return dcl.URL("projects/{{project}}/locations/{{location}}/authorizationPolicies/{{name}}", nr.basePath(), userBasePath, params), nil
}
// SetPolicyURL returns the setIamPolicy endpoint for this policy.
// NOTE(review): unlike getURL, this dereferences Project/Location/Name
// directly and will panic if any of them is nil.
func (r *AuthorizationPolicy) SetPolicyURL(userBasePath string) string {
	nr := r.urlNormalized()
	fields := map[string]interface{}{
		"project":  *nr.Project,
		"location": *nr.Location,
		"name":     *nr.Name,
	}
	return dcl.URL("projects/{{project}}/locations/{{location}}/authorizationPolicies/{{name}}:setIamPolicy", nr.basePath(), userBasePath, fields)
}
// SetPolicyVerb reports the HTTP verb used for setIamPolicy requests.
func (r *AuthorizationPolicy) SetPolicyVerb() string {
	return "POST"
}
// getPolicyURL returns the getIamPolicy endpoint for this policy.
// NOTE(review): see SetPolicyURL regarding nil dereferences.
func (r *AuthorizationPolicy) getPolicyURL(userBasePath string) string {
	nr := r.urlNormalized()
	fields := map[string]interface{}{
		"project":  *nr.Project,
		"location": *nr.Location,
		"name":     *nr.Name,
	}
	return dcl.URL("projects/{{project}}/locations/{{location}}/authorizationPolicies/{{name}}:getIamPolicy", nr.basePath(), userBasePath, fields)
}
// IAMPolicyVersion reports the IAM policy schema version this resource uses.
func (r *AuthorizationPolicy) IAMPolicyVersion() int {
	return 3
}
// authorizationPolicyApiOperation represents a mutable operation in the underlying REST
// API such as Create, Update, or Delete.
type authorizationPolicyApiOperation interface {
	do(context.Context, *AuthorizationPolicy, *Client) error
}
// newUpdateAuthorizationPolicyUpdateAuthorizationPolicyRequest creates a request for an
// AuthorizationPolicy resource's UpdateAuthorizationPolicy update type by filling in the update
// fields based on the intended state of the resource.
func newUpdateAuthorizationPolicyUpdateAuthorizationPolicyRequest(ctx context.Context, f *AuthorizationPolicy, c *Client) (map[string]interface{}, error) {
	req := map[string]interface{}{}
	res := f
	_ = res
	// Only non-empty fields are included in the PATCH body.
	if v := f.Description; !dcl.IsEmptyValueIndirect(v) {
		req["description"] = v
	}
	if v := f.Labels; !dcl.IsEmptyValueIndirect(v) {
		req["labels"] = v
	}
	if v := f.Action; !dcl.IsEmptyValueIndirect(v) {
		req["action"] = v
	}
	if v, err := expandAuthorizationPolicyRulesSlice(c, f.Rules, res); err != nil {
		return nil, fmt.Errorf("error expanding Rules into rules: %w", err)
	} else if v != nil {
		req["rules"] = v
	}
	return req, nil
}
// marshalUpdateAuthorizationPolicyUpdateAuthorizationPolicyRequest converts the update into
// the final JSON request body.
func marshalUpdateAuthorizationPolicyUpdateAuthorizationPolicyRequest(c *Client, m map[string]interface{}) ([]byte, error) {
	return json.Marshal(m)
}
type updateAuthorizationPolicyUpdateAuthorizationPolicyOperation struct {
	// If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated.
	// Usually it will be nil - this is to prevent us from accidentally depending on apply
	// options, which should usually be unnecessary.
	ApplyOptions []dcl.ApplyOption
	FieldDiffs   []*dcl.FieldDiff
}
// do creates a request and sends it to the appropriate URL. In most operations,
// do will transcribe a subset of the resource into a request object and send a
// PUT request to a single URL.
func (op *updateAuthorizationPolicyUpdateAuthorizationPolicyOperation) do(ctx context.Context, r *AuthorizationPolicy, c *Client) error {
	// Confirm the resource exists before attempting the update.
	_, err := c.GetAuthorizationPolicy(ctx, r)
	if err != nil {
		return err
	}
	u, err := r.updateURL(c.Config.BasePath, "UpdateAuthorizationPolicy")
	if err != nil {
		return err
	}
	req, err := newUpdateAuthorizationPolicyUpdateAuthorizationPolicyRequest(ctx, r, c)
	if err != nil {
		return err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req)
	body, err := marshalUpdateAuthorizationPolicyUpdateAuthorizationPolicyRequest(c, req)
	if err != nil {
		return err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider)
	if err != nil {
		return err
	}
	// The PATCH returns a long-running operation; block until it completes.
	var o operations.StandardGCPOperation
	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
		return err
	}
	err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET")
	if err != nil {
		return err
	}
	return nil
}
// listAuthorizationPolicyRaw fetches one page from the list endpoint and
// returns the raw response body.
func (c *Client) listAuthorizationPolicyRaw(ctx context.Context, r *AuthorizationPolicy, pageToken string, pageSize int32) ([]byte, error) {
	u, err := r.urlNormalized().listURL(c.Config.BasePath)
	if err != nil {
		return nil, err
	}
	m := make(map[string]string)
	if pageToken != "" {
		m["pageToken"] = pageToken
	}
	// Only send pageSize when the caller wants less than the maximum.
	if pageSize != AuthorizationPolicyMaxPage {
		m["pageSize"] = fmt.Sprintf("%v", pageSize)
	}
	u, err = dcl.AddQueryParams(u, m)
	if err != nil {
		return nil, err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
	if err != nil {
		return nil, err
	}
	defer resp.Response.Body.Close()
	return ioutil.ReadAll(resp.Response.Body)
}
// listAuthorizationPolicyOperation mirrors the JSON shape of one page of a
// list response.
type listAuthorizationPolicyOperation struct {
	AuthorizationPolicies []map[string]interface{} `json:"authorizationPolicies"`
	Token                 string                   `json:"nextPageToken"`
}
// listAuthorizationPolicy returns one decoded page of policies plus the
// token for the next page ("" when the listing is exhausted).
func (c *Client) listAuthorizationPolicy(ctx context.Context, r *AuthorizationPolicy, pageToken string, pageSize int32) ([]*AuthorizationPolicy, string, error) {
	b, err := c.listAuthorizationPolicyRaw(ctx, r, pageToken, pageSize)
	if err != nil {
		return nil, "", err
	}
	var m listAuthorizationPolicyOperation
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, "", err
	}
	var l []*AuthorizationPolicy
	for _, v := range m.AuthorizationPolicies {
		res, err := unmarshalMapAuthorizationPolicy(v, c, r)
		if err != nil {
			return nil, m.Token, err
		}
		// Parent identifiers are not echoed by the API; carry them over.
		res.Project = r.Project
		res.Location = r.Location
		l = append(l, res)
	}
	return l, m.Token, nil
}
// deleteAllAuthorizationPolicy deletes every resource in resources that
// matches the filter f, collecting (rather than aborting on) individual
// deletion failures so that all candidates are attempted.
func (c *Client) deleteAllAuthorizationPolicy(ctx context.Context, f func(*AuthorizationPolicy) bool, resources []*AuthorizationPolicy) error {
	// Renamed from `errors`, which shadowed the standard library package name.
	var errs []string
	for _, res := range resources {
		if f(res) {
			// We do not want deleteAll to fail on a deletion or else it will stop deleting other resources.
			if err := c.DeleteAuthorizationPolicy(ctx, res); err != nil {
				errs = append(errs, err.Error())
			}
		}
	}
	if len(errs) > 0 {
		return fmt.Errorf("%v", strings.Join(errs, "\n"))
	}
	return nil
}
type deleteAuthorizationPolicyOperation struct{}
// do deletes the AuthorizationPolicy r. An already-absent resource is treated
// as success. It issues the DELETE, waits for the long-running operation to
// finish, then polls Get until the resource is confirmed gone (the API can
// briefly keep returning a deleted resource).
func (op *deleteAuthorizationPolicyOperation) do(ctx context.Context, r *AuthorizationPolicy, c *Client) error {
	r, err := c.GetAuthorizationPolicy(ctx, r)
	if err != nil {
		if dcl.IsNotFound(err) {
			c.Config.Logger.InfoWithContextf(ctx, "AuthorizationPolicy not found, returning. Original error: %v", err)
			return nil
		}
		c.Config.Logger.WarningWithContextf(ctx, "GetAuthorizationPolicy checking for existence. error: %v", err)
		return err
	}
	u, err := r.deleteURL(c.Config.BasePath)
	if err != nil {
		return err
	}
	// Delete should never have a body
	body := &bytes.Buffer{}
	resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider)
	if err != nil {
		return err
	}
	// wait for object to be deleted.
	var o operations.StandardGCPOperation
	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
		return err
	}
	if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil {
		return err
	}
	// We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration.
	// This is the reason we are adding retry to handle that case.
	retriesRemaining := 10
	// Fix: the error from dcl.Do was previously discarded, so a resource that
	// still existed after all retries was reported as successfully deleted.
	// Propagate the NotDeletedError to the caller instead.
	if err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) {
		_, err := c.GetAuthorizationPolicy(ctx, r)
		if dcl.IsNotFound(err) {
			return nil, nil
		}
		if retriesRemaining > 0 {
			retriesRemaining--
			return &dcl.RetryDetails{}, dcl.OperationNotDone{}
		}
		return nil, dcl.NotDeletedError{ExistingResource: r}
	}, c.Config.RetryProvider); err != nil {
		return err
	}
	return nil
}
// Create operations are similar to Update operations, although they do not have
// specific request objects. The Create request object is the json encoding of
// the resource, which is modified by res.marshal to form the base request body.
type createAuthorizationPolicyOperation struct {
	// response holds the raw map returned by the create operation, if any.
	response map[string]interface{}
}

// FirstResponse returns the create operation's response map and whether it
// contains any data.
func (op *createAuthorizationPolicyOperation) FirstResponse() (map[string]interface{}, bool) {
	return op.response, len(op.response) > 0
}
// do creates the AuthorizationPolicy r: it POSTs the marshalled resource,
// waits for the long-running create operation to finish, records the
// operation's first response, and finally verifies the resource with a Get.
func (op *createAuthorizationPolicyOperation) do(ctx context.Context, r *AuthorizationPolicy, c *Client) error {
	c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r)
	u, err := r.createURL(c.Config.BasePath)
	if err != nil {
		return err
	}
	req, err := r.marshal(c)
	if err != nil {
		return err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider)
	if err != nil {
		return err
	}
	// wait for object to be created.
	var o operations.StandardGCPOperation
	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
		return err
	}
	if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil {
		// Consistency fix: use the context-aware logger variant like every
		// other log call in this file (was Logger.Warningf).
		c.Config.Logger.WarningWithContextf(ctx, "Creation failed after waiting for operation: %v", err)
		return err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation")
	op.response, _ = o.FirstResponse()
	if _, err := c.GetAuthorizationPolicy(ctx, r); err != nil {
		c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err)
		return err
	}
	return nil
}
// getAuthorizationPolicyRaw performs a GET for the resource r and returns the
// raw response body.
func (c *Client) getAuthorizationPolicyRaw(ctx context.Context, r *AuthorizationPolicy) ([]byte, error) {
	u, err := r.getURL(c.Config.BasePath)
	if err != nil {
		return nil, err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
	if err != nil {
		return nil, err
	}
	defer resp.Response.Body.Close()
	return ioutil.ReadAll(resp.Response.Body)
}
// authorizationPolicyDiffsForRawDesired fetches the resource's current state,
// canonicalizes both the initial (actual) and desired states, and returns the
// field diffs between them. When the resource does not yet exist, initial and
// diffs are nil and only the canonicalized desired state is returned.
func (c *Client) authorizationPolicyDiffsForRawDesired(ctx context.Context, rawDesired *AuthorizationPolicy, opts ...dcl.ApplyOption) (initial, desired *AuthorizationPolicy, diffs []*dcl.FieldDiff, err error) {
	c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...")
	// First, let us see if the user provided a state hint. If they did, we will start fetching based on that.
	var fetchState *AuthorizationPolicy
	if sh := dcl.FetchStateHint(opts); sh != nil {
		if r, ok := sh.(*AuthorizationPolicy); !ok {
			c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected AuthorizationPolicy, got %T", sh)
		} else {
			fetchState = r
		}
	}
	if fetchState == nil {
		fetchState = rawDesired
	}
	// 1.2: Retrieval of raw initial state from API
	rawInitial, err := c.GetAuthorizationPolicy(ctx, fetchState)
	if rawInitial == nil {
		// A nil resource with an error other than NotFound is a hard failure;
		// NotFound simply means we are about to create the resource.
		if !dcl.IsNotFound(err) {
			c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a AuthorizationPolicy resource already exists: %s", err)
			return nil, nil, nil, fmt.Errorf("failed to retrieve AuthorizationPolicy resource: %v", err)
		}
		c.Config.Logger.InfoWithContext(ctx, "Found that AuthorizationPolicy resource did not exist.")
		// Perform canonicalization to pick up defaults.
		desired, err = canonicalizeAuthorizationPolicyDesiredState(rawDesired, rawInitial)
		return nil, desired, nil, err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Found initial state for AuthorizationPolicy: %v", rawInitial)
	c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for AuthorizationPolicy: %v", rawDesired)
	// The Get call applies postReadExtract and so the result may contain fields that are not part of API version.
	if err := extractAuthorizationPolicyFields(rawInitial); err != nil {
		return nil, nil, nil, err
	}
	// 1.3: Canonicalize raw initial state into initial state.
	initial, err = canonicalizeAuthorizationPolicyInitialState(rawInitial, rawDesired)
	if err != nil {
		return nil, nil, nil, err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for AuthorizationPolicy: %v", initial)
	// 1.4: Canonicalize raw desired state into desired state.
	desired, err = canonicalizeAuthorizationPolicyDesiredState(rawDesired, rawInitial, opts...)
	if err != nil {
		return nil, nil, nil, err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for AuthorizationPolicy: %v", desired)
	// 2.1: Comparison of initial and desired state.
	diffs, err = diffAuthorizationPolicy(c, desired, initial, opts...)
	return initial, desired, diffs, err
}
// canonicalizeAuthorizationPolicyInitialState currently returns the raw
// initial state unchanged; it exists as a hook for future canonicalization.
func canonicalizeAuthorizationPolicyInitialState(rawInitial, rawDesired *AuthorizationPolicy) (*AuthorizationPolicy, error) {
	// TODO(magic-modules-eng): write canonicalizer once relevant traits are added.
	return rawInitial, nil
}
/*
* Canonicalizers
*
* These are responsible for converting either a user-specified config or a
* GCP API response to a standard format that can be used for difference checking.
* */
// canonicalizeAuthorizationPolicyDesiredState merges the user's desired state
// with the current (initial) state: fields that are equivalent under the dcl
// comparison helpers take the initial value so that cosmetic differences
// (self-link forms, defaults) do not register as diffs.
func canonicalizeAuthorizationPolicyDesiredState(rawDesired, rawInitial *AuthorizationPolicy, opts ...dcl.ApplyOption) (*AuthorizationPolicy, error) {
	if rawInitial == nil {
		// Since the initial state is empty, the desired state is all we have.
		// We canonicalize the remaining nested objects with nil to pick up defaults.
		return rawDesired, nil
	}
	canonicalDesired := &AuthorizationPolicy{}
	if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) {
		canonicalDesired.Name = rawInitial.Name
	} else {
		canonicalDesired.Name = rawDesired.Name
	}
	if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) {
		canonicalDesired.Description = rawInitial.Description
	} else {
		canonicalDesired.Description = rawDesired.Description
	}
	if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		canonicalDesired.Labels = rawInitial.Labels
	} else {
		canonicalDesired.Labels = rawDesired.Labels
	}
	if dcl.IsZeroValue(rawDesired.Action) || (dcl.IsEmptyValueIndirect(rawDesired.Action) && dcl.IsEmptyValueIndirect(rawInitial.Action)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		canonicalDesired.Action = rawInitial.Action
	} else {
		canonicalDesired.Action = rawDesired.Action
	}
	// Nested repeated objects are canonicalized element-wise by their own helper.
	canonicalDesired.Rules = canonicalizeAuthorizationPolicyRulesSlice(rawDesired.Rules, rawInitial.Rules, opts...)
	if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) {
		canonicalDesired.Project = rawInitial.Project
	} else {
		canonicalDesired.Project = rawDesired.Project
	}
	if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) {
		canonicalDesired.Location = rawInitial.Location
	} else {
		canonicalDesired.Location = rawDesired.Location
	}
	return canonicalDesired, nil
}
// canonicalizeAuthorizationPolicyNewState reconciles the state returned by
// the API (rawNew) with the desired state after an apply: empty server fields
// are back-filled from desired, and equivalent values are normalized to the
// desired spelling. The reconciled rawNew is returned.
func canonicalizeAuthorizationPolicyNewState(c *Client, rawNew, rawDesired *AuthorizationPolicy) (*AuthorizationPolicy, error) {
	if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) {
		rawNew.Name = rawDesired.Name
	} else {
		if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) {
			rawNew.Name = rawDesired.Name
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) {
		rawNew.Description = rawDesired.Description
	} else {
		if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) {
			rawNew.Description = rawDesired.Description
		}
	}
	if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) {
		rawNew.CreateTime = rawDesired.CreateTime
	} else {
		// Intentionally empty: keep the server-returned value as-is.
	}
	if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) {
		rawNew.UpdateTime = rawDesired.UpdateTime
	} else {
		// Intentionally empty: keep the server-returned value as-is.
	}
	if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) {
		rawNew.Labels = rawDesired.Labels
	} else {
		// Intentionally empty: keep the server-returned value as-is.
	}
	if dcl.IsEmptyValueIndirect(rawNew.Action) && dcl.IsEmptyValueIndirect(rawDesired.Action) {
		rawNew.Action = rawDesired.Action
	} else {
		// Intentionally empty: keep the server-returned value as-is.
	}
	if dcl.IsEmptyValueIndirect(rawNew.Rules) && dcl.IsEmptyValueIndirect(rawDesired.Rules) {
		rawNew.Rules = rawDesired.Rules
	} else {
		rawNew.Rules = canonicalizeNewAuthorizationPolicyRulesSlice(c, rawDesired.Rules, rawNew.Rules)
	}
	// Project and Location are request-scoping fields the API does not echo back.
	rawNew.Project = rawDesired.Project
	rawNew.Location = rawDesired.Location
	return rawNew, nil
}
// canonicalizeAuthorizationPolicyRules merges a desired rule with its initial
// counterpart field by field. Nil desired falls back to initial; an
// explicitly-empty desired or a missing initial short-circuits to desired.
func canonicalizeAuthorizationPolicyRules(des, initial *AuthorizationPolicyRules, opts ...dcl.ApplyOption) *AuthorizationPolicyRules {
	switch {
	case des == nil:
		return initial
	case des.empty:
		return des
	case initial == nil:
		return des
	}
	return &AuthorizationPolicyRules{
		Sources:      canonicalizeAuthorizationPolicyRulesSourcesSlice(des.Sources, initial.Sources, opts...),
		Destinations: canonicalizeAuthorizationPolicyRulesDestinationsSlice(des.Destinations, initial.Destinations, opts...),
	}
}
// canonicalizeAuthorizationPolicyRulesSlice canonicalizes a slice of rules.
// Elements are paired with their initial counterparts only when the slices
// have equal length; otherwise each desired element is canonicalized alone.
func canonicalizeAuthorizationPolicyRulesSlice(des, initial []AuthorizationPolicyRules, opts ...dcl.ApplyOption) []AuthorizationPolicyRules {
	if des == nil {
		return initial
	}
	pairwise := len(des) == len(initial)
	items := make([]AuthorizationPolicyRules, 0, len(des))
	for i := range des {
		var init *AuthorizationPolicyRules
		if pairwise {
			init = &initial[i]
		}
		if cd := canonicalizeAuthorizationPolicyRules(&des[i], init, opts...); cd != nil {
			items = append(items, *cd)
		}
	}
	return items
}
// canonicalizeNewAuthorizationPolicyRules reconciles the API-returned rule nw
// against the desired rule des, normalizing nested slices in place on nw.
func canonicalizeNewAuthorizationPolicyRules(c *Client, des, nw *AuthorizationPolicyRules) *AuthorizationPolicyRules {
	if des == nil {
		return nw
	}
	if nw == nil {
		if !dcl.IsEmptyValueIndirect(des) {
			return nil
		}
		c.Config.Logger.Info("Found explicitly empty value for AuthorizationPolicyRules while comparing non-nil desired to nil actual. Returning desired object.")
		return des
	}
	nw.Sources = canonicalizeNewAuthorizationPolicyRulesSourcesSlice(c, des.Sources, nw.Sources)
	nw.Destinations = canonicalizeNewAuthorizationPolicyRulesDestinationsSlice(c, des.Destinations, nw.Destinations)
	return nw
}
// canonicalizeNewAuthorizationPolicyRulesSet treats des and nw as unordered
// sets: each desired element is matched (by zero-diff comparison) against an
// actual element, matched pairs are canonicalized, and unmatched actual
// elements are appended at the end.
func canonicalizeNewAuthorizationPolicyRulesSet(c *Client, des, nw []AuthorizationPolicyRules) []AuthorizationPolicyRules {
	if des == nil {
		return nw
	}
	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []AuthorizationPolicyRules
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareAuthorizationPolicyRulesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewAuthorizationPolicyRules(c, &d, &nw[matchedIndex]))
			// Remove the matched element so it cannot be matched twice.
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)
	return items
}
// canonicalizeNewAuthorizationPolicyRulesSlice reconciles equal-length rule
// slices element by element; unequal lengths are returned unchanged so the
// differ can report them.
func canonicalizeNewAuthorizationPolicyRulesSlice(c *Client, des, nw []AuthorizationPolicyRules) []AuthorizationPolicyRules {
	if des == nil {
		return nw
	}
	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}
	items := make([]AuthorizationPolicyRules, 0, len(des))
	for i := range des {
		// Work on a copy so the caller's nw slice is not mutated.
		elem := nw[i]
		items = append(items, *canonicalizeNewAuthorizationPolicyRules(c, &des[i], &elem))
	}
	return items
}
// canonicalizeAuthorizationPolicyRulesSources merges a desired source block
// with its initial counterpart, preferring the initial spelling whenever the
// string arrays are canonically equal.
func canonicalizeAuthorizationPolicyRulesSources(des, initial *AuthorizationPolicyRulesSources, opts ...dcl.ApplyOption) *AuthorizationPolicyRulesSources {
	switch {
	case des == nil:
		return initial
	case des.empty:
		return des
	case initial == nil:
		return des
	}
	cDes := &AuthorizationPolicyRulesSources{}
	cDes.Principals = des.Principals
	if dcl.StringArrayCanonicalize(des.Principals, initial.Principals) {
		cDes.Principals = initial.Principals
	}
	cDes.IPBlocks = des.IPBlocks
	if dcl.StringArrayCanonicalize(des.IPBlocks, initial.IPBlocks) {
		cDes.IPBlocks = initial.IPBlocks
	}
	return cDes
}
// canonicalizeAuthorizationPolicyRulesSourcesSlice canonicalizes a slice of
// source blocks, pairing elements with their initial counterparts only when
// the slices have equal length.
func canonicalizeAuthorizationPolicyRulesSourcesSlice(des, initial []AuthorizationPolicyRulesSources, opts ...dcl.ApplyOption) []AuthorizationPolicyRulesSources {
	if des == nil {
		return initial
	}
	pairwise := len(des) == len(initial)
	items := make([]AuthorizationPolicyRulesSources, 0, len(des))
	for i := range des {
		var init *AuthorizationPolicyRulesSources
		if pairwise {
			init = &initial[i]
		}
		if cd := canonicalizeAuthorizationPolicyRulesSources(&des[i], init, opts...); cd != nil {
			items = append(items, *cd)
		}
	}
	return items
}
// canonicalizeNewAuthorizationPolicyRulesSources reconciles the API-returned
// source block nw against the desired block des, keeping the desired spelling
// for canonically-equal string arrays.
func canonicalizeNewAuthorizationPolicyRulesSources(c *Client, des, nw *AuthorizationPolicyRulesSources) *AuthorizationPolicyRulesSources {
	if des == nil {
		return nw
	}
	if nw == nil {
		if !dcl.IsEmptyValueIndirect(des) {
			return nil
		}
		c.Config.Logger.Info("Found explicitly empty value for AuthorizationPolicyRulesSources while comparing non-nil desired to nil actual. Returning desired object.")
		return des
	}
	if dcl.StringArrayCanonicalize(des.Principals, nw.Principals) {
		nw.Principals = des.Principals
	}
	if dcl.StringArrayCanonicalize(des.IPBlocks, nw.IPBlocks) {
		nw.IPBlocks = des.IPBlocks
	}
	return nw
}
// canonicalizeNewAuthorizationPolicyRulesSourcesSet treats des and nw as
// unordered sets: matched (zero-diff) pairs are canonicalized, and unmatched
// actual elements are appended at the end.
func canonicalizeNewAuthorizationPolicyRulesSourcesSet(c *Client, des, nw []AuthorizationPolicyRulesSources) []AuthorizationPolicyRulesSources {
	if des == nil {
		return nw
	}
	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []AuthorizationPolicyRulesSources
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareAuthorizationPolicyRulesSourcesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewAuthorizationPolicyRulesSources(c, &d, &nw[matchedIndex]))
			// Remove the matched element so it cannot be matched twice.
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)
	return items
}
// canonicalizeNewAuthorizationPolicyRulesSourcesSlice reconciles equal-length
// slices of source blocks element by element; unequal lengths are returned
// unchanged so the differ can report them.
func canonicalizeNewAuthorizationPolicyRulesSourcesSlice(c *Client, des, nw []AuthorizationPolicyRulesSources) []AuthorizationPolicyRulesSources {
	if des == nil {
		return nw
	}
	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}
	items := make([]AuthorizationPolicyRulesSources, 0, len(des))
	for i := range des {
		// Work on a copy so the caller's nw slice is not mutated.
		elem := nw[i]
		items = append(items, *canonicalizeNewAuthorizationPolicyRulesSources(c, &des[i], &elem))
	}
	return items
}
// canonicalizeAuthorizationPolicyRulesDestinations merges a desired
// destination block with its initial counterpart, preferring the initial
// value whenever the two are canonically equivalent.
func canonicalizeAuthorizationPolicyRulesDestinations(des, initial *AuthorizationPolicyRulesDestinations, opts ...dcl.ApplyOption) *AuthorizationPolicyRulesDestinations {
	if des == nil {
		return initial
	}
	if des.empty {
		// Explicitly-empty desired object wins over the initial value.
		return des
	}
	if initial == nil {
		return des
	}
	cDes := &AuthorizationPolicyRulesDestinations{}
	if dcl.StringArrayCanonicalize(des.Hosts, initial.Hosts) {
		cDes.Hosts = initial.Hosts
	} else {
		cDes.Hosts = des.Hosts
	}
	if dcl.IsZeroValue(des.Ports) || (dcl.IsEmptyValueIndirect(des.Ports) && dcl.IsEmptyValueIndirect(initial.Ports)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.Ports = initial.Ports
	} else {
		cDes.Ports = des.Ports
	}
	if dcl.StringArrayCanonicalize(des.Methods, initial.Methods) {
		cDes.Methods = initial.Methods
	} else {
		cDes.Methods = des.Methods
	}
	// The nested header-match object is canonicalized by its own helper.
	cDes.HttpHeaderMatch = canonicalizeAuthorizationPolicyRulesDestinationsHttpHeaderMatch(des.HttpHeaderMatch, initial.HttpHeaderMatch, opts...)
	return cDes
}
// canonicalizeAuthorizationPolicyRulesDestinationsSlice canonicalizes a slice
// of destination blocks, pairing elements with their initial counterparts
// only when the slices have equal length.
func canonicalizeAuthorizationPolicyRulesDestinationsSlice(des, initial []AuthorizationPolicyRulesDestinations, opts ...dcl.ApplyOption) []AuthorizationPolicyRulesDestinations {
	if des == nil {
		return initial
	}
	pairwise := len(des) == len(initial)
	items := make([]AuthorizationPolicyRulesDestinations, 0, len(des))
	for i := range des {
		var init *AuthorizationPolicyRulesDestinations
		if pairwise {
			init = &initial[i]
		}
		if cd := canonicalizeAuthorizationPolicyRulesDestinations(&des[i], init, opts...); cd != nil {
			items = append(items, *cd)
		}
	}
	return items
}
// canonicalizeNewAuthorizationPolicyRulesDestinations reconciles the
// API-returned destination block nw against the desired block des, keeping
// the desired spelling for canonically-equal string arrays.
func canonicalizeNewAuthorizationPolicyRulesDestinations(c *Client, des, nw *AuthorizationPolicyRulesDestinations) *AuthorizationPolicyRulesDestinations {
	if des == nil {
		return nw
	}
	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for AuthorizationPolicyRulesDestinations while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}
	if dcl.StringArrayCanonicalize(des.Hosts, nw.Hosts) {
		nw.Hosts = des.Hosts
	}
	if dcl.StringArrayCanonicalize(des.Methods, nw.Methods) {
		nw.Methods = des.Methods
	}
	// Note: Ports is intentionally not reconciled here; the server value stands.
	nw.HttpHeaderMatch = canonicalizeNewAuthorizationPolicyRulesDestinationsHttpHeaderMatch(c, des.HttpHeaderMatch, nw.HttpHeaderMatch)
	return nw
}
// canonicalizeNewAuthorizationPolicyRulesDestinationsSet treats des and nw as
// unordered sets: matched (zero-diff) pairs are canonicalized, and unmatched
// actual elements are appended at the end.
func canonicalizeNewAuthorizationPolicyRulesDestinationsSet(c *Client, des, nw []AuthorizationPolicyRulesDestinations) []AuthorizationPolicyRulesDestinations {
	if des == nil {
		return nw
	}
	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []AuthorizationPolicyRulesDestinations
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareAuthorizationPolicyRulesDestinationsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewAuthorizationPolicyRulesDestinations(c, &d, &nw[matchedIndex]))
			// Remove the matched element so it cannot be matched twice.
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)
	return items
}
// canonicalizeNewAuthorizationPolicyRulesDestinationsSlice reconciles
// equal-length slices of destination blocks element by element; unequal
// lengths are returned unchanged so the differ can report them.
func canonicalizeNewAuthorizationPolicyRulesDestinationsSlice(c *Client, des, nw []AuthorizationPolicyRulesDestinations) []AuthorizationPolicyRulesDestinations {
	if des == nil {
		return nw
	}
	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}
	items := make([]AuthorizationPolicyRulesDestinations, 0, len(des))
	for i := range des {
		// Work on a copy so the caller's nw slice is not mutated.
		elem := nw[i]
		items = append(items, *canonicalizeNewAuthorizationPolicyRulesDestinations(c, &des[i], &elem))
	}
	return items
}
// canonicalizeAuthorizationPolicyRulesDestinationsHttpHeaderMatch merges a
// desired header-match block with its initial counterpart, preferring the
// initial value when the strings are canonically equal or the desired value
// is zero.
func canonicalizeAuthorizationPolicyRulesDestinationsHttpHeaderMatch(des, initial *AuthorizationPolicyRulesDestinationsHttpHeaderMatch, opts ...dcl.ApplyOption) *AuthorizationPolicyRulesDestinationsHttpHeaderMatch {
	if des == nil {
		return initial
	}
	if des.empty {
		// Explicitly-empty desired object wins over the initial value.
		return des
	}
	if initial == nil {
		return des
	}
	cDes := &AuthorizationPolicyRulesDestinationsHttpHeaderMatch{}
	if dcl.StringCanonicalize(des.HeaderName, initial.HeaderName) || dcl.IsZeroValue(des.HeaderName) {
		cDes.HeaderName = initial.HeaderName
	} else {
		cDes.HeaderName = des.HeaderName
	}
	if dcl.StringCanonicalize(des.RegexMatch, initial.RegexMatch) || dcl.IsZeroValue(des.RegexMatch) {
		cDes.RegexMatch = initial.RegexMatch
	} else {
		cDes.RegexMatch = des.RegexMatch
	}
	return cDes
}
// canonicalizeAuthorizationPolicyRulesDestinationsHttpHeaderMatchSlice
// canonicalizes a slice of header-match blocks. Note the empty check uses
// dcl.IsEmptyValueIndirect (not a plain nil test) for this nested object.
func canonicalizeAuthorizationPolicyRulesDestinationsHttpHeaderMatchSlice(des, initial []AuthorizationPolicyRulesDestinationsHttpHeaderMatch, opts ...dcl.ApplyOption) []AuthorizationPolicyRulesDestinationsHttpHeaderMatch {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}
	pairwise := len(des) == len(initial)
	items := make([]AuthorizationPolicyRulesDestinationsHttpHeaderMatch, 0, len(des))
	for i := range des {
		var init *AuthorizationPolicyRulesDestinationsHttpHeaderMatch
		if pairwise {
			init = &initial[i]
		}
		if cd := canonicalizeAuthorizationPolicyRulesDestinationsHttpHeaderMatch(&des[i], init, opts...); cd != nil {
			items = append(items, *cd)
		}
	}
	return items
}
// canonicalizeNewAuthorizationPolicyRulesDestinationsHttpHeaderMatch
// reconciles the API-returned header match nw against the desired block des,
// keeping the desired spelling for canonically-equal strings.
func canonicalizeNewAuthorizationPolicyRulesDestinationsHttpHeaderMatch(c *Client, des, nw *AuthorizationPolicyRulesDestinationsHttpHeaderMatch) *AuthorizationPolicyRulesDestinationsHttpHeaderMatch {
	if des == nil {
		return nw
	}
	if nw == nil {
		if !dcl.IsEmptyValueIndirect(des) {
			return nil
		}
		c.Config.Logger.Info("Found explicitly empty value for AuthorizationPolicyRulesDestinationsHttpHeaderMatch while comparing non-nil desired to nil actual. Returning desired object.")
		return des
	}
	if dcl.StringCanonicalize(des.HeaderName, nw.HeaderName) {
		nw.HeaderName = des.HeaderName
	}
	if dcl.StringCanonicalize(des.RegexMatch, nw.RegexMatch) {
		nw.RegexMatch = des.RegexMatch
	}
	return nw
}
// canonicalizeNewAuthorizationPolicyRulesDestinationsHttpHeaderMatchSet
// treats des and nw as unordered sets: matched (zero-diff) pairs are
// canonicalized, and unmatched actual elements are appended at the end.
func canonicalizeNewAuthorizationPolicyRulesDestinationsHttpHeaderMatchSet(c *Client, des, nw []AuthorizationPolicyRulesDestinationsHttpHeaderMatch) []AuthorizationPolicyRulesDestinationsHttpHeaderMatch {
	if des == nil {
		return nw
	}
	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []AuthorizationPolicyRulesDestinationsHttpHeaderMatch
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareAuthorizationPolicyRulesDestinationsHttpHeaderMatchNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewAuthorizationPolicyRulesDestinationsHttpHeaderMatch(c, &d, &nw[matchedIndex]))
			// Remove the matched element so it cannot be matched twice.
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)
	return items
}
// canonicalizeNewAuthorizationPolicyRulesDestinationsHttpHeaderMatchSlice
// reconciles equal-length slices of header-match blocks element by element;
// unequal lengths are returned unchanged so the differ can report them.
func canonicalizeNewAuthorizationPolicyRulesDestinationsHttpHeaderMatchSlice(c *Client, des, nw []AuthorizationPolicyRulesDestinationsHttpHeaderMatch) []AuthorizationPolicyRulesDestinationsHttpHeaderMatch {
	if des == nil {
		return nw
	}
	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}
	items := make([]AuthorizationPolicyRulesDestinationsHttpHeaderMatch, 0, len(des))
	for i := range des {
		// Work on a copy so the caller's nw slice is not mutated.
		elem := nw[i]
		items = append(items, *canonicalizeNewAuthorizationPolicyRulesDestinationsHttpHeaderMatch(c, &des[i], &elem))
	}
	return items
}
// The differ returns a list of diffs, along with a list of operations that should be taken
// to remedy them. Right now, it does not attempt to consolidate operations - if several
// fields can be fixed with a patch update, it will perform the patch several times.
// Diffs on some fields will be ignored if the `desired` state has an empty (nil)
// value. This empty value indicates that the user does not care about the state for
// the field. Empty fields on the actual object will cause diffs.
// TODO(magic-modules-eng): for efficiency in some resources, add batching.
func diffAuthorizationPolicy(c *Client, desired, actual *AuthorizationPolicy, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) {
	if desired == nil || actual == nil {
		return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual)
	}
	c.Config.Logger.Infof("Diff function called with desired state: %v", desired)
	c.Config.Logger.Infof("Diff function called with actual state: %v", actual)
	var fn dcl.FieldName
	var newDiffs []*dcl.FieldDiff
	// New style diffs.
	// Each field is diffed with a DiffInfo that names the operation required to
	// reconcile it: RequiresRecreate for immutable fields, TriggersOperation
	// for fields updatable in place.
	if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAuthorizationPolicyUpdateAuthorizationPolicyOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAuthorizationPolicyUpdateAuthorizationPolicyOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Action, actual.Action, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateAuthorizationPolicyUpdateAuthorizationPolicyOperation")}, fn.AddNest("Action")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Rules, actual.Rules, dcl.DiffInfo{ObjectFunction: compareAuthorizationPolicyRulesNewStyle, EmptyObject: EmptyAuthorizationPolicyRules, OperationSelector: dcl.TriggersOperation("updateAuthorizationPolicyUpdateAuthorizationPolicyOperation")}, fn.AddNest("Rules")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}
	if len(newDiffs) > 0 {
		c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs)
	}
	return newDiffs, nil
}
// compareAuthorizationPolicyRulesNewStyle diffs two AuthorizationPolicyRules
// values. d and a may each be passed as a pointer or a value; both forms are
// accepted and normalized to pointers before field-by-field comparison.
func compareAuthorizationPolicyRulesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff
	desired, ok := d.(*AuthorizationPolicyRules)
	if !ok {
		desiredNotPointer, ok := d.(AuthorizationPolicyRules)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AuthorizationPolicyRules or *AuthorizationPolicyRules", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*AuthorizationPolicyRules)
	if !ok {
		actualNotPointer, ok := a.(AuthorizationPolicyRules)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AuthorizationPolicyRules", a)
		}
		actual = &actualNotPointer
	}
	if ds, err := dcl.Diff(desired.Sources, actual.Sources, dcl.DiffInfo{ObjectFunction: compareAuthorizationPolicyRulesSourcesNewStyle, EmptyObject: EmptyAuthorizationPolicyRulesSources, OperationSelector: dcl.TriggersOperation("updateAuthorizationPolicyUpdateAuthorizationPolicyOperation")}, fn.AddNest("Sources")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Destinations, actual.Destinations, dcl.DiffInfo{ObjectFunction: compareAuthorizationPolicyRulesDestinationsNewStyle, EmptyObject: EmptyAuthorizationPolicyRulesDestinations, OperationSelector: dcl.TriggersOperation("updateAuthorizationPolicyUpdateAuthorizationPolicyOperation")}, fn.AddNest("Destinations")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}
// compareAuthorizationPolicyRulesSourcesNewStyle diffs two
// AuthorizationPolicyRulesSources values. d and a may each be passed as a
// pointer or a value; both forms are normalized to pointers before comparison.
func compareAuthorizationPolicyRulesSourcesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff
	desired, ok := d.(*AuthorizationPolicyRulesSources)
	if !ok {
		desiredNotPointer, ok := d.(AuthorizationPolicyRulesSources)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AuthorizationPolicyRulesSources or *AuthorizationPolicyRulesSources", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*AuthorizationPolicyRulesSources)
	if !ok {
		actualNotPointer, ok := a.(AuthorizationPolicyRulesSources)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AuthorizationPolicyRulesSources", a)
		}
		actual = &actualNotPointer
	}
	if ds, err := dcl.Diff(desired.Principals, actual.Principals, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAuthorizationPolicyUpdateAuthorizationPolicyOperation")}, fn.AddNest("Principals")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	// Note the API field name "IpBlocks" differs from the Go field IPBlocks.
	if ds, err := dcl.Diff(desired.IPBlocks, actual.IPBlocks, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAuthorizationPolicyUpdateAuthorizationPolicyOperation")}, fn.AddNest("IpBlocks")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}
// compareAuthorizationPolicyRulesDestinationsNewStyle diffs two
// AuthorizationPolicyRulesDestinations values. d and a may each be passed as
// a pointer or a value; both forms are normalized to pointers before
// field-by-field comparison.
func compareAuthorizationPolicyRulesDestinationsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff
	desired, ok := d.(*AuthorizationPolicyRulesDestinations)
	if !ok {
		desiredNotPointer, ok := d.(AuthorizationPolicyRulesDestinations)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AuthorizationPolicyRulesDestinations or *AuthorizationPolicyRulesDestinations", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*AuthorizationPolicyRulesDestinations)
	if !ok {
		actualNotPointer, ok := a.(AuthorizationPolicyRulesDestinations)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AuthorizationPolicyRulesDestinations", a)
		}
		actual = &actualNotPointer
	}
	if ds, err := dcl.Diff(desired.Hosts, actual.Hosts, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAuthorizationPolicyUpdateAuthorizationPolicyOperation")}, fn.AddNest("Hosts")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Ports, actual.Ports, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAuthorizationPolicyUpdateAuthorizationPolicyOperation")}, fn.AddNest("Ports")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	if ds, err := dcl.Diff(desired.Methods, actual.Methods, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAuthorizationPolicyUpdateAuthorizationPolicyOperation")}, fn.AddNest("Methods")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	if ds, err := dcl.Diff(desired.HttpHeaderMatch, actual.HttpHeaderMatch, dcl.DiffInfo{ObjectFunction: compareAuthorizationPolicyRulesDestinationsHttpHeaderMatchNewStyle, EmptyObject: EmptyAuthorizationPolicyRulesDestinationsHttpHeaderMatch, OperationSelector: dcl.TriggersOperation("updateAuthorizationPolicyUpdateAuthorizationPolicyOperation")}, fn.AddNest("HttpHeaderMatch")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}
func compareAuthorizationPolicyRulesDestinationsHttpHeaderMatchNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
var diffs []*dcl.FieldDiff
desired, ok := d.(*AuthorizationPolicyRulesDestinationsHttpHeaderMatch)
if !ok {
desiredNotPointer, ok := d.(AuthorizationPolicyRulesDestinationsHttpHeaderMatch)
if !ok {
return nil, fmt.Errorf("obj %v is not a AuthorizationPolicyRulesDestinationsHttpHeaderMatch or *AuthorizationPolicyRulesDestinationsHttpHeaderMatch", d)
}
desired = &desiredNotPointer
}
actual, ok := a.(*AuthorizationPolicyRulesDestinationsHttpHeaderMatch)
if !ok {
actualNotPointer, ok := a.(AuthorizationPolicyRulesDestinationsHttpHeaderMatch)
if !ok {
return nil, fmt.Errorf("obj %v is not a AuthorizationPolicyRulesDestinationsHttpHeaderMatch", a)
}
actual = &actualNotPointer
}
if ds, err := dcl.Diff(desired.HeaderName, actual.HeaderName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAuthorizationPolicyUpdateAuthorizationPolicyOperation")}, fn.AddNest("HeaderName")); len(ds) != 0 || err != nil {
if err != nil {
return nil, err
}
diffs = append(diffs, ds...)
}
if ds, err := dcl.Diff(desired.RegexMatch, actual.RegexMatch, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAuthorizationPolicyUpdateAuthorizationPolicyOperation")}, fn.AddNest("RegexMatch")); len(ds) != 0 || err != nil {
if err != nil {
return nil, err
}
diffs = append(diffs, ds...)
}
return diffs, nil
}
// urlNormalized returns a copy of the resource struct with values normalized
// for URL substitutions. For instance, it converts long-form self-links to
// short-form so they can be substituted in.
func (r *AuthorizationPolicy) urlNormalized() *AuthorizationPolicy {
normalized := dcl.Copy(*r).(AuthorizationPolicy)
normalized.Name = dcl.SelfLinkToName(r.Name)
normalized.Description = dcl.SelfLinkToName(r.Description)
normalized.Project = dcl.SelfLinkToName(r.Project)
normalized.Location = dcl.SelfLinkToName(r.Location)
return &normalized
}
func (r *AuthorizationPolicy) updateURL(userBasePath, updateName string) (string, error) {
nr := r.urlNormalized()
if updateName == "UpdateAuthorizationPolicy" {
fields := map[string]interface{}{
"project": dcl.ValueOrEmptyString(nr.Project),
"location": dcl.ValueOrEmptyString(nr.Location),
"name": dcl.ValueOrEmptyString(nr.Name),
}
return dcl.URL("projects/{{project}}/locations/{{location}}/authorizationPolicies/{{name}}", nr.basePath(), userBasePath, fields), nil
}
return "", fmt.Errorf("unknown update name: %s", updateName)
}
// marshal encodes the AuthorizationPolicy resource into JSON for a Create request, and
// performs transformations from the resource schema to the API schema if
// necessary.
func (r *AuthorizationPolicy) marshal(c *Client) ([]byte, error) {
m, err := expandAuthorizationPolicy(c, r)
if err != nil {
return nil, fmt.Errorf("error marshalling AuthorizationPolicy: %w", err)
}
return json.Marshal(m)
}
// unmarshalAuthorizationPolicy decodes JSON responses into the AuthorizationPolicy resource schema.
func unmarshalAuthorizationPolicy(b []byte, c *Client, res *AuthorizationPolicy) (*AuthorizationPolicy, error) {
var m map[string]interface{}
if err := json.Unmarshal(b, &m); err != nil {
return nil, err
}
return unmarshalMapAuthorizationPolicy(m, c, res)
}
func unmarshalMapAuthorizationPolicy(m map[string]interface{}, c *Client, res *AuthorizationPolicy) (*AuthorizationPolicy, error) {
flattened := flattenAuthorizationPolicy(c, m, res)
if flattened == nil {
return nil, fmt.Errorf("attempted to flatten empty json object")
}
return flattened, nil
}
// expandAuthorizationPolicy expands AuthorizationPolicy into a JSON request object.
func expandAuthorizationPolicy(c *Client, f *AuthorizationPolicy) (map[string]interface{}, error) {
m := make(map[string]interface{})
res := f
_ = res
if v, err := dcl.DeriveField("projects/*/locations/%s/authorizationPolicies/%s", f.Name, dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil {
return nil, fmt.Errorf("error expanding Name into name: %w", err)
} else if !dcl.IsEmptyValueIndirect(v) {
m["name"] = v
}
if v := f.Description; dcl.ValueShouldBeSent(v) {
m["description"] = v
}
if v := f.Labels; dcl.ValueShouldBeSent(v) {
m["labels"] = v
}
if v := f.Action; dcl.ValueShouldBeSent(v) {
m["action"] = v
}
if v, err := expandAuthorizationPolicyRulesSlice(c, f.Rules, res); err != nil {
return nil, fmt.Errorf("error expanding Rules into rules: %w", err)
} else if v != nil {
m["rules"] = v
}
if v, err := dcl.EmptyValue(); err != nil {
return nil, fmt.Errorf("error expanding Project into project: %w", err)
} else if !dcl.IsEmptyValueIndirect(v) {
m["project"] = v
}
if v, err := dcl.EmptyValue(); err != nil {
return nil, fmt.Errorf("error expanding Location into location: %w", err)
} else if !dcl.IsEmptyValueIndirect(v) {
m["location"] = v
}
return m, nil
}
// flattenAuthorizationPolicy flattens AuthorizationPolicy from a JSON request object into the
// AuthorizationPolicy type.
func flattenAuthorizationPolicy(c *Client, i interface{}, res *AuthorizationPolicy) *AuthorizationPolicy {
m, ok := i.(map[string]interface{})
if !ok {
return nil
}
if len(m) == 0 {
return nil
}
resultRes := &AuthorizationPolicy{}
resultRes.Name = dcl.FlattenString(m["name"])
resultRes.Description = dcl.FlattenString(m["description"])
resultRes.CreateTime = dcl.FlattenString(m["createTime"])
resultRes.UpdateTime = dcl.FlattenString(m["updateTime"])
resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"])
resultRes.Action = flattenAuthorizationPolicyActionEnum(m["action"])
resultRes.Rules = flattenAuthorizationPolicyRulesSlice(c, m["rules"], res)
resultRes.Project = dcl.FlattenString(m["project"])
resultRes.Location = dcl.FlattenString(m["location"])
return resultRes
}
// expandAuthorizationPolicyRulesMap expands the contents of AuthorizationPolicyRules into a JSON
// request object.
func expandAuthorizationPolicyRulesMap(c *Client, f map[string]AuthorizationPolicyRules, res *AuthorizationPolicy) (map[string]interface{}, error) {
if f == nil {
return nil, nil
}
items := make(map[string]interface{})
for k, item := range f {
i, err := expandAuthorizationPolicyRules(c, &item, res)
if err != nil {
return nil, err
}
if i != nil {
items[k] = i
}
}
return items, nil
}
// expandAuthorizationPolicyRulesSlice expands the contents of AuthorizationPolicyRules into a JSON
// request object.
func expandAuthorizationPolicyRulesSlice(c *Client, f []AuthorizationPolicyRules, res *AuthorizationPolicy) ([]map[string]interface{}, error) {
if f == nil {
return nil, nil
}
items := []map[string]interface{}{}
for _, item := range f {
i, err := expandAuthorizationPolicyRules(c, &item, res)
if err != nil {
return nil, err
}
items = append(items, i)
}
return items, nil
}
// flattenAuthorizationPolicyRulesMap flattens the contents of AuthorizationPolicyRules from a JSON
// response object.
func flattenAuthorizationPolicyRulesMap(c *Client, i interface{}, res *AuthorizationPolicy) map[string]AuthorizationPolicyRules {
a, ok := i.(map[string]interface{})
if !ok {
return map[string]AuthorizationPolicyRules{}
}
if len(a) == 0 {
return map[string]AuthorizationPolicyRules{}
}
items := make(map[string]AuthorizationPolicyRules)
for k, item := range a {
items[k] = *flattenAuthorizationPolicyRules(c, item.(map[string]interface{}), res)
}
return items
}
// flattenAuthorizationPolicyRulesSlice flattens the contents of AuthorizationPolicyRules from a JSON
// response object.
func flattenAuthorizationPolicyRulesSlice(c *Client, i interface{}, res *AuthorizationPolicy) []AuthorizationPolicyRules {
a, ok := i.([]interface{})
if !ok {
return []AuthorizationPolicyRules{}
}
if len(a) == 0 {
return []AuthorizationPolicyRules{}
}
items := make([]AuthorizationPolicyRules, 0, len(a))
for _, item := range a {
items = append(items, *flattenAuthorizationPolicyRules(c, item.(map[string]interface{}), res))
}
return items
}
// expandAuthorizationPolicyRules expands an instance of AuthorizationPolicyRules into a JSON
// request object.
func expandAuthorizationPolicyRules(c *Client, f *AuthorizationPolicyRules, res *AuthorizationPolicy) (map[string]interface{}, error) {
if f == nil {
return nil, nil
}
m := make(map[string]interface{})
if v, err := expandAuthorizationPolicyRulesSourcesSlice(c, f.Sources, res); err != nil {
return nil, fmt.Errorf("error expanding Sources into sources: %w", err)
} else if v != nil {
m["sources"] = v
}
if v, err := expandAuthorizationPolicyRulesDestinationsSlice(c, f.Destinations, res); err != nil {
return nil, fmt.Errorf("error expanding Destinations into destinations: %w", err)
} else if v != nil {
m["destinations"] = v
}
return m, nil
}
// flattenAuthorizationPolicyRules flattens an instance of AuthorizationPolicyRules from a JSON
// response object.
func flattenAuthorizationPolicyRules(c *Client, i interface{}, res *AuthorizationPolicy) *AuthorizationPolicyRules {
m, ok := i.(map[string]interface{})
if !ok {
return nil
}
r := &AuthorizationPolicyRules{}
if dcl.IsEmptyValueIndirect(i) {
return EmptyAuthorizationPolicyRules
}
r.Sources = flattenAuthorizationPolicyRulesSourcesSlice(c, m["sources"], res)
r.Destinations = flattenAuthorizationPolicyRulesDestinationsSlice(c, m["destinations"], res)
return r
}
// expandAuthorizationPolicyRulesSourcesMap expands the contents of AuthorizationPolicyRulesSources into a JSON
// request object.
func expandAuthorizationPolicyRulesSourcesMap(c *Client, f map[string]AuthorizationPolicyRulesSources, res *AuthorizationPolicy) (map[string]interface{}, error) {
if f == nil {
return nil, nil
}
items := make(map[string]interface{})
for k, item := range f {
i, err := expandAuthorizationPolicyRulesSources(c, &item, res)
if err != nil {
return nil, err
}
if i != nil {
items[k] = i
}
}
return items, nil
}
// expandAuthorizationPolicyRulesSourcesSlice expands the contents of AuthorizationPolicyRulesSources into a JSON
// request object.
func expandAuthorizationPolicyRulesSourcesSlice(c *Client, f []AuthorizationPolicyRulesSources, res *AuthorizationPolicy) ([]map[string]interface{}, error) {
if f == nil {
return nil, nil
}
items := []map[string]interface{}{}
for _, item := range f {
i, err := expandAuthorizationPolicyRulesSources(c, &item, res)
if err != nil {
return nil, err
}
items = append(items, i)
}
return items, nil
}
// flattenAuthorizationPolicyRulesSourcesMap flattens the contents of AuthorizationPolicyRulesSources from a JSON
// response object.
func flattenAuthorizationPolicyRulesSourcesMap(c *Client, i interface{}, res *AuthorizationPolicy) map[string]AuthorizationPolicyRulesSources {
a, ok := i.(map[string]interface{})
if !ok {
return map[string]AuthorizationPolicyRulesSources{}
}
if len(a) == 0 {
return map[string]AuthorizationPolicyRulesSources{}
}
items := make(map[string]AuthorizationPolicyRulesSources)
for k, item := range a {
items[k] = *flattenAuthorizationPolicyRulesSources(c, item.(map[string]interface{}), res)
}
return items
}
// flattenAuthorizationPolicyRulesSourcesSlice flattens the contents of AuthorizationPolicyRulesSources from a JSON
// response object.
func flattenAuthorizationPolicyRulesSourcesSlice(c *Client, i interface{}, res *AuthorizationPolicy) []AuthorizationPolicyRulesSources {
a, ok := i.([]interface{})
if !ok {
return []AuthorizationPolicyRulesSources{}
}
if len(a) == 0 {
return []AuthorizationPolicyRulesSources{}
}
items := make([]AuthorizationPolicyRulesSources, 0, len(a))
for _, item := range a {
items = append(items, *flattenAuthorizationPolicyRulesSources(c, item.(map[string]interface{}), res))
}
return items
}
// expandAuthorizationPolicyRulesSources expands an instance of AuthorizationPolicyRulesSources into a JSON
// request object.
func expandAuthorizationPolicyRulesSources(c *Client, f *AuthorizationPolicyRulesSources, res *AuthorizationPolicy) (map[string]interface{}, error) {
if f == nil {
return nil, nil
}
m := make(map[string]interface{})
if v := f.Principals; v != nil {
m["principals"] = v
}
if v := f.IPBlocks; v != nil {
m["ipBlocks"] = v
}
return m, nil
}
// flattenAuthorizationPolicyRulesSources flattens an instance of AuthorizationPolicyRulesSources from a JSON
// response object.
func flattenAuthorizationPolicyRulesSources(c *Client, i interface{}, res *AuthorizationPolicy) *AuthorizationPolicyRulesSources {
m, ok := i.(map[string]interface{})
if !ok {
return nil
}
r := &AuthorizationPolicyRulesSources{}
if dcl.IsEmptyValueIndirect(i) {
return EmptyAuthorizationPolicyRulesSources
}
r.Principals = dcl.FlattenStringSlice(m["principals"])
r.IPBlocks = dcl.FlattenStringSlice(m["ipBlocks"])
return r
}
// expandAuthorizationPolicyRulesDestinationsMap expands the contents of AuthorizationPolicyRulesDestinations into a JSON
// request object.
func expandAuthorizationPolicyRulesDestinationsMap(c *Client, f map[string]AuthorizationPolicyRulesDestinations, res *AuthorizationPolicy) (map[string]interface{}, error) {
if f == nil {
return nil, nil
}
items := make(map[string]interface{})
for k, item := range f {
i, err := expandAuthorizationPolicyRulesDestinations(c, &item, res)
if err != nil {
return nil, err
}
if i != nil {
items[k] = i
}
}
return items, nil
}
// expandAuthorizationPolicyRulesDestinationsSlice expands the contents of AuthorizationPolicyRulesDestinations into a JSON
// request object.
func expandAuthorizationPolicyRulesDestinationsSlice(c *Client, f []AuthorizationPolicyRulesDestinations, res *AuthorizationPolicy) ([]map[string]interface{}, error) {
if f == nil {
return nil, nil
}
items := []map[string]interface{}{}
for _, item := range f {
i, err := expandAuthorizationPolicyRulesDestinations(c, &item, res)
if err != nil {
return nil, err
}
items = append(items, i)
}
return items, nil
}
// flattenAuthorizationPolicyRulesDestinationsMap flattens the contents of AuthorizationPolicyRulesDestinations from a JSON
// response object.
func flattenAuthorizationPolicyRulesDestinationsMap(c *Client, i interface{}, res *AuthorizationPolicy) map[string]AuthorizationPolicyRulesDestinations {
a, ok := i.(map[string]interface{})
if !ok {
return map[string]AuthorizationPolicyRulesDestinations{}
}
if len(a) == 0 {
return map[string]AuthorizationPolicyRulesDestinations{}
}
items := make(map[string]AuthorizationPolicyRulesDestinations)
for k, item := range a {
items[k] = *flattenAuthorizationPolicyRulesDestinations(c, item.(map[string]interface{}), res)
}
return items
}
// flattenAuthorizationPolicyRulesDestinationsSlice flattens the contents of AuthorizationPolicyRulesDestinations from a JSON
// response object.
func flattenAuthorizationPolicyRulesDestinationsSlice(c *Client, i interface{}, res *AuthorizationPolicy) []AuthorizationPolicyRulesDestinations {
a, ok := i.([]interface{})
if !ok {
return []AuthorizationPolicyRulesDestinations{}
}
if len(a) == 0 {
return []AuthorizationPolicyRulesDestinations{}
}
items := make([]AuthorizationPolicyRulesDestinations, 0, len(a))
for _, item := range a {
items = append(items, *flattenAuthorizationPolicyRulesDestinations(c, item.(map[string]interface{}), res))
}
return items
}
// expandAuthorizationPolicyRulesDestinations expands an instance of AuthorizationPolicyRulesDestinations into a JSON
// request object.
func expandAuthorizationPolicyRulesDestinations(c *Client, f *AuthorizationPolicyRulesDestinations, res *AuthorizationPolicy) (map[string]interface{}, error) {
if f == nil {
return nil, nil
}
m := make(map[string]interface{})
if v := f.Hosts; v != nil {
m["hosts"] = v
}
if v := f.Ports; v != nil {
m["ports"] = v
}
if v := f.Methods; v != nil {
m["methods"] = v
}
if v, err := expandAuthorizationPolicyRulesDestinationsHttpHeaderMatch(c, f.HttpHeaderMatch, res); err != nil {
return nil, fmt.Errorf("error expanding HttpHeaderMatch into httpHeaderMatch: %w", err)
} else if !dcl.IsEmptyValueIndirect(v) {
m["httpHeaderMatch"] = v
}
return m, nil
}
// flattenAuthorizationPolicyRulesDestinations flattens an instance of AuthorizationPolicyRulesDestinations from a JSON
// response object.
func flattenAuthorizationPolicyRulesDestinations(c *Client, i interface{}, res *AuthorizationPolicy) *AuthorizationPolicyRulesDestinations {
m, ok := i.(map[string]interface{})
if !ok {
return nil
}
r := &AuthorizationPolicyRulesDestinations{}
if dcl.IsEmptyValueIndirect(i) {
return EmptyAuthorizationPolicyRulesDestinations
}
r.Hosts = dcl.FlattenStringSlice(m["hosts"])
r.Ports = dcl.FlattenIntSlice(m["ports"])
r.Methods = dcl.FlattenStringSlice(m["methods"])
r.HttpHeaderMatch = flattenAuthorizationPolicyRulesDestinationsHttpHeaderMatch(c, m["httpHeaderMatch"], res)
return r
}
// expandAuthorizationPolicyRulesDestinationsHttpHeaderMatchMap expands the contents of AuthorizationPolicyRulesDestinationsHttpHeaderMatch into a JSON
// request object.
func expandAuthorizationPolicyRulesDestinationsHttpHeaderMatchMap(c *Client, f map[string]AuthorizationPolicyRulesDestinationsHttpHeaderMatch, res *AuthorizationPolicy) (map[string]interface{}, error) {
if f == nil {
return nil, nil
}
items := make(map[string]interface{})
for k, item := range f {
i, err := expandAuthorizationPolicyRulesDestinationsHttpHeaderMatch(c, &item, res)
if err != nil {
return nil, err
}
if i != nil {
items[k] = i
}
}
return items, nil
}
// expandAuthorizationPolicyRulesDestinationsHttpHeaderMatchSlice expands the contents of AuthorizationPolicyRulesDestinationsHttpHeaderMatch into a JSON
// request object.
func expandAuthorizationPolicyRulesDestinationsHttpHeaderMatchSlice(c *Client, f []AuthorizationPolicyRulesDestinationsHttpHeaderMatch, res *AuthorizationPolicy) ([]map[string]interface{}, error) {
if f == nil {
return nil, nil
}
items := []map[string]interface{}{}
for _, item := range f {
i, err := expandAuthorizationPolicyRulesDestinationsHttpHeaderMatch(c, &item, res)
if err != nil {
return nil, err
}
items = append(items, i)
}
return items, nil
}
// flattenAuthorizationPolicyRulesDestinationsHttpHeaderMatchMap flattens the contents of AuthorizationPolicyRulesDestinationsHttpHeaderMatch from a JSON
// response object.
func flattenAuthorizationPolicyRulesDestinationsHttpHeaderMatchMap(c *Client, i interface{}, res *AuthorizationPolicy) map[string]AuthorizationPolicyRulesDestinationsHttpHeaderMatch {
a, ok := i.(map[string]interface{})
if !ok {
return map[string]AuthorizationPolicyRulesDestinationsHttpHeaderMatch{}
}
if len(a) == 0 {
return map[string]AuthorizationPolicyRulesDestinationsHttpHeaderMatch{}
}
items := make(map[string]AuthorizationPolicyRulesDestinationsHttpHeaderMatch)
for k, item := range a {
items[k] = *flattenAuthorizationPolicyRulesDestinationsHttpHeaderMatch(c, item.(map[string]interface{}), res)
}
return items
}
// flattenAuthorizationPolicyRulesDestinationsHttpHeaderMatchSlice flattens the contents of AuthorizationPolicyRulesDestinationsHttpHeaderMatch from a JSON
// response object.
func flattenAuthorizationPolicyRulesDestinationsHttpHeaderMatchSlice(c *Client, i interface{}, res *AuthorizationPolicy) []AuthorizationPolicyRulesDestinationsHttpHeaderMatch {
a, ok := i.([]interface{})
if !ok {
return []AuthorizationPolicyRulesDestinationsHttpHeaderMatch{}
}
if len(a) == 0 {
return []AuthorizationPolicyRulesDestinationsHttpHeaderMatch{}
}
items := make([]AuthorizationPolicyRulesDestinationsHttpHeaderMatch, 0, len(a))
for _, item := range a {
items = append(items, *flattenAuthorizationPolicyRulesDestinationsHttpHeaderMatch(c, item.(map[string]interface{}), res))
}
return items
}
// expandAuthorizationPolicyRulesDestinationsHttpHeaderMatch expands an instance of AuthorizationPolicyRulesDestinationsHttpHeaderMatch into a JSON
// request object.
func expandAuthorizationPolicyRulesDestinationsHttpHeaderMatch(c *Client, f *AuthorizationPolicyRulesDestinationsHttpHeaderMatch, res *AuthorizationPolicy) (map[string]interface{}, error) {
if dcl.IsEmptyValueIndirect(f) {
return nil, nil
}
m := make(map[string]interface{})
if v := f.HeaderName; !dcl.IsEmptyValueIndirect(v) {
m["headerName"] = v
}
if v := f.RegexMatch; !dcl.IsEmptyValueIndirect(v) {
m["regexMatch"] = v
}
return m, nil
}
// flattenAuthorizationPolicyRulesDestinationsHttpHeaderMatch flattens an instance of AuthorizationPolicyRulesDestinationsHttpHeaderMatch from a JSON
// response object.
func flattenAuthorizationPolicyRulesDestinationsHttpHeaderMatch(c *Client, i interface{}, res *AuthorizationPolicy) *AuthorizationPolicyRulesDestinationsHttpHeaderMatch {
m, ok := i.(map[string]interface{})
if !ok {
return nil
}
r := &AuthorizationPolicyRulesDestinationsHttpHeaderMatch{}
if dcl.IsEmptyValueIndirect(i) {
return EmptyAuthorizationPolicyRulesDestinationsHttpHeaderMatch
}
r.HeaderName = dcl.FlattenString(m["headerName"])
r.RegexMatch = dcl.FlattenString(m["regexMatch"])
return r
}
// flattenAuthorizationPolicyActionEnumMap flattens the contents of AuthorizationPolicyActionEnum from a JSON
// response object.
func flattenAuthorizationPolicyActionEnumMap(c *Client, i interface{}, res *AuthorizationPolicy) map[string]AuthorizationPolicyActionEnum {
a, ok := i.(map[string]interface{})
if !ok {
return map[string]AuthorizationPolicyActionEnum{}
}
if len(a) == 0 {
return map[string]AuthorizationPolicyActionEnum{}
}
items := make(map[string]AuthorizationPolicyActionEnum)
for k, item := range a {
items[k] = *flattenAuthorizationPolicyActionEnum(item.(interface{}))
}
return items
}
// flattenAuthorizationPolicyActionEnumSlice flattens the contents of AuthorizationPolicyActionEnum from a JSON
// response object.
func flattenAuthorizationPolicyActionEnumSlice(c *Client, i interface{}, res *AuthorizationPolicy) []AuthorizationPolicyActionEnum {
a, ok := i.([]interface{})
if !ok {
return []AuthorizationPolicyActionEnum{}
}
if len(a) == 0 {
return []AuthorizationPolicyActionEnum{}
}
items := make([]AuthorizationPolicyActionEnum, 0, len(a))
for _, item := range a {
items = append(items, *flattenAuthorizationPolicyActionEnum(item.(interface{})))
}
return items
}
// flattenAuthorizationPolicyActionEnum asserts that an interface is a string, and returns a
// pointer to a *AuthorizationPolicyActionEnum with the same value as that string.
func flattenAuthorizationPolicyActionEnum(i interface{}) *AuthorizationPolicyActionEnum {
s, ok := i.(string)
if !ok {
return nil
}
return AuthorizationPolicyActionEnumRef(s)
}
// This function returns a matcher that checks whether a serialized resource matches this resource
// in its parameters (as defined by the fields in a Get, which definitionally define resource
// identity). This is useful in extracting the element from a List call.
func (r *AuthorizationPolicy) matcher(c *Client) func([]byte) bool {
return func(b []byte) bool {
cr, err := unmarshalAuthorizationPolicy(b, c, r)
if err != nil {
c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.")
return false
}
nr := r.urlNormalized()
ncr := cr.urlNormalized()
c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr)
if nr.Project == nil && ncr.Project == nil {
c.Config.Logger.Info("Both Project fields null - considering equal.")
} else if nr.Project == nil || ncr.Project == nil {
c.Config.Logger.Info("Only one Project field is null - considering unequal.")
return false
} else if *nr.Project != *ncr.Project {
return false
}
if nr.Location == nil && ncr.Location == nil {
c.Config.Logger.Info("Both Location fields null - considering equal.")
} else if nr.Location == nil || ncr.Location == nil {
c.Config.Logger.Info("Only one Location field is null - considering unequal.")
return false
} else if *nr.Location != *ncr.Location {
return false
}
if nr.Name == nil && ncr.Name == nil {
c.Config.Logger.Info("Both Name fields null - considering equal.")
} else if nr.Name == nil || ncr.Name == nil {
c.Config.Logger.Info("Only one Name field is null - considering unequal.")
return false
} else if *nr.Name != *ncr.Name {
return false
}
return true
}
}
type authorizationPolicyDiff struct {
// The diff should include one or the other of RequiresRecreate or UpdateOp.
RequiresRecreate bool
UpdateOp authorizationPolicyApiOperation
FieldName string // used for error logging
}
func convertFieldDiffsToAuthorizationPolicyDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]authorizationPolicyDiff, error) {
opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff)
// Map each operation name to the field diffs associated with it.
for _, fd := range fds {
for _, ro := range fd.ResultingOperation {
if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok {
fieldDiffs = append(fieldDiffs, fd)
opNamesToFieldDiffs[ro] = fieldDiffs
} else {
config.Logger.Infof("%s required due to diff: %v", ro, fd)
opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd}
}
}
}
var diffs []authorizationPolicyDiff
// For each operation name, create a authorizationPolicyDiff which contains the operation.
for opName, fieldDiffs := range opNamesToFieldDiffs {
// Use the first field diff's field name for logging required recreate error.
diff := authorizationPolicyDiff{FieldName: fieldDiffs[0].FieldName}
if opName == "Recreate" {
diff.RequiresRecreate = true
} else {
apiOp, err := convertOpNameToAuthorizationPolicyApiOperation(opName, fieldDiffs, opts...)
if err != nil {
return diffs, err
}
diff.UpdateOp = apiOp
}
diffs = append(diffs, diff)
}
return diffs, nil
}
func convertOpNameToAuthorizationPolicyApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (authorizationPolicyApiOperation, error) {
switch opName {
case "updateAuthorizationPolicyUpdateAuthorizationPolicyOperation":
return &updateAuthorizationPolicyUpdateAuthorizationPolicyOperation{FieldDiffs: fieldDiffs}, nil
default:
return nil, fmt.Errorf("no such operation with name: %v", opName)
}
}
func extractAuthorizationPolicyFields(r *AuthorizationPolicy) error {
return nil
}
func extractAuthorizationPolicyRulesFields(r *AuthorizationPolicy, o *AuthorizationPolicyRules) error {
return nil
}
func extractAuthorizationPolicyRulesSourcesFields(r *AuthorizationPolicy, o *AuthorizationPolicyRulesSources) error {
return nil
}
func extractAuthorizationPolicyRulesDestinationsFields(r *AuthorizationPolicy, o *AuthorizationPolicyRulesDestinations) error {
vHttpHeaderMatch := o.HttpHeaderMatch
if vHttpHeaderMatch == nil {
// note: explicitly not the empty object.
vHttpHeaderMatch = &AuthorizationPolicyRulesDestinationsHttpHeaderMatch{}
}
if err := extractAuthorizationPolicyRulesDestinationsHttpHeaderMatchFields(r, vHttpHeaderMatch); err != nil {
return err
}
if !dcl.IsEmptyValueIndirect(vHttpHeaderMatch) {
o.HttpHeaderMatch = vHttpHeaderMatch
}
return nil
}
func extractAuthorizationPolicyRulesDestinationsHttpHeaderMatchFields(r *AuthorizationPolicy, o *AuthorizationPolicyRulesDestinationsHttpHeaderMatch) error {
return nil
}
func postReadExtractAuthorizationPolicyFields(r *AuthorizationPolicy) error {
return nil
}
func postReadExtractAuthorizationPolicyRulesFields(r *AuthorizationPolicy, o *AuthorizationPolicyRules) error {
return nil
}
func postReadExtractAuthorizationPolicyRulesSourcesFields(r *AuthorizationPolicy, o *AuthorizationPolicyRulesSources) error {
return nil
}
func postReadExtractAuthorizationPolicyRulesDestinationsFields(r *AuthorizationPolicy, o *AuthorizationPolicyRulesDestinations) error {
vHttpHeaderMatch := o.HttpHeaderMatch
if vHttpHeaderMatch == nil {
// note: explicitly not the empty object.
vHttpHeaderMatch = &AuthorizationPolicyRulesDestinationsHttpHeaderMatch{}
}
if err := extractAuthorizationPolicyRulesDestinationsHttpHeaderMatchFields(r, vHttpHeaderMatch); err != nil {
return err
}
if !dcl.IsEmptyValueIndirect(vHttpHeaderMatch) {
o.HttpHeaderMatch = vHttpHeaderMatch
}
return nil
}
func postReadExtractAuthorizationPolicyRulesDestinationsHttpHeaderMatchFields(r *AuthorizationPolicy, o *AuthorizationPolicyRulesDestinationsHttpHeaderMatch) error {
return nil
}
|
package object
// PrivateUser represents PrivateUserObject
// Link: https://developer.spotify.com/documentation/web-api/reference/#object-privateuserobject
type PrivateUser struct {
DisplayName string `json:"display_name,omitempty"`
ID string `json:"id"`
Country string `json:"country"`
Email string `json:"email"`
ExplicitContent ExplicitContentSettings `json:"explicit_content"`
ExternalURLs ExternalURL `json:"external_urls"`
Followers *Followers `json:"followers,omitempty"`
HRef string `json:"href"`
Images []Image `json:"images,omitempty"`
Product string `json:"product"`
Type objectType `json:"type"`
URI string `json:"uri"`
}
// PublicUser represents PublicUserObject
// Link: https://developer.spotify.com/documentation/web-api/reference/#object-publicuserobject
type PublicUser struct {
DisplayName string `json:"display_name,omitempty"`
ID string `json:"id"`
ExternalURLs ExternalURL `json:"external_urls"`
Followers *Followers `json:"followers,omitempty"`
HRef string `json:"href"`
Images []Image `json:"images,omitempty"`
Type objectType `json:"type"`
URI string `json:"uri"`
}
|
package helper
import (
"crypto/sha512"
"encoding/base64"
)
const passwordSalt = "a99VVoWzmd1C9ujcitK0fIVNE0I5I61AC47C852RoLTsHDyLCltvP+ZHEkIl/2hkzTOW90c3ZEjtYRkdfTWJ1Q=="
// EncryptPassword helper
func EncryptPassword(email, password string) string {
hasher := sha512.New()
hasher.Write([]byte(passwordSalt))
hasher.Write([]byte(email))
hasher.Write([]byte(password))
return base64.URLEncoding.EncodeToString(hasher.Sum(nil))
}
|
package game
import (
"github.com/tanema/amore/gfx"
)
// Voxel is the main drawn box
type Voxel struct {
x, y, z float32
width, height float32
h, s, l float32
shine float32
relative bool
}
func newVoxel(x, y, z, width, height, h, s, l float32, relative bool) *Voxel {
return &Voxel{
x: x, y: y, z: z,
width: width, height: height,
h: h, s: s, l: l,
relative: relative,
shine: 1,
}
}
func (voxel *Voxel) update(world *World, x, y float32) {
// starting luminance based on distance from player center
voxel.shine = baseShine + exp(-(x*x+y*y)/playerShineRange*2)
if world.sin > 0 { // if, daytime add sunlight
voxel.shine += world.sin * (2 - world.sin) * (1 - voxel.shine)
}
}
func (voxel *Voxel) draw(camera *Camera, px, py float32) {
cellSize := float32(1)
if voxel.relative {
cellSize = camera.getCellSize()
}
gfx.SetColorC(gfx.NewHSLColor(voxel.h, voxel.s, pow(voxel.l, 1/voxel.shine), 1))
x, y := camera.worldToScreen(px, py, voxel.z, voxel.relative)
width, height := voxel.width*cellSize, voxel.height*cellSize
coords := []float32{
x + width, y, x, y + width/2,
x - width, y,
x - width, y - height,
x, y - height - width/2,
x + width, y - height,
}
gfx.Polygon(gfx.FILL, coords)
}
|
package conveyor
type QueneBuckets struct {
maxItems int
quene []int
totalSum int
}
func NewBuckets(maxItems int) *QueneBuckets {
return &QueneBuckets{maxItems: maxItems}
}
func (q *QueneBuckets) Shift(shifted int) {
if shifted > q.maxItems {
q.quene = []int{}
q.totalSum = 0
return
}
for shifted > 0 {
q.quene = append(q.quene, 0)
shifted--
}
for len(q.quene) > q.maxItems {
q.totalSum -= q.quene[0]
q.quene = q.quene[1:]
}
}
func (q *QueneBuckets) AddToBack(count int) {
if len(q.quene) == 0 {
q.Shift(1)
}
q.quene[len(q.quene)-1] += count
q.totalSum += count
}
func (q *QueneBuckets) TotalSum() int {
return q.totalSum
}
|
package main
import (
"fmt"
"os"
"net/http"
"log"
)
func main() {
http.HandleFunc("/foo", higoHandler)
log.Fatal(http.ListenAndServe(":8088", nil))
}
func higoHandler(w http.ResponseWriter, r *http.Request) {
name, err := os.Hostname()
if err != nil {
panic(err.Error())
}
fmt.Fprintln(w, "higo, "+name)
}
|
package actor
import (
"fmt"
"time"
"github.com/stretchr/testify/mock"
)
var nullProducer Producer = func() Actor { return nullReceive }
var nullReceive ActorFunc = func(Context) {}
var nilPID *PID
func matchPID(with *PID) interface{} {
return mock.MatchedBy(func(v *PID) bool {
return with.Address == v.Address && with.Id == v.Id
})
}
func spawnMockProcess(name string) (*PID, *mockProcess) {
p := &mockProcess{}
pid, ok := ProcessRegistry.Add(p, name)
if !ok {
panic(fmt.Errorf("did not spawn named process '%s'", name))
}
return pid, p
}
func removeMockProcess(pid *PID) {
ProcessRegistry.Remove(pid)
}
// mockProcess is a testify mock of the Process interface: each method
// only records the arguments it was called with.
type mockProcess struct {
	mock.Mock
}
// SendUserMessage records a user-level message delivery.
func (m *mockProcess) SendUserMessage(pid *PID, message interface{}, sender *PID) {
	m.Called(pid, message, sender)
}
// SendSystemMessage records a system-level message delivery.
func (m *mockProcess) SendSystemMessage(pid *PID, message interface{}) {
	m.Called(pid, message)
}
// Stop records the stop request.
func (m *mockProcess) Stop(pid *PID) {
	m.Called(pid)
}
// mockContext is a testify mock of the actor Context interface. Every
// method records its invocation; getters replay the value configured
// via m.On(...).Return(...) and type-assert it, so a test MUST stub a
// getter before the code under test calls it (otherwise the assertion
// panics on testify's nil placeholder).
type mockContext struct {
	mock.Mock
}
func (m *mockContext) Watch(pid *PID) {
	m.Called(pid)
}
func (m *mockContext) Unwatch(pid *PID) {
	m.Called(pid)
}
func (m *mockContext) Message() interface{} {
	args := m.Called()
	return args.Get(0)
}
func (m *mockContext) SetReceiveTimeout(d time.Duration) {
	m.Called(d)
}
func (m *mockContext) ReceiveTimeout() time.Duration {
	args := m.Called()
	return args.Get(0).(time.Duration)
}
func (m *mockContext) Sender() *PID {
	args := m.Called()
	return args.Get(0).(*PID)
}
func (m *mockContext) MessageHeader() ReadonlyMessageHeader {
	args := m.Called()
	return args.Get(0).(ReadonlyMessageHeader)
}
// NOTE(review): Tell, Request and RequestFuture do not forward their
// arguments to m.Called (unlike Watch/Respond/Spawn), so argument
// expectations cannot match on pid/message for these three — confirm
// whether that is intentional.
func (m *mockContext) Tell(pid *PID, message interface{}) {
	m.Called()
}
func (m *mockContext) Request(pid *PID, message interface{}) {
	m.Called()
}
func (m *mockContext) RequestFuture(pid *PID, message interface{}, timeout time.Duration) *Future {
	args := m.Called()
	return args.Get(0).(*Future)
}
func (m *mockContext) SetBehavior(r ActorFunc) {
	m.Called(r)
}
func (m *mockContext) PushBehavior(r ActorFunc) {
	m.Called(r)
}
func (m *mockContext) PopBehavior() {
	m.Called()
}
func (m *mockContext) Self() *PID {
	args := m.Called()
	return args.Get(0).(*PID)
}
func (m *mockContext) Parent() *PID {
	args := m.Called()
	return args.Get(0).(*PID)
}
func (m *mockContext) Spawn(p *Props) *PID {
	args := m.Called(p)
	return args.Get(0).(*PID)
}
func (m *mockContext) SpawnPrefix(p *Props, prefix string) *PID {
	args := m.Called(p, prefix)
	return args.Get(0).(*PID)
}
func (m *mockContext) SpawnNamed(p *Props, name string) (*PID, error) {
	args := m.Called(p, name)
	return args.Get(0).(*PID), args.Get(1).(error)
}
func (m *mockContext) Children() []*PID {
	args := m.Called()
	return args.Get(0).([]*PID)
}
func (m *mockContext) Stash() {
	m.Called()
}
func (m *mockContext) Respond(response interface{}) {
	m.Called(response)
}
func (m *mockContext) Actor() Actor {
	args := m.Called()
	return args.Get(0).(Actor)
}
func (m *mockContext) AwaitFuture(f *Future, cont func(res interface{}, err error)) {
	m.Called(f, cont)
}
|
package mqtt
import (
"strconv"
"strings"
)
///////////////////////////////////////////////////////////////////////////////
// Subscription is one client's subscription to a single topic. All
// subscriptions of a topic form a doubly linked list via next/prev,
// anchored either at Topic.subs (exact match) or Topic.mlwcSubs (/#).
type Subscription struct {
	ctx *Context // owning client context; receives published messages
	// topic string
	topic *Topic // topic this subscription is linked into; nil when detached
	qos byte // requested MQTT quality-of-service level
	next, prev *Subscription // links in the topic's subscriber chain
}
// NewSubscription creates a subscription for the given client context
// with the requested QoS level. It is not linked to any topic until
// Topic.Subscribe enqueues it.
//
// Fixed: the original was not gofmt-clean (stray semicolons, `ctx*
// Context` spacing, superfluous parentheses around the result).
func NewSubscription(ctx *Context, qos byte) *Subscription {
	sub := new(Subscription)
	sub.ctx = ctx
	sub.qos = qos
	return sub
}
// Publish delivers msg to this subscription and every subscription
// chained after it. Safe to call on a nil *Subscription (empty chain).
func (s *Subscription) Publish(msg *Message) {
	for cur := s; cur != nil; cur = cur.next {
		cur.ctx.Publish(cur, msg)
	}
}
// ChainLength counts the subscriptions in the chain starting at s; a
// nil receiver is an empty chain.
func (s *Subscription) ChainLength() int {
	n := 0
	for cur := s; cur != nil; cur = cur.next {
		n++
	}
	return n
}
// Unsubscribe detaches sub from its topic's subscriber list (either
// the exact-match chain or the /# chain) and, when that leaves the
// topic completely unused, removes the topic from the tree.
func (sub *Subscription) Unsubscribe() {
	topic := sub.topic
	if topic == nil {
		// Not linked into any topic (never subscribed, or already removed).
		return
	}
	if sub.prev == nil {
		// sub is the head of one of the topic's two chains; advance
		// whichever head points at it.
		if topic.subs == sub {
			topic.subs = sub.next
		} else {
			topic.mlwcSubs = sub.next
		}
		if sub.next != nil {
			sub.next.prev = nil
		}
		// the topic we unsubscribed can be removed if
		if (topic.subs == nil && // no subscribers
			topic.retainMsg == nil && // no retain message
			topic.mlwcSubs == nil && // no /# subscribers
			topic.wcTopic == nil && // no /+ topic
			len(topic.children) == 0) { // no sub-topics
			topic.Remove()
		}
	} else {
		// Middle or tail of the chain: standard doubly linked removal.
		sub.prev.next = sub.next
		if sub.next != nil {
			sub.next.prev = sub.prev
		}
	}
	sub.topic = nil
}
///////////////////////////////////////////////////////////////////////////////
// Topic is one node of the MQTT topic tree; each path segment of a
// topic name is one level of the tree.
type Topic struct {
	// topic name like "b" in 'a/b' for b
	name string
	// child topics by segment name, like /b in 'a/b' for a
	children map[string] *Topic
	// single level wildcard (+) child topic
	wcTopic *Topic
	// parent topic like a in 'a/b' for b; nil for the root
	parent *Topic
	// head of the doubly linked list of exact-match subscriptions
	subs *Subscription
	// head of the list of /# (multi level wildcard) subscriptions
	mlwcSubs *Subscription
	// retained message attached to this topic, if any
	retainMsg *Message
}
// NewTopic creates an empty topic node called name under parent.
func NewTopic(parent *Topic, name string) (*Topic) {
	return &Topic{
		name:     name,
		parent:   parent,
		children: make(map[string]*Topic),
	}
}
// Find walks down the topic tree following the path segments in s and
// returns the exact-match subscriber chain of the target topic, or nil
// when the path does not exist. Wildcards are not resolved here.
func (topic *Topic) Find(s []string) (*Subscription) {
	if len(s) == 0 {
		return topic.subs
	}
	child, ok := topic.children[s[0]]
	if !ok {
		return nil
	}
	return child.Find(s[1:])
}
// Publish delivers msg to every subscriber matching the remaining
// topic path s, descending one tree level per recursion step. A
// retained message is stored on the topic it was published to, so the
// topic chain is created on demand for retained publishes.
//
// Bug fixed: when the child topic did not exist and msg.retain was
// set, the original called t.Subscribe(s[1:], nil) on the nil child
// with a nil subscription, which dereferenced nil and panicked. The
// child is now created and the publish recursion continues into it,
// attaching the retained message at the leaf.
func (topic *Topic) Publish(s []string, msg *Message) {
	if len(s) == 0 {
		// End of the path: inform all exact subscribers of this topic.
		topic.subs.Publish(msg)
		// Attach a retained message to the topic itself.
		if msg.retain {
			topic.retainMsg = msg
		}
	} else {
		// Search for the child node for the next path segment.
		t, ok := topic.children[s[0]]
		if ok {
			t.Publish(s[1:], msg)
		} else if msg.retain {
			// Retained messages are attached to a topic, so the topic
			// must exist even though nobody has subscribed to it yet.
			t = NewTopic(topic, s[0])
			topic.children[s[0]] = t
			t.Publish(s[1:], msg)
		}
		// Notify all ../+ (single level wildcard) subscribers.
		if topic.wcTopic != nil {
			topic.wcTopic.Publish(s[1:], msg)
		}
	}
	// The /# (multi level wildcard) subscribers always match.
	topic.mlwcSubs.Publish(msg)
}
// String renders a human readable overview of this topic's direct and
// /# subscriber counts plus each child topic, recursively indented via
// PrintIndent.
func (topic *Topic) String() string {
	var b strings.Builder
	if count := topic.subs.ChainLength(); count != 0 {
		b.WriteString("\n/ (" + strconv.Itoa(count) + " listeners)\n")
	}
	if count := topic.mlwcSubs.ChainLength(); count != 0 {
		b.WriteString("\n/# (" + strconv.Itoa(count) + " listeners)\n")
	}
	for name, child := range topic.children {
		b.WriteString("\n" + name + " (" + strconv.Itoa(child.subs.ChainLength()) + " listeners)")
		child.PrintIndent(&b, " ")
	}
	return b.String()
}
// PrintIndent appends this topic's /# and /+ subscriber counts and all
// child topics to builder, one line per entry, prefixed with indent
// and indenting one extra space per tree level.
func (topic *Topic) PrintIndent(builder *strings.Builder, indent string) {
	if count := topic.mlwcSubs.ChainLength(); count != 0 {
		builder.WriteString("\n" + indent + "/# (" + strconv.Itoa(count) + " listeners)")
	}
	if wc := topic.wcTopic; wc != nil {
		builder.WriteString("\n" + indent + "/+ (" + strconv.Itoa(wc.subs.ChainLength()) + " listeners)")
		wc.PrintIndent(builder, indent+" ")
	}
	for name, child := range topic.children {
		builder.WriteString("\n" + indent + "/" + name + " (" + strconv.Itoa(child.subs.ChainLength()) + " listeners)")
		child.PrintIndent(builder, indent+" ")
	}
}
// Enqueue pushes s onto the front of the given subscriber chain
// (either &topic.subs or &topic.mlwcSubs) and records the owning
// topic on the subscription so Unsubscribe can find it again.
func (topic *Topic) Enqueue(queue **Subscription, s *Subscription) {
	head := *queue
	s.prev, s.next = nil, head
	if head != nil {
		head.prev = s
	}
	*queue = s
	s.topic = topic
}
// Subscribe attaches sub to the topic addressed by the remaining path
// segments t, creating intermediate topics on demand. "#" enqueues on
// this node's multi level wildcard chain; "+" descends into the single
// level wildcard child. A retained message on the target topic is
// delivered to the new subscriber immediately.
func (topic *Topic) Subscribe(t []string, sub *Subscription) {
	if len(t) == 0 {
		topic.Enqueue(&topic.subs, sub)
		if topic.retainMsg != nil {
			sub.ctx.Publish(sub, topic.retainMsg)
		}
		return
	}
	if t[0] == "#" {
		topic.Enqueue(&topic.mlwcSubs, sub)
		return
	}
	var child *Topic
	if t[0] == "+" {
		if topic.wcTopic == nil {
			topic.wcTopic = NewTopic(topic, "+")
		}
		child = topic.wcTopic
	} else if existing, ok := topic.children[t[0]]; ok {
		child = existing
	} else {
		child = NewTopic(topic, t[0])
		topic.children[t[0]] = child
	}
	child.Subscribe(t[1:], sub)
}
// Remove unlinks this (now unused) topic from its parent and walks up
// the tree removing parents that thereby become unused. The root
// topic (parent == nil) is never removed.
func (topic *Topic) Remove() {
	parent := topic.parent
	if parent != nil {
		// the wildcard (+) topic is attached to wcTopic, not children,
		// so it is detached differently
		if topic.name == "+" {
			// remove the parent too if it is now completely unused
			if (len(parent.children) == 0 && // no sub-topics
				parent.retainMsg == nil && // no retain message
				parent.subs == nil && // no subscriptions
				parent.mlwcSubs == nil && // no /# subscriptions
				parent.parent != nil) { // but not the root topic :)
				parent.Remove()
				return
			}
			// otherwise just detach this wildcard topic
			parent.wcTopic = nil
			return
		}
		// remove the parent too if this was its last reason to exist
		if (parent.wcTopic == nil && // no /+ subscribers
			parent.retainMsg == nil && // no retain message
			parent.mlwcSubs == nil && // no /# subscribers
			len(parent.children) == 1 && // no sub-topics (just this one)
			parent.subs == nil && // no subscriptions
			parent.parent != nil) { // but not the root topic
			parent.Remove()
			return
		}
		// remove this topic from the parent's sub-topics
		delete(parent.children, topic.name)
	}
}
|
package index
import (
"fmt"
"strings"
)
// toTitle converts a markdown filename into a human readable title:
// the ".md" extension is stripped from the end, underscores become
// spaces, and each word is capitalized.
//
// Bug fixed: the original used a Replacer that removed ".md" anywhere
// in the name, not only as the trailing file extension.
func toTitle(filename string) string {
	name := strings.TrimSuffix(filename, ".md")
	name = strings.ReplaceAll(name, "_", " ")
	// NOTE(review): strings.Title is deprecated (it mishandles Unicode
	// word boundaries) but is kept to avoid a new x/text dependency.
	return strings.Title(name)
}
// wrapWithTag surrounds content with an opening and closing HTML tag
// named tagName, e.g. wrapWithTag("hi", "b") == "<b>hi</b>".
func wrapWithTag(content, tagName string) string {
	opening := fmt.Sprintf("<%s>", tagName)
	closing := fmt.Sprintf("</%s>", tagName)
	return opening + content + closing
}
// wrapWithAnchorTag builds an HTML anchor that links to href with
// content as the link text.
// NOTE(review): neither href nor content is HTML-escaped here; if
// either can come from untrusted input, escape it before calling.
func wrapWithAnchorTag(content, href string) string {
	anchor := fmt.Sprintf("<a href='%s'>", href)
	return anchor + content + "</a>"
}
|
package reflectutils
import (
"fmt"
"reflect"
"testing"
)
// FileInfo describes one stored/uploaded file record.
// NOTE(review): field meanings below are read off the names — confirm
// against the table schema (bc_file_info per the commented SQL below).
type FileInfo struct {
	FileId int64
	Uploader int64
	CopyrightOwner int64
	Name string
	Ext string
	Size int64
	Sha string
	Title string
	Tag string
	Description string
	IpfsHash string
	IpfsUrl string //ipfs url
	AliDnaJobId string
	AliDnaFileId string
	Status int
	CreateTime int64
}
//type StructUpdaterField struct {
// FieldName string
// Tag string
// Value interface{}
//}
//type StructUpdater struct {
// object interface{}
// fields []StructUpdaterField
//}
//func NewStructUpdater(object interface{}) (*StructUpdater, error) {
// if reflect.TypeOf(object).Kind() != reflect.Struct {
// return nil, fmt.Errorf("object not a struct type")
// }
// return &FileStoreUpdater{object: object}, nil
//}
//func (updater *FileStoreUpdater) Update(fieldName string) error {
// objValue := reflect.ValueOf(updater.object)
// objType := reflect.TypeOf(updater.object)
// fieldTag, exist := objType.FieldByName(fieldName)
// if !exist {
// return fmt.Errorf("filed name not found")
// }
// fieldValue := objValue.FieldByName(fieldName)
// updater.fields = append(updater.fields, FileStoreUpdateField{fieldName, fieldTag.Name, fieldValue.Interface()})
// return nil
//}
//func (updater *FileStoreUpdater) Do() {
// if len(updater.fields) <= 0 {
// return
// }
// sqlFmt := "update bc_file_info set%s where Id=?"
// fields := ""
// for _, filed := range updater.fields {
// if filed.Tag != "" {
// fields += fmt.Sprintf(" `%s`=?,", filed.Tag)
// }
// }
// if "" == fields {
// return
// }
// fields = fields[:len(fields)-1] //strip last coma
// sqlString := fmt.Sprintf(sqlFmt, fields)
// fmt.Println(sqlString)
// // fmt.Println(mysql.Exec(sqlString, values...))
//}
// reflectInfo prints every field of the struct o, one line per field,
// as "<struct tag> = <value>".
//
// Bugs fixed: the original asserted each field value to string
// (value.(string)), panicking on any non-string field such as the
// int64 fields of FileInfo, and NumField panicked for non-struct
// input. Both cases are handled now.
func reflectInfo(o interface{}) {
	t := reflect.TypeOf(o)
	if t == nil || t.Kind() != reflect.Struct {
		fmt.Println("reflectInfo: not a struct:", o)
		return
	}
	v := reflect.ValueOf(o)
	for i := 0; i < t.NumField(); i++ {
		tag := t.Field(i).Tag
		value := v.Field(i).Interface()
		fmt.Println(tag, "=", value)
	}
}
// Test exercises the (currently commented-out) StructUpdater API.
// NOTE(review): NewStructUpdater is only defined inside the commented
// block above, so this file does not compile as-is — either restore
// the implementation or remove this call.
func Test(t *testing.T) {
	fmt.Println("Test")
	fInfo := &FileInfo{}
	fInfo.IpfsHash = "aa"
	// reflectInfo(*updater)
	// fmt.Printf("%#v", reflect.ValueOf(*fInfo).FieldByName("IpfsHash1"))
	updater, err := NewStructUpdater(*fInfo)
	if err != nil {
		fmt.Println(err)
		return
	}
	_ = updater
	// updater.Update("IpfsHash")
	// for _, field := range updater.Fields() {
	// 	fmt.Println(field)
	// }
}
|
// ClueGetter - Does things with mail
//
// Copyright 2016 Dolf Schimmel, Freeaqingme.
//
// This Source Code Form is subject to the terms of the two-clause BSD license.
// For its contents, please refer to the LICENSE file.
//
package elasticsearch
import (
"encoding/hex"
"encoding/json"
"time"
"cluegetter/address"
"cluegetter/core"
"gopkg.in/olivere/elastic.v3"
)
const ModuleName = "elasticsearch"
// module wires ElasticSearch persistence into cluegetter: it indexes
// every finished milter session into a date-sharded ES index.
type module struct {
	*core.BaseModule
	cg *core.Cluegetter // back-reference to the running cluegetter instance
	esClient *elastic.Client // shared ES client, created in Init
}
// session wraps core.MilterSession so ES-specific JSON (un)marshalling
// can be attached without touching the core type.
type session struct {
	*core.MilterSession
}
// init registers this module with the core module registry at startup.
func init() {
	core.ModuleRegister(&module{})
}
// Name returns the module's registry name.
func (m *module) Name() string {
	return ModuleName
}
// SetCluegetter stores the owning cluegetter instance for later use.
func (m *module) SetCluegetter(cg *core.Cluegetter) {
	m.cg = cg
}
// Enable reports whether the elasticsearch module is switched on in
// the configuration.
func (m *module) Enable() bool {
	return m.cg.Config.Elasticsearch.Enabled
}
// Init connects to ElasticSearch using the configured URL(s) and sniff
// setting, then installs the "cluegetter" index template: a mapping
// for session documents in every cluegetter-* index plus the
// cluegetter-sessions alias. Any failure is fatal, since the module
// cannot operate without a working ES connection.
func (m *module) Init() {
	var err error
	m.esClient, err = elastic.NewClient(
		elastic.SetSniff(m.cg.Config.Elasticsearch.Sniff),
		elastic.SetURL(m.cg.Config.Elasticsearch.Url...),
	)
	if err != nil {
		m.cg.Log.Fatal("Could not connect to ElasticSearch: ", err.Error())
	}
	template := `{
  "template": "cluegetter-*",
  "settings": {
    "number_of_shards": 5
  },
  "aliases" : {
    "cluegetter-sessions" : {}
  },
  "mappings": {
    "session": {
      "_all": {
        "enabled": false
      },
      "properties": {
        "InstanceId": { "type": "integer" },
        "DateConnect": { "type": "date" },
        "DateDisconnect": { "type": "date" },
        "SaslUsername": { "type": "string" },
        "SaslSender": { "type": "string" },
        "SaslMethod": { "type": "string" },
        "CertIssuer": { "type": "string" },
        "CipherBits": { "type": "short" },
        "Cipher": { "type": "string" },
        "TlsVersion": { "type": "string" },
        "Ip": { "type": "string" },
        "ReverseDns": { "type": "string" },
        "Hostname": { "type": "string" },
        "Helo": { "type": "string" },
        "MtaHostName": { "type": "string" },
        "MtaDaemonName": { "type": "string" },
        "Messages": {
          "type": "nested",
          "properties": {
            "QueueId": { "type": "string" },
            "From": {
              "properties": {
                "Local": { "type": "string" },
                "Domain": { "type": "string" }
              }
            },
            "Rcpt": {
              "type": "nested",
              "properties": {
                "Local": { "type": "string" },
                "Domain": { "type": "string" }
              }
            },
            "Headers": {
              "type": "nested",
              "properties": {
                "Key": { "type": "string" },
                "Value": { "type": "string" }
              }
            },
            "Date": { "type": "date" },
            "BodySize": { "type": "integer" },
            "BodyHash": { "type": "string" },
            "Verdict": { "type": "integer" },
            "VerdictMsg": { "type": "string" },
            "RejectScore": { "type": "float" },
            "RejectScoreThreshold": { "type": "float" },
            "TempfailScore": { "type": "float" },
            "TempfailScoreThreshold": { "type": "float" },
            "results": {
              "type": "nested",
              "properties": {
                "Module": { "type": "string" },
                "Verdict": { "type": "integer" },
                "Message": { "type": "string" },
                "Score": { "type": "float" },
                "WeightedScore": { "type": "float" },
                "Duration": { "type": "long" },
                "Determinants": { "type": "string" }
              }
            }
          }
        }
      }
    }
  }
}
	`
	// NOTE(review): Log.Fatal is used printf-style here ("%s") but with
	// plain concatenation above — confirm the logger's signature and
	// unify the two call styles.
	_, err = m.esClient.IndexPutTemplate("cluegetter").BodyString(template).Do()
	if err != nil {
		m.cg.Log.Fatal("Could not create ES template: %s", err.Error())
	}
}
// SessionDisconnect indexes the session into ES once it has finished.
func (m *module) SessionDisconnect(sess *core.MilterSession) {
	m.persistSession(sess)
}
// persistSession indexes the given milter session in ElasticSearch.
// The index name is sharded per connect date (cluegetter-YYYYMMDD) and
// the session id doubles as the document id, so re-indexing the same
// session overwrites rather than duplicates it.
//
// Bug fixed: the esMarshalJSON error was silently discarded, which
// would have indexed an empty document body on marshal failure; it is
// now logged and the session is skipped.
func (m *module) persistSession(coreSess *core.MilterSession) {
	sess := &session{coreSess}
	str, err := sess.esMarshalJSON(m)
	if err != nil {
		m.cg.Log.Error("Could not marshal session: %s", err.Error())
		return
	}
	id := hex.EncodeToString(sess.Id())
	_, err = m.esClient.Index().
		Index("cluegetter-" + sess.DateConnect.Format("20060102")).
		Type("session").
		Id(id).
		BodyString(string(str)).
		Do()
	if err != nil {
		m.cg.Log.Error("Could not index session '%s', error: %s", id, err.Error())
		return
	}
	//fmt.Printf("Indexed tweet %s to index %s, type %s\n", put1.Id, put1.Index, put1.Type)
}
// esMarshalJSON serializes the session for indexing: the Alias type
// sheds session's own (un)marshal methods to prevent recursion, the
// owning cluegetter InstanceId is added, and every message is wrapped
// as an esMessage and emitted under the "Messages" key.
func (s *session) esMarshalJSON(m *module) ([]byte, error) {
	type Alias session
	esMessages := []*esMessage{}
	for _, v := range s.Messages {
		esMessages = append(esMessages, &esMessage{v})
	}
	out := &struct {
		InstanceId uint
		*Alias
		EsMessages []*esMessage `json:"Messages"`
	}{
		InstanceId: m.cg.Instance(),
		Alias: (*Alias)(s),
		EsMessages: esMessages,
	}
	return json.Marshal(out)
}
// esMessage wraps core.Message to attach ES-specific JSON
// (un)marshalling without touching the core type.
type esMessage struct {
	*core.Message
}
// MarshalJSON serializes the wrapped message via the Alias type, which
// strips esMessage's own methods so encoding/json does not recurse
// back into this function.
func (m *esMessage) MarshalJSON() ([]byte, error) {
	type Alias esMessage
	out := &struct {
		*Alias
	}{
		Alias: (*Alias)(m),
	}
	return json.Marshal(out)
}
// UnmarshalJSON rebuilds a session from its ES document form: the
// stored InstanceId is mapped back onto s.Instance and the esMessage
// wrappers are unwrapped into *core.Message values.
func (s *session) UnmarshalJSON(data []byte) error {
	type Alias session // Alias drops session's methods to avoid recursion
	aux := &struct {
		*Alias
		InstanceId uint
		Messages []esMessage
	}{
		Alias: (*Alias)(s),
		Messages: make([]esMessage, 0),
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	// Replace the decoded wrapper slice with plain core messages.
	aux.Alias.Messages = make([]*core.Message, 0)
	for _, msg := range aux.Messages {
		aux.Alias.Messages = append(aux.Alias.Messages, (*core.Message)(msg.Message))
	}
	s.Instance = aux.InstanceId
	return nil
}
// UnmarshalJSON rebuilds a core.Message from its ES document form:
// From/Rcpt are stored as {Local, Domain} pairs and re-joined into
// address values, and each check result's Determinants field is a JSON
// string that is decoded back into a map.
//
// Bug fixed: the decoded determinants value was type-asserted to
// map[string]interface{} unconditionally, panicking whenever the
// stored JSON was valid but not an object (array, string, null, ...);
// that case is now reported as an error entry in the map instead.
func (m *esMessage) UnmarshalJSON(data []byte) error {
	type Alias esMessage
	aux := &struct {
		*Alias
		From struct {
			Local  string
			Domain string
		}
		Rcpt []struct {
			Local  string
			Domain string
		}
		CheckResults []struct {
			Module          string
			SuggestedAction int
			Message         string
			Score           float64
			Determinants    string
			Duration        time.Duration
			WeightedScore   float64
		}
	}{
		Alias: (*Alias)(m),
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	aux.Alias.From = address.FromString(aux.From.Local + "@" + aux.From.Domain)
	for _, v := range aux.Rcpt {
		aux.Alias.Rcpt = append(aux.Alias.Rcpt, address.FromString(v.Local+"@"+v.Domain))
	}
	for _, v := range aux.CheckResults {
		var determinants interface{}
		determinantsMap := make(map[string]interface{}, 0)
		if err := json.Unmarshal([]byte(v.Determinants), &determinants); err != nil {
			determinantsMap["error"] = "Could not unmarshal determinants from Elasticsearch Database: " + err.Error()
		} else if asMap, ok := determinants.(map[string]interface{}); ok {
			determinantsMap = asMap
		} else {
			determinantsMap["error"] = "Determinants from Elasticsearch Database were not a JSON object"
		}
		aux.Alias.CheckResults = append(aux.Alias.CheckResults, &core.MessageCheckResult{
			Module:          v.Module,
			SuggestedAction: v.SuggestedAction,
			Score:           v.Score,
			Duration:        v.Duration,
			WeightedScore:   v.WeightedScore,
			Determinants:    determinantsMap,
		})
	}
	return nil
}
|
package main
import "fmt"
// main demonstrates how Go prints byte and rune values: bytes print as
// their numeric (ASCII) codes, %c formats them as characters, and
// multi-byte Unicode characters need an int/rune rather than a byte.
func main() {
	c1 := byte('a')
	c2 := byte('0')
	// Printed directly, bytes show their ASCII code values.
	fmt.Println("c1 =", c1)
	fmt.Println("c2 =", c2)
	// %c renders the same values as characters.
	fmt.Printf("c1=%c c2=%c \n", c1, c2)
	// Chinese characters are Unicode and do not fit in a byte.
	var c3 int = '北'
	fmt.Printf("c3=%c 对应的码值是%d \n", c3, c3)
	var c4 int = 22269
	fmt.Printf("c4=%c \n", c4)
	// Adding a character constant to an int yields its code point + 10.
	n1 := 10 + 'a'
	fmt.Println("n1=", n1)
}
|
package main
import (
"fmt"
"github.com/littleajax/adventofcode/days/day12"
"github.com/littleajax/adventofcode/days/day10"
"github.com/littleajax/adventofcode/days/day7"
"github.com/littleajax/adventofcode/days/day8"
"github.com/littleajax/adventofcode/days/day9"
"github.com/littleajax/adventofcode/days/day5"
"github.com/littleajax/adventofcode/days/day6"
"github.com/littleajax/adventofcode/days/day4"
"github.com/littleajax/adventofcode/days/day2"
"github.com/littleajax/adventofcode/days/day3"
"github.com/littleajax/adventofcode/days/day1"
)
// main runs every implemented Advent of Code 2020 day in order and
// prints each puzzle's answer(s). Day 11 is disabled (commented out).
func main() {
	fmt.Println("Day 1: -----------")
	day1Result := day1.ExpenseReport(day1.ProcessInputs())
	fmt.Println(day1Result)
	day1Result2 := day1.ThreeEntriesExpenseReport(day1.ProcessInputs())
	fmt.Println(day1Result2)
	fmt.Println("Day 2: -----------")
	day2Result := day2.PasswordCheck(day2.ProcessInputs())
	fmt.Println(day2Result)
	day2Result2 := day2.PasswordPositionCheck(day2.ProcessInputs())
	fmt.Println(day2Result2)
	fmt.Println("Day 3: -----------")
	day3Result := day3.TreesSmashed(day3.ProcessInputs())
	fmt.Println(day3Result)
	fmt.Println("Day 4: -----------")
	day4Result := day4.ValidPassportCount(day4.ProcessInputs())
	fmt.Println(day4Result)
	fmt.Println("Day 5: -----------")
	day5Results := day5.GetHighestSeatNumber(day5.ProcessInputs())
	fmt.Println(day5Results)
	fmt.Println("Day 6: -----------")
	day6Results := day6.TallyCustomsFields(day6.ProcessInputs())
	fmt.Println(day6Results)
	day6q2Results := day6.TallyEveryonesCustomsFields(day6.ProcessEveryonesInputs())
	fmt.Println(day6q2Results)
	fmt.Println("Day 7: -----------")
	day7Results := day7.ShinyGoldBagContainers(day7.ProcessInputs())
	fmt.Println(day7Results)
	day7q2Results := day7.ShinyGoldBagChildren(day7.ProcessWithChildrenInputs())
	fmt.Println(day7q2Results)
	fmt.Println("Day 8: -----------")
	day8Results := day8.GetFinalAccumulatorValue(day8.ProcessInputs())
	fmt.Println(day8Results)
	day8q2Results := day8.GetUncorruptedAccumulatorValue(day8.ProcessInputs())
	fmt.Println(day8q2Results)
	fmt.Println("Day 9: -----------")
	day9Results := day9.FirstInvalidNumber(day9.ProcessInputs())
	fmt.Println(day9Results)
	// Day 9 part 2 reuses part 1's answer as the search target.
	day9q2low, day9q2high := day9.SmallestAndLargestOfAContiguousRange(day9.ProcessInputs(), day9Results)
	fmt.Println(day9q2low + day9q2high)
	fmt.Println("Day 10: -----------")
	day10Results := day10.JoltageCalculator(day10.ProcessInputs())
	fmt.Println(day10Results)
	day10q2Results := day10.PermutationsCalculator(day10.ProcessInputs())
	fmt.Println(day10q2Results)
	fmt.Println("Day 11: -----------")
	//so bad
	//day11Results := day11.GetFerryRoundCount(5)
	//fmt.Println(day11Results)
	fmt.Println("Day 12: -----------")
	// Ship starts at the origin facing east (Facing 0).
	ship := day12.Ship{
		Facing: 0,
	}
	day12Results := day12.ExecuteOrders(ship, day12.ProcessInputs())
	fmt.Println(day12Results)
	// Part 2 steers via a waypoint that starts at (10, 1).
	waypointShip := day12.WaypointShip{
		WaypointX: 10,
		WaypointY: 1,
	}
	day12q2Results := day12.ExecuteWaypointOrders(waypointShip, day12.ProcessInputs())
	fmt.Println(day12q2Results)
}
|
package timeformat
import (
"testing"
"time"
)
// TestFormatString checks, for each supported format token, that
// Layout translates it to the expected Go reference layout and that
// formatting the current time through T matches formatting with the
// translated layout directly.
//
// Bug fixed: the first Errorf printed the arguments swapped (want as
// got and vice versa), making failures misleading.
func TestFormatString(t *testing.T) {
	inputs := []struct {
		in   string
		want string
	}{
		{in: "YYYY", want: "2006"},
		{in: "YY", want: "06"},
		{in: "MMMM", want: "January"},
		{in: "MMM", want: "Jan"},
		{in: "MM", want: "01"},
		{in: "M", want: "1"},
		{in: "DD", want: "02"},
		{in: "D", want: "2"},
		{in: "hh", want: "03"},
		{in: "HH", want: "15"},
		{in: "h", want: "3"},
		{in: "wwww", want: "Monday"},
		{in: "www", want: "Mon"},
		{in: "mm", want: "04"},
		{in: "m", want: "4"},
		{in: "ss", want: "05"},
		{in: "s", want: "5"},
		{in: "f", want: "0"},
		{in: "F", want: "9"},
		{in: "a", want: "pm"},
		{in: "A", want: "PM"},
		{in: "z", want: "MST"},
		{in: "-Z:Z:Z", want: "-07:00:00"},
		{in: "Z:Z:Z", want: "Z07:00:00"},
		{in: "-Z:Z", want: "-07:00"},
		{in: "Z:Z", want: "Z07:00"},
		{in: "-ZZZ", want: "-070000"},
		{in: "ZZZ", want: "Z070000"},
		{in: "-ZZ", want: "-0700"},
		{in: "ZZ", want: "Z0700"},
		{in: "-Z", want: "-07"},
		{in: "Z", want: "Z07"},
		{in: "zZZ", want: "MSTZ0700"},
		{in: "DD-MM-YYYY hh:mm:ss aZZ", want: "02-01-2006 03:04:05 pmZ0700"},
		{in: "DD-MM-YYYY hh:mm:ss az", want: "02-01-2006 03:04:05 pmMST"},
		{in: "wwww, MMMM DD YYYY", want: "Monday, January 02 2006"},
		{in: "ww ww, MMMM DD YYYY", want: "ww ww, January 02 2006"},
		{in: "YY YYYY YY-YY-YYY-YYYY-YY", want: "06 2006 06-06-06Y-2006-06"},
		{in: "hh:mm:ss.ffff", want: "03:04:05.0000"},
		{in: "hh:mm:ss.FFFF", want: "03:04:05.9999"},
		{in: "-Z:ZZZ", want: "-07:00Z0700"},
		{in: "hh:mm a", want: "03:04 pm"},
		{in: "www MMM D HH:mm:ss -ZZ z YYYY", want: "Mon Jan 2 15:04:05 -0700 MST 2006"},
	}
	now := time.Now().UTC()
	for _, i := range inputs {
		if out := Layout(i.in); out != i.want {
			t.Errorf("got: %s\nwant: %s\n", out, i.want)
		} else if ft := T(now).Format(i.in); ft != now.Format(i.want) {
			t.Errorf("got: %s\nwant: %s\n", ft, now.Format(i.want))
		}
	}
}
|
/*
You have a large electronic screen which can display up to 998244353 decimal digits.
The digits are displayed in the same way as on different electronic alarm clocks: each place for a digit consists of 7 segments which can be turned on and off to compose different digits.
The following picture describes how you can display all 10 decimal digits:
https://espresso.codeforces.com/39cedf07ce9ef18d7ec074f319640a9857b9f8cb.png
As you can see, different digits may require different number of segments to be turned on.
For example, if you want to display 1, you have to turn on 2 segments of the screen, and if you want to display 8, all 7 segments of some place to display a digit should be turned on.
You want to display a really large integer on the screen. Unfortunately, the screen is bugged: no more than n segments can be turned on simultaneously.
So now you wonder what is the greatest integer that can be displayed by turning on no more than n segments.
Your program should be able to process t different test cases.
Input
The first line contains one integer t (1≤t≤100) — the number of test cases in the input.
Then the test cases follow, each of them is represented by a separate line containing one integer n (2≤n≤10^5) — the maximum number of segments that can be turned on in the corresponding testcase.
It is guaranteed that the sum of n over all test cases in the input does not exceed 10^5.
Output
For each test case, print the greatest integer that can be displayed by turning on no more than n segments of the screen. Note that the answer may not fit in the standard 32-bit or 64-bit integral data type.
Example
input
2
3
4
output
7
11
*/
package main
import (
"bytes"
"strings"
)
// main smoke-tests maxint against the two sample cases from the
// problem statement (n=3 -> "7", n=4 -> "11").
func main() {
	assert(maxint(3) == "7")
	assert(maxint(4) == "11")
}
// assert panics when x is false; a tiny inline test helper.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// maxint returns the greatest decimal integer displayable with at most
// n lit segments: a leading '7' (3 segments) absorbs an odd remainder,
// and every other digit is a '1' (2 segments each) to maximize length.
func maxint(n int) string {
	var buf bytes.Buffer
	if n&1 == 1 {
		buf.WriteByte('7')
	}
	buf.WriteString(strings.Repeat("1", n/2-(n&1)))
	return buf.String()
}
|
package main
import (
"fmt"
"sync"
appConfig "github.com/anuj-verma/profilopedia/config"
services "github.com/anuj-verma/profilopedia/services"
"github.com/dghubble/go-twitter/twitter"
)
// fetchGithubData searches GitHub users for "anuj" (default page and
// page size from config) and prints each result's login. Intended to
// run as a goroutine; wg is decremented when it returns.
func fetchGithubData(wg *sync.WaitGroup) {
	// Decrement the counter when the goroutine completes.
	defer wg.Done()
	githubClient := services.GetGithubClient(appConfig.GithubApiUrl())
	// NOTE(review): Search's outcome is not checked here; on failure
	// Response may be empty or stale — confirm the client's contract.
	githubClient.Search("anuj", appConfig.DefaultPage(), appConfig.DefaultPerPage())
	data := githubClient.Response
	for _, value := range data.Items {
		fmt.Println("Github: ", value.Login)
	}
}
// fetchTwitterData searches Twitter users for "anuj" using the
// configured OAuth credentials and prints each result's name.
// Intended to run as a goroutine; wg is decremented when it returns.
//
// Bug fixed: the Search error was silently discarded, so auth or
// network failures produced no output at all; it is now reported.
func fetchTwitterData(wg *sync.WaitGroup) {
	// Decrement the counter when the goroutine completes.
	defer wg.Done()
	twitterClient := services.GetTwitterClient(
		appConfig.TwitterConsumerKey(),
		appConfig.TwitterConsumerSecret(),
		appConfig.TwitterAccessToken(),
		appConfig.TwitterAccessTokenSecret(),
	)
	search, _, err := twitterClient.Users.Search("anuj", &twitter.UserSearchParams{
		Query: "anuj",
	})
	if err != nil {
		fmt.Println("Twitter: search failed:", err)
		return
	}
	for _, value := range search {
		fmt.Println("Twitter: ", value.Name)
	}
}
// main loads the app config, then fetches GitHub and Twitter profile
// data concurrently and waits for both fetches to finish.
func main() {
	fmt.Println("Loading app configuration file...")
	appConfig.LoadConfig("./config")
	var wg sync.WaitGroup
	// Increment the WaitGroup counter to number of go routines
	wg.Add(2)
	// Call go routines
	go fetchGithubData(&wg)
	go fetchTwitterData(&wg)
	// Wait for all go routines to complete
	wg.Wait()
}
|
package main
import (
"fmt"
"os"
"github.com/odpf/stencil/cmd"
)
// Process exit codes.
const (
	exitOK = 0 // currently unused; kept for symmetry with exitError
	exitError = 1
)
// main builds the stencil root command and runs it; on failure the
// error goes to stderr and the process exits with a non-zero status.
func main() {
	err := cmd.New().Execute()
	if err == nil {
		return
	}
	fmt.Fprintln(os.Stderr, err)
	os.Exit(exitError)
}
|
package pg
import (
"github.com/kyleconroy/sqlc/internal/sql/ast"
)
// ArrayRef mirrors PostgreSQL's ArrayRef primitive parse node: a
// subscripted reference into an array value (e.g. a[1] or a[1:2]),
// possibly as the target of an assignment (Refassgnexpr).
// NOTE(review): field semantics follow the PostgreSQL node of the same
// name — confirm against the upstream primnodes.h definition.
type ArrayRef struct {
	Xpr ast.Node
	Refarraytype Oid
	Refelemtype Oid
	Reftypmod int32
	Refcollid Oid
	Refupperindexpr *ast.List
	Reflowerindexpr *ast.List
	Refexpr ast.Node
	Refassgnexpr ast.Node
}
// Pos implements ast.Node; this node carries no source position, so it
// always reports 0.
func (n *ArrayRef) Pos() int {
	return 0
}
|
package config
import "os"
// Aliyun OSS credentials and target bucket, read once from the
// environment at package initialization. Any variable is empty when
// the corresponding environment variable is unset.
var (
	AccessID = os.Getenv("OSS_ACCESS_KEY_ID")
	AccessKey = os.Getenv("OSS_ACCESS_KEY_SECRET")
	BucketName = os.Getenv("OSS_BUCKET")
	Endpoint = os.Getenv("OSS_ENDPOINT")
)
|
package models
import "gopkg.in/mgo.v2/bson"
// Prefecture is one prefecture document: the numeric code (stored as
// MongoDB "_id"), its native name and its romaji spelling.
type Prefecture struct {
	Id int `json:"id" bson:"_id"`
	Name string `json:"name" bson:"name"`
	Romaji string `json:"romaji" bson:"romaji"`
}
// Prefectures is a JSON envelope wrapping a list of prefectures.
type Prefectures struct {
	Prefectures []Prefecture `json:"prefectures"`
}
// GetPrefectures returns every prefecture document sorted by its
// numeric code (_id). The mgo session is closed before returning.
func GetPrefectures() (prefectures []Prefecture, err error) {
	session := GetSession()
	defer session.Close()
	err = session.Prefectures().Find(bson.M{}).Sort("_id").All(&prefectures)
	return
}
/*
-Calculadora simple
-Contiene suma,resta,multiplicación y división
-Uso fácil en terminal
-Esta calculadora solo contempla dos valores en cualquier operación
*/
package main
import (
"fmt"
"os"
"os/exec"
"time"
"strconv"
"github.com/fatih/color"
)
// main clears the terminal, prints the banner and the current time,
// then runs the interactive calculator.
func main() {
	Limpiar()
	Banner()
	Tiempo()
	Calculadora()
}
// Limpiar clears the terminal by shelling out to `clear`.
// NOTE(review): this only works where `clear` exists (not on plain
// Windows consoles); Run's error is deliberately ignored (best effort).
func Limpiar() {
	c := exec.Command("clear")
	c.Stdout = os.Stdout
	c.Run()
}
// Banner prints the ASCII-art "Calculadora" banner in bold blue.
func Banner() {
	b := color.New(color.FgBlue).Add(color.Bold)
	b.Println("")
	b.Println("  .--.         .-.            .-.           .-.                  ")
	b.Println(" : .--'        : :            : :           : :                  ")
	b.Println(" : :    .--.   : :   .--.  .-.: :   .--.  .-' :  .--.  .--.  .--. ")
	b.Println(" : :__ ' .; ;  : :_ '  ..'' :; :: :_ ' .; ; ' .; :' .; :: ..'' .; ; ")
	b.Println(" `.__.'`.__,_;`.__;`.__.'`.__.'`.__;`.__,_;`.__.'`.__.':_;  `.__,_;")
	b.Println("")
}
// Tiempo prints the current time and each of its components (year,
// month, day, hour, minute, second) in red underlined text.
func Tiempo() {
	a := color.New(color.FgRed).Add(color.Underline)
	ahora := time.Now()
	a.Println("Horario:", ahora)
	año := ahora.Year()
	mes := ahora.Month()
	dia := ahora.Day()
	hora := ahora.Hour()
	minutos := ahora.Minute()
	segundos := ahora.Second()
	a.Println("Año:", año)
	a.Println("Mes:", mes)
	a.Println("Dia:", dia)
	a.Println("Hora:", hora)
	a.Println("Minutos:", minutos)
	a.Println("Segundos:", segundos)
	fmt.Println("")
	a.Println("∆∆HORARIO FORMATO INTERNACIONAL√√")
	fmt.Println("")
}
// Calculadora prompts the user to pick one of four arithmetic
// operations, reads two integers and prints the result.
//
// Bug fixed: the division branch passed a printf-style format string
// to Println, so the literal "%d" was printed instead of the quotient
// (and %d was the wrong verb for a float anyway); the result is now
// formatted with Sprintf like the other branches. The if/else chain
// was also replaced with an idiomatic switch.
func Calculadora() {
	c := color.New(color.FgCyan).Add(color.Underline)
	calc := leida("Digite el número de la operación con la que quieras trabajar:\n\n [1]Suma\n\n [2]Resta\n\n [3]Multiplicación\n\n [4]División\n\n \t[<>]Elige una opción: ")
	fmt.Println(calc)
	switch calc {
	case "1":
		c.Println("\tHAS ELEGIDO SUMA!!\n")
		num1, num2 := numeros()
		result := num1 + num2
		c.Println(fmt.Sprintf("\nEl resultado de la suma es: %d\n", result))
	case "2":
		c.Println("\tHAS ELEGIDO RESTA!\n")
		num1, num2 := numeros()
		result := num1 - num2
		c.Println(fmt.Sprintf("\nEl resultado de la resta es: %d\n", result))
	case "3":
		c.Println("\tHAS ELEGIDO MULTIPLICACIÓN!\n")
		num1, num2 := numeros()
		result := num1 * num2
		c.Println(fmt.Sprintf("\nEl resultado de la multiplicación es: %d\n", result))
	case "4":
		c.Println("\tHAS ELEGIDO DIVISIÓN!\n")
		num1, num2 := numeros()
		// NOTE(review): num2 == 0 yields +Inf/NaN (float division); no crash.
		result := float32(num1) / float32(num2)
		c.Println(fmt.Sprintf("\nEl resultado de la división es: %v\n", result))
	default:
		c.Println("\tEsta opción no existe\n")
	}
}
// leida prints the prompt in bold yellow and reads one
// whitespace-delimited token from stdin.
// NOTE(review): Scanln's error is ignored; on EOF or bad input the
// empty string is returned.
func leida(message string) string {
	d := color.New(color.FgYellow).Add(color.Bold)
	d.Print(message)
	var input string
	fmt.Scanln(&input)
	return input
}
// numeros prompts for two integers and returns them.
// NOTE(review): strconv.Atoi errors are discarded, so any non-numeric
// input silently becomes 0 — confirm that is acceptable.
func numeros() (int, int) {
	num1String := leida("[+]Ingresa el primer número: ")
	num1, _ := strconv.Atoi(num1String)
	num2String := leida("[+]Ingresa el segundo número: ")
	num2, _ := strconv.Atoi(num2String)
	return num1, num2
}
|
// Copyright 2017 orijtech, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package memory
import (
"errors"
"sync"
)
// Memory is an in-memory, mutex-guarded API-key -> secret store.
// The zero value is usable: the map is created lazily on first write.
type Memory struct {
	m  map[string]string // apiKey -> secret
	mu sync.Mutex        // guards m
}

// Close implements io.Closer; an in-memory store has nothing to release.
func (m *Memory) Close() error {
	return nil
}

// UpsertSecret creates or replaces the secret stored for apiKey.
// It always returns nil.
//
// Bug fixed: a zero-value Memory used to panic here (assignment to a
// nil map); the map is now initialized lazily.
func (m *Memory) UpsertSecret(apiKey, apiSecret string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.m == nil {
		m.m = make(map[string]string)
	}
	m.m[apiKey] = apiSecret
	return nil
}

// DeleteAPIKey removes apiKey and its secret, if present. It always
// returns nil (deleting from a nil map is a no-op).
func (m *Memory) DeleteAPIKey(apiKey string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.m, apiKey)
	return nil
}

// NewWithMap returns a Memory backed directly by m (the map is NOT
// copied), so the caller must not mutate m concurrently afterwards.
func NewWithMap(m map[string]string) (*Memory, error) {
	return &Memory{m: m}, nil
}

// errNoSuchSecret is returned by LookupSecret for unknown API keys.
var errNoSuchSecret = errors.New("no such secret")

// LookupSecret returns the secret for apiKey as a byte slice, or
// errNoSuchSecret when the key is unknown.
func (m *Memory) LookupSecret(apiKey string) ([]byte, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	secret, ok := m.m[apiKey]
	if !ok {
		return nil, errNoSuchSecret
	}
	return []byte(secret), nil
}
|
// -------------------------------------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// --------------------------------------------------------------------------------------------
package appgw
import "errors"
var (
	// ErrEmptyConfig signals an empty App Gateway config (APPG001).
	ErrEmptyConfig = errors.New("empty App Gateway config (APPG001)")
	// ErrMultipleServiceBackendPortBinding signals that a service binds more than one backend port (APPG002).
	ErrMultipleServiceBackendPortBinding = errors.New("more than one service-backend port binding is not allowed (APPG002)")
	// ErrGeneratingProbes signals a failure generating health probes (APPG003).
	ErrGeneratingProbes = errors.New("unable to generate health probes (APPG003)")
	// ErrGeneratingBackendSettings signals a failure generating backend HTTP settings (APPG004).
	ErrGeneratingBackendSettings = errors.New("unable to generate backend http settings (APPG004)")
	// ErrGeneratingListeners signals a failure generating frontend listeners (APPG005).
	ErrGeneratingListeners = errors.New("unable to generate frontend listeners (APPG005)")
	// ErrGeneratingRoutingRules signals a failure generating request routing rules (APPG006).
	ErrGeneratingRoutingRules = errors.New("unable to generate request routing rules (APPG006)")
	// ErrKeyNoDefaults signals that neither default redirect nor default backend config is present (APPG007).
	ErrKeyNoDefaults = errors.New("either a DefaultRedirectConfiguration or (DefaultBackendAddressPool + DefaultBackendHTTPSettings) must be configured (APPG007)")
	// ErrKeyEitherDefaults signals that a URL Path Map has both kinds of defaults at once (APPG008).
	ErrKeyEitherDefaults = errors.New("URL Path Map must have either DefaultRedirectConfiguration or (DefaultBackendAddressPool + DefaultBackendHTTPSettings) but not both (APPG008)")
	// ErrKeyNoBorR signals a path rule with neither a redirect nor a backend configuration (APPG009).
	ErrKeyNoBorR = errors.New("A valid path rule must have one of RedirectConfiguration or (BackendAddressPool + BackendHTTPSettings) (APPG009)")
	// ErrKeyEitherBorR signals a path rule with both a redirect and a backend configuration (APPG010).
	ErrKeyEitherBorR = errors.New("A Path Rule must have either RedirectConfiguration or (BackendAddressPool + BackendHTTPSettings) but not both (APPG010)")
	// ErrKeyNoPrivateIP signals a missing private IP while UsePrivateIP routing is configured (APPG011).
	ErrKeyNoPrivateIP = errors.New("A Private IP must be present in the Application Gateway FrontendIPConfiguration if the controller is configured to UsePrivateIP for routing rules (APPG011)")
	// ErrKeyNoPublicIP signals a missing public IP in the FrontendIPConfiguration (APPG012).
	ErrKeyNoPublicIP = errors.New("A Public IP must be present in the Application Gateway FrontendIPConfiguration (APPG012)")
	// ErrIstioMultipleServiceBackendPortBinding is the Istio variant of APPG002 (APPG013).
	ErrIstioMultipleServiceBackendPortBinding = errors.New("more than one service-backend port binding is not allowed (APPG013)")
	// ErrIstioResolvePortsForServices signals unresolved backend ports for some Istio services (APPG014).
	ErrIstioResolvePortsForServices = errors.New("unable to resolve backend port for some services (APPG014)")
	// ErrCreatingBackendPools signals a failure generating backend address pools (APPG015).
	ErrCreatingBackendPools = errors.New("unable to generate backend address pools (APPG015)")
)
|
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"flag"
"github.com/golang/dep"
"github.com/golang/dep/gps"
"github.com/golang/dep/gps/pkgtree"
"github.com/pkg/errors"
)
// Name returns the subcommand name used on the dep command line.
func (cmd *hashinCommand) Name() string { return "hash-inputs" }
// Args returns the usage string for positional arguments (none accepted).
func (cmd *hashinCommand) Args() string { return "" }
// ShortHelp returns the one-line help text; empty because the command is hidden.
func (cmd *hashinCommand) ShortHelp() string { return "" }
// LongHelp returns the detailed help text; empty because the command is hidden.
func (cmd *hashinCommand) LongHelp() string { return "" }
// Hidden reports that this command is excluded from the help listing.
func (cmd *hashinCommand) Hidden() bool { return true }
// Register adds command-specific flags; hash-inputs has none.
func (cmd *hashinCommand) Register(fs *flag.FlagSet) {}
// hashinCommand implements the hidden "hash-inputs" debugging subcommand,
// which prints the inputs that would be fed to the solver's hash.
type hashinCommand struct{}
// Run loads the current project, prepares a solver from its parameters, and
// prints the string form of the solver's hashed inputs to the context's
// output stream. It is a debugging aid for inspecting solve inputs.
func (hashinCommand) Run(ctx *dep.Ctx, args []string) error {
	proj, err := ctx.LoadProject()
	if err != nil {
		return err
	}
	srcMgr, err := ctx.SourceManager()
	if err != nil {
		return err
	}
	srcMgr.UseDefaultSignalHandling()
	defer srcMgr.Release()

	solveParams := proj.MakeParams()
	tree, err := pkgtree.ListPackages(proj.ResolvedAbsRoot, string(proj.ImportRoot))
	if err != nil {
		return errors.Wrap(err, "gps.ListPackages")
	}
	solveParams.RootPackageTree = tree

	solver, err := gps.Prepare(solveParams, srcMgr)
	if err != nil {
		return errors.Wrap(err, "prepare solver")
	}
	ctx.Out.Println(gps.HashingInputsAsString(solver))
	return nil
}
|
package main
import (
"fmt"
"net/http"
"strconv"
"sync"
"time"
)
// Debug enables more verbose output messages.
var Debug bool
// guesses holds every username/password combination tried against a manager.
var guesses []Guess
// scannerConfig carries the parsed command-line configuration for this run.
var scannerConfig ScannerConfig
// successfullLogins maps each Tomcat instance to the guess that worked on it.
// NOTE(review): written from worker goroutines in request() without a lock —
// confirm synchronization or guard with a mutex.
var successfullLogins map[TcInstance]Guess
// version is the scanner's release version string, printed at startup.
var version = "1.1.0"
// kudos is the author credit line printed at startup.
var kudos = "By Michael Eder. @edermi on Github, @michael_eder_ on Twitter."
// main wires the scanner together: parse the configuration, build the
// credential guesses, start the worker pool, feed it targets, wait for the
// workers to drain the queue, and finally report every successful login.
func main() {
	prettyPrintLn(info, fmt.Sprintf("go-tomcat-mgmt-scanner version %s", version))
	prettyPrintLn(info, kudos)
	scannerConfig = parseCommandLineArgs()
	defer timeTrack(time.Now()) // report total runtime on exit
	guesses = buildGuesses()
	successfullLogins = make(map[TcInstance]Guess)

	// Work channel feeding instances to the goroutines; the WaitGroup lets us
	// block until every worker has finished.
	jobs := make(chan TcInstance, 20)
	var workers sync.WaitGroup
	spawnWorkers(&workers, jobs)
	fillQueue(jobs)
	workers.Wait()

	for instance, guess := range successfullLogins {
		target := instance.host + ":" + strconv.FormatUint(uint64(instance.port), 10) + instance.managerPath
		prettyPrintLn(profit, fmt.Sprintf("%s:%s at %s", guess.username, guess.password, target))
	}
}
// fillQueue feeds every (target, port) combination into workQueue for the
// workers to scan, printing a progress line roughly every 10%. When the
// target source channel is exhausted, workQueue is closed, which signals the
// worker goroutines to return.
func fillQueue(workQueue chan<- TcInstance) {
	numTargets := scannerConfig.targetSize * uint64(len(scannerConfig.ports))
	// Progress step: with fewer than 10 work items, numTargets/10 is 0 and the
	// old check (progress > tenths*0) fired on every single iteration,
	// spamming bogus progress lines. Clamp the step to at least 1.
	step := numTargets / 10
	if step == 0 {
		step = 1
	}
	progress := uint64(0)
	tenths := uint64(1)
	for {
		element, ok := <-scannerConfig.targetChan
		if !ok {
			break
		}
		prettyPrintLn(debug, fmt.Sprintf("Now sending to %v\n", element))
		for _, port := range scannerConfig.ports {
			if progress > tenths*step {
				prettyPrintLn(info, fmt.Sprintf("~%d0%% (%d/%d)", tenths, progress, numTargets))
				tenths++
			}
			workQueue <- TcInstance{element.String(), port, scannerConfig.managerPath}
			progress++
		}
	}
	close(workQueue) // a closed channel tells the worker goroutines to return
}
// spawnWorkers starts scannerConfig.goroutines workers. Each worker owns one
// HTTP client, pulls instances from workQueue until the channel is closed,
// and tries every credential guess against instances that expose a manager.
func spawnWorkers(wg *sync.WaitGroup, workQueue <-chan TcInstance) {
	for worker := uint(0); worker < scannerConfig.goroutines; worker++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Own client per worker so we control the timeout instead of
			// relying on http.DefaultClient (which has none).
			client := &http.Client{
				Timeout: time.Second * 5,
			}
			// range ends when the channel is closed — idiomatic replacement
			// for the manual `tc, ok := <-workQueue; if ok == false` pattern.
			for tc := range workQueue {
				if !tc.check(client, false) {
					continue
				}
				for _, guess := range guesses {
					// The request error is intentionally ignored: a failed
					// request just means this guess could not be verified.
					if success, _ := request(&tc, &guess); success {
						break
					}
				}
			}
		}()
	}
}
// check probes tc's manager path and reports whether a manager interface we
// can attempt logins against was found. A 401 means the manager is present
// behind basic auth; 404 means absent; 403 means present but blocked. A 400
// on a plaintext probe usually means the port speaks TLS, so the probe is
// retried once over HTTPS.
func (tc *TcInstance) check(client *http.Client, TLSenabled bool) (managerAvailable bool) {
	var resp *http.Response
	var err error
	if TLSenabled {
		resp, err = client.Get(buildRequestURL(true, tc))
	} else {
		resp, err = client.Get(buildRequestURL(false, tc))
	}
	if err != nil {
		prettyPrintLn(debug, err.Error()) // for debugging socket issues
		return false
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusNotFound:
		prettyPrintLn(badnews, fmt.Sprintf("Manager not found at %s:%d%s", tc.host, tc.port, tc.managerPath))
		return false
	case http.StatusForbidden:
		// The old message wrongly reported a 403 as "not found"; distinguish
		// "blocked" from "absent" in the output.
		prettyPrintLn(badnews, fmt.Sprintf("Manager forbidden at %s:%d%s", tc.host, tc.port, tc.managerPath))
		return false
	case http.StatusUnauthorized:
		prettyPrintLn(goodnews, fmt.Sprintf("Manager found at %s:%d%s", tc.host, tc.port, tc.managerPath))
		return true
	case http.StatusBadRequest:
		// Likely a TLS port answering a plaintext request. Retry over HTTPS,
		// but only once: the old code recursed unconditionally and looped
		// forever when the HTTPS probe also returned 400.
		if !TLSenabled {
			return tc.check(client, true)
		}
		return false
	default:
		prettyPrintLn(info, fmt.Sprintf("HTTP %d without authentication at %s:%d%s", resp.StatusCode, tc.host, tc.port, tc.managerPath))
		return false
	}
}
// request attempts a single basic-auth login against tc's manager URL with
// the given guess. On HTTP 200 the credentials are recorded in
// successfullLogins and success is reported; a transport error is returned.
func request(tc *TcInstance, guess *Guess) (success bool, err error) {
	client := &http.Client{
		Timeout: time.Second * 5,
	}
	url := buildRequestURL(false, tc)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		// The old code ignored this error and would have dereferenced a nil
		// request on the next line.
		return false, err
	}
	req.SetBasicAuth(guess.username, guess.password)
	resp, err := client.Do(req)
	if err != nil {
		return false, err
	}
	// Close the body so the underlying connection can be reused; the old
	// code leaked it on every call.
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusOK {
		prettyPrintLn(profit, fmt.Sprintf("Success! %s:%s on %s", guess.username, guess.password, url))
		// NOTE(review): this map is written from multiple worker goroutines
		// without synchronization — confirm and guard with a mutex.
		successfullLogins[*tc] = *guess
		return true, nil
	}
	return false, nil
}
|
package event
import (
"context"
"github.com/dwaynelavon/es-loyalty-program/internal/app/eventsource"
"github.com/dwaynelavon/es-loyalty-program/internal/app/loyalty"
"github.com/dwaynelavon/es-loyalty-program/internal/app/user"
"github.com/pkg/errors"
"go.uber.org/zap"
)
// saga reacts to user events and dispatches the follow-up loyalty commands
// (referral completion and point awards).
type saga struct {
dispatcher eventsource.CommandDispatcher // issues the commands this saga produces
repo user.ReadRepo // read-side user lookups (e.g. by referral code)
pointsMapping loyalty.PointsMappingService // maps loyalty actions to point values
logger *zap.Logger
}
// NewSaga builds the user-event saga and exposes it as an
// eventsource.EventHandler.
func NewSaga(
	logger *zap.Logger,
	dispatcher eventsource.CommandDispatcher,
	repo user.ReadRepo,
	pointsMapping loyalty.PointsMappingService,
) eventsource.EventHandler {
	handler := &saga{
		logger:        logger,
		dispatcher:    dispatcher,
		repo:          repo,
		pointsMapping: pointsMapping,
	}
	return handler
}
// EventTypesHandled lists the event types this saga subscribes to.
func (s *saga) EventTypesHandled() []string {
	handled := []string{user.UserCreatedEventType}
	return handled
}
// Sync re-synchronizes saga state for the given aggregate.
// Not implemented: calling it is a programmer error and panics.
func (s *saga) Sync(ctx context.Context, aggregateID string) error {
panic("sync for user event saga not implemented")
}
// Handle routes an incoming event to its handler. Events of any type other
// than user.UserCreatedEventType are ignored and return nil.
func (s *saga) Handle(
	ctx context.Context,
	event eventsource.Event,
) error {
	if event.EventType == user.UserCreatedEventType {
		return s.handleUserCreatedEvent(ctx, event)
	}
	return nil
}
// handleUserCreatedEvent awards sign-up points for a new user. When the user
// signed up with a referral code it additionally completes the referral on
// the referring user and awards points to both sides.
func (s *saga) handleUserCreatedEvent(
	ctx context.Context,
	event eventsource.Event,
) error {
	rawApplier, err := user.GetApplier(event)
	if err != nil {
		return err
	}
	applier, ok := rawApplier.(*user.Created)
	if !ok {
		return errors.New("invalid applier for event provided")
	}
	payload, errPayload := applier.GetDeserializedPayload()
	if errPayload != nil {
		return errPayload
	}
	// No referral code: plain sign-up bonus only.
	if payload.ReferredByCode == nil {
		return s.handleSignUpWithoutReferral(ctx, event)
	}
	referringUser, errReferringUser := s.repo.
		UserByReferralCode(ctx, *payload.ReferredByCode)
	if errReferringUser != nil {
		// Wrap instead of Errorf so the repository error is preserved as the
		// cause (errors.Cause / %+v still reach it).
		return errors.Wrapf(
			errReferringUser,
			"referring user not found for referral code: %v",
			*payload.ReferredByCode,
		)
	}
	errCompleteReferral := s.dispatcher.Dispatch(
		ctx,
		&loyalty.CompleteReferral{
			CommandModel: eventsource.CommandModel{
				ID: referringUser.UserID,
			},
			ReferredByCode:    *payload.ReferredByCode,
			ReferredUserEmail: payload.Email,
			ReferredUserID:    event.AggregateID,
		},
	)
	if errCompleteReferral != nil {
		return errCompleteReferral
	}
	// Earn points for both users: first the referrer, then the new user.
	if err := s.handleReferUser(ctx, event, referringUser.UserID); err != nil {
		return err
	}
	if err := s.handleSignUpWithReferral(ctx, event); err != nil {
		return err
	}
	return nil
}
// handleSignUpWithoutReferral awards the plain sign-up bonus to the user that
// created the account, using the configured points mapping.
func (s *saga) handleSignUpWithoutReferral(
	ctx context.Context,
	event eventsource.Event,
) error {
	points, err := s.pointsMapping.
		Map(loyalty.PointsActionSignUpWithoutReferral)
	if err != nil {
		return err
	}
	cmd := &loyalty.EarnPoints{
		CommandModel: eventsource.CommandModel{
			ID: event.AggregateID,
		},
		Points: *points,
	}
	return s.dispatcher.Dispatch(ctx, cmd)
}
// handleReferUser awards the referral bonus to the referring user.
func (s *saga) handleReferUser(
	ctx context.Context,
	event eventsource.Event,
	referringUserID string,
) error {
	points, err := s.pointsMapping.
		Map(loyalty.PointsActionReferUser)
	if err != nil {
		return err
	}
	cmd := &loyalty.EarnPoints{
		CommandModel: eventsource.CommandModel{
			ID: referringUserID,
		},
		Points: *points,
	}
	return s.dispatcher.Dispatch(ctx, cmd)
}
// handleSignUpWithReferral awards the referred-sign-up bonus to the newly
// created user.
func (s *saga) handleSignUpWithReferral(
	ctx context.Context,
	event eventsource.Event,
) error {
	points, err := s.pointsMapping.
		Map(loyalty.PointsActionSignUpWithReferral)
	if err != nil {
		return err
	}
	cmd := &loyalty.EarnPoints{
		CommandModel: eventsource.CommandModel{
			ID: event.AggregateID,
		},
		Points: *points,
	}
	return s.dispatcher.Dispatch(ctx, cmd)
}
|
package data
import (
"errors"
"fmt"
"io"
"log"
"net/http"
"os"
"github.com/souhub/wecircles/pkg/logging"
)
// Circle represents a user-owned community circle and its display metadata.
type Circle struct {
ID int
Name string
ImagePath string // file name of the circle's thumbnail image
Overview string
Category string
OwnerID int // numeric ID of the owning user
OwnerIDStr string // string form of the owner's user ID
OwnerImagePath string // NOTE(review): not populated by any Scan in this file — confirm intent
TwitterID string
CreatedAt string
Owner User // populated separately (e.g. via GetOwner), not by the circle queries
Members []User
}
// GetCirclebyUser returns the circle owned by the user whose string ID is
// userIdStr. On failure (including no matching row) the zero Circle and the
// error are returned; prepare errors are additionally logged.
func GetCirclebyUser(userIdStr string) (circle Circle, err error) {
db := NewDB()
defer db.Close()
// NOTE(review): SELECT * is scanned into exactly 9 fields; this assumes the
// circles table has these columns in this order — confirm against the schema.
query := `SELECT * FROM circles
WHERE owner_id_str=?`
stmt, err := db.Prepare(query)
if err != nil {
logging.Warn(err, logging.GetCurrentFile(), logging.GetCurrentFileLine())
return
}
defer stmt.Close()
err = stmt.QueryRow(userIdStr).Scan(&circle.ID, &circle.Name, &circle.ImagePath, &circle.Overview, &circle.Category, &circle.OwnerID, &circle.OwnerIDStr, &circle.TwitterID, &circle.CreatedAt)
return circle, err
}
// To check whether the user has ever created a circle (changing the user ID triggers a bug that allows a second circle to be created)
// func GetCirclebyUserID(id int) (circle Circle, err error) {
// db := NewDB()
// defer db.Close()
// query := `SELECT * FROM circles
// WHERE owner_id=?`
// stmt, err := db.Prepare(query)
// if err != nil {
// logging.Warn(err, logging.GetCurrentFile(), logging.GetCurrentFileLine())
// return
// }
// defer stmt.Close()
// err = stmt.QueryRow(id).Scan(&circle.ID, &circle.Name, &circle.ImagePath, &circle.Overview, &circle.Category, &circle.OwnerID, &circle.OwnerIDStr, &circle.TwitterID, &circle.CreatedAt)
// return circle, err
// }
// GetOwner loads the owning user's basic profile (ID, name, string user ID
// and image path) from the users table.
func (circle *Circle) GetOwner() (user User, err error) {
	db := NewDB()
	defer db.Close()
	query := `SELECT id, name, user_id_str, image_path
FROM users
WHERE id=?`
	prepared, err := db.Prepare(query)
	if err != nil {
		logging.Warn(err, logging.GetCurrentFile(), logging.GetCurrentFileLine())
		return
	}
	defer prepared.Close()
	row := prepared.QueryRow(circle.OwnerID)
	err = row.Scan(&user.Id, &user.Name, &user.UserIdStr, &user.ImagePath)
	return user, err
}
// MembershipsByCircleID returns every membership row for this circle.
func (circle *Circle) MembershipsByCircleID() (memberships []Membership, err error) {
	db := NewDB()
	defer db.Close()
	query := `SELECT *
FROM memberships
WHERE circle_id=?`
	rows, err := db.Query(query, circle.ID)
	if err != nil {
		logging.Warn(err, logging.GetCurrentFile(), logging.GetCurrentFileLine())
		// The old code fell through here and called rows.Next() on a nil
		// *Rows, which panics; return the error instead.
		return
	}
	defer rows.Close()
	for rows.Next() {
		var membership Membership
		if err = rows.Scan(&membership.ID, &membership.UserID, &membership.CircleID); err != nil {
			logging.Warn(err, logging.GetCurrentFile(), logging.GetCurrentFileLine())
			return
		}
		memberships = append(memberships, membership)
	}
	// Surface iteration errors (e.g. connection loss mid-scan).
	err = rows.Err()
	return
}
// CountMemberships returns the number of membership rows for this circle.
func (circle *Circle) CountMemberships() (numberOfMemberships int, err error) {
	db := NewDB()
	defer db.Close()
	query := `SELECT COUNT(*)
FROM memberships
WHERE circle_id=?`
	// COUNT(*) always yields exactly one row, so QueryRow is sufficient and
	// avoids the nil-rows panic the old Query-based loop risked when the
	// query failed (it kept going and called rows.Next() on nil).
	if err = db.QueryRow(query, circle.ID).Scan(&numberOfMemberships); err != nil {
		logging.Warn(err, logging.GetCurrentFile(), logging.GetCurrentFileLine())
	}
	return
}
// Circles returns every circle, newest first.
func Circles() (circles []Circle, err error) {
	db := NewDB()
	defer db.Close()
	query := `SELECT *
FROM circles
ORDER BY id DESC`
	rows, err := db.Query(query)
	if err != nil {
		logging.Warn(err, logging.GetCurrentFile(), logging.GetCurrentFileLine())
		return
	}
	// Register the close immediately: the old code deferred it after the
	// loop, so the early return inside the loop leaked the rows.
	defer rows.Close()
	for rows.Next() {
		var circle Circle
		err = rows.Scan(&circle.ID, &circle.Name, &circle.ImagePath, &circle.Overview, &circle.Category, &circle.OwnerID, &circle.OwnerIDStr, &circle.TwitterID, &circle.CreatedAt)
		if err != nil {
			// Do not log.Fatal here: a scan error in a data accessor must not
			// terminate the whole process. Log and return the error instead.
			log.Println(err)
			logging.Warn(err, logging.GetCurrentFile(), logging.GetCurrentFileLine())
			return
		}
		circles = append(circles, circle)
	}
	err = rows.Err()
	return
}
// UserByOwnerID loads this circle's owner, including the account creation
// timestamp, from the users table.
func (circle *Circle) UserByOwnerID() (user User, err error) {
	db := NewDB()
	defer db.Close()
	query := `SELECT id, name, user_id_str, image_path, created_at
FROM users
WHERE id=?`
	row := db.QueryRow(query, circle.OwnerID)
	err = row.Scan(&user.Id, &user.Name, &user.UserIdStr, &user.ImagePath, &user.CreatedAt)
	return
}
// CirclebyOwnerID returns the circle whose owner has the given string user
// ID. (Despite the parameter name, id is matched against owner_id_str.)
func CirclebyOwnerID(id string) (circle Circle, err error) {
	db := NewDB()
	defer db.Close()
	query := `SELECT *
FROM circles
WHERE owner_id_str=?`
	stmt, err := db.Prepare(query)
	if err != nil {
		logging.Warn(err, logging.GetCurrentFile(), logging.GetCurrentFileLine())
		return
	}
	// The old code never closed the prepared statement; mirror
	// GetCirclebyUser and release it when done.
	defer stmt.Close()
	err = stmt.QueryRow(id).Scan(&circle.ID, &circle.Name, &circle.ImagePath, &circle.Overview, &circle.Category, &circle.OwnerID, &circle.OwnerIDStr, &circle.TwitterID, &circle.CreatedAt)
	return circle, err
}
// Create inserts this circle as a new row in the circles table.
func (circle *Circle) Create() (err error) {
	db := NewDB()
	defer db.Close()
	_, err = db.Exec(`INSERT INTO circles (name, image_path, overview, category, owner_id, owner_id_str, twitter_id)
VALUES (?,?,?,?,?,?,?)`,
		circle.Name, circle.ImagePath, circle.Overview, circle.Category, circle.OwnerID, circle.OwnerIDStr, circle.TwitterID)
	return
}
// Update rewrites this circle's editable columns, keyed by its ID.
func (circle *Circle) Update() (err error) {
	db := NewDB()
	defer db.Close()
	_, err = db.Exec(`UPDATE circles
SET name=?,image_path=?, overview=?, category=?, twitter_id=?
WHERE id=?`,
		circle.Name, circle.ImagePath, circle.Overview, circle.Category, circle.TwitterID, circle.ID)
	return
}
// Upload saves the "image" form file for this circle: it stores the file
// under web/img/user<owner>/circles/mycircle, pushes it to S3, then removes
// the local copy. It returns the stored file name; when no file was sent, the
// existing circle.ImagePath is returned along with the FormFile error.
func (circle *Circle) Upload(r *http.Request) (uploadedFileName string, err error) {
	// Allow the "POST" method, only.
	if r.Method != "POST" {
		err = errors.New("method error: POST only")
		logging.Warn(err, logging.GetCurrentFile(), logging.GetCurrentFileLine())
		return
	}
	// Make the thumbnail dir if it does not exist yet.
	currentRootDir, err := os.Getwd()
	if err != nil {
		// The old code ignored this error and built a bogus path from "".
		logging.Warn(err, logging.GetCurrentFile(), logging.GetCurrentFileLine())
		return
	}
	circleImageDir := fmt.Sprintf("%s/web/img/user%d/circles/mycircle", currentRootDir, circle.OwnerID)
	if _, err = os.Stat(circleImageDir); err != nil {
		logging.Info(err, logging.GetCurrentFile(), logging.GetCurrentFileLine())
		if err = os.MkdirAll(circleImageDir, 0777); err != nil {
			logging.Warn(err, logging.GetCurrentFile(), logging.GetCurrentFileLine())
			return
		}
	}
	// Get the file sent from the form; when none was sent, keep the current image.
	file, fileHeader, err := r.FormFile("image")
	if err != nil {
		logging.Info(err, logging.GetCurrentFile(), logging.GetCurrentFileLine())
		uploadedFileName = circle.ImagePath
		return
	}
	// Close the upload stream when done — registered immediately so the
	// early returns below do not leak it (the old code deferred this much later).
	defer file.Close()
	// Delete the current thumbnail before saving the new one.
	if err = circle.DeleteCircleImage(); err != nil {
		logging.Info(err, logging.GetCurrentFile(), logging.GetCurrentFileLine())
	}
	uploadedFileName = fileHeader.Filename
	// Set the uploaded file's path.
	imagePath := fmt.Sprintf("web/img/user%d/circles/mycircle/%s", circle.OwnerID, uploadedFileName)
	// Save the uploaded file to imagePath.
	saveImage, err := os.Create(imagePath)
	if err != nil {
		logging.Warn(err, logging.GetCurrentFile(), logging.GetCurrentFileLine())
		return
	}
	defer saveImage.Close()
	// Write the uploaded content to the local file.
	if _, err = io.Copy(saveImage, file); err != nil {
		logging.Warn(err, logging.GetCurrentFile(), logging.GetCurrentFileLine())
		return
	}
	// Upload to S3.
	if err = S3Upload(imagePath); err != nil {
		logging.Warn(err, logging.GetCurrentFile(), logging.GetCurrentFileLine())
		return
	}
	// Delete the local copy now that it lives on S3.
	if err = os.Remove(imagePath); err != nil {
		logging.Warn(err, logging.GetCurrentFile(), logging.GetCurrentFileLine())
	}
	return uploadedFileName, err
}
// Delete removes the circle.
// NOTE(review): not implemented yet — always returns nil.
func (circle *Circle) Delete() (err error) {
return
}
// DeleteCircleImage removes this circle's current thumbnail from S3.
// Failures are logged at info level and returned to the caller.
func (circle *Circle) DeleteCircleImage() (err error) {
	key := fmt.Sprintf("web/img/user%d/circles/mycircle/%s", circle.OwnerID, circle.ImagePath)
	err = S3Delete(key)
	if err != nil {
		logging.Info(err, logging.GetCurrentFile(), logging.GetCurrentFileLine())
	}
	return
}
// ResetCircles deletes every row in the circles table.
func ResetCircles() (err error) {
	db := NewDB()
	defer db.Close()
	_, err = db.Exec(`DELETE from circles`)
	return
}
|
package main
import "fmt"
// student holds a pupil's roll number and name.
type student struct {
	rollno int
	name   string
}

// getRollNo reports the student's roll number.
func (s *student) getRollNo() int { return s.rollno }

// getName reports the student's name.
func (s *student) getName() string { return s.name }
func main(){
obj1 := student{name: "sushil", rollno: 16107}
fmt.Println("whole obj\t", obj1)
fmt.Println("address of object\t", &obj1)
fmt.Println("address of name inside of object\t",&obj1.name)
fmt.Println("address of rollNo inside of object\t",&obj1.rollno)
// fmt.Println("address of getName() inside of object\t",&obj1.getName)
// fmt.Println("address of getRollNo() inside of object\t",&obj1.rollNo)
fmt.Println("obj1.getRollNo()\t",obj1.getRollNo(),"\t")
fmt.Println("obj.getName()\t",obj1.getName())
obj2 := student{name: "arati", rollno: 16121}
fmt.Println("\n",obj2)
fmt.Print(obj2.getRollNo(),"\t")
fmt.Println(obj2.getName())
} |
package main
import "fmt"
import "./testb"
// main prints a greeting and exercises a helper from the relative "testb"
// package plus two functions from this package.
func main() {
fmt.Printf("Hello world!\n")
testb.Testa()
// Testd is defined elsewhere in this package (not in this file).
Testd()
coucou()
}
// coucou prints a friendly "coucou" greeting to stdout.
func coucou() {
	fmt.Print("coucou\n")
}
|
package repository
import (
"service-consult/service"
_ "github.com/go-sql-driver/mysql"
)
// Database connection settings.
// NOTE(review): credentials are hard-coded; consider loading them from the
// environment or a secrets store instead of the source tree.
const (
Username = "root"
Password = "admin"
Hostname = "mysql:3306"
DBName = "testeDB"
)
// Storage abstracts the persistence layer used by the consult service.
type Storage interface {
// ConsultNegativacoes returns the negativation records matching data.
ConsultNegativacoes(data string) ([]*service.Data, error)
}
|
package update
import (
"archive/tar"
"bytes"
"compress/gzip"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"time"
"github.com/fd/forklift/root"
"github.com/fd/forklift/static/gopkg.in/fd/go-cli.v1/cli"
)
// init registers the Update command with the CLI framework at program start.
func init() {
cli.Register(Update{})
}
// Update implements the "forklift update" subcommand, which replaces the
// running forklift binary with the latest released build from GitHub.
type Update struct {
root.Root
cli.Arg0 `name:"update"`
cli.Manual `
Usage: forklift update
Summary: Update forklift.
`
}
// Main locates the installed forklift binary, finds the newest prerelease on
// GitHub, downloads the matching asset, overwrites the binary in place, and
// stamps it with the release's publish time.
func (cmd *Update) Main() error {
	bin, err := find_bin(string(cmd.Root.Arg0))
	if err != nil {
		return err
	}
	fmt.Println("Looking for a new release:")
	release, err := get_latest_release(true)
	if err != nil {
		return err
	}
	asset, err := release.get_asset()
	if err != nil {
		return err
	}
	fmt.Printf(" - %s (%s)\n", release.Name, release.TagName)
	fmt.Printf(" - downloading ...")
	r, err := asset.load_bin()
	if err != nil {
		return err
	}
	fmt.Printf(" done\n")
	fmt.Printf(" - installing ...")
	f, err := os.OpenFile(bin, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755)
	if err != nil {
		return err
	}
	if _, err = io.Copy(f, r); err != nil {
		f.Close()
		return err
	}
	// Close explicitly and check the error: a failed close here can mean the
	// new binary was not fully flushed to disk. The old code closed twice
	// (defer + explicit) and ignored both results.
	if err := f.Close(); err != nil {
		return err
	}
	// Stamp the binary with the release time; the old code discarded this error.
	if err := os.Chtimes(bin, time.Now(), release.PublishedAt); err != nil {
		return err
	}
	fmt.Printf(" done\n")
	return nil
}
func find_bin(arg0 string) (string, error) {
if filepath.IsAbs(arg0) {
return arg0, nil
}
path, err := filepath.Abs(arg0)
if err == nil {
if _, err := os.Stat(path); err == nil {
return path, nil
}
}
path, err = exec.LookPath(arg0)
if err == nil {
if _, err := os.Stat(path); err == nil {
return path, nil
}
}
return "", fmt.Errorf("Unable to determine location of the forklift binary")
}
// release_t is the subset of the GitHub release JSON that this command reads.
type release_t struct {
Draft bool
Prerelease bool
Name string
TagName string `json:"tag_name"`
AssetsURL string `json:"assets_url"`
PublishedAt time.Time `json:"published_at"`
}
// asset_t is a single downloadable file attached to a release.
type asset_t struct {
Id int
Name string
release *release_t // back-pointer to the owning release, set by get_asset
}
// get_latest_release fetches the release list from the GitHub API and returns
// the most recently published non-draft release whose prerelease flag equals
// the prerelease argument. An error is returned when nothing matches.
func get_latest_release(prerelease bool) (*release_t, error) {
	resp, err := http.Get("https://api.github.com/repos/fd/forklift/releases")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var releases []*release_t
	if err := json.NewDecoder(resp.Body).Decode(&releases); err != nil {
		return nil, err
	}

	var latest *release_t
	for _, candidate := range releases {
		// Skip drafts and releases of the wrong prerelease kind.
		if candidate.Draft || candidate.Prerelease != prerelease {
			continue
		}
		if latest == nil || latest.PublishedAt.Before(candidate.PublishedAt) {
			latest = candidate
		}
	}
	if latest == nil {
		return nil, fmt.Errorf("No releases were found")
	}
	return latest, nil
}
// get_asset fetches this release's asset list and returns the asset named
// forklift-<GOOS>-<GOARCH>.tar.gz, linking it back to the release. When
// several assets share the name, the last one listed wins.
func (r *release_t) get_asset() (*asset_t, error) {
	resp, err := http.Get(r.AssetsURL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var assets []*asset_t
	if err := json.NewDecoder(resp.Body).Decode(&assets); err != nil {
		return nil, err
	}

	wanted := fmt.Sprintf("forklift-%s-%s.tar.gz", runtime.GOOS, runtime.GOARCH)
	var targeted *asset_t
	for _, candidate := range assets {
		if candidate.Name == wanted {
			targeted = candidate
		}
	}
	if targeted == nil {
		return nil, fmt.Errorf("No releases found for %s %s", runtime.GOOS, runtime.GOARCH)
	}
	targeted.release = r
	return targeted, nil
}
// load_bin downloads the asset's gzipped tarball, extracts the forklift
// binary entry from it, and returns an in-memory reader over its bytes.
func (a *asset_t) load_bin() (io.Reader, error) {
	var buf bytes.Buffer
	url := fmt.Sprintf("https://github.com/fd/forklift/releases/%s/%d/%s", a.release.TagName, a.Id, a.Name)
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	gzipr, err := gzip.NewReader(resp.Body)
	if err != nil {
		return nil, err
	}
	// The old code never closed the gzip reader; close it so its internal
	// buffers are released on every return path.
	defer gzipr.Close()
	name := fmt.Sprintf("forklift-%s-%s/bin/forklift", runtime.GOOS, runtime.GOARCH)
	tr := tar.NewReader(gzipr)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break // end of tar archive
		}
		if err != nil {
			return nil, err
		}
		if hdr.Name != name {
			continue
		}
		if _, err = io.Copy(&buf, tr); err != nil {
			return nil, err
		}
		return bytes.NewReader(buf.Bytes()), nil
	}
	return nil, fmt.Errorf("missing binary in archive: %s", name)
}
|
package main
import (
"fmt"
)
// 350. 两个数组的交集 II
// 给定两个数组,编写一个函数来计算它们的交集。说明:
// 输出结果中每个元素出现的次数,应与元素在两个数组中出现的次数一致。
// 我们可以不考虑输出结果的顺序。
// 进阶:
// 如果给定的数组已经排好序呢?你将如何优化你的算法?
// 如果 nums1 的大小比 nums2 小很多,哪种方法更优?
// 如果 nums2 的元素存储在磁盘上,磁盘内存是有限的,并且你不能一次加载所有的元素到内存中,你该怎么办?
// 来源:力扣(LeetCode)
// 链接:https://leetcode-cn.com/problems/intersection-of-two-arrays-ii
func main() {
nums1 := []int{1, 2, 2, 1}
nums2 := []int{2, 2}
fmt.Println(intersection(nums1, nums2))
}
// intersect returns the multiset intersection of nums1 and nums2: each value
// appears as many times as it occurs in both slices. A frequency map is built
// from nums1 and consumed while scanning nums2 — O(n+m) time, O(n) space.
func intersect(nums1 []int, nums2 []int) (result []int) {
	if nums1 == nil || nums2 == nil {
		return nil
	}
	counts := make(map[int]int, len(nums1))
	for _, v := range nums1 {
		counts[v]++
	}
	for _, v := range nums2 {
		if counts[v] > 0 {
			result = append(result, v)
			counts[v]--
		}
	}
	return result
}
// intersect2 computes the multiset intersection of two ALREADY SORTED slices
// with two pointers in a single pass — O(n+m) time, O(1) extra space.
func intersect2(nums1 []int, nums2 []int) (result []int) {
	if nums1 == nil || nums2 == nil {
		return nil
	}
	var i, j int
	for i < len(nums1) && j < len(nums2) {
		switch {
		case nums1[i] < nums2[j]:
			i++
		case nums1[i] > nums2[j]:
			j++
		default:
			// Equal values: emit one copy and advance both cursors.
			result = append(result, nums1[i])
			i++
			j++
		}
	}
	return result
}
// 进阶:如果nums1比nums2小很多,使用小的数组遍历生成map
// 进阶:如果磁盘空间有限,将两个数组排序,再双指针遍历比较
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.