text stringlengths 11 4.05M |
|---|
// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s3backend
import (
"io"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
// S3 defines the operations we use in the s3 api. Useful for mocking.
type S3 interface {
	// HeadObject retrieves object metadata without fetching the body.
	HeadObject(input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
	// Download writes the object described by input into w and returns the
	// number of bytes written.
	Download(
		w io.WriterAt,
		input *s3.GetObjectInput,
		options ...func(*s3manager.Downloader)) (n int64, err error)
	// Upload stores the content described by input and returns the upload result.
	Upload(
		input *s3manager.UploadInput,
		options ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error)
	// ListObjectsV2Pages iterates listings page by page, calling fn for each
	// page; fn returning false stops the iteration.
	ListObjectsV2Pages(input *s3.ListObjectsV2Input, fn func(*s3.ListObjectsV2Output, bool) bool) error
}
// join composes the raw S3 API with the manager-level Downloader and
// Uploader so that a single value provides every method of the S3 interface.
type join struct {
	s3iface.S3API
	*s3manager.Downloader
	*s3manager.Uploader
}

// Compile-time assertion that join satisfies S3.
var _ S3 = (*join)(nil)
|
package project
import (
"golang.org/x/net/context"
"github.com/codeship/libcompose/project/events"
)
// Unpause unpauses the specified services containers (like docker unpause).
// It fans out over the requested services via p.perform, emitting project- and
// service-level Unpause start/done/failed events.
func (p *Project) Unpause(ctx context.Context, services ...string) error {
	// The wrapper labels previously read "Project Pause"/"Service Pause" — a
	// copy-paste from Pause; corrected to match the Unpause events they carry.
	eventWrapper := events.NewEventWrapper("Project Unpause", events.NewProjectUnpauseStartEvent, events.NewProjectUnpauseDoneEvent, events.NewProjectUnpauseFailedEvent)
	return p.perform(eventWrapper, services, wrapperAction(func(wrapper *serviceWrapper, wrappers map[string]*serviceWrapper) {
		serviceEventWrapper := events.NewEventWrapper("Service Unpause", events.NewServiceUnpauseStartEvent, events.NewServiceUnpauseDoneEvent, events.NewServiceUnpauseFailedEvent)
		wrapper.Do(nil, serviceEventWrapper, func(service Service) error {
			return service.Unpause(ctx)
		})
	}), nil)
}
|
package river
import (
"bytes"
"os"
"path"
"sync"
"time"
"github.com/BurntSushi/toml"
"github.com/go-mysql-org/go-mysql/mysql"
"github.com/juju/errors"
"github.com/siddontang/go-log/log"
"github.com/siddontang/go/ioutil2"
)
// masterInfo tracks the replication master's binlog position and GTID set,
// persisted to <dataDir>/master.info in TOML form. The embedded RWMutex
// guards the exported fields, which Save mutates.
type masterInfo struct {
	sync.RWMutex

	Name  string `toml:"bin_name"`
	Pos   uint32 `toml:"bin_pos"`
	SGtid string `toml:"bin_gtid"`

	// gset is the parsed form of SGtid, populated by loadMasterInfo.
	gset mysql.GTIDSet

	// filePath is where the info is persisted; empty disables persistence.
	filePath string
	// lastSaveTime rate-limits disk writes in Save (at most one per second).
	lastSaveTime time.Time
}
// loadMasterInfo reads the persisted binlog position/GTID from
// <dataDir>/master.info. An empty dataDir disables persistence and yields a
// zero-valued masterInfo; a missing file is not an error. The saved GTID
// string is parsed with the given flavor.
//
// Unlike the previous version, a TOML decode failure or a GTID parse failure
// is returned as an error instead of being ignored or panicking.
func loadMasterInfo(dataDir string, flavor string) (*masterInfo, error) {
	var m masterInfo

	if len(dataDir) == 0 {
		return &m, nil
	}

	m.filePath = path.Join(dataDir, "master.info")
	m.lastSaveTime = time.Now()

	if err := os.MkdirAll(dataDir, 0755); err != nil {
		return nil, errors.Trace(err)
	}

	f, err := os.Open(m.filePath)
	if err != nil && !os.IsNotExist(errors.Cause(err)) {
		return nil, errors.Trace(err)
	} else if os.IsNotExist(errors.Cause(err)) {
		// First run: no saved state yet.
		return &m, nil
	}
	defer f.Close()

	if _, err = toml.DecodeReader(f, &m); err != nil {
		return nil, errors.Trace(err)
	}

	m.gset, err = mysql.ParseGTIDSet(flavor, m.SGtid)
	if err != nil {
		// Include the offending GTID string; the old message printed the
		// error where the string was expected.
		log.Errorf("parse gtid set %q failed: %v", m.SGtid, err)
		return nil, errors.Trace(err)
	}
	return &m, nil
}
// TODO: add gtidset, what about update gtid ?
// Save records the binlog position and GTID string, persisting them to
// filePath at most once per second (to bound disk traffic). An empty
// filePath means in-memory only. The write is atomic via WriteFileAtomic.
func (m *masterInfo) Save(pos mysql.Position, gtid string) error {
	log.Debugf("save position %s, gtid: %s", pos, gtid)

	m.Lock()
	defer m.Unlock()

	m.Name = pos.Name
	m.Pos = pos.Pos
	m.SGtid = gtid

	if len(m.filePath) == 0 {
		return nil
	}

	// Rate-limit disk writes; the in-memory fields above are always updated.
	n := time.Now()
	if n.Sub(m.lastSaveTime) < time.Second {
		return nil
	}
	m.lastSaveTime = n

	var buf bytes.Buffer
	e := toml.NewEncoder(&buf)
	// Previously the encode error was ignored, which could persist an
	// empty/partial buffer.
	if err := e.Encode(m); err != nil {
		return errors.Trace(err)
	}

	var err error
	if err = ioutil2.WriteFileAtomic(m.filePath, buf.Bytes(), 0644); err != nil {
		log.Errorf("canal save master info to file %s err %v", m.filePath, err)
	}

	return errors.Trace(err)
}
// TODO: add get gtid set for startWithGtid
// Position returns the most recently recorded binlog coordinates. The read
// is guarded by the embedded RWMutex, so it is safe alongside Save.
func (m *masterInfo) Position() mysql.Position {
	m.RLock()
	pos := mysql.Position{
		Name: m.Name,
		Pos:  m.Pos,
	}
	m.RUnlock()
	return pos
}
// GtidSet re-parses the saved GTID string and returns the resulting set; a
// parse failure yields nil (the error is deliberately discarded, as before).
// The SGtid read is now taken under RLock — Save mutates it under Lock, so
// the previous unguarded read was a data race.
// NOTE(review): the flavor is hard-coded to "mysql" here while loadMasterInfo
// honors a flavor parameter — confirm this is intended for MariaDB setups.
func (m *masterInfo) GtidSet() mysql.GTIDSet {
	m.RLock()
	sgtid := m.SGtid
	m.RUnlock()
	gtid, _ := mysql.ParseGTIDSet("mysql", sgtid)
	return gtid
}
// Close performs a final Save of the current position and GTID. The fields
// are snapshotted under RLock — the previous version read m.SGtid without
// holding the lock, racing with Save.
// NOTE(review): Save rate-limits disk writes to once per second, so this
// final save may be skipped if one happened very recently — confirm whether
// Close should force the write.
func (m *masterInfo) Close() error {
	m.RLock()
	pos := mysql.Position{
		Name: m.Name,
		Pos:  m.Pos,
	}
	gtid := m.SGtid
	m.RUnlock()
	return m.Save(pos, gtid)
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package policy
import (
"context"
"chromiumos/tast/common/fixture"
"chromiumos/tast/common/pci"
"chromiumos/tast/common/policy"
"chromiumos/tast/common/policy/fakedms"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/checked"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/restriction"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/policyutil"
"chromiumos/tast/testing"
)
// init registers the DictationEnabled test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:         DictationEnabled,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Behavior of DictationEnabled policy: checking if dictation is enabled or not",
		Contacts: []string{
			"swapnilgupta@google.com", // Test author
		},
		SoftwareDeps: []string{"chrome"},
		// TODO(crbug.com/1238027): Close dialog before the next test.
		// Attr: []string{"group:mainline", "informational"},
		Fixture: fixture.ChromePolicyLoggedIn,
		SearchFlags: []*testing.StringPair{
			pci.SearchFlag(&policy.DictationEnabled{}, pci.VerifiedFunctionalityUI),
		},
	})
}
// DictationEnabled tests the DictationEnabled policy.
// For each policy state (unset/disabled/enabled) it serves the policy via
// FakeDMS, then verifies both the presence of the "Toggle dictation" system
// tray button and the state of the dictation toggle in OS settings.
func DictationEnabled(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	fdms := s.FixtValue().(fakedms.HasFakeDMS).FakeDMS()
	// Connect to Test API to use it with the UI library.
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to create Test API connection: ", err)
	}
	for _, param := range []struct {
		name            string                   // subtest name
		value           *policy.DictationEnabled // policy value to serve
		wantButton      bool                     // expect tray button present
		wantChecked     checked.Checked          // expected toggle state
		wantRestriction restriction.Restriction  // expected toggle restriction
	}{
		{
			name:            "unset",
			value:           &policy.DictationEnabled{Stat: policy.StatusUnset},
			wantButton:      false,
			wantChecked:     checked.False,
			wantRestriction: restriction.None,
		},
		{
			name:            "disabled",
			value:           &policy.DictationEnabled{Val: false},
			wantButton:      false,
			wantChecked:     checked.False,
			wantRestriction: restriction.Disabled,
		},
		{
			// When the policy is set (even to true) the toggle is enforced,
			// hence restriction.Disabled in both set cases.
			name:            "enabled",
			value:           &policy.DictationEnabled{Val: true},
			wantButton:      true,
			wantChecked:     checked.True,
			wantRestriction: restriction.Disabled,
		},
	} {
		s.Run(ctx, param.name, func(ctx context.Context, s *testing.State) {
			defer faillog.DumpUITreeWithScreenshotOnError(ctx, s.OutDir(), s.HasError, cr, "ui_tree_"+param.name)
			// Perform cleanup.
			if err := policyutil.ResetChrome(ctx, fdms, cr); err != nil {
				s.Fatal("Failed to clean up: ", err)
			}
			// Update policies.
			if err := policyutil.ServeAndVerify(ctx, fdms, cr, []policy.Policy{param.value}); err != nil {
				s.Fatal("Failed to update policies: ", err)
			}
			// Trying to find the "Toggle dictation" button in the system tray.
			buttonExists := true
			ui := uiauto.New(tconn)
			if err = ui.Exists(nodewith.Name("Toggle dictation").Role(role.Button))(ctx); err != nil {
				buttonExists = false
			}
			if buttonExists != param.wantButton {
				s.Errorf("Unexpected existence of Toggle dictation button: got %v; want %v", buttonExists, param.wantButton)
			}
			// Verify the dictation toggle state in the accessibility settings page.
			if err := policyutil.OSSettingsPage(ctx, cr, "manageAccessibility").
				SelectNode(ctx, nodewith.
					Name("Enable dictation (speak to type)").
					Role(role.ToggleButton)).
				Restriction(param.wantRestriction).
				Checked(param.wantChecked).
				Verify(); err != nil {
				s.Error("Unexpected OS settings state: ", err)
			}
		})
	}
}
|
package service
import (
"github.com/LiveSocket/bot/service/db"
"github.com/LiveSocket/bot/service/healthcheck"
"github.com/LiveSocket/bot/service/socket"
)
// Bit flags selecting which sub-services to enable; ALL is the union of the
// three individual flags.
const (
	ALL         = 0x7
	SOCKET      = 0x1
	DB          = 0x2
	HEALTHCHECK = 0x4
)
// Service bundles the database and socket sub-services; their methods are
// promoted via embedding.
type Service struct {
	*db.DB
	*socket.Socket
}

// model holds the same pair as unexported fields.
// NOTE(review): model is not referenced anywhere in this view — possibly dead.
type model struct {
	db     *db.DB
	socket *socket.Socket
}

// Actions maps action names to socket action handlers.
type Actions = map[string]socket.Action

// Subscriptions maps subscription names to socket subscription handlers.
type Subscriptions = map[string]socket.Subscription
// Init Creates a new service using standard livesocket project settings.
// It initializes the database (running the given migrations), the socket
// layer (registering actions and subscriptions), and the healthcheck, then
// returns a function that tears down the database and socket.
func (service *Service) Init(actions Actions, subscriptions Subscriptions, migrationTable string, migrations ...db.Migration) func() {
	database, closeDB := db.Init(migrationTable, migrations...)

	sock := &socket.Socket{}
	closeSocket := sock.Init(actions, subscriptions)

	service.DB = database
	service.Socket = sock

	healthcheck.Init()

	// The caller invokes this to release both sub-services.
	return func() {
		closeDB()
		closeSocket()
	}
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file contains helper functions to allocate memory on ChromeOS.
// Package memory contains common utilities to allocate memory and read memory
// pressure state on ChromeOS and Android.
package memory
import (
"container/list"
"context"
"runtime"
"strings"
"time"
"golang.org/x/sys/unix"
"chromiumos/tast/errors"
"chromiumos/tast/local/resourced"
"chromiumos/tast/local/syslog"
"chromiumos/tast/testing"
)
// ChromeOSAllocator helps test code allocate memory on ChromeOS.
type ChromeOSAllocator struct {
	// allocated holds the mmap'd []byte buffers, in allocation order.
	allocated *list.List
	// size is the total bytes currently allocated across all buffers.
	size uint64
}
// NewChromeOSAllocator creates a helper to allocate memory on ChromeOS.
// Returns the helper.
func NewChromeOSAllocator() *ChromeOSAllocator {
	c := &ChromeOSAllocator{allocated: list.New()}
	return c
}
// Size returns the size of all allocated memory, in bytes.
func (c *ChromeOSAllocator) Size() uint64 {
	return c.size
}
// Allocate some memory on ChromeOS.
// Parameter size is the size of the allocation in bytes.
// Allocated memory is filled with random data so that page compression can't
// shrink it.
// The request is split into chunks of at most MiB bytes, each anonymously
// mmap'd. (MiB and randomPage are defined elsewhere in this package —
// presumably 1<<20 and a MiB-aligned random buffer; confirm.)
func (c *ChromeOSAllocator) Allocate(size int) error {
	if size <= 0 {
		return errors.Errorf("can not allocate size %d", size)
	}
	for size > 0 {
		// Cap each mmap at one MiB to avoid large ranges that might fail.
		mmapSize := size
		if mmapSize > MiB {
			mmapSize = MiB
		}
		size -= mmapSize
		// Anonymous private mapping: fd -1, offset 0.
		buffer, err := unix.Mmap(
			-1,
			0,
			mmapSize,
			unix.PROT_READ|unix.PROT_WRITE,
			unix.MAP_PRIVATE|unix.MAP_ANONYMOUS,
		)
		if err != nil {
			// Include Go runtime stats to help diagnose allocation failures.
			var stats runtime.MemStats
			runtime.ReadMemStats(&stats)
			return errors.Wrapf(err, "failed to allocate %d byte chunk after allocating %d bytes, total Sys %d", mmapSize, c.size, stats.Sys)
		}
		// Fill each page with random bytes so that page compression can't reduce
		// the size.
		for i := 0; i < mmapSize; i += len(randomPage) {
			copy(buffer[i:], randomPage[:])
		}
		c.allocated.PushBack(buffer)
		c.size += uint64(len(buffer))
	}
	return nil
}
// FreeLast frees the most recently allocated buffer.
// Returns the size of the buffer freed.
// NOTE(review): c.size is decremented before Munmap; if Munmap fails, the
// buffer is already removed from the list and uncounted — confirm that this
// bookkeeping is acceptable on the error path.
func (c *ChromeOSAllocator) FreeLast() (uint64, error) {
	if c.allocated.Len() == 0 {
		return 0, errors.New("nothing to free")
	}
	// Buffers are stored as []byte; Back() is the most recent allocation.
	buffer := c.allocated.Remove(c.allocated.Back()).([]byte)
	size := uint64(len(buffer))
	c.size -= size
	if err := unix.Munmap(buffer); err != nil {
		return 0, errors.Wrap(err, "unable to free buffer")
	}
	return size, nil
}
// FreeAll frees all allocated buffers.
// Returns the size of freed memory, in bytes.
func (c *ChromeOSAllocator) FreeAll() (uint64, error) {
	size := c.size
	for c.allocated.Len() > 0 {
		if _, err := c.FreeLast(); err != nil {
			return 0, errors.Wrap(err, "unable to free")
		}
	}
	// Sanity check: the running total must hit zero once the list is empty.
	if c.size != 0 {
		// Fixed typo in the error message ("averything" -> "everything").
		return 0, errors.Errorf("allocated size is %d after freeing everything", c.size)
	}
	return size, nil
}
const (
	// oomKillMessage is the kernel log line emitted by the Linux OOM killer.
	oomKillMessage = "Out of memory: Kill process"
	// oomSyslogTimeout bounds how long checkForOOMs waits for the message.
	oomSyslogTimeout = 10 * time.Second
)
// checkForOOMs scans syslog for an OOM-killer message. It returns nil when
// no message appears before the timeout, and an error either when the reader
// fails or when an OOM kill was actually observed.
// NOTE(review): err is compared with == against syslog.ErrNotFound; if Wait
// ever wraps its errors, errors.Is would be needed — confirm.
func checkForOOMs(ctx context.Context, reader *syslog.Reader) error {
	_, err := reader.Wait(ctx, oomSyslogTimeout, func(e *syslog.Entry) bool {
		return strings.Contains(e.Content, oomKillMessage)
	})
	if err == syslog.ErrNotFound {
		return nil
	}
	if err != nil {
		return errors.Wrap(err, "failed to check for OOM")
	}
	return errors.New("test triggered Linux OOM killer")
}
// AllocateUntil allocates memory until available memory is at the passed
// margin, in bytes. To allow the system to stabilize, it will try attempts
// times, waiting attemptInterval duration between each attempt.
// If too much memory has been allocated, then the extra is freed between
// attempts to avoid overshooting the margin.
// Returns the allocated memory at every attempt.
func (c *ChromeOSAllocator) AllocateUntil(
	ctx context.Context,
	rm *resourced.Client,
	attemptInterval time.Duration,
	attempts int,
	margin uint64,
) ([]uint64, error) {
	// Create a reader to scan for OOMs, we can't use syslog.Program to filter
	// to a specific process name because ARCVM includes the PID in the process
	// name field.
	reader, err := syslog.NewReader(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to open syslog reader")
	}
	defer reader.Close()
	crosCrit, err := NewAvailableLimit(ctx, rm, margin)
	if err != nil {
		return nil, errors.Wrap(err, "failed to make ChromeOS available Limit")
	}
	// Use NewPageReclaimLimit to avoid the Linux OOM killer. Once page reclaim
	// starts, we are quite close to a Zone's min watermark.
	nearOOM := NewPageReclaimLimit()
	limit := NewCompositeLimit(crosCrit, nearOOM)
	allocated := make([]uint64, attempts)
	for attempt := 0; attempt < attempts; attempt++ {
		// Allocate until the composite limit reports we've reached the margin.
		for {
			distance, err := limit.Distance(ctx)
			if err != nil {
				return nil, errors.Wrap(err, "failed to read memory limit")
			}
			if distance <= 0 {
				break
			}
			// Be conservative and only allocate 1/4 of the distance to the
			// nearest memory limit. Truncate allocations to MiB.
			// Limit allocations to 64MiB, to avoid large mmap ranges that might
			// fail.
			const maxAllocMiB = 64
			allocMiB := (distance / MiB) / 4
			if allocMiB == 0 {
				allocMiB = 1
			} else if allocMiB > maxAllocMiB {
				allocMiB = maxAllocMiB
			}
			if err = c.Allocate(int(allocMiB * MiB)); err != nil {
				return nil, errors.Wrap(err, "unable to allocate")
			}
		}
		allocated[attempt] = c.Size()
		testing.ContextLogf(ctx, "Attempt %d: %d MiB", attempt, c.Size()/MiB)
		// Available is less than target margin, but it might be much less
		// if the system becomes unresponsive from the memory pressure we
		// are applying. Available memory can drop much faster than the
		// amount allocated, causing us to overshoot and apply much higher
		// memory pressure than intended. To reduce the risk of having the
		// linux OOM killer kill anything, we free anything extra we may
		// have allocated.
		for {
			distance, err := limit.Distance(ctx)
			if err != nil {
				return nil, errors.Wrap(err, "failed to read memory limit")
			}
			if distance > 0 {
				break
			}
			if _, err := c.FreeLast(); err != nil {
				return nil, errors.Wrap(err, "unable to free after overshoot")
			}
		}
		if err := testing.Sleep(ctx, attemptInterval); err != nil {
			return nil, errors.Wrap(err, "failed to sleep after allocation attempt")
		}
	}
	// Fail the whole call if the kernel OOM killer fired during the run.
	if err := checkForOOMs(ctx, reader); err != nil {
		return nil, err
	}
	return allocated, nil
}
|
package cmd
import (
"bytes"
"strings"
"testing"
)
func TestRoot(t *testing.T) {
tcs := []struct {
name string
command string
want string
wantErr bool
}{
{"้ทใใใค", "gomah -m 123123123123 -s 123 -p 123 -w 111", "๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ \n", false},
{"ๆฎ้ใฎใใค", "gomah -m 123 -s 123 -p 123 -w 111 -d 22", "๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐
๐
\n", false},
{"็ญใใใค", "gomah -m 123", "๐ ๐ ๐ \n", false},
{"ๅญๅจใใชใ็ใฎ็จฎ้กใฏ่ฝใกใ", "gomah -w 5", "", true},
}
for _, tc := range tcs {
buf := new(bytes.Buffer)
cmd := NewRootCmd()
cmd.SetOutput(buf)
cmdArgs := strings.Split(tc.command, " ")
cmd.SetArgs(cmdArgs[1:])
err := cmd.Execute()
if tc.wantErr && err == nil {
t.Errorf("expected error, but not returned error")
}
get := buf.String()
if !tc.wantErr && tc.want != get {
t.Errorf("unexpected response: want:%+v, get:%+v", tc.want, get)
}
}
}
|
package main
import (
"errors"
"os/exec"
"strings"
"sync/atomic"
"time"
)
// ErrCmdStatus marks a task whose command exited with a non-zero status.
var ErrCmdStatus = errors.New("Code != 200")

// WorkerCmd runs tasks as external commands, one at a time, reporting
// completions back to its Scheduler.
type WorkerCmd struct {
	Id int

	// cmd holds the currently running *exec.Cmd (atomic so Cancel can read
	// it from another goroutine).
	cmd atomic.Value

	// task delivers work to Run; a nil task or channel close stops the loop.
	task chan *Task
	s    *Scheduler
}
// Init wires the worker to its scheduler, assigns its id, and creates the
// unbuffered task channel. Returns w for call chaining.
func (w *WorkerCmd) Init(id int, s *Scheduler) *WorkerCmd {
	w.task = make(chan *Task)
	w.Id = id
	w.s = s
	return w
}
// Exec hands a task to the worker's Run loop; blocks until Run receives it.
func (w *WorkerCmd) Exec(t *Task) {
	w.task <- t
}
// Cancel kills the currently running command's process, if one is active.
// Safe to call from another goroutine; the Kill error is ignored, as before.
func (w *WorkerCmd) Cancel() {
	v := w.cmd.Load()
	if v == nil {
		return
	}
	if cmd := v.(*exec.Cmd); cmd.Process != nil {
		cmd.Process.Kill()
	}
}
// Run is the worker loop: it executes each received task via doCMD and
// reports the completed task to the scheduler. A nil task acts as a stop
// sentinel; closing the channel also ends the loop.
func (w *WorkerCmd) Run() {
	for t := range w.task {
		if t == nil {
			return
		}
		t.Status, t.Msg = w.doCMD(t)
		w.s.complete <- t
	}
}
// Close stops the Run loop by closing the task channel.
func (w *WorkerCmd) Close() {
	close(w.task)
}
// doCMD builds and runs the external command for task t, returning its exit
// status and combined output (or the error text when there is no output).
// The command line is cfg.Base + " " + job name, split on spaces, with the
// task's own Params appended. A timer cancels the process after
// cfg.TaskTimeout.
func (w *WorkerCmd) doCMD(t *Task) (status int, msg string) {
	// Assemble argv: first token is the executable, the rest are arguments.
	task := w.s.cfg.Base + " " + t.job.Name
	task = strings.TrimSpace(task)
	params := strings.Split(task, " ")
	task = params[0]
	params = params[1:]
	params = append(params, t.Params...)

	c := exec.Command(task, params...)
	// Publish the command so Cancel (from another goroutine) can kill it.
	w.cmd.Store(c)

	timer := time.AfterFunc(w.s.cfg.TaskTimeout, w.Cancel)
	defer timer.Stop()

	out, err := c.CombinedOutput()
	if err != nil {
		// ProcessState is nil when the command failed to start at all.
		if c.ProcessState != nil {
			status = c.ProcessState.ExitCode()
		} else {
			status = 1
		}
		// Prefer the command's output as the message; fall back to the error.
		if len(out) == 0 {
			msg = err.Error()
		} else {
			msg = string(out)
		}
		t.Err = err
		return
	}
	status = c.ProcessState.ExitCode()
	msg = string(out)
	if status != 0 {
		t.Err = ErrCmdStatus
	}
	return
}
|
package main
import (
"errors"
"github.com/graphql-go/graphql"
"github.com/renteasy/marketplace/internal/database"
"time"
)
// rentalType is the GraphQL object for a database.Rental. Each resolver
// type-asserts p.Source to database.Rental; the id/property resolvers error
// on a mismatch while the scalar-field resolvers silently return nil, nil.
var rentalType = graphql.NewObject(
	graphql.ObjectConfig{
		Name: "Rental",
		Fields: graphql.Fields{
			"id": &graphql.Field{
				Type: graphql.Int,
				Resolve: func(p graphql.ResolveParams) (i interface{}, err error) {
					rental, ok := p.Source.(database.Rental)
					if !ok {
						return nil, errors.New("could not decode Gorm Model")
					}
					// The Gorm base model carries the primary key.
					return rental.Model.ID, nil
				},
			},
			"property": &graphql.Field{
				Type: propertyType,
				Resolve: func(p graphql.ResolveParams) (i interface{}, err error) {
					rental, ok := p.Source.(database.Rental)
					if !ok {
						return nil, errors.New("could not decode Gorm Model")
					}
					return rental.Property, nil
				},
			},
			"bedrooms": &graphql.Field{
				Type:        graphql.Int,
				Description: "How many bedrooms the rental has available to the tenant.",
				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
					if rental, ok := p.Source.(database.Rental); ok {
						return rental.Bedrooms, nil
					}
					return nil, nil
				},
			},
			"bathrooms": &graphql.Field{
				Type:        graphql.Int,
				Description: "How many bathrooms the rental has available to the tenant.",
				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
					if rental, ok := p.Source.(database.Rental); ok {
						return rental.Bathrooms, nil
					}
					return nil, nil
				},
			},
			"rentDeposit": &graphql.Field{
				Type:        graphql.Float,
				Description: "How much deposit the tenant would be expected to pay.",
				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
					if rental, ok := p.Source.(database.Rental); ok {
						return rental.RentDeposit, nil
					}
					return nil, nil
				},
			},
			"rentMonthly": &graphql.Field{
				Type:        graphql.Float,
				Description: "How much the rent is in USD monthly.",
				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
					if rental, ok := p.Source.(database.Rental); ok {
						return rental.RentMonthly, nil
					}
					return nil, nil
				},
			},
			"listingDate": &graphql.Field{
				Type:        graphql.DateTime,
				Description: "When the property went on the market.",
				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
					if rental, ok := p.Source.(database.Rental); ok {
						return rental.ListingDate, nil
					}
					return nil, nil
				},
			},
		},
	},
)
// rentalRegister registers the rental queries (by id and list) and the
// createRental mutation with the schema.
var rentalRegister = GraphQLType{
	Type: rentalType,
	QueryFields: graphql.Fields{
		"rental": &graphql.Field{
			Type:        rentalType,
			Description: "Get rental by id",
			Args: graphql.FieldConfigArgument{
				"id": &graphql.ArgumentConfig{
					Type: graphql.Int,
				},
			},
			Resolve: func(p graphql.ResolveParams) (interface{}, error) {
				id, ok := p.Args["id"].(int)
				if ok {
					return db.Rentals.GetRentalById(id)
				}
				// Missing/non-int id resolves to nil rather than an error.
				return nil, nil
			},
		},
		"rentals": &graphql.Field{
			Type:        graphql.NewList(rentalType),
			Description: "Get rentals list",
			Resolve: func(params graphql.ResolveParams) (interface{}, error) {
				return db.Rentals.GetRentals()
			},
		},
	},
	MutationFields: graphql.Fields{
		"createRental": &graphql.Field{
			Type:        rentalType,
			Description: "Create a new Rental",
			Args: graphql.FieldConfigArgument{
				// Property Values
				"address": &graphql.ArgumentConfig{
					Type: graphql.NewNonNull(graphql.String),
				},
				"city": &graphql.ArgumentConfig{
					Type: graphql.NewNonNull(graphql.String),
				},
				"state": &graphql.ArgumentConfig{
					Type: graphql.NewNonNull(graphql.String),
				},
				"zipcode": &graphql.ArgumentConfig{
					Type: graphql.NewNonNull(graphql.String),
				},
				// Rental Values
				"propertyType": &graphql.ArgumentConfig{
					Type: graphql.NewNonNull(graphql.String),
				},
				"rentalStatus": &graphql.ArgumentConfig{
					Type: graphql.NewNonNull(graphql.String),
				},
				"unit": &graphql.ArgumentConfig{
					Type: graphql.String,
				},
				"sqft": &graphql.ArgumentConfig{
					Type: graphql.Int,
				},
				"bedrooms": &graphql.ArgumentConfig{
					Type: graphql.NewNonNull(graphql.Int),
				},
				"bathrooms": &graphql.ArgumentConfig{
					Type: graphql.NewNonNull(graphql.Int),
				},
				// NOTE(review): "stories" is declared as graphql.String here
				// but asserted to int in the resolver below, so the value can
				// never be applied — confirm which side is correct.
				"stories": &graphql.ArgumentConfig{
					Type: graphql.String,
				},
				"rentDeposit": &graphql.ArgumentConfig{
					Type: graphql.Float,
				},
				"rentMonthly": &graphql.ArgumentConfig{
					Type: graphql.Float,
				},
			},
			Resolve: func(params graphql.ResolveParams) (interface{}, error) {
				// Find or create property
				property, err := db.Properties.FirstOrCreate(database.Property{
					Address: params.Args["address"].(string),
					City:    params.Args["city"].(string),
					State:   params.Args["state"].(string),
					Zipcode: params.Args["zipcode"].(string),
				})
				if err != nil {
					return nil, err
				}
				// Create rental
				unit, unitOk := params.Args["unit"].(string)
				sqft, sqftOk := params.Args["sqft"].(int)
				// See NOTE(review) on the "stories" argument above: this
				// assertion targets int while the arg is declared String.
				stories, storiesOk := params.Args["stories"].(int)
				// NOTE(review): rentDeposit/rentMonthly are optional (no
				// NonNull) yet asserted without an ok check — a request that
				// omits either will panic. Confirm and guard if unintended.
				rental := database.Rental{
					Property:    property,
					Bedrooms:    params.Args["bedrooms"].(int),
					Bathrooms:   params.Args["bathrooms"].(int),
					RentDeposit: params.Args["rentDeposit"].(float64),
					RentMonthly: params.Args["rentMonthly"].(float64),
					ListingDate: time.Now(),
				}
				if unitOk {
					rental.Unit = unit
				}
				if sqftOk {
					rental.Sqft = sqft
				}
				if storiesOk {
					rental.Stories = stories
				}
				err = db.Rentals.CreateRental(&rental)
				if err != nil {
					return nil, err
				}
				return rental, nil
			},
		},
	},
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package eviction
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
)
// TestNewConfig checks that eviction Config parsing applies defaults for
// missing keys, honors a fully-specified evictionHard block, and merges a
// partial block with defaults. Percentages like "10%" are expected to parse
// to float fractions (0.1); memory quantities to bytes.
func TestNewConfig(t *testing.T) {
	type tCase struct {
		name       string                           // case description
		input      string                           // raw YAML config input
		assertFunc func(*testing.T, Config) error   // per-case expectations
	}
	cases := []tCase{
		{
			name:  "Should has default config with empty input",
			input: ``,
			assertFunc: func(t *testing.T, config Config) error {
				// Default memory.available is 100Mi.
				memBytes, _ := config.GetHard().GetMemoryAvailable().Value.Quantity.AsInt64()
				assert.Equal(t, int64(1024*1024*100), memBytes)
				assert.Equal(t, float32(0.1), config.GetHard().GetNodeFsAvailable().Value.Percentage)
				assert.Equal(t, float32(0.05), config.GetHard().GetNodeFsInodesFree().Value.Percentage)
				assert.Equal(t, float32(0.15), config.GetHard().GetImageFsAvailable().Value.Percentage)
				return nil
			},
		},
		{
			name: "With all config",
			input: `
evictionHard:
  imagefs.available: 25%
  memory.available: 1024Mi
  nodefs.available: 15%
  nodefs.inodesFree: 10%`,
			assertFunc: func(t *testing.T, config Config) error {
				memBytes, _ := config.GetHard().GetMemoryAvailable().Value.Quantity.AsInt64()
				assert.Equal(t, int64(1024*1024*1024), memBytes)
				assert.Equal(t, float32(0.25), config.GetHard().GetImageFsAvailable().Value.Percentage)
				assert.Equal(t, float32(0.15), config.GetHard().GetNodeFsAvailable().Value.Percentage)
				assert.Equal(t, float32(0.1), config.GetHard().GetNodeFsInodesFree().Value.Percentage)
				return nil
			},
		},
		{
			name: "Mixed config with default",
			input: `
evictionHard:
  imagefs.available: 5%
  nodefs.inodesFree: 10%`,
			assertFunc: func(t *testing.T, config Config) error {
				// Unspecified keys fall back to their defaults.
				memBytes, _ := config.GetHard().GetMemoryAvailable().Value.Quantity.AsInt64()
				assert.Equal(t, int64(1024*1024*100), memBytes)
				assert.Equal(t, float32(0.05), config.GetHard().GetImageFsAvailable().Value.Percentage)
				assert.Equal(t, float32(0.1), config.GetHard().GetNodeFsAvailable().Value.Percentage)
				assert.Equal(t, float32(0.1), config.GetHard().GetNodeFsInodesFree().Value.Percentage)
				return nil
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(st *testing.T) {
			if config, err := NewConfig([]byte(tc.input)); err != nil {
				st.Errorf("[%s] NewConfig error: %v", tc.name, err)
			} else {
				// Log the parsed config to aid debugging on failure.
				str, _ := json.MarshalIndent(config, "", " ")
				st.Logf("%s", str)
				if err := tc.assertFunc(st, config); err != nil {
					st.Errorf("[%s] assert error: %v", tc.name, err)
				}
			}
		})
	}
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package supervisor_test
import (
"fmt"
"math/rand"
"testing"
ec2Types "github.com/aws/aws-sdk-go-v2/service/ec2/types"
eksTypes "github.com/aws/aws-sdk-go-v2/service/eks/types"
"github.com/mattermost/mattermost-cloud/internal/events"
"github.com/mattermost/mattermost-cloud/internal/metrics"
"github.com/mattermost/mattermost-cloud/internal/store"
"github.com/mattermost/mattermost-cloud/internal/supervisor"
"github.com/mattermost/mattermost-cloud/internal/testlib"
"github.com/mattermost/mattermost-cloud/internal/testutil"
"github.com/mattermost/mattermost-cloud/internal/tools/aws"
"github.com/mattermost/mattermost-cloud/internal/tools/utils"
"github.com/mattermost/mattermost-cloud/k8s"
"github.com/mattermost/mattermost-cloud/model"
mmv1alpha1 "github.com/mattermost/mattermost-operator/apis/mattermost/v1alpha1"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
)
// mockInstallationStore is a test double for the installation store used by
// the supervisor. Fixed return values are configured via its fields; the
// multitenant-DB methods come from the embedded mockMultitenantDBStore.
type mockInstallationStore struct {
	Installation                     *model.Installation   // fixed single installation (takes precedence)
	Installations                    []*model.Installation // pool searched by GetInstallation
	UnlockedInstallationsPendingWork []*model.Installation
	Group                            *model.Group

	// UnlockChan, when non-nil, is closed on UnlockInstallation/UnlockGroup
	// so tests can wait for the supervisor to finish.
	UnlockChan              chan interface{}
	UpdateInstallationCalls int

	mockMultitenantDBStore
}
// cloudMetrics is a shared metrics instance for the tests in this package.
var cloudMetrics = metrics.New()
// GetClusters is a stub; no clusters are returned.
func (s *mockInstallationStore) GetClusters(clusterFilter *model.ClusterFilter) ([]*model.Cluster, error) {
	return nil, nil
}

// GetCluster returns an empty cluster for any id.
func (s *mockInstallationStore) GetCluster(id string) (*model.Cluster, error) {
	return &model.Cluster{}, nil
}

// UpdateCluster is a no-op stub.
func (s *mockInstallationStore) UpdateCluster(cluster *model.Cluster) error {
	return nil
}

// LockCluster always succeeds.
func (s *mockInstallationStore) LockCluster(clusterID, lockerID string) (bool, error) {
	return true, nil
}

// UnlockCluster always succeeds.
func (s *mockInstallationStore) UnlockCluster(clusterID string, lockerID string, force bool) (bool, error) {
	return true, nil
}
// GetInstallation returns the fixed Installation when set; otherwise it
// scans the Installations pool for a matching ID. A nil, nil result means
// the installation was not found.
func (s *mockInstallationStore) GetInstallation(installationID string, includeGroupConfig, includeGroupConfigOverrides bool) (*model.Installation, error) {
	if s.Installation != nil {
		return s.Installation, nil
	}
	for i := range s.Installations {
		if s.Installations[i].ID == installationID {
			return s.Installations[i], nil
		}
	}
	return nil, nil
}
// GetInstallations lazily creates a fixed Installation on first call and
// returns it as a single-element list. As a side effect, filtering for the
// import-complete state flips the stored installation to stable, letting
// tests drive that transition.
func (s *mockInstallationStore) GetInstallations(installationFilter *model.InstallationFilter, includeGroupConfig, includeGroupConfigOverrides bool) ([]*model.Installation, error) {
	if s.Installation == nil {
		s.Installation = &model.Installation{
			ID: model.NewID(),
		}
	}
	if installationFilter.State == model.InstallationStateImportComplete {
		s.Installation.State = model.InstallationStateStable
	}
	return []*model.Installation{s.Installation}, nil
}
// GetUnlockedInstallationsPendingWork returns a shallow copy of the
// configured pending-work slice so callers cannot mutate the mock's state.
func (s *mockInstallationStore) GetUnlockedInstallationsPendingWork() ([]*model.Installation, error) {
	out := append([]*model.Installation(nil), s.UnlockedInstallationsPendingWork...)
	return out, nil
}
// UpdateInstallation is a counting no-op stub.
func (s *mockInstallationStore) UpdateInstallation(installation *model.Installation) error {
	s.UpdateInstallationCalls++
	return nil
}

// UpdateInstallationGroupSequence is a no-op stub.
func (s *mockInstallationStore) UpdateInstallationGroupSequence(installation *model.Installation) error {
	return nil
}

// UpdateInstallationState counts toward the same call counter as
// UpdateInstallation.
func (s *mockInstallationStore) UpdateInstallationState(installation *model.Installation) error {
	s.UpdateInstallationCalls++
	return nil
}

// UpdateInstallationCRVersion is a no-op stub.
func (s *mockInstallationStore) UpdateInstallationCRVersion(installationID, crVersion string) error {
	return nil
}

// LockInstallation always succeeds.
func (s *mockInstallationStore) LockInstallation(installationID, lockerID string) (bool, error) {
	return true, nil
}

// UnlockInstallation always succeeds and closes UnlockChan (if set) so tests
// can observe the unlock.
func (s *mockInstallationStore) UnlockInstallation(installationID, lockerID string, force bool) (bool, error) {
	if s.UnlockChan != nil {
		close(s.UnlockChan)
	}
	return true, nil
}

// DeleteInstallation is a no-op stub.
func (s *mockInstallationStore) DeleteInstallation(installationID string) error {
	return nil
}

// CreateClusterInstallation is a no-op stub.
func (s *mockInstallationStore) CreateClusterInstallation(clusterInstallation *model.ClusterInstallation) error {
	return nil
}

// GetClusterInstallation is a stub; nothing is returned.
func (s *mockInstallationStore) GetClusterInstallation(clusterInstallationID string) (*model.ClusterInstallation, error) {
	return nil, nil
}
// GetClusterInstallations fabricates a single stable cluster installation
// for the installation matching the filter; it returns nil when that
// installation does not exist in the mock.
func (s *mockInstallationStore) GetClusterInstallations(filter *model.ClusterInstallationFilter) ([]*model.ClusterInstallation, error) {
	installation, err := s.GetInstallation(filter.InstallationID, false, false)
	if installation == nil || err != nil {
		return nil, err
	}
	return []*model.ClusterInstallation{{
		ID:              model.NewID(),
		ClusterID:       model.NewID(),
		InstallationID:  installation.ID,
		Namespace:       installation.ID,
		State:           "stable",
		CreateAt:        installation.CreateAt,
		DeleteAt:        installation.DeleteAt,
		APISecurityLock: false,
	},
	}, nil
}
// LockClusterInstallations always succeeds.
func (s *mockInstallationStore) LockClusterInstallations(clusterInstallationID []string, lockerID string) (bool, error) {
	return true, nil
}

// UnlockClusterInstallations always succeeds.
func (s *mockInstallationStore) UnlockClusterInstallations(clusterInstallationID []string, lockerID string, force bool) (bool, error) {
	return true, nil
}

// UpdateClusterInstallation is a no-op stub.
func (s *mockInstallationStore) UpdateClusterInstallation(clusterInstallation *model.ClusterInstallation) error {
	return nil
}

// GetGroup is a stub; no group is returned.
func (s *mockInstallationStore) GetGroup(groupId string) (*model.Group, error) {
	return nil, nil
}

// LockGroup always succeeds.
func (s *mockInstallationStore) LockGroup(groupID, lockerID string) (bool, error) {
	return true, nil
}

// UnlockGroup always succeeds and closes UnlockChan (if set) so tests can
// observe the unlock.
func (s *mockInstallationStore) UnlockGroup(groupID, lockerID string, force bool) (bool, error) {
	if s.UnlockChan != nil {
		close(s.UnlockChan)
	}
	return true, nil
}

// GetWebhooks is a stub; no webhooks are returned.
func (s *mockInstallationStore) GetWebhooks(filter *model.WebhookFilter) ([]*model.Webhook, error) {
	return nil, nil
}

// GetAnnotationsForInstallation is a stub; no annotations are returned.
func (s *mockInstallationStore) GetAnnotationsForInstallation(installationID string) ([]*model.Annotation, error) {
	return nil, nil
}

// GetInstallationBackups is a stub; no backups are returned.
func (s *mockInstallationStore) GetInstallationBackups(filter *model.InstallationBackupFilter) ([]*model.InstallationBackup, error) {
	return nil, nil
}

// UpdateInstallationBackupState is a no-op stub.
func (s *mockInstallationStore) UpdateInstallationBackupState(backup *model.InstallationBackup) error {
	return nil
}

// LockInstallationBackups always succeeds.
func (s *mockInstallationStore) LockInstallationBackups(backupIDs []string, lockerID string) (bool, error) {
	return true, nil
}

// UnlockInstallationBackups always succeeds.
func (s *mockInstallationStore) UnlockInstallationBackups(backupIDs []string, lockerID string, force bool) (bool, error) {
	return true, nil
}

// GetInstallationDBMigrationOperations is a stub; nothing is returned.
func (s *mockInstallationStore) GetInstallationDBMigrationOperations(filter *model.InstallationDBMigrationFilter) ([]*model.InstallationDBMigrationOperation, error) {
	return nil, nil
}

// UpdateInstallationDBMigrationOperationState is a no-op stub.
func (s *mockInstallationStore) UpdateInstallationDBMigrationOperationState(operation *model.InstallationDBMigrationOperation) error {
	return nil
}

// LockInstallationDBMigrationOperations always succeeds.
func (s *mockInstallationStore) LockInstallationDBMigrationOperations(backupIDs []string, lockerID string) (bool, error) {
	return true, nil
}

// UnlockInstallationDBMigrationOperations always succeeds.
func (s *mockInstallationStore) UnlockInstallationDBMigrationOperations(backupIDs []string, lockerID string, force bool) (bool, error) {
	return true, nil
}

// GetInstallationDBRestorationOperations is a stub; nothing is returned.
func (s *mockInstallationStore) GetInstallationDBRestorationOperations(filter *model.InstallationDBRestorationFilter) ([]*model.InstallationDBRestorationOperation, error) {
	return nil, nil
}

// UpdateInstallationDBRestorationOperationState is a no-op stub.
func (s *mockInstallationStore) UpdateInstallationDBRestorationOperationState(operation *model.InstallationDBRestorationOperation) error {
	return nil
}

// LockInstallationDBRestorationOperations always succeeds.
func (s *mockInstallationStore) LockInstallationDBRestorationOperations(backupIDs []string, lockerID string) (bool, error) {
	return true, nil
}

// UnlockInstallationDBRestorationOperations always succeeds.
func (s *mockInstallationStore) UnlockInstallationDBRestorationOperations(backupIDs []string, lockerID string, force bool) (bool, error) {
	return true, nil
}

// GetStateChangeEvents is a stub; no events are returned.
func (s *mockInstallationStore) GetStateChangeEvents(filter *model.StateChangeEventFilter) ([]*model.StateChangeEventData, error) {
	return nil, nil
}
func (s *mockInstallationStore) GetDNSRecordsForInstallation(installationID string) ([]*model.InstallationDNS, error) {
installation, err := s.GetInstallation(installationID, false, false)
if installation == nil || err != nil {
return nil, err
}
return []*model.InstallationDNS{
{ID: "abcd", DomainName: "dns.example.com", InstallationID: installation.ID},
}, nil
}
func (s *mockInstallationStore) DeleteInstallationDNS(installationID string, dnsName string) error {
return nil
}
// mockMultitenantDBStore is an inert multitenant-database store used by the
// supervisor tests: lock operations always succeed and lookups return empty
// results. All receivers are normalized to pointer form with a consistent
// name (the original mixed value/pointer receivers and `m`/`s` names); since
// pointer-receiver methods already existed, the mock must already be used via
// *mockMultitenantDBStore, so this is interface-compatible.
type mockMultitenantDBStore struct{}

// GetMultitenantDatabase is an inert stub; no database is returned.
func (m *mockMultitenantDBStore) GetMultitenantDatabase(multitenantdatabaseID string) (*model.MultitenantDatabase, error) {
	return nil, nil
}

// GetMultitenantDatabases is an inert stub; no databases are returned.
func (m *mockMultitenantDBStore) GetMultitenantDatabases(filter *model.MultitenantDatabaseFilter) ([]*model.MultitenantDatabase, error) {
	return nil, nil
}

// GetInstallationsTotalDatabaseWeight reports zero weight for any set of
// installations.
func (m *mockMultitenantDBStore) GetInstallationsTotalDatabaseWeight(installationIDs []string) (float64, error) {
	return 0, nil
}

// CreateMultitenantDatabase is a no-op stub.
func (m *mockMultitenantDBStore) CreateMultitenantDatabase(multitenantDatabase *model.MultitenantDatabase) error {
	return nil
}

// LockMultitenantDatabase pretends to lock a database; always succeeds.
func (m *mockMultitenantDBStore) LockMultitenantDatabase(multitenantdatabaseID, lockerID string) (bool, error) {
	return true, nil
}

// UnlockMultitenantDatabase pretends to unlock a database; always succeeds.
func (m *mockMultitenantDBStore) UnlockMultitenantDatabase(multitenantdatabaseID, lockerID string, force bool) (bool, error) {
	return true, nil
}

// UpdateMultitenantDatabase is a no-op stub.
func (m *mockMultitenantDBStore) UpdateMultitenantDatabase(multitenantDatabase *model.MultitenantDatabase) error {
	return nil
}

// GetMultitenantDatabaseForInstallationID is an inert stub.
func (m *mockMultitenantDBStore) GetMultitenantDatabaseForInstallationID(installationID string) (*model.MultitenantDatabase, error) {
	return nil, nil
}

// LockMultitenantDatabases pretends to lock databases; always succeeds.
func (m *mockMultitenantDBStore) LockMultitenantDatabases(ids []string, lockerID string) (bool, error) {
	return true, nil
}

// UnlockMultitenantDatabases pretends to unlock databases; always succeeds.
func (m *mockMultitenantDBStore) UnlockMultitenantDatabases(ids []string, lockerID string, force bool) (bool, error) {
	return true, nil
}

// GetSingleTenantDatabaseConfigForInstallation is an inert stub.
func (m *mockMultitenantDBStore) GetSingleTenantDatabaseConfigForInstallation(installationID string) (*model.SingleTenantDatabaseConfig, error) {
	return nil, nil
}

// GetProxyDatabaseResourcesForInstallation is an inert stub.
func (m *mockMultitenantDBStore) GetProxyDatabaseResourcesForInstallation(installationID string) (*model.DatabaseResourceGrouping, error) {
	return nil, nil
}

// GetOrCreateProxyDatabaseResourcesForInstallation is an inert stub.
func (m *mockMultitenantDBStore) GetOrCreateProxyDatabaseResourcesForInstallation(installationID, multitenantDatabaseID string) (*model.DatabaseResourceGrouping, error) {
	return nil, nil
}

// DeleteInstallationProxyDatabaseResources is a no-op stub.
func (m *mockMultitenantDBStore) DeleteInstallationProxyDatabaseResources(multitenantDatabase *model.MultitenantDatabase, databaseSchema *model.DatabaseSchema) error {
	return nil
}

// GetDatabaseSchemas is an inert stub; no schemas are returned.
func (m *mockMultitenantDBStore) GetDatabaseSchemas(filter *model.DatabaseSchemaFilter) ([]*model.DatabaseSchema, error) {
	return nil, nil
}

// GetDatabaseSchema is an inert stub; no schema is returned.
func (m *mockMultitenantDBStore) GetDatabaseSchema(databaseSchemaID string) (*model.DatabaseSchema, error) {
	return nil, nil
}

// GetLogicalDatabases is an inert stub; no logical databases are returned.
func (m *mockMultitenantDBStore) GetLogicalDatabases(filter *model.LogicalDatabaseFilter) ([]*model.LogicalDatabase, error) {
	return nil, nil
}

// GetLogicalDatabase is an inert stub; no logical database is returned.
func (m *mockMultitenantDBStore) GetLogicalDatabase(logicalDatabaseID string) (*model.LogicalDatabase, error) {
	return nil, nil
}
// mockInstallationProvisioner fakes the installation provisioner. Most
// operations succeed as no-ops; GetClusterResources reports either the
// injected CustomClusterResources (when UseCustomClusterResources is set) or
// a large, mostly-free default capacity.
type mockInstallationProvisioner struct {
	UseCustomClusterResources bool                  // when true, GetClusterResources returns CustomClusterResources
	CustomClusterResources    *k8s.ClusterResources // capacity returned when UseCustomClusterResources is set
}
// ExecClusterInstallationCLI is unimplemented; panics if a test reaches it.
func (p *mockInstallationProvisioner) ExecClusterInstallationCLI(cluster *model.Cluster, clusterInstallation *model.ClusterInstallation, args ...string) ([]byte, error, error) {
	//TODO implement me
	panic("implement me")
}
// ExecMattermostCLI is unimplemented; panics if a test reaches it.
func (p *mockInstallationProvisioner) ExecMattermostCLI(cluster *model.Cluster, clusterInstallation *model.ClusterInstallation, args ...string) ([]byte, error) {
	//TODO implement me
	panic("implement me")
}
// ClusterInstallationProvisioner returns a fresh mock regardless of version.
func (p *mockInstallationProvisioner) ClusterInstallationProvisioner(version string) supervisor.ClusterInstallationProvisioner {
	return &mockInstallationProvisioner{}
}
// ExecClusterInstallationJob is unimplemented; panics if a test reaches it.
func (p *mockInstallationProvisioner) ExecClusterInstallationJob(cluster *model.Cluster, clusterInstallation *model.ClusterInstallation, args ...string) error {
	//TODO implement me
	panic("implement me")
}
// IsResourceReadyAndStable always reports ready and stable.
func (p *mockInstallationProvisioner) IsResourceReadyAndStable(cluster *model.Cluster, clusterInstallation *model.ClusterInstallation) (bool, bool, error) {
	return true, true, nil
}
// CreateClusterInstallation is a no-op stub.
func (p *mockInstallationProvisioner) CreateClusterInstallation(cluster *model.Cluster, installation *model.Installation, dnsRecords []*model.InstallationDNS, clusterInstallation *model.ClusterInstallation) error {
	return nil
}
// UpdateClusterInstallation is a no-op stub.
func (p *mockInstallationProvisioner) UpdateClusterInstallation(cluster *model.Cluster, installation *model.Installation, dnsRecords []*model.InstallationDNS, clusterInstallation *model.ClusterInstallation) error {
	return nil
}
// EnsureCRMigrated always reports the custom resource as migrated.
func (p *mockInstallationProvisioner) EnsureCRMigrated(cluster *model.Cluster, clusterInstallation *model.ClusterInstallation) (bool, error) {
	return true, nil
}
// HibernateClusterInstallation is a no-op stub.
func (p *mockInstallationProvisioner) HibernateClusterInstallation(cluster *model.Cluster, installation *model.Installation, clusterInstallation *model.ClusterInstallation) error {
	return nil
}
// DeleteOldClusterInstallationLicenseSecrets is a no-op stub.
func (p *mockInstallationProvisioner) DeleteOldClusterInstallationLicenseSecrets(cluster *model.Cluster, installation *model.Installation, clusterInstallation *model.ClusterInstallation) error {
	return nil
}
// DeleteClusterInstallation is a no-op stub.
func (p *mockInstallationProvisioner) DeleteClusterInstallation(cluster *model.Cluster, installation *model.Installation, clusterInstallation *model.ClusterInstallation) error {
	return nil
}
// VerifyClusterInstallationMatchesConfig always reports a match.
func (p *mockInstallationProvisioner) VerifyClusterInstallationMatchesConfig(cluster *model.Cluster, installation *model.Installation, clusterInstallation *model.ClusterInstallation) (bool, error) {
	return true, nil
}
// GetClusterResources reports the cluster capacity used for scheduling
// decisions. When UseCustomClusterResources is set the injected
// CustomClusterResources value is returned verbatim; otherwise a fixed,
// mostly-free default capacity is reported.
func (p *mockInstallationProvisioner) GetClusterResources(cluster *model.Cluster, onlySchedulable bool, logger log.FieldLogger) (*k8s.ClusterResources, error) {
	if !p.UseCustomClusterResources {
		// Default: a roomy cluster with only a fraction of CPU, memory, and
		// pod slots in use.
		defaultResources := &k8s.ClusterResources{
			MilliTotalCPU:    1000,
			MilliUsedCPU:     200,
			MilliTotalMemory: 100000000000000,
			MilliUsedMemory:  25000000000000,
			TotalPodCount:    1000,
			UsedPodCount:     100,
		}
		return defaultResources, nil
	}
	return p.CustomClusterResources, nil
}
// GetPublicLoadBalancerEndpoint returns a canned ELB hostname.
func (p *mockInstallationProvisioner) GetPublicLoadBalancerEndpoint(cluster *model.Cluster, namespace string) (string, error) {
	return "example.elb.us-east-1.amazonaws.com", nil
}
// RefreshSecrets is a no-op stub.
func (p *mockInstallationProvisioner) RefreshSecrets(cluster *model.Cluster, installation *model.Installation, clusterInstallation *model.ClusterInstallation) error {
	return nil
}
// PrepareClusterUtilities is a no-op stub.
func (p *mockInstallationProvisioner) PrepareClusterUtilities(cluster *model.Cluster, installation *model.Installation, store model.ClusterUtilityDatabaseStoreInterface) error {
	return nil
}
// TODO(gsagula): this can be replaced with /internal/mocks/aws-tools/AWS.go so that inputs and other variants
// can be tested.
// mockAWS is an inert implementation of the aws.AWS interface. Unless noted
// on a specific method, every call succeeds and returns zero values.
type mockAWS struct{}
// Compile-time check that mockAWS satisfies aws.AWS.
var _ aws.AWS = (*mockAWS)(nil)
func (a *mockAWS) ClaimSecurityGroups(cluster *model.Cluster, ngNames string, vpcID string, logger log.FieldLogger) ([]string, error) {
	return []string{}, nil
}
func (a *mockAWS) WaitForEKSClusterUpdateToBeCompleted(clusterName, updateID string, timeout int) error {
	return nil
}
func (a *mockAWS) WaitForActiveEKSNodeGroup(clusterName, workerName string, timeout int) (*eksTypes.Nodegroup, error) {
	return nil, nil
}
func (a *mockAWS) WaitForEKSClusterToBeDeleted(clusterName string, timeout int) error {
	return nil
}
func (a *mockAWS) WaitForEKSNodeGroupToBeDeleted(clusterName, workerName string, timeout int) error {
	return nil
}
func (a *mockAWS) EnsureEKSClusterUpdated(cluster *model.Cluster) (*eksTypes.Update, error) {
	return nil, nil
}
func (a *mockAWS) EnsureEKSNodeGroupMigrated(cluster *model.Cluster, ngPrefix string) error {
	return nil
}
func (a *mockAWS) GetActiveEKSNodeGroup(clusterName, workerName string) (*eksTypes.Nodegroup, error) {
	return nil, nil
}
func (a *mockAWS) EnsureEKSNodeGroupDeleted(clusterName, workerName string) error {
	return nil
}
func (a *mockAWS) WaitForActiveEKSCluster(clusterName string, timeout int) (*eksTypes.Cluster, error) {
	return nil, nil
}
// GetActiveEKSCluster returns an empty (but non-nil) cluster value.
func (a *mockAWS) GetActiveEKSCluster(clusterName string) (*eksTypes.Cluster, error) {
	return &eksTypes.Cluster{}, nil
}
func (a *mockAWS) UpdateLaunchTemplate(data *model.LaunchTemplateData) error {
	return nil
}
func (a *mockAWS) CreateLaunchTemplate(data *model.LaunchTemplateData) error {
	return nil
}
// IsLaunchTemplateAvailable always reports the template as available.
func (a *mockAWS) IsLaunchTemplateAvailable(launchTemplateName string) (bool, error) {
	return true, nil
}
func (a *mockAWS) DeleteLaunchTemplate(clusterName string) error {
	return nil
}
// GetLoadBalancerAPIByType is unimplemented; panics if a test reaches it.
func (a *mockAWS) GetLoadBalancerAPIByType(s string) aws.ELB {
	//TODO implement me
	panic("implement me")
}
func (a *mockAWS) InstallEKSAddons(cluster *model.Cluster) error {
	return nil
}
// GetRegion returns the package's default AWS region constant.
func (a *mockAWS) GetRegion() string {
	return aws.DefaultAWSRegion
}
func (a *mockAWS) GetAccountID() (string, error) {
	return "", nil
}
func (a *mockAWS) ClaimVPC(vpcID string, cluster *model.Cluster, owner string, logger log.FieldLogger) (aws.ClusterResources, error) {
	return aws.ClusterResources{}, nil
}
func (a *mockAWS) GetClaimedVPC(clusterID string, logger log.FieldLogger) (string, error) {
	return "", nil
}
func (a *mockAWS) EnsureEKSCluster(cluster *model.Cluster, resources aws.ClusterResources) (*eksTypes.Cluster, error) {
	return &eksTypes.Cluster{}, nil
}
func (a *mockAWS) EnsureEKSNodeGroup(cluster *model.Cluster, nodeGroupPrefix string) (*eksTypes.Nodegroup, error) {
	return &eksTypes.Nodegroup{}, nil
}
func (a *mockAWS) EnsureEKSClusterDeleted(clusterName string) error {
	return nil
}
func (a *mockAWS) GetCertificateSummaryByTag(key, value string, logger log.FieldLogger) (*model.Certificate, error) {
	return nil, nil
}
// GetCloudEnvironmentName returns a canned environment name.
func (a *mockAWS) GetCloudEnvironmentName() string {
	return "test"
}
func (a *mockAWS) S3EnsureBucketDeleted(bucketName string, logger log.FieldLogger) error {
	return nil
}
func (a *mockAWS) S3EnsureObjectDeleted(bucketName, path string) error {
	return nil
}
// GetS3RegionURL returns a canned S3 endpoint URL.
func (a *mockAWS) GetS3RegionURL() string {
	return "s3.amazonaws.test.com"
}
func (a *mockAWS) FixSubnetTagsForVPC(vpcID string, logger log.FieldLogger) error {
	return nil
}
func (a *mockAWS) GetAndClaimVpcResources(cluster *model.Cluster, owner string, logger log.FieldLogger) (aws.ClusterResources, error) {
	return aws.ClusterResources{}, nil
}
func (a *mockAWS) ReleaseVpc(cluster *model.Cluster, logger log.FieldLogger) error {
	return nil
}
func (a *mockAWS) AttachPolicyToRole(roleName, policyName string, logger log.FieldLogger) error {
	return nil
}
func (a *mockAWS) DetachPolicyFromRole(roleName, policyName string, logger log.FieldLogger) error {
	return nil
}
// GetPrivateZoneDomainName returns a canned private-zone domain.
func (a *mockAWS) GetPrivateZoneDomainName(logger log.FieldLogger) (string, error) {
	return "test.domain", nil
}
func (a *mockAWS) CreatePrivateCNAME(dnsName string, dnsEndpoints []string, logger log.FieldLogger) error {
	return nil
}
func (a *mockAWS) UpsertPublicCNAMEs(dnsNames, endpoints []string, logger log.FieldLogger) error {
	return nil
}
// IsProvisionedPrivateCNAME always reports the CNAME as not provisioned.
func (a *mockAWS) IsProvisionedPrivateCNAME(dnsName string, logger log.FieldLogger) bool {
	return false
}
func (a *mockAWS) DeletePrivateCNAME(dnsName string, logger log.FieldLogger) error {
	return nil
}
func (a *mockAWS) DeletePublicCNAMEs(dnsNames []string, logger log.FieldLogger) error {
	return nil
}
// GetPublicHostedZoneNames returns a single canned hosted-zone name.
func (a *mockAWS) GetPublicHostedZoneNames() []string {
	return []string{"public.host.name.example.com"}
}
// IsValidAMI accepts any AMI ID.
func (a *mockAWS) IsValidAMI(AMIID string, logger log.FieldLogger) (bool, error) {
	return true, nil
}
func (a *mockAWS) GeneratePerseusUtilitySecret(clusterID string, logger log.FieldLogger) (*corev1.Secret, error) {
	return nil, nil
}
func (a *mockAWS) GenerateBifrostUtilitySecret(clusterID string, logger log.FieldLogger) (*corev1.Secret, error) {
	return nil, nil
}
func (a *mockAWS) GetCIDRByVPCTag(vpcTagName string, logger log.FieldLogger) (string, error) {
	return "", nil
}
func (a *mockAWS) S3LargeCopy(srcBucketName, srcKey, destBucketName, destKey *string) error {
	return nil
}
func (a *mockAWS) GetMultitenantBucketNameForInstallation(installationID string, store model.InstallationDatabaseStoreInterface) (string, error) {
	return "", nil
}
// SecretsManagerGetPGBouncerAuthUserPassword returns a canned password.
func (a *mockAWS) SecretsManagerGetPGBouncerAuthUserPassword(vpcID string) (string, error) {
	return "password", nil
}
func (a *mockAWS) GetVpcsWithFilters(filters []ec2Types.Filter) ([]ec2Types.Vpc, error) {
	return nil, nil
}
// mockEventProducer records the IDs of the resources for which state-change
// events were produced, in the order they were produced. Tests use the
// recorded order to assert the sequence in which the supervisor worked on
// resources.
type mockEventProducer struct {
	installationListByEventOrder        []string // installation IDs, in event order
	clusterListByEventOrder             []string // cluster IDs, in event order
	clusterInstallationListByEventOrder []string // cluster-installation IDs, in event order
}
// ProduceInstallationStateChangeEvent records the installation ID and succeeds.
func (m *mockEventProducer) ProduceInstallationStateChangeEvent(installation *model.Installation, oldState string, extraDataFields ...events.DataField) error {
	m.installationListByEventOrder = append(m.installationListByEventOrder, installation.ID)
	return nil
}
// ProduceClusterStateChangeEvent records the cluster ID and succeeds.
func (m *mockEventProducer) ProduceClusterStateChangeEvent(cluster *model.Cluster, oldState string, extraDataFields ...events.DataField) error {
	m.clusterListByEventOrder = append(m.clusterListByEventOrder, cluster.ID)
	return nil
}
// ProduceClusterInstallationStateChangeEvent records the cluster-installation
// ID and succeeds.
func (m *mockEventProducer) ProduceClusterInstallationStateChangeEvent(clusterInstallation *model.ClusterInstallation, oldState string, extraDataFields ...events.DataField) error {
	m.clusterInstallationListByEventOrder = append(m.clusterInstallationListByEventOrder, clusterInstallation.ID)
	return nil
}
// mockCloudflareClient is an inert Cloudflare DNS client; both operations
// succeed without doing anything.
type mockCloudflareClient struct{}
// CreateDNSRecords is a no-op stub.
func (m *mockCloudflareClient) CreateDNSRecords(customerDNSName []string, dnsEndpoints []string, logger logrus.FieldLogger) error {
	return nil
}
// DeleteDNSRecords is a no-op stub.
func (m *mockCloudflareClient) DeleteDNSRecords(customerDNSName []string, logger logrus.FieldLogger) error {
	return nil
}
// TestInstallationSupervisorDo drives the supervisor's Do() work loop against
// mock stores and provisioners, verifying how much work is performed and in
// what order installations are picked up.
func TestInstallationSupervisorDo(t *testing.T) {
	standardSchedulingOptions := supervisor.NewInstallationSupervisorSchedulingOptions(false, 80, 0, 0, 0, 0)
	require.NoError(t, standardSchedulingOptions.Validate())

	t.Run("no installations pending work", func(t *testing.T) {
		logger := testlib.MakeLogger(t)
		mockStore := &mockInstallationStore{}

		// Named installationSupervisor (not "supervisor") to avoid shadowing
		// the imported supervisor package.
		installationSupervisor := supervisor.NewInstallationSupervisor(mockStore, &mockInstallationProvisioner{}, "instanceID", false, false, standardSchedulingOptions, &utils.ResourceUtil{}, logger, cloudMetrics, nil, false, &mockCloudflareClient{}, false)
		err := installationSupervisor.Do()
		require.NoError(t, err)

		// With no pending work the supervisor must not touch any installation.
		require.Equal(t, 0, mockStore.UpdateInstallationCalls)
	})

	// NOTE(review): despite the subtest name, the installation is placed in
	// the deletion-requested state — confirm the name reflects the intent.
	t.Run("mock installation creation", func(t *testing.T) {
		logger := testlib.MakeLogger(t)
		mockStore := &mockInstallationStore{}
		mockStore.UnlockedInstallationsPendingWork = []*model.Installation{{
			ID:    model.NewID(),
			State: model.InstallationStateDeletionRequested,
		}}
		mockStore.Installation = mockStore.UnlockedInstallationsPendingWork[0]
		mockStore.UnlockChan = make(chan interface{})

		installationSupervisor := supervisor.NewInstallationSupervisor(mockStore, &mockInstallationProvisioner{}, "instanceID", false, false, standardSchedulingOptions, &utils.ResourceUtil{}, logger, cloudMetrics, &mockEventProducer{}, false, &mockCloudflareClient{}, false)
		err := installationSupervisor.Do()
		require.NoError(t, err)

		// UnlockChan is closed by mockInstallationStore.UnlockGroup, so this
		// receive blocks until the supervisor finishes its work cycle.
		<-mockStore.UnlockChan
		require.Equal(t, 1, mockStore.UpdateInstallationCalls)
	})

	t.Run("order of pending works", func(t *testing.T) {
		logger := testlib.MakeLogger(t)

		// Give the creation-phase states fixed IDs so the expected priority
		// ordering can be asserted below.
		priorityTaskInstallationIDs := map[string]string{
			model.InstallationStateCreationRequested:            "a",
			model.InstallationStateCreationNoCompatibleClusters: "b",
			model.InstallationStateCreationPreProvisioning:      "c",
			model.InstallationStateCreationInProgress:           "d",
			model.InstallationStateCreationDNS:                  "e",
		}
		preferredInstallationOrder := []string{"a", "b", "c", "d", "e"}

		installations := make([]*model.Installation, len(model.AllInstallationStatesPendingWork))
		for i, state := range model.AllInstallationStatesPendingWork {
			// Single map lookup instead of the previous lookup-then-index.
			id := model.NewID()
			if priorityID, ok := priorityTaskInstallationIDs[state]; ok {
				id = priorityID
			}
			installations[i] = &model.Installation{
				ID:    id,
				State: state,
			}
		}

		// Shuffle so the supervisor itself must impose the work order.
		rand.Shuffle(len(installations), func(i, j int) {
			installations[i], installations[j] = installations[j], installations[i]
		})

		mockStore := &mockInstallationStore{
			Installations:                    installations,
			UnlockedInstallationsPendingWork: installations,
		}
		// Named eventProducer (not "mockEventProducer") to avoid shadowing
		// the mockEventProducer type.
		eventProducer := &mockEventProducer{}

		installationSupervisor := supervisor.NewInstallationSupervisor(mockStore, &mockInstallationProvisioner{}, "instanceID", false, false, standardSchedulingOptions, &utils.ResourceUtil{}, logger, cloudMetrics, eventProducer, false, &mockCloudflareClient{}, false)
		err := installationSupervisor.Do()
		require.NoError(t, err)

		// Events are produced as installations are worked on, so event order
		// reflects work order; the priority states must come first.
		installationListByWorkOrder := eventProducer.installationListByEventOrder
		require.Equal(t, preferredInstallationOrder, installationListByWorkOrder[:len(preferredInstallationOrder)])
	})
}
func TestInstallationSupervisor(t *testing.T) {
standardSchedulingOptions := supervisor.NewInstallationSupervisorSchedulingOptions(false, 80, 0, 0, 0, 0)
require.NoError(t, standardSchedulingOptions.Validate())
expectInstallationState := func(t *testing.T, sqlStore *store.SQLStore, installation *model.Installation, expectedState string) {
t.Helper()
installation, err := sqlStore.GetInstallation(installation.ID, false, false)
require.NoError(t, err)
require.Equal(t, expectedState, installation.State)
}
expectClusterInstallations := func(t *testing.T, sqlStore *store.SQLStore, installation *model.Installation, expectedCount int, state string) {
t.Helper()
clusterInstallations, err := sqlStore.GetClusterInstallations(&model.ClusterInstallationFilter{
Paging: model.AllPagesNotDeleted(),
InstallationID: installation.ID,
})
require.NoError(t, err)
require.Len(t, clusterInstallations, expectedCount)
for _, clusterInstallation := range clusterInstallations {
require.Equal(t, state, clusterInstallation.State)
}
}
expectClusterInstallationsOnCluster := func(t *testing.T, sqlStore *store.SQLStore, cluster *model.Cluster, expectedCount int) {
t.Helper()
clusterInstallations, err := sqlStore.GetClusterInstallations(&model.ClusterInstallationFilter{
Paging: model.AllPagesNotDeleted(),
ClusterID: cluster.ID,
})
require.NoError(t, err)
require.Len(t, clusterInstallations, expectedCount)
}
standardTestInstallationSupervisor := func(sqlStore *store.SQLStore, logger log.FieldLogger) *supervisor.InstallationSupervisor {
return supervisor.NewInstallationSupervisor(
sqlStore,
&mockInstallationProvisioner{},
model.NewID(),
false,
false,
standardSchedulingOptions,
&utils.ResourceUtil{},
logger,
cloudMetrics,
testutil.SetupTestEventsProducer(sqlStore, logger),
false,
&mockCloudflareClient{},
false,
)
}
standardStableTestCluster := func() *model.Cluster {
return &model.Cluster{
Provisioner: model.ProvisionerKops,
State: model.ClusterStateStable,
AllowInstallations: true,
ProvisionerMetadataKops: &model.KopsMetadata{
MasterCount: 1,
NodeMinCount: 1,
NodeMaxCount: 5,
},
}
}
standardStableTestInstallation := func() *model.Installation {
groupID := model.NewID()
return &model.Installation{
OwnerID: model.NewID(),
GroupID: &groupID,
Image: "mattermost/mattermost-enterprise-edition",
Version: "v1.0.0",
Name: "domain1",
Size: mmv1alpha1.Size100String,
Affinity: model.InstallationAffinityIsolated,
State: model.InstallationStateStable,
}
}
t.Run("unexpected state", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateStable
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateStable,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateStable)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateStable)
})
t.Run("state has changed since installation was selected to be worked on", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateCreationInProgress
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
// The stored installation is InstallationStateCreationInProgress, so we
// will pass in an installation with state of
// InstallationStateCreationRequested to simulate stale state.
installation.State = model.InstallationStateCreationRequested
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationInProgress)
})
t.Run("creation requested, cluster installations not yet created, no clusters", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateCreationRequested
err := sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationNoCompatibleClusters)
expectClusterInstallations(t, sqlStore, installation, 0, "")
})
t.Run("creation requested, cluster installations not yet created, cluster doesn't allow scheduling", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
cluster.AllowInstallations = false
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateCreationRequested
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationNoCompatibleClusters)
expectClusterInstallations(t, sqlStore, installation, 0, "")
})
t.Run("creation requested, cluster installations not yet created, no empty clusters", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: model.NewID(),
Namespace: "namespace",
State: model.ClusterInstallationStateStable,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateCreationRequested
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationNoCompatibleClusters)
expectClusterInstallations(t, sqlStore, installation, 0, "")
})
t.Run("creation requested, cluster installations reconciling", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateCreationRequested
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateReconciling,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateReconciling)
})
t.Run("creation requested, cluster installations ready", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateCreationRequested
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateReady,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateStable)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateReady)
})
t.Run("creation requested, cluster installations stable", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateCreationRequested
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateStable,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateStable)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateStable)
})
t.Run("creation requested, cluster installations stable, in group with different sequence", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
group := &model.Group{
ID: model.NewID(),
Sequence: 2,
Version: "gversion",
Image: "gImage",
}
err = sqlStore.CreateGroup(group, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateCreationRequested
installation.GroupID = &group.ID
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateStable,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateStable)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateStable)
installation, err = sqlStore.GetInstallation(installation.ID, true, false)
require.NoError(t, err)
assert.True(t, installation.InstallationSequenceMatchesMergedGroupSequence())
})
t.Run("pre provisioning requested, cluster installations reconciling", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateCreationPreProvisioning
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateCreationRequested,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateCreationRequested)
})
t.Run("creation requested, cluster installations failed", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateCreationPreProvisioning
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateCreationFailed,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationFailed)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateCreationFailed)
})
t.Run("creation DNS, cluster installations reconciling", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateCreationDNS
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateCreationRequested,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateCreationRequested)
})
t.Run("creation in progress, cluster installations reconciling", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateCreationInProgress
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateCreationRequested,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateCreationRequested)
})
t.Run("creation in progress, cluster installations ready", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateCreationInProgress
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateReady,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateStable)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateReady)
})
t.Run("creation in progress, cluster installations stable", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateCreationInProgress
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateStable,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateStable)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateStable)
})
t.Run("creation in progress, cluster installations stable, in group with same sequence", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
group := &model.Group{
ID: model.NewID(),
Version: "gversion",
Image: "gImage",
}
err = sqlStore.CreateGroup(group, nil)
require.NoError(t, err)
// Group Sequence always set to 0 when created so we need to update it.
err = sqlStore.UpdateGroup(group, true)
require.NoError(t, err)
owner := model.NewID()
installation := &model.Installation{
OwnerID: owner,
Version: "version",
Name: "dns",
Size: mmv1alpha1.Size100String,
Affinity: model.InstallationAffinityIsolated,
GroupID: &group.ID,
State: model.InstallationStateCreationInProgress,
}
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
installation.MergeWithGroup(group, false)
installation.SyncGroupAndInstallationSequence()
err = sqlStore.UpdateInstallationGroupSequence(installation)
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateStable,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateStable)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateStable)
installation, err = sqlStore.GetInstallation(installation.ID, true, false)
require.NoError(t, err)
assert.True(t, installation.InstallationSequenceMatchesMergedGroupSequence())
})
t.Run("creation in progress, cluster installations failed", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateCreationInProgress
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateCreationFailed,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationFailed)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateCreationFailed)
})
t.Run("creation final tasks, cluster installations stable", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateCreationFinalTasks
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateStable,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateStable)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateStable)
})
t.Run("no compatible clusters, cluster installations not yet created, no clusters", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateCreationNoCompatibleClusters
err := sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationNoCompatibleClusters)
expectClusterInstallations(t, sqlStore, installation, 0, "")
})
t.Run("no compatible clusters, cluster installations not yet created, no available clusters", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: model.NewID(),
Namespace: "namespace",
State: model.ClusterInstallationStateStable,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateCreationNoCompatibleClusters
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationNoCompatibleClusters)
expectClusterInstallations(t, sqlStore, installation, 0, "")
})
t.Run("no compatible clusters, cluster installations not yet created, available cluster", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateCreationNoCompatibleClusters
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateCreationRequested)
})
t.Run("update requested, cluster installations stable", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateUpdateRequested
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateStable,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateUpdateInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateReconciling)
})
t.Run("update requested, cluster installations stable, in group with different sequence", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
group := &model.Group{
ID: model.NewID(),
Sequence: 2,
Version: "gversion",
Image: "gImage",
}
err = sqlStore.CreateGroup(group, nil)
require.NoError(t, err)
owner := model.NewID()
installation := &model.Installation{
OwnerID: owner,
Version: "version",
Name: "dns",
Size: mmv1alpha1.Size100String,
Affinity: model.InstallationAffinityIsolated,
GroupID: &group.ID,
State: model.InstallationStateUpdateRequested,
}
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateStable,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateUpdateInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateReconciling)
installation, err = sqlStore.GetInstallation(installation.ID, true, false)
require.NoError(t, err)
assert.True(t, installation.InstallationSequenceMatchesMergedGroupSequence())
})
t.Run("update in progress, cluster installations reconciling", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateUpdateInProgress
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateReconciling,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateUpdateInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateReconciling)
})
t.Run("update requested, cluster installations reconciling, in group with different sequence", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
group := &model.Group{
ID: model.NewID(),
Sequence: 2,
Version: "gversion",
Image: "gImage",
}
err = sqlStore.CreateGroup(group, nil)
require.NoError(t, err)
owner := model.NewID()
installation := &model.Installation{
OwnerID: owner,
Version: "version",
Name: "dns",
Size: mmv1alpha1.Size100String,
Affinity: model.InstallationAffinityIsolated,
GroupID: &group.ID,
State: model.InstallationStateUpdateInProgress,
}
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateReconciling,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateUpdateRequested)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateReconciling)
installation, err = sqlStore.GetInstallation(installation.ID, true, false)
require.NoError(t, err)
assert.False(t, installation.InstallationSequenceMatchesMergedGroupSequence())
})
t.Run("update in progress, cluster installations ready", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateUpdateInProgress
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateReady,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateUpdateInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateReady)
})
t.Run("update in progress, cluster installations stable", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateUpdateInProgress
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateStable,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateStable)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateStable)
})
t.Run("update requested, cluster installations stable, in group with same sequence", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
group := &model.Group{
ID: model.NewID(),
Version: "gversion",
Image: "gImage",
}
err = sqlStore.CreateGroup(group, nil)
require.NoError(t, err)
// Group Sequence always set to 0 when created so we need to update it
// by calling group update once.
oldSequence := group.Sequence
err = sqlStore.UpdateGroup(group, true)
require.NoError(t, err)
require.NotEqual(t, oldSequence, group.Sequence)
owner := model.NewID()
installation := &model.Installation{
OwnerID: owner,
Version: "version",
Name: "dns",
Size: mmv1alpha1.Size100String,
Affinity: model.InstallationAffinityIsolated,
GroupID: &group.ID,
State: model.InstallationStateUpdateInProgress,
}
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
installation.MergeWithGroup(group, false)
installation.SyncGroupAndInstallationSequence()
err = sqlStore.UpdateInstallationGroupSequence(installation)
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateStable,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateStable)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateStable)
installation, err = sqlStore.GetInstallation(installation.ID, true, false)
require.NoError(t, err)
assert.True(t, installation.InstallationSequenceMatchesMergedGroupSequence())
})
t.Run("hibernation requested, cluster installations stable", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateHibernationRequested
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateStable,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateHibernationInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateReconciling)
})
t.Run("hibernation in progress, cluster installations reconciling", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateHibernationInProgress
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateReconciling,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateHibernationInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateReconciling)
})
t.Run("hibernation in progress, cluster installations ready", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateHibernationInProgress
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateReady,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateHibernationInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateReady)
})
t.Run("hibernation in progress, cluster installations stable", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateHibernationInProgress
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateStable,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateHibernating)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateStable)
})
t.Run("wake up requested, cluster installations stable", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateWakeUpRequested
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateStable,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateUpdateInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateReconciling)
})
t.Run("deletion pending requested, cluster installations stable", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateDeletionPendingRequested
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateStable,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateDeletionPendingInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateReconciling)
})
t.Run("deletion pending in progress, cluster installations reconciling", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateDeletionPendingInProgress
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateReconciling,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateDeletionPendingInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateReconciling)
})
t.Run("deletion pending in progress, cluster installations ready", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateDeletionPendingInProgress
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateReady,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateDeletionPendingInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateReady)
})
t.Run("deletion pending in progress, cluster installations stable", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateDeletionPendingInProgress
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateStable,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateDeletionPending)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateStable)
})
t.Run("deletion cancellation requested, cluster installations stable", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateDeletionCancellationRequested
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateStable,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateUpdateInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateReconciling)
})
t.Run("deletion requested, cluster installations stable", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateDeletionRequested
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateStable,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateDeletionInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateDeletionRequested)
})
t.Run("deletion requested, cluster installations deleting", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateDeletionRequested
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateDeletionRequested,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateDeletionInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateDeletionRequested)
})
t.Run("deletion in progress, cluster installations failed", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateDeletionInProgress
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateDeletionFailed,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateDeletionFailed)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateDeletionFailed)
})
t.Run("deletion requested, cluster installations failed, so retry", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateDeletionRequested
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateDeletionFailed,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateDeletionInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateDeletionRequested)
})
t.Run("deletion requested, delete backups", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateDeletionRequested
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateDeleted,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
backup := &model.InstallationBackup{
InstallationID: installation.ID,
ClusterInstallationID: clusterInstallation.ID,
State: model.InstallationBackupStateBackupSucceeded,
}
err = sqlStore.CreateInstallationBackup(backup)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateDeletionFinalCleanup)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateDeleted)
fetchedBackup, err := sqlStore.GetInstallationBackup(backup.ID)
require.NoError(t, err)
assert.Equal(t, model.InstallationBackupStateDeletionRequested, fetchedBackup.State)
})
t.Run("deletion requested, delete migrations and restorations", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateDeletionRequested
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateDeleted,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
restorationOP := &model.InstallationDBRestorationOperation{
InstallationID: installation.ID,
ClusterInstallationID: clusterInstallation.ID,
State: model.InstallationDBRestorationStateSucceeded,
}
err = sqlStore.CreateInstallationDBRestorationOperation(restorationOP)
require.NoError(t, err)
migrationOP := &model.InstallationDBMigrationOperation{
InstallationID: installation.ID,
State: model.InstallationDBMigrationStateSucceeded,
}
err = sqlStore.CreateInstallationDBMigrationOperation(migrationOP)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateDeletionFinalCleanup)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateDeleted)
fetchedRestoration, err := sqlStore.GetInstallationDBRestorationOperation(restorationOP.ID)
require.NoError(t, err)
assert.Equal(t, model.InstallationDBRestorationStateDeletionRequested, fetchedRestoration.State)
fetchedMigration, err := sqlStore.GetInstallationDBMigrationOperation(migrationOP.ID)
require.NoError(t, err)
assert.Equal(t, model.InstallationDBMigrationStateDeletionRequested, fetchedMigration.State)
})
t.Run("deletion requested, cluster installations deleted", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := standardStableTestInstallation()
installation.State = model.InstallationStateDeletionRequested
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
ClusterID: cluster.ID,
InstallationID: installation.ID,
Namespace: "namespace",
State: model.ClusterInstallationStateDeleted,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateDeleted)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateDeleted)
})
t.Run("multitenant", func(t *testing.T) {
t.Run("creation requested, cluster installations not yet created, available cluster", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
owner := model.NewID()
groupID := model.NewID()
installation := &model.Installation{
OwnerID: owner,
Version: "version",
Name: "dns",
Size: mmv1alpha1.Size100String,
Affinity: model.InstallationAffinityMultiTenant,
GroupID: &groupID,
State: model.InstallationStateCreationRequested,
}
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateCreationRequested)
expectClusterInstallationsOnCluster(t, sqlStore, cluster, 1)
})
t.Run("creation requested, cluster installations not yet created, 3 installations, available cluster", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
for i := 1; i < 3; i++ {
t.Run(fmt.Sprintf("cluster-%d", i), func(t *testing.T) {
owner := model.NewID()
groupID := model.NewID()
installation := &model.Installation{
OwnerID: owner,
Version: "version",
Name: fmt.Sprintf("dns%d", i),
Size: mmv1alpha1.Size100String,
Affinity: model.InstallationAffinityMultiTenant,
GroupID: &groupID,
State: model.InstallationStateCreationRequested,
}
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation(fmt.Sprintf("dns%d.example.com", i)))
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateCreationRequested)
expectClusterInstallationsOnCluster(t, sqlStore, cluster, i)
})
}
})
t.Run("creation requested, cluster installations not yet created, 1 isolated and 1 multitenant, available cluster", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
owner := model.NewID()
groupID := model.NewID()
isolatedInstallation := &model.Installation{
OwnerID: owner,
Version: "version",
Name: "iso-dns",
Size: mmv1alpha1.Size100String,
Affinity: model.InstallationAffinityIsolated,
GroupID: &groupID,
State: model.InstallationStateCreationRequested,
}
err = sqlStore.CreateInstallation(isolatedInstallation, nil, testutil.DNSForInstallation("iso-dns.example.com"))
require.NoError(t, err)
supervisor.Supervise(isolatedInstallation)
expectInstallationState(t, sqlStore, isolatedInstallation, model.InstallationStateCreationInProgress)
expectClusterInstallations(t, sqlStore, isolatedInstallation, 1, model.ClusterInstallationStateCreationRequested)
expectClusterInstallationsOnCluster(t, sqlStore, cluster, 1)
owner = model.NewID()
groupID = model.NewID()
multitenantInstallation := &model.Installation{
OwnerID: owner,
Version: "version",
Name: "mt-dns",
Size: mmv1alpha1.Size100String,
Affinity: model.InstallationAffinityMultiTenant,
GroupID: &groupID,
State: model.InstallationStateCreationRequested,
}
err = sqlStore.CreateInstallation(multitenantInstallation, nil, testutil.DNSForInstallation("mt-dns.example.com"))
require.NoError(t, err)
supervisor.Supervise(multitenantInstallation)
expectInstallationState(t, sqlStore, multitenantInstallation, model.InstallationStateCreationNoCompatibleClusters)
expectClusterInstallations(t, sqlStore, multitenantInstallation, 0, "")
expectClusterInstallationsOnCluster(t, sqlStore, cluster, 1)
})
t.Run("creation requested, cluster installations not yet created, insufficient cluster resources", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
mockInstallationProvisioner := &mockInstallationProvisioner{
UseCustomClusterResources: true,
CustomClusterResources: &k8s.ClusterResources{
MilliTotalCPU: 200,
MilliUsedCPU: 100,
MilliTotalMemory: 200,
MilliUsedMemory: 100,
TotalPodCount: 200,
UsedPodCount: 100,
},
}
supervisor := supervisor.NewInstallationSupervisor(
sqlStore,
mockInstallationProvisioner,
"instanceID",
false,
false,
standardSchedulingOptions,
&utils.ResourceUtil{},
logger,
cloudMetrics,
testutil.SetupTestEventsProducer(sqlStore, logger),
false,
&mockCloudflareClient{}, false,
)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
owner := model.NewID()
groupID := model.NewID()
installation := &model.Installation{
OwnerID: owner,
Version: "version",
Name: "dns",
Size: mmv1alpha1.Size100String,
Affinity: model.InstallationAffinityMultiTenant,
GroupID: &groupID,
State: model.InstallationStateCreationRequested,
}
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationNoCompatibleClusters)
expectClusterInstallations(t, sqlStore, installation, 0, "")
expectClusterInstallationsOnCluster(t, sqlStore, cluster, 0)
})
})
t.Run("creation requested, cluster installations not yet created, insufficient cluster resources, but scale", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
mockInstallationProvisioner := &mockInstallationProvisioner{
UseCustomClusterResources: true,
CustomClusterResources: &k8s.ClusterResources{
MilliTotalCPU: 200,
MilliUsedCPU: 100,
MilliTotalMemory: 200,
MilliUsedMemory: 100,
TotalPodCount: 200,
UsedPodCount: 100,
},
}
schedulingOptions := supervisor.NewInstallationSupervisorSchedulingOptions(false, 80, 0, 0, 0, 2)
require.NoError(t, schedulingOptions.Validate())
supervisor := supervisor.NewInstallationSupervisor(
sqlStore,
mockInstallationProvisioner,
"instanceID",
false,
false,
schedulingOptions,
&utils.ResourceUtil{},
logger,
cloudMetrics,
testutil.SetupTestEventsProducer(sqlStore, logger),
false,
&mockCloudflareClient{}, false,
)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
owner := model.NewID()
groupID := model.NewID()
installation := &model.Installation{
OwnerID: owner,
Version: "version",
Name: "dns",
Size: mmv1alpha1.Size100String,
Affinity: model.InstallationAffinityMultiTenant,
GroupID: &groupID,
State: model.InstallationStateCreationRequested,
}
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateCreationRequested)
expectClusterInstallationsOnCluster(t, sqlStore, cluster, 1)
})
t.Run("creation requested, cluster installations not yet created, use balanced installation scheduling", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
schedulingOptions := supervisor.NewInstallationSupervisorSchedulingOptions(true, 80, 0, 0, 0, 0)
require.NoError(t, schedulingOptions.Validate())
supervisor := supervisor.NewInstallationSupervisor(
sqlStore,
&mockInstallationProvisioner{},
"instanceID",
false,
false,
schedulingOptions,
&utils.ResourceUtil{},
logger,
cloudMetrics,
testutil.SetupTestEventsProducer(sqlStore, logger),
false,
&mockCloudflareClient{}, false,
)
cluster1 := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster1, nil)
require.NoError(t, err)
cluster2 := standardStableTestCluster()
err = sqlStore.CreateCluster(cluster2, nil)
require.NoError(t, err)
owner := model.NewID()
groupID := model.NewID()
installation := &model.Installation{
OwnerID: owner,
Version: "version",
Name: "dns",
Size: mmv1alpha1.Size100String,
Affinity: model.InstallationAffinityMultiTenant,
GroupID: &groupID,
State: model.InstallationStateCreationRequested,
}
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateCreationRequested)
expectClusterInstallationsOnCluster(t, sqlStore, cluster1, 1)
expectClusterInstallationsOnCluster(t, sqlStore, cluster2, 0)
})
t.Run("cluster with proper annotations selected", func(t *testing.T) {
annotations := []*model.Annotation{
{Name: "multi-tenant"}, {Name: "customer-abc"},
}
installationInCreationRequestedState := func() *model.Installation {
groupID := model.NewID()
return &model.Installation{
OwnerID: model.NewID(),
Version: "version",
Name: "dns",
Size: mmv1alpha1.Size100String,
Affinity: model.InstallationAffinityMultiTenant,
GroupID: &groupID,
State: model.InstallationStateCreationRequested,
}
}
t.Run("cluster with matching annotations exists", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, annotations)
require.NoError(t, err)
installation := installationInCreationRequestedState()
err = sqlStore.CreateInstallation(installation, annotations, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateCreationRequested)
expectClusterInstallationsOnCluster(t, sqlStore, cluster, 1)
})
t.Run("cluster with matching annotations does not exists", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := installationInCreationRequestedState()
err = sqlStore.CreateInstallation(installation, annotations, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationNoCompatibleClusters)
expectClusterInstallations(t, sqlStore, installation, 0, "")
expectClusterInstallationsOnCluster(t, sqlStore, cluster, 0)
})
t.Run("annotations filter ignored when installation without annotations", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := standardTestInstallationSupervisor(sqlStore, logger)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, annotations)
require.NoError(t, err)
installation := installationInCreationRequestedState()
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
supervisor.Supervise(installation)
expectInstallationState(t, sqlStore, installation, model.InstallationStateCreationInProgress)
expectClusterInstallations(t, sqlStore, installation, 1, model.ClusterInstallationStateCreationRequested)
expectClusterInstallationsOnCluster(t, sqlStore, cluster, 1)
})
})
t.Run("force CR upgrade to v1Beta", func(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
supervisor := supervisor.NewInstallationSupervisor(
sqlStore,
&mockInstallationProvisioner{},
"instanceID",
false,
false,
standardSchedulingOptions,
&utils.ResourceUtil{},
logger,
cloudMetrics,
testutil.SetupTestEventsProducer(sqlStore, logger),
true,
&mockCloudflareClient{}, false,
)
cluster := standardStableTestCluster()
err := sqlStore.CreateCluster(cluster, nil)
require.NoError(t, err)
installation := &model.Installation{
Version: "version",
Name: "dns",
Size: mmv1alpha1.Size100String,
Affinity: model.InstallationAffinityMultiTenant,
State: model.InstallationStateUpdateRequested,
CRVersion: model.V1betaCRVersion,
}
err = sqlStore.CreateInstallation(installation, nil, testutil.DNSForInstallation("dns.example.com"))
require.NoError(t, err)
supervisor.Supervise(installation)
updatedInstallation, err := sqlStore.GetInstallation(installation.ID, false, false)
require.NoError(t, err)
require.Equal(t, model.V1betaCRVersion, updatedInstallation.CRVersion)
})
}
// TestInstallationSupervisorSchedulingOptions verifies that
// NewInstallationSupervisorSchedulingOptions copies the shared resource
// threshold into any per-resource threshold left at 0, and that Validate
// rejects out-of-range thresholds and scale values.
func TestInstallationSupervisorSchedulingOptions(t *testing.T) {
	for _, testCase := range []struct {
		name            string
		inputOptions    supervisor.InstallationSupervisorSchedulingOptions
		expectedOptions supervisor.InstallationSupervisorSchedulingOptions
		expectError     bool
	}{
		{
			name:         "valid, no overrides",
			inputOptions: supervisor.NewInstallationSupervisorSchedulingOptions(true, 80, 0, 0, 0, 2),
			expectedOptions: supervisor.InstallationSupervisorSchedulingOptions{
				BalanceInstallations:               true,
				ClusterResourceThresholdCPU:        80,
				ClusterResourceThresholdMemory:     80,
				ClusterResourceThresholdPodCount:   80,
				ClusterResourceThresholdScaleValue: 2,
			},
			expectError: false,
		},
		{
			name:         "valid, cpu override",
			inputOptions: supervisor.NewInstallationSupervisorSchedulingOptions(true, 80, 40, 0, 0, 2),
			expectedOptions: supervisor.InstallationSupervisorSchedulingOptions{
				BalanceInstallations:               true,
				ClusterResourceThresholdCPU:        40,
				ClusterResourceThresholdMemory:     80,
				ClusterResourceThresholdPodCount:   80,
				ClusterResourceThresholdScaleValue: 2,
			},
			expectError: false,
		},
		{
			name:         "valid, memory override",
			inputOptions: supervisor.NewInstallationSupervisorSchedulingOptions(true, 80, 0, 40, 0, 2),
			expectedOptions: supervisor.InstallationSupervisorSchedulingOptions{
				BalanceInstallations:               true,
				ClusterResourceThresholdCPU:        80,
				ClusterResourceThresholdMemory:     40,
				ClusterResourceThresholdPodCount:   80,
				ClusterResourceThresholdScaleValue: 2,
			},
			expectError: false,
		},
		{
			name:         "valid, pod count override",
			inputOptions: supervisor.NewInstallationSupervisorSchedulingOptions(true, 80, 0, 0, 40, 2),
			expectedOptions: supervisor.InstallationSupervisorSchedulingOptions{
				BalanceInstallations:               true,
				ClusterResourceThresholdCPU:        80,
				ClusterResourceThresholdMemory:     80,
				ClusterResourceThresholdPodCount:   40,
				ClusterResourceThresholdScaleValue: 2,
			},
			expectError: false,
		},
		{
			name:         "invalid, no overrides",
			inputOptions: supervisor.NewInstallationSupervisorSchedulingOptions(true, -1, 0, 0, 0, 2),
			expectedOptions: supervisor.InstallationSupervisorSchedulingOptions{
				BalanceInstallations:               true,
				ClusterResourceThresholdCPU:        -1,
				ClusterResourceThresholdMemory:     -1,
				ClusterResourceThresholdPodCount:   -1,
				ClusterResourceThresholdScaleValue: 2,
			},
			expectError: true,
		},
		{
			name:         "invalid, cpu override",
			inputOptions: supervisor.NewInstallationSupervisorSchedulingOptions(true, 80, 2, 0, 0, 2),
			expectedOptions: supervisor.InstallationSupervisorSchedulingOptions{
				BalanceInstallations:               true,
				ClusterResourceThresholdCPU:        2,
				ClusterResourceThresholdMemory:     80,
				ClusterResourceThresholdPodCount:   80,
				ClusterResourceThresholdScaleValue: 2,
			},
			expectError: true,
		},
		{
			name:         "invalid, memory override",
			inputOptions: supervisor.NewInstallationSupervisorSchedulingOptions(true, 80, 0, 2, 0, 2),
			expectedOptions: supervisor.InstallationSupervisorSchedulingOptions{
				BalanceInstallations:               true,
				ClusterResourceThresholdCPU:        80,
				ClusterResourceThresholdMemory:     2,
				ClusterResourceThresholdPodCount:   80,
				ClusterResourceThresholdScaleValue: 2,
			},
			expectError: true,
		},
		{
			name:         "invalid, pod count override",
			inputOptions: supervisor.NewInstallationSupervisorSchedulingOptions(true, 80, 0, 0, 2, 2),
			expectedOptions: supervisor.InstallationSupervisorSchedulingOptions{
				BalanceInstallations:               true,
				ClusterResourceThresholdCPU:        80,
				ClusterResourceThresholdMemory:     80,
				ClusterResourceThresholdPodCount:   2,
				ClusterResourceThresholdScaleValue: 2,
			},
			expectError: true,
		},
		{
			name:         "invalid, scale value out of bounds",
			inputOptions: supervisor.NewInstallationSupervisorSchedulingOptions(true, 80, 0, 0, 0, -1),
			expectedOptions: supervisor.InstallationSupervisorSchedulingOptions{
				BalanceInstallations:               true,
				ClusterResourceThresholdCPU:        80,
				ClusterResourceThresholdMemory:     80,
				ClusterResourceThresholdPodCount:   80,
				ClusterResourceThresholdScaleValue: -1,
			},
			expectError: true,
		},
	} {
		t.Run(testCase.name, func(t *testing.T) {
			// testify's assert.Equal takes (t, expected, actual); the
			// original call had the two reversed, which produces
			// misleading "expected/actual" labels on failure.
			assert.Equal(t, testCase.expectedOptions, testCase.inputOptions)
			err := testCase.expectedOptions.Validate()
			if testCase.expectError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package hpsutil contains functionality used by the HPS tast tests.
package hpsutil
import (
"context"
"regexp"
"time"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
"chromiumos/tast/testing"
)
const (
	// PersonPresentPageArchiveFilename is the file name for the person-present test page archive.
	PersonPresentPageArchiveFilename = "person-present-page.tar.xz"
	// WaitNOpsBeforeStart How many ops to wait before starting to test after starting Chrome.
	// Since HPS has auto-exposure give it some time to figure the exposure.
	WaitNOpsBeforeStart = 10
	// WaitNOpsBeforeExpectingPresenceChange How many ops to wait when changing from no person to person in frame or
	// vice versa.
	WaitNOpsBeforeExpectingPresenceChange = 1
	// GetNOpsToVerifyPresenceWorks How many ops to run to ensure that presence model works reliably.
	GetNOpsToVerifyPresenceWorks = 10
)
// WaitForNPresenceOps polls HPS until numOps presence results have been
// collected and returns the raw results in the order they were read.
// feature selects which status register is read: "1" reads register 9
// (second-person alert), anything else reads register 8 (presence).
func WaitForNPresenceOps(hctx *HpsContext, numOps int, feature string) ([]int, error) {
	var result []int
	ctx := hctx.Ctx
	reg := "8"
	if feature == "1" {
		reg = "9"
	}
	testing.ContextLog(ctx, "waitForNPresenceOps ", numOps)
	counter := 0
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		if counter >= numOps {
			// Enough samples collected; returning nil ends the poll.
			return nil
		}
		presence, err := GetPresenceResult(hctx, reg)
		if err != nil {
			return testing.PollBreak(errors.Wrap(err, "failed to get presence result"))
		}
		testing.ContextLog(ctx, "Got presence result: ", presence)
		result = append(result, presence)
		counter++
		// Returning a non-nil error intentionally keeps the poll running
		// until counter reaches numOps.
		return errors.Errorf("stopped at %d", counter)
	}, &testing.PollOptions{
		Interval: 50 * time.Millisecond,
		Timeout:  time.Duration(numOps) * time.Minute,
	}); err != nil {
		return result, errors.Wrap(err, "failed wait for new inference")
	}
	if len(result) != numOps {
		// Use %d for the integer counts; the original %q printed them as runes.
		return result, errors.Errorf("wrong number of presence results: expected %d got %d (%v)", numOps, len(result), result)
	}
	return result, nil
}
// EnablePresence power-cycles the HPS devboard and enables presence detection.
// TODO: fail the test if reg6 is not 0x0000 at any point in time?
// It takes ~2 minutes to enable presence after reset.
// 0 -- enable detecting one person
// 1 -- enable second person alert
// EnablePresence power-cycles the HPS devboard and enables presence detection,
// returning how long the enable sequence took.
// TODO: fail the test if reg6 is not 0x0000 at any point in time?
// It takes ~2 minutes to enable presence after reset.
// feature selects what to enable:
//
//	0 -- enable detecting one person
//	1 -- enable second person alert
func EnablePresence(hctx *HpsContext, feature string) (time.Duration, error) {
	if err := hctx.PowerCycle(); err != nil {
		return 0, err
	}
	// Register 7 is expected to report 0x0001 for one-person detection and
	// 0x0002 for the second-person alert (presumably a feature bitmask —
	// TODO confirm against the HPS register documentation).
	status := `\b0x0001\b`
	if feature == "1" {
		status = `\b0x0002\b`
	}
	start := time.Now()
	// Boot stage1 firmware, then the application firmware, then enable the
	// requested feature, waiting for each stage to report readiness.
	if err := RunHpsTool(hctx, "cmd", "launch"); err != nil {
		return 0, err
	}
	if err := pollStatus(hctx, "2", `\bkStage1\b`); err != nil {
		return 0, err
	}
	if err := RunHpsTool(hctx, "cmd", "appl"); err != nil {
		return 0, err
	}
	if err := pollStatus(hctx, "2", `\bkAppl\b`); err != nil {
		return 0, err
	}
	if err := RunHpsTool(hctx, "enable", feature); err != nil {
		return 0, err
	}
	if err := pollStatus(hctx, "7", status); err != nil {
		return 0, err
	}
	// time.Since is the idiomatic form of time.Now().Sub(start).
	return time.Since(start), nil
}
// pollStatus polls the given HPS status register until its output matches
// pattern, running the hps tool either over the DUT connection (remote tests)
// or locally. It gives up after 5 minutes.
func pollStatus(hctx *HpsContext, register, pattern string) error {
	testing.ContextLog(hctx.Ctx, "Polling hps status register ", register, " for '", pattern, "'")
	regex := regexp.MustCompile(pattern)
	args := []string{"hps", hctx.Device, "status", register}
	if err := testing.Poll(hctx.Ctx, func(ctx context.Context) error {
		var output []byte
		var err error
		if hctx.DutConn != nil {
			output, err = hctx.DutConn.CommandContext(ctx, args[0], args[1:]...).CombinedOutput()
		} else {
			output, err = testexec.CommandContext(ctx, args[0], args[1:]...).CombinedOutput()
		}
		if err != nil {
			return err
		}
		matched := regex.MatchString(string(output))
		if matched {
			return nil
		}
		return errors.Errorf("%q not found in %q", pattern, string(output))
	}, &testing.PollOptions{
		Interval: 100 * time.Millisecond,
		Timeout:  5 * time.Minute,
	}); err != nil {
		// The original message hard-coded "kStage1" even though this helper
		// polls arbitrary registers/patterns; report what was really awaited.
		return errors.Wrapf(err, "failed to wait for %q in status register %s", pattern, register)
	}
	return nil
}
|
// Copyright 2019 - 2022 The Samply Community
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fhir
import (
"encoding/json"
"fmt"
"strings"
)
// THIS FILE IS GENERATED BY https://github.com/samply/golang-fhir-models
// PLEASE DO NOT EDIT BY HAND
// NoteType is documented here http://hl7.org/fhir/ValueSet/note-type
// NoteType is documented here http://hl7.org/fhir/ValueSet/note-type
type NoteType int

const (
	NoteTypeDisplay NoteType = iota
	NoteTypePrint
	NoteTypePrintoper
)

// MarshalJSON encodes the note type as its JSON code string.
func (code NoteType) MarshalJSON() ([]byte, error) {
	return json.Marshal(code.Code())
}

// UnmarshalJSON decodes a JSON code string into the receiver, returning an
// error for any code outside the value set.
func (code *NoteType) UnmarshalJSON(input []byte) error {
	trimmed := strings.Trim(string(input), "\"")
	decoded, known := map[string]NoteType{
		"display":   NoteTypeDisplay,
		"print":     NoteTypePrint,
		"printoper": NoteTypePrintoper,
	}[trimmed]
	if !known {
		return fmt.Errorf("unknown NoteType code `%s`", trimmed)
	}
	*code = decoded
	return nil
}

// String returns the canonical code for the note type.
func (code NoteType) String() string {
	return code.Code()
}

// Code returns the value-set code, or "<unknown>" for values outside the set.
func (code NoteType) Code() string {
	if text, ok := map[NoteType]string{
		NoteTypeDisplay:   "display",
		NoteTypePrint:     "print",
		NoteTypePrintoper: "printoper",
	}[code]; ok {
		return text
	}
	return "<unknown>"
}

// Display returns the human-readable display text for the note type.
func (code NoteType) Display() string {
	if text, ok := map[NoteType]string{
		NoteTypeDisplay:   "Display",
		NoteTypePrint:     "Print (Form)",
		NoteTypePrintoper: "Print (Operator)",
	}[code]; ok {
		return text
	}
	return "<unknown>"
}

// Definition returns the value-set definition text for the note type.
func (code NoteType) Definition() string {
	if text, ok := map[NoteType]string{
		NoteTypeDisplay:   "Display the note.",
		NoteTypePrint:     "Print the note on the form.",
		NoteTypePrintoper: "Print the note for the operator.",
	}[code]; ok {
		return text
	}
	return "<unknown>"
}
|
package chimera
import (
"crypto/tls"
"crypto/x509"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"strings"
"github.com/engineerd/wasm-to-oci/pkg/oci"
"github.com/pkg/errors"
)
// ModuleSource identifies where a Wasm module URI points to.
type ModuleSource int

const (
	// UnknownSource is the zero value, used when the URI cannot be classified.
	UnknownSource ModuleSource = iota
	// FileSource is a module on the local filesystem (file:// scheme).
	FileSource ModuleSource = iota
	// HTTPSource is a module served over HTTP or HTTPS.
	HTTPSource ModuleSource = iota
	// RegistrySource is a module stored in an OCI registry (registry:// scheme).
	RegistrySource ModuleSource = iota
)
// WasmModuleSource parses uri and classifies where the module lives. It
// returns the source kind and the location in the form that kind expects:
// the bare path for file://, the full URI for http(s), and the scheme-less
// reference ("host/repo:tag") for registry://.
func WasmModuleSource(uri string) (ModuleSource, string, error) {
	parsedUri, err := url.Parse(uri)
	if err != nil {
		return UnknownSource, "", errors.Errorf("invalid source: %q", uri)
	}
	switch parsedUri.Scheme {
	case "file":
		return FileSource, parsedUri.Path, nil
	case "http", "https":
		return HTTPSource, uri, nil
	case "registry":
		// Drop the scheme, then trim the leading "//" that URL
		// serialization leaves behind so only the reference remains.
		parsedUri.Scheme = ""
		return RegistrySource, strings.TrimLeft(parsedUri.String(), "/"), nil
	}
	// Bug fix: previously this returned FileSource alongside the error,
	// letting callers that ignore the error treat an unrecognized scheme
	// as a local file. Return UnknownSource to match the error path above.
	return UnknownSource, "", errors.Errorf("unknown scheme %q", parsedUri.Scheme)
}
// FetchRemoteWasmModule downloads a Wasm module from an HTTP(S) endpoint or
// an OCI registry into a temporary file and returns that file's path.
// The caller owns the returned file and is responsible for removing it.
func FetchRemoteWasmModule(moduleSource ModuleSource, uri string, insecure, nonTLS bool, caPath string) (string, error) {
	wasmModule, err := ioutil.TempFile("", "wasm-module-*")
	if err != nil {
		return "", err
	}
	// Fix: the temp file handle was never closed, leaking a descriptor per
	// call. oci.Pull and callers address the module by name, not through
	// this handle, so closing on all paths is safe.
	defer wasmModule.Close()
	// cleanup removes the partially written temp file on failure.
	cleanup := func() { os.Remove(wasmModule.Name()) }
	switch moduleSource {
	case HTTPSource:
		tlsConfig, err := newTLSConfig(insecure, caPath)
		if err != nil {
			cleanup()
			return "", err
		}
		tr := &http.Transport{TLSClientConfig: tlsConfig}
		client := http.Client{Transport: tr}
		resp, err := client.Get(uri)
		if err != nil {
			cleanup()
			return "", errors.Errorf("could not download Wasm module from %q: %v", uri, err)
		}
		defer resp.Body.Close()
		// NOTE(review): a non-2xx status still writes the error body as the
		// module; consider checking resp.StatusCode before copying.
		if _, err := io.Copy(wasmModule, resp.Body); err != nil {
			cleanup()
			return "", errors.Errorf("could not download Wasm module from %q: %v", uri, err)
		}
	case RegistrySource:
		if caPath != "" {
			log.Printf("WARNING: currently we don't support a custom CA when pulling from an OCI registry, switching to 'insecure' mode")
			insecure = true
		}
		if err := oci.Pull(uri, wasmModule.Name(), insecure, nonTLS); err != nil {
			cleanup()
			return "", err
		}
	default:
		cleanup()
		return "", errors.Errorf("invalid source: %q", uri)
	}
	return wasmModule.Name(), nil
}
// newTLSConfig builds a TLS configuration, optionally disabling certificate
// verification and/or adding a custom CA (read from caPath) to the system
// root pool.
func newTLSConfig(insecure bool, caPath string) (*tls.Config, error) {
	cfg := &tls.Config{InsecureSkipVerify: insecure}
	if caPath == "" {
		return cfg, nil
	}
	pem, err := ioutil.ReadFile(caPath)
	if err != nil {
		return nil, err
	}
	pool, err := x509.SystemCertPool()
	if err != nil {
		return nil, err
	}
	if !pool.AppendCertsFromPEM(pem) {
		return nil, errors.New("failed to parse CA certificate")
	}
	cfg.RootCAs = pool
	return cfg, nil
}
|
package base
import (
"github.com/stretchr/testify/assert"
"math"
"testing"
)
const simTestEpsilon = 1e-3
// TestCosine checks CosineSimilarity against a hand-computed value.
func TestCosine(t *testing.T) {
	a := NewSparseVector()
	a.Add(1, 4)
	a.Add(2, 5)
	a.Add(3, 6)
	b := NewSparseVector()
	b.Add(0, 0)
	b.Add(1, 1)
	b.Add(2, 2)
	sim := CosineSimilarity(a, b)
	// Include the actual value in the failure message; a bare
	// assert.False on the comparison hid it.
	assert.True(t, math.Abs(sim-0.978) <= simTestEpsilon,
		"cosine similarity = %v, want 0.978 ± %v", sim, simTestEpsilon)
}
// TestMSD checks MSDSimilarity against a hand-computed value.
func TestMSD(t *testing.T) {
	a := NewSparseVector()
	a.Add(1, 4)
	a.Add(2, 5)
	a.Add(3, 6)
	b := NewSparseVector()
	b.Add(0, 0)
	b.Add(1, 1)
	b.Add(2, 2)
	sim := MSDSimilarity(a, b)
	// Include the actual value in the failure message; a bare
	// assert.False on the comparison hid it.
	assert.True(t, math.Abs(sim-0.1) <= simTestEpsilon,
		"MSD similarity = %v, want 0.1 ± %v", sim, simTestEpsilon)
}
// TestPearson checks that PearsonSimilarity is ~0 for these centered inputs.
func TestPearson(t *testing.T) {
	a := NewSparseVector()
	a.Add(1, 4)
	a.Add(2, 5)
	a.Add(3, 6)
	b := NewSparseVector()
	b.Add(0, 0)
	b.Add(1, 1)
	b.Add(2, 2)
	sim := PearsonSimilarity(a, b)
	// Include the actual value in the failure message; a bare
	// assert.False on the comparison hid it.
	assert.True(t, math.Abs(sim) <= simTestEpsilon,
		"Pearson similarity = %v, want 0 ± %v", sim, simTestEpsilon)
}
|
// DailyGakki - handler
// 2020-10-17 14:03
// Benny <benny.think@gmail.com>
package main
import (
"bytes"
"fmt"
log "github.com/sirupsen/logrus"
tb "gopkg.in/tucnak/telebot.v2"
"path/filepath"
"strconv"
"strings"
)
import "github.com/tgbot-collection/tgbot_ping"
// startHandler replies to /start with a welcome caption and the start.gif
// animation loaded from the embedded asset bundle.
func startHandler(m *tb.Message) {
	caption := "ๆฌข่ฟๆฅๅฐๆฏๆฅๆๅฏ็ฑ็ Gakki๐\nๆไผๆฏๅคฉๅฎๆถไธบไฝ ๅ้ๆๅฏ็ฑ็ Gakki๏ผ"
	filename := "start.gif"
	log.Infof("Start command: %d", m.Chat.ID)
	// Best-effort "uploading" chat action; failures are ignored.
	_ = b.Notify(m.Chat, tb.UploadingPhoto)
	// NOTE(review): Asset errors are ignored; a missing asset would send an
	// empty animation.
	data, _ := Asset(filepath.Join("images", filename))
	log.Debugf("Find %s from memory...", filename)
	p := &tb.Animation{File: tb.FromReader(bytes.NewReader(data)), FileName: filename, Caption: caption}
	_, err := b.Send(m.Chat, p)
	if err != nil {
		log.Warnf("%s send failed %v", filename, err)
	}
}
// aboutHandler replies to /about with project/author info and about.gif.
func aboutHandler(m *tb.Message) {
	caption := "ไฝ ่ๅฉๆฒกๅฆ๏ผ\n" +
		"ๅผๅ่๏ผ@BennyThink\n" +
		"GitHub: https://github.com/tgbot-collection/DailyGakki \n" +
		"Google Photos ๅฐๅ๏ผ" + album
	filename := "about.gif"
	log.Infof("About command: %d", m.Chat.ID)
	_ = b.Notify(m.Chat, tb.UploadingPhoto)
	// NOTE(review): Asset errors are ignored; a missing asset would send an
	// empty animation.
	data, _ := Asset(filepath.Join("images", filename))
	log.Debugf("Find %s from memory...", filename)
	p := &tb.Animation{File: tb.FromReader(bytes.NewReader(data)), FileName: filename, Caption: caption}
	_, err := b.Send(m.Chat, p)
	if err != nil {
		log.Warnf("%s send failed %v", filename, err)
	}
}
// newHandler replies to /new with a freshly generated photo album.
func newHandler(m *tb.Message) {
	log.Infof("New command: %d", m.Chat.ID)
	// Send 3 photos by default (per generatePhotos).
	_ = b.Notify(m.Chat, tb.Typing)
	sendAlbum := generatePhotos()
	_ = b.Notify(m.Chat, tb.UploadingPhoto)
	_, _ = b.SendAlbum(m.Chat, sendAlbum)
	log.Debugf("Album has been sent to %d", m.Chat.ID)
}
// settingsHandler shows the chat's configured push times together with
// inline buttons to add or modify them. Only private chats, channels, and
// group admins may use it (see permissionCheck).
func settingsHandler(m *tb.Message) {
	if !permissionCheck(m) {
		return
	}
	log.Infof("Settings command: %d", m.Chat.ID)
	_ = b.Notify(m.Chat, tb.Typing)
	// send out push time
	var btns []tb.Btn
	var selector = &tb.ReplyMarkup{}
	add := selector.Data("ๅขๅ ๆจ้ๆถ้ด", "AddPushStep1")
	modify := selector.Data("ไฟฎๆนๆจ้ๆถ้ด", "ModifyPush")
	btns = append(btns, add, modify)
	selector.Inline(
		selector.Row(btns...),
	)
	_ = b.Notify(m.Chat, tb.Typing)
	pushTimeStr := strings.Join(getPushTime(m.Chat.ID), " ")
	log.Infof("Push time is %s ...", pushTimeStr)
	if pushTimeStr == "" {
		// Fix (staticcheck S1039): fmt.Sprintf with no verbs was a no-op;
		// use the literal directly.
		message := "ๅผๅ็ฒ๐๏ผ้ฝๆฒกๆ /subscribe ่ฟๆณ็๏ผ"
		_, _ = b.Send(m.Chat, message)
	} else {
		message := fmt.Sprintf("ไฝ ็ฎๅ็ๆจ้ๆถ้ดๆ๏ผ%s๏ผไฝ ๆณ่ฆๅขๅ ่ฟๆฏๅ ้ค๏ผ", pushTimeStr)
		_, _ = b.Send(m.Chat, message, selector)
	}
}
// channelHandler dispatches channel posts of the form "/cmd@BotName" to the
// matching command handler; anything else is logged and ignored.
func channelHandler(m *tb.Message) {
	log.Infof("Channel message Handler: %d from %s", m.Chat.ID, m.Chat.Type)
	// Commands in channels carry the bot mention; strip it once instead of
	// rebuilding "@me" for every case.
	mention := "@" + b.Me.Username
	if !strings.HasSuffix(m.Text, mention) {
		log.Warnf("Oops. %s is not a command. Ignore it.", m.Text)
		return
	}
	switch strings.TrimSuffix(m.Text, mention) {
	case "/start":
		startHandler(m)
	case "/about":
		aboutHandler(m)
	case "/new":
		newHandler(m)
	case "/settings":
		settingsHandler(m)
	case "/subscribe":
		subHandler(m)
	case "/unsubscribe":
		unsubHandler(m)
	case "/status":
		statusHandler(m)
	case "/submit":
		submitHandler(m)
	case "/ping":
		pingHandler(m)
	default:
		log.Warnf("Oops. %s is not a command. Ignore it.", m.Text)
	}
}
// subHandler subscribes the chat to the daily push (default time 18:11,
// per addInitSub) after confirming permission, and replies with sub.gif.
func subHandler(m *tb.Message) {
	// check permission first
	if !permissionCheck(m) {
		return
	}
	caption := "ๅทฒ็ป่ฎข้ๆๅๅฆ๏ผๅฐๅจๆฏๆ18:11ๅๆถไธบไฝ ๆจ้ๆๅฏ็ฑ็ Gakki๏ผๅฆๆ้่ฆๅฏๅจ /settings ไธญๆดๆนๆถ้ดๅ้ข็"
	filename := "sub.gif"
	log.Infof("Sub command: %d", m.Chat.ID)
	_ = b.Notify(m.Chat, tb.UploadingPhoto)
	// NOTE(review): Asset errors are ignored; a missing asset would send an
	// empty animation.
	data, _ := Asset(filepath.Join("images", filename))
	log.Debugf("Find %s from memory...", filename)
	p := &tb.Animation{File: tb.FromReader(bytes.NewReader(data)), FileName: filename, Caption: caption}
	_, err := b.Send(m.Chat, p)
	if err != nil {
		log.Warnf("%s send failed %v", filename, err)
	}
	// Register the chat with the default push schedule.
	addInitSub(m.Chat.ID)
}
// permissionCheck reports whether the sender may change subscription
// settings: private chats and channels are always allowed, while groups
// require the sender to be an admin. On denial it notifies the chat.
// In channels there is no m.Sender; there the bot is always admin.
func permissionCheck(m *tb.Message) bool {
	canSubscribe := m.Private() || m.Chat.Type == "channel"
	if !canSubscribe {
		// NOTE(review): AdminsOf errors are swallowed; on failure the
		// request is simply denied.
		admins, _ := b.AdminsOf(m.Chat)
		for _, admin := range admins {
			if admin.User.ID == m.Sender.ID {
				canSubscribe = true
				// Fix: stop scanning once the sender is found instead of
				// iterating the remaining admins to no effect.
				break
			}
		}
	}
	log.Infof("User %d on %s permission is %v", m.Chat.ID, m.Chat.Type, canSubscribe)
	if !canSubscribe {
		log.Warnf("Denied subscribe request for: %d", m.Sender.ID)
		_ = b.Notify(m.Chat, tb.Typing)
		_, _ = b.Send(m.Chat, "ใใ๐ๅชๆ็ฎก็ๅๆ่ฝ่ฟ่ก่ฎพ็ฝฎๅฆ")
		return false
	}
	return true
}
// unsubHandler removes the chat's subscription after a permission check and
// replies with unsub.gif plus a parting message.
func unsubHandler(m *tb.Message) {
	if !permissionCheck(m) {
		return
	}
	caption := "Gakki ๅทฒ็ปไธๆฏไฝ ่ๅฉไบ๐"
	filename := "unsub.gif"
	log.Infof("Unsub command: %d", m.Chat.ID)
	_ = b.Notify(m.Chat, tb.UploadingPhoto)
	// NOTE(review): Asset errors are ignored; a missing asset would send an
	// empty animation.
	data, _ := Asset(filepath.Join("images", filename))
	log.Debugf("Find %s from memory...", filename)
	p := &tb.Animation{File: tb.FromReader(bytes.NewReader(data)), FileName: filename, Caption: caption}
	_, err := b.Send(m.Chat, p)
	if err != nil {
		log.Warnf("%s send failed %v", filename, err)
	}
	_ = b.Notify(m.Chat, tb.Typing)
	_, _ = b.Send(m.Chat, "๐ญ")
	// Read the subscription store, drop this chat, and write it back.
	remove(m.Chat.ID)
}
// messageHandler replies to recognized emoji messages with a matching GIF;
// any other text is silently ignored.
func messageHandler(m *tb.Message) {
	log.Debugf("Message Handler: %d from %s", m.Chat.ID, m.Chat.Type)
	caption := "็งใฏใไปใงใ็ฉบใจๆใใใฆใใพใใ"
	var filename string
	switch m.Text {
	case "๐":
		filename = "kiss.gif"
	case "๐":
		filename = "kiss.gif"
	case "๐":
		filename = "kiss.gif"
	case "โค๏ธ":
		filename = "heart1.gif"
	case "โค๏ธโค๏ธ":
		filename = "heart2.gif"
	case "โค๏ธโค๏ธโค๏ธ":
		filename = "heart3.gif"
	case "๐น":
		filename = "rose.gif"
	case "๐ฆ":
		filename = "lizard.gif"
	default:
		// Fix (ineffectual assignment): the original assigned
		// filename = "default.gif" here and then returned without ever
		// using it; the dead store is removed.
		return
	}
	log.Infof("Message Handler: %d from %s,Choose %s for text %s", m.Chat.ID, m.Chat.Type, filename, m.Text)
	// NOTE(review): Asset errors are ignored; a missing asset would send an
	// empty animation.
	data, _ := Asset(filepath.Join("images", filename))
	_ = b.Notify(m.Chat, tb.UploadingPhoto)
	p := &tb.Animation{File: tb.FromReader(bytes.NewReader(data)), FileName: filename, Caption: caption}
	_, err := b.Send(m.Chat, p)
	if err != nil {
		log.Warnf("%s send failed %v", filename, err)
	}
}
// pingHandler replies with the bot's runtime/health report rendered as HTML.
func pingHandler(m *tb.Message) {
	_ = b.Notify(m.Chat, tb.Typing)
	info := tgbot_ping.GetRuntime("botsrunner_gakki_1", "Gakki Bot", "html")
	_, _ = b.Send(m.Chat, info, &tb.SendOptions{ParseMode: tb.ModeHTML})
}
// statusHandler tells the chat whether it currently has a subscription by
// scanning the persisted subscriber list.
func statusHandler(m *tb.Message) {
	_ = b.Notify(m.Chat, tb.Typing)
	subscribed := false
	for _, user := range readJSON() {
		if user.ChatId == m.Chat.ID {
			subscribed = true
		}
	}
	reply := "่ฟๆจๆๆฏๆฅ Gakki๐"
	if subscribed {
		reply = "Gakki ไธไฝ ๅๅจ๐"
	}
	_, _ = b.Send(m.Chat, reply)
}
// photoHandler forwards a privately submitted photo to the reviewer with
// Yes/No inline buttons. The callback data embeds the requester's ack
// message signature so the verdict can be reported back later.
func photoHandler(m *tb.Message) {
	// Submissions are only accepted in private chats.
	if !m.Private() {
		return
	}
	// NOTE(review): conversion errors for the reviewer ID are ignored.
	userID, _ := strconv.Atoi(reviewer)
	// Stand-in message whose Sender is the reviewer; used as forward target.
	mm := tb.Message{
		Sender: &tb.User{
			ID: userID,
		},
	}
	_ = b.Notify(m.Chat, tb.Typing)
	botSent, _ := b.Reply(m, "ไฝ ็ๅฎกๆ ธๅทฒ็ปๅๅบๅปไบโฆโฆ่ฏท่ๅฟ็ญๅพ๐")
	var btns []tb.Btn
	var selector = &tb.ReplyMarkup{}
	// Encode the ack message's (messageID, chatID) into the callback data so
	// approveCallback/denyCallback can edit it later (see getStoredMessage).
	p1, p2 := botSent.MessageSig()
	data := fmt.Sprintf("%v|%v", p1, p2)
	approve := selector.Data("Yes", "Yes", data)
	deny := selector.Data("No", "No", data)
	btns = append(btns, approve, deny)
	selector.Inline(
		selector.Row(btns...),
	)
	fwd, err := b.Forward(mm.Sender, m, selector)
	if err != nil {
		log.Errorln(err)
		_, _ = b.Edit(botSent, "ๅโฆโฆ็ฑไบๆ็ง็ฅ็ง็ๅๅ ๏ผๅฎกๆ ธ่ฏทๆฑๅ้ๅคฑ่ดฅไบ๏ผไฝ ๅๅไธไธ่ฏ่ฏ\n"+err.Error())
	} else {
		_, _ = b.Reply(fwd, "่ฏทReview", selector)
	}
}
// callbackEntrance routes inline-button callbacks by their data prefix.
// telebot prefixes the unique callback identifier with "\f".
func callbackEntrance(c *tb.Callback) {
	log.Infof("Initiating callback data %s from %d", c.Data, c.Sender.ID)
	// this callback interacts with requester
	switch {
	case strings.HasPrefix(c.Data, "\fYes"):
		approveCallback(c)
	case strings.HasPrefix(c.Data, "\fNo"):
		denyCallback(c)
	case c.Data == "\fAddPushStep1":
		addPushStep1(c)
	case strings.HasPrefix(c.Data, "\faddPushStep2SelectTime"):
		addPushStep2SelectTime(c)
	case strings.HasPrefix(c.Data, "\fModifyPush"):
		modifyPushStep1(c)
	case strings.HasPrefix(c.Data, "\fmodifyPushStep2SelectTime||"):
		modifyPushStep2(c)
	}
}
// modifyPushStep2 deletes the selected push time for the chat, then
// refreshes the inline keyboard in place with the remaining times.
func modifyPushStep2(c *tb.Callback) {
	uid := c.Message.Chat.ID
	// The selected time is the callback data minus its routing prefix.
	time := strings.Replace(c.Data, "\fmodifyPushStep2SelectTime||", "", -1)
	deleteOnePush(uid, time)
	_ = b.Respond(c, &tb.CallbackResponse{Text: "ๅ ้คๅฅฝไบๅฆ๏ผ"})
	// Rebuild the keyboard from the now-current push times and edit the
	// original message's markup rather than sending a new message.
	pushSeries := getPushTime(uid)
	var btns []tb.Btn
	var selector = &tb.ReplyMarkup{}
	for _, v := range pushSeries {
		btns = append(btns, selector.Data(v, "modifyPushStep2SelectTime||"+v))
	}
	selector.Inline(
		selector.Row(btns...),
	)
	_, _ = b.EditReplyMarkup(c.Message, selector)
}
// modifyPushStep1 presents the chat's current push times as inline buttons;
// tapping one deletes that time (handled by modifyPushStep2).
func modifyPushStep1(c *tb.Callback) {
	// this could be channel id
	pushSeries := getPushTime(c.Message.Chat.ID)
	var btns []tb.Btn
	var selector = &tb.ReplyMarkup{}
	for _, v := range pushSeries {
		btns = append(btns, selector.Data(v, "modifyPushStep2SelectTime||"+v))
	}
	selector.Inline(
		selector.Row(btns...),
	)
	_ = b.Respond(c, &tb.CallbackResponse{Text: "็นๅปๆ้ฎๅณๅฏๅ ้ค่ฟไธชๆถ้ด็ๆจ้"})
	_, _ = b.Send(c.Message.Chat, "้ๆฉ่ฆๅ ้ค็ๆถ้ด", selector)
}
// addPushStep2SelectTime stores the chosen push time for the chat and
// reports the result both as a callback toast and as a chat message.
func addPushStep2SelectTime(c *tb.Callback) {
	newTime := strings.Replace(c.Data, "\faddPushStep2SelectTime|", "", -1)
	// this id could be channel id, not initiator's id
	respond, message := addMorePush(c.Message.Chat.ID, newTime)
	_ = b.Respond(c, &tb.CallbackResponse{Text: respond})
	_, _ = b.Send(c.Message.Chat, message)
}
// addPushStep1 offers an inline keyboard of candidate push times: a first
// row with the default 18:11, then the times from timeSeries() laid out in
// rows of five. Duplicate-time handling happens later, in addMorePush.
func addPushStep1(c *tb.Callback) {
	// duplicate time, ignore here
	var inlineKeys [][]tb.InlineButton
	var unique []tb.InlineButton
	unique = append(unique, tb.InlineButton{
		Unique: fmt.Sprintf("addPushStep2SelectTime|%s", "18:11"),
		Text:   "18:11",
	})
	inlineKeys = append(inlineKeys, unique)
	var btns []tb.InlineButton
	for _, t := range timeSeries() {
		btns = append(btns, tb.InlineButton{
			Unique: fmt.Sprintf("addPushStep2SelectTime|%s", t),
			Text:   t,
		})
		// Flush a completed row of five buttons.
		if len(btns) == 5 {
			inlineKeys = append(inlineKeys, btns)
			btns = nil
		}
	}
	// Bug fix: the original counter-based loop dropped one time value at
	// every row boundary and silently discarded the trailing partial row.
	if len(btns) > 0 {
		inlineKeys = append(inlineKeys, btns)
	}
	_, _ = b.Send(c.Message.Chat, "ๅฅฝ็๏ผ้ฃไฝ ้ไธชๆถ้ดๅง๏ผ", &tb.ReplyMarkup{InlineKeyboard: inlineKeys})
}
// getStoredMessage decodes callback data of the form
// "<verdict>|<messageID>|<chatID>" (e.g. "Yes|5159|123456789") into a
// telebot StoredMessage pointing at the requester's ack message.
func getStoredMessage(data string) tb.StoredMessage {
	parts := strings.Split(data, "|")
	chatID, _ := strconv.ParseInt(parts[2], 10, 64)
	return tb.StoredMessage{MessageID: parts[1], ChatID: chatID}
}
// approveCallback handles the reviewer's "Yes": downloads the photo, tells
// the submitter, and deletes the review messages.
func approveCallback(c *tb.Callback) {
	log.Infof("approve new photos from %s", c.Data)
	botM := getStoredMessage(c.Data)
	approveAction(c.Message.ReplyTo)
	_ = b.Respond(c, &tb.CallbackResponse{Text: "Approved"})
	_, _ = b.Edit(botM, "ไฝ ็ๅพ็่ขซๆฅๅไบ๐")
	_ = b.Delete(c.Message)         // this message
	_ = b.Delete(c.Message.ReplyTo) // original message with photo
}
// denyCallback handles the reviewer's "No": tells the submitter the photo
// was rejected and deletes the review messages.
func denyCallback(c *tb.Callback) {
	log.Infof("deny new photos from %s", c.Data)
	botM := getStoredMessage(c.Data)
	_ = b.Respond(c, &tb.CallbackResponse{Text: "Denied"})
	_, _ = b.Edit(botM, "ไฝ ็ๅพ็่ขซๆ็ปไบ๐ซ")
	_ = b.Delete(c.Message)         // this message
	_ = b.Delete(c.Message.ReplyTo) // original message with photo
}
// approveAction downloads the approved photo (or image document) from the
// reviewed message into photosPath; messages with neither are ignored.
func approveAction(reviewMessage *tb.Message) {
	// this handler interacts with reviewer
	photo := reviewMessage.Photo
	document := reviewMessage.Document
	var filename string
	var fileobject tb.File
	switch {
	case photo != nil:
		filename = photo.UniqueID + ".jpg"
		fileobject = photo.File
	case document != nil:
		filename = document.UniqueID + ".jpg"
		fileobject = document.File
	default:
		// Neither a photo nor a document: nothing to download.
		return
	}
	picPath := filepath.Join(photosPath, filename)
	log.Infof("Downloading photos to %s", picPath)
	// Fix: use a function-local err instead of assigning to the shared
	// package-level `err`, which is unsafe under concurrent handlers.
	if err := b.Download(&fileobject, picPath); err != nil {
		log.Errorln("Download failed", err)
	}
}
// submitHandler replies to /submit with instructions on how to submit new
// photos (send photos or files directly to the bot in a private chat).
func submitHandler(m *tb.Message) {
	_ = b.Notify(m.Chat, tb.Typing)
	_, _ = b.Send(m.Chat, "ๆณ่ฆๅๆๆไบคๆฐ็ๅพ็ๅ๏ผ็ดๆฅๆๅพ็ๅ้็ปๆๅฐฑๅฏไปฅ๏ผๅๅผ ๏ผๅคๅผ ไธบไธ็ป๏ผ่ฝฌๅ้ฝๅฏไปฅ็๏ผ\n"+
		"ๆไปถๅๅพ็็ๅฝขๅผๅ้็ปbot้ฝๅฏไปฅๅฆใๅฆๆ้ฎ้ขๅฏไปฅ่็ณป @BennyThink")
}
// inline answers an inline query with three random Gakki photos served from
// the public web endpoint.
func inline(q *tb.Query) {
	const web = "https://bot.gakki.photos/"
	var urls []string
	for _, p := range ChoosePhotos(3) {
		urls = append(urls, web+filepath.Base(p))
	}
	results := make(tb.Results, len(urls)) // []tb.Result
	for i, u := range urls {
		r := &tb.PhotoResult{
			URL:      u,
			ThumbURL: u,
		}
		// Every inline result needs a unique string ID.
		r.SetResultID(strconv.Itoa(i))
		results[i] = r
	}
	log.Infof("Inline pic %v", urls)
	err := b.Answer(q, &tb.QueryResponse{
		Results:   results,
		CacheTime: 60, // a minute
	})
	if err != nil {
		log.Println(err)
	}
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package team
import (
"bytes"
"testing"
"github.com/stretchr/testify/require"
)
// TestLoadTeams checks that LoadTeams expands every alias into its own map
// entry that shares the same Team definition (with Aliases filled in).
func TestLoadTeams(t *testing.T) {
	yamlFile := []byte(`
sql:
  aliases: [sql-alias]
  email: otan@cockroachlabs.com
  slack: otan
  triage_column_id: 1
test-infra-team:
  email: jlinder@cockroachlabs.com
  slack: jlinder
  triage_column_id: 2
`)
	ret, err := LoadTeams(bytes.NewReader(yamlFile))
	require.NoError(t, err)
	require.Equal(
		t,
		map[Alias]Team{
			// "sql" and its alias both resolve to the same expanded Team.
			"sql": {
				Aliases:        []Alias{"sql", "sql-alias"},
				Email:          "otan@cockroachlabs.com",
				Slack:          "otan",
				TriageColumnID: 1,
			},
			"sql-alias": {
				Aliases:        []Alias{"sql", "sql-alias"},
				Email:          "otan@cockroachlabs.com",
				Slack:          "otan",
				TriageColumnID: 1,
			},
			"test-infra-team": {
				Aliases:        []Alias{"test-infra-team"},
				Email:          "jlinder@cockroachlabs.com",
				Slack:          "jlinder",
				TriageColumnID: 2,
			},
		},
		ret,
	)
}
// TestTeamsYAMLValid checks that the checked-in TEAMS.yaml parses cleanly.
func TestTeamsYAMLValid(t *testing.T) {
	_, err := DefaultLoadTeams()
	require.NoError(t, err)
	// TODO(otan): test other volatile validity conditions, e.g. triage_column_id exists.
	// Gate this by a flag so this is only tested with certain flags, as these are
	// not reproducible results in tests.
}
|
package types
// predefinedAttributes lists the predefined document attributes, which may
// be converted into HTML entities.
var predefinedAttributes = []string{
	"sp", "blank", "empty", "nbsp", "zwsp", "wj", "apos", "quot",
	"lsquo", "rsquo", "ldquo", "rdquo", "deg", "plus", "brvbar",
	"vbar", "amp", "lt", "gt", "startsb", "endsb", "caret",
	"asterisk", "tilde", "backslash", "backtick", "two-colons",
	"two-semicolons", "cpp",
}

// isPrefedinedAttribute reports whether a is one of the predefined document
// attributes. (The name keeps its historical "Prefedined" typo so existing
// callers keep compiling.)
func isPrefedinedAttribute(a string) bool {
	for i := range predefinedAttributes {
		if predefinedAttributes[i] == a {
			return true
		}
	}
	return false
}
|
package sort
// ShellSort sorts a in ascending order in place using Shell's method with
// the Knuth gap sequence (1, 4, 13, 40, ...).
func ShellSort(a []int) {
	gap := 1
	for gap < len(a) {
		gap = gap*3 + 1
	}
	for ; gap >= 1; gap /= 3 {
		// Gapped insertion sort: each element sinks toward its position
		// within its gap-separated subsequence.
		for i := gap; i < len(a); i++ {
			for j := i; j >= gap && a[j] < a[j-gap]; j -= gap {
				a[j], a[j-gap] = a[j-gap], a[j]
			}
		}
	}
}
// ShellSortV2 sorts the first n elements of a in ascending order in place,
// halving the gap each pass and using a shifting (rather than swapping)
// gapped insertion sort.
func ShellSortV2(a []int, n int) {
	for gap := n / 2; gap > 0; gap /= 2 {
		for i := gap; i < n; i++ {
			v := a[i]
			j := i
			// Shift larger gap-neighbors right until v's slot is found.
			for ; j >= gap && a[j-gap] > v; j -= gap {
				a[j] = a[j-gap]
			}
			a[j] = v
		}
	}
}
|
//ๅนถๅ่ทๅๅคไธชURL--ไธไธชๅฐๅ็็ฝ้ๆต่ฏๅทฅๅ
ท
package main
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"time"
)
func main() {
start := time.Now()
ch := make(chan string) //ๅๅปบ้้
for _,url := range os.Args[1:] {
go fetch(url, ch)
}
// ไธปๅ็จ่ด่ดฃๅๆฐๆฎ
// ๅฝไธไธชgoroutineๅฐ่ฏๅจไธไธชchannelไธๅsendๆ่
receiveๆไฝๆถ๏ผ่ฟไธชgoroutineไผ้ปๅกๅจ่ฐ็จๅค๏ผ
// ็ดๅฐๅฆไธไธชgoroutineๅพ่ฟไธชchannel้ๅๅ
ฅใๆ่
ๆฅๆถๅผ๏ผ่ฟๆ ทไธคไธชgoroutineๆไผ็ปง็ปญๆง่กchannelๆไฝไนๅ็้ป่พใ
// ๅจ่ฟไธชไพๅญไธญ๏ผๆฏไธไธชfetchๅฝๆฐๅจๆง่กๆถ้ฝไผๅพchannel้ๅ้ไธไธชๅผ(ch <- expression)๏ผไธปๅฝๆฐ่ด่ดฃๆฅๆถ่ฟไบๅผ(<-ch)ใ
// ่ฟไธช็จๅบไธญๆไปฌ็จmainๅฝๆฐๆฅๆฅๆถๆๆfetchๅฝๆฐไผ ๅ็ๅญ็ฌฆไธฒ๏ผๅฏไปฅ้ฟๅ
ๅจgoroutineๅผๆญฅๆง่ก่ฟๆฒกๆๅฎๆๆถmainๅฝๆฐๆๅ้ๅบใ
for range os.Args[1:]{
fmt.Println(<-ch)
}
fmt.Printf("%.2fs elepsed\n", time.Since(start).Seconds()) //่ฎก็ฎๆปๆถ้ด
}
// ่ฎฟ้ฎurl ๅนถๆ็ธๅบ็็ปๆไผ ๅ
ฅ้้
// fetch GETs url, discards the response body while counting its bytes, and
// sends a one-line summary (elapsed time, size, url) — or the error — on ch.
func fetch(url string, ch chan<- string) {
	start := time.Now()
	resp, err := http.Get(url)
	if err != nil{
		ch<- fmt.Sprint(err)
		return
	}
	// io.Copy into ioutil.Discard drains the body without retaining it;
	// only the byte count returned by Copy is needed, not the content.
	nbytes, err := io.Copy(ioutil.Discard, resp.Body)
	resp.Body.Close()
	if err != nil{
		ch <- fmt.Sprintf("While reading %s: %v", url, err)
		return
	}
	secs := time.Since(start).Seconds()
	ch<- fmt.Sprintf("%.2fs %7d %s",secs, nbytes, url)
}
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package template
import (
	"fmt"
	"hash/fnv"
	"reflect"
	"sort"
)
// Join returns the concatenation of all the passed array or slices into a
// single, sequential list.
func (*Functions) Join(slices ...interface{}) []interface{} {
	// Start from a non-nil empty slice so zero inputs still yield [].
	out := []interface{}{}
	for _, s := range slices {
		v := reflect.ValueOf(s)
		n := v.Len()
		for i := 0; i < n; i++ {
			out = append(out, v.Index(i).Interface())
		}
	}
	return out
}
// HasMore returns true if the i'th indexed item in l is not the last.
func (*Functions) HasMore(i int, l interface{}) bool {
	last := reflect.ValueOf(l).Len() - 1
	return i < last
}
// Reverse returns a new list with all the elements of in reversed.
func (*Functions) Reverse(in interface{}) interface{} {
	src := reflect.ValueOf(in)
	n := src.Len()
	dst := reflect.MakeSlice(src.Type(), n, n)
	for i, j := 0, n-1; j >= 0; i, j = i+1, j-1 {
		dst.Index(i).Set(src.Index(j))
	}
	return dst.Interface()
}
// IndexOf returns the index of value in the list array. If value is not
// found in array then IndexOf returns -1.
func (*Functions) IndexOf(array interface{}, value interface{}) int {
	v := reflect.ValueOf(array)
	n := v.Len()
	for i := 0; i < n; i++ {
		if v.Index(i).Interface() == value {
			return i
		}
	}
	return -1
}
// Tail returns a slice of the list from start to len(array).
func (*Functions) Tail(start int, array interface{}) interface{} {
	src := reflect.ValueOf(array)
	n := src.Len() - start
	dst := reflect.MakeSlice(src.Type(), n, n)
	for i := 0; i < n; i++ {
		dst.Index(i).Set(src.Index(start + i))
	}
	return dst.Interface()
}
// ForEach returns a string list containing the strings emitted by calling the
// macro m for each sequential item in the array arr. 0 length strings will
// be omitted from the returned list.
func (f *Functions) ForEach(arr interface{}, m string) (stringList, error) {
	v := reflect.ValueOf(arr)
	c := v.Len()
	l := make(stringList, 0, c)
	for i := 0; i < c; i++ {
		e := v.Index(i).Interface()
		// Expand the macro with the element as its argument.
		str, err := f.Macro(m, e)
		if err != nil {
			return nil, err
		}
		// Skip empty expansions so callers get a compact list.
		if len(str) > 0 {
			l = append(l, str)
		}
	}
	return l, nil
}
// sortElem pairs a sort key with the element's original index in the input
// slice, so SortBy can reorder the input by key.
type sortElem struct {
	key string
	idx int
}

// sortElemList implements sort.Interface, ordering elements by key.
type sortElemList []sortElem

func (l sortElemList) Len() int           { return len(l) }
func (l sortElemList) Less(a, b int) bool { return l[a].key < l[b].key }
func (l sortElemList) Swap(a, b int)      { l[a], l[b] = l[b], l[a] }
// SortBy returns a new list containing the sorted elements of arr. The list is
// sorted by the string values returned by calling the template function
// keyMacro with each element in the list.
func (f *Functions) SortBy(arr interface{}, keyMacro string) (interface{}, error) {
	v := reflect.ValueOf(arr)
	c := v.Len()
	// Compute the sort key for every element, remembering each element's
	// original index so the input itself is never mutated.
	elems := make(sortElemList, c)
	for i := 0; i < c; i++ {
		e := v.Index(i).Interface()
		key, err := f.Macro(keyMacro, e)
		if err != nil {
			return nil, err
		}
		elems[i] = sortElem{key, i}
	}
	sort.Sort(elems)
	// Build the output by pulling elements in key order.
	out := reflect.MakeSlice(v.Type(), c, c)
	for i, e := range elems {
		out.Index(i).Set(v.Index(e.idx))
	}
	return out.Interface(), nil
}
// Partition divides a slice into num buckets and returns a slice of new
// slices. Elements are assigned to buckets by the FNV-1a 32-bit hash of the
// key produced by keyMacro, so placement is stable across runs.
// keyMacro is the name of a macro which should generate a stable key for
// the object in the slice.
func (f *Functions) Partition(arr interface{}, keyMacro string, num int) (interface{}, error) {
	// Robustness fix: num <= 0 previously panicked — division by zero in
	// `hash % uint32(num)` for 0, or an invalid MakeSlice length for
	// negative values. Report it as an error instead.
	if num <= 0 {
		return nil, fmt.Errorf("partition: number of buckets must be positive, got %d", num)
	}
	arrV := reflect.ValueOf(arr)
	n := arrV.Len()
	// Make the slice of slices
	sliceType := reflect.SliceOf(arrV.Type())
	slice := reflect.MakeSlice(sliceType, num, num)
	for i := 0; i < n; i++ {
		v := arrV.Index(i)
		key, err := f.Macro(keyMacro, v.Interface())
		if err != nil {
			return nil, err
		}
		hasher := fnv.New32()
		hasher.Write([]byte(key))
		hash := hasher.Sum32()
		bucketNum := int(hash % uint32(num))
		bucket := slice.Index(bucketNum)
		bucket.Set(reflect.Append(bucket, v))
	}
	return slice.Interface(), nil
}
// PartitionByKey Divides a slice into one bucket for each different keyMacro returned.
// keyMacro is the name of a macro which should generate a stable key for
// the object in the slice. Buckets appear in first-seen key order.
func (f *Functions) PartitionByKey(arr interface{}, keyMacro string) (interface{}, error) {
	arrV := reflect.ValueOf(arr)
	// Bucket is a slice of the arr element type. ([]el)
	bucketType := reflect.SliceOf(arrV.Type().Elem())
	// Slices is a slice of buckets. ([][]el)
	slicesType := reflect.SliceOf(bucketType)
	slices := reflect.MakeSlice(slicesType, 0, 8)
	// bucketIndices maps each distinct key to its bucket's position.
	bucketIndices := make(map[string]int)
	for i, c := 0, arrV.Len(); i < c; i++ {
		v := arrV.Index(i)
		key, err := f.Macro(keyMacro, v.Interface())
		if err != nil {
			return nil, err
		}
		bucketIdx, ok := bucketIndices[key]
		if !ok {
			// First time this key is seen: open a new bucket for it.
			bucketIdx = len(bucketIndices)
			bucketIndices[key] = bucketIdx
			slices = reflect.Append(slices, reflect.MakeSlice(bucketType, 0, 8))
		}
		bucket := slices.Index(bucketIdx)
		bucket.Set(reflect.Append(bucket, v))
	}
	return slices.Interface(), nil
}
// Length returns the number of elements in arr.
func (f *Functions) Length(arr interface{}) (int, error) {
	return reflect.ValueOf(arr).Len(), nil
}
|
package nmt
import (
"bytes"
"errors"
"fmt"
"hash"
"math/bits"
"github.com/celestiaorg/nmt/namespace"
)
// ErrFailedCompletenessCheck indicates that the verification of a namespace proof failed due to the lack of completeness property.
var ErrFailedCompletenessCheck = errors.New("failed completeness check")
// Proof represents a namespace proof of a namespace.ID in an NMT. In case this
// proof proves the absence of a namespace.ID in a tree it also contains the
// leaf hashes of the range where that namespace would be.
type Proof struct {
	// start index of the leaves that match the queried namespace.ID.
	start int
	// end index (non-inclusive) of the leaves that match the queried
	// namespace.ID.
	end int
	// nodes hold the tree nodes necessary for the Merkle range proof of
	// `[start, end)` in the order of an in-order traversal of the tree. in
	// specific, nodes contain: 1) the namespaced hash of the left siblings for
	// the Merkle inclusion proof of the `start` leaf 2) the namespaced hash of
	// the right siblings of the Merkle inclusion proof of the `end` leaf
	nodes [][]byte
	// leafHash are nil if the namespace is present in the NMT. In case the
	// namespace to be proved is in the min/max range of the tree but absent,
	// this will contain the leaf hash necessary to verify the proof of absence.
	// leafHash contains a tree leaf that 1) its namespace ID is the smallest
	// namespace ID larger than nid and 2) the namespace ID of the leaf to the
	// left of it is smaller than the nid.
	leafHash []byte
	// isMaxNamespaceIDIgnored is set to true if the tree from which this Proof
	// was generated from is initialized with Options.IgnoreMaxNamespace ==
	// true. The IgnoreMaxNamespace flag influences the calculation of the
	// namespace ID range for intermediate nodes in the tree. This flag signals
	// that, when determining the upper limit of the namespace ID range for a
	// tree node, the maximum possible namespace ID (equivalent to
	// "NamespaceIDSize" bytes of 0xFF, or 2^NamespaceIDSize-1) should be
	// omitted if feasible. For a more in-depth understanding of this field,
	// refer to the "HashNode" method in the "Hasher".
	isMaxNamespaceIDIgnored bool
}
// Start returns the index of the first leaf covered by this proof.
func (proof Proof) Start() int {
	return proof.start
}
// End returns the non-inclusive end index of the leaves covered by this proof.
func (proof Proof) End() int {
	return proof.end
}
// Nodes return the proof nodes that together with the corresponding leaf values
// can be used to recompute the root and verify this proof.
func (proof Proof) Nodes() [][]byte {
	return proof.nodes
}
// IsOfAbsence returns true if this proof proves the absence of leaves of a
// namespace in the tree. A non-empty leafHash marks an absence proof.
func (proof Proof) IsOfAbsence() bool {
	return len(proof.leafHash) > 0
}
// LeafHash returns nil if the namespace has leaves in the NMT. In case the
// namespace.ID to be proved is in the min/max range of the tree but absent,
// this will contain the leaf hash necessary to verify the proof of absence.
func (proof Proof) LeafHash() []byte {
	return proof.leafHash
}
// IsNonEmptyRange returns true if this proof contains a valid, non-empty proof
// range (0 <= start < end).
func (proof Proof) IsNonEmptyRange() bool {
	return proof.start >= 0 && proof.start < proof.end
}
// IsMaxNamespaceIDIgnored returns true if the proof has been created under the ignore max namespace logic.
// see ./docs/nmt-lib.md for more details.
func (proof Proof) IsMaxNamespaceIDIgnored() bool {
	return proof.isMaxNamespaceIDIgnored
}
// NewEmptyRangeProof constructs a proof that proves that a namespace.ID does
// not fall within the range of an NMT.
func NewEmptyRangeProof(ignoreMaxNamespace bool) Proof {
	return Proof{
		start:                   0,
		end:                     0,
		nodes:                   nil,
		leafHash:                nil,
		isMaxNamespaceIDIgnored: ignoreMaxNamespace,
	}
}
// NewInclusionProof constructs a proof that proves that a namespace.ID is
// included in an NMT.
func NewInclusionProof(proofStart, proofEnd int, proofNodes [][]byte, ignoreMaxNamespace bool) Proof {
	return Proof{
		start:                   proofStart,
		end:                     proofEnd,
		nodes:                   proofNodes,
		leafHash:                nil,
		isMaxNamespaceIDIgnored: ignoreMaxNamespace,
	}
}
// NewAbsenceProof constructs a proof that proves that a namespace.ID falls
// within the range of an NMT but no leaf with that namespace.ID is included.
func NewAbsenceProof(proofStart, proofEnd int, proofNodes [][]byte, leafHash []byte, ignoreMaxNamespace bool) Proof {
	return Proof{
		start:                   proofStart,
		end:                     proofEnd,
		nodes:                   proofNodes,
		leafHash:                leafHash,
		isMaxNamespaceIDIgnored: ignoreMaxNamespace,
	}
}
// IsEmptyProof checks whether the proof corresponds to an empty proof as defined in NMT specifications https://github.com/celestiaorg/nmt/blob/master/docs/spec/nmt.md.
// An empty proof has an empty range (start == end) and carries no nodes.
func (proof Proof) IsEmptyProof() bool {
	return proof.start == proof.end && len(proof.nodes) == 0
}
// VerifyNamespace verifies a whole namespace, i.e. 1) it verifies inclusion of
// the provided `data` in the tree (or the proof.leafHash in case of absence
// proof) 2) it verifies that the namespace is complete i.e., the data items
// matching the namespace ID `nID` are within the range [`proof.start`,
// `proof.end`) and no data of that namespace was left out. VerifyNamespace
// deems an empty `proof` valid if the queried `nID` falls outside the namespace
// range of the supplied `root` or if the `root` is empty
//
// `h` MUST be the same as the underlying hash function used to generate the
// proof. Otherwise, the verification will fail. `nID` is the namespace ID for
// which the namespace `proof` is generated. `data` contains the namespaced data
// items (but not namespace hash) underlying the leaves of the tree in the
// range of [`proof.start`, `proof.end`). For an absence `proof`, the `data` is
// empty. `data` items MUST be ordered according to their index in the tree,
// with `data[0]` corresponding to the namespaced data at index `start`,
//
// and the last element in `data` corresponding to the data item at index
// `end-1` of the tree.
//
// `root` is the root of the NMT against which the `proof` is verified.
func (proof Proof) VerifyNamespace(h hash.Hash, nID namespace.ID, leaves [][]byte, root []byte) bool {
	nIDLen := nID.Size()
	nth := NewNmtHasher(h, nIDLen, proof.isMaxNamespaceIDIgnored)
	// perform some consistency checks:
	// check that the root is valid w.r.t the NMT hasher
	if err := nth.ValidateNodeFormat(root); err != nil {
		return false
	}
	// check that all the proof.nodes are valid w.r.t the NMT hasher
	for _, node := range proof.nodes {
		if err := nth.ValidateNodeFormat(node); err != nil {
			return false
		}
	}
	// if the proof is an absence proof, the leafHash must be valid w.r.t the NMT hasher
	if proof.IsOfAbsence() {
		if err := nth.ValidateNodeFormat(proof.leafHash); err != nil {
			return false
		}
	}
	isEmptyRange := proof.start == proof.end
	if isEmptyRange {
		// An empty proof is only acceptable in the two situations below;
		// any other empty range is invalid.
		if proof.IsEmptyProof() && len(leaves) == 0 {
			rootMin := namespace.ID(MinNamespace(root, nIDLen))
			rootMax := namespace.ID(MaxNamespace(root, nIDLen))
			// empty proofs are always rejected unless 1) nID is outside the range of
			// namespaces covered by the root 2) the root represents an empty tree, since
			// it purports to cover the zero namespace but does not actually include
			// any such nodes
			if nID.Less(rootMin) || rootMax.Less(nID) {
				return true
			}
			if bytes.Equal(root, nth.EmptyRoot()) {
				return true
			}
			return false
		}
		// the proof range is empty, and invalid
		return false
	}
	gotLeafHashes := make([][]byte, 0, len(leaves))
	if proof.IsOfAbsence() {
		// For an absence proof the single stored leaf hash stands in for
		// the data; leaves must be empty in this case.
		gotLeafHashes = append(gotLeafHashes, proof.leafHash)
		// conduct some sanity checks:
		leafMinNID := namespace.ID(proof.leafHash[:nIDLen])
		if !nID.Less(leafMinNID) {
			// leafHash.minNID must be greater than nID
			return false
		}
	} else {
		// collect leaf hashes from provided data and do some sanity checks:
		hashLeafFunc := nth.HashLeaf
		for _, gotLeaf := range leaves {
			if nth.ValidateLeaf(gotLeaf) != nil {
				return false
			}
			// check whether the namespace ID of the data matches the queried nID
			if gotLeafNid := namespace.ID(gotLeaf[:nIDLen]); !gotLeafNid.Equal(nID) {
				// conflicting namespace IDs in data
				return false
			}
			// hash the leaf data
			leafHash, err := hashLeafFunc(gotLeaf)
			if err != nil { // this can never happen due to the initial validation of the leaf at the beginning of the loop
				return false
			}
			gotLeafHashes = append(gotLeafHashes, leafHash)
		}
	}
	// check whether the number of leaves match the proof range i.e., end-start.
	// If not, make an early return.
	expectedLeafCount := proof.End() - proof.Start()
	if !proof.IsOfAbsence() && len(gotLeafHashes) != expectedLeafCount {
		return false
	}
	// with verifyCompleteness set to true:
	res, err := proof.VerifyLeafHashes(nth, true, nID, gotLeafHashes, root)
	if err != nil {
		return false
	}
	return res
}
// The VerifyLeafHashes function checks whether the given proof is a valid Merkle
// range proof for the leaves in the leafHashes input. It returns true or false accordingly.
// If there is an issue during the proof verification e.g., a node does not conform to the namespace hash format, then a proper error is returned to indicate the root cause of the issue.
// The leafHashes parameter is a list of leaf hashes, where each leaf hash is represented
// by a byte slice.
// If the verifyCompleteness parameter is set to true, the function also checks
// the completeness of the proof by verifying that there is no leaf in the
// tree represented by the root parameter that matches the namespace ID nID
// but is not present in the leafHashes list.
func (proof Proof) VerifyLeafHashes(nth *Hasher, verifyCompleteness bool, nID namespace.ID, leafHashes [][]byte, root []byte) (bool, error) {
	// check that the proof range is valid
	if proof.Start() < 0 || proof.Start() >= proof.End() {
		return false, fmt.Errorf("proof range [proof.start=%d, proof.end=%d) is not valid: %w", proof.Start(), proof.End(), ErrInvalidRange)
	}
	// perform some consistency checks:
	if nID.Size() != nth.NamespaceSize() {
		return false, fmt.Errorf("namespace ID size (%d) does not match the namespace size of the NMT hasher (%d)", nID.Size(), nth.NamespaceSize())
	}
	// check that the root is valid w.r.t the NMT hasher
	if err := nth.ValidateNodeFormat(root); err != nil {
		return false, fmt.Errorf("root does not match the NMT hasher's hash format: %w", err)
	}
	// check that all the proof.nodes are valid w.r.t the NMT hasher
	for _, node := range proof.nodes {
		if err := nth.ValidateNodeFormat(node); err != nil {
			return false, fmt.Errorf("proof nodes do not match the NMT hasher's hash format: %w", err)
		}
	}
	// check that all the leaf hashes are valid w.r.t the NMT hasher
	for _, leaf := range leafHashes {
		if err := nth.ValidateNodeFormat(leaf); err != nil {
			return false, fmt.Errorf("leaf hash does not match the NMT hasher's hash format: %w", err)
		}
	}
	var leafIndex uint64
	// leftSubtrees is to be populated by the subtree roots upto [0, r.Start)
	leftSubtrees := make([][]byte, 0, len(proof.nodes))
	nodes := proof.nodes
	// peel off the proof nodes covering [0, proof.Start): each is the root of
	// the largest perfect subtree that starts at leafIndex without crossing
	// proof.Start (see nextSubtreeSize)
	for leafIndex != uint64(proof.Start()) && len(nodes) > 0 {
		subtreeSize := nextSubtreeSize(leafIndex, uint64(proof.Start()))
		leftSubtrees = append(leftSubtrees, nodes[0])
		nodes = nodes[1:]
		leafIndex += uint64(subtreeSize)
	}
	// rightSubtrees only contains the subtrees after r.End
	rightSubtrees := nodes
	if verifyCompleteness {
		// leftSubtrees contains the subtree roots upto [0, r.Start)
		for _, subtree := range leftSubtrees {
			leftSubTreeMax := MaxNamespace(subtree, nth.NamespaceSize())
			// every namespace left of the range must be strictly smaller than
			// nID, otherwise some nID leaves were omitted from the proof
			if nID.LessOrEqual(namespace.ID(leftSubTreeMax)) {
				return false, ErrFailedCompletenessCheck
			}
		}
		for _, subtree := range rightSubtrees {
			rightSubTreeMin := MinNamespace(subtree, nth.NamespaceSize())
			// symmetrically, everything right of the range must be strictly greater
			if namespace.ID(rightSubTreeMin).LessOrEqual(nID) {
				return false, ErrFailedCompletenessCheck
			}
		}
	}
	var computeRoot func(start, end int) ([]byte, error)
	// computeRoot can return error iff the HashNode function fails while calculating the root
	// note: it consumes proof.nodes and leafHashes in-order as it recurses
	computeRoot = func(start, end int) ([]byte, error) {
		// reached a leaf
		if end-start == 1 {
			// if the leaf index falls within the proof range, pop and return a
			// leaf
			if proof.Start() <= start && start < proof.End() {
				leafHash := leafHashes[0]
				// advance leafHashes
				leafHashes = leafHashes[1:]
				return leafHash, nil
			}
			// if the leaf index is outside the proof range, pop and return a
			// proof node (which in this case is a leaf) if present, else return
			// nil because leaf doesn't exist
			return popIfNonEmpty(&proof.nodes), nil
		}
		// if current range does not overlap with the proof range, pop and
		// return a proof node if present, else return nil because subtree
		// doesn't exist
		if end <= proof.Start() || start >= proof.End() {
			return popIfNonEmpty(&proof.nodes), nil
		}
		// Recursively get left and right subtree
		k := getSplitPoint(end - start)
		left, err := computeRoot(start, start+k)
		if err != nil {
			return nil, fmt.Errorf("failed to compute subtree root [%d, %d): %w", start, start+k, err)
		}
		right, err := computeRoot(start+k, end)
		if err != nil {
			return nil, fmt.Errorf("failed to compute subtree root [%d, %d): %w", start+k, end, err)
		}
		// only right leaf/subtree can be non-existent
		if right == nil {
			return left, nil
		}
		hash, err := nth.HashNode(left, right)
		if err != nil {
			return nil, fmt.Errorf("failed to hash node: %w", err)
		}
		return hash, nil
	}
	// estimate the leaf size of the subtree containing the proof range
	proofRangeSubtreeEstimate := getSplitPoint(proof.end) * 2
	if proofRangeSubtreeEstimate < 1 {
		proofRangeSubtreeEstimate = 1
	}
	rootHash, err := computeRoot(0, proofRangeSubtreeEstimate)
	if err != nil {
		return false, fmt.Errorf("failed to compute root [%d, %d): %w", 0, proofRangeSubtreeEstimate, err)
	}
	// any proof nodes not consumed by computeRoot lie to the right of the
	// estimated subtree; fold them into the root from left to right
	for i := 0; i < len(proof.nodes); i++ {
		rootHash, err = nth.HashNode(rootHash, proof.nodes[i])
		if err != nil {
			return false, fmt.Errorf("failed to hash node: %w", err)
		}
	}
	return bytes.Equal(rootHash, root), nil
}
// VerifyInclusion checks that the inclusion proof is valid by using leaf data
// and the provided proof to regenerate and compare the root. Note that the leavesWithoutNamespace data should not contain the prefixed namespace, unlike the tree.Push method,
// which takes prefixed data. All leaves implicitly have the same namespace ID:
// `nid`.
// VerifyInclusion does not verify the completeness of the proof, so it's possible for leavesWithoutNamespace to be a subset of the leaves in the tree that have the namespace ID nid.
func (proof Proof) VerifyInclusion(h hash.Hash, nid namespace.ID, leavesWithoutNamespace [][]byte, root []byte) bool {
	// check the range of the proof
	isEmptyRange := proof.start == proof.end
	if isEmptyRange {
		// the only case in which an empty proof is valid is when the supplied leavesWithoutNamespace is also empty.
		// rationale: no proof (i.e., an empty proof) is needed to prove that an empty set of leaves belong to the tree with root `root`.
		// unlike VerifyNamespace(), we do not care about the queried `nid` here, because VerifyInclusion does not verify the completeness of the proof
		// i.e., whether the leavesWithoutNamespace is the full set of leaves matching the queried `nid`.
		if proof.IsEmptyProof() && len(leavesWithoutNamespace) == 0 {
			return true
		}
		// if the proof range is empty but !proof.IsEmptyProof() || len(leavesWithoutNamespace) != 0, then the verification should fail
		return false
	}
	nth := NewNmtHasher(h, nid.Size(), proof.isMaxNamespaceIDIgnored)
	// perform some consistency checks:
	// check that the root is valid w.r.t the NMT hasher
	if err := nth.ValidateNodeFormat(root); err != nil {
		return false
	}
	// check that all the proof.nodes are valid w.r.t the NMT hasher
	for _, node := range proof.nodes {
		if err := nth.ValidateNodeFormat(node); err != nil {
			return false
		}
	}
	// add namespace to all the leaves
	hashes := make([][]byte, len(leavesWithoutNamespace))
	for i, d := range leavesWithoutNamespace {
		// prepend the namespace to the leaf data; a fresh backing slice is
		// allocated so the caller's data is never mutated
		leafData := append(
			append(make([]byte, 0, len(d)+len(nid)), nid...), d...,
		)
		res, err := nth.HashLeaf(leafData)
		if err != nil {
			return false // this never can happen since the leafData is guaranteed to be namespaced
		}
		hashes[i] = res
	}
	// completeness is intentionally not verified here (verifyCompleteness=false)
	res, err := proof.VerifyLeafHashes(nth, false, nid, hashes, root)
	if err != nil {
		return false
	}
	return res
}
// nextSubtreeSize returns the number of leaves of the largest perfect
// subtree that is rooted at index `start` and does not reach past `end`.
//
// Two limits apply: the alignment of start (a perfect subtree anchored at
// start can hold at most 2^TrailingZeros(start) leaves) and the remaining
// distance end-start; the smaller power of two wins.
func nextSubtreeSize(start, end uint64) int {
	alignment := bits.TrailingZeros64(start)
	span := bits.Len64(end-start) - 1
	if span < alignment {
		return 1 << uint(span)
	}
	return 1 << uint(alignment)
}
// popIfNonEmpty removes and returns the first element of *s in place;
// when the slice is empty it returns nil and leaves *s untouched.
func popIfNonEmpty(s *[][]byte) []byte {
	if len(*s) == 0 {
		return nil
	}
	head := (*s)[0]
	*s = (*s)[1:]
	return head
}
|
package delta
import (
"fmt"
"github.com/cskr/pubsub"
"github.com/mozilla/libaudit-go"
"syscall"
)
// PubSub is the process-wide publish/subscribe bus audit events are emitted on.
// NOTE(review): it is declared but never assigned in this file — presumably
// initialized elsewhere; confirm, since publishing through a nil bus would panic.
var PubSub *pubsub.PubSub

// AuditChannel is the pubsub topic name used for audit events.
var AuditChannel = "audit"
// StartAuditEngine connects to the kernel audit subsystem over netlink,
// enables auditing, tunes its rate and backlog limits, registers this
// process as the audit consumer, installs the rule set from auditConfig,
// and then blocks collecting audit messages, dispatching each through
// callback. Any setup failure is printed and aborts the start-up.
func (dc *DeltaCore) StartAuditEngine() {
	conn, err := libaudit.NewNetlinkConnection()
	if err != nil {
		fmt.Printf("%v\n", err)
		return
	}
	defer conn.Close()
	// Turn auditing on in the kernel.
	if err = libaudit.AuditSetEnabled(conn, 1); err != nil {
		fmt.Printf("%v\n", err)
		return
	}
	// Confirm the kernel actually reports auditing as enabled.
	status, err := libaudit.AuditIsEnabled(conn)
	if err == nil && status == 1 {
		fmt.Printf("Enabled Audit\n")
	} else if err == nil && status == 0 {
		fmt.Printf("Audit Not Enabled\n")
		return
	} else {
		fmt.Printf("%v\n", err)
		return
	}
	// Cap how many audit messages per second the kernel will emit.
	if err = libaudit.AuditSetRateLimit(conn, 450); err != nil {
		fmt.Printf("%v\n", err)
		return
	}
	// Bound the kernel-side audit message queue.
	if err = libaudit.AuditSetBacklogLimit(conn, 16438); err != nil {
		fmt.Printf("%v\n", err)
		return
	}
	// Route audit events to this process.
	if err = libaudit.AuditSetPID(conn, syscall.Getpid()); err != nil {
		fmt.Printf("%v\n", err)
		return
	}
	// Start from a clean slate: drop any rules already loaded in the kernel.
	if err = libaudit.DeleteAllRules(conn); err != nil {
		fmt.Printf("%v\n", err)
		return
	}
	// Install the JSON rule set (for the format see:
	// https://github.com/arunk-s/gsoc16/blob/master/audit.rules.json).
	if err = libaudit.SetRules(conn, []byte(auditConfig)); err != nil {
		fmt.Printf("%v\n", err)
		return
	}
	// done would let a caller stop collection early; nothing signals it here,
	// so collection runs until the connection is torn down.
	done := make(chan bool)
	libaudit.GetAuditMessages(conn, callback, &done)
}
// callback handles each message delivered by GetAuditMessages: collection
// errors are printed, and every non-nil event is rebuilt and republished
// on AuditChannel.
func callback(msg *libaudit.AuditEvent, ce error, args ...interface{}) {
	if ce != nil {
		fmt.Printf("%v\n", ce)
		return
	}
	if msg == nil {
		return
	}
	key := genKeyName(AuditChannel, msg.Type)
	BuildEvent(msg.Serial, msg.Timestamp, key, msg.Data).PublishEvent(AuditChannel)
}
// auditConfig is the JSON rule set handed to libaudit.SetRules: it deletes
// any existing rules, enables auditing, watches /etc/ for writes and
// attribute changes (key "files"), and traps the execve and connect
// syscalls on exit (keys "exec" and "connect").
//
// NOTE(review): the "buffer" (16348) and "rate" (500) values here differ
// from the AuditSetBacklogLimit(16438)/AuditSetRateLimit(450) calls made in
// StartAuditEngine — confirm which pair is authoritative.
var auditConfig = `
{
    "delete": true,
    "enable": "1",
    "buffer": "16348",
    "rate": "500",
    "strict_path_check": false,
    "file_rules": [
    {
        "path": "/etc/",
        "key": "files",
        "permission": "wa"
    }],
    "syscall_rules": [
    {
        "key": "exec",
        "syscalls": [
            "execve"
        ],
        "actions": [
            "exit",
            "always"
        ]
    },
    {
        "key": "connect",
        "syscalls": [
            "connect"
        ],
        "actions": [
            "exit",
            "always"
        ]
    }
    ]
}`
|
// +build !race
package common
import (
"os"
"sync"
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
)
// TestMergeConfig verifies that two concurrent OverwriteCfg calls, each
// removing one cluster under the file lock, leave the kubeconfig with no
// clusters (checked in the deferred block).
func TestMergeConfig(t *testing.T) {
	path := "./test.lock"
	_ = os.Setenv(clientcmd.RecommendedConfigPathEnvVar, path)
	_, err := os.Create(path)
	assert.Nil(t, err)
	defer func() {
		c, err := clientcmd.LoadFromFile(path)
		assert.Nil(t, err)
		assert.Len(t, c.Clusters, 0)
		_ = os.Remove(path)
	}()
	config := api.Config{
		Clusters: map[string]*api.Cluster{},
	}
	config.Clusters["test1"] = &api.Cluster{}
	config.Clusters["test2"] = &api.Cluster{}
	_ = clientcmd.WriteToFile(config, path)
	var wg sync.WaitGroup
	wg.Add(2)
	for _, name := range []string{"test1", "test2"} {
		name := name // per-iteration copy for pre-Go 1.22 loop semantics
		go func() {
			defer wg.Done()
			cm := ConfigFileManager{}
			// Use a goroutine-local err: the previous version had both
			// goroutines assign the outer test's err concurrently, which is a
			// data race (the reason this file carries the !race build tag).
			err := cm.OverwriteCfg(path, name, cm.RemoveCfg)
			assert.Nil(t, err)
		}()
	}
	wg.Wait()
}
|
package usecase
import (
"github.com/ikmski/git-lfs3/entity"
)
// BatchService resolves a BatchRequest into a BatchResult, reporting for
// each object whether its metadata and content already exist.
type BatchService interface {
	Batch(req *BatchRequest) (*BatchResult, error)
}

// batchService is the default BatchService, backed by a metadata
// repository and a content repository.
type batchService struct {
	MetaDataRepository MetaDataRepository
	ContentRepository  ContentRepository
}

// NewBatchService builds a BatchService from its two repositories.
func NewBatchService(metaDataRepo MetaDataRepository, contentRepo ContentRepository) BatchService {
	return &batchService{
		MetaDataRepository: metaDataRepo,
		ContentRepository:  contentRepo,
	}
}
// Batch resolves every object in the request: objects whose metadata and
// content both already exist are reported as present; otherwise fresh
// metadata is allocated via Put and the object is reported as awaiting
// upload. Objects whose Put fails are omitted from the result.
func (c *batchService) Batch(req *BatchRequest) (*BatchResult, error) {
	var results []*ObjectResult
	for _, obj := range req.Objects {
		meta, err := c.MetaDataRepository.Get(obj.Oid)
		if err == nil && c.ContentRepository.Exists(meta) {
			// Both metadata and content are present.
			results = append(results, createObjectResult(obj, meta, true, true))
			continue
		}
		// Missing metadata or content: register (or refresh) the metadata.
		if meta, err = c.MetaDataRepository.Put(obj.Oid, obj.Size); err == nil {
			results = append(results, createObjectResult(obj, meta, true, false))
		}
	}
	return &BatchResult{Objects: results}, nil
}
// createObjectResult builds the per-object response entry from the stored
// metadata and the two existence flags.
//
// NOTE(review): the request parameter o is unused — Oid and Size are taken
// from meta instead. Confirm whether the response should echo the request
// values; the parameter is kept to preserve the call signature.
func createObjectResult(o *ObjectRequest, meta *entity.MetaData, metaExists, objectExists bool) *ObjectResult {
	return &ObjectResult{
		Oid:          meta.Oid,
		Size:         meta.Size,
		MetaExists:   metaExists,
		ObjectExists: objectExists,
	}
}
|
package ThreadPool
import (
"fmt"
"math"
"runtime"
"sync"
)
// TaskStruct bundles a unit of work with an optional WaitGroup that is
// marked Done once the task has been executed.
type TaskStruct struct {
	Fun          func() error
	TheWaitGroup *sync.WaitGroup
}

// Synergetic Process Pool
// SPP is a fixed-capacity pool of worker goroutines fed through TaskChan.
// The embedded Mutex is currently unused by the visible methods.
type SPP struct {
	Capacity int
	TaskChan chan TaskStruct
	sync.Mutex
}
// NewSPP creates a pool and immediately launches `capacity` worker
// goroutines, which block on the (unbuffered) task channel waiting for work.
func NewSPP(capacity int) *SPP {
	pool := &SPP{
		Capacity: capacity,
		TaskChan: make(chan TaskStruct),
	}
	for i := 0; i < capacity; i++ {
		pool.run()
	}
	return pool
}
/*
Close shuts the pool down by closing TaskChan directly; every worker's
receive then reports !ok and its run loop returns.
*/
func (p *SPP) Close() {
	close(p.TaskChan)
}
// run launches one worker goroutine that executes tasks from TaskChan until
// the channel is closed. A panicking task is recovered: the panic and a
// (truncated) stack trace are printed, the task's WaitGroup is released,
// and a replacement worker is spawned so pool capacity is preserved.
//
// Fixes over the previous version:
//  1. task.TheWaitGroup was dereferenced unconditionally, but Put only calls
//     Add when the WaitGroup is non-nil — a task submitted without a
//     WaitGroup crashed the worker (and the recovery handler itself).
//  2. The replacement worker was spawned unconditionally in the deferred
//     handler, so after Close every exiting worker respawned another one
//     that immediately exited, looping forever. Now a replacement is only
//     started after an actual panic.
func (p *SPP) run() {
	var (
		task TaskStruct
		ok   bool
	)
	go func() {
		defer func() {
			if r := recover(); r != nil {
				fmt.Print("error:", r)
				trace := make([]byte, 1<<16)
				n := runtime.Stack(trace, false)
				s := fmt.Sprintf("panic: '%v'\n, Stack Trace:\n %s", r, string(trace[:int(math.Min(float64(n), float64(7000)))]))
				// Print the detailed panic report.
				fmt.Println(s)
				// Release the waiter of the task that panicked, if any.
				if task.TheWaitGroup != nil {
					task.TheWaitGroup.Done()
				}
				// Replace the crashed worker to keep the pool at capacity.
				go p.run()
			}
		}()
		for {
			select {
			case task, ok = <-p.TaskChan:
				if !ok {
					// Channel closed by Close(): exit without respawning.
					return
				}
				taskErr := task.Fun()
				if taskErr != nil {
					fmt.Print("taskErr:", taskErr)
				}
				// Guard: tasks may be submitted without a WaitGroup.
				if task.TheWaitGroup != nil {
					task.TheWaitGroup.Done()
				}
			}
		}
	}()
}
// Put submits a task to the pool, blocking until a worker accepts it.
// When the task carries a WaitGroup it is incremented before hand-off so
// callers can Wait for completion. Always returns nil.
func (p *SPP) Put(task TaskStruct) error {
	if wg := task.TheWaitGroup; wg != nil {
		wg.Add(1)
	}
	p.TaskChan <- task
	return nil
}
|
package main
import (
"fmt"
"time"
)
// producer2 pushes 100 formatted messages into queue, logging each send.
func producer2(queue chan string) {
	for i := 0; i < 100; i++ {
		msg := fmt.Sprintf("liuruichao_%d", i)
		queue <- msg
		fmt.Printf("send %d to queue.\n", i)
	}
}
// consumer2 receives from queue forever, echoing each message and sleeping
// three seconds between receives. It never returns; once the producer
// stops, it simply blocks on the next receive.
func consumer2(queue chan string) {
	for {
		msg := <-queue
		fmt.Println("consumer receive: " + msg)
		time.Sleep(3 * time.Second)
	}
}
// main wires a buffered queue between one producer and one consumer
// goroutine, then sleeps long enough for the demo to play out; there is no
// explicit synchronization or shutdown.
func main() {
	queue := make(chan string, 100)
	go producer2(queue)
	go consumer2(queue)
	time.Sleep(300 * time.Second) // let the producer and consumer finish
}
|
package httpclient
import (
"context"
"log"
"net/http"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/valyala/fasthttp"
"github.com/mts-test-task/internal/sitesdataservice"
"github.com/mts-test-task/internal/sitesdataservice/httpserver"
svc "github.com/mts-test-task/pkg/sitesdataservice"
"github.com/mts-test-task/pkg/sitesdataservice/api"
"github.com/mts-test-task/pkg/sitesdataservice/httperror"
)
const (
	// Address the test HTTP server binds to, and the host the client dials.
	serverAddr = "localhost:38812"
	hostAddr   = "localhost:38812"
	// fasthttp server/client tuning used by the tests.
	maxConns           = 512
	maxRequestBodySize = 15 * 1024 * 1024
	serverTimeout      = 1 * time.Millisecond
	// Grace period for the server goroutine to start listening.
	serverLaunchingWaitSleep = 1 * time.Second
	// Sub-test names and the mocked service method.
	getDataFromURLsSuccess       = "GetDataFromURLs success test"
	getDataFromURLsFail          = "GetDataFromURLs fail test"
	serviceMethodGetDataFromURLs = "GetDataFromURLs"
	// Fixture URLs and payloads.
	ozonURL  = "http://ozon.ru"
	wikiURL  = "https://ru.wikipedia.org"
	siteData = "html"
	fail     = "fail"
)

var (
	// nilError is a typed nil error used for mock return values.
	nilError error
)
// TestClient_GetDataFromURLsSuccess drives the real HTTP client against a
// fasthttp server backed by a mocked service, and checks that a successful
// response round-trips unchanged.
func TestClient_GetDataFromURLsSuccess(t *testing.T) {
	urls := []string{ozonURL, wikiURL}
	response := []*api.SiteData{
		{
			URL:  wikiURL,
			Data: siteData,
		},
		{
			URL:  ozonURL,
			Data: siteData,
		},
	}
	t.Run(getDataFromURLsSuccess, func(t *testing.T) {
		// The mock expects exactly one call with these arguments.
		serviceMock := new(sitesdataservice.MockService)
		serviceMock.On(serviceMethodGetDataFromURLs, context.Background(), urls).
			Return(response, nilError).
			Once()
		server, client := makeServerClient(serverAddr, serviceMock)
		defer func() {
			err := server.Shutdown()
			if err != nil {
				log.Printf("server shut down err: %v", err)
			}
		}()
		// Give the server goroutine time to start listening.
		time.Sleep(serverLaunchingWaitSleep)
		resp, err := client.GetDataFromURLs(context.Background(), urls)
		assert.Equal(t, response, resp)
		assert.NoError(t, err, "unexpected error:", err)
	})
}
// TestClient_GetDataFromURLsFail checks that a service-side error surfaces
// on the client as an httperror with the same status code.
func TestClient_GetDataFromURLsFail(t *testing.T) {
	urls := []string{ozonURL, wikiURL}
	var response []*api.SiteData
	t.Run(getDataFromURLsFail, func(t *testing.T) {
		serviceMock := new(sitesdataservice.MockService)
		serviceMock.On(serviceMethodGetDataFromURLs, context.Background(), urls).
			Return(response, httperror.NewError(http.StatusBadRequest, fail, fail)).
			Once()
		server, client := makeServerClient(serverAddr, serviceMock)
		defer func() {
			err := server.Shutdown()
			if err != nil {
				log.Printf("server shut down err: %v", err)
			}
		}()
		// Give the server goroutine time to start listening.
		time.Sleep(serverLaunchingWaitSleep)
		resp, err := client.GetDataFromURLs(context.Background(), urls)
		assert.Equal(t, response, resp)
		// NOTE(review): the expected error carries an empty third argument ("")
		// while the mock returned "fail" — presumably that detail is dropped in
		// transport; confirm against httperror's wire format.
		assert.Equal(t, err, httperror.NewError(http.StatusBadRequest, fail, ""))
	})
}
// makeServerClient wires a prepared HTTP client to a fasthttp server that
// serves svc, starts the server in a background goroutine, and returns
// both. The server's listen error (including normal shutdown) is only
// logged.
func makeServerClient(serverAddr string, svc svc.Service) (server *fasthttp.Server, client svc.Service) {
	router := httpserver.NewPreparedServer(svc)
	srv := &fasthttp.Server{
		Handler:            router.Handler,
		MaxRequestBodySize: maxRequestBodySize,
		ReadTimeout:        serverTimeout,
	}
	go func() {
		if err := srv.ListenAndServe(serverAddr); err != nil {
			log.Printf("server shut down err: %v", err)
		}
	}()
	return srv, NewPreparedClient(serverAddr, hostAddr, maxConns)
}
|
package leetcode_0006_Zๅญๅฝขๅๆข
/*
ๅฐไธไธช็ปๅฎๅญ็ฌฆไธฒๆ นๆฎ็ปๅฎ็่กๆฐ๏ผไปฅไปไธๅพไธใไปๅทฆๅฐๅณ่ฟ่ก Z ๅญๅฝขๆๅใ
ๆฏๅฆ่พๅ
ฅๅญ็ฌฆไธฒไธบ "LEETCODEISHIRING" ่กๆฐไธบ 3 ๆถ๏ผๆๅๅฆไธ๏ผ
L C I R
E T O E S I I G
E D H N
ไนๅ๏ผไฝ ็่พๅบ้่ฆไปๅทฆๅพๅณ้่ก่ฏปๅ๏ผไบง็ๅบไธไธชๆฐ็ๅญ็ฌฆไธฒ๏ผๆฏๅฆ๏ผ"LCIRETOESIIGEDHN"ใ
่ฏทไฝ ๅฎ็ฐ่ฟไธชๅฐๅญ็ฌฆไธฒ่ฟ่กๆๅฎ่กๆฐๅๆข็ๅฝๆฐ๏ผ
string convert(string s, int numRows);
็คบไพ 1:
่พๅ
ฅ: s = "LEETCODEISHIRING", numRows = 3
่พๅบ: "LCIRETOESIIGEDHN"
็คบไพ 2:
่พๅ
ฅ: s = "LEETCODEISHIRING", numRows = 4
่พๅบ: "LDREOEIIECIHNTSG"
่งฃ้:
L D R
E O E I I
E C I H N
T S G
*/
/*
numRows = 3
tmp[0]:LCIR
tmp[1]:ETOESIIG
tmp[2]:EDHN
ๆฅๅไธไธๆซ๏ผๆไปฅtmp[0]ๅ
ๆพL๏ผtmp[1]ๆพE๏ผtmp[2]ๆพE
่ฟๆถๅ๏ผๆซๅฐๅบไบ๏ผๅพไธๆซ๏ผtmp[1]ๆพT๏ผtmp[0]ๆพC
่ฟๆถๅ๏ผๆซๅฐ้กถไบ๏ผๅพไธๆซ๏ผtmp[1]ๆพO๏ผtmp[2]ๆพD
่ฟๆถๅ๏ผๆซๅฐๅบไบ๏ผ็ปง็ปญใใใ
ๅกซๅ
็้กบๅบๅฐฑๅcosๅฝๆฐ็ๆญฃๆฐ้จๅ
*/
// convert lays s out in a zigzag across numRows rows and reads the result
// back row by row (LeetCode 6, "ZigZag Conversion").
//
// A cursor walks down the rows appending one rune per step; whenever it
// touches the top (row 0) or bottom (row numRows-1) the direction flips,
// tracing the zigzag. Finally the rows are concatenated top to bottom.
func convert(s string, numRows int) string {
	// A single row, or a string too short to zigzag, comes back unchanged.
	if len(s) <= 2 || numRows == 1 {
		return s
	}
	rows := make([]string, numRows)
	row := 0
	step := -1 // flipped to +1 on the very first iteration (row == 0)
	for _, r := range s {
		rows[row] += string(r)
		// Bounce off the top and bottom rows.
		if row == 0 || row == numRows-1 {
			step = -step
		}
		row += step
	}
	var out string
	for _, line := range rows {
		out += line
	}
	return out
}
|
package search
// Empty structs allocate no memory when an instance is created.
type defaultMatcher struct {
}

// defaultMatcherV2 is the pointer-receiver counterpart used to contrast
// method-set behavior below.
type defaultMatcherV2 struct {
}
// Search is declared with a value receiver: it can be called whether the
// value at hand is a defaultMatcher or a pointer to one. Stub
// implementation — always returns (nil, nil). (Original note: TODO.)
func (m defaultMatcher) Search(feed *Feed, searchTerm string) ([]*Result, error) {
	return nil, nil
}
// Search is declared with a pointer receiver: only a *defaultMatcherV2 can
// satisfy an interface requiring this method. Unimplemented — calling it
// panics. (Original note: TODO.)
func (m *defaultMatcherV2) Search(feed *Feed, searchTerm string) ([]*Result, error) {
	panic("implement me")
}
// init demonstrates Go's receiver/method-set rules at package load time.
func init() {
	// A value-receiver method is callable on both a value and a pointer.
	var dm11 defaultMatcher
	dm11.Search(nil, "hello")
	dm12 := new(defaultMatcher)
	dm12.Search(nil, "hello")
	// NOTE(review): defaultMatcherV2.Search panics ("implement me"), so the
	// two calls below crash the program during package initialization —
	// confirm this demo package is never imported by a real binary.
	var dm21 defaultMatcherV2
	dm21.Search(nil, "hello")
	dm22 := new(defaultMatcherV2)
	dm22.Search(nil, "hello")
	// Assigning a pointer to the interface type and calling through it.
	var dm13 defaultMatcher
	var matcher Matcher = &dm13
	matcher.Search(nil, "hello")
}
|
package main
//1370. ไธๅไธ้ๅญ็ฌฆไธฒ
//็ปไฝ ไธไธชๅญ็ฌฆไธฒs๏ผ่ฏทไฝ ๆ นๆฎไธ้ข็็ฎๆณ้ๆฐๆ้ ๅญ็ฌฆไธฒ๏ผ
//
//ไป sไธญ้ๅบ ๆๅฐ็ๅญ็ฌฆ๏ผๅฐๅฎ ๆฅๅจ็ปๆๅญ็ฌฆไธฒ็ๅ้ขใ
//ไป sๅฉไฝๅญ็ฌฆไธญ้ๅบๆๅฐ็ๅญ็ฌฆ๏ผไธ่ฏฅๅญ็ฌฆๆฏไธไธไธชๆทปๅ ็ๅญ็ฌฆๅคง๏ผๅฐๅฎ ๆฅๅจ็ปๆๅญ็ฌฆไธฒๅ้ขใ
//้ๅคๆญฅ้ชค 2 ๏ผ็ดๅฐไฝ ๆฒกๆณไป sไธญ้ๆฉๅญ็ฌฆใ
//ไป sไธญ้ๅบ ๆๅคง็ๅญ็ฌฆ๏ผๅฐๅฎ ๆฅๅจ็ปๆๅญ็ฌฆไธฒ็ๅ้ขใ
//ไป sๅฉไฝๅญ็ฌฆไธญ้ๅบๆๅคง็ๅญ็ฌฆ๏ผไธ่ฏฅๅญ็ฌฆๆฏไธไธไธชๆทปๅ ็ๅญ็ฌฆๅฐ๏ผๅฐๅฎ ๆฅๅจ็ปๆๅญ็ฌฆไธฒๅ้ขใ
//้ๅคๆญฅ้ชค 5๏ผ็ดๅฐไฝ ๆฒกๆณไป sไธญ้ๆฉๅญ็ฌฆใ
//้ๅคๆญฅ้ชค 1 ๅฐ 6 ๏ผ็ดๅฐ sไธญๆๆๅญ็ฌฆ้ฝๅทฒ็ป่ขซ้่ฟใ
//ๅจไปปไฝไธๆญฅไธญ๏ผๅฆๆๆๅฐๆ่
ๆๅคงๅญ็ฌฆไธๆญขไธไธช๏ผไฝ ๅฏไปฅ้ๆฉๅ
ถไธญไปปๆไธไธช๏ผๅนถๅฐๅ
ถๆทปๅ ๅฐ็ปๆๅญ็ฌฆไธฒใ
//
//่ฏทไฝ ่ฟๅๅฐsไธญๅญ็ฌฆ้ๆฐๆๅบๅ็ ็ปๆๅญ็ฌฆไธฒ ใ
// sortString rearranges s per LeetCode 1370 ("Increasing Decreasing
// String"): repeatedly take one of each remaining letter in ascending
// order, then one of each in descending order, until every character of s
// has been consumed. Assumes s contains only lowercase ASCII letters.
func sortString(s string) string {
	// Count occurrences of each lowercase letter.
	var counts [26]int
	for _, r := range s {
		counts[r-'a']++
	}
	out := make([]byte, 0, len(s))
	for len(out) < len(s) {
		// Ascending sweep: smallest remaining letter of each kind, once.
		for c := 0; c < 26; c++ {
			if counts[c] > 0 {
				out = append(out, byte('a'+c))
				counts[c]--
			}
		}
		// Descending sweep, mirror image of the above.
		for c := 25; c >= 0; c-- {
			if counts[c] > 0 {
				out = append(out, byte('a'+c))
				counts[c]--
			}
		}
	}
	return string(out)
}
|
package main
import (
"flag"
"github.com/golang/glog"
"net/http"
)
// init mirrors glog output to stderr in addition to log files.
func init() {
	// flag.Set's error is deliberately ignored: "alsologtostderr" is a flag
	// glog registers at import time, so the lookup cannot fail here.
	flag.Set("alsologtostderr", "true")
}
// resp_chanel carries one HTTP status code per completed request back to main.
var resp_chanel = make(chan interface{})

// getRequest fetches url (defaulting to http://www.baidu.com when url is
// empty, which preserves the behavior of the existing callers that pass "")
// and sends the response status code on resp_chanel.
//
// NOTE(review): a request error panics, crashing the whole program —
// confirm that is acceptable for this demo tool.
func getRequest(url string) {
	if url == "" {
		// The previous version ignored its url parameter entirely and always
		// hit this endpoint; keep it as the default so callers passing ""
		// behave exactly as before.
		url = "http://www.baidu.com"
	}
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	// Close the body as soon as this function returns; registering the defer
	// immediately after the error check is the idiomatic placement.
	defer resp.Body.Close()
	resp_chanel <- resp.StatusCode
}
// main fires 50 concurrent GET requests and then logs the 50 status codes
// collected from resp_chanel. A failed request panics inside its goroutine
// and terminates the whole program.
func main() {
	flag.Parse()
	for i := 0; i < 50; i++ {
		go getRequest("")
	}
	for i := 0; i < 50; i++ {
		a := <-resp_chanel
		glog.Infoln(a)
	}
	// Alternative polling loop kept from the original for reference:
	//for{
	//	go getRequest("")
	//	time.Sleep(time.Second)
	//}
	//
	//for{
	//	a:=<-resp_chanel
	//	glog.Infoln(a)
	//}
}
|
package lib
import (
"encoding/json"
"net/http"
)
// Json serializes v as JSON onto w. On encoding failure it reports
// HTTP 500 to the client and returns the error; otherwise it returns nil.
func Json(w http.ResponseWriter, v interface{}) error {
	if err := json.NewEncoder(w).Encode(v); err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	return nil
}
// BodyToJson decodes the request body as JSON into v, returning any
// decoding error. The body is not closed here; that is the caller's (or
// the HTTP server's) responsibility.
func BodyToJson(r *http.Request, v interface{}) error {
	return json.NewDecoder(r.Body).Decode(v)
}
|
package handlers
// banksDetails is the JSON response envelope for a bank-branch lookup.
type banksDetails struct {
	Details []branchInformation `json:"bank_details"`
}

// branchInformation describes a single bank branch; every field is
// omitted from the JSON encoding when empty.
type branchInformation struct {
	Name     string `json:"name,omitempty"`
	Ifsc     string `json:"ifsc,omitempty"`
	BankID   int64  `json:"bank_id,omitempty"`
	Branch   string `json:"branch,omitempty"`
	Address  string `json:"address,omitempty"`
	City     string `json:"city,omitempty"`
	District string `json:"district,omitempty"`
	State    string `json:"state,omitempty"`
}
|
package main
import (
"fmt"
"math/big"
)
// main estimates how many days light takes to travel to the Andromeda
// galaxy, using math/big because the distance in km exceeds the int64 range.
func main() {
	lightSpeed := big.NewInt(299792)  // speed of light, km per second
	secondPerDay := big.NewInt(86400) // seconds per day
	distance := new(big.Int)
	// ~2.4e19 km, parsed from a string since the literal overflows int64.
	distance.SetString("24000000000000000000", 10)
	fmt.Println("ใขใณใใญใกใ้ๆฒณใพใงใฎ่ท้ขใฏใ", distance, "km.")
	seconds := new(big.Int)
	seconds.Div(distance, lightSpeed) // integer division: whole seconds
	days := new(big.Int)
	days.Div(seconds, secondPerDay)
	fmt.Println("ๅ…ใฎ้ๅบฆใงใ", days, "ๆฅใใใใ")
}
|
package orm
// sqlParameter is the map-backed implementation of SqlParameter.
type sqlParameter map[string]interface{}

// Set stores value under key and returns the parameter set itself so calls
// can be chained. (Receiver renamed from `this` to `p`: Go convention uses
// short receiver names, never this/self.)
func (p sqlParameter) Set(key string, value interface{}) SqlParameter {
	p[key] = value
	return p
}

// NewSqlParameter returns an empty, ready-to-use SqlParameter.
func NewSqlParameter() SqlParameter {
	return sqlParameter{}
}
|
package utils
import (
"HeartBolg/models"
"fmt"
"github.com/astaxie/beego/orm"
)
// ArticlePageables carries paging state and the page of articles fetched
// for it. Fields originally marked *M are supplied by the caller.
type ArticlePageables struct {
	// Total number of pages.
	PageCount int `json:"pageCount"`
	// Current page, 1-based (caller-supplied).
	CurrentPage int `json:"currentPage"`
	// Total number of rows.
	TotalCount int `json:"totalCount"`
	// Rows per page (caller-supplied).
	TotalNumber int `json:"totalNumber"`
	// Row offset of the first item of the current page.
	InitialNumber int `json:"initialNumber"`
	// Columns to query.
	Fields string `json:"fields"`
	// The fetched rows.
	Articles []models.Article `json:"articles"`
}
// SetPageCount computes the total page count from TotalCount rows at
// TotalNumber rows per page, rounding up. It returns 0 when there are no
// rows or when the page size is unset or invalid — the previous version
// divided by TotalNumber unguarded and panicked on a zero page size.
func (p *ArticlePageables) SetPageCount() int {
	if p.TotalCount <= 0 || p.TotalNumber <= 0 {
		return 0
	}
	// Ceiling division replaces the original modulo branch.
	return (p.TotalCount + p.TotalNumber - 1) / p.TotalNumber
}
// SetPageableData loads the current page of articles into p.Articles with a
// raw "LIMIT offset,count" query built from InitialNumber and TotalNumber.
func (p *ArticlePageables) SetPageableData() {
	i, err := orm.NewOrm().Raw("SELECT * FROM article LIMIT ?,?", p.InitialNumber, p.TotalNumber).QueryRows(&p.Articles)
	if err != nil {
		// The previous version silently swallowed query errors (it only
		// printed on success); surface them so failures are visible.
		fmt.Println("query articles failed:", err)
		return
	}
	fmt.Println("user nums: ", i)
}
// SetInitialNumber derives the row offset of the current page from the
// 1-based CurrentPage and the page size TotalNumber.
func (p *ArticlePageables) SetInitialNumber() {
	p.InitialNumber = (p.CurrentPage - 1) * p.TotalNumber
}
|
package udwDockerV2
import (
"fmt"
"github.com/tachyon-protocol/udw/udwFile"
"github.com/tachyon-protocol/udw/udwGoSource/udwGoBuild"
"github.com/tachyon-protocol/udw/udwGoSource/udwGoBuildCtx"
"github.com/tachyon-protocol/udw/udwGoSource/udwGoWriter/udwGoTypeMarshal"
"github.com/tachyon-protocol/udw/udwProjectPath"
"path/filepath"
"strings"
)
// BuildRunnerToDownload builds the self-contained runner executable for
// pkgPath/os (see BuildRunner) and copies it into the user's Downloads
// directory, named after the docker image.
func BuildRunnerToDownload(pkgPath string, os string) {
	resp := BuildRunner(pkgPath, os)
	copyFile := filepath.Join(udwFile.MustGetHomeDirPath(), "Downloads", getImageNameFromPkgPath(pkgPath)+resp.GetOutputExeFileExt())
	udwFile.MustCopy(resp.GetOutputExeFilePath(), copyFile)
}
// BuildRunner builds a docker image for pkgPath, exports it to a file, and
// generates a standalone "runner" Go program that embeds the raw image
// bytes as a string literal. The runner exposes Run (load the image, remove
// any stale container, start a fresh privileged one, optionally publishing
// a port) and Stop (force-remove the container) console commands. The
// generated program is then cross-compiled for the given target os on
// amd64, and the build context is returned.
func BuildRunner(pkgPath string, os string) (resp *udwGoBuildCtx.Ctx) {
	// Build the image and export it so its bytes can be embedded below.
	buildImage(pkgPath)
	imageFile := filepath.Join(getBuildPath(pkgPath), "image")
	exportImageToFile(pkgPath, imageFile)
	content := udwFile.MustReadFile(imageFile)
	buildPath := getBuildPath(pkgPath)
	runnerPath := filepath.Join(buildPath, "main.go")
	udwFile.MustMkdirForFile(runnerPath)
	// Generate the runner source; the image bytes are inlined via
	// WriteStringToGolang so the binary is fully self-contained.
	udwFile.MustWriteFile(runnerPath, []byte(`package main
import (
	"bytes"
	"github.com/tachyon-protocol/udw/udwCmd"
	"github.com/tachyon-protocol/udw/udwConsole"
	"github.com/tachyon-protocol/udw/udwErr"
	"os"
	"os/exec"
	"strconv"
	"fmt"
)
func main() {
	imageName := "`+getImageNameFromPkgPath(pkgPath)+`"
	udwConsole.AddCommandWithName("Run", func(req struct {
		Port uint16
		Command string
	}) {
		cmd := exec.Command("docker", "image", "load")
		cmd.Stdin = bytes.NewReader([]byte(`+udwGoTypeMarshal.WriteStringToGolang(string(content))+`))
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		err := cmd.Run()
		udwErr.PanicIfError(err)
		err = udwCmd.Run("docker container rm --force " + imageName)
		if err != nil {
			fmt.Println(err)
		}
		command := "docker container run"
		if req.Port != 0 {
			command += " --publish " + strconv.Itoa(int(req.Port)) + ":" + strconv.Itoa(int(req.Port))
		}
		command += " --privileged --cap-add=NET_ADMIN --device=/dev/net/tun --name " + imageName + " " + imageName + " " + req.Command
		udwCmd.MustRun(command)
	})
	udwConsole.AddCommandWithName("Stop", func() {
		udwCmd.MustRun("docker container rm --force " + imageName)
	})
	udwConsole.Main()
}`))
	// Derive the runner's import path relative to the project's src root.
	runnerPkgPath := strings.TrimPrefix(
		strings.TrimPrefix(buildPath, filepath.Join(udwProjectPath.MustGetProjectPath(), "src")),
		"/",
	)
	resp = udwGoBuild.MustBuild(udwGoBuild.BuildRequest{
		PkgPath:       runnerPkgPath,
		TargetOs:      os,
		TargetCpuArch: `amd64`,
	})
	fmt.Println("- - -\nRunner:", resp.GetOutputExeFilePath())
	return resp
}
|
// Copyright (C) 2015-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under
// the terms of the under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package on_demand_service_broker_test
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"sync"
"github.com/pivotal-cf/brokerapi/v10/domain"
brokerConfig "github.com/pivotal-cf/on-demand-service-broker/config"
sdk "github.com/pivotal-cf/on-demand-services-sdk/serviceadapter"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Catalog", func() {
var schemaParameters = map[string]interface{}{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": map[string]interface{}{
"flibbles": map[string]interface{}{
"description": "Number of flibbles to spawn",
"type": "integer",
},
},
"type": "object",
"required": []interface{}{"flibbles"},
}
var defaultSchemas = domain.ServiceSchemas{
Instance: domain.ServiceInstanceSchema{
Create: domain.Schema{Parameters: schemaParameters},
Update: domain.Schema{Parameters: schemaParameters},
},
Binding: domain.ServiceBindingSchema{
Create: domain.Schema{Parameters: schemaParameters},
},
}
var (
defaultSchemasJSON []byte
zero int
)
BeforeEach(func() {
var err error
defaultSchemasJSON, err = json.Marshal(defaultSchemas)
Expect(err).NotTo(HaveOccurred())
})
Context("without optional fields", func() {
BeforeEach(func() {
serviceCatalogConfig := defaultServiceCatalogConfig()
serviceCatalogConfig.DashboardClient = nil
conf := brokerConfig.Config{
Broker: brokerConfig.Broker{
Port: serverPort, Username: brokerUsername, Password: brokerPassword,
EnablePlanSchemas: true,
},
ServiceCatalog: serviceCatalogConfig,
}
StartServer(conf)
})
It("returns catalog", func() {
fakeCommandRunner.RunWithInputParamsReturns(defaultSchemasJSON, []byte{}, &zero, nil)
response, bodyContent := doCatalogRequest()
By("returning the correct HTTP status")
Expect(response.StatusCode).To(Equal(http.StatusOK))
By("returning the correct catalog response")
catalog := make(map[string][]domain.Service)
Expect(json.Unmarshal(bodyContent, &catalog)).To(Succeed())
Expect(catalog).To(Equal(map[string][]domain.Service{
"services": {
{
ID: serviceID,
Name: serviceName,
Description: serviceDescription,
Bindable: serviceBindable,
PlanUpdatable: servicePlanUpdatable,
Metadata: &domain.ServiceMetadata{
DisplayName: serviceMetadataDisplayName,
ImageUrl: serviceMetadataImageURL,
LongDescription: serviceMetaDataLongDescription,
ProviderDisplayName: serviceMetaDataProviderDisplayName,
DocumentationUrl: serviceMetaDataDocumentationURL,
SupportUrl: serviceMetaDataSupportURL,
Shareable: &trueVar,
},
DashboardClient: nil,
Tags: serviceTags,
Plans: []domain.ServicePlan{
{
ID: dedicatedPlanID,
Name: dedicatedPlanName,
Description: dedicatedPlanDescription,
Free: &trueVar,
Bindable: &trueVar,
Schemas: &defaultSchemas,
Metadata: &domain.ServicePlanMetadata{
Bullets: dedicatedPlanBullets,
DisplayName: dedicatedPlanDisplayName,
Costs: []domain.ServicePlanCost{
{
Unit: dedicatedPlanCostUnit,
Amount: dedicatedPlanCostAmount,
},
},
AdditionalMetadata: map[string]interface{}{
"foo": "bar",
},
},
MaintenanceInfo: nil,
},
{
ID: highMemoryPlanID,
Name: highMemoryPlanName,
Description: highMemoryPlanDescription,
Metadata: &domain.ServicePlanMetadata{
Bullets: highMemoryPlanBullets,
DisplayName: highMemoryPlanDisplayName,
},
Schemas: &defaultSchemas,
MaintenanceInfo: nil,
},
},
},
},
}))
})
It("can deal with concurrent requests", func() {
fakeCommandRunner.RunWithInputParamsReturns(defaultSchemasJSON, []byte{}, &zero, nil)
var wg sync.WaitGroup
const threads = 2
wg.Add(threads)
for i := 0; i < threads; i++ {
go func() {
defer wg.Done()
defer GinkgoRecover()
response, _ := doCatalogRequest()
By("returning the correct HTTP status")
Expect(response.StatusCode).To(Equal(http.StatusOK))
}()
}
wg.Wait()
})
})
// Verifies the catalog when every optional field (requires, additional
// metadata, maintenance info) is configured; private maintenance-info
// values must come back hashed.
Context("with optional fields", func() {
	BeforeEach(func() {
		// Deterministic stand-in hash so the private maintenance-info
		// values can be asserted on below.
		fakeMapHasher.HashStub = func(m map[string]string) string {
			var s string
			for key, value := range m {
				s += "hashed-" + key + "-" + value + ";"
			}
			return s
		}
		serviceCatalogConfig := defaultServiceCatalogConfig()
		serviceCatalogConfig.Requires = []string{"syslog_drain", "route_forwarding"}
		serviceCatalogConfig.Plans[0].Metadata.AdditionalMetadata = map[string]interface{}{
			"yo": "bill",
		}
		serviceCatalogConfig.Metadata.AdditionalMetadata = map[string]interface{}{
			"random": "george",
		}
		// Global maintenance info applies to plans without their own.
		serviceCatalogConfig.MaintenanceInfo = &brokerConfig.MaintenanceInfo{
			Public: map[string]string{
				"name": "jorge",
			},
			Private: map[string]string{
				"secret": "global_value",
			},
			Version:     "1.2.4+global",
			Description: "Global description",
		}
		// Plan-level maintenance info overrides the global block.
		serviceCatalogConfig.Plans[0].MaintenanceInfo = &brokerConfig.MaintenanceInfo{
			Public: map[string]string{
				"stemcell_version": "1234",
				"name":             "gloria",
			},
			Private: map[string]string{
				"secret": "plan_value",
			},
			Version:     "1.2.3+plan",
			Description: "Plan description",
		}
		conf := brokerConfig.Config{
			Broker: brokerConfig.Broker{
				Port: serverPort, Username: brokerUsername, Password: brokerPassword,
				EnablePlanSchemas: true,
			},
			ServiceCatalog: serviceCatalogConfig,
		}
		StartServer(conf)
	})
	It("returns catalog", func() {
		fakeCommandRunner.RunWithInputParamsReturns(defaultSchemasJSON, []byte{}, &zero, nil)
		response, bodyContent := doCatalogRequest()
		By("returning the correct HTTP status")
		Expect(response.StatusCode).To(Equal(http.StatusOK))
		By("returning the correct catalog response")
		catalog := make(map[string][]domain.Service)
		Expect(json.Unmarshal(bodyContent, &catalog)).To(Succeed())
		Expect(catalog).To(Equal(map[string][]domain.Service{
			"services": {
				{
					ID:            serviceID,
					Name:          serviceName,
					Description:   serviceDescription,
					Bindable:      serviceBindable,
					PlanUpdatable: servicePlanUpdatable,
					Metadata: &domain.ServiceMetadata{
						DisplayName:         serviceMetadataDisplayName,
						ImageUrl:            serviceMetadataImageURL,
						LongDescription:     serviceMetaDataLongDescription,
						ProviderDisplayName: serviceMetaDataProviderDisplayName,
						DocumentationUrl:    serviceMetaDataDocumentationURL,
						SupportUrl:          serviceMetaDataSupportURL,
						Shareable:           &trueVar,
						AdditionalMetadata: map[string]interface{}{
							"random": "george",
						},
					},
					DashboardClient: &domain.ServiceDashboardClient{
						ID:          "client-id-1",
						Secret:      "secret-1",
						RedirectURI: "https://dashboard.url",
					},
					Requires: []domain.RequiredPermission{"syslog_drain", "route_forwarding"},
					Tags:     serviceTags,
					Plans: []domain.ServicePlan{
						{
							ID:          dedicatedPlanID,
							Name:        dedicatedPlanName,
							Description: dedicatedPlanDescription,
							Free:        &trueVar,
							Bindable:    &trueVar,
							Metadata: &domain.ServicePlanMetadata{
								Bullets:     dedicatedPlanBullets,
								DisplayName: dedicatedPlanDisplayName,
								Costs: []domain.ServicePlanCost{
									{
										Unit:   dedicatedPlanCostUnit,
										Amount: dedicatedPlanCostAmount,
									},
								},
								AdditionalMetadata: map[string]interface{}{
									"yo": "bill",
								},
							},
							Schemas: &defaultSchemas,
							// Private values go through the fake hasher above.
							MaintenanceInfo: &domain.MaintenanceInfo{
								Public: map[string]string{
									"name":             "gloria",
									"stemcell_version": "1234",
								},
								Private:     "hashed-secret-plan_value;",
								Version:     "1.2.3+plan",
								Description: "Plan description",
							},
						},
						{
							ID:          highMemoryPlanID,
							Name:        highMemoryPlanName,
							Description: highMemoryPlanDescription,
							Metadata: &domain.ServicePlanMetadata{
								Bullets:     highMemoryPlanBullets,
								DisplayName: highMemoryPlanDisplayName,
							},
							Schemas: &defaultSchemas,
							// This plan has no maintenance info of its own, so
							// the global block is used.
							MaintenanceInfo: &domain.MaintenanceInfo{
								Public: map[string]string{
									"name": "jorge",
								},
								Private:     "hashed-secret-global_value;",
								Version:     "1.2.4+global",
								Description: "Global description",
							},
						},
					},
				},
			},
		}))
	})
})
// Adapter failures while generating plan schemas must surface as 500s.
When("GeneratePlanSchemas returns an error", func() {
	var (
		serviceCatalogConfig brokerConfig.ServiceOffering
	)
	BeforeEach(func() {
		serviceCatalogConfig = defaultServiceCatalogConfig()
		conf := brokerConfig.Config{
			Broker: brokerConfig.Broker{
				Port: serverPort, Username: brokerUsername, Password: brokerPassword,
				EnablePlanSchemas: true,
			},
			ServiceCatalog: serviceCatalogConfig,
		}
		StartServer(conf)
	})
	It("fails with 500 status code", func() {
		fakeCommandRunner.RunWithInputParamsReturns(nil, nil, nil, errors.New("oops"))
		response, bodyContent := doCatalogRequest()
		Expect(response.StatusCode).To(Equal(http.StatusInternalServerError))
		Expect(bodyContent).To(ContainSubstring("oops"))
	})
	It("fails with a proper message if not implemented", func() {
		errCode := sdk.NotImplementedExitCode
		fakeCommandRunner.RunWithInputParamsReturns([]byte{}, []byte{}, &errCode, nil)
		response, bodyContent := doCatalogRequest()
		Expect(response.StatusCode).To(Equal(http.StatusInternalServerError))
		Expect(string(bodyContent)).To(ContainSubstring("enable_plan_schemas is set to true, but the service adapter does not implement generate-plan-schemas"))
	})
})
// The broker API requires the X-Broker-API-Version header on every call.
Context("without version header", func() {
	BeforeEach(func() {
		conf := brokerConfig.Config{
			Broker: brokerConfig.Broker{
				Port: serverPort, Username: brokerUsername, Password: brokerPassword,
				EnablePlanSchemas: true,
			},
		}
		StartServer(conf)
	})
	It("fails with 412 status code", func() {
		response, bodyContent := doRequestWithAuthAndHeaderSet(
			http.MethodGet,
			fmt.Sprintf("http://%s/v2/catalog", serverURL),
			nil,
			func(r *http.Request) {
				r.Header.Del("X-Broker-API-Version")
			})
		Expect(response.StatusCode).To(Equal(http.StatusPreconditionFailed))
		Expect(string(bodyContent)).To(ContainSubstring(`{"Description":"X-Broker-API-Version Header not set"}`))
	})
})
// Catalog requests must be rejected without basic-auth credentials.
Context("without authentication", func() {
	BeforeEach(func() {
		conf := brokerConfig.Config{
			Broker: brokerConfig.Broker{
				Port: serverPort, Username: brokerUsername, Password: brokerPassword,
				EnablePlanSchemas: true,
			},
		}
		StartServer(conf)
	})
	It("fails with 401 status code", func() {
		response, _ := doRequestWithoutAuth(
			http.MethodGet,
			fmt.Sprintf("http://%s/v2/catalog", serverURL),
			nil,
			func(r *http.Request) {
				r.Header.Set("X-Broker-API-Version", "2.14")
			})
		Expect(response.StatusCode).To(Equal(http.StatusUnauthorized))
	})
})
})
func doCatalogRequest() (*http.Response, []byte) {
return doRequestWithAuthAndHeaderSet(http.MethodGet, fmt.Sprintf("http://%s/v2/catalog", serverURL), nil)
}
// defaultServiceCatalogConfig builds the baseline service offering used by
// the catalog tests: full service metadata, a dashboard client, global
// properties/quotas, and two plans — "dedicated" (with costs, quotas and an
// errand instance group) and "high memory" (which overrides a global
// property).
func defaultServiceCatalogConfig() brokerConfig.ServiceOffering {
	return brokerConfig.ServiceOffering{
		ID:            serviceID,
		Name:          serviceName,
		Description:   serviceDescription,
		Bindable:      serviceBindable,
		PlanUpdatable: servicePlanUpdatable,
		Metadata: brokerConfig.ServiceMetadata{
			DisplayName:         serviceMetadataDisplayName,
			ImageURL:            serviceMetadataImageURL,
			LongDescription:     serviceMetaDataLongDescription,
			ProviderDisplayName: serviceMetaDataProviderDisplayName,
			DocumentationURL:    serviceMetaDataDocumentationURL,
			SupportURL:          serviceMetaDataSupportURL,
			Shareable:           serviceMetaDataShareable,
		},
		DashboardClient: &brokerConfig.DashboardClient{
			ID:          "client-id-1",
			Secret:      "secret-1",
			RedirectUri: "https://dashboard.url",
		},
		Tags: serviceTags,
		// Properties shared by every plan unless a plan overrides them.
		GlobalProperties: sdk.Properties{
			"global_property": "global_value",
		},
		GlobalQuotas: brokerConfig.Quotas{},
		Plans: []brokerConfig.Plan{
			{
				Name:        dedicatedPlanName,
				ID:          dedicatedPlanID,
				Description: dedicatedPlanDescription,
				Free:        &trueVar,
				Bindable:    &trueVar,
				Update:      dedicatedPlanUpdateBlock,
				Metadata: brokerConfig.PlanMetadata{
					DisplayName: dedicatedPlanDisplayName,
					Bullets:     dedicatedPlanBullets,
					Costs: []brokerConfig.PlanCost{
						{
							Amount: dedicatedPlanCostAmount,
							Unit:   dedicatedPlanCostUnit,
						},
					},
					AdditionalMetadata: map[string]interface{}{
						"foo": "bar",
					},
				},
				Quotas: brokerConfig.Quotas{
					ServiceInstanceLimit: &dedicatedPlanQuota,
				},
				Properties: sdk.Properties{
					"type": "dedicated-plan-property",
				},
				InstanceGroups: []sdk.InstanceGroup{
					{
						Name:               "instance-group-name",
						VMType:             dedicatedPlanVMType,
						VMExtensions:       dedicatedPlanVMExtensions,
						PersistentDiskType: dedicatedPlanDisk,
						Instances:          dedicatedPlanInstances,
						Networks:           dedicatedPlanNetworks,
						AZs:                dedicatedPlanAZs,
					},
					// Errand-lifecycle group alongside the regular one.
					{
						Name:               "instance-group-errand",
						Lifecycle:          "errand",
						VMType:             dedicatedPlanVMType,
						PersistentDiskType: dedicatedPlanDisk,
						Instances:          dedicatedPlanInstances,
						Networks:           dedicatedPlanNetworks,
						AZs:                dedicatedPlanAZs,
					},
				},
			},
			{
				Name:        highMemoryPlanName,
				ID:          highMemoryPlanID,
				Description: highMemoryPlanDescription,
				Metadata: brokerConfig.PlanMetadata{
					DisplayName: highMemoryPlanDisplayName,
					Bullets:     highMemoryPlanBullets,
				},
				// Overrides the global_property defined above.
				Properties: sdk.Properties{
					"type":            "high-memory-plan-property",
					"global_property": "overrides_global_value",
				},
				InstanceGroups: []sdk.InstanceGroup{
					{
						Name:         "instance-group-name",
						VMType:       highMemoryPlanVMType,
						VMExtensions: highMemoryPlanVMExtensions,
						Instances:    highMemoryPlanInstances,
						Networks:     highMemoryPlanNetworks,
						AZs:          highMemoryPlanAZs,
					},
				},
			},
		},
	}
}
|
package main
import (
"encoding/json"
"fmt"
"log"
)
// codes pairs an HTTP status code with its textual description; the field
// names match the keys of the JSON payload decoded in main.
type codes struct {
	Code        int
	Description string
}
// main decodes a hard-coded JSON array of HTTP status codes and prints each
// code alongside its description, one per line.
func main() {
	rcvd := `
[{"Code":200,"Description":"StatusOK"},
{"Code":301,"Description":"StatusMovedPermanently"},
{"Code":302,"Description":"StatusFound"},
{"Code":303,"Description":"StatusSeeOther"},
{"Code":307,"Description":"StatusTemporaryRedirect"},
{"Code":400,"Description":"StatusBadRequest"},
{"Code":401,"Description":"StatusUnauthorized"},
{"Code":402,"Description":"StatusPaymentRequired"},
{"Code":403,"Description":"StatusForbidden"},
{"Code":404,"Description":"StatusNotFound"},
{"Code":405,"Description":"StatusMethodNotAllowed"},
{"Code":418,"Description":"StatusTeapot"},
{"Code":500,"Description":"StatusInternalServerError"}]
`
	var data []codes
	if err := json.Unmarshal([]byte(rcvd), &data); err != nil {
		log.Fatal(err)
	}
	for _, entry := range data {
		fmt.Println(entry.Code, "-", entry.Description)
	}
}
|
package domain
import (
"context"
"time"
"cloud.google.com/go/spanner"
"github.com/google/uuid"
)
// SessionService exposes session lookup, creation, and expiry operations.
type SessionService interface {
	// GetSessionByID fetches a session by id within the given transaction.
	GetSessionByID(ctx context.Context, txn Transaction, id string) (*Session, error)
	// CreateSession creates and stores a new session for userID and
	// returns its generated id.
	CreateSession(ctx context.Context, userID string) (string, error)
	// ExpiresSessionByID marks the session expired inside a fresh
	// Spanner read-write transaction.
	ExpiresSessionByID(ctx context.Context, client *spanner.Client, id string) error
}

// sessionService is the repository-backed implementation of SessionService.
type sessionService struct {
	sessionRepository SessionRepository
}

// SESSION_VALID_PERIOD is how long a newly created session stays valid.
// NOTE(review): exported ALL_CAPS naming is non-idiomatic Go
// (SessionValidPeriod would be conventional), but renaming would break
// external callers.
const (
	SESSION_VALID_PERIOD = 12 * time.Hour
)

// NewSessionService builds a SessionService backed by the given repository.
func NewSessionService(sessionRepository SessionRepository) SessionService {
	return &sessionService{sessionRepository: sessionRepository}
}

// GetSessionByID delegates the lookup to the repository.
func (s *sessionService) GetSessionByID(ctx context.Context, txn Transaction, id string) (*Session, error) {
	return s.sessionRepository.Get(ctx, txn, id)
}
// CreateSession issues a new session for userID, persists it via the
// repository, and returns the generated session id. The session expires
// SESSION_VALID_PERIOD after creation.
func (s *sessionService) CreateSession(ctx context.Context, userID string) (string, error) {
	createdAt := time.Now()
	id := uuid.New().String()
	if err := s.sessionRepository.Insert(ctx, &Session{
		ID:        id,
		UserID:    userID,
		CreatedAt: createdAt,
		ExpiresAt: createdAt.Add(SESSION_VALID_PERIOD),
	}); err != nil {
		return "", err
	}
	return id, nil
}
// ExpiresSessionByID expires the session with the given id inside a new
// Spanner read-write transaction; any commit error is returned.
func (s *sessionService) ExpiresSessionByID(ctx context.Context, client *spanner.Client, id string) error {
	_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error {
		return s.sessionRepository.ExpiresSessionByID(ctx, txn, id)
	})
	return err
}
|
package sham
import (
log "github.com/sirupsen/logrus"
"time"
)
// Scheduler is the simulated scheduler.
type Scheduler interface {
	// schedule performs the scheduling run. It receives the OS instance
	// (CPU plus process tables), adjusts the process queues, and invokes a
	// context switch via the OS when needed (it may also decline to).
	schedule(os *OS)
}

// NoScheduler is a scheduler that does not really schedule:
// it only runs the first entry in the ready list and then exits.
type NoScheduler struct{}
// schedule runs the first ready process (if any) and waits for it to
// finish, then returns without scheduling anything else.
func (n NoScheduler) schedule(os *OS) {
	// Bootstrap: move the head of the ready queue onto the CPU.
	if len(os.ReadyProcs) > 0 {
		os.ReadyToRunning(os.ReadyProcs[0].Id)
	}
	// Wait for the running process to signal completion.
	// NOTE(review): this assumes os.RunningProc is usable here; if the
	// ready queue started empty, the Status access below may misbehave —
	// confirm callers guarantee at least one ready process.
	for len(os.ReadyProcs) > 0 || os.RunningProc.Status != StatusDone {
		select {
		case pid := <-os.CPU.Done:
			os.RunningToDone()
			log.WithField("done_process", pid).Info("first thread done, do no more schedule. Shutdown NoScheduler")
			return
		}
	}
}
// FCFSScheduler does first-come, first-served scheduling: the head of the
// ready queue gets the CPU and keeps it until it finishes or blocks
// (non-preemptive), then the next process in line is dispatched.
type FCFSScheduler struct{}

// schedule drives the FCFS loop: boot the first ready process, then on
// every CPU Done signal route the stopped process to the matching queue,
// service pending interrupts, and dispatch the next ready process. A
// 3-second timeout injects the Noop process so the runtime does not report
// "all goroutines are asleep" when nothing is runnable.
func (F FCFSScheduler) schedule(os *OS) {
	field := "[FCFSScheduler] "
	log.Info(field, "FCFSScheduler on")
	if len(os.ReadyProcs) > 0 {
		log.WithField("first_process", os.ReadyProcs[0].Id).Info(field, "Boot the first process")
		os.ReadyToRunning(os.ReadyProcs[0].Id)
	}
	for {
		select {
		case status := <-os.CPU.Done:
			logger := log.WithFields(log.Fields{
				"process":       os.RunningProc.Id,
				"status":        status,
				"contextual_PC": os.RunningProc.Thread.contextual.PC,
			})
			logger.Info(field, "process stop running. Do schedule")
			// Route the stopped process according to why it stopped.
			switch status {
			case StatusDone:
				os.RunningToDone()
			case StatusBlocked:
				os.RunningToBlocked()
			default:
				os.RunningToReady()
			}
			os.HandleInterrupts()
			if len(os.ReadyProcs) > 0 {
				F._schedule(os)
			}
		case <-time.After(3 * time.Second):
			if os.RunningProc.Status != StatusRunning {
				// Avoid "all goroutines are asleep - deadlock": idle by
				// scheduling the Noop process.
				log.Warn("no process ready. Waiting with noop...")
				os.ProcsMutex.Lock()
				os.ReadyProcs = append(os.ReadyProcs, &Noop)
				os.ProcsMutex.Unlock()
				F._schedule(os)
			}
		}
		// Exit once nothing is ready, running, or blocked.
		os.ProcsMutex.RLock()
		hasJobsToDo := len(os.ReadyProcs) > 0 || os.RunningProc.Status != StatusDone || len(os.BlockedProcs) > 0
		os.ProcsMutex.RUnlock()
		if !hasJobsToDo {
			break
		}
	}
	log.Info(field, "All process done. no process to schedule. Shutdown FCFSScheduler")
}

// _schedule performs the actual dispatch: run the head of the ready queue.
// It assumes the ready queue is non-empty and the CPU is idle (Thread == nil).
func (F FCFSScheduler) _schedule(os *OS) {
	log.WithField("process_to_run", os.ReadyProcs[0].Id).Info("[FCFSScheduler] ", "run the head process")
	os.ReadyToRunning(os.ReadyProcs[0].Id)
}
|
package sqlx
import "fmt"
// ParameterGetter resolves a named parameter to its value.
type ParameterGetter interface {
	Get(key string) (interface{}, error)
}

// ParameterHolder lazily produces a single parameter value from a getter.
type ParameterHolder func(ParameterGetter) (interface{}, error)

// ParameterHolders is an ordered list of parameter producers.
type ParameterHolders []ParameterHolder

// Values resolves every holder in order against getter and returns the
// collected values. The first holder error aborts resolution and is
// returned with a nil slice.
func (ph ParameterHolders) Values(getter ParameterGetter) ([]interface{}, error) {
	// Pre-size: exactly one value per holder (was an unsized make before).
	values := make([]interface{}, 0, len(ph))
	for _, holder := range ph {
		val, err := holder(getter)
		if err != nil {
			return nil, err
		}
		values = append(values, val)
	}
	return values, nil
}
// MapParameters adapts a plain map to the ParameterGetter interface.
type MapParameters map[string]interface{}

// Get returns the value stored under key, or an error naming the missing
// parameter.
func (params MapParameters) Get(key string) (interface{}, error) {
	val, ok := params[key]
	if !ok {
		// Fixed: Go error strings are lowercase and unpunctuated.
		return nil, fmt.Errorf("parameter %q not found", key)
	}
	return val, nil
}
|
// +build gateway
package gateway
import (
"github.com/kumahq/kuma/pkg/api-server/definitions"
core_plugins "github.com/kumahq/kuma/pkg/core/plugins"
core_mesh "github.com/kumahq/kuma/pkg/core/resources/apis/mesh"
"github.com/kumahq/kuma/pkg/core/resources/registry"
"github.com/kumahq/kuma/pkg/kds"
"github.com/kumahq/kuma/pkg/kds/global"
"github.com/kumahq/kuma/pkg/kds/zone"
)
// NOTE: this is non-deterministic in testing. Some tests will import
// the plugin and trigger registration and some won't. This means that
// whether the Gateway types are registered in tests depends on which
// subset of tests are running.

// init registers the Gateway resource with the type registry, wires it
// into KDS propagation (zone CP up to global CP only), exposes it in the
// API-server definitions, and registers this plugin under "gateway".
func init() {
	registry.RegisterType(core_mesh.NewGatewayResource())
	// NOTE(review): "RegistryListType" reads like a typo of
	// "RegisterListType" — confirm against the registry package API.
	registry.RegistryListType(&core_mesh.GatewayResourceList{})
	// A Gateway is local to a zone, which means that it propagates in one
	// direction, from a zone CP up to a global CP. The reason for this
	// is that the Kubernetes Gateway API is the native Kubernetes API
	// for Kuma gateways. If we propagated a Universal Gateway resource
	// to a Kubernetes zone, we would need to be able to transform Gateway
	// resources from Universal -> Kubernetes and have to deal with namespace
	// semantics and a lot of other unpleasantness.
	kds.SupportedTypes = append(kds.SupportedTypes, core_mesh.GatewayType)
	zone.ProvidedTypes = append(zone.ProvidedTypes, core_mesh.GatewayType)
	global.ConsumedTypes = append(global.ConsumedTypes, core_mesh.GatewayType)
	definitions.All = append(definitions.All,
		definitions.ResourceWsDefinition{
			Type: core_mesh.GatewayType,
			Path: "gateways",
		},
	)
	core_plugins.Register("gateway", &plugin{})
}
|
//
// Copyright (c) SAS Institute Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package p11token
import (
"crypto/rand"
"encoding/hex"
"io"
"math/big"
"strings"
)
// makeKeyID produces a random 20-byte key identifier, or nil if the
// system's secure random source fails.
func makeKeyID() []byte {
	id := make([]byte, 20)
	// crypto/rand.Read fills the whole slice or returns an error.
	if _, err := rand.Read(id); err != nil {
		return nil
	}
	return id
}
// parseKeyID decodes a hex key identifier, tolerating ":" separators
// between byte groups.
func parseKeyID(value string) ([]byte, error) {
	cleaned := strings.Join(strings.Split(value, ":"), "")
	return hex.DecodeString(cleaned)
}
// bytesToBig interprets val as a big-endian unsigned integer.
func bytesToBig(val []byte) *big.Int {
	result := new(big.Int)
	result.SetBytes(val)
	return result
}
|
package config
// Device describes a configured device holding an instance of type T.
type Device[T any] interface {
	// Config returns the device's named configuration.
	Config() Named
	// Instance returns the wrapped device instance.
	Instance() T
}

// ConfigurableDevice is a Device whose configuration is persisted and can
// be updated or deleted at runtime.
type ConfigurableDevice[T any] interface {
	Device[T]
	// ID returns the persisted configuration id.
	ID() int
	// Update replaces the stored configuration and swaps the instance.
	Update(map[string]any, T) error
	// Delete removes the stored configuration.
	Delete() error
}

// configurableDevice backs ConfigurableDevice with a persisted Config.
type configurableDevice[T any] struct {
	config   Config
	instance T
}
// NewConfigurableDevice wraps a persisted config and its instance in a
// ConfigurableDevice.
func NewConfigurableDevice[T any](config Config, instance T) ConfigurableDevice[T] {
	device := new(configurableDevice[T])
	device.config = config
	device.instance = instance
	return device
}
// Config returns the named view of the persisted configuration.
func (d *configurableDevice[T]) Config() Named {
	return d.config.Named()
}

// Instance returns the wrapped device instance.
func (d *configurableDevice[T]) Instance() T {
	return d.instance
}

// ID returns the persisted configuration's id.
func (d *configurableDevice[T]) ID() int {
	return d.config.ID
}

// Update persists the new configuration map and, on success, swaps in the
// new instance. On persistence failure the old instance is kept.
func (d *configurableDevice[T]) Update(config map[string]any, instance T) error {
	if err := d.config.Update(config); err != nil {
		return err
	}
	d.instance = instance
	return nil
}

// Delete removes the persisted configuration.
func (d *configurableDevice[T]) Delete() error {
	return d.config.Delete()
}
// staticDevice is a Device with a fixed, non-persisted configuration.
type staticDevice[T any] struct {
	config   Named
	instance T
}

// NewStaticDevice wraps a fixed named config and an instance in a Device.
func NewStaticDevice[T any](config Named, instance T) Device[T] {
	return &staticDevice[T]{
		config:   config,
		instance: instance,
	}
}
// Configurable reports whether the device can be reconfigured.
// NOTE(review): returning true on the *static* device looks inverted, and
// the method is not part of the Device interface — confirm whether it
// should return false or be removed.
func (d *staticDevice[T]) Configurable() bool {
	return true
}

// Config returns the fixed named configuration.
func (d *staticDevice[T]) Config() Named {
	return d.config
}

// Instance returns the wrapped device instance.
func (d *staticDevice[T]) Instance() T {
	return d.instance
}
|
package main
import (
"log"
"net/http"
"text/template"
)
// tpl holds every template parsed from the temp/ directory.
// NOTE(review): this uses text/template, so rendered data is NOT
// HTML-escaped; html/template would be safer if the output is HTML.
var tpl *template.Template

// init parses all files under temp/ once at startup; template.Must panics
// (terminating the program) if parsing fails.
func init() {
	tpl = template.Must(template.ParseGlob("temp/*"))
}
// main registers the /slice handler and serves HTTP on :8086.
// ListenAndServe only returns on error, which is fatal.
// (Fixed gofmt spacing around arguments.)
func main() {
	http.HandleFunc("/slice", HomePage)
	log.Fatal(http.ListenAndServe(":8086", nil))
}
func HomePage(w http.ResponseWriter, r *http.Request) {
s := []string{"shrikar", "dinesh", "mohanish", "prathamesh"}
err := tpl.ExecuteTemplate(w, "slice.html", s)
if err != nil {
log.Fatal(err)
}
} |
package odoo
import (
"fmt"
)
// AccountChartTemplate represents account.chart.template model.
// AccountChartTemplate represents account.chart.template model.
//
// NOTE(review): every xmlrpc tag below spells "omptempty"; the
// conventional option name is "omitempty". If the xmlrpc marshaller looks
// for "omitempty", these options are silently ignored — confirm against
// the xmlrpc library before changing, since fixing it alters wire
// payloads.
type AccountChartTemplate struct {
	LastUpdate                        *Time     `xmlrpc:"__last_update,omptempty"`
	AccountIds                        *Relation `xmlrpc:"account_ids,omptempty"`
	BankAccountCodePrefix             *String   `xmlrpc:"bank_account_code_prefix,omptempty"`
	CashAccountCodePrefix             *String   `xmlrpc:"cash_account_code_prefix,omptempty"`
	CodeDigits                        *Int      `xmlrpc:"code_digits,omptempty"`
	CompanyId                         *Many2One `xmlrpc:"company_id,omptempty"`
	CompleteTaxSet                    *Bool     `xmlrpc:"complete_tax_set,omptempty"`
	CreateDate                        *Time     `xmlrpc:"create_date,omptempty"`
	CreateUid                         *Many2One `xmlrpc:"create_uid,omptempty"`
	CurrencyId                        *Many2One `xmlrpc:"currency_id,omptempty"`
	DisplayName                       *String   `xmlrpc:"display_name,omptempty"`
	ExpenseCurrencyExchangeAccountId  *Many2One `xmlrpc:"expense_currency_exchange_account_id,omptempty"`
	Id                                *Int      `xmlrpc:"id,omptempty"`
	IncomeCurrencyExchangeAccountId   *Many2One `xmlrpc:"income_currency_exchange_account_id,omptempty"`
	Name                              *String   `xmlrpc:"name,omptempty"`
	ParentId                          *Many2One `xmlrpc:"parent_id,omptempty"`
	PropertyAccountExpenseCategId     *Many2One `xmlrpc:"property_account_expense_categ_id,omptempty"`
	PropertyAccountExpenseId          *Many2One `xmlrpc:"property_account_expense_id,omptempty"`
	PropertyAccountIncomeCategId      *Many2One `xmlrpc:"property_account_income_categ_id,omptempty"`
	PropertyAccountIncomeId           *Many2One `xmlrpc:"property_account_income_id,omptempty"`
	PropertyAccountPayableId          *Many2One `xmlrpc:"property_account_payable_id,omptempty"`
	PropertyAccountReceivableId       *Many2One `xmlrpc:"property_account_receivable_id,omptempty"`
	PropertyStockAccountInputCategId  *Many2One `xmlrpc:"property_stock_account_input_categ_id,omptempty"`
	PropertyStockAccountOutputCategId *Many2One `xmlrpc:"property_stock_account_output_categ_id,omptempty"`
	PropertyStockValuationAccountId   *Many2One `xmlrpc:"property_stock_valuation_account_id,omptempty"`
	TaxTemplateIds                    *Relation `xmlrpc:"tax_template_ids,omptempty"`
	TransferAccountId                 *Many2One `xmlrpc:"transfer_account_id,omptempty"`
	UseAngloSaxon                     *Bool     `xmlrpc:"use_anglo_saxon,omptempty"`
	Visible                           *Bool     `xmlrpc:"visible,omptempty"`
	WriteDate                         *Time     `xmlrpc:"write_date,omptempty"`
	WriteUid                          *Many2One `xmlrpc:"write_uid,omptempty"`
}
// AccountChartTemplates represents array of account.chart.template model.
type AccountChartTemplates []AccountChartTemplate

// AccountChartTemplateModel is the odoo model name.
const AccountChartTemplateModel = "account.chart.template"

// Many2One convert AccountChartTemplate to *Many2One.
// Only the id is carried; the display-name half of the pair is left empty.
func (act *AccountChartTemplate) Many2One() *Many2One {
	return NewMany2One(act.Id.Get(), "")
}
// CreateAccountChartTemplate creates a new account.chart.template model and returns its id.
// A successful call that yields no id returns (-1, nil).
func (c *Client) CreateAccountChartTemplate(act *AccountChartTemplate) (int64, error) {
	ids, err := c.CreateAccountChartTemplates([]*AccountChartTemplate{act})
	switch {
	case err != nil:
		return -1, err
	case len(ids) > 0:
		return ids[0], nil
	default:
		return -1, nil
	}
}
// CreateAccountChartTemplates creates new account.chart.template models
// and returns their ids. (Doc comment previously named the singular
// function.)
func (c *Client) CreateAccountChartTemplates(acts []*AccountChartTemplate) ([]int64, error) {
	var vv []interface{}
	for _, v := range acts {
		vv = append(vv, v)
	}
	return c.Create(AccountChartTemplateModel, vv)
}
// UpdateAccountChartTemplate updates an existing account.chart.template record.
func (c *Client) UpdateAccountChartTemplate(act *AccountChartTemplate) error {
	return c.UpdateAccountChartTemplates([]int64{act.Id.Get()}, act)
}

// UpdateAccountChartTemplates updates existing account.chart.template records.
// All records (represented by ids) will be updated by act values.
func (c *Client) UpdateAccountChartTemplates(ids []int64, act *AccountChartTemplate) error {
	return c.Update(AccountChartTemplateModel, ids, act)
}

// DeleteAccountChartTemplate deletes an existing account.chart.template record.
func (c *Client) DeleteAccountChartTemplate(id int64) error {
	return c.DeleteAccountChartTemplates([]int64{id})
}

// DeleteAccountChartTemplates deletes existing account.chart.template records.
func (c *Client) DeleteAccountChartTemplates(ids []int64) error {
	return c.Delete(AccountChartTemplateModel, ids)
}
// GetAccountChartTemplate gets account.chart.template existing record.
// It returns an error when the id does not resolve to a record.
func (c *Client) GetAccountChartTemplate(id int64) (*AccountChartTemplate, error) {
	acts, err := c.GetAccountChartTemplates([]int64{id})
	if err != nil {
		return nil, err
	}
	// acts is always non-nil on success; the redundant nil check was removed.
	if len(*acts) > 0 {
		return &((*acts)[0]), nil
	}
	return nil, fmt.Errorf("id %v of account.chart.template not found", id)
}
// GetAccountChartTemplates gets account.chart.template existing records.
func (c *Client) GetAccountChartTemplates(ids []int64) (*AccountChartTemplates, error) {
	var acts AccountChartTemplates
	if err := c.Read(AccountChartTemplateModel, ids, nil, &acts); err != nil {
		return nil, err
	}
	return &acts, nil
}
// FindAccountChartTemplate finds account.chart.template record by querying it with criteria.
// Only the first match (Limit(1)) is returned; no match yields an error.
func (c *Client) FindAccountChartTemplate(criteria *Criteria) (*AccountChartTemplate, error) {
	acts := &AccountChartTemplates{}
	if err := c.SearchRead(AccountChartTemplateModel, criteria, NewOptions().Limit(1), acts); err != nil {
		return nil, err
	}
	// acts is always non-nil here; the redundant nil check was removed.
	if len(*acts) > 0 {
		return &((*acts)[0]), nil
	}
	return nil, fmt.Errorf("account.chart.template was not found with criteria %v", criteria)
}
// FindAccountChartTemplates finds account.chart.template records by querying it
// and filtering it with criteria and options.
func (c *Client) FindAccountChartTemplates(criteria *Criteria, options *Options) (*AccountChartTemplates, error) {
	acts := &AccountChartTemplates{}
	if err := c.SearchRead(AccountChartTemplateModel, criteria, options, acts); err != nil {
		return nil, err
	}
	return acts, nil
}

// FindAccountChartTemplateIds finds records ids by querying it
// and filtering it with criteria and options.
func (c *Client) FindAccountChartTemplateIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(AccountChartTemplateModel, criteria, options)
	if err != nil {
		// Returns an empty (non-nil) slice on failure.
		return []int64{}, err
	}
	return ids, nil
}

// FindAccountChartTemplateId finds record id by querying it with criteria.
// Returns -1 with an error when no record matches.
func (c *Client) FindAccountChartTemplateId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(AccountChartTemplateModel, criteria, options)
	if err != nil {
		return -1, err
	}
	if len(ids) > 0 {
		return ids[0], nil
	}
	return -1, fmt.Errorf("account.chart.template was not found with criteria %v and options %v", criteria, options)
}
|
// Speak HTTP like a local -- a simple, intuitive HTTP console
// This is a port of http://github.com/cloudhead/http-console
package main
import (
"bufio"
"bytes"
"crypto/tls"
"encoding/base64"
"flag"
"fmt"
"github.com/mattn/go-colorable"
"github.com/peterh/liner"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"path"
"regexp"
"runtime"
"strconv"
"strings"
"time"
)
// Command-line flags plus the color-capable stdout writer shared by the REPL.
var (
	colors          = flag.Bool("colors", true, "colorful output")
	useSSL          = flag.Bool("ssl", false, "use SSL")
	useJSON         = flag.Bool("json", false, "use JSON")
	rememberCookies = flag.Bool("cookies", false, "remember cookies")
	verbose         = flag.Bool("v", false, "be verbose, print out the request in wire format before sending")
	out             = colorable.NewColorableStdout()
)

// Color scheme, ref: http://linuxgazette.net/issue65/padala.html
const (
	C_Prompt = "\x1b[90m"
	C_Header = "\x1b[1m"
	C_2xx    = "\x1b[1;32m"
	C_3xx    = "\x1b[1;36m"
	C_4xx    = "\x1b[1;31m"
	C_5xx    = "\x1b[1;37;41m"
	C_Reset  = "\x1b[0m"
)

// colorize wraps s in the given ANSI color code and resets afterwards.
func colorize(color, s string) string {
	return color + s + C_Reset
}

// myCloser adapts any io.Reader into an io.ReadCloser with a no-op Close,
// so in-memory buffers can serve as request bodies.
type myCloser struct {
	io.Reader
}

func (myCloser) Close() error { return nil }

// Cookie is a minimal parsed representation of a Set-Cookie header.
type Cookie struct {
	Items    map[string]string // name/value pairs that are not known attributes
	path     string
	expires  time.Time
	domain   string
	secure   bool
	httpOnly bool
}

// Session holds the state of one interactive connection: target scheme and
// host, the live client connection, sticky request headers, remembered
// cookies, and the current working path. cookies and path are pointers so
// the session can be passed by value while sharing this mutable state.
type Session struct {
	scheme  string
	host    string
	conn    *httputil.ClientConn
	headers http.Header
	cookies *[]*Cookie
	path    *string
}
// dial opens a TCP (optionally TLS, optionally proxied) connection to host
// and wraps it in an httputil.ClientConn. Any failure is fatal: the error
// is printed and the process exits.
//
// NOTE(review): httputil.ClientConn is deprecated in modern Go; kept
// because the rest of the program is built around it.
func dial(host string) (conn *httputil.ClientConn) {
	var tcp net.Conn
	var err error
	fmt.Fprintf(os.Stderr, "http-gonsole: establishing a TCP connection ...\n")
	proxy := os.Getenv("HTTP_PROXY")
	// Connect through the proxy unless the target is localhost.
	if strings.Split(host, ":")[0] != "localhost" && len(proxy) > 0 {
		proxy_url, _ := url.Parse(proxy)
		tcp, err = net.Dial("tcp", proxy_url.Host)
	} else {
		tcp, err = net.Dial("tcp", host)
	}
	if err != nil {
		fmt.Fprintln(os.Stderr, "http-gonsole:", err)
		os.Exit(1)
	}
	if *useSSL {
		if len(proxy) > 0 {
			// Ask the proxy to open a CONNECT tunnel before starting TLS.
			connReq := &http.Request{
				Method: "CONNECT",
				URL:    &url.URL{Path: host},
				Host:   host,
				Header: make(http.Header),
			}
			// BUG FIX: the write error was previously ignored.
			if err := connReq.Write(tcp); err != nil {
				fmt.Fprintln(os.Stderr, "http-gonsole:", err)
				os.Exit(1)
			}
			resp, err := http.ReadResponse(bufio.NewReader(tcp), connReq)
			// BUG FIX: the error must be checked before resp is used;
			// previously resp.StatusCode was read first, which panics on a
			// nil response when ReadResponse fails.
			if err != nil {
				fmt.Fprintln(os.Stderr, "http-gonsole:", err)
				os.Exit(1)
			}
			resp.Body.Close()
			if resp.StatusCode != 200 {
				fmt.Fprintln(os.Stderr, "http-gonsole:", resp.Status)
				os.Exit(1)
			}
		}
		// Start TLS over the (possibly tunneled) TCP connection; both the
		// proxied and direct paths previously did this identically.
		tcp = tls.Client(tcp, nil)
		conn = httputil.NewClientConn(tcp, nil)
		if err = tcp.(*tls.Conn).Handshake(); err != nil {
			fmt.Fprintln(os.Stderr, "http-gonsole:", err)
			os.Exit(1)
		}
		if err = tcp.(*tls.Conn).VerifyHostname(strings.Split(host, ":")[0]); err != nil {
			fmt.Fprintln(os.Stderr, "http-gonsole:", err)
			os.Exit(1)
		}
	} else {
		conn = httputil.NewClientConn(tcp, nil)
	}
	return
}
// perform sends one HTTP request over the session's connection and prints
// the response (colored status line, headers, body). It retries up to
// twice when the connection was closed between requests, optionally
// records Set-Cookie headers, and exits the process on unrecoverable
// errors.
func (s Session) perform(method, uri, data string) {
	var req http.Request
	req.URL, _ = url.Parse(uri)
	req.Method = method
	req.Header = s.headers
	req.ContentLength = int64(len([]byte(data)))
	req.Body = myCloser{bytes.NewBufferString(data)}
	if *verbose {
		// Dump the request in wire format before sending.
		req.Write(os.Stderr)
	}
	retry := 0
request:
	req.Body = myCloser{bytes.NewBufferString(data)} // recreate anew, in case of retry
	err := s.conn.Write(&req)
	if err != nil {
		if retry < 2 {
			if err == io.ErrUnexpectedEOF {
				// the underlying connection has been closed "gracefully"
				retry++
				s.conn.Close()
				s.conn = dial(s.host)
				goto request
			} else if protoerr, ok := err.(*http.ProtocolError); ok && protoerr == httputil.ErrPersistEOF {
				// the connection has been closed in an HTTP keepalive sense
				retry++
				s.conn.Close()
				s.conn = dial(s.host)
				goto request
			}
		}
		fmt.Fprintln(os.Stderr, "http-gonsole: could not send request:", err)
		os.Exit(1)
	}
	r, err := s.conn.Read(&req)
	if err != nil {
		if protoerr, ok := err.(*http.ProtocolError); ok && protoerr == httputil.ErrPersistEOF {
			// the remote requested that this be the last request serviced,
			// we proceed as the response is still valid
			defer s.conn.Close()
			defer func() { s.conn = dial(s.host) }()
			goto output
		}
		fmt.Fprintln(os.Stderr, "http-gonsole: could not read response:", err)
		os.Exit(1)
	}
output:
	if len(data) > 0 {
		fmt.Println()
	}
	// Color the status line by class: 2xx green, 3xx cyan, 4xx red,
	// 5xx white-on-red.
	if r.StatusCode >= 500 {
		fmt.Fprintf(out, colorize(C_5xx, "%s %s\n"), r.Proto, r.Status)
	} else if r.StatusCode >= 400 {
		fmt.Fprintf(out, colorize(C_4xx, "%s %s\n"), r.Proto, r.Status)
	} else if r.StatusCode >= 300 {
		fmt.Fprintf(out, colorize(C_3xx, "%s %s\n"), r.Proto, r.Status)
	} else if r.StatusCode >= 200 {
		fmt.Fprintf(out, colorize(C_2xx, "%s %s\n"), r.Proto, r.Status)
	}
	if len(r.Header) > 0 {
		for key, arr := range r.Header {
			for _, val := range arr {
				fmt.Fprintf(out, colorize(C_Header, "%s: "), key)
				fmt.Println(val)
			}
		}
		fmt.Println()
	}
	if *rememberCookies {
		if cookies, found := r.Header["Set-Cookie"]; found {
			// Hand-rolled Set-Cookie parsing: split each header into
			// attribute=value pairs; known attributes map onto Cookie
			// fields, everything else lands in Items.
			for _, h := range cookies {
				cookie := new(Cookie)
				cookie.Items = map[string]string{}
				re, _ := regexp.Compile("^[^=]+=[^;]+(; *(expires=[^;]+|path=[^;,]+|domain=[^;,]+|secure))*,?")
				rs := re.FindAllString(h, -1)
				for _, ss := range rs {
					m := strings.Split(ss, ";")
					for _, n := range m {
						t := strings.SplitN(n, "=", 2)
						if len(t) == 2 {
							t[0] = strings.Trim(t[0], " ")
							t[1] = strings.Trim(t[1], " ")
							switch t[0] {
							case "domain":
								cookie.domain = t[1]
							case "path":
								cookie.path = t[1]
							case "expires":
								// Try the two cookie date formats in common use.
								tm, err := time.Parse("Fri, 02-Jan-2006 15:04:05 MST", t[1])
								if err != nil {
									tm, err = time.Parse("Fri, 02-Jan-2006 15:04:05 -0700", t[1])
								}
								cookie.expires = tm
							case "secure":
								cookie.secure = true
							case "HttpOnly":
								cookie.httpOnly = true
							default:
								cookie.Items[t[0]] = t[1]
							}
						}
					}
				}
				*s.cookies = append(*s.cookies, cookie)
			}
		}
	}
	// Read exactly Content-Length bytes when given; otherwise drain the
	// whole body (except for HEAD, which has none).
	h := r.Header.Get("Content-Length")
	if len(h) > 0 {
		n, _ := strconv.ParseInt(h, 10, 64)
		b := make([]byte, n)
		io.ReadFull(r.Body, b)
		fmt.Println(string(b))
	} else if method != "HEAD" {
		b, _ := ioutil.ReadAll(r.Body)
		fmt.Println(string(b))
	} else {
		// TODO: streaming?
	}
}
// Parse a single command and execute it. (REPL without the loop)
// Return true when the quit command is given.
//
// Supported inputs:
//   - "/path" or ".."     change the current path
//   - "Header: value"     set a request header
//   - "METHOD [path]"     perform an HTTP request (POST/PUT prompt for a body)
//   - ".h/.c/.v/.o/.?/.q" dot-commands (headers, cookies, verbose, options, help, quit)
func (s Session) repl() bool {
	var prompt string
	if runtime.GOOS == "windows" {
		prompt = fmt.Sprintf("%s://%s%s: ", s.scheme, s.host, *s.path)
	} else {
		prompt = fmt.Sprintf(colorize(C_Prompt, "%s://%s%s: "), s.scheme, s.host, *s.path)
	}
	var err error
	var line string
	ln := liner.NewLiner()
	defer ln.Close()
	for {
		line, err = ln.Prompt(prompt)
		if err != nil {
			// EOF (^D) or prompt failure ends the session.
			fmt.Println()
			return true
		}
		line = strings.Trim(line, "\n")
		line = strings.Trim(line, "\r")
		if line != "" {
			break
		}
	}
	// Path navigation: "/foo", "/foo/" or "..".
	if match, _ := regexp.MatchString("^(/[^ \t]*)|(\\.\\.)$", line); match {
		if line == "/" || line == "//" {
			*s.path = "/"
		} else {
			*s.path = strings.Replace(path.Clean(path.Join(*s.path, line)), "\\", "/", -1)
			if len(line) > 1 && line[len(line)-1] == '/' {
				// path.Clean strips trailing slashes; restore the one typed.
				*s.path += "/"
			}
		}
		return false
	}
	// "Key: value" sets a request header.
	re := regexp.MustCompile("^([a-zA-Z][a-zA-Z0-9\\-]+):(.*)")
	if match := re.FindStringSubmatch(line); match != nil {
		key := match[1]
		val := strings.TrimSpace(match[2])
		if len(val) > 0 {
			s.headers.Set(key, val)
		}
		return false
	}
	// "METHOD [path]" performs a request.
	re = regexp.MustCompile("^([A-Z]+)(.*)")
	if match := re.FindStringSubmatch(line); match != nil {
		method := match[1]
		p := strings.TrimSpace(match[2])
		trailingSlash := (len(*s.path) > 1) && ((*s.path)[len(*s.path)-1] == '/')
		if len(p) == 0 {
			p = "/"
		} else {
			trailingSlash = p[len(p)-1] == '/'
		}
		p = strings.Replace(path.Clean(path.Join(*s.path, p)), "\\", "/", -1)
		if trailingSlash {
			p += "/"
		}
		data := ""
		if method == "POST" || method == "PUT" {
			prompt = colorize(C_Prompt, "...: ")
			line, err = ln.Prompt(prompt)
			// BUG FIX: the Prompt error was previously ignored.
			if err != nil || line == "" {
				return false
			}
			// BUG FIX: the body typed by the user was read into `line`
			// but never assigned to `data`, so POST/PUT always sent an
			// empty request body.
			data = line
		}
		ln.AppendHistory(line)
		s.perform(method, s.scheme+"://"+s.host+p, data)
		return false
	}
	if line == ".h" || line == ".headers" {
		for key, arr := range s.headers {
			for _, val := range arr {
				fmt.Println(key + ": " + val)
			}
		}
		return false
	}
	if line == ".c" || line == ".cookies" {
		for _, cookie := range *s.cookies {
			for key, val := range cookie.Items {
				fmt.Println(key + ": " + val)
			}
		}
		return false
	}
	if line == ".v" || line == ".verbose" {
		*verbose = !*verbose
		return false
	}
	if line == ".o" || line == ".options" {
		fmt.Printf("useSSL=%v, rememberCookies=%v, verbose=%v\n", *useSSL, *rememberCookies, *verbose)
		return false
	}
	if line == ".?" || line == ".help" {
		fmt.Println(".headers, .h show active request headers\n" +
			".options, .o show options\n" +
			".cookies, .c show client cookies\n" +
			".help, .? display this message\n" +
			".exit, .q, ^D exit console\n")
		return false
	}
	if line == ".q" || line == ".exit" {
		return true
	}
	fmt.Fprintln(os.Stderr, "unknown command:", line)
	return false
}
// main parses the optional target URL argument, configures the session
// (scheme, host, default headers, cookie jar) and runs the REPL until
// the quit command is given.
func main() {
	scheme := "http"
	host := "localhost:80"
	headers := make(http.Header)
	cookies := new([]*Cookie)
	p := "/"
	flag.Parse()
	if flag.NArg() > 0 {
		tmp := flag.Arg(0)
		if match, _ := regexp.MatchString("^[^:]+(:[0-9]+)?$", tmp); match {
			// Bare "host[:port]" — prefix a scheme so url.Parse accepts it.
			tmp = "http://" + tmp
		}
		targetURL, err := url.Parse(tmp)
		if err != nil {
			fmt.Fprintln(os.Stderr, "malformed URL")
			os.Exit(-1)
		}
		host = targetURL.Host
		if len(host) == 0 {
			fmt.Fprintln(os.Stderr, "invalid host name")
			os.Exit(-1)
		}
		// BUG FIX: the original assigned scheme = targetURL.Scheme AFTER
		// the SSL check below, silently undoing scheme = "https" whenever
		// the SSL flag was combined with an http:// (or bare) URL. Take
		// the URL's scheme first, then let the flag upgrade it.
		if targetURL.Scheme != "" {
			scheme = targetURL.Scheme
		}
		if *useSSL || targetURL.Scheme == "https" {
			*useSSL = true
			scheme = "https"
		}
		if match, _ := regexp.MatchString("^[^:]+:[0-9]+$", host); !match {
			// No explicit port: default by scheme.
			if *useSSL {
				host = host + ":443"
			} else {
				host = host + ":80"
			}
		}
		if info := targetURL.User; info != nil {
			// Credentials in the URL become a Basic Authorization header.
			enc := base64.URLEncoding
			encoded := make([]byte, enc.EncodedLen(len(info.String())))
			enc.Encode(encoded, []byte(info.String()))
			headers.Set("Authorization", "Basic "+string(encoded))
		}
		p = strings.Replace(path.Clean(targetURL.Path), "\\", "/", -1)
		if p == "." {
			// path.Clean("") == "." — an empty URL path means root.
			p = "/"
		}
	} else if *useSSL {
		scheme = "https"
		host = "localhost:443"
	}
	headers.Set("Host", host)
	session := &Session{
		scheme:  scheme,
		host:    host,
		conn:    dial(host),
		headers: headers,
		cookies: cookies,
		path:    &p,
	}
	if *useJSON {
		headers.Set("Accept", "*/*")
		// BUG FIX: was the misspelled "appliaction/json".
		headers.Set("Content-Type", "application/json")
	}
	defer session.conn.Close()
	done := false
	for !done {
		done = session.repl()
	}
}
|
package main
import "fmt"
// main demonstrates basic assignment and the common for-loop shapes
// (condition-only, C-style with break, and filtered iteration).
func main() {
	x := 1
	fmt.Println(x)
	x = 2
	fmt.Println(x)
	x = 1
	// Condition-only loop: prints 1 and 2.
	for x < 3 {
		fmt.Println(x)
		x++
	}
	// C-style loop; the break fires on the final iteration anyway,
	// so "loop 5" through "loop 8" are all printed.
	for j := 5; j < 9; j++ {
		fmt.Println("loop", j)
		if j == 8 {
			break
		}
	}
	// Print the even numbers below 5: 0, 2, 4.
	for n := 0; n < 5; n++ {
		if n%2 == 0 {
			fmt.Println(n)
		}
	}
}
|
// 16 july 2014
package ui
import (
"unsafe"
)
// #include "objc_darwin.h"
import "C"
// label wraps a native text-field control used as a static label.
type label struct {
	*controlSingleObject
}

// newLabel creates the native label control and sets its initial text.
func newLabel(text string) Label {
	l := &label{
		controlSingleObject: newControlSingleObject(C.newLabel()),
	}
	l.SetText(text)
	return l
}

// Text returns the label's current text from the underlying control.
func (l *label) Text() string {
	return C.GoString(C.textfieldText(l.id))
}

// SetText updates the label's text; the temporary C string is freed
// once the call returns.
func (l *label) SetText(text string) {
	ctext := C.CString(text)
	defer C.free(unsafe.Pointer(ctext))
	C.textfieldSetText(l.id, ctext)
}
/*TODO
func (l *label) commitResize(c *allocation, d *sizing) {
if !l.standalone && c.neighbor != nil {
c.neighbor.getAuxResizeInfo(d)
if d.neighborAlign.baseline != 0 { // no adjustment needed if the given control has no baseline
// in order for the baseline value to be correct, the label MUST BE AT THE HEIGHT THAT OS X WANTS IT TO BE!
// otherwise, the baseline calculation will be relative to the bottom of the control, and everything will be wrong
origsize := C.controlPreferredSize(l._id)
c.height = int(origsize.height)
newrect := C.struct_xrect{
x: C.intptr_t(c.x),
y: C.intptr_t(c.y),
width: C.intptr_t(c.width),
height: C.intptr_t(c.height),
}
ourAlign := C.alignmentInfo(l._id, newrect)
// we need to find the exact Y positions of the baselines
// fortunately, this is easy now that (x,y) is the bottom-left corner
thisbasey := ourAlign.rect.y + ourAlign.baseline
neighborbasey := d.neighborAlign.rect.y + d.neighborAlign.baseline
// now the amount we have to move the label down by is easy to find
yoff := neighborbasey - thisbasey
// and we just add that
c.y += int(yoff)
}
// in the other case, the most correct thing would be for Label to be aligned to the alignment rect, but I can't get this working, and it looks fine as it is anyway
}
basecommitResize(l, c, d)
}
*/
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package binaryxml
import (
"bytes"
"context"
"fmt"
"io"
"github.com/google/gapid/core/data/binary"
"github.com/google/gapid/core/data/endian"
"github.com/google/gapid/core/log"
"github.com/google/gapid/core/os/device"
"github.com/pkg/errors"
)
// AOSP references:
// https://android.googlesource.com/platform/frameworks/base/+/master/tools/aapt2/XmlFlattener.cpp
// https://android.googlesource.com/platform/frameworks/base/+/master/include/androidfw/ResourceTypes.h
// Resource chunk type identifiers; values mirror ResourceTypes.h in
// AOSP (see the references above).
const (
	resNullType              = 0x0000
	resStringPoolType        = 0x0001
	resTableType             = 0x0002
	resXMLType               = 0x0003
	resXMLFirstChunkType     = 0x0100
	resXMLStartNamespaceType = 0x0100
	resXMLEndNamespaceType   = 0x0101
	resXMLStartElementType   = 0x0102
	resXMLEndElementType     = 0x0103
	resXMLCDataType          = 0x0104
	resXMLLastChunkType      = 0x017f
	resXMLResourceMapType    = 0x0180
	resTablePackageType      = 0x0200
	resTableTypeType         = 0x0201
	resTableTypeSpecType     = 0x0202
	resTableLibraryType      = 0x0203
)

// Ordering markers for when a contextChange takes effect relative to
// its chunk during traversal.
const (
	beforeContextChange = 0x00
	afterContextChange  = 0x01
)

// chunkVisitor is invoked for each chunk while walking the tree.
type chunkVisitor func(*xmlContext, chunk, int)

// contextChange is implemented by chunks that mutate the xmlContext.
type contextChange interface {
	updateContext(*xmlContext)
}
// Decode decodes a binary Android XML file to a string.
func Decode(ctx context.Context, data []byte) (string, error) {
	tree, err := decodeXmlTree(bytes.NewReader(data))
	if err != nil {
		return "", log.Err(ctx, err, "Decoding binary XML")
	}
	return tree.toXmlString(), nil
}
// rootHolder provides a default implementation of the root/setRoot
// accessor pair for embedding in chunk types.
type rootHolder struct {
	rootNode *xmlTree
}

// root returns the tree this chunk belongs to.
func (rh *rootHolder) root() *xmlTree {
	return rh.rootNode
}

// setRoot records the tree this chunk belongs to.
func (rh *rootHolder) setRoot(x *xmlTree) {
	rh.rootNode = x
}

// chunk is a decodable/encodable section of a binary XML document.
type chunk interface {
	// root/setRoot link the chunk to its owning tree.
	root() *xmlTree
	setRoot(x *xmlTree)
	// decode populates the chunk from its raw header and data bytes.
	decode(header, data []byte) error
	// xml renders the chunk as text using the given context.
	xml(*xmlContext) string
	// encode serializes the chunk back to its binary form.
	encode() []byte
}
// decodeXmlTree reads a complete binary-XML document from r and
// returns its root, which must decode as the XML tree chunk itself.
func decodeXmlTree(r io.Reader) (*xmlTree, error) {
	br := endian.Reader(r, device.LittleEndian)
	c, err := decodeChunk(br, &xmlTree{})
	if err != nil {
		return nil, err
	}
	if tree, ok := c.(*xmlTree); ok {
		return tree, nil
	}
	return nil, fmt.Errorf("Expected XML tree, found chunk type %T", c)
}
// decodeChunk reads one chunk (type, header size, total size, then the
// header and data bytes) from r and decodes it into the matching chunk
// value. The resXMLType chunk decodes into x itself, so the tree acts
// as both the entry point and a chunk.
func decodeChunk(r binary.Reader, x *xmlTree) (chunk, error) {
	ty := r.Uint16()
	if err := r.Error(); err != nil {
		return nil, err
	}
	headerSize := r.Uint16()
	if headerSize < 8 {
		return nil, fmt.Errorf("Unexpected header size %d", headerSize)
	}
	dataSize := r.Uint32()
	// BUG FIX: dataSize is the total chunk size including the header
	// (see encodeChunk, which writes len(header)+len(data)+8). A corrupt
	// file with dataSize < headerSize previously underflowed the
	// subtraction below and attempted a ~4GB allocation. Reject it.
	if dataSize < uint32(headerSize) {
		return nil, fmt.Errorf("Chunk size %d smaller than header size %d", dataSize, headerSize)
	}
	header := make([]byte, headerSize-8)
	data := make([]byte, dataSize-uint32(headerSize))
	r.Data(header)
	r.Data(data)
	var c chunk
	switch ty {
	case resXMLResourceMapType:
		c = &xmlResourceMap{}
	case resStringPoolType:
		c = &stringPool{}
	case resXMLCDataType:
		c = &xmlCData{}
	case resXMLEndElementType:
		c = &xmlEndElement{}
	case resXMLEndNamespaceType:
		c = &xmlEndNamespace{}
	case resXMLStartElementType:
		c = &xmlStartElement{}
	case resXMLStartNamespaceType:
		c = &xmlStartNamespace{}
	case resXMLType:
		c = x
	default:
		return nil, fmt.Errorf("Unknown chunk type 0x%x", ty)
	}
	c.setRoot(x)
	err := c.decode(header, data)
	if errors.Cause(err) == io.EOF {
		return nil, fmt.Errorf("Chunk type %T read past end of data", c)
	}
	return c, err
}
// decodeLength reads a length prefix: one 16-bit word, where a set
// high bit means a second word follows, extending the value to 31 bits
// (long form).
func decodeLength(r binary.Reader) uint32 {
	length := uint32(r.Uint16())
	if length&0x8000 != 0 {
		// The long form has never been exercised by real inputs.
		panic("UNTESTED CODE")
		length = (length << 16) | uint32(r.Uint16())
	}
	return length
}
// encodeLength writes a short-form length prefix; the long form
// (lengths >= 0x8000, see decodeLength) is not implemented.
func encodeLength(w binary.Writer, length uint32) {
	if length >= 0x8000 {
		panic("TODO: UNSUPPORTED")
	}
	w.Uint16(uint16(length))
}
// encodeChunk takes functions that output chunk-specific header and data to a writer, and then uses them to
// compute header and chunk sizes, as well as writing the whole chunk to a byte array, which is then returned.
func encodeChunk(chunkType uint16, headerf func(w binary.Writer), dataf func(w binary.Writer)) []byte {
	// Render a section to bytes via a little-endian writer.
	render := func(f func(binary.Writer)) []byte {
		var buf bytes.Buffer
		f(endian.Writer(&buf, device.LittleEndian))
		return buf.Bytes()
	}
	headerBytes := render(headerf)
	dataBytes := render(dataf)

	var out bytes.Buffer
	w := endian.Writer(&out, device.LittleEndian)
	w.Uint16(chunkType)
	// Both sizes include the 8-byte chunk preamble (type + sizes).
	w.Uint16(uint16(len(headerBytes) + 8))
	w.Uint32(uint32(len(headerBytes) + len(dataBytes) + 8))
	w.Data(headerBytes)
	w.Data(dataBytes)
	return out.Bytes()
}
|
package channel
// ReceiveLightMeasurementParams holds the channel parameters used by the ReceiveLightMeasurement operation
// NOTE(review): the StreetlightId spelling (vs. idiomatic StreetlightID)
// looks codegen-derived — keep as-is for compatibility with the generator.
type ReceiveLightMeasurementParams struct {
	// StreetlightId is the ID of the streetlight.
	StreetlightId string
}

// TurnOnParams holds the channel parameters used by the TurnOn operation
type TurnOnParams struct {
	// StreetlightId is the ID of the streetlight.
	StreetlightId string
}

// TurnOffParams holds the channel parameters used by the TurnOff operation
type TurnOffParams struct {
	// StreetlightId is the ID of the streetlight.
	StreetlightId string
}

// DimLightParams holds the channel parameters used by the DimLight operation
type DimLightParams struct {
	// StreetlightId is the ID of the streetlight.
	StreetlightId string
}
package webui
import (
"encoding/json"
"fmt"
"io"
"net/http"
"path"
)
func parseUUID(r *http.Request) (string, error) {
p := path.Base(r.URL.Path)
if len(p) <= 1 {
return "", fmt.Errorf("UUID value %q rejected", p)
}
return p, nil
}
func parseMap(r *http.Request) (v map[string]interface{}, err error) {
data, err := io.ReadAll(r.Body)
if err != nil {
return
}
r.Body.Close()
v = make(map[string]interface{})
return v, json.Unmarshal(data, &v)
}
|
package main
import (
"encoding/json"
"fmt"
"log"
"os"
"strings"
"time"
"github.com/PuerkitoBio/goquery"
"github.com/gocolly/colly"
)
// CoinEvent is one scraped calendar entry for a cryptocurrency.
type CoinEvent struct {
	Coin      string // coin slug derived from the /events/ URL path
	Event     string // event description text
	EventDate string // date portion split off the description
}
// main scrapes the upcoming-events lists for a fixed set of coins from
// cryptocalendar.pro and emails the collected plain-text summary.
func main() {
	// Instantiate default collector
	c := colly.NewCollector(
		// Visit only domains
		colly.AllowedDomains("cryptocalendar.pro"),
		// Cache responses to prevent multiple download of pages
		// even if the collector is restarted
		colly.CacheDir("./page_cache"),
	)
	// Be polite: one request at a time, one second apart.
	c.Limit(&colly.LimitRule{
		DomainGlob:  "*",
		Delay:       1 * time.Second,
		Parallelism: 1,
	})
	coinEvents := make([]CoinEvent, 0, 200)
	stringifyEvents := "Source: https://cryptocalendar.pro \n"
	c.OnHTML(`div[class="col-md-6"]`, func(e *colly.HTMLElement) {
		title := strings.TrimSpace(e.DOM.Find(`h4[class="bold"]`).Text())
		if len(title) > 0 {
			formattedTitle := strings.Join(strings.Fields(title), " ")
			if strings.Contains(strings.ToLower(formattedTitle), "upcoming events") {
				log.Printf("%v \n", formattedTitle)
				stringifyEvents += fmt.Sprintf("%v \n", formattedTitle)
				e.DOM.Find(`li`).Each(func(i int, sel *goquery.Selection) {
					coinEvent := new(CoinEvent)
					eventDesc := strings.Join(strings.Fields(strings.TrimSpace(sel.Text())), " ")
					if strings.Contains(strings.ToLower(eventDesc), "no upcoming events") {
						stringifyEvents += fmt.Sprintf(" %v\n", "No events")
					} else {
						log.Printf("%v\n", eventDesc)
						stringifyEvents += fmt.Sprintf(" %v\n", eventDesc)
						// NOTE(review): "โ" looks like a mojibake separator
						// (possibly an em dash in the page markup) — confirm
						// against the live site before changing it.
						splittedEventDesc := strings.Split(eventDesc, "โ")
						// BUG FIX: descriptions lacking the separator made
						// splittedEventDesc[1] panic (index out of range).
						if len(splittedEventDesc) < 2 {
							return
						}
						coinEvent.Coin = strings.Replace(e.Request.URL.Path, "/events/", "", -1)
						coinEvent.EventDate = splittedEventDesc[0]
						coinEvent.Event = splittedEventDesc[1]
						coinEvents = append(coinEvents, *coinEvent)
					}
				})
			}
		}
	})
	sites := [...]string{
		"https://cryptocalendar.pro/events/bitcoin",
		"https://cryptocalendar.pro/events/bitcoin-cash",
		"https://cryptocalendar.pro/events/bitcoin-cash-abc",
		"https://cryptocalendar.pro/events/litecoin",
		"https://cryptocalendar.pro/events/ethereum",
		"https://cryptocalendar.pro/events/oax",
		"https://cryptocalendar.pro/events/xrp",
	}
	for i := 0; i < len(sites); i++ {
		c.Visit(sites[i])
		fmt.Println("")
	}
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", " ")
	// Dump json to the standard output
	//enc.Encode(coinEvents)
	sendEmail(stringifyEvents)
}
// sendEmail delivers the scraped event summary as a plain-text email.
func sendEmail(content string) {
	mailer := NewSender("blah@gmail.com", "<YOUR EMAIL PASSWORD>")
	// The sender API takes a slice because it supports multiple recipients.
	recipients := []string{"xyz@email.com", "xyz@gmail.com"}
	subject := "Cryptocurrency Events"
	body := mailer.WritePlainEmail(recipients, subject, content)
	mailer.SendMail(recipients, subject, body)
}
|
// Copyright (c) 2019-present Mattermost, Inc. All Rights Reserved.
// See License for license information.
package jobs
import (
"io"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
"github.com/mattermost/mattermost-plugin-api/cluster"
"github.com/mattermost/mattermost-plugin-mscalendar/server/jobs/mock_cluster"
"github.com/mattermost/mattermost-plugin-mscalendar/server/mscalendar"
"github.com/mattermost/mattermost-plugin-mscalendar/server/mscalendar/mock_plugin_api"
"github.com/mattermost/mattermost-plugin-mscalendar/server/remote/mock_remote"
"github.com/mattermost/mattermost-plugin-mscalendar/server/store/mock_store"
"github.com/mattermost/mattermost-plugin-mscalendar/server/utils/bot"
"github.com/mattermost/mattermost-plugin-mscalendar/server/utils/bot/mock_bot"
)
// newTestEnv builds an mscalendar.Env whose dependencies are all gomock
// doubles (plus a no-op logger), suitable for driving the job manager
// in unit tests.
func newTestEnv(ctrl *gomock.Controller) mscalendar.Env {
	s := mock_store.NewMockStore(ctrl)
	poster := mock_bot.NewMockPoster(ctrl)
	mockRemote := mock_remote.NewMockRemote(ctrl)
	mockPluginAPI := mock_plugin_api.NewMockPluginAPI(ctrl)
	logger := &bot.NilLogger{}
	return mscalendar.Env{
		Dependencies: &mscalendar.Dependencies{
			Store:     s,
			Logger:    logger,
			Poster:    poster,
			Remote:    mockRemote,
			PluginAPI: mockPluginAPI,
		},
	}
}
// TestJobManagerOnConfigurationChange checks that OnConfigurationChange
// brings a registered job's activation state in line with its
// config-enabled state, and that deactivation closes the scheduled
// job's closer exactly once.
func TestJobManagerOnConfigurationChange(t *testing.T) {
	for name, tc := range map[string]struct {
		enabled        bool // what isEnabledByConfig reports
		active         bool // whether the job is activated before the change
		numCloseCalls  int  // expected Close calls on the scheduler handle
		expectedActive bool // expected activation state afterwards
	}{
		"Not active, config set to disabled": {
			enabled:        false,
			active:         false,
			numCloseCalls:  0,
			expectedActive: false,
		},
		"Not active, config set to enabled": {
			enabled:        true,
			active:         false,
			numCloseCalls:  0,
			expectedActive: true,
		},
		"Active, config set to disabled": {
			enabled:        false,
			active:         true,
			numCloseCalls:  1,
			expectedActive: false,
		},
		"Active, config set to enabled": {
			enabled:        true,
			active:         true,
			numCloseCalls:  0,
			expectedActive: true,
		},
	} {
		t.Run(name, func(t *testing.T) {
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()
			mockJobsPluginAPI := mock_cluster.NewMockJobPluginAPI(ctrl)
			mc := &mockCloser{numCalls: 0}
			// NOTE(review): scheduleFunc is a package-level test seam; it
			// is replaced here and never restored — fine as long as these
			// tests don't run in parallel with others using it.
			scheduleFunc = func(api cluster.JobPluginAPI, id string, wait cluster.NextWaitInterval, cb func()) (io.Closer, error) {
				cb()
				return mc, nil
			}
			env := newTestEnv(ctrl)
			j := RegisteredJob{
				id:                name,
				interval:          5 * time.Minute,
				work:              func(env mscalendar.Env) {},
				isEnabledByConfig: func(env mscalendar.Env) bool { return tc.enabled },
			}
			jm := NewJobManager(mockJobsPluginAPI, env)
			jm.AddJob(j)
			defer jm.Close()
			if tc.active {
				err := jm.activateJob(j)
				require.Nil(t, err)
			}
			err := jm.OnConfigurationChange(env)
			require.Nil(t, err)
			// Give the scheduled callback a moment to run.
			time.Sleep(5 * time.Millisecond)
			require.Equal(t, tc.expectedActive, jm.isJobActive(j.id))
			require.Equal(t, tc.numCloseCalls, mc.numCalls)
		})
	}
}
// mockCloser is an io.Closer test double that counts Close calls.
type mockCloser struct {
	numCalls int
}

// Close records the call and always reports success.
func (c *mockCloser) Close() error {
	c.numCalls = c.numCalls + 1
	return nil
}
|
package tcx
import (
"encoding/xml"
"fmt"
"io"
"math"
"os"
"time"
)
// Tcx represents the root of a TCX file
type Tcx struct {
	XMLName      xml.Name   `xml:"TrainingCenterDatabase"`
	XMLNs        string     `xml:"xmlns,attr"`
	XMLNsXsi     string     `xml:"xsi,attr,omitempty"`
	XMLNsXsd     string     `xml:"xsd,attr,omitempty"`
	XMLSchemaLoc string     `xml:"schemaLocation,attr,omitempty"`
	Activities   []Activity `xml:"Activities>Activity"`
}

// Activity is one recorded workout: its sport, identifier timestamp,
// recording device, and laps.
type Activity struct {
	Sport   string    `xml:"Sport,attr"`
	ID      time.Time `xml:"Id"`
	Creator Creator   `xml:"Creator"`
	Laps    []Lap     `xml:"Lap"`
}

// Creator identifies the device that recorded the activity.
type Creator struct {
	Name      string `xml:"Name"`
	UnitID    int    `xml:"UnitId"`
	ProductID int    `xml:"ProductID"`
}

// Lap is one timed segment of an activity: summary statistics plus the
// recorded trackpoints.
type Lap struct {
	StartTime                  time.Time    `xml:"StartTime,attr"`
	TotalTimeInSeconds         float64      `xml:"TotalTimeSeconds"`
	DistanceInMeters           float64      `xml:"DistanceMeters"`
	MaximumSpeedInMetersPerSec float64      `xml:"MaximumSpeed"`
	Calories                   float64      `xml:"Calories"`
	Intensity                  string       `xml:"Intensity"`
	TriggerMethod              string       `xml:"TriggerMethod"`
	Track                      []Trackpoint `xml:"Track>Trackpoint"`
}

// Trackpoint is a single position/sensor sample.
type Trackpoint struct {
	Time               time.Time  `xml:"Time"`
	LatitudeInDegrees  float64    `xml:"Position>LatitudeDegrees"`
	LongitudeInDegrees float64    `xml:"Position>LongitudeDegrees"`
	AltitudeInMeters   float64    `xml:"AltitudeMeters"`
	HeartRateInBpm     int        `xml:"HeartRateBpm>Value"`
	Cadence            int        `xml:"Cadence"`
	Extensions         Extensions `xml:"Extensions"`
}

// Extensions carries the vendor extension blocks.
type Extensions struct {
	TrackPoint TPX `xml:"TPX"`
	Lap        LX  `xml:"LX"`
	Course     CX  `xml:"CX"`
}

// TPX is the per-trackpoint extension (speed, run cadence, power).
type TPX struct {
	Speed      float64 `xml:"Speed"`
	RunCadence int     `xml:"RunCadence"`
	Watts      int     `xml:"Watts"`
}

// LX is the per-lap extension.
type LX struct {
	AvgSpeed       float64 `xml:"AvgSpeed"`
	MaxBikeCadence int     `xml:"MaxBikeCadence"`
	AvgRunCadence  int     `xml:"AvgRunCadence"`
	MaxRunCadence  int     `xml:"MaxRunCadence"`
	Steps          int     `xml:"Steps"`
	AvgWatts       int     `xml:"AvgWatts"`
	MaxWatts       int     `xml:"MaxWatts"`
}

// CX is the per-course extension.
type CX struct {
	AvgWatts int `xml:"AvgWatts"`
}

// Pace embeds a bare float64 holding minutes per kilometre
// (see GetPaceFromSpeedInMs, which computes 50/(3*speed) ==
// 1000/(60*speed)).
type Pace struct {
	float64
}
// Parse parses a TCX reader and return a Tcx object.
func Parse(r io.Reader) (*Tcx, error) {
	tcx := NewTcx()
	if err := xml.NewDecoder(r).Decode(tcx); err != nil {
		return nil, fmt.Errorf("couldn't parse tcx data: %v", err)
	}
	return tcx, nil
}
// ParseFile opens the file at filepath and decodes it as a TCX
// document; the file is closed before returning.
func ParseFile(filepath string) (*Tcx, error) {
	file, err := os.Open(filepath)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	return Parse(file)
}
// NewTcx creates and returns a new, empty Tcx document.
func NewTcx() *Tcx {
	return &Tcx{}
}
// TotalDuration sums the recorded time of every lap, truncating each
// lap's fractional seconds (float64 -> Duration conversion).
func (a *Activity) TotalDuration() time.Duration {
	var total time.Duration
	for _, lap := range a.Laps {
		total += time.Duration(lap.TotalTimeInSeconds) * time.Second
	}
	return total
}
// TotalDistance sums the distance (metres) of every lap.
func (a *Activity) TotalDistance() float64 {
	total := 0.0
	for _, lap := range a.Laps {
		total += lap.DistanceInMeters
	}
	return total
}
// AverageHeartbeat returns the mean heart rate (bpm) over every
// trackpoint of every lap.
//
// BUG FIX: an activity with no trackpoints previously divided by zero
// and returned NaN; it now returns 0.
func (a *Activity) AverageHeartbeat() float64 {
	total, count := 0, 0
	for _, lap := range a.Laps {
		for _, pt := range lap.Track {
			total += pt.HeartRateInBpm
			count++
		}
	}
	if count == 0 {
		return 0
	}
	return float64(total) / float64(count)
}
// String renders the pace as "M:SS".
//
// BUG FIX: the original formatted minutes and seconds separately with
// "%.f", which (a) did not zero-pad seconds ("5:7" instead of "5:07")
// and (b) could round the seconds to 60 ("4:60" instead of "5:00").
// Rounding the total seconds first avoids both.
// NOTE(review): a NaN pace (zero-speed input) now renders via int(NaN),
// which is implementation-defined — same class of garbage as before.
func (p *Pace) String() string {
	totalSeconds := int(math.Round(p.float64 * 60))
	return fmt.Sprintf("%d:%02d", totalSeconds/60, totalSeconds%60)
}
// GetPaceFromSpeedInMs converts a speed in metres/second to a pace in
// minutes per kilometre: 1000/(60*speed), written as 50/(3*speed).
func GetPaceFromSpeedInMs(speed float64) *Pace {
	return &Pace{float64: 50 / (speed * 3)}
}
// AveragePace returns the pace corresponding to the mean trackpoint
// speed (TPX extension) across all laps.
func (a *Activity) AveragePace() *Pace {
	sum := 0.0
	count := 0
	for _, lap := range a.Laps {
		for _, pt := range lap.Track {
			sum += pt.Extensions.TrackPoint.Speed
			count++
		}
	}
	// NOTE(review): zero trackpoints yields 0/0 == NaN, matching the
	// original behavior.
	return GetPaceFromSpeedInMs(sum / float64(count))
}
|
package main
import (
"log"
"net/http"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/vickleford/promex/flipper"
"github.com/vickleford/promex/flopper"
)
// main wires the flipper/flopper demo handlers plus a Prometheus
// /metrics endpoint into one mux and serves it on :8000.
func main() {
	// IDIOM FIX: the original locals were named flipper/flopper, which
	// shadowed the imported package names and blocked any further use of
	// those packages in this scope.
	flipHandler := flipper.New()
	flopHandler := flopper.New()
	flopHandler.RegisterMetricsTo(prometheus.DefaultRegisterer)

	mux := http.NewServeMux()
	mux.Handle("/flipper", flipHandler)
	mux.Handle("/flopper", flopHandler)
	mux.Handle("/metrics", promhttp.Handler())

	server := http.Server{
		Addr:         "0.0.0.0:8000",
		ReadTimeout:  5 * time.Second,
		WriteTimeout: 5 * time.Second,
		IdleTimeout:  5 * time.Second,
		Handler:      mux,
	}

	log.Printf("Starting server...")
	// ListenAndServe always returns a non-nil error on exit.
	err := server.ListenAndServe()
	log.Fatalln(err)
}
|
package database
//** Structs for Mutable's config.json **//

// Mutable Configs
// config mirrors the top-level shape of config.json: one envConfig per
// deployment environment.
type config struct {
	Prod envConfig
	Dev  envConfig
}

//** Example Environment Configs //

// envConfig holds per-environment settings; currently empty with the
// Postgres section left commented out as an example.
type envConfig struct {
	// Postgres postgresConfig
}
|
// Copyright (C) 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package status
import (
"context"
"fmt"
"runtime"
"sync"
)
// listeners is the registry of active status listeners keyed by the id
// handed out in RegisterListener; guarded by listenerMutex.
// nextListenerID is the next registration key.
var listeners = map[int]Listener{}
var listenerMutex sync.RWMutex
var nextListenerID int
// EventScope defines the scope for a particular event
type EventScope int

const (
	// TaskScope is an event that applies to the current task
	TaskScope EventScope = iota
	// ProcessScope is an event that only applies to this process
	ProcessScope
	// GlobalScope is an event that applies globally
	GlobalScope
)

// String returns a human-readable name for the scope, or "Unknown" for
// values outside the declared constants.
func (s EventScope) String() string {
	names := [...]string{
		TaskScope:    "Task",
		ProcessScope: "Process",
		GlobalScope:  "Global",
	}
	if s < 0 || int(s) >= len(names) {
		return "Unknown"
	}
	return names[s]
}
// Listener is the interface implemented by types that want to listen to
// application status messages.
type Listener interface {
	OnTaskStart(context.Context, *Task)
	OnTaskProgress(context.Context, *Task)
	OnTaskFinish(context.Context, *Task)
	// OnEvent receives an already-formatted event message and its scope
	// (see onEvent, which does the formatting once per event).
	OnEvent(context.Context, *Task, string, EventScope)
	OnMemorySnapshot(context.Context, runtime.MemStats)
	OnTaskBlock(context.Context, *Task)
	OnTaskUnblock(context.Context, *Task)
	// OnReplayStatusUpdate reports replay progress: a label plus total
	// and finished instruction counts (see onReplayStatusUpdate).
	OnReplayStatusUpdate(context.Context, *Replay, uint64, uint32, uint32)
}
// Unregister is the function returned by RegisterListener and is used
// to unregister the listener.
type Unregister func()

// RegisterListener registers l for status updates. Calling the
// returned function removes the registration.
func RegisterListener(l Listener) Unregister {
	listenerMutex.Lock()
	defer listenerMutex.Unlock()
	id := nextListenerID
	nextListenerID++
	listeners[id] = l
	return func() {
		listenerMutex.Lock()
		delete(listeners, id)
		listenerMutex.Unlock()
	}
}
// onTaskStart fans OnTaskStart out to every registered listener.
func onTaskStart(ctx context.Context, t *Task) {
	listenerMutex.RLock()
	defer listenerMutex.RUnlock()
	for _, l := range listeners {
		l.OnTaskStart(ctx, t)
	}
}

// onTaskProgress fans OnTaskProgress out to every registered listener.
func onTaskProgress(ctx context.Context, t *Task) {
	listenerMutex.RLock()
	defer listenerMutex.RUnlock()
	for _, l := range listeners {
		l.OnTaskProgress(ctx, t)
	}
}

// onBlock fans OnTaskBlock out to every registered listener.
func onBlock(ctx context.Context, t *Task) {
	listenerMutex.RLock()
	defer listenerMutex.RUnlock()
	for _, l := range listeners {
		l.OnTaskBlock(ctx, t)
	}
}

// onUnblock fans OnTaskUnblock out to every registered listener.
func onUnblock(ctx context.Context, t *Task) {
	listenerMutex.RLock()
	defer listenerMutex.RUnlock()
	for _, l := range listeners {
		l.OnTaskUnblock(ctx, t)
	}
}

// onTaskFinish fans OnTaskFinish out to every registered listener.
func onTaskFinish(ctx context.Context, t *Task) {
	listenerMutex.RLock()
	defer listenerMutex.RUnlock()
	for _, l := range listeners {
		l.OnTaskFinish(ctx, t)
	}
}

// onEvent formats the message once and delivers it to every listener;
// formatting is skipped entirely when no listeners are registered.
func onEvent(ctx context.Context, t *Task, scope EventScope, name string, args []interface{}) {
	listenerMutex.RLock()
	defer listenerMutex.RUnlock()
	if len(listeners) > 0 {
		msg := fmt.Sprintf(name, args...)
		for _, l := range listeners {
			l.OnEvent(ctx, t, msg, scope)
		}
	}
}

// onMemorySnapshot fans OnMemorySnapshot out to every listener.
func onMemorySnapshot(ctx context.Context, snapshot runtime.MemStats) {
	listenerMutex.RLock()
	defer listenerMutex.RUnlock()
	for _, l := range listeners {
		l.OnMemorySnapshot(ctx, snapshot)
	}
}

// onReplayStatusUpdate fans OnReplayStatusUpdate out to every listener.
func onReplayStatusUpdate(ctx context.Context, r *Replay, label uint64, totalInstrs, finishedInstrs uint32) {
	listenerMutex.RLock()
	defer listenerMutex.RUnlock()
	for _, l := range listeners {
		l.OnReplayStatusUpdate(ctx, r, label, totalInstrs, finishedInstrs)
	}
}
|
package expr
import (
"image/color"
"net/http"
"time"
"math"
"strconv"
"strings"
)
// DefaultColorList is the fallback series palette used when the
// request does not supply colorList (see getStringArray).
var DefaultColorList = []string{"blue", "green", "red", "purple", "brown", "yellow", "aqua", "grey", "magenta", "pink", "gold", "rose"}
// YAxisSide selects which side of the graph the Y axis is drawn on.
type YAxisSide int

const (
	// YAxisSideRight draws the Y axis on the right-hand side.
	YAxisSideRight YAxisSide = 1
	// YAxisSideLeft draws the Y axis on the left-hand side.
	// IDIOM FIX: this constant was previously untyped (plain int);
	// it now carries the YAxisSide type. Its value is unchanged.
	YAxisSideLeft YAxisSide = 2
)

// getAxisSide maps the yAxisSide request parameter to a YAxisSide,
// falling back to def when empty; anything other than "right" selects
// the left side.
func getAxisSide(s string, def YAxisSide) YAxisSide {
	if s == "" {
		return def
	}
	if s == "right" {
		return YAxisSideRight
	}
	return YAxisSideLeft
}
// LineMode selects how datapoints of a series are joined when drawn.
type LineMode int

const (
	// LineModeSlope joins points with straight sloped segments.
	LineModeSlope LineMode = 1
	// LineModeStaircase joins points with horizontal/vertical steps.
	// IDIOM FIX: these were previously untyped constants; they now
	// carry the LineMode type. Values are unchanged.
	LineModeStaircase LineMode = 2
	// LineModeConnected joins points across gaps.
	LineModeConnected LineMode = 4
)
// AreaMode selects which series are drawn filled.
type AreaMode int

const (
	// AreaModeNone draws no filled series.
	AreaModeNone AreaMode = 1
	// AreaModeFirst fills only the first series.
	// IDIOM FIX: these were previously untyped constants; they now
	// carry the AreaMode type. Values are unchanged.
	AreaModeFirst AreaMode = 2
	// AreaModeAll fills every series.
	AreaModeAll AreaMode = 4
	// AreaModeStacked stacks the filled series.
	AreaModeStacked AreaMode = 8
)

// getAreaMode maps the areaMode request parameter to an AreaMode,
// falling back to def when empty; unrecognized values select none.
func getAreaMode(s string, def AreaMode) AreaMode {
	if s == "" {
		return def
	}
	switch s {
	case "first":
		return AreaModeFirst
	case "all":
		return AreaModeAll
	case "stacked":
		return AreaModeStacked
	}
	return AreaModeNone
}
// PieMode selects how a series is reduced to a single pie-slice value.
type PieMode int

const (
	// PieModeMaximum uses the series maximum.
	PieModeMaximum PieMode = 1
	// PieModeMinimum uses the series minimum.
	// IDIOM FIX: these were previously untyped constants; they now
	// carry the PieMode type. Values are unchanged.
	PieModeMinimum PieMode = 2
	// PieModeAverage uses the series average.
	PieModeAverage PieMode = 4
)

// getPieMode maps the pieMode request parameter to a PieMode, falling
// back to def when empty; unrecognized values select average.
func getPieMode(s string, def PieMode) PieMode {
	if s == "" {
		return def
	}
	if s == "maximum" {
		return PieModeMaximum
	}
	if s == "minimum" {
		return PieModeMinimum
	}
	return PieModeAverage
}
// getLineMode maps the lineMode request parameter to a LineMode,
// falling back to def when empty; unrecognized values select
// connected.
func getLineMode(s string, def LineMode) LineMode {
	switch s {
	case "":
		return def
	case "slope":
		return LineModeSlope
	case "staircase":
		return LineModeStaircase
	default:
		return LineModeConnected
	}
}
// FontWeight selects normal or bold text rendering.
type FontWeight int

const (
	FontWeightNormal FontWeight = iota
	FontWeightBold
)

// getFontWeight treats any truthy fontBold parameter value as bold.
func getFontWeight(s string) FontWeight {
	if TruthyBool(s) {
		return FontWeightBold
	}
	return FontWeightNormal
}
// FontSlant selects the font slant style.
type FontSlant int

const (
	FontSlantNormal FontSlant = iota
	FontSlantItalic
	FontSlantOblique
)

// getFontItalic treats any truthy fontItalic parameter value as
// italic. Note that FontSlantOblique is never produced here.
func getFontItalic(s string) FontSlant {
	if TruthyBool(s) {
		return FontSlantItalic
	}
	return FontSlantNormal
}
// PictureParams collects every rendering option parsed from the
// request query string; see GetPictureParams for the parameter names
// and DefaultParams for the fallbacks.
type PictureParams struct {
	// Canvas geometry, colors and fonts.
	Width     float64
	Height    float64
	Margin    int
	LogBase   float64
	FgColor   string
	BgColor   string
	MajorLine string
	MinorLine string
	FontName  string
	FontSize  float64
	FontBold  FontWeight
	FontItalic FontSlant
	// Visibility toggles.
	GraphOnly  bool
	HideLegend bool
	HideGrid   bool
	HideAxes   bool
	HideYAxis  bool
	HideXAxis  bool
	YAxisSide  YAxisSide
	// Titles and timezone.
	Title       string
	Vtitle      string
	VtitleRight string
	Tz          *time.Location
	// Series drawing modes.
	ConnectedLimit int
	LineMode       LineMode
	AreaMode       AreaMode
	AreaAlpha      float64
	PieMode        PieMode
	LineWidth      float64
	ColorList      []string
	// Axis ranges and steps.
	YMin   float64
	YMax   float64
	XMin   float64
	XMax   float64
	YStep  float64
	XStep  float64
	MinorY int
	XFormat string
	// Dual-Y-axis (left/right) settings.
	YMaxLeft    float64
	YLimitLeft  float64
	YMaxRight   float64
	YLimitRight float64
	YMinLeft    float64
	YMinRight   float64
	YStepL      float64
	YStepR      float64
	// Misc rendering flags.
	UniqueLegend   bool
	DrawNullAsZero bool
	DrawAsInfinite bool
	YUnitSystem    string
	YDivisors      []float64
	RightWidth     float64
	RightDashed    bool
	RightColor     string
	LeftWidth      float64
	LeftDashed     bool
	LeftColor      string
	MinorGridLineColor string
	MajorGridLineColor string
}
// GetPictureParams builds a PictureParams from the request's form
// values, falling back to DefaultParams for anything absent. The
// hideLegend default is dynamic: legends are hidden by default when
// more than 10 series are being plotted.
func GetPictureParams(r *http.Request, metricData []*MetricData) PictureParams {
	return PictureParams{
		Width:          getFloat64(r.FormValue("width"), DefaultParams.Width),
		Height:         getFloat64(r.FormValue("height"), DefaultParams.Height),
		Margin:         getInt(r.FormValue("margin"), DefaultParams.Margin),
		LogBase:        getLogBase(r.FormValue("logBase")),
		FgColor:        getString(r.FormValue("fgcolor"), DefaultParams.FgColor),
		BgColor:        getString(r.FormValue("bgcolor"), DefaultParams.BgColor),
		MajorLine:      getString(r.FormValue("majorLine"), DefaultParams.MajorLine),
		MinorLine:      getString(r.FormValue("minorLine"), DefaultParams.MinorLine),
		FontName:       getString(r.FormValue("fontName"), DefaultParams.FontName),
		FontSize:       getFloat64(r.FormValue("fontSize"), DefaultParams.FontSize),
		FontBold:       getFontWeight(r.FormValue("fontBold")),
		FontItalic:     getFontItalic(r.FormValue("fontItalic")),
		GraphOnly:      getBool(r.FormValue("graphOnly"), DefaultParams.GraphOnly),
		HideLegend:     getBool(r.FormValue("hideLegend"), len(metricData) > 10),
		HideGrid:       getBool(r.FormValue("hideGrid"), DefaultParams.HideGrid),
		HideAxes:       getBool(r.FormValue("hideAxes"), DefaultParams.HideAxes),
		HideYAxis:      getBool(r.FormValue("hideYAxis"), DefaultParams.HideYAxis),
		HideXAxis:      getBool(r.FormValue("hideXAxis"), DefaultParams.HideXAxis),
		YAxisSide:      getAxisSide(r.FormValue("yAxisSide"), DefaultParams.YAxisSide),
		Title:          getString(r.FormValue("title"), DefaultParams.Title),
		Vtitle:         getString(r.FormValue("vtitle"), DefaultParams.Vtitle),
		VtitleRight:    getString(r.FormValue("vtitleRight"), DefaultParams.VtitleRight),
		Tz:             getTimeZone(r.FormValue("tz"), DefaultParams.Tz),
		ConnectedLimit: getInt(r.FormValue("connectedLimit"), DefaultParams.ConnectedLimit),
		LineMode:       getLineMode(r.FormValue("lineMode"), DefaultParams.LineMode),
		AreaMode:       getAreaMode(r.FormValue("areaMode"), DefaultParams.AreaMode),
		AreaAlpha:      getFloat64(r.FormValue("areaAlpha"), DefaultParams.AreaAlpha),
		PieMode:        getPieMode(r.FormValue("pieMode"), DefaultParams.PieMode),
		LineWidth:      getFloat64(r.FormValue("lineWidth"), DefaultParams.LineWidth),
		ColorList:      getStringArray(r.FormValue("colorList"), DefaultParams.ColorList),
		YMin:           getFloat64(r.FormValue("yMin"), DefaultParams.YMin),
		YMax:           getFloat64(r.FormValue("yMax"), DefaultParams.YMax),
		YStep:          getFloat64(r.FormValue("yStep"), DefaultParams.YStep),
		XMin:           getFloat64(r.FormValue("xMin"), DefaultParams.XMin),
		XMax:           getFloat64(r.FormValue("xMax"), DefaultParams.XMax),
		XStep:          getFloat64(r.FormValue("xStep"), DefaultParams.XStep),
		XFormat:        getString(r.FormValue("xFormat"), DefaultParams.XFormat),
		MinorY:         getInt(r.FormValue("minorY"), DefaultParams.MinorY),
		UniqueLegend:   getBool(r.FormValue("uniqueLegend"), DefaultParams.UniqueLegend),
		DrawNullAsZero: getBool(r.FormValue("drawNullAsZero"), DefaultParams.DrawNullAsZero),
		DrawAsInfinite: getBool(r.FormValue("drawAsInfinite"), DefaultParams.DrawAsInfinite),
		YMinLeft:       getFloat64(r.FormValue("yMinLeft"), DefaultParams.YMinLeft),
		YMinRight:      getFloat64(r.FormValue("yMinRight"), DefaultParams.YMinRight),
		YMaxLeft:       getFloat64(r.FormValue("yMaxLeft"), DefaultParams.YMaxLeft),
		YMaxRight:      getFloat64(r.FormValue("yMaxRight"), DefaultParams.YMaxRight),
		YStepL:         getFloat64(r.FormValue("yStepLeft"), DefaultParams.YStepL),
		YStepR:         getFloat64(r.FormValue("yStepRight"), DefaultParams.YStepR),
		YLimitLeft:     getFloat64(r.FormValue("yLimitLeft"), DefaultParams.YLimitLeft),
		YLimitRight:    getFloat64(r.FormValue("yLimitRight"), DefaultParams.YLimitRight),
		YUnitSystem:    getString(r.FormValue("yUnitSystem"), DefaultParams.YUnitSystem),
		YDivisors:      getFloatArray(r.FormValue("yDivisors"), DefaultParams.YDivisors),
		RightWidth:     getFloat64(r.FormValue("rightWidth"), DefaultParams.RightWidth),
		RightDashed:    getBool(r.FormValue("rightDashed"), DefaultParams.RightDashed),
		RightColor:     getString(r.FormValue("rightColor"), DefaultParams.RightColor),
		LeftWidth:      getFloat64(r.FormValue("leftWidth"), DefaultParams.LeftWidth),
		LeftDashed:     getBool(r.FormValue("leftDashed"), DefaultParams.LeftDashed),
		LeftColor:      getString(r.FormValue("leftColor"), DefaultParams.LeftColor),
		MajorGridLineColor: getString(r.FormValue("majorGridLineColor"), DefaultParams.MajorGridLineColor),
		MinorGridLineColor: getString(r.FormValue("minorGridLineColor"), DefaultParams.MinorGridLineColor),
	}
}
// getStringArray parses a comma-separated form value into a slice of
// whitespace-trimmed strings. An empty input returns the provided
// default slice unchanged.
func getStringArray(s string, def []string) []string {
	if s == "" {
		return def
	}
	parts := strings.Split(s, ",")
	// Pre-size: the result has exactly one entry per comma-separated part.
	strs := make([]string, 0, len(parts))
	for _, v := range parts {
		strs = append(strs, strings.TrimSpace(v))
	}
	return strs
}
// getFloatArray parses a comma-separated form value into a slice of
// float64. Elements are whitespace-trimmed before parsing (consistent
// with getStringArray). If the input is empty or ANY element fails to
// parse, the provided default slice is returned unchanged — no partial
// results.
func getFloatArray(s string, def []float64) []float64 {
	if s == "" {
		return def
	}
	parts := strings.Split(s, ",")
	fs := make([]float64, 0, len(parts))
	for _, v := range parts {
		f, err := strconv.ParseFloat(strings.TrimSpace(v), 64)
		if err != nil {
			// Reject the whole list on the first bad element.
			return def
		}
		fs = append(fs, f)
	}
	return fs
}
func getLogBase(s string) float64 {
if s == "e" {
return math.E
}
b, err := strconv.ParseFloat(s, 64)
if err != nil || b < 1 {
return 0
}
return b
}
func getTimeZone(s string, def *time.Location) *time.Location {
if s == "" {
return def
}
tz, err := time.LoadLocation(s)
if err != nil {
return def
}
return tz
}
// string2RGBA resolves a color by name from the colors table, falling
// back to parsing it as a hex triplet.
func string2RGBA(clr string) color.RGBA {
	c, ok := colors[clr]
	if !ok {
		c = hexToRGBA(clr)
	}
	return c
}
// https://code.google.com/p/sadbox/source/browse/color/hex.go
// hexToColor converts an Hex string to a RGB triple.
func hexToRGBA(h string) color.RGBA {
var r, g, b uint8
if len(h) > 0 && h[0] == '#' {
h = h[1:]
}
if len(h) == 3 {
h = h[:1] + h[:1] + h[1:2] + h[1:2] + h[2:] + h[2:]
}
alpha := byte(255)
if len(h) == 6 {
if rgb, err := strconv.ParseUint(string(h), 16, 32); err == nil {
r = uint8(rgb >> 16)
g = uint8(rgb >> 8)
b = uint8(rgb)
}
}
if len(h) == 8 {
if rgb, err := strconv.ParseUint(string(h), 16, 32); err == nil {
r = uint8(rgb >> 24)
g = uint8(rgb >> 16)
b = uint8(rgb >> 8)
alpha = uint8(rgb)
}
}
return color.RGBA{r, g, b, alpha}
}
// DefaultParams holds the fallback rendering parameters applied when a
// request does not override a field. math.NaN() marks numeric fields as
// "auto" — NOTE(review): assumed to be resolved later from the plotted
// data; the resolution code is outside this file section.
var DefaultParams = PictureParams{
	Width:      330,
	Height:     250,
	Margin:     10,
	LogBase:    0, // 0 = linear scale (see getLogBase)
	FgColor:    "white",
	BgColor:    "black",
	MajorLine:  "rose",
	MinorLine:  "grey",
	FontName:   "Sans",
	FontSize:   10,
	FontBold:   FontWeightNormal,
	FontItalic: FontSlantNormal,

	GraphOnly:  false,
	HideLegend: false,
	HideGrid:   false,
	HideAxes:   false,
	HideYAxis:  false,
	HideXAxis:  false,
	YAxisSide:  YAxisSideLeft,

	Title:       "",
	Vtitle:      "",
	VtitleRight: "",

	Tz: time.Local,

	ConnectedLimit: math.MaxInt32,
	LineMode:       LineModeSlope,
	AreaMode:       AreaModeNone,
	AreaAlpha:      math.NaN(),
	PieMode:        PieModeAverage,
	LineWidth:      1.2,
	ColorList:      DefaultColorList,

	YMin:    math.NaN(),
	YMax:    math.NaN(),
	YStep:   math.NaN(),
	XMin:    math.NaN(),
	XMax:    math.NaN(),
	XStep:   math.NaN(),
	XFormat: "",
	MinorY:  1,

	UniqueLegend:   false,
	DrawNullAsZero: false,
	DrawAsInfinite: false,

	YMinLeft:    math.NaN(),
	YMinRight:   math.NaN(),
	YMaxLeft:    math.NaN(),
	YMaxRight:   math.NaN(),
	YStepL:      math.NaN(),
	YStepR:      math.NaN(),
	YLimitLeft:  math.NaN(),
	YLimitRight: math.NaN(),
	YUnitSystem: "si",
	YDivisors:   []float64{4, 5, 6},

	RightWidth:  1.2,
	RightDashed: false,
	RightColor:  "",
	LeftWidth:   1.2,
	LeftDashed:  false,
	LeftColor:   "",

	MajorGridLineColor: "white",
	MinorGridLineColor: "grey",
}
|
package abcc
import (
"encoding/json"
"fmt"
"github.com/hexoul/go-abcc/types"
)
// Me obtains your own personal asset information
// arg: -
// src: https://api.abcc.com/api/v1/members/me
// doc: -
func (s *Client) Me(options *types.Options) (*types.UserInfo, error) {
	query := s.parseOptions("/api/v1/members/me", s.fillTimestampFromServer(options))
	body, err := s.getResponse(fmt.Sprintf("%s/members/me?%s", baseURL, query))
	if err != nil {
		return nil, err
	}
	info := new(types.UserInfo)
	if err := json.Unmarshal(body, info); err != nil {
		return nil, err
	}
	return info, nil
}
// Trades list you ordered
// arg: market_code, start_time, end_time, start_id, end_id, direction, page, per_page
// src: https://api.abcc.com/api/v1/members/trades
// doc: -
func (s *Client) Trades(options *types.Options) (*types.Trades, error) {
	query := s.parseOptions("/api/v1/members/trades", s.fillTimestampFromServer(options))
	body, err := s.getResponse(fmt.Sprintf("%s/members/trades?%s", baseURL, query))
	if err != nil {
		return nil, err
	}
	trades := new(types.Trades)
	if err := json.Unmarshal(body, trades); err != nil {
		return nil, err
	}
	return trades, nil
}
|
package auction
import (
"io"
"net/http"
"net/http/httptest"
"os"
"testing"
"time"
)
// TestGetAuctionHouseSnapshotURL serves a recorded auction-data payload
// from a fixture and checks that GetSnapshotURL extracts the snapshot
// URL and last-modified timestamp from it.
func TestGetAuctionHouseSnapshotURL(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.EscapedPath() != "/wow/auction/data/server" {
			t.Errorf("Expected request to โ/wow/auction/data/serverโ, got โ%sโ", r.URL.EscapedPath())
		}
		// Open the fixture first; the original deferred file.Close()
		// BEFORE checking the Open error.
		file, err := os.Open("./fixtures/api_auction_call.json")
		if err != nil {
			t.Errorf("unexpected error loading fixture '%s'", err)
			return
		}
		defer file.Close()
		// Headers must be set before WriteHeader — the original set
		// Content-Type after WriteHeader, so it was never sent.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		io.Copy(w, file)
	}))
	defer ts.Close()
	const expectedURL = "http://auction-api-eu.worldofwarcraft.com/auction-data/random-number/auctions.json"
	// Check the parse error before using the parsed value (the original
	// converted to jsonTime before looking at err).
	parsedLastMod, err := time.Parse(time.RFC1123, "Fri, 10 Aug 2018 18:42:37 UTC")
	if err != nil {
		t.Fatalf("Unexpected error parsing expectedLastMod date '%s'", err)
	}
	expectedLastMod := jsonTime(parsedLastMod)
	snapshot, err := GetSnapshotURL(ts.URL, "apikey", "server")
	if err != nil {
		t.Fatalf("unexpected error GetAuctionHouseSnapshotURL() %s", err)
	}
	if snapshot.URL != expectedURL {
		t.Errorf("Unexpected snapshot URL\n* expected: '%s'\n* got: %s", expectedURL, snapshot.URL)
	}
	if expectedLastMod != snapshot.LastModified {
		t.Errorf("unexpected LastModified\n* expected: '%s'\n* got: %s", expectedLastMod, snapshot.LastModified)
	}
}
|
package main
import (
"fmt"
"io"
"os"
//"path/filepath"
//"strings"
"sort"
)
func dirTree(out io.Writer, path string, printFiles bool) error {
current_file, err := os.Open(path)
if err != nil {
return err
}
res, err1 := current_file.Readdir(-1) // ัะผะพััะธะผ, ััะพ ะฒ ะดะธัะตะบัะพัะธะธ
sort.Sort(ByName(res)) // ัะพััะธััะตะผ
if !printFiles { // ะฟัะธ ะฝะตะพะฑั
ะพะดะธะผะพััะธ ะธัะบะปััะฐะตะผ ัะฐะนะปั
res = filterDirs(res)
}
for index, element := range res {
if element.IsDir() {
if index == len(res) - 1 {
fmt.Fprintf(out, "โโโโ%v\n", element.Name())
printFolder(out, path + "/" + element.Name(), printFiles, "")
} else {
fmt.Fprintf(out, "โโโโ%v\n", element.Name())
printFolder(out, path + "/" + element.Name(), printFiles, "โ")
}
} else if printFiles {
if element.Size() == 0 {
if index == len(res) - 1 {
fmt.Fprintf(out, "โโโโ%v (empty)\n", element.Name())
} else {
fmt.Fprintf(out, "โโโโ%v (empty)\n", element.Name())
}
} else {
if index == len(res) - 1 {
fmt.Fprintf(out, "โโโโ%v (%vb)\n", element.Name(), element.Size())
} else {
fmt.Fprintf(out, "โโโโ%v (%vb)\n", element.Name(), element.Size())
}
}
}
}
return err1
}
func printFolder(out io.Writer, path string, printFiles bool, outputPrefix string) error {
current_file, err := os.Open(path)
if err != nil {
return err
}
res, err1 := current_file.Readdir(-1) // ัะผะพััะธะผ, ััะพ ะฒ ะดะธัะตะบัะพัะธะธ
sort.Sort(ByName(res)) // ัะพััะธััะตะผ
if !printFiles { // ะฟัะธ ะฝะตะพะฑั
ะพะดะธะผะพััะธ ะธัะบะปััะฐะตะผ ัะฐะนะปั
res = filterDirs(res)
}
for index, element := range res {
if element.IsDir() {
if index == len(res) - 1 {
fmt.Fprintf(out, "%v\tโโโโ%v\n", outputPrefix, element.Name())
printFolder(out, path + "/" + element.Name(), printFiles, outputPrefix + "\t")
} else {
fmt.Fprintf(out, "%v\tโโโโ%v\n", outputPrefix, element.Name())
printFolder(out, path + "/" + element.Name(), printFiles, outputPrefix + "\tโ")
}
} else if printFiles {
if element.Size() == 0 {
if index == len(res) - 1 {
fmt.Fprintf(out, "%v\tโโโโ%v (empty)\n", outputPrefix, element.Name())
} else {
fmt.Fprintf(out, "%v\tโโโโ%v (empty)\n", outputPrefix, element.Name())
}
} else {
if index == len(res) - 1 {
fmt.Fprintf(out, "%v\tโโโโ%v (%vb)\n", outputPrefix, element.Name(), element.Size())
} else {
fmt.Fprintf(out, "%v\tโโโโ%v (%vb)\n", outputPrefix, element.Name(), element.Size())
}
}
}
}
return err1
}
// filterDirs returns only the directory entries from files, dropping
// plain files. The input slice is not modified.
func filterDirs(files []os.FileInfo) []os.FileInfo {
	dirs := make([]os.FileInfo, 0, len(files))
	for _, fi := range files {
		if fi.IsDir() {
			dirs = append(dirs, fi)
		}
	}
	return dirs
}
// ByName sorts a directory listing lexicographically by entry name
// (implements sort.Interface).
type ByName []os.FileInfo

func (a ByName) Len() int           { return len(a) }
func (a ByName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByName) Less(i, j int) bool { return a[i].Name() < a[j].Name() }
// main prints the directory tree for os.Args[1]; pass "-f" as a third
// argument to include files in the listing.
func main() {
	if len(os.Args) != 2 && len(os.Args) != 3 {
		panic("usage go run main.go . [-f]")
	}
	root := os.Args[1]
	withFiles := len(os.Args) == 3 && os.Args[2] == "-f"
	if err := dirTree(os.Stdout, root, withFiles); err != nil {
		panic(err.Error())
	}
}
|
// Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bulk
import (
"bytes"
"encoding/json"
"fmt"
"github.com/go-logr/logr"
"github.com/gardener/test-infra/pkg/util"
)
// 50 mb
const maxBufferSize = 50 * 1024 * 1024
// Marshal renders this bulk entry as Elasticsearch bulk JSON: the
// serialized metadata action immediately followed by the raw source
// document bytes.
func (b *Bulk) Marshal() ([]byte, error) {
	meta, err := util.MarshalNoHTMLEscape(b.Metadata)
	if err != nil {
		return nil, fmt.Errorf("cannot marshal ElasticsearchBulk %s", err.Error())
	}
	out := make([]byte, 0, len(meta)+len(b.Source))
	out = append(out, meta...)
	out = append(out, b.Source...)
	return out, nil
}
// NewList creates a BulkList in which every source document shares the
// same metadata.
func NewList(meta interface{}, sources [][]byte) BulkList {
	bulks := make([]*Bulk, 0, len(sources))
	for _, src := range sources {
		bulks = append(bulks, &Bulk{Metadata: meta, Source: src})
	}
	return bulks
}
// Marshal serializes every bulk in the list, packing the results into
// byte chunks of at most maxBufferSize each. The final (possibly
// smaller, possibly empty) chunk is always included.
func (l BulkList) Marshal() ([][]byte, error) {
	chunks := [][]byte{}
	current := bytes.NewBuffer([]byte{})
	for _, b := range l {
		entry, err := b.Marshal()
		if err != nil {
			return nil, err
		}
		// Start a fresh chunk when this entry would overflow the cap.
		if current.Len()+len(entry) >= maxBufferSize {
			chunks = append(chunks, current.Bytes())
			current = bytes.NewBuffer([]byte{})
		}
		current.Write(entry)
	}
	return append(chunks, current.Bytes()), nil
}
// ParseExportedFiles reads jsondocuments line by line from an expected file where multiple jsons are separated by newline.
// It first tries to interpret docs as a single plain JSON object;
// failing that it falls back to newline-delimited bulk parsing.
func ParseExportedFiles(log logr.Logger, name string, stepMeta interface{}, docs []byte) BulkList {
	// first try to parse document as normal json.
	var jsonBody map[string]interface{}
	err := json.Unmarshal(docs, &jsonBody)
	if err == nil {
		// Attach the step metadata under the "tm" key before re-encoding.
		jsonBody["tm"] = stepMeta
		patchedDoc, err := util.MarshalNoHTMLEscape(jsonBody)
		if err != nil {
			// Marshal failure: drop the document, return an empty list.
			log.Info("cannot marshal exported json with metadata", "file", name)
			return make(BulkList, 0)
		}
		// Index the single document under "tm-<testdef name>".
		bulk := &Bulk{
			Source: patchedDoc,
			Metadata: ESMetadata{
				Index: ESIndex{
					Index: fmt.Sprintf("tm-%s", name),
					Type:  "_doc",
				},
			},
		}
		return []*Bulk{bulk}
	}
	// if the document is not in json format try to parse it as newline delimited json
	return parseExportedBulkFormat(log, name, stepMeta, docs)
}
// parseExportedBulkFormat parses newline-delimited JSON. A line carrying
// an "index" key is treated as bulk metadata for the NEXT document only;
// documents without a preceding index line get a default "tm-<name>"
// index. Unparseable lines are skipped with a verbose log entry.
func parseExportedBulkFormat(log logr.Logger, name string, stepMeta interface{}, docs []byte) BulkList {
	bulks := make(BulkList, 0)
	// meta holds the most recent "index" action; it applies to exactly
	// one following document and is reset afterwards.
	var meta map[string]interface{}
	for doc := range util.ReadLines(docs) {
		var jsonBody map[string]interface{}
		err := json.Unmarshal(doc, &jsonBody)
		if err != nil {
			log.V(5).Info(fmt.Sprintf("cannot unmarshal document %s", err.Error()))
			continue
		}
		// if a bulk is defined we prefix the index with tm- to ensure it does not collide with any other index
		if jsonBody["index"] != nil {
			meta = jsonBody
			meta["index"].(map[string]interface{})["_index"] = fmt.Sprintf("tm-%s", meta["index"].(map[string]interface{})["_index"])
			continue
		}
		// construct own bulk with index = tm-<testdef name>
		jsonBody["tm"] = stepMeta
		patchedDoc, err := util.MarshalNoHTMLEscape(jsonBody) // json.Marshal(jsonBody)
		if err != nil {
			log.V(3).Info(fmt.Sprintf("cannot marshal artifact %s", err.Error()))
			continue
		}
		bulk := &Bulk{
			Source:   patchedDoc,
			Metadata: meta,
		}
		// No explicit index line seen: fall back to the default index.
		if meta == nil {
			bulk.Metadata = ESMetadata{
				Index: ESIndex{
					Index: fmt.Sprintf("tm-%s", name),
					Type:  "_doc",
				},
			}
		}
		bulks = append(bulks, bulk)
		meta = nil // metadata applies to one document only
	}
	return bulks
}
|
package main
// game holds the per-player view of a game's state.
type game struct {
	id         int       // unexported game identifier
	NbPlayer   int       // number of players in the game
	Statistics statistic // player statistics (type declared elsewhere in this package)
	Team       int       // team identifier for this player
}
|
package main
import "fmt"
// main demonstrates string matching: only the branch whose value equals
// expr prints.
func main() {
	expr := "favSport"
	if expr == "favSport" {
		fmt.Println("Should Print")
	} else if expr == "favColor" {
		fmt.Println("Should Not Print")
	}
}
|
package main
import (
"fmt"
"sort"
)
// foo adapts []int32 to sort.Interface for ascending in-place sorting.
type foo []int32

func (a foo) Len() int           { return len(a) }
func (a foo) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a foo) Less(i, j int) bool { return a[i] < a[j] }
// Complete the maximumToys function below.
// maximumToys returns how many toys can be bought with budget k by
// greedily taking the cheapest remaining toy. The prices slice is
// sorted in place (as before).
//
// Fixes over the original: the old version returned maxToys-1, which
// undercounted by one whenever every toy was affordable or the budget
// was spent exactly; it also printed debug output on every iteration.
func maximumToys(prices []int32, k int32) int32 {
	sort.Slice(prices, func(i, j int) bool { return prices[i] < prices[j] })
	var count int32
	for _, price := range prices {
		// Stop at the first toy we can no longer afford.
		if price > k {
			break
		}
		k -= price
		count++
	}
	return count
}
// main runs maximumToys on a sample price list and prints the result.
func main() {
	prices := []int32{1, 12, 5, 111, 200, 1000, 10}
	budget := int32(50)
	fmt.Println(maximumToys(prices, budget))
}
|
package cf
import (
"log"
"github.com/cloudfoundry-community/go-cfclient"
"github.com/cloudfoundry-community/go-cfenv"
)
// Bind - Bind will add a route to the App published on CF: it creates a
// route <routehost>.<searchdomain> in the given space and maps it to
// the app on port 8080.
func Bind(routehost string, appGUID string, spaceGUID string, Username string, Password string, searchdomain string) (err error) {
	log.Println("Running CF Bind workflow")
	// The original discarded this error, which leads to a nil-pointer
	// dereference at appEnv.CFAPI when not running inside CF.
	appEnv, err := cfenv.Current()
	if err != nil {
		return err
	}
	c := &cfclient.Config{
		ApiAddress:        appEnv.CFAPI,
		Username:          Username,
		Password:          Password,
		SkipSslValidation: true,
	}
	client, err := cfclient.NewClient(c)
	if err != nil {
		return err
	}
	SharedDomains, err := client.ListSharedDomains()
	if err != nil {
		return err
	}
	// Pick the shared domain matching searchdomain.
	// NOTE(review): if no domain matches, sharedDomain stays its zero
	// value and CreateRoute below fails server-side with an empty
	// DomainGuid — consider returning an explicit error here.
	var sharedDomain cfclient.SharedDomain
	for _, domain := range SharedDomains {
		if domain.Name == searchdomain {
			sharedDomain = domain
		}
	}
	spaceroute, err := client.CreateRoute(cfclient.RouteRequest{
		DomainGuid: sharedDomain.Guid,
		SpaceGuid:  spaceGUID,
		Host:       routehost,
	})
	if err != nil {
		return err
	}
	_, err = client.MappingAppAndRoute(cfclient.RouteMappingRequest{
		RouteGUID: spaceroute.Guid,
		AppGUID:   appGUID,
		AppPort:   8080, // CF default app port
	})
	if err != nil {
		return err
	}
	return nil
}
|
/*
Copyright 2019 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package latest
// NOTE(review): the field comments below are fixtures consumed by the
// schema-generator tests; reword them only in lockstep with those tests.

// TestStruct for testing the schema generator.
type TestStruct struct {
	// RequiredField should be required
	RequiredField string `yaml:"reqField" yamltags:"required"`
	InlineStruct  `yaml:"inline"`
	// Field4 should be listed last in `preferredOrder`
	Field4 string `yaml:"field4"`
}
// NOTE(review): the field comments below are fixtures consumed by the
// schema-generator tests; reword them only in lockstep with those tests.

// InlineStruct is embedded inline into TestStruct.
type InlineStruct struct {
	// Field1 should be the first field
	Field1 string `yaml:"field1"`
	// Field2 should be the second field. Valid modes are
	// `a`: first letter
	// `b` (default): second letter
	Field2 string `yaml:"field2"`
	// Field3 should be the third field and listed in required
	RequiredField3 string `yaml:"reqField3" yamltags:"required"`
}
|
package parser_test
import (
"github.com/bytesparadise/libasciidoc/pkg/types"
. "github.com/bytesparadise/libasciidoc/testsupport"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Parsing cases for AsciiDoc checked (task-list) unordered lists: each
// It feeds a snippet to ParseDocument and compares the full expected
// AST. "[*]" and "[x]" both map to Checked, "[ ]" to Unchecked, and a
// plain item to NoCheck; the check style is mirrored onto the item's
// paragraph attributes.
var _ = Describe("checked lists", func() {

	Context("in final documents", func() {

		// Dash-bulleted checklist carrying a block title.
		It("with title and dashes", func() {
			source := `.Checklist
- [*] checked
- [x] also checked
- [ ] not checked
- normal list item`
			expected := &types.Document{
				Elements: []interface{}{
					&types.List{
						Kind: types.UnorderedListKind,
						Attributes: types.Attributes{
							types.AttrTitle: "Checklist",
						},
						Elements: []types.ListElement{
							&types.UnorderedListElement{
								BulletStyle: types.Dash,
								CheckStyle:  types.Checked,
								Elements: []interface{}{
									&types.Paragraph{
										Attributes: types.Attributes{
											types.AttrCheckStyle: types.Checked,
										},
										Elements: []interface{}{
											&types.StringElement{
												Content: "checked",
											},
										},
									},
								},
							},
							&types.UnorderedListElement{
								BulletStyle: types.Dash,
								CheckStyle:  types.Checked,
								Elements: []interface{}{
									&types.Paragraph{
										Attributes: types.Attributes{
											types.AttrCheckStyle: types.Checked,
										},
										Elements: []interface{}{
											&types.StringElement{
												Content: "also checked",
											},
										},
									},
								},
							},
							&types.UnorderedListElement{
								BulletStyle: types.Dash,
								CheckStyle:  types.Unchecked,
								Elements: []interface{}{
									&types.Paragraph{
										Attributes: types.Attributes{
											types.AttrCheckStyle: types.Unchecked,
										},
										Elements: []interface{}{
											&types.StringElement{
												Content: "not checked",
											},
										},
									},
								},
							},
							&types.UnorderedListElement{
								BulletStyle: types.Dash,
								CheckStyle:  types.NoCheck,
								Elements: []interface{}{
									&types.Paragraph{
										Elements: []interface{}{
											&types.StringElement{
												Content: "normal list item",
											},
										},
									},
								},
							},
						},
					},
				},
			}
			Expect(ParseDocument(source)).To(MatchDocument(expected))
		})

		// The %interactive option upgrades check styles to their
		// *Interactive variants.
		It("with interactive checkboxes", func() {
			source := `[%interactive]
* [*] checked
* [x] also checked
* [ ] not checked
* normal list item`
			expected := &types.Document{
				Elements: []interface{}{
					&types.List{
						Kind: types.UnorderedListKind,
						Attributes: types.Attributes{
							types.AttrOptions: types.Options{
								types.AttrInteractive,
							},
						},
						Elements: []types.ListElement{
							&types.UnorderedListElement{
								BulletStyle: types.OneAsterisk,
								CheckStyle:  types.CheckedInteractive,
								Elements: []interface{}{
									&types.Paragraph{
										Attributes: types.Attributes{
											types.AttrCheckStyle: types.CheckedInteractive,
										},
										Elements: []interface{}{
											&types.StringElement{
												Content: "checked",
											},
										},
									},
								},
							},
							&types.UnorderedListElement{
								BulletStyle: types.OneAsterisk,
								CheckStyle:  types.CheckedInteractive,
								Elements: []interface{}{
									&types.Paragraph{
										Attributes: types.Attributes{
											types.AttrCheckStyle: types.CheckedInteractive,
										},
										Elements: []interface{}{
											&types.StringElement{
												Content: "also checked",
											},
										},
									},
								},
							},
							&types.UnorderedListElement{
								BulletStyle: types.OneAsterisk,
								CheckStyle:  types.UncheckedInteractive,
								Elements: []interface{}{
									&types.Paragraph{
										Attributes: types.Attributes{
											types.AttrCheckStyle: types.UncheckedInteractive,
										},
										Elements: []interface{}{
											&types.StringElement{
												Content: "not checked",
											},
										},
									},
								},
							},
							&types.UnorderedListElement{
								BulletStyle: types.OneAsterisk,
								CheckStyle:  types.NoCheck,
								Elements: []interface{}{
									&types.Paragraph{
										Elements: []interface{}{
											&types.StringElement{
												Content: "normal list item",
											},
										},
									},
								},
							},
						},
					},
				},
			}
			Expect(ParseDocument(source)).To(MatchDocument(expected))
		})

		// A nested "**" checklist becomes a child List inside the first
		// parent element.
		It("with title and nested checklist", func() {
			source := `.Checklist
* [ ] parent not checked
** [*] checked
** [x] also checked
** [ ] not checked
* normal list item`
			expected := &types.Document{
				Elements: []interface{}{
					&types.List{
						Kind: types.UnorderedListKind,
						Attributes: types.Attributes{
							types.AttrTitle: "Checklist",
						},
						Elements: []types.ListElement{
							&types.UnorderedListElement{
								BulletStyle: types.OneAsterisk,
								CheckStyle:  types.Unchecked,
								Elements: []interface{}{
									&types.Paragraph{
										Attributes: types.Attributes{
											types.AttrCheckStyle: types.Unchecked,
										},
										Elements: []interface{}{
											&types.StringElement{
												Content: "parent not checked",
											},
										},
									},
									&types.List{
										Kind: types.UnorderedListKind,
										Elements: []types.ListElement{
											&types.UnorderedListElement{
												BulletStyle: types.TwoAsterisks,
												CheckStyle:  types.Checked,
												Elements: []interface{}{
													&types.Paragraph{
														Attributes: types.Attributes{
															types.AttrCheckStyle: types.Checked,
														},
														Elements: []interface{}{
															&types.StringElement{
																Content: "checked",
															},
														},
													},
												},
											},
											&types.UnorderedListElement{
												BulletStyle: types.TwoAsterisks,
												CheckStyle:  types.Checked,
												Elements: []interface{}{
													&types.Paragraph{
														Attributes: types.Attributes{
															types.AttrCheckStyle: types.Checked,
														},
														Elements: []interface{}{
															&types.StringElement{
																Content: "also checked",
															},
														},
													},
												},
											},
											&types.UnorderedListElement{
												BulletStyle: types.TwoAsterisks,
												CheckStyle:  types.Unchecked,
												Elements: []interface{}{
													&types.Paragraph{
														Attributes: types.Attributes{
															types.AttrCheckStyle: types.Unchecked,
														},
														Elements: []interface{}{
															&types.StringElement{
																Content: "not checked",
															},
														},
													},
												},
											},
										},
									},
								},
							},
							&types.UnorderedListElement{
								BulletStyle: types.OneAsterisk,
								CheckStyle:  types.NoCheck,
								Elements: []interface{}{
									&types.Paragraph{
										Elements: []interface{}{
											&types.StringElement{
												Content: "normal list item",
											},
										},
									},
								},
							},
						},
					},
				},
			}
			Expect(ParseDocument(source)).To(MatchDocument(expected))
		})

		// Nested plain (non-checklist) items under a checklist parent
		// get NoCheck and no paragraph check-style attribute.
		It("with title and nested normal list", func() {
			source := `.Checklist
* [ ] parent not checked
** a normal list item
** another normal list item
* normal list item`
			expected := &types.Document{
				Elements: []interface{}{
					&types.List{
						Kind: types.UnorderedListKind,
						Attributes: types.Attributes{
							types.AttrTitle: "Checklist",
						},
						Elements: []types.ListElement{
							&types.UnorderedListElement{
								BulletStyle: types.OneAsterisk,
								CheckStyle:  types.Unchecked,
								Elements: []interface{}{
									&types.Paragraph{
										Attributes: types.Attributes{
											types.AttrCheckStyle: types.Unchecked,
										},
										Elements: []interface{}{
											&types.StringElement{
												Content: "parent not checked",
											},
										},
									},
									&types.List{
										Kind: types.UnorderedListKind,
										Elements: []types.ListElement{
											&types.UnorderedListElement{
												BulletStyle: types.TwoAsterisks,
												CheckStyle:  types.NoCheck,
												Elements: []interface{}{
													&types.Paragraph{
														Elements: []interface{}{
															&types.StringElement{
																Content: "a normal list item",
															},
														},
													},
												},
											},
											&types.UnorderedListElement{
												BulletStyle: types.TwoAsterisks,
												CheckStyle:  types.NoCheck,
												Elements: []interface{}{
													&types.Paragraph{
														Elements: []interface{}{
															&types.StringElement{
																Content: "another normal list item",
															},
														},
													},
												},
											},
										},
									},
								},
							},
							&types.UnorderedListElement{
								BulletStyle: types.OneAsterisk,
								CheckStyle:  types.NoCheck,
								Elements: []interface{}{
									&types.Paragraph{
										Elements: []interface{}{
											&types.StringElement{
												Content: "normal list item",
											},
										},
									},
								},
							},
						},
					},
				},
			}
			Expect(ParseDocument(source)).To(MatchDocument(expected))
		})
	})
})
|
package ezgobot
import (
"bytes"
"regexp"
"strings"
"testing"
)
// test reports an error on t when result differs from expected.
func test(result, expected string, t *testing.T) {
	if result == expected {
		return
	}
	t.Errorf("Wrong output. Got |%s|, should be |%s|.", result, expected)
}
// buildBot wires up the test bot's state machine: ask for a name, offer
// help, apologize for unknown requests, and answer "what is your name"
// via a dedicated transition.
func buildBot() {
	bootState := Init()
	// s1: greeting; stores the user's reply under the "name" memory key.
	s1 := bootState.BuildTransitionState("Hi. What's your name?", "default").
		SetMemoryWrite("name").SetID("init")
	// s2: main help prompt; reads "name", stores the desired action.
	s2 := s1.BuildTransitionState("Hello %s! How can I help you?", "default").
		SetID("helpful").
		SetMemoryWrite("des_act").
		SetMemoryRead([]string{"name"})
	// Fallback for unrecognized requests; loops straight back to s2.
	s2.BuildTransitionState("Sorry %s, I don't know how to %s. How can I help you?", "default").
		SetID("cantdo").
		SetMemoryRead([]string{"name", "des_act"}).
		AddImmediateTransition(s2)
	// Dedicated answer for the "ask_name" transition; also returns to s2.
	s2.BuildTransitionState("My name is Machina. How can I help you?", "ask_name").
		SetID("ask_name").
		AddImmediateTransition(s2)
	// Route inputs matching this phrase to the "ask_name" transition.
	s2.AddTransitionMapping("what is your name", "ask_name")
}
// TestLoop drives a whole scripted conversation ("exit" terminates the
// loop) and compares the bot's accumulated output.
func TestLoop(t *testing.T) {
	buildBot()
	w := bytes.NewBufferString("")
	ConversationLoop(strings.NewReader("Bob\ndo nothing\nexit"), w)
	test(w.String(), "\nHi. What's your name? \nHello Bob! How can I help you? \nSorry Bob, I don't know how to do nothing. How can I help you? ", t)
}
// TestAct steps the bot one input at a time in debug mode.
// NOTE(review): the four act() calls depend on state carried over from
// one call to the next (e.g. "Bob" must be remembered for the final
// assertion), so their order matters.
func TestAct(t *testing.T) {
	debug = true // package-level flag; presumably enables single-step mode
	buildBot()
	test(act(""), "\nHi. What's your name? ", t)
	test(act("Bob\n"), "\nHello Bob! How can I help you? ", t)
	test(act("What is your name? \n"), "\nMy name is Machina. How can I help you? ", t)
	test(act("do nothing\n"), "\nSorry Bob, I don't know how to do nothing. How can I help you? ", t)
}
// TestDetermineTransition checks that input matching a mapping's regexp
// selects that mapping's transition, and that non-matching input falls
// back to "default".
func TestDetermineTransition(t *testing.T) {
	r1, _ := regexp.Compile("I'm \\d+ years old")
	t1 := transitionMapping{r1, "age_input"}
	r2, _ := regexp.Compile("I'm at \\w")
	t2 := transitionMapping{r2, "loc_input"}
	transitions := []transitionMapping{t1, t2}
	trans := determineTransition("I'm 19 years old", transitions)
	test(trans, "age_input", t)
	trans = determineTransition("I'm at home", transitions)
	test(trans, "loc_input", t)
	// No mapping matches: expect the default transition.
	trans = determineTransition("ugh", transitions)
	test(trans, "default", t)
}
// TestNormalizeInput checks lowercasing and punctuation stripping.
func TestNormalizeInput(t *testing.T) {
	test(normalizeInput("Abc!.?"), "abc", t)
}
|
package main
import (
"fmt"
"os"
"bufio"
"strings"
"strconv"
)
// main reads one line from stdin and validates it as a dotted-quad IPv4
// address: exactly four dot-separated decimal integers in [0, 255].
func main() {
	// Single shared message: the original printed two near-duplicates,
	// one of which was misspelled ("wronf ip address").
	const errMsg = "wrong ip address, should be in this format a.b.c.d where a,b,c,d can contain 0-255 value and should only contain digit\n"
	fmt.Print("Enter a string:")
	scanner := bufio.NewScanner(os.Stdin)
	scanner.Scan()
	text := scanner.Text()
	fmt.Printf("IP string : <%s>..\n", text)
	octets := strings.Split(text, ".")
	if len(octets) != 4 {
		fmt.Print(errMsg)
		return
	}
	for _, octet := range octets {
		value, err := strconv.Atoi(octet)
		if err != nil || value < 0 || value > 255 {
			fmt.Print(errMsg)
			return
		}
	}
}
|
package main
import (
"appengine"
"appengine/datastore"
"encoding/json"
"io/ioutil"
"net/http"
"time"
"strings"
"fmt"
"appengine/urlfetch"
"errors"
"bytes"
)
// User is the datastore "User" kind: one record per registered APP
// instance.
type User struct {
	InstanceId        string    `json:"instanceid"`        // device instance ID (used for authentication)
	RegistrationToken string    `json:"registrationtoken"` // GCM registration token for pushes
	LastUpdateTime    time.Time `json:"lastupdatetime"`    // when this record was last refreshed
}
// UserRegistrationTokenAuthenticity is the subset of the Google
// Instance ID authenticity service response that this server inspects.
type UserRegistrationTokenAuthenticity struct {
	Application      string `json:"application"`      // package name the token belongs to
	AuthorizedEntity string `json:"authorizedEntity"` // project number the token is authorized for
	// Other properties in the response body are "don't care"
}
// UserRegistrationResponseBody is the HTTP response body returned to a
// user registration request.
type UserRegistrationResponseBody struct {
	UserId string `json:"userid"` // encoded datastore key identifying the user
}
// Group is the datastore "Group" kind: a named device group.
type Group struct {
	Name            string   `json:"name"`
	Owner           string   `json:"owner"`           // Instance ID
	Members         []string `json:"members"`         // Instance ID list
	NotificationKey string   `json:"notificationkey"` // GCM device group unique ID
}
// GroupUser is the HTTP body of join/leave group requests from users.
type GroupUser struct {
	// To authentication
	InstanceId string `json:"instanceid"`
	// The group
	GroupName string `json:"groupname"`
}
// GroupOperation is the HTTP body sent to the Google Cloud Messaging
// server to manage device groups.
type GroupOperation struct {
	Operation             string   `json:"operation"`               // "create", "add", "remove"
	Notification_key_name string   `json:"notification_key_name"`  // A unique group name in a Google project
	Notification_key      string   `json:"notification_key,omitempty"` // A unique key to identify a group
	Registration_ids      []string `json:"registration_ids"`       // APP registration tokens in the group
}
// GroupOperationResponse is the HTTP body received back from the Google
// Cloud Messaging server after a group operation.
type GroupOperationResponse struct {
	Notification_key string `json:"notification_key"` // A unique key to identify a group
	Error            string `json:"error"`            // Error message
}
// UserMessage is the HTTP body of a send-message-to-user request.
type UserMessage struct {
	// To authentication
	InstanceId string `json:"instanceid"`
	// To the target user
	UserId  string `json:"userid"` // Datastore user kind key string
	Message string `json:"message"`
}
// TopicMessage is the HTTP body of a send-message-to-topic request.
type TopicMessage struct {
	// To authentication
	InstanceId string `json:"instanceid"`
	// To the target topic
	Topic   string `json:"topic"`
	Message string `json:"message"`
}
// GroupMessage is the HTTP body of a send-message-to-group request.
type GroupMessage struct {
	// To authentication
	InstanceId string `json:"instanceid"`
	// To the target group
	GroupName string `json:"groupname"`
	Message   string `json:"message"`
}
// REST routing, datastore and Google-project constants.
const BaseUrl = "/api/0.1/"
const UserKind = "User"
const UserRoot = "User root"
const GroupKind = "Group"
const GroupRoot = "Group root"
const AppNamespace = "com.vernonsung.testgcmapp"
const GaeProjectId = "testgcmserver-1120"
const GaeProjectNumber = "846181647582"
// GCM server
const GcmURL = "https://gcm-http.googleapis.com/gcm/send"
const GcmGroupURL = "https://android.googleapis.com/gcm/notification"
const InstanceIdVerificationUrl = "https://iid.googleapis.com/iid/info/"
// SECURITY(review): server API key is hard-coded in source; it should
// live in secure configuration, and this exposed key should be rotated.
const GcmApiKey = "AIzaSyAODu6tKbQp8sAwEBDNLzW9uDCBmmluQ4A"
// init registers all HTTP routes under BaseUrl. App Engine invokes the
// handlers; method filtering happens inside each handler.
func init() {
	http.HandleFunc(BaseUrl, rootPage)
	http.HandleFunc(BaseUrl+"myself", UpdateMyself)           // PUT
	http.HandleFunc(BaseUrl+"groups", groups)                 // PUT
	http.HandleFunc(BaseUrl+"groups/", groups)                // DELETE
	http.HandleFunc(BaseUrl+"user-messages", SendUserMessage)   // POST
	http.HandleFunc(BaseUrl+"topic-messages", SendTopicMessage) // POST
	http.HandleFunc(BaseUrl+"group-messages", SendGroupMessage) // POST
}
// rootPage handles requests to the API base path; it only emits a debug
// log entry and writes no response body.
func rootPage(rw http.ResponseWriter, req *http.Request) {
	c := appengine.NewContext(req)
	c.Debugf("This is root")
}
// groups dispatches /groups requests by HTTP method: PUT joins a group,
// DELETE leaves one. GET (list members) and POST (send message) are not
// implemented here; every other method yields 400 Bad Request.
func groups(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "PUT":
		JoinGroup(w, r)
	case "DELETE":
		LeaveGroup(w, r)
	default:
		http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
	}
}
// SendUserMessage receives a message from an APP instance, verifies the
// sender's instance ID, looks up the target user in the datastore and
// relays the message to that user's device through GCM.
// POST https://testgcmserver-1120.appspot.com/api/0.1/user-messages"
// Success: 204 No Content
// Failure: 400 Bad Request, 403 Forbidden, 404 Not Found, 500 Internal Server Error
func SendUserMessage(rw http.ResponseWriter, req *http.Request) {
	// Appengine
	var c appengine.Context = appengine.NewContext(req)
	// HTTP status to report; the deferred func below writes it exactly once.
	var r int = http.StatusNoContent
	// Return code
	defer func() {
		// Return status. WriteHeader() must be called before call to Write
		if r == http.StatusNoContent {
			// Changing the header after a call to WriteHeader (or Write) has no effect.
			rw.WriteHeader(http.StatusNoContent)
		} else if r == http.StatusBadRequest {
			http.Error(rw, `Please follow https://aaa.appspot.com/api/0.1/user-messages
{
"instanceid":""
"userid":""
"message":""
}`, http.StatusBadRequest)
		} else {
			http.Error(rw, http.StatusText(r), r)
		}
	}()
	// Parse the request body.
	b, err := ioutil.ReadAll(req.Body)
	if err != nil {
		c.Errorf("%s in reading body %s", err, b)
		r = http.StatusBadRequest
		return
	}
	var message UserMessage
	if err = json.Unmarshal(b, &message); err != nil {
		c.Errorf("%s in decoding body %s", err, b)
		r = http.StatusBadRequest
		return
	}
	// Authenticate the sender by its instance ID.
	isValid, err := verifyRequest(message.InstanceId, c)
	if err != nil {
		c.Errorf("%s in authenticating request", err)
		r = http.StatusBadRequest
		return
	}
	if !isValid {
		c.Warningf("Invalid request, ignore")
		r = http.StatusForbidden
		return
	}
	// Decode datastore key from string
	key, err := datastore.DecodeKey(message.UserId)
	if err != nil {
		c.Errorf("%s in decoding key string", err)
		r = http.StatusBadRequest
		return
	}
	// Get target user from datastore
	var dst User
	if err := datastore.Get(c, key, &dst); err != nil {
		c.Errorf("%s in getting entity from datastore by key %s", err, message.UserId)
		r = http.StatusNotFound
		return
	}
	// Build the GCM payload with the JSON encoder instead of Sprintf so
	// a message containing quotes or control characters cannot corrupt
	// (or inject into) the request body.
	payload, err := json.Marshal(map[string]interface{}{
		"to": dst.RegistrationToken,
		"data": map[string]string{
			"message": message.Message,
		},
	})
	if err != nil {
		c.Errorf("%s in encoding GCM payload", err)
		r = http.StatusInternalServerError
		return
	}
	// Make a POST request for GCM
	pReq, err := http.NewRequest("POST", GcmURL, bytes.NewReader(payload))
	if err != nil {
		c.Errorf("%s in makeing a HTTP request", err)
		r = http.StatusInternalServerError
		return
	}
	pReq.Header.Add("Content-Type", "application/json")
	pReq.Header.Add("Authorization", "key="+GcmApiKey)
	// Debug
	c.Infof("%s", *pReq)
	// Send request
	var client = urlfetch.Client(c)
	resp, err := client.Do(pReq)
	if err != nil {
		c.Errorf("%s in sending request", err)
		r = http.StatusInternalServerError
		return
	}
	// Close exactly once (the original deferred this twice).
	defer resp.Body.Close()
	// Check response
	c.Infof("%d %s", resp.StatusCode, resp.Status)
	respBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		c.Errorf("%s in reading response body", err)
		r = http.StatusInternalServerError
		return
	}
	c.Infof("%s", respBody)
}
// SendTopicMessage receives a message from an APP instance, checks its
// instance ID, and forwards the message to a GCM topic.
// POST https://testgcmserver-1120.appspot.com/api/0.1/topic-messages
// Success: 204 No Content
// Failure: 400 Bad Request, 403 Forbidden, 404 NotFound, 500 InternalError
func SendTopicMessage(rw http.ResponseWriter, req *http.Request) {
	// Appengine context for logging and urlfetch.
	var c appengine.Context = appengine.NewContext(req)
	// HTTP status reported to the client; set by each failure path below.
	var r int = http.StatusNoContent
	// Write the response status once, when the handler returns.
	defer func() {
		// WriteHeader() must be called before any call to Write.
		if r == http.StatusNoContent {
			rw.WriteHeader(http.StatusNoContent)
		} else if r == http.StatusBadRequest {
			// Echo the expected request format to help the caller.
			http.Error(rw, `Please follow https://aaa.appspot.com/api/0.1/topic-messages
{
"instanceid":""
"topic":""
"message":""
}`, http.StatusBadRequest)
		} else {
			http.Error(rw, http.StatusText(r), r)
		}
	}()
	// Read and decode the request body.
	b, err := ioutil.ReadAll(req.Body)
	if err != nil {
		c.Errorf("%s in reading body %s", err, b)
		r = http.StatusBadRequest
		return
	}
	var message TopicMessage
	if err = json.Unmarshal(b, &message); err != nil {
		c.Errorf("%s in decoding body %s", err, b)
		r = http.StatusBadRequest
		return
	}
	// Authenticate the sender by its instance ID.
	var isValid bool = false
	isValid, err = verifyRequest(message.InstanceId, c)
	if err != nil {
		c.Errorf("%s in authenticating request", err)
		r = http.StatusBadRequest
		return
	}
	if isValid == false {
		c.Warningf("Invalid request, ignore")
		r = http.StatusForbidden
		return
	}
	// Make the GCM message body addressed to the topic.
	var bodyString string = fmt.Sprintf(`
{
"to":"/topics/%s",
"data": {
"message":"%s"
}
}`, message.Topic, message.Message)
	// Make a POST request for GCM
	pReq, err := http.NewRequest("POST", GcmURL, strings.NewReader(bodyString))
	if err != nil {
		c.Errorf("%s in makeing a HTTP request", err)
		r = http.StatusInternalServerError
		return
	}
	pReq.Header.Add("Content-Type", "application/json")
	pReq.Header.Add("Authorization", "key="+GcmApiKey)
	// Debug
	c.Infof("%s", *pReq)
	// Send request
	var client = urlfetch.Client(c)
	resp, err := client.Do(pReq)
	if err != nil {
		c.Errorf("%s in sending request", err)
		r = http.StatusInternalServerError
		return
	}
	// BUG FIX: close the response body exactly once (it was deferred twice).
	defer resp.Body.Close()
	// Log the GCM response status and body for diagnostics.
	c.Infof("%d %s", resp.StatusCode, resp.Status)
	respBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		c.Errorf("%s in reading response body", err)
		r = http.StatusInternalServerError
		return
	}
	c.Infof("%s", respBody)
}
// SendGroupMessage receives a message from an APP instance, checks its
// instance ID, and forwards the message to every member of a device group.
// POST https://testgcmserver-1120.appspot.com/api/0.1/group-messages
// Success: 204 No Content
// Failure: 400 Bad Request, 403 Forbidden, 404 NotFound, 500 InternalError
func SendGroupMessage(rw http.ResponseWriter, req *http.Request) {
	// Appengine context for logging, datastore and urlfetch.
	var c appengine.Context = appengine.NewContext(req)
	// HTTP status reported to the client.
	var r int = http.StatusNoContent
	// Write the response status once, when the handler returns.
	defer func() {
		// WriteHeader() must be called before any call to Write.
		if r == http.StatusNoContent {
			rw.WriteHeader(http.StatusNoContent)
		} else if r == http.StatusBadRequest {
			// Echo the expected request format to help the caller.
			http.Error(rw, `Please follow https://aaa.appspot.com/api/0.1/group-messages
{
"instanceid":""
"groupName":""
"message":""
}`, http.StatusBadRequest)
		} else {
			http.Error(rw, http.StatusText(r), r)
		}
	}()
	// Read and decode the request body.
	b, err := ioutil.ReadAll(req.Body)
	if err != nil {
		c.Errorf("%s in reading body %s", err, b)
		r = http.StatusBadRequest
		return
	}
	var message GroupMessage
	if err = json.Unmarshal(b, &message); err != nil {
		c.Errorf("%s in decoding body %s", err, b)
		r = http.StatusBadRequest
		return
	}
	// Authenticate the sender by its instance ID.
	var isValid bool = false
	isValid, err = verifyRequest(message.InstanceId, c)
	if err != nil {
		c.Errorf("%s in authenticating request", err)
		r = http.StatusBadRequest
		return
	}
	if isValid == false {
		c.Warningf("Invalid request, ignore")
		r = http.StatusForbidden
		return
	}
	// Look up the group to obtain its GCM notification key.
	var cKey *datastore.Key
	var pGroup *Group
	cKey, pGroup, err = searchGroup(message.GroupName, c)
	if err != nil {
		c.Errorf("%s in searching existing group %s", err, message.GroupName)
		r = http.StatusInternalServerError
		return
	}
	if cKey == nil {
		c.Warningf("Group %s is not found", message.GroupName)
		r = http.StatusBadRequest
		return
	}
	// Make the GCM message body addressed to the group's notification key.
	var bodyString string = fmt.Sprintf(`
{
"to":"%s",
"data": {
"message":"%s"
}
}`, pGroup.NotificationKey, message.Message)
	// Make a POST request for GCM
	pReq, err := http.NewRequest("POST", GcmURL, strings.NewReader(bodyString))
	if err != nil {
		c.Errorf("%s in makeing a HTTP request", err)
		r = http.StatusInternalServerError
		return
	}
	pReq.Header.Add("Content-Type", "application/json")
	pReq.Header.Add("Authorization", "key="+GcmApiKey)
	// Debug
	c.Infof("Send request to GCM server %s", *pReq)
	// Send request
	var client = urlfetch.Client(c)
	resp, err := client.Do(pReq)
	if err != nil {
		c.Errorf("%s in sending request", err)
		r = http.StatusInternalServerError
		return
	}
	// BUG FIX: close the response body exactly once (it was deferred twice).
	defer resp.Body.Close()
	// Log the GCM response status and body for diagnostics.
	c.Infof("%d %s", resp.StatusCode, resp.Status)
	respBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		c.Errorf("%s in reading response body", err)
		r = http.StatusInternalServerError
		return
	}
	c.Infof("%s", respBody)
}
// UpdateMyself registers or updates the calling user's registration token.
// PUT https://testgcmserver-1120.appspot.com/api/0.1/myself
// Success: 200 OK with {"UserId": "<encoded datastore key>"} as the body
// Failure: 400 Bad Request
func UpdateMyself(rw http.ResponseWriter, req *http.Request) {
	// Appengine context for logging and datastore.
	var c appengine.Context = appengine.NewContext(req)
	// Result, 0: success, 1: failed
	var r int = 0
	// Datastore key of the created/updated user; set on every success path.
	var cKey *datastore.Key = nil
	defer func() {
		// Return status. WriteHeader() must be called before call to Write.
		if r == 0 {
			rw.WriteHeader(http.StatusOK)
			// Return the user's datastore key so the caller can reference it later.
			var dst UserRegistrationResponseBody = UserRegistrationResponseBody{UserId: cKey.Encode()}
			if err := json.NewEncoder(rw).Encode(dst); err != nil {
				c.Errorf("%s in encoding result %v", err, dst)
			}
		} else {
			http.Error(rw, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
		}
	}()
	// Read and decode the request body.
	b, err := ioutil.ReadAll(req.Body)
	if err != nil {
		c.Errorf("%s in reading body %s", err, b)
		r = 1
		return
	}
	// Vernon debug
	c.Debugf("Got body %s", b)
	var user User
	if err = json.Unmarshal(b, &user); err != nil {
		c.Errorf("%s in decoding body %s", err, b)
		r = 1
		return
	}
	// Check registration token starts with instance ID. That's the rule of Google API service authenticity.
	// Also check the token is official-signed via Google's token authenticity check service.
	// BUG FIX: the original sliced RegistrationToken[0:len(InstanceId)], which
	// panics when the token is shorter than the instance ID; strings.HasPrefix
	// performs the same comparison safely.
	if !strings.HasPrefix(user.RegistrationToken, user.InstanceId) || isRegistrationTokenValid(user.RegistrationToken, c) == false {
		c.Errorf("Instance ID %s is invalid", user.InstanceId)
		r = 1
		return
	}
	// Set now as the creation time. Precision to a second.
	user.LastUpdateTime = time.Unix(time.Now().Unix(), 0)
	// Search for an existing user with the same instance ID.
	var pKey *datastore.Key
	var pOldUser *User
	pKey, pOldUser, err = searchUser(user.InstanceId, c)
	if err != nil {
		c.Errorf("%s in searching existing user %v", err, user)
		r = 1
		return
	}
	if pKey == nil {
		// Add new user into datastore
		pKey = datastore.NewKey(c, UserKind, UserRoot, 0, nil)
		cKey, err = datastore.Put(c, datastore.NewIncompleteKey(c, UserKind, pKey), &user)
		if err != nil {
			c.Errorf("%s in storing to datastore", err)
			r = 1
			return
		}
		c.Infof("Add user %+v", user)
	} else if user.RegistrationToken == pOldUser.RegistrationToken {
		// Duplicate request. Do nothing to datastore and return existing key.
		cKey = pKey
	} else {
		// Token changed: overwrite the existing entity in place.
		cKey, err = datastore.Put(c, pKey, &user)
		if err != nil {
			c.Errorf("%s in storing to datastore", err)
			r = 1
			return
		}
		c.Infof("Update user %+v", user)
	}
}
// JoinGroup adds the sender to a group, creating the group on the GCM server
// (and in the datastore) when it does not exist yet.
// PUT https://testgcmserver-1120.appspot.com/api/0.1/groups
// Success: 204 No Content
// Failure: 400 Bad Request, 403 Forbidden, 500 Internal Server Error
func JoinGroup(rw http.ResponseWriter, req *http.Request) {
	// Appengine context for logging, datastore and urlfetch.
	var c appengine.Context = appengine.NewContext(req)
	// HTTP status reported to the client.
	var r int = http.StatusNoContent
	// Datastore key of the group; set on every success path.
	var cKey *datastore.Key = nil
	defer func() {
		if r == http.StatusNoContent {
			// Changing the header after a call to WriteHeader (or Write) has no effect.
			rw.Header().Set("Location", req.URL.String()+"/"+cKey.Encode())
			// Return status. WriteHeader() must be called before call to Write.
			rw.WriteHeader(r)
		} else {
			http.Error(rw, http.StatusText(r), r)
		}
	}()
	// Read and decode the request body.
	b, err := ioutil.ReadAll(req.Body)
	if err != nil {
		c.Errorf("%s in reading body %s", err, b)
		r = http.StatusInternalServerError
		return
	}
	// Vernon debug
	c.Debugf("Got body %s", b)
	var user GroupUser
	if err = json.Unmarshal(b, &user); err != nil {
		c.Errorf("%s in decoding body %s", err, b)
		r = http.StatusBadRequest
		return
	}
	// Authenticate sender & Search for user registration token.
	var pUser *User
	var token string
	_, pUser, err = searchUser(user.InstanceId, c)
	if err != nil {
		c.Errorf("%s in searching user %v", err, user.InstanceId)
		r = http.StatusInternalServerError
		return
	}
	if pUser == nil {
		c.Errorf("User %s not found. Invalid request. Ignore.", user.InstanceId)
		r = http.StatusForbidden
		return
	}
	token = pUser.RegistrationToken
	// Search for existing group.
	var pKey *datastore.Key
	var pGroup *Group
	pKey, pGroup, err = searchGroup(user.GroupName, c)
	if err != nil {
		c.Errorf("%s in searching existing group %s", err, user.GroupName)
		r = http.StatusInternalServerError
		return
	}
	// Build and send the GCM device-group operation.
	var operation GroupOperation
	if pKey == nil {
		// Create a new group on GCM server.
		operation.Operation = "create"
		operation.Notification_key_name = user.GroupName
		operation.Registration_ids = []string{token}
		if r = sendGroupOperationToGcm(&operation, c); r != http.StatusOK {
			c.Errorf("Send group operation to GCM failed")
			return
		}
		r = http.StatusNoContent
		// Add the new group to the datastore.
		pGroup = &Group{
			Name:            user.GroupName,
			Owner:           user.InstanceId,
			Members:         []string{user.InstanceId},
			NotificationKey: operation.Notification_key,
		}
		pKey = datastore.NewKey(c, GroupKind, GroupRoot, 0, nil)
		cKey, err = datastore.Put(c, datastore.NewIncompleteKey(c, GroupKind, pKey), pGroup)
		if err != nil {
			c.Errorf("%s in storing to datastore", err)
			r = http.StatusInternalServerError
			return
		}
		c.Infof("Create group %+v", pGroup)
	} else {
		// Add the new user to the existing group on GCM server.
		operation.Operation = "add"
		operation.Notification_key_name = user.GroupName
		operation.Notification_key = pGroup.NotificationKey
		operation.Registration_ids = []string{token}
		if r = sendGroupOperationToGcm(&operation, c); r != http.StatusOK {
			c.Errorf("Send group operation to GCM failed")
			return
		}
		r = http.StatusNoContent
		// Modify datastore.
		// BUG FIX: Members holds instance IDs everywhere else (see the create
		// branch above and LeaveGroup's member iteration), but the registration
		// token was appended here. Store the instance ID instead.
		pGroup.Members = append(pGroup.Members, user.InstanceId)
		cKey, err = datastore.Put(c, pKey, pGroup)
		if err != nil {
			c.Errorf("%s in storing to datastore", err)
			r = http.StatusInternalServerError
			return
		}
		c.Infof("Add user %s to group %s", user.InstanceId, user.GroupName)
	}
}
// LeaveGroup removes the sender from a group. When the sender owns the group,
// every member is removed on the GCM server and the group entity is deleted;
// otherwise only the sender is removed from the member list.
// DELETE https://testgcmserver-1120.appspot.com/api/0.1/groups/xxx", xxx: Group name
// Header {"Instance-Id":"..."}
// Success returns 204 No Content
// Failure returns 400 Bad Request, 403 Forbidden, 500 Internal Server Error
func LeaveGroup(rw http.ResponseWriter, req *http.Request) {
// Appengine
var c appengine.Context = appengine.NewContext(req)
// HTTP status code reported to the client
var r int = http.StatusNoContent
// Sender instance ID
var instanceId string
// Sender registration token
var registrationToken string
// Group name to leave
var groupName string
// The operation sent to GCM server
var operation GroupOperation
// Group in datastore
var cKey *datastore.Key
var pGroup *Group
// Error
var err error
// Function to write response header
defer func() {
if r == http.StatusNoContent {
// Return status. WriteHeader() must be called before call to Write
rw.WriteHeader(r)
} else {
http.Error(rw, http.StatusText(r), r)
}
}()
// Get instance ID from header
instanceId = req.Header.Get("Instance-Id")
if instanceId == "" {
c.Warningf("Missing instance ID. Ignore the request.")
r = http.StatusBadRequest
return
}
// Authenticate sender & Search for user registration token
var pUser *User
_, pUser, err = searchUser(instanceId, c)
if err != nil {
c.Errorf("%s in searching user %v", err, instanceId)
r = http.StatusInternalServerError
return
}
if pUser == nil {
c.Errorf("User %s not found. Invalid request. Ignore.", instanceId)
r = http.StatusForbidden
return
}
registrationToken = pUser.RegistrationToken
// Get group name from URL: the path segment right after "groups"
var tokens []string
tokens = strings.Split(req.URL.Path, "/")
for i, v := range tokens {
if v == "groups" && i + 1 < len(tokens) {
groupName = tokens[i + 1]
break
}
}
if groupName == "" {
c.Warningf("Missing group name. Ignore the request.")
r = http.StatusBadRequest
return
}
// Vernon debug
c.Debugf("User %s is going to leave group %s", instanceId, groupName)
// Search for existing group
cKey, pGroup, err = searchGroup(groupName, c)
if err != nil {
c.Errorf("%s in searching existing group %s", err, groupName)
r = http.StatusInternalServerError
return
}
if cKey == nil {
// Leaving an already-deleted group still succeeds (204)
c.Infof("Group %s has been deleted already", groupName)
return
}
var returnCode int = http.StatusOK
if instanceId == pGroup.Owner {
// Vernon debug
c.Debugf("User %s owns the group %s", instanceId, groupName)
// Remove all users from GCM server so that the group is removed with them.
// Best-effort: a failure for one member does not stop the others.
for _, v := range pGroup.Members {
// Search user registration token
_, pUser, err = searchUser(v, c)
if err != nil {
c.Warningf("%s in searching user %v", err, v)
continue
}
if pUser == nil {
c.Warningf("User %s not found. Ignore.", v)
continue
}
registrationToken = pUser.RegistrationToken
// Make operation structure
operation.Operation = "remove"
operation.Notification_key_name = pGroup.Name
operation.Notification_key = pGroup.NotificationKey
operation.Registration_ids = []string{registrationToken}
if returnCode = sendGroupOperationToGcm(&operation, c); returnCode != http.StatusOK {
c.Warningf("Failed to remove user %s from group %s because sending group operation to GCM failed", v, groupName)
r = returnCode
continue
}
c.Infof("User %s is removed from group %s", pUser.InstanceId, groupName)
}
// Modify datastore
if err = datastore.Delete(c, cKey); err != nil {
c.Errorf("%s in delete group %s from datastore", err, groupName)
r = http.StatusInternalServerError
return
}
c.Infof("User %s removed group %s", instanceId, groupName)
} else {
// Vernon debug
c.Debugf("User %s doesn't own the group %s", instanceId, groupName)
// Remove the user from the existing group on GCM server
operation.Operation = "remove"
operation.Notification_key_name = groupName
operation.Notification_key = pGroup.NotificationKey
operation.Registration_ids = []string{registrationToken}
if returnCode = sendGroupOperationToGcm(&operation, c); returnCode != http.StatusOK {
c.Errorf("Send group operation to GCM failed")
r = returnCode
return
}
// Modify datastore: remove instanceId from Members (swap-with-last delete)
a := pGroup.Members
for i, x := range a {
if x == instanceId {
a[i] = a[len(a)-1]
a[len(a)-1] = ""
a = a[:len(a)-1]
break
}
}
pGroup.Members = a
cKey, err = datastore.Put(c, cKey, pGroup)
if err != nil {
c.Errorf("%s in storing to datastore", err)
r = http.StatusInternalServerError
return
}
c.Infof("Remove user %s from group %s", instanceId, groupName)
}
}
// sendGroupOperationToGcm posts a Google Cloud Messaging Device Group
// operation to the GCM server.
// Success: returns http.StatusOK and stores the notification key replied by
// the server into pOperation.
// Failure: returns 400 Bad Request or 500 Internal Server Error.
func sendGroupOperationToGcm(pOperation *GroupOperation, c appengine.Context) (r int) {
	r = http.StatusOK
	// Guard against a nil operation.
	if pOperation == nil {
		c.Errorf("Parameter pOperation is nil")
		r = http.StatusInternalServerError
		return
	}
	// Encode the operation as the JSON request body.
	payload, err := json.Marshal(pOperation)
	if err != nil {
		c.Errorf("%s in encoding an operation as JSON", err)
		r = http.StatusBadRequest
		return
	}
	// Build the POST request for the GCM device-group endpoint.
	pReq, err := http.NewRequest("POST", GcmGroupURL, bytes.NewReader(payload))
	if err != nil {
		c.Errorf("%s in makeing a HTTP request", err)
		r = http.StatusInternalServerError
		return
	}
	pReq.Header.Add("Content-Type", "application/json")
	pReq.Header.Add("Authorization", "key="+GcmApiKey)
	pReq.Header.Add("project_id", GaeProjectNumber)
	// Debug
	c.Debugf("Send request to GCM server %s", *pReq)
	c.Debugf("Send body to GCM server %s", payload)
	// Send the request through urlfetch.
	resp, err := urlfetch.Client(c).Do(pReq)
	if err != nil {
		c.Errorf("%s in sending request", err)
		r = http.StatusInternalServerError
		return
	}
	defer resp.Body.Close()
	// Read and decode the server's reply.
	raw, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		c.Errorf("%s in reading response body", err)
		r = http.StatusInternalServerError
		return
	}
	c.Infof("%s", raw)
	var gcmResp GroupOperationResponse
	if err = json.Unmarshal(raw, &gcmResp); err != nil {
		c.Errorf("%s in decoding JSON response body", err)
		r = http.StatusInternalServerError
		return
	}
	// Check response
	c.Infof("%d %s", resp.StatusCode, resp.Status)
	if resp.StatusCode != http.StatusOK {
		c.Errorf("GCM server replied that %s", gcmResp.Error)
		r = http.StatusBadRequest
		return
	}
	// Success: propagate the notification key to the caller's structure.
	pOperation.Notification_key = gcmResp.Notification_key
	return
}
// isRegistrationTokenValid asks Google's Instance ID service whether token is
// an officially-issued registration token belonging to this app and project.
// It retries with exponential backoff while the service replies 503; a GAE
// request must finish within 60 seconds, so each wait is capped at 16 seconds.
func isRegistrationTokenValid(token string, c appengine.Context) (isValid bool) {
	if token == "" {
		c.Warningf("Instance ID is empty")
		return false
	}
	// Make a GET request for Google Instance ID service
	pReq, err := http.NewRequest("GET", InstanceIdVerificationUrl+token, nil)
	if err != nil {
		c.Errorf("%s in makeing a HTTP request", err)
		return false
	}
	pReq.Header.Add("Authorization", "key="+GcmApiKey)
	// Debug
	c.Infof("%s", *pReq)
	// Send request
	pClient := urlfetch.Client(c)
	var resp *http.Response
	var sleepTime int
	// A Google APP Engine process must end within 60 seconds. So sleep no more than 16 seconds each retry.
	for sleepTime = 1; sleepTime <= 16; sleepTime *= 2 {
		resp, err = pClient.Do(pReq)
		if err != nil {
			c.Errorf("%s in verifying instance ID %s", err, token)
			return false
		}
		// Retry while server is temporarily unavailable
		if resp.StatusCode != http.StatusServiceUnavailable {
			break
		}
		// BUG FIX: honor the exponential backoff; the original always slept
		// one second regardless of sleepTime.
		time.Sleep(time.Duration(sleepTime) * time.Second)
	}
	// Check response code
	if resp.StatusCode != http.StatusOK {
		c.Warningf("Invalid instance ID with response code %d %s", resp.StatusCode, resp.Status)
		return false
	}
	// Get body
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// BUG FIX: the error value was missing from this log call (vet error).
		c.Errorf("%s in reading HTTP response body", err)
		return false
	}
	// Decode body as JSON
	var authenticity UserRegistrationTokenAuthenticity
	if err := json.Unmarshal(body, &authenticity); err != nil {
		// BUG FIX: the error value was missing from this log call (vet error).
		c.Warningf("%s in decoding HTTP response body %s", err, body)
		return false
	}
	// The token is valid only when it belongs to this app and this project.
	if authenticity.Application != AppNamespace || authenticity.AuthorizedEntity != GaeProjectNumber {
		c.Warningf("Invalid instance ID with authenticity application %s and authorized entity %s",
			authenticity.Application, authenticity.AuthorizedEntity)
		return false
	}
	return true
}
// searchUser looks up a user entity by its instance ID. It returns the
// datastore key and entity of the first match; both are nil when no user
// with the given instance ID exists.
func searchUser(instanceId string, c appengine.Context) (key *datastore.Key, user *User, err error) {
	var results []User
	// Query the user kind by instance ID.
	q := datastore.NewQuery(UserKind).Filter("InstanceId=", instanceId)
	keys, getErr := q.GetAll(c, &results)
	if getErr != nil {
		c.Errorf("%s in getting data from datastore\n", getErr)
		err = errors.New("Datastore is temporary unavailable")
		return
	}
	// No match: leave key and user nil.
	if len(keys) == 0 {
		return
	}
	key = keys[0]
	user = &results[0]
	return
}
// searchGroup looks up a group entity by its name. It returns the datastore
// key and entity of the first match; both are nil when no group with the
// given name exists.
func searchGroup(name string, c appengine.Context) (key *datastore.Key, group *Group, err error) {
	var results []Group
	// Query the group kind by name.
	q := datastore.NewQuery(GroupKind).Filter("Name=", name)
	keys, getErr := q.GetAll(c, &results)
	if getErr != nil {
		c.Errorf("%s in getting data from datastore\n", getErr)
		err = errors.New("Datastore is temporary unavailable")
		return
	}
	// No match: leave key and group nil.
	if len(keys) == 0 {
		return
	}
	key = keys[0]
	group = &results[0]
	return
}
// verifyRequest reports whether the sender identified by instanceId is a
// registered user. err is non-nil only when the datastore lookup itself
// fails; an unknown instance ID yields (false, nil).
func verifyRequest(instanceId string, c appengine.Context) (isValid bool, err error) {
	var pUser *User
	if _, pUser, err = searchUser(instanceId, c); err != nil {
		c.Errorf("%s in searching user %v", err, instanceId)
		return
	}
	// An unknown instance ID is an invalid (but not erroneous) request.
	if pUser == nil {
		c.Warningf("Invalid instance ID %s is not found in datastore", instanceId)
		return
	}
	return true, nil
}
//func queryMember(rw http.ResponseWriter, req *http.Request) {
// // To log messages
// c := appengine.NewContext(req)
//
// if len(req.URL.Query()) == 0 {
// // Get key from URL
// tokens := strings.Split(req.URL.Path, "/")
// var keyIndexInTokens int = 0
// for i, v := range tokens {
// if v == "members" {
// keyIndexInTokens = i + 1
// }
// }
// if keyIndexInTokens >= len(tokens) {
// c.Debugf("Key is not given so that list all members")
// listMember(rw, req)
// return
// }
// keyString := tokens[keyIndexInTokens]
// if keyString == "" {
// c.Debugf("Key is empty so that list all members")
// listMember(rw, req)
// } else {
// queryOneMember(rw, req, keyString)
// }
// } else {
// searchMember(rw, req)
// }
//}
//func listMember(rw http.ResponseWriter, req *http.Request) {
// // To access datastore and to log
// c := appengine.NewContext(req)
// c.Debugf("listMember()")
//
// // Get all entities
// var dst []UserRegistration
// r := 0
// k, err := datastore.NewQuery(UserKind).Order("-CreateTime").GetAll(c, &dst)
// if err != nil {
// c.Errorf("%s", err)
// r = 1
// }
//
// // Map keys and items
// for i, v := range k {
// dst[i].Id = v.Encode()
// }
//
// // Return status. WriteHeader() must be called before call to Write
// if r == 0 {
// rw.WriteHeader(http.StatusOK)
// } else {
// http.Error(rw, http.StatusText(http.StatusNotFound), http.StatusNotFound)
// return
// }
//
// // Return body
// encoder := json.NewEncoder(rw)
// if err = encoder.Encode(dst); err != nil {
// c.Errorf("%s in encoding result %v", err, dst)
// } else {
// c.Infof("listMember() returns %d members", len(dst))
// }
//}
//
//func clearMember(rw http.ResponseWriter, req *http.Request) {
// // To access datastore and to log
// c := appengine.NewContext(req)
// c.Infof("clearMember()")
//
// // Delete root entity after other entities
// r := 0
// pKey := datastore.NewKey(c, UserKind, UserRoot, 0, nil)
// if keys, err := datastore.NewQuery(UserKind).KeysOnly().GetAll(c, nil); err != nil {
// c.Errorf("%s", err)
// r = 1
// } else if err := datastore.DeleteMulti(c, keys); err != nil {
// c.Errorf("%s", err)
// r = 1
// } else if err := datastore.Delete(c, pKey); err != nil {
// c.Errorf("%s", err)
// r = 1
// }
//
// // Return status. WriteHeader() must be called before call to Write
// if r == 0 {
// rw.WriteHeader(http.StatusOK)
// } else {
// http.Error(rw, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
// }
//}
|
package gpgcli
import (
// "io"
"os/exec"
"bytes"
// "fmt"
)
// cmd runs the gpg binary with the configured global options followed by
// args, and returns its stdout, stderr, and any execution error. stdout and
// stderr are returned even on failure so callers can inspect diagnostics.
func (g *Gpg) cmd(args ...string) (string, string, error) {
	var stdout bytes.Buffer
	var stderr bytes.Buffer
	// Global options first, then the per-call arguments.
	cmdArgs := append([]string{}, g.gpgOptions...)
	cmdArgs = append(cmdArgs, args...)
	cmd := exec.Command(`gpg`, cmdArgs...)
	// Use the filtered environment so gpg behaves consistently.
	cmd.Env = g.filteredEnv
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	// BUG FIX: the original called Start then Wait and kept a separate outer
	// err that was always nil; Run covers both steps with one error path.
	err := cmd.Run()
	return stdout.String(), stderr.String(), err
}
|
package test_test
import (
"testing"
"github.com/Rubentxu/lbricks/bgo"
"os"
"fmt"
)
var testMemory *bgo.Memory
// TestMain prepares shared state before the suite runs and propagates the
// suite's exit code to the operating system.
func TestMain(m *testing.M) {
	setup()
	code := m.Run()
	// teardown() // nothing to tear down yet
	os.Exit(code)
}
// setup initializes the shared memory used by every test in this package.
func setup() {
	testMemory = bgo.CreateBlackboard().GetBaseMemory()
}
// BenchmarkConcurrent measures storing boolean pairs into the shared memory.
//
// BUG FIX: the original spawned one goroutine per iteration, producing
// unsynchronized concurrent writes to the shared maps — a data race that can
// crash with "concurrent map writes" — and leaked goroutines past the end of
// the benchmark. The writes are now performed synchronously.
func BenchmarkConcurrent(b *testing.B) {
	for i := 0; i < b.N; i++ {
		mBool(fmt.Sprintf("%d", i))
	}
}
// mBool stores a true/false pair in the shared test memory under keys
// derived from key ("<key>A" and "<key>B").
func mBool(key string) {
testMemory.Bool[key+"A"] = true
testMemory.Bool[key+"B"] = false
}
// TestBool verifies boolean values round-trip through the shared memory.
//
// BUG FIX: the original condition used &&, so the test could only fail when
// BOTH entries were wrong; either entry being wrong must fail the test.
func TestBool(t *testing.T) {
	testMemory.Bool["A"] = true
	testMemory.Bool["B"] = false
	if testMemory.Bool["A"] != true || testMemory.Bool["B"] != false {
		t.Error("Error testBool not true")
	}
}
// TestInteger verifies integer values round-trip through the shared memory.
func TestInteger(t *testing.T) {
	const want = 1
	testMemory.Integer["test"] = want
	if got := testMemory.Integer["test"]; got != want {
		t.Error("Error testInteger")
	}
}
// TestFloat verifies float values round-trip through the shared memory.
func TestFloat(t *testing.T) {
	const want = 1
	testMemory.Float["test"] = want
	if got := testMemory.Float["test"]; got != want {
		t.Error("Error testFloat")
	}
}
// TestComplex verifies complex values round-trip through the shared memory.
//
// BUG FIX: %c is the rune verb and is invalid for complex64 (go vet flags
// it); %v prints the complex value correctly.
func TestComplex(t *testing.T) {
	testMemory.Complex["test"] = complex64(1)
	if testMemory.Complex["test"] != complex64(1) {
		t.Errorf("Error testComplex expected 1 ---> %v", testMemory.Complex["test"])
	}
}
|
package nopaste
import (
"fmt"
"log"
"net/http"
"time"
irc "github.com/thoj/go-ircevent"
)
const (
// MsgBufferLen is the capacity of each per-channel message queue.
MsgBufferLen = 100
)
var (
// IRCThrottleWindow is the minimum interval between two posts to a channel.
IRCThrottleWindow = 1 * time.Second
)
// MessageChan delivers nopaste and msgr posts to a chat backend.
type MessageChan interface {
PostNopaste(np nopasteContent, url string)
PostMsgr(np *http.Request)
}
// IRCMessage is one message destined for a single IRC channel.
type IRCMessage struct {
Channel string
Notice bool
Text string
}
// IRCMessageChan is a MessageChan backed by a buffered channel of IRCMessage.
type IRCMessageChan chan IRCMessage
// PostNopaste formats a nopaste entry as "<nick> <summary> <url>" and queues
// it for IRC delivery. The message is dropped with a warning when the queue
// is full.
func (ch IRCMessageChan) PostNopaste(np nopasteContent, url string) {
	msg := IRCMessage{
		Channel: np.Channel,
		Text:    fmt.Sprintf("%s %s %s", np.Nick, np.Summary, url),
		// Any non-empty 'notice' value (even "0", "false", "null"...) means notice.
		Notice: np.Notice != "",
	}
	// Non-blocking send: never stall the HTTP handler.
	select {
	case ch <- msg:
	default:
		log.Println("[warn] Can't send msg to IRC")
	}
}
// PostMsgr queues a message taken from the request's form values for IRC
// delivery. It is sent as a notice unless the 'notice' parameter is missing
// or "0". The message is dropped with a warning when the queue is full.
func (ch IRCMessageChan) PostMsgr(req *http.Request) {
	notice := req.FormValue("notice")
	msg := IRCMessage{
		Channel: req.FormValue("channel"),
		Text:    req.FormValue("msg"),
		Notice:  notice != "" && notice != "0",
	}
	// Non-blocking send: never stall the HTTP handler.
	select {
	case ch <- msg:
	default:
		log.Println("[warn] Can't send msg to IRC")
	}
}
// RunIRCAgent keeps a persistent IRC connection alive forever: it connects,
// pumps messages from ch until the connection's event loop ends, then
// reconnects after a 10-second pause. This function never returns.
func RunIRCAgent(c *Config, ch chan IRCMessage) {
log.Println("[info] running irc agent")
for {
agent := irc.IRC(c.IRC.Nick, c.IRC.Nick)
agent.UseTLS = c.IRC.Secure
agent.Password = c.IRC.Password
addr := fmt.Sprintf("%s:%d", c.IRC.Host, c.IRC.Port)
err := agent.Connect(addr)
if err != nil {
// Connection failed: back off before retrying.
log.Println("[warn]", err)
time.Sleep(10 * time.Second)
continue
}
// done stops the sender goroutine when the IRC loop exits.
done := make(chan interface{})
go sendMsgToIRC(c, agent, ch, done)
agent.Loop()
close(done)
time.Sleep(10 * time.Second)
}
}
// sendMsgToIRC fans incoming messages out to one buffered queue (and sender
// goroutine) per IRC channel, joining each channel on first use.
// It returns when done is closed.
func sendMsgToIRC(c *Config, agent *irc.Connection, ch chan IRCMessage, done chan interface{}) {
// joined maps an IRC channel name to its per-channel message queue.
joined := make(map[string]chan IRCMessage)
for {
select {
case <-done:
return
case msg := <-ch:
if _, ok := joined[msg.Channel]; !ok {
// First message for this channel: join it and start its sender.
log.Println("[info] join", msg.Channel)
agent.Join(msg.Channel)
joined[msg.Channel] = make(chan IRCMessage, MsgBufferLen)
go sendMsgToIRCChannel(agent, joined[msg.Channel], done)
c.AddChannel(msg.Channel)
}
// Non-blocking send: drop the message when the queue is full.
select {
case joined[msg.Channel] <- msg:
default:
log.Println("[warn] Can't send msg to IRC. Channel buffer flooding.")
}
}
}
}
// sendMsgToIRCChannel delivers queued messages for a single IRC channel,
// spacing consecutive posts by at least IRCThrottleWindow.
// It returns when done is closed.
func sendMsgToIRCChannel(agent *irc.Connection, ch chan IRCMessage, done chan interface{}) {
lastPostedAt := time.Now()
for {
select {
case <-done:
return
case msg := <-ch:
// Wait out the remainder of the throttle window, if any.
throttle(lastPostedAt, IRCThrottleWindow)
if msg.Notice {
agent.Notice(msg.Channel, msg.Text)
} else {
agent.Privmsg(msg.Channel, msg.Text)
}
lastPostedAt = time.Now()
}
}
}
func throttle(last time.Time, window time.Duration) {
now := time.Now()
diff := now.Sub(last)
if diff < window {
// throttle
log.Println("[info] throttled. sleeping", window-diff)
time.Sleep(window - diff)
}
}
|
/*
* @Author: Sy.
* @Create: 2019-11-01 20:54:15
* @LastTime: 2019-11-16 18:30:34
* @LastEdit: Sy.
* @FilePath: \server\models\role_auth.go
* @Description: ่ง่ฒๆ้ๆงๅถ
*/
package models
import (
"bytes"
"strconv"
"strings"
"github.com/astaxie/beego/orm"
)
// RoleAuth links one permission (AuthId) to one role (RoleId).
type RoleAuth struct {
AuthId int `orm:"pk"`
RoleId int64
}
// TABLE_ROLE_AUTH is the unprefixed name of the role-permission link table.
const TABLE_ROLE_AUTH = "admin_role_auth"
// TableName returns the full (prefixed) database table name for RoleAuth.
func (ra *RoleAuth) TableName() string {
return TableName(TABLE_ROLE_AUTH)
}
// RoleAuthAdd inserts one role-permission link and returns the new row id.
func RoleAuthAdd(ra *RoleAuth) (int64, error) {
return orm.NewOrm().Insert(ra)
}
// RoleAuthGetById returns the AuthId column of every permission linked to
// the role with the given id.
func RoleAuthGetById(id int) ([]*RoleAuth, error) {
	links := make([]*RoleAuth, 0)
	q := orm.NewOrm().QueryTable(TableName(TABLE_ROLE_AUTH))
	// Only the AuthId column is loaded.
	if _, err := q.Filter("role_id", id).All(&links, "AuthId"); err != nil {
		return nil, err
	}
	return links, nil
}
// RoleAuthDelete removes every permission link of the given role id and
// returns the number of deleted rows.
func RoleAuthDelete(id int) (int64, error) {
query := orm.NewOrm().QueryTable(TableName(TABLE_ROLE_AUTH))
return query.Filter("role_id", id).Delete()
}
// RoleAuthGetByIds returns a comma-separated list of the permission ids
// granted to any of the roles in RoleIds (itself a comma-separated list of
// role ids). Permission ids 0 and 1 are always excluded.
func RoleAuthGetByIds(RoleIds string) (Authids string, err error) {
	links := make([]*RoleAuth, 0)
	q := orm.NewOrm().QueryTable(TableName(TABLE_ROLE_AUTH))
	roleIds := strings.Split(RoleIds, ",")
	if _, err = q.Filter("role_id__in", roleIds).All(&links, "AuthId"); err != nil {
		return "", err
	}
	// Build "id,id,..." and strip the trailing separator.
	var buf bytes.Buffer
	for _, link := range links {
		if link.AuthId == 0 || link.AuthId == 1 {
			continue // reserved ids are never exposed
		}
		buf.WriteString(strconv.Itoa(link.AuthId))
		buf.WriteString(",")
	}
	return strings.TrimRight(buf.String(), ","), nil
}
func RoleAuthMultiAdd(ras []*RoleAuth) (n int, err error) {
query := orm.NewOrm().QueryTable(TableName(TABLE_ROLE_AUTH))
i, _ := query.PrepareInsert()
for _, ra := range ras {
_, err := i.Insert(ra)
if err == nil {
n = n + 1
}
}
i.Close() // ๅซๅฟ่ฎฐๅ
ณ้ญ statement
return n, err
}
|
package todo
// Todo represents a single task with a completion flag.
type Todo struct {
	Title string
	Done  bool
}

// Finish marks the todo as completed.
func (td *Todo) Finish() {
	td.Done = true
}
|
/*
Package gadget provides a smallish web application framework with a soft spot
for content negotiation. An overview of usage can be found at
http://redneckbeard.github.io/gadget.
*/
package gadget
|
/*
Copyright 2019 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"fmt"
"io"
"os"
"github.com/spf13/cobra"
"github.com/GoogleContainerTools/skaffold/v2/cmd/skaffold/app/flags"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/config"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/runner"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/schema/latest"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/schema/util"
)
var (
// quietFlag (-q) suppresses build logs; only the templated result is printed.
quietFlag bool
// defaultBuildFormatTemplate renders the build result as a JSON object.
defaultBuildFormatTemplate = "{{json .}}"
// buildFormatFlag (-o) is the template applied to the build result.
buildFormatFlag = flags.NewTemplateFlag(defaultBuildFormatTemplate, flags.BuildOutput{})
// buildOutputFlag (--file-output) is an optional file to write the result to.
buildOutputFlag string
)
// NewCmdBuild describes the CLI command to build artifacts.
// Build-specific flags are wired to the package-level flag variables above;
// see doBuild for how they interact.
func NewCmdBuild() *cobra.Command {
return NewCmd("build").
WithDescription("Build the artifacts").
WithLongDescription("Build, test and tag the artifacts").
WithExample("Build all the artifacts", "build").
WithExample("Build artifacts with a profile activated", "build -p <profile>").
WithExample("Build artifacts whose image name contains <db>", "build -b <db>").
WithExample("Quietly build artifacts and output the image names as json", "build -q > build_result.json").
WithExample("Build the artifacts and then deploy them", "build -q | skaffold deploy --build-artifacts -").
WithExample("Print the final image names", "build -q --dry-run").
WithCommonFlags().
WithFlags([]*Flag{
{Value: &quietFlag, Name: "quiet", Shorthand: "q", DefValue: false, Usage: "Suppress the build output and print image built on success. See --output to format output.", IsEnum: true},
{Value: buildFormatFlag, Name: "output", Shorthand: "o", DefValue: defaultBuildFormatTemplate, Usage: "Used in conjunction with --quiet flag. " + buildFormatFlag.Usage()},
{Value: &buildOutputFlag, Name: "file-output", DefValue: "", Usage: "Filename to write build images to"},
{Value: &opts.DryRun, Name: "dry-run", DefValue: false, Usage: "Don't build images, just compute the tag for each artifact.", IsEnum: true},
{Value: &opts.PushImages, Name: "push", DefValue: nil, Usage: "Push the built images to the specified image repository.", IsEnum: true, NoOptDefVal: "true"},
}).
WithHouseKeepingMessages().
NoArgs(doBuild)
}
// doBuild builds, tests and tags the target artifacts, then optionally
// renders the build result through the --output template to stdout
// (--quiet) and/or to a file (--file-output).
func doBuild(ctx context.Context, out io.Writer) error {
	buildOut := out
	if quietFlag {
		// In quiet mode the build logs are discarded; only the rendered
		// template is written to the real output below.
		buildOut = io.Discard
	}
	return withRunner(ctx, out, func(r runner.Runner, configs []util.VersionedConfig) error {
		bRes, err := r.Build(ctx, buildOut, targetArtifacts(opts, configs))
		// Note: the result template is rendered even when Build returned an
		// error, so partial results still get reported; the build error is
		// returned at the end of the closure.
		if quietFlag || buildOutputFlag != "" {
			cmdOut := flags.BuildOutput{Builds: bRes}
			var buildOutput bytes.Buffer
			if err := buildFormatFlag.Template().Execute(&buildOutput, cmdOut); err != nil {
				return fmt.Errorf("executing template: %w", err)
			}
			if quietFlag {
				if _, err := out.Write(buildOutput.Bytes()); err != nil {
					return fmt.Errorf("writing build output: %w", err)
				}
			}
			if buildOutputFlag != "" {
				if err := os.WriteFile(buildOutputFlag, buildOutput.Bytes(), 0644); err != nil {
					return fmt.Errorf("writing build output to file: %w", err)
				}
			}
		}
		return err
	})
}
// targetArtifacts collects, from every provided config, the artifacts that
// match the current build-target selection in opts.
func targetArtifacts(opts config.SkaffoldOptions, configs []util.VersionedConfig) []*latest.Artifact {
	var selected []*latest.Artifact
	for _, cfg := range configs {
		artifacts := cfg.(*latest.SkaffoldConfig).Build.Artifacts
		for _, a := range artifacts {
			if !opts.IsTargetImage(a) {
				continue
			}
			selected = append(selected, a)
		}
	}
	return selected
}
|
package io
import (
"encoding/csv"
"fmt"
"io"
"log"
"os"
)
// FileHandler describes basic file lifecycle operations.
// NOTE(review): none of the methods take arguments or return errors, which
// makes the interface hard to implement usefully — confirm intended shape.
type FileHandler interface {
	ReadFile()
	WriteFile()
	CreateFile()
}
func WriteToJsonFile(jsonStr string) {
f, err :=os.OpenFile("data.json", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755)
if err != nil {
log.Fatal(err)
}
defer f.Close()
f.WriteString(jsonStr)
}
func ReadCSVFile(fileName string) *csv.Reader {
userRecords, err := os.Open(fileName)
csvReader := csv.NewReader(userRecords)
if err != nil {
fmt.Println(err)
}
return csvReader
}
func GetRecordsFromCSVReader(fileReader *csv.Reader) [][]string{
var records [][]string
for {
record, err := fileReader.Read()
records = append(records, record)
if err == io.EOF {
break
}
if err == nil{
//fmt.Println(record)
}
}
return records
} |
// +build storage_all !storage_pgx,!storage_boltdb,!storage_fs,!storage_badger,!storage_sqlite
package app
import (
"path"
"unsafe"
authbadger "github.com/go-ap/auth/badger"
authboltdb "github.com/go-ap/auth/boltdb"
authfs "github.com/go-ap/auth/fs"
authpgx "github.com/go-ap/auth/pgx"
authsqlite "github.com/go-ap/auth/sqlite"
"github.com/go-ap/errors"
"github.com/go-ap/fedbox/internal/config"
"github.com/go-ap/fedbox/storage/badger"
"github.com/go-ap/fedbox/storage/boltdb"
"github.com/go-ap/fedbox/storage/fs"
"github.com/go-ap/fedbox/storage/pgx"
"github.com/go-ap/fedbox/storage/sqlite"
st "github.com/go-ap/storage"
"github.com/openshift/osin"
"github.com/sirupsen/logrus"
)
// getBadgerStorage opens the badger-backed ActivityPub store plus a
// badger-backed OAuth2 (osin) store rooted under the configured base path.
func getBadgerStorage(c config.Options, l logrus.FieldLogger) (st.Store, osin.Storage, error) {
	path := c.BaseStoragePath()
	conf := badger.Config{
		Path:    path,
		BaseURL: c.BaseURL,
	}
	// Logger is optional here (unlike the sibling constructors, which
	// dereference l unconditionally).
	if l != nil {
		l.Debugf("Initializing badger storage at %s", path)
		conf.LogFn = InfoLogFn(l)
		conf.ErrFn = ErrLogFn(l)
	}
	db, err := badger.New(conf)
	if err != nil {
		return db, nil, err
	}
	// NOTE(review): reinterprets badger.Config as authbadger.Config via
	// unsafe.Pointer; this silently breaks if either struct's layout
	// changes. Presumably the two types are kept field-for-field identical
	// — verify before touching either config struct.
	authConf := (*authbadger.Config)(unsafe.Pointer(&conf))
	authConf.Path = c.BadgerOAuth2(path)
	oauth := authbadger.New(*authConf)
	return db, oauth, nil
}
// getBoltStorage opens the boltdb-backed ActivityPub store and a
// boltdb-backed OAuth2 store (bucketed by host).
// NOTE(review): unlike getBadgerStorage, l is dereferenced without a nil
// check — a nil logger panics here.
func getBoltStorage(c config.Options, l logrus.FieldLogger) (st.Store, osin.Storage, error) {
	path := c.BaseStoragePath()
	l.Debugf("Initializing boltdb storage at %s", path)
	db, err := boltdb.New(boltdb.Config{
		Path:    path,
		BaseURL: c.BaseURL,
		LogFn:   InfoLogFn(l),
		ErrFn:   ErrLogFn(l),
	})
	if err != nil {
		return nil, nil, err
	}
	oauth := authboltdb.New(authboltdb.Config{
		Path:       c.BoltDBOAuth2(),
		BucketName: c.Host,
		LogFn:      InfoLogFn(l),
		ErrFn:      ErrLogFn(l),
	})
	return db, oauth, nil
}
// getFsStorage opens the filesystem-backed ActivityPub store and a
// filesystem-backed OAuth2 store.
func getFsStorage(c config.Options, l logrus.FieldLogger) (st.Store, osin.Storage, error) {
	p := c.BaseStoragePath()
	l.Debugf("Initializing fs storage at %s", c.BaseStoragePath())
	oauth := authfs.New(authfs.Config{
		Path:  p,
		LogFn: InfoLogFn(l),
		ErrFn: ErrLogFn(l),
	})
	db, err := fs.New(fs.Config{
		// NOTE(review): path.Dir (slash paths) rather than filepath.Dir is
		// used on a filesystem path — fine on Unix, wrong on Windows.
		// Also note the store roots at the parent of p while oauth uses p
		// itself; presumably intentional — verify.
		StoragePath: path.Dir(p),
		BaseURL:     c.BaseURL,
		EnableCache: c.StorageCache,
	})
	if err != nil {
		return nil, oauth, err
	}
	return db, oauth, nil
}
// getSqliteStorage opens the sqlite-backed ActivityPub store and a
// sqlite-backed OAuth2 store sharing the same base path.
func getSqliteStorage(c config.Options, l logrus.FieldLogger) (st.Store, osin.Storage, error) {
	path := c.BaseStoragePath()
	l.Debugf("Initializing sqlite storage at %s", path)
	oauth := authsqlite.New(authsqlite.Config{
		Path:  path,
		LogFn: InfoLogFn(l),
		ErrFn: ErrLogFn(l),
	})
	db, err := sqlite.New(sqlite.Config{
		StoragePath: path,
		BaseURL:     c.BaseURL,
		EnableCache: c.StorageCache,
	})
	if err != nil {
		// The already-created oauth store is dropped on this path (nil is
		// returned instead); callers only see the error.
		return nil, nil, errors.Annotatef(err, "unable to connect to sqlite storage")
	}
	return db, oauth, nil
}
// getPgxStorage wires the PostgreSQL (pgx) stores. It always returns a
// NotImplemented error alongside the constructed stores — the backend is
// declared but not finished.
func getPgxStorage(c config.Options, l logrus.FieldLogger) (st.Store, osin.Storage, error) {
	// @todo(marius): we're no longer loading SQL db config env variables
	l.Debugf("Initializing pgx storage at %s", c.StoragePath)
	// conf is zero-valued (see todo above), so the oauth config below is
	// populated with empty connection parameters.
	conf := pgx.Config{}
	db, err := pgx.New(conf, c.BaseURL, l)
	if err != nil {
		return nil, nil, errors.Annotatef(err, "unable to connect to pgx storage")
	}
	oauth := authpgx.New(authpgx.Config{
		Enabled: true,
		Host:    conf.Host,
		Port:    int64(conf.Port),
		User:    conf.User,
		Pw:      conf.Password,
		Name:    conf.Database,
		LogFn:   InfoLogFn(l),
		ErrFn:   ErrLogFn(l),
	})
	return db, oauth, errors.NotImplementedf("pgx storage is not implemented yet")
}
// Storage instantiates the ActivityPub store and the OAuth2 store matching
// the backend selected in the configuration; unknown backends yield a
// NotImplemented error.
func Storage(c config.Options, l logrus.FieldLogger) (st.Store, osin.Storage, error) {
	switch c.Storage {
	case config.StorageBadger:
		return getBadgerStorage(c, l)
	case config.StorageBoltDB:
		return getBoltStorage(c, l)
	case config.StorageFS:
		return getFsStorage(c, l)
	case config.StoragePostgres:
		return getPgxStorage(c, l)
	case config.StorageSqlite:
		return getSqliteStorage(c, l)
	default:
		return nil, nil, errors.NotImplementedf("Invalid storage type %s", c.Storage)
	}
}
|
package repository
import "github.com/fwchen/jellyfish/domain/taco"
// Repository abstracts persistence for taco items.
type Repository interface {
	// List returns the tacos owned by userId matching filter.
	List(userId string, filter taco.TacoFilter) ([]taco.Taco, error)
	// Save persists one taco and returns its assigned ID.
	Save(taco *taco.Taco) (*string, error)
	// SaveList persists a batch of tacos.
	SaveList(tacos []taco.Taco) error
	// FindById looks a taco up by ID.
	FindById(tacoID string) (*taco.Taco, error)
	// MaxOrderByCreatorId returns the highest ordering value among the
	// creator's tacos (presumably nil when none exist — verify in impls).
	MaxOrderByCreatorId(userId string) (*float64, error)
	// MaxOrderByBoxId returns the highest ordering value within a box.
	// NOTE(review): parameter is named userId but the method name says box —
	// confirm which identifier implementations actually expect.
	MaxOrderByBoxId(userId string) (*float64, error)
	// Delete removes a taco by ID.
	Delete(tacoId string) error
}
|
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package spec
import (
"encoding/json"
"testing"
testingutil "github.com/go-swagger/go-swagger/internal/testing"
"github.com/stretchr/testify/assert"
)
// TestUnknownSpecVersion verifies that an unsupported swagger version is rejected.
func TestUnknownSpecVersion(t *testing.T) {
	const unsupportedVersion = "0.9"
	_, err := New([]byte{}, unsupportedVersion)
	assert.Error(t, err)
}
// TestDefaultsTo20 verifies that an empty version string defaults the
// document to swagger 2.0 and that the pet-store base path is parsed.
func TestDefaultsTo20(t *testing.T) {
	doc, err := New(testingutil.PetStoreJSONMessage, "")
	assert.NoError(t, err)
	assert.NotNil(t, doc)
	assert.Equal(t, "2.0", doc.Version())
	// assert.Equal(t, "2.0", d.data["swagger"].(string))
	assert.Equal(t, "/api", doc.BasePath())
}
// func TestValidatesValidSchema(t *testing.T) {
// d, err := New(testingutil.PetStoreJSONMessage, "")
// assert.NoError(t, err)
// assert.NotNil(t, d)
// res := d.Validate()
// assert.NotNil(t, res)
// assert.True(t, res.Valid())
// assert.Empty(t, res.Errors())
// }
// func TestFailsInvalidSchema(t *testing.T) {
// d, err := New(testingutil.InvalidJSONMessage, "")
// assert.NoError(t, err)
// assert.NotNil(t, d)
// res := d.Validate()
// assert.NotNil(t, res)
// assert.False(t, res.Valid())
// assert.NotEmpty(t, res.Errors())
// }
// TestFailsInvalidJSON verifies that malformed JSON input is rejected.
func TestFailsInvalidJSON(t *testing.T) {
	malformed := json.RawMessage([]byte("{]"))
	_, err := New(malformed, "")
	assert.Error(t, err)
}
|
package controller
import (
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"time"
"github.com/slack-go/slack"
"github.com/tarao1006/attendance-slackapp/sheet"
)
// Submit handles Slack modal submissions for attendance entries.
type Submit struct {
	client             *slack.Client
	spreadsheetService *sheet.SpreadsheetService
}

// NewSubmit wires a Submit handler with its Slack client and spreadsheet backend.
func NewSubmit(client *slack.Client, spreadsheetService *sheet.SpreadsheetService) *Submit {
	return &Submit{
		client:             client,
		spreadsheetService: spreadsheetService,
	}
}
func (submit *Submit) HandleSubmit(w http.ResponseWriter, r *http.Request) {
var payload slack.InteractionCallback
if err := json.Unmarshal([]byte(r.FormValue("payload")), &payload); err != nil {
log.Printf("Could not parse action response JSON: %v", err)
w.WriteHeader(http.StatusBadRequest)
return
}
userID := payload.User.ID
userName := payload.User.Name
dateString := payload.View.State.Values["date"]["date"].SelectedDate
startTimeString := payload.View.State.Values["start_time"]["startTime"].Value
endTimeString := payload.View.State.Values["end_time"]["endTime"].Value
message := fmt.Sprintf("%s ใไบๅฎใ่ฟฝๅ ใใพใใ\nDate: %s\nStart Time: %s\nEnd Time: %s", userName, dateString, startTimeString, endTimeString)
date, _ := time.Parse("2006-01-02", dateString)
if date.Before(time.Now().AddDate(0, 0, -1)) {
submit.ReturnError(w, map[string]string{"date": "้ๅปใฎๆฅไปใซไบๅฎใฏ่ฟฝๅ ใงใใพใใใ"})
return
}
errorMessage := make(map[string]string)
startTime, err := time.Parse("15:04", startTimeString)
if err != nil {
errorMessage["start_time"] = "ไธๆญฃใชๅ
ฅๅใงใใ"
}
endTime, err := time.Parse("15:04", endTimeString)
if err != nil {
errorMessage["end_time"] = "ไธๆญฃใชๅ
ฅๅใงใใ"
}
if len(errorMessage) != 0 {
submit.ReturnError(w, errorMessage)
return
}
if !endTime.After(startTime) {
submit.ReturnError(w, map[string]string{"end_time": "็ตไบๆๅปใ้ๅงๆๅปใใใๆฉใใงใใ"})
return
}
submit.spreadsheetService.Add(userID, dateString, startTimeString, endTimeString)
if _, err := submit.client.PostEphemeral(
os.Getenv("ATTENDANCE_CHANNEL_ID"),
userID,
slack.MsgOptionText(message, false),
); err != nil {
log.Println(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
}
// ReturnError responds to a modal submission with field-level validation
// errors rendered in Slack's "errors" view-submission response format.
func (submit *Submit) ReturnError(w http.ResponseWriter, errorMessages map[string]string) {
	resp, err := json.Marshal(slack.NewErrorsViewSubmissionResponse(errorMessages))
	if err != nil {
		// Previously a marshal failure was ignored and a nil body written.
		log.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Add("Content-Type", "application/json")
	if _, err := w.Write(resp); err != nil {
		// Headers are already sent once Write is called, so the status can
		// no longer be changed (the old WriteHeader call here had no
		// effect); just record the failure.
		log.Println(err)
	}
}
|
package Produce
import (
"bytes"
"reflect"
"github.com/mkocikowski/libkafka/wire"
)
// UnmarshalResponse decodes a wire-format Produce response body. The
// (possibly partially populated) Response is returned together with any
// decode error, matching wire.Read's contract.
func UnmarshalResponse(b []byte) (*Response, error) {
	resp := &Response{}
	if err := wire.Read(bytes.NewBuffer(b), reflect.ValueOf(resp)); err != nil {
		return resp, err
	}
	return resp, nil
}
// Response is the decoded body of a Kafka Produce response.
type Response struct {
	TopicResponses []TopicResponse
	ThrottleTimeMs int32
}

// TopicResponse groups per-partition results for a single topic.
type TopicResponse struct {
	Topic              string
	PartitionResponses []PartitionResponse
}

// PartitionResponse carries the broker's result for one partition.
type PartitionResponse struct {
	Partition      int32
	ErrorCode      int16
	BaseOffset     int64
	LogAppendTime  int64
	LogStartOffset int64
}
|
// MIT License
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE
package watchdog
import (
"net/http"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestGarbageCollection verifies that, against the canned API fixtures, the
// collector removes exactly one orphaned priority class and one orphaned
// secret.
func TestGarbageCollection(t *testing.T) {
	m := newMockK8sServer()
	m.addResponseByFile("/apis/scheduling.k8s.io/v1/priorityclasses",
		"../../testdata/priority_class_list.json", http.MethodGet)
	m.addResponseByFile("/api/v1/namespaces/default/secrets", "../../testdata/secret_list.json", http.MethodGet)
	m.addResponseByFile("/apis/frameworkcontroller.microsoft.com/v1/namespaces/default/frameworks",
		"../../testdata/framework_list.json", http.MethodGet)
	m.addResponse("/api/v1/namespaces/default/secrets/059cf3d85cb5f6280e9606d47551554c-configcred", "",
		http.MethodDelete)
	m.addResponse("/apis/scheduling.k8s.io/v1/priorityclasses/059cf3d85cb5f6280e9606d47551554c-priority",
		"", http.MethodDelete)
	url := m.start()
	defer m.stop()
	// NOTE(review): the env var leaks into later tests and the Setenv error
	// is ignored; t.Setenv would scope it — confirm the Go version allows it.
	os.Setenv("KUBE_APISERVER_ADDRESS", url)
	c, _ := NewK8sClient()
	gc := NewGarbageCollector(c, time.Minute)
	gc.collect()
	removedPcNum := gc.removeOrphanPriorityClasses()
	removedSecretNum := gc.removeOrphanSecrets()
	assert.Equal(t, 1, removedPcNum)
	assert.Equal(t, 1, removedSecretNum)
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tasks
import (
"context"
"yunion.io/x/jsonutils"
"yunion.io/x/pkg/errors"
api "yunion.io/x/onecloud/pkg/apis/compute"
"yunion.io/x/onecloud/pkg/cloudcommon/db"
"yunion.io/x/onecloud/pkg/cloudcommon/db/taskman"
"yunion.io/x/onecloud/pkg/cloudprovider"
"yunion.io/x/onecloud/pkg/compute/models"
"yunion.io/x/onecloud/pkg/util/logclient"
)
// WafRuleUpdateTask pushes local WAF rule changes to the cloud provider.
type WafRuleUpdateTask struct {
	taskman.STask
}

// Register the task type with the task manager at startup.
func init() {
	taskman.RegisterTask(WafRuleUpdateTask{})
}
// taskFailed marks the rule as update-failed, records an action log entry
// and fails the task stage with the error message.
func (self *WafRuleUpdateTask) taskFailed(ctx context.Context, rule *models.SWafRule, err error) {
	rule.SetStatus(self.UserCred, api.WAF_RULE_STATUS_UPDATE_FAILED, err.Error())
	logclient.AddActionLogWithStartable(self, rule, logclient.ACT_UPDATE, err, self.UserCred, false)
	self.SetStageFailed(ctx, jsonutils.NewString(err.Error()))
}
// OnInit fetches the cloud WAF rule backing the model, rebuilds the update
// options from the local rule definition (name, action, priority and all
// statements) and pushes the update upstream.
func (self *WafRuleUpdateTask) OnInit(ctx context.Context, obj db.IStandaloneModel, body jsonutils.JSONObject) {
	rule := obj.(*models.SWafRule)
	iRule, err := rule.GetICloudWafRule()
	if err != nil {
		self.taskFailed(ctx, rule, errors.Wrapf(err, "GetICloudWafRule"))
		return
	}
	opts := cloudprovider.SWafRule{
		Name:       rule.Name,
		Desc:       rule.Description,
		Action:     rule.Action,
		Priority:   rule.Priority,
		Statements: []cloudprovider.SWafStatement{},
	}
	// ("StatementConditon" spelling comes from the model's field name.)
	opts.StatementCondition = rule.StatementConditon
	statements, err := rule.GetRuleStatements()
	if err != nil {
		self.taskFailed(ctx, rule, errors.Wrapf(err, "GetRuleStatements"))
		return
	}
	for i := range statements {
		opts.Statements = append(opts.Statements, statements[i].SWafStatement)
	}
	err = iRule.Update(&opts)
	if err != nil {
		self.taskFailed(ctx, rule, errors.Wrapf(err, "iRule.Update"))
		return
	}
	self.taskComplete(ctx, rule)
}
// taskComplete marks the rule available again and completes the task stage.
func (self *WafRuleUpdateTask) taskComplete(ctx context.Context, rule *models.SWafRule) {
	rule.SetStatus(self.UserCred, api.WAF_RULE_STATUS_AVAILABLE, "")
	self.SetStageComplete(ctx, nil)
}
|
// Package v1beta1 contains API Schema definitions for the auth v1beta1 API group
// +k8s:deepcopy-gen=package,register
// +groupName=auth.example.com
package v1beta1
|
package prototmpl
import (
"encoding/json"
"fmt"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"regexp"
"strconv"
)
// substitutionExpressionRegex matches template substitution markers of the
// form "{{name}}"; capture group 1 holds the substitution name.
// regexp.MustCompile replaces the previous init() + Compile + panic dance —
// it is the idiomatic equivalent for a constant pattern and fails the same
// way (panic at package init) if the pattern were ever invalid.
var substitutionExpressionRegex = regexp.MustCompile(`\{\{(.*)}}`)
// Template is a compiled proto template: a prototype message with the
// substitution markers removed, plus the resolved path of each marker so
// Evaluate can write argument values back in.
type Template struct {
	// subs maps substitution name -> resolved field/index path.
	subs map[string]substitution
	// partialMessage is the prototype with marker entries pruned.
	partialMessage proto.Message
}
// Evaluate renders the template: it clones the partial message and writes
// each named argument into its recorded substitution path.
// NOTE(review): argument values go straight into protoreflect.ValueOf,
// which panics when the Go type does not match the target field's kind —
// callers must supply correctly typed values.
func (t* Template) Evaluate(args map[string]interface{}) (proto.Message, error) {
	cloned := proto.Clone(t.partialMessage)
	for name, s := range t.subs {
		// Walk down to the parent of the final path element.
		walker := protoreflect.ValueOf(cloned.ProtoReflect())
		for _, p := range s.path[:len(s.path)-1] {
			switch {
			case p.field != nil:
				msg := walker.Message()
				//msg, ok := walker.Interface().(protoreflect.Message)
				//if !ok {
				//	return nil, fmt.Errorf("path traversal mismatch: attempted field access but current node is not a message")
				//}
				walker = msg.Get(p.field)
			case p.index != nil:
				// TODO(snowp): Seems like there's no way to do this without panicing on type mismatch?
				l := walker.List()
				walker = l.Get(*p.index)
			}
		}
		// Apply the substitution at the final path element.
		last := s.path[len(s.path)-1]
		switch {
		case last.index != nil:
			// This shouldn't be a Set, we want to insert
			walker.List().Set(*last.index, protoreflect.ValueOf(args[name]))
		case last.field != nil:
			//msg, ok := walker.Interface().(proto.Message)
			msg := walker.Message()
			//if !ok {
			//	return nil, fmt.Errorf("path traversal mismatch: attempted field access but current node is not a message")
			//}
			msg.Set(last.field, protoreflect.ValueOf(args[name]))
		}
	}
	return cloned, nil
}
// templatePathsRecurse walks a decoded JSON tree looking for string leaves
// that match the "{{name}}" marker. Each match's JSON path (map keys and
// array indices) is recorded in paths under the marker name. The boolean
// result tells the caller the current node was a marker and should be
// removed from the tree.
func templatePathsRecurse(tree interface{}, paths *map[string][]string, currentPath []string) (bool, error) {
	// The only thing we care about finding is string elements, so we don't have to handle all the cases here.
	// We only care about aggregate types and the literal string type: nothing else matters because it would never
	// result in us finding another string.
	switch element := tree.(type) {
	case map[string]interface{}:
		for key, value := range element {
			// NOTE(review): append(currentPath, key) may reuse currentPath's
			// backing array; a path stored below can be mutated by a later
			// sibling's append at the same depth. Verify with aliased cases.
			found, err := templatePathsRecurse(value, paths, append(currentPath, key))
			if err != nil {
				return false, err
			}
			if found {
				// Prune the marker entry so the remaining JSON can be
				// unmarshalled into the prototype message.
				delete(element, key)
			}
		}
	case []interface{}:
		var newEntries []interface{}
		for index, value := range element {
			found, err := templatePathsRecurse(value, paths, append(currentPath, strconv.Itoa(index)))
			if err != nil {
				return false, err
			}
			if !found {
				newEntries = append(newEntries, value)
			}
		}
		// BUG(review): this assignment only rebinds the local variable — the
		// caller's slice is untouched, so markers inside arrays are never
		// actually pruned from the tree. Fixing it needs the rewritten
		// subtree to be propagated back to the parent.
		element = newEntries
	case string:
		result := substitutionExpressionRegex.FindStringSubmatch(element)
		if len(result) > 0 {
			(*paths)[result[1]] = currentPath
		}
		return true, nil
	}
	return false, nil
}
// templatePaths parses js, records the JSON path of every "{{name}}"
// substitution marker it contains, and returns those paths together with
// the document re-serialized with the marker entries pruned.
func templatePaths(js string) (map[string][]string, string, error) {
	structured := map[string]interface{}{}
	if err := json.Unmarshal([]byte(js), &structured); err != nil {
		return nil, "", err
	}
	paths := map[string][]string{}
	if _, err := templatePathsRecurse(structured, &paths, []string{}); err != nil {
		return nil, "", err
	}
	marshalled, err := json.Marshal(structured)
	if err != nil {
		return nil, "", err
	}
	return paths, string(marshalled), nil
}
// fieldOrArrayAccess is one step of a substitution path: either an array
// index (index non-nil) or a message field access (field non-nil).
type fieldOrArrayAccess struct {
	index *int
	field protoreflect.FieldDescriptor
}

// substitution is the fully resolved location of one template marker.
type substitution struct {
	path []fieldOrArrayAccess
	// Should be the same as path[-1].Kind(), here for convenience
	kind protoreflect.Kind
}
// TemplateCompiler compiles JSON templates against proto message descriptors.
// It is currently stateless; the struct exists as an extension point.
type TemplateCompiler struct {
}

// NewTemplateCompiler returns a ready-to-use TemplateCompiler.
func NewTemplateCompiler() TemplateCompiler {
	return TemplateCompiler{
	}
}
// createSubstitutions resolves each recorded JSON path against the proto
// message descriptor, producing the field/index paths Evaluate needs.
// NOTE(review): if a path contained no field segment (only numeric array
// indices), lastFieldDescriptor would stay nil and the Kind() call at the
// bottom would panic — presumably templatePaths never produces such a path;
// verify.
func (tc *TemplateCompiler) createSubstitutions(jsonPaths map[string][]string, messageDescriptor protoreflect.MessageDescriptor) (map[string]substitution, error) {
	// Lots of optimizations that can be done here to avoid traversing the same path multiple times, but for now we do it the good old boring way.
	subs := map[string]substitution{}
	for name, value := range jsonPaths {
		currentDescriptor := messageDescriptor
		var lastFieldDescriptor protoreflect.FieldDescriptor
		var protoPath []fieldOrArrayAccess
		for _, segment := range value {
			// segment is either a numeric value (indicating an array access) or a string (indicating a field access).
			numeric, err := strconv.Atoi(segment)
			if err == nil {
				protoPath = append(protoPath, fieldOrArrayAccess{index: &numeric})
				continue
			}
			if currentDescriptor == nil {
				return nil, fmt.Errorf("attempting to traverse non-message type: next path segment '%s', full path '%s'", segment, value)
			}
			// Match the segment against the message's fields by JSON name.
			found := false
			for i := 0; i < currentDescriptor.Fields().Len(); i++ {
				f := currentDescriptor.Fields().Get(i)
				if f.JSONName() == segment {
					protoPath = append(protoPath, fieldOrArrayAccess{field: f})
					// f.Message() is nil for scalar fields, which ends the
					// descriptor traversal.
					currentDescriptor = f.Message()
					lastFieldDescriptor = f
					found = true
					break
				}
			}
			if !found {
				return nil, fmt.Errorf("field %s not found in type %s", segment, currentDescriptor.Name())
			}
		}
		subs[name] = substitution{protoPath, lastFieldDescriptor.Kind()}
	}
	return subs, nil
}
// CompileTemplate compiles the JSON template js against the prototype
// message: it extracts the substitution markers, resolves them against the
// message descriptor, and unmarshals the pruned JSON into a clone of the
// prototype.
func (tc *TemplateCompiler) CompileTemplate(message proto.Message, js string) (*Template, error) {
	paths, prunedJson, err := templatePaths(js)
	if err != nil {
		return nil, err
	}
	subs, err := tc.createSubstitutions(paths, message.ProtoReflect().Descriptor())
	if err != nil {
		// Previously this error was silently overwritten by the Unmarshal
		// below, masking invalid substitution paths.
		return nil, err
	}
	cloned := proto.Clone(message)
	if err := protojson.Unmarshal([]byte(prunedJson), cloned); err != nil {
		return nil, err
	}
	return &Template{subs: subs, partialMessage: cloned}, nil
}
|
package helpers
import "golang.org/x/crypto/bcrypt"
// PasswordHash hashes password with bcrypt and returns the hash string.
// SECURITY: the previous version used bcrypt.MinCost (4), which is far too
// cheap for real password storage; DefaultCost (10) is the library's
// recommended baseline. Existing MinCost hashes keep validating because
// bcrypt embeds the cost factor inside the hash itself.
func PasswordHash(password string) (string, error) {
	hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	return string(hash), err
}
// ValidateHash reports whether secret matches the bcrypt hash.
func ValidateHash(secret, hash string) bool {
	return bcrypt.CompareHashAndPassword([]byte(hash), []byte(secret)) == nil
}
|
package config
import (
	"fmt"

	"github.com/hashicorp/vault/api"
	"github.com/mitchellh/mapstructure"
)
// Vault looks up application Config stored in HashiCorp Vault, using either
// AppRole credentials or a pre-issued token.
type Vault interface {
	LookupWithAppRole(roleID, secretID, path string) (*Config, error)
	LookupWithToken(token, path string) (*Config, error)
}

// vault is the api.Client-backed implementation of Vault.
type vault struct {
	client *api.Client
}
// NewVault builds a Vault client from the environment (VAULT_* variables),
// with vaultAddr and caCertFile overriding the address and TLS CA when
// non-empty.
func NewVault(vaultAddr, caCertFile string) (Vault, error) {
	cfg := api.DefaultConfig()
	if err := cfg.ReadEnvironment(); err != nil {
		return nil, err
	}
	// Explicit arguments win over environment-derived settings.
	if vaultAddr != "" {
		cfg.Address = vaultAddr
	}
	if caCertFile != "" {
		t := &api.TLSConfig{CACert: caCertFile}
		if err := cfg.ConfigureTLS(t); err != nil {
			return nil, err
		}
	}
	client, err := api.NewClient(cfg)
	if err != nil {
		return nil, err
	}
	return &vault{client}, nil
}
// LookupWithAppRole logs in via the AppRole auth backend and then reads the
// config at path using the returned client token.
func (v *vault) LookupWithAppRole(roleID, secretID, path string) (*Config, error) {
	body := map[string]interface{}{
		"role_id":   roleID,
		"secret_id": secretID,
	}
	secret, err := v.client.Logical().Write("auth/approle/login", body)
	if err != nil {
		return nil, err
	}
	// Guard against a missing auth block: the previous version dereferenced
	// secret.Auth unconditionally and panicked when login yielded no token.
	if secret == nil || secret.Auth == nil {
		return nil, fmt.Errorf("approle login returned no auth data for role %q", roleID)
	}
	return v.LookupWithToken(secret.Auth.ClientToken, path)
}
// LookupWithToken reads the config stored at path. A non-empty token is set
// on the client first; otherwise whatever token the client already carries
// is used.
func (v *vault) LookupWithToken(token, path string) (*Config, error) {
	if token != "" {
		v.client.SetToken(token)
	}
	secret, err := v.client.Logical().Read(path)
	if err != nil {
		return nil, err
	}
	// Logical().Read returns (nil, nil) when nothing lives at path; the
	// previous version fell through to secret.Data and panicked.
	if secret == nil {
		return nil, fmt.Errorf("no secret found at %q", path)
	}
	var cfg Config
	if err = mapstructure.Decode(secret.Data, &cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}
|
package runner
import (
"bytes"
"flag"
"testing"
)
// TestProcessChunks verifies that the reader's contents are split into the
// expected number of chunks for the configured chunk size.
func TestProcessChunks(t *testing.T) {
	// Fail loudly if the flag does not exist instead of silently testing
	// with the default chunk size.
	if err := flag.Set("log_chunk_size", "3"); err != nil {
		t.Fatalf("setting log_chunk_size: %v", err)
	}
	in := []byte("aaa\naaa\naaa\n")
	ch := make(chan LogChunk)
	go func() {
		processChunks(ch, bytes.NewReader(in))
		close(ch)
	}()
	cnt := 0
	for range ch { // idiomatic form of `for _ = range ch`
		cnt++
	}
	if cnt != 3 {
		t.Errorf("got %d chunks, want 3", cnt)
	}
}
// TestProcessMessage verifies that a message is emitted as a single chunk
// carrying the original payload.
func TestProcessMessage(t *testing.T) {
	in := "aaa\naaa\naaa\n"
	ch := make(chan LogChunk)
	go func() {
		processMessage(ch, in)
		close(ch)
	}()
	var out []LogChunk
	for c := range ch {
		out = append(out, c)
	}
	if len(out) != 1 {
		// Fatal, not Fail: indexing out[0] below would panic on an empty slice.
		t.Fatalf("got %d chunks, want 1", len(out))
	}
	// The original condition failed the test when the payload DID equal the
	// input — i.e. exactly when processMessage works. Presumably the
	// comparison was inverted by mistake; assert equality instead.
	if !bytes.Equal(out[0].Payload, []byte(in)) {
		t.Errorf("payload = %q, want %q", out[0].Payload, in)
	}
}
|
package models
// LoginCredentials carries the email/password pair submitted at login.
type LoginCredentials struct {
	Email    string `json:"email"`
	Password string `json:"password"`
}
|
package main
import (
"testing"
)
// TestBitSet_Has exercises Add/Del and checks membership for values both
// below and above the first machine-word boundary (the 100+ group).
func TestBitSet_Has(t *testing.T) {
	bs := NewBitSet()
	bs.Add(1)
	bs.Add(3)
	bs.Add(5)
	bs.Add(7)
	bs.Add(100)
	bs.Add(101)
	bs.Add(102)
	bs.Add(103)
	bs.Add(104)
	bs.Add(105)
	bs.Add(106)
	bs.Del(101)
	bs.Del(103)
	bs.Del(105)
	tests := []struct {
		val  int
		want bool
	}{
		{0, false}, {1, true}, {2, false}, {3, true}, {4, false}, {5, true}, {6, false}, {7, true},
		{8, false}, {9, false},
		{100, true}, {101, false}, {102, true}, {103, false}, {104, true}, {105, false}, {106, true}, {107, false},
	}
	for _, tt := range tests {
		if got := bs.Has(tt.val); got != tt.want {
			t.Errorf("BitSet.Has(%d) = %v, want %v", tt.val, got, tt.want)
		}
	}
}
// TestBitSet_Len verifies Len tracks Add/Del, including deleting an absent
// value (no-op) and adding a far-out value (growth).
func TestBitSet_Len(t *testing.T) {
	bs := NewBitSet()
	check := func(want int) {
		if got := bs.Len(); got != want {
			t.Errorf("BitSet.Len() = %v, want %v", got, want)
		}
	}
	check(0)
	bs.Add(123)
	check(1)
	bs.Add(8)
	check(2)
	bs.Del(7) // not present: length unchanged
	check(2)
	bs.Del(123)
	check(1)
	bs.Add(9999)
	check(2)
}
// TestBitSet_MinNotExistsFrom verifies the smallest absent value search,
// both on an empty set and across a filled [30,70] run.
func TestBitSet_MinNotExistsFrom(t *testing.T) {
	bs := NewBitSet()
	expect := func(from, want int) {
		if got := bs.MinNotExistsFrom(from); got != want {
			t.Errorf("BitSet.MinNotExistsFrom(%d) = %v, want %v", from, got, want)
		}
	}
	// Empty set: every starting point is itself absent.
	expect(0, 0)
	expect(1, 1)
	expect(123, 123)
	for i := 30; i <= 70; i++ {
		bs.Add(i)
	}
	// Starting points outside the filled run are unchanged; a start inside
	// the run skips to just past its end.
	expect(0, 0)
	expect(1, 1)
	expect(33, 71)
	expect(123, 123)
}
|
package info
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"github.com/golang/glog"
"github.com/julienschmidt/httprouter"
"github.com/prebid/prebid-server/config"
"github.com/prebid/prebid-server/openrtb_ext"
)
// Bidder status labels reported by the /info/bidders endpoints.
const (
	statusActive   string = "ACTIVE"
	statusDisabled string = "DISABLED"
)
// NewBiddersDetailEndpoint builds a handler for the /info/bidders/<bidder> endpoint.
// All per-bidder responses (plus the aggregate "all" entry) are marshalled
// once at startup; the handler only does a map lookup per request.
func NewBiddersDetailEndpoint(bidders config.BidderInfos, aliases map[string]string) httprouter.Handle {
	responses, err := prepareBiddersDetailResponse(bidders, aliases)
	if err != nil {
		// Config-time failure: abort startup rather than serve bad data.
		glog.Fatalf("error creating /info/bidders/<bidder> endpoint response: %v", err)
	}
	return func(w http.ResponseWriter, _ *http.Request, ps httprouter.Params) {
		bidder := ps.ByName("bidderName")
		if response, ok := responses[bidder]; ok {
			w.Header().Set("Content-Type", "application/json")
			if _, err := w.Write(response); err != nil {
				glog.Errorf("error writing response to /info/bidders/%s: %v", bidder, err)
			}
		} else {
			w.WriteHeader(http.StatusNotFound)
		}
	}
}
// prepareBiddersDetailResponse precomputes the JSON payload for each bidder
// plus the aggregate "all" payload.
func prepareBiddersDetailResponse(bidders config.BidderInfos, aliases map[string]string) (map[string][]byte, error) {
	details, err := mapDetails(bidders, aliases)
	if err != nil {
		return nil, err
	}
	responses, err := marshalDetailsResponse(details)
	if err != nil {
		return nil, err
	}
	allResponse, err := marshalAllResponse(responses)
	if err != nil {
		return nil, err
	}
	responses["all"] = allResponse
	return responses, nil
}
// mapDetails builds a bidderDetail for every configured bidder, then adds
// alias entries that copy their base adapter's detail with AliasOf set.
func mapDetails(bidders config.BidderInfos, aliases map[string]string) (map[string]bidderDetail, error) {
	details := map[string]bidderDetail{}
	for name, info := range bidders {
		details[name] = mapDetailFromConfig(info)
	}
	for alias, base := range aliases {
		baseDetail, ok := details[base]
		if !ok {
			return nil, fmt.Errorf("base adapter %s for alias %s not found", base, alias)
		}
		aliasDetail := baseDetail
		aliasDetail.AliasOf = base
		details[alias] = aliasDetail
	}
	return details, nil
}
// marshalDetailsResponse JSON-encodes each bidder's detail, keyed by bidder name.
func marshalDetailsResponse(details map[string]bidderDetail) (map[string][]byte, error) {
	responses := map[string][]byte{}
	for bidder, detail := range details {
		// Renamed from "json", which shadowed the encoding/json import
		// inside the loop.
		detailJSON, err := json.Marshal(detail)
		if err != nil {
			return nil, fmt.Errorf("unable to marshal info for bidder %s: %v", bidder, err)
		}
		responses[bidder] = detailJSON
	}
	return responses, nil
}
// marshalAllResponse composes the aggregate "all" payload: a JSON object
// embedding every pre-marshalled per-bidder payload verbatim.
func marshalAllResponse(responses map[string][]byte) ([]byte, error) {
	responsesJSON := make(map[string]json.RawMessage, len(responses))
	for k, v := range responses {
		responsesJSON[k] = json.RawMessage(v)
	}
	// Renamed from "json", which shadowed the encoding/json import.
	allJSON, err := json.Marshal(responsesJSON)
	if err != nil {
		return nil, fmt.Errorf("unable to marshal info for bidder all: %v", err)
	}
	return allJSON, nil
}
// bidderDetail is the JSON shape served for one bidder by /info/bidders/<bidder>.
type bidderDetail struct {
	Status       string        `json:"status"`
	UsesHTTPS    *bool         `json:"usesHttps,omitempty"`
	Maintainer   *maintainer   `json:"maintainer,omitempty"`
	Capabilities *capabilities `json:"capabilities,omitempty"`
	AliasOf      string        `json:"aliasOf,omitempty"`
}

// maintainer identifies the adapter's contact address.
type maintainer struct {
	Email string `json:"email"`
}

// capabilities lists the media types supported per platform.
type capabilities struct {
	App  *platform `json:"app,omitempty"`
	Site *platform `json:"site,omitempty"`
}

// platform holds the media types supported on one platform.
type platform struct {
	MediaTypes []string `json:"mediaTypes"`
}
// mapDetailFromConfig converts one bidder's configuration into the public
// bidderDetail shape. Disabled bidders expose only their status; enabled
// bidders also report HTTPS usage and capabilities.
func mapDetailFromConfig(c config.BidderInfo) bidderDetail {
	var bidderDetail bidderDetail
	if c.Maintainer != nil {
		bidderDetail.Maintainer = &maintainer{
			Email: c.Maintainer.Email,
		}
	}
	if c.IsEnabled() {
		bidderDetail.Status = statusActive
		// Derived from the endpoint scheme rather than configured directly.
		usesHTTPS := strings.HasPrefix(strings.ToLower(c.Endpoint), "https://")
		bidderDetail.UsesHTTPS = &usesHTTPS
		if c.Capabilities != nil {
			bidderDetail.Capabilities = &capabilities{}
			if c.Capabilities.App != nil {
				bidderDetail.Capabilities.App = &platform{
					MediaTypes: mapMediaTypes(c.Capabilities.App.MediaTypes),
				}
			}
			if c.Capabilities.Site != nil {
				bidderDetail.Capabilities.Site = &platform{
					MediaTypes: mapMediaTypes(c.Capabilities.Site.MediaTypes),
				}
			}
		}
	} else {
		bidderDetail.Status = statusDisabled
	}
	return bidderDetail
}
// mapMediaTypes converts the typed media-type list to plain strings.
func mapMediaTypes(m []openrtb_ext.BidType) []string {
	out := make([]string, 0, len(m))
	for _, mt := range m {
		out = append(out, string(mt))
	}
	return out
}
|
package appendOnly_test
import (
"bytes"
"github.com/SarthakMakhija/basics-of-database-design/kv/appendOnly"
"os"
"testing"
)
// TestCreatesANewFile checks CreateOrOpenReadWrite creates the file under
// the requested name.
func TestCreatesANewFile(t *testing.T) {
	fileIO := appendOnly.NewFileIO()
	fileName := "./kv.test"
	fileIO.CreateOrOpenReadWrite(fileName)
	defer deleteFile(fileName)
	if fileIO.File.Name() != fileName {
		t.Fatalf("Expected file to be created with name %v but received %v", fileName, fileIO.File.Name())
	}
}
// TestCanNotCreatesANewFileGivenItIsADirectory checks that creating over a
// directory path ("/") records an error on the FileIO.
// (deleteFile("/") is expected to be a harmless failed removal.)
func TestCanNotCreatesANewFileGivenItIsADirectory(t *testing.T) {
	fileIO := appendOnly.NewFileIO()
	fileName := "/"
	fileIO.CreateOrOpenReadWrite(fileName)
	defer deleteFile(fileName)
	if fileIO.Err == nil {
		t.Fatalf("Expected error to be found while creating a directory instead of file but received no error")
	}
}
// TestOpensANewFile checks that an existing file opens without error.
func TestOpensANewFile(t *testing.T) {
	fileIO := appendOnly.NewFileIO()
	fileName := "./kv.test"
	defer deleteFile(fileName)
	fileIO.CreateOrOpenReadWrite(fileName)
	fileIO.Open(fileName, os.O_RDWR, 0600)
	if fileIO.Err != nil {
		t.Fatalf("Expected not error to be found while opening a file but received %v", fileIO.Err)
	}
}
// TestDoesNotOpenANonExistentNewFile checks that opening a missing file
// records an error.
func TestDoesNotOpenANonExistentNewFile(t *testing.T) {
	fileIO := appendOnly.NewFileIO()
	fileName := "./kv.test"
	defer deleteFile(fileName)
	fileIO.Open(fileName, os.O_RDWR, 0600)
	if fileIO.Err == nil {
		t.Fatalf("Expected error to be found while opening a non existent file but received no error")
	}
}
// TestMemoryMapsAFile writes 5 bytes, re-opens read-only and checks the
// mmap'd view reflects the file contents.
func TestMemoryMapsAFile(t *testing.T) {
	fileIO := appendOnly.NewFileIO()
	fileName := "./kv.test"
	defer deleteFile(fileName)
	fileIO.CreateOrOpenReadWrite(fileName)
	fileIO.Open(fileName, os.O_RDWR, 0600)
	content := []byte{'h', 'e', 'l', 'l', 'o'}
	// Write errors deliberately ignored; the mmap comparison below would
	// catch a failed write anyway.
	_, _ = fileIO.File.WriteAt(content, 0)
	fileIO.Open(fileName, os.O_RDWR, 0400)
	mappedBytes, _ := fileIO.Mmap(fileIO.File, 5)
	if !bytes.Equal(content, mappedBytes) {
		t.Fatalf("Expected %v, received %v", content, mappedBytes)
	}
}
// TestResizesAFileOnMemoryMap checks that mmap'ing an empty file to 5 bytes
// grows the file to that size.
func TestResizesAFileOnMemoryMap(t *testing.T) {
	fileIO := appendOnly.NewFileIO()
	fileName := "./kv.test"
	defer deleteFile(fileName)
	fileIO.CreateOrOpenReadWrite(fileName)
	fileIO.Mmap(fileIO.File, 5)
	size := fileIO.FileSize(fileName)
	if size != 5 {
		t.Fatalf("Expected resized file to be of size %v, received %v", 5, size)
	}
}
// TestDoesNotMemoryMapANonExistentFile checks mmap over a failed open
// (File presumably nil) yields no mapped bytes.
func TestDoesNotMemoryMapANonExistentFile(t *testing.T) {
	fileIO := appendOnly.NewFileIO()
	fileName := "./kv.test"
	defer deleteFile(fileName)
	fileIO.Open(fileName, os.O_RDWR, 0600)
	mappedBytes, _ := fileIO.Mmap(fileIO.File, 5)
	if mappedBytes != nil {
		t.Fatalf("Expected %v, received %v", nil, mappedBytes)
	}
}
func TestUnMapsAFile(t *testing.T) {
fileIO := appendOnly.NewFileIO()
fileName := "./kv.test"
defer deleteFile(fileName)
fileIO.CreateOrOpenReadWrite(fileName)
fileIO.Open(fileName, os.O_RDWR, 0600)
content := []byte{'h', 'e', 'l', 'l', 'o'}
_, _ = fileIO.File.WriteAt(content, 0)
fileIO.Open(fileName, os.O_RDWR, 0400)
mappedBytes, _ := fileIO.Mmap(fileIO.File, 5)
fileIO.Munmap(mappedBytes)
if fileIO.Err != nil {
t.Fatalf("Expected no error while unmapping but received %v", fileIO.Err)
}
}
func TestDoesNotUnMapANonExistentFile(t *testing.T) {
fileIO := appendOnly.NewFileIO()
fileName := "./kv.test"
defer deleteFile(fileName)
fileIO.Open(fileName, os.O_RDWR, 0600)
fileIO.Munmap([]byte{'a', 'b'})
if fileIO.Err == nil {
t.Fatalf("Expected error while unmapping but received none")
}
}
func TestReturnsTheFileSize(t *testing.T) {
fileIO := appendOnly.NewFileIO()
fileName := "./kv.test"
defer deleteFile(fileName)
fileIO.CreateOrOpenReadWrite(fileName)
size := fileIO.FileSize(fileName)
if size != 0 {
t.Fatalf("Expected %v, received %v", 0, size)
}
}
func TestDoesNotReturnTheFileSizeOfDirectory(t *testing.T) {
fileIO := appendOnly.NewFileIO()
fileName := "/"
defer deleteFile(fileName)
fileIO.CreateOrOpenReadWrite(fileName)
size := fileIO.FileSize(fileName)
if size != -1 {
t.Fatalf("Expected %v, received %v", -1, size)
}
}
// TestOpensANewFileForReading verifies that an existing file can be opened
// read-only without FileIO recording an error.
func TestOpensANewFileForReading(t *testing.T) {
	fileIO := appendOnly.NewFileIO()
	fileName := "./kv.test"
	defer deleteFile(fileName)
	fileIO.CreateOrOpenReadWrite(fileName)
	fileIO.OpenReadOnly(fileName)
	if fileIO.Err != nil {
		// Fixed failure message: previously read "Expected not error".
		t.Fatalf("Expected no error to be found while opening a file for reading but received %v", fileIO.Err)
	}
}
func TestDoesNotOpenANonExistentNewFileForReading(t *testing.T) {
fileIO := appendOnly.NewFileIO()
fileName := "./kv.test"
defer deleteFile(fileName)
fileIO.OpenReadOnly(fileName)
if fileIO.Err == nil {
t.Fatalf("Expected error to be found while opening a non existent file for reading but received no error")
}
}
|
// Copyright 2021 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kernel
import (
"bytes"
"fmt"
"sort"
"gvisor.dev/gvisor/pkg/atomicbitops"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/vfs"
)
// InvalidCgroupHierarchyID indicates an uninitialized hierarchy ID. Valid
// hierarchy IDs start at 1 (see nextHierarchyID).
const InvalidCgroupHierarchyID uint32 = 0
// InvalidCgroupID indicates an uninitialized cgroup ID. Valid cgroup IDs
// start at 1 (see NextCgroupID).
const InvalidCgroupID uint32 = 0
// CgroupControllerType is the name of a cgroup controller.
type CgroupControllerType string

// Available cgroup controllers.
const (
	CgroupControllerCPU     = CgroupControllerType("cpu")
	CgroupControllerCPUAcct = CgroupControllerType("cpuacct")
	CgroupControllerCPUSet  = CgroupControllerType("cpuset")
	CgroupControllerDevices = CgroupControllerType("devices")
	CgroupControllerJob     = CgroupControllerType("job")
	CgroupControllerMemory  = CgroupControllerType("memory")
	CgroupControllerPIDs    = CgroupControllerType("pids")
)

// controllersByName maps controller names to their typed constants, backing
// ParseCgroupController lookups.
var controllersByName = map[string]CgroupControllerType{
	"cpu":     CgroupControllerCPU,
	"cpuacct": CgroupControllerCPUAcct,
	"cpuset":  CgroupControllerCPUSet,
	"devices": CgroupControllerDevices,
	"job":     CgroupControllerJob,
	"memory":  CgroupControllerMemory,
	"pids":    CgroupControllerPIDs,
}

// ParseCgroupController parses a string as a CgroupControllerType.
func ParseCgroupController(val string) (CgroupControllerType, error) {
	if ctl, ok := controllersByName[val]; ok {
		return ctl, nil
	}
	return "", fmt.Errorf("no such cgroup controller")
}
// CgroupResourceType represents a resource type tracked by a particular
// controller.
type CgroupResourceType int
// Resources for the cpuacct controller.
// NOTE(review): the grouping comment above says "cpuacct" while the only
// resource here charges pids.current — confirm the intended controller.
const (
	// CgroupResourcePID represents a charge for pids.current.
	CgroupResourcePID CgroupResourceType = iota
)
// CgroupController is the common interface to cgroup controllers available to
// the entire sentry. The controllers themselves are defined by cgroupfs.
//
// Callers of this interface are often unable to access the synchronization
// needed to ensure returned values remain valid. Some of the values returned
// from this interface are thus snapshots in time, and may become stale. This
// is ok for many callers like procfs.
type CgroupController interface {
	// Type returns the type of this cgroup controller (ex "memory", "cpu").
	// Returned value is valid for the lifetime of the controller.
	Type() CgroupControllerType
	// HierarchyID returns the ID of the hierarchy this cgroup controller is
	// attached to. Returned value is valid for the lifetime of the controller.
	HierarchyID() uint32
	// EffectiveRootCgroup returns the effective root cgroup for this
	// controller. This is either the actual root of the underlying cgroupfs
	// filesystem, or the override root configured at sandbox startup. Returned
	// value is valid for the lifetime of the controller.
	EffectiveRootCgroup() Cgroup
	// NumCgroups returns the number of cgroups managed by this controller.
	// Returned value is a snapshot in time.
	NumCgroups() uint64
	// Enabled returns whether this controller is enabled. Returned value is a
	// snapshot in time.
	Enabled() bool
}
// Cgroup represents a named pointer to a cgroup in cgroupfs. When a task enters
// a cgroup, it holds a reference on the underlying dentry pointing to the
// cgroup.
//
// +stateify savable
type Cgroup struct {
	// Dentry is the cgroupfs dentry for this cgroup; it carries the
	// reference described above.
	*kernfs.Dentry
	// CgroupImpl provides the controller-specific cgroup operations.
	CgroupImpl
}
// decRef drops a reference on the cgroup. This must happen outside a Task.mu
// critical section.
func (c *Cgroup) decRef() {
	ctx := context.Background()
	c.Dentry.DecRef(ctx)
}
// Path returns the absolute path of c, relative to its hierarchy root.
func (c *Cgroup) Path() string {
	// Delegate to the dentry's filesystem-local path.
	path := c.FSLocalPath()
	return path
}
// Walk returns the cgroup at p, starting from c.
func (c *Cgroup) Walk(ctx context.Context, vfsObj *vfs.VirtualFilesystem, p fspath.Path) (Cgroup, error) {
	d, err := c.Dentry.WalkDentryTree(ctx, vfsObj, p)
	if err != nil {
		return Cgroup{}, err
	}
	// The dentry's inode is the controller-specific cgroup implementation.
	impl := d.Inode().(CgroupImpl)
	return Cgroup{Dentry: d, CgroupImpl: impl}, nil
}
// CgroupMigrationContext represents an in-flight cgroup migration for
// a single task.
type CgroupMigrationContext struct {
	src Cgroup // cgroup the task is moving out of
	dst Cgroup // cgroup the task is moving into
	t   *Task  // task being migrated
}
// Abort cancels a migration.
func (ctx *CgroupMigrationContext) Abort() {
	ctx.dst.AbortMigrate(ctx.t, &ctx.src)
}
// Commit completes a migration: it finalizes the controller-side migration,
// then swaps the task's cgroup-set membership from src to dst, moving the
// reference counts accordingly.
func (ctx *CgroupMigrationContext) Commit() {
	ctx.dst.CommitMigrate(ctx.t, &ctx.src)
	// Swap src for dst in the task's cgroup set under Task.mu.
	ctx.t.mu.Lock()
	delete(ctx.t.cgroups, ctx.src)
	ctx.src.DecRef(ctx.t)
	ctx.dst.IncRef()
	ctx.t.cgroups[ctx.dst] = struct{}{}
	ctx.t.mu.Unlock()
}
// CgroupImpl is the common interface to cgroups.
type CgroupImpl interface {
	// Controllers lists the controllers associated with this cgroup.
	Controllers() []CgroupController
	// HierarchyID returns the id of the hierarchy that contains this cgroup.
	HierarchyID() uint32
	// Name returns the name for this cgroup, if any. If no name was provided
	// when the hierarchy was created, returns "".
	Name() string
	// Enter moves t into this cgroup.
	Enter(t *Task)
	// Leave moves t out of this cgroup.
	Leave(t *Task)
	// PrepareMigrate initiates a migration of t from src to this cgroup. See
	// cgroupfs.controller.PrepareMigrate.
	PrepareMigrate(t *Task, src *Cgroup) error
	// CommitMigrate completes an in-flight migration. See
	// cgroupfs.controller.CommitMigrate.
	CommitMigrate(t *Task, src *Cgroup)
	// AbortMigrate cancels an in-flight migration. See
	// cgroupfs.controller.AbortMigrate.
	AbortMigrate(t *Task, src *Cgroup)
	// Charge charges a controller in this cgroup for a particular resource.
	// res must match a valid resource for the specified controller type.
	//
	// The implementer should silently succeed if no matching controllers are
	// found.
	//
	// The underlying implementation will panic if passed an incompatible
	// resource type for a given controller.
	//
	// See cgroupfs.controller.Charge.
	Charge(t *Task, d *kernfs.Dentry, ctl CgroupControllerType, res CgroupResourceType, value int64) error
	// ReadControl allows a background context to read a cgroup's control
	// values.
	ReadControl(ctx context.Context, name string) (string, error)
	// WriteControl allows a background context to write a cgroup's control
	// values.
	WriteControl(ctx context.Context, name string, val string) error
	// ID returns the id of this cgroup.
	ID() uint32
}
// hierarchy represents a cgroupfs filesystem instance, with a unique set of
// controllers attached to it. Multiple cgroupfs mounts may reference the same
// hierarchy.
//
// +stateify savable
type hierarchy struct {
	// id is the unique ID of this hierarchy, allocated by nextHierarchyID.
	id uint32
	// name is the optional name of this hierarchy; "" if unnamed.
	name string
	// These are a subset of the controllers in CgroupRegistry.controllers,
	// grouped here by hierarchy for convenient lookup.
	controllers map[CgroupControllerType]CgroupController
	// fs is not owned by hierarchy. The FS is responsible for unregistering the
	// hierarchy on destruction, which removes this association.
	fs *vfs.Filesystem
}
// match reports whether h is attached to exactly the set of controllers named
// in ctypes — no more, no fewer.
func (h *hierarchy) match(ctypes []CgroupControllerType) bool {
	if len(h.controllers) != len(ctypes) {
		return false
	}
	for _, ty := range ctypes {
		_, attached := h.controllers[ty]
		if !attached {
			return false
		}
	}
	return true
}
// cgroupFS is the public interface to cgroupfs. This lets the kernel package
// refer to cgroupfs.filesystem methods without directly depending on the
// cgroupfs package, which would lead to a circular dependency.
type cgroupFS interface {
	// VFSFilesystem returns the vfs.Filesystem for the cgroupfs.
	VFSFilesystem() *vfs.Filesystem
	// InitializeHierarchyID sets the hierarchy ID for this filesystem during
	// filesystem creation. May only be called before the filesystem is visible
	// to the vfs layer.
	InitializeHierarchyID(hid uint32)
	// RootCgroup returns the root cgroup of this instance. This returns the
	// actual root, and ignores any overrides setting an effective root.
	RootCgroup() Cgroup
}
// CgroupRegistry tracks the active set of cgroup controllers on the system.
//
// +stateify savable
type CgroupRegistry struct {
	// lastHierarchyID is the id of the last allocated cgroup hierarchy. Valid
	// ids are from 1 to math.MaxUint32. Accessed atomically; not guarded by mu.
	//
	lastHierarchyID atomicbitops.Uint32
	// lastCgroupID is the id of the last allocated cgroup. Valid ids are
	// from 1 to math.MaxUint32. Accessed atomically; not guarded by mu.
	//
	lastCgroupID atomicbitops.Uint32
	// mu guards the maps below.
	mu cgroupMutex `state:"nosave"`
	// controllers is the set of currently known cgroup controllers on the
	// system.
	//
	// +checklocks:mu
	controllers map[CgroupControllerType]CgroupController
	// hierarchies is the active set of cgroup hierarchies, keyed by hierarchy
	// ID. This contains all hierarchies on the system.
	//
	// +checklocks:mu
	hierarchies map[uint32]hierarchy
	// hierarchiesByName is a map of named hierarchies. Only named hierarchies
	// are tracked on this map.
	//
	// +checklocks:mu
	hierarchiesByName map[string]hierarchy
	// cgroups is the active set of cgroups, keyed by cgroup ID. This contains
	// all the cgroups on the system.
	//
	// +checklocks:mu
	cgroups map[uint32]CgroupImpl
}
// newCgroupRegistry returns an empty CgroupRegistry with all internal maps
// initialized and ready for use.
func newCgroupRegistry() *CgroupRegistry {
	r := &CgroupRegistry{}
	r.controllers = make(map[CgroupControllerType]CgroupController)
	r.hierarchies = make(map[uint32]hierarchy)
	r.hierarchiesByName = make(map[string]hierarchy)
	r.cgroups = make(map[uint32]CgroupImpl)
	return r
}
// nextHierarchyID returns a newly allocated, unique hierarchy ID.
func (r *CgroupRegistry) nextHierarchyID() (uint32, error) {
	hid := r.lastHierarchyID.Add(1)
	if hid == 0 {
		// The counter wrapped around: IDs are exhausted.
		return InvalidCgroupHierarchyID, fmt.Errorf("cgroup hierarchy ID overflow")
	}
	return hid, nil
}
// FindHierarchy returns a cgroup filesystem containing exactly the set of
// controllers named in ctypes, and optionally the name specified in name if it
// isn't empty. If no such FS is found, FindHierarchy returns nil. FindHierarchy
// takes a reference on the returned FS, which is transferred to the caller.
func (r *CgroupRegistry) FindHierarchy(name string, ctypes []CgroupControllerType) (*vfs.Filesystem, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	// If we have a hierarchy name, lookup by name.
	if name != "" {
		h, ok := r.hierarchiesByName[name]
		if !ok {
			// Name not found.
			return nil, nil
		}
		if h.match(ctypes) {
			if !h.fs.TryIncRef() {
				// May be racing with filesystem destruction, see the comment
				// on the unnamed-lookup path below for the full rationale.
				r.unregisterLocked(h.id)
				return nil, nil
			}
			return h.fs, nil
		}
		// Name matched, but controllers didn't. Fail per linux
		// kernel/cgroup.c:cgroup_mount().
		log.Debugf("cgroupfs: Registry lookup for name=%s controllers=%v failed; named matched but controllers didn't (have controllers=%v)", name, ctypes, h.controllers)
		return nil, linuxerr.EBUSY
	}
	for _, h := range r.hierarchies {
		if h.match(ctypes) {
			if !h.fs.TryIncRef() {
				// Racing with filesystem destruction, namely h.fs.Release.
				// Since we hold r.mu, we know the hierarchy hasn't been
				// unregistered yet, but its associated filesystem is tearing
				// down.
				//
				// If we simply indicate the hierarchy wasn't found without
				// cleaning up the registry, the caller can race with the
				// unregister and find itself temporarily unable to create a new
				// hierarchy with a subset of the relevant controllers.
				//
				// To keep the result of FindHierarchy consistent with the
				// uniqueness of controllers enforced by Register, drop the
				// dying hierarchy now. The eventual unregister by the FS
				// teardown will become a no-op.
				r.unregisterLocked(h.id)
				return nil, nil
			}
			return h.fs, nil
		}
	}
	return nil, nil
}
// FindCgroup locates a cgroup with the given parameters.
//
// A cgroup is considered a match even if it contains other controllers on the
// same hierarchy.
func (r *CgroupRegistry) FindCgroup(ctx context.Context, ctype CgroupControllerType, path string) (Cgroup, error) {
	p := fspath.Parse(path)
	if !p.Absolute {
		return Cgroup{}, fmt.Errorf("path must be absolute")
	}
	// Locate the hierarchy that hosts the requested controller.
	vfsfs, err := r.FindHierarchy("", []CgroupControllerType{ctype})
	if err != nil {
		return Cgroup{}, err
	}
	if vfsfs == nil {
		return Cgroup{}, fmt.Errorf("controller not active")
	}
	rootCG := vfsfs.Impl().(cgroupFS).RootCgroup()
	if !p.HasComponents() {
		// The path is exactly "/": the root cgroup itself.
		return rootCG, nil
	}
	k := KernelFromContext(ctx)
	return rootCG.Walk(ctx, k.VFS(), p)
}
// Register registers the provided set of controllers with the registry as a new
// hierarchy. If any controller is already registered, the function returns an
// error without modifying the registry. Register sets the hierarchy ID for the
// filesystem on success.
func (r *CgroupRegistry) Register(name string, cs []CgroupController, fs cgroupFS) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	// A hierarchy must be identifiable by a name, its controllers, or both.
	if name == "" && len(cs) == 0 {
		return fmt.Errorf("can't register hierarchy with both no controllers and no name")
	}
	// Each controller may be attached to at most one hierarchy.
	for _, c := range cs {
		if _, ok := r.controllers[c.Type()]; ok {
			return fmt.Errorf("controllers may only be mounted on a single hierarchy")
		}
	}
	// Named hierarchies must be unique by name.
	if _, ok := r.hierarchiesByName[name]; name != "" && ok {
		return fmt.Errorf("hierarchy named %q already exists", name)
	}
	hid, err := r.nextHierarchyID()
	if err != nil {
		return err
	}
	// Must not fail below here, once we publish the hierarchy ID.
	fs.InitializeHierarchyID(hid)
	h := hierarchy{
		id:          hid,
		name:        name,
		controllers: make(map[CgroupControllerType]CgroupController),
		fs:          fs.VFSFilesystem(),
	}
	// Publish the controllers both globally and on the new hierarchy.
	for _, c := range cs {
		n := c.Type()
		r.controllers[n] = c
		h.controllers[n] = c
	}
	r.hierarchies[hid] = h
	if name != "" {
		r.hierarchiesByName[name] = h
	}
	return nil
}
// Unregister removes a previously registered hierarchy from the registry. If no
// such hierarchy is registered, Unregister is a no-op.
func (r *CgroupRegistry) Unregister(hid uint32) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.unregisterLocked(hid)
}
// unregisterLocked removes the hierarchy identified by hid — along with its
// controllers and its name-index entry — from the registry. No-op if hid is
// unknown.
//
// Precondition: Caller must hold r.mu.
// +checklocks:r.mu
func (r *CgroupRegistry) unregisterLocked(hid uint32) {
	h, ok := r.hierarchies[hid]
	if !ok {
		return
	}
	for name := range h.controllers {
		delete(r.controllers, name)
	}
	delete(r.hierarchies, hid)
	// Previously the name index was never cleaned up, so a named hierarchy
	// stayed in hierarchiesByName forever: Register would reject reuse of the
	// name and FindHierarchy would keep matching a dead hierarchy.
	if h.name != "" {
		delete(r.hierarchiesByName, h.name)
	}
}
// computeInitialGroups takes a reference on each of the returned cgroups. The
// caller takes ownership of this returned reference. The result is the union
// of the inherited cgroups and the effective root cgroups of every controller
// not already covered by the inherited set.
func (r *CgroupRegistry) computeInitialGroups(inherit map[Cgroup]struct{}) map[Cgroup]struct{} {
	r.mu.Lock()
	defer r.mu.Unlock()
	ctlSet := make(map[CgroupControllerType]CgroupController)
	cgset := make(map[Cgroup]struct{})
	// Remember controllers from the inherited cgroups set...
	for cg := range inherit {
		cg.IncRef() // Ref transferred to caller.
		for _, ctl := range cg.Controllers() {
			ctlSet[ctl.Type()] = ctl
			cgset[cg] = struct{}{}
		}
	}
	// ... and add the root cgroups of all the missing controllers.
	for name, ctl := range r.controllers {
		if _, ok := ctlSet[name]; !ok {
			cg := ctl.EffectiveRootCgroup()
			// Multiple controllers may share the same hierarchy, so may have
			// the same root cgroup. Grab a single ref per hierarchy root.
			if _, ok := cgset[cg]; ok {
				continue
			}
			cg.IncRef() // Ref transferred to caller.
			cgset[cg] = struct{}{}
		}
	}
	return cgset
}
// GenerateProcCgroups writes the contents of /proc/cgroups to buf.
func (r *CgroupRegistry) GenerateProcCgroups(buf *bytes.Buffer) {
	// Snapshot one line per controller while holding the lock.
	r.mu.Lock()
	rows := make([]string, 0, len(r.controllers))
	for _, c := range r.controllers {
		enabled := 0
		if c.Enabled() {
			enabled = 1
		}
		rows = append(rows, fmt.Sprintf("%s\t%d\t%d\t%d\n", c.Type(), c.HierarchyID(), c.NumCgroups(), enabled))
	}
	r.mu.Unlock()

	// Sort and emit outside the critical section.
	sort.Strings(rows)
	fmt.Fprint(buf, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n")
	for _, row := range rows {
		fmt.Fprint(buf, row)
	}
}
// NextCgroupID returns a newly allocated, unique cgroup ID.
func (r *CgroupRegistry) NextCgroupID() (uint32, error) {
	cid := r.lastCgroupID.Add(1)
	if cid == 0 {
		// The counter wrapped around: IDs are exhausted.
		return InvalidCgroupID, fmt.Errorf("cgroup ID overflow")
	}
	return cid, nil
}
// AddCgroup records cg in the registry under its own ID.
func (r *CgroupRegistry) AddCgroup(cg CgroupImpl) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.cgroups[cg.ID()] = cg
}
// GetCgroup returns the cgroup associated with the cgroup ID, or an error if
// no cgroup with that ID is registered.
func (r *CgroupRegistry) GetCgroup(cid uint32) (CgroupImpl, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if cg, ok := r.cgroups[cid]; ok {
		return cg, nil
	}
	return nil, fmt.Errorf("cgroup with ID %d does not exist", cid)
}
|
package main
import (
"context"
"fmt"
"time"
log "github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
khjobv1 "github.com/kuberhealthy/kuberhealthy/v2/pkg/apis/khjob/v1"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
)
// ReapCheckerPods maps pod names to the completed checker pods most recently
// found by listCompletedCheckerPods.
var ReapCheckerPods map[string]v1.Pod
// Default values for reaper configurations.
const minKHJobAge = time.Minute * 5 // minimum age before a completed khjob may be reaped
const minCheckPodAge = time.Second * 30 // minimum age before a completed checker pod may be reaped
const checkReaperRunIntervalDefault = time.Second * 30 // default interval between reap passes
// KubernetesAPI wraps a Kubernetes client for the reaper's list/delete
// operations.
type KubernetesAPI struct {
	Client kubernetes.Interface
}
// parseDurationOrUseDefault parses a string duration into a time.Duration.
// An empty string yields defaultDuration. A parse failure yields
// defaultDuration along with the parse error. A parsed value of 0 is treated
// as invalid and also yields defaultDuration.
func parseDurationOrUseDefault(d string, defaultDuration time.Duration) (time.Duration, error) {
	if d == "" {
		return defaultDuration, nil
	}
	parsed, err := time.ParseDuration(d)
	if err != nil {
		return defaultDuration, err
	}
	if parsed != 0 {
		return parsed, nil
	}
	log.Errorln("checkReaper: duration value 0 is not valid")
	log.Infoln("checkReaper: Using default duration:", defaultDuration)
	return defaultDuration, nil
}
// reaper runs until the supplied context expires, reaping khjobs and khchecks
// once per run interval.
func reaper(ctx context.Context) {
	reaperRunInterval, err := parseDurationOrUseDefault(checkReaperRunInterval, checkReaperRunIntervalDefault)
	if err != nil {
		log.Errorln("checkReaper: Error occurred attempting to parse checkReaperRunInterval:", err)
		log.Infoln("checkReaper: Using default checkReaperRunInterval:", checkReaperRunIntervalDefault)
	}
	// Parse configs when reaper starts up.
	log.Infoln("checkReaper: starting up...")
	log.Infoln("checkReaper: run interval:", reaperRunInterval)
	log.Infoln("checkReaper: max khjob age:", cfg.MaxKHJobAge)
	log.Infoln("checkReaper: max khcheck pod age:", cfg.MaxCheckPodAge)
	log.Infoln("checkReaper: max completed check pod count:", cfg.MaxCompletedPodCount)
	log.Infoln("checkReaper: max error check pod count:", cfg.MaxErrorPodCount)
	// Clamp MaxCheckPodAge so a completed pod survives at least 30s before
	// getting reaped.
	if cfg.MaxCheckPodAge < minCheckPodAge {
		cfg.MaxCheckPodAge = minCheckPodAge
	}
	// Clamp MaxKHJobAge so a completed job survives at least 5m before
	// getting reaped.
	if cfg.MaxKHJobAge < minKHJobAge {
		cfg.MaxKHJobAge = minKHJobAge
	}
	// start a new ticker
	t := time.NewTicker(reaperRunInterval)
	defer t.Stop()
	// iterate until our context expires and run reaper operations
	keepGoing := true
	for keepGoing {
		<-t.C
		// Run one reap pass in a helper so the per-run timeout context is
		// canceled at the end of each iteration. The previous code deferred
		// the cancel inside this loop, accumulating uncanceled contexts (and
		// their timers) until reaper returned.
		runReapPass(ctx)
		// check if the parent context has expired
		select {
		case <-ctx.Done():
			log.Debugln("checkReaper: context has expired...")
			keepGoing = false
		default:
		}
	}
	log.Infoln("checkReaper: check reaper shutting down...")
}

// runReapPass performs a single check/job reap cycle bounded by a 3 minute
// timeout derived from ctx.
func runReapPass(ctx context.Context) {
	runCtx, runCtxCancel := context.WithTimeout(ctx, time.Minute*3)
	defer runCtxCancel()
	// run our check and job reapers
	runCheckReap(runCtx)
	runJobReap(runCtx)
}
// runCheckReap runs a process which locates checker pods that need reaping
// and reaps them.
func runCheckReap(ctx context.Context) {
	api := KubernetesAPI{Client: kubernetesClient}
	// list completed checker pods in the watched namespace
	pods, err := api.listCompletedCheckerPods(ctx, listenNamespace)
	if err != nil {
		log.Errorln("checkReaper: Failed to list and delete old checker pods", err)
	}
	if len(pods) == 0 {
		log.Infoln("checkReaper: No pods found that need reaped.")
		return
	}
	if err := api.deleteFilteredCheckerPods(ctx, kubernetesClient, pods); err != nil {
		log.Errorln("checkReaper: Error found while deleting old pods:", err)
	}
	log.Infoln("checkReaper: Finished reaping checker pods.")
}
// runJobReap runs a process to reap khjobs that need deleting (those that
// were created by a khjob).
func runJobReap(ctx context.Context) {
	jobClient, err := khjobv1.Client(cfg.kubeConfigFile)
	if err != nil {
		// Without a usable client there is nothing we can reap; bail out
		// instead of calling khJobDelete with a nil client as before.
		log.Errorln("checkReaper: Unable to create khJob client", err)
		return
	}
	log.Infoln("checkReaper: Beginning to search for khjobs.")
	// fetch and delete khjobs that meet criteria
	if err := khJobDelete(jobClient); err != nil {
		log.Errorln("checkReaper: Failed to reap khjobs with error: ", err)
	}
	log.Infoln("checkReaper: Finished reaping khjobs.")
}
// listCompletedCheckerPods returns a map of completed (Failed or Succeeded)
// pods carrying the khcheck name label, keyed by pod name. It also stores the
// result in the package-level ReapCheckerPods map.
func (k *KubernetesAPI) listCompletedCheckerPods(ctx context.Context, namespace string) (map[string]v1.Pod, error) {
	log.Infoln("checkReaper: Listing checker pods")
	ReapCheckerPods = make(map[string]v1.Pod)
	listOpts := metav1.ListOptions{LabelSelector: "kuberhealthy-check-name"}
	pods, err := k.Client.CoreV1().Pods(namespace).List(ctx, listOpts)
	if err != nil {
		log.Errorln("checkReaper: Failed to list checker pods")
		return ReapCheckerPods, err
	}
	log.Infoln("checkReaper: Found:", len(pods.Items), "checker pods")
	for _, pod := range pods.Items {
		switch pod.Status.Phase {
		case v1.PodSucceeded, v1.PodFailed:
			ReapCheckerPods[pod.Name] = pod
		}
	}
	return ReapCheckerPods, err
}
// deleteFilteredCheckerPods goes through the map of all checker pods and
// deletes checker pods that are too old, or that have too many more-recent
// completed siblings of the same khcheck.
//
// Returns the last pod deletion error encountered, or nil if every deletion
// succeeded. (The previous version shadowed its error variable inside the
// loop with `:=`, so it always returned nil regardless of failures.)
//
// The client parameter is currently unused but retained for interface
// compatibility with existing callers.
func (k *KubernetesAPI) deleteFilteredCheckerPods(ctx context.Context, client *kubernetes.Clientset, reapCheckerPods map[string]v1.Pod) error {
	var lastErr error
	for n, v := range reapCheckerPods {
		podTerminatedTime, err := getPodCompletedTime(v)
		if err != nil {
			log.Warnln(err)
			continue
		}
		// Delete pods older than MaxCheckPodAge that are in status Succeeded.
		if v.Status.Phase == v1.PodSucceeded && time.Since(podTerminatedTime) > cfg.MaxCheckPodAge {
			log.Infoln("checkReaper: Found completed pod older than:", cfg.MaxCheckPodAge, "in status `Succeeded`. Deleting pod:", n)
			if err := k.deletePod(ctx, v); err != nil {
				log.Errorln("checkReaper: Failed to delete pod:", n, err)
				lastErr = err
				continue
			}
			delete(reapCheckerPods, n)
		}
		// Delete failed pods (status Failed) older than MaxCheckPodAge.
		if v.Status.Phase == v1.PodFailed && time.Since(podTerminatedTime) > cfg.MaxCheckPodAge {
			log.Infoln("checkReaper: Found completed pod older than:", cfg.MaxCheckPodAge, "in status `Failed`. Deleting pod:", n)
			if err := k.deletePod(ctx, v); err != nil {
				log.Errorln("checkReaper: Failed to delete pod:", n, err)
				lastErr = err
				continue
			}
			delete(reapCheckerPods, n)
		}
		// Count completed siblings of this check, split by phase and by
		// whether they were created more recently than v.
		allCheckPods := getAllCompletedPodsWithCheckName(reapCheckerPods, v)
		if len(allCheckPods) > cfg.MaxCompletedPodCount {
			failOldCount := 0
			failCount := 0
			successOldCount := 0
			successCount := 0
			for _, p := range allCheckPods {
				sameNamespace := v.Namespace == p.Namespace
				newerThanV := v.CreationTimestamp.Time.Before(p.CreationTimestamp.Time)
				if p.Status.Phase != v1.PodSucceeded && sameNamespace {
					failCount++
					if newerThanV {
						failOldCount++
					}
				}
				if p.Status.Phase == v1.PodSucceeded && sameNamespace {
					successCount++
					if newerThanV {
						successOldCount++
					}
				}
			}
			// Delete if there are more than MaxCompletedPodCount checker pods
			// with the same name in status Succeeded that were created more
			// recently.
			if v.Status.Phase == v1.PodSucceeded && successOldCount >= cfg.MaxCompletedPodCount && successCount >= cfg.MaxCompletedPodCount {
				log.Infoln("checkReaper: Found more than", cfg.MaxCompletedPodCount, "checker pods with the same name in status `Succeeded` that were created more recently. Deleting pod:", n)
				if err := k.deletePod(ctx, v); err != nil {
					log.Errorln("checkReaper: Failed to delete pod:", n, err)
					lastErr = err
					continue
				}
				delete(reapCheckerPods, n)
			}
			// Delete if there are more than MaxErrorPodCount checker pods with
			// the same name in status Failed that were created more recently.
			if v.Status.Phase == v1.PodFailed && failOldCount >= cfg.MaxErrorPodCount && failCount >= cfg.MaxErrorPodCount {
				log.Infoln("checkReaper: Found more than", cfg.MaxErrorPodCount, "checker pods with the same name in status `Failed` that were created more recently. Deleting pod:", n)
				if err := k.deletePod(ctx, v); err != nil {
					log.Errorln("checkReaper: Failed to delete pod:", n, err)
					lastErr = err
					continue
				}
				delete(reapCheckerPods, n)
			}
		}
	}
	return lastErr
}
// getAllCompletedPodsWithCheckName finds all completed checker pods belonging
// to the same khcheck as pod that finished longer than minCheckPodAge ago.
func getAllCompletedPodsWithCheckName(reapCheckerPods map[string]v1.Pod, pod v1.Pod) []v1.Pod {
	checkName := pod.Annotations["comcast.github.io/check-name"]
	var matches []v1.Pod
	for _, candidate := range reapCheckerPods {
		if candidate.Labels["kuberhealthy-check-name"] != checkName {
			continue
		}
		completedAt, err := getPodCompletedTime(candidate)
		if err != nil {
			log.Warnln(err)
			continue
		}
		if time.Since(completedAt) > minCheckPodAge {
			matches = append(matches, candidate)
		}
	}
	return matches
}
// deletePod deletes the given pod with foreground propagation, so dependents
// are removed before the pod itself.
func (k *KubernetesAPI) deletePod(ctx context.Context, pod v1.Pod) error {
	log.Infoln("checkReaper: Deleting Pod: ", pod.Name, " in namespace: ", pod.Namespace)
	policy := metav1.DeletePropagationForeground
	opts := metav1.DeleteOptions{PropagationPolicy: &policy}
	return k.Client.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, opts)
}
// jobConditions reports whether a khjob meets the deletion criteria: older
// than duration and in the given phase.
func jobConditions(job khjobv1.KuberhealthyJob, duration time.Duration, phase khjobv1.JobPhase) bool {
	tooOld := time.Since(job.CreationTimestamp.Time) > duration
	if tooOld && job.Spec.Phase == phase {
		log.Infoln("checkReaper: Found khjob older than", duration, "minutes in status", phase)
		return true
	}
	return false
}
// khJobDelete fetches a list of khjobs in the listen namespace and deletes
// those that meet the reaping criteria. It returns the first list or delete
// error encountered.
func khJobDelete(client *khjobv1.KHJobV1Client) error {
	listOpts := metav1.ListOptions{}
	deleteOpts := metav1.DeleteOptions{}
	// list khjobs in Namespace
	list, err := client.KuberhealthyJobs(listenNamespace).List(listOpts)
	if err != nil {
		log.Errorln("checkReaper: Error: failed to retrieve khjob list with error", err)
		return err
	}
	log.Infoln("checkReaper: Found", len(list.Items), "khjobs")
	// Delete any khjob that is old enough and completed.
	for _, job := range list.Items {
		if !jobConditions(job, cfg.MaxKHJobAge, "Completed") {
			continue
		}
		log.Infoln("checkReaper: Deleting khjob", job.Name)
		if err := client.KuberhealthyJobs(job.Namespace).Delete(job.Name, &deleteOpts); err != nil {
			log.Errorln("checkReaper: Failure to delete khjob", job.Name, "with error:", err)
			return err
		}
	}
	return nil
}
// getPodCompletedTime returns the latest FinishedAt time across a pod's
// container statuses. If any container has no terminated state, it returns
// the time accumulated so far along with an error.
func getPodCompletedTime(pod v1.Pod) (time.Time, error) {
	var latest time.Time
	for _, status := range pod.Status.ContainerStatuses {
		if status.State.Terminated == nil {
			// A container that never terminated means the pod has no usable
			// completion time.
			return latest, fmt.Errorf("could not fetch pod: %s completed time", pod.Name)
		}
		finished := status.State.Terminated.FinishedAt
		if finished.After(latest) {
			latest = finished.Time
		}
	}
	return latest, nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.