text stringlengths 11 4.05M |
|---|
/*
Copyright 2018 Blindside Networks
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dataStructs
import "errors"
// the following structs are the types we create to interact with the API
// ie participants, meetingRooms, recordings
// Recording represents a BigBlueButton meeting recording entry:
// the meeting/record identifiers plus publish state and metadata.
type Recording struct {
MeetingID string
RecordID string
State string
Meta string
Publish string
}
// Participants holds the join parameters for a single meeting participant.
// Fields with a trailing underscore are the ones required by IsValid.
type Participants struct {
IsAdmin_ int
FullName_ string
MeetingID_ string
Password_ string
CreateTime string
UserID string
WebVoiceConf string
ConfigToken string
AvatarURL string
Redirect string
ClientURL string
JoinURL string
}
// IsValid reports whether the participant has the required fields set:
// a full name, a meeting ID, and a password. It returns nil when valid.
func (p *Participants) IsValid() error {
	switch {
	case p.FullName_ == "":
		return errors.New("full name cannot be empty")
	case p.MeetingID_ == "":
		return errors.New("meeting ID cannot be empty")
	case p.Password_ == "":
		return errors.New("password cannot be empty")
	default:
		return nil
	}
}
// MeetingRoom mirrors the BigBlueButton create-meeting parameters plus
// local bookkeeping (timestamps, attendee names, and the raw API
// responses). Meta_* fields are forwarded as meeting metadata.
type MeetingRoom struct {
Name_ string
MeetingID_ string
InternalMeetingId string
AttendeePW_ string
ModeratorPW_ string
Welcome string
DialNumber string
VoiceBridge string
WebVoice string
LogoutURL string
Record string
Duration int
Meta string
ModeratorOnlyMessage string
AutoStartRecording bool
AllowStartStopRecording bool
Created bool
PostId string
CreatedAt int64
EndedAt int64
AttendeeNames []string
LoopCount int
ValidToken string
Meta_bbb_recording_ready_url string // this needs to be properly url encoded
Meta_channelid string
Meta_endcallbackurl string
Meta_bbb_origin string
Meta_bbb_origin_version string
Meta_bbb_origin_server_name string
Meta_dc_creator string
CreateMeetingResponse CreateMeetingResponse
MeetingInfo GetMeetingInfoResponse
}
// WebHook pairs a registered BigBlueButton webhook (ID and callback URL)
// with the meeting it belongs to and the raw create response.
type WebHook struct {
HookID string
CallBackURL string
MeetingId string
WebhookResponse CreateWebhookResponse
}
|
package starttls
import (
"crypto/tls"
"fmt"
log "github.com/sirupsen/logrus"
config "github.com/spf13/viper"
. "github.com/trapped/gomaild2/smtp/structs"
. "github.com/trapped/gomaild2/structs"
)
// initTLS waits until the configuration has loaded and, if TLS is
// enabled, advertises the STARTTLS extension to SMTP clients.
func initTLS() {
WaitConfig("config.loaded")
if config.GetBool("tls.enabled") {
log.Info("Enabled TLS")
Extensions = append(Extensions, "STARTTLS")
}
}
// init launches initTLS asynchronously so package initialization does
// not block while waiting for the configuration to load.
func init() {
go initTLS()
}
// getCerts loads the configured TLS key pair. It returns an empty slice
// with an "extension disabled" error when TLS is off or the configuration
// is incomplete, and a "crypto error" when the key pair cannot be loaded.
// (Process dispatches on these exact error strings.)
func getCerts() ([]tls.Certificate, error) {
	enabled := config.GetBool("tls.enabled")
	certFile := config.GetString("tls.certificate")
	keyFile := config.GetString("tls.key")
	if !enabled || certFile == "" || keyFile == "" {
		return []tls.Certificate{}, fmt.Errorf("extension disabled")
	}
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		log.Error("Couldn't load TLS certificate: ", err)
		return []tls.Certificate{}, fmt.Errorf("crypto error")
	}
	return []tls.Certificate{cert}, nil
}
// getConfig builds a *tls.Config from the configured certificates,
// passing any error from getCerts through unchanged.
func getConfig() (*tls.Config, error) {
	certs, err := getCerts()
	if err != nil {
		return &tls.Config{}, err
	}
	cfg := &tls.Config{Certificates: certs}
	return cfg, nil
}
// Process handles the SMTP STARTTLS command: it validates the TLS
// configuration, tells the client to begin the handshake, and upgrades
// the connection in place, resetting session state afterwards.
func Process(c *Client, cmd Command) Reply {
conf, err := getConfig()
if err != nil {
// The error string doubles as a status discriminator (set in getCerts).
switch err.Error() {
case "extension disabled":
return Reply{
Result: CommandNotImplemented,
Message: err.Error(),
}
case "crypto error":
return Reply{
Result: LocalError,
Message: err.Error(),
}
default:
return Reply{
Result: LocalError,
Message: "unknown processing error",
}
}
}
c.Send(Reply{
Result: Ready,
Message: "ready to start TLS",
})
// Wrap the existing connection in a TLS server and perform the handshake.
conn := tls.Server(c.Conn, conf)
err = conn.Handshake()
if err != nil {
log.WithFields(log.Fields{
"id": c.ID,
"err": err,
}).Error("TLS handshake failed")
c.State = Disconnected
return Reply{
Result: LocalError,
Message: "TLS handshake failed",
}
}
// Swap in the TLS connection and reset the session, as STARTTLS requires.
c.Conn = conn
log.WithField("id", c.ID).Info("Switched to TLS")
c.ResetData()
c.Set("secure", true)
c.State = Connected
c.MakeReader()
return Reply{
Result: Ignore,
}
}
|
package types
import "github.com/docker/distribution/reference"
// Ref reference to a registry/repository
// If the tag or digest is available, it's also included in the reference.
// Reference itself is the unparsed string.
// While this is currently a struct, that may change in the future and access
// to contents should not be assumed/used.
type Ref struct {
// Reference is the raw, unparsed input; the other fields are its parsed components.
Reference, Registry, Repository, Tag, Digest string
}
// NewRef returns a repository reference including a registry, repository
// (path), digest, and tag parsed from ref. The raw input is always stored
// in Reference, even when parsing fails.
func NewRef(ref string) (Ref, error) {
parsed, err := reference.ParseNormalizedNamed(ref)
var ret Ref
ret.Reference = ref
if err != nil {
return ret, err
}
ret.Registry = reference.Domain(parsed)
ret.Repository = reference.Path(parsed)
if canonical, ok := parsed.(reference.Canonical); ok {
ret.Digest = canonical.Digest().String()
}
if tagged, ok := parsed.(reference.Tagged); ok {
ret.Tag = tagged.Tag()
}
// Default to "latest" when neither a tag nor a digest was provided.
if ret.Tag == "" && ret.Digest == "" {
ret.Tag = "latest"
}
return ret, nil
}
// CommonName outputs a parsable name from a reference. An empty
// repository yields an empty string; a digest takes precedence over a
// tag when both are present.
func (r Ref) CommonName() string {
	if r.Repository == "" {
		return ""
	}
	name := ""
	if r.Registry != "" {
		name = r.Registry + "/"
	}
	name += r.Repository
	switch {
	case r.Digest != "":
		name += "@" + r.Digest
	case r.Tag != "":
		name += ":" + r.Tag
	}
	return name
}
|
package handlers
import (
"fmt"
"net/http"
)
// IndexHandler is the root at "/"
func IndexHandler(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "Welcome to the JSON restAPI!")
}
|
package main
import (
"log"
"os"
"github.com/logic/gkp/keepassrpc"
"github.com/logic/gkp/keepassrpc/cli"
)
// config holds the loaded CLI configuration and client the active
// KeePassRPC connection; both are populated in main.
var config *cli.Configuration
var client *keepassrpc.Client
// main loads the configuration, dials the KeePassRPC endpoint, and then
// dispatches the requested subcommand from the command line.
func main() {
ParseEnvironment()
var err error
config, err = cli.LoadConfig()
if err != nil {
log.Fatal("loadConfig: ", err)
}
client, err = cli.Dial(config, cli.Prompt)
if err != nil {
log.Fatal("initSRP: ", err)
}
// NOTE(review): log.Fatal above calls os.Exit, so this defer only runs
// on the normal exit path.
defer client.Close()
ParseCommand(os.Args)
}
|
package main
import "sort"
// point is a 2D lattice point used as a set key.
type point struct {
	x, y int
}

// minAreaRect returns the smallest area of an axis-aligned rectangle
// whose four corners are all present in points, or 0 when no such
// rectangle exists.
func minAreaRect(points [][]int) int {
	colYs := make(map[int][]int) // x -> all y values in that column
	rowXs := make(map[int][]int) // y -> all x values in that row
	seen := make(map[point]bool)
	for _, p := range points {
		px, py := p[0], p[1]
		colYs[px] = append(colYs[px], py)
		rowXs[py] = append(rowXs[py], px)
		seen[point{px, py}] = true
	}
	for _, ys := range colYs {
		sort.Ints(ys)
	}
	for _, xs := range rowXs {
		sort.Ints(xs)
	}

	best := 0
	record := func(x1, y1, x2, y2 int) {
		if a := (x2 - x1) * (y2 - y1); best == 0 || a < best {
			best = a
		}
	}
	// For each horizontal bottom edge (y, left..right), scan upward along
	// the right column for the nearest valid top edge.
	for y, xs := range rowXs {
		for i, left := range xs {
			for _, right := range xs[i+1:] {
				for _, top := range colYs[right] {
					if top <= y || !seen[point{left, top}] {
						continue
					}
					record(left, y, right, top)
					break // ys are sorted: later tops only enlarge the area
				}
			}
		}
	}
	return best
}
|
package main
import (
"github.com/stretchr/testify/assert"
"strings"
"testing"
)
// TestRunCircuit wires the example bitwise circuit (AND, OR, shifts,
// NOT over 16-bit wires) and checks every wire's resolved signal value.
func TestRunCircuit(t *testing.T) {
instructions := `123 -> x
456 -> y
x AND y -> d
x OR y -> e
x LSHIFT 2 -> f
y RSHIFT 2 -> g
NOT x -> h
NOT y -> i
`
c := NewCircuit(strings.Split(strings.TrimSpace(instructions), "\n"))
assert.Equal(t, uint16(72), c.GetValue("d"))
assert.Equal(t, uint16(507), c.GetValue("e"))
assert.Equal(t, uint16(492), c.GetValue("f"))
assert.Equal(t, uint16(114), c.GetValue("g"))
assert.Equal(t, uint16(65412), c.GetValue("h"))
assert.Equal(t, uint16(65079), c.GetValue("i"))
assert.Equal(t, uint16(123), c.GetValue("x"))
assert.Equal(t, uint16(456), c.GetValue("y"))
}
|
// 147. Concurrency Is Not Parallelism 並行 非 併發 解說?
// 解說
// https://medium.com/mr-efacani-teatime/concurrency%E8%88%87parallelism%E7%9A%84%E4%B8%8D%E5%90%8C%E4%B9%8B%E8%99%95-1b212a020e30
// Concurrency:相同的工作集合,一起完成同一份工作,互相合作,做團稽
// Parallelism:不同的工作集合,各自完成自己的工作,不互相干擾,有各自的考績
package main
import "fmt"
// main prints a single dot; this file exists for the article linked in
// the header comment.
func main() {
	fmt.Print(".\n")
}
|
package housepassword
import "testing"
// isTrue fails the test when v is false.
func isTrue(t *testing.T, v bool) {
	if v {
		return
	}
	t.Error("Expect true but false")
}
// isFalse fails the test when v is true.
func isFalse(t *testing.T, v bool) {
	if !v {
		return
	}
	t.Error("Expect false but true")
}
// TestCheckio exercises the password validator with accepted and
// rejected samples. NOTE(review): the rule appears to be "at least 10
// characters with upper, lower, and digit present" based on these cases —
// confirm against checkio's definition.
func TestCheckio(t *testing.T) {
isFalse(t, checkio("A1213pokl"))
isTrue(t, checkio("bAse730onE"))
isFalse(t, checkio("asasasasasasasaas"))
isFalse(t, checkio("QWERTYqwerty"))
isFalse(t, checkio("123456123456"))
isTrue(t, checkio("QwErTy911poqqqq"))
}
|
// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package base
import (
"io"
"os"
)
// FileReader provides read operation on a file.
type FileReader interface {
io.Reader
io.ReaderAt
io.Seeker
io.Closer
// Size returns the file size in bytes.
Size() int64
}
// FileReadWriter provides read/write operation on a file. Cancel and
// Commit follow the docker registry storage-driver contract.
type FileReadWriter interface {
FileReader
io.Writer
io.WriterAt
Cancel() error // required by docker registry.
Commit() error // required by docker registry.
}
// localFileReadWriter implements the FileReadWriter interface on a local
// file. When writePartSize/readPartSize are non-zero, Write/Read calls
// are chunked into parts of at most that many bytes.
type localFileReadWriter struct {
entry *localFileEntry
descriptor *os.File
writePartSize int
readPartSize int
}
// close closes the underlying *os.File.
func (readWriter *localFileReadWriter) close() error {
return readWriter.descriptor.Close()
}
// Close closes underlying OS.File object.
func (readWriter localFileReadWriter) Close() error {
return readWriter.close()
}
// Write writes up to len(p) bytes to the file. When writePartSize is
// set, the data is written in chunks of at most writePartSize bytes;
// the count written so far is returned alongside any error.
func (readWriter localFileReadWriter) Write(p []byte) (int, error) {
	if readWriter.writePartSize == 0 {
		return readWriter.descriptor.Write(p)
	}
	written := 0
	for written < len(p) {
		chunk := len(p) - written
		if chunk > readWriter.writePartSize {
			chunk = readWriter.writePartSize
		}
		n, err := readWriter.descriptor.Write(p[written : written+chunk])
		written += n
		if err != nil {
			return written, err
		}
	}
	return written, nil
}
// WriteAt writes len(p) bytes from p to the underlying data stream at
// offset. When writePartSize is set, the data is written in chunks of at
// most writePartSize bytes, advancing the offset after each chunk.
func (readWriter localFileReadWriter) WriteAt(p []byte, offset int64) (int, error) {
	if readWriter.writePartSize == 0 {
		return readWriter.descriptor.WriteAt(p, offset)
	}
	written := 0
	for written < len(p) {
		chunk := len(p) - written
		if chunk > readWriter.writePartSize {
			chunk = readWriter.writePartSize
		}
		n, err := readWriter.descriptor.WriteAt(p[written:written+chunk], offset)
		written += n
		offset += int64(n)
		if err != nil {
			return written, err
		}
	}
	return written, nil
}
// Read reads up to len(p) bytes from the file. When readPartSize is set,
// the buffer is filled in chunks of at most readPartSize bytes, so a
// single call may issue multiple reads against the descriptor.
func (readWriter localFileReadWriter) Read(p []byte) (int, error) {
	if readWriter.readPartSize == 0 {
		return readWriter.descriptor.Read(p)
	}
	read := 0
	for read < len(p) {
		chunk := len(p) - read
		if chunk > readWriter.readPartSize {
			chunk = readWriter.readPartSize
		}
		n, err := readWriter.descriptor.Read(p[read : read+chunk])
		read += n
		if err != nil {
			return read, err
		}
	}
	return read, nil
}
// ReadAt reads len(p) bytes from the file starting at byte offset. When
// readPartSize is set, the buffer is filled in chunks of at most
// readPartSize bytes, advancing the offset after each chunk.
func (readWriter localFileReadWriter) ReadAt(p []byte, offset int64) (int, error) {
	if readWriter.readPartSize == 0 {
		return readWriter.descriptor.ReadAt(p, offset)
	}
	read := 0
	for read < len(p) {
		chunk := len(p) - read
		if chunk > readWriter.readPartSize {
			chunk = readWriter.readPartSize
		}
		n, err := readWriter.descriptor.ReadAt(p[read:read+chunk], offset)
		read += n
		offset += int64(n)
		if err != nil {
			return read, err
		}
	}
	return read, nil
}
// Seek sets the offset for the next Read or Write on file to offset,
// interpreted according to whence:
// 0 means relative to the origin of the file;
// 1 means relative to the current offset;
// 2 means relative to the end.
// It delegates directly to the underlying *os.File.
func (readWriter localFileReadWriter) Seek(offset int64, whence int) (int64, error) {
return readWriter.descriptor.Seek(offset, whence)
}
// Size returns the size of the file in bytes.
// NOTE(review): stat failures are swallowed and reported as size 0 —
// confirm callers can tolerate that.
func (readWriter localFileReadWriter) Size() int64 {
// Use file entry instead of descriptor, because descriptor could have been closed.
fileInfo, err := readWriter.entry.GetStat()
if err != nil {
return 0
}
return fileInfo.Size()
}
// Cancel is supposed to remove any written content.
// In this implementation file is not actually removed, and it's fine since there won't be name
// collision between upload files.
func (readWriter localFileReadWriter) Cancel() error {
return readWriter.close()
}
// Commit is supposed to flush all content for buffered writer.
// In this implementation all writes go to the file directly through
// syscalls, so closing the descriptor is sufficient.
func (readWriter localFileReadWriter) Commit() error {
return readWriter.close()
}
|
package metrics
import (
	"context"
	"fmt"
	"net/http"
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/sirupsen/logrus"
	"github.com/void616/gotask"
)
// Service is a metrics service that serves the Prometheus /metrics
// endpoint on the configured port.
type Service struct {
logger *logrus.Entry
port uint16
}
// New creates a metrics Service that will listen on the given port.
func New(port uint16, logger *logrus.Entry) *Service {
	s := &Service{
		logger: logger,
		port:   port,
	}
	return s
}
// Task runs the Prometheus /metrics HTTP server until the task token is
// stopped, then shuts the server down gracefully and waits for the
// serving goroutine to finish.
func (s *Service) Task(token *gotask.Token) {
	wg := sync.WaitGroup{}
	server := &http.Server{
		Addr:    fmt.Sprintf("0.0.0.0:%d", s.port),
		Handler: promhttp.Handler(),
	}
	wg.Add(1)
	go func() {
		defer wg.Done()
		s.logger.Infof("Serving metrics on port %v", s.port)
		err := server.ListenAndServe()
		if err != nil && err != http.ErrServerClosed {
			s.logger.WithError(err).Fatal("Failed to setup server")
		}
	}()
	for !token.Stopped() {
		token.Sleep(time.Second)
	}
	// Shutdown requires a non-nil context: passing nil panics inside
	// net/http when it waits on ctx.Done().
	if err := server.Shutdown(context.Background()); err != nil {
		s.logger.WithError(err).Error("Failed to shutdown server")
	}
	wg.Wait()
}
|
package handler
import (
"context"
"github.com/jacexh/golang-ddd-template/internal/domain/user"
"github.com/jacexh/golang-ddd-template/internal/eventbus"
"github.com/jacexh/golang-ddd-template/internal/logger"
"go.uber.org/zap"
)
type (
// UserPrinter logs user-creation domain events.
UserPrinter struct{}
)
// Handle logs the created user's name together with the tracing ID from
// the context. NOTE(review): the ev.(user.EventUserCreated) assertion
// panics for any other event type — confirm the bus only routes that
// event here.
func (up UserPrinter) Handle(ctx context.Context, ev eventbus.DomainEvent) {
logger.Logger.Info("created a new user", zap.String("user_name", ev.(user.EventUserCreated).Name),
logger.MustExtractTracingIDFromCtx(ctx))
}
|
package record
import (
"fmt"
"github.com/NodeFactoryIo/vedran/internal/models"
"github.com/NodeFactoryIo/vedran/internal/repositories"
aMock "github.com/NodeFactoryIo/vedran/mocks/actions"
mocks "github.com/NodeFactoryIo/vedran/mocks/repositories"
"github.com/stretchr/testify/mock"
"testing"
)
// TestFailedRequest verifies that a failed request penalizes the node
// and saves a request record, including when the record save fails.
func TestFailedRequest(t *testing.T) {
tests := []struct {
name string
penalizedNodeCallCount int
saveNodeRecordCallCount int
saveNodeRecordResult error
}{
{
name: "Calls penalize node and saves request record",
penalizedNodeCallCount: 1,
saveNodeRecordCallCount: 1,
saveNodeRecordResult: nil},
{
name: "Logs error if save request record fails",
penalizedNodeCallCount: 1,
saveNodeRecordCallCount: 1,
saveNodeRecordResult: fmt.Errorf("error")},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
node := models.Node{
ID: "test-id",
}
recordRepoMock := mocks.RecordRepository{}
recordRepoMock.On("Save", mock.Anything).Once().Return(tt.saveNodeRecordResult)
actionsMock := aMock.Actions{}
actionsMock.On("PenalizeNode", node, mock.Anything, mock.Anything).Return()
FailedRequest(node, repositories.Repos{
RecordRepo: &recordRepoMock,
}, &actionsMock)
actionsMock.AssertNumberOfCalls(t, "PenalizeNode", tt.penalizedNodeCallCount)
recordRepoMock.AssertNumberOfCalls(t, "Save", tt.saveNodeRecordCallCount)
})
}
}
// TestSuccessfulRequest verifies that a successful request marks the
// node as used and saves a request record, including when the save fails.
func TestSuccessfulRequest(t *testing.T) {
tests := []struct {
name string
updateNodeUsedCallCount int
saveNodeRecordCallCount int
saveNodeRecordResult error
}{
{
name: "Calls reward node and saves request record",
updateNodeUsedCallCount: 1,
saveNodeRecordCallCount: 1,
saveNodeRecordResult: nil},
{
name: "Logs error if save request record fails",
updateNodeUsedCallCount: 1,
saveNodeRecordCallCount: 1,
saveNodeRecordResult: fmt.Errorf("Error")},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
node := models.Node{
ID: "test-id",
}
nodeRepoMock := mocks.NodeRepository{}
nodeRepoMock.On("UpdateNodeUsed", node).Return()
recordRepoMock := mocks.RecordRepository{}
recordRepoMock.On("Save", mock.Anything).Once().Return(tt.saveNodeRecordResult)
SuccessfulRequest(node, repositories.Repos{
NodeRepo: &nodeRepoMock,
RecordRepo: &recordRepoMock,
})
recordRepoMock.AssertNumberOfCalls(t, "Save", tt.saveNodeRecordCallCount)
nodeRepoMock.AssertNumberOfCalls(t, "UpdateNodeUsed", tt.updateNodeUsedCallCount)
})
}
}
|
package main
import "fmt"
/*
Target — the adaptation target abstraction; corresponds to MediaPlayer.
request():void corresponds to Pay(audioType string, fileName string).
NOTE(review): the method is spelled "Pay" (not "Play") throughout the
file; kept as-is because renaming would change the interface.
*/
type MediaPlayer interface {
Pay(audioType string,fileName string)
}
/*
Adaptee — the adaptee interface; corresponds to AdvancedMediaPlayer,
the advanced media player.
SpecificRequest corresponds to PlayVlc(fileName string).
SpecificRequest corresponds to PayMp4(fileName string).
*/
type AdvancedMediaPlayer interface {
PlayVlc(fileName string)
PayMp4(fileName string)
}
// VlcPlayer is a concrete adaptee: it plays vlc files and ignores mp4.
type VlcPlayer struct{}
// PlayVlc (the adaptee's SpecificRequest) prints the played vlc file.
func (v *VlcPlayer)PlayVlc(fileName string){
fmt.Println("Playing vlc file. Name: "+fileName)
}
// PayMp4 is intentionally a no-op for VlcPlayer.
func (v *VlcPlayer)PayMp4(fileName string){
// Do nothing.
}
// Mp4Player is a concrete adaptee: it plays mp4 files and ignores vlc.
type Mp4Player struct{}
// PlayVlc is intentionally a no-op for Mp4Player.
func (m *Mp4Player)PlayVlc(fileName string){
// Do nothing.
}
// PayMp4 (the adaptee's SpecificRequest) prints the played mp4 file.
func (m *Mp4Player)PayMp4(fileName string){
fmt.Println("Playing mp4 file. Name: "+fileName)
}
// MediaAdapter is the Adapter: it implements MediaPlayer by delegating
// to an AdvancedMediaPlayer implementation.
type MediaAdapter struct{
AdvancedMediaPlayer AdvancedMediaPlayer
}
// NewMediaAdapter picks the concrete player for the audio type; it
// returns nil for unsupported types (callers guard against this).
func NewMediaAdapter(audioType string)*MediaAdapter{
if audioType == "vlc"{
return &MediaAdapter{AdvancedMediaPlayer:&VlcPlayer{}}
}else if audioType == "mp4"{
return &MediaAdapter{AdvancedMediaPlayer:&Mp4Player{}}
}
return nil
}
// Pay implements the Target interface (MediaPlayer) by dispatching to
// the adaptee's specific play method for the audio type.
func (a *MediaAdapter)Pay(audioType string,fileName string){
// Invoke the adaptee's SpecificRequest method.
if audioType == "vlc"{
a.AdvancedMediaPlayer.PlayVlc(fileName)
} else if audioType == "mp4"{
a.AdvancedMediaPlayer.PayMp4(fileName)
}
}
// AudioPlayer implements MediaPlayer: mp3 natively, vlc/mp4 through a
// MediaAdapter, and anything else is rejected with a message.
type AudioPlayer struct{
}
func (a *AudioPlayer)Pay(audioType string,fileName string){
if audioType == "mp3"{
fmt.Println("Playing mp3 file. Name: "+fileName)
}else if audioType == "vlc" || audioType == "mp4"{
mediaAdapter := NewMediaAdapter(audioType)
mediaAdapter.Pay(audioType,fileName)
}else{
fmt.Println("Invalid media. ",audioType ," format not supported ")
}
}
// main demonstrates the adapter pattern with supported and unsupported
// audio types.
func main(){
audioPlayer := AudioPlayer{}
audioPlayer.Pay("mp3", "beyond the horizon.mp3")
audioPlayer.Pay("mp4", "alone.mp4")
audioPlayer.Pay("vlc", "far far away.vlc")
audioPlayer.Pay("avi", "mind me.avi")
}
|
package osc
import (
"encoding/binary"
"strings"
"time"
)
// Bundle is an OSC bundle: a time tag plus an ordered list of packets.
type Bundle struct {
timeTag time.Time
elements []Packet
}
// internal marks Bundle as implementing the package's Packet interface.
func (bnd *Bundle) internal() {}
// NewBundle returns an empty bundle with a zero time tag.
func NewBundle() *Bundle {
return &Bundle{}
}
// Clear resets the bundle to its zero state and returns it for chaining.
func (bnd *Bundle) Clear() *Bundle {
bnd.timeTag = time.Time{}
bnd.elements = nil
return bnd
}
// TimeTag returns the bundle's time tag.
func (bnd *Bundle) TimeTag() time.Time {
return bnd.timeTag
}
// SetTimeTag sets the time tag and returns the bundle for chaining.
func (bnd *Bundle) SetTimeTag(v time.Time) *Bundle {
bnd.timeTag = v
return bnd
}
// Elements returns the bundle's packets.
func (bnd *Bundle) Elements() []Packet {
return bnd.elements
}
// AddElements appends packets and returns the bundle for chaining.
func (bnd *Bundle) AddElements(elements ...Packet) *Bundle {
bnd.elements = append(bnd.elements, elements...)
return bnd
}
// MarshalBinary encodes the bundle: the bundle identifier, the time tag,
// then each element packet prefixed with its 4-byte big-endian length.
func (bnd *Bundle) MarshalBinary() ([]byte, error) {
	binaryData := strings.Builder{}
	binaryData.Write(createOSCString(bundleIdentifier))
	binaryData.Write(timeToTimeTag(bnd.timeTag))
	for _, packet := range bnd.elements {
		// Propagate element encoding failures instead of silently
		// embedding an empty packet (the error was discarded before).
		packetBinary, err := packet.MarshalBinary()
		if err != nil {
			return nil, err
		}
		packetLength := make([]byte, 4)
		binary.BigEndian.PutUint32(packetLength, uint32(len(packetBinary)))
		binaryData.Write(packetLength)
		binaryData.Write(packetBinary)
	}
	return []byte(binaryData.String()), nil
}
|
package test
import (
"context"
)
// DependencyStruct is a test fixture exposing a stored context.
type DependencyStruct struct {
Ctx context.Context
}
// InnerDependency returns the stored context.
func (t *DependencyStruct) InnerDependency() context.Context {
return t.Ctx
}
// DependencyInterface is satisfied by *DependencyStruct.
type DependencyInterface interface {
InnerDependency() context.Context
}
|
package migration
import (
"context"
"k8s.io/client-go/rest"
"github.com/harvester/harvester/pkg/config"
virtv1 "github.com/harvester/harvester/pkg/generated/clientset/versioned/typed/kubevirt.io/v1"
)
const (
// Controller names under which the VMI/VMIM change handlers register.
vmiControllerName = "migrationTargetController"
vmimControllerName = "migrationAnnotationController"
)
// Register wires the migration controllers: it builds a KubeVirt REST
// client from a copy of the management REST config and registers change
// handlers for VirtualMachineInstance and VirtualMachineInstanceMigration
// resources.
func Register(ctx context.Context, management *config.Management, options config.Options) error {
copyConfig := rest.CopyConfig(management.RestConfig)
virtv1Client, err := virtv1.NewForConfig(copyConfig)
if err != nil {
return err
}
vms := management.VirtFactory.Kubevirt().V1().VirtualMachine()
pods := management.CoreFactory.Core().V1().Pod()
vmis := management.VirtFactory.Kubevirt().V1().VirtualMachineInstance()
vmims := management.VirtFactory.Kubevirt().V1().VirtualMachineInstanceMigration()
handler := &Handler{
namespace: options.Namespace,
vmiCache: vmis.Cache(),
vms: vms,
vmCache: vms.Cache(),
pods: pods,
podCache: pods.Cache(),
restClient: virtv1Client.RESTClient(),
}
vmis.OnChange(ctx, vmiControllerName, handler.OnVmiChanged)
vmims.OnChange(ctx, vmimControllerName, handler.OnVmimChanged)
return nil
}
|
package shardmaster
import (
"container/heap"
"fmt"
"log"
"sync"
)
// Debug toggles package debug logging (0 disables it).
const Debug = 0

// DPrintf logs via log.Printf when Debug is enabled. The named returns
// are always their zero values, matching the original behavior (the
// result of log.Printf is not propagated).
func DPrintf(format string, a ...interface{}) (n int, err error) {
	if Debug <= 0 {
		return
	}
	log.Printf(format, a...)
	return
}
// Wait maps "clientId_opId" keys to single-use result channels so RPC
// handlers can wait for an operation to be applied.
type Wait struct {
l sync.RWMutex
m map[string]chan OpResult
}
// NewWait creates a Wait.
func NewWait() *Wait {
return &Wait{m: make(map[string]chan OpResult)}
}
// Register creates a buffered result channel for the op identified by
// info. Any channel already registered under the same key is closed and
// replaced, releasing its previous waiter.
func (w *Wait) Register(info OpInfo) <-chan OpResult {
w.l.Lock()
defer w.l.Unlock()
key := fmt.Sprintf("%d_%d", info.getClientId(), info.getOpId())
ch := w.m[key]
if ch != nil {
close(ch)
}
ch = make(chan OpResult, 1)
w.m[key] = ch
return ch
}
// Unregister removes the op's channel and closes it so any waiter is
// released. It is a no-op when the op was never registered.
func (w *Wait) Unregister(info OpInfo) {
w.l.Lock()
defer w.l.Unlock()
key := fmt.Sprintf("%d_%d", info.getClientId(), info.getOpId())
ch := w.m[key]
delete(w.m, key)
if ch != nil {
close(ch)
}
}
// Trigger delivers result to the waiter registered for it, if any.
// NOTE(review): the channel has capacity 1, so a second Trigger for the
// same op before Unregister would block while holding the read lock —
// confirm results are delivered at most once per registration.
func (w *Wait) Trigger(result OpResult) {
w.l.RLock()
defer w.l.RUnlock()
key := fmt.Sprintf("%d_%d", result.ClientId, result.OpId)
ch := w.m[key]
if ch != nil {
ch <- result
}
}
/* average divide shards for groups (result from high to low )
shardNum fixed to 10
groupNum shardResult:
1 [10]
2 [5 5]
3 [4 3 3]
4 [3 3 2 2]
5 [2 2 2 2 2]
6 [2 2 2 2 1 1]
7 [2 2 2 1 1 1 1]
8 [2 2 1 1 1 1 1 1]
9 [2 1 1 1 1 1 1 1 1]
10 [1 1 1 1 1 1 1 1 1 1]
11 [1 1 1 1 1 1 1 1 1 1 0]
12 [1 1 1 1 1 1 1 1 1 1 0 0]
*/
// shardDivide splits shardNum shards across groupNum groups as evenly as
// possible, earlier groups receiving the remainder (e.g. 10 shards over
// 3 groups -> [4 3 3]). A groupNum of 0 yields [0].
func shardDivide(shardNum, groupNum int) []int {
	if groupNum == 0 {
		return []int{0}
	}
	counts := make([]int, groupNum)
	for shard := 0; shard < shardNum; shard++ {
		counts[shard%groupNum]++
	}
	return counts
}
// GroupShard tracks the shards owned by one replica group. Index is its
// position in the heap (-1 when not in the heap).
type GroupShard struct {
Index int
Gid int
Shards []int
}
// NewHeapGroupShard builds a heap containing one empty entry per group id.
func NewHeapGroupShard(groups map[int][]string) *heapGroupShard {
h := &heapGroupShard{m: make(map[int]*GroupShard)}
for gid := range groups {
// A shard of -1 registers the group without assigning it a shard.
h.addGs(gid, -1)
}
return h
}
// heapGroupShard is a heap of groups ordered by shard count (see Less),
// with m as a gid -> entry map for O(1) lookup by group id.
type heapGroupShard struct {
data []*GroupShard
m map[int]*GroupShard
}
// Len, Less, and Swap implement sort.Interface for container/heap.
// Less orders by descending shard count, breaking ties by descending
// gid, so the most-loaded group surfaces first.
func (h heapGroupShard) Len() int { return len(h.data) }
func (h heapGroupShard) Less(i, j int) bool {
if len(h.data[i].Shards) != len(h.data[j].Shards) {
return len(h.data[i].Shards) > len(h.data[j].Shards)
} else {
return h.data[i].Gid > h.data[j].Gid
}
}
// Swap exchanges elements and keeps their Index fields in sync.
func (h heapGroupShard) Swap(i, j int) {
h.data[i], h.data[j] = h.data[j], h.data[i]
h.data[i].Index = i
h.data[j].Index = j
}
// Push appends x (a *GroupShard) for container/heap.
func (h *heapGroupShard) Push(x interface{}) {
gs := x.(*GroupShard)
gs.Index = len(h.data)
h.data = append(h.data, gs)
}
// Pop removes and returns the last element for container/heap, also
// dropping it from the gid lookup map.
func (h *heapGroupShard) Pop() interface{} {
old := h
n := len(old.data)
x := old.data[n-1]
x.Index = -1
h.data = old.data[0 : n-1]
delete(h.m, x.Gid)
return x
}
// addGs assigns shard to group gid. Existing groups get the shard
// appended and their heap position re-fixed; unknown groups are inserted.
// A shard of -1 registers an empty group.
func (h *heapGroupShard) addGs(gid, shard int) {
gs, ok := h.m[gid]
if ok {
gs.Shards = append(gs.Shards, shard)
heap.Fix(h, gs.Index)
} else {
if shard == -1 {
gs = &GroupShard{-1, gid, []int{}}
} else {
gs = &GroupShard{-1, gid, []int{shard}}
}
h.m[gid] = gs
heap.Push(h, gs)
}
}
|
package main
import "fmt"
/*
Given an array, rotate the array to the right by k steps, where k is non-negative.
Example 1:
Input: [1,2,3,4,5,6,7] and k = 3
Output: [5,6,7,1,2,3,4]
Explanation:
rotate 1 steps to the right: [7,1,2,3,4,5,6]
rotate 2 steps to the right: [6,7,1,2,3,4,5]
rotate 3 steps to the right: [5,6,7,1,2,3,4]
Example 2:
Input: [-1,-100,3,99] and k = 2
Output: [3,99,-1,-100]
Explanation:
rotate 1 steps to the right: [99,-1,-100,3]
rotate 2 steps to the right: [3,99,-1,-100]
Note:
Try to come up as many solutions as you can, there are at least 3 different ways to solve this problem.
Could you do it in-place with O(1) extra space?
*/
// rotate rotates nums to the right by k steps in place using the triple
// reversal trick: reverse the whole slice, then the first k elements,
// then the remainder. k may exceed len(nums).
func rotate(nums []int, k int) {
	if len(nums) == 0 {
		return // guard: k % 0 would panic on an empty slice
	}
	k = k % len(nums)
	rotateArry(nums, 0, len(nums))
	rotateArry(nums, 0, k)
	rotateArry(nums, k, len(nums))
}

// rotateArry reverses nums[start:end] in place; ends beyond the slice
// length are ignored. (A leftover debug print was removed here.)
func rotateArry(nums []int, start, end int) {
	for start < end-1 && len(nums) >= end {
		nums[start], nums[end-1] = nums[end-1], nums[start]
		start, end = start+1, end-1
	}
}
// main exercises rotate on a sample slice and prints the result.
func main() {
nums := []int{1,2,3}
//nums := []int{1}
//nums := []int{1,2,3,4,5,6,7}
rotate(nums,4)
fmt.Println(nums)
}
|
package main
import (
"fmt"
"math"
)
// setIndex records the earliest text offset where a literal pattern
// character matched; originalLength is the length of the text being
// searched. NOTE(review): package-level state makes the matcher
// non-reentrant.
var setIndex int
var originalLength int
// findMatchVal reports whether pattern matches text, with '*' matching
// any (possibly empty) run of characters. Literal mismatches skip one
// text character, so this behaves like a substring-style search. As a
// side effect it lowers setIndex to the earliest offset of a literal
// match (computed from originalLength).
func findMatchVal(text string, pattern string) bool {
if text == pattern || pattern == "*" || len(pattern) == 0 {
return true
}
if len(text) == 0 {
return false
}
if string(pattern[0]) == "*" {
// '*' either matches nothing (advance pattern) or consumes one char.
return findMatchVal(text, pattern[1:]) || findMatchVal(text[1:], pattern)
}
if text[0] == pattern[0] {
// Record the earliest offset at which the pattern started matching.
if setIndex > (originalLength - len(text)) {
setIndex = originalLength - len(text)
}
return findMatchVal(text[1:], pattern[1:])
} else if text[0] != pattern[0] {
return findMatchVal(text[1:], pattern)
}
return false
}
// findMatchIndex returns the earliest index in s where pattern p begins
// to match (per findMatchVal), or -1 when there is no match. It resets
// the package globals used by findMatchVal before each search.
func findMatchIndex(s string, p string) int32 {
setIndex = math.MaxInt16
originalLength = len(s)
ans := findMatchVal(s, p)
if !ans {
return -1
}
return int32(setIndex)
}
// isSubstring reports whether p occurs in s as a contiguous substring.
// f indicates a partial match is already in progress: once matching has
// started, a mismatch cannot be repaired by skipping characters.
func isSubstring(s string, p string, f bool) bool {
	if s == p || len(p) == 0 {
		return true
	}
	if len(s) <= len(p) {
		return false
	}
	if s[0] == p[0] {
		if isSubstring(s[1:], p[1:], true) {
			return true
		}
		// Only retry with s[0] skipped when no match is in progress.
		// The original retried unconditionally (passing f=false), which
		// wrongly accepted non-contiguous matches like ("hello", "elo").
		return !f && isSubstring(s[1:], p, false)
	}
	if f {
		return false
	}
	return isSubstring(s[1:], p, false)
}
// isSubstringAlternate reports whether p occurs in s as a contiguous
// substring by checking each suffix of s for p as a prefix.
func isSubstringAlternate(s string, p string) bool {
	for len(p) <= len(s) {
		if s[:len(p)] == p {
			return true
		}
		s = s[1:]
	}
	return false
}
// main runs a few findMatchIndex examples and prints the results.
func main() {
fmt.Printf("abcdef a**c*e %v \n", findMatchIndex("abcdef", "a**c*e"))
fmt.Printf("abcdef c*e %v \n", findMatchIndex("abcdef", "c*e"))
fmt.Printf("abf b*e %v \n", findMatchIndex("abf", "b*e"))
fmt.Printf("zzzzwewei w* %v \n", findMatchIndex("zzzzwewei", "w*"))
}
/*
Everything is a recursive structure:
strings, arrays, trees, graphs
*/ |
package main
import (
"fmt"
//"log"
"os"
"path/filepath"
)
// scanDir walks dir recursively and prints counts of directories,
// symbolic links, access-denied entries, and other files. It returns any
// error encountered during the walk.
func scanDir(dir string) error {
	var files []string
	dirs, symlink, blocked, other := 0, 0, 0, 0
	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		// Walk passes a non-nil err (and possibly a nil info) when an
		// entry cannot be read; bail out instead of dereferencing info.
		if err != nil {
			return err
		}
		files = append(files, info.Name())
		if info.IsDir() {
			dirs++
		} else if info.Mode()&(1<<2) == 0 { // world-read bit unset (e.g. chmod 000): no one has access
			blocked++
		} else if info.Mode()&os.ModeSymlink != 0 {
			symlink++
		} else {
			other++
		}
		return nil
	})
	if err != nil {
		// Return the error to the caller instead of panicking.
		return err
	}
	fmt.Println("Path \t\t", dir)
	fmt.Println("Directories \t", dirs)
	fmt.Println("Symbolic Links \t", symlink)
	fmt.Println("Access Denied \t", blocked)
	fmt.Println("Other files \t", other)
	return nil
}
// main validates the command line and scans the given directory.
// NOTE(review): scanDir's return value is ignored here.
func main() {
if len(os.Args) < 2 {
fmt.Println("Usage: ./dir-scan <directory>")
os.Exit(1)
}
scanDir(os.Args[1])
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package aws
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"math"
"strconv"
"strings"
"time"
"github.com/mattermost/mattermost-cloud/internal/common"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/arn"
"github.com/aws/aws-sdk-go-v2/service/rds"
rdsTypes "github.com/aws/aws-sdk-go-v2/service/rds/types"
gt "github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi"
gtTypes "github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi/types"
"github.com/aws/aws-sdk-go-v2/service/secretsmanager"
smTypes "github.com/aws/aws-sdk-go-v2/service/secretsmanager/types"
"github.com/mattermost/mattermost-cloud/model"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
// Database drivers
_ "github.com/go-sql-driver/mysql"
_ "github.com/lib/pq"
)
// RDSMultitenantDatabase is a database backed by RDS that supports
// multi-tenancy: multiple installations share one RDS cluster, each with
// its own database/schema.
type RDSMultitenantDatabase struct {
databaseType string
installationID string
instanceID string
db SQLDatabaseManager
client *Client
maxSupportedInstallations int
disableDBCheck bool
}
// NewRDSMultitenantDatabase returns a new RDSMultitenantDatabase that
// implements the database interface. The installations limit falls back
// to the engine-specific default via valueOrDefault.
func NewRDSMultitenantDatabase(databaseType, instanceID, installationID string, client *Client, installationsLimit int, disableDBCheck bool) *RDSMultitenantDatabase {
	defaultLimit := DefaultRDSMultitenantDatabasePostgresCountLimit
	if databaseType == model.DatabaseEngineTypeMySQL {
		defaultLimit = DefaultRDSMultitenantDatabaseMySQLCountLimit
	}
	return &RDSMultitenantDatabase{
		databaseType:              databaseType,
		instanceID:                instanceID,
		installationID:            installationID,
		client:                    client,
		maxSupportedInstallations: valueOrDefault(installationsLimit, defaultLimit),
		disableDBCheck:            disableDBCheck,
	}
}
// IsValid returns an error when the RDSMultitenantDatabase configuration
// is unusable: a missing installation ID or an unknown database type.
func (d *RDSMultitenantDatabase) IsValid() error {
if len(d.installationID) == 0 {
return errors.New("installation ID is not set")
}
switch d.databaseType {
case model.DatabaseEngineTypeMySQL,
model.DatabaseEngineTypePostgres:
default:
return errors.Errorf("invalid database type %s", d.databaseType)
}
return nil
}
// DatabaseEngineTypeTagValue returns the tag value used for filtering
// RDS cluster resources based on database engine type.
func (d *RDSMultitenantDatabase) DatabaseEngineTypeTagValue() string {
	switch d.databaseType {
	case model.DatabaseEngineTypeMySQL:
		return DatabaseTypeMySQLAurora
	default:
		return DatabaseTypePostgresSQLAurora
	}
}
// MaxSupportedDatabases returns the maximum number of databases supported on
// one RDS cluster for this database type (set at construction time).
func (d *RDSMultitenantDatabase) MaxSupportedDatabases() int {
return d.maxSupportedInstallations
}
// RefreshResourceMetadata ensures various multitenant database resource's
// metadata are correct by re-syncing the cluster's weight counter tag.
func (d *RDSMultitenantDatabase) RefreshResourceMetadata(store model.InstallationDatabaseStoreInterface, logger log.FieldLogger) error {
return d.updateMultitenantDatabase(store, logger)
}
// updateMultitenantDatabase locks the database assigned to this installation
// and refreshes the RDS cluster's counter tag with the current total weight
// of its installations.
func (d *RDSMultitenantDatabase) updateMultitenantDatabase(store model.InstallationDatabaseStoreInterface, logger log.FieldLogger) error {
	database, unlockFn, err := d.getAndLockAssignedMultitenantDatabase(store, logger)
	if err != nil {
		return errors.Wrap(err, "failed to get and lock assigned database")
	}
	if database == nil {
		// err is nil on this path, so errors.Wrap(err, ...) would have
		// returned nil and silently masked the missing database.
		return errors.New("failed to find assigned multitenant database")
	}
	defer unlockFn()
	logger = logger.WithField("assigned-database", database.ID)

	rdsCluster, err := describeRDSCluster(database.RdsClusterID, d.client)
	if err != nil {
		return errors.Wrapf(err, "failed to describe the multitenant RDS cluster ID %s", database.ID)
	}

	return updateCounterTagWithCurrentWeight(database, rdsCluster, store, d.client, logger)
}
// updateCounterTagWithCurrentWeight recalculates the total database weight of
// the installations assigned to the given multitenant database and writes the
// rounded-up value to the RDS cluster's counter tag.
func updateCounterTagWithCurrentWeight(database *model.MultitenantDatabase, rdsCluster *rdsTypes.DBCluster, store model.InstallationDatabaseStoreInterface, client *Client, logger log.FieldLogger) error {
	totalWeight, err := store.GetInstallationsTotalDatabaseWeight(database.Installations)
	if err != nil {
		return errors.Wrap(err, "failed to calculate total database weight")
	}

	// The counter tag holds an integer, so round fractional weights up.
	counterValue := int(math.Ceil(totalWeight))
	if err = updateCounterTag(rdsCluster.DBClusterArn, counterValue, client); err != nil {
		return errors.Wrapf(err, "failed to update tag:counter in RDS cluster ID %s", *rdsCluster.DBClusterIdentifier)
	}
	logger.Debugf("Multitenant database %s counter value updated to %d", database.ID, counterValue)

	return nil
}
// Provision claims a multitenant RDS cluster and creates a database schema for
// the installation.
//
// If the installation is already assigned to a multitenant database that one
// is reused; otherwise a new one is selected and locked. The selected RDS
// cluster must be in the "available" state before SQL provisioning runs.
func (d *RDSMultitenantDatabase) Provision(store model.InstallationDatabaseStoreInterface, logger log.FieldLogger) error {
	err := d.IsValid()
	if err != nil {
		return errors.Wrap(err, "multitenant database configuration is invalid")
	}

	installationDatabaseName := MattermostRDSDatabaseName(d.installationID)
	logger = logger.WithFields(log.Fields{
		"multitenant-rds-database": installationDatabaseName,
		"database-type":            d.databaseType,
	})
	logger.Info("Provisioning Multitenant AWS RDS database")

	// The installation's VPC constrains which RDS clusters may host it.
	vpc, err := getVPCForInstallation(d.installationID, store, d.client)
	if err != nil {
		return errors.Wrap(err, "failed to find cluster installation VPC")
	}

	// Either path below returns the database already locked; the lock is
	// released when this function returns.
	database, unlockFn, err := d.getAndLockAssignedMultitenantDatabase(store, logger)
	if err != nil {
		return errors.Wrap(err, "failed to get and lock assigned database")
	}
	if database == nil {
		logger.Debug("Assigning installation to multitenant database")
		database, unlockFn, err = d.assignInstallationToMultitenantDatabaseAndLock(*vpc.VpcId, store, logger)
		if err != nil {
			return errors.Wrap(err, "failed to assign installation to a multitenant database")
		}
	}
	defer unlockFn()
	logger = logger.WithField("assigned-database", database.ID)

	rdsCluster, err := describeRDSCluster(database.RdsClusterID, d.client)
	if err != nil {
		return errors.Wrapf(err, "failed to describe the multitenant RDS cluster ID %s", database.ID)
	}
	if *rdsCluster.Status != DefaultRDSStatusAvailable {
		return errors.Errorf("multitenant RDS cluster ID %s is not available (status: %s)", database.ID, *rdsCluster.Status)
	}

	rdsID := *rdsCluster.DBClusterIdentifier
	logger = logger.WithField("rds-cluster-id", rdsID)

	// Create the logical database, user, secret, and grants.
	err = d.runProvisionSQLCommands(installationDatabaseName, *vpc.VpcId, rdsCluster, logger)
	if err != nil {
		return errors.Wrap(err, "failed to run provisioning sql commands")
	}

	err = updateCounterTagWithCurrentWeight(database, rdsCluster, store, d.client, logger)
	if err != nil {
		return errors.Wrap(err, "failed to update counter tag with current weight")
	}

	logger.Infof("Installation %s assigned to multitenant database", d.installationID)

	return nil
}
// Snapshot creates a snapshot of single RDS multitenant database.
//
// Not implemented; always returns an error.
func (d *RDSMultitenantDatabase) Snapshot(store model.InstallationDatabaseStoreInterface, logger log.FieldLogger) error {
	return errors.New("not implemented")
}
// GenerateDatabaseSecret creates the k8s database spec and secret for
// accessing a single database inside a RDS multitenant cluster.
//
// The installation's credentials are read from AWS Secrets Manager; the
// returned secret contains the connection string, read-replica string, and a
// connection-check URL.
func (d *RDSMultitenantDatabase) GenerateDatabaseSecret(store model.InstallationDatabaseStoreInterface, logger log.FieldLogger) (*corev1.Secret, error) {
	err := d.IsValid()
	if err != nil {
		return nil, errors.Wrap(err, "multitenant database configuration is invalid")
	}

	installationDatabaseName := MattermostRDSDatabaseName(d.installationID)
	logger = logger.WithFields(log.Fields{
		"multitenant-rds-database": installationDatabaseName,
		"database-type":            d.databaseType,
	})

	multitenantDatabase, err := store.GetMultitenantDatabaseForInstallationID(d.installationID)
	if err != nil {
		return nil, errors.Wrap(err, "failed to query for the multitenant database")
	}

	// TODO: probably split this up.
	// Lock while reading cluster/secret state so concurrent provisioner
	// instances do not race on the same database.
	unlock, err := lockMultitenantDatabase(multitenantDatabase.ID, d.instanceID, store, logger)
	if err != nil {
		return nil, errors.Wrap(err, "failed to lock multitenant database")
	}
	defer unlock()

	rdsCluster, err := describeRDSCluster(multitenantDatabase.RdsClusterID, d.client)
	if err != nil {
		return nil, errors.Wrap(err, "failed to describe RDS cluster")
	}
	logger = logger.WithField("rds-cluster-id", *rdsCluster.DBClusterIdentifier)

	installationSecretName := RDSMultitenantSecretName(d.installationID)
	installationSecret, err := d.client.secretsManagerGetRDSSecret(installationSecretName, logger)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get secret value for database")
	}

	// Build engine-specific connection strings. For MySQL the DB check is a
	// plain endpoint URL on port 3306; for Postgres the connection string
	// itself doubles as the check URL.
	var databaseConnectionString, databaseReadReplicasString, databaseConnectionCheck string
	if d.databaseType == model.DatabaseEngineTypeMySQL {
		databaseConnectionString, databaseReadReplicasString =
			MattermostMySQLConnStrings(
				installationDatabaseName,
				installationSecret.MasterUsername,
				installationSecret.MasterPassword,
				rdsCluster,
			)
		databaseConnectionCheck = fmt.Sprintf("http://%s:3306", *rdsCluster.Endpoint)
	} else {
		databaseConnectionString, databaseReadReplicasString =
			MattermostPostgresConnStrings(
				installationDatabaseName,
				installationSecret.MasterUsername,
				installationSecret.MasterPassword,
				rdsCluster,
			)
		databaseConnectionCheck = databaseConnectionString
	}

	secret := InstallationDBSecret{
		InstallationSecretName: installationSecretName,
		ConnectionString:       databaseConnectionString,
		DBCheckURL:             databaseConnectionCheck,
		ReadReplicasURL:        databaseReadReplicasString,
	}

	logger.Debug("AWS RDS multitenant database configuration generated for cluster installation")

	return secret.ToK8sSecret(d.disableDBCheck), nil
}
// Teardown removes all AWS resources related to a RDS multitenant database.
//
// The keepData flag is accepted for interface compatibility but is not yet
// supported for multitenant databases; data is always removed.
func (d *RDSMultitenantDatabase) Teardown(store model.InstallationDatabaseStoreInterface, keepData bool, logger log.FieldLogger) error {
	logger = logger.WithField("rds-multitenant-database", MattermostRDSDatabaseName(d.installationID))

	logger.Info("Tearing down RDS multitenant database")

	err := d.IsValid()
	if err != nil {
		return errors.Wrap(err, "multitenant database configuration is invalid")
	}

	if keepData {
		logger.Warn("Keepdata is set to true on this server, but this is not yet supported for RDS multitenant databases")
	}

	// Only act if this installation is actually assigned to a database;
	// otherwise teardown is a no-op.
	database, unlockFn, err := d.getAndLockAssignedMultitenantDatabase(store, logger)
	if err != nil {
		return errors.Wrap(err, "failed to get assigned multitenant database")
	}
	if database != nil {
		defer unlockFn()
		err = d.removeInstallationFromMultitenantDatabase(database, store, logger)
		if err != nil {
			return errors.Wrap(err, "failed to remove installation database")
		}
	} else {
		logger.Debug("No multitenant databases found for this installation; skipping...")
	}

	logger.Info("Multitenant RDS database teardown complete")

	return nil
}
// TeardownMigrated removes database from which Installation was migrated out.
//
// The source database is checked before and after acquiring the lock; if it
// no longer exists or is already marked deleted, removal is skipped.
func (d *RDSMultitenantDatabase) TeardownMigrated(store model.InstallationDatabaseStoreInterface, migrationOp *model.InstallationDBMigrationOperation, logger log.FieldLogger) error {
	logger.Info("Tearing down migrated multitenant database")

	// Pre-check before taking the lock so we can exit early in the common
	// cases where there is nothing to remove.
	db, err := store.GetMultitenantDatabase(migrationOp.SourceMultiTenant.DatabaseID)
	if err != nil {
		return errors.Wrap(err, "failed to get multitenant database")
	}
	if db == nil {
		logger.Info("Source database does not exist, skipping removal")
		return nil
	}
	if db.DeleteAt > 0 {
		logger.Info("Source database marked as deleted, skipping removal")
		return nil
	}

	unlockFn, err := lockMultitenantDatabase(migrationOp.SourceMultiTenant.DatabaseID, d.instanceID, store, logger)
	if err != nil {
		return errors.Wrap(err, "failed to lock multitenant database")
	}
	defer unlockFn()

	// Re-fetch after locking in case the database changed between the
	// pre-check and acquiring the lock.
	db, err = store.GetMultitenantDatabase(migrationOp.SourceMultiTenant.DatabaseID)
	if err != nil {
		return errors.Wrap(err, "failed to get multitenant database")
	}
	if db == nil {
		// Previously this nil was passed straight into the removal helper,
		// which would panic; a concurrently-removed database is a no-op.
		logger.Info("Source database no longer exists, skipping removal")
		return nil
	}

	err = d.removeMigratedInstallationFromMultitenantDatabase(db, store, logger)
	if err != nil {
		return errors.Wrap(err, "failed to remove migrated installation database")
	}

	return nil
}
// MigrateOut marks Installation as migrated from the database but does not remove the actual data.
//
// The installation is moved from the database's Installations set to its
// MigratedInstallations set, and the cluster's counter tag is refreshed.
func (d *RDSMultitenantDatabase) MigrateOut(store model.InstallationDatabaseStoreInterface, dbMigration *model.InstallationDBMigrationOperation, logger log.FieldLogger) error {
	installationDatabaseName := MattermostRDSDatabaseName(d.installationID)
	logger = logger.WithFields(log.Fields{
		"multitenant-rds-database": installationDatabaseName,
		"database-type":            d.databaseType,
	})

	unlock, err := lockMultitenantDatabase(dbMigration.SourceMultiTenant.DatabaseID, d.instanceID, store, logger)
	if err != nil {
		return errors.Wrap(err, "failed to lock multitenant database")
	}
	defer unlock()

	database, err := store.GetMultitenantDatabase(dbMigration.SourceMultiTenant.DatabaseID)
	if err != nil {
		return errors.Wrap(err, "failed to query for the multitenant database")
	}

	// Move the installation from active to migrated; the Contains guard
	// keeps the operation idempotent on retry.
	database.Installations.Remove(d.installationID)
	if !common.Contains(database.MigratedInstallations, d.installationID) {
		database.MigratedInstallations.Add(d.installationID)
	}

	err = store.UpdateMultitenantDatabase(database)
	if err != nil {
		return errors.Wrap(err, "failed to update multitenant db")
	}

	rdsCluster, err := describeRDSCluster(database.RdsClusterID, d.client)
	if err != nil {
		return errors.Wrapf(err, "failed to describe the multitenant RDS cluster ID %s", database.ID)
	}

	err = updateCounterTagWithCurrentWeight(database, rdsCluster, store, d.client, logger)
	if err != nil {
		return errors.Wrap(err, "failed to update counter tag with current weight")
	}

	logger.Infof("Installation %s migrated out of multitenant database %s", d.installationID, database.ID)

	return nil
}
// MigrateTo creates new logical database in the database cluster for already existing Installation.
//
// The destination database is locked, the installation is assigned to it
// (idempotently), and the same SQL provisioning used by Provision is run.
func (d *RDSMultitenantDatabase) MigrateTo(store model.InstallationDatabaseStoreInterface, dbMigration *model.InstallationDBMigrationOperation, logger log.FieldLogger) error {
	installationDatabaseName := MattermostRDSDatabaseName(d.installationID)
	logger = logger.WithFields(log.Fields{
		"multitenant-rds-database": installationDatabaseName,
		"database-type":            d.databaseType,
	})

	unlock, err := lockMultitenantDatabase(dbMigration.DestinationMultiTenant.DatabaseID, d.instanceID, store, logger)
	if err != nil {
		return errors.Wrap(err, "failed to lock multitenant database")
	}
	defer unlock()

	database, err := store.GetMultitenantDatabase(dbMigration.DestinationMultiTenant.DatabaseID)
	if err != nil {
		return errors.Wrap(err, "failed to query for the multitenant database")
	}

	err = d.migrateInstallationToDB(store, database)
	if err != nil {
		return errors.Wrap(err, "failed to migrate installation to multitenant db")
	}

	vpc, err := getVPCForInstallation(d.installationID, store, d.client)
	if err != nil {
		return errors.Wrap(err, "failed to find cluster installation VPC")
	}

	rdsCluster, err := describeRDSCluster(database.RdsClusterID, d.client)
	if err != nil {
		return errors.Wrapf(err, "failed to describe the multitenant RDS cluster ID %s", database.ID)
	}
	if *rdsCluster.Status != DefaultRDSStatusAvailable {
		return errors.Errorf("multitenant RDS cluster ID %s is not available (status: %s)", database.ID, *rdsCluster.Status)
	}

	rdsID := *rdsCluster.DBClusterIdentifier
	logger = logger.WithField("rds-cluster-id", rdsID)

	// Create the logical database, user, secret, and grants on the
	// destination cluster.
	err = d.runProvisionSQLCommands(installationDatabaseName, *vpc.VpcId, rdsCluster, logger)
	if err != nil {
		return errors.Wrap(err, "failed to run provisioning sql commands")
	}

	err = updateCounterTagWithCurrentWeight(database, rdsCluster, store, d.client, logger)
	if err != nil {
		return errors.Wrap(err, "failed to update counter tag with current weight")
	}

	logger.Infof("Installation %s migrated to multitenant database %s", d.installationID, database.ID)

	return nil
}
// migrateInstallationToDB assigns this installation to the given multitenant
// database after validating it as a migration destination. Already-assigned
// installations are a no-op, which keeps the migration idempotent.
func (d *RDSMultitenantDatabase) migrateInstallationToDB(store model.InstallationDatabaseStoreInterface, database *model.MultitenantDatabase) error {
	// Idempotency guard: nothing to do if already assigned.
	if common.Contains(database.Installations, d.installationID) {
		return nil
	}

	if err := common.ValidateDBMigrationDestination(store, database, d.installationID, float64(d.MaxSupportedDatabases())); err != nil {
		return errors.Wrap(err, "database validation failed")
	}

	database.Installations.Add(d.installationID)
	if err := store.UpdateMultitenantDatabase(database); err != nil {
		return errors.Wrap(err, "failed to add installation to multitenant db")
	}

	return nil
}
// TODO: for now rollback will be supported only for multi-tenant postgres to multi-tenant postgres migration
// To support more DB types we will have to split this method to two.
// RollbackMigration rollbacks Installation to the source database.
//
// The installation is re-added to the source database, removed from the
// destination, the destination's logical database is dropped, and both
// clusters' counter tags are refreshed.
func (d *RDSMultitenantDatabase) RollbackMigration(store model.InstallationDatabaseStoreInterface, dbMigration *model.InstallationDBMigrationOperation, logger log.FieldLogger) error {
	installationDatabaseName := MattermostRDSDatabaseName(d.installationID)
	logger = logger.WithFields(log.Fields{
		"multitenant-rds-database": installationDatabaseName,
		"database-type":            d.databaseType,
	})

	if dbMigration.SourceDatabase != model.InstallationDatabaseMultiTenantRDSPostgres ||
		dbMigration.DestinationDatabase != model.InstallationDatabaseMultiTenantRDSPostgres {
		return errors.New("db migration rollback is supported only for multitenant postgres database")
	}

	// Lock destination first, then source; both locks are held until return.
	unlockDest, err := lockMultitenantDatabase(dbMigration.DestinationMultiTenant.DatabaseID, d.instanceID, store, logger)
	if err != nil {
		return errors.Wrap(err, "failed to lock multitenant database")
	}
	defer unlockDest()

	destinationDatabase, err := store.GetMultitenantDatabase(dbMigration.DestinationMultiTenant.DatabaseID)
	if err != nil {
		return errors.Wrap(err, "failed to query for the multitenant database")
	}

	unlockSource, err := lockMultitenantDatabase(dbMigration.SourceMultiTenant.DatabaseID, d.instanceID, store, logger)
	if err != nil {
		return errors.Wrap(err, "failed to lock multitenant database")
	}
	defer unlockSource()

	sourceDatabase, err := store.GetMultitenantDatabase(dbMigration.SourceMultiTenant.DatabaseID)
	if err != nil {
		return errors.Wrap(err, "failed to query for the multitenant database")
	}

	// Move the installation back to the source; the Contains guard keeps
	// this idempotent on retry.
	sourceDatabase.MigratedInstallations.Remove(d.installationID)
	destinationDatabase.Installations.Remove(d.installationID)
	if !common.Contains(sourceDatabase.Installations, d.installationID) {
		sourceDatabase.Installations.Add(d.installationID)
	}

	err = store.UpdateMultitenantDatabase(sourceDatabase)
	if err != nil {
		return errors.Wrap(err, "failed to update source multitenant database")
	}
	err = store.UpdateMultitenantDatabase(destinationDatabase)
	if err != nil {
		return errors.Wrap(err, "failed to update destination multitenant database")
	}

	rdsCluster, err := describeRDSCluster(destinationDatabase.RdsClusterID, d.client)
	if err != nil {
		return errors.Wrapf(err, "failed to describe the multitenant RDS cluster ID %s", destinationDatabase.ID)
	}
	if *rdsCluster.Status != DefaultRDSStatusAvailable {
		return errors.Errorf("multitenant RDS cluster ID %s is not available (status: %s)", destinationDatabase.ID, *rdsCluster.Status)
	}

	rdsID := *rdsCluster.DBClusterIdentifier
	logger = logger.WithField("rds-cluster-id", rdsID)

	// Drop the logical database created on the destination cluster.
	err = d.cleanupDatabase(rdsID, *rdsCluster.Endpoint, logger)
	if err != nil {
		return errors.Wrap(err, "failed to drop destination database")
	}

	err = updateCounterTagWithCurrentWeight(destinationDatabase, rdsCluster, store, d.client, logger)
	if err != nil {
		return errors.Wrap(err, "failed to update counter tag with current weight")
	}

	// The source database's counter tag must be written to the source's own
	// RDS cluster; previously the destination's cluster ARN was reused here,
	// overwriting the destination's counter with the source's weight.
	sourceRdsCluster, err := describeRDSCluster(sourceDatabase.RdsClusterID, d.client)
	if err != nil {
		return errors.Wrapf(err, "failed to describe the multitenant RDS cluster ID %s", sourceDatabase.ID)
	}
	err = updateCounterTagWithCurrentWeight(sourceDatabase, sourceRdsCluster, store, d.client, logger)
	if err != nil {
		return errors.Wrap(err, "failed to update counter tag with current weight")
	}

	// Previously logged "migrated to ... destinationDatabase.ID", which was a
	// copy-paste from MigrateTo; this is a rollback to the source database.
	logger.Infof("Installation %s rolled back to multitenant database %s", d.installationID, sourceDatabase.ID)

	return nil
}
// Helpers
// getAndLockAssignedMultitenantDatabase returns the assigned multitenant
// database, locked, if there is one or nil if there is not. An error is
// returned if the installation is assigned to more than one database.
func (d *RDSMultitenantDatabase) getAndLockAssignedMultitenantDatabase(store model.InstallationDatabaseStoreInterface, logger log.FieldLogger) (*model.MultitenantDatabase, func(), error) {
	multitenantDatabases, err := store.GetMultitenantDatabases(&model.MultitenantDatabaseFilter{
		InstallationID:        d.installationID,
		MaxInstallationsLimit: model.NoInstallationsLimit,
		Paging:                model.AllPagesNotDeleted(),
	})
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to query for multitenant databases")
	}
	if len(multitenantDatabases) > 1 {
		return nil, nil, errors.Errorf("expected no more than 1 assigned database for installation, but found %d", len(multitenantDatabases))
	}
	if len(multitenantDatabases) == 0 {
		// No assignment is not an error; callers treat nil as "unassigned".
		return nil, nil, nil
	}

	unlockFn, err := lockMultitenantDatabase(multitenantDatabases[0].ID, d.instanceID, store, logger)
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to lock multitenant database")
	}

	// Take no chances that the stored multitenant database was updated between
	// retrieving it and locking it. We know this installation is assigned to
	// exactly one multitenant database at this point so we can use the store
	// function to directly retrieve it.
	database, err := store.GetMultitenantDatabaseForInstallationID(d.installationID)
	if err != nil {
		unlockFn()
		return nil, nil, errors.Wrap(err, "failed to refresh multitenant database after lock")
	}

	return database, unlockFn, nil
}
// This helper method finds a multitenant RDS cluster that is ready for receiving a database installation. The lookup
// for multitenant databases will happen in order:
//  1. fetch a multitenant database by installation ID.
//  2. fetch all multitenant databases in the store which are under the max number of installations limit.
//  3. fetch all multitenant databases in the RDS cluster that are under the max number of installations limit.
func (d *RDSMultitenantDatabase) assignInstallationToMultitenantDatabaseAndLock(vpcID string, store model.InstallationDatabaseStoreInterface, logger log.FieldLogger) (*model.MultitenantDatabase, func(), error) {
	multitenantDatabases, err := store.GetMultitenantDatabases(&model.MultitenantDatabaseFilter{
		DatabaseType:          d.databaseType,
		MaxInstallationsLimit: d.MaxSupportedDatabases(),
		VpcID:                 vpcID,
		Paging:                model.AllPagesNotDeleted(),
	})
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to get available multitenant databases")
	}

	if len(multitenantDatabases) == 0 {
		logger.Infof("No %s multitenant databases with less than %d installations found in the datastore; fetching all available resources from AWS", d.databaseType, d.MaxSupportedDatabases())

		// Fall back to discovering eligible RDS clusters from AWS resource
		// tags and registering them in the datastore.
		multitenantDatabases, err = d.getMultitenantDatabasesFromResourceTags(vpcID, store, logger)
		if err != nil {
			return nil, nil, errors.Wrap(err, "failed to fetch new multitenant databases from AWS")
		}
	}

	if len(multitenantDatabases) == 0 {
		return nil, nil, errors.New("no multitenant databases are currently available for new installations")
	}

	// We want to be smart about how we assign the installation to a database.
	// Find the database with the most installations on it to keep utilization
	// as close to maximum efficiency as possible.
	// TODO: we haven't acquired a lock yet on any of these databases so this
	// could open up small race conditions.
	selectedDatabase := &model.MultitenantDatabase{}
	for _, multitenantDatabase := range multitenantDatabases {
		if multitenantDatabase.Installations.Count() >= selectedDatabase.Installations.Count() {
			selectedDatabase = multitenantDatabase
		}
	}

	unlockFn, err := lockMultitenantDatabase(selectedDatabase.ID, d.instanceID, store, logger)
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to lock selected database")
	}

	// Now that we have selected one and have a lock, ensure the database hasn't
	// been updated.
	selectedDatabase, err = store.GetMultitenantDatabase(selectedDatabase.ID)
	if err != nil {
		unlockFn()
		return nil, nil, errors.Wrap(err, "failed to refresh multitenant database after lock")
	}

	// Finish assigning the installation.
	selectedDatabase.Installations.Add(d.installationID)
	err = store.UpdateMultitenantDatabase(selectedDatabase)
	if err != nil {
		unlockFn()
		return nil, nil, errors.Wrap(err, "failed to save installation to selected database")
	}

	return selectedDatabase, unlockFn, nil
}
// getMultitenantDatabasesFromResourceTags discovers eligible multitenant RDS
// clusters in the given VPC via AWS resource tags, registers each new one in
// the datastore, and returns the resulting database records.
//
// Clusters that fail individual checks (bad prefix, not ready, describe or
// store errors) are skipped with a log entry rather than aborting the scan.
func (d *RDSMultitenantDatabase) getMultitenantDatabasesFromResourceTags(vpcID string, store model.InstallationDatabaseStoreInterface, logger log.FieldLogger) ([]*model.MultitenantDatabase, error) {
	databaseEngineType := d.DatabaseEngineTypeTagValue()

	resourceNames, err := d.client.resourceTaggingGetAllResources(gt.GetResourcesInput{
		TagFilters: standardMultitenantDatabaseTagFilters(
			DefaultRDSMultitenantDatabaseTypeTagValue,
			databaseEngineType,
			vpcID,
		),
		ResourceTypeFilters: []string{DefaultResourceTypeClusterRDS},
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to get available multitenant RDS resources")
	}

	var multitenantDatabases []*model.MultitenantDatabase

	for _, resource := range resourceNames {
		resourceARN, err := arn.Parse(*resource.ResourceARN)
		if err != nil {
			return nil, err
		}
		if !strings.Contains(resourceARN.Resource, RDSMultitenantDBClusterResourceNamePrefix) {
			logger.Warnf("Provisioner skipped RDS resource (%s) because name does not have a correct multitenant database prefix (%s)", resourceARN.Resource, RDSMultitenantDBClusterResourceNamePrefix)
			continue
		}

		// Only clusters whose counter tag is under the installation limit
		// yield a cluster ID; full clusters come back nil.
		rdsClusterID, err := getRDSClusterIDFromResourceTags(d.MaxSupportedDatabases(), resource.Tags)
		if err != nil {
			return nil, errors.Wrap(err, "failed to get a multitenant RDS cluster ID from AWS resource tags")
		}
		if rdsClusterID == nil {
			continue
		}

		ready, err := isRDSClusterEndpointsReady(*rdsClusterID, d.client)
		if err != nil {
			logger.WithError(err).Errorf("Failed to check RDS cluster status. Skipping RDS cluster ID %s", *rdsClusterID)
			continue
		}
		if !ready {
			continue
		}

		rdsCluster, err := describeRDSCluster(*rdsClusterID, d.client)
		if err != nil {
			logger.WithError(err).Errorf("Failed to describe the multitenant RDS cluster ID %s", *rdsClusterID)
			continue
		}

		multitenantDatabase := model.MultitenantDatabase{
			RdsClusterID:   *rdsClusterID,
			VpcID:          vpcID,
			DatabaseType:   d.databaseType,
			State:          model.DatabaseStateStable,
			WriterEndpoint: *rdsCluster.Endpoint,
			ReaderEndpoint: *rdsCluster.ReaderEndpoint,
		}
		err = store.CreateMultitenantDatabase(&multitenantDatabase)
		if err != nil {
			logger.WithError(err).Errorf("Failed to create a multitenant database. Skipping RDS cluster ID %s", *rdsClusterID)
			continue
		}

		logger.Debugf("Added multitenant database %s to the datastore", multitenantDatabase.ID)

		multitenantDatabases = append(multitenantDatabases, &multitenantDatabase)
	}

	return multitenantDatabases, nil
}
// getRDSClusterIDFromResourceTags extracts the RDS cluster ID from the given
// resource tags, but only when the cluster's installation counter tag is
// below maxDatabases. Returns (nil, nil) when the tags are incomplete or the
// cluster is already at capacity.
func getRDSClusterIDFromResourceTags(maxDatabases int, resourceTags []gtTypes.Tag) (*string, error) {
	var clusterID *string
	var counterValue *string

	for _, tag := range resourceTags {
		key := *tag.Key
		if tag.Value != nil {
			switch key {
			case trimTagPrefix(RDSMultitenantInstallationCounterTagKey):
				counterValue = tag.Value
			case trimTagPrefix(DefaultRDSMultitenantDatabaseIDTagKey):
				clusterID = tag.Value
			}
		}

		// As soon as both tags have been seen, decide whether the cluster
		// still has capacity.
		if clusterID != nil && counterValue != nil {
			counter, err := strconv.Atoi(*counterValue)
			if err != nil {
				return nil, errors.Wrap(err, "failed to parse string tag:counter to integer")
			}
			if counter < maxDatabases {
				return clusterID, nil
			}
		}
	}

	return nil, nil
}
// updateCounterTag writes the given counter value to the multitenant
// database counter tag on the RDS resource identified by resourceARN.
// AddTagsToResource overwrites an existing tag with the same key.
func updateCounterTag(resourceARN *string, counter int, client *Client) error {
	_, err := client.Service().rds.AddTagsToResource(
		context.TODO(),
		&rds.AddTagsToResourceInput{
			ResourceName: resourceARN,
			Tags: []rdsTypes.Tag{
				{
					Key:   aws.String(trimTagPrefix(DefaultMultitenantDatabaseCounterTagKey)),
					Value: aws.String(fmt.Sprintf("%d", counter)),
				},
			},
		})
	if err != nil {
		return errors.Wrapf(err, "failed to update %s for the multitenant RDS cluster %s", DefaultMultitenantDatabaseCounterTagKey, *resourceARN)
	}

	return nil
}
// createDatabaseUserSecret generates a random password for the given
// username, stores the credential pair as a JSON secret in AWS Secrets
// Manager, and returns the payload.
func createDatabaseUserSecret(secretName, username, description string, tags []smTypes.Tag, client *Client) (*RDSSecret, error) {
	rdsSecretPayload := RDSSecret{
		MasterUsername: username,
		MasterPassword: model.NewRandomPassword(model.DefaultPasswordLength),
	}
	err := rdsSecretPayload.Validate()
	if err != nil {
		return nil, errors.Wrap(err, "RDS secret failed validation")
	}

	b, err := json.Marshal(&rdsSecretPayload)
	if err != nil {
		return nil, errors.Wrap(err, "failed to marshal secrets manager payload")
	}

	_, err = client.Service().secretsManager.CreateSecret(
		context.TODO(),
		&secretsmanager.CreateSecretInput{
			Name:         aws.String(secretName),
			Description:  aws.String(description),
			Tags:         tags,
			SecretString: aws.String(string(b)),
		})
	if err != nil {
		return nil, errors.Wrap(err, "failed to create secret")
	}

	return &rdsSecretPayload, nil
}
// describeRDSCluster fetches the AWS description of a single RDS cluster by
// its cluster ID. Exactly one matching cluster is required; zero or multiple
// matches are an error.
func describeRDSCluster(dbClusterID string, client *Client) (*rdsTypes.DBCluster, error) {
	dbClusterOutput, err := client.Service().rds.DescribeDBClusters(
		context.TODO(),
		&rds.DescribeDBClustersInput{
			Filters: []rdsTypes.Filter{
				{
					Name:   aws.String("db-cluster-id"),
					Values: []string{dbClusterID},
				},
			},
		})
	if err != nil {
		return nil, errors.Wrapf(err, "failed to get multitenant RDS cluster id %s", dbClusterID)
	}
	if len(dbClusterOutput.DBClusters) != 1 {
		return nil, errors.Errorf("expected exactly one multitenant RDS cluster (found %d)", len(dbClusterOutput.DBClusters))
	}

	// Copy the element so the returned pointer does not alias the output slice.
	cluster := dbClusterOutput.DBClusters[0]
	return &cluster, nil
}
// lockMultitenantDatabase acquires the store lock for the given multitenant
// database on behalf of instanceID and returns the matching unlock function.
// Unlock failures are logged, not returned, since unlocking happens in defers.
func lockMultitenantDatabase(multitenantDatabaseID, instanceID string, store model.InstallationDatabaseStoreInterface, logger log.FieldLogger) (func(), error) {
	locked, err := store.LockMultitenantDatabase(multitenantDatabaseID, instanceID)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to lock multitenant database %s", multitenantDatabaseID)
	}
	if !locked {
		return nil, errors.Errorf("failed to acquire lock for multitenant database %s", multitenantDatabaseID)
	}

	return func() {
		unlocked, err := store.UnlockMultitenantDatabase(multitenantDatabaseID, instanceID, true)
		if err != nil {
			logger.WithError(err).Error("failed to unlock multitenant database")
		}
		if !unlocked {
			logger.Warn("failed to release lock for multitenant database")
		}
	}, nil
}
// removeInstallationFromMultitenantDatabase performs the work necessary to
// remove a single installation database from a multitenant RDS cluster.
//
// The logical database and user are dropped, the installation's secret is
// deleted, and the datastore record and counter tag are updated.
func (d *RDSMultitenantDatabase) removeInstallationFromMultitenantDatabase(database *model.MultitenantDatabase, store model.InstallationDatabaseStoreInterface, logger log.FieldLogger) error {
	rdsCluster, err := describeRDSCluster(database.RdsClusterID, d.client)
	if err != nil {
		return errors.Wrap(err, "failed to describe multitenant database")
	}
	logger = logger.WithField("rds-cluster-id", *rdsCluster.DBClusterIdentifier)

	err = d.cleanupDatabase(database.RdsClusterID, *rdsCluster.Endpoint, logger)
	if err != nil {
		return errors.Wrap(err, "failed to drop multitenant database")
	}

	err = d.client.secretsManagerEnsureSecretDeleted(RDSMultitenantSecretName(d.installationID), false, logger)
	if err != nil {
		return errors.Wrap(err, "failed to delete multitenant database secret")
	}

	// Unassign the installation before recomputing the cluster's weight so
	// the counter tag reflects the post-removal state.
	database.Installations.Remove(d.installationID)
	err = updateCounterTagWithCurrentWeight(database, rdsCluster, store, d.client, logger)
	if err != nil {
		return errors.Wrap(err, "failed to update counter tag with current weight")
	}

	err = store.UpdateMultitenantDatabase(database)
	if err != nil {
		return errors.Wrapf(err, "failed to remove installation ID %s from multitenant datastore", d.installationID)
	}

	return nil
}
// removeMigratedInstallationFromMultitenantDatabase performs the work necessary to
// remove a single migrated installation database from a multitenant RDS cluster.
//
// Unlike removeInstallationFromMultitenantDatabase, this removes the
// installation from the MigratedInstallations set and does not touch the
// installation's secret or the cluster counter tag.
func (d *RDSMultitenantDatabase) removeMigratedInstallationFromMultitenantDatabase(database *model.MultitenantDatabase, store model.InstallationDatabaseStoreInterface, logger log.FieldLogger) error {
	rdsCluster, err := describeRDSCluster(database.RdsClusterID, d.client)
	if err != nil {
		return errors.Wrap(err, "failed to describe multitenant database")
	}
	logger = logger.WithField("rds-cluster-id", *rdsCluster.DBClusterIdentifier)

	err = d.cleanupDatabase(database.RdsClusterID, *rdsCluster.Endpoint, logger)
	if err != nil {
		return errors.Wrap(err, "failed to drop migrated database")
	}

	database.MigratedInstallations.Remove(d.installationID)
	err = store.UpdateMultitenantDatabase(database)
	if err != nil {
		return errors.Wrapf(err, "failed to remove migrated installation ID %s from multitenant datastore", d.installationID)
	}

	return nil
}
// cleanupDatabase drops the installation's logical database and its database
// user from the multitenant RDS cluster reachable at rdsClusterEndpoint.
// (Parameter renamed from rdsClusterendpoint to follow Go camelCase.)
func (d *RDSMultitenantDatabase) cleanupDatabase(rdsClusterID, rdsClusterEndpoint string, logger log.FieldLogger) error {
	databaseName := MattermostRDSDatabaseName(d.installationID)

	// The cluster's master credentials live in Secrets Manager under the
	// cluster ID.
	masterSecretValue, err := d.client.Service().secretsManager.GetSecretValue(
		context.TODO(),
		&secretsmanager.GetSecretValueInput{
			SecretId: aws.String(rdsClusterID),
		})
	if err != nil {
		return errors.Wrapf(err, "failed to get master secret by ID %s", rdsClusterID)
	}

	disconnect, err := d.connectRDSCluster(rdsClusterEndpoint, DefaultMattermostDatabaseUsername, *masterSecretValue.SecretString)
	if err != nil {
		return errors.Wrapf(err, "failed to connect to multitenant RDS cluster ID %s", rdsClusterID)
	}
	defer disconnect(logger)

	// NOTE(review): the same timeout constant is applied to both MySQL and
	// Postgres clusters despite its MySQL-specific name — confirm intended.
	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(DefaultMySQLContextTimeSeconds*time.Second))
	defer cancel()

	err = dropDatabaseIfExists(ctx, d.db, databaseName)
	if err != nil {
		return errors.Wrapf(err, "failed to drop multitenant RDS database name %s", databaseName)
	}

	err = dropUserIfExists(ctx, d.db, MattermostMultitenantDatabaseUsername(d.installationID))
	if err != nil {
		return errors.Wrap(err, "failed to delete installation database user")
	}

	return nil
}
// ensureMultitenantDatabaseSecretIsCreated fetches the installation's database
// secret from Secrets Manager, creating it (with a freshly generated password)
// when it does not yet exist. (Parameter renamed from VpcID to vpcID to follow
// Go lowerCamel parameter naming.)
func (d *RDSMultitenantDatabase) ensureMultitenantDatabaseSecretIsCreated(rdsClusterID, vpcID *string) (*RDSSecret, error) {
	installationSecretName := RDSMultitenantSecretName(d.installationID)
	installationSecret, err := d.client.secretsManagerGetRDSSecret(installationSecretName, d.client.logger)
	if err != nil {
		// If there's any error apart from the resource not existing, fail.
		var awsErr *smTypes.ResourceNotFoundException
		if !errors.As(err, &awsErr) {
			return nil, errors.Wrapf(err, "failed to get multitenant RDS database secret %s", installationSecretName)
		}
	}

	// If the secret does not exist, create one.
	if installationSecret == nil {
		description := RDSMultitenantClusterSecretDescription(d.installationID, *rdsClusterID)
		// Tag the secret so it can be traced back to its cluster, VPC, and
		// installation.
		tags := []smTypes.Tag{
			{
				Key:   aws.String(trimTagPrefix(DefaultRDSMultitenantDatabaseIDTagKey)),
				Value: rdsClusterID,
			},
			{
				Key:   aws.String(trimTagPrefix(VpcIDTagKey)),
				Value: vpcID,
			},
			{
				Key:   aws.String(trimTagPrefix(DefaultMattermostInstallationIDTagKey)),
				Value: aws.String(d.installationID),
			},
		}
		username := MattermostMultitenantDatabaseUsername(d.installationID)
		installationSecret, err = createDatabaseUserSecret(installationSecretName, username, description, tags, d.client)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to create a multitenant RDS database secret %s", installationSecretName)
		}
	}

	return installationSecret, nil
}
// isRDSClusterEndpointsReady reports whether every endpoint of the given RDS
// cluster is in the "available" status. It returns an error only when the
// endpoint description call itself fails.
func isRDSClusterEndpointsReady(rdsClusterID string, client *Client) (bool, error) {
	output, err := client.service.rds.DescribeDBClusterEndpoints(
		context.TODO(),
		&rds.DescribeDBClusterEndpointsInput{
			DBClusterIdentifier: aws.String(rdsClusterID),
		})
	if err != nil {
		return false, errors.Wrap(err, "failed to describe RDS cluster endpoint")
	}
	for _, endpoint := range output.DBClusterEndpoints {
		// Status is a *string in the AWS SDK; guard against a nil pointer
		// before dereferencing, treating an unknown status as not ready.
		if endpoint.Status == nil || *endpoint.Status != DefaultRDSStatusAvailable {
			return false, nil
		}
	}
	return true, nil
}
// runProvisionSQLCommands connects to the multitenant RDS cluster with the
// master credentials and provisions everything the installation needs: the
// installation database, a per-installation credentials secret, the database
// user, and full privileges for that user on the installation database.
func (d *RDSMultitenantDatabase) runProvisionSQLCommands(installationDatabaseName, vpcID string, rdsCluster *rdsTypes.DBCluster, logger log.FieldLogger) error {
	rdsID := *rdsCluster.DBClusterIdentifier
	// The cluster's master password is stored in Secrets Manager under the
	// cluster identifier.
	masterSecretValue, err := d.client.Service().secretsManager.GetSecretValue(
		context.TODO(),
		&secretsmanager.GetSecretValueInput{
			SecretId: rdsCluster.DBClusterIdentifier,
		})
	if err != nil {
		return errors.Wrapf(err, "failed to find the master secret for the multitenant RDS cluster %s", rdsID)
	}
	disconnect, err := d.connectRDSCluster(*rdsCluster.Endpoint, DefaultMattermostDatabaseUsername, *masterSecretValue.SecretString)
	if err != nil {
		return errors.Wrapf(err, "failed to connect to the multitenant RDS cluster %s", rdsID)
	}
	defer disconnect(logger)
	// All SQL commands below share one deadline.
	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(DefaultMySQLContextTimeSeconds*time.Second))
	defer cancel()
	err = d.ensureDatabaseIsCreated(ctx, installationDatabaseName)
	if err != nil {
		return errors.Wrapf(err, "failed to create schema in multitenant RDS cluster %s", rdsID)
	}
	// The installation-scoped secret supplies the username/password used to
	// create the database user below.
	installationSecret, err := d.ensureMultitenantDatabaseSecretIsCreated(rdsCluster.DBClusterIdentifier, &vpcID)
	if err != nil {
		return errors.Wrap(err, "failed to get a secret for installation")
	}
	err = d.ensureDatabaseUserIsCreated(ctx, installationSecret.MasterUsername, installationSecret.MasterPassword)
	if err != nil {
		return errors.Wrap(err, "failed to create Mattermost database user")
	}
	err = d.ensureDatabaseUserHasFullPermissions(ctx, installationDatabaseName, installationSecret.MasterUsername)
	if err != nil {
		return errors.Wrap(err, "failed to grant permissions to Mattermost database user")
	}
	return nil
}
// connectRDSCluster lazily opens (and caches in d.db) a connection to the
// given RDS cluster endpoint using the engine-appropriate driver, returning a
// disconnect function that closes the cached connection and logs any failure.
func (d *RDSMultitenantDatabase) connectRDSCluster(endpoint, username, password string) (func(logger log.FieldLogger), error) {
	if d.db == nil {
		var db SQLDatabaseManager
		var err error
		switch d.databaseType {
		case model.DatabaseEngineTypeMySQL:
			db, err = sql.Open("mysql", RDSMySQLConnString(rdsMySQLDefaultSchema, endpoint, username, password))
			if err != nil {
				return nil, errors.Wrapf(err, "failed to connect multitenant RDS cluster endpoint %s", endpoint)
			}
		case model.DatabaseEngineTypePostgres:
			db, err = sql.Open("postgres", RDSPostgresConnString(rdsPostgresDefaultSchema, endpoint, username, password))
			if err != nil {
				return nil, errors.Wrap(err, "failed to connect to postgres database")
			}
		default:
			// Without this guard an unknown engine type would cache a nil
			// handle here and the returned close func would panic on Close.
			return nil, errors.Errorf("unsupported database type %s", d.databaseType)
		}
		d.db = db
	}
	closeFunc := func(logger log.FieldLogger) {
		err := d.db.Close()
		if err != nil {
			logger.WithError(err).Errorf("Failed to close the connection with multitenant RDS cluster endpoint %s", endpoint)
		}
	}
	return closeFunc, nil
}
// ensureDatabaseIsCreated creates the installation database if it does not
// already exist: a single idempotent statement on MySQL, and an existence
// check followed by CREATE DATABASE on Postgres (which lacks IF NOT EXISTS
// for databases). Result sets are closed so pooled connections are released.
func (d *RDSMultitenantDatabase) ensureDatabaseIsCreated(ctx context.Context, databaseName string) error {
	if d.databaseType == model.DatabaseEngineTypeMySQL {
		// Query placeholders don't seem to work with argument database.
		// See https://github.com/mattermost/mattermost-cloud/pull/209#discussion_r422533477
		query := fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s CHARACTER SET ?", databaseName)
		rows, err := d.db.QueryContext(ctx, query, "utf8mb4")
		if err != nil {
			return errors.Wrap(err, "failed to run create database SQL command")
		}
		// Close the (empty) result set; discarding it leaks the connection.
		rows.Close()
		return nil
	}
	query := fmt.Sprintf(`SELECT datname FROM pg_catalog.pg_database WHERE lower(datname) = lower('%s');`, databaseName)
	rows, err := d.db.QueryContext(ctx, query)
	if err != nil {
		return errors.Wrap(err, "failed to run create database SQL command")
	}
	dbExists := rows.Next()
	rowsErr := rows.Err()
	rows.Close()
	if dbExists {
		return nil
	}
	if rowsErr != nil {
		// Next() returning false can mask an iteration error; surface it
		// instead of creating a database that may already exist.
		return errors.Wrap(rowsErr, "failed to run create database SQL command")
	}
	query = fmt.Sprintf(`CREATE DATABASE %s`, databaseName)
	createRows, err := d.db.QueryContext(ctx, query)
	if err != nil {
		return errors.Wrap(err, "failed to run create database SQL command")
	}
	createRows.Close()
	return nil
}
// ensureDatabaseUserIsCreated creates the installation's database user if it
// does not already exist (requiring SSL on MySQL). Result sets are closed so
// pooled connections are released, and the Postgres create-user error is
// suppressed so the interpolated password cannot leak into logs.
func (d *RDSMultitenantDatabase) ensureDatabaseUserIsCreated(ctx context.Context, username, password string) error {
	if d.databaseType == model.DatabaseEngineTypeMySQL {
		rows, err := d.db.QueryContext(ctx, "CREATE USER IF NOT EXISTS ?@? IDENTIFIED BY ? REQUIRE SSL", username, "%", password)
		if err != nil {
			return errors.Wrap(err, "failed to run create user SQL command")
		}
		// Close the (empty) result set; discarding it leaks the connection.
		rows.Close()
		return nil
	}
	query := fmt.Sprintf("SELECT 1 FROM pg_roles WHERE rolname='%s'", username)
	rows, err := d.db.QueryContext(ctx, query)
	if err != nil {
		return errors.Wrap(err, "failed to run original user cleanup SQL command")
	}
	userExists := rows.Next()
	rows.Close()
	if userExists {
		return nil
	}
	// Due to not being able use parameters here, we have to do something
	// a bit gross to ensure the password is not leaked into logs.
	// https://github.com/lib/pq/issues/694#issuecomment-356180769
	query = fmt.Sprintf("CREATE USER %s WITH PASSWORD '%s'", username, password)
	createRows, err := d.db.QueryContext(ctx, query)
	if err != nil {
		return errors.New("failed to run create user SQL command: error suppressed")
	}
	createRows.Close()
	return nil
}
// ensureDatabaseUserHasFullPermissions grants the given user all privileges
// on the installation database, using the engine-appropriate GRANT form.
func (d *RDSMultitenantDatabase) ensureDatabaseUserHasFullPermissions(ctx context.Context, databaseName, username string) error {
	var query string
	var args []interface{}
	if d.databaseType == model.DatabaseEngineTypeMySQL {
		// Query placeholders don't seem to work with argument database.
		// See https://github.com/mattermost/mattermost-cloud/pull/209#discussion_r422533477
		query = fmt.Sprintf("GRANT ALL PRIVILEGES ON %s.* TO ?@?", databaseName)
		args = []interface{}{username, "%"}
	} else {
		query = fmt.Sprintf("GRANT ALL PRIVILEGES ON DATABASE %s TO %s", databaseName, username)
	}
	rows, err := d.db.QueryContext(ctx, query, args...)
	if err != nil {
		return errors.Wrap(err, "failed to run privilege grant SQL command")
	}
	// Close the (empty) result set; discarding it leaks the connection.
	rows.Close()
	return nil
}
|
// switch/case examples
package main
import "fmt"
// x selects which branch sw_sample_1 prints; main rewrites it between calls.
var x string = "p1"
// main runs the untagged-switch demo once, then the value-switch demo twice,
// mutating the package-level x in between so a different case matches.
func main() {
	sw_sample_0()
	sw_sample_1()
	x = "p2"
	fmt.Println("更換x=p2") // announces that x was changed to "p2"
	sw_sample_1()
}
// untagged switch with a true case and fallthrough
// sw_sample_0 demonstrates an untagged switch: cases are boolean conditions,
// the first true one runs, and fallthrough executes the NEXT case body
// unconditionally — even though that case's condition is false.
// Output: "sw_0_2n" then "sw_0_3r".
func sw_sample_0() {
	switch {
	case false:
		fmt.Println("sw_0_1s")
	case true:
		fmt.Println("sw_0_2n")
		fallthrough
	case false:
		// reached only via the fallthrough above
		fmt.Println("sw_0_3r")
	default:
		fmt.Println("defalt_case")
	}
}
// switch on a value
// sw_sample_1 prints the label matching the current value of the
// package-level variable x; any other value prints nothing.
func sw_sample_1() {
	if x == "p1" {
		fmt.Println("p1_text")
	} else if x == "p2" {
		fmt.Println("p2_text")
	} else if x == "p3" {
		fmt.Println("p3_text")
	}
}
|
// Copyright (c) 2018 soren yang
//
// Licensed under the MIT License
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wait
import (
"testing"
"time"
)
// Benchmark_Wait measures a sequential register/trigger round-trip per
// iteration on a single Wait instance.
func Benchmark_Wait(b *testing.B) {
	wt := New()
	for n := 0; n < b.N; n++ {
		id := uint64(n)
		wt.Register(id)
		wt.Trigger(id, nil)
	}
}
// Benchmark_Wait_Parallel measures register/trigger round-trips performed
// concurrently from multiple goroutines against one Wait instance.
//
// NOTE(review): UnixNano is used as a "unique" ID, but two goroutines can
// observe the same nanosecond timestamp and register a duplicate ID —
// confirm Wait tolerates duplicates, or derive IDs from an atomic counter.
func Benchmark_Wait_Parallel(b *testing.B) {
	w := New()
	b.RunParallel(func(p *testing.PB) {
		for p.Next() {
			i := uint64(time.Now().UnixNano())
			w.Register(i)
			w.Trigger(i, nil)
		}
	})
}
|
package main
import (
"flag"
"io"
"log"
"net"
"os"
)
// addr is the TCP server address to dial, settable via the -addr flag.
var addr = flag.String("addr", "localhost:8080", "address of tcp server")
// main dials the TCP server given by -addr and wires the connection to the
// terminal: server output streams to stdout while stdin streams to the server
// (a minimal netcat-style client).
func main() {
	flag.Parse()
	conn, err := net.Dial("tcp", *addr)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// Background copy prints incoming server data; the foreground copy
	// returns when stdin hits EOF, after which main exits and the deferred
	// Close tears down the connection (ending the background goroutine).
	go mustCopy(os.Stdout, conn)
	mustCopy(conn, os.Stdin)
}
func mustCopy(dst io.Writer, src io.Reader) {
if _, err := io.Copy(dst, src); err != nil {
log.Printf("mustCopy: %v\n", err)
}
}
|
package udwSqlite3Test
var ggetBcDbEncrypt []uint8 = []byte{0xf3, 0x0f, 0xf7, 0x46, 0x27, 0xb9, 0x58, 0x7d, 0x99, 0xcc, 0x46, 0xb6, 0x16, 0x78, 0xd5, 0x99,
0xb0, 0x1f, 0xc8, 0xa9, 0x23, 0xfb, 0xca, 0x1c, 0xad, 0xa4, 0x66, 0xa5, 0x0f, 0x82, 0x21, 0x13,
0x1c, 0x64, 0xdb, 0x1e, 0x13, 0x7f, 0x14, 0xdd, 0x86, 0xb4, 0x74, 0xf3, 0x29, 0x78, 0x05, 0xf1,
0x5d, 0xdd, 0x05, 0x53, 0xb8, 0x7e, 0x6e, 0x99, 0x73, 0x7b, 0xc8, 0xec, 0x0d, 0xc9, 0xd2, 0xd3,
0x3a, 0x88, 0xf5, 0x36, 0x4b, 0x04, 0x63, 0x3a, 0x18, 0x74, 0x4e, 0x81, 0x4c, 0x4e, 0x10, 0x5f,
0x8f, 0xb1, 0x7e, 0x91, 0xa6, 0x84, 0x0c, 0xc5, 0x96, 0xee, 0x44, 0xc6, 0x74, 0x68, 0x1e, 0xb5,
0xff, 0xee, 0xfe, 0xf0, 0xd6, 0x7e, 0xcc, 0xaa, 0x02, 0x96, 0x55, 0x0e, 0x40, 0x91, 0xb9, 0xc3,
0x53, 0x21, 0x79, 0x1e, 0x0c, 0x00, 0x1c, 0xb9, 0x1b, 0xc3, 0x81, 0xda, 0x53, 0xe0, 0x13, 0xd2,
0xca, 0x4d, 0xaf, 0xba, 0x81, 0xc1, 0xfb, 0x09, 0x5b, 0x51, 0x1a, 0x0c, 0x19, 0x00, 0xa2, 0xb4,
0x8e, 0x6a, 0x4f, 0xf9, 0x65, 0x10, 0xdf, 0xbb, 0xa0, 0xf3, 0xe1, 0x69, 0xd8, 0xe3, 0x17, 0xf2,
0x78, 0x0d, 0x7c, 0x27, 0xa2, 0xb7, 0x82, 0xb2, 0x8e, 0x7b, 0x0e, 0x1a, 0x73, 0x6e, 0xfd, 0x9a,
0xd5, 0xfd, 0x0d, 0xcb, 0x47, 0xa7, 0xe7, 0x4e, 0x59, 0xa7, 0x3b, 0x35, 0x2f, 0x8c, 0x16, 0x61,
0xa1, 0xd9, 0x24, 0x14, 0x15, 0x8f, 0xbe, 0x69, 0xb7, 0x70, 0x80, 0x8b, 0x87, 0x68, 0x71, 0x65,
0xf7, 0xbe, 0x93, 0x66, 0xbd, 0x9c, 0xc0, 0x1f, 0x24, 0xcf, 0x55, 0xa4, 0x4a, 0x4e, 0xfe, 0x35,
0x37, 0xd2, 0x84, 0xc0, 0x4b, 0xe6, 0x3b, 0x10, 0x3e, 0x34, 0x63, 0x72, 0xf8, 0x33, 0x5f, 0x28,
0x5a, 0x81, 0x6b, 0xa8, 0xf8, 0xcd, 0xcf, 0x29, 0x1f, 0x19, 0x19, 0x16, 0xf7, 0x0b, 0x17, 0x6d,
0x89, 0xd0, 0x86, 0x38, 0x43, 0x5f, 0x32, 0xc1, 0xb5, 0xe9, 0x58, 0x3e, 0xea, 0xc9, 0x0e, 0x13,
0x2f, 0x5d, 0x63, 0x92, 0x8c, 0xcf, 0xc1, 0x13, 0x18, 0xa4, 0x2d, 0x26, 0xe4, 0x0e, 0xd0, 0x2d,
0xa2, 0x93, 0xb1, 0x61, 0xde, 0x7a, 0x2f, 0xd8, 0x19, 0xf4, 0xa5, 0xdb, 0xc8, 0x97, 0x44, 0x8e,
0x92, 0xe0, 0x61, 0x44, 0xd0, 0x63, 0xe2, 0x25, 0x68, 0x73, 0x36, 0x7b, 0x16, 0x21, 0x06, 0x32,
0xc8, 0x76, 0x95, 0x91, 0x94, 0x83, 0x98, 0x33, 0xb0, 0x1b, 0x63, 0x43, 0xa5, 0xc0, 0x71, 0xc5,
0x18, 0xcc, 0x39, 0xba, 0x78, 0x08, 0xd5, 0x0c, 0xac, 0x06, 0x3c, 0xa7, 0xfb, 0x38, 0xae, 0x28,
0x03, 0xfb, 0x66, 0x05, 0x21, 0x54, 0xc4, 0x31, 0x30, 0x50, 0x5f, 0xa7, 0x89, 0x2f, 0x05, 0x1b,
0x04, 0xcb, 0x38, 0x5b, 0xee, 0x5a, 0x56, 0xfe, 0x3a, 0xd0, 0xb3, 0xdf, 0xa7, 0xb7, 0xb3, 0x1e,
0x49, 0x22, 0x93, 0x75, 0x8b, 0xf1, 0x30, 0x5e, 0x19, 0x09, 0x6a, 0x4a, 0x66, 0x01, 0x28, 0x0c,
0x91, 0x79, 0x9f, 0xd1, 0x9e, 0xfe, 0x6f, 0xc9, 0xd6, 0x72, 0xea, 0x26, 0xc5, 0x81, 0x28, 0x3e,
0x25, 0x89, 0x50, 0x94, 0xf5, 0x43, 0x5c, 0xbb, 0x4c, 0x17, 0xfd, 0xc5, 0x93, 0x9b, 0x08, 0xd7,
0xce, 0x09, 0xa0, 0x13, 0x56, 0xaa, 0x4e, 0x4b, 0xb0, 0x2a, 0xb3, 0x97, 0x48, 0x2f, 0x9d, 0xfc,
0x1a, 0xb0, 0xce, 0x1d, 0x0d, 0x29, 0xef, 0x60, 0x26, 0xd0, 0xf9, 0xf8, 0xec, 0x45, 0x9e, 0xcb,
0xb2, 0xe4, 0x3d, 0x83, 0xe6, 0x65, 0x2d, 0xa5, 0x93, 0xb9, 0xc7, 0x54, 0x3d, 0xd8, 0xe9, 0xaa,
0xcd, 0x24, 0xaf, 0xa4, 0x7d, 0xde, 0xb3, 0xb9, 0xd2, 0xf3, 0xe9, 0x90, 0x6f, 0x31, 0x49, 0xc9,
0xc6, 0x3e, 0x91, 0x4d, 0xa1, 0xcc, 0x1b, 0x03, 0x25, 0xab, 0x2e, 0xd0, 0x69, 0xa0, 0x23, 0x10,
0x35, 0x91, 0x64, 0x7d, 0x86, 0x75, 0xe2, 0xf8, 0xe5, 0x81, 0x14, 0x70, 0xfa, 0x1b, 0x35, 0xb9,
0xda, 0x7a, 0xe9, 0xde, 0xad, 0x81, 0x82, 0x74, 0x08, 0x94, 0x54, 0xde, 0xb7, 0x2b, 0x15, 0x9d,
0x67, 0x36, 0xf1, 0x7c, 0xf2, 0xa3, 0x88, 0x41, 0xc5, 0x81, 0x58, 0x24, 0x42, 0x78, 0xcc, 0xdf,
0xdd, 0x15, 0x23, 0xf1, 0x64, 0xbc, 0x17, 0x81, 0x2a, 0x35, 0x62, 0xd0, 0xa1, 0x44, 0x68, 0xd5,
0x92, 0xe0, 0x00, 0x52, 0x42, 0x0d, 0x31, 0xb4, 0x3e, 0x89, 0xa4, 0x6f, 0x89, 0xd9, 0x17, 0xe0,
0xd1, 0x80, 0xe7, 0xa4, 0xc5, 0xd8, 0x1c, 0x33, 0xf6, 0x86, 0xc7, 0x2f, 0x75, 0xcd, 0xb0, 0x76,
0x21, 0x77, 0x07, 0xe2, 0x7c, 0x97, 0x1d, 0x87, 0x57, 0x9d, 0xdf, 0x9b, 0x06, 0x0b, 0x9b, 0x73,
0xab, 0xb8, 0x62, 0x3f, 0x6d, 0x83, 0x09, 0xd0, 0x1d, 0x08, 0x02, 0xd3, 0x2a, 0xa4, 0x1b, 0x5e,
0x49, 0x4f, 0x9b, 0xcc, 0x70, 0x23, 0x7f, 0xde, 0x91, 0x68, 0x7b, 0x9d, 0x84, 0xb1, 0xa6, 0x91,
0xe5, 0x85, 0xe8, 0x57, 0x73, 0x82, 0x07, 0xd7, 0x42, 0x41, 0x0f, 0x55, 0x09, 0x71, 0x1b, 0x2c,
0xc8, 0xab, 0x11, 0x11, 0x71, 0xa1, 0xca, 0x78, 0x57, 0xff, 0x40, 0x69, 0xf7, 0xbf, 0xb2, 0x84,
0x6b, 0x73, 0x01, 0x91, 0x45, 0x03, 0xde, 0xc6, 0x27, 0x15, 0x7d, 0xab, 0xf9, 0xdb, 0x36, 0x03,
0x0a, 0xfc, 0x87, 0x0f, 0x2a, 0x0a, 0x84, 0x1d, 0xd7, 0x7d, 0xa7, 0x25, 0xf4, 0x63, 0x0c, 0x04,
0xd6, 0xa3, 0x68, 0x5c, 0xf3, 0xa2, 0x1b, 0xe6, 0x6b, 0x02, 0x19, 0xeb, 0x9d, 0xa8, 0x94, 0x88,
0x8f, 0xf6, 0xb3, 0x0d, 0x1c, 0x93, 0x61, 0xac, 0x29, 0xa6, 0xc5, 0xc1, 0xec, 0xb4, 0x6a, 0x7a,
0xa2, 0x25, 0xd0, 0xf9, 0x95, 0xd8, 0x06, 0x65, 0x71, 0x44, 0x4a, 0xcb, 0xc9, 0x30, 0xab, 0x51,
0xa5, 0xa6, 0x03, 0x0f, 0x8c, 0x62, 0xf5, 0x5a, 0xdf, 0xb4, 0x67, 0x35, 0x2c, 0xd7, 0xf6, 0xe3,
0x78, 0x06, 0x66, 0x33, 0xd9, 0x52, 0x64, 0x5f, 0xa8, 0xc0, 0xf3, 0xd9, 0x79, 0x29, 0xe1, 0x76,
0x6a, 0x2f, 0x15, 0x70, 0x5f, 0xbb, 0xe0, 0xc1, 0xfe, 0x77, 0x60, 0xa0, 0x97, 0x30, 0xcb, 0x9a,
0x3e, 0x62, 0xbe, 0x30, 0xff, 0xf7, 0xb9, 0xda, 0xf5, 0x32, 0xc6, 0xb9, 0x62, 0x88, 0x53, 0x62,
0xf0, 0x29, 0xc1, 0xe8, 0x89, 0xcd, 0x2b, 0xfd, 0x3c, 0x1e, 0x23, 0x2b, 0x06, 0xec, 0x84, 0xad,
0x9b, 0x76, 0x9a, 0x0c, 0xc1, 0xdb, 0x64, 0x14, 0x1a, 0x1e, 0x0c, 0x1f, 0x2b, 0x57, 0xbd, 0xb9,
0x17, 0x4e, 0xf8, 0x43, 0xc1, 0x9d, 0xc8, 0xb5, 0x3a, 0x65, 0xa8, 0x76, 0x4d, 0x87, 0xe2, 0xb5,
0x75, 0xd2, 0x47, 0xb7, 0x49, 0xfe, 0xe4, 0xeb, 0xc7, 0x9c, 0x61, 0xcb, 0xa1, 0xb2, 0x6b, 0x8c,
0xe6, 0xcd, 0x05, 0xc6, 0x59, 0xf2, 0x33, 0xdf, 0x7b, 0x5a, 0x2f, 0x95, 0x46, 0xeb, 0x39, 0xbc,
0x58, 0xb1, 0xfa, 0x8c, 0xae, 0x96, 0x48, 0xae, 0xb6, 0x8e, 0x2f, 0x23, 0x30, 0xf7, 0xd4, 0x1d,
0xc8, 0xdf, 0x35, 0xe8, 0x53, 0xbb, 0xcc, 0x56, 0x4b, 0xee, 0x97, 0xce, 0x96, 0xe9, 0x4c, 0x36,
0xf6, 0x6c, 0x1e, 0x5b, 0xfd, 0xf7, 0xdc, 0x5d, 0xa9, 0x19, 0x8c, 0x67, 0xb9, 0x45, 0x2a, 0xdd,
0x37, 0xb0, 0x4f, 0x26, 0x88, 0xda, 0x17, 0x20, 0x71, 0x11, 0xce, 0xc0, 0x4f, 0xab, 0x0f, 0x46,
0xaa, 0x8c, 0xc7, 0xbf, 0x47, 0xd7, 0xed, 0xb3, 0xe7, 0x8e, 0xf7, 0xb7, 0x08, 0x3e, 0x59, 0xda,
0x0d, 0xfc, 0x52, 0xad, 0x6a, 0x3b, 0x5d, 0x05, 0x71, 0x2e, 0x82, 0x78, 0x2a, 0xc5, 0x0e, 0xe2,
0x7b, 0x0e, 0x27, 0xb0, 0x34, 0x7f, 0xf8, 0x0f, 0xc0, 0x86, 0x61, 0x93, 0xde, 0x80, 0xcf, 0xdf,
0x54, 0xd6, 0xf5, 0x8c, 0x81, 0x3a, 0x43, 0x95, 0x8b, 0xf3, 0xf2, 0x5f, 0xd1, 0x6f, 0x3c, 0x6b,
0xc3, 0x98, 0x81, 0x0f, 0xe4, 0x62, 0xb6, 0x2c, 0xda, 0x16, 0x95, 0x5e, 0x7b, 0x95, 0x83, 0xe1,
0x38, 0x27, 0x6a, 0xd6, 0x88, 0x76, 0x4d, 0xeb, 0x97, 0xda, 0xa4, 0x96, 0xd5, 0x9b, 0x06, 0x67,
0xb1, 0xd5, 0xa3, 0x42, 0x51, 0x84, 0x11, 0x09, 0xd0, 0x1c, 0x8f, 0x7d, 0x94, 0xcc, 0xc7, 0x4f,
0x15, 0x8b, 0xa0, 0x5b, 0xa4, 0x70, 0x58, 0x00, 0x1d, 0xe6, 0xd8, 0x67, 0x4f, 0xbc, 0xbc, 0xb4,
0x84, 0x53, 0xd6, 0xcb, 0x3e, 0x21, 0x7b, 0x98, 0xa3, 0x45, 0x32, 0xa3, 0xe5, 0xb3, 0x04, 0x77,
0x9d, 0x1e, 0x93, 0x40, 0x89, 0xa8, 0x3e, 0xe6, 0x2d, 0x49, 0x9f, 0xd2, 0xf4, 0xc9, 0xa1, 0xb4,
0x12, 0x5f, 0xf2, 0xd8, 0xcd, 0xbd, 0xe4, 0x7c, 0x28, 0x11, 0x72, 0x95, 0xbc, 0xf9, 0x42, 0x36,
0xaa, 0xc1, 0x94, 0x5c, 0xdf, 0xa0, 0xa9, 0x3a, 0x96, 0x48, 0x24, 0xfe, 0x20, 0x54, 0x94, 0xaf,
0x90, 0xf0, 0x84, 0x02, 0x28, 0x8a, 0xd1, 0xf0, 0xa3, 0x2e, 0x0f, 0xb5, 0xfd, 0x8d, 0x32, 0x6d,
0x27, 0x3d, 0x9c, 0x30, 0x46, 0xdc, 0x72, 0xca, 0x0b, 0xfb, 0x71, 0xde, 0xb2, 0x7a, 0xee, 0x81,
0xa6, 0x1d, 0xc5, 0x4a, 0x20, 0xc5, 0x0d, 0x72, 0x62, 0xf9, 0xe8, 0xb5, 0x49, 0xb4, 0x5f, 0xde,
0x33, 0x59, 0x6f, 0x60, 0x3d, 0x16, 0x85, 0xa0, 0xf7, 0x22, 0x9c, 0x18, 0x86, 0x50, 0x58, 0x0b,
0x16, 0xc4, 0x73, 0x7c, 0x9f, 0xcc, 0xc5, 0x0f, 0xc9, 0x5b, 0x11, 0xa3, 0xc3, 0x64, 0x00, 0x03,
0x81, 0x43, 0xcb, 0x68, 0x2f, 0xef, 0x93, 0xe8, 0x97, 0x39, 0x27, 0x55, 0x85, 0xc9, 0xa2, 0x4a,
0x04, 0x08, 0x47, 0x76, 0xe4, 0xa6, 0x9a, 0xd1, 0x24, 0xa6, 0x76, 0x5c, 0x28, 0x61, 0xa9, 0x96,
0x3f, 0x10, 0x27, 0xbb, 0x9a, 0xc7, 0x72, 0xcd, 0xf8, 0x1c, 0xdc, 0xd6, 0x41, 0x82, 0x35, 0x1d,
0x1b, 0x2b, 0xa9, 0x8a, 0x34, 0xaf, 0x11, 0x83, 0x51, 0x84, 0x4e, 0xeb, 0xff, 0xbf, 0xc2, 0xb1,
0x87, 0x2a, 0xf1, 0xb8, 0x5a, 0x91, 0x4e, 0xf4, 0x16, 0x70, 0x5e, 0xa8, 0x14, 0xf5, 0xbb, 0xd8,
0x3f, 0xb5, 0x82, 0x20, 0x80, 0x6f, 0x60, 0x97, 0x61, 0x74, 0x2a, 0xca, 0x2b, 0x7b, 0x5e, 0xc5,
0x63, 0xeb, 0x1d, 0x60, 0xeb, 0x1f, 0x8b, 0x24, 0xce, 0xba, 0xaa, 0xd5, 0x90, 0x3f, 0xfb, 0x7f,
0x8c, 0x09, 0x79, 0xb6, 0x3d, 0x5e, 0x51, 0x0e, 0x91, 0x50, 0xe0, 0xf2, 0x23, 0xe5, 0x74, 0x8d,
0x79, 0x7a, 0xe0, 0xbf, 0xdd, 0x2d, 0xaf, 0x11, 0x39, 0x2f, 0x93, 0x38, 0xf9, 0x30, 0x3e, 0x6a,
0x59, 0xc8, 0x2c, 0x7d, 0x36, 0x9f, 0x5c, 0xdd, 0x62, 0xf4, 0x44, 0x2e, 0xf1, 0x91, 0x16, 0x14,
0x88, 0x31, 0x45, 0x09, 0x6b, 0x0d, 0xa5, 0xca, 0xac, 0x29, 0xcc, 0x5c, 0x46, 0xc3, 0xe0, 0x2b,
0x38, 0x91, 0x53, 0xf0, 0xf9, 0x54, 0x8a, 0xf7, 0x35, 0xff, 0x1e, 0x01, 0xb0, 0x8f, 0x0b, 0x99,
0x27, 0xf1, 0xf3, 0x9f, 0x39, 0x13, 0xfa, 0x8c, 0x46, 0x68, 0x07, 0x79, 0xdb, 0x6d, 0xe7, 0xb4,
0xb2, 0x1b, 0x19, 0xf7, 0xb9, 0xca, 0x47, 0x7e, 0x0a, 0xd9, 0x59, 0x98, 0x4d, 0xa7, 0x25, 0x6e,
0x0c, 0x08, 0x41, 0x4e, 0xad, 0x9d, 0xf7, 0x8a, 0xbd, 0x3c, 0x27, 0x76, 0xc9, 0x7f, 0x55, 0xd0,
0x5d, 0xd7, 0xf5, 0x2d, 0xb4, 0x0d, 0x23, 0x3b, 0xd5, 0xb6, 0xfe, 0x85, 0x77, 0xe1, 0x40, 0x12,
0x2b, 0x54, 0xd3, 0x9e, 0x8e, 0x99, 0x6c, 0xb6, 0x58, 0x40, 0xbd, 0xa8, 0xc4, 0x0b, 0xda, 0xb0,
0xb4, 0x76, 0x4e, 0x85, 0x4d, 0x3d, 0xe3, 0x4d, 0xfd, 0x3e, 0x0b, 0x9e, 0x3c, 0x49, 0x1d, 0xbd,
0x54, 0x7b, 0x5d, 0x57, 0x6b, 0x22, 0xe7, 0x68, 0xe2, 0x44, 0xc8, 0xbd, 0x9e, 0x1b, 0x74, 0x17,
0xf8, 0xb8, 0xd3, 0xcf, 0xe0, 0xcd, 0x19, 0xc6, 0x9c, 0x34, 0xd5, 0xf7, 0x06, 0xd5, 0x01, 0x1a,
0x54, 0x5c, 0x56, 0xb2, 0x63, 0x89, 0xc0, 0x0e, 0x05, 0xd7, 0x36, 0x81, 0x4b, 0xbf, 0x95, 0xec,
0xc5, 0x86, 0x98, 0x0e, 0x0f, 0xdd, 0x16, 0xfe, 0x06, 0xd7, 0xb1, 0x75, 0x87, 0x62, 0x17, 0xaf,
0xae, 0x7e, 0x6b, 0x53, 0xaa, 0x3d, 0xb0, 0x6b, 0x95, 0x5f, 0xae, 0x47, 0x5b, 0xd8, 0x9d, 0x52,
0x53, 0x47, 0xd5, 0x7e, 0xb7, 0x94, 0xdb, 0x15, 0x40, 0x8f, 0xe0, 0xee, 0xa7, 0x33, 0xec, 0x28,
0x7a, 0x3f, 0x5c, 0x7e, 0x93, 0xdf, 0xf5, 0x90, 0x35, 0xd9, 0xe7, 0x8c, 0x69, 0x83, 0x7e, 0x9b,
0x40, 0xb3, 0x4d, 0xc4, 0x93, 0x59, 0x6a, 0xec, 0x13, 0xa2, 0xbc, 0x01, 0x89, 0x21, 0x75, 0xdc,
0x61, 0xc9, 0x98, 0xaa, 0xf5, 0x39, 0xe9, 0xdf, 0x21, 0x6a, 0x1a, 0x56, 0x9a, 0x8d, 0x66, 0xd2,
0x19, 0x06, 0x94, 0x63, 0x74, 0xcc, 0x3c, 0x97, 0x5e, 0x57, 0xd9, 0x25, 0x74, 0x96, 0x91, 0x45,
0x77, 0x42, 0x83, 0x66, 0x8e, 0x30, 0xf0, 0x02, 0xa2, 0x5f, 0x17, 0x17, 0x71, 0x75, 0xf2, 0x26,
0x0c, 0x59, 0x25, 0x83, 0x24, 0xbf, 0x79, 0x58, 0xd7, 0x3a, 0x17, 0x25, 0xc7, 0xbf, 0x6c, 0xd1,
0x73, 0x0e, 0xb3, 0x6b, 0xa4, 0x81, 0xd0, 0x48, 0x51, 0x7e, 0x55, 0x83, 0xf5, 0x97, 0x06, 0x92,
0x59, 0x3e, 0xfb, 0xd5, 0x88, 0x53, 0x2b, 0xb0, 0x0a, 0xe7, 0xc7, 0x4a, 0x17, 0x46, 0x11, 0x2f,
0x99, 0xcb, 0x5b, 0x55, 0xc5, 0x96, 0xbe, 0x3e, 0x31, 0x8d, 0xa6, 0x75, 0xfe, 0x4e, 0x16, 0x7e,
0xc9, 0x7e, 0x54, 0x4e, 0x17, 0x33, 0x64, 0xde, 0xe0, 0x9a, 0x99, 0x56, 0xe9, 0x22, 0x0c, 0x54,
0xa7, 0xdd, 0xfe, 0x69, 0xd1, 0x95, 0x87, 0xfd, 0x80, 0xf9, 0xc0, 0xc7, 0xd0, 0x19, 0xd7, 0x82,
0xca, 0xba, 0x04, 0x19, 0x3d, 0xb6, 0xba, 0x28, 0xc0, 0x6b, 0x80, 0x13, 0xff, 0x6c, 0x7d, 0x97,
0x2d, 0x1e, 0x96, 0x41, 0xa5, 0x34, 0xdc, 0x7a, 0xd3, 0x37, 0x83, 0xa2, 0x03, 0xc4, 0xe4, 0xb9,
0xcd, 0x8a, 0xb9, 0xe3, 0xd5, 0x51, 0xc1, 0x4c, 0xd9, 0x88, 0xd1, 0xad, 0xe4, 0x34, 0xdf, 0x32,
0xcc, 0x54, 0xef, 0xf7, 0x6c, 0xa7, 0x1e, 0x69, 0x97, 0xdd, 0x41, 0x27, 0xbd, 0xd1, 0x40, 0x2d,
0x02, 0x39, 0x72, 0x8d, 0x93, 0xd4, 0xd7, 0x3e, 0x92, 0xbc, 0x2c, 0x78, 0x5a, 0xb0, 0xe1, 0x11,
0xe4, 0x6a, 0x82, 0x62, 0xf4, 0x6c, 0x52, 0x19, 0x3a, 0xeb, 0x57, 0x77, 0x6e, 0x32, 0x20, 0xe2,
0xb2, 0xb1, 0x4a, 0xfa, 0xe4, 0xc7, 0xcb, 0xb2, 0x57, 0x5f, 0xd9, 0x62, 0xaf, 0x2d, 0x16, 0xdd,
0x42, 0x12, 0x22, 0xd7, 0x25, 0x57, 0x60, 0x15, 0x42, 0xb8, 0x8c, 0x24, 0x8b, 0xe6, 0x1f, 0x80,
0x50, 0x77, 0xf9, 0xdf, 0xe4, 0x8f, 0x47, 0x3d, 0x0a, 0xe7, 0xdd, 0xb8, 0x28, 0xb2, 0x28, 0xf7,
0xbd, 0x4c, 0xcd, 0x76, 0x21, 0x54, 0xef, 0x66, 0xd4, 0x90, 0xa3, 0x85, 0xb4, 0xbd, 0x22, 0xb5,
0x6d, 0x58, 0x15, 0x0a, 0xd0, 0xc2, 0x0b, 0x52, 0xb3, 0xfd, 0xf0, 0xa7, 0xd6, 0x81, 0x93, 0x60,
0xcf, 0x1d, 0x3c, 0xea, 0x84, 0x02, 0xab, 0x93, 0x8c, 0x4f, 0x87, 0x7a, 0x53, 0x3f, 0xb3, 0xe8,
0xf4, 0x05, 0xaa, 0xfb, 0x16, 0xe8, 0x2d, 0x2c, 0xb1, 0x71, 0xc8, 0xf5, 0xab, 0x8a, 0x0b, 0xd3,
0x65, 0x88, 0x54, 0x27, 0x7a, 0x81, 0xd6, 0x0f, 0x5b, 0x6a, 0x52, 0x57, 0xd2, 0xda, 0xa5, 0x0a,
0xa1, 0x7a, 0x35, 0xe8, 0x07, 0xc5, 0x9f, 0xfe, 0x1b, 0x7f, 0x81, 0x68, 0x3d, 0x2a, 0xa1, 0x07,
0xa3, 0x06, 0x11, 0x32, 0x44, 0xc3, 0x10, 0x6e, 0x40, 0x7e, 0x0d, 0xf8, 0x90, 0xb0, 0xcd, 0x80,
0x0b, 0xb6, 0x99, 0x1c, 0x51, 0x3e, 0xc3, 0x63, 0x09, 0xd8, 0xf2, 0x70, 0xf1, 0x59, 0x18, 0x48,
0x22, 0x53, 0x5f, 0x33, 0x09, 0x74, 0xc4, 0x0a, 0x12, 0xbf, 0x12, 0xfc, 0xb3, 0x8c, 0x2c, 0x79,
0x70, 0x3a, 0xba, 0x4c, 0x26, 0xaa, 0x65, 0xe1, 0xc8, 0x2b, 0x0e, 0x9c, 0x2d, 0x84, 0x9c, 0xd9,
0x2c, 0xcf, 0xc2, 0x80, 0x08, 0xb8, 0x92, 0xf8, 0x1a, 0x34, 0xee, 0xf0, 0x90, 0x09, 0xd8, 0x5b,
0x32, 0xdc, 0x34, 0xa2, 0x78, 0x4b, 0x68, 0x09, 0x55, 0xad, 0x07, 0xea, 0x0e, 0xe8, 0x62, 0x27,
0xcf, 0xdd, 0xb7, 0x9c, 0xee, 0xa2, 0xaf, 0x0b, 0x5a, 0x2b, 0x8e, 0x05, 0xe4, 0x4a, 0xb3, 0x76,
0x5c, 0xb1, 0x65, 0xb5, 0xf3, 0x36, 0x2b, 0x73, 0xbd, 0x00, 0x8d, 0xeb, 0x6a, 0xfc, 0x8c, 0x62,
0x2b, 0x0d, 0x5d, 0xce, 0xef, 0x43, 0x6c, 0x36, 0x3e, 0x6f, 0x0a, 0x3a, 0x80, 0xda, 0x41, 0xee,
0xb9, 0xbd, 0x53, 0xd1, 0x66, 0xb4, 0xc7, 0xc4, 0x53, 0x39, 0x89, 0x69, 0x4c, 0x1e, 0x1e, 0xf3,
0xb2, 0xa4, 0xb4, 0xe3, 0xed, 0x16, 0x22, 0x0f, 0xe3, 0x47, 0xdd, 0xff, 0x88, 0x36, 0x1a, 0x1d,
0xee, 0x11, 0x82, 0x99, 0x2d, 0xf6, 0xd0, 0x5e, 0xf0, 0x0e, 0x2e, 0x26, 0xd9, 0x97, 0x0e, 0xbb,
0x8f, 0xae, 0xb6, 0xc0, 0x19, 0x5f, 0xde, 0xa6, 0x35, 0xdf, 0x52, 0x5d, 0x19, 0x0e, 0x1a, 0x23,
0xa1, 0xbc, 0xc7, 0xac, 0xb2, 0x79, 0x7d, 0xd4, 0xa1, 0x2f, 0xdc, 0x2c, 0x59, 0x84, 0xb0, 0x59,
0xfc, 0x14, 0x04, 0x96, 0xea, 0x84, 0x7b, 0xb2, 0xb3, 0x24, 0x09, 0xda, 0x64, 0xaa, 0xfc, 0xf4,
0x62, 0xba, 0x1e, 0x4e, 0xea, 0x36, 0x2f, 0x5a, 0x86, 0xbe, 0x3d, 0x9f, 0xc6, 0x53, 0xc3, 0x8f,
0xf1, 0xa9, 0x91, 0xb9, 0xbe, 0x50, 0xad, 0xa8, 0x42, 0xd4, 0x4c, 0x16, 0x9e, 0x95, 0xad, 0xdc,
0xee, 0xc7, 0x4d, 0xf9, 0x19, 0x83, 0xad, 0xd1, 0x85, 0x26, 0x25, 0x31, 0x1b, 0x85, 0xb6, 0x09,
0x5d, 0x49, 0x37, 0xbf, 0xe6, 0xfc, 0x23, 0x65, 0x31, 0x01, 0x86, 0xbc, 0xbf, 0x3f, 0x17, 0x76,
0x73, 0x6f, 0x46, 0xec, 0x49, 0xc3, 0xd8, 0x71, 0x4f, 0xc6, 0x5a, 0xf5, 0x3d, 0x29, 0x3f, 0x81,
0x63, 0x2b, 0x89, 0x83, 0x05, 0xdc, 0x22, 0xc3, 0xe3, 0x11, 0x7b, 0x30, 0xee, 0xed, 0x8e, 0xb7,
0x00, 0x25, 0x1a, 0x3a, 0x9f, 0x90, 0xae, 0x0a, 0xd1, 0xe6, 0x71, 0x58, 0x5a, 0xbf, 0x22, 0x10,
0xab, 0x28, 0x1b, 0xa3, 0xda, 0x26, 0x92, 0x56, 0x86, 0x2a, 0x99, 0x32, 0x94, 0x7b, 0x08, 0x13,
0xac, 0xe9, 0xdb, 0x6b, 0xf7, 0x72, 0x29, 0x65, 0x7f, 0x85, 0xea, 0x9a, 0x55, 0x6f, 0x72, 0xa5,
0x4f, 0x77, 0x10, 0x3f, 0xcd, 0xc7, 0xd3, 0x46, 0xd1, 0xe8, 0xa5, 0x10, 0x6b, 0xb6, 0xdc, 0x7b,
0x3e, 0x01, 0x34, 0xd0, 0xa4, 0x39, 0x7e, 0x96, 0x48, 0x44, 0x09, 0x1f, 0xd2, 0x17, 0x98, 0x40,
0xf9, 0xa0, 0xbc, 0x37, 0x74, 0xbb, 0xd6, 0x48, 0x35, 0x2f, 0xe2, 0xd4, 0xa9, 0x8a, 0x89, 0x60,
0x3f, 0x30, 0x51, 0x44, 0x0a, 0xf7, 0xf4, 0x2e, 0x37, 0xe1, 0x06, 0xcf, 0x22, 0x2d, 0xa5, 0x28,
0xae, 0x18, 0xc3, 0x23, 0xfd, 0x45, 0x02, 0x73, 0x8b, 0x18, 0x90, 0xa6, 0xab, 0x1c, 0xc0, 0xca,
0xf3, 0xcd, 0x85, 0x5e, 0x14, 0x5e, 0x5c, 0xe9, 0x6e, 0xa5, 0x97, 0x03, 0x18, 0xce, 0xf0, 0x22,
0xcf, 0x2d, 0x0d, 0xba, 0x97, 0xfa, 0xd8, 0x7d, 0x4c, 0xb4, 0xa8, 0x5a, 0x08, 0x11, 0x54, 0x81,
0xdc, 0xe9, 0xfd, 0x86, 0x07, 0x0a, 0x3f, 0xca, 0x82, 0x81, 0xd7, 0xf1, 0x8f, 0x06, 0x09, 0x2e,
0x1e, 0xef, 0x96, 0xe4, 0x02, 0x00, 0xab, 0x32, 0x82, 0x70, 0x57, 0x16, 0x56, 0x93, 0xa2, 0x7b,
0x0b, 0xbd, 0xf1, 0xb5, 0x89, 0xc4, 0x2e, 0x93, 0x5d, 0xd9, 0x2b, 0x86, 0xa9, 0xfd, 0x78, 0xc9,
0x09, 0x13, 0xa9, 0x3a, 0xd9, 0xad, 0x4e, 0xb9, 0xd8, 0x20, 0x5f, 0x2d, 0xd8, 0x4d, 0xb0, 0x26,
0xf0, 0xed, 0x32, 0x4b, 0xdf, 0x78, 0xb5, 0x38, 0xc6, 0x43, 0x4a, 0x97, 0xb9, 0xb4, 0x03, 0x97,
0xbe, 0xf7, 0xf9, 0xc2, 0x51, 0xd0, 0xf8, 0x8a, 0xb9, 0xe6, 0xb3, 0xc2, 0x83, 0xca, 0x3d, 0x5c,
0x3c, 0xb8, 0x26, 0xb5, 0xb7, 0xb8, 0xe8, 0xdc, 0x2b, 0x5d, 0xe0, 0x27, 0x2b, 0x2c, 0x65, 0xc8,
0x09, 0xcd, 0xf0, 0x5e, 0xe9, 0x82, 0xf9, 0x62, 0x5b, 0xd3, 0x00, 0xd7, 0xc5, 0x6e, 0xde, 0xa9,
0x9b, 0xee, 0x3d, 0x58, 0xbf, 0x7f, 0xc1, 0xaf, 0xec, 0x6e, 0x6c, 0xa7, 0x85, 0xc8, 0xc8, 0x58,
0xf3, 0xb1, 0x7d, 0xe1, 0x07, 0x4a, 0xb7, 0x03, 0x2a, 0x12, 0x72, 0x7c, 0xe9, 0xba, 0xec, 0x73,
0x00, 0x16, 0xe2, 0xfd, 0xc1, 0x7a, 0x1e, 0xe1, 0xad, 0xb8, 0x0b, 0xf4, 0xed, 0x4e, 0x3a, 0x4a,
0xe4, 0x26, 0xff, 0xc9, 0x0b, 0xb1, 0xe1, 0x5f, 0x79, 0x29, 0x23, 0x06, 0x7c, 0xb4, 0x3d, 0xbe,
0x43, 0xe4, 0x40, 0xb1, 0xb4, 0x9c, 0x14, 0xbc, 0xa1, 0x47, 0x8f, 0x17, 0x3f, 0xfc, 0x9b, 0x65,
0x3d, 0x25, 0x61, 0xf7, 0x05, 0x72, 0xad, 0xae, 0xbf, 0xdf, 0x7f, 0x0f, 0xb6, 0xbf, 0x89, 0xd6,
0x29, 0xfc, 0x97, 0x6c, 0xdc, 0xe9, 0x8d, 0x7c, 0x8a, 0xc0, 0xf1, 0x39, 0x3b, 0x96, 0x92, 0x4a,
0x07, 0x9f, 0xe2, 0x13, 0x3c, 0x04, 0x4b, 0xa9, 0xea, 0x22, 0x9f, 0xa5, 0x00, 0xa2, 0x1b, 0x9c,
0x12, 0x5a, 0x71, 0xb2, 0xc1, 0x59, 0xf1, 0x27, 0x85, 0x30, 0x27, 0xcc, 0x19, 0x8f, 0x62, 0x31,
0x42, 0xda, 0x29, 0xb4, 0xb7, 0x66, 0xf7, 0xe9, 0xd4, 0x6e, 0xf9, 0x08, 0x3d, 0x2e, 0x3d, 0xef,
0x49, 0xc8, 0xbf, 0x80, 0x0e, 0x93, 0xd0, 0x73, 0xf5, 0x49, 0xe5, 0x02, 0xb8, 0x74, 0xa6, 0x4f,
0xe0, 0x05, 0x56, 0x8f, 0x06, 0x1a, 0x3a, 0xc6, 0x87, 0x93, 0x5e, 0x2b, 0x2a, 0x81, 0xcf, 0x41,
0xf2, 0x23, 0x3b, 0xf8, 0x5e, 0x73, 0x50, 0x94, 0x06, 0x23, 0x90, 0x06, 0xb7, 0x47, 0xbc, 0xbe,
0x62, 0x75, 0xfb, 0xb7, 0x8b, 0x5e, 0xea, 0xa2, 0x28, 0x36, 0xfe, 0x9c, 0x4f, 0x76, 0x6f, 0xde,
0x62, 0xf2, 0xe5, 0xf5, 0xc4, 0xa5, 0x6c, 0x07, 0xe3, 0x95, 0xf4, 0x66, 0xb5, 0x1d, 0x2e, 0xd1,
0xd2, 0x38, 0x59, 0xab, 0xb2, 0x5f, 0x98, 0xed, 0x88, 0x81, 0xe0, 0x80, 0xc5, 0x1f, 0xb1, 0xc8,
0xd4, 0x1b, 0x40, 0x29, 0xbf, 0xb6, 0x25, 0xb6, 0x69, 0x50, 0x52, 0xc3, 0x8a, 0x9d, 0x2b, 0x15,
0x84, 0x6c, 0x29, 0x2f, 0x0c, 0xb2, 0xdc, 0xec, 0x14, 0x3e, 0x50, 0xe1, 0xbb, 0x08, 0xc5, 0x94,
0x7a, 0x8e, 0x40, 0x4a, 0x06, 0x48, 0x6d, 0x4f, 0xef, 0xd6, 0xf6, 0xb6, 0xe5, 0xf9, 0xdc, 0x78,
0x1c, 0xa3, 0xa8, 0xb1, 0x49, 0xb6, 0xd4, 0x9e, 0xd0, 0x48, 0x20, 0x13, 0x0f, 0x3f, 0xa3, 0xa1,
0x4c, 0xc1, 0x25, 0x18, 0xdb, 0x14, 0x68, 0x44, 0x0d, 0xd1, 0x74, 0x8c, 0xf5, 0x95, 0x04, 0x1a,
0x2d, 0x03, 0xf1, 0x67, 0xbf, 0x0a, 0xb1, 0x1c, 0x68, 0x83, 0x4f, 0x41, 0x00, 0x55, 0xad, 0x99,
0xc2, 0x5f, 0xb7, 0xc8, 0xc1, 0x77, 0xf4, 0x89, 0x85, 0x46, 0x9e, 0x6f, 0xdc, 0x76, 0x99, 0xe0,
0x5a, 0x96, 0x94, 0x4e, 0x55, 0x58, 0x5f, 0xb6, 0x37, 0x21, 0x7c, 0x1c, 0x55, 0x0c, 0xd7, 0x9b,
0x9e, 0xcd, 0x6d, 0x6c, 0x82, 0xa3, 0x93, 0x0f, 0x42, 0x6b, 0xc9, 0x9b, 0x39, 0xcd, 0xcb, 0xc5,
0x49, 0xe7, 0xf5, 0x44, 0x0f, 0x7e, 0xce, 0x75, 0xe0, 0x8f, 0x3b, 0x27, 0x33, 0xf2, 0xde, 0xd6,
0x4a, 0x21, 0xd4, 0xad, 0x4e, 0xca, 0xef, 0x6c, 0x39, 0x94, 0x34, 0xa2, 0x07, 0x4a, 0xd4, 0x28,
0x08, 0xa1, 0x47, 0x28, 0xc1, 0x95, 0x64, 0x5f, 0xc0, 0x06, 0x05, 0xd8, 0x2f, 0xf9, 0x31, 0x13,
0xc5, 0x1a, 0x2b, 0x1b, 0x0e, 0x25, 0x48, 0x7b, 0x73, 0x0a, 0xce, 0xb3, 0xf5, 0xf6, 0xfa, 0x31,
0xc1, 0x9f, 0x01, 0xc1, 0xee, 0x69, 0x3b, 0xcb, 0x9a, 0x1d, 0x70, 0x04, 0x66, 0xcb, 0x24, 0x92,
0xc2, 0x68, 0x6a, 0x8f, 0xaa, 0x50, 0xe7, 0xd9, 0x6c, 0x24, 0xf7, 0xd5, 0x0a, 0x9b, 0x82, 0x96,
0x67, 0x4b, 0xc7, 0xc6, 0xa0, 0xee, 0xd9, 0x9f, 0xb5, 0x7d, 0xd0, 0xd8, 0xad, 0xad, 0xe2, 0x2f,
0x2f, 0x48, 0xc6, 0xf1, 0x6f, 0x60, 0xd6, 0x50, 0xdc, 0x5b, 0x46, 0x86, 0x86, 0xb9, 0xc7, 0xb0,
0x96, 0x9a, 0x4f, 0x72, 0x17, 0xa3, 0xe8, 0xe9, 0xcb, 0xca, 0x9a, 0x60, 0x64, 0xbd, 0xd6, 0xb4,
0x92, 0x98, 0x99, 0x0a, 0x18, 0xf8, 0x1d, 0x16, 0x2c, 0x4e, 0x85, 0x9b, 0x0e, 0x44, 0xff, 0x02,
0x38, 0x0e, 0x88, 0xfa, 0x2a, 0xca, 0x0f, 0xb3, 0xd5, 0x31, 0x0a, 0x5c, 0x16, 0xa4, 0x4e, 0x5b,
0x8f, 0xa5, 0x71, 0x44, 0xe6, 0xb2, 0x22, 0xed, 0x1a, 0x73, 0x10, 0x6c, 0xa1, 0x18, 0xf7, 0x25,
0x47, 0xde, 0xee, 0x6f, 0xdb, 0xc1, 0x6a, 0xaf, 0x75, 0x3d, 0x88, 0x15, 0x0d, 0x81, 0x41, 0xb4,
0xc1, 0x89, 0x36, 0x2e, 0x05, 0x7a, 0x3d, 0xfc, 0xf6, 0xf6, 0x67, 0x84, 0xf8, 0xd6, 0xdd, 0x2c,
0xc7, 0x1b, 0x26, 0x7f, 0xf2, 0x37, 0x02, 0xa8, 0xa5, 0x13, 0x2c, 0xc9, 0x21, 0xc2, 0x4e, 0xe7,
0x7c, 0x0b, 0xef, 0xc5, 0x08, 0x73, 0x92, 0x1e, 0xac, 0xc6, 0xc0, 0xb9, 0x2b, 0xc0, 0xc1, 0x99,
0xda, 0xe6, 0x7c, 0x60, 0xf7, 0x8c, 0xc7, 0xda, 0x9b, 0xd3, 0xf1, 0xa3, 0xb0, 0x97, 0xe6, 0x33,
0x9c, 0xe1, 0x16, 0x7c, 0x7a, 0x17, 0x3e, 0x7b, 0xac, 0x3c, 0xab, 0xde, 0x73, 0xb2, 0x63, 0x1f,
0xec, 0xdf, 0xb8, 0xfe, 0x94, 0x48, 0xf5, 0x8f, 0xd4, 0x21, 0x80, 0xda, 0x9a, 0xbd, 0xc5, 0x4b,
0x25, 0xab, 0xfa, 0x25, 0x84, 0x16, 0x74, 0x7b, 0x74, 0x24, 0x94, 0x17, 0x4f, 0xf3, 0x71, 0x7e,
0x6d, 0xe1, 0x50, 0x8a, 0x5d, 0xd5, 0xbd, 0x31, 0x9e, 0x85, 0x4f, 0x8e, 0xb8, 0x92, 0xfa, 0xf6,
0xfe, 0xae, 0x84, 0x46, 0x4a, 0x30, 0x67, 0xf5, 0xbb, 0x79, 0x42, 0xc4, 0x43, 0x71, 0x7c, 0xb7,
0xec, 0xf5, 0xb9, 0xea, 0x65, 0x63, 0x42, 0x2f, 0x36, 0x68, 0xf7, 0xe2, 0x19, 0xa1, 0x73, 0x64,
0x85, 0x30, 0xe8, 0xa8, 0xd6, 0xfe, 0x75, 0x04, 0xcb, 0xa0, 0x60, 0x6f, 0xc7, 0x5c, 0x36, 0xfc,
0xb7, 0xf0, 0xf7, 0xfd, 0xe6, 0xbd, 0x51, 0x49, 0xee, 0xe4, 0x74, 0x97, 0x74, 0x4e, 0x89, 0xab,
0x92, 0x70, 0x82, 0x96, 0x62, 0x2d, 0x17, 0x54, 0xa7, 0x82, 0x74, 0xca, 0x62, 0x4d, 0xd2, 0x4a,
0x4a, 0x4c, 0x30, 0xea, 0xc0, 0x57, 0xc8, 0x61, 0x22, 0x81, 0x4a, 0x8a, 0xc2, 0x3b, 0xb8, 0xa0,
0x2a, 0xff, 0x93, 0x92, 0xa7, 0x03, 0x6f, 0x7e, 0x25, 0xea, 0xd5, 0x98, 0x6a, 0x45, 0xc4, 0x84,
0xcd, 0x07, 0xe8, 0x65, 0xe1, 0x96, 0x41, 0xde, 0x16, 0x99, 0x81, 0x8f, 0xd5, 0x6b, 0xfc, 0xe9,
0x41, 0x82, 0x6d, 0xa4, 0x2a, 0x4a, 0x61, 0xd9, 0x27, 0x4b, 0xd8, 0x06, 0x0d, 0xbb, 0x00, 0x53,
0x39, 0xf6, 0x6a, 0x53, 0xab, 0x47, 0xdd, 0x8e, 0xc5, 0xe8, 0xf7, 0x89, 0x00, 0x1d, 0xce, 0xce,
0xba, 0xab, 0x7c, 0xe9, 0xba, 0xc3, 0x23, 0xaa, 0xa3, 0x8a, 0xb4, 0x52, 0x45, 0x86, 0x59, 0xf5,
0xe3, 0x2b, 0x17, 0x8e, 0x58, 0x8b, 0x4b, 0x66, 0x6a, 0xae, 0xb6, 0xf5, 0xee, 0xf3, 0x73, 0x2e,
0x9e, 0xad, 0xdb, 0x51, 0x1b, 0x31, 0xda, 0x52, 0xf2, 0x23, 0xb3, 0x15, 0x8c, 0xcd, 0xf0, 0x25,
0xe2, 0x12, 0x33, 0xa9, 0xc4, 0xf1, 0x82, 0x8a, 0x7b, 0x73, 0xf6, 0xeb, 0x17, 0x60, 0xf0, 0x60,
0x2e, 0x53, 0xc0, 0xef, 0x61, 0x26, 0x8a, 0xf9, 0x49, 0x06, 0xae, 0x5c, 0xdd, 0xa7, 0x86, 0xa6,
0x92, 0x63, 0xec, 0x95, 0xba, 0x49, 0xa8, 0xa8, 0x16, 0x8e, 0x5c, 0xb7, 0x45, 0x94, 0x57, 0xdc,
0x4c, 0x5c, 0x14, 0x36, 0x4d, 0x19, 0x21, 0x92, 0xfb, 0x11, 0xe0, 0x74, 0x2d, 0xcb, 0x1d, 0xbc,
0xa9, 0x55, 0x9e, 0x81, 0x64, 0x72, 0x65, 0x54, 0x38, 0xbb, 0x0d, 0x17, 0x5b, 0xeb, 0x7c, 0xc6,
0x74, 0x6e, 0x8b, 0xa1, 0x84, 0x05, 0x3f, 0x25, 0x2c, 0xbd, 0x80, 0x05, 0xd3, 0x85, 0xf4, 0x1e,
0x8a, 0x52, 0xaf, 0x0b, 0x8c, 0x8f, 0x10, 0x0d, 0x74, 0x8d, 0x45, 0x08, 0xda, 0x1f, 0x75, 0xbe,
0x68, 0x61, 0xaa, 0x6c, 0x27, 0x4a, 0xf9, 0x81, 0xfb, 0x76, 0x1a, 0x29, 0x17, 0x7d, 0xa3, 0x22,
0xed, 0xa0, 0x04, 0x26, 0x39, 0xca, 0x0c, 0x1d, 0x9f, 0xa6, 0xe9, 0x38, 0xab, 0x6c, 0xfe, 0xa2,
0x38, 0x7c, 0xb0, 0xb0, 0xf6, 0x69, 0xbd, 0xf7, 0xaf, 0xbc, 0xf0, 0x0f, 0x9e, 0x5c, 0x98, 0x1a,
0x11, 0x5c, 0x22, 0xcd, 0x26, 0x30, 0x56, 0x61, 0x98, 0x96, 0xb6, 0xf2, 0xbc, 0xeb, 0x99, 0x0e,
0xd9, 0xf8, 0xd0, 0xb5, 0x81, 0x28, 0xda, 0x96, 0x8b, 0xf7, 0x60, 0xf1, 0x46, 0x7f, 0xac, 0x89,
0xaa, 0x6f, 0x5d, 0xd4, 0x06, 0x30, 0xfe, 0xad, 0xcc, 0xae, 0x5a, 0xf4, 0x35, 0x19, 0x55, 0xfa,
0x4f, 0xac, 0x7c, 0x82, 0x64, 0x85, 0xdc, 0x88, 0x76, 0xa7, 0xb0, 0x93, 0x16, 0x30, 0x38, 0x9e,
0x14, 0x0c, 0x09, 0x01, 0xe4, 0xbe, 0xf8, 0xd1, 0x5e, 0x9e, 0x76, 0x38, 0x68, 0xc5, 0x45, 0xb5,
0x41, 0xaf, 0x03, 0x16, 0xa4, 0xe9, 0xcb, 0xc2, 0x76, 0x7d, 0x5c, 0x68, 0x70, 0xd0, 0xf7, 0x36,
0x2c, 0xbb, 0xd0, 0x44, 0xd1, 0x0e, 0xb8, 0xa5, 0x8b, 0xd9, 0x19, 0xfc, 0x7f, 0x32, 0x9b, 0x33,
0x83, 0x64, 0x12, 0xb3, 0x4f, 0x0e, 0xb7, 0xf8, 0x8d, 0x5e, 0xf1, 0xe0, 0xf0, 0x6d, 0x4e, 0x30,
0xa5, 0xdb, 0xf5, 0x24, 0xfe, 0x24, 0x28, 0x26, 0x07, 0x93, 0x27, 0xde, 0x6e, 0x57, 0x89, 0x5a,
0x7f, 0x90, 0xa7, 0xd9, 0x05, 0xef, 0xb2, 0xb4, 0x5d, 0xa0, 0x93, 0xe0, 0x6a, 0x62, 0x02, 0xe2,
0x4e, 0xa7, 0x44, 0x8f, 0x42, 0x93, 0x7e, 0x10, 0xe5, 0x27, 0x8b, 0xfe, 0xe6, 0x08, 0x09, 0x31,
0xa9, 0x30, 0x2c, 0x84, 0x5c, 0x35, 0xaa, 0xbe, 0x89, 0x64, 0x04, 0x56, 0xb5, 0x2d, 0x68, 0x86,
0xbf, 0xc4, 0x33, 0xa1, 0x65, 0xe5, 0x8b, 0x75, 0x9f, 0x4c, 0x11, 0x91, 0x96, 0x43, 0xf6, 0x0b,
0x98, 0xf8, 0x76, 0x47, 0xa2, 0xe2, 0x9a, 0x65, 0x14, 0x7e, 0xac, 0x2e, 0x3a, 0x27, 0xaa, 0x58,
0x48, 0xfc, 0x79, 0x9c, 0xd4, 0x8f, 0xee, 0x7a, 0x11, 0x77, 0x78, 0xb8, 0x85, 0xad, 0xd6, 0x20,
0xf3, 0x18, 0x6b, 0x20, 0xb8, 0xee, 0xef, 0xf3, 0x03, 0xa2, 0x70, 0x25, 0x2b, 0x63, 0x5c, 0x00,
0x31, 0x2b, 0x61, 0x77, 0xb2, 0x73, 0x35, 0xd0, 0x47, 0x09, 0xc8, 0x5b, 0x91, 0x04, 0x34, 0x71,
0x98, 0x0c, 0x2e, 0x28, 0x56, 0x7f, 0x45, 0x39, 0x0a, 0xf4, 0x1d, 0x6a, 0x3b, 0x02, 0xa8, 0x46,
0xcc, 0x70, 0x71, 0x70, 0xea, 0x9e, 0xa8, 0x22, 0xac, 0x45, 0x87, 0xd1, 0xce, 0xa0, 0x9f, 0x6a,
0xc7, 0x29, 0x61, 0xa3, 0xa8, 0x68, 0xed, 0x21, 0xf3, 0xc7, 0x32, 0x04, 0x7a, 0x7a, 0xa0, 0x4b,
0x04, 0x06, 0xb8, 0xa1, 0x11, 0x00, 0x05, 0xf0, 0xc2, 0xc6, 0xb6, 0xd2, 0x10, 0xe1, 0xde, 0x1d,
0x24, 0xd2, 0x55, 0xe3, 0xaa, 0x3c, 0xaf, 0x16, 0x81, 0xef, 0x57, 0x18, 0xe0, 0x15, 0xa0, 0x8b,
0x5b, 0xf7, 0x5b, 0x73, 0x20, 0x2c, 0x39, 0x2e, 0xe1, 0x00, 0x08, 0x86, 0xb5, 0xd0, 0x5a, 0xe8,
0x5c, 0xf4, 0x21, 0x78, 0x53, 0x8f, 0x9b, 0xf7, 0x32, 0x26, 0xab, 0xa8, 0xb0, 0x33, 0xe8, 0x73,
0x33, 0xab, 0x1c, 0xea, 0x33, 0x2e, 0x62, 0x1f, 0xae, 0x72, 0x9e, 0x60, 0x3b, 0x8b, 0x2c, 0x82,
0x46, 0xa2, 0xf5, 0x45, 0x4c, 0xa4, 0x7a, 0x5c, 0xe9, 0x08, 0xd0, 0x74, 0x02, 0x70, 0x43, 0x47,
0xcd, 0x9f, 0x5b, 0xb6, 0xd5, 0xd1, 0xd7, 0xbf, 0x80, 0xb8, 0xb2, 0xf7, 0x4a, 0x41, 0x06, 0x23,
0x32, 0x99, 0x6a, 0xee, 0x74, 0x11, 0x1a, 0x0a, 0x46, 0x08, 0xcc, 0x67, 0x26, 0xf5, 0x8f, 0x50,
0xfc, 0x6a, 0xf7, 0x94, 0x79, 0x6a, 0x57, 0xf5, 0x1b, 0x5e, 0x96, 0xda, 0x5a, 0x7c, 0xb8, 0x1a,
0x48, 0x3c, 0xc9, 0xd7, 0xda, 0x80, 0xf1, 0xf5, 0xca, 0x2f, 0x32, 0x6f, 0x60, 0xa0, 0xd1, 0x32,
0x69, 0x82, 0xcd, 0x96, 0x81, 0xa0, 0xd0, 0xf4, 0x57, 0x24, 0x55, 0x19, 0x31, 0xca, 0x6b, 0xa7,
0xee, 0x7b, 0xa4, 0x3e, 0x47, 0x07, 0x3d, 0xc9, 0x6c, 0x0e, 0x15, 0xa7, 0xab, 0x94, 0xe6, 0x76,
0x24, 0xf1, 0xfd, 0x91, 0x9d, 0x27, 0xe9, 0xac, 0xbb, 0xb5, 0xa3, 0x96, 0xe7, 0xec, 0x3d, 0x25,
0x58, 0x21, 0xf1, 0xac, 0x48, 0xcb, 0x66, 0xdd, 0xad, 0xa3, 0x66, 0xb2, 0x6d, 0xb8, 0x13, 0x84,
0x5b, 0x0c, 0xb0, 0xd4, 0x2c, 0xf3, 0xc7, 0xb5, 0xaf, 0x27, 0xeb, 0xaa, 0x3a, 0x54, 0x23, 0x3e,
0x78, 0xbc, 0xe6, 0x12, 0x54, 0xaf, 0xa0, 0x68, 0xbe, 0x90, 0x28, 0x24, 0x5d, 0x31, 0x8c, 0x04,
0x5f, 0x33, 0xc9, 0x39, 0x87, 0x46, 0x3d, 0xf2, 0x1a, 0x57, 0xe6, 0x47, 0x8a, 0xf1, 0x27, 0xd0,
0xe9, 0xec, 0xb5, 0x40, 0x78, 0xc0, 0x39, 0x6c, 0xcc, 0x3e, 0x9e, 0x80, 0x21, 0x3b, 0x41, 0x0c,
0xde, 0x53, 0x71, 0x67, 0x9f, 0xe7, 0xb9, 0x88, 0x75, 0xc6, 0xb4, 0x69, 0x21, 0x72, 0xbb, 0x4c,
0x9d, 0xee, 0xd9, 0x4f, 0xaf, 0xa7, 0xc6, 0xc8, 0x78, 0x8a, 0x7a, 0x45, 0x53, 0x46, 0x2a, 0x5f,
0x24, 0x21, 0x8d, 0x01, 0x5a, 0xa4, 0x82, 0x54, 0xb3, 0x4a, 0x0c, 0xa8, 0xe9, 0x9e, 0x51, 0x1a,
0x1d, 0x63, 0xe8, 0x4a, 0x90, 0xde, 0xe2, 0xf5, 0xd8, 0x85, 0xdc, 0x04, 0x7b, 0x15, 0x75, 0x86,
0x21, 0x61, 0x20, 0x6d, 0x40, 0xd7, 0x08, 0xbd, 0xfd, 0x7c, 0xaf, 0x31, 0x21, 0x15, 0x58, 0x29,
0xac, 0x3e, 0x82, 0xb1, 0x0e, 0xab, 0xcb, 0xfe, 0xdf, 0x7b, 0x8b, 0xe4, 0x67, 0xa2, 0xf3, 0x7e,
0x94, 0xd0, 0x39, 0xca, 0xa0, 0x6c, 0x36, 0x0b, 0x10, 0x19, 0xba, 0x49, 0x02, 0x91, 0x93, 0x34,
0xdf, 0xde, 0xfe, 0x62, 0x9c, 0xf1, 0x43, 0xfa, 0xb5, 0x22, 0x73, 0xda, 0x0c, 0x04, 0x32, 0x8a,
0x3a, 0x2b, 0xb8, 0xdd, 0x2d, 0xc2, 0x32, 0x88, 0x28, 0x05, 0xbf, 0x52, 0xbb, 0x07, 0xda, 0x82,
0x51, 0x1c, 0x2c, 0xf3, 0xdf, 0xb6, 0xd6, 0x66, 0xdf, 0xc1, 0xb8, 0x90, 0xe2, 0x87, 0xc3, 0x08,
0x73, 0x66, 0x51, 0x17, 0x13, 0xc2, 0x1c, 0x46, 0xaa, 0x6f, 0x59, 0x99, 0x41, 0x2c, 0x55, 0x85,
0x0d, 0x0b, 0x1c, 0x78, 0x2a, 0x0c, 0x1e, 0x72, 0xea, 0xad, 0xf8, 0x0f, 0x58, 0xb3, 0xd2, 0x29,
0x0b, 0x5d, 0x2d, 0x5f, 0xc1, 0xed, 0x92, 0xda, 0xb5, 0x49, 0x8d, 0x70, 0x9d, 0x09, 0xc5, 0x9f,
0x6b, 0xbb, 0x08, 0xaf, 0x0f, 0xcf, 0x81, 0x67, 0xde, 0x97, 0xb5, 0xbb, 0x23, 0x10, 0xdd, 0xc8,
0x3a, 0xdf, 0x90, 0x32, 0xe2, 0x21, 0x2d, 0xf1, 0xf5, 0xda, 0x0f, 0xd9, 0xbc, 0x8c, 0x0d, 0x40,
0x0a, 0x98, 0xe1, 0xea, 0x87, 0xdd, 0x99, 0xe5, 0x79, 0x6c, 0x2a, 0x2d, 0xd3, 0x7a, 0x2e, 0x46,
0xf1, 0xb6, 0x2a, 0xba, 0x12, 0x5c, 0xca, 0xd9, 0x60, 0x64, 0xbd, 0x67, 0x0e, 0x18, 0xb0, 0x02,
0x16, 0x54, 0x00, 0xb5, 0x4b, 0xa6, 0xd6, 0x03, 0x51, 0xd5, 0x94, 0x16, 0x7a, 0x5b, 0x10, 0xf3,
0x43, 0x64, 0xa2, 0xfd, 0x35, 0x66, 0x07, 0x83, 0xae, 0xe1, 0xda, 0x79, 0xb6, 0xbd, 0x6f, 0xec,
0x62, 0xb2, 0x8d, 0xb8, 0xee, 0x4d, 0x72, 0x87, 0xd4, 0x1d, 0x7c, 0x30, 0x44, 0x4d, 0x44, 0x46,
0x37, 0x34, 0x8d, 0xed, 0xf9, 0x63, 0xcc, 0xa0, 0x11, 0xcb, 0xc5, 0xe8, 0x4f, 0xb7, 0xff, 0xfd,
0x2d, 0x99, 0x44, 0xb5, 0x64, 0x0e, 0xf7, 0x56, 0xd7, 0xb5, 0xf7, 0xe6, 0xb6, 0xfd, 0x56, 0x7a,
0x57, 0x37, 0x6f, 0x05, 0xf1, 0x97, 0x2d, 0xb5, 0x2e, 0x99, 0x11, 0x06, 0x47, 0x97, 0x4f, 0xc5,
0xca, 0x57, 0x18, 0x8a, 0xd1, 0x84, 0x40, 0xc3, 0xda, 0x0e, 0x2b, 0x15, 0xbc, 0x20, 0x79, 0xd4,
0x25, 0xe9, 0x2c, 0x67, 0x3b, 0xb3, 0x76, 0xa0, 0xa4, 0x6b, 0xa0, 0xd1, 0xfe, 0x97, 0x98, 0x0c,
0x49, 0xb1, 0x89, 0xc5, 0x41, 0x91, 0x1d, 0x4f, 0xf2, 0x20, 0x55, 0x6a, 0xd0, 0x96, 0x9a, 0x52,
0xdb, 0x1c, 0x03, 0x9e, 0x9b, 0x01, 0x2c, 0x36, 0xcb, 0x0c, 0x99, 0x03, 0xc5, 0x71, 0x45, 0x2d,
0x40, 0x92, 0xd8, 0x63, 0x5d, 0x77, 0x3d, 0x93, 0x68, 0x11, 0x36, 0xcb, 0x2e, 0x86, 0xec, 0x49,
0x16, 0xcb, 0x12, 0xee, 0x47, 0x37, 0xca, 0x8c, 0xc5, 0xfd, 0x88, 0x66, 0x1c, 0x24, 0x75, 0x61,
0xd0, 0x6c, 0x92, 0x74, 0xc4, 0x49, 0xfb, 0x14, 0x1d, 0x11, 0xc6, 0xa1, 0xfc, 0x08, 0xbe, 0xd1,
0x27, 0xcc, 0x32, 0x65, 0x95, 0xc4, 0x1f, 0xd0, 0xba, 0xf7, 0xb0, 0x1f, 0x58, 0xf3, 0x8f, 0x9c,
0xec, 0xe0, 0x74, 0x73, 0x9d, 0x1c, 0xc9, 0x63, 0x2d, 0x98, 0x9a, 0x4d, 0x2e, 0x4a, 0xa9, 0xd0,
0xf2, 0x74, 0xbd, 0x00, 0xee, 0xd4, 0xd0, 0xf6, 0x69, 0x9f, 0xdc, 0x8d, 0xe8, 0x5f, 0x41, 0x1f,
0xc1, 0x32, 0x9c, 0x97, 0x3c, 0x7c, 0x8b, 0xcb, 0x62, 0x12, 0x0f, 0x31, 0x78, 0xc9, 0x8c, 0x38,
0x55, 0x59, 0xb5, 0xba, 0x2c, 0x1d, 0x07, 0xf4, 0x4b, 0xc2, 0x8f, 0x9f, 0x3a, 0x72, 0x14, 0x18,
0x42, 0xf3, 0x57, 0x36, 0x3d, 0x4b, 0x7e, 0x08, 0xb9, 0xd6, 0xbd, 0x3f, 0x0e, 0xea, 0x45, 0x29,
0xff, 0x1a, 0x55, 0x12, 0x6f, 0xa1, 0x21, 0xce, 0x92, 0x8a, 0xfb, 0xe1, 0xd6, 0xf6, 0x29, 0x9c,
0x3e, 0x39, 0x62, 0x06, 0x38, 0x18, 0x4b, 0xd8, 0xa6, 0x5f, 0x4c, 0x2d, 0x62, 0xbd, 0x5c, 0x31,
0x79, 0x8b, 0xcf, 0xfe, 0x65, 0x24, 0xca, 0x9b, 0x12, 0xec, 0xd3, 0xb5, 0xff, 0xe3, 0x6e, 0x4e,
0x5b, 0x2c, 0x68, 0xf1, 0xae, 0xa4, 0xcb, 0x28, 0xb9, 0x76, 0x9c, 0x31, 0x9c, 0xaf, 0x18, 0x2e,
0x88, 0x4d, 0xda, 0x3a, 0x88, 0x72, 0xe1, 0xa9, 0xe8, 0xe7, 0x43, 0x0d, 0x22, 0xf0, 0x62, 0x45,
0x45, 0xec, 0xc3, 0x8c, 0x2d, 0x5d, 0x32, 0xf8, 0x9d, 0xa2, 0x9e, 0xe2, 0xfd, 0xb5, 0x36, 0x9d,
0x10, 0xa6, 0xc5, 0xf9, 0x80, 0x33, 0x06, 0x6a, 0x71, 0x36, 0xc7, 0xfa, 0x72, 0xab, 0x2c, 0xe0,
0xbc, 0x02, 0x09, 0xea, 0x84, 0x79, 0x9c, 0xf3, 0xfc, 0x76, 0xbd, 0x4e, 0x3b, 0xc6, 0x9c, 0xbc,
0x63, 0x95, 0x37, 0x34, 0xe4, 0x58, 0x6f, 0x1f, 0x6b, 0x9e, 0x50, 0xdd, 0xe2, 0x5e, 0x4d, 0x3a,
0xc1, 0xab, 0x71, 0xf6, 0x4f, 0xab, 0x3f, 0x36, 0x23, 0xfb, 0x0d, 0x53, 0x07, 0xb9, 0x88, 0xb1,
0x54, 0xd9, 0x78, 0x4a, 0x41, 0xc5, 0x59, 0x4a, 0x57, 0x31, 0x69, 0x48, 0x34, 0x59, 0x5e, 0x45,
0x95, 0x1b, 0x92, 0x66, 0x6a, 0x79, 0x03, 0xa5, 0x20, 0x33, 0x3e, 0x6d, 0x01, 0xad, 0x11, 0x77,
0x3b, 0x29, 0xd7, 0x80, 0x12, 0xfe, 0x1e, 0x45, 0xf7, 0xb1, 0x1b, 0x84, 0xf8, 0xf8, 0x02, 0xe9,
0x1f, 0xd3, 0x95, 0xb1, 0x52, 0x98, 0x49, 0x05, 0x3c, 0x65, 0xe9, 0x40, 0xf9, 0x0f, 0xa3, 0xc9,
0xb7, 0x0a, 0x2e, 0x96, 0x4f, 0xbb, 0x3f, 0xc2, 0xe1, 0x44, 0x73, 0x8d, 0x87, 0x15, 0xa2, 0x7d,
0x4e, 0xe0, 0x12, 0xb9, 0x1c, 0x3a, 0x9f, 0x0b, 0xba, 0xe2, 0xac, 0x22, 0xd0, 0x14, 0x50, 0xfe,
0xee, 0xf8, 0xdc, 0xee, 0x6f, 0x7e, 0x5a, 0xb4, 0x43, 0xe1, 0xcd, 0x55, 0x00, 0xf6, 0x3f, 0xab,
0xe5, 0x3c, 0x1d, 0xd1, 0x46, 0xe7, 0x27, 0xbe, 0xdd, 0x8b, 0x73, 0xa0, 0x09, 0x58, 0x29, 0xfd,
0xf9, 0x52, 0xb2, 0x0c, 0xfd, 0x67, 0xbc, 0x96, 0xcb, 0xf4, 0x77, 0xf1, 0x57, 0x65, 0x5f, 0x43,
0x13, 0x98, 0xe9, 0x8b, 0xd7, 0xb7, 0xf7, 0x8c, 0x3c, 0xa9, 0x8d, 0xe5, 0xe5, 0x4c, 0xc7, 0x0b,
0x1f, 0x83, 0x1b, 0x2d, 0xb2, 0xbb, 0x77, 0xb2, 0x2b, 0x84, 0x58, 0xcf, 0xb0, 0x29, 0x18, 0xa9,
0x32, 0xec, 0xfe, 0x70, 0x03, 0x10, 0xc7, 0x97, 0x9c, 0x0f, 0x75, 0x82, 0x47, 0x64, 0x16, 0x35,
0x59, 0x24, 0x8f, 0x86, 0x39, 0xa0, 0x1d, 0x2d, 0xce, 0x6e, 0xa8, 0xf4, 0x5c, 0x14, 0xf1, 0xa4,
0x8c, 0x4d, 0xd7, 0x71, 0xeb, 0xd2, 0x82, 0xf5, 0xd0, 0x9c, 0xde, 0xe6, 0xb0, 0xc3, 0xca, 0xa9,
0x0a, 0x0b, 0x57, 0xec, 0x71, 0xd9, 0x2d, 0x4c, 0x09, 0xeb, 0xe0, 0x15, 0xe8, 0x02, 0x70, 0x69,
0x3d, 0xc2, 0xe6, 0x7a, 0x2c, 0x36, 0x4a, 0xce, 0x59, 0x34, 0x96, 0xe3, 0x46, 0xbe, 0xcf, 0x98,
0xf7, 0x76, 0x7e, 0x39, 0x44, 0x9f, 0xfe, 0xe3, 0x1c, 0x98, 0x6c, 0x9d, 0xac, 0xe6, 0xb9, 0x72,
0x86, 0xef, 0xa7, 0x8b, 0x27, 0x20, 0xab, 0xfc, 0xfe, 0x43, 0x7f, 0xbb, 0x03, 0x39, 0x9e, 0x76,
0xd8, 0x6d, 0x6d, 0x41, 0xfc, 0xd7, 0x3b, 0x40, 0xff, 0x1f, 0x63, 0xc6, 0x2d, 0xda, 0x21, 0x01,
0x06, 0xe7, 0x26, 0x49, 0x5f, 0x43, 0xab, 0x42, 0xf7, 0xd7, 0x51, 0x5d, 0x74, 0xf9, 0x07, 0xc8,
0x0c, 0xf0, 0x47, 0xf3, 0x57, 0x90, 0xd7, 0x79, 0x94, 0x30, 0xea, 0x22, 0xf7, 0xb3, 0xb3, 0xdf,
0xdc, 0x81, 0xf7, 0x81, 0x25, 0x1b, 0xd2, 0xac, 0x72, 0x70, 0xb5, 0xa8, 0xa7, 0xa8, 0x1c, 0xf8,
0x14, 0x83, 0x4f, 0xc0, 0xae, 0x69, 0xf3, 0x2f, 0x56, 0x53, 0xac, 0x8d, 0x96, 0x6a, 0x9d, 0xda,
0x07, 0x25, 0xff, 0x0f, 0x62, 0x1e, 0xf9, 0x7a, 0x09, 0x06, 0xf7, 0xe8, 0xca, 0xca, 0xb5, 0x23,
0x9e, 0x5b, 0xdf, 0x90, 0xde, 0x9b, 0x66, 0xdb, 0xd7, 0x74, 0x7b, 0x92, 0x38, 0x33, 0x90, 0x63,
0x42, 0xda, 0x8f, 0xb1, 0x91, 0xf6, 0xb3, 0xa6, 0x47, 0x4b, 0x3b, 0x41, 0x32, 0xa6, 0xad, 0xb4,
0x11, 0xa1, 0xd0, 0x6c, 0x0e, 0xda, 0x8c, 0x05, 0x95, 0xca, 0x45, 0x06, 0x74, 0x0c, 0xa5, 0x9f,
0x8a, 0x19, 0xc3, 0xce, 0x7c, 0x21, 0x60, 0x69, 0x95, 0x2a, 0x08, 0x6b, 0x78, 0x4c, 0x58, 0x65,
0x5c, 0x90, 0xcf, 0x48, 0xfc, 0x44, 0x5f, 0xec, 0xf2, 0xf1, 0x4e, 0x96, 0x5b, 0x70, 0xb5, 0x93,
0x0a, 0x37, 0x29, 0x8b, 0xfb, 0xec, 0x35, 0xa9, 0x90, 0x20, 0x07, 0x85, 0x3f, 0x40, 0x68, 0xf4,
0xa8, 0x04, 0x76, 0x4d, 0xf1, 0x9f, 0x72, 0xbd, 0x73, 0x1a, 0xe2, 0xe2, 0xa3, 0x81, 0x79, 0x6e,
0x41, 0x83, 0x59, 0x62, 0x7c, 0x52, 0x45, 0x7b, 0x4c, 0xf8, 0xeb, 0xcc, 0x0e, 0xab, 0x79, 0xb7,
0xf7, 0xc8, 0x90, 0x12, 0x7d, 0x74, 0x59, 0x62, 0xa5, 0x27, 0xd2, 0xb9, 0xa0, 0x8d, 0x64, 0xf3,
0xd6, 0x17, 0xd3, 0x71, 0x50, 0x63, 0xf3, 0xde, 0xd8, 0x2a, 0xcc, 0xfa, 0x12, 0x20, 0x6a, 0x7c,
0x5c, 0xf9, 0xa6, 0x8d, 0x8f, 0x3c, 0x70, 0x6f, 0x8f, 0x7a, 0x18, 0x73, 0xe2, 0x72, 0x5a, 0x95,
0x79, 0x0c, 0x20, 0x9b, 0x8e, 0x89, 0xe8, 0x4b, 0xe7, 0x29, 0xff, 0x06, 0x1a, 0x55, 0xab, 0x23,
0x80, 0xb4, 0x51, 0x3e, 0xf9, 0x4a, 0x59, 0xeb, 0x3e, 0x8c, 0x39, 0x2c, 0x68, 0x28, 0x81, 0xae,
0x0f, 0xeb, 0x6c, 0x0d, 0x9f, 0xfc, 0xec, 0xf9, 0xfd, 0xb3, 0x5d, 0x02, 0x9b, 0xfd, 0x11, 0xa9,
0xf8, 0x89, 0x47, 0x5d, 0x4c, 0xac, 0xab, 0xd6, 0xf4, 0xa6, 0x9a, 0x6c, 0x35, 0x30, 0x43, 0xdd,
0x5d, 0x70, 0xee, 0x70, 0x1e, 0x21, 0x00, 0xca, 0xad, 0x2d, 0xc9, 0xe1, 0x9f, 0xa7, 0x56, 0xbd,
0x86, 0xaf, 0xcf, 0xed, 0x1b, 0xfb, 0xe9, 0x43, 0x17, 0x83, 0xc4, 0x8d, 0x0f, 0x66, 0xf3, 0x07,
0xb7, 0x7e, 0x53, 0xb0, 0xfa, 0x57, 0xbe, 0x67, 0x20, 0x93, 0x89, 0x35, 0xf5, 0x5f, 0x01, 0x53,
0x84, 0xb3, 0xc1, 0xa5, 0x9b, 0x2c, 0x37, 0x29, 0xfc, 0x3a, 0x82, 0x3d, 0x6d, 0xaf, 0xc4, 0x6b,
0x3a, 0xb5, 0x5d, 0x67, 0xb9, 0x37, 0xf0, 0xb2, 0x6c, 0x7c, 0xf1, 0x18, 0x69, 0x46, 0xcf, 0x3f,
0xb5, 0x9f, 0x2e, 0x5e, 0x78, 0xba, 0x23, 0x75, 0x2d, 0x51, 0x1e, 0x5b, 0xb0, 0xe7, 0xb2, 0xd6,
0xe4, 0x47, 0x13, 0x76, 0x41, 0x71, 0x72, 0x0e, 0x33, 0x53, 0xdd, 0x8d, 0x50, 0xf6, 0x4f, 0xc1,
0x27, 0x06, 0xc8, 0xc5, 0x8b, 0xc6, 0x42, 0x1a, 0x8f, 0x92, 0xdf, 0x59, 0x87, 0x08, 0x29, 0x1e,
0x36, 0xc8, 0xa4, 0xe0, 0x87, 0x56, 0x23, 0xc3, 0x62, 0x9c, 0xae, 0xcb, 0xa0, 0x4c, 0x69, 0xa5,
0x9e, 0x57, 0xf9, 0x4f, 0xba, 0x36, 0xdc, 0x36, 0xfd, 0x60, 0x2d, 0x9b, 0x0f, 0xf3, 0x88, 0x28,
0x27, 0x56, 0x74, 0xa5, 0xcb, 0x40, 0x9d, 0x6b, 0x22, 0xaa, 0xc8, 0x94, 0xa8, 0x8a, 0xd8, 0xcc,
0x43, 0xec, 0xd3, 0x86, 0xff, 0x00, 0xf6, 0xd0, 0xe8, 0xc9, 0x39, 0x73, 0x11, 0x5c, 0xf7, 0x1c,
0xbc, 0x61, 0xf1, 0x59, 0x50, 0x2a, 0x90, 0x9f, 0xb5, 0x61, 0xd2, 0xd4, 0xd8, 0x8f, 0xe1, 0x34,
0x69, 0xef, 0x33, 0xea, 0xf3, 0x20, 0x6d, 0xe9, 0x3a, 0xfd, 0xdd, 0x75, 0xd1, 0x2e, 0xa7, 0x41,
0xcd, 0x1c, 0x8d, 0x8f, 0x79, 0x60, 0xd0, 0x26, 0x72, 0x52, 0x7b, 0x39, 0xe3, 0x4b, 0xb2, 0x94,
0x66, 0x9e, 0x1d, 0x04, 0x61, 0x7b, 0xcf, 0x81, 0x3c, 0x3f, 0x05, 0x10, 0x73, 0x08, 0x86, 0x40,
0x83, 0x6a, 0x73, 0xd1, 0xf0, 0x34, 0xc3, 0x88, 0xfc, 0xb8, 0xd6, 0x98, 0x18, 0x35, 0x13, 0x52,
0xe5, 0x35, 0xb7, 0x40, 0x26, 0x65, 0xe5, 0xf3, 0xac, 0xc8, 0x18, 0x59, 0xd7, 0xe0, 0x87, 0xfa,
0x22, 0x26, 0x81, 0xfa, 0x46, 0xcd, 0x8b, 0xd6, 0x1f, 0xd2, 0x7d, 0x9a, 0xc6, 0x00, 0xd1, 0xb8,
0x15, 0x83, 0xbf, 0xc4, 0x3d, 0x02, 0xd7, 0x62, 0xc4, 0xd5, 0xdb, 0x82, 0x57, 0xf1, 0x4a, 0xbf,
0x3b, 0xbe, 0x79, 0x3b, 0x4c, 0xc0, 0x5b, 0x99, 0xaf, 0xfc, 0x93, 0x3b, 0x7f, 0x9d, 0xdc, 0xec,
0x49, 0x53, 0x43, 0x67, 0x3f, 0x65, 0x5b, 0x20, 0x86, 0xb9, 0xcb, 0xb1, 0xd2, 0x16, 0x9c, 0xa8,
0xef, 0xb1, 0x5a, 0x55, 0xb7, 0x93, 0x2b, 0x86, 0xb1, 0x9f, 0xa9, 0x85, 0xce, 0xa7, 0x87, 0x90,
0x35, 0xfe, 0x9b, 0xfa, 0xe4, 0xc7, 0x35, 0x3f, 0x58, 0x02, 0xd3, 0xb2, 0xa3, 0x88, 0xa9, 0x8f,
0xbc, 0x43, 0x9a, 0x75, 0xb7, 0xe4, 0xdf, 0x53, 0x1c, 0x8f, 0x9c, 0x5c, 0x63, 0xa5, 0x30, 0x78,
0x0b, 0x66, 0x18, 0x4e, 0xfb, 0xe2, 0x35, 0xdc, 0xb6, 0xe6, 0x14, 0x11, 0x10, 0x5d, 0x70, 0x38,
0x6f, 0x98, 0x06, 0xf7, 0xe3, 0x8f, 0xf6, 0xbb, 0x38, 0x53, 0x9b, 0x9e, 0x94, 0xe4, 0xf4, 0xc7,
0x0b, 0x2c, 0x7b, 0x7e, 0x17, 0x94, 0xbe, 0x10, 0xc7, 0x1c, 0x46, 0x6d, 0x4c, 0x67, 0x70, 0xaa,
0x00, 0x31, 0xfc, 0x53, 0x23, 0xb9, 0xb8, 0x11, 0x2d, 0x9e, 0x7d, 0xde, 0xaf, 0x05, 0x5e, 0xea,
0x65, 0x01, 0x96, 0x8a, 0x18, 0xaf, 0x33, 0x88, 0xc3, 0x7b, 0x75, 0x75, 0x6d, 0xd3, 0xe6, 0x8a,
0x52, 0xbc, 0x1d, 0xb8, 0x7b, 0xc6, 0x65, 0xb1, 0x88, 0xb5, 0x1b, 0x65, 0xde, 0x54, 0x45, 0x0f,
0x73, 0x2c, 0x32, 0x63, 0xe3, 0xd2, 0x8c, 0xa3, 0x56, 0xba, 0x31, 0xa9, 0x20, 0x3f, 0xa1, 0x24,
0xad, 0x62, 0x34, 0x5d, 0x47, 0x49, 0x87, 0x7f, 0x3f, 0x8a, 0xc4, 0x3b, 0x69, 0x62, 0x6e, 0x47,
0x06, 0xdf, 0x49, 0x15, 0x46, 0xa1, 0x8f, 0x8f, 0xb9, 0x2f, 0xfd, 0xf2, 0x01, 0x97, 0x79, 0x0d,
0x99, 0x11, 0xee, 0x4c, 0x6e, 0x85, 0x85, 0x13, 0x9d, 0x80, 0x81, 0x90, 0x88, 0xeb, 0x65, 0xdb,
0xae, 0xda, 0x95, 0x6f, 0xdc, 0xf7, 0x82, 0x21, 0x8f, 0x2b, 0x10, 0x6e, 0x44, 0x9d, 0x6f, 0x59,
0xa1, 0xc2, 0xa2, 0x59, 0x74, 0xbd, 0xc7, 0x0a, 0xa8, 0x8f, 0x5c, 0x16, 0xd3, 0xec, 0x69, 0xa6,
0x96, 0xb4, 0xd9, 0xba, 0xbd, 0xe0, 0x94, 0xb2, 0x9d, 0xce, 0x0e, 0x9e, 0xb0, 0x72, 0x9a, 0xdd,
0xaf, 0x7b, 0x0e, 0x56, 0xb3, 0x7b, 0x39, 0xe5, 0xc8, 0xfa, 0x9c, 0x8b, 0x32, 0x0e, 0x03, 0x77,
0xa0, 0x8a, 0xff, 0xc6, 0x5c, 0xe7, 0x13, 0x7f, 0xe9, 0xf3, 0x5f, 0xa7, 0x90, 0x03, 0x4e, 0xb3,
0xc4, 0x30, 0xc6, 0xe7, 0x87, 0xcf, 0x27, 0xdb, 0xb9, 0xe9, 0x0d, 0x0f, 0xb5, 0x31, 0x92, 0xe6,
0xcb, 0xa2, 0xce, 0x95, 0xa0, 0xe7, 0x31, 0x39, 0x86, 0xe0, 0x34, 0xc0, 0xfa, 0x61, 0x6e, 0x40,
0xba, 0x73, 0x87, 0x1f, 0x43, 0x38, 0x1c, 0x7d, 0x61, 0xe4, 0xfc, 0x73, 0x38, 0xb5, 0x01, 0x7c,
0x3f, 0xd5, 0x39, 0xdf, 0xa7, 0xdf, 0x54, 0x05, 0xa0, 0x6c, 0x79, 0x29, 0x00, 0xd2, 0xf1, 0x9c,
0xd4, 0x92, 0xac, 0x5c, 0x96, 0xd7, 0xae, 0xc4, 0xc8, 0xd3, 0xf4, 0xdf, 0x45, 0x69, 0x59, 0x0f,
0x66, 0x4e, 0x79, 0x7a, 0x12, 0x4f, 0x29, 0xe0, 0xd8, 0x16, 0x05, 0xa1, 0xa0, 0xf0, 0xa1, 0xae,
0xcc, 0x58, 0x46, 0xb3, 0x89, 0xcc, 0x43, 0xf7, 0xa8, 0x59, 0xd5, 0x9a, 0xb1, 0xb4, 0x29, 0x31,
0x40, 0x3c, 0x1d, 0x97, 0x21, 0xe2, 0x91, 0xff, 0x29, 0x39, 0xe9, 0xec, 0x18, 0x74, 0xa1, 0xd4,
0xfd, 0x3f, 0x35, 0xd7, 0x75, 0x3c, 0x2c, 0x08, 0xd2, 0x33, 0x2e, 0x7b, 0x07, 0xe3, 0x9d, 0xe1,
0xf3, 0x07, 0xd6, 0x5c, 0x00, 0x34, 0x9b, 0x1a, 0xc6, 0xc2, 0xb8, 0x4d, 0x4b, 0x56, 0x07, 0x05,
0x8a, 0x3e, 0x19, 0x21, 0xf3, 0xc9, 0xb1, 0x5c, 0x04, 0x28, 0x73, 0x8e, 0x2b, 0x41, 0xb2, 0x51,
0xd4, 0x42, 0x2d, 0x0f, 0x6e, 0x08, 0xe3, 0x9f, 0xad, 0x03, 0xb0, 0xd0, 0x14, 0x17, 0xe6, 0x02,
0xff, 0x2b, 0x7a, 0xcc, 0x8d, 0x50, 0x12, 0x30, 0x1e, 0xa5, 0xcf, 0xf4, 0x8c, 0x23, 0xdb, 0xee,
0x4c, 0xb3, 0xa9, 0x98, 0x0c, 0xfd, 0x49, 0x8d, 0x76, 0x4f, 0x53, 0x1a, 0x2e, 0x2e, 0x3b, 0xa3,
0x7f, 0x9d, 0x86, 0xc0, 0x77, 0x72, 0x13, 0x0c, 0xff, 0xa8, 0x87, 0xbc, 0xd4, 0xc7, 0x7f, 0x00,
0x88, 0x99, 0x56, 0xb0, 0x34, 0x0b, 0x18, 0x37, 0xf1, 0xc6, 0x57, 0x77, 0xe5, 0x95, 0x8e, 0xbe,
0x56, 0xe1, 0x53, 0x0f, 0x30, 0xa0, 0xd1, 0xba, 0x15, 0xf0, 0x50, 0x13, 0x9f, 0x42, 0x07, 0x51,
0xfb, 0x13, 0x8a, 0x59, 0xb5, 0xe5, 0xdd, 0xf1, 0x3d, 0xe2, 0x27, 0xf4, 0xa8, 0x8f, 0xdc, 0x95,
0xd3, 0x55, 0x4e, 0x9a, 0xc0, 0x73, 0xef, 0x5f, 0x2d, 0x50, 0xf6, 0xc3, 0xbe, 0x6d, 0xc4, 0x85,
0x71, 0x8f, 0x53, 0xe7, 0xf8, 0x09, 0x73, 0x9c, 0x4e, 0x57, 0x6d, 0x99, 0x54, 0xbe, 0xd6, 0xa7,
0x18, 0x1f, 0xc2, 0xe1, 0x90, 0x91, 0x70, 0xfa, 0x94, 0x28, 0xeb, 0x4e, 0x6b, 0xca, 0xb0, 0xdb,
0x88, 0x53, 0x6e, 0x00, 0xaf, 0xd1, 0xa3, 0x98, 0x6f, 0x1f, 0x67, 0xec, 0x48, 0x63, 0x73, 0xca,
0x92, 0x22, 0xed, 0x83, 0x66, 0x09, 0x99, 0x48, 0x0c, 0x76, 0xe7, 0x3b, 0x6c, 0xa6, 0x5e, 0x8b,
0xbe, 0x39, 0x42, 0x8c, 0x00, 0x4f, 0xdf, 0x04, 0x56, 0x57, 0x3d, 0xde, 0x06, 0xe2, 0x7b, 0x2f,
0x48, 0x93, 0x3c, 0x47, 0x8f, 0xbd, 0x2e, 0x63, 0x45, 0x72, 0xca, 0xe5, 0xc4, 0xbb, 0xa3, 0x53,
0xc0, 0x45, 0x94, 0x80, 0x4b, 0x0b, 0x42, 0x72, 0x15, 0x90, 0x83, 0xc0, 0x69, 0x75, 0x8d, 0xbb,
0xac, 0xae, 0x7d, 0x1c, 0x52, 0x06, 0xe1, 0x2f, 0xb3, 0x8d, 0x3d, 0x5e, 0x1c, 0x2f, 0xa5, 0x35,
0x23, 0xc3, 0x79, 0x99, 0x5d, 0xe8, 0x48, 0x43, 0xe2, 0x63, 0x69, 0x2c, 0x74, 0x31, 0xdd, 0x31,
0x41, 0xce, 0x55, 0x3f, 0x81, 0x46, 0x52, 0xa2, 0x95, 0x7f, 0xec, 0x12, 0xd7, 0x5e, 0xbc, 0xbb,
0xc8, 0x70, 0x73, 0xb4, 0xaf, 0x3a, 0x10, 0xcb, 0x51, 0x0a, 0xed, 0x2d, 0xdd, 0xf2, 0xe9, 0x4b,
0xc1, 0x8a, 0xb2, 0x76, 0x1a, 0xd0, 0x1a, 0xb8, 0x8e, 0xee, 0xa6, 0x18, 0xa3, 0x2a, 0x5c, 0x98,
0xe7, 0xfa, 0x23, 0x9e, 0xf3, 0x7a, 0x60, 0x7b, 0xfd, 0x8a, 0x95, 0xd4, 0x19, 0x5b, 0xc7, 0xd0,
0xc8, 0x40, 0x43, 0xc4, 0x6c, 0x05, 0x9b, 0x19, 0x62, 0xa6, 0x7d, 0x69, 0xd6, 0xff, 0xb5, 0x71,
0x9e, 0x25, 0xb0, 0x97, 0x40, 0xd4, 0xc7, 0x19, 0xc8, 0x94, 0x21, 0x96, 0x57, 0x5f, 0x94, 0x59,
0x01, 0x97, 0xcb, 0xa0, 0x52, 0x16, 0x34, 0x6e, 0xd4, 0xbc, 0x86, 0xd2, 0x7e, 0x53, 0x6c, 0x1c,
0x9f, 0x43, 0xe2, 0xe1, 0x0b, 0x7e, 0xef, 0x21, 0x7b, 0xf7, 0x9d, 0x24, 0xe5, 0x50, 0x57, 0xfb,
0xbb, 0xfb, 0x59, 0xe2, 0xd4, 0x44, 0x32, 0x8a, 0xc9, 0x96, 0x8a, 0x6a, 0x1e, 0x94, 0x47, 0x36,
0x27, 0x15, 0x4d, 0x9c, 0x24, 0x82, 0x68, 0xfa, 0x33, 0xf8, 0x30, 0xb7, 0xde, 0x8d, 0x51, 0x85,
0x89, 0xbe, 0x17, 0xb0, 0x96, 0x24, 0x43, 0x07, 0xac, 0xdf, 0x89, 0x63, 0xd1, 0xdd, 0xcc, 0xfc,
0x22, 0x05, 0x2c, 0xf0, 0x2a, 0x05, 0x7e, 0x7f, 0x27, 0x59, 0x38, 0x24, 0x81, 0xbb, 0x47, 0x6e,
0x54, 0x7e, 0x55, 0xb6, 0x4d, 0xd6, 0xe0, 0x70, 0x9d, 0xa0, 0x7e, 0x3c, 0x75, 0xc8, 0x2f, 0xe8,
0x1b, 0x6d, 0x3d, 0x23, 0xae, 0xbb, 0x1e, 0xf4, 0x0f, 0xfe, 0xba, 0xc8, 0xc6, 0xa7, 0x63, 0xb0,
0xe4, 0x40, 0x8f, 0x54, 0x2a, 0x0b, 0x19, 0x49, 0x7a, 0xd5, 0x48, 0x03, 0xe9, 0xc4, 0x1d, 0xc7,
0x5e, 0x49, 0x7b, 0xbd, 0x9c, 0x88, 0x92, 0x66, 0x1c, 0xe6, 0xbd, 0xa6, 0x9d, 0x7d, 0x17, 0x59,
0x72, 0xcf, 0xb0, 0x45, 0x34, 0xd1, 0x84, 0x2d, 0x3f, 0x3f, 0x46, 0x85, 0xea, 0xf8, 0x89, 0x89,
0xe9, 0xbe, 0x59, 0xf2, 0x7b, 0x42, 0x96, 0xf6, 0xf5, 0xb4, 0xa2, 0x36, 0x7d, 0x4e, 0x5b, 0xb7,
0x68, 0xdf, 0x1e, 0xc9, 0x1b, 0xc3, 0x4d, 0x59, 0x70, 0xe2, 0x8f, 0xbf, 0x2f, 0x61, 0x35, 0x8a,
0x27, 0x08, 0x63, 0x94, 0x1a, 0xb1, 0x3a, 0xe0, 0x7d, 0x00, 0x95, 0xde, 0x9d, 0x0c, 0xbb, 0xbf,
0xe5, 0x2d, 0x0e, 0x95, 0x48, 0xbd, 0x13, 0x85, 0xf1, 0x27, 0x75, 0x62, 0x46, 0x06, 0x6b, 0xbb,
0xb8, 0x51, 0x84, 0xaa, 0xea, 0x5d, 0x6d, 0x05, 0x7d, 0x3a, 0x8e, 0x89, 0xd1, 0x5a, 0xd7, 0xda,
0x06, 0xfb, 0x6a, 0x14, 0x39, 0x49, 0x9a, 0x64, 0xcc, 0x43, 0xb7, 0x1a, 0xfc, 0x3f, 0xc2, 0xff,
0xb4, 0x5c, 0x39, 0x82, 0x03, 0x14, 0xc5, 0x32, 0xda, 0x1b, 0x31, 0x5a, 0x94, 0xa9, 0x08, 0xbc,
0xb9, 0xf4, 0xf6, 0x10, 0x6e, 0xe2, 0x65, 0xd0, 0x16, 0xdf, 0x25, 0x3a, 0x5d, 0x6c, 0x38, 0xd9,
0x2e, 0xd7, 0x22, 0xda, 0xd0, 0x58, 0xf9, 0xb9, 0x40, 0x2f, 0xa2, 0xa0, 0x38, 0x8d, 0x90, 0x6c,
0xd7, 0x6c, 0x55, 0x73, 0xce, 0x70, 0x53, 0x01, 0x05, 0xf2, 0x5d, 0xe4, 0xc0, 0x3b, 0xd4, 0x50,
0xa8, 0x16, 0xe5, 0x37, 0x82, 0x32, 0x00, 0x55, 0x98, 0x0d, 0xa6, 0xc3, 0x46, 0x88, 0x06, 0x77,
0x65, 0x31, 0x3c, 0xde, 0x9b, 0x51, 0x28, 0x96, 0x39, 0x3d, 0x77, 0xe8, 0xb2, 0x8c, 0xa5, 0x50,
0xef, 0x1c, 0x9f, 0x72, 0xcd, 0xfa, 0xf3, 0xd8, 0x1c, 0xa6, 0x07, 0xf6, 0x88, 0x92, 0x29, 0xff,
0xba, 0x39, 0x1e, 0x9e, 0xf9, 0x49, 0xcd, 0xa9, 0x72, 0x08, 0xa7, 0xc3, 0x74, 0x20, 0x6f, 0x5a,
0xa4, 0xc4, 0x7a, 0xc9, 0x3a, 0x15, 0xa5, 0x39, 0x0c, 0x68, 0xb4, 0xcf, 0xa4, 0x87, 0x44, 0xd9,
0x71, 0x72, 0xab, 0x00, 0xef, 0x21, 0x92, 0xdc, 0x8f, 0xeb, 0x1a, 0x13, 0x63, 0x98, 0x5b, 0x55,
0x63, 0xe5, 0x72, 0x89, 0xcf, 0xfb, 0x34, 0xd3, 0x22, 0x16, 0x86, 0x57, 0x01, 0x19, 0xf6, 0xeb,
0xa9, 0x95, 0xf5, 0x81, 0x1f, 0x9b, 0x64, 0x93, 0xea, 0x5d, 0x51, 0x22, 0x4e, 0x7b, 0x36, 0x18,
0xfa, 0x76, 0xbf, 0xcf, 0x65, 0x3a, 0x92, 0x52, 0x09, 0x6b, 0xf8, 0x75, 0xa8, 0xa0, 0xa0, 0xee,
0x54, 0xe6, 0x33, 0x03, 0x8c, 0xf1, 0x60, 0xca, 0xfa, 0xd1, 0x63, 0x06, 0x16, 0xd6, 0x12, 0xf5,
0x5a, 0xb8, 0x42, 0xb9, 0x70, 0x44, 0xed, 0x10, 0xae, 0xd7, 0x1d, 0x91, 0xff, 0x2e, 0x9b, 0x43,
0xd3, 0xe1, 0x82, 0x98, 0x64, 0x73, 0x93, 0xec, 0x0a, 0x4d, 0x7c, 0xea, 0xab, 0xee, 0x2b, 0x9a,
0xb4, 0xff, 0xb3, 0x43, 0xf9, 0x2e, 0xc1, 0xda, 0x9f, 0x8d, 0xc8, 0x18, 0x9b, 0xdb, 0x67, 0xe6,
0xba, 0x9b, 0x71, 0x64, 0x3c, 0x98, 0xea, 0xdb, 0x23, 0x56, 0xe0, 0x43, 0x68, 0x3c, 0x99, 0xa6,
0x47, 0x04, 0x02, 0x7a, 0x59, 0xd3, 0x25, 0x81, 0x3a, 0x54, 0x20, 0x7f, 0x49, 0x2d, 0x53, 0xea,
0xbf, 0xf5, 0xe3, 0xbb, 0x27, 0x68, 0x90, 0x96, 0x31, 0x7c, 0x9b, 0xae, 0x28, 0xb7, 0x2e, 0xb6,
0x5e, 0xc6, 0xe0, 0xa7, 0x49, 0x30, 0x4c, 0x8e, 0x6c, 0x47, 0x47, 0x1c, 0x75, 0x73, 0x0a, 0xa5,
0x37, 0x56, 0x0d, 0x3a, 0x0d, 0x3a, 0xad, 0xe8, 0x3c, 0x54, 0x29, 0x6d, 0x3c, 0x4b, 0x83, 0xc2,
0xd5, 0x57, 0x29, 0xde, 0xfe, 0xa8, 0xac, 0xb4, 0xb2, 0xd7, 0x49, 0x6e, 0xe4, 0xef, 0x76, 0x76,
0x36, 0xd9, 0x06, 0x20, 0x91, 0xe5, 0x50, 0xc4, 0xea, 0x06, 0x1a, 0xdd, 0x8f, 0x7d, 0xdf, 0xcf,
0x26, 0xb7, 0xe6, 0x35, 0x6a, 0x8b, 0xef, 0x58, 0x81, 0x58, 0xb0, 0x2f, 0x6f, 0xbd, 0xf4, 0xf0,
0xef, 0x65, 0xc6, 0xd9, 0xe4, 0x7b, 0x1f, 0x2b, 0xb2, 0x93, 0xd6, 0x7a, 0x47, 0x9a, 0xc3, 0xbf,
0x94, 0x0b, 0xf3, 0x10, 0x4f, 0x7f, 0xaa, 0xbd, 0x59, 0x4a, 0xf7, 0xc5, 0x56, 0x8e, 0x30, 0x2e,
0x9c, 0x1c, 0xcf, 0x76, 0xa9, 0xb6, 0x52, 0xb7, 0xab, 0x4a, 0x54, 0x10, 0xdd, 0xf4, 0xf2, 0x74,
0x54, 0xf3, 0x99, 0xc8, 0x48, 0x1c, 0xc5, 0x2e, 0xe0, 0xb8, 0x0a, 0x3b, 0x6e, 0x50, 0xd8, 0x99,
0x25, 0x4a, 0xd0, 0x2d, 0x2a, 0x5b, 0xd2, 0x64, 0x2e, 0xf7, 0x81, 0x81, 0x67, 0xf3, 0xa0, 0xf0,
0xef, 0xcc, 0xf5, 0x47, 0x1d, 0x58, 0x57, 0xce, 0x02, 0x1b, 0xf3, 0x70, 0xbe, 0xd9, 0xd8, 0x6d,
0x8e, 0xeb, 0xfa, 0x57, 0x47, 0xa1, 0x2d, 0x75, 0x64, 0xbb, 0xed, 0xa1, 0xaa, 0xb4, 0x95, 0x7f,
0x7c, 0x5a, 0xfa, 0xb5, 0xec, 0xbd, 0xe5, 0x6a, 0x16, 0x9f, 0xc4, 0x03, 0xd0, 0xce, 0x49, 0x47,
0x2d, 0x7b, 0xcb, 0x56, 0x9b, 0x22, 0xe9, 0xd1, 0x26, 0xbb, 0x66, 0xb2, 0xa0, 0xbe, 0xb9, 0xfd,
0xc9, 0xd3, 0x97, 0x8c, 0xac, 0x23, 0x8c, 0x25, 0x4a, 0x4b, 0x08, 0xb9, 0x03, 0x71, 0xed, 0x6f,
0x6c, 0x24, 0xf1, 0xe1, 0x25, 0x17, 0xce, 0xab, 0x72, 0x43, 0xfc, 0xc4, 0xb4, 0xa4, 0xd2, 0xee,
0x99, 0x76, 0x29, 0xf6, 0x73, 0x47, 0x0b, 0x39, 0x02, 0x1c, 0xe6, 0x5b, 0x0b, 0x1b, 0x9b, 0xa4,
0x22, 0x3c, 0xc9, 0x61, 0xe7, 0x66, 0xa2, 0xa2, 0xfb, 0x42, 0x2b, 0xd2, 0x02, 0x7b, 0x4d, 0x6b,
0x0e, 0x9e, 0xbd, 0x37, 0xfe, 0x33, 0x00, 0x71, 0x8b, 0xbf, 0x63, 0x0d, 0xf4, 0xd9, 0xdd, 0xb8,
0x2c, 0xb6, 0xe8, 0x12, 0xdc, 0x38, 0x0e, 0xa6, 0xcb, 0xfd, 0xf3, 0x21, 0xf9, 0xf1, 0x13, 0xc8,
0x17, 0xe0, 0x24, 0x31, 0xc9, 0x1d, 0x85, 0x4b, 0x85, 0x64, 0x16, 0xe2, 0x11, 0xd8, 0xb6, 0x38,
0x8f, 0x95, 0x58, 0xad, 0xcd, 0x02, 0xeb, 0xe0, 0xc0, 0x9f, 0xb3, 0xc1, 0x72, 0xa2, 0x57, 0xa5,
0x9b, 0xda, 0x27, 0x77, 0xbb, 0xe5, 0xc4, 0xff, 0x6f, 0x48, 0x57, 0xf1, 0x3c, 0xbf, 0x24, 0xda,
0x4d, 0x1e, 0xba, 0x69, 0xb8, 0x86, 0xdb, 0x43, 0x4b, 0x1a, 0x0d, 0xc2, 0x59, 0x69, 0x84, 0xbf,
0x15, 0xde, 0x7c, 0x1a, 0x8c, 0x5c, 0xf9, 0x77, 0xc3, 0xf2, 0x1f, 0xef, 0x36, 0xca, 0x51, 0xd8,
0xf6, 0x88, 0x20, 0x5d, 0x75, 0x37, 0xca, 0x4c, 0xe3, 0x84, 0xe1, 0x0f, 0xbe, 0x7b, 0x48, 0x45,
0xf2, 0xc3, 0x34, 0x60, 0x96, 0x7e, 0xaa, 0x33, 0x3c, 0xa0, 0x7d, 0xcc, 0xac, 0x29, 0xe3, 0xdf,
0x04, 0xea, 0x03, 0xe9, 0x38, 0x53, 0xaf, 0xc9, 0xe3, 0xce, 0x43, 0x52, 0x69, 0x72, 0xe0, 0xc6,
0xdf, 0x84, 0x73, 0xc3, 0x90, 0x08, 0xe4, 0xa1, 0x23, 0x4d, 0xcd, 0x75, 0xb0, 0x4d, 0x9a, 0x8d,
0xb5, 0x62, 0x06, 0xe2, 0x75, 0x97, 0x51, 0x29, 0xb2, 0xd1, 0x16, 0x61, 0x38, 0x62, 0x0e, 0xa2,
0x71, 0x62, 0x61, 0x17, 0xa4, 0x70, 0x99, 0xc6, 0x79, 0x65, 0xc0, 0xf6, 0x7e, 0xf3, 0xdf, 0xcc,
0x6d, 0xa5, 0x29, 0x80, 0xbf, 0x57, 0xfc, 0xd8, 0x56, 0x83, 0x37, 0x9e, 0x77, 0x6b, 0xaa, 0xea,
0x77, 0xa0, 0x1b, 0xde, 0xfb, 0xb8, 0x26, 0xeb, 0x74, 0xe7, 0xfe, 0xd1, 0x7e, 0x51, 0x7a, 0x74,
0xc8, 0x5e, 0x68, 0x62, 0x62, 0x07, 0xb6, 0xea, 0x04, 0x2b, 0xa5, 0x67, 0x1d, 0xe1, 0x50, 0xcb,
0x6a, 0x14, 0x45, 0xea, 0x17, 0xa4, 0xce, 0x7b, 0x9d, 0x27, 0x78, 0x3d, 0xbf, 0xcb, 0x87, 0x16,
0xd2, 0xdd, 0x3b, 0xe3, 0x13, 0x6b, 0x6b, 0xa9, 0xec, 0xca, 0x16, 0x70, 0xe8, 0x38, 0xa3, 0xf0,
0x59, 0xc8, 0xfd, 0x9a, 0x98, 0x50, 0x0f, 0x18, 0xf0, 0x57, 0x81, 0x38, 0x0f, 0x4c, 0x22, 0x62,
0x69, 0x7f, 0x86, 0x72, 0xd4, 0xb5, 0x36, 0xe3, 0x99, 0xa6, 0x40, 0x9d, 0xf1, 0x24, 0xb8, 0xa7,
0xea, 0x96, 0xd1, 0xf2, 0x96, 0x36, 0x2c, 0x57, 0xba, 0x29, 0x2a, 0x6a, 0x80, 0x78, 0x9d, 0xf0,
0x22, 0xb5, 0x63, 0x58, 0x36, 0x64, 0xb1, 0x13, 0xe7, 0xc0, 0xfe, 0x00, 0xfb, 0xb5, 0x43, 0xf5,
0x17, 0x2d, 0x74, 0xa1, 0x43, 0xfd, 0x3b, 0xf9, 0xb3, 0x53, 0x01, 0xd7, 0x1f, 0xd6, 0xc3, 0x6b,
0xc9, 0x9c, 0x64, 0x90, 0x35, 0x20, 0xae, 0xfd, 0x37, 0x1b, 0xeb, 0x14, 0x44, 0x86, 0x31, 0xac,
}
// getBcDbEncrypt returns the embedded byte blob ggetBcDbEncrypt
// (presumably an encrypted database image — the name suggests so; confirm
// against the generator that produced this file). It returns the
// package-level slice itself, not a copy, so callers must treat the
// result as read-only.
func getBcDbEncrypt() []uint8 {
	return ggetBcDbEncrypt
}
|
package executors
import (
"os/exec"
tasker "github.com/Herlitzd/tasker/lib/core"
)
// Local is an Executor implementation that runs tasks as child processes
// on the local machine (see Execute).
type Local struct {
}
// Execute runs the task's program with its arguments as a local child
// process, blocking until it exits. The returned TaskResult carries the
// combined stdout/stderr output; IsSuccess is true only when the process
// started and exited with a zero status (CombinedOutput returned no error).
func (l *Local) Execute(t *tasker.Task) *tasker.TaskResult {
	command := exec.Command(t.Program, t.Args...)
	stdoutStderr, err := command.CombinedOutput()
	output := string(stdoutStderr)
	result := tasker.TaskResult{BaseTask: t, Output: &output}
	// A nil error means the command ran to completion successfully; the
	// original if/else collapses into a single boolean assignment.
	result.IsSuccess = err == nil
	return &result
}
|
package endpoint
import (
"context"
"github.com/go-kit/kit/endpoint"
"github.com/payfazz/fazzkit/server/http"
)
// Get returns an endpoint that always responds with a fixed two-row CSV
// payload served under the filename "xyz.csv".
func Get() endpoint.Endpoint {
	return func(ctx context.Context, request interface{}) (response interface{}, err error) {
		// Build the rows up front with a composite literal rather than
		// appending one row at a time.
		rows := [][]string{
			{"1", "2", "3"},
			{"4", "5", "6"},
		}
		return http.CSVResponse{
			Filename: "xyz.csv",
			Data:     rows,
		}, nil
	}
}
|
// +build !linux,!darwin,!windows,!openbsd
package container
import (
"context"
"github.com/nektos/act/pkg/common"
)
// NewDockerVolumeRemoveExecutor returns a no-op executor that always
// succeeds. This file is compiled only on platforms excluded by the build
// constraint above (not linux/darwin/windows/openbsd), where Docker is
// unsupported, so the volume and force parameters are intentionally
// ignored and no removal is attempted.
func NewDockerVolumeRemoveExecutor(volume string, force bool) common.Executor {
	return func(ctx context.Context) error {
		return nil
	}
}
|
package air
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestNewServer verifies that a freshly constructed Air instance wires up
// its server completely: back-references, pools, the address map and the
// shutdown-job plumbing are all initialized, while the shutdown-job list
// itself starts out nil.
func TestNewServer(t *testing.T) {
	app := New()
	srv := app.server
	assert.NotNil(t, srv)
	assert.NotNil(t, srv.a)
	assert.NotNil(t, srv.server)
	assert.NotNil(t, srv.addressMap)
	assert.Nil(t, srv.shutdownJobs)
	assert.NotNil(t, srv.shutdownJobMutex)
	assert.NotNil(t, srv.shutdownJobDone)
	// The done channel must be unbuffered.
	assert.Zero(t, cap(srv.shutdownJobDone))
	assert.NotNil(t, srv.requestPool)
	assert.NotNil(t, srv.responsePool)
	// The pools must hand out the expected concrete types.
	assert.IsType(t, &Request{}, srv.requestPool.Get())
	assert.IsType(t, &Response{}, srv.responsePool.Get())
}
// TestServerServe exercises server.serve across a matrix of configurations:
// plain HTTP on an ephemeral port, several invalid addresses, the
// debug-mode startup banner, virtual-host request routing, TLS via
// cert/key files (both empty/invalid and valid PEM pairs), HTTPS
// enforcement with an extra redirect listener, a caller-supplied
// tls.Config, and ACME autocert. Each serving scenario starts the server
// in a goroutine, sleeps briefly so the listener can bind, then closes it.
// NOTE(review): the http responses obtained below are never Body.Close()d,
// and two scenarios appear duplicated (empty-address and the final ACME
// block) — confirm whether that is intentional.
func TestServerServe(t *testing.T) {
	// Happy path: serve on an ephemeral localhost port, then close.
	a := New()
	a.Address = "localhost:0"
	s := a.server
	hijackOSStdout()
	go s.serve()
	time.Sleep(100 * time.Millisecond) // give the listener time to bind
	revertOSStdout()
	assert.NoError(t, s.close())
	// Invalid host: serve must fail.
	a = New()
	a.Address = "-1:0"
	s = a.server
	assert.Error(t, s.serve())
	// Empty address: also an error.
	a = New()
	a.Address = ""
	s = a.server
	assert.Error(t, s.serve())
	// Invalid port.
	a = New()
	a.Address = ":-1"
	s = a.server
	assert.Error(t, s.serve())
	// Empty address again (duplicate of the case above; kept as-is).
	a = New()
	a.Address = ""
	s = a.server
	assert.Error(t, s.serve())
	// Scratch directory for the TLS/ACME fixtures written below.
	dir, err := ioutil.TempDir("", "air.TestServerServe")
	assert.NoError(t, err)
	assert.NotEmpty(t, dir)
	defer os.RemoveAll(dir)
	// Debug mode: capture os.Stdout into a temp file and verify the exact
	// startup banner is printed.
	a = New()
	a.DebugMode = true
	a.Address = "localhost:0"
	s = a.server
	stdout, err := ioutil.TempFile(dir, "")
	assert.NoError(t, err)
	stdoutBackup := os.Stdout
	os.Stdout = stdout
	go s.serve()
	time.Sleep(100 * time.Millisecond)
	os.Stdout = stdoutBackup
	assert.NoError(t, stdout.Close())
	b, err := ioutil.ReadFile(stdout.Name())
	assert.NoError(t, err)
	assert.Equal(
		t,
		fmt.Sprintf(
			"air: serving in debug mode\nair: listening on %v\n",
			s.addresses(),
		),
		string(b),
	)
	assert.NoError(t, s.close())
	// Virtual hosts over plain HTTP: both a matching Host ("localhost")
	// and a non-matching one ("example.com") must get a response.
	a = New()
	a.Address = "localhost:0"
	s = a.server
	hijackOSStdout()
	go s.serve()
	time.Sleep(100 * time.Millisecond)
	revertOSStdout()
	res, err := http.DefaultClient.Do(&http.Request{
		Method: http.MethodGet,
		URL: &url.URL{
			Scheme: "http",
			Host:   s.addresses()[0],
		},
		Host: "localhost",
	})
	assert.NoError(t, err)
	assert.NotNil(t, res)
	res, err = http.DefaultClient.Do(&http.Request{
		Method: http.MethodGet,
		URL: &url.URL{
			Scheme: "http",
			Host:   s.addresses()[0],
		},
		Host: "example.com",
	})
	assert.NoError(t, err)
	assert.NotNil(t, res)
	assert.NoError(t, s.close())
	// Empty (invalid) cert/key files: serve must fail to load them.
	a = New()
	a.Address = "localhost:0"
	assert.NoError(t, ioutil.WriteFile(
		filepath.Join(dir, "tls_cert.pem"),
		nil,
		os.ModePerm,
	))
	assert.NoError(t, ioutil.WriteFile(
		filepath.Join(dir, "tls_key.pem"),
		nil,
		os.ModePerm,
	))
	a.TLSCertFile = filepath.Join(dir, "tls_cert.pem")
	a.TLSKeyFile = filepath.Join(dir, "tls_key.pem")
	s = a.server
	assert.Error(t, s.serve())
	// Valid (though expired) PEM pair with HTTPS enforcement: write the
	// fixtures, then verify TLS handshakes and the HTTP redirect listener.
	a = New()
	a.Address = "localhost:0"
	a.HTTPSEnforced = true
	a.HTTPSEnforcedPort = "0"
	a.ErrorLogger = log.New(ioutil.Discard, "", 0)
	assert.NoError(t, ioutil.WriteFile(
		filepath.Join(dir, "tls_cert.pem"),
		[]byte(`
-----BEGIN CERTIFICATE-----
MIIFBTCCA+2gAwIBAgISA19vMeUvx/Tnt3mnfnbQKzIEMA0GCSqGSIb3DQEBCwUA
MEoxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MSMwIQYDVQQD
ExpMZXQncyBFbmNyeXB0IEF1dGhvcml0eSBYMzAeFw0xNzAxMjIwMzA3MDBaFw0x
NzA0MjIwMzA3MDBaMBQxEjAQBgNVBAMTCWFpcndmLm9yZzCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAMqIYMFjNRADYUbnQhfyIc77M0in8eWD4iVAEXcj
lKUz/vf/Hxm1TfE+LQampJF57JceT0hfqmDNzt5W+52aN1P+wbx7XHa4F+3DdY5h
MVfxm36Y1y4/OKAsNBpVlBhTtnFQJLIUO8c9mDs9VSX6DBCNSzAS/rSfnThlxDKN
qTaQVXIAN8+iqiiIrK4q0SSlW12jOzok/BXxbOtiTWXaLEVnzKUEsYTZMkdGiRZF
PyIJktIHY3eujG8c4tGr9KtX1b2ZvaaAIRcCOo0uhtJ18Sjb7IzQbz/Xba6LcqDL
3Q0HWO3UmIPxbzeTPgVSftdpC18ig9s7gLws38Rb1yifbskCAwEAAaOCAhkwggIV
MA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIw
DAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUJ3IaKlnvlxFNz5q5kBBJkUtcamAwHwYD
VR0jBBgwFoAUqEpqYwR93brm0Tm3pkVl7/Oo7KEwcAYIKwYBBQUHAQEEZDBiMC8G
CCsGAQUFBzABhiNodHRwOi8vb2NzcC5pbnQteDMubGV0c2VuY3J5cHQub3JnLzAv
BggrBgEFBQcwAoYjaHR0cDovL2NlcnQuaW50LXgzLmxldHNlbmNyeXB0Lm9yZy8w
IwYDVR0RBBwwGoIJYWlyd2Yub3Jngg13d3cuYWlyd2Yub3JnMIH+BgNVHSAEgfYw
gfMwCAYGZ4EMAQIBMIHmBgsrBgEEAYLfEwEBATCB1jAmBggrBgEFBQcCARYaaHR0
cDovL2Nwcy5sZXRzZW5jcnlwdC5vcmcwgasGCCsGAQUFBwICMIGeDIGbVGhpcyBD
ZXJ0aWZpY2F0ZSBtYXkgb25seSBiZSByZWxpZWQgdXBvbiBieSBSZWx5aW5nIFBh
cnRpZXMgYW5kIG9ubHkgaW4gYWNjb3JkYW5jZSB3aXRoIHRoZSBDZXJ0aWZpY2F0
ZSBQb2xpY3kgZm91bmQgYXQgaHR0cHM6Ly9sZXRzZW5jcnlwdC5vcmcvcmVwb3Np
dG9yeS8wDQYJKoZIhvcNAQELBQADggEBAEeZuWoMm5E9V/CQxQv0GBJEr3jl7e/O
Wauwl+sRLbQG9ajHlnKz46Af/oDoG4Z+e7iYRRZm9nIOLVCsp3Yp+h+GSjwm8yiP
fwAyaLfBKNbtEk0S/FNmqzr7jjxCyHhqoloHhzFAfHJyhlYlMUwQhbxM1U5GbejE
9ru76RTbdh3yb00HSXBMcc3woiaGWPc8FVaT8LGOweKIEH4kcYevC06m860ovHV/
s87+zaamZW4j8uWLGPxS4eD2Ulg+nbLKdnprbYEx5F943M1b7s05LJ+E7SnqKS3i
jiepPCVdRmlsROMoSfWQXFdfsTKEFAwOeIbIxfk7EgUIzrUgnnv0G7Q=
-----END CERTIFICATE-----
`),
		os.ModePerm,
	))
	assert.NoError(t, ioutil.WriteFile(
		filepath.Join(dir, "tls_key.pem"),
		[]byte(`
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDKiGDBYzUQA2FG
50IX8iHO+zNIp/Hlg+IlQBF3I5SlM/73/x8ZtU3xPi0GpqSReeyXHk9IX6pgzc7e
VvudmjdT/sG8e1x2uBftw3WOYTFX8Zt+mNcuPzigLDQaVZQYU7ZxUCSyFDvHPZg7
PVUl+gwQjUswEv60n504ZcQyjak2kFVyADfPoqooiKyuKtEkpVtdozs6JPwV8Wzr
Yk1l2ixFZ8ylBLGE2TJHRokWRT8iCZLSB2N3roxvHOLRq/SrV9W9mb2mgCEXAjqN
LobSdfEo2+yM0G8/122ui3Kgy90NB1jt1JiD8W83kz4FUn7XaQtfIoPbO4C8LN/E
W9con27JAgMBAAECggEAFUx6QFwafHCejkJLpREFlSq9nepreeOAqMIwFANd4nGx
YoslziJO7AvJ2GU18UaNJuc9FzNYS43ZL3CeTVimcOLdpOCkPKfnfE2N00dNVR5H
Z+zS1D45yj5bzFkrldNX4Fq5QTD3iGBl3fT5O2EsW6FAQvH8bypJ8mBhXZ+gJ+id
4croKKwMsHGYSiLdCSVf6oGkytlQwggAl0B85KBCOR1ArMf2nrM9lf6yBLJRGo6f
qzIEAvDPNicW5BWGf2lwQTmawKMecStWXniu8VdjKoRO9IXDe2WQAdwC8LjAQwxZ
hQJbM6I8x0CExMmEthieUrX0VkblboOC/BQsUzNwAQKBgQDurZ07acp/P9icDIUN
l53OiCafYrlBceZCdykheDHgpg+TBVfO8GUMsXywYIMOw1RzmGqDWWrU7uaiXnMn
kL/LKFM9t/10vFrlt5F1cx45MJsknVDebfJGq+L6eHISx+7igTCyQ6JBD4sW2tcs
c6MYHgVsAHioqrkcjvHBUY8cSQKBgQDZOzhFg41h3U+cTgePGjzZpziWB1VO8ird
OJp8Hn8umUW8JfdYTalTvzs2CiNw0gOjGETMUmKKhS2YcGIol9j7elBOhT9mzxKf
NHEJRiV6+2SInESUfcLaXZZQKbMMiw2YZfV2ADf8n+Lb79tlbAtSEnMnvmlDI/1K
SASXbGS+gQKBgQDeh7JUBaOOFsnvXGDlNlokiJ5x9krBMN+9UnpfwT/HsyxMKCwh
PdMJDaYykBlBN27Sw+VzB3hqhT81XZhB6FxZnwRVQ+kk4MRi707IUYd5TM8pSR9v
8tRzfakHXCsHRa99MXRkkFiEDmjg6zK5OCt0vfDSLHJS17H1ZXUTh+ZFOQKBgFgX
1OUTyTUDu7ImTphwynZ1gtQMm0LNoCZgOv3UnDz4eTgoqVrM+7rzlP6ANAkfkcwF
HnlBe6azBV+JS7UshxjMbF67WI/Hr8SSTri1EqQB6K4huQoCyg8l3rwZfPu8NEI2
LsmwowO2jxgj9/P0Uc7xnnNim2tX3/LMq9gAZAaBAoGBALI4Y4/lBNfBRB0IIA+p
Edt9VRdifXbQE+q1JwyG9smGsumYuMCBGQFZp51Wa5/FD/NRqezRDP3myiRQzWiM
fNAWEfZaazKKFmOrC4WgM+Z8bKAyrDpmCu2iNvdS2JPYujiIX+f5kq7W0muF4JXZ
l7j2fuWjNfj9JfnXoP2SEgPG
-----END PRIVATE KEY-----
`),
		os.ModePerm,
	))
	a.TLSCertFile = filepath.Join(dir, "tls_cert.pem")
	a.TLSKeyFile = filepath.Join(dir, "tls_key.pem")
	s = a.server
	hijackOSStdout()
	go s.serve()
	time.Sleep(100 * time.Millisecond)
	revertOSStdout()
	// A non-whitelisted Host over HTTPS must fail the handshake.
	res, err = http.DefaultClient.Do(&http.Request{
		Method: http.MethodGet,
		URL: &url.URL{
			Scheme: "https",
			Host:   s.addresses()[0],
		},
		Host: "example.com",
	})
	assert.Error(t, err)
	assert.Nil(t, res)
	// With certificate verification disabled, "localhost" succeeds.
	http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
		InsecureSkipVerify: true,
	}
	res, err = http.DefaultClient.Do(&http.Request{
		Method: http.MethodGet,
		URL: &url.URL{
			Scheme: "https",
			Host:   s.addresses()[0],
		},
		Host: "localhost",
	})
	http.DefaultTransport.(*http.Transport).TLSClientConfig = nil
	assert.NoError(t, err)
	assert.NotNil(t, res)
	// The second listener (addresses()[1]) is the HTTP->HTTPS redirector.
	http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
		InsecureSkipVerify: true,
	}
	res, err = http.DefaultClient.Do(&http.Request{
		Method: http.MethodGet,
		URL: &url.URL{
			Scheme: "http",
			Host:   s.addresses()[1],
		},
		Host: "localhost",
	})
	http.DefaultTransport.(*http.Transport).TLSClientConfig = nil
	assert.NoError(t, err)
	assert.NotNil(t, res)
	assert.NoError(t, s.close())
	// Caller-supplied tls.Config built from the same PEM pair.
	c, err := tls.LoadX509KeyPair(
		filepath.Join(dir, "tls_cert.pem"),
		filepath.Join(dir, "tls_key.pem"),
	)
	assert.NotNil(t, c)
	assert.NoError(t, err)
	a = New()
	a.Address = "localhost:0"
	a.TLSConfig = &tls.Config{
		Certificates: []tls.Certificate{c},
	}
	s = a.server
	hijackOSStdout()
	go s.serve()
	time.Sleep(100 * time.Millisecond)
	revertOSStdout()
	assert.NoError(t, s.close())
	// Invalid host with TLS configured: serve must fail.
	a = New()
	a.Address = "-1:0"
	a.TLSCertFile = filepath.Join(dir, "tls_cert.pem")
	a.TLSKeyFile = filepath.Join(dir, "tls_key.pem")
	s = a.server
	assert.Error(t, s.serve())
	// Invalid HTTPS-enforced port: serve must fail.
	a = New()
	a.Address = "localhost:0"
	a.HTTPSEnforced = true
	a.HTTPSEnforcedPort = "-1"
	a.TLSCertFile = filepath.Join(dir, "tls_cert.pem")
	a.TLSKeyFile = filepath.Join(dir, "tls_key.pem")
	s = a.server
	assert.Error(t, s.serve())
	// ACME autocert: a request for a non-whitelisted host must fail (no
	// real ACME issuance can happen in a test environment).
	a = New()
	a.Address = "localhost:0"
	a.ACMEEnabled = true
	a.ACMECertRoot = dir
	a.ACMEHostWhitelist = []string{"localhost"}
	a.HTTPSEnforcedPort = "0"
	a.ErrorLogger = log.New(ioutil.Discard, "", 0)
	s = a.server
	hijackOSStdout()
	go s.serve()
	time.Sleep(100 * time.Millisecond)
	revertOSStdout()
	res, err = http.DefaultClient.Do(&http.Request{
		Method: http.MethodGet,
		URL: &url.URL{
			Scheme: "https",
			Host:   s.addresses()[0],
		},
		Host: "example.com",
	})
	assert.Error(t, err)
	assert.Nil(t, res)
	assert.NoError(t, s.close())
	// Identical ACME scenario repeated (kept as-is; see NOTE above).
	a = New()
	a.Address = "localhost:0"
	a.ACMEEnabled = true
	a.ACMECertRoot = dir
	a.ACMEHostWhitelist = []string{"localhost"}
	a.HTTPSEnforcedPort = "0"
	a.ErrorLogger = log.New(ioutil.Discard, "", 0)
	s = a.server
	hijackOSStdout()
	go s.serve()
	time.Sleep(100 * time.Millisecond)
	revertOSStdout()
	res, err = http.DefaultClient.Do(&http.Request{
		Method: http.MethodGet,
		URL: &url.URL{
			Scheme: "https",
			Host:   s.addresses()[0],
		},
		Host: "example.com",
	})
	assert.Error(t, err)
	assert.Nil(t, res)
	assert.NoError(t, s.close())
}
// TestServerClose verifies that a serving server can be closed without error.
func TestServerClose(t *testing.T) {
	a := New()
	a.Address = "localhost:0" // port 0: let the OS pick a free port
	s := a.server
	hijackOSStdout()
	go s.serve()
	// Give the goroutine time to bind the listener before closing.
	time.Sleep(100 * time.Millisecond)
	revertOSStdout()
	assert.NoError(t, s.close())
}
// TestServerShutdown verifies graceful shutdown behavior: registered shutdown
// jobs run on a normal shutdown, and a pre-canceled context aborts shutdown
// before slow jobs complete.
func TestServerShutdown(t *testing.T) {
	a := New()
	a.Address = "localhost:0"
	s := a.server
	foo := ""
	s.addShutdownJob(func() {
		foo = "bar"
	})
	hijackOSStdout()
	go s.serve()
	time.Sleep(100 * time.Millisecond)
	revertOSStdout()
	// Graceful shutdown runs the job; the job list itself is retained.
	assert.NoError(t, s.shutdown(context.Background()))
	assert.Equal(t, "bar", foo)
	assert.Len(t, s.shutdownJobs, 1)
	a = New()
	a.Address = "localhost:0"
	s = a.server
	foo = ""
	s.addShutdownJob(func() {
		// Sleep so shutdown is still in-flight when the context is canceled.
		time.Sleep(100 * time.Millisecond)
		foo = "bar"
	})
	hijackOSStdout()
	go s.serve()
	time.Sleep(100 * time.Millisecond)
	revertOSStdout()
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	// NOTE(review): assert.Error treats the 2nd arg as the error and the rest
	// as message args; context.Canceled here is only used as a message. Verify
	// the intent was assert.Equal/ErrorIs.
	assert.Error(t, context.Canceled, s.shutdown(ctx))
	// The slow job must not have completed.
	assert.Empty(t, foo)
	assert.Len(t, s.shutdownJobs, 1)
}
// TestServerAddShutdownJob verifies that adding a shutdown job appends it to
// the job list and returns its zero-based slot id.
func TestServerAddShutdownJob(t *testing.T) {
	a := New()
	s := a.server
	assert.Len(t, s.shutdownJobs, 0)
	id := s.addShutdownJob(func() {})
	assert.Len(t, s.shutdownJobs, 1)
	assert.Equal(t, 0, id)
}
// TestServerRemoveShutdownJob verifies that removing a job nils out its slot
// rather than shrinking the slice (ids stay stable).
func TestServerRemoveShutdownJob(t *testing.T) {
	a := New()
	s := a.server
	assert.Len(t, s.shutdownJobs, 0)
	id := s.addShutdownJob(func() {})
	assert.Len(t, s.shutdownJobs, 1)
	assert.NotNil(t, s.shutdownJobs[0])
	assert.Equal(t, 0, id)
	s.removeShutdownJob(id)
	// Length is unchanged; the slot is cleared in place.
	assert.Len(t, s.shutdownJobs, 1)
	assert.Nil(t, s.shutdownJobs[0])
}
// TestServerAddresses verifies that a serving server reports one bound
// address and reports none after it is closed.
func TestServerAddresses(t *testing.T) {
	a := New()
	a.Address = "localhost:0"
	s := a.server
	hijackOSStdout()
	go s.serve()
	time.Sleep(100 * time.Millisecond)
	revertOSStdout()
	assert.Len(t, s.addresses(), 1)
	assert.NoError(t, s.close())
	// Allow the serve goroutine to observe the close and drop the listener.
	time.Sleep(100 * time.Millisecond)
	assert.Len(t, s.addresses(), 0)
}
// TestServerServeHTTP exercises the full request pipeline in-process:
// pregas -> gas -> handler -> deferred writes, plus error rendering in both
// non-debug (generic message) and debug (real error message) modes.
func TestServerServeHTTP(t *testing.T) {
	a := New()
	a.Pregases = []Gas{func(next Handler) Handler {
		return func(req *Request, res *Response) error {
			req.SetValue("EasterEgg", easterEgg)
			// Deferred writes flush after the handler chain completes.
			res.Defer(func() {
				res.WriteString("Defer")
			})
			if err := res.WriteString("Pregas - "); err != nil {
				return err
			}
			return next(req, res)
		}
	}}
	a.Gases = []Gas{func(next Handler) Handler {
		return func(req *Request, res *Response) error {
			if err := res.WriteString("Gas - "); err != nil {
				return err
			}
			return next(req, res)
		}
	}}
	a.GET("/hello/:Name", func(req *Request, res *Response) error {
		// Confirms values set in a pregas are visible to the handler.
		if req.Value("EasterEgg") != easterEgg {
			return errors.New("wrong easter egg")
		}
		return res.WriteString(
			"Hello, " + req.Param("Name").Value().String() + " - ",
		)
	})
	s := a.server
	req := httptest.NewRequest(http.MethodGet, "/hello/Air", nil)
	rec := httptest.NewRecorder()
	s.ServeHTTP(rec, req)
	assert.Equal(t, http.StatusOK, rec.Code)
	// NOTE(review): rec.HeaderMap is deprecated in net/http/httptest;
	// rec.Result().Header is the recommended accessor.
	assert.Equal(
		t,
		"text/plain; charset=utf-8",
		rec.HeaderMap.Get("Content-Type"),
	)
	assert.Equal(t, "Pregas - Gas - Hello, Air - Defer", rec.Body.String())
	// Non-debug mode: handler errors render as a generic 500 message.
	a = New()
	a.GET("/", func(req *Request, res *Response) error {
		return errors.New("handler error")
	})
	s = a.server
	req = httptest.NewRequest(http.MethodGet, "/", nil)
	rec = httptest.NewRecorder()
	s.ServeHTTP(rec, req)
	assert.Equal(t, http.StatusInternalServerError, rec.Code)
	assert.Equal(
		t,
		"text/plain; charset=utf-8",
		rec.HeaderMap.Get("Content-Type"),
	)
	assert.Equal(t, "Internal Server Error", rec.Body.String())
	// Debug mode: the real error text is exposed in the response body.
	a = New()
	a.DebugMode = true
	a.GET("/:Foo", func(req *Request, res *Response) error {
		return errors.New("handler error")
	})
	s = a.server
	req = httptest.NewRequest(http.MethodGet, "/bar", nil)
	rec = httptest.NewRecorder()
	s.ServeHTTP(rec, req)
	assert.Equal(t, http.StatusInternalServerError, rec.Code)
	assert.Equal(
		t,
		"text/plain; charset=utf-8",
		rec.HeaderMap.Get("Content-Type"),
	)
	assert.Equal(t, "handler error", rec.Body.String())
}
|
package main
import (
"bytes"
"crypto/sha256"
"encoding/binary"
"encoding/gob"
"flag"
"fmt"
"github.com/boltdb/bolt"
"log"
"math"
"math/big"
"os"
"strconv"
"time"
)
// dbFile is the BoltDB file holding the chain; blocksBucket is the single
// bucket storing hash->block pairs plus the "l" (last hash) pointer.
const dbFile = "blockchain.db"
const blocksBucket = "blocks"

// targetBits is the proof-of-work difficulty: a valid hash must be below
// 2^(256-targetBits).
const targetBits = 24
// Block is a single unit of the chain: arbitrary payload bytes plus the
// proof-of-work result (Hash, Nonce) and a link to the previous block.
type Block struct {
	Timestamp     int64  // Unix time of creation
	Data          []byte // payload
	PrevBlockHash []byte // hash of the parent block; empty for genesis
	Hash          []byte // PoW hash of this block
	Nonce         int    // nonce that produced Hash
}
// Blockchain keeps only the tip (hash of the newest block) in memory;
// blocks themselves live in the BoltDB handle.
type Blockchain struct {
	tip []byte
	db  *bolt.DB
}
// CLI processes command-line arguments against a Blockchain.
type CLI struct {
	bc *Blockchain
}
// NewBlock mines and returns a new block carrying data, linked to the block
// identified by prevBlockHash. Hash and Nonce are filled by proof-of-work.
func NewBlock(data string, prevBlockHash []byte) *Block {
	block := &Block{
		Timestamp:     time.Now().Unix(),
		Data:          []byte(data),
		PrevBlockHash: prevBlockHash,
		Hash:          []byte{},
		Nonce:         0}
	// Run proof-of-work to find a nonce whose hash satisfies the target.
	pow := NewProofOfWork(block)
	nonce, hash := pow.Run()
	block.Nonce = nonce
	block.Hash = hash[:]
	return block
}
// NewBlockchain opens (or creates) the on-disk chain. If no chain exists a
// genesis block is mined and stored; otherwise the stored tip is loaded.
func NewBlockchain() *Blockchain {
	var tip []byte
	// Standard way to open the BoltDB file.
	db, err := bolt.Open(dbFile, 0600, nil)
	if err != nil {
		log.Panic(err)
	}
	err = db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(blocksBucket)) // look up the blocks bucket
		// If no chain exists yet, create one; otherwise read the last hash.
		if b == nil {
			fmt.Println("No existing blockchain found. Creating a new one...")
			genesis := NewBlock("First block", []byte{}) // mine the genesis block
			b, err := tx.CreateBucket([]byte(blocksBucket)) // create the blocks bucket
			if err != nil {
				log.Panic(err)
			}
			err = b.Put(genesis.Hash, genesis.Serialize()) // store hash -> serialized block
			if err != nil {
				log.Panic(err)
			}
			err = b.Put([]byte("l"), genesis.Hash) // "l" key tracks the last hash
			if err != nil {
				log.Panic(err)
			}
			tip = genesis.Hash // point the tip at the newest hash
		} else {
			tip = b.Get([]byte("l")) // chain exists: read the stored tip
		}
		return nil
	})
	if err != nil {
		log.Panic(err)
	}
	bc := Blockchain{tip, db}
	return &bc
}
// Serialize gob-encodes the block into a byte slice suitable for storage.
func (b *Block) Serialize() []byte {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(b); err != nil {
		log.Panic(err)
	}
	return buf.Bytes()
}
// DeserializeBlock decodes a gob-encoded byte slice back into a Block.
func DeserializeBlock(d []byte) *Block {
	block := &Block{}
	if err := gob.NewDecoder(bytes.NewReader(d)).Decode(block); err != nil {
		log.Panic(err)
	}
	return block
}
// AddBlock mines a new block carrying data and appends it to the chain,
// advancing both the stored "l" pointer and the in-memory tip.
func (bc *Blockchain) AddBlock(data string) {
	var lastHash []byte
	// Read the hash of the current tip; it becomes the new block's parent.
	err := bc.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(blocksBucket))
		lastHash = b.Get([]byte("l"))
		return nil
	})
	if err != nil {
		log.Panic(err)
	}
	newBlock := NewBlock(data, lastHash)
	// Persist the new block and move the "l" (last hash) pointer to it.
	err = bc.db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(blocksBucket))
		err := b.Put(newBlock.Hash, newBlock.Serialize())
		if err != nil {
			log.Panic(err)
		}
		err = b.Put([]byte("l"), newBlock.Hash)
		if err != nil {
			log.Panic(err)
		}
		bc.tip = newBlock.Hash
		return nil
	})
	// Bug fix: the Update error was previously discarded; a failed commit
	// would have silently left the stored chain and bc.tip inconsistent.
	if err != nil {
		log.Panic(err)
	}
}
// BlockchainIterator walks blocks stored in bolt in order, from the newest
// block back toward the genesis block.
type BlockchainIterator struct {
	currentHash []byte
	db          *bolt.DB
}
// Iterator returns an iterator positioned at the chain tip (newest block).
func (bc *Blockchain) Iterator() *BlockchainIterator {
	bci := &BlockchainIterator{bc.tip, bc.db}
	return bci
}
// Next returns the block at the current position and steps the iterator to
// its parent. After the genesis block is returned, currentHash becomes empty;
// calling Next again would look up a missing key and panic in Deserialize.
func (i *BlockchainIterator) Next() *Block {
	var block *Block
	err := i.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(blocksBucket))
		encodedBlock := b.Get(i.currentHash)
		block = DeserializeBlock(encodedBlock)
		return nil
	})
	if err != nil {
		log.Panic(err)
	}
	// Step backwards along the parent link.
	i.currentHash = block.PrevBlockHash
	return block
}
// usage is the CLI help text printed when arguments are missing or invalid.
// (The text itself is user-facing output and is kept verbatim.)
const usage = `
Usage:
  addblock -data BLOCK_DATA 添加区块,命令为addblock -data,之后空格并输入数据
  printchain 呈现出所有的block的信息(模仿bilibili评论按照时间排序,最新的在最上面)
  print_someblock -num NUM 呈现出某个block的信息(最古老的是第一个),some有某个的意思····(没想到居然会这么复习英语)
`
// printUsage prints the CLI help text.
func (cli *CLI) printUsage() {
	fmt.Println(usage)
}
// validateArgs exits the process (after printing usage) when no subcommand
// was supplied.
func (cli *CLI) validateArgs() {
	if len(os.Args) < 2 {
		cli.printUsage()
		os.Exit(1)
	}
}
// Run parses os.Args and dispatches to the matching subcommand:
// addblock, printchain, or print_someblock.
func (cli *CLI) Run() {
	cli.validateArgs()
	// One FlagSet per subcommand; only the selected one is parsed.
	addBlockCmd := flag.NewFlagSet("addblock", flag.ExitOnError)
	printChainCmd := flag.NewFlagSet("printchain", flag.ExitOnError)
	print_someblockCmd := flag.NewFlagSet("print_someblock", flag.ExitOnError)
	addBlockData := addBlockCmd.String("data", "", "Block data")
	printblock := print_someblockCmd.String("num", "", "NUM")
	switch os.Args[1] {
	case "addblock":
		err := addBlockCmd.Parse(os.Args[2:])
		if err != nil {
			log.Panic(err)
		}
	case "printchain":
		err := printChainCmd.Parse(os.Args[2:])
		if err != nil {
			log.Panic(err)
		}
	case "print_someblock":
		err := print_someblockCmd.Parse(os.Args[2:])
		if err != nil {
			log.Panic(err)
		}
	default:
		cli.printUsage()
		os.Exit(1)
	}
	// Exactly one FlagSet is Parsed() at this point.
	if addBlockCmd.Parsed() {
		if *addBlockData == "" {
			addBlockCmd.Usage()
			os.Exit(1)
		}
		cli.bc.AddBlock(*addBlockData)
	}
	if printChainCmd.Parsed() {
		cli.printChain()
	}
	if print_someblockCmd.Parsed() {
		if *printblock == "" {
			print_someblockCmd.Usage()
			os.Exit(1)
		}
		cli.print_someblock(*printblock)
	}
}
// addBlock adds a block with the given payload and reports success.
// NOTE(review): Run calls cli.bc.AddBlock directly, so this wrapper appears
// to be unused — verify before removing.
func (cli *CLI) addBlock(data string) {
	cli.bc.AddBlock(data)
	fmt.Println("Success")
}
// printChain dumps every block from newest to oldest, re-validating each
// block's proof-of-work as it goes.
func (cli *CLI) printChain() {
	bci := cli.bc.Iterator()
	for {
		block := bci.Next()
		fmt.Printf("Prev hash: %x\n", block.PrevBlockHash)
		fmt.Printf("Data: %s\n", block.Data)
		fmt.Printf("Hash: %x\n", block.Hash)
		pow := NewProofOfWork(block)
		fmt.Printf("PoW: %s\n", strconv.FormatBool(pow.Validate()))
		fmt.Println()
		// An empty parent hash marks the genesis block.
		if len(block.PrevBlockHash) == 0 {
			break
		}
	}
}
// print_someblock prints the single block at 1-based position num counting
// from the oldest block (the genesis block is number 1). Invalid or
// out-of-range input prints an error message instead of panicking.
//
// Fixes over the previous version: the Atoi error was silently ignored
// (leaving Num == 0), the skip counter was never incremented so only the
// newest block could ever be reached, and a no-op `Num = Num` assignment
// was removed.
func (cli *CLI) print_someblock(num string) {
	target, err := strconv.Atoi(num)
	if err != nil {
		fmt.Println("输入的数字有误,请查看是否有效或者超出区块链总长度")
		return
	}
	// First pass: count the total number of blocks in the chain.
	length := 0
	counter := cli.bc.Iterator()
	for {
		block := counter.Next()
		length++
		if len(block.PrevBlockHash) == 0 {
			break
		}
	}
	if target < 1 || target > length {
		fmt.Println("输入的数字有误,请查看是否有效或者超出区块链总长度")
		return
	}
	// Second pass: the iterator walks newest-to-oldest, so the block that is
	// `target` from the oldest end lies `length-target` steps from the tip.
	bci := cli.bc.Iterator()
	for skipped := 0; ; skipped++ {
		block := bci.Next()
		if skipped != length-target {
			continue
		}
		fmt.Printf("Prev hash: %x\n", block.PrevBlockHash)
		fmt.Printf("Data: %s\n", block.Data)
		fmt.Printf("Hash: %x\n", block.Hash)
		pow := NewProofOfWork(block)
		fmt.Printf("PoW: %s\n", strconv.FormatBool(pow.Validate()))
		fmt.Println()
		return
	}
}
var (
	// maxNonce caps the proof-of-work search so the nonce cannot overflow.
	maxNonce = math.MaxInt64
)
// ProofOfWork pairs a block with the numeric target its hash must stay below.
type ProofOfWork struct {
	block  *Block
	target *big.Int
}
// NewProofOfWork builds a ProofOfWork for b with target 2^(256-targetBits);
// a hash interpreted as a big integer must be strictly below this value.
func NewProofOfWork(b *Block) *ProofOfWork {
	target := big.NewInt(1)
	target.Lsh(target, uint(256-targetBits))
	pow := &ProofOfWork{b, target}
	return pow
}
// Run is the proof-of-work entry point: it brute-forces nonces until the
// SHA-256 of the block header material falls below the target, returning the
// winning nonce and hash.
func (pow *ProofOfWork) Run() (int, []byte) {
	var hashInt big.Int
	var hash [32]byte
	nonce := 0
	fmt.Printf("Mining the block containing \"%s\"\n", pow.block.Data)
	for nonce < maxNonce {
		// Header material: parent hash, payload, timestamp, difficulty, nonce.
		data := bytes.Join(
			[][]byte{
				pow.block.PrevBlockHash,
				pow.block.Data,
				IntToHex(pow.block.Timestamp),
				IntToHex(int64(targetBits)),
				IntToHex(int64(nonce)),
			},
			[]byte{},
		)
		hash = sha256.Sum256(data)
		hashInt.SetBytes(hash[:])
		// Cmp == -1 means hash < target: proof found.
		if hashInt.Cmp(pow.target) == -1 {
			fmt.Printf("\r%x", hash)
			break
		} else {
			nonce++
		}
	}
	fmt.Print("\n\n")
	return nonce, hash[:]
}
// Validate re-hashes the block with its stored nonce and reports whether the
// result still satisfies the proof-of-work target.
func (pow *ProofOfWork) Validate() bool {
	var hashInt big.Int
	// Must mirror the exact byte layout used by Run.
	data := bytes.Join(
		[][]byte{
			pow.block.PrevBlockHash,
			pow.block.Data,
			IntToHex(pow.block.Timestamp),
			IntToHex(int64(targetBits)),
			IntToHex(int64(pow.block.Nonce)),
		},
		[]byte{},
	)
	hash := sha256.Sum256(data)
	hashInt.SetBytes(hash[:])
	isValid := hashInt.Cmp(pow.target) == -1
	return isValid
}
// IntToHex encodes num as its 8-byte big-endian binary representation.
func IntToHex(num int64) []byte {
	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.BigEndian, num); err != nil {
		log.Panic(err)
	}
	return buf.Bytes()
}
// main opens (or creates) the chain, ensures the DB handle is released on
// exit, and hands control to the CLI.
func main() {
	bc := NewBlockchain()
	defer bc.db.Close()
	cli := CLI{bc}
	cli.Run()
}
|
package zerver
type (
	// TaskHandlerFunc adapts a plain function to the TaskHandler interface.
	TaskHandlerFunc func(interface{})

	// TaskHandler is a Component that can process a task value.
	TaskHandler interface {
		Component
		Handle(interface{})
	}
)
// convertTaskHandler adapts a raw value to a TaskHandler. Accepted inputs
// are a bare func(interface{}) (wrapped as TaskHandlerFunc) or a value that
// already implements TaskHandler; anything else yields nil.
func convertTaskHandler(i interface{}) TaskHandler {
	if fn, ok := i.(func(interface{})); ok {
		return TaskHandlerFunc(fn)
	}
	if h, ok := i.(TaskHandler); ok {
		return h
	}
	return nil
}
// Init is a no-op; a bare function needs no environment setup.
func (TaskHandlerFunc) Init(Environment) error { return nil }

// Handle invokes the wrapped function with the task.
func (fn TaskHandlerFunc) Handle(task interface{}) { fn(task) }

// Destroy is a no-op; there is no state to release.
func (TaskHandlerFunc) Destroy() {}
|
// Copyright (C) 2018-Present Pivotal Software, Inc. All rights reserved.
//
// This program and the accompanying materials are made available under the
// terms of the under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
//
// You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package boshdirector
import (
"github.com/pivotal-cf/on-demand-service-broker/config"
)
// GetDNSAddresses resolves a DNS address for every binding-DNS request by
// creating a temporary link consumer against the request's link provider,
// reading the link address, and deleting the consumer again. The result maps
// each request's Name to its resolved address. It returns on the first error.
func (c *Client) GetDNSAddresses(deploymentName string, dnsRequest []config.BindingDNS) (map[string]string, error) {
	addresses := map[string]string{}
	for _, req := range dnsRequest {
		providerId, err := c.dnsRetriever.LinkProviderID(deploymentName, req.InstanceGroup, req.LinkProvider)
		if err != nil {
			return nil, err
		}
		consumerId, err := c.dnsRetriever.CreateLinkConsumer(providerId)
		if err != nil {
			return nil, err
		}
		addr, err := c.dnsRetriever.GetLinkAddress(consumerId, req.Properties.AZS, req.Properties.Status)
		if err != nil {
			return nil, err
		}
		// NOTE(review): consumers created before a failing GetLinkAddress are
		// never deleted, and this cleanup call's outcome is not checked —
		// confirm whether leaked consumers are acceptable here.
		c.dnsRetriever.DeleteLinkConsumer(consumerId)
		addresses[req.Name] = addr
	}
	return addresses, nil
}
|
package odoo
import (
"fmt"
)
// AccountFiscalPosition represents account.fiscal.position model.
//
// Bug fix: every field tag previously spelled the option "omptempty", so the
// intended omitempty behavior never applied; corrected to "omitempty".
type AccountFiscalPosition struct {
	LastUpdate     *Time     `xmlrpc:"__last_update,omitempty"`
	AccountIds     *Relation `xmlrpc:"account_ids,omitempty"`
	Active         *Bool     `xmlrpc:"active,omitempty"`
	AutoApply      *Bool     `xmlrpc:"auto_apply,omitempty"`
	CompanyId      *Many2One `xmlrpc:"company_id,omitempty"`
	CountryGroupId *Many2One `xmlrpc:"country_group_id,omitempty"`
	CountryId      *Many2One `xmlrpc:"country_id,omitempty"`
	CreateDate     *Time     `xmlrpc:"create_date,omitempty"`
	CreateUid      *Many2One `xmlrpc:"create_uid,omitempty"`
	DisplayName    *String   `xmlrpc:"display_name,omitempty"`
	Id             *Int      `xmlrpc:"id,omitempty"`
	Name           *String   `xmlrpc:"name,omitempty"`
	Note           *String   `xmlrpc:"note,omitempty"`
	Sequence       *Int      `xmlrpc:"sequence,omitempty"`
	StateIds       *Relation `xmlrpc:"state_ids,omitempty"`
	StatesCount    *Int      `xmlrpc:"states_count,omitempty"`
	TaxIds         *Relation `xmlrpc:"tax_ids,omitempty"`
	VatRequired    *Bool     `xmlrpc:"vat_required,omitempty"`
	WriteDate      *Time     `xmlrpc:"write_date,omitempty"`
	WriteUid       *Many2One `xmlrpc:"write_uid,omitempty"`
	ZipFrom        *Int      `xmlrpc:"zip_from,omitempty"`
	ZipTo          *Int      `xmlrpc:"zip_to,omitempty"`
}
// AccountFiscalPositions represents array of account.fiscal.position model.
type AccountFiscalPositions []AccountFiscalPosition
// AccountFiscalPositionModel is the odoo model name.
const AccountFiscalPositionModel = "account.fiscal.position"
// Many2One convert AccountFiscalPosition to *Many2One by its record id.
// NOTE(review): panics if afp.Id is nil — presumably only called on records
// read back from odoo, which always carry an id; confirm at call sites.
func (afp *AccountFiscalPosition) Many2One() *Many2One {
	return NewMany2One(afp.Id.Get(), "")
}
// CreateAccountFiscalPosition creates a new account.fiscal.position model and
// returns its id. On error, or when the server returns no id, -1 is returned.
func (c *Client) CreateAccountFiscalPosition(afp *AccountFiscalPosition) (int64, error) {
	ids, err := c.CreateAccountFiscalPositions([]*AccountFiscalPosition{afp})
	if err != nil {
		return -1, err
	}
	if len(ids) == 0 {
		return -1, nil
	}
	return ids[0], nil
}
// CreateAccountFiscalPositions creates new account.fiscal.position models and
// returns their ids. (Comment previously named the singular function.)
func (c *Client) CreateAccountFiscalPositions(afps []*AccountFiscalPosition) ([]int64, error) {
	var vv []interface{}
	for _, v := range afps {
		vv = append(vv, v)
	}
	return c.Create(AccountFiscalPositionModel, vv)
}
// UpdateAccountFiscalPosition updates an existing account.fiscal.position record.
func (c *Client) UpdateAccountFiscalPosition(afp *AccountFiscalPosition) error {
	return c.UpdateAccountFiscalPositions([]int64{afp.Id.Get()}, afp)
}

// UpdateAccountFiscalPositions updates existing account.fiscal.position records.
// All records (represented by ids) will be updated by afp values.
func (c *Client) UpdateAccountFiscalPositions(ids []int64, afp *AccountFiscalPosition) error {
	return c.Update(AccountFiscalPositionModel, ids, afp)
}
// DeleteAccountFiscalPosition deletes an existing account.fiscal.position record.
func (c *Client) DeleteAccountFiscalPosition(id int64) error {
	return c.DeleteAccountFiscalPositions([]int64{id})
}

// DeleteAccountFiscalPositions deletes existing account.fiscal.position records.
func (c *Client) DeleteAccountFiscalPositions(ids []int64) error {
	return c.Delete(AccountFiscalPositionModel, ids)
}
// GetAccountFiscalPosition gets account.fiscal.position existing record by id.
// An error is returned when the read fails or the id does not exist.
func (c *Client) GetAccountFiscalPosition(id int64) (*AccountFiscalPosition, error) {
	afps, err := c.GetAccountFiscalPositions([]int64{id})
	if err != nil {
		return nil, err
	}
	if afps != nil && len(*afps) > 0 {
		return &((*afps)[0]), nil
	}
	return nil, fmt.Errorf("id %v of account.fiscal.position not found", id)
}
// GetAccountFiscalPositions gets account.fiscal.position existing records by ids.
func (c *Client) GetAccountFiscalPositions(ids []int64) (*AccountFiscalPositions, error) {
	afps := &AccountFiscalPositions{}
	if err := c.Read(AccountFiscalPositionModel, ids, nil, afps); err != nil {
		return nil, err
	}
	return afps, nil
}
// FindAccountFiscalPosition finds the first account.fiscal.position record
// matching criteria (search is limited to one result).
func (c *Client) FindAccountFiscalPosition(criteria *Criteria) (*AccountFiscalPosition, error) {
	afps := &AccountFiscalPositions{}
	if err := c.SearchRead(AccountFiscalPositionModel, criteria, NewOptions().Limit(1), afps); err != nil {
		return nil, err
	}
	if afps != nil && len(*afps) > 0 {
		return &((*afps)[0]), nil
	}
	return nil, fmt.Errorf("account.fiscal.position was not found with criteria %v", criteria)
}
// FindAccountFiscalPositions finds account.fiscal.position records by querying it
// and filtering it with criteria and options.
func (c *Client) FindAccountFiscalPositions(criteria *Criteria, options *Options) (*AccountFiscalPositions, error) {
	afps := &AccountFiscalPositions{}
	if err := c.SearchRead(AccountFiscalPositionModel, criteria, options, afps); err != nil {
		return nil, err
	}
	return afps, nil
}
// FindAccountFiscalPositionIds finds records ids by querying it
// and filtering it with criteria and options.
func (c *Client) FindAccountFiscalPositionIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(AccountFiscalPositionModel, criteria, options)
	if err != nil {
		return []int64{}, err
	}
	return ids, nil
}
// FindAccountFiscalPositionId finds the first matching record id by querying
// with criteria and options; returns -1 with an error when nothing matches.
func (c *Client) FindAccountFiscalPositionId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(AccountFiscalPositionModel, criteria, options)
	if err != nil {
		return -1, err
	}
	if len(ids) > 0 {
		return ids[0], nil
	}
	return -1, fmt.Errorf("account.fiscal.position was not found with criteria %v and options %v", criteria, options)
}
|
package stores
import (
"jean/instructions/base"
"jean/instructions/factory"
"jean/rtda/jvmstack"
)
// LSTORE (opcode 0x37) stores a long from the operand stack into the local
// variable slot given by its 8-bit index operand.
type LSTORE struct {
	base.Index8Instruction
}

// Execute pops a long and writes it to local variable l.Index.
func (l *LSTORE) Execute(frame *jvmstack.Frame) {
	_lstore(frame, l.Index)
}
// _lstore pops a long off the frame's operand stack and stores it into the
// local variable table at index (longs occupy two slots in the JVM spec;
// SetLong is expected to handle that).
func _lstore(frame *jvmstack.Frame, index uint) {
	val := frame.OperandStack().PopLong()
	frame.LocalVars().SetLong(index, val)
}
// LSTORE_<idx> variants hard-code the local-variable-table index into the
// opcode itself, so they carry no operands.
type LSTORE_0 struct {
	base.NoOperandsInstruction
}

// Execute stores the popped long into local slot 0.
func (l *LSTORE_0) Execute(frame *jvmstack.Frame) {
	_lstore(frame, 0)
}

type LSTORE_1 struct {
	base.NoOperandsInstruction
}

// Execute stores the popped long into local slot 1.
func (l *LSTORE_1) Execute(frame *jvmstack.Frame) {
	_lstore(frame, 1)
}

type LSTORE_2 struct {
	base.NoOperandsInstruction
}

// Execute stores the popped long into local slot 2.
func (l *LSTORE_2) Execute(frame *jvmstack.Frame) {
	_lstore(frame, 2)
}

type LSTORE_3 struct {
	base.NoOperandsInstruction
}

// Execute stores the popped long into local slot 3.
func (l *LSTORE_3) Execute(frame *jvmstack.Frame) {
	_lstore(frame, 3)
}
// init registers the lstore opcodes with the shared instruction factory.
// LSTORE (0x37) is stateful (carries an index), so a fresh instance is built
// per request; the fixed-index variants are stateless and shared.
func init() {
	factory.Factory.AddInstruction(0x37, func() base.Instruction {
		return &LSTORE{}
	})
	lstore_0 := &LSTORE_0{}
	lstore_1 := &LSTORE_1{}
	lstore_2 := &LSTORE_2{}
	lstore_3 := &LSTORE_3{}
	factory.Factory.AddInstruction(0x3f, func() base.Instruction {
		return lstore_0
	})
	factory.Factory.AddInstruction(0x40, func() base.Instruction {
		return lstore_1
	})
	factory.Factory.AddInstruction(0x41, func() base.Instruction {
		return lstore_2
	})
	factory.Factory.AddInstruction(0x42, func() base.Instruction {
		return lstore_3
	})
}
|
package p2p
import (
"container/list"
"errors"
"math"
"sync"
)
// clientMapEntry pairs a client with its position in the recency list.
type clientMapEntry struct {
	el     *list.Element
	client *Client
}

// clientMap is a mutex-guarded, LRU-style map from address to client.
// order holds addresses (most recently used at the front); entries maps an
// address to its client and its list element.
type clientMap struct {
	sync.Mutex
	cap     uint
	order   *list.List
	entries map[string]clientMapEntry
}
// newClientMap returns an empty clientMap pre-sized for cap entries.
func newClientMap(cap uint) *clientMap {
	return &clientMap{
		cap:     cap,
		order:   list.New(),
		entries: make(map[string]clientMapEntry, cap),
	}
}
// get returns the client for addr, creating one (and marking it most
// recently used) if absent. The bool reports whether the client already
// existed. When the map is full the least recently used client is evicted
// and closed before the new one is inserted.
func (c *clientMap) get(n *Node, addr string) (*Client, bool) {
	c.Lock()
	defer c.Unlock()
	entry, exists := c.entries[addr]
	if !exists {
		// NOTE(review): the limit checked here is n.maxInboundConnections,
		// not c.cap (which only pre-sizes the map) — confirm that is intended.
		if uint(len(c.entries)) == n.maxInboundConnections {
			// Evict the least recently used entry (back of the list).
			el := c.order.Back()
			evicted := c.order.Remove(el).(string)
			e := c.entries[evicted]
			delete(c.entries, evicted)
			// NOTE(review): waitUntilClosed runs while holding the map lock;
			// verify it cannot block indefinitely.
			e.client.close()
			e.client.waitUntilClosed()
		}
		entry.el = c.order.PushFront(addr)
		entry.client = newClient(n)
		c.entries[addr] = entry
	} else {
		// Touch: move to the front of the recency list.
		c.order.MoveToFront(entry.el)
	}
	return entry.client, exists
}
// remove drops addr from both the map and the recency list; it is a no-op
// when addr is not present. The client itself is not closed here.
func (c *clientMap) remove(addr string) {
	c.Lock()
	defer c.Unlock()
	if entry, ok := c.entries[addr]; ok {
		c.order.Remove(entry.el)
		delete(c.entries, addr)
	}
}
// release empties the map and closes every client it held. The map state is
// swapped out under the lock, but the (potentially slow) close/wait calls run
// after unlocking so other callers are not blocked.
func (c *clientMap) release() {
	c.Lock()
	entries := c.entries
	c.entries = make(map[string]clientMapEntry, c.cap)
	c.order.Init()
	c.Unlock()
	for _, e := range entries {
		e.client.close()
		e.client.waitUntilClosed()
	}
}
// slice returns the clients ordered most-recently-used first (list order).
func (c *clientMap) slice() []*Client {
	c.Lock()
	defer c.Unlock()
	clients := make([]*Client, 0, len(c.entries))
	for el := c.order.Front(); el != nil; el = el.Next() {
		clients = append(clients, c.entries[el.Value.(string)].client)
	}
	return clients
}
// requestMap tracks in-flight requests: each outstanding nonce maps to the
// channel on which its response will be delivered.
type requestMap struct {
	sync.Mutex
	entries map[uint64]chan message
	nonce   uint64
}

// newRequestMap returns an empty requestMap; nonces start from 1.
func newRequestMap() *requestMap {
	return &requestMap{entries: make(map[uint64]chan message)}
}
// nextNonce reserves a fresh nonce and registers a buffered reply channel
// for it; the channel receives the response message for a request sent with
// that nonce. An error is returned when the next nonce is still occupied by
// an in-flight request (the nonce space has wrapped all the way around).
func (r *requestMap) nextNonce() (<-chan message, uint64, error) {
	r.Lock()
	defer r.Unlock()
	// Wrap around instead of overflowing; nonce 0 is never handed out.
	if r.nonce == math.MaxUint64 {
		r.nonce = 0
	}
	r.nonce++
	nonce := r.nonce
	if _, exists := r.entries[nonce]; exists {
		// Error string lowercased per Go convention (staticcheck ST1005).
		return nil, 0, errors.New("ran out of available nonce to use for making request")
	}
	// Buffer of 1 lets the responder deliver without blocking.
	ch := make(chan message, 1)
	r.entries[nonce] = ch
	return ch, nonce, nil
}
// markRequestFailed closes the reply channel for nonce (signaling the waiter
// that no response will arrive) and forgets the request.
// NOTE(review): if nonce is not in the map this closes a nil channel and
// panics — callers presumably only pass nonces obtained from nextNonce.
func (r *requestMap) markRequestFailed(nonce uint64) {
	r.Lock()
	defer r.Unlock()
	close(r.entries[nonce])
	delete(r.entries, nonce)
}
// findRequest claims the reply channel for nonce, removing it from the map
// so each response is delivered at most once. Returns nil when the nonce is
// unknown (already claimed, failed, or never issued).
func (r *requestMap) findRequest(nonce uint64) chan<- message {
	r.Lock()
	defer r.Unlock()
	ch, exists := r.entries[nonce]
	if exists {
		delete(r.entries, nonce)
	}
	return ch
}
// close fails every outstanding request by closing its reply channel, then
// leaves the map empty.
func (r *requestMap) close() {
	r.Lock()
	defer r.Unlock()
	for nonce := range r.entries {
		close(r.entries[nonce])
		delete(r.entries, nonce)
	}
}
|
package server
import (
"encoding/json"
"github.com/kosotd/go-microservice-skeleton/cache"
"github.com/kosotd/go-microservice-skeleton/config"
"github.com/kosotd/go-microservice-skeleton/server"
"github.com/pkg/errors"
"gotest.tools/assert"
"io/ioutil"
"net/http/httptest"
"testing"
"time"
)
// testConfig is a minimal config.Config provider for the tests below.
type testConfig struct {
	config config.Config
}

// GetBaseConfig exposes the embedded base config for mutation in tests.
func (c *testConfig) GetBaseConfig() *config.Config {
	return &c.config
}
// init wires up a test configuration with a very short cache expiration
// (200ms) and update period (1ms), then initializes the shared cache used by
// the tests in this file.
func init() {
	conf := &testConfig{}
	config.InitConfig(conf, func(helper config.EnvHelper) {})
	conf.GetBaseConfig().CacheExpiration = "200ms"
	conf.GetBaseConfig().CacheUpdatePeriod = "1ms"
	cache.InitBigCache()
}
// TestCacheAndWrite verifies caching semantics of server.CacheAndWrite:
// supplier errors propagate on a cold cache, successful responses are
// written and cached, cached entries mask later supplier errors, and after
// the cache expires the supplier error surfaces again.
func TestCacheAndWrite(t *testing.T) {
	recorder := httptest.NewRecorder()
	// Cold cache: the supplier error is returned.
	err := server.CacheAndWrite(recorder, "cache", func() (resp interface{}, err error) {
		return nil, errors.New("error1")
	})
	assert.Error(t, err, "server.CacheAndWrite -> responseSupplier: error1")
	// Successful supplier: response is written and cached under "cache".
	err = server.CacheAndWrite(recorder, "cache", func() (resp interface{}, err error) {
		return map[string]string{"key": "value"}, nil
	})
	assert.NilError(t, err)
	body, err := ioutil.ReadAll(recorder.Body)
	assert.NilError(t, err)
	res := map[string]string{}
	err = json.Unmarshal(body, &res)
	assert.NilError(t, err)
	assert.Equal(t, res["key"], "value")
	// Cache hit: the failing supplier is never consulted.
	err = server.CacheAndWrite(recorder, "cache", func() (resp interface{}, err error) {
		return nil, errors.New("error1")
	})
	assert.NilError(t, err)
	// Wait past the 200ms expiration so the entry is evicted.
	time.Sleep(1 * time.Second)
	err = server.CacheAndWrite(recorder, "cache", func() (resp interface{}, err error) {
		return nil, errors.New("error1")
	})
	assert.Error(t, err, "server.CacheAndWrite -> responseSupplier: error1")
}
|
/*
Rotate List
Given a list, rotate the list to the right by k places, where k is non-negative.
Example:
Given 1->2->3->4->5->NULL and k = 2,
return 4->5->1->2->3->NULL.
*/
package main
func rotateRight(head *ListNode, k int) *ListNode {
if head == nil || head.Next == nil {
return head
}
pre,curr,length := head,head,0
for ;curr.Next != nil;length++ {
curr = curr.Next
}
k %= length + 1
if k == 0 {
return head
}
for i := 0;i < length - k;i++ {
pre = pre.Next
}
curr.Next,head,pre.Next = head,pre.Next,nil
return head
} |
package kubernetes
import (
"fmt"
"strings"
"github.com/kiali/kiali/config"
apps_v1 "k8s.io/api/apps/v1"
autoscaling_v1 "k8s.io/api/autoscaling/v1"
core_v1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// Istio resource naming constants. Each resource is declared as a triple:
// its plural (REST collection path segment), its object kind, and its
// collection (list) kind.
const (
	// Networking
	destinationRules        = "destinationrules"
	destinationRuleType     = "DestinationRule"
	destinationRuleTypeList = "DestinationRuleList"

	gateways        = "gateways"
	gatewayType     = "Gateway"
	gatewayTypeList = "GatewayList"

	sidecars        = "sidecars"
	sidecarType     = "Sidecar"
	sidecarTypeList = "SidecarList"

	serviceentries       = "serviceentries"
	serviceentryType     = "ServiceEntry"
	serviceentryTypeList = "ServiceEntryList"

	virtualServices        = "virtualservices"
	virtualServiceType     = "VirtualService"
	virtualServiceTypeList = "VirtualServiceList"

	// Quotas
	quotaspecs        = "quotaspecs"
	quotaspecType     = "QuotaSpec"
	quotaspecTypeList = "QuotaSpecList"

	quotaspecbindings        = "quotaspecbindings"
	quotaspecbindingType     = "QuotaSpecBinding"
	quotaspecbindingTypeList = "QuotaSpecBindingList"

	// Policies
	policies       = "policies"
	policyType     = "Policy"
	policyTypeList = "PolicyList"

	// MeshPolicies
	meshPolicies       = "meshpolicies"
	meshPolicyType     = "MeshPolicy"
	meshPolicyTypeList = "MeshPolicyList"

	// Rbac
	clusterrbacconfigs        = "clusterrbacconfigs"
	clusterrbacconfigType     = "ClusterRbacConfig"
	clusterrbacconfigTypeList = "ClusterRbacConfigList"

	rbacconfigs        = "rbacconfigs"
	rbacconfigType     = "RbacConfig"
	rbacconfigTypeList = "RbacConfigList"

	serviceroles        = "serviceroles"
	serviceroleType     = "ServiceRole"
	serviceroleTypeList = "ServiceRoleList"

	servicerolebindings        = "servicerolebindings"
	servicerolebindingType     = "ServiceRoleBinding"
	servicerolebindingTypeList = "ServiceRoleBindingList"

	// Config - Rules (mixer types use lowercase kinds)
	rules        = "rules"
	ruleType     = "rule"
	ruleTypeList = "ruleList"

	// Config - Adapters
	adapters        = "adapters"
	adapterType     = "adapter"
	adapterTypeList = "adapterList"

	bypasses       = "bypasses"
	bypassType     = "bypass"
	bypassTypeList = "bypassList"

	circonuses       = "circonuses"
	circonusType     = "circonus"
	circonusTypeList = "circonusList"

	cloudwatches       = "cloudwatches"
	cloudwatchType     = "cloudwatch"
	cloudwatchTypeList = "cloudwatchList"

	deniers        = "deniers"
	denierType     = "denier"
	denierTypeList = "denierList"

	dogstatsds        = "dogstatsds"
	dogstatsdType     = "dogstatsd"
	dogstatsdTypeList = "dogstatsdList"

	fluentds        = "fluentds"
	fluentdType     = "fluentd"
	fluentdTypeList = "fluentdList"

	handlers        = "handlers"
	handlerType     = "handler"
	handlerTypeList = "handlerList"

	kubernetesenvs        = "kubernetesenvs"
	kubernetesenvType     = "kubernetesenv"
	kubernetesenvTypeList = "kubernetesenvList"

	listcheckers        = "listcheckers"
	listcheckerType     = "listchecker"
	listcheckerTypeList = "listcheckerList"

	memquotas        = "memquotas"
	memquotaType     = "memquota"
	memquotaTypeList = "memquotaList"

	noops        = "noops"
	noopType     = "noop"
	noopTypeList = "noopList"

	opas        = "opas"
	opaType     = "opa"
	opaTypeList = "opaList"

	prometheuses       = "prometheuses"
	prometheusType     = "prometheus"
	prometheusTypeList = "prometheusList"

	rbacs        = "rbacs"
	rbacType     = "rbac"
	rbacTypeList = "rbacList"

	redisquotas        = "redisquotas"
	redisquotaType     = "redisquota"
	redisquotaTypeList = "redisquotaList"

	signalfxs        = "signalfxs"
	signalfxType     = "signalfx"
	signalfxTypeList = "signalfxList"

	solarwindses       = "solarwindses"
	solarwindsType     = "solarwinds"
	solarwindsTypeList = "solarwindsList"

	stackdrivers        = "stackdrivers"
	stackdriverType     = "stackdriver"
	stackdriverTypeList = "stackdriverList"

	statsds        = "statsds"
	statsdType     = "statsd"
	statsdTypeList = "statsdList"

	stdios        = "stdios"
	stdioType     = "stdio"
	stdioTypeList = "stdioList"

	zipkins        = "zipkins"
	zipkinType     = "zipkin"
	zipkinTypeList = "zipkinList"

	// Config - Templates
	apikeys        = "apikeys"
	apikeyType     = "apikey"
	apikeyTypeList = "apikeyList"

	authorizations        = "authorizations"
	authorizationType     = "authorization"
	authorizationTypeList = "authorizationList"

	checknothings        = "checknothings"
	checknothingType     = "checknothing"
	checknothingTypeList = "checknothingList"

	edges        = "edges"
	edgeType     = "edge"
	edgeTypeList = "edgeList"

	instances        = "instances"
	instanceType     = "instance"
	instanceTypeList = "instanceList"

	kuberneteses       = "kuberneteses"
	kubernetesType     = "kubernetes"
	kubernetesTypeList = "kubernetesList"

	listEntries       = "listentries"
	listEntryType     = "listentry"
	listEntryTypeList = "listentryList"

	logentries       = "logentries"
	logentryType     = "logentry"
	logentryTypeList = "logentryList"

	metrics        = "metrics"
	metricType     = "metric"
	metricTypeList = "metricList"

	quotas        = "quotas"
	quotaType     = "quota"
	quotaTypeList = "quotaList"

	reportnothings        = "reportnothings"
	reportnothingType     = "reportnothing"
	reportnothingTypeList = "reportnothingList"

	templates        = "templates"
	templateType     = "template"
	templateTypeList = "templateList"

	tracespans        = "tracespans"
	tracespanType     = "tracespan"
	tracespanTypeList = "tracespanList"
)
var (
ConfigGroupVersion = schema.GroupVersion{
Group: "config.istio.io",
Version: "v1alpha2",
}
ApiConfigVersion = ConfigGroupVersion.Group + "/" + ConfigGroupVersion.Version
NetworkingGroupVersion = schema.GroupVersion{
Group: "networking.istio.io",
Version: "v1alpha3",
}
ApiNetworkingVersion = NetworkingGroupVersion.Group + "/" + NetworkingGroupVersion.Version
AuthenticationGroupVersion = schema.GroupVersion{
Group: "authentication.istio.io",
Version: "v1alpha1",
}
ApiAuthenticationVersion = AuthenticationGroupVersion.Group + "/" + AuthenticationGroupVersion.Version
RbacGroupVersion = schema.GroupVersion{
Group: "rbac.istio.io",
Version: "v1alpha1",
}
ApiRbacVersion = RbacGroupVersion.Group + "/" + RbacGroupVersion.Version
networkingTypes = []struct {
objectKind string
collectionKind string
}{
{
objectKind: gatewayType,
collectionKind: gatewayTypeList,
},
{
objectKind: virtualServiceType,
collectionKind: virtualServiceTypeList,
},
{
objectKind: destinationRuleType,
collectionKind: destinationRuleTypeList,
},
{
objectKind: serviceentryType,
collectionKind: serviceentryTypeList,
},
{
objectKind: sidecarType,
collectionKind: sidecarTypeList,
},
}
configTypes = []struct {
objectKind string
collectionKind string
}{
{
objectKind: ruleType,
collectionKind: ruleTypeList,
},
// Quota specs depends on Quota template but are not a "template" object itselft
{
objectKind: quotaspecType,
collectionKind: quotaspecTypeList,
},
{
objectKind: quotaspecbindingType,
collectionKind: quotaspecbindingTypeList,
},
}
authenticationTypes = []struct {
objectKind string
collectionKind string
}{
{
objectKind: policyType,
collectionKind: policyTypeList,
},
{
objectKind: meshPolicyType,
collectionKind: meshPolicyTypeList,
},
}
// TODO Adapters and Templates can be loaded from external config for easy maintenance
adapterTypes = []struct {
objectKind string
collectionKind string
}{
{
objectKind: adapterType,
collectionKind: adapterTypeList,
},
{
objectKind: bypassType,
collectionKind: bypassTypeList,
},
{
objectKind: circonusType,
collectionKind: circonusTypeList,
},
{
objectKind: cloudwatchType,
collectionKind: cloudwatchTypeList,
},
{
objectKind: denierType,
collectionKind: denierTypeList,
},
{
objectKind: dogstatsdType,
collectionKind: dogstatsdTypeList,
},
{
objectKind: fluentdType,
collectionKind: fluentdTypeList,
},
{
objectKind: handlerType,
collectionKind: handlerTypeList,
},
{
objectKind: kubernetesenvType,
collectionKind: kubernetesenvTypeList,
},
{
objectKind: listcheckerType,
collectionKind: listcheckerTypeList,
},
{
objectKind: memquotaType,
collectionKind: memquotaTypeList,
},
{
objectKind: noopType,
collectionKind: noopTypeList,
},
{
objectKind: opaType,
collectionKind: opaTypeList,
},
{
objectKind: prometheusType,
collectionKind: prometheusTypeList,
},
{
objectKind: rbacType,
collectionKind: rbacTypeList,
},
{
objectKind: redisquotaType,
collectionKind: redisquotaTypeList,
},
{
objectKind: signalfxType,
collectionKind: signalfxTypeList,
},
{
objectKind: solarwindsType,
collectionKind: solarwindsTypeList,
},
{
objectKind: stackdriverType,
collectionKind: stackdriverTypeList,
},
{
objectKind: statsdType,
collectionKind: statsdTypeList,
},
{
objectKind: stdioType,
collectionKind: stdioTypeList,
},
{
objectKind: zipkinType,
collectionKind: zipkinTypeList,
},
}
templateTypes = []struct {
objectKind string
collectionKind string
}{
{
objectKind: apikeyType,
collectionKind: apikeyTypeList,
},
{
objectKind: authorizationType,
collectionKind: authorizationTypeList,
},
{
objectKind: checknothingType,
collectionKind: checknothingTypeList,
},
{
objectKind: edgeType,
collectionKind: edgeTypeList,
},
{
objectKind: kubernetesType,
collectionKind: kubernetesTypeList,
},
{
objectKind: instanceType,
collectionKind: instanceTypeList,
},
{
objectKind: listEntryType,
collectionKind: listEntryTypeList,
},
{
objectKind: logentryType,
collectionKind: logentryTypeList,
},
{
objectKind: metricType,
collectionKind: metricTypeList,
},
{
objectKind: quotaType,
collectionKind: quotaTypeList,
},
{
objectKind: reportnothingType,
collectionKind: reportnothingTypeList,
},
{
objectKind: templateType,
collectionKind: templateTypeList,
},
{
objectKind: tracespanType,
collectionKind: tracespanTypeList,
},
}
rbacTypes = []struct {
objectKind string
collectionKind string
}{
{
objectKind: clusterrbacconfigType,
collectionKind: clusterrbacconfigTypeList,
},
{
objectKind: rbacconfigType,
collectionKind: rbacconfigTypeList,
},
{
objectKind: serviceroleType,
collectionKind: serviceroleTypeList,
},
{
objectKind: servicerolebindingType,
collectionKind: servicerolebindingTypeList,
},
}
	// A map to get the plural for an Istio type using the singular type.
	// Used to fetch Istio action details, so it only applies to handler (adapter) and instance (template) types
// It should be one entry per adapter/template
adapterPlurals = map[string]string{
adapterType: adapters,
bypassType: bypasses,
circonusType: circonuses,
cloudwatchType: cloudwatches,
denierType: deniers,
dogstatsdType: dogstatsds,
fluentdType: fluentds,
handlerType: handlers,
kubernetesenvType: kubernetesenvs,
listcheckerType: listcheckers,
memquotaType: memquotas,
noopType: noops,
opaType: opas,
prometheusType: prometheuses,
rbacType: rbacs,
redisquotaType: redisquotas,
signalfxType: signalfxs,
solarwindsType: solarwindses,
stackdriverType: stackdrivers,
statsdType: statsds,
stdioType: stdios,
zipkinType: zipkins,
}
templatePlurals = map[string]string{
apikeyType: apikeys,
authorizationType: authorizations,
checknothingType: checknothings,
edgeType: edges,
instanceType: instances,
kubernetesType: kuberneteses,
listEntryType: listEntries,
logentryType: logentries,
metricType: metrics,
quotaType: quotas,
reportnothingType: reportnothings,
templateType: templates,
tracespanType: tracespans,
}
PluralType = map[string]string{
// Networking
gateways: gatewayType,
virtualServices: virtualServiceType,
destinationRules: destinationRuleType,
serviceentries: serviceentryType,
sidecars: sidecarType,
// Main Config files
rules: ruleType,
quotaspecs: quotaspecType,
quotaspecbindings: quotaspecbindingType,
// Adapters
adapters: adapterType,
bypasses: bypassType,
circonuses: circonusType,
cloudwatches: cloudwatchType,
deniers: denierType,
dogstatsds: dogstatsdType,
fluentds: fluentdType,
handlers: handlerType,
kubernetesenvs: kubernetesenvType,
listcheckers: listcheckerType,
memquotas: memquotaType,
noops: noopType,
opas: opaType,
prometheuses: prometheusType,
rbacs: rbacType,
redisquotas: redisquotaType,
signalfxs: signalfxType,
solarwindses: solarwindsType,
stackdrivers: stackdriverType,
statsds: statsdType,
stdios: stdioType,
zipkins: zipkinType,
// Templates
apikeys: apikeyType,
authorizations: authorizationType,
checknothings: checknothingType,
edges: edgeType,
instances: instanceType,
kuberneteses: kubernetesType,
listEntries: listEntryType,
logentries: logentryType,
metrics: metricType,
quotas: quotaType,
reportnothings: reportnothingType,
templates: templateType,
tracespans: tracespanType,
// Policies
policies: policyType,
meshPolicies: meshPolicyType,
// Rbac
clusterrbacconfigs: clusterrbacconfigType,
rbacconfigs: rbacconfigType,
serviceroles: serviceroleType,
servicerolebindings: servicerolebindingType,
}
)
// IstioObject is a k8s wrapper interface for config objects.
// Taken from istio.io
type IstioObject interface {
	runtime.Object
	// GetSpec returns the raw custom-resource spec as a generic map.
	GetSpec() map[string]interface{}
	// SetSpec replaces the raw custom-resource spec.
	SetSpec(map[string]interface{})
	GetTypeMeta() meta_v1.TypeMeta
	SetTypeMeta(meta_v1.TypeMeta)
	GetObjectMeta() meta_v1.ObjectMeta
	SetObjectMeta(meta_v1.ObjectMeta)
	// DeepCopyIstioObject returns a deep copy of the receiver as an IstioObject.
	DeepCopyIstioObject() IstioObject
}
// IstioObjectList is a k8s wrapper interface for list config objects.
// Taken from istio.io
type IstioObjectList interface {
	runtime.Object
	// GetItems returns each list entry as an individual IstioObject.
	GetItems() []IstioObject
}
// ServiceList holds the lists of services, pods and deployments fetched together.
type ServiceList struct {
	Services *core_v1.ServiceList
	Pods *core_v1.PodList
	Deployments *apps_v1.DeploymentList
}
// ServiceDetails is a wrapper to group full Service description, Endpoints and Pods.
// Used to fetch all details in a single operation instead of invoking individual APIs per group.
type ServiceDetails struct {
	Service *core_v1.Service `json:"service"`
	Endpoints *core_v1.Endpoints `json:"endpoints"`
	Deployments *apps_v1.DeploymentList `json:"deployments"`
	// Autoscalers are the HPAs associated with the service's deployments.
	Autoscalers *autoscaling_v1.HorizontalPodAutoscalerList `json:"autoscalers"`
	Pods []core_v1.Pod `json:"pods"`
}
// IstioDetails is a wrapper to group all Istio objects related to a Service.
// Used to fetch all Istio information in a single operation instead of invoking individual APIs per group.
type IstioDetails struct {
	VirtualServices []IstioObject `json:"virtualservices"`
	DestinationRules []IstioObject `json:"destinationrules"`
	ServiceEntries []IstioObject `json:"serviceentries"`
	Gateways []IstioObject `json:"gateways"`
}
// MTLSDetails is a wrapper to group all Istio objects related to non-local mTLS configurations.
type MTLSDetails struct {
	DestinationRules []IstioObject `json:"destinationrules"`
	// MeshPolicies are mesh-wide authentication policies.
	MeshPolicies []IstioObject `json:"meshpolicies"`
	// Policies are namespace-scoped authentication policies.
	Policies []IstioObject `json:"policies"`
}
// RBACDetails is a wrapper for objects related to Istio RBAC (Role Based Access Control).
type RBACDetails struct {
	ClusterRbacConfigs []IstioObject `json:"clusterrbacconfigs"`
	ServiceRoles []IstioObject `json:"serviceroles"`
	ServiceRoleBindings []IstioObject `json:"servicerolebindings"`
}
// istioResponse pairs the result of a fetch of Istio objects with the error
// (if any) that the fetch produced; used to funnel results through channels.
type istioResponse struct {
	results []IstioObject
	err error
}
// GenericIstioObject is a type to test Istio types defined by Istio as a Kubernetes extension.
// The Spec is kept as a generic map rather than a concrete schema.
type GenericIstioObject struct {
	meta_v1.TypeMeta `json:",inline"`
	meta_v1.ObjectMeta `json:"metadata"`
	Spec map[string]interface{} `json:"spec"`
}
// GenericIstioObjectList is the generic Kubernetes API list wrapper
// holding GenericIstioObject items.
type GenericIstioObjectList struct {
	meta_v1.TypeMeta `json:",inline"`
	meta_v1.ListMeta `json:"metadata"`
	Items []GenericIstioObject `json:"items"`
}
// GetSpec returns the raw spec map of the wrapped custom resource.
func (in *GenericIstioObject) GetSpec() map[string]interface{} {
	return in.Spec
}
// SetSpec replaces the raw spec map of the wrapped custom resource.
func (in *GenericIstioObject) SetSpec(spec map[string]interface{}) {
	in.Spec = spec
}
// GetTypeMeta returns the Kubernetes TypeMeta of the wrapped object.
func (in *GenericIstioObject) GetTypeMeta() meta_v1.TypeMeta {
	return in.TypeMeta
}
// SetTypeMeta sets the Kubernetes TypeMeta of the wrapped object.
// (The original comment said "SetObjectMeta"; it documented the wrong method.)
func (in *GenericIstioObject) SetTypeMeta(typemeta meta_v1.TypeMeta) {
	in.TypeMeta = typemeta
}
// GetObjectMeta returns the Kubernetes ObjectMeta of the wrapped object.
func (in *GenericIstioObject) GetObjectMeta() meta_v1.ObjectMeta {
	return in.ObjectMeta
}
// SetObjectMeta sets the Kubernetes ObjectMeta of the wrapped object.
func (in *GenericIstioObject) SetObjectMeta(metadata meta_v1.ObjectMeta) {
	in.ObjectMeta = metadata
}
// GetItems returns the list entries as IstioObject interfaces.
// It takes the address of each element so no item is copied.
func (in *GenericIstioObjectList) GetItems() []IstioObject {
	result := make([]IstioObject, 0, len(in.Items))
	for i := range in.Items {
		result = append(result, &in.Items[i])
	}
	return result
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GenericIstioObject) DeepCopyInto(out *GenericIstioObject) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	// NOTE(review): Spec is assigned by reference — the map contents are
	// shared between in and out, so this copy is shallow for the spec.
	out.Spec = in.Spec
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericIstioObject.
// Returns nil when the receiver is nil.
func (in *GenericIstioObject) DeepCopy() *GenericIstioObject {
	if in == nil {
		return nil
	}
	out := new(GenericIstioObject)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// Satisfies the runtime.Object interface required by the Kubernetes machinery.
func (in *GenericIstioObject) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyIstioObject is an autogenerated deepcopy function, copying the receiver, creating a new IstioObject.
func (in *GenericIstioObject) DeepCopyIstioObject() IstioObject {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GenericIstioObjectList) DeepCopyInto(out *GenericIstioObjectList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	out.ListMeta = in.ListMeta
	// Deep-copy each item into a freshly allocated slice.
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]GenericIstioObject, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericIstioObjectList.
// Returns nil when the receiver is nil.
func (in *GenericIstioObjectList) DeepCopy() *GenericIstioObjectList {
	if in == nil {
		return nil
	}
	out := new(GenericIstioObjectList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GenericIstioObjectList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// Host represents the FQDN format for Istio hostnames
// (service.namespace.cluster, e.g. reviews.default.svc.cluster.local).
type Host struct {
	Service string
	Namespace string
	Cluster string
	// CompleteInput is true when the parsed input was either a short service
	// name (qualified by the caller's namespace/cluster) or a full FQDN.
	CompleteInput bool
}
// ParseHost takes as an input a hostname (simple or full FQDN), namespace and
// clusterName and returns a parsed Host struct. An empty cluster falls back to
// the configured Istio identity domain.
func ParseHost(hostName, namespace, cluster string) Host {
	if cluster == "" {
		cluster = config.Get().ExternalServices.Istio.IstioIdentityDomain
	}
	parts := strings.Split(hostName, ".")
	if len(parts) == 1 {
		// Simple format: qualify the short name with the caller's
		// namespace and cluster.
		return Host{
			Service:       hostName,
			Namespace:     namespace,
			Cluster:       cluster,
			CompleteInput: true,
		}
	}
	if len(parts) > 2 && strings.Join(parts[2:], ".") == cluster {
		// FQDN input: service.namespace.<cluster domain>
		return Host{
			Service:       parts[0],
			Namespace:     parts[1],
			Cluster:       cluster,
			CompleteInput: true,
		}
	}
	// ServiceEntry host or a broken hostname: keep it verbatim in Service.
	return Host{Service: hostName}
}
// String outputs a full FQDN version of the Host
// as "service.namespace.cluster".
func (h Host) String() string {
	return fmt.Sprintf("%s.%s.%s", h.Service, h.Namespace, h.Cluster)
}
|
package handler
import (
"cgon/master/handler/api/opt"
"cgon/master/handler/api/v1"
"cgon/master/middleware"
"github.com/gin-gonic/gin"
)
// InitRouters builds the gin engine: logging/recovery middleware, the
// unauthenticated /opt group, and the login-protected /api/v1 job endpoints.
func InitRouters() *gin.Engine {
	r := gin.New()
	r.Use(gin.Logger(), gin.Recovery())
	initOpt(r)
	apiv1 := r.Group("/api/v1")
	apiv1.Use(middleware.LoginRequire())
	{
		apiv1.POST("/job", v1.AddJob)
		apiv1.GET("/job", v1.GETJob)
		apiv1.DELETE("/job", v1.RemoveJob)
		//apiv1.PUT("/job", v1.EditJob)
	}
	return r
}
// initOpt registers the /opt routes; these are currently unauthenticated.
func initOpt(r *gin.Engine) {
	op := r.Group("/opt")
	//op.Use(middleware.LoginRequire())
	op.GET("/online", opt.Online)
}
|
package v1alpha2
import (
"encoding/json"
"errors"
"fmt"
)
// ImageType defines the content type for mirrored images
type ImageType int

const (
	TypeInvalid ImageType = iota
	TypeOCPRelease
	TypeOCPReleaseContent
	TypeCincinnatiGraph
	TypeOperatorCatalog
	TypeOperatorBundle
	TypeOperatorRelatedImage
	TypeGeneric
)

// imageTypeStrings maps every valid ImageType to its canonical string
// representation. TypeInvalid is deliberately absent.
var imageTypeStrings = map[ImageType]string{
	TypeOCPRelease:           "ocpRelease",
	TypeOCPReleaseContent:    "ocpReleaseContent",
	TypeCincinnatiGraph:      "cincinnatiGraph",
	TypeOperatorCatalog:      "operatorCatalog",
	TypeOperatorBundle:       "operatorBundle",
	TypeOperatorRelatedImage: "operatorRelatedImage",
	TypeGeneric:              "generic",
}

// imageStringsType is the inverse of imageTypeStrings, used when decoding.
var imageStringsType = map[string]ImageType{
	"ocpRelease":           TypeOCPRelease,
	"ocpReleaseContent":    TypeOCPReleaseContent,
	"cincinnatiGraph":      TypeCincinnatiGraph,
	"operatorCatalog":      TypeOperatorCatalog,
	"operatorBundle":       TypeOperatorBundle,
	"operatorRelatedImage": TypeOperatorRelatedImage,
	"generic":              TypeGeneric,
}

// String returns the string representation of an ImageType.
// Unknown values yield the empty string.
func (it ImageType) String() string {
	return imageTypeStrings[it]
}

// MarshalJSON marshals the ImageType as a quoted json string.
// Marshaling an invalid or unknown type is an error.
func (it ImageType) MarshalJSON() ([]byte, error) {
	err := it.validate()
	if err != nil {
		return nil, err
	}
	return json.Marshal(it.String())
}

// UnmarshalJSON unmarshals a quoted json string to the ImageType.
// Strings not present in the lookup table decode to TypeInvalid.
func (it *ImageType) UnmarshalJSON(b []byte) error {
	var name string
	if err := json.Unmarshal(b, &name); err != nil {
		return err
	}
	*it = imageStringsType[name]
	return nil
}

// validate returns nil for any ImageType with a known string form,
// a "must set" error for TypeInvalid, and an "unknown" error otherwise.
func (it ImageType) validate() error {
	if _, known := imageTypeStrings[it]; known {
		return nil
	}
	if it == TypeInvalid {
		// TypeInvalid is the zero value for the concrete type,
		// which means the field was not set.
		return errors.New("must set image type")
	}
	return fmt.Errorf("unknown image type %v", it)
}
// Association between an image and its children, either image layers or child manifests.
type Association struct {
	// Name of the image.
	Name string `json:"name"`
	// Path to image in new location (archive or registry)
	Path string `json:"path"`
	// ID of the image. Joining this value with "manifests" and Path
	// will produce a path to the image's manifest.
	ID string `json:"id"`
	// TagSymlink of the blob specified by ID.
	// This value must be a filename on disk in the "blobs" dir
	TagSymlink string `json:"tagSymlink"`
	// Type of the image in the context of this tool.
	// See the ImageType enum for options.
	Type ImageType `json:"type"`
	// ManifestDigests of images if the image is a docker manifest list or OCI index.
	// These manifests refer to image manifests by content SHA256 digest.
	// ManifestDigests and LayerDigests are mutually exclusive.
	ManifestDigests []string `json:"manifestDigests,omitempty"`
	// LayerDigests of a single manifest if the image is not a docker manifest list
	// or OCI index. These digests refer to image layer blobs by content SHA256 digest.
	// ManifestDigests and LayerDigests are mutually exclusive.
	LayerDigests []string `json:"layerDigests,omitempty"`
}
// Validate checks that the Association fields are set as expected:
// exactly one of ManifestDigests/LayerDigests must be non-empty, at least one
// of ID/TagSymlink must be set, and the image Type must be valid.
func (a Association) Validate() error {
	hasManifests := len(a.ManifestDigests) != 0
	hasLayers := len(a.LayerDigests) != 0
	switch {
	case hasManifests && hasLayers:
		return fmt.Errorf("image %q: child descriptors cannot contain both manifests and image layers", a.Name)
	case !hasManifests && !hasLayers:
		return fmt.Errorf("image %q: child descriptors must contain at least one manifest or image layer", a.Name)
	}
	if a.ID == "" && a.TagSymlink == "" {
		return fmt.Errorf("image %q: tag or ID must be set", a.Name)
	}
	return a.Type.validate()
}
|
package gen
import (
"fmt"
"strings"
"unicode"
)
// getNestedSpaces returns the indentation for the given nesting level:
// four spaces per level. Non-positive levels yield an empty string
// (matching the original loop's behavior and avoiding strings.Repeat's
// panic on negative counts).
func getNestedSpaces(level int) string {
	if level <= 0 {
		return ""
	}
	return strings.Repeat("    ", level)
}
// getYamlTag returns a yaml struct tag for the key when the key contains an
// uppercase rune anywhere after its first rune (i.e. the exported Go field
// name would not round-trip through YAML's default lowercasing); otherwise it
// returns the empty string.
func getYamlTag(key string) string {
	skippedFirst := false
	for _, r := range key {
		if !skippedFirst {
			skippedFirst = true
			continue
		}
		if unicode.IsUpper(r) {
			return fmt.Sprintf(" `yaml:\"%s\"`", key)
		}
	}
	return ""
}
func CreateTypeStructure(data AppTemplate, name string, nestedLevel int) string {
var str string
for key, value := range data {
stringKey := key.(string)
switch value.(type) {
case []interface{}:
var iType, itemType, baseType = "", "", "[]interface{}"
for _, v := range value.([]interface{}) {
switch v.(type) {
case string:
itemType = "[]string"
case int:
itemType = "[]int"
}
if itemType != "" && iType == "" {
iType = itemType
}
if iType != "" && itemType != iType {
iType = baseType
break
}
}
//fmt spaces filedName fieldType
str += fmt.Sprintf("%s%s %s%s\n", getNestedSpaces(nestedLevel+1), strings.Title(stringKey), iType, getYamlTag(stringKey))
case AppTemplate:
//fmt spaces filedName struct
str += fmt.Sprintf("%s%s %s\n", getNestedSpaces(nestedLevel+1), strings.Title(stringKey), CreateTypeStructure(value.(AppTemplate), stringKey, nestedLevel+1))
default:
//fmt spaces filedName fieldType
str += fmt.Sprintf("%s%s %T%s\n", getNestedSpaces(nestedLevel+1), strings.Title(stringKey), value, getYamlTag(stringKey))
}
}
if nestedLevel == 0 {
return fmt.Sprintf("type %s struct {\n%s}\n", name, str)
} else {
return fmt.Sprintf("struct {\n%s%s}%s", str, getNestedSpaces(nestedLevel), getYamlTag(name))
}
} |
// Copyright (c) 2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package walletseed
import (
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"strconv"
"strings"
"github.com/EXCCoin/exccwallet/v2/errors"
"github.com/EXCCoin/exccwallet/v2/pgpwordlist"
)
var (
	// MinEntBytes and MaxEntBytes bound the accepted entropy size in bytes
	// (128 to 256 bits).
	MinEntBytes = uint(16)
	MaxEntBytes = uint(32)
	// RecommendedEntLen is the suggested entropy size in bytes.
	RecommendedEntLen = uint(32)
	// ErrInvalidEntLen is returned when a requested entropy size is out of range.
	ErrInvalidEntLen = errors.Errorf("entropy length must be between %d and %d bits", MinEntBytes*8, MaxEntBytes*8)
)
// GenerateRandomEntropy returns a new seed created from a cryptographically-secure
// random source. If the seed size is unacceptable,
// ErrInvalidEntLen is returned.
// GenerateRandomEntropy returns a new seed created from a
// cryptographically-secure random source. If the seed size is unacceptable,
// ErrInvalidEntLen is returned.
func GenerateRandomEntropy(size uint) ([]byte, error) {
	// Per [BIP32], entropy must be in range [MinEntBytes, MaxEntBytes].
	if size < MinEntBytes || size > MaxEntBytes {
		return nil, ErrInvalidEntLen
	}
	buf := make([]byte, size)
	if _, err := rand.Read(buf); err != nil {
		return nil, err
	}
	return buf, nil
}
// ------------------------------------------------------------------------
// bytesToBits expands each input byte into eight ASCII '0'/'1' bytes,
// most-significant bit first.
func bytesToBits(data []byte) []byte {
	bits := make([]byte, len(data)*8)
	for i, b := range data {
		for j := 7; j >= 0; j-- {
			pos := i*8 + 7 - j
			if b&(1<<uint(j)) != 0 {
				bits[pos] = '1'
			} else {
				bits[pos] = '0'
			}
		}
	}
	return bits
}
// CheckSummed returns a bit slice of entropy with an appended check sum
func CheckSummed(ent []byte) []byte {
cs := CheckSum(ent)
bits := bytesToBits(ent)
return append(bits, cs...)
}
// CheckSum returns a slice of bits from the given entropy
func CheckSum(ent []byte) []byte {
h := sha256.New()
h.Write(ent) // nolint: errcheck
cs := h.Sum(nil)
hashBits := bytesToBits(cs)
num := len(ent) * 8 / 32
return hashBits[:num]
}
// EncodeMnemonicSlice encodes a entropy as a mnemonic word list.
func EncodeMnemonicSlice(ent []byte) ([]string, error) {
const chunkSize = 11
bits := CheckSummed(ent)
length := len(bits)
words := make([]string, length/11)
for i := 0; i < length; i += chunkSize {
stringVal := string(bits[i : chunkSize+i])
intVal, err := strconv.ParseInt(stringVal, 2, 64)
if err != nil {
return nil, errors.Errorf("could not convert %s to word index", stringVal)
}
word := pgpwordlist.WordList[intVal]
words[(chunkSize+i)/11-1] = word
}
return words, nil
}
// EncodeMnemonic encodes a entropy as a mnemonic word list separated by spaces.
func EncodeMnemonic(ent []byte) (string, error) {
words, err := EncodeMnemonicSlice(ent)
if err != nil {
return "", err
}
return strings.Join(words, " "), nil
}
// DecodeUserInput decodes a seed in either hexadecimal or mnemonic word list
// encoding back into its binary form.
// DecodeUserInput decodes a seed in either hexadecimal or mnemonic word list
// encoding back into its binary form. Input containing a space is treated as
// a mnemonic; anything else is assumed to be hex.
func DecodeUserInput(input, password string) ([]byte, error) {
	trimmed := strings.TrimSpace(input)
	var (
		seed []byte
		err  error
	)
	if strings.Contains(trimmed, " ") {
		// A space implies a list of mnemonic words.
		seed, err = pgpwordlist.DecodeMnemonics(trimmed, password)
	} else {
		// No spaces: treat the input as a hex-encoded seed.
		seed, err = hex.DecodeString(trimmed)
	}
	if err != nil {
		return nil, err
	}
	return seed, nil
}
// DecodeMnemonicSlice decodes a seed in mnemonic word list
// encoding back into its binary form.
func DecodeMnemonicSlice(input []string, password string) ([]byte, error) {
return DecodeUserInput(strings.Join(input, " "), password)
}
|
package workspace
import (
"path/filepath"
"runtime"
"testing"
"github.com/stretchr/testify/assert"
)
// TestNewSolutions checks that NewSolutions loads one solution per fixture
// directory in input order, and that it errors when a path cannot be loaded.
func TestNewSolutions(t *testing.T) {
	// Resolve the fixtures directory relative to this test file.
	_, cwd, _, _ := runtime.Caller(0)
	root := filepath.Join(cwd, "..", "..", "fixtures", "solutions")
	paths := []string{
		filepath.Join(root, "alpha"),
		filepath.Join(root, "bravo"),
		filepath.Join(root, "charlie"),
	}
	sx, err := NewSolutions(paths)
	assert.NoError(t, err)
	if assert.Equal(t, 3, len(sx)) {
		// IDs are expected to match the fixture directory names, in order.
		assert.Equal(t, "alpha", sx[0].ID)
		assert.Equal(t, "bravo", sx[1].ID)
		assert.Equal(t, "charlie", sx[2].ID)
	}
	// "delta" presumably does not exist in the fixture tree, so construction
	// should fail — NOTE(review): confirm against fixtures/solutions.
	paths = []string{
		filepath.Join(root, "alpha"),
		filepath.Join(root, "delta"),
		filepath.Join(root, "bravo"),
	}
	_, err = NewSolutions(paths)
	assert.Error(t, err)
}
|
// This Source Code Form is subject to the terms of the MIT License.
// If a copy of the MIT License was not distributed with this
// file, you can obtain one at https://opensource.org/licenses/MIT.
//
// Copyright (c) DUSK NETWORK. All rights reserved.
package agreement
import (
"testing"
crypto "github.com/dusk-network/dusk-crypto/hash"
"github.com/stretchr/testify/assert"
)
// TestAccumulatorProcessing spawns agreement events for a 50-member setup,
// feeds them into the accumulator, and checks the collected vote count.
func TestAccumulatorProcessing(t *testing.T) {
	nr := 50
	hlp := NewHelper(nr)
	hash, _ := crypto.RandEntropy(32)
	handler := NewHandler(hlp.Keys, *hlp.P)
	accumulator := newAccumulator(handler, 4)
	evs := hlp.Spawn(hash)
	for _, msg := range evs {
		accumulator.Process(msg)
	}
	// Blocks until the accumulator publishes the collected votes.
	accumulatedAggros := <-accumulator.CollectedVotesChan
	// 38 appears to be the expected vote count for nr=50 — NOTE(review):
	// confirm against the handler's quorum computation.
	assert.Equal(t, 38, len(accumulatedAggros))
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package settingswatcher
import (
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema"
"github.com/cockroachdb/cockroach/pkg/sql/row"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/errors"
)
// RowDecoder decodes rows from the settings table.
type RowDecoder struct {
codec keys.SQLCodec
alloc rowenc.DatumAlloc
colIdxMap catalog.TableColMap
}
// MakeRowDecoder makes a new RowDecoder for the settings table.
func MakeRowDecoder(codec keys.SQLCodec) RowDecoder {
return RowDecoder{
codec: codec,
colIdxMap: row.ColIDtoRowIndexFromCols(
systemschema.SettingsTable.TableDesc().Columns,
),
}
}
// DecodeRow decodes a row of the system.settings table. If the value is not
// present, the setting key will be returned but the other two fields will be
// zero and the tombstone bool will be set.
// DecodeRow decodes a row of the system.settings table. If the value is not
// present, the setting key will be returned but the other two fields will be
// zero and the tombstone bool will be set.
func (d *RowDecoder) DecodeRow(
	kv roachpb.KeyValue,
) (setting, val, valType string, tombstone bool, _ error) {
	tbl := systemschema.SettingsTable
	// First we need to decode the setting name field from the index key.
	{
		types := []*types.T{tbl.PublicColumns()[0].GetType()}
		nameRow := make([]rowenc.EncDatum, 1)
		_, matches, _, err := rowenc.DecodeIndexKey(d.codec, tbl, tbl.GetPrimaryIndex().IndexDesc(), types, nameRow, nil, kv.Key)
		if err != nil {
			return "", "", "", false, errors.Wrap(err, "failed to decode key")
		}
		if !matches {
			return "", "", "", false, errors.Errorf("unexpected non-settings KV with settings prefix: %v", kv.Key)
		}
		if err := nameRow[0].EnsureDecoded(types[0], &d.alloc); err != nil {
			return "", "", "", false, err
		}
		setting = string(tree.MustBeDString(nameRow[0].Datum))
	}
	// A key with no value is a deletion tombstone for the setting.
	if !kv.Value.IsPresent() {
		return setting, "", "", true, nil
	}
	// The rest of the columns are stored as a family, packed with diff-encoded
	// column IDs followed by their values.
	{
		// column valueType can be null (missing) so we default it to "s".
		valType = "s"
		bytes, err := kv.Value.GetTuple()
		if err != nil {
			return "", "", "", false, err
		}
		var colIDDiff uint32
		var lastColID descpb.ColumnID
		var res tree.Datum
		for len(bytes) > 0 {
			_, _, colIDDiff, _, err = encoding.DecodeValueTag(bytes)
			if err != nil {
				return "", "", "", false, err
			}
			// Column IDs are delta-encoded relative to the previous column.
			colID := lastColID + descpb.ColumnID(colIDDiff)
			lastColID = colID
			// NOTE(review): if colID is ever absent from colIdxMap, `bytes`
			// is not advanced and this loop cannot terminate — confirm all
			// settings-table columns are present in the map.
			if idx, ok := d.colIdxMap.Get(colID); ok {
				res, bytes, err = rowenc.DecodeTableValue(&d.alloc, tbl.PublicColumns()[idx].GetType(), bytes)
				if err != nil {
					return "", "", "", false, err
				}
				switch colID {
				case tbl.PublicColumns()[1].GetID(): // value
					val = string(tree.MustBeDString(res))
				case tbl.PublicColumns()[3].GetID(): // valueType
					valType = string(tree.MustBeDString(res))
				case tbl.PublicColumns()[2].GetID(): // lastUpdated
					// TODO(dt): we could decode just the len and then seek `bytes` past
					// it, without allocating/decoding the unused timestamp.
				default:
					return "", "", "", false, errors.Errorf("unknown column: %v", colID)
				}
			}
		}
	}
	return setting, val, valType, false, nil
}
|
package cache
import (
"context"
"github.com/twcclan/goback/backup"
"github.com/twcclan/goback/proto"
"github.com/twcclan/goback/storage/wrapped"
)
// Compile-time assertions that *Store implements both interfaces.
var _ backup.ObjectStore = (*Store)(nil)
var _ wrapped.Wrapper = (*Store)(nil)
// cacheable reports whether an object should be stored in the cache:
// only commit, tree and file objects qualify; nil objects never do.
func cacheable(obj *proto.Object) bool {
	if obj == nil {
		return false
	}
	switch obj.Type() {
	case proto.ObjectType_COMMIT, proto.ObjectType_TREE, proto.ObjectType_FILE:
		return true
	default:
		return false
	}
}
// New wraps the given object store with a cache layer. Objects passing the
// cacheable test (commits, trees, files) are mirrored into cache on writes
// and consulted first on reads.
func New(cache backup.ObjectStore, wrapped backup.ObjectStore) *Store {
	return &Store{
		cache: cache,
		wrapped: wrapped,
		test: cacheable,
	}
}
// Store is an ObjectStore that mirrors selected objects into a secondary
// cache store while delegating authoritative reads/writes to wrapped.
type Store struct {
	cache backup.ObjectStore
	wrapped backup.ObjectStore
	// test decides which objects are worth caching (see cacheable).
	test func(object *proto.Object) bool
}
// Unwrap returns the underlying (authoritative) object store.
func (s *Store) Unwrap() backup.ObjectStore { return s.wrapped }
// Put writes the object to the backing store and, on success, mirrors
// cacheable objects into the cache. Cache write failures are ignored: the
// backing store already holds the object.
func (s *Store) Put(ctx context.Context, object *proto.Object) error {
	if err := s.wrapped.Put(ctx, object); err != nil {
		return err
	}
	if s.test(object) {
		_ = s.cache.Put(ctx, object)
	}
	return nil
}
// Get first tries the cache (errors there are treated as misses) and falls
// back to the backing store.
func (s *Store) Get(ctx context.Context, ref *proto.Ref) (*proto.Object, error) {
	if cached, _ := s.cache.Get(ctx, ref); cached != nil {
		return cached, nil
	}
	return s.wrapped.Get(ctx, ref)
}
// Delete removes the object from the backing store and, on success, evicts it
// from the cache on a best-effort basis.
func (s *Store) Delete(ctx context.Context, ref *proto.Ref) error {
	if err := s.wrapped.Delete(ctx, ref); err != nil {
		return err
	}
	_ = s.cache.Delete(ctx, ref)
	return nil
}
// Walk passes straight through to the backing store; the cache is not consulted.
func (s *Store) Walk(ctx context.Context, b bool, objectType proto.ObjectType, receiver backup.ObjectReceiver) error {
	return s.wrapped.Walk(ctx, b, objectType, receiver)
}
// Has reports whether the object exists, answering from the cache when it
// affirmatively has the ref and otherwise asking the backing store.
func (s *Store) Has(ctx context.Context, ref *proto.Ref) (bool, error) {
	if has, err := s.cache.Has(ctx, ref); err == nil && has {
		return true, nil
	}
	return s.wrapped.Has(ctx, ref)
}
|
// Copyright 2017 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package engineccl
import (
"context"
"fmt"
"math/rand"
"os"
"path/filepath"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/cockroachdb/errors/oserror"
)
// loadTestData writes numKeys keys in numBatches separate batches. Keys are
// written in order. Every key in a given batch has the same MVCC timestamp;
// batch timestamps start at batchTimeSpan and increase in intervals of
// batchTimeSpan.
//
// Importantly, writing keys in order convinces RocksDB to output one SST per
// batch, where each SST contains keys of only one timestamp. E.g., writing A,B
// at t0 and C at t1 will create two SSTs: one for A,B that only contains keys
// at t0, and one for C that only contains keys at t1. Conversely, writing A, C
// at t0 and B at t1 would create just one SST that contained A,B,C (due to an
// immediate compaction).
//
// The creation of the database is time consuming, so the caller can choose
// whether to use a temporary or permanent location.
func loadTestData(
	dir string, numKeys, numBatches, batchTimeSpan, valueBytes int,
) (storage.Engine, error) {
	ctx := context.Background()
	exists := true
	if _, err := os.Stat(dir); oserror.IsNotExist(err) {
		exists = false
	}
	eng, err := storage.NewPebble(
		ctx,
		storage.PebbleConfig{
			StorageConfig: base.StorageConfig{
				Settings: cluster.MakeTestingClusterSettings(),
				Dir: dir,
			},
		},
	)
	if err != nil {
		return nil, err
	}
	if exists {
		// Reuse previously generated data; pre-read files to warm OS caches.
		testutils.ReadAllFiles(filepath.Join(dir, "*"))
		return eng, nil
	}
	log.Infof(context.Background(), "creating test data: %s", dir)
	// Generate the same data every time.
	rng := rand.New(rand.NewSource(1449168817))
	keys := make([]roachpb.Key, numKeys)
	for i := 0; i < numKeys; i++ {
		keys[i] = roachpb.Key(encoding.EncodeUvarintAscending([]byte("key-"), uint64(i)))
	}
	// One wall-time bucket per batch; all keys of a batch share it.
	sstTimestamps := make([]int64, numBatches)
	for i := 0; i < len(sstTimestamps); i++ {
		sstTimestamps[i] = int64((i + 1) * batchTimeSpan)
	}
	var batch storage.Batch
	var minWallTime int64
	for i, key := range keys {
		// Start a fresh batch every numKeys/numBatches keys, committing and
		// flushing the previous one so each batch lands in its own SST.
		if scaled := len(keys) / numBatches; (i % scaled) == 0 {
			if i > 0 {
				log.Infof(ctx, "committing (%d/~%d)", i/scaled, numBatches)
				if err := batch.Commit(false /* sync */); err != nil {
					return nil, err
				}
				batch.Close()
				if err := eng.Flush(); err != nil {
					return nil, err
				}
			}
			batch = eng.NewBatch()
			minWallTime = sstTimestamps[i/scaled]
		}
		// Jitter the timestamp within the batch's time span.
		timestamp := hlc.Timestamp{WallTime: minWallTime + rand.Int63n(int64(batchTimeSpan))}
		value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueBytes))
		value.InitChecksum(key)
		if err := storage.MVCCPut(ctx, batch, nil, key, timestamp, value, nil); err != nil {
			return nil, err
		}
	}
	// Commit and flush the final batch.
	if err := batch.Commit(false /* sync */); err != nil {
		return nil, err
	}
	batch.Close()
	if err := eng.Flush(); err != nil {
		return nil, err
	}
	return eng, nil
}
// runIterate benchmarks iteration over the entire keyspace within time bounds
// derived by the loadFactor. A loadFactor of 0.5 means that approximately 50%
// of the SSTs contain keys in the range [startTime, endTime].
// runIterate benchmarks iteration over the entire keyspace within time bounds
// derived by the loadFactor. A loadFactor of 0.5 means that approximately 50%
// of the SSTs contain keys in the range [startTime, endTime].
//
// Fix: the iterator was previously closed with a defer inside the b.N loop,
// which accumulates one pending defer (and one live iterator) per iteration
// until the whole function returns; the iterator is now closed explicitly at
// the end of each iteration.
func runIterate(
	b *testing.B,
	loadFactor float32,
	makeIterator func(storage.Engine, hlc.Timestamp, hlc.Timestamp) storage.MVCCIterator,
) {
	const numKeys = 100000
	const numBatches = 100
	const batchTimeSpan = 10
	const valueBytes = 512
	// Store the database in this directory so we don't have to regenerate it on
	// each benchmark run.
	eng, err := loadTestData("mvcc_data", numKeys, numBatches, batchTimeSpan, valueBytes)
	if err != nil {
		b.Fatal(err)
	}
	defer eng.Close()
	b.SetBytes(int64(numKeys * valueBytes))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		n := 0
		startTime := hlc.MinTimestamp
		endTime := hlc.Timestamp{WallTime: int64(loadFactor * numBatches * batchTimeSpan)}
		if endTime.IsEmpty() {
			endTime = endTime.Next()
		}
		it := makeIterator(eng, startTime, endTime)
		for it.SeekGE(storage.MVCCKey{Key: keys.LocalMax}); ; it.Next() {
			if ok, err := it.Valid(); !ok {
				if err != nil {
					it.Close()
					b.Fatal(err)
				}
				break
			}
			n++
		}
		// Close per iteration (not deferred) so iterators don't pile up
		// across b.N iterations.
		it.Close()
		if e := int(loadFactor * numKeys); n < e {
			b.Fatalf("expected at least %d keys, but got %d\n", e, n)
		}
	}
	b.StopTimer()
}
// BenchmarkTimeBoundIterate compares a normal MVCC iterator against a
// time-bound iterator (min/max timestamp hints) across several load factors,
// i.e. fractions of SSTs whose keys fall inside the queried time range.
func BenchmarkTimeBoundIterate(b *testing.B) {
	for _, loadFactor := range []float32{1.0, 0.5, 0.1, 0.05, 0.0} {
		b.Run(fmt.Sprintf("LoadFactor=%.2f", loadFactor), func(b *testing.B) {
			b.Run("NormalIterator", func(b *testing.B) {
				runIterate(b, loadFactor, func(e storage.Engine, _, _ hlc.Timestamp) storage.MVCCIterator {
					return e.NewMVCCIterator(storage.MVCCKeyAndIntentsIterKind, storage.IterOptions{UpperBound: roachpb.KeyMax})
				})
			})
			b.Run("TimeBoundIterator", func(b *testing.B) {
				runIterate(b, loadFactor, func(e storage.Engine, startTime, endTime hlc.Timestamp) storage.MVCCIterator {
					return e.NewMVCCIterator(storage.MVCCKeyIterKind, storage.IterOptions{
						MinTimestampHint: startTime,
						MaxTimestampHint: endTime,
						UpperBound: roachpb.KeyMax,
					})
				})
			})
		})
	}
}
|
package v3
import "github.com/cockroachdb/cockroach/pkg/sql/sem/types"
// scalarProps holds derived properties of a scalar expression.
type scalarProps struct {
	// Columns used by the scalar expression.
	inputCols bitmap
	// typ is the semantic type of the expression's result value.
	typ types.T
}
|
package solutions
// Trie is a prefix tree over lowercase ASCII words ('a'..'z').
type Trie struct {
	children [26]*Trie // one slot per letter; nil means the letter is absent
	isEndOfWord bool // true if a complete inserted word terminates at this node
	root *Trie // entry node all operations start from; only set on the value returned by Constructor
}
// Constructor returns an initialized, empty Trie.
func Constructor() Trie {
	// children and isEndOfWord take their zero values; only root needs
	// an explicit allocation.
	t := Trie{}
	t.root = &Trie{}
	return t
}
// Insert adds word to the trie. Empty words are ignored.
func (this *Trie) Insert(word string) {
	if word == "" {
		return
	}
	node := this.root
	// Walk (and create as needed) one child per letter.
	for _, ch := range []byte(word) {
		idx := ch - 'a'
		child := node.children[idx]
		if child == nil {
			child = &Trie{}
			node.children[idx] = child
		}
		node = child
	}
	node.isEndOfWord = true
}
// Search reports whether word was previously Inserted as a complete word.
// A mere prefix of an inserted word does not count.
func (this *Trie) Search(word string) bool {
	current := this.root
	for i := 0; i < len(word); i++ {
		index := word[i] - 'a'
		if current.children[index] == nil {
			return false
		}
		current = current.children[index]
	}
	// Reaching the node is not enough; it must terminate an inserted word.
	// (Replaces the original `if x == true { return true } return false`.)
	return current.isEndOfWord
}
// StartsWith reports whether any inserted word begins with prefix.
// The empty prefix is treated as a prefix of every word (the conventional
// contract; the original instead scanned the root's children for it).
func (this *Trie) StartsWith(prefix string) bool {
	// A plain walk suffices: if every letter of the prefix has a child
	// link, some inserted word starts with it. The original's len==1
	// special case, depth counter and trailing children scan were all
	// redundant or unreachable.
	current := this.root
	for i := 0; i < len(prefix); i++ {
		index := prefix[i] - 'a'
		if current.children[index] == nil {
			return false
		}
		current = current.children[index]
	}
	return true
}
|
package config
import (
"flag"
"fmt"
"io/ioutil"
"os"
"runtime"
"strings"
"github.com/derry6/gleafd/version"
yaml "gopkg.in/yaml.v2"
)
// parser combines the gleafd configuration with the CLI flag set and the
// optional YAML config file location.
type parser struct {
	Cfg *Config `yaml:"gleafd"`
	// CLI-only state below; excluded from YAML (un)marshalling.
	showVersion bool `yaml:"-"`
	flagSet *flag.FlagSet `yaml:"-"`
	fileName string `yaml:"-"`
}
// parseFromEnv fills every flag that was not set on the command line from
// its corresponding environment variable (envPrefix + upper-cased flag name
// with dashes replaced by underscores). The last Set error, if any, is
// returned.
func (p *parser) parseFromEnv() (err error) {
	explicit := map[string]bool{}
	p.flagSet.Visit(func(f *flag.Flag) { explicit[f.Name] = true })
	p.flagSet.VisitAll(func(f *flag.Flag) {
		if explicit[f.Name] {
			// Command-line value wins over the environment.
			return
		}
		envName := envPrefix + strings.ToUpper(strings.Replace(f.Name, "-", "_", -1))
		val, found := os.LookupEnv(envName)
		if !found {
			return
		}
		if ferr := f.Value.Set(val); ferr != nil {
			err = ferr
		}
	})
	return err
}
// parseFromFile loads the YAML configuration from fileName into p and then
// validates the result.
func (p *parser) parseFromFile(fileName string) error {
	var data []byte
	var err error
	if data, err = ioutil.ReadFile(fileName); err != nil {
		return err
	}
	if err = yaml.Unmarshal(data, p); err != nil {
		return err
	}
	return p.validate()
}
// validate checks the parsed configuration for consistency.
// Currently a placeholder that always succeeds.
func (p *parser) validate() error {
	return nil
}
// parse processes the CLI arguments: it rejects stray positional args,
// handles --version (prints build info and exits the process), then loads
// configuration from the config file when one was given, otherwise from
// environment variables.
func (p *parser) parse(args []string) error {
	if err := p.flagSet.Parse(args); err != nil {
		return err
	}
	if rest := p.flagSet.Args(); len(rest) != 0 {
		return fmt.Errorf("'%s' is not a valid flag", rest[0])
	}
	// Show versions
	if p.showVersion {
		fmt.Printf("Version: %s\n", version.Version)
		fmt.Printf("Git SHA: %s\n", version.GitSHA)
		fmt.Printf("Go Version: %s\n", runtime.Version())
		fmt.Printf("Go OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
		os.Exit(0)
	}
	if p.fileName == "" {
		return p.parseFromEnv()
	}
	return p.parseFromFile(p.fileName)
}
// Load registers all CLI flags, parses args (plus config-file/environment
// overrides) and returns the resulting configuration.
func Load(args []string) (*Config, error) {
	p := &parser{
		Cfg:     newConfig(),
		flagSet: flag.NewFlagSet("gleafd", flag.ExitOnError),
	}
	fs := p.flagSet
	fs.StringVar(&p.fileName, "config", "", "Location of server config file")
	fs.BoolVar(&p.showVersion, "version", false, "show version")
	// Server options.
	cfg := p.Cfg
	fs.StringVar(&cfg.Name, "name", cfg.Name, "Assign a name to the server")
	fs.StringVar(&cfg.Addr, "addr", cfg.Addr, "Listen address")
	fs.StringVar(&cfg.Log, "log", cfg.Log, "Log level [debug|info|warn|error|fatal]")
	// Segment allocator options.
	seg := &cfg.Segment
	fs.BoolVar(&seg.Enable, "segment-enable", seg.Enable, "Enable segment")
	fs.StringVar(&seg.DBHost, "segment-db-host", seg.DBHost, "")
	fs.StringVar(&seg.DBName, "segment-db-name", seg.DBName, "")
	fs.StringVar(&seg.DBUser, "segment-db-user", seg.DBUser, "")
	fs.StringVar(&seg.DBPass, "segment-db-pass", seg.DBPass, "")
	// Snowflake generator options.
	sf := &cfg.Snowflake
	fs.BoolVar(&sf.Enable, "snowflake-enable", sf.Enable, "")
	fs.StringVar(&sf.RedisAddresss, "snowflake-redis-addr", sf.RedisAddresss, "")
	if err := p.parse(args); err != nil {
		return nil, err
	}
	return p.Cfg, nil
}
|
package models
import (
"../utils"
)
// GetUserInfo returns the value stored in the `setting` table for the given
// key, or the empty string when the key is the literal "key".
// NOTE(review): despite the name, this reads site settings rather than user
// data — consider renaming. The `key == "key"` guard looks like a leftover
// sentinel; confirm why that exact key is rejected. On query failure the
// error is handed to utils.CheckErr and the zero-value string is returned.
func GetUserInfo(key string) string {
	if key == "key" {
		return ""
	}
	var val string
	// Run the parameterized statement; `key` is bound, not concatenated.
	err := db.QueryRow("SELECT `val` FROM `setting` WHERE `key` = ? LIMIT 1", key).Scan(&val)
	utils.CheckErr(err)
	return val
}
// EditSiteInfo updates the site's name, description, author and avatar in
// the `setting` table. It returns false without writing anything when any
// field is empty, true otherwise. Individual statement errors are reported
// via utils.CheckErr.
func EditSiteInfo(name string, desc string, author string, avatar string) bool {
	if name == "" || desc == "" || author == "" || avatar == "" {
		return false
	}
	const stmt = "UPDATE `setting` SET `val` = ? WHERE `key` = ? LIMIT 1"
	// Apply the four updates in a fixed order, one statement per key.
	updates := []struct{ key, val string }{
		{"name", name},
		{"desc", desc},
		{"author", author},
		{"avatar", avatar},
	}
	for _, u := range updates {
		_, err := db.Exec(stmt, u.val, u.key)
		utils.CheckErr(err)
	}
	return true
}
|
package delta
import (
"bytes"
"encoding/json"
//"time"
"github.com/adamar/delta-server/models"
)
// BuildEvent assembles a models.Event for the given device serial, native
// timestamp, message type and payload map. The payload is JSON-encoded into
// the event's Data field. It panics when UUID generation or JSON encoding
// fails.
func BuildEvent(serial string, native string, msgType string, data map[string]string) *models.Event {
	uuid, err := GenUuid()
	if err != nil {
		panic(err)
	}
	timestamp := GenTimeStamp()
	// Encode the payload into an in-memory buffer.
	var encoded bytes.Buffer
	if err = json.NewEncoder(&encoded).Encode(data); err != nil {
		panic(err)
	}
	return &models.Event{
		Uuid:            uuid,
		Serial:          serial,
		TimeStamp:       timestamp,
		NativeTimeStamp: native,
		EventType:       msgType,
		Data:            encoded.String(),
	}
}
|
package cberrors
import "sync"
// ErrorsContainer fans errors and recovered panics out to a set of
// ErrorProvider backends, with an optional suppression switch.
type ErrorsContainer struct {
	providers []ErrorProvider
	// suppressErrors disables Error() forwarding while true.
	suppressErrors bool
}
// NewErrorContainer builds an ErrorsContainer reporting to the given providers.
func NewErrorContainer(providers ...ErrorProvider) *ErrorsContainer {
	c := new(ErrorsContainer)
	c.providers = providers
	return c
}
// ErrorProvider is a reporting backend used by ErrorsContainer.
type ErrorProvider interface {
	// Error reports a regular error.
	Error(e error)
	// Recover reports a recovered panic value.
	Recover(e interface{})
	// Defer flushes/closes the provider; invoked once during MainDefer.
	Defer()
}
// AddProvider registers an additional reporting backend.
func (c *ErrorsContainer) AddProvider(provider ErrorProvider) {
	c.providers = append(c.providers, provider)
}
// MainDefer is intended to be deferred from main: it reports any in-flight
// panic to all providers, then runs every provider's Defer concurrently and
// waits for them all to finish.
//
// NOTE(review): recover() only observes a panic when called directly by the
// deferred function; routed through c.Recover() here it may see nil when
// MainDefer itself is the deferred call — confirm intended usage.
func (c *ErrorsContainer) MainDefer() {
	c.Recover()
	wg := sync.WaitGroup{}
	for _, provider := range c.providers {
		wg.Add(1)
		// Pass the provider as an argument: before Go 1.22 the loop
		// variable is shared across iterations, so capturing it in the
		// goroutine closure could run Defer on the wrong provider (or on
		// the same one several times).
		go func(p ErrorProvider) {
			p.Defer()
			wg.Done()
		}(provider)
	}
	wg.Wait()
}
// Suppress turns off Error() forwarding until Unsupress is called.
func (c *ErrorsContainer) Suppress() {
	c.suppressErrors = true
}
// Unsupress re-enables Error() forwarding.
// NOTE(review): the name is misspelled ("Unsuppress") but is exported API;
// renaming would break callers — consider adding a correctly spelled alias
// and deprecating this one.
func (c *ErrorsContainer) Unsupress() {
	c.suppressErrors = false
}
// Error forwards e to every provider unless suppression is enabled. A
// panicking provider is swallowed so one bad backend cannot take the
// process down or prevent the remaining providers from being notified.
func (c *ErrorsContainer) Error(e error) {
	if c.suppressErrors {
		return
	}
	notify := func(p ErrorProvider) {
		defer func() {
			recover() // ignore provider panics
		}()
		p.Error(e)
	}
	for _, provider := range c.providers {
		notify(provider)
	}
}
// Recover captures the current panic value (if any) and reports it to every
// provider, swallowing panics raised by the providers themselves. It must be
// the deferred call itself for recover() to observe the panic.
func (c *ErrorsContainer) Recover() {
	e := recover()
	if e == nil {
		return
	}
	forward := func(p ErrorProvider) {
		defer func() {
			recover() // ignore provider panics
		}()
		p.Recover(e)
	}
	for _, provider := range c.providers {
		forward(provider)
	}
}
|
package config
import (
"fmt"
"path/filepath"
"testing"
"github.com/xuperchain/xupercore/lib/utils"
)
func TestLoadEngineConf(t *testing.T) {
engCfg, err := LoadEngineConf(getConfFile())
if err != nil {
t.Fatal(err)
}
fmt.Println(engCfg)
}
// getConfFile returns the path of the engine config fixture that lives next
// to this test file.
func getConfFile() string {
	return filepath.Join(utils.GetCurFileDir(), "conf/engine.yaml")
}
|
package tree
import (
"fmt"
"testing"
"github.com/deepak-muley/golangexamples/data-structures/tree"
)
// Test_tree_traversals builds the expression tree for (3 + 4) * 6 and
// prints the nodes followed by the in-, pre- and post-order traversals.
// Output is inspected manually; there are no assertions.
func Test_tree_traversals(t *testing.T) {
	// (3 + 4) * 6
	//      *
	//  +       6
	//3   4
	root := new(tree.TreeNode)
	root.SetValue("*")
	root.AddRightChild("6")
	plusNode := root.AddLeftChild("+")
	plusNode.AddLeftChild("3")
	plusNode.AddRightChild("4")
	fmt.Println("Printing Tree")
	root.PrintNode()
	plusNode.PrintNode()
	fmt.Println("Printing Inorder")
	tree.PrintInorder(root)
	fmt.Println()
	fmt.Println("Printing Preorder")
	tree.PrintPreorder(root)
	fmt.Println()
	fmt.Println("Printing Postorder")
	tree.PrintPostorder(root)
	fmt.Println()
}
|
/*
* Flow CLI
*
* Copyright 2019-2021 Dapper Labs, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package accounts
import (
"fmt"
"github.com/spf13/cobra"
"github.com/onflow/flow-cli/internal/command"
"github.com/onflow/flow-cli/pkg/flowcli/services"
)
// flagsGet holds the CLI flags for the `accounts get` command.
type flagsGet struct {
	Contracts bool `default:"false" flag:"contracts" info:"Display contracts deployed to the account"`
	Code bool `default:"false" flag:"code" info:"⚠️ Deprecated: use contracts flag instead"`
}
// getFlags holds the parsed flag values for GetCommand.
var getFlags = flagsGet{}

// GetCommand implements `flow accounts get <address>`: it fetches the
// account at the given address and renders it, optionally with its
// deployed contracts.
var GetCommand = &command.Command{
	Cmd: &cobra.Command{
		Use:   "get <address>",
		Short: "Gets an account by address",
		Args:  cobra.ExactArgs(1),
	},
	Flags: &getFlags,
	Run: func(
		cmd *cobra.Command,
		args []string,
		globalFlags command.GlobalFlags,
		services *services.Services,
	) (command.Result, error) {
		// --code still works but is deprecated in favor of --contracts.
		if getFlags.Code {
			fmt.Println("⚠️ DEPRECATION WARNING: use contracts flag instead")
		}
		account, err := services.Accounts.Get(args[0]) // address
		if err != nil {
			return nil, err
		}
		return &AccountResult{
			Account:  account,
			showCode: getFlags.Contracts || getFlags.Code,
		}, nil
	},
}
|
package leetcode_1360_日期之间隔几天
import (
"strconv"
"strings"
"time"
)
/*
请你编写一个程序来计算两个日期之间隔了多少天。
日期以字符串形式给出,格式为 YYYY-MM-DD,如示例所示。
示例 1:
输入:date1 = "2019-06-29", date2 = "2019-06-30"
输出:1
示例 2:
输入:date1 = "2020-01-15", date2 = "2019-12-31"
输出:15
提示:
给定的日期是 1971 年到 2100 年之间的有效日期。
*/
// Days per month in a non-leap year (index 0 is unused).
var months = []int{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}
// daysBetweenDates returns the absolute number of days between two
// YYYY-MM-DD dates by comparing their day counts since 1971-01-01.
func daysBetweenDates(date1 string, date2 string) int {
	diff := getDays(date1) - getDays(date2)
	if diff >= 0 {
		return diff
	}
	return -diff
}
// isLeap returns 1 when year is a leap year and 0 otherwise.
// Leap years: divisible by 4 but not by 100, or divisible by 400.
func isLeap(year int) int {
	switch {
	case year%400 == 0:
		return 1
	case year%100 == 0:
		return 0
	case year%4 == 0:
		return 1
	default:
		return 0
	}
}
// getDays returns the number of days from 1971-01-01 up to the given
// YYYY-MM-DD date, counting the day-of-month itself.
func getDays(date string) int {
	parts := strings.Split(date, "-")
	year, _ := strconv.Atoi(parts[0])
	month, _ := strconv.Atoi(parts[1])
	day, _ := strconv.Atoi(parts[2])
	total := day
	// Add the full years preceding the target year.
	for y := 1971; y < year; y++ {
		total += 365 + isLeap(y)
	}
	// Add the completed months of the target year; February's length
	// depends on whether the year is a leap year.
	for m := 1; m < month; m++ {
		if m == 2 {
			total += 28 + isLeap(year)
		} else {
			total += months[m]
		}
	}
	return total
}
// daysBetweenDates2 solves the same problem with the standard library:
// parse both dates and divide the absolute duration by 24 hours.
func daysBetweenDates2(date1 string, date2 string) int {
	t1, _ := time.Parse("2006-01-02", date1)
	t2, _ := time.Parse("2006-01-02", date2)
	diff := t1.Sub(t2)
	if diff < 0 {
		diff = -diff
	}
	return int(diff.Hours()) / 24
}
|
// Copyright (C) 2016-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package services_test
import (
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"strings"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/pivotal-cf/brokerapi/v10/domain"
"github.com/pivotal-cf/on-demand-service-broker/authorizationheader/fakes"
"github.com/pivotal-cf/on-demand-service-broker/broker"
"github.com/pivotal-cf/on-demand-service-broker/broker/services"
"github.com/pivotal-cf/on-demand-service-broker/loggerfactory"
"github.com/pivotal-cf/on-demand-service-broker/mgmtapi"
"github.com/pivotal-cf/on-demand-service-broker/service"
fakeclients "github.com/pivotal-cf/on-demand-service-broker/broker/services/fakes"
)
// This suite exercises services.BrokerServices against a fake HTTP client
// and a fake auth-header builder — no real broker is contacted. Each
// Describe below covers one BrokerServices method.
var _ = Describe("Broker Services", func() {
	const (
		serviceInstanceGUID = "my-service-instance"
		operationType = "some-process"
	)
	// Shared collaborators; rebuilt in BeforeEach so specs stay independent.
	var (
		brokerServices *services.BrokerServices
		client *fakeclients.FakeHTTPClient
		authHeaderBuilder *fakes.FakeAuthHeaderBuilder
		logger *log.Logger
	)
	BeforeEach(func() {
		client = new(fakeclients.FakeHTTPClient)
		authHeaderBuilder = new(fakes.FakeAuthHeaderBuilder)
		loggerFactory := loggerfactory.New(os.Stdout, "broker-services-test", loggerfactory.Flags)
		logger = loggerFactory.New()
	})
	// ProcessInstance PATCHes /mgmt/service_instances/<guid>?operation_type=...
	Describe("ProcessInstance", func() {
		It("returns an bosh operation", func() {
			spaceID := "space-id"
			brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://test.test", logger)
			planUniqueID := "unique_plan_id"
			expectedBody := fmt.Sprintf(`{"plan_id": "%s", "context":{"space_guid":"%s"}}`, planUniqueID, spaceID)
			client.DoReturns(response(http.StatusNotFound, ""), nil)
			upgradeOperation, err := brokerServices.ProcessInstance(
				service.Instance{
					GUID:         serviceInstanceGUID,
					PlanUniqueID: planUniqueID,
					SpaceGUID:    spaceID,
				}, operationType)
			Expect(err).NotTo(HaveOccurred())
			request := client.DoArgsForCall(0)
			Expect(request.Method).To(Equal(http.MethodPatch))
			body, err := ioutil.ReadAll(request.Body)
			Expect(err).NotTo(HaveOccurred())
			Expect(request.URL.Path).To(Equal("/mgmt/service_instances/" + serviceInstanceGUID))
			Expect(request.URL.Query()).To(Equal(url.Values{"operation_type": {operationType}}))
			// A 404 from the broker maps to an InstanceNotFound operation.
			Expect(upgradeOperation.Type).To(Equal(services.InstanceNotFound))
			Expect(string(body)).To(Equal(expectedBody))
		})
		It("returns an error when a new request fails to build", func() {
			// Deliberately malformed base URL.
			brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "$!%#%!@#$!@%", logger)
			_, err := brokerServices.ProcessInstance(service.Instance{
				GUID:         serviceInstanceGUID,
				PlanUniqueID: "unique_plan_id",
			}, operationType)
			Expect(err).To(HaveOccurred())
		})
		It("returns an error when cannot add the authentication header", func() {
			authHeaderBuilder.AddAuthHeaderReturns(errors.New("oops"))
			brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://test.test", logger)
			_, err := brokerServices.ProcessInstance(service.Instance{
				GUID:         serviceInstanceGUID,
				PlanUniqueID: "unique_plan_id",
			}, operationType)
			Expect(err).To(HaveOccurred())
		})
		Context("when the request fails", func() {
			It("returns an error", func() {
				brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://test.test", logger)
				client.DoReturns(nil, errors.New("connection error"))
				_, err := brokerServices.ProcessInstance(service.Instance{
					GUID:         serviceInstanceGUID,
					PlanUniqueID: "",
				}, operationType)
				Expect(err).To(HaveOccurred())
			})
		})
		Context("when the broker responds with an error", func() {
			It("returns an error", func() {
				brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://test.test", logger)
				client.DoReturns(response(http.StatusInternalServerError, "error upgrading instance"), nil)
				_, err := brokerServices.ProcessInstance(service.Instance{
					GUID:         serviceInstanceGUID,
					PlanUniqueID: "",
				}, operationType)
				Expect(err).To(HaveOccurred())
			})
		})
	})
	// LastOperation GETs /v2/service_instances/<guid>/last_operation with the
	// operation data JSON-encoded in the query string.
	Describe("LastOperation", func() {
		It("returns a last operation", func() {
			brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://test.test", logger)
			operationData := broker.OperationData{
				BoshTaskID:    1,
				BoshContextID: "context-id",
				OperationType: broker.OperationTypeUpgrade,
				PlanID:        "plan-id",
			}
			client.DoReturns(response(http.StatusOK, `{"state":"in progress","description":"upgrade in progress"}`), nil)
			lastOperation, err := brokerServices.LastOperation(serviceInstanceGUID, operationData)
			Expect(err).NotTo(HaveOccurred())
			request := client.DoArgsForCall(0)
			Expect(request.Method).To(Equal(http.MethodGet))
			Expect(err).NotTo(HaveOccurred())
			Expect(request.URL.Path).To(Equal("/v2/service_instances/" + serviceInstanceGUID + "/last_operation"))
			query, err := url.ParseQuery(request.URL.RawQuery)
			Expect(err).NotTo(HaveOccurred())
			Expect(query).To(Equal(url.Values{
				"operation": []string{`{"BoshTaskID":1,"BoshContextID":"context-id","OperationType":"upgrade","PlanID":"plan-id","PostDeployErrand":{},"PreDeleteErrand":{}}`},
			}))
			Expect(lastOperation).To(Equal(
				domain.LastOperation{State: domain.InProgress, Description: "upgrade in progress"}),
			)
		})
		It("returns an error when a new request fails to build", func() {
			brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "$!%#%!@#$!@%", logger)
			_, err := brokerServices.LastOperation(serviceInstanceGUID, broker.OperationData{})
			Expect(err).To(HaveOccurred())
		})
		It("returns an error when cannot add the authentication header", func() {
			authHeaderBuilder.AddAuthHeaderReturns(errors.New("oops"))
			brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://test.test", logger)
			_, err := brokerServices.LastOperation(serviceInstanceGUID, broker.OperationData{})
			Expect(err).To(HaveOccurred())
		})
		Context("when the request fails", func() {
			It("returns an error", func() {
				brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://test.test", logger)
				client.DoReturns(nil, errors.New("connection error"))
				_, err := brokerServices.LastOperation(serviceInstanceGUID, broker.OperationData{})
				Expect(err).To(HaveOccurred())
			})
		})
		Context("when the broker response is unrecognised", func() {
			It("returns an error", func() {
				brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://test.test", logger)
				client.DoReturns(response(http.StatusOK, "invalid json"), nil)
				_, err := brokerServices.LastOperation(serviceInstanceGUID, broker.OperationData{})
				Expect(err).To(HaveOccurred())
			})
		})
	})
	// OrphanDeployments GETs /mgmt/orphan_deployments.
	Describe("OrphanDeployments", func() {
		It("returns a list of orphan deployments", func() {
			brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://test.test", logger)
			listOfDeployments := `[{"deployment_name":"service-instance_one"},{"deployment_name":"service-instance_two"}]`
			client.DoReturns(response(http.StatusOK, listOfDeployments), nil)
			instances, err := brokerServices.OrphanDeployments()
			Expect(err).NotTo(HaveOccurred())
			request := client.DoArgsForCall(0)
			Expect(request.Method).To(Equal(http.MethodGet))
			Expect(request.URL.Path).To(Equal("/mgmt/orphan_deployments"))
			Expect(instances).To(ConsistOf(
				mgmtapi.Deployment{Name: "service-instance_one"},
				mgmtapi.Deployment{Name: "service-instance_two"},
			))
		})
		It("returns an error when a new request fails to build", func() {
			brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "$!%#%!@#$!@%", logger)
			_, err := brokerServices.OrphanDeployments()
			Expect(err).To(HaveOccurred())
		})
		It("returns an error when cannot add the authentication header", func() {
			authHeaderBuilder.AddAuthHeaderReturns(errors.New("oops"))
			brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://test.test", logger)
			_, err := brokerServices.OrphanDeployments()
			Expect(err).To(HaveOccurred())
		})
		Context("when the request fails", func() {
			It("returns an error", func() {
				brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://test.test", logger)
				client.DoReturns(nil, errors.New("connection error"))
				_, err := brokerServices.OrphanDeployments()
				Expect(err).To(HaveOccurred())
			})
		})
		Context("when the broker response is unrecognised", func() {
			It("returns an error", func() {
				brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://test.test", logger)
				client.DoReturns(response(http.StatusOK, "invalid json"), nil)
				_, err := brokerServices.OrphanDeployments()
				Expect(err).To(HaveOccurred())
			})
		})
	})
	// Instances with filter params: GET /mgmt/service_instances?org=...&space=...
	Describe("FilterInstances", func() {
		It("returns the list of instances when called", func() {
			host := "test.test"
			brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://"+host, logger)
			client.DoReturns(response(http.StatusOK, `[{"service_instance_id": "foo", "plan_id": "plan"}, {"service_instance_id": "bar", "plan_id": "another-plan"}]`), nil)
			params := map[string]string{
				"org":   "my-org",
				"space": "my-space",
			}
			filteredInstances, err := brokerServices.Instances(params)
			Expect(err).NotTo(HaveOccurred())
			Expect(client.DoCallCount()).To(Equal(1))
			req := client.DoArgsForCall(0)
			Expect(req.URL.RawQuery).To(Equal("org=my-org&space=my-space"))
			Expect(req.URL.Host).To(Equal(host))
			Expect(req.URL.Path).To(Equal("/mgmt/service_instances"))
			Expect(authHeaderBuilder.AddAuthHeaderCallCount()).To(Equal(1))
			authReq, authLogger := authHeaderBuilder.AddAuthHeaderArgsForCall(0)
			Expect(authReq).To(Equal(req))
			Expect(authLogger).To(Equal(logger))
			Expect(filteredInstances).To(Equal([]service.Instance{
				{GUID: "foo", PlanUniqueID: "plan"},
				{GUID: "bar", PlanUniqueID: "another-plan"},
			}))
		})
		It("returns error when request to mgmt endpoint fails to complete", func() {
			brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://test.test", logger)
			expectedError := errors.New("connection error")
			client.DoReturns(nil, expectedError)
			_, err := brokerServices.Instances(map[string]string{})
			Expect(err).To(HaveOccurred())
			Expect(err).To(MatchError(expectedError))
		})
		It("returns error when mgmt endpoint returns invalid response", func() {
			brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://test.test", logger)
			client.DoReturns(response(http.StatusBadRequest, ""), nil)
			_, err := brokerServices.Instances(map[string]string{})
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring("failed to get service instances"))
			Expect(err.Error()).To(ContainSubstring("status code: 400"))
		})
		It("returns error when mgmt endpoint returns invalid response", func() {
			brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://test.test", logger)
			client.DoReturns(response(http.StatusOK, `[{"not-a-valid-instance-json": "foo"]`), nil)
			_, err := brokerServices.Instances(map[string]string{})
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring("failed to decode service instance response body with error"))
		})
	})
	// Instances without params: GET /mgmt/service_instances (no query string).
	Describe("Instances", func() {
		It("returns the list of instances when called", func() {
			host := "test.test"
			brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://"+host, logger)
			client.DoReturns(response(http.StatusOK, `[{"service_instance_id": "foo", "plan_id": "plan", "space_guid": "space_id"}, {"service_instance_id": "bar", "plan_id": "another-plan", "space_guid": "space_id"}]`), nil)
			instances, err := brokerServices.Instances(nil)
			Expect(err).NotTo(HaveOccurred())
			Expect(client.DoCallCount()).To(Equal(1))
			req := client.DoArgsForCall(0)
			Expect(req.URL.RawQuery).To(Equal(""))
			Expect(req.URL.Host).To(Equal(host))
			Expect(req.URL.Path).To(Equal("/mgmt/service_instances"))
			Expect(authHeaderBuilder.AddAuthHeaderCallCount()).To(Equal(1))
			authReq, authLogger := authHeaderBuilder.AddAuthHeaderArgsForCall(0)
			Expect(authReq).To(Equal(req))
			Expect(authLogger).To(Equal(logger))
			Expect(instances).To(Equal([]service.Instance{
				service.Instance{
					GUID:         "foo",
					PlanUniqueID: "plan",
					SpaceGUID:    "space_id",
				},
				service.Instance{
					GUID:         "bar",
					PlanUniqueID: "another-plan",
					SpaceGUID:    "space_id",
				},
			}))
		})
		It("returns error when request to mgmt endpoint fails", func() {
			brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://test.test", logger)
			expectedError := errors.New("connection error")
			client.DoReturns(nil, expectedError)
			_, err := brokerServices.Instances(nil)
			Expect(err).To(HaveOccurred())
			Expect(err).To(MatchError(expectedError))
		})
		It("returns error when mgmt endpoint returns invalid response", func() {
			brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://test.test", logger)
			client.DoReturns(response(http.StatusOK, `[{"not-a-valid-instance-json": "foo"]`), nil)
			_, err := brokerServices.Instances(nil)
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring("failed to decode service instance response body with error"))
		})
	})
	// LatestInstanceInfo re-fetches the instance list and picks one GUID.
	Describe("LatestInstanceInfo", func() {
		It("refreshes an instance", func() {
			client.DoReturnsOnCall(0, response(http.StatusOK, `[{"service_instance_id": "foo", "plan_id": "plan"}, {"service_instance_id": "bar", "plan_id": "another-plan"}]`), nil)
			client.DoReturnsOnCall(1, response(http.StatusOK, `[{"service_instance_id": "foo", "plan_id": "plan2"}, {"service_instance_id": "bar", "plan_id": "another-plan"}]`), nil)
			brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://test.test", logger)
			instance, err := brokerServices.LatestInstanceInfo(service.Instance{GUID: "foo"})
			Expect(err).NotTo(HaveOccurred())
			Expect(instance).To(Equal(service.Instance{GUID: "foo", PlanUniqueID: "plan"}))
			instance, err = brokerServices.LatestInstanceInfo(service.Instance{GUID: "foo"})
			Expect(err).NotTo(HaveOccurred())
			Expect(instance).To(Equal(service.Instance{GUID: "foo", PlanUniqueID: "plan2"}))
		})
		It("returns a instance not found error when instance is not found", func() {
			client.DoReturns(response(http.StatusOK, `[{"service_instance_id": "foo", "plan_id": "plan"}, {"service_instance_id": "bar", "plan_id": "another-plan"}]`), nil)
			brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://test.test", logger)
			_, err := brokerServices.LatestInstanceInfo(service.Instance{GUID: "qux"})
			Expect(err).To(Equal(service.InstanceNotFound))
		})
		It("returns an error when pulling the list of instances fail", func() {
			client.DoReturns(response(http.StatusServiceUnavailable, ""), nil)
			brokerServices = services.NewBrokerServices(client, authHeaderBuilder, "http://test.test", logger)
			instance, err := brokerServices.LatestInstanceInfo(service.Instance{GUID: "foo"})
			Expect(instance).To(Equal(service.Instance{}))
			Expect(err).To(HaveOccurred())
			Expect(err).To(MatchError(ContainSubstring("failed to get service instances")))
			Expect(err).To(MatchError(ContainSubstring("status code: 503")))
		})
	})
})
// response builds a minimal *http.Response carrying the given status code
// and a readable body, for stubbing the fake HTTP client.
func response(statusCode int, body string) *http.Response {
	resp := &http.Response{StatusCode: statusCode}
	resp.Body = ioutil.NopCloser(strings.NewReader(body))
	return resp
}
|
package main
import "fmt"
// person is a named individual with embedded contact details.
type person struct {
	firstName string
	lastName string
	// contactinfo is embedded, so its fields are promoted onto person.
	contactinfo
}

// contactinfo carries the contact details of a person.
type contactinfo struct {
	email string
	zip int
}
// main demonstrates struct embedding and pointer vs value receivers.
func main() {
	alex := person{
		firstName: "prabhaker",
		lastName:  "saxena",
		contactinfo: contactinfo{
			email: "abc@gmail.com",
			zip:   4,
		},
	}
	// update takes a pointer receiver, so alex.update(...) mutates alex.
	alex.update("prabhu")
	// details takes a value receiver and prints a copy.
	alex.details()
}
// update renames the person in place; the pointer receiver makes the change
// visible to the caller (alex.update(...) implicitly takes &alex).
func (p *person) update(newname string) {
	p.firstName = newname
	//or
	//(*p).firstName=newname
}
// details prints all fields with %+v; the value receiver operates on a copy.
func (p person) details() {
	fmt.Printf("%+v", p)
}
|
/*
Copyright 2021 The KServe Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"io/ioutil"
"os"
"path"
"syscall"
"testing"
"github.com/onsi/gomega"
)
// TestCreate verifies that Create makes any missing parent directories with
// fully-open (0777) permissions.
func TestCreate(t *testing.T) {
	g := gomega.NewGomegaWithT(t)
	// This would get called in StartPullerAndProcessModels
	syscall.Umask(0)
	tmpDir, _ := ioutil.TempDir("", "test-create-")
	defer os.RemoveAll(tmpDir)
	folderPath := path.Join(tmpDir, "foo")
	filePath := path.Join(folderPath, "bar.txt")
	f, err := Create(filePath)
	// Assert the error before deferring Close: the original deferred
	// f.Close() first, which would panic on a nil f if Create failed and
	// mask the real test failure.
	g.Expect(err).To(gomega.BeNil())
	defer f.Close()
	g.Expect(folderPath).To(gomega.BeADirectory())
	info, _ := os.Stat(folderPath)
	mode := info.Mode()
	expectedMode := os.FileMode(0777)
	g.Expect(mode.Perm()).To(gomega.Equal(expectedMode))
}
|
// Copyright 2017 Zhang Peihao <zhangpeihao@gmail.com>
// Package register 注册Broker
package register
import (
"github.com/golang/glog"
"github.com/zhangpeihao/zim/pkg/broker"
)
// NewBrokerHandler constructs a Broker; the argument is a viper parameter prefix.
type NewBrokerHandler func(string) (broker.Broker, error)

var (
	// brokerHandlers maps a broker name to its registered constructor.
	brokerHandlers = make(map[string]NewBrokerHandler)
)
// Register records a broker constructor under name, logging a warning (and
// then overwriting) when the name was already registered.
func Register(name string, handler NewBrokerHandler) {
	if _, found := brokerHandlers[name]; found {
		// The original call had a %s verb but no argument, so the log line
		// printed "%!s(MISSING)"; pass name explicitly.
		glog.Warningf("broker::Register() Broker[%s] existed\n", name)
	}
	brokerHandlers[name] = handler
}
// Init constructs every registered broker using the given viper prefix and
// installs it via broker.Set. The first construction error aborts
// initialization and is returned.
func Init(viperPerfix string) error {
	for name, newBroker := range brokerHandlers {
		glog.Infof("broker::Init() Init broker[%s]\n", name)
		instance, err := newBroker(viperPerfix)
		if err != nil {
			glog.Errorf("broker::Init() init broker[%s] init error: %s", name, err)
			return err
		}
		broker.Set(name, instance)
	}
	return nil
}
|
package tools
import (
"io/ioutil"
"os"
)
// PathExists reports whether file exists. It returns (true, nil) when the
// path exists, (false, nil) when it definitely does not exist, and
// (false, err) for any other stat failure (e.g. permission errors).
func PathExists(file string) (ret bool, err error) {
	if _, err = os.Stat(file); err == nil {
		return true, nil
	}
	// The original returned early on ANY error, making its os.IsNotExist
	// branch unreachable and reporting "not found" as an error.
	if os.IsNotExist(err) {
		return false, nil
	}
	return false, err
}
// ReadFile returns the entire contents of file.
// Thin wrapper over ioutil.ReadFile, kept for API symmetry with PathExists.
func ReadFile(file string) (bytes []byte, err error) {
	return ioutil.ReadFile(file)
}
|
package main
import "fmt"
// main demonstrates multiple assignment and formatted printing.
func main() {
	var (
		name  = "Goku"
		power = 9000
	)
	fmt.Printf("%s's power is over %d\n", name, power)
}
|
package main
import (
"fmt"
"log"
"net"
"net/rpc"
"net/rpc/jsonrpc"
"os"
"strconv"
"strings"
)
// Args is the buy request sent to the server: a total budget and the
// per-symbol percentage split (values are expected to sum to 100).
type Args struct {
	Budget float64
	StockpercentMap map[string]int
}
// PortfolioResponsedata is the server's reply for a portfolio view.
type PortfolioResponsedata struct {
	//E.g. “GOOG:100:+$520.25”, “YHOO:200:-$30.40”
	Stocksbought string
	CurrentMarketValue float64
	UnvestedAmount float64
}
// Buyresponse is the server's reply for a buy order: the trade ID, the
// stocks purchased, and the leftover (unvested) amount.
type Buyresponse struct {
	TradeID int
	Stocksbought string
	UnvestedAmount float64
}
//input2 for passing trading id
var input2 int

// Package-level RPC state. NOTE(review): main shadows client and c with
// locals, so these globals mostly stay nil; err is the only one the option
// handlers actually reuse — consider removing the rest.
var c *rpc.Client
var err error
var client net.Conn
// main connects to the stock-trading JSON-RPC server at 127.0.0.1:1234 and
// loops over a simple text menu: buy stocks or view a portfolio by trade ID.
func main() {
	// establishConnection()
	client, dialErr := net.Dial("tcp", "127.0.0.1:1234")
	// The original discarded the Dial error, so a failed connection only
	// surfaced later as a confusing nil-client RPC failure.
	if dialErr != nil {
		log.Fatal("dialing:", dialErr)
	}
	c := jsonrpc.NewClient(client)
	fmt.Println("Welcome to the Yahoo Finance API .What would you like to do ?")
	for {
		var option int
		fmt.Println("SELECT:")
		fmt.Println("1. Buy the stocks from market.")
		fmt.Println("2. Check the Portfolio Loss/Gain")
		fmt.Scan(&option)
		fmt.Println("=============================================")
		switch option {
		case 1:
			optionfirst(client, c)
		case 2:
			optionsecond(client, c)
		default:
			fmt.Println("You have entered wrong option")
		}
	}
}
// optionfirst handles menu option 1: it reads a comma-separated list of
// "SYMBOL:NN%" allocations plus a budget, validates that the percentages
// sum to 100, then calls StockMarket.BuyingStocks over JSON-RPC and prints
// the resulting trade summary.
func optionfirst(client net.Conn, c *rpc.Client) {
	var stockipstr string
	var Budget float64
	fmt.Println("Enter the stock symbol and the percentage of your budget you wish to allocate with them")
	fmt.Scan(&stockipstr)
	fmt.Println("Enter your budget for this trasaction")
	fmt.Scan(&Budget)
	// Input looks like "GOOG:50%,YHOO:50%": split entries, then symbol/percent.
	sStocknum := strings.Split(stockipstr, ",")
	count := 0
	//BuyallocationMap consists of stocks n % for each stock
	StockpercentMap := make(map[string]int)
	for _, v := range sStocknum {
		sSplited := strings.Split(v, ":")
		sSplitnumper := strings.Split(sSplited[1], "%")
		i, err := strconv.Atoi(sSplitnumper[0])
		if err != nil {
			// Non-numeric percentage: report and bail out.
			fmt.Println(err)
			os.Exit(2)
		}
		StockpercentMap[sSplited[0]] = i
		count = count + i
	}
	if count != 100 {
		fmt.Println("Sum of Stock Percentages should be 100")
		os.Exit(2)
	}
	args := &Args{Budget, StockpercentMap}
	var reply Buyresponse
	// NOTE(review): this checks the package-level err, which is never set on
	// this path — presumably intended to guard the dial; confirm the intent.
	if err != nil {
		log.Fatal("dialing:", err)
	}
	err = c.Call("StockMarket.BuyingStocks", args, &reply)
	if err != nil {
		log.Fatal("Error While Buying Stocks:", err)
	}
	fmt.Println("The summary of your stocks purchase is:", reply)
	fmt.Println("The Trade id for transaction is:", reply.TradeID)
	fmt.Println("Stocks details are: ", reply.Stocksbought)
	fmt.Print("The Unvested Amount: ")
	fmt.Printf("%.2f", reply.UnvestedAmount)
	fmt.Println()
}
// optionsecond handles menu option 2: it reads a trade ID and calls
// StockMarket.ViewStockPortfolio over JSON-RPC, printing the loss/gain
// summary for that trade.
func optionsecond(client net.Conn, c *rpc.Client) {
	var tradeinput int
	//c := jsonrpc.NewClient(client)
	fmt.Println("Enter the trade ID for which you wish to see the portfolio ")
	fmt.Scan(&tradeinput)
	var viewportfolioResponse PortfolioResponsedata
	// Writes to the package-level err, shared with optionfirst.
	err = c.Call("StockMarket.ViewStockPortfolio", &tradeinput, &viewportfolioResponse)
	if err != nil {
		log.Fatal("Error While ViewStockPortfolio:", err)
	}
	fmt.Println("The netsummary of the Viewportfolio for loss/gain is :", viewportfolioResponse)
	fmt.Println("Stock data: ", viewportfolioResponse.Stocksbought)
	fmt.Println("Current market price: ", viewportfolioResponse.CurrentMarketValue)
	fmt.Print("The Unvested Amount: ")
	fmt.Printf("%.2f", viewportfolioResponse.UnvestedAmount)
	fmt.Println()
}
|
package lc
// Benchmark: 72ms 11mb | 97% 35%
// elem is a singly linked node used for hash-bucket chaining.
type elem struct {
	key  int
	next *elem
}

// MyHashSet is a fixed-size (1024 buckets) hash set of ints that uses
// separate chaining for collision resolution. Keys are assumed
// non-negative (key%1024 would be negative otherwise).
type MyHashSet struct {
	m [1024]*elem
}

/** Initialize your data structure here. */
func Constructor() MyHashSet {
	return MyHashSet{[1024]*elem{}}
}

// Add inserts key into the set; duplicates are ignored.
func (s *MyHashSet) Add(key int) {
	if s.Contains(key) {
		return
	}
	// Prepend to the bucket's chain (works when the bucket is empty too,
	// since the old head is simply nil).
	s.m[key%1024] = &elem{key, s.m[key%1024]}
}

// Remove deletes key from the set if present.
func (s *MyHashSet) Remove(key int) {
	head := s.m[key%1024]
	if head == nil {
		return
	}
	// Key at the front of the chain.
	if head.key == key {
		s.m[key%1024] = head.next
		return
	}
	var prev *elem
	for head != nil {
		if head.key == key {
			prev.next = head.next
			// Keys are unique (Add checks Contains), so we can stop here
			// instead of scanning the rest of the chain.
			return
		}
		prev = head
		head = head.next
	}
}

/** Returns true if this set contains the specified element */
func (s *MyHashSet) Contains(key int) bool {
	for head := s.m[key%1024]; head != nil; head = head.next {
		if head.key == key {
			return true
		}
	}
	return false
}
|
package draft
import (
"fmt"
"regexp"
"k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
"k8s.io/client-go/kubernetes/scheme"
platform "kolihub.io/koli/pkg/apis/core/v1alpha1"
)
// GetClusterPlan returns the deployment's cluster plan label wrapped in
// a MapValue.
func (d *Deployment) GetClusterPlan() *MapValue {
	return &MapValue{Val: d.GetLabel(platform.LabelClusterPlan).String()}
}

// GetStoragePlan returns the deployment's storage plan label wrapped in
// a MapValue.
func (d *Deployment) GetStoragePlan() *MapValue {
	return &MapValue{Val: d.GetLabel(platform.LabelStoragePlan).String()}
}

// SetStoragePlan sets the storage plan label to planName.
func (d *Deployment) SetStoragePlan(planName string) {
	d.SetLabel(platform.LabelStoragePlan, planName)
}

// SetClusterPlan sets the cluster plan label to planName.
func (d *Deployment) SetClusterPlan(planName string) {
	d.SetLabel(platform.LabelClusterPlan, planName)
}
// GetObject returns the original resource
func (d *Deployment) GetObject() *v1beta1.Deployment {
	return &d.Deployment
}

// Copy performs a deep copy of the resource
func (d *Deployment) Copy() (*Deployment, error) {
	// NOTE(review): scheme.Scheme.DeepCopy is reflection-based and
	// deprecated in newer client-go releases — confirm before upgrading
	// the dependency.
	objCopy, err := scheme.Scheme.DeepCopy(d.GetObject())
	if err != nil {
		return nil, fmt.Errorf("Failed deep copying Deployment resource")
	}
	return NewDeployment(objCopy.(*v1beta1.Deployment)), nil
}
// BuildRevision returns the build revision annotation as an int.
//
// NOTE(review): this is the only accessor on Deployment with a value
// (non-pointer) receiver — presumably unintentional; confirm before
// normalizing.
func (d Deployment) BuildRevision() int {
	return d.GetAnnotation(platform.AnnotationBuildRevision).AsInt()
}

// HasAutoDeployAnnotation reports whether the auto-deploy annotation is set.
func (d *Deployment) HasAutoDeployAnnotation() bool {
	return d.GetAnnotation(platform.AnnotationAutoDeploy).AsBool()
}

// HasSetupPVCAnnotation reports whether the storage-setup annotation is set.
func (d *Deployment) HasSetupPVCAnnotation() bool {
	return d.GetAnnotation(platform.AnnotationSetupStorage).AsBool()
}

// HasBuildAnnotation reports whether the build annotation is set.
func (d *Deployment) HasBuildAnnotation() bool {
	return d.GetAnnotation(platform.AnnotationBuild).AsBool()
}

// GitRepository returns the git repository annotation.
func (d *Deployment) GitRepository() string {
	return d.GetAnnotation(platform.AnnotationGitRepository).String()
}

// GitRevision parses the git revision annotation into a SHA.
func (d *Deployment) GitRevision() (*SHA, error) {
	return NewSha(d.GetAnnotation(platform.AnnotationGitRevision).String())
}

// GitBranch returns the git branch annotation.
func (d *Deployment) GitBranch() string {
	return d.GetAnnotation(platform.AnnotationGitBranch).String()
}

// GitSource returns the build source annotation.
func (d *Deployment) GitSource() string {
	return d.GetAnnotation(platform.AnnotationBuildSource).String()
}

// GitCompare returns the git compare annotation.
func (d *Deployment) GitCompare() string {
	return d.GetAnnotation(platform.AnnotationGitCompare).String()
}

// GitHubUser returns the GitHub user annotation as a raw MapValue.
func (d *Deployment) GitHubUser() *MapValue {
	return d.GetAnnotation(platform.AnnotationGitHubUser)
}

// GitHubWebHookSecret returns the GitHub webhook secret annotation.
func (d *Deployment) GitHubWebHookSecret() string {
	return d.GetAnnotation(platform.AnnotationGitHubSecretHook).String()
}

// AuthToken returns the auth token annotation.
func (d *Deployment) AuthToken() string {
	return d.GetAnnotation(platform.AnnotationAuthToken).String()
}
// PodSpec returns a pointer into the deployment's pod template spec.
func (d *Deployment) PodSpec() *v1.PodSpec { return &d.Spec.Template.Spec }

// HasMultipleReplicas reports whether more than one replica is configured.
func (d *Deployment) HasMultipleReplicas() bool { return d.Spec.Replicas != nil && *d.Spec.Replicas > 1 }

// HasContainers reports whether the pod template declares any containers.
func (d *Deployment) HasContainers() bool { return len(d.Spec.Template.Spec.Containers) > 0 }

// GetContainers returns the pod template's container list.
func (d *Deployment) GetContainers() []v1.Container { return d.Spec.Template.Spec.Containers }

// IsMarkedForDeletion verifies if the metadata.deletionTimestamp is set, meaning the resource
// is marked to be excluded
func (d *Deployment) IsMarkedForDeletion() bool { return d.DeletionTimestamp != nil }
// Copy performs a deep copy of the resource
func (d *Ingress) Copy() (*Ingress, error) {
	// DeepCopy is the generated deep-copy; it only yields nil when the
	// receiver itself is nil.
	objCopy := d.DeepCopy()
	if objCopy == nil {
		return nil, fmt.Errorf("Failed deep copying ingress")
	}
	return NewIngress(objCopy), nil
}

// GetObject returns the original resource
func (i *Ingress) GetObject() *v1beta1.Ingress {
	return &i.Ingress
}
// DomainPrimaryKeys returns annotations matching domains, e.g.: 'kolihub.io/domain.tld'
func (i *Ingress) DomainPrimaryKeys() (m map[string]string) {
	// Match keys of the form "kolihub.io/<name>.<tld>". The previous
	// pattern (`kolihub.io/.+\.+`) left the dots in "kolihub.io"
	// unescaped (so "kolihubXio/…" matched) and required only trailing
	// literal dots, so "kolihub.io/foo." matched while the documented
	// "kolihub.io/domain.tld" shape was not actually enforced.
	domReg := regexp.MustCompile(`^kolihub\.io/.+\..+`)
	for key, value := range i.Annotations {
		if domReg.MatchString(key) {
			// Allocate the result map lazily so a no-match call returns nil.
			if m == nil {
				m = map[string]string{}
			}
			m[key] = value
		}
	}
	return
}
|
package main
import (
"reflect"
"testing"
xenAPI "github.com/johnprather/go-xen-api-client"
)
// TestParseLegendEntry checks that a well-formed legend string parses
// into its four Entry fields and that garbage input yields an error.
func TestParseLegendEntry(t *testing.T) {
	cases := []struct {
		input     string
		expected  Entry
		expectErr bool
	}{
		{"AVERAGE:vm:15f9d56e-938a-34fc-73f3-a7e08a0445eb:vbd_xvdd_io_throughput_write", Entry{"AVERAGE", "vm", "15f9d56e-938a-34fc-73f3-a7e08a0445eb", "vbd_xvdd_io_throughput_write"}, false},
		{"garbage", Entry{}, true},
	}
	for _, tc := range cases {
		actual, err := parseLegendEntry(tc.input)
		// Error expectation and actual error must agree in both directions.
		switch {
		case err != nil && !tc.expectErr:
			t.Errorf("unexpected error parsing entry [%v]: %v", tc.input, err)
		case err == nil && tc.expectErr:
			t.Errorf("expected error for input [%v]", tc.input)
		}
		if actual != tc.expected {
			t.Errorf("parsing legend entry failed. got [%+v] expected [%+v]", actual, tc.expected)
		}
	}
}
// TestMappingRrds feeds two RrdUpdates batches (one with named legend
// entries, one with empty ones) into mapRrds and checks that one mapped
// element is produced per legend entry.
func TestMappingRrds(t *testing.T) {
	rrdMetrics := []*RrdUpdates{
		&RrdUpdates{
			RrdMeta{
				1495554585,
				3,
				Legend{
					[]Entry{
						Entry{"AVERAGE", "VM", "1111-111", "CPU"},
						Entry{"AVERAGE", "VM", "1111-111", "MEMORY"},
						Entry{"AVERAGE", "HOST", "555-555", "CPU"},
					},
				},
			},
			Data{
				[]Row{
					{
						Timestamp: 1495554585,
						Values:    []float64{1.1, 2.2, 8.8},
					},
				},
			},
		},
		&RrdUpdates{
			RrdMeta{
				1495554585,
				5,
				Legend{
					[]Entry{
						Entry{},
						Entry{},
						Entry{},
						Entry{},
						Entry{},
					},
				},
			},
			Data{
				[]Row{
					{
						Timestamp: 1495554585,
						Values:    []float64{1.1, 2.2, 3.3, 4.4, 5.5},
					},
				},
			},
		},
	}
	// No host/VM records: mapping must still emit one element per entry.
	hostRecords := map[xenAPI.HostRef]xenAPI.HostRecord{}
	vmRecords := map[xenAPI.VMRef]xenAPI.VMRecord{}
	mapped := mapRrds(rrdMetrics, hostRecords, vmRecords)
	var expectedLen int
	for _, u := range rrdMetrics {
		expectedLen += len(u.Meta.Legend.Entries)
	}
	if length := len(mapped); length != expectedLen {
		// Message fixed: it previously claimed "Expected 1 element" while
		// the comparison is actually against expectedLen.
		t.Errorf("Expected %d elements but got %d", expectedLen, length)
	}
}
// TestParseCpuMetric checks that cpu/core metric names are split into
// the base name "cpu" plus cpu_num / core_num labels.
func TestParseCpuMetric(t *testing.T) {
	cases := []struct {
		input          string
		expectedName   string
		expectedLabels map[string]string
	}{
		{"cpu0", "cpu", map[string]string{"cpu_num": "0"}},
		{"cpu1", "cpu", map[string]string{"cpu_num": "1"}},
		{"cpu0-C0", "cpu", map[string]string{"cpu_num": "0", "core_num": "0"}},
		{"cpu0-C1", "cpu", map[string]string{"cpu_num": "0", "core_num": "1"}},
		{"cpu1-C0", "cpu", map[string]string{"cpu_num": "1", "core_num": "0"}},
		{"cpu1-C1", "cpu", map[string]string{"cpu_num": "1", "core_num": "1"}},
	}
	for _, tc := range cases {
		name, labels := parseCpuMetric(tc.input)
		if name != tc.expectedName {
			t.Errorf("failed to parse cpu metric name. Got [%v] expected [%v]", name, tc.expectedName)
		}
		if !reflect.DeepEqual(labels, tc.expectedLabels) {
			t.Errorf("failed to parse cpu metric labels. Got [%v] expected [%v]", labels, tc.expectedLabels)
		}
	}
}
|
package main
import (
"fmt"
"strings"
)
/*
将一个给定字符串根据给定的行数,以从上往下、从左到右进行 Z 字形排列。
比如输入字符串为 "LEETCODEISHIRING" 行数为 3 时,排列如下:
L C I R
E T O E S I I G
E D H N
之后,你的输出需要从左往右逐行读取,产生出一个新的字符串,比如:"LCIRETOESIIGEDHN"。
请你实现这个将字符串进行指定行数变换的函数:
string convert(string s, int numRows);
示例 1:
输入: s = "LEETCODEISHIRING", numRows = 3
输出: "LCIRETOESIIGEDHN"
示例 2:
输入: s = "LEETCODEISHIRING", numRows = 4
输出: "LDREOEIIECIHNTSG"
解释:
L D R
E O E I I
E C I H N
T S G
*/
// main demonstrates the zigzag conversion on the example input.
func main() {
	fmt.Println(convert("LEETCODEISHIRING", 4))
}
// convert lays s out in a zigzag over numRows rows (down the first
// column, diagonally up to the top, repeat) and returns the rows read
// left to right, concatenated.
//
// Fixes over the previous version: numRows <= 0 no longer loops forever
// (the input is returned unchanged, matching the numRows == 1 case),
// the goto-based control flow is replaced by a single pass that bounces
// a row index between the top and bottom rows, and per-row string
// concatenation (quadratic) is replaced with strings.Builder.
func convert(s string, numRows int) string {
	// With a single row (or a nonsensical row count) the zigzag is the
	// identity transformation.
	if numRows <= 1 {
		return s
	}
	rows := make([]strings.Builder, numRows)
	row, step := 0, 1
	for i := 0; i < len(s); i++ {
		rows[row].WriteByte(s[i])
		// Reverse direction at the top and bottom rows.
		if row == 0 {
			step = 1
		} else if row == numRows-1 {
			step = -1
		}
		row += step
	}
	var b strings.Builder
	for i := range rows {
		b.WriteString(rows[i].String())
	}
	return b.String()
}
|
package linkedlist
import (
"fmt"
"sync"
)
// item is a node in the singly linked list.
type item struct {
	next *item
	val  interface{}
}

// LinkedList represents a single linked list
type LinkedList struct {
	lock sync.RWMutex
	head *item
}

var (
	// ErrorNotFound is returned when an item is not in the single linked list
	ErrorNotFound = fmt.Errorf("not found")
)

// Append adds val to the end of the single linked list
func (s *LinkedList) Append(val interface{}) {
	s.lock.Lock()
	defer s.lock.Unlock()
	// Renamed from `new`, which shadowed the builtin function of the
	// same name.
	node := &item{
		val: val,
	}
	if s.head == nil {
		s.head = node
		return
	}
	// Walk to the tail and link the new node there.
	cur := s.head
	for cur.next != nil {
		cur = cur.next
	}
	cur.next = node
}

// Remove deletes the first occurrence of val from the single linked
// list, returning ErrorNotFound when val is absent.
func (s *LinkedList) Remove(val interface{}) error {
	s.lock.Lock()
	defer s.lock.Unlock()
	var prev *item
	for cur := s.head; cur != nil; cur = cur.next {
		if cur.val == val {
			if prev == nil {
				// Removing the head node.
				s.head = cur.next
			} else {
				prev.next = cur.next
			}
			return nil
		}
		prev = cur
	}
	return ErrorNotFound
}

// Iter provides an iterator to walk through the single linked list
//
// NOTE(review): the read lock is held until the channel is fully
// drained — a consumer that abandons the iteration leaks this goroutine
// and blocks all writers forever. Confirm every caller ranges to
// completion before relying on this in concurrent code.
func (s *LinkedList) Iter() <-chan interface{} {
	ch := make(chan interface{})
	s.lock.RLock()
	go func() {
		for cur := s.head; cur != nil; cur = cur.next {
			ch <- cur.val
		}
		s.lock.RUnlock()
		close(ch)
	}()
	return ch
}

// Len returns the number of items in the single linked list
func (s *LinkedList) Len() int {
	s.lock.RLock()
	defer s.lock.RUnlock()
	var i int
	for cur := s.head; cur != nil; cur = cur.next {
		i++
	}
	return i
}
|
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package gtest
import (
"encoding/xml"
"fmt"
"io/ioutil"
"chromiumos/tast/errors"
)
// Report is a parsed gtest output report.
// See https://github.com/google/googletest/blob/master/googletest/docs/advanced.md#generating-an-xml-report for details. // nocheck
// Note: at the moment, only a subset of the report is parsed. More can be
// added upon requirements.
// TODO(crbug.com/940320): Consider switching to use JSON, which is supported
// gtest 1.8.1 or later.
type Report struct {
	Suites []*TestSuite `xml:"testsuite"`
}

// TestSuite represents a testsuite run in Report.
type TestSuite struct {
	Name  string      `xml:"name,attr"`
	Cases []*TestCase `xml:"testcase"`
}

// TestCase represents a testcase run in TestSuite.
type TestCase struct {
	Name     string    `xml:"name,attr"`
	Failures []Failure `xml:"failure"`
}

// Failure represents a test validation failure in TestCase.
type Failure struct {
	Message string `xml:"message,attr"`
}

// testNames walks the whole report and collects "TestSuite.TestCase"
// names for every case whose failed-ness matches wantFailed. Extracted
// to remove the duplicated traversal in the two exported accessors.
func (r *Report) testNames(wantFailed bool) []string {
	var ret []string
	for _, s := range r.Suites {
		for _, c := range s.Cases {
			if (len(c.Failures) > 0) == wantFailed {
				ret = append(ret, fmt.Sprintf("%s.%s", s.Name, c.Name))
			}
		}
	}
	return ret
}

// PassedTestNames returns an array of passed test names, in the
// "TestSuite.TestCase" format. If no passed tests are found, returns nil.
// This walks through whole the report.
func (r *Report) PassedTestNames() []string {
	return r.testNames(false)
}

// FailedTestNames returns an array of failed test names, in the
// "TestSuite.TestCase" format. If no error is found, returns nil.
// This walks through whole the report.
func (r *Report) FailedTestNames() []string {
	return r.testNames(true)
}
// ParseReport parses the XML gtest output report at path.
// The whole file is read into memory; gtest reports are small.
func ParseReport(path string) (*Report, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, errors.Wrap(err, "failed to read")
	}
	return parseReportInternal(b)
}
// parseReportInternal unmarshals raw XML report bytes into a Report,
// kept separate from the file I/O in ParseReport.
func parseReportInternal(b []byte) (*Report, error) {
	ret := &Report{}
	if err := xml.Unmarshal(b, ret); err != nil {
		return nil, errors.Wrap(err, "failed to parse gtest XML report")
	}
	return ret, nil
}
|
package main
import(
"fmt"
"time"
"net/http"
"io/ioutil"
"log"
"encoding/json"
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
)
// Cat is the JSON payload for the /cats endpoints.
type Cat struct {
	Name string `json:"name"`
	Type string `json:"type"`
}

// Slugs is the JSON payload for the /slugs endpoint.
type Slugs struct {
	Name       string `json:"name"`
	Type       string `json:"type"`
	Count      int    `json:"count"`
	SlimeLevel int    `json:"slime-level"`
}

// Pandas is the JSON payload for the /pandas endpoint.
type Pandas struct {
	Name               string `json:"name"`
	Type               string `json:"type"`
	Count              int    `json:"count"`
	CannibalisticLevel int    `json:"cannibal-level"`
	HumansConsumed     int    `json:"humans-consumed"`
}
// greetingWeb handles GET / with a plain-text greeting.
func greetingWeb(c echo.Context) error {
	return c.String(http.StatusOK, "Hello from the sever side")
}
// getCats responds with the cat described by the "catname" and
// "catspecies" query parameters, encoded according to the :data path
// parameter ("string" or "json"); anything else is a 400.
func getCats(c echo.Context) error {
	catName := c.QueryParam("catname")
	catType := c.QueryParam("catspecies")
	switch c.Param("data") {
	case "string":
		return c.String(http.StatusOK, fmt.Sprintf("Your cat's name is %s\nand she or he's a %s\n", catName, catType))
	case "json":
		return c.JSON(http.StatusOK, map[string]string{
			"name": catName,
			"type": catType,
		})
	default:
		return c.JSON(http.StatusBadRequest, map[string]string{
			"error": "this is wrong",
		})
	}
}
// addKittyCat handles POST /cats by reading the whole request body and
// unmarshalling it into a Cat.
func addKittyCat(c echo.Context) error {
	cat := Cat{}
	defer c.Request().Body.Close()
	b, err := ioutil.ReadAll(c.Request().Body)
	if err != nil {
		log.Printf("Failed reading: %s", err)
		return c.String(http.StatusInternalServerError, "")
	}
	err = json.Unmarshal(b, &cat)
	if err != nil {
		// "inmarshalling" typo fixed.
		log.Printf("Failed unmarshalling: %s", err)
		return c.String(http.StatusInternalServerError, "")
	}
	// %v verb added: the original Printf passed an argument with no
	// format directive (flagged by `go vet`).
	log.Printf("Here is your kitty cat, Sir: %v", cat)
	return c.String(http.StatusOK, "We have received your kitty cat, Sir.")
}
// addSlugs handles POST /slugs by stream-decoding the JSON body
// directly into a Slugs value.
func addSlugs(c echo.Context) error {
	slugs := Slugs{}
	defer c.Request().Body.Close()
	err := json.NewDecoder(c.Request().Body).Decode(&slugs)
	if err != nil {
		log.Printf("Failed reading addSlugs request: %s", err)
		return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
	}
	// %v verb added: the original Printf passed an argument with no
	// format directive (flagged by `go vet`).
	log.Printf("Here are your slugs, Sir. Mind the slime: %v", slugs)
	return c.String(http.StatusOK, "We have received your slugs, Sir.")
}
// addPandas handles POST /pandas by binding the request into a Pandas
// value and validating the cannibalistic level.
func addPandas(c echo.Context) error {
	pandas := Pandas{}
	err := c.Bind(&pandas)
	// Check the bind error before validating the struct: on a bind
	// failure pandas holds zero values, so the validation below would
	// fire and mask the real error (the original checked in the wrong
	// order).
	if err != nil {
		log.Printf("Failed reading your request to add pandas: %s", err)
		return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
	}
	if pandas.CannibalisticLevel < 1 {
		logger := "A panda has never had a Cannibalistic Level below one."
		// log.Print: the message is data, not a format string (go vet
		// flags non-constant Printf format strings).
		log.Print(logger)
		return echo.NewHTTPError(http.StatusInternalServerError, logger)
	}
	// %v verb added: the original Printf passed an argument with no
	// format directive.
	log.Printf("Here are your pandas, Sir. Monsterous things, aren't they? %v", pandas)
	return c.String(http.StatusOK, "We have received your pandas, Sir. I do hope they don't kill anyone important.")
}
//admin middleware

// mainAdmin handles GET /admin/main for authenticated admins.
func mainAdmin(c echo.Context) error {
	return c.String(http.StatusOK, "You twinkle above us, we twinkle below")
}
// ServerHeader is middleware that stamps fixed response headers on
// every request before delegating to the next handler.
func ServerHeader(next echo.HandlerFunc) echo.HandlerFunc {
	return func(c echo.Context) error {
		c.Response().Header().Set(echo.HeaderServer, "Anything")
		c.Response().Header().Set("nutty nutty nutty", "Anything")
		return next(c)
	}
}
//cookie time

// mainCookie handles GET /cookie/main.
func mainCookie(c echo.Context) error {
	return c.String(http.StatusOK, "Cookie with your meal, Sir?")
}
// login issues a session cookie when the supplied credentials match.
//
// NOTE(review): credentials arrive as query parameters (they end up in
// access logs and browser history) and are compared against hard-coded
// plaintext values — both must be replaced (POST body + hashed lookup
// against a user store) before real use.
func login(c echo.Context) error {
	username := c.QueryParam("username")
	password := c.QueryParam("password")
	//normally would check user and password against db, after hashing
	if username == "PiereKirby" && password == "1234" {
		cookie := &http.Cookie{}
		//new(http.Cookie) same^
		cookie.Name = "sessionID"
		cookie.Value = "a string is not a string by any ther type"
		// Session lasts a little over five days.
		cookie.Expires = time.Now().Add(124 * time.Hour)
		c.SetCookie(cookie)
		return c.String(http.StatusOK, "Excellent choice, Sir")
	}
	return c.String(http.StatusUnauthorized, "You are not my Lord. I will not serve you.")
}
// main wires up the echo server: a global server-header middleware, a
// logged and basic-auth'd /admin group, a /cookie group, and the public
// routes, then listens on :8000.
func main() {
	fmt.Printf("Mornin', Starshine!")
	e := echo.New()
	e.Use(ServerHeader)
	g := e.Group("/admin", middleware.Logger())
	cg := e.Group("/cookie")
	g.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{
		Format: `[${time_rfc3339}] ${status} ${method} ${host}${path} ${latency_human}` + "\n",
	}))
	// NOTE(review): credentials are hard-coded; replace with a real user
	// store before production use.
	g.Use(middleware.BasicAuth(func(username, password string, c echo.Context) (bool, error) {
		if username == "PiereKirby" && password == "1234" {
			return true, nil
		}
		return false, nil
	}))
	cg.GET("/main", mainCookie)
	g.GET("/main", mainAdmin, middleware.Logger())
	e.GET("/", greetingWeb)
	e.GET("/login", login)
	e.GET("/cats/:data", getCats)
	e.POST("/cats", addKittyCat)
	e.POST("/slugs", addSlugs)
	e.POST("/pandas", addPandas)
	// e.Start blocks until the server stops; its error was previously
	// discarded, hiding failures such as a port already in use.
	e.Logger.Fatal(e.Start(":8000"))
}
|
package main
import (
"encoding/json"
_ "fmt"
"io/ioutil"
"net/http"
)
// Github retrieves repository metadata from the GitHub REST API.
type Github struct{}

// Repo is the subset of the GitHub repository JSON we care about.
type Repo struct {
	SshUrl   string `json:"ssh_url"`
	CloneUrl string `json:"clone_url"`
	GitUrl   string `json:"git_url"`
	Fork     bool   `json:"fork"`
}
// Retrieve fetches the public repository list for the hard-coded GitHub
// user and returns it decoded. It panics on any network, read, or
// decode failure, matching the original error-handling contract (which
// already panicked on unmarshal errors).
func (g Github) Retrieve() []Repo {
	api := "https://api.github.com/users/cworsley4/repos"
	resp, err := http.Get(api)
	// The HTTP and read errors were previously discarded, which would
	// surface later as a nil-pointer dereference or a confusing
	// unmarshal panic.
	if err != nil {
		panic(err)
	}
	// Close the body so the underlying connection can be reused.
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	var repos []Repo
	if err := json.Unmarshal(body, &repos); err != nil {
		panic(err)
	}
	return repos
}
|
package requests
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/url"
"strings"
"github.com/atomicjolt/canvasapi"
"github.com/atomicjolt/canvasapi/models"
)
// DeleteCommunicationChannelType Delete an existing communication channel.
// https://canvas.instructure.com/doc/api/communication_channels.html
//
// Path Parameters:
// # Path.UserID (Required) ID
// # Path.Type (Required) ID
// # Path.Address (Required) ID
//
type DeleteCommunicationChannelType struct {
	Path struct {
		UserID  string `json:"user_id" url:"user_id,omitempty"` // (Required)
		Type    string `json:"type" url:"type,omitempty"`       // (Required)
		Address string `json:"address" url:"address,omitempty"` // (Required)
	} `json:"path"`
}

// GetMethod returns the HTTP method for this request.
func (t *DeleteCommunicationChannelType) GetMethod() string {
	return "DELETE"
}

// GetURLPath substitutes the path parameters into the endpoint template.
func (t *DeleteCommunicationChannelType) GetURLPath() string {
	path := "users/{user_id}/communication_channels/{type}/{address}"
	path = strings.ReplaceAll(path, "{user_id}", fmt.Sprintf("%v", t.Path.UserID))
	path = strings.ReplaceAll(path, "{type}", fmt.Sprintf("%v", t.Path.Type))
	path = strings.ReplaceAll(path, "{address}", fmt.Sprintf("%v", t.Path.Address))
	return path
}

// GetQuery returns the query string; this request has none.
func (t *DeleteCommunicationChannelType) GetQuery() (string, error) {
	return "", nil
}

// GetBody returns the form body; this request has none.
func (t *DeleteCommunicationChannelType) GetBody() (url.Values, error) {
	return nil, nil
}

// GetJSON returns the JSON body; this request has none.
func (t *DeleteCommunicationChannelType) GetJSON() ([]byte, error) {
	return nil, nil
}

// HasErrors validates that every required path parameter is set and
// returns a single error listing all missing ones, or nil.
func (t *DeleteCommunicationChannelType) HasErrors() error {
	errs := []string{}
	if t.Path.UserID == "" {
		errs = append(errs, "'Path.UserID' is required")
	}
	if t.Path.Type == "" {
		errs = append(errs, "'Path.Type' is required")
	}
	if t.Path.Address == "" {
		errs = append(errs, "'Path.Address' is required")
	}
	if len(errs) > 0 {
		// Explicit %s verb: the joined message was previously passed as
		// the Errorf format string itself, so any '%' in it would be
		// misinterpreted (go vet: non-constant format string).
		return fmt.Errorf("%s", strings.Join(errs, ", "))
	}
	return nil
}
// Do executes the request against the Canvas API and decodes the JSON
// response into a CommunicationChannel model.
func (t *DeleteCommunicationChannelType) Do(c *canvasapi.Canvas) (*models.CommunicationChannel, error) {
	response, err := c.SendRequest(t)
	if err != nil {
		return nil, err
	}
	body, err := ioutil.ReadAll(response.Body)
	// Close unconditionally, before inspecting the read error, so the
	// connection is always released.
	response.Body.Close()
	if err != nil {
		return nil, err
	}
	ret := models.CommunicationChannel{}
	err = json.Unmarshal(body, &ret)
	if err != nil {
		return nil, err
	}
	return &ret, nil
}
|
package lib
// TranscodeGV splits the sequence of frame matrices in array into
// I-pages (independent frames) and groups of B-pages (the frames
// between two consecutive I-pages), encodes them via TranscodeGip and
// TranscodeGbp, and concatenates the encoded bytes into one stream.
//
// NOTE(review): TranscodeGip/TranscodeGbp errors are discarded, an
// empty input panics at the IPageByteArrays[0] access, and the pairing
// of BPageArrays[i] with IPageArrays[i] and IPageArrays[i+1] assumes
// every B-page group is bounded by two I-pages — confirm callers
// guarantee these invariants.
func TranscodeGV(array [][][]uint8, config *ConfigInfo) []uint8 {
	var IPageArrays [][][]uint8
	var BPageArrays [][][][]uint8
	length := len(array)
	// Every (MaxBPageNum+1)-th frame becomes an I-page; the frames in
	// between are B-pages, except the very last frame which is always
	// promoted to an I-page.
	pageSkip := config.MaxBPageNum + 1
	for i := 0; i < length; i += pageSkip {
		IPageArrays = append(IPageArrays, array[i])
		var _BPageArrays [][][]uint8
		for j := 1; j < pageSkip && i+j < length; j++ {
			if i+j == length-1 {
				IPageArrays = append(IPageArrays, array[i+j])
			} else {
				_BPageArrays = append(_BPageArrays, array[i+j])
			}
		}
		BPageArrays = append(BPageArrays, _BPageArrays)
	}
	// Encode each I-page independently.
	var IPageByteArrays [][]uint8
	for _, arr := range IPageArrays {
		byteArray, _ := TranscodeGip(arr, config)
		IPageByteArrays = append(IPageByteArrays, byteArray)
	}
	// Encode each B-page group against its bounding I-pages.
	var BPageByteArrays [][]uint8
	for i, arr := range BPageArrays {
		byteArray, _ := TranscodeGbp(IPageArrays[i], IPageArrays[i+1], arr, config)
		BPageByteArrays = append(BPageByteArrays, byteArray)
	}
	// Output: first I-page, then each subsequent I-page followed by the
	// B-page group encoded before it.
	var byteArray = IPageByteArrays[0]
	for i, arr := range IPageByteArrays {
		if i > 0 {
			byteArray = append(byteArray, append(arr, BPageByteArrays[i-1]...)...)
		}
	}
	return byteArray
}
package config
import (
"fmt"
"io/ioutil"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
yaml "gopkg.in/yaml.v2"
)
// Settings is a struct yaml configuration
type Settings struct {
	Static
	DB
}

// Static holds the HTTP/static-file server settings.
type Static struct {
	Host string `yaml:"host"`
	Port string `yaml:"port"`
	Dir  string `yaml:"dir"`
}

// DB holds the database connection settings. The password is not
// stored here; it is supplied separately to DBurl.
type DB struct {
	Conn  string `yaml:"conn"`
	Shard string `yaml:"shard"`
	Name  string `yaml:"name"`
	Pool  int    `yaml:"pool"`
}
// Parse reads configuration file and stores values to struct variable
func Parse(cfgPath string) (cfg *Settings, err error) {
	c, err := ioutil.ReadFile(cfgPath)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to read config file %s", cfgPath)
	}
	// Unmarshalling into &cfg (a **Settings) lets yaml allocate the
	// Settings value.
	if err = yaml.Unmarshal(c, &cfg); err != nil {
		return nil, errors.Wrapf(err, "failed to unmarshal config file %s", cfgPath)
	}
	// NOTE(review): this logs the entire parsed config — confirm it can
	// never contain secrets.
	log.Infof("%+v", cfg)
	return cfg, err
}
// Addr returns the "host:port" address for the HTTP server to listen
// on. (Comment previously referred to a nonexistent HTTPAddr.)
func (cfg *Settings) Addr() string {
	return fmt.Sprintf("%s:%s", cfg.Host, cfg.Port)
}
// DBurl builds the database connection URL by splicing pwd between the
// configured connection prefix and shard suffix.
func (db *DB) DBurl(pwd string) string {
	// Log a redacted form only: the previous version logged the full
	// URL, leaking the database password into the logs (it also wrapped
	// an fmt.Sprintf inside log.Infof, formatting twice).
	log.Infof("%s***%s", db.Conn, db.Shard)
	return fmt.Sprintf("%s%s%s", db.Conn, pwd, db.Shard)
}
|
package elliptic
import "math/big"
// Bitcoin's secp256k1 elliptic curve
// Reference: https://en.bitcoin.it/wiki/Secp256k1
//
// Secp256k1 is allocated here and populated once by init below.
var Secp256k1 = new(CurveParams)
// init fills in the secp256k1 domain parameters (field prime P, group
// order N, curve coefficients A and B for y^2 = x^3 + 7, base point G,
// bit size, and name). A SetString failure on these hard-coded
// constants is a programmer error, so it panics.
func init() {
	var ok bool
	Secp256k1.P, ok = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)
	if !ok {
		panic("secp256k1: SetString: P")
	}
	Secp256k1.N, ok = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)
	if !ok {
		panic("secp256k1: SetString: N")
	}
	Secp256k1.A = new(big.Int).SetUint64(0)
	Secp256k1.B = new(big.Int).SetUint64(7)
	Secp256k1.Gx, ok = new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16)
	if !ok {
		panic("secp256k1: SetString: Gx")
	}
	Secp256k1.Gy, ok = new(big.Int).SetString("483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8", 16)
	if !ok {
		// Fixed: this panic previously said "Gx" for a Gy failure.
		panic("secp256k1: SetString: Gy")
	}
	Secp256k1.BitSize = 256
	Secp256k1.Name = "secp256k1"
}
|
package individualparsers
import (
"bytes"
)
// Raw64MZHeader recognizes content that starts with a raw MZ executable
// header.
type Raw64MZHeader struct{}

// Match reports whether content begins with the MZ magic bytes
// 0x4d 0x5a 0x90; it never returns an error.
func (b Raw64MZHeader) Match(content []byte) (bool, error) {
	// Raw MZ header
	if len(content) < 3 {
		return false, nil
	}
	return bytes.Equal(content[:3], []byte{0x4d, 0x5a, 0x90}), nil
}
// Normalize tags the content as a raw executable; the bytes are passed
// through unchanged.
func (b Raw64MZHeader) Normalize(content []byte) (int, []byte, error) {
	return KeyRawExecutable, content, nil
}
|
package merger
import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"github.com/threez/intm/internal/model"
"github.com/threez/intm/internal/port"
)
// MergerTestSuite runs a shared battery of interval-merging tests
// against whichever port.Merger implementation is injected into Merger.
type MergerTestSuite struct {
	Merger port.Merger
	suite.Suite
}
// TestEmpty verifies that a fresh merger yields a nil result slice.
func (suite *MergerTestSuite) TestEmpty() {
	assert.Equal(suite.T(), []*model.Interval(nil), suite.Merger.Result())
}
// test feeds the `in` interval pairs into the merger and asserts that
// the merged result renders the same strings as the `out` pairs.
func (suite *MergerTestSuite) test(in, out [][2]int) {
	for _, v := range in {
		suite.Merger.MergeInterval(model.NewInterval(v[0], v[1]))
	}
	// Compare via String() so assertion failures print readably.
	expected := make([]string, len(out))
	for i, v := range out {
		expected[i] = model.NewInterval(v[0], v[1]).String()
	}
	actualResult := suite.Merger.Result()
	actual := make([]string, len(actualResult))
	for i, v := range actualResult {
		actual[i] = v.String()
	}
	assert.Equal(suite.T(), expected, actual)
}
// TestOne: a single interval is returned unchanged.
func (suite *MergerTestSuite) TestOne() {
	suite.test([][2]int{
		{25, 30},
	}, [][2]int{
		{25, 30},
	})
}

// TestSimple: overlapping intervals collapse; disjoint ones survive.
func (suite *MergerTestSuite) TestSimple() {
	suite.test([][2]int{
		{25, 30},
		{2, 19},
		{14, 23},
		{4, 8},
	}, [][2]int{
		{2, 23},
		{25, 30},
	})
}

// TestExtended: one large interval swallows everything else.
func (suite *MergerTestSuite) TestExtended() {
	suite.test([][2]int{
		{25, 30},
		{2, 19},
		{14, 23},
		{4, 8},
		{35, 40},
		{1, 50},
		{32, 34},
	}, [][2]int{
		{1, 50},
	})
}

// TestBad: {4,6} bridges the previously separate {3,4} and {5,6}.
func (suite *MergerTestSuite) TestBad() {
	suite.test([][2]int{
		{5, 6},
		{3, 4},
		{1, 2},
		{4, 6},
	}, [][2]int{
		{1, 2},
		{3, 6},
	})
}

// TestMoreBad: bridging plus containment across several merges.
func (suite *MergerTestSuite) TestMoreBad() {
	suite.test([][2]int{
		{50, 60},
		{10, 24},
		{50, 55},
		{10, 20},
		{45, 48},
		{10, 22},
		{30, 60},
	}, [][2]int{
		{10, 24},
		{30, 60},
	})
}
|
package memfs
import (
"encoding/json"
"fmt"
"log"
"os"
"path/filepath"
"testing"
"time"
"github.com/shuLhan/share/lib/test"
"github.com/shuLhan/share/lib/text/diff"
)
var (
	// _testWD is the working directory at test start; fixture paths
	// are resolved relative to it.
	_testWD string
)
// TestMain records the working directory and creates the exclude/include
// fixture directories before running the test binary.
func TestMain(m *testing.M) {
	var err error
	_testWD, err = os.Getwd()
	if err != nil {
		log.Fatal(err)
	}
	// An already-existing directory is fine; any other error is fatal.
	// The previous version unwrapped the *os.PathError and compared
	// perr.Err != os.ErrExist directly — on Unix MkdirAll reports
	// syscall.EEXIST, which is not equal to the os.ErrExist sentinel, so
	// that check would misfire. os.IsExist handles both.
	for _, dir := range []string{"testdata/exclude/dir", "testdata/include/dir"} {
		err = os.MkdirAll(filepath.Join(_testWD, dir), 0700)
		if err != nil && !os.IsExist(err) {
			log.Fatalf("MkdirAll %s: %v", dir, err)
		}
	}
	os.Exit(m.Run())
}
// TestNew exercises memfs.New with an empty root, a file as the root,
// and a directory root under various exclude/include patterns, checking
// either the returned error or the resulting list of mounted paths.
func TestNew(t *testing.T) {
	afile := filepath.Join(_testWD, "testdata/index.html")
	cases := []struct {
		desc       string
		opts       Options
		expErr     string
		expMapKeys []string
	}{{
		desc:       "With empty dir",
		expErr:     "open : no such file or directory",
		expMapKeys: make([]string, 0),
	}, {
		desc: "With file",
		opts: Options{
			Root: afile,
		},
		expErr: fmt.Sprintf("memfs.New: mount: %q must be a directory", afile),
	}, {
		desc: "With directory",
		opts: Options{
			Root: filepath.Join(_testWD, "testdata"),
			Excludes: []string{
				"memfs_generate.go$",
				"direct$",
			},
		},
		expMapKeys: []string{
			"/",
			"/exclude",
			"/exclude/index.css",
			"/exclude/index.html",
			"/exclude/index.js",
			"/include",
			"/include/index.css",
			"/include/index.html",
			"/include/index.js",
			"/index.css",
			"/index.html",
			"/index.js",
			"/plain",
		},
	}, {
		desc: "With excludes",
		opts: Options{
			Root: filepath.Join(_testWD, "testdata"),
			Excludes: []string{
				`.*\.js$`,
				"memfs_generate.go$",
				"direct$",
			},
		},
		expMapKeys: []string{
			"/",
			"/exclude",
			"/exclude/index.css",
			"/exclude/index.html",
			"/include",
			"/include/index.css",
			"/include/index.html",
			"/index.css",
			"/index.html",
			"/plain",
		},
	}, {
		desc: "With includes",
		opts: Options{
			Root: filepath.Join(_testWD, "testdata"),
			Includes: []string{
				`.*\.js$`,
			},
			Excludes: []string{
				"memfs_generate.go$",
				"direct$",
			},
		},
		expMapKeys: []string{
			"/",
			"/exclude",
			"/exclude/index.js",
			"/include",
			"/include/index.js",
			"/index.js",
		},
	}}
	for _, c := range cases {
		t.Log(c.desc)
		mfs, err := New(&c.opts)
		if err != nil {
			// On error, only the message is asserted; the remaining
			// checks need a valid MemFS.
			test.Assert(t, "error", c.expErr, err.Error())
			continue
		}
		gotListNames := mfs.ListNames()
		test.Assert(t, "names", c.expMapKeys, gotListNames)
	}
}
// TestMemFS_AddFile checks that AddFile both returns the expected new
// node and registers it so a later Get on the internal path finds it.
func TestMemFS_AddFile(t *testing.T) {
	cases := []struct {
		desc     string
		intPath  string
		extPath  string
		exp      *Node
		expError string
	}{{
		desc: "With empty internal path",
	}, {
		desc:     "With external path is not exist",
		intPath:  "internal/file",
		extPath:  "is/not/exist",
		expError: "memfs.AddFile: stat is/not/exist: no such file or directory",
	}, {
		desc:    "With file exist",
		intPath: "internal/file",
		extPath: "testdata/direct/add/file",
		exp: &Node{
			SysPath:     "testdata/direct/add/file",
			Path:        "internal/file",
			name:        "file",
			ContentType: "text/plain; charset=utf-8",
			size:        22,
			V:           []byte("Test direct add file.\n"),
			GenFuncName: "generate_internal_file",
		},
	}, {
		desc:    "With directories exist",
		intPath: "internal/file2",
		extPath: "testdata/direct/add/file2",
		exp: &Node{
			SysPath:     "testdata/direct/add/file2",
			Path:        "internal/file2",
			name:        "file2",
			ContentType: "text/plain; charset=utf-8",
			size:        24,
			V:           []byte("Test direct add file 2.\n"),
			GenFuncName: "generate_internal_file2",
		},
	}}
	opts := &Options{
		Root: "testdata",
	}
	mfs, err := New(opts)
	if err != nil {
		t.Fatal(err)
	}
	for _, c := range cases {
		t.Log(c.desc)
		got, err := mfs.AddFile(c.intPath, c.extPath)
		if err != nil {
			test.Assert(t, "error", c.expError, err.Error())
			continue
		}
		// Zero the fields that vary per machine/run before comparing.
		if got != nil {
			got.modTime = time.Time{}
			got.mode = 0
			got.Parent = nil
			got.Childs = nil
		}
		test.Assert(t, "AddFile", c.exp, got)
		if c.exp == nil {
			continue
		}
		// The added node must also be reachable through Get.
		got, err = mfs.Get(c.intPath)
		if err != nil {
			t.Fatal(err)
		}
		if got != nil {
			got.modTime = time.Time{}
			got.mode = 0
			got.Parent = nil
			got.Childs = nil
		}
		test.Assert(t, "Get", c.exp, got)
	}
}
// TestMemFS_Get checks the content and detected Content-Type of every
// mounted path, including files above MaxFileSize whose content is not
// kept in memory.
func TestMemFS_Get(t *testing.T) {
	cases := []struct {
		path           string
		expV           []byte
		expContentType []string
		expErr         error
	}{{
		path: "/",
	}, {
		path: "/exclude",
	}, {
		path:   "/exclude/dir",
		expErr: os.ErrNotExist,
	}, {
		path:           "/exclude/index.css",
		expV:           []byte("body {\n}\n"),
		expContentType: []string{"text/css; charset=utf-8"},
	}, {
		path:           "/exclude/index.html",
		expV:           []byte("<html></html>\n"),
		expContentType: []string{"text/html; charset=utf-8"},
	}, {
		path: "/exclude/index.js",
		expContentType: []string{
			"text/javascript; charset=utf-8",
			"application/javascript",
		},
	}, {
		path: "/include",
	}, {
		path:   "/include/dir",
		expErr: os.ErrNotExist,
	}, {
		path:           "/include/index.css",
		expV:           []byte("body {\n}\n"),
		expContentType: []string{"text/css; charset=utf-8"},
	}, {
		path:           "/include/index.html",
		expV:           []byte("<html></html>\n"),
		expContentType: []string{"text/html; charset=utf-8"},
	}, {
		path: "/include/index.js",
		expContentType: []string{
			"text/javascript; charset=utf-8",
			"application/javascript",
		},
	}, {
		path:           "/index.css",
		expV:           []byte("body {\n}\n"),
		expContentType: []string{"text/css; charset=utf-8"},
	}, {
		path:           "/index.html",
		expV:           []byte("<html></html>\n"),
		expContentType: []string{"text/html; charset=utf-8"},
	}, {
		path: "/index.js",
		expContentType: []string{
			"text/javascript; charset=utf-8",
			"application/javascript",
		},
	}, {
		path:           "/plain",
		expContentType: []string{"application/octet-stream"},
	}}
	dir := filepath.Join(_testWD, "/testdata")
	opts := &Options{
		Root: dir,
		// Limit file size to allow testing Get from disk on file "index.js".
		MaxFileSize: 15,
	}
	mfs, err := New(opts)
	if err != nil {
		t.Fatal(err)
	}
	for _, c := range cases {
		t.Logf("Get %s", c.path)
		got, err := mfs.Get(c.path)
		if err != nil {
			test.Assert(t, "error", c.expErr, err)
			continue
		}
		// Content is only held in memory for files within MaxFileSize.
		if got.size <= opts.MaxFileSize {
			test.Assert(t, "node.V", c.expV, got.V)
		}
		if len(got.ContentType) == 0 && len(c.expContentType) == 0 {
			continue
		}
		// Accept any of the platform-dependent Content-Type spellings.
		found := false
		for _, expCT := range c.expContentType {
			if expCT == got.ContentType {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("expecting one of the Content-Type %v, got %s",
				c.expContentType, got.ContentType)
		}
	}
}
// TestMemFS_MarshalJSON pins the JSON serialization of a small fixture
// tree after forcing every node's modification time to a fixed instant.
func TestMemFS_MarshalJSON(t *testing.T) {
	logp := "MarshalJSON"
	modTime := time.Date(2021, 7, 30, 20, 04, 00, 0, time.UTC)
	opts := &Options{
		Root: "testdata/direct/",
	}
	mfs, err := New(opts)
	if err != nil {
		t.Fatal(err)
	}
	// Normalize mod times so the expected JSON below is deterministic.
	mfs.resetAllModTime(modTime)
	got, err := json.MarshalIndent(mfs, "", "\t")
	if err != nil {
		t.Fatal(err)
	}
	exp := `{
	"/": {
		"path": "/",
		"name": "/",
		"mod_time_epoch": 1627675440,
		"mod_time_rfc3339": "2021-07-30 20:04:00 +0000 UTC",
		"mode_string": "drwxr-xr-x",
		"size": 0,
		"is_dir": true,
		"childs": [
			{
				"path": "/add",
				"name": "add",
				"mod_time_epoch": 1627675440,
				"mod_time_rfc3339": "2021-07-30 20:04:00 +0000 UTC",
				"mode_string": "drwxr-xr-x",
				"size": 0,
				"is_dir": true,
				"childs": [
					{
						"path": "/add/file",
						"name": "file",
						"mod_time_epoch": 1627675440,
						"mod_time_rfc3339": "2021-07-30 20:04:00 +0000 UTC",
						"mode_string": "-rw-r--r--",
						"size": 22,
						"is_dir": false,
						"childs": []
					},
					{
						"path": "/add/file2",
						"name": "file2",
						"mod_time_epoch": 1627675440,
						"mod_time_rfc3339": "2021-07-30 20:04:00 +0000 UTC",
						"mode_string": "-rw-r--r--",
						"size": 24,
						"is_dir": false,
						"childs": []
					}
				]
			}
		]
	},
	"/add": {
		"path": "/add",
		"name": "add",
		"mod_time_epoch": 1627675440,
		"mod_time_rfc3339": "2021-07-30 20:04:00 +0000 UTC",
		"mode_string": "drwxr-xr-x",
		"size": 0,
		"is_dir": true,
		"childs": [
			{
				"path": "/add/file",
				"name": "file",
				"mod_time_epoch": 1627675440,
				"mod_time_rfc3339": "2021-07-30 20:04:00 +0000 UTC",
				"mode_string": "-rw-r--r--",
				"size": 22,
				"is_dir": false,
				"childs": []
			},
			{
				"path": "/add/file2",
				"name": "file2",
				"mod_time_epoch": 1627675440,
				"mod_time_rfc3339": "2021-07-30 20:04:00 +0000 UTC",
				"mode_string": "-rw-r--r--",
				"size": 24,
				"is_dir": false,
				"childs": []
			}
		]
	},
	"/add/file": {
		"path": "/add/file",
		"name": "file",
		"mod_time_epoch": 1627675440,
		"mod_time_rfc3339": "2021-07-30 20:04:00 +0000 UTC",
		"mode_string": "-rw-r--r--",
		"size": 22,
		"is_dir": false,
		"childs": []
	},
	"/add/file2": {
		"path": "/add/file2",
		"name": "file2",
		"mod_time_epoch": 1627675440,
		"mod_time_rfc3339": "2021-07-30 20:04:00 +0000 UTC",
		"mode_string": "-rw-r--r--",
		"size": 24,
		"is_dir": false,
		"childs": []
	}
}`
	// A line-level diff gives readable failures for the big fixture.
	diffs := diff.Text([]byte(exp), got, diff.LevelLines)
	if len(diffs.Adds) != 0 {
		t.Fatalf("%s: adds: %v", logp, diffs.Adds)
	}
	if len(diffs.Dels) != 0 {
		t.Fatalf("%s: dels: %#v", logp, diffs.Dels)
	}
	if len(diffs.Changes) != 0 {
		t.Fatalf("%s: changes: %s", logp, diffs.Changes)
	}
}
// TestMemFS_isIncluded verifies include/exclude regexp filtering of system
// paths under the testdata directory.
func TestMemFS_isIncluded(t *testing.T) {
	testCases := []struct {
		name     string
		includes []string
		excludes []string
		paths    []string
		want     []bool
	}{{
		name: "With empty includes and excludes",
		paths: []string{
			filepath.Join(_testWD, "/testdata"),
			filepath.Join(_testWD, "/testdata/index.html"),
		},
		want: []bool{true, true},
	}, {
		name: "With excludes only",
		excludes: []string{
			`.*/exclude`,
			`.*\.html$`,
		},
		paths: []string{
			filepath.Join(_testWD, "/testdata"),
			filepath.Join(_testWD, "/testdata/exclude"),
			filepath.Join(_testWD, "/testdata/exclude/dir"),
			filepath.Join(_testWD, "/testdata/include"),
			filepath.Join(_testWD, "/testdata"),
			filepath.Join(_testWD, "/testdata/index.html"),
			filepath.Join(_testWD, "/testdata/index.css"),
		},
		want: []bool{true, false, false, true, true, false, true},
	}, {
		name: "With includes only",
		includes: []string{
			`.*/include`,
			`.*\.html$`,
		},
		paths: []string{
			filepath.Join(_testWD, "/testdata"),
			filepath.Join(_testWD, "/testdata/include"),
			filepath.Join(_testWD, "/testdata/include/dir"),
			filepath.Join(_testWD, "/testdata"),
			filepath.Join(_testWD, "/testdata/index.html"),
			filepath.Join(_testWD, "/testdata/index.css"),
		},
		want: []bool{true, true, true, true, true, false},
	}, {
		name: "With excludes and includes",
		excludes: []string{
			`.*/exclude`,
			`.*\.js$`,
		},
		includes: []string{
			`.*/include`,
			`.*\.(css|html)$`,
		},
		paths: []string{
			filepath.Join(_testWD, "/testdata"),
			filepath.Join(_testWD, "/testdata/index.html"),
			filepath.Join(_testWD, "/testdata/index.css"),
			filepath.Join(_testWD, "/testdata/exclude"),
			filepath.Join(_testWD, "/testdata/exclude/dir"),
			filepath.Join(_testWD, "/testdata/exclude/index.css"),
			filepath.Join(_testWD, "/testdata/exclude/index.html"),
			filepath.Join(_testWD, "/testdata/exclude/index.js"),
			filepath.Join(_testWD, "/testdata/include"),
			filepath.Join(_testWD, "/testdata/include/dir"),
			filepath.Join(_testWD, "/testdata/include/index.css"),
			filepath.Join(_testWD, "/testdata/include/index.html"),
			filepath.Join(_testWD, "/testdata/include/index.js"),
		},
		want: []bool{
			true, true, true,
			false, false, false, false, false,
			true, true, true, true, false,
		},
	}}
	for _, tc := range testCases {
		t.Log(tc.name)
		memOpts := &Options{
			Includes: tc.includes,
			Excludes: tc.excludes,
		}
		mfs, err := New(memOpts)
		if err != nil {
			t.Fatal(err)
		}
		for idx, sysPath := range tc.paths {
			info, err := os.Stat(sysPath)
			if err != nil {
				t.Fatal(err)
			}
			ok := mfs.isIncluded(sysPath, info.Mode())
			test.Assert(t, sysPath, tc.want[idx], ok)
		}
	}
}
// TestMerge checks Merge both when the same MemFS instance is passed twice
// and when two distinct instances are combined.
func TestMerge(t *testing.T) {
	directOpts := &Options{
		Root: "testdata/direct",
	}
	direct, err := New(directOpts)
	if err != nil {
		t.Fatal(err)
	}
	includeOpts := &Options{
		Root: "testdata/include",
	}
	include, err := New(includeOpts)
	if err != nil {
		t.Fatal(err)
	}
	testCases := []struct {
		desc   string
		params []*MemFS
		exp    *MemFS
	}{{
		desc:   "with the same instance",
		params: []*MemFS{direct, direct},
		exp: &MemFS{
			PathNodes: &PathNode{
				v: map[string]*Node{
					"/": {
						SysPath: "..",
						Path:    "/",
						Childs: []*Node{
							direct.MustGet("/add"),
						},
						mode: 2147484141, // 2147483648 (dir bit) + 0755
					},
					"/add":       direct.MustGet("/add"),
					"/add/file":  direct.MustGet("/add/file"),
					"/add/file2": direct.MustGet("/add/file2"),
				},
			},
		},
	}, {
		desc:   "with different instances",
		params: []*MemFS{direct, include},
		exp: &MemFS{
			PathNodes: &PathNode{
				v: map[string]*Node{
					"/": {
						SysPath: "..",
						Path:    "/",
						Childs: []*Node{
							direct.MustGet("/add"),
							include.MustGet("/index.css"),
							include.MustGet("/index.html"),
							include.MustGet("/index.js"),
						},
						mode: 2147484141,
					},
					"/add":        direct.MustGet("/add"),
					"/add/file":   direct.MustGet("/add/file"),
					"/add/file2":  direct.MustGet("/add/file2"),
					"/index.css":  include.MustGet("/index.css"),
					"/index.html": include.MustGet("/index.html"),
					"/index.js":   include.MustGet("/index.js"),
				},
			},
		},
	}}
	for _, tc := range testCases {
		got := Merge(tc.params...)
		test.Assert(t, tc.desc, tc.exp.PathNodes.v, got.PathNodes.v)
	}
}
|
package nil
import (
u "lib/utils"
ui "lib/UI"
c "github.com/skilstak/go/colors"
p "lib/pythagorean"
a "lib/area"
pe "lib/perimeter"
t "lib/trig"
)
// TriangleMenu runs the interactive triangle tools menu, dispatching to the
// pythagorean, trig, area and perimeter sub-menus until the user picks "e".
func TriangleMenu() {
	// Quick thanks to @whitman-colm on github for this system
	isDone := false
	var choice string
	for !isDone { // idiomatic form; avoid comparing booleans to literals
		switch choice {
		case "a":
			p.PythagoreanMenu()
			choice = ""
		case "b":
			t.TrigMenu()
			choice = ""
		case "c":
			a.AreaMenu()
			choice = ""
		case "d":
			pe.PerimeterMenu()
			choice = ""
		case "e":
			isDone = true
		default:
			// choice starts empty, so the first pass always shows the menu
			ui.TriangleMenuUI()
			choice = u.Ask(c.Y + ">>>" + c.B3 + " ")
		}
	}
}
|
package node
import (
"net/http"
"github.com/rancher/apiserver/pkg/types"
"github.com/rancher/steve/pkg/schema"
"github.com/rancher/steve/pkg/server"
"github.com/rancher/wrangler/pkg/schemas"
"github.com/harvester/harvester/pkg/config"
)
// RegisterSchema wires the "node" schema into the API server, attaching the
// node formatter plus the maintenance-mode and cordon/uncordon action handlers.
func RegisterSchema(scaled *config.Scaled, server *server.Server, options config.Options) error {
	actionHandler := ActionHandler{
		nodeClient: scaled.Management.CoreFactory.Core().V1().Node(),
		nodeCache:  scaled.Management.CoreFactory.Core().V1().Node().Cache(),
	}
	// every action is served by the same handler
	actions := map[string]schemas.Action{
		enableMaintenanceModeAction:  {},
		disableMaintenanceModeAction: {},
		cordonAction:                 {},
		uncordonAction:               {},
	}
	handlers := map[string]http.Handler{
		enableMaintenanceModeAction:  actionHandler,
		disableMaintenanceModeAction: actionHandler,
		cordonAction:                 actionHandler,
		uncordonAction:               actionHandler,
	}
	tmpl := schema.Template{
		ID: "node",
		Customize: func(s *types.APISchema) {
			s.Formatter = Formatter
			s.ResourceActions = actions
			s.ActionHandlers = handlers
		},
	}
	server.SchemaFactory.AddTemplate(tmpl)
	return nil
}
|
package db
import (
_ "github.com/joho/godotenv/autoload"
"go.mongodb.org/mongo-driver/mongo"
)
var db *mongo.Client
// InitDB stores the given Mongo client in the package-level db variable so
// other functions in this package can use it. Call it once during startup.
func InitDB(client *mongo.Client) {
	db = client
}
|
// Copyright © 2017 Wei Shen <shenwei356@gmail.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package process
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/cznic/sortutil"
"github.com/pkg/errors"
pb "github.com/schollz/progressbar/v3"
"github.com/shenwei356/go-logging"
psutil "github.com/shirou/gopsutil/process"
)
// Log is *logging.Logger
var Log *logging.Logger
// pid_secondsSinceEpochInHex: the pid in decimal, "_", then the start time in seconds since the epoch encoded in base 16
var ChildMarker string = strconv.Itoa(os.Getpid()) + "_" + strconv.FormatInt(time.Now().Unix(), 16)
// init installs a default stderr logging backend for Log, unless a logger
// was already injected by the embedding application before package init ran.
func init() {
	if Log == nil {
		logFormat := logging.MustStringFormatter(`%{color}[%{level:.4s}]%{color:reset} %{message}`)
		backend := logging.NewLogBackend(os.Stderr, "", 0)
		backendFormatter := logging.NewBackendFormatter(backend, logFormat)
		logging.SetBackend(backendFormatter)
		Log = logging.MustGetLogger("process")
	}
}
// Command is the Command struct. It holds one shell command, its execution
// state, and the plumbing used to stream its output to the caller.
type Command struct {
	ID  uint64 // ID
	Cmd string // command

	Cancel  chan struct{} // channel for close
	Timeout time.Duration // time out

	ctx       context.Context    // context.WithTimeout
	ctxCancel context.CancelFunc // cancel func for timeout

	Ch      chan string   // channel for stdout
	reader  *bufio.Reader // reader for stdout
	tmpfile string        // tmpfile for stdout
	tmpfh   *os.File      // file handler for tmpfile

	finishSendOutput bool // a flag of whether finished sending output to Ch

	Err      error         // Error
	Duration time.Duration // runtime

	dryrun     bool // when true, Run echoes the command text instead of executing
	exitStatus int  // exit code captured from the finished child process

	Executed chan int // for checking if the command has been executed
}
// NewCommand create a Command. Leading spaces are stripped from cmdStr; the
// Executed channel is buffered so signalling never blocks the runner.
func NewCommand(id uint64, cmdStr string, cancel chan struct{}, timeout time.Duration) *Command {
	return &Command{
		ID:       id,
		Cmd:      strings.TrimLeft(cmdStr, " "),
		Cancel:   cancel,
		Timeout:  timeout,
		Executed: make(chan int, 2),
	}
}
// String implements fmt.Stringer, rendering the command as "cmd #<ID>: <Cmd>".
func (c *Command) String() string {
	return fmt.Sprintf("cmd #%d: %s", c.ID, c.Cmd)
}
// Verbose decides whether print extra information
var Verbose bool
var tmpfilePrefix = fmt.Sprintf("rush.%d.", os.Getpid())
// TmpOutputDataBuffer is buffer size for output of a command before saving to tmpfile,
// default 1M.
var TmpOutputDataBuffer = 1048576 // 1M
// OutputChunkSize is buffer size of output string chunk sent to channel, default 16K.
var OutputChunkSize = 16384 // 16K
// Run runs a command and send output to command.Ch in background.
// It executes the command synchronously (via c.run), then spawns a goroutine
// that relays stdout to the returned channel in line-aligned chunks, reading
// either from the in-memory buffer or from the overflow tmpfile.
func (c *Command) Run(opts *Options, tryNumber int) (chan string, error) {
	// create a return chan here; we will set the c.Ch in the parent
	ch := make(chan string, 1)
	if c.dryrun {
		// dry-run: emit the command text itself instead of executing it
		ch <- c.Cmd + "\n"
		close(ch)
		c.finishSendOutput = true
		close(c.Executed)
		return ch, nil
	}
	c.Err = c.run(opts, tryNumber)
	// don't return here, keep going so we can display
	// the output from commands that error
	var readErr error = nil
	if Verbose {
		if c.exitStatus == 0 {
			Log.Infof("finish cmd #%d in %s: %s: exit status %d", c.ID, c.Duration, c.Cmd, c.exitStatus)
		} else {
			// exitStatus will appear in wait cmd message
			Log.Infof("finish cmd #%d in %s: %s", c.ID, c.Duration, c.Cmd)
		}
	}
	go func() {
		if opts.ImmediateOutput {
			// output was already streamed by ImmediateWriter; nothing to relay
			close(ch)
			c.finishSendOutput = true
		} else {
			if c.tmpfile != "" { // data saved in tempfile
				c.reader = bufio.NewReader(c.tmpfh)
			}
			buf := make([]byte, OutputChunkSize)
			var n int
			var i int
			var b bytes.Buffer
			var bb []byte
			var existedN int
			// var N uint64
			for {
				if c.reader != nil {
					n, readErr = c.reader.Read(buf)
				} else {
					// no reader at all: treat as empty output
					n = 0
					readErr = io.EOF
				}
				existedN = b.Len()
				b.Write(buf[0:n])
				if readErr != nil {
					if readErr == io.EOF {
						// flush whatever is left, even without trailing newline
						if b.Len() > 0 {
							// if Verbose {
							// N += uint64(b.Len())
							// }
							ch <- b.String() // string(buf[0:n])
						}
						b.Reset()
						readErr = nil
					}
					break
				}
				// send only complete lines; keep the partial tail buffered
				bb = b.Bytes()
				i = bytes.LastIndexByte(bb, '\n')
				if i < 0 {
					continue
				}
				// if Verbose {
				// N += uint64(len(bb[0 : i+1]))
				// }
				ch <- string(bb[0 : i+1]) // string(buf[0:n])
				b.Reset()
				if i-existedN+1 < n {
					// ------ ======i========n
					// existed buf
					// 5 4 6
					b.Write(buf[i-existedN+1 : n])
				}
				// N += n
			}
			// if Verbose {
			// Log.Debugf("cmd #%d sent %d bytes\n", c.ID, N)
			// }
			// if Verbose {
			// Log.Infof("finish reading data from: %s", c.Cmd)
			// }
			close(ch)
			c.finishSendOutput = true
		}
	}()
	// NOTE(review): readErr is written inside the goroutine above and read
	// here without synchronization — a data race. In practice it is nil at
	// this point (the goroutine resets it before breaking), so this branch
	// is effectively dead; confirm before relying on it.
	if c.Err != nil {
		return ch, c.Err
	} else {
		if readErr != nil {
			return ch, readErr
		} else {
			return ch, nil
		}
	}
}
// Cleanup removes tmpfile. It first closes the tmpfile handle (if any);
// a close failure aborts the removal and is returned to the caller.
func (c *Command) Cleanup() error {
	if c.tmpfh != nil {
		// if Verbose {
		// Log.Infof("close tmpfh for: %s", c.Cmd)
		// }
		if err := c.tmpfh.Close(); err != nil {
			return err
		}
	}
	if c.tmpfile == "" {
		return nil
	}
	if Verbose {
		Log.Infof("remove tmpfile (%s) for command: %s", c.tmpfile, c.Cmd)
	}
	return os.Remove(c.tmpfile)
}
// ErrTimeout means command timeout
var ErrTimeout = fmt.Errorf("time out")
// ErrCancelled means command being cancelled
var ErrCancelled = fmt.Errorf("cancelled")
// getExitStatus extracts the child's exit code from the error returned by
// exec.Cmd.Wait. It returns 0 when err is nil or not an *exec.ExitError.
func (c *Command) getExitStatus(err error) int {
	if exitError, ok := err.(*exec.ExitError); ok {
		// ExitCode (Go 1.12+) is portable: it avoids the unix-only
		// syscall.WaitStatus type assertion and returns -1 when the
		// process was terminated by a signal.
		return exitError.ExitCode()
	}
	// no error, so return exitStatus 0
	return 0
}
// TopLevelEnum marks whether lexEncode is encoding the caller's original
// number (TopLevel) or a recursive length prefix (NotTopLevel).
type TopLevelEnum int

const (
	NotTopLevel TopLevelEnum = 0
	TopLevel    TopLevelEnum = 1
)

// lexEncode lexicographically encodes integer n,
// based on http://www.zanopha.com/docs/elen.pdf
//
// Single-digit numbers encode as themselves. Longer numbers are preceded by
// "_" plus the recursively-encoded digit count, and at the top level a "."
// separates that length prefix from the number itself, e.g. 12 -> "_2.12".
func lexEncode(n uint64, topLevel TopLevelEnum) string {
	nstr := strconv.FormatUint(n, 10) // cheaper than fmt.Sprintf("%d", n)
	nlen := uint64(len(nstr))
	if nlen == 1 {
		// no lex prefix needed for single-digit numbers
		return nstr
	}
	// include non-numeric part of lex prefix;
	// to allow proper sorting, '_' must come after numerics in the ascii table
	encoded := "_" + lexEncode(nlen, NotTopLevel)
	if topLevel == TopLevel {
		// the lex separator lets the reader tell the length prefix apart from
		// the original number; '.' sorts before numerics in the ascii table
		encoded += "."
	}
	// finally the original number (or recursive length prefix)
	return encoded + nstr
}
// getEntrySeparator returns the separator placed between prefix entries.
// To allow proper sorting, it must come before numerics in the ascii table.
func getEntrySeparator() string {
	const entrySeparator = "/"
	return entrySeparator
}
// ImmediateLineWriter is safe to use concurrently.
// It prefixes each output line with "(cmd/try/line): " so interleaved output
// from parallel jobs can be attributed and lexicographically re-sorted.
type ImmediateLineWriter struct {
	lock          *sync.Mutex // guards every write; shared across all jobs
	numJobs       int         // prefixes are emitted only when numJobs > 1
	cmdId         uint64
	tryNumber     int
	line          string // partial line carried over between Write calls
	lineNumber    uint64 // 1-based, advanced on each completed line
	includePrefix bool   // whether the next non-empty data needs a prefix
}
func includeImmediatePrefix(cmdId uint64, tryNumber int, lineNumber uint64, data *string) {
prefix := fmt.Sprintf("(%s", lexEncode(cmdId, TopLevel))
prefix += fmt.Sprintf("%s%s", getEntrySeparator(), lexEncode(uint64(tryNumber), TopLevel))
prefix += fmt.Sprintf("%s%s): ", getEntrySeparator(), lexEncode(lineNumber, TopLevel))
if data != nil {
*data = *data + prefix
}
}
// NewImmediateLineWriter builds a line writer for one command attempt.
func NewImmediateLineWriter(lock *sync.Mutex, numJobs int, cmdId uint64, tryNumber int) *ImmediateLineWriter {
	return &ImmediateLineWriter{
		lock:          lock,
		numJobs:       numJobs,
		cmdId:         cmdId,
		tryNumber:     tryNumber,
		lineNumber:    1,    // line numbers start at 1
		includePrefix: true, // the very first line always gets a prefix
	}
}
// addPrefixIfNeeded appends the line prefix to output at most once per line;
// further calls are no-ops until includePrefix is set again by the caller.
func (lw *ImmediateLineWriter) addPrefixIfNeeded(output *string) {
	if lw.includePrefix {
		includeImmediatePrefix(lw.cmdId, lw.tryNumber, lw.lineNumber, output)
		lw.includePrefix = false
	}
}
// lineDelimiterRe splits output on \r\n or \n; compiled once instead of on
// every call.
var lineDelimiterRe = regexp.MustCompile("(?:\r\n|\n)")

// WritePrefixedLines writes input to outfh, prefixing every non-empty line
// with "(cmd/try/line): " when jobs run in parallel. When lw.lock is nil
// nothing is written (matching the original behavior).
//
// Bug fix: FindAllStringIndex returns BYTE offsets, but the original code
// applied them to a []rune conversion of input. For pure-ASCII data the two
// agree, but any multibyte UTF-8 input produced wrong slices or an
// out-of-range panic. We now slice the input string directly by byte offset,
// which is correct for all encodings since the delimiters are single-byte.
func (lw *ImmediateLineWriter) WritePrefixedLines(input string, outfh *os.File) {
	if lw.lock == nil {
		return
	}
	// make immediate output thread-safe and do one write at a time;
	// defer ensures the lock is released even if WriteString panics
	lw.lock.Lock()
	defer lw.lock.Unlock()
	if lw.numJobs <= 1 {
		// no prefixes needed, since jobs are running serially
		if outfh != nil {
			outfh.WriteString(input)
		}
		return
	}
	var output string
	matchExtents := lineDelimiterRe.FindAllStringIndex(input, -1)
	if len(matchExtents) > 0 {
		lastStart := 0
		for _, matchExtent := range matchExtents {
			beforePart := input[lastStart:matchExtent[0]]
			lw.line = lw.line + beforePart
			// skip empty lines
			if len(lw.line) > 0 {
				// there is some data in this line, so add prefix if needed
				lw.addPrefixIfNeeded(&output)
				// append the chars up to and including the delimiter
				delimiterPart := input[matchExtent[0]:matchExtent[1]]
				output = output + beforePart + delimiterPart
				// defer including prefix, so only add it on next non-empty data
				lw.includePrefix = true
				// clear line, since saw delimiter
				lw.line = ""
				lw.lineNumber++
			}
			lastStart = matchExtent[1]
		}
		// append any remaining chars after the last delimiter
		lastPart := input[lastStart:]
		if len(lastPart) > 0 {
			lw.addPrefixIfNeeded(&output)
			lw.line = lw.line + lastPart
			output = output + lastPart
		}
	} else {
		// no delimiters in this section; treat all input as partial line data
		lw.addPrefixIfNeeded(&output)
		lw.line = lw.line + input
		output = output + input
	}
	if outfh != nil {
		outfh.WriteString(output)
	}
}
// ImmediateWriter adapts an ImmediateLineWriter to the io.Writer interface,
// forwarding written bytes to fh with line prefixes applied.
type ImmediateWriter struct {
	lineWriter *ImmediateLineWriter
	fh         *os.File
}

// NewImmediateWriter wraps lineWriter and fh in an ImmediateWriter.
func NewImmediateWriter(lineWriter *ImmediateLineWriter, fh *os.File) *ImmediateWriter {
	return &ImmediateWriter{lineWriter: lineWriter, fh: fh}
}

// Write implements io.Writer. Empty writes are ignored; the full length is
// always reported as written and no error is ever returned.
func (iw ImmediateWriter) Write(p []byte) (n int, err error) {
	if len(p) > 0 {
		iw.lineWriter.WritePrefixedLines(string(p), iw.fh)
	}
	return len(p), nil
}
// from https://softwareengineering.stackexchange.com/questions/177428/sets-data-structure-in-golang
// IntSet is a concurrency-safe set of ints backed by sync.Map.
type IntSet struct {
	// set map[int]bool
	set sync.Map
}

// Add inserts i and reports whether it was newly added.
// LoadOrStore makes the check-and-insert a single atomic operation; the
// original separate Load + Store allowed two concurrent callers to both
// observe "not found" and both return true for the same value.
func (set *IntSet) Add(i int) bool {
	_, found := set.set.LoadOrStore(i, true)
	return !found // false if it existed already
}
// Signal identifiers (CTRL_C/CTRL_BREAK/KILL) and the SEND_* bit-mask flags
// that record which of those signals a process is allowed to receive.
const (
	INVALID_HANDLE int = 0

	// signal identifiers, used as the signalNum argument
	CTRL_C_SIGNAL     int = 0
	CTRL_BREAK_SIGNAL int = 1
	KILL_SIGNAL       int = 2

	// bit mask
	SEND_NO_SIGNAL         int = 0
	SEND_CTRL_C_SIGNAL     int = 1
	SEND_CTRL_BREAK_SIGNAL int = 2
	SEND_KILL_SIGNAL       int = 4
)
// canSendSignal reports whether a signal may be sent to the named child
// process. An entry of "all" in noSignalExes suppresses signals for every
// process. err is always nil; it is kept for signature stability.
func canSendSignal(childProcessName string, noSignalExes []string) (canSendSignal bool, err error) {
	for _, noSignalExe := range noSignalExes {
		if noSignalExe == "all" || noSignalExe == childProcessName {
			return false, nil
		}
	}
	return true, nil
}
// processHandleStartTimes caches process start times keyed by process handle.
var processHandleStartTimes = make(map[int]uint64)

// getProcessStartTimeFromHandle returns the start time for processHandle,
// consulting the cache first. The value is cached only on success: the
// original cached unconditionally, so a transient _getProcessStartTime
// failure froze a bogus zero start time into the cache and later calls
// returned it with a nil error.
func getProcessStartTimeFromHandle(processHandle int) (startTime uint64, err error) {
	startTime, keyPresent := processHandleStartTimes[processHandle]
	if !keyPresent {
		startTime, err = _getProcessStartTime(processHandle)
		if err == nil {
			processHandleStartTimes[processHandle] = startTime
		}
	}
	return
}
// ProcessRecord pairs a pid with its opened OS handle, liveness/access
// flags observed when the handle was requested, and the SEND_* bit mask of
// signals the process is allowed to receive.
type ProcessRecord struct {
	pid           int
	processHandle int
	processExists bool
	accessGranted bool
	signalsToSend int // bit mask of SEND_* flags
}
// pidRecords caches ProcessRecords keyed by pid.
var pidRecords = make(map[int]ProcessRecord)

// getProcessRecordFromPid returns the cached record for pid, opening and
// caching a new handle on first use. A zero-value record is returned (and
// not cached) when the handle could not be obtained.
//
// Bug fix: the original used `:=` for the getProcess result inside the if
// block, declaring a NEW err that shadowed the named return value — callers
// therefore always received a nil error, defeating their error handling.
func getProcessRecordFromPid(pid int) (processRecord ProcessRecord, err error) {
	processRecord, keyPresent := pidRecords[pid]
	if !keyPresent {
		var processHandle int
		var processExists, accessGranted bool
		processHandle, processExists, accessGranted, err = getProcess(pid)
		if processHandle != INVALID_HANDLE && err == nil {
			processRecord = ProcessRecord{
				pid:           pid,
				processHandle: processHandle,
				processExists: processExists,
				accessGranted: accessGranted,
				signalsToSend: SEND_NO_SIGNAL}
			pidRecords[pid] = processRecord
		}
	}
	return
}
// getProcessStartTimeFromPid resolves pid to its cached process record, then
// looks up the process start time; returns 0 when no valid handle exists
// (e.g. the process is gone or access was denied).
func getProcessStartTimeFromPid(pid int) (startTime uint64, err error) {
	startTime = 0
	processRecord, err := getProcessRecordFromPid(pid)
	if processRecord.processHandle != INVALID_HANDLE && err == nil {
		startTime, err = getProcessStartTimeFromHandle(processRecord.processHandle)
	}
	return
}
// ChildCheckRecord carries a candidate process plus whether it is already
// known to be our immediate child (as opposed to a possible orphan found by
// scanning the whole process table).
type ChildCheckRecord struct {
	process    *psutil.Process
	knownChild bool
}
// checkChildProcess decides whether the candidate process should be treated
// as one of our children and, if so, which stop signals it may receive.
// Known children are confirmed via start-time comparison with their parent
// (guarding against pid reuse); otherwise the child's environment is checked
// for our marker. processHandle is INVALID_HANDLE when the process could not
// be opened.
func checkChildProcess(childCheckRecord ChildCheckRecord, noStopExes []string, noKillExes []string) (
	processHandle int,
	considerChild bool,
	signalsToSend int,
	err error) {
	considerChild = false          // first assume false
	signalsToSend = SEND_NO_SIGNAL // first assume no signal
	// use err2 for getProcessRecordFromPid(), since child may no longer exist
	processRecord, err2 := getProcessRecordFromPid(int(childCheckRecord.process.Pid))
	processHandle = processRecord.processHandle
	if err2 == nil {
		if processHandle != INVALID_HANDLE {
			doEnvCheck := true // first assume true
			if childCheckRecord.knownChild {
				// use err3 for the next operations, since
				// we may fallback to env check below
				processStartTime, err3 := getProcessStartTimeFromHandle(processHandle)
				if err3 == nil {
					parentPid, err3 := childCheckRecord.process.Ppid() // get parent pid
					if err3 == nil {
						// use err3 for getProcessStartTimeFromPid(), since parent may no longer exist
						parentStartTime, err3 := getProcessStartTimeFromPid(int(parentPid))
						if err3 == nil {
							if processStartTime >= parentStartTime {
								// this our child and
								// not from a previous parent with reused pid,
								// so we can skip the env check
								doEnvCheck = false
								considerChild = true
							}
						}
					}
				}
			}
			if doEnvCheck {
				// handle the orphaned child case
				// by looking at the env
				considerChild, err = doesChildHaveMarker(processHandle)
			}
			if err == nil {
				if considerChild {
					var childProcessName string = ""
					// the process name is only needed to evaluate the
					// no-stop/no-kill exclusion lists
					if len(noStopExes) > 0 || len(noKillExes) > 0 {
						childProcessName, err = childCheckRecord.process.Name()
						if err == nil {
							if len(childProcessName) == 0 {
								err = errors.New("childProcessName is empty")
							}
						}
						if err != nil {
							if Verbose {
								Log.Error(err)
							}
						}
					}
					signalsToSend, err = getSignalsToSend(childProcessName, noStopExes, noKillExes)
				}
			} else {
				if Verbose {
					Log.Error(err)
				}
			}
		} else {
			// failed to open child process, so don't consider it
		}
	} else {
		// failed to open child process, so don't consider it
		// check response
		if processRecord.processExists {
			if processRecord.accessGranted {
				// report errors from processes we could access
				if Verbose {
					Log.Error(err2)
				}
			} else { // access denied
				// ignore error, since we failed to get a handle to the child
				// it could be a system process that we are skipping anyway
			}
		} else { // process no longer exists
			// ignore error since no process to signal
		}
	}
	return
}
// get process tree in bottom up order
// (grandchildren are appended before their parents, so later signaling
// walks the tree leaf-first). pidsVisited breaks cycles in the pid graph.
func getProcessTreeRecursive(
	childCheckRecord ChildCheckRecord,
	noStopExes []string,
	noKillExes []string,
	pidsVisited *IntSet,
) (processRecords []ProcessRecord) {
	if considerPid(int(childCheckRecord.process.Pid)) {
		// avoid cycles in pid tree by looking at visited set
		if pidsVisited.Add(int(childCheckRecord.process.Pid)) {
			processHandle, considerChild, signalsToSend, err := checkChildProcess(
				childCheckRecord,
				noStopExes,
				noKillExes)
			if err != nil {
				if Verbose {
					Log.Error(err)
				}
			}
			if processHandle != INVALID_HANDLE {
				if considerChild {
					grandChildren, err := childCheckRecord.process.Children()
					if err != nil {
						if err == psutil.ErrorNoChildren {
							// ignore this error
							err = nil
						} else {
							if Verbose {
								Log.Error(err)
							}
						}
					} else {
						if grandChildren != nil {
							for _, grandChildProcess := range grandChildren {
								var grandChildCheckRecord = ChildCheckRecord{
									process: grandChildProcess, knownChild: childCheckRecord.knownChild}
								subProcessRecords := getProcessTreeRecursive(
									grandChildCheckRecord,
									noStopExes,
									noKillExes,
									pidsVisited)
								for _, subProcessRecord := range subProcessRecords {
									processRecords = append(processRecords, subProcessRecord)
								}
							}
						}
					}
					var processRecord = ProcessRecord{
						processHandle: processHandle, pid: int(childCheckRecord.process.Pid), signalsToSend: signalsToSend}
					processRecords = append(processRecords, processRecord)
				} else {
					// not our child: forget the handle opened while checking
					releaseProcessByPid(int(childCheckRecord.process.Pid))
				}
			}
		}
	}
	return processRecords
}
// getChildProcesses collects the process records of every descendant (and
// possible orphan, detected via the env marker), walking the process trees
// concurrently with a bounded worker pool.
func getChildProcesses(noStopExes []string, noKillExes []string) (processRecords []ProcessRecord) {
	var childCheckRecords []ChildCheckRecord
	// to handle non-orphaned children, get our immediate children
	thisProcess, err := psutil.NewProcess(int32(os.Getpid()))
	if err == nil {
		// NOTE(review): `:=` here declares a new err that shadows the outer
		// one, so a Children() failure is logged but never reaches the outer
		// `if err == nil` gates below — confirm this is intended.
		immediateChildren, err := thisProcess.Children()
		if err == nil {
			if immediateChildren != nil {
				for _, process := range immediateChildren {
					var childCheckRecord = ChildCheckRecord{
						process: process, knownChild: true} // process is known to be our child
					childCheckRecords = append(childCheckRecords, childCheckRecord)
				}
			}
		} else {
			if err == psutil.ErrorNoChildren {
				// ignore this error
				err = nil
			} else {
				if Verbose {
					Log.Error(err)
				}
			}
		}
	} else {
		if Verbose {
			Log.Error(err)
		}
	}
	if err == nil {
		// to handle orphaned children, get all processes
		// we'll check for duplicates later
		// NOTE(review): same `:=` shadowing of err as above.
		allProcesses, err := psutil.Processes()
		if err == nil {
			if allProcesses != nil {
				for _, process := range allProcesses {
					var childCheckRecord = ChildCheckRecord{
						process: process, knownChild: false} // process may or may not be our child
					childCheckRecords = append(childCheckRecords, childCheckRecord)
				}
			}
		} else {
			if Verbose {
				Log.Error(err)
			}
		}
	}
	if err == nil {
		// pidsVisited := IntSet{set: make(map[int]bool)}
		pidsVisited := IntSet{set: sync.Map{}}
		threads := 8 // runtime.NumCPU() 16 will panic
		done := make(chan int)
		ch := make(chan ProcessRecord, threads)
		// single collector goroutine owns processRecords while workers run
		go func() {
			for p := range ch {
				processRecords = append(processRecords, p)
			}
			done <- 1
		}()
		// tokens bounds the number of concurrently walking workers
		tokens := make(chan int, threads)
		var wg sync.WaitGroup
		for _, childCheckRecord := range childCheckRecords {
			wg.Add(1)
			tokens <- 1
			go func(childCheckRecord ChildCheckRecord) {
				subProcessRecords := getProcessTreeRecursive(
					childCheckRecord,
					noStopExes,
					noKillExes,
					&pidsVisited)
				for _, subProcessRecord := range subProcessRecords {
					// processRecords = append(processRecords, subProcessRecord)
					ch <- subProcessRecord
				}
				wg.Done()
				<-tokens
			}(childCheckRecord)
		}
		wg.Wait()
		close(ch)
		<-done
	}
	return processRecords
}
// signalChildProcesses sends signalNum to every recorded process whose
// signalsToSend bit mask permits it, returning how many were actually
// signaled. A summary is logged when eligible processes existed but none
// could be signaled.
func signalChildProcesses(processRecords []ProcessRecord, signalNum int) (numChildrenSignaled int) {
	// signal child processes
	numChildrenSignaled = 0
	expectedNumChildrenSignaled := 0
	for _, processRecord := range processRecords {
		sendSignal := false // first assume false
		switch signalNum {
		case CTRL_C_SIGNAL:
			if processRecord.signalsToSend&SEND_CTRL_C_SIGNAL != 0 {
				sendSignal = true
			}
		case CTRL_BREAK_SIGNAL:
			if processRecord.signalsToSend&SEND_CTRL_BREAK_SIGNAL != 0 {
				sendSignal = true
			}
		case KILL_SIGNAL:
			if processRecord.signalsToSend&SEND_KILL_SIGNAL != 0 {
				sendSignal = true
			}
		default:
			Log.Error(errors.New("Unexpected signalNum"))
		}
		if sendSignal {
			expectedNumChildrenSignaled += 1
			err := signalProcess(processRecord, signalNum)
			if err == nil {
				numChildrenSignaled += 1
			} else {
				if Verbose {
					Log.Error(err)
				}
			}
		}
	}
	if expectedNumChildrenSignaled > 0 && numChildrenSignaled == 0 {
		switch signalNum {
		case CTRL_C_SIGNAL:
			Log.Info("no child processes sent Ctrl+C signal")
		case CTRL_BREAK_SIGNAL:
			Log.Info("no child processes sent Ctrl+Break signal")
		case KILL_SIGNAL:
			Log.Info("no child processes killed")
		default:
			Log.Error(errors.New("Unexpected signalNum"))
		}
	}
	return numChildrenSignaled
}
// anyRemainingChildren reports whether any of the recorded processes still
// exists, stopping at the first live one.
func anyRemainingChildren(processRecords []ProcessRecord) (anyRemaining bool) {
	for _, processRecord := range processRecords {
		if doesProcessExist(processRecord.processHandle) {
			return true
		}
	}
	return false
}
// pollRemainingChildren waits (up to cleanupTime) for the recorded processes
// to exit, sleeping with capped exponential back off between checks, and
// reports whether any process is still alive. With cleanupTime <= 0 it
// performs a single immediate check.
func pollRemainingChildren(processRecords []ProcessRecord, cleanupTime time.Duration) (anyRemaining bool) {
	anyRemaining = false
	startTime := time.Now()
	sleepTime := 250 * time.Millisecond
	for {
		continuePolling := false
		anyRemaining = anyRemainingChildren(processRecords)
		if anyRemaining && cleanupTime > 0 {
			time.Sleep(sleepTime)
			elapsedTime := time.Since(startTime)
			if elapsedTime < cleanupTime {
				// exponential back off with limit:
				// increase sleep time if next elapsedTime is below 1/2 of cleanupTime
				if elapsedTime+sleepTime*2 < cleanupTime/2 {
					// exponential back off
					sleepTime *= 2
				} else {
					// use the same sleepTime as before
				}
				continuePolling = true
			}
		}
		if !continuePolling {
			break
		}
	}
	return anyRemaining
}
// pollKillProcess repeatedly tries to kill the recorded process, rechecking
// its existence after each attempt with a fixed 250ms delay, and gives up
// with an error after 30 retries.
func pollKillProcess(processRecord ProcessRecord) (err error) {
	if !doesProcessExist(processRecord.processHandle) {
		return nil
	}
	for attempts := 0; ; attempts++ {
		err = killProcess(processRecord)
		if !doesProcessExist(processRecord.processHandle) {
			// the kill took effect; report the last killProcess result
			break
		}
		if attempts >= 30 {
			// timed out; errors.Errorf replaces errors.New(fmt.Sprintf(...))
			err = errors.Errorf("Timed out trying to kill child process, pid %d", processRecord.pid)
			break
		}
		// don't use exponential back off here
		// since want to fail out after fixed number of attempts
		time.Sleep(250 * time.Millisecond)
	}
	return err
}
// ensure our child processes are stopped.
// Escalates from Ctrl+C to Ctrl+Break to kill, waiting up to cleanupTime
// after each stage, and returns an error when eligible children remained but
// nothing could be signaled.
func stopChildProcesses(noStopExes []string, noKillExes []string, cleanupTime time.Duration) (err error) {
	err = nil           // first assume no error
	anyRemaining := true // first assume some children
	totalNumSignaled := 0
	if canStopChildProcesses() {
		processRecords := getChildProcesses(noStopExes, noKillExes)
		// progress from most graceful to most invasive stop signal
		// if no matching children, then call is a noop
		numSignaled := signalChildProcesses(processRecords, CTRL_C_SIGNAL)
		if numSignaled > 0 {
			totalNumSignaled += numSignaled
			anyRemaining = pollRemainingChildren(processRecords, cleanupTime)
		} else {
			anyRemaining = true
		}
		if anyRemaining {
			numSignaled = signalChildProcesses(processRecords, CTRL_BREAK_SIGNAL)
			if numSignaled > 0 {
				totalNumSignaled += numSignaled
				anyRemaining = pollRemainingChildren(processRecords, cleanupTime)
			} else {
				anyRemaining = true
			}
			if anyRemaining {
				numSignaled = signalChildProcesses(processRecords, KILL_SIGNAL)
				totalNumSignaled += numSignaled
			}
		}
		anyRemaining = pollRemainingChildren(processRecords, 0) // wait zero time, since already waited above
		// release process handles only after descending into all processes,
		// to ensure pids do not get reused while descending
		releaseProcesses()
	}
	if anyRemaining && totalNumSignaled == 0 {
		msg := "No child processes stopped or killed\n"
		msg += "       " // seven spaces indent
		msg += "You will need to manually stop or kill them"
		err = errors.New(msg)
	}
	return err
}
// releaseProcessByPid releases the cached OS handle for pid and removes its
// record from the cache.
//
// Bug fix: the original tested `!keyPresent`, inverting the logic — cached
// handles were never released (a handle leak, and releaseProcesses became a
// no-op), while unknown pids triggered a bogus release of a zero-value
// handle.
func releaseProcessByPid(pid int) {
	processRecord, keyPresent := pidRecords[pid]
	if keyPresent {
		delete(pidRecords, processRecord.pid)
		releaseProcessByHandle(processRecord.processHandle)
	}
}

// releaseProcesses releases every cached process handle.
// Deleting entries while ranging over the map is safe in Go.
func releaseProcesses() {
	for _, processRecord := range pidRecords {
		releaseProcessByPid(processRecord.pid)
	}
}
// getChildMarkerKey returns the environment variable name used to tag child
// processes spawned by this program.
func getChildMarkerKey() string {
	const markerKey = "RUSH_CHILD_GROUP"
	return markerKey
}
// getChildMarkerValue returns this process's marker wrapped in brackets, so
// substring searches in an environment block only hit exact matches.
func getChildMarkerValue() string {
	return fmt.Sprintf("[%s]", ChildMarker)
}
// getChildMarkerRegex returns a regexp matching the marker env assignment.
// match string with one or more [pid_timestamp] values.
// NOTE(review): [0-z] is a loose ASCII range (0x30-0x7A) that also matches
// ']', '_' and various punctuation; greedy + therefore spans runs of
// multiple "[...]" values, which containsMarker relies on — confirm before
// tightening.
func getChildMarkerRegex() *regexp.Regexp {
	return regexp.MustCompile(getChildMarkerKey() + "=\\[[0-z]+\\]")
}
// containsMarker reports whether the given environment string contains this
// process's exact bracketed marker value. The regex first isolates the
// RUSH_CHILD_GROUP assignment (possibly holding several nested markers),
// then a substring check looks for our own "[pid_time]" entry.
func containsMarker(env string) bool {
	childMarkerRegex := getChildMarkerRegex()
	childMarkerValue := getChildMarkerValue()
	match := childMarkerRegex.FindString(env)
	return strings.Contains(match, childMarkerValue)
}
var stopOnce sync.Once
// run a command and pass output to c.reader.
// Note that output returns only after finishing run.
// This function is mainly borrowed from https://github.com/brentp/gargs .
func (c *Command) run(opts *Options, tryNumber int) error {
t := time.Now()
chCancelMonitor := make(chan struct{})
defer func() {
close(chCancelMonitor)
c.Duration = time.Now().Sub(t)
close(c.Executed)
}()
var command *exec.Cmd
qcmd := fmt.Sprintf(`%s`, c.Cmd)
if Verbose {
Log.Infof("start cmd #%d: %s", c.ID, qcmd)
}
if c.Timeout > 0 {
c.ctx, c.ctxCancel = context.WithTimeout(context.Background(), c.Timeout)
command = getCommand(c.ctx, qcmd)
} else {
command = getCommand(nil, qcmd)
}
// mark child processes with our pid,
// so we can identify them later,
// in case we need to signal them
childMarkerKey := getChildMarkerKey()
childMarkerValue := getChildMarkerValue()
priorValue, found := os.LookupEnv(childMarkerKey)
if found {
// append marker values to sames key, so
// we can handle the nested calls case
childMarkerValue = priorValue + childMarkerValue
}
childMarker := fmt.Sprintf("%s=%s", childMarkerKey, childMarkerValue)
// command de-dups variables, in favor of later values
command.Env = append(os.Environ(), childMarker)
var pipeStdout io.ReadCloser = nil
var err error = nil
if opts.ImmediateOutput {
lineWriter := NewImmediateLineWriter(&opts.ImmediateLock, opts.Jobs, c.ID, tryNumber)
command.Stdout = NewImmediateWriter(lineWriter, opts.OutFileHandle)
command.Stderr = NewImmediateWriter(lineWriter, opts.ErrFileHandle)
} else {
pipeStdout, err = command.StdoutPipe()
if err != nil {
return errors.Wrapf(err, "get stdout pipe of cmd #%d: %s", c.ID, c.Cmd)
}
// no code yet for stderr handling, so just have it go to os.Stderr
command.Stderr = os.Stderr
}
err = command.Start()
if err != nil {
return errors.Wrapf(err, "start cmd #%d: %s", c.ID, c.Cmd)
}
var outPipe *bufio.Reader = nil
if !opts.ImmediateOutput {
outPipe = bufio.NewReaderSize(pipeStdout, TmpOutputDataBuffer)
// no errPipe setting here, since having the command's stderr go to os.Stderr above
}
chErr := make(chan error, 2) // may come from three sources, must be buffered
chEndBeforeTimeout := make(chan struct{})
go func() {
select {
case <-c.Cancel:
if Verbose {
Log.Warningf("cancel cmd #%d: %s", c.ID, c.Cmd)
}
chErr <- ErrCancelled
// ensure we only initiate the stop attempt once,
// from all our command threads
stopOnce.Do(func() {
err = stopChildProcesses(opts.NoStopExes, opts.NoKillExes, opts.CleanupTime)
if err != nil {
if Verbose {
Log.Error(err)
}
os.Exit(1)
}
os.Exit(1)
})
case <-chCancelMonitor:
// default: // must not use default, if you must use, use for loop
}
}()
// detect timeout
if c.Timeout > 0 {
go func() { // goroutine #T
select {
case <-c.ctx.Done():
chErr <- ErrTimeout
c.ctxCancel()
return
case <-chEndBeforeTimeout:
chErr <- nil
return
}
}()
}
// --------------------------------
// handle output
var readed []byte
if c.Timeout > 0 {
// known shortcoming: this goroutine will remains even after timeout!
// this will cause data race.
go func() { // goroutine #P
// Peek is blocked method, it waits command even after timeout!!
if opts.ImmediateOutput {
// set EOF here, since handling output in readLine() above
err = io.EOF
} else {
readed, err = outPipe.Peek(TmpOutputDataBuffer)
}
chErr <- err
}()
err = <-chErr // from timeout #T or peek #P
} else {
if opts.ImmediateOutput {
// set EOF here, since handling output in readLine() above
err = io.EOF
} else {
readed, err = outPipe.Peek(TmpOutputDataBuffer)
}
}
// less than TmpOutputDataBuffer bytes in output...
if err == bufio.ErrBufferFull || err == io.EOF {
if c.Timeout > 0 {
go func() { // goroutine #W
err1 := command.Wait()
chErr <- err1
close(chEndBeforeTimeout)
}()
err = <-chErr // from timeout #T or normal exit #W
<-chErr // from normal exit #W or timeout #T
} else {
err = command.Wait()
}
if opts.PropExitStatus {
c.exitStatus = c.getExitStatus(err)
}
if !opts.ImmediateOutput {
// get reader even on error, so we can still print the stdout and stderr of the failed child process
c.reader = bufio.NewReader(bytes.NewReader(readed))
}
if err != nil {
if strings.Contains(err.Error(), "interrupt") {
return nil
}
return errors.Wrapf(err, "wait cmd #%d: %s", c.ID, c.Cmd)
}
c.Executed <- 1 // the command is executed!
return nil
}
// more than TmpOutputDataBuffer bytes in output. must use tmpfile
if opts.ImmediateOutput {
panic("code assumes immediate output case does not use tmpfile")
}
if err != nil {
return errors.Wrapf(err, "run cmd #%d: %s", c.ID, c.Cmd)
}
c.tmpfh, err = ioutil.TempFile("", tmpfilePrefix)
if err != nil {
return errors.Wrapf(err, "create tmpfile for cmd #%d: %s", c.ID, c.Cmd)
}
c.tmpfile = c.tmpfh.Name()
if Verbose {
Log.Infof("create tmpfile (%s) for command: %s", c.tmpfile, c.Cmd)
}
btmp := bufio.NewWriter(c.tmpfh)
_, err = io.CopyBuffer(btmp, outPipe, readed)
if err != nil {
return errors.Wrapf(err, "save buffered data to tmpfile: %s", c.tmpfile)
}
if c, ok := pipeStdout.(io.ReadCloser); ok {
c.Close()
}
btmp.Flush()
_, err = c.tmpfh.Seek(0, 0)
if err == nil {
if c.Timeout > 0 {
go func() { // goroutine #3
err1 := command.Wait()
close(chEndBeforeTimeout)
chErr <- err1
}()
err = <-chErr // from timeout or normal exit
<-chErr // wait unfinished goroutine
} else {
err = command.Wait()
}
}
if opts.PropExitStatus {
c.exitStatus = c.getExitStatus(err)
}
if err != nil {
if strings.Contains(err.Error(), "interrupt") {
return nil
}
return errors.Wrapf(err, "wait cmd #%d: %s", c.ID, c.Cmd)
}
c.Executed <- 1 // the command is executed!
return nil
}
// Options contains the runtime configuration shared by Run and Run4Output:
// job throttling, retry policy, output routing, timeout/cancellation, and
// child-process stop/kill behavior.
type Options struct {
	DryRun              bool          // just print command
	Jobs                int           // max jobs number; also sizes the internal channels
	ETA                 bool          // show eta
	ETABar              *pb.ProgressBar
	KeepOrder           bool          // keep output order (buffers out-of-order results)
	Retries             int           // max retry chances
	RetryInterval       time.Duration // retry interval
	OutFileHandle       *os.File      // where to send stdout
	ErrFileHandle       *os.File      // where to send stderr
	ImmediateOutput     bool          // print output immediately and interleaved
	ImmediateLock       sync.Mutex    // make immediate output thread-safe and do one write at a time
	PrintRetryOutput    bool          // print output from retries
	Timeout             time.Duration // timeout per command; 0 disables the timeout goroutines
	StopOnErr           bool          // stop on any error
	NoStopExes          []string      // exe names to exclude from stop signal
	NoKillExes          []string      // exe names to exclude from kill signal
	CleanupTime         time.Duration // time to allow children to clean up
	PropExitStatus      bool          // propagate child exit status
	RecordSuccessfulCmd bool          // send successful command to channel
	Verbose             bool
}
// Run4Output runs commands in parallel from channel chCmdStr,
// and returns an output text channel,
// and a done channel to ensure safe exit.
//
// It layers on top of Run: each *Command received from Run carries a c.Ch
// of output lines, which this function forwards into chOut — either as soon
// as each command finishes (!KeepOrder) or strictly in command-ID order
// (KeepOrder). The done channel receives 1 only after all output has been
// forwarded and chOut closed.
func Run4Output(opts *Options, cancel chan struct{}, chCmdStr chan string) (chan string, chan string, chan int, chan int) {
	if opts.Verbose {
		Verbose = true
	}
	chCmd, chSuccessfulCmd, doneChCmd, chExitStatus := Run(opts, cancel, chCmdStr)
	chOut := make(chan string, opts.Jobs)
	done := make(chan int)
	go func() {
		var wg sync.WaitGroup
		if !opts.KeepOrder { // do not keep order
			// tokens bounds the number of concurrent forwarding goroutines
			tokens := make(chan int, opts.Jobs)
		RECEIVECMD:
			for c := range chCmd {
				select {
				case <-cancel:
					break RECEIVECMD
				default: // needed
				}
				wg.Add(1)
				tokens <- 1
				go func(c *Command) {
					defer func() {
						wg.Done()
						<-tokens
					}()
					// read data from channel and output
					// var N uint64
					for msg := range c.Ch {
						// if Verbose {
						// 	N += uint64(len(msg))
						// }
						chOut <- msg
					}
					c.Cleanup()
					if opts.ETA {
						opts.ETABar.Add(1)
					}
					// if Verbose {
					// 	Log.Debugf("receive %d bytes from cmd #%d\n", N, c.ID)
					// }
					// if Verbose {
					// 	Log.Infof("finish receiving data from: %s", c.Cmd)
					// }
				}(c)
			}
		} else { // keep order
			wg.Add(1)
			// id is the next command ID whose output may be emitted;
			// commands arriving early are parked in cmds until their turn.
			var id uint64 = 1
			var c, c1 *Command
			var ok bool
			cmds := make(map[uint64]*Command)
		RECEIVECMD2:
			for c = range chCmd {
				select {
				case <-cancel:
					break RECEIVECMD2
				default: // needed
				}
				if c.ID == id { // your turn
					for msg := range c.Ch {
						chOut <- msg
					}
					c.Cleanup()
					if opts.ETA {
						opts.ETABar.Add(1)
					}
					id++
				} else { // wait the ID come out
					// drain any parked commands that are now in sequence
					for {
						if c1, ok = cmds[id]; ok {
							for msg := range c1.Ch {
								chOut <- msg
							}
							c1.Cleanup()
							if opts.ETA {
								opts.ETABar.Add(1)
							}
							delete(cmds, c1.ID)
							id++
						} else {
							break
						}
					}
					cmds[c.ID] = c
				}
			}
			// flush whatever is still parked, in ascending ID order
			if len(cmds) > 0 {
				ids := make(sortutil.Uint64Slice, len(cmds))
				i := 0
				for id = range cmds {
					ids[i] = id
					i++
				}
				sort.Sort(ids)
				for _, id = range ids {
					c := cmds[id]
					for msg := range c.Ch {
						chOut <- msg
					}
					c.Cleanup()
					if opts.ETA {
						opts.ETABar.Add(1)
					}
				}
			}
			wg.Done()
		}
		<-doneChCmd
		wg.Wait()
		close(chOut)
		// if Verbose {
		// 	Log.Infof("finish sending all output")
		// }
		done <- 1
	}()
	return chOut, chSuccessfulCmd, done, chExitStatus
}
// combineWorker forwards every string from input to output and signals wg
// once input is closed.
func combineWorker(input <-chan string, output chan<- string, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		s, ok := <-input
		if !ok {
			return
		}
		output <- s
	}
}

// combine drains the given channels into output one after another, preserving
// the order of inputs, then closes output. It returns immediately; the
// draining runs in a background goroutine.
func combine(inputs []<-chan string, output chan<- string) {
	wg := &sync.WaitGroup{}
	go func() {
		for i := range inputs {
			wg.Add(1)
			go combineWorker(inputs[i], output, wg)
			// wait for this channel to be fully drained before starting
			// the next one, so output preserves input order
			wg.Wait()
		}
		close(output)
	}()
}
// Run runs commands in parallel from channel chCmdStr,
// and returns a Command channel,
// and a done channel to ensure safe exit.
//
// At most opts.Jobs commands execute at once. Each command is retried up to
// opts.Retries times; on final failure with StopOnErr the whole run is torn
// down via stopChildProcesses (guarded by the package-level stopOnce so the
// teardown runs once across all workers). chSuccessfulCmd and chExitStatus
// are nil unless the corresponding option is enabled.
func Run(opts *Options, cancel chan struct{}, chCmdStr chan string) (chan *Command, chan string, chan int, chan int) {
	if opts.Verbose {
		Verbose = true
	}
	chCmd := make(chan *Command, opts.Jobs)
	var chSuccessfulCmd chan string
	if opts.RecordSuccessfulCmd {
		chSuccessfulCmd = make(chan string, opts.Jobs)
	}
	done := make(chan int)
	var chExitStatus chan int
	if opts.PropExitStatus {
		chExitStatus = make(chan int, opts.Jobs)
	}
	go func() {
		var wg sync.WaitGroup
		// tokens bounds concurrency at opts.Jobs workers
		tokens := make(chan int, opts.Jobs)
		var id uint64 = 1
		var stop bool
	RECEIVECMD:
		for cmdStr := range chCmdStr {
			select {
			case <-cancel:
				if Verbose {
					Log.Debugf("cancel receiving commands")
				}
				break RECEIVECMD
			default: // needed
			}
			// stop is set by a worker that hit StopOnErr; quit dispatching
			if stop {
				break
			}
			wg.Add(1)
			tokens <- 1
			go func(id uint64, cmdStr string) {
				defer func() {
					wg.Done()
					<-tokens
				}()
				command := NewCommand(id, cmdStr, cancel, opts.Timeout)
				if opts.DryRun {
					command.dryrun = true
				}
				chances := opts.Retries
				var outputsToPrint []<-chan string
				for {
					tryNumber := opts.Retries - chances + 1
					ch, err := command.Run(opts, tryNumber)
					if err != nil { // fail to run
						if chances == 0 || opts.StopOnErr {
							// print final output
							outputsToPrint = append(outputsToPrint, ch)
							Log.Error(err)
							if opts.PropExitStatus {
								chExitStatus <- command.exitStatus
							}
							command.Ch = make(chan string, 1)
							combine(outputsToPrint, command.Ch)
							chCmd <- command
						} else {
							Log.Warning(err)
						}
						if opts.StopOnErr {
							select {
							case <-cancel: // already closed
							default:
								// ensure we only initiate the stop attempt once,
								// from all our command threads
								stopOnce.Do(func() {
									if opts.StopOnErr {
										Log.Error("stop on first error")
									}
									err = stopChildProcesses(opts.NoStopExes, opts.NoKillExes, opts.CleanupTime)
									if err != nil {
										if Verbose {
											Log.Error(err)
										}
										os.Exit(1)
									}
								})
							}
							stop = true
							return
						}
						if chances > 0 {
							// retry: optionally keep this attempt's output
							if opts.PrintRetryOutput {
								outputsToPrint = append(outputsToPrint, ch)
							}
							if Verbose && opts.Retries > 0 {
								Log.Warningf("retry %d/%d times: %s",
									tryNumber,
									opts.Retries,
									command.Cmd)
							}
							chances--
							<-time.After(opts.RetryInterval)
							continue
						}
						return
					}
					// print final output
					outputsToPrint = append(outputsToPrint, ch)
					if opts.PropExitStatus {
						chExitStatus <- command.exitStatus
					}
					break
				}
				command.Ch = make(chan string, 1)
				combine(outputsToPrint, command.Ch)
				chCmd <- command
				// After sending the command, it's not guaranteed that the command is executed.
				// so, a feedback is needed.
				v := <-command.Executed
				if opts.RecordSuccessfulCmd && v == 1 {
					chSuccessfulCmd <- cmdStr
				}
			}(id, cmdStr)
			id++
		}
		wg.Wait()
		close(chCmd)
		if opts.RecordSuccessfulCmd {
			close(chSuccessfulCmd)
		}
		if opts.PropExitStatus {
			close(chExitStatus)
		}
		done <- 1
	}()
	return chCmd, chSuccessfulCmd, done, chExitStatus
}
|
package prometheus
import (
"crypto/tls"
"net/http"
"os/exec"
"time"
"fmt"
"github.com/tmax-cloud/hypercloud-multi-agent/internal/util"
"k8s.io/klog"
// "k8s.io/kubectl/pkg/cmd/annotate"
)
// Endpoint paths, configmap keys, Prometheus query templates, and label
// names used by the multi-cluster agent. The query strings contain
// placeholder tokens (x.x.x.x:xxxx, y.y.y.y) that are substituted with real
// node addresses before being sent to Prometheus.
const (
	URL_PREFIX                    = "http://"
	URL_HYPERCLUSTERRESOURCE_PATH = "/hyperclusterresource"
	// NOTE(review): "HTPERCLOUD" looks like a typo for "HYPERCLOUD"; the
	// identifier is kept as-is so external references do not break.
	URL_HTPERCLOUD_URL_PATH = "/hypercloudurl"
	URL_PARAM_URL           = "url"

	// agent configmap coordinates and keys
	CONFIGMAP_NAME           = "hypercloud-multi-agent-agentconfig"
	CONFIGMAP_NAMESPACE      = "hypercloud-multi-agent-system"
	CONFIGMAP_MGNT_IP        = "mgnt-ip"
	CONFIGMAP_MGNT_PORT      = "mgnt-port"
	CONFIGMAP_MY_CLUSTERNAME = "cluster-name"
	CONFIGMAP_REQUESTPERIOD  = "request-period"
	CONFIGMAP_RESOURCELIST   = "resourcelist"

	// Prometheus service discovery and query API
	PROMETHEUS_SERVICE_NAME             = "prometheus-k8s"
	PROMETHEUS_SERVICE_NAMESPACE        = "monitoring"
	PROMETHEUS_NODE_EXPORT_SERVICE_NAME = "node-exporter"
	PROMETHEUS_QUERY_PATH               = "/api/v1/query"
	PROMETHEUS_QUERY_KEY_QUERY          = "query"
	PROMETHEUS_QUERY_KEY_TIME           = "time"

	// PromQL templates for per-node usage metrics
	PROMETHEUS_QUERY_POD_USAGE     = "count(kube_pod_info{host_ip=\"y.y.y.y\"})"
	PROMETHEUS_QUERY_CPU_USAGE     = "( (1 - rate(node_cpu_seconds_total{job=\"node-exporter\", mode=\"idle\", instance=\"x.x.x.x:xxxx\"}[75s])) / ignoring(cpu) group_left count without (cpu)( node_cpu_seconds_total{job=\"node-exporter\", mode=\"idle\", instance=\"x.x.x.x:xxxx\"}) )"
	PROMETHEUS_QUERY_STORAGE_USAGE = "sum( max by (device) ( node_filesystem_size_bytes{job=\"node-exporter\", instance=\"x.x.x.x:xxxx\", fstype!=\"\"} - node_filesystem_avail_bytes{job=\"node-exporter\", instance=\"x.x.x.x:xxxx\", fstype!=\"\"} ) )"
	PROMETHEUS_QUERY_MEMORY_USAGE  = "(( node_memory_MemTotal_bytes{job=\"node-exporter\", instance=\"x.x.x.x:xxxx\"} - node_memory_MemFree_bytes{job=\"node-exporter\", instance=\"x.x.x.x:xxxx\"} - node_memory_Buffers_bytes{job=\"node-exporter\", instance=\"x.x.x.x:xxxx\"} - node_memory_Cached_bytes{job=\"node-exporter\", instance=\"x.x.x.x:xxxx\"} )) "

	LABEL_MASTER_ROLE = "node-role.kubernetes.io/master"
	URL_INSTALL_REPO  = "https://github.com/tmax-cloud/hypercloud-multi-agent-install-repo.git"

	// labels identifying the hypercloud console deployment
	HYPERCLOUD_CONSOLE_LABEL_APP            = "app"
	HYPERCLOUD_CONSOLE_LABEL_APP_KEY        = "console"
	HYPERCLOUD_CONSOLE_LABEL_HYPERCLOUD     = "hypercloud"
	HYPERCLOUD_CONSOLE_LABEL_HYPERCLOUD_KEY = "ui"
)
// InstallPrometheus handles an HTTP request to install Prometheus by running
// the bundled install scripts via InstallCommand.
//
// NOTE(review): InstallCommand only prints errors and does not report them,
// so this handler always answers success — confirm whether failures should
// be surfaced to the caller.
func InstallPrometheus(res http.ResponseWriter, req *http.Request) {
	InstallCommand()
	klog.Infoln("Success to exec install prometheus")
	// Bug fix: the success message was previously sent with
	// http.StatusInternalServerError (500); a successful install must
	// report 200 OK.
	util.SetResponse(res, "Success to exec install prometheus", nil, http.StatusOK)
}
// InstallCommand clones the install repo into /install-prometheus, marks its
// main.sh executable, and runs it. Each step's stdout (or error) is printed;
// errors are not propagated and do not abort the remaining steps, matching
// the original best-effort behavior.
func InstallCommand() {
	// the three shell steps expressed as argv slices, run in order
	steps := [][]string{
		{"git", "clone", URL_INSTALL_REPO, "/install-prometheus"},
		{"chmod", "+x", "/install-prometheus/main.sh"},
		{"bash", "/install-prometheus/main.sh"},
	}
	for _, step := range steps {
		// Output captures stdout only; stderr is discarded, as before.
		if out, err := exec.Command(step[0], step[1:]...).Output(); err != nil {
			fmt.Println(err.Error())
		} else {
			fmt.Println(string(out))
		}
	}
}
// func UnInstallPrometheus(res http.ResponseWriter, req *http.Request) {
// if _, err := os.Stat("/tmp/git/prometheus"); os.IsNotExist(err) {
// klog.Errorln("Prometheus install directory is removed." + err.Error())
// util.SetResponse(res, "Failed to exec uninstall.sh "+err.Error(), nil, http.StatusInternalServerError)
// return
// }
// if _, err := exec.Command("chmod", "+x", "/tmp/git/prometheus/uninstall.sh").Output(); err != nil {
// klog.Errorln("Failed to chmod uninstall.sh " + err.Error())
// util.SetResponse(res, "Failed to chmod uninstall.sh "+err.Error(), nil, http.StatusInternalServerError)
// return
// }
// if _, err := exec.Command("bash", "/tmp/git/prometheus/uninstall.sh").Output(); err != nil {
// klog.Errorln("Failed to exec uninstallin.sh " + err.Error())
// util.SetResponse(res, "Failed to exec uninstall.sh "+err.Error(), nil, http.StatusInternalServerError)
// return
// }
// if _, err := exec.Command("/bin/sh", "-c", "rm -rf /tmp/git/prometheus").Output(); err != nil {
// klog.Errorln("Failed to remove prometheus install directory " + err.Error())
// util.SetResponse(res, "Failed to remove prometheus install directory "+err.Error(), nil, http.StatusInternalServerError)
// return
// }
// klog.Infoln("Success to exec uninstall prometheus")
// util.SetResponse(res, "Success to exec uninstall prometheus", nil, http.StatusInternalServerError)
// return
// }
// func deletePrometheusDir() error {
// if _, err := os.Stat("/tmp/git/prometheus"); os.IsNotExist(err) {
// return nil
// } else if err != nil {
// return err
// } else {
// if _, err := exec.Command("/bin/sh", "-c", "rm -rf /tmp/git/prometheus").Output(); err != nil {
// return err
// } else {
// return nil
// }
// }
// }
// func clonePrometheus() error {
// if _, err := os.Stat("/tmp/git/prometheus"); os.IsNotExist(err) {
// _, err := git.PlainClone("/tmp/git/prometheus", false, &git.CloneOptions{
// URL: "https://github.com/tmax-cloud/install-prometheus.git",
// ReferenceName: plumbing.ReferenceName("refs/heads/5.0-agent"),
// InsecureSkipTLS: true,
// })
// if err != nil {
// return err
// }
// } else {
// return errors.New("Prometheus git directory is already existed")
// }
// return nil
// }
// HealthCheck queries the in-cluster Prometheus health endpoint and returns
// the raw HTTP response on success. The caller owns response.Body and must
// close it.
func HealthCheck() (*http.Response, error) {
	const url = "http://prometheus-k8s.hypercloud5-system.svc.cluster.local:9090/-/healthy"
	// Use a dedicated transport instead of mutating http.DefaultTransport,
	// which would silently disable TLS verification for every client in
	// the whole process.
	client := http.Client{
		Timeout: 15 * time.Second,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // ignore certificate
		},
	}
	response, err := client.Get(url)
	if err != nil {
		klog.Errorln(err)
		return nil, err
	}
	return response, nil
}
|
package counter
// ChannelCounter is a counter whose state is owned by a single dispatch
// goroutine: every mutation and read is sent through ch as a closure and
// applied serially, so no mutex is needed.
type ChannelCounter struct {
	ch     chan func() // queue of serialized operations on number
	number uint64      // current count; touched only by the dispatch goroutine
}
// NewChannelCounter builds a ChannelCounter and starts the goroutine that
// serially applies every queued operation. The goroutine runs for the life
// of the counter, since ch is never closed.
func NewChannelCounter() Counter {
	c := &ChannelCounter{make(chan func(), 100), 0}
	go func(cc *ChannelCounter) {
		for op := range cc.ch {
			op()
		}
	}(c)
	return c
}
// Add queues an increment of num. It returns without waiting for the
// increment to be applied; a later Read observes it.
func (c *ChannelCounter) Add(num uint64) {
	c.ch <- func() {
		c.number += num
	}
}
// Read returns a snapshot of the counter. It blocks until all operations
// queued before it have been applied, because the reply closure runs only
// after them.
func (c *ChannelCounter) Read() uint64 {
	result := make(chan uint64)
	c.ch <- func() {
		result <- c.number
		close(result)
	}
	return <-result
}
|
package user
import (
"net/http"
noter "github.com/romycode/bank-manager/internal"
"github.com/gin-gonic/gin"
"log"
)
func CreateUserHandler(repository noter.UserRepository) gin.HandlerFunc {
return func(ctx *gin.Context) {
u := new(noter.User)
err := ctx.Bind(u)
if err != nil {
log.Fatal(err)
}
repository.Save(ctx, *u)
ctx.JSON(
http.StatusCreated,
u,
)
}
} |
package account
import (
"github.com/agiledragon/gomonkey/v2"
"github.com/kenlabs/pando/pkg/registry"
"github.com/libp2p/go-libp2p-core/peer"
. "github.com/smartystreets/goconvey/convey"
"reflect"
"testing"
)
func TestFetchPeerType(t *testing.T) {
Convey("TestFetchPeerType", t, func() {
r := ®istry.Registry{}
Convey("When peer is registered", func() {
patch := gomonkey.ApplyMethodFunc(
reflect.TypeOf(r),
"ProviderAccountLevel",
func(_ peer.ID) (int, error) {
return 1, nil
},
)
Convey("Given an untrusted account level equals to 1, should returns registeredPeer type", func() {
patch = patch.ApplyMethodFunc(
reflect.TypeOf(r),
"IsTrusted",
func(_ peer.ID) bool {
return false
},
)
defer patch.Reset()
providerInfo := FetchPeerType("", r)
So(providerInfo.PeerType, ShouldEqual, RegisteredPeer)
So(providerInfo.AccountLevel, ShouldEqual, 1)
})
Convey("Given a trusted account, should returns whitelistPeer type", func() {
patch = patch.ApplyMethodFunc(
reflect.TypeOf(r),
"IsTrusted",
func(_ peer.ID) bool {
return true
},
)
defer patch.Reset()
providerInfo := FetchPeerType("", r)
So(providerInfo.PeerType, ShouldEqual, WhiteListPeer)
So(providerInfo.AccountLevel, ShouldEqual, 1)
})
})
Convey("when peer is not registered", func() {
patch := gomonkey.ApplyMethodFunc(
reflect.TypeOf(r),
"ProviderAccountLevel",
func(_ peer.ID) (int, error) {
return -1, nil
},
)
patch = patch.ApplyMethodFunc(
reflect.TypeOf(r),
"IsTrusted",
func(_ peer.ID) bool {
return false
},
)
defer patch.Reset()
providerInfo := FetchPeerType("", r)
So(providerInfo.PeerType, ShouldEqual, UnregisteredPeer)
So(providerInfo.AccountLevel, ShouldEqual, -1)
})
})
}
|
package main
import (
"bufio"
"fmt"
"log"
"os"
"strconv"
"strings"
)
// computeRibbon returns the ribbon needed for an a×b×c present: the
// perimeter of the smallest face (twice the sum of the two shortest edges)
// plus the box volume for the bow.
func computeRibbon(a int, b int, c int) int {
	// push the largest edge into `longest`, leaving the two shortest
	// edges in short1/short2
	short1, short2, longest := a, b, c
	if short1 > longest {
		longest, short1 = short1, longest
	}
	if short2 > longest {
		longest, short2 = short2, longest
	}
	return 2*(short1+short2) + a*b*c
}
// computeWrappingPaper returns the paper needed for an a×b×c present: the
// full surface area plus the area of the smallest face as slack.
func computeWrappingPaper(a int, b int, c int) int {
	sideAB, sideBC, sideAC := a*b, b*c, a*c
	smallest := sideAB
	if sideBC < smallest {
		smallest = sideBC
	}
	if sideAC < smallest {
		smallest = sideAC
	}
	return 2*(sideAB+sideBC+sideAC) + smallest
}
// main reads box dimensions ("LxWxH", one per line) from the file named by
// the first CLI argument and prints the total wrapping paper and ribbon
// required.
func main() {
	if len(os.Args) < 2 {
		log.Fatal("usage: program <input-file>")
	}
	file, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	var wrapping int = 0
	var ribbon int = 0
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		var prism []string = strings.Split(scanner.Text(), "x")
		if len(prism) != 3 {
			log.Fatalf("malformed line %q: want LxWxH", scanner.Text())
		}
		var sq [3]int
		for i, str := range prism {
			// previously the Atoi error was silently discarded,
			// turning malformed dimensions into zeros
			sq[i], err = strconv.Atoi(str)
			if err != nil {
				log.Fatalf("malformed dimension %q: %v", str, err)
			}
		}
		wrapping += computeWrappingPaper(sq[0], sq[1], sq[2])
		ribbon += computeRibbon(sq[0], sq[1], sq[2])
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("wrapping paper: %d\n", wrapping)
	// fixed output typo: "rbbon" -> "ribbon"
	fmt.Printf("ribbon length: %d\n", ribbon)
}
|
package controller
import (
"yes-blog/graph/model"
"yes-blog/pkg/database"
"yes-blog/pkg/database/status"
"yes-blog/pkg/jwt"
)
/* userController is the singleton controller that performs CRUD for the
user.User model. It delegates persistence to a database.UserDBDriver,
which must be injected via SetDBDriver before the controller is used.
*/
type userController struct {
	dbDriver database.UserDBDriver // storage backend for user records
}

// userC is the package-wide singleton instance, created in init and
// exposed through GetUserController.
var userC *userController
// SetDBDriver injects the database driver this controller will use for all
// subsequent operations.
func (c *userController) SetDBDriver(dbDriver database.UserDBDriver) {
	// Bug fix: previously assigned to the package singleton userC instead
	// of the receiver, so the method only worked when called on userC.
	c.dbDriver = dbDriver
}
func init() {
userC = &userController{}
}
// GetUserController returns the package-wide singleton controller.
func GetUserController() *userController {
	return userC
}
// Login authenticates username/password and returns a signed JWT on success.
// Lookup failures and password mismatches are both reported as
// UserPassMissMatchException so callers cannot tell them apart.
func (c *userController) Login(username, password string) (string, error) {
	// fetch the stored user record
	blogUser, getErr := c.Get(&username)
	if getErr != nil {
		return "", model.UserPassMissMatchException{}
	}
	// reject when the password does not verify against the stored credential
	if !blogUser.Verify(password) {
		return "", model.UserPassMissMatchException{}
	}
	// issue a fresh token for the authenticated user
	token, tokenErr := jwt.GenerateToken(blogUser.Name)
	if tokenErr != nil {
		return "", model.InternalServerException{}
	}
	return token, nil
}
// Promote grants admin level to target; admin must already have admin rights.
func (c *userController) Promote(admin, target string) error {
	return c.setLevel(admin, target, true)
}

// Demote revokes admin level from target; admin must already have admin rights.
func (c *userController) Demote(admin, target string) error {
	return c.setLevel(admin, target, false)
}
// setLevel sets target's admin flag to level after verifying that admin has
// admin rights, returning a typed model exception for each failure mode.
func (c *userController) setLevel(admin, target string, level bool) error {
	ok, err := c.isAdmin(admin)
	if err != nil {
		return err
	}
	if !ok {
		return model.UserNotAllowedException{Message: "user is not admin!"}
	}
	// load the user whose level is being changed
	targetUser, fetchErr := c.Get(&target)
	if fetchErr != nil {
		return model.NoUserFoundException{Message: "couldn't fetch target"}
	}
	targetUser.SetAdmin(level)
	// persist the updated record
	if c.dbDriver.Replace(&target, targetUser) == status.FAILED {
		return model.InternalServerException{Message: "couldn't do the task"}
	}
	return nil
}
// isAdmin reports whether the named user exists and has admin rights.
func (c *userController) isAdmin(admin string) (bool, error) {
	user, err := c.Get(&admin)
	if err != nil {
		return false, model.NoUserFoundException{Message: "couldn't fetch admin"}
	}
	return user.IsAdmin(), nil
}
// CanOperate reports whether operator may act on target: users may always
// operate on themselves, and admins may operate on anyone.
func (c *userController) CanOperate(operator, target string) (bool, error) {
	if operator == target {
		return true, nil
	}
	// otherwise the permission reduces exactly to "is operator an admin";
	// the previous if admin { return true, nil } / return false, nil
	// dance returned the same (bool, error) pair
	return c.isAdmin(operator)
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"context"
"fmt"
"os"
"github.com/google/subcommands"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/runsc/cmd/util"
"gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/container"
"gvisor.dev/gvisor/runsc/flag"
)
// Delete implements subcommands.Command for the "delete" command.
type Delete struct {
	// force indicates that the container should be terminated if running,
	// and that missing containers are tolerated rather than reported.
	force bool
}
// Name implements subcommands.Command.Name.
func (*Delete) Name() string {
	return "delete"
}

// Synopsis implements subcommands.Command.Synopsis.
func (*Delete) Synopsis() string {
	return "delete resources held by a container"
}

// Usage implements subcommands.Command.Usage.
func (*Delete) Usage() string {
	return `delete [flags] <container ids>`
}

// SetFlags implements subcommands.Command.SetFlags.
func (d *Delete) SetFlags(f *flag.FlagSet) {
	f.BoolVar(&d.force, "force", false, "terminate container if running")
}
// Execute implements subcommands.Command.Execute.
func (d *Delete) Execute(_ context.Context, f *flag.FlagSet, args ...any) subcommands.ExitStatus {
	// at least one container id must be given on the command line
	if f.NArg() == 0 {
		f.Usage()
		return subcommands.ExitUsageError
	}
	// args[0] is the global *config.Config injected by the dispatcher
	conf := args[0].(*config.Config)
	if err := d.execute(f.Args(), conf); err != nil {
		util.Fatalf("%v", err)
	}
	return subcommands.ExitSuccess
}
// execute destroys each container in ids. Without force, only containers in
// the Created or Stopped state may be deleted. With force, a missing
// container is skipped with a warning instead of failing the batch.
func (d *Delete) execute(ids []string, conf *config.Config) error {
	for _, id := range ids {
		c, err := container.Load(conf.RootDir, container.FullID{ContainerID: id}, container.LoadOpts{})
		if err != nil {
			if os.IsNotExist(err) && d.force {
				log.Warningf("couldn't find container %q: %v", id, err)
				// Bug fix: this previously returned nil, silently
				// skipping deletion of all remaining ids; keep going
				// with the rest of the batch instead.
				continue
			}
			return fmt.Errorf("loading container %q: %v", id, err)
		}
		if !d.force && c.Status != container.Created && c.Status != container.Stopped {
			return fmt.Errorf("cannot delete container that is not stopped without --force flag")
		}
		if err := c.Destroy(); err != nil {
			return fmt.Errorf("destroying container: %v", err)
		}
	}
	return nil
}
|
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"time"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
"reflect"
batchv1alpha1 "sdewan.akraino.org/sdewan/api/v1alpha1"
"sdewan.akraino.org/sdewan/cnfprovider"
"sdewan.akraino.org/sdewan/openwrt"
"strconv"
)
// Mwan3PolicyHandler implements the per-CR-type hooks used by the generic
// reconcile loop for Mwan3Policy resources; it is stateless.
type Mwan3PolicyHandler struct {
}
// GetType returns the CR type name this handler manages.
func (m *Mwan3PolicyHandler) GetType() string {
	return "Mwan3Policy"
}
// GetName returns the name of the given Mwan3Policy instance.
// It panics if instance is not a *batchv1alpha1.Mwan3Policy.
func (m *Mwan3PolicyHandler) GetName(instance runtime.Object) string {
	policy := instance.(*batchv1alpha1.Mwan3Policy)
	return policy.Name
}
// GetFinalizer returns the finalizer key registered on Mwan3Policy CRs.
func (m *Mwan3PolicyHandler) GetFinalizer() (string) {
	return "rule.finalizers.sdewan.akraino.org"
}
// GetInstance fetches the Mwan3Policy named by req from the API server.
func (m *Mwan3PolicyHandler) GetInstance(r client.Client, ctx context.Context, req ctrl.Request) (runtime.Object, error) {
	// Bug fix: the parameter was declared `ctx Context`, which does not
	// compile — there is no local Context type; the imported context
	// package provides context.Context.
	instance := &batchv1alpha1.Mwan3Policy{}
	err := r.Get(ctx, req.NamespacedName, instance)
	return instance, err
}
// Convert translates a Mwan3Policy CR into its OpenWrt SdewanPolicy
// representation, mapping each member's network name to a CNF interface.
func (m *Mwan3PolicyHandler) Convert(instance runtime.Object, deployment extensionsv1beta1.Deployment) (IOpenWrtObject, error) {
	policy := instance.(*batchv1alpha1.Mwan3Policy)
	members := make([]openwrt.SdewanMember, 0, len(policy.Spec.Members))
	for _, member := range policy.Spec.Members {
		// resolve the CR's network name to the device interface
		iface, err := net2iface(member.Network, deployment)
		if err != nil {
			return nil, err
		}
		members = append(members, openwrt.SdewanMember{
			Interface: iface,
			Metric:    strconv.Itoa(member.Metric),
			Weight:    strconv.Itoa(member.Weight),
		})
	}
	return &openwrt.SdewanPolicy{Name: policy.Name, Members: members}, nil
}
// IsEqual reports whether two SdewanPolicy objects are deeply equal.
func (m *Mwan3PolicyHandler) IsEqual(instance1 IOpenWrtObject, instance2 IOpenWrtObject) (bool) {
	policy1 := instance1.(*openwrt.SdewanPolicy)
	policy2 := instance2.(*openwrt.SdewanPolicy)
	// Bug fix: the DeepEqual result was computed but never returned,
	// so the function was missing its declared bool return.
	return reflect.DeepEqual(*policy1, *policy2)
}
// GetObject fetches the named mwan3 policy from the OpenWrt device.
func (m *Mwan3PolicyHandler) GetObject(clientInfo *openwrt.OpenwrtClientInfo, name string) (IOpenWrtObject, error) {
	openwrtClient := openwrt.GetOpenwrtClient(*clientInfo)
	mwan3 := openwrt.Mwan3Client{OpenwrtClient: openwrtClient}
	// Bug fix: previously referenced `policy.Name`, but no `policy`
	// variable exists in this scope — the name parameter is the lookup key.
	return mwan3.GetPolicy(name)
}
// CreateObject creates the given policy on the OpenWrt device.
func (m *Mwan3PolicyHandler) CreateObject(clientInfo *openwrt.OpenwrtClientInfo, instance IOpenWrtObject) (IOpenWrtObject, error) {
	// Bug fix: the parameter type was `*OpenwrtClientInfo`, an undefined
	// local name; siblings (GetObject/DeleteObject) use the qualified
	// openwrt.OpenwrtClientInfo.
	openwrtClient := openwrt.GetOpenwrtClient(*clientInfo)
	mwan3 := openwrt.Mwan3Client{OpenwrtClient: openwrtClient}
	policy := instance.(*openwrt.SdewanPolicy)
	return mwan3.CreatePolicy(*policy)
}
// UpdateObject replaces the given policy on the OpenWrt device.
func (m *Mwan3PolicyHandler) UpdateObject(clientInfo *openwrt.OpenwrtClientInfo, instance IOpenWrtObject) (IOpenWrtObject, error) {
	// Bug fix: parameter type qualified as openwrt.OpenwrtClientInfo
	// (the bare OpenwrtClientInfo is undefined in this package).
	openwrtClient := openwrt.GetOpenwrtClient(*clientInfo)
	mwan3 := openwrt.Mwan3Client{OpenwrtClient: openwrtClient}
	policy := instance.(*openwrt.SdewanPolicy)
	return mwan3.UpdatePolicy(*policy)
}
// DeleteObject removes the named mwan3 policy from the OpenWrt device.
func (m *Mwan3PolicyHandler) DeleteObject(clientInfo *openwrt.OpenwrtClientInfo, name string) (error) {
	openwrtClient := openwrt.GetOpenwrtClient(*clientInfo)
	mwan3 := openwrt.Mwan3Client{OpenwrtClient: openwrtClient}
	return mwan3.DeletePolicy(name)
}
// Restart restarts the mwan3 service on the OpenWrt device so configuration
// changes take effect.
func (m *Mwan3PolicyHandler) Restart(clientInfo *openwrt.OpenwrtClientInfo) (bool, error) {
	// Bug fix: parameter type qualified as openwrt.OpenwrtClientInfo
	// (the bare OpenwrtClientInfo is undefined in this package).
	openwrtClient := openwrt.GetOpenwrtClient(*clientInfo)
	service := openwrt.ServiceClient{OpenwrtClient: openwrtClient}
	return service.ExecuteService("mwan3", "restart")
}
// Mwan3PolicyReconciler reconciles a Mwan3Policy object
type Mwan3PolicyReconciler struct {
	client.Client                 // embedded API client used for CR reads/writes
	Log           logr.Logger     // structured logger scoped to this controller
	Scheme        *runtime.Scheme // scheme used for object (de)serialization
}
// +kubebuilder:rbac:groups=batch.sdewan.akraino.org,resources=mwan3policies,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=batch.sdewan.akraino.org,resources=mwan3policies/status,verbs=get;update;patch
// Reconcile drives a Mwan3Policy CR toward its desired state via the shared
// ProcessReconcile helper.
func (r *Mwan3PolicyReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
	// Bug fix: previously constructed &mwan3policyHandler{}, an undefined
	// identifier; the handler type declared above is Mwan3PolicyHandler.
	return ProcessReconcile(r, r.Log, req, &Mwan3PolicyHandler{})
}
/*
func (r *Mwan3PolicyReconciler) OldReconcile (req ctrl.Request) (ctrl.Result, error) {
ctx := context.Background()
log := r.Log.WithValues("mwan3policy", req.NamespacedName)
// your logic here
during, _ := time.ParseDuration("5s")
instance := &batchv1alpha1.Mwan3Policy{}
err := r.Get(ctx, req.NamespacedName, instance)
if err != nil {
if errors.IsNotFound(err) {
// No instance
return ctrl.Result{}, nil
}
// Error reading the object - requeue the request.
return ctrl.Result{RequeueAfter: during}, nil
}
cnf, err := cnfprovider.NewWrt(req.NamespacedName.Namespace, instance.Labels["sdewanPurpose"], r.Client)
if err != nil {
log.Error(err, "Failed to get cnf")
// A new event are supposed to be received upon cnf ready
// so not requeue
return ctrl.Result{}, nil
}
finalizerName := "rule.finalizers.sdewan.akraino.org"
if instance.ObjectMeta.DeletionTimestamp.IsZero() {
// creating or updating CR
if cnf == nil {
// no cnf exists
log.Info("No cnf exist, so not create/update mwan3 policy")
return ctrl.Result{}, nil
}
changed, err := cnf.AddUpdateMwan3Policy(instance)
if err != nil {
log.Error(err, "Failed to add/update mwan3 policy")
return ctrl.Result{RequeueAfter: during}, nil
}
if !containsString(instance.ObjectMeta.Finalizers, finalizerName) {
log.Info("Adding finalizer for mwan3 policy")
instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, finalizerName)
if err := r.Update(ctx, instance); err != nil {
return ctrl.Result{}, err
}
}
if changed {
instance.Status.AppliedVersion = instance.ResourceVersion
instance.Status.AppliedTime = &metav1.Time{Time: time.Now()}
instance.Status.InSync = true
err = r.Status().Update(ctx, instance)
if err != nil {
log.Error(err, "Failed to update mwan3 policy status")
return ctrl.Result{}, err
}
}
} else {
// deleting CR
if cnf == nil {
// no cnf exists
if containsString(instance.ObjectMeta.Finalizers, finalizerName) {
instance.ObjectMeta.Finalizers = removeString(instance.ObjectMeta.Finalizers, finalizerName)
if err := r.Update(ctx, instance); err != nil {
return ctrl.Result{}, err
}
}
return ctrl.Result{}, nil
}
_, err := cnf.DeleteMwan3Policy(instance)
if err != nil {
log.Error(err, "Failed to delete mwan3 policy")
return ctrl.Result{RequeueAfter: during}, nil
}
if containsString(instance.ObjectMeta.Finalizers, finalizerName) {
instance.ObjectMeta.Finalizers = removeString(instance.ObjectMeta.Finalizers, finalizerName)
if err := r.Update(ctx, instance); err != nil {
return ctrl.Result{}, err
}
}
}
return ctrl.Result{}, nil
}
*/
// SetupWithManager registers this reconciler with the controller manager,
// watching Mwan3Policy resources.
func (r *Mwan3PolicyReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&batchv1alpha1.Mwan3Policy{}).
		Complete(r)
}
/*
// Helper functions to check and remove string from a slice of strings.
func containsString(slice []string, s string) bool {
for _, item := range slice {
if item == s {
return true
}
}
return false
}
func removeString(slice []string, s string) (result []string) {
for _, item := range slice {
if item == s {
continue
}
result = append(result, item)
}
return
}
*/
|
package scraper
import (
"errors"
"github.com/mkamadeus/nicscraper/models"
)
// Scraper streams scraped student records. Successfully scraped students are
// delivered on Students; identifiers that failed to scrape are sent on Failed.
type Scraper struct {
	IsVerbose bool                // enables verbose output; not set by New — presumably toggled by callers, TODO confirm
	Students  chan models.Student // successfully scraped records
	Failed    chan string         // identifiers that could not be scraped
	Args      *models.Arguments   // scrape configuration
}
// New constructs a Scraper wired with fresh result channels and verifies
// connectivity before returning it; a non-nil error means the scraper could
// not reach its target.
func New(args *models.Arguments) (*Scraper, error) {
	s := &Scraper{
		Students: make(chan models.Student),
		Failed:   make(chan string),
		Args:     args,
	}
	if s.IsConnected() {
		return s, nil
	}
	return nil, errors.New("scraper not connected")
}
|
package purchasepersister
// ErrPurchaseInvalid is returned when a purchase fails validation.
type ErrPurchaseInvalid struct {
	Message string // human-readable reason the purchase was rejected
}

// Error implements the error interface by returning the stored message.
func (e ErrPurchaseInvalid) Error() string {
	return e.Message
}
|
package static
import (
"io/fs"
"strings"
"testing"
)
// TestEmbed verifies that the expected static assets are embedded in the
// FileSystem: each listed path must exist (or not), be non-empty, and contain
// a marker string proving the right kind of content was embedded.
func TestEmbed(t *testing.T) {
	scenarios := []struct {
		path                  string
		shouldExist           bool
		expectedContainString string
	}{
		{
			path:                  "index.html",
			shouldExist:           true,
			expectedContainString: "</body>",
		},
		{
			path:                  "favicon.ico",
			shouldExist:           true,
			expectedContainString: "", // not checking because it's an image
		},
		{
			path:                  "img/logo.svg",
			shouldExist:           true,
			expectedContainString: "</svg>",
		},
		{
			path:                  "css/app.css",
			shouldExist:           true,
			expectedContainString: "background-color",
		},
		{
			path:                  "js/app.js",
			shouldExist:           true,
			expectedContainString: "function",
		},
		{
			path:                  "js/chunk-vendors.js",
			shouldExist:           true,
			expectedContainString: "function",
		},
		{
			path:        "file-that-does-not-exist.html",
			shouldExist: false,
		},
	}
	staticFileSystem, err := fs.Sub(FileSystem, RootPath)
	if err != nil {
		t.Fatal(err)
	}
	for _, scenario := range scenarios {
		t.Run(scenario.path, func(t *testing.T) {
			content, err := fs.ReadFile(staticFileSystem, scenario.path)
			if !scenario.shouldExist {
				if err == nil {
					t.Errorf("%s should not have existed", scenario.path)
				}
				// Nothing further to check for a path that must not exist.
				return
			}
			// Fatalf (was Errorf) stops the subtest here: the follow-up
			// length/content checks would only produce misleading cascade
			// failures when the file could not be read at all.
			if err != nil {
				t.Fatalf("opening %s should not have returned an error, got %s", scenario.path, err.Error())
			}
			if len(content) == 0 {
				t.Errorf("%s should have existed in the static FileSystem, but was empty", scenario.path)
			}
			// Contains(s, "") is always true, so image scenarios with an
			// empty marker pass this check trivially by design.
			if !strings.Contains(string(content), scenario.expectedContainString) {
				t.Errorf("%s should have contained %s, but did not", scenario.path, scenario.expectedContainString)
			}
		})
	}
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package crossdevice
import (
"context"
"regexp"
crossdevicecommon "chromiumos/tast/common/cros/crossdevice"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/lsbrelease"
)
// GetCrosAttributes gets the Chrome version and combines it into a CrosAttributes
// struct with the provided values for easy logging with json.MarshalIndent.
func GetCrosAttributes(ctx context.Context, tconn *chrome.TestConn, username string) (*crossdevicecommon.CrosAttributes, error) {
	attrs := crossdevicecommon.CrosAttributes{
		User: username,
	}
	out, err := testexec.CommandContext(ctx, "/opt/google/chrome/chrome", "--version").Output()
	if err != nil {
		return nil, errors.Wrap(err, "failed to get chrome version")
	}
	// Extract Chrome/Chromium version of the form, for example, 91.0.4435.0.
	const versionPattern = `([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)`
	r := regexp.MustCompile(versionPattern)
	versionMatch := r.FindStringSubmatch(string(out))
	if len(versionMatch) == 0 {
		return nil, errors.New("failed to find valid Chrome version")
	}
	// Store the whole match (index 0), i.e. the full dotted version; index 1
	// is only the first capture group (the major component, e.g. "91").
	attrs.ChromeVersion = versionMatch[0]
	lsb, err := lsbrelease.Load()
	if err != nil {
		return nil, errors.Wrap(err, "failed to read lsb-release")
	}
	osVersion, ok := lsb[lsbrelease.Version]
	if !ok {
		// A missing map key carries no error value — err is nil/stale here,
		// so build a fresh error instead of wrapping it.
		return nil, errors.New("failed to read ChromeOS version from lsb-release")
	}
	attrs.ChromeOSVersion = osVersion
	board, ok := lsb[lsbrelease.Board]
	if !ok {
		return nil, errors.New("failed to read board from lsb-release")
	}
	attrs.Board = board
	model, err := testexec.CommandContext(ctx, "cros_config", "/", "name").Output()
	if err != nil {
		return nil, errors.Wrap(err, "failed to read model from cros_config")
	}
	// NOTE(review): cros_config output may include a trailing newline —
	// confirm whether consumers expect a trimmed string before changing this.
	attrs.Model = string(model)
	return &attrs, nil
}
|
package structs
import "fmt"
// Declaring a struct
// user represents an account holder used by the examples in this package.
type user struct {
	name       string // display name
	email      string // address notify() sends to
	ext        int    // extension number (presumably a phone extension — not used elsewhere in view)
	privileged bool   // whether the user has elevated rights
}
// admin composes a user value with an authorization level (a struct that
// contains another struct).
type admin struct {
	person user   // the underlying user record
	level  string // authorization level, e.g. "super"
}
// notify prints a one-line e-mail notification for this user to stdout.
func (u user) notify() {
	fmt.Printf("Sending user e-mail to %s<%s>\n", u.name, u.email)
}
// Call demonstrates the different ways of declaring and initializing structs.
func Call() {
	fmt.Println("Structs")

	// Positional struct literal: values must follow the field order.
	bill := user{"Bill", "bill@gmail.com", 123, true}
	bill.notify()

	// When declaring a variable with zero values it is preferable to use "var";
	// when declaring a variable with initial values it is preferable to use ":=".
	// Keyed struct literal: each field is named explicitly.
	lisa := user{
		name:       "lisa",
		email:      "lisa@gmail.com",
		ext:        123,
		privileged: true,
	}
	fmt.Println(lisa)

	// Another positional literal — the keys are hidden, only values are passed.
	eduart := user{"Eduart", "eduart@gmail.com", 321, false}
	fmt.Println(eduart)

	// Declaring an admin value (a struct that embeds another struct).
	fred := admin{
		person: user{
			name:       "fred",
			email:      "fred@gmail.com",
			ext:        123,
			privileged: true,
		},
		level: "super",
	}
	fmt.Println(fred)
	// The redundant trailing bare "return" was removed (staticcheck S1023):
	// a function without results returns implicitly at the closing brace.
}
|
package main
/*
A game on an undirected graph is played by two players, Mouse and Cat, who alternate turns.
The graph is given as follows: graph[a] is a list of all nodes b such that ab is an edge of the graph.
Mouse starts at node 1 and goes first, Cat starts at node 2 and goes second, and there is a Hole at node 0.
During each player's turn, they must travel along one edge of the graph that meets where they are. For example, if the Mouse is at node 1, it must travel to any node in graph[1].
Additionally, it is not allowed for the Cat to travel to the Hole (node 0.)
Then, the game can end in 3 ways:
If ever the Cat occupies the same node as the Mouse, the Cat wins.
If ever the Mouse reaches the Hole, the Mouse wins.
If ever a position is repeated (ie. the players are in the same position as a previous turn, and it is the same player's turn to move), the game is a draw.
Given a graph, and assuming both players play optimally, return 1 if the game is won by Mouse, 2 if the game is won by Cat, and 0 if the game is a draw.
Example 1:
Input: [[2,5],[3],[0,4,5],[1,4,5],[2,3],[0,2,3]]
Output: 0
Explanation:
4---3---1
| |
2---5
\ /
0
Note:
3 <= graph.length <= 50
It is guaranteed that graph[1] is non-empty.
It is guaranteed that graph[2] contains a non-zero element.
*/
// main is an intentionally empty entry point; this file only hosts the
// catMouseGame solution and its helper.
func main() {
}
/*
catMouseGame solves the cat-and-mouse game: the mouse starts at node 1 and
moves first, the cat starts at node 2 and moves second, and node 0 is the
hole. The cat may never move onto node 0. It returns:

	1 — the mouse wins (it reaches the hole),
	2 — the cat wins (it occupies the same node as the mouse),
	0 — the game is a draw (a position repeats with the same player to move).

The original function body was empty (it did not compile). This implements
the standard retrograde-analysis solution: every state is (mouse, cat, turn);
states with a known outcome are colored first, and a BFS propagates outcomes
backwards. A state where the mover has a move into a win is a win; a state
where every move loses is a loss; anything never colored is a draw.
*/
func catMouseGame(graph [][]int) int {
	const (
		draw      = 0
		mouseWins = 1
		catWins   = 2
	)
	n := len(graph)

	// color[m][c][t] is the outcome when the mouse is at m, the cat at c,
	// and it is player t's turn (0 = mouse, 1 = cat).
	color := make([][][2]int, n)
	// degree[m][c][t] counts the mover's legal moves not yet known to lose.
	degree := make([][][2]int, n)
	for m := 0; m < n; m++ {
		color[m] = make([][2]int, n)
		degree[m] = make([][2]int, n)
		for c := 0; c < n; c++ {
			degree[m][c][0] = len(graph[m])
			degree[m][c][1] = len(graph[c])
			for _, x := range graph[c] {
				if x == 0 {
					// The cat may not move into the hole.
					degree[m][c][1]--
					break
				}
			}
		}
	}

	type state struct{ m, c, t int }
	var queue []state
	for c := 1; c < n; c++ {
		for t := 0; t < 2; t++ {
			// Mouse already in the hole: mouse wins regardless of turn.
			color[0][c][t] = mouseWins
			queue = append(queue, state{0, c, t})
			// Cat on the same node as the mouse: cat wins regardless of turn.
			color[c][c][t] = catWins
			queue = append(queue, state{c, c, t})
		}
	}

	for len(queue) > 0 {
		cur := queue[0]
		queue = queue[1:]
		res := color[cur.m][cur.c][cur.t]
		prevTurn := 1 - cur.t

		// Enumerate every predecessor state that can move into cur.
		var origins []int
		if prevTurn == 0 {
			origins = graph[cur.m] // the mouse made the last move
		} else {
			origins = graph[cur.c] // the cat made the last move
		}
		for _, p := range origins {
			pm, pc := p, cur.c
			if prevTurn == 1 {
				pm, pc = cur.m, p
				if pc == 0 {
					continue // the cat can never stand on the hole
				}
			}
			if color[pm][pc][prevTurn] != draw {
				continue
			}
			moverWins := (prevTurn == 0 && res == mouseWins) ||
				(prevTurn == 1 && res == catWins)
			if moverWins {
				// The previous mover can pick this move and win outright.
				color[pm][pc][prevTurn] = res
				queue = append(queue, state{pm, pc, prevTurn})
			} else {
				degree[pm][pc][prevTurn]--
				if degree[pm][pc][prevTurn] == 0 {
					// Every move from this predecessor loses for its mover.
					color[pm][pc][prevTurn] = res
					queue = append(queue, state{pm, pc, prevTurn})
				}
			}
		}
	}
	// Initial position: mouse at 1, cat at 2, mouse to move.
	return color[1][2][0]
}
// mouseMove reports whether the mouse standing at node u can reach the hole
// (node 0) along nodes not yet marked in visit: it returns 1 if the hole is
// reachable and 0 otherwise. visit is mutated to mark explored nodes.
//
// NOTE(review): the original function was an empty stub that did not compile;
// this depth-first reachability search is a best-guess implementation of the
// intended helper — confirm the intended semantics.
func mouseMove(u int, graph [][]int, visit map[int]bool) int {
	if u == 0 {
		return 1
	}
	visit[u] = true
	for _, v := range graph[u] {
		if visit[v] {
			continue
		}
		if mouseMove(v, graph, visit) == 1 {
			return 1
		}
	}
	return 0
}
// +build darwin,!go1.12
package udwFile
import (
"os"
"syscall"
)
// FileSync flushes the file to stable storage via the darwin F_FULLFSYNC
// fcntl, which (unlike plain fsync) also asks the drive to flush its cache.
func FileSync(f *os.File) error {
	if _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), syscall.F_FULLFSYNC, 0); errno != 0 {
		return errno
	}
	return nil
}
|
package cmd
import (
"fmt"
"math/rand"
"time"
"github.com/spf13/cobra"
)
// snappleCmd represents the snapple command: it prints one randomly chosen
// entry from snappleFacts.
var snappleCmd = &cobra.Command{
	Use:   "snapple",
	Short: "Generate a random fact",
	Long: `
Ever get bored and just want to hear a random fact? Same.
Beware these are snapple facts...Who knows if they're true.
`,
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println("snapple called")
		rand.Seed(time.Now().Unix())
		// rand.Intn is the idiomatic way to draw an index in [0, len);
		// the previous rand.Int() % len form is subtly biased.
		n := rand.Intn(len(snappleFacts))
		fmt.Println(snappleFacts[n].text)
	},
}
// init registers the snapple subcommand on the root command at startup.
func init() {
	rootCmd.AddCommand(snappleCmd)
}
// snappleFactType categorizes a snappleFact entry.
type snappleFactType string

// Known fact categories. Typing the constants as snappleFactType (they were
// previously untyped strings) documents that they belong to the category
// enumeration and lets the compiler catch accidental misuse.
const (
	sports        snappleFactType = "Sports"
	politics      snappleFactType = "Politics"
	animalKingdom snappleFactType = "Animal Kingdom"
	geography     snappleFactType = "Geography"
	random        snappleFactType = "Random"
)
// snappleFact is a single fact entry printed by the snapple command.
type snappleFact struct {
	id       int             // fact number (presumably the Snapple cap number — not used elsewhere in view)
	category snappleFactType // which category the fact belongs to
	text     string          // the fact itself, printed verbatim
}
// snappleFacts is the pool of facts the snapple command draws from at random.
// The redundant element type in each composite literal was dropped (gofmt -s).
var snappleFacts = []snappleFact{
	{
		id:       4,
		category: animalKingdom,
		text:     "Slugs have four noses.",
	},
	{
		id:       8,
		category: animalKingdom,
		text:     "A bee has five eyelids.",
	},
	{
		id:       9,
		category: animalKingdom,
		text:     "The average speed of a housefly is 4.5 mph.",
	},
	{
		id:       38,
		category: animalKingdom,
		text:     "Fish cough.",
	},
	{
		id:       61,
		category: animalKingdom,
		text:     "Pigs can get sunburn.",
	},
	{
		id:       68,
		category: random,
		text:     `The longest one-syllable word is, "screeched"`,
	},
	{
		id:       71,
		category: geography,
		text:     `There is a town called "Big Ugly" in West Virginia`,
	},
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.