text stringlengths 11 4.05M |
|---|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
)
// Uname implements linux syscall uname.
//
// It fills a linux.UtsName from the task's syscall-table version info and
// UTS namespace, then copies the struct out to the address in args[0].
func Uname(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	version := t.SyscallTable().Version
	uts := t.UTSNamespace()

	// Fill in structure fields.
	var u linux.UtsName
	copy(u.Sysname[:], version.Sysname)
	copy(u.Nodename[:], uts.HostName())
	copy(u.Release[:], version.Release)
	copy(u.Version[:], version.Version)
	// Machine is derived from the syscall table's architecture, not from
	// the architecture this binary was compiled for.
	switch t.SyscallTable().Arch {
	case arch.AMD64:
		copy(u.Machine[:], "x86_64")
	case arch.ARM64:
		copy(u.Machine[:], "aarch64")
	default:
		copy(u.Machine[:], "unknown")
	}
	copy(u.Domainname[:], uts.DomainName())

	// Copy out the result.
	va := args[0].Pointer()
	_, err := u.CopyOut(t, va)
	return 0, nil, err
}
// Setdomainname implements Linux syscall setdomainname.
//
// The new name is read from userspace at args[0] with length args[1];
// EPERM without CAP_SYS_ADMIN, EINVAL for an out-of-range length.
func Setdomainname(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	addr := args[0].Pointer()
	n := args[1].Int()

	ns := t.UTSNamespace()
	// Changing the domain name requires CAP_SYS_ADMIN in the UTS
	// namespace's owning user namespace.
	if !t.HasCapabilityIn(linux.CAP_SYS_ADMIN, ns.UserNamespace()) {
		return 0, nil, linuxerr.EPERM
	}
	if n < 0 || n > linux.UTSLen {
		return 0, nil, linuxerr.EINVAL
	}
	name, err := t.CopyInString(addr, int(n))
	if err != nil {
		return 0, nil, err
	}
	ns.SetDomainName(name)
	return 0, nil, nil
}
// Sethostname implements Linux syscall sethostname.
//
// The new hostname is read from userspace at args[0] with length args[1];
// EPERM without CAP_SYS_ADMIN, EINVAL for an out-of-range length.
func Sethostname(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	addr := args[0].Pointer()
	n := args[1].Int()

	ns := t.UTSNamespace()
	// Changing the hostname requires CAP_SYS_ADMIN in the UTS
	// namespace's owning user namespace.
	if !t.HasCapabilityIn(linux.CAP_SYS_ADMIN, ns.UserNamespace()) {
		return 0, nil, linuxerr.EPERM
	}
	if n < 0 || n > linux.UTSLen {
		return 0, nil, linuxerr.EINVAL
	}
	// Unlike setdomainname above, the name is read as raw bytes.
	buf := make([]byte, n)
	if _, err := t.CopyInBytes(addr, buf); err != nil {
		return 0, nil, err
	}
	ns.SetHostName(string(buf))
	return 0, nil, nil
}
|
package usecase
import (
"github.com/16francs/examin_go/domain/model"
"github.com/16francs/examin_go/domain/service"
)
// SampleUsecase is a sample usecase interface exposing read and create
// operations for Sample entities.
type SampleUsecase interface {
	GetSample() (*model.Sample, error)
	PostSample(name string) (*model.Sample, error)
}
// sampleUsecase implements SampleUsecase by delegating to a SampleService.
type sampleUsecase struct {
	service service.SampleService
}
// NewSampleUsecase creates a sampleUsecase backed by the given service.
func NewSampleUsecase(s service.SampleService) SampleUsecase {
	u := sampleUsecase{service: s}
	return &u
}
// GetSample fetches a sample via the underlying service, returning nil
// along with the error on failure.
func (u *sampleUsecase) GetSample() (*model.Sample, error) {
	s, err := u.service.GetSample()
	if err == nil {
		return s, nil
	}
	return nil, err
}
// PostSample creates a sample with the given name via the underlying
// service, returning nil along with the error on failure.
func (u *sampleUsecase) PostSample(name string) (*model.Sample, error) {
	s, err := u.service.PostSample(name)
	if err == nil {
		return s, nil
	}
	return nil, err
}
|
package shell
import (
"os"
"strings"
"github.com/redhat-openshift-ecosystem/openshift-preflight/cli"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
)
// scorecardCheck runs operator-sdk scorecard against a bundle image and
// validates the reported test results.
type scorecardCheck struct{}
// validate reports whether none of the scorecard results are in a
// "fail" state; failing result logs are written to the error log.
func (p *scorecardCheck) validate(items []cli.OperatorSdkScorecardItem) (bool, error) {
	if len(items) == 0 {
		log.Warn("Did not receive any test result information from scorecard output")
	}
	passed := true
	for _, item := range items {
		for _, result := range item.Status.Results {
			if strings.Contains(result.State, "fail") {
				log.Error(result.Log)
				passed = false
			}
		}
	}
	return passed, nil
}
// getDataToValidate invokes the operator-sdk scorecard engine for the
// given bundle image, pulling namespace/serviceaccount from viper config
// and the kubeconfig path from the environment.
func (p *scorecardCheck) getDataToValidate(bundleImage string, selector []string, resultFile string) (*cli.OperatorSdkScorecardReport, error) {
	opts := cli.OperatorSdkScorecardOptions{
		LogLevel:       "warning",
		OutputFormat:   "json",
		Selector:       selector,
		ResultFile:     resultFile,
		Kubeconfig:     os.Getenv("KUBECONFIG"),
		Namespace:      viper.GetString("namespace"),
		ServiceAccount: viper.GetString("serviceaccount"),
	}
	return operatorSdkEngine.Scorecard(bundleImage, opts)
}
|
package ui
import (
"bytes"
)
// TitleUnderliner is the underline character for the title.
var TitleUnderliner = "="

// Title is a UI component that renders a title with an underline.
type Title struct {
	text string
}

// NewTitle returns a Title that renders the given text.
func NewTitle(title string) *Title {
	return &Title{text: title}
}

// String returns the formatted title: the text on one line followed by
// an underline of the same visible length.
func (t *Title) String() string {
	var buf bytes.Buffer
	buf.WriteString(t.text + "\n")
	// Emit one underline character per rune (not per byte) so that
	// multi-byte UTF-8 titles are not over-underlined.
	for range t.text {
		buf.WriteString(TitleUnderliner)
	}
	buf.WriteString("\n")
	return buf.String()
}
|
package cli
import (
"fmt"
"testing"
"github.com/spf13/cobra"
"github.com/stretchr/testify/require"
"github.com/dikaeinstein/godl/internal/app"
"github.com/dikaeinstein/godl/test"
)
// TestVersionCmd verifies that the "version" subcommand prints the build
// metadata (tag, Go version, git hash, build time) in the expected format
// on stdout with nothing on stderr.
func TestVersionCmd(t *testing.T) {
	info := app.BuildInfo{
		BuildTime: "2021-03-14 00:28",
		GitHash:   "02cb593",
		GitTag:    "v0.11.6",
		GoVersion: "go1.16.2",
	}
	godl := newRootCmd()
	version := newVersionCmd(info)
	registerSubCommands(godl, []*cobra.Command{version})
	// Raw string literal: interior lines are deliberately unindented
	// because they are part of the expected output.
	expectedOutput := fmt.Sprintf(`Version: %s
Go version: %s
Git hash: %s
Built: %s
`, info.GitTag, info.GoVersion, info.GitHash, info.BuildTime)
	output, errOutput := test.ExecuteCommand(t, false, godl, "version")
	require.Equal(t, "", errOutput)
	require.Equal(t, expectedOutput, output)
}
|
package main
import "fmt"
// main demonstrates Go integer literals (decimal, octal, hex) and the
// fmt verbs used to print them in different bases.
func main() {
	sep := "===================="

	a := 100 // decimal literal
	fmt.Printf("i1:%d\n", a)
	fmt.Printf("i1:%b\n", a) // binary representation
	fmt.Printf("i1:%o\n", a) // octal representation
	fmt.Printf("i1:%x\n", a) // hexadecimal representation
	fmt.Printf("i1:%T\n", a) // type
	fmt.Printf("i1:%v\n", a) // default value format
	fmt.Println(sep)

	b := 077 // octal literal
	fmt.Printf("i2:%d\n", b)
	fmt.Printf("i2:%o\n", b)
	fmt.Printf("i2:%T\n", b)
	fmt.Println(sep)

	c := 0x12343ef // hexadecimal literal
	fmt.Printf("i3:%d\n", c)
	fmt.Printf("i3:%x\n", c)
	fmt.Printf("i3:%T\n", c)
	fmt.Println(sep)

	d := int8(67) // explicitly sized integer
	fmt.Printf("i4:%d\n", d)
	fmt.Printf("i4:%o\n", d)
	fmt.Printf("i4:%T\n", d)
}
|
package worker
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"os/exec"
"os/signal"
"strings"
"syscall"
"time"
// include for conditional pprof HTTP server
_ "net/http/pprof"
gocontext "context"
"contrib.go.opencensus.io/exporter/stackdriver"
"go.opencensus.io/trace"
"golang.org/x/oauth2/google"
"google.golang.org/api/option"
"github.com/cenk/backoff"
"github.com/getsentry/raven-go"
librato "github.com/mihasya/go-metrics-librato"
"github.com/pkg/errors"
metrics "github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus"
"github.com/streadway/amqp"
"github.com/travis-ci/worker/backend"
"github.com/travis-ci/worker/config"
"github.com/travis-ci/worker/context"
"github.com/travis-ci/worker/image"
travismetrics "github.com/travis-ci/worker/metrics"
cli "gopkg.in/urfave/cli.v1"
)
const (
	// scopeTraceAppend is the OAuth scope required to append Stackdriver
	// trace spans.
	scopeTraceAppend = "https://www.googleapis.com/auth/trace.append"
)

// rootContext is the process-wide context; it is replaced with the CLI's
// cancellable context in (*CLI).Setup.
var (
	rootContext = gocontext.TODO()
)
// CLI is the top level of execution for the whole shebang
type CLI struct {
	// c is the parsed command-line context.
	c *cli.Context
	// bootTime records process start (UTC) for uptime reporting.
	bootTime time.Time

	ctx    gocontext.Context
	cancel gocontext.CancelFunc
	logger *logrus.Entry

	Config                  *config.Config
	BuildScriptGenerator    BuildScriptGenerator
	BuildTracePersister     BuildTracePersister
	BackendProvider         backend.Provider
	ProcessorPool           *ProcessorPool
	CancellationBroadcaster *CancellationBroadcaster
	JobQueue                JobQueue
	// LogWriterFactory is non-nil only when a separate logs AMQP URI is
	// configured (see setupLogWriterFactory).
	LogWriterFactory LogWriterFactory

	// Sleep durations between heartbeat checks (error vs. success).
	heartbeatErrSleep time.Duration
	heartbeatSleep    time.Duration
}
// NewCLI creates a new *CLI from a *cli.Context
func NewCLI(c *cli.Context) *CLI {
	instance := &CLI{c: c}
	instance.bootTime = time.Now().UTC()
	instance.heartbeatSleep = 5 * time.Minute
	instance.heartbeatErrSleep = 30 * time.Second
	instance.CancellationBroadcaster = NewCancellationBroadcaster()
	return instance
}
// Setup runs one-time preparatory actions and returns a boolean success value
// that is used to determine if it is safe to invoke the Run func
func (i *CLI) Setup() (bool, error) {
	if i.c.Bool("debug") {
		logrus.SetLevel(logrus.DebugLevel)
	}
	// Root cancellable context for the whole process; stored both on the
	// CLI and in the package-level rootContext.
	ctx, cancel := gocontext.WithCancel(gocontext.Background())
	logger := context.LoggerFromContext(ctx).WithField("self", "cli")
	i.ctx = ctx
	rootContext = ctx
	i.cancel = cancel
	i.logger = logger
	logrus.SetFormatter(&logrus.TextFormatter{DisableColors: true})
	i.Config = config.FromCLIContext(i.c)
	// Informational modes: print the requested data and return false so
	// the caller knows not to invoke Run.
	if i.c.Bool("echo-config") {
		config.WriteEnvConfig(i.Config, os.Stdout)
		return false, nil
	}
	if i.c.Bool("list-backend-providers") {
		backend.EachBackend(func(b *backend.Backend) {
			fmt.Println(b.Alias)
		})
		return false, nil
	}
	// One-shot image update mode: refresh images and return false (with
	// any update error) so Run is never invoked.
	if i.c.Bool("update-images") {
		baseURL, err := url.Parse(i.Config.ProviderConfig.Get("IMAGE_SELECTOR_URL"))
		if err != nil {
			return false, err
		}
		imageBaseURL, err := url.Parse(i.Config.ProviderConfig.Get("IMAGE_SERVER_URL"))
		if err != nil {
			return false, err
		}
		selector := image.NewAPISelector(baseURL)
		manager, err := image.NewManager(ctx, selector, imageBaseURL)
		if err != nil {
			logger.WithField("err", err).Error("failed to init image manager")
			return false, err
		}
		err = manager.Update(ctx)
		if err != nil {
			logger.WithField("err", err).Error("failed to update images")
		}
		return false, err
	}
	logger.WithField("cfg", fmt.Sprintf("%#v", i.Config)).Debug("read config")
	i.setupSentry()
	i.setupMetrics()
	err := i.setupOpenCensus(ctx)
	if err != nil {
		logger.WithField("err", err).Error("failed to set up opencensus")
		return false, err
	}
	ctx, span := trace.StartSpan(ctx, "CLI.Setup")
	defer span.End()
	span.AddAttributes(trace.StringAttribute("provider", i.Config.ProviderName))
	generator := NewBuildScriptGenerator(i.Config)
	logger.WithField("build_script_generator", fmt.Sprintf("%#v", generator)).Debug("built")
	i.BuildScriptGenerator = generator
	persister := NewBuildTracePersister(i.Config)
	logger.WithField("build_trace_persister", fmt.Sprintf("%#v", persister)).Debug("built")
	i.BuildTracePersister = persister
	if i.Config.TravisSite != "" {
		i.Config.ProviderConfig.Set("TRAVIS_SITE", i.Config.TravisSite)
	}
	provider, err := backend.NewBackendProvider(i.Config.ProviderName, i.Config.ProviderConfig)
	if err != nil {
		logger.WithField("err", err).Error("couldn't create backend provider")
		return false, err
	}
	err = provider.Setup(ctx)
	if err != nil {
		logger.WithField("err", err).Error("couldn't setup backend provider")
		return false, err
	}
	logger.WithField("provider", fmt.Sprintf("%#v", provider)).Debug("built")
	i.BackendProvider = provider
	ppc := &ProcessorPoolConfig{
		Hostname: i.Config.Hostname,
		Context:  rootContext,
		Config:   i.Config,
	}
	pool := NewProcessorPool(ppc, i.BackendProvider, i.BuildScriptGenerator, i.BuildTracePersister, i.CancellationBroadcaster)
	logger.WithField("pool", pool).Debug("built")
	i.ProcessorPool = pool
	// Optional remote-controller HTTP listener on the default mux (which
	// also serves net/http/pprof, imported above for its side effect).
	if i.c.String("remote-controller-addr") != "" {
		if i.c.String("remote-controller-auth") != "" {
			i.setupRemoteController()
		} else {
			i.logger.Info("skipping remote controller setup without remote-controller-auth set")
		}
		go func() {
			httpAddr := i.c.String("remote-controller-addr")
			i.logger.Info("listening at ", httpAddr)
			_ = http.ListenAndServe(httpAddr, nil)
		}()
	}
	err = i.setupJobQueueAndCanceller()
	if err != nil {
		logger.WithField("err", err).Error("couldn't create job queue and canceller")
		return false, err
	}
	err = i.setupLogWriterFactory()
	if err != nil {
		logger.WithField("err", err).Error("couldn't create logs queue")
		return false, err
	}
	return true, nil
}
// Run starts all long-running processes and blocks until the processor pool
// returns from its Run func
func (i *CLI) Run() {
	i.logger.Info("starting")
	i.handleStartHook()
	defer i.handleStopHook()
	i.logger.Info("worker started")
	defer i.logProcessorInfo("worker finished")
	i.logger.Info("setting up heartbeat")
	i.setupHeartbeat()
	i.logger.Info("starting signal handler loop")
	go i.signalHandler()
	i.logger.WithFields(logrus.Fields{
		"pool_size":         i.Config.PoolSize,
		"queue":             i.JobQueue,
		"logwriter_factory": i.LogWriterFactory,
	}).Debug("running pool")
	// Blocks until the processor pool shuts down; its return value is
	// intentionally discarded here.
	_ = i.ProcessorPool.Run(i.Config.PoolSize, i.JobQueue, i.LogWriterFactory)
	err := i.JobQueue.Cleanup()
	if err != nil {
		i.logger.WithField("err", err).Error("couldn't clean up job queue")
	}
	// LogWriterFactory is only set when a separate logs connection was
	// configured; clean it up too when present.
	if i.LogWriterFactory != nil {
		err := i.LogWriterFactory.Cleanup()
		if err != nil {
			i.logger.WithField("err", err).Error("couldn't clean up logs queue")
		}
	}
}
// setupHeartbeat starts the background heartbeat loop when a heartbeat
// URL is configured. The auth token may be given inline or as a
// "file://<path>" reference, in which case the file contents are used.
func (i *CLI) setupHeartbeat() {
	hbURL := i.c.String("heartbeat-url")
	if hbURL == "" {
		return
	}
	hbTok := i.c.String("heartbeat-url-auth-token")
	if strings.HasPrefix(hbTok, "file://") {
		// TrimPrefix (rather than splitting on "://") keeps the full
		// path even if it happens to contain "://" itself.
		hbTokBytes, err := ioutil.ReadFile(strings.TrimPrefix(hbTok, "file://"))
		if err != nil {
			// Fall back to using the raw token value on read failure.
			i.logger.WithField("err", err).Error("failed to read auth token from file")
		} else {
			hbTok = string(hbTokBytes)
		}
	}
	i.logger.WithField("heartbeat_url", hbURL).Info("starting heartbeat loop")
	go i.heartbeatHandler(hbURL, strings.TrimSpace(hbTok))
}
// handleStartHook runs the configured start-hook command, if any,
// logging its combined output when the command fails.
func (i *CLI) handleStartHook() {
	hookValue := i.c.String("start-hook")
	if hookValue == "" {
		return
	}
	i.logger.WithField("start_hook", hookValue).Info("running start hook")
	argv := stringSplitSpace(hookValue)
	output, err := exec.Command(argv[0], argv[1:]...).CombinedOutput()
	if err != nil {
		i.logger.WithFields(logrus.Fields{
			"err":        err,
			"output":     string(output),
			"start_hook": hookValue,
		}).Error("start hook failed")
	}
}
// handleStopHook runs the configured stop-hook command, if any,
// logging its combined output when the command fails.
func (i *CLI) handleStopHook() {
	hookValue := i.c.String("stop-hook")
	if hookValue == "" {
		return
	}
	i.logger.WithField("stop_hook", hookValue).Info("running stop hook")
	argv := stringSplitSpace(hookValue)
	output, err := exec.Command(argv[0], argv[1:]...).CombinedOutput()
	if err != nil {
		i.logger.WithFields(logrus.Fields{
			"err":       err,
			"output":    string(output),
			"stop_hook": hookValue,
		}).Error("stop hook failed")
	}
}
// setupSentry wires Sentry error reporting (a logrus hook plus raven)
// when a Sentry DSN is configured. Panic/Fatal are always hooked;
// Error level is added when SentryHookErrors is set.
func (i *CLI) setupSentry() {
	if i.Config.SentryDSN == "" {
		return
	}
	levels := []logrus.Level{
		logrus.PanicLevel,
		logrus.FatalLevel,
	}
	if i.Config.SentryHookErrors {
		levels = append(levels, logrus.ErrorLevel)
	}
	sentryHook, err := NewSentryHook(i.Config.SentryDSN, levels)
	if err != nil {
		i.logger.WithField("err", err).Error("couldn't create sentry hook")
	} else {
		// Only register the hook when it was created successfully;
		// previously a failed hook was registered anyway.
		logrus.AddHook(sentryHook)
	}
	err = raven.SetDSN(i.Config.SentryDSN)
	if err != nil {
		i.logger.WithField("err", err).Error("couldn't set DSN in raven")
	}
	raven.SetRelease(VersionString)
}
// setupMetrics starts background metrics reporters: memstats always,
// Librato when email/token/source are all configured, and a stderr log
// reporter when the log-metrics flag is set.
func (i *CLI) setupMetrics() {
	go travismetrics.ReportMemstatsMetrics()
	if i.Config.LibratoEmail != "" && i.Config.LibratoToken != "" && i.Config.LibratoSource != "" {
		i.logger.Info("starting librato metrics reporter")
		go librato.Librato(metrics.DefaultRegistry, time.Minute,
			i.Config.LibratoEmail, i.Config.LibratoToken, i.Config.LibratoSource,
			[]float64{0.50, 0.75, 0.90, 0.95, 0.99, 0.999, 1.0}, time.Millisecond)
	}
	if i.c.Bool("log-metrics") {
		i.logger.Info("starting logger metrics reporter")
		go metrics.Log(metrics.DefaultRegistry, time.Minute,
			log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
	}
}
// loadStackdriverTraceJSON resolves Google credentials for trace
// appending: application-default credentials when no account JSON is
// configured, otherwise credentials parsed from the configured JSON
// blob or file path.
func loadStackdriverTraceJSON(ctx gocontext.Context, stackdriverTraceAccountJSON string) (*google.Credentials, error) {
	if stackdriverTraceAccountJSON == "" {
		creds, err := google.FindDefaultCredentials(ctx, scopeTraceAppend)
		return creds, errors.Wrap(err, "could not build default client")
	}
	raw, err := loadBytes(stackdriverTraceAccountJSON)
	if err != nil {
		return nil, err
	}
	creds, err := google.CredentialsFromJSON(ctx, raw, scopeTraceAppend)
	if err != nil {
		return nil, err
	}
	return creds, nil
}
// loadBytes interprets its argument either as inline JSON (when the
// trimmed value starts with "{") or as a filename whose contents are
// returned.
func loadBytes(filenameOrJSON string) ([]byte, error) {
	if strings.HasPrefix(strings.TrimSpace(filenameOrJSON), "{") {
		return []byte(filenameOrJSON), nil
	}
	contents, err := ioutil.ReadFile(filenameOrJSON)
	if err != nil {
		return nil, err
	}
	return contents, nil
}
// setupOpenCensus configures the OpenCensus Stackdriver exporter and a
// probability sampler when opencensus tracing is enabled in config.
func (i *CLI) setupOpenCensus(ctx gocontext.Context) error {
	if !i.Config.OpencensusTracingEnabled {
		return nil
	}

	// Validate the sampling rate up front so we fail fast instead of
	// building an exporter for an invalid configuration.
	traceSampleRate := i.Config.OpencensusSamplingRate
	if traceSampleRate <= 0 {
		i.logger.WithFields(logrus.Fields{
			"trace_sample_rate": traceSampleRate,
		}).Error("trace sample rate must be positive")
		return errors.New("invalid trace sample rate")
	}

	creds, err := loadStackdriverTraceJSON(ctx, i.Config.StackdriverTraceAccountJSON)
	if err != nil {
		return err
	}

	sd, err := stackdriver.NewExporter(stackdriver.Options{
		ProjectID: i.Config.StackdriverProjectID,
		TraceClientOptions: []option.ClientOption{
			option.WithCredentials(creds),
		},
		MonitoringClientOptions: []option.ClientOption{
			option.WithCredentials(creds),
		},
	})
	if err != nil {
		return err
	}
	defer sd.Flush()

	// Register/enable the trace exporter
	trace.RegisterExporter(sd)
	// Sample 1 out of every traceSampleRate traces.
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(1.0 / float64(traceSampleRate))})
	return nil
}
// heartbeatHandler polls the heartbeat URL until i.ctx is cancelled.
// Each check is retried with exponential backoff (max 10s interval,
// 1m total); after a failed cycle it sleeps heartbeatErrSleep, after a
// successful one heartbeatSleep.
func (i *CLI) heartbeatHandler(heartbeatURL, heartbeatAuthToken string) {
	b := backoff.NewExponentialBackOff()
	b.MaxInterval = 10 * time.Second
	b.MaxElapsedTime = time.Minute
	for {
		err := backoff.Retry(func() error {
			return i.heartbeatCheck(heartbeatURL, heartbeatAuthToken)
		}, b)
		if err != nil {
			i.logger.WithFields(logrus.Fields{
				"heartbeat_url": heartbeatURL,
				"err":           err,
			}).Warn("failed to get heartbeat")
			time.Sleep(i.heartbeatErrSleep)
			continue
		}
		// Exit once the root context is cancelled; otherwise wait before
		// the next check.
		select {
		case <-i.ctx.Done():
			return
		default:
			time.Sleep(i.heartbeatSleep)
		}
	}
}
// heartbeatCheck performs a single GET against the heartbeat endpoint,
// optionally sending a token Authorization header. A JSON response of
// {"state": "down"} triggers a graceful shutdown of the processor pool.
// An error is returned for request failures or status codes above 299.
func (i *CLI) heartbeatCheck(heartbeatURL, heartbeatAuthToken string) error {
	req, err := http.NewRequest("GET", heartbeatURL, nil)
	if err != nil {
		return err
	}
	if heartbeatAuthToken != "" {
		req.Header.Set("Authorization", fmt.Sprintf("token %s", heartbeatAuthToken))
	}
	// Use a client with a timeout so a hung heartbeat endpoint cannot
	// stall the heartbeat loop indefinitely.
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode > 299 {
		return fmt.Errorf("unhappy status code %d", resp.StatusCode)
	}
	body := map[string]string{}
	err = json.NewDecoder(resp.Body).Decode(&body)
	if err != nil {
		return err
	}
	if state, ok := body["state"]; ok && state == "down" {
		i.logger.WithField("heartbeat_state", state).Info("starting graceful shutdown")
		i.ProcessorPool.GracefulShutdown(false)
	}
	return nil
}
// setupRemoteController builds the RemoteController with access to the
// processor pool, the configured auth string, the worker info snapshot
// func, and the root cancel func, then invokes its Setup.
func (i *CLI) setupRemoteController() {
	i.logger.Info("setting up remote controller")
	(&RemoteController{
		pool:       i.ProcessorPool,
		auth:       i.c.String("remote-controller-auth"),
		workerInfo: i.workerInfo,
		cancel:     i.cancel,
	}).Setup()
}
// workerInfo assembles a snapshot of version/build info, uptime, pool
// sizes, and per-processor details for reporting.
func (i *CLI) workerInfo() workerInfo {
	info := workerInfo{
		Version:          VersionString,
		Revision:         RevisionString,
		Generated:        GeneratedString,
		Uptime:           time.Since(i.bootTime).String(),
		PoolSize:         i.ProcessorPool.Size(),
		ExpectedPoolSize: i.ProcessorPool.ExpectedSize(),
		TotalProcessed:   i.ProcessorPool.TotalProcessed(),
	}
	// Collect per-processor status via the pool's iterator.
	i.ProcessorPool.Each(func(_ int, p *Processor) {
		info.Processors = append(info.Processors, p.processorInfo())
	})
	return info
}
// signalHandler dispatches process signals to worker actions: SIGINT
// starts a graceful shutdown, SIGTERM cancels immediately, SIGTTIN and
// SIGTTOU grow/shrink the processor pool, SIGUSR1 dumps processor info,
// and SIGUSR2 toggles graceful shutdown with pause.
func (i *CLI) signalHandler() {
	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan,
		syscall.SIGTERM, syscall.SIGINT, syscall.SIGUSR1,
		syscall.SIGTTIN, syscall.SIGTTOU,
		syscall.SIGUSR2)

	// Block on signal delivery instead of polling with a one-second
	// sleep: behavior is unchanged, but signals are handled immediately
	// and this goroutine no longer wakes up every second.
	for sig := range signalChan {
		switch sig {
		case syscall.SIGINT:
			i.logger.Warn("SIGINT received, starting graceful shutdown")
			i.ProcessorPool.GracefulShutdown(false)
		case syscall.SIGTERM:
			i.logger.Warn("SIGTERM received, shutting down immediately")
			i.cancel()
		case syscall.SIGTTIN:
			i.logger.Info("SIGTTIN received, adding processor to pool")
			i.ProcessorPool.Incr()
		case syscall.SIGTTOU:
			i.logger.Info("SIGTTOU received, removing processor from pool")
			i.ProcessorPool.Decr()
		case syscall.SIGUSR2:
			i.logger.Warn("SIGUSR2 received, toggling graceful shutdown and pause")
			i.ProcessorPool.GracefulShutdown(true)
		case syscall.SIGUSR1:
			i.logProcessorInfo("received SIGUSR1")
		default:
			i.logger.WithField("signal", sig).Info("ignoring unknown signal")
		}
	}
}
// logProcessorInfo logs an overview of the worker (version, uptime, pool
// counts) followed by one line per processor. msg defaults to
// "processor pool info" when empty.
func (i *CLI) logProcessorInfo(msg string) {
	if msg == "" {
		msg = "processor pool info"
	}
	i.logger.WithFields(logrus.Fields{
		"version":         VersionString,
		"revision":        RevisionString,
		"generated":       GeneratedString,
		"boot_time":       i.bootTime.String(),
		"uptime_min":      time.Since(i.bootTime).Minutes(),
		"pool_size":       i.ProcessorPool.Size(),
		"total_processed": i.ProcessorPool.TotalProcessed(),
	}).Info(msg)
	// One log line per processor with its id, counters, and status.
	i.ProcessorPool.Each(func(n int, proc *Processor) {
		i.logger.WithFields(logrus.Fields{
			"n":           n,
			"id":          proc.ID,
			"processed":   proc.ProcessedCount,
			"status":      proc.CurrentStatus,
			"last_job_id": proc.LastJobID,
		}).Info("processor info")
	})
}
// setupJobQueueAndCanceller builds one job queue per comma-separated
// entry in Config.QueueType ("amqp", "file", or "http"), then stores
// either the single queue or a multi-source wrapper in i.JobQueue.
// Unknown queue types and an empty queue list are errors.
func (i *CLI) setupJobQueueAndCanceller() error {
	subQueues := []JobQueue{}
	for _, queueType := range strings.Split(i.Config.QueueType, ",") {
		queueType = strings.TrimSpace(queueType)
		switch queueType {
		case "amqp":
			jobQueue, canceller, err := i.buildAMQPJobQueueAndCanceller()
			if err != nil {
				return err
			}
			// The canceller runs in the background for the process lifetime.
			go canceller.Run()
			subQueues = append(subQueues, jobQueue)
		case "file":
			jobQueue, err := i.buildFileJobQueue()
			if err != nil {
				return err
			}
			subQueues = append(subQueues, jobQueue)
		case "http":
			jobQueue, err := i.buildHTTPJobQueue()
			if err != nil {
				return err
			}
			subQueues = append(subQueues, jobQueue)
		default:
			return fmt.Errorf("unknown queue type %q", queueType)
		}
	}
	if len(subQueues) == 0 {
		return fmt.Errorf("no queues built")
	}
	if len(subQueues) == 1 {
		i.JobQueue = subQueues[0]
	} else {
		i.JobQueue = NewMultiSourceJobQueue(subQueues...)
	}
	return nil
}
// buildAMQPJobQueueAndCanceller dials the jobs AMQP server — with a
// custom CA pool, an insecure (unverified) TLS config, or a plain dial,
// depending on config — then builds the job queue and the cancellation
// listener that share the connection.
func (i *CLI) buildAMQPJobQueueAndCanceller() (*AMQPJobQueue, *AMQPCanceller, error) {
	var amqpConn *amqp.Connection
	var err error
	if i.Config.AmqpTlsCert != "" || i.Config.AmqpTlsCertPath != "" {
		// Custom trust pool: certs may be provided inline and/or via file.
		cfg := new(tls.Config)
		cfg.RootCAs = x509.NewCertPool()
		if i.Config.AmqpTlsCert != "" {
			cfg.RootCAs.AppendCertsFromPEM([]byte(i.Config.AmqpTlsCert))
		}
		if i.Config.AmqpTlsCertPath != "" {
			cert, err := ioutil.ReadFile(i.Config.AmqpTlsCertPath)
			if err != nil {
				return nil, nil, err
			}
			cfg.RootCAs.AppendCertsFromPEM(cert)
		}
		amqpConn, err = amqp.DialConfig(i.Config.AmqpURI,
			amqp.Config{
				Heartbeat:       i.Config.AmqpHeartbeat,
				Locale:          "en_US",
				TLSClientConfig: cfg,
			})
	} else if i.Config.AmqpInsecure {
		// Insecure mode: TLS without server certificate verification.
		amqpConn, err = amqp.DialConfig(
			i.Config.AmqpURI,
			amqp.Config{
				Heartbeat:       i.Config.AmqpHeartbeat,
				Locale:          "en_US",
				TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
			})
	} else {
		amqpConn, err = amqp.DialConfig(i.Config.AmqpURI,
			amqp.Config{
				Heartbeat: i.Config.AmqpHeartbeat,
				Locale:    "en_US",
			})
	}
	if err != nil {
		i.logger.WithField("err", err).Error("couldn't connect to AMQP")
		return nil, nil, err
	}
	// Watch for connection-level errors for the life of the connection.
	go i.amqpErrorWatcher(amqpConn)
	i.logger.Debug("connected to AMQP")
	canceller := NewAMQPCanceller(i.ctx, amqpConn, i.CancellationBroadcaster)
	i.logger.WithField("canceller", fmt.Sprintf("%#v", canceller)).Debug("built")
	jobQueue, err := NewAMQPJobQueue(amqpConn, i.Config.QueueName, i.Config.StateUpdatePoolSize, i.Config.RabbitMQSharding)
	if err != nil {
		return nil, nil, err
	}
	// Set the consumer priority directly instead of altering the signature of
	// NewAMQPJobQueue :sigh_cat:
	jobQueue.priority = i.Config.AmqpConsumerPriority
	jobQueue.DefaultLanguage = i.Config.DefaultLanguage
	jobQueue.DefaultDist = i.Config.DefaultDist
	jobQueue.DefaultArch = i.Config.DefaultArch
	jobQueue.DefaultGroup = i.Config.DefaultGroup
	jobQueue.DefaultOS = i.Config.DefaultOS
	return jobQueue, canceller, nil
}
// buildHTTPJobQueue constructs a job-board-backed HTTP job queue with the
// configured polling and claim-refresh intervals, then applies the
// configured job defaults.
func (i *CLI) buildHTTPJobQueue() (*HTTPJobQueue, error) {
	jobBoardURL, err := url.Parse(i.Config.JobBoardURL)
	if err != nil {
		return nil, errors.Wrap(err, "error parsing job board URL")
	}
	jobQueue, err := NewHTTPJobQueueWithIntervals(
		jobBoardURL, i.Config.TravisSite,
		i.Config.ProviderName, i.Config.QueueName,
		i.Config.HTTPPollingInterval, i.Config.HTTPRefreshClaimInterval,
		i.CancellationBroadcaster)
	if err != nil {
		return nil, errors.Wrap(err, "error creating HTTP job queue")
	}
	jobQueue.DefaultLanguage = i.Config.DefaultLanguage
	jobQueue.DefaultDist = i.Config.DefaultDist
	jobQueue.DefaultArch = i.Config.DefaultArch
	jobQueue.DefaultGroup = i.Config.DefaultGroup
	jobQueue.DefaultOS = i.Config.DefaultOS
	return jobQueue, nil
}
// buildFileJobQueue constructs a file-based job queue from the configured
// base directory, queue name, and polling interval, then applies the
// configured job defaults.
func (i *CLI) buildFileJobQueue() (*FileJobQueue, error) {
	q, err := NewFileJobQueue(
		i.Config.BaseDir, i.Config.QueueName, i.Config.FilePollingInterval)
	if err != nil {
		return nil, err
	}
	q.DefaultLanguage = i.Config.DefaultLanguage
	q.DefaultDist = i.Config.DefaultDist
	q.DefaultArch = i.Config.DefaultArch
	q.DefaultGroup = i.Config.DefaultGroup
	q.DefaultOS = i.Config.DefaultOS
	return q, nil
}
// setupLogWriterFactory wires up a dedicated AMQP-backed log writer
// factory when LogsAmqpURI is configured.
func (i *CLI) setupLogWriterFactory() error {
	if i.Config.LogsAmqpURI == "" {
		// Without a separate logs URI, log parts travel over the jobs queue.
		return nil
	}
	lwf, err := i.buildAMQPLogWriterFactory()
	if err != nil {
		return err
	}
	i.LogWriterFactory = lwf
	return nil
}
// buildAMQPLogWriterFactory dials the logs AMQP server — mirroring the
// TLS/insecure/plain dialing logic used for the jobs connection — and
// wraps the connection in an AMQPLogWriterFactory.
//
// NOTE(review): the insecure flag and heartbeat reuse the jobs-queue
// settings (AmqpInsecure / AmqpHeartbeat) rather than logs-specific
// ones — confirm this is intentional.
func (i *CLI) buildAMQPLogWriterFactory() (*AMQPLogWriterFactory, error) {
	var amqpConn *amqp.Connection
	var err error
	if i.Config.LogsAmqpTlsCert != "" || i.Config.LogsAmqpTlsCertPath != "" {
		// Custom trust pool: certs may be provided inline and/or via file.
		cfg := new(tls.Config)
		cfg.RootCAs = x509.NewCertPool()
		if i.Config.LogsAmqpTlsCert != "" {
			cfg.RootCAs.AppendCertsFromPEM([]byte(i.Config.LogsAmqpTlsCert))
		}
		if i.Config.LogsAmqpTlsCertPath != "" {
			cert, err := ioutil.ReadFile(i.Config.LogsAmqpTlsCertPath)
			if err != nil {
				return nil, err
			}
			cfg.RootCAs.AppendCertsFromPEM(cert)
		}
		amqpConn, err = amqp.DialConfig(i.Config.LogsAmqpURI,
			amqp.Config{
				Heartbeat:       i.Config.AmqpHeartbeat,
				Locale:          "en_US",
				TLSClientConfig: cfg,
			})
	} else if i.Config.AmqpInsecure {
		// Insecure mode: TLS without server certificate verification.
		amqpConn, err = amqp.DialConfig(
			i.Config.LogsAmqpURI,
			amqp.Config{
				Heartbeat:       i.Config.AmqpHeartbeat,
				Locale:          "en_US",
				TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
			})
	} else {
		amqpConn, err = amqp.DialConfig(i.Config.LogsAmqpURI,
			amqp.Config{
				Heartbeat: i.Config.AmqpHeartbeat,
				Locale:    "en_US",
			})
	}
	if err != nil {
		i.logger.WithField("err", err).Error("couldn't connect to the logs AMQP server")
		return nil, err
	}
	// Watch for connection-level errors for the life of the connection.
	go i.amqpErrorWatcher(amqpConn)
	i.logger.Debug("connected to the logs AMQP server")
	logWriterFactory, err := NewAMQPLogWriterFactory(amqpConn, i.Config.RabbitMQSharding)
	if err != nil {
		return nil, err
	}
	return logWriterFactory, nil
}
// amqpErrorWatcher blocks until the AMQP connection's close notification
// fires. When an error value is actually received (not just a closed
// channel), it cancels the root context and — if the process is still
// alive a minute later — panics to force termination.
func (i *CLI) amqpErrorWatcher(amqpConn *amqp.Connection) {
	errChan := make(chan *amqp.Error)
	errChan = amqpConn.NotifyClose(errChan)
	// ok is false when the channel was closed without delivering an error.
	err, ok := <-errChan
	if ok {
		i.logger.WithField("err", err).Error("amqp connection errored, terminating")
		i.cancel()
		time.Sleep(time.Minute)
		i.logger.Panic("timed out waiting for shutdown after amqp connection error")
	}
}
|
package main
// Leetcode 440. (hard)
//
// findKthNumber returns the k-th smallest integer in [1, n] when the
// integers are ordered lexicographically, by walking the implicit
// 10-ary prefix tree.
func findKthNumber(n int, k int) int {
	rank := 1   // lexicographic position of the current prefix
	prefix := 1 // current candidate prefix
	for rank < k {
		cnt := getCnt(prefix, n)
		if rank+cnt > k {
			// The target lies inside this prefix's subtree: descend.
			rank++
			prefix *= 10
		} else {
			// Skip the entire subtree rooted at prefix.
			rank += cnt
			prefix++
		}
	}
	return prefix
}
// getCnt counts how many integers in [1, n] start with the decimal
// prefix "prefix" — i.e. the size of the subtree rooted at prefix in
// the lexicographic 10-ary tree.
func getCnt(prefix, n int) int {
	total := 0
	// [lo, hi) is the range of values at the current tree level that
	// share the prefix; each level multiplies the bounds by 10.
	for lo, hi := prefix, prefix+1; lo <= n; lo, hi = lo*10, hi*10 {
		total += min(n+1, hi) - lo
	}
	return total
}
|
package lc
// maxRepeating returns the largest k such that word concatenated k times
// is a substring of sequence (LeetCode 1668).
//
// Time: O(len(sequence) * len(word)).
//
// A greedy scan that jumps len(word) bytes after each match misses runs
// that begin inside a previously matched, overlapping occurrence (e.g.
// sequence "ababaaba", word "aba": the run "abaaba" starts at index 2,
// inside the match at index 0), so a DP over ending positions is used.
func maxRepeating(sequence string, word string) int {
	size := len(word)
	// Guard against an empty word (which would otherwise loop forever)
	// and a word longer than the sequence.
	if size == 0 || size > len(sequence) {
		return 0
	}
	// streak[i] is the number of consecutive copies of word ending
	// exactly at byte offset i of sequence.
	streak := make([]int, len(sequence)+1)
	best := 0
	for i := size; i <= len(sequence); i++ {
		if sequence[i-size:i] == word {
			streak[i] = streak[i-size] + 1
			if streak[i] > best {
				best = streak[i]
			}
		}
	}
	return best
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package perf provides utilities to build a JSON file that can be uploaded to
// Chrome Performance Dashboard (https://chromeperf.appspot.com/).
//
// Measurements processed by this package are stored in
// tests/<test-name>/results-chart.json in the Tast results dir. The data is
// typically read by the Autotest TKO parser. In order to have metrics
// uploaded, they have to be listed here:
// src/third_party/autotest/files/tko/perf_upload/perf_dashboard_config.json
//
// Chrome Performance Dashboard docs can be found here:
// https://github.com/catapult-project/catapult/tree/master/dashboard // nocheck
//
// Usage example:
//
// pv := perf.NewValues()
// pv.Set(perf.Metric{
// Name: "mytest_important_quantity"
// Unit: "gizmos"
// Direction: perf.BiggerIsBetter
// }, 42)
// if err := pv.Save(s.OutDir()); err != nil {
// s.Error("Failed saving perf data: ", err)
// }
//
// # Remote usage example
//
// Protocol buffer definition:
//
// import "values.proto";
// service ExampleService {
// rpc Method (google.protobuf.Empty)
// returns (tast.common.perf.perfpb.Values) {}
// }
//
// In order to "import values.proto", add a -I argument pointing at
// src/chromiumos/tast/common/perf/perfpb/ to the protoc command in your
// service's gen.go file. See src/chromiumos/tast/services/cros/arc/gen.go
// for an example.
//
// Service:
//
// import "chromiumos/tast/common/perf"
// import "chromiumos/tast/common/perf/perfpb"
// func (s *ExampleService) Method() (*perfpb.Values, error) {
// p := perf.NewValues()
// ... // Do some computation that generates perf values in p.
// return p.Proto(), nil
// }
//
// Test:
//
// import "chromiumos/tast/common/perf"
// func TestMethod(ctx context.Context, s *testing.State) {
// ... // Set up gRPC, ExampleServiceClient.
// res, err := service.Method()
// if err != nil {
// s.Fatal("RPC failed: ", err)
// }
// if err := perf.NewValuesFromProto(res).Save(s.OutDir()); err != nil {
// s.Fatal("Failed to save perf results: ", err)
// }
// }
package perf
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"math"
"path/filepath"
"regexp"
"sort"
"github.com/google/uuid"
"chromiumos/tast/common/perf/perfpb"
"chromiumos/tast/errors"
)
var (
	// nameRe defines valid names (Name and Variant): 1-256 characters
	// drawn from [a-zA-Z0-9._-].
	nameRe = regexp.MustCompile("^[a-zA-Z0-9._-]{1,256}$")
	// unitRe defines valid units: 1-32 characters from the same alphabet.
	unitRe = regexp.MustCompile("^[a-zA-Z0-9._-]{1,32}$")
)

// DefaultVariantName is the default variant name treated specially by the dashboard.
const DefaultVariantName = "summary"
// genGUID generates a guid for diagnostic structs.
//
// ctx is accepted but not currently used by this implementation.
func genGUID(ctx context.Context) (string, error) {
	id, err := uuid.NewRandom()
	if err != nil {
		return "", err
	}
	return id.String(), nil
}

// runGenGUID is an overridable function pointer so tests can stub out
// GUID generation.
var runGenGUID = genGUID
// Direction indicates which direction of change (bigger or smaller) means improvement
// of a performance metric.
//
// Direction values round-trip through perfpb protos (see
// NewValuesFromProto), so the numeric values must stay stable.
type Direction int

const (
	// SmallerIsBetter means the performance metric is considered improved when it decreases.
	SmallerIsBetter Direction = iota
	// BiggerIsBetter means the performance metric is considered improved when it increases.
	BiggerIsBetter
)
// Metric defines the schema of a performance metric.
//
// All fields are comparable, so Metric is used directly as a map key in
// Values.
type Metric struct {
	// Name is the name of the chart this performance metric appears in.
	Name string
	// Variant is the name of this performance metric in a chart. If this is empty,
	// DefaultVariantName is used. It is treated specially by the dashboard.
	// Charts containing only one performance metric should stick with the default.
	Variant string
	// Unit is a unit name to describe values of this performance metric.
	Unit string
	// Direction indicates which direction of change (bigger or smaller) means improvement
	// of this performance metric.
	Direction Direction
	// Multiple specifies if this performance metric can contain multiple values at a time.
	Multiple bool
}
// setDefaults fills in the default variant name when none was provided.
func (s *Metric) setDefaults() {
	if s.Variant == "" {
		s.Variant = DefaultVariantName
	}
}
// supportedUnits maps a Metric unit type to a histogram unit type.
// Units not listed here fall back to "unitless" in histogramUnit.
// TODO(stevenjb): Investigate supporting additional units.
var supportedUnits = map[string]string{
	"bytes":          "sizeInBytes",
	"bytesPerSecond": "bytesPerSecond",
	"J":              "J",
	"W":              "W",
	"count":          "count",
	"ms":             "ms",
	"n%":             "n%",
	"sigma":          "sigma",
	"tsMs":           "tsMs",
}
// histogramUnit maps the metric's unit and direction onto a histogram
// unit string, falling back to "unitless" for unrecognized units.
func (s *Metric) histogramUnit() string {
	unit, ok := supportedUnits[s.Unit]
	if !ok {
		// "unitless" is a valid histogram unit type. Returning it is
		// preferable to throwing an error here.
		return "unitless"
	}
	// Append the improvement-direction suffix; unknown Direction values
	// deliberately get no suffix.
	switch s.Direction {
	case SmallerIsBetter:
		unit += "_smallerIsBetter"
	case BiggerIsBetter:
		unit += "_biggerIsBetter"
	}
	return unit
}
// Values holds performance metric values.
type Values struct {
	// values maps each metric to its recorded data points. Metric is a
	// comparable struct and serves directly as the map key.
	values map[Metric][]float64
}
// NewValues returns a new empty Values.
func NewValues() *Values {
	v := &Values{}
	v.values = map[Metric][]float64{}
	return v
}
// GetValues returns stored metrics values.
// The returned map is the internal storage, not a copy; mutating it
// mutates this Values.
func (p *Values) GetValues() map[Metric][]float64 {
	return p.values
}
// MergeWithSuffix merges all data points of vs into this Values structure
// optionally adding suffix to the value name. Nil entries in vs are
// skipped. Merging a single-valued metric that is already present panics.
func (p *Values) MergeWithSuffix(suffix string, vs ...*Values) {
	for _, other := range vs {
		if other == nil {
			continue
		}
		for metric, points := range other.values {
			renamed := metric
			renamed.Name += suffix
			if metric.Multiple {
				p.Append(renamed, points...)
				continue
			}
			if existing, present := p.values[renamed]; present {
				panic(fmt.Sprint("MergeWithSuffix(suffix='", suffix, "'): Single-valued metric {", renamed, "} already present as {", existing, "}. Cannot merge with another value."))
			}
			p.Set(renamed, points...)
		}
	}
}
// Merge merges all data points of vs into this Values structure.
// It is equivalent to MergeWithSuffix with an empty suffix.
func (p *Values) Merge(vs ...*Values) {
	p.MergeWithSuffix("", vs...)
}
// NewValuesFromProto creates a Values from a perfpb.Values.
// Multi-valued entries are merged via Append; single-valued entries are
// stored via Set (which panics, through validate, on malformed data).
func NewValuesFromProto(vs ...*perfpb.Values) *Values {
	result := NewValues()
	for _, pb := range vs {
		for _, entry := range pb.Values {
			metric := Metric{
				Name:      entry.Name,
				Variant:   entry.Variant,
				Unit:      entry.Unit,
				Direction: Direction(entry.Direction),
				Multiple:  entry.Multiple,
			}
			if entry.Multiple {
				result.Append(metric, entry.Value...)
			} else {
				result.Set(metric, entry.Value...)
			}
		}
	}
	return result
}
// Append appends performance metrics values. It can be called only for multi-valued
// performance metrics; calling it for a single-valued metric panics.
func (p *Values) Append(s Metric, vs ...float64) {
	s.setDefaults()
	if !s.Multiple {
		panic("Append must not be called for single-valued data series")
	}
	merged := append(p.values[s], vs...)
	p.values[s] = merged
	validate(s, merged)
}
// Set sets a performance metric value(s).
// The caller's slice is stored directly (not copied), so later mutation
// of vs by the caller would be visible here.
// Panics (via validate) if the name/variant/unit are malformed or a
// single-valued metric does not receive exactly one value.
func (p *Values) Set(s Metric, vs ...float64) {
	s.setDefaults()
	p.values[s] = vs
	validate(s, p.values[s])
}
// Format describes the output format for perf data.
// Formats are mapped to output file names by Format.fileName.
type Format int

const (
	// Crosbolt is used for ChromeOS infra dashboards (go/crosbolt).
	Crosbolt Format = iota
	// Chromeperf is used for ChromeOS infra dashboards (go/chromeperf).
	Chromeperf
)
// fileName returns the output file name associated with this format, or
// an error for an unrecognized format value.
func (format Format) fileName() (string, error) {
	if format == Crosbolt {
		return "results-chart.json", nil
	}
	if format == Chromeperf {
		return "perf_results.json", nil
	}
	return "", errors.Errorf("invalid perf format: %d", format)
}
// traceData is a struct corresponding to a trace entry in Chrome Performance Dashboard JSON.
// See: https://github.com/catapult-project/catapult/blob/master/dashboard/docs/data-format.md // nocheck
type traceData struct {
	Units string `json:"units"`
	// ImprovementDirection is "up" or "down" (see toCrosbolt).
	ImprovementDirection string `json:"improvement_direction"`
	// Type is "scalar" or "list_of_scalar_values" (see toCrosbolt).
	Type string `json:"type"`
	// These are pointers to permit us to include zero values in JSON representations.
	Value  *float64   `json:"value,omitempty"`
	Values *[]float64 `json:"values,omitempty"`
}
// diagnostic corresponds to the catapult Diagnostic struct preferred by
// go/chromeperf. For more info see:
// https://chromium.googlesource.com/catapult/+/HEAD/docs/histogram-set-json-format.md
// https://chromeperf.appspot.com/
type diagnostic struct {
	// Type is the diagnostic kind, e.g. "GenericSet" (see toChromeperf).
	Type string `json:"type"`
	// GUID uniquely identifies this diagnostic; histograms reference it
	// through diagnosticMap.Benchmarks.
	GUID   string   `json:"guid"`
	Values []string `json:"values"`
}
// diagnosticMap corresponds to the catapult DiagnosticMap struct.
type diagnosticMap struct {
	// Benchmarks holds the GUID of the benchmarks diagnostic entry
	// (diagnostic.GUID), tying a histogram back to its diagnostic.
	Benchmarks string `json:"benchmarks"`
}
// histogram corresponds to the catapult Histogram format preferred by
// go/chromeperf. See diagnostic struct for more info.
type histogram struct {
	Name        string        `json:"name"`
	Unit        string        `json:"unit"`
	Diagnostics diagnosticMap `json:"diagnostics"`
	SampleValues []float64    `json:"sampleValues"`
	// Running is the seven-element running-statistics array maintained by
	// updateRunning: count, max, log(mean), mean, min, sum, and the sum of
	// squared deviations from the mean.
	Running [7]float64 `json:"running"`
	AllBins [][]int    `json:"allBins"`
}
// updateRunning recomputes the Running statistics array from
// SampleValues. The seven entries are, in order: count, max, log(mean),
// mean, min, sum, and the sum of squared deviations from the mean.
//
// NOTE(review): sorted aliases h.SampleValues, so sort.Float64s reorders
// the sample values that later get serialized — confirm this in-place
// sort is intended rather than an accidental side effect.
// NOTE(review): this panics on an empty SampleValues slice (sorted[0]);
// callers appear to guarantee at least one value — verify.
// NOTE(review): entry three is log of the mean; catapult's "meanlogs" is
// usually the mean of the logs — confirm against the dashboard format.
// Entry seven is the raw sum of squared deviations, not divided by n
// or n-1.
func (h *histogram) updateRunning() {
	sorted := h.SampleValues
	sort.Float64s(sorted)
	min := sorted[0]
	max := sorted[len(sorted)-1]
	sum := 0.0
	for _, v := range sorted {
		sum += v
	}
	mean := sum / float64(len(sorted))
	variance := 0.0
	for _, v := range sorted {
		d := v - mean
		variance += d * d
	}
	h.Running = [7]float64{float64(len(sorted)), max, math.Log(mean), mean, min, sum, variance}
}
// toCrosbolt returns perf values formatted as json for crosbolt: a map
// of chart name -> variant name -> trace entry.
func (p *Values) toCrosbolt() ([]byte, error) {
	charts := map[string]map[string]*traceData{}
	for metric := range p.values {
		// Index rather than range-value so we hold the stored slice itself;
		// a pointer to it is embedded in the trace below.
		samples := p.values[metric]
		// Avoid nil slices since they are encoded to null.
		if samples == nil {
			samples = []float64{}
		}
		traces := charts[metric.Name]
		if traces == nil {
			traces = map[string]*traceData{}
			charts[metric.Name] = traces
		}
		trace := &traceData{Units: metric.Unit}
		if metric.Direction == BiggerIsBetter {
			trace.ImprovementDirection = "up"
		} else {
			trace.ImprovementDirection = "down"
		}
		if metric.Multiple {
			trace.Type = "list_of_scalar_values"
			trace.Values = &samples
		} else {
			trace.Type = "scalar"
			trace.Value = &samples[0]
		}
		traces[metric.Variant] = trace
	}
	return json.MarshalIndent(charts, "", " ")
}
// toChromeperf returns perf values formatted as json for chromeperf.
//
// The output follows the catapult histogram-set JSON format: one array
// holding a single GenericSet diagnostic followed by one histogram per
// metric name, in sorted name order for deterministic output. Metrics
// that share a Name (different variants) are merged into one histogram.
func (p *Values) toChromeperf(ctx context.Context) ([]byte, error) {
	guid, err := runGenGUID(ctx)
	if err != nil {
		return nil, err
	}
	diag := diagnostic{
		Type:   "GenericSet",
		GUID:   guid,
		Values: []string{"disk_image_size"},
	}
	hgrams := map[string]histogram{}
	for s, vs := range p.values {
		if vs == nil {
			continue
		}
		h, ok := hgrams[s.Name]
		if ok {
			// TODO(stevenjb): Handle Variances and resolve mismatched units.
			h.SampleValues = append(h.SampleValues, vs...)
		} else {
			h = histogram{
				Name:         s.Name,
				Unit:         s.histogramUnit(),
				Diagnostics:  diagnosticMap{Benchmarks: diag.GUID},
				SampleValues: vs,
				AllBins:      [][]int{{1}},
			}
		}
		h.updateRunning()
		// h is a copy of the map entry, so it must be written back.
		// Previously the merge branch above never stored h again, silently
		// dropping the appended samples and the recomputed running stats.
		hgrams[s.Name] = h
	}
	// The json file format is an array of diagnostic and histogram structs.
	var data []interface{}
	// Make diag the first entry.
	data = append(data, diag)
	// Append the hgrams entries in deterministic (Name) order.
	keys := make([]string, 0, len(hgrams))
	for k := range hgrams {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		data = append(data, hgrams[k])
	}
	return json.MarshalIndent(data, "", " ")
}
// Save saves performance metric values as a JSON file named and formatted for
// crosbolt. outDir should be the output directory path obtained from
// testing.State.
func (p *Values) Save(outDir string) error {
	// Renamed locals: the original called the encoded bytes "json", which
	// shadowed the encoding/json package name.
	name, err := Crosbolt.fileName()
	if err != nil {
		return err
	}
	data, err := p.toCrosbolt()
	if err != nil {
		return err
	}
	return ioutil.WriteFile(filepath.Join(outDir, name), data, 0644)
}
// Proto converts this Values to something that can be passed in a gRPC call.
func (p *Values) Proto() *perfpb.Values {
	out := &perfpb.Values{}
	for metric, points := range p.values {
		v := &perfpb.Value{
			Name:      metric.Name,
			Variant:   metric.Variant,
			Unit:      metric.Unit,
			Direction: perfpb.Direction(metric.Direction),
			Multiple:  metric.Multiple,
			Value:     points,
		}
		out.Values = append(out.Values, v)
	}
	return out
}
// SaveAs saves performance metric values in the format provided to outDir.
// outDir should be the output directory path obtained from testing.State.
// format must be either Crosbolt or Chromeperf.
func (p *Values) SaveAs(ctx context.Context, outDir string, format Format) error {
	// An invalid format is rejected here, so the switch below needs no
	// default case. The encoded bytes are named "data" to avoid shadowing
	// the encoding/json package as the original "json" local did.
	fileName, err := format.fileName()
	if err != nil {
		return err
	}
	var data []byte
	switch format {
	case Crosbolt:
		data, err = p.toCrosbolt()
	case Chromeperf:
		data, err = p.toChromeperf(ctx)
	}
	if err != nil {
		return err
	}
	return ioutil.WriteFile(filepath.Join(outDir, fileName), data, 0644)
}
// validate panics if the metric's name, variant, or unit fails the
// package's regexp checks (nameRe/unitRe, declared elsewhere in this
// package), or if a single-valued metric does not hold exactly one value.
// Panicking is deliberate: these are programmer errors in the caller.
func validate(s Metric, vs []float64) {
	if !nameRe.MatchString(s.Name) {
		panic(fmt.Sprintf("Metric has illegal Name: %v", s))
	}
	if !nameRe.MatchString(s.Variant) {
		panic(fmt.Sprintf("Metric has illegal Variant: %v", s))
	}
	if !unitRe.MatchString(s.Unit) {
		panic(fmt.Sprintf("Metric has illegal Unit: %v", s))
	}
	if !s.Multiple && len(vs) != 1 {
		panic(fmt.Sprintf("Metric requires single-valued: %v", s))
	}
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql_test
import (
"context"
"fmt"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/datadriven"
)
// TestSavepoints runs the datadriven savepoint tests. Each test file under
// testdata/savepoints feeds SQL statements one line at a time into a single
// connection and records, after every statement, the transaction status, a
// "progress bar" showing which per-statement writes survived (rollbacks
// undo them), and the current savepoint stack.
func TestSavepoints(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	datadriven.Walk(t, "testdata/savepoints", func(t *testing.T, path string) {
		params := base.TestServerArgs{}
		s, sqlConn, _ := serverutils.StartServer(t, params)
		defer s.Stopper().Stop(ctx)
		// One row is inserted per executed statement; rows that disappear
		// after a rollback reveal which writes were cancelled.
		if _, err := sqlConn.Exec("CREATE TABLE progress(n INT, marker BOOL)"); err != nil {
			t.Fatal(err)
		}
		datadriven.RunTest(t, path, func(t *testing.T, td *datadriven.TestData) string {
			switch td.Cmd {
			case "sql":
				// Implicitly abort any previously-ongoing txn.
				_, _ = sqlConn.Exec("ABORT")
				// Prepare for the next test.
				if _, err := sqlConn.Exec("DELETE FROM progress"); err != nil {
					td.Fatalf(t, "cleaning up: %v", err)
				}
				// Prepare a buffer to accumulate the results.
				var buf strings.Builder
				// We're going to execute the input line-by-line.
				stmts := strings.Split(td.Input, "\n")
				// progressBar is going to show the cancellation of writes
				// during rollbacks.
				progressBar := make([]byte, len(stmts))
				// erase resets the bar to '.' placeholders while a txn is
				// open, or 'X' once it is no longer open.
				erase := func(status string) {
					char := byte('.')
					if !isOpenTxn(status) {
						char = 'X'
					}
					for i := range progressBar {
						progressBar[i] = char
					}
				}
				// stepNum is the index of the current statement
				// in the input.
				var stepNum int
				// updateProgress loads the current set of writes
				// into the progress bar.
				updateProgress := func() {
					rows, err := sqlConn.Query("SELECT n FROM progress")
					if err != nil {
						t.Logf("%d: reading progress: %v", stepNum, err)
						// It's OK if we can't read this.
						return
					}
					defer rows.Close()
					for rows.Next() {
						var n int
						if err := rows.Scan(&n); err != nil {
							td.Fatalf(t, "%d: unexpected error while reading progress: %v", stepNum, err)
						}
						if n < 1 || n > len(progressBar) {
							td.Fatalf(t, "%d: unexpected stepnum in progress table: %d", stepNum, n)
						}
						progressBar[n-1] = '#'
					}
				}
				// getTxnStatus retrieves the current txn state.
				// This is guaranteed to always succeed because SHOW TRANSACTION STATUS
				// is an observer statement.
				getTxnStatus := func() string {
					row := sqlConn.QueryRow("SHOW TRANSACTION STATUS")
					var status string
					if err := row.Scan(&status); err != nil {
						td.Fatalf(t, "%d: unable to retrieve txn status: %v", stepNum, err)
					}
					return status
				}
				// showSavepointStatus is like getTxnStatus but retrieves the
				// savepoint stack, written to buf as sp1>sp2>... or "(none)".
				showSavepointStatus := func() {
					rows, err := sqlConn.Query("SHOW SAVEPOINT STATUS")
					if err != nil {
						td.Fatalf(t, "%d: unable to retrieve savepoint status: %v", stepNum, err)
					}
					defer rows.Close()
					comma := ""
					hasSavepoints := false
					for rows.Next() {
						var name string
						var isRestart bool
						if err := rows.Scan(&name, &isRestart); err != nil {
							td.Fatalf(t, "%d: unexpected error while reading savepoints: %v", stepNum, err)
						}
						if isRestart {
							name += "(r)"
						}
						buf.WriteString(comma)
						buf.WriteString(name)
						hasSavepoints = true
						comma = ">"
					}
					if !hasSavepoints {
						buf.WriteString("(none)")
					}
				}
				// report shows the progress of execution so far after
				// each statement executed.
				report := func(beforeStatus, afterStatus string) {
					erase(afterStatus)
					if isOpenTxn(afterStatus) {
						updateProgress()
					}
					fmt.Fprintf(&buf, "-- %-11s -> %-11s %s ", beforeStatus, afterStatus, string(progressBar))
					buf.WriteByte(' ')
					showSavepointStatus()
					buf.WriteByte('\n')
				}
				// The actual execution of the statements starts here.
				beforeStatus := getTxnStatus()
				for i, stmt := range stmts {
					stepNum = i + 1
					// Before each statement, mark the progress so far with
					// a KV write.
					if isOpenTxn(beforeStatus) {
						_, err := sqlConn.Exec("INSERT INTO progress(n, marker) VALUES ($1, true)", stepNum)
						if err != nil {
							td.Fatalf(t, "%d: before-stmt: %v", stepNum, err)
						}
					}
					// Run the statement and report errors/results.
					fmt.Fprintf(&buf, "%d: %s -- ", stepNum, stmt)
					execRes, err := sqlConn.Exec(stmt)
					if err != nil {
						fmt.Fprintf(&buf, "%v\n", err)
					} else {
						nRows, err := execRes.RowsAffected()
						if err != nil {
							fmt.Fprintf(&buf, "error retrieving rows: %v\n", err)
						} else {
							fmt.Fprintf(&buf, "%d row%s\n", nRows, util.Pluralize(nRows))
						}
					}
					// Report progress on the next line
					afterStatus := getTxnStatus()
					report(beforeStatus, afterStatus)
					beforeStatus = afterStatus
				}
				return buf.String()
			default:
				td.Fatalf(t, "unknown directive: %s", td.Cmd)
			}
			return ""
		})
	})
}
// isOpenTxn reports whether the given SHOW TRANSACTION STATUS value means
// statements can run: either an open transaction or no transaction at all.
func isOpenTxn(status string) bool {
	switch status {
	case sql.OpenStateStr, sql.NoTxnStateStr:
		return true
	}
	return false
}
|
package main
import (
"fmt"
"os"
"strconv"
"strings"
)
// main brute-forces the lowest starting value of register "a" for which
// the assembunny program in input emits an alternating 0,1,0,1,... signal
// (Advent of Code 2016, day 25). Candidates 1..999 are tried; a candidate
// is accepted once finishLength consecutive outputs alternate.
func main() {
	// c = 1
	var startA = 0
	// finishLength is how many output values must alternate before a
	// candidate startA is accepted.
	const finishLength int = 100
	var even, finished bool
	instructions := strings.Split(input, "\n")
	for !finished && startA < 1000 {
		// Registers a-d and the program counter p reset for every candidate.
		var a, b, c, d int
		var p int
		startA++
		a = startA
		output := make([]byte, finishLength)
		outputCount := 0
		for p < len(instructions) && outputCount < finishLength {
			// fmt.Printf("p: %d i: %s\n", p, instructions[p])
			fields := strings.Fields(instructions[p])
			var val int
			// var val2 int
			var reg *int
			var regField int
			// Which field indicates a register whose value will change
			switch fields[0] {
			case "inc", "dec":
				regField = 1
			case "cpy":
				regField = 2
			}
			// Which fields
			switch fields[0] {
			case "inc", "dec", "cpy":
				switch fields[regField] {
				case "a":
					reg = &a
				case "b":
					reg = &b
				case "c":
					reg = &c
				case "d":
					reg = &d
				default:
					// NOTE(review): this continue skips the p++ below, so an
					// unexpected register would spin forever on the same
					// instruction — confirm inputs never reach this path.
					fmt.Println("Unexpected register - skipping instruction")
					continue
				}
			}
			// Which fields indicates a value
			switch fields[0] {
			case "cpy", "jnz", "out":
				switch fields[1] {
				case "a":
					val = a
				case "b":
					val = b
				case "c":
					val = c
				case "d":
					val = d
				default:
					// Literal operand; a parse error silently leaves val at 0.
					val, _ = strconv.Atoi(fields[1])
				}
			}
			// if fields[0] == "jnz" {
			// 	switch fields[2] {
			// 	case "a":
			// 		val2 = a
			// 	case "b":
			// 		val2 = b
			// 	case "c":
			// 		val2 = c
			// 	case "d":
			// 		val2 = d
			// 	default:
			// 		val2, _ = strconv.Atoi(fields[2])
			// 	}
			// 	// fmt.Printf("Jump val %d ", val2)
			// }
			switch fields[0] {
			case "cpy":
				*reg = val
				p++
			case "inc":
				*reg = *reg + 1
				p++
			case "dec":
				*reg = *reg - 1
				p++
			case "jnz":
				if val != 0 {
					jump, err := strconv.Atoi(fields[2])
					if err != nil {
						// On a bad jump p is incremented here and jump stays 0,
						// so the p += jump below advances exactly one step.
						fmt.Printf("Bad jump - skipping instruction: %v\n", err)
						p++
					}
					// fmt.Printf("jump %d to %d \n", jump, p+jump)
					p += jump
				} else {
					p++
				}
			case "out":
				if outputCount == 0 {
					even = (val % 2) == 0
				} else {
					// even tells us whether it was even last time around, so this time we want it to be different
					if (val%2 == 0) == even {
						fmt.Printf("No good after %d chars with A set to %d", outputCount, startA)
						// Force the inner loop to wind down after recording this value.
						outputCount = finishLength - 1
					} else {
						// fmt.Printf(".%d", outputCount)
						if outputCount >= finishLength-1 {
							finished = true
							fmt.Printf("\nDone! started with %d: %s\n", startA, output)
							os.Exit(1)
							// NOTE(review): unreachable — os.Exit never returns.
							break
						}
					}
					even = !even
				}
				// Record the emitted digit; out operands are register values,
				// so only the first character of the decimal form is kept.
				output[outputCount] = strconv.Itoa(val)[0]
				outputCount++
				p++
			default:
				panic("Unexpected instruction")
			}
			// fmt.Printf(" a %d, b %d, c %d, d %d\n", a, b, c, d)
		}
		// fmt.Printf(" a %d, b %d, c %d, d %d\n", a, b, c, d)
		fmt.Printf(" %s\n", output)
	}
}
// var input = `cpy 2 a
// tgl a
// tgl a
// tgl a
// cpy 1 a
// dec a
// dec a`
// input is the puzzle's assembunny program, one instruction per line.
// main sets register a before each run; "out" emits the clock signal
// being checked for alternation.
var input = `cpy a d
cpy 4 c
cpy 643 b
inc d
dec b
jnz b -2
dec c
jnz c -5
cpy d a
jnz 0 0
cpy a b
cpy 0 a
cpy 2 c
jnz b 2
jnz 1 6
dec b
dec c
jnz c -4
inc a
jnz 1 -7
cpy 2 b
jnz c 2
jnz 1 4
dec b
dec c
jnz 1 -4
jnz 0 0
out b
jnz a -19
jnz 1 -21`
|
package lc
import "sort"
// Time: O(n logn)
// Benchmark: 28ms 6.8mb | 80% 19%
func countGoodRectangles(rectangles [][]int) int {
min := func(r []int) int {
if r[0] < r[1] {
return r[0]
}
return r[1]
}
sort.Slice(rectangles, func(i, j int) bool {
r1 := min(rectangles[i])
r2 := min(rectangles[j])
return r1 > r2
})
count := 1
for i := 0; i < len(rectangles)-1; i++ {
r1 := min(rectangles[i])
r2 := min(rectangles[i+1])
if r1 > r2 {
break
}
count++
}
return count
}
|
package main
import (
"fmt"
)
// main demonstrates in-place reversal of even- and odd-length slices.
//
// Fixes over the previous version: the non-idiomatic "for i, _ := range"
// is replaced by "for i := range", and the three copy-pasted
// fill/print/reverse/print sections are folded into one helper. Output
// is unchanged.
func main() {
	// Sizes match the original demonstration: even, odd, even.
	for _, n := range []int{10, 5, 6} {
		printAndReverse(n)
	}
}

// printAndReverse builds the slice [1, 2, ..., n], prints it, reverses
// it in place via reverse, and prints the result.
func printAndReverse(n int) {
	s := make([]int, n)
	for i := range s {
		s[i] = i + 1
	}
	fmt.Println(s)
	reverse(s)
	fmt.Println(s)
}
// reverse flips the order of the elements of s in place by swapping
// symmetric pairs from both ends toward the middle.
func reverse(s []int) {
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		s[i], s[j] = s[j], s[i]
	}
}
|
package apimodel

// SubmitMeasurementRequest is the SubmitMeasurement request.
type SubmitMeasurementRequest struct {
	// ReportID is taken from the request URL path (see the `path` tag),
	// not from the JSON body.
	ReportID string `path:"report_id"`
	// Format names the serialization format of Content.
	Format string `json:"format"`
	// Content is the measurement payload; its concrete structure is not
	// constrained here (presumably it depends on Format — verify against
	// the server's handler).
	Content interface{} `json:"content"`
}
// SubmitMeasurementResponse is the SubmitMeasurement response.
type SubmitMeasurementResponse struct {
	// MeasurementUID identifies the submitted measurement.
	MeasurementUID string `json:"measurement_uid"`
}
|
package api
import (
"context"
"encoding/json"
"fmt"
"testing"
"time"
"github.com/brocaar/lorawan"
jwt "github.com/dgrijalva/jwt-go"
"google.golang.org/grpc/codes"
"google.golang.org/grpc"
"github.com/brocaar/loraserver/api/gw"
"github.com/brocaar/loraserver/api/ns"
"github.com/brocaar/loraserver/internal/api/auth"
"github.com/brocaar/loraserver/internal/common"
"github.com/brocaar/loraserver/internal/gateway"
"github.com/brocaar/loraserver/internal/maccommand"
"github.com/brocaar/loraserver/internal/node"
"github.com/brocaar/loraserver/internal/storage"
"github.com/brocaar/loraserver/internal/test"
"github.com/brocaar/lorawan/backend"
"github.com/brocaar/lorawan/band"
. "github.com/smartystreets/goconvey/convey"
)
func TestNetworkServerAPI(t *testing.T) {
conf := test.GetConfig()
db, err := common.OpenDatabase(conf.PostgresDSN)
if err != nil {
t.Fatal(err)
}
common.DB = db
common.RedisPool = common.NewRedisPool(conf.RedisURL)
common.NetID = [3]byte{1, 2, 3}
gateway.MustSetStatsAggregationIntervals([]string{"MINUTE"})
Convey("Given a clean PostgreSQL and Redis database + api instance", t, func() {
test.MustResetDB(db)
test.MustFlushRedis(common.RedisPool)
ctx := context.Background()
api := NetworkServerAPI{}
devEUI := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
devAddr := [4]byte{6, 2, 3, 4}
nwkSKey := [16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
Convey("When calling CreateServiceProfile", func() {
resp, err := api.CreateServiceProfile(ctx, &ns.CreateServiceProfileRequest{
ServiceProfile: &ns.ServiceProfile{
UlRate: 1,
UlBucketSize: 2,
UlRatePolicy: ns.RatePolicy_DROP,
DlRate: 3,
DlBucketSize: 4,
DlRatePolicy: ns.RatePolicy_MARK,
AddGWMetadata: true,
DevStatusReqFreq: 4,
ReportDevStatusBattery: true,
ReportDevStatusMargin: true,
DrMin: 5,
DrMax: 6,
ChannelMask: []byte{1, 2, 3},
PrAllowed: true,
HrAllowed: true,
RaAllowed: true,
NwkGeoLoc: true,
TargetPER: 1,
MinGWDiversity: 7,
},
})
So(err, ShouldBeNil)
So(resp.ServiceProfileID, ShouldNotEqual, "")
Convey("Then GetServiceProfile returns the service-profile", func() {
getResp, err := api.GetServiceProfile(ctx, &ns.GetServiceProfileRequest{
ServiceProfileID: resp.ServiceProfileID,
})
So(err, ShouldBeNil)
So(getResp.ServiceProfile, ShouldResemble, &ns.ServiceProfile{
ServiceProfileID: resp.ServiceProfileID,
UlRate: 1,
UlBucketSize: 2,
UlRatePolicy: ns.RatePolicy_DROP,
DlRate: 3,
DlBucketSize: 4,
DlRatePolicy: ns.RatePolicy_MARK,
AddGWMetadata: true,
DevStatusReqFreq: 4,
ReportDevStatusBattery: true,
ReportDevStatusMargin: true,
DrMin: 5,
DrMax: 6,
ChannelMask: []byte{1, 2, 3},
PrAllowed: true,
HrAllowed: true,
RaAllowed: true,
NwkGeoLoc: true,
TargetPER: 1,
MinGWDiversity: 7,
})
})
Convey("Then UpdateServiceProfile updates the service-profile", func() {
_, err := api.UpdateServiceProfile(ctx, &ns.UpdateServiceProfileRequest{
ServiceProfile: &ns.ServiceProfile{
ServiceProfileID: resp.ServiceProfileID,
UlRate: 2,
UlBucketSize: 3,
UlRatePolicy: ns.RatePolicy_MARK,
DlRate: 4,
DlBucketSize: 5,
DlRatePolicy: ns.RatePolicy_DROP,
AddGWMetadata: false,
DevStatusReqFreq: 6,
ReportDevStatusBattery: false,
ReportDevStatusMargin: false,
DrMin: 7,
DrMax: 8,
ChannelMask: []byte{3, 2, 1},
PrAllowed: false,
HrAllowed: false,
RaAllowed: false,
NwkGeoLoc: false,
TargetPER: 2,
MinGWDiversity: 8,
},
})
So(err, ShouldBeNil)
getResp, err := api.GetServiceProfile(ctx, &ns.GetServiceProfileRequest{
ServiceProfileID: resp.ServiceProfileID,
})
So(err, ShouldBeNil)
So(getResp.ServiceProfile, ShouldResemble, &ns.ServiceProfile{
ServiceProfileID: resp.ServiceProfileID,
UlRate: 2,
UlBucketSize: 3,
UlRatePolicy: ns.RatePolicy_MARK,
DlRate: 4,
DlBucketSize: 5,
DlRatePolicy: ns.RatePolicy_DROP,
AddGWMetadata: false,
DevStatusReqFreq: 6,
ReportDevStatusBattery: false,
ReportDevStatusMargin: false,
DrMin: 7,
DrMax: 8,
ChannelMask: []byte{3, 2, 1},
PrAllowed: false,
HrAllowed: false,
RaAllowed: false,
NwkGeoLoc: false,
TargetPER: 2,
MinGWDiversity: 8,
})
})
Convey("Then DeleteServiceProfile deletes the service-profile", func() {
_, err := api.DeleteServiceProfile(ctx, &ns.DeleteServiceProfileRequest{
ServiceProfileID: resp.ServiceProfileID,
})
So(err, ShouldBeNil)
_, err = api.DeleteServiceProfile(ctx, &ns.DeleteServiceProfileRequest{
ServiceProfileID: resp.ServiceProfileID,
})
So(err, ShouldNotBeNil)
So(grpc.Code(err), ShouldEqual, codes.NotFound)
})
})
Convey("When calling CreateRoutingProfile", func() {
resp, err := api.CreateRoutingProfile(ctx, &ns.CreateRoutingProfileRequest{
RoutingProfile: &ns.RoutingProfile{
AsID: "application-server:1234",
},
CaCert: "CACERT",
TlsCert: "TLSCERT",
TlsKey: "TLSKEY",
})
So(err, ShouldBeNil)
So(resp.RoutingProfileID, ShouldNotEqual, "")
Convey("Then GetRoutingProfile returns the routing-profile", func() {
getResp, err := api.GetRoutingProfile(ctx, &ns.GetRoutingProfileRequest{
RoutingProfileID: resp.RoutingProfileID,
})
So(err, ShouldBeNil)
So(getResp.RoutingProfile, ShouldResemble, &ns.RoutingProfile{
AsID: "application-server:1234",
})
So(getResp.CaCert, ShouldEqual, "CACERT")
So(getResp.TlsCert, ShouldEqual, "TLSCERT")
})
Convey("Then UpdateRoutingProfile updates the routing-profile", func() {
_, err := api.UpdateRoutingProfile(ctx, &ns.UpdateRoutingProfileRequest{
RoutingProfile: &ns.RoutingProfile{
RoutingProfileID: resp.RoutingProfileID,
AsID: "new-application-server:1234",
},
CaCert: "CACERT2",
TlsCert: "TLSCERT2",
TlsKey: "TLSKEY2",
})
So(err, ShouldBeNil)
getResp, err := api.GetRoutingProfile(ctx, &ns.GetRoutingProfileRequest{
RoutingProfileID: resp.RoutingProfileID,
})
So(err, ShouldBeNil)
So(getResp.RoutingProfile, ShouldResemble, &ns.RoutingProfile{
AsID: "new-application-server:1234",
})
So(getResp.CaCert, ShouldEqual, "CACERT2")
So(getResp.TlsCert, ShouldEqual, "TLSCERT2")
})
Convey("Then DeleteRoutingProfile deletes the routing-profile", func() {
_, err := api.DeleteRoutingProfile(ctx, &ns.DeleteRoutingProfileRequest{
RoutingProfileID: resp.RoutingProfileID,
})
So(err, ShouldBeNil)
_, err = api.DeleteRoutingProfile(ctx, &ns.DeleteRoutingProfileRequest{
RoutingProfileID: resp.RoutingProfileID,
})
So(err, ShouldNotBeNil)
So(grpc.Code(err), ShouldEqual, codes.NotFound)
})
})
Convey("When calling CreateDeviceProfile", func() {
resp, err := api.CreateDeviceProfile(ctx, &ns.CreateDeviceProfileRequest{
DeviceProfile: &ns.DeviceProfile{
SupportsClassB: true,
ClassBTimeout: 1,
PingSlotPeriod: 2,
PingSlotDR: 3,
PingSlotFreq: 868100000,
SupportsClassC: true,
ClassCTimeout: 4,
MacVersion: "1.0.2",
RegParamsRevision: "B",
RxDelay1: 5,
RxDROffset1: 6,
RxDataRate2: 7,
RxFreq2: 868200000,
FactoryPresetFreqs: []uint32{868100000, 868300000, 868500000},
MaxEIRP: 14,
MaxDutyCycle: 1,
SupportsJoin: true,
Supports32BitFCnt: true,
},
})
So(err, ShouldBeNil)
So(resp.DeviceProfileID, ShouldNotEqual, "")
Convey("Then GetDeviceProfile returns the device-profile", func() {
getResp, err := api.GetDeviceProfile(ctx, &ns.GetDeviceProfileRequest{
DeviceProfileID: resp.DeviceProfileID,
})
So(err, ShouldBeNil)
So(getResp.DeviceProfile, ShouldResemble, &ns.DeviceProfile{
SupportsClassB: true,
ClassBTimeout: 1,
PingSlotPeriod: 2,
PingSlotDR: 3,
PingSlotFreq: 868100000,
SupportsClassC: true,
ClassCTimeout: 4,
MacVersion: "1.0.2",
RegParamsRevision: "B",
RxDelay1: 5,
RxDROffset1: 6,
RxDataRate2: 7,
RxFreq2: 868200000,
FactoryPresetFreqs: []uint32{868100000, 868300000, 868500000},
MaxEIRP: 14,
MaxDutyCycle: 1,
SupportsJoin: true,
RfRegion: "EU868", // set by the api
Supports32BitFCnt: true,
})
})
})
Convey("Given a ServiceProfile, RoutingProfile and DeviceProfile", func() {
sp := storage.ServiceProfile{
ServiceProfile: backend.ServiceProfile{},
}
So(storage.CreateServiceProfile(common.DB, &sp), ShouldBeNil)
rp := storage.RoutingProfile{
RoutingProfile: backend.RoutingProfile{},
}
So(storage.CreateRoutingProfile(common.DB, &rp), ShouldBeNil)
dp := storage.DeviceProfile{
DeviceProfile: backend.DeviceProfile{
FactoryPresetFreqs: []backend.Frequency{
868100000,
868300000,
868500000,
},
},
}
So(storage.CreateDeviceProfile(common.DB, &dp), ShouldBeNil)
Convey("When calling CreateDevice", func() {
_, err := api.CreateDevice(ctx, &ns.CreateDeviceRequest{
Device: &ns.Device{
DevEUI: devEUI[:],
DeviceProfileID: dp.DeviceProfile.DeviceProfileID,
ServiceProfileID: sp.ServiceProfile.ServiceProfileID,
RoutingProfileID: rp.RoutingProfile.RoutingProfileID,
},
})
So(err, ShouldBeNil)
Convey("Then GetDevice returns the device", func() {
resp, err := api.GetDevice(ctx, &ns.GetDeviceRequest{
DevEUI: devEUI[:],
})
So(err, ShouldBeNil)
So(resp.Device, ShouldResemble, &ns.Device{
DevEUI: devEUI[:],
DeviceProfileID: dp.DeviceProfile.DeviceProfileID,
ServiceProfileID: sp.ServiceProfile.ServiceProfileID,
RoutingProfileID: rp.RoutingProfile.RoutingProfileID,
})
})
Convey("Then UpdateDevice updates the device", func() {
rp2Resp, err := api.CreateRoutingProfile(ctx, &ns.CreateRoutingProfileRequest{
RoutingProfile: &ns.RoutingProfile{
AsID: "new-application-server:1234",
},
})
So(err, ShouldBeNil)
_, err = api.UpdateDevice(ctx, &ns.UpdateDeviceRequest{
Device: &ns.Device{
DevEUI: devEUI[:],
DeviceProfileID: dp.DeviceProfile.DeviceProfileID,
ServiceProfileID: sp.ServiceProfile.ServiceProfileID,
RoutingProfileID: rp2Resp.RoutingProfileID,
},
})
So(err, ShouldBeNil)
resp, err := api.GetDevice(ctx, &ns.GetDeviceRequest{
DevEUI: devEUI[:],
})
So(err, ShouldBeNil)
So(resp.Device, ShouldResemble, &ns.Device{
DevEUI: devEUI[:],
DeviceProfileID: dp.DeviceProfile.DeviceProfileID,
ServiceProfileID: sp.ServiceProfile.ServiceProfileID,
RoutingProfileID: rp2Resp.RoutingProfileID,
})
})
Convey("Then DeleteDevice deletes the device", func() {
_, err := api.DeleteDevice(ctx, &ns.DeleteDeviceRequest{
DevEUI: devEUI[:],
})
So(err, ShouldBeNil)
_, err = api.DeleteDevice(ctx, &ns.DeleteDeviceRequest{
DevEUI: devEUI[:],
})
So(err, ShouldNotBeNil)
So(grpc.Code(err), ShouldEqual, codes.NotFound)
})
})
})
Convey("Given a ServiceProfile, RoutingProfile, DeviceProfile and Device", func() {
sp := storage.ServiceProfile{
ServiceProfile: backend.ServiceProfile{
DRMin: 3,
DRMax: 6,
},
}
So(storage.CreateServiceProfile(common.DB, &sp), ShouldBeNil)
rp := storage.RoutingProfile{
RoutingProfile: backend.RoutingProfile{},
}
So(storage.CreateRoutingProfile(common.DB, &rp), ShouldBeNil)
dp := storage.DeviceProfile{
DeviceProfile: backend.DeviceProfile{
FactoryPresetFreqs: []backend.Frequency{
868100000,
868300000,
868500000,
},
RXDelay1: 3,
RXDROffset1: 2,
RXDataRate2: 5,
RXFreq2: 868900000,
},
}
So(storage.CreateDeviceProfile(common.DB, &dp), ShouldBeNil)
d := storage.Device{
DevEUI: devEUI,
DeviceProfileID: dp.DeviceProfileID,
RoutingProfileID: rp.RoutingProfileID,
ServiceProfileID: sp.ServiceProfileID,
}
So(storage.CreateDevice(common.DB, &d), ShouldBeNil)
Convey("Given an item in the device-queue", func() {
_, err := api.CreateDeviceQueueItem(ctx, &ns.CreateDeviceQueueItemRequest{
Item: &ns.DeviceQueueItem{
DevEUI: d.DevEUI[:],
FrmPayload: []byte{1, 2, 3, 4},
FCnt: 10,
FPort: 20,
},
})
So(err, ShouldBeNil)
Convey("When calling ActivateDevice", func() {
_, err := api.ActivateDevice(ctx, &ns.ActivateDeviceRequest{
DevEUI: devEUI[:],
DevAddr: devAddr[:],
NwkSKey: nwkSKey[:],
FCntUp: 10,
FCntDown: 11,
SkipFCntCheck: true,
})
So(err, ShouldBeNil)
Convey("Then the device-queue was flushed", func() {
items, err := storage.GetDeviceQueueItemsForDevEUI(common.DB, d.DevEUI)
So(err, ShouldBeNil)
So(items, ShouldHaveLength, 0)
})
Convey("Then the device was activated as expected", func() {
ds, err := storage.GetDeviceSession(common.RedisPool, devEUI)
So(err, ShouldBeNil)
So(ds, ShouldResemble, storage.DeviceSession{
DeviceProfileID: dp.DeviceProfile.DeviceProfileID,
ServiceProfileID: sp.ServiceProfile.ServiceProfileID,
RoutingProfileID: rp.RoutingProfile.RoutingProfileID,
DevAddr: devAddr,
DevEUI: devEUI,
NwkSKey: nwkSKey,
FCntUp: 10,
FCntDown: 11,
SkipFCntValidation: true,
EnabledChannels: common.Band.GetUplinkChannels(),
ChannelFrequencies: []int{868100000, 868300000, 868500000},
RXDelay: 3,
RX1DROffset: 2,
RX2DR: 5,
RX2Frequency: 868900000,
MaxSupportedDR: 6,
LastDevStatusMargin: 127,
})
})
Convey("Then GetDeviceActivation returns the expected response", func() {
resp, err := api.GetDeviceActivation(ctx, &ns.GetDeviceActivationRequest{
DevEUI: devEUI[:],
})
So(err, ShouldBeNil)
So(resp, ShouldResemble, &ns.GetDeviceActivationResponse{
DevAddr: devAddr[:],
NwkSKey: nwkSKey[:],
FCntUp: 10,
FCntDown: 11,
SkipFCntCheck: true,
})
})
Convey("Then GetNextDownlinkFCntForDevEUI returns the expected FCnt", func() {
resp, err := api.GetNextDownlinkFCntForDevEUI(ctx, &ns.GetNextDownlinkFCntForDevEUIRequest{
DevEUI: devEUI[:],
})
So(err, ShouldBeNil)
So(resp.FCnt, ShouldEqual, 11)
})
Convey("Given an item in the device-queue", func() {
_, err := api.CreateDeviceQueueItem(ctx, &ns.CreateDeviceQueueItemRequest{
Item: &ns.DeviceQueueItem{
DevEUI: d.DevEUI[:],
FrmPayload: []byte{1, 2, 3, 4},
FCnt: 11,
FPort: 20,
},
})
So(err, ShouldBeNil)
Convey("Then GetNextDownlinkFCntForDevEUI returns the expected FCnt", func() {
resp, err := api.GetNextDownlinkFCntForDevEUI(ctx, &ns.GetNextDownlinkFCntForDevEUIRequest{
DevEUI: devEUI[:],
})
So(err, ShouldBeNil)
So(resp.FCnt, ShouldEqual, 12)
})
})
Convey("Then DeactivateDevice deactivates the device and flushes the queue", func() {
_, err := api.CreateDeviceQueueItem(ctx, &ns.CreateDeviceQueueItemRequest{
Item: &ns.DeviceQueueItem{
DevEUI: d.DevEUI[:],
FrmPayload: []byte{1, 2, 3, 4},
FCnt: 10,
FPort: 20,
},
})
So(err, ShouldBeNil)
items, err := storage.GetDeviceQueueItemsForDevEUI(common.DB, d.DevEUI)
So(err, ShouldBeNil)
So(items, ShouldHaveLength, 1)
_, err = api.DeactivateDevice(ctx, &ns.DeactivateDeviceRequest{
DevEUI: devEUI[:],
})
So(err, ShouldBeNil)
_, err = api.GetDeviceActivation(ctx, &ns.GetDeviceActivationRequest{
DevEUI: devEUI[:],
})
So(grpc.Code(err), ShouldEqual, codes.NotFound)
items, err = storage.GetDeviceQueueItemsForDevEUI(common.DB, d.DevEUI)
So(err, ShouldBeNil)
So(items, ShouldHaveLength, 0)
})
Convey("When calling EnqueueDownlinkMACCommand", func() {
mac := lorawan.MACCommand{
CID: lorawan.RXParamSetupReq,
Payload: &lorawan.RX2SetupReqPayload{
Frequency: 868100000,
},
}
b, err := mac.MarshalBinary()
So(err, ShouldBeNil)
_, err = api.EnqueueDownlinkMACCommand(ctx, &ns.EnqueueDownlinkMACCommandRequest{
DevEUI: devEUI[:],
FrmPayload: true,
Cid: uint32(lorawan.RXParamSetupReq),
Commands: [][]byte{b},
})
So(err, ShouldBeNil)
Convey("Then the mac-command has been added to the queue", func() {
queue, err := maccommand.ReadQueueItems(common.RedisPool, devEUI)
So(err, ShouldBeNil)
So(queue, ShouldResemble, []maccommand.Block{
{
CID: lorawan.RXParamSetupReq,
FRMPayload: true,
External: true,
MACCommands: []lorawan.MACCommand{mac},
},
})
})
})
})
})
Convey("When calling CreateDeviceQueueItem", func() {
_, err := api.CreateDeviceQueueItem(ctx, &ns.CreateDeviceQueueItemRequest{
Item: &ns.DeviceQueueItem{
DevEUI: d.DevEUI[:],
FrmPayload: []byte{1, 2, 3, 4},
FCnt: 10,
FPort: 20,
Confirmed: true,
},
})
So(err, ShouldBeNil)
Convey("Then GetDeviceQueueItemsForDevEUI returns the item", func() {
resp, err := api.GetDeviceQueueItemsForDevEUI(ctx, &ns.GetDeviceQueueItemsForDevEUIRequest{
DevEUI: d.DevEUI[:],
})
So(err, ShouldBeNil)
So(resp.Items, ShouldHaveLength, 1)
So(resp.Items[0], ShouldResemble, &ns.DeviceQueueItem{
DevEUI: d.DevEUI[:],
FrmPayload: []byte{1, 2, 3, 4},
FCnt: 10,
FPort: 20,
Confirmed: true,
})
})
Convey("Then FlushDeviceQueueForDevEUI flushes the device-queue", func() {
_, err := api.FlushDeviceQueueForDevEUI(ctx, &ns.FlushDeviceQueueForDevEUIRequest{
DevEUI: d.DevEUI[:],
})
So(err, ShouldBeNil)
resp, err := api.GetDeviceQueueItemsForDevEUI(ctx, &ns.GetDeviceQueueItemsForDevEUIRequest{
DevEUI: d.DevEUI[:],
})
So(err, ShouldBeNil)
So(resp.Items, ShouldHaveLength, 0)
})
})
Convey("When calling GetRandomDevAddr", func() {
resp, err := api.GetRandomDevAddr(ctx, &ns.GetRandomDevAddrRequest{})
So(err, ShouldBeNil)
Convey("A random DevAddr has been returned", func() {
So(resp.DevAddr, ShouldHaveLength, 4)
So(resp.DevAddr, ShouldNotResemble, []byte{0, 0, 0, 0})
})
})
})
Convey("When calling CreateGateway", func() {
req := ns.CreateGatewayRequest{
Mac: []byte{1, 2, 3, 4, 5, 6, 7, 8},
Name: "test-gateway",
Description: "rooftop gateway",
Latitude: 1.1234,
Longitude: 1.1235,
Altitude: 15.5,
}
_, err := api.CreateGateway(ctx, &req)
So(err, ShouldBeNil)
Convey("Then the gateway has been created", func() {
resp, err := api.GetGateway(ctx, &ns.GetGatewayRequest{Mac: req.Mac})
So(err, ShouldBeNil)
So(resp.Mac, ShouldResemble, req.Mac)
So(resp.Name, ShouldEqual, req.Name)
So(resp.Description, ShouldEqual, req.Description)
So(resp.Latitude, ShouldEqual, req.Latitude)
So(resp.Longitude, ShouldEqual, req.Longitude)
So(resp.Altitude, ShouldEqual, req.Altitude)
So(resp.CreatedAt, ShouldNotEqual, "")
So(resp.UpdatedAt, ShouldNotEqual, "")
So(resp.FirstSeenAt, ShouldEqual, "")
So(resp.LastSeenAt, ShouldEqual, "")
})
Convey("Then UpdateGateway updates the gateway", func() {
req := ns.UpdateGatewayRequest{
Mac: []byte{1, 2, 3, 4, 5, 6, 7, 8},
Name: "test-gateway-updated",
Description: "garden gateway",
Latitude: 1.1235,
Longitude: 1.1236,
Altitude: 15.7,
}
_, err := api.UpdateGateway(ctx, &req)
So(err, ShouldBeNil)
resp, err := api.GetGateway(ctx, &ns.GetGatewayRequest{Mac: req.Mac})
So(err, ShouldBeNil)
So(resp.Mac, ShouldResemble, req.Mac)
So(resp.Name, ShouldEqual, req.Name)
So(resp.Description, ShouldEqual, req.Description)
So(resp.Latitude, ShouldEqual, req.Latitude)
So(resp.Longitude, ShouldEqual, req.Longitude)
So(resp.Altitude, ShouldEqual, req.Altitude)
So(resp.CreatedAt, ShouldNotEqual, "")
So(resp.UpdatedAt, ShouldNotEqual, "")
So(resp.FirstSeenAt, ShouldEqual, "")
So(resp.LastSeenAt, ShouldEqual, "")
})
Convey("Then ListGateways returns the gateway", func() {
resp, err := api.ListGateways(ctx, &ns.ListGatewayRequest{
Limit: 10,
Offset: 0,
})
So(err, ShouldBeNil)
So(resp.TotalCount, ShouldEqual, 1)
So(resp.Result, ShouldHaveLength, 1)
So(resp.Result[0].Mac, ShouldResemble, []byte{1, 2, 3, 4, 5, 6, 7, 8})
})
Convey("Then DeleteGateway deletes the gateway", func() {
_, err := api.DeleteGateway(ctx, &ns.DeleteGatewayRequest{
Mac: []byte{1, 2, 3, 4, 5, 6, 7, 8},
})
So(err, ShouldBeNil)
_, err = api.GetGateway(ctx, &ns.GetGatewayRequest{
Mac: []byte{1, 2, 3, 4, 5, 6, 7, 8},
})
So(err, ShouldResemble, grpc.Errorf(codes.NotFound, "gateway does not exist"))
})
Convey("When calling GenerateGatewayToken", func() {
common.GatewayServerJWTSecret = "verysecret"
tokenResp, err := api.GenerateGatewayToken(ctx, &ns.GenerateGatewayTokenRequest{
Mac: []byte{1, 2, 3, 4, 5, 6, 7, 8},
})
So(err, ShouldBeNil)
Convey("Then a valid JWT token has been returned", func() {
token, err := jwt.ParseWithClaims(tokenResp.Token, &auth.Claims{}, func(token *jwt.Token) (interface{}, error) {
if token.Header["alg"] != "HS256" {
return nil, fmt.Errorf("invalid algorithm %s", token.Header["alg"])
}
return []byte("verysecret"), nil
})
So(err, ShouldBeNil)
So(token.Valid, ShouldBeTrue)
claims, ok := token.Claims.(*auth.Claims)
So(ok, ShouldBeTrue)
So(claims.MAC, ShouldEqual, lorawan.EUI64{1, 2, 3, 4, 5, 6, 7, 8})
})
})
Convey("Given some stats for this gateway", func() {
now := time.Now().UTC()
_, err := db.Exec(`
insert into gateway_stats (
mac,
"timestamp",
"interval",
rx_packets_received,
rx_packets_received_ok,
tx_packets_received,
tx_packets_emitted
) values ($1, $2, $3, $4, $5, $6, $7)`,
[]byte{1, 2, 3, 4, 5, 6, 7, 8},
now.Truncate(time.Minute),
"MINUTE",
10,
5,
11,
10,
)
So(err, ShouldBeNil)
Convey("Then GetGatewayStats returns these stats", func() {
resp, err := api.GetGatewayStats(ctx, &ns.GetGatewayStatsRequest{
Mac: []byte{1, 2, 3, 4, 5, 6, 7, 8},
Interval: ns.AggregationInterval_MINUTE,
StartTimestamp: now.Truncate(time.Minute).Format(time.RFC3339Nano),
EndTimestamp: now.Format(time.RFC3339Nano),
})
So(err, ShouldBeNil)
So(resp.Result, ShouldHaveLength, 1)
ts, err := time.Parse(time.RFC3339Nano, resp.Result[0].Timestamp)
So(err, ShouldBeNil)
So(ts.Equal(now.Truncate(time.Minute)), ShouldBeTrue)
So(resp.Result[0].RxPacketsReceived, ShouldEqual, 10)
So(resp.Result[0].RxPacketsReceivedOK, ShouldEqual, 5)
So(resp.Result[0].TxPacketsReceived, ShouldEqual, 11)
So(resp.Result[0].TxPacketsEmitted, ShouldEqual, 10)
})
})
Convey("Given 20 logs for two different DevEUIs", func() {
now := time.Now()
rxInfoSet := []gw.RXInfo{
{
MAC: lorawan.EUI64{1, 2, 3, 4, 5, 6, 7, 8},
Time: &now,
Timestamp: 1234,
Frequency: 868100000,
Channel: 1,
RFChain: 1,
CRCStatus: 1,
CodeRate: "4/5",
RSSI: 110,
LoRaSNR: 5.5,
Size: 10,
DataRate: band.DataRate{
Modulation: band.LoRaModulation,
SpreadFactor: 12,
Bandwidth: 125,
},
},
}
ts := uint32(12345)
txInfo := gw.TXInfo{
MAC: lorawan.EUI64{1, 2, 3, 4, 5, 6, 7, 8},
Immediately: true,
Timestamp: &ts,
Frequency: 868100000,
Power: 14,
CodeRate: "4/5",
DataRate: band.DataRate{
Modulation: band.LoRaModulation,
SpreadFactor: 12,
Bandwidth: 125,
},
}
devEUI1 := lorawan.EUI64{1, 2, 3, 4, 5, 6, 7, 8}
devEUI2 := lorawan.EUI64{8, 7, 6, 5, 4, 3, 2, 1}
phy := lorawan.PHYPayload{
MHDR: lorawan.MHDR{
MType: lorawan.UnconfirmedDataUp,
Major: lorawan.LoRaWANR1,
},
MACPayload: &lorawan.MACPayload{
FHDR: lorawan.FHDR{
DevAddr: lorawan.DevAddr{1, 2, 3, 4},
FCnt: 1,
},
},
}
rxBytes, err := json.Marshal(rxInfoSet)
So(err, ShouldBeNil)
txBytes, err := json.Marshal(txInfo)
So(err, ShouldBeNil)
phyBytes, err := phy.MarshalBinary()
So(err, ShouldBeNil)
for i := 0; i < 10; i++ {
frameLog := node.FrameLog{
DevEUI: devEUI1,
RXInfoSet: &rxBytes,
TXInfo: &txBytes,
PHYPayload: phyBytes,
}
So(node.CreateFrameLog(db, &frameLog), ShouldBeNil)
frameLog.DevEUI = devEUI2
frameLog.TXInfo = nil
So(node.CreateFrameLog(db, &frameLog), ShouldBeNil)
}
Convey("Then GetFrameLogsForDevEUI returns the expected logs", func() {
resp, err := api.GetFrameLogsForDevEUI(ctx, &ns.GetFrameLogsForDevEUIRequest{
DevEUI: devEUI1[:],
Limit: 1,
Offset: 0,
})
So(err, ShouldBeNil)
So(resp.TotalCount, ShouldEqual, 10)
So(resp.Result, ShouldHaveLength, 1)
So(resp.Result[0].CreatedAt, ShouldNotEqual, "")
resp.Result[0].CreatedAt = ""
So(resp.Result[0], ShouldResemble, &ns.FrameLog{
PhyPayload: phyBytes,
TxInfo: &ns.TXInfo{
CodeRate: "4/5",
Frequency: 868100000,
Immediately: true,
Mac: []byte{1, 2, 3, 4, 5, 6, 7, 8},
Power: 14,
Timestamp: 12345,
DataRate: &ns.DataRate{
Modulation: "LORA",
BandWidth: 125,
SpreadFactor: 12,
},
},
RxInfoSet: []*ns.RXInfo{
{
Channel: 1,
CodeRate: "4/5",
Frequency: 868100000,
LoRaSNR: 5.5,
Rssi: 110,
Time: now.Format(time.RFC3339Nano),
Timestamp: 1234,
Mac: []byte{1, 2, 3, 4, 5, 6, 7, 8},
DataRate: &ns.DataRate{
Modulation: "LORA",
BandWidth: 125,
SpreadFactor: 12,
},
},
},
})
})
Convey("When calling CreateChannelConfiguration", func() {
cfResp, err := api.CreateChannelConfiguration(ctx, &ns.CreateChannelConfigurationRequest{
Name: "test-config",
Channels: []int32{0, 1, 2},
})
So(err, ShouldBeNil)
So(cfResp.Id, ShouldNotEqual, 0)
Convey("Then the channel-configuration has been created", func() {
cf, err := api.GetChannelConfiguration(ctx, &ns.GetChannelConfigurationRequest{
Id: cfResp.Id,
})
So(err, ShouldBeNil)
So(cf.Name, ShouldEqual, "test-config")
So(cf.Channels, ShouldResemble, []int32{0, 1, 2})
So(cf.CreatedAt, ShouldNotEqual, "")
So(cf.UpdatedAt, ShouldNotEqual, "")
Convey("Then ListChannelConfigurations returns the channel-configuration", func() {
cfs, err := api.ListChannelConfigurations(ctx, &ns.ListChannelConfigurationsRequest{})
So(err, ShouldBeNil)
So(cfs.Result, ShouldHaveLength, 1)
So(cfs.Result[0], ShouldResemble, cf)
})
Convey("Then UpdateChannelConfiguration updates the channel-configuration", func() {
_, err := api.UpdateChannelConfiguration(ctx, &ns.UpdateChannelConfigurationRequest{
Id: cfResp.Id,
Name: "updated-channel-conf",
Channels: []int32{0, 1},
})
So(err, ShouldBeNil)
cf2, err := api.GetChannelConfiguration(ctx, &ns.GetChannelConfigurationRequest{
Id: cfResp.Id,
})
So(err, ShouldBeNil)
So(cf2.Name, ShouldEqual, "updated-channel-conf")
So(cf2.Channels, ShouldResemble, []int32{0, 1})
So(cf2.CreatedAt, ShouldEqual, cf.CreatedAt)
So(cf2.UpdatedAt, ShouldNotEqual, "")
So(cf2.UpdatedAt, ShouldNotEqual, cf.UpdatedAt)
})
})
Convey("Then the channel-configuration can be assigned to the gateway", func() {
req := ns.UpdateGatewayRequest{
Mac: []byte{1, 2, 3, 4, 5, 6, 7, 8},
Name: "test-gateway-updated",
Description: "garden gateway",
Latitude: 1.1235,
Longitude: 1.1236,
Altitude: 15.7,
ChannelConfigurationID: cfResp.Id,
}
_, err := api.UpdateGateway(ctx, &req)
So(err, ShouldBeNil)
gw, err := api.GetGateway(ctx, &ns.GetGatewayRequest{
Mac: []byte{1, 2, 3, 4, 5, 6, 7, 8},
})
So(err, ShouldBeNil)
So(gw.ChannelConfigurationID, ShouldEqual, cfResp.Id)
})
Convey("Then DeleteChannelConfiguration deletes the channel-configuration", func() {
_, err := api.DeleteChannelConfiguration(ctx, &ns.DeleteChannelConfigurationRequest{
Id: cfResp.Id,
})
So(err, ShouldBeNil)
_, err = api.GetChannelConfiguration(ctx, &ns.GetChannelConfigurationRequest{
Id: cfResp.Id,
})
So(err, ShouldNotBeNil)
So(grpc.Code(err), ShouldEqual, codes.NotFound)
})
Convey("Then CreateExtraChannel creates an extra channel-configuration channel", func() {
ecRes, err := api.CreateExtraChannel(ctx, &ns.CreateExtraChannelRequest{
ChannelConfigurationID: cfResp.Id,
Modulation: ns.Modulation_LORA,
Frequency: 867100000,
BandWidth: 125,
SpreadFactors: []int32{0, 1, 2, 3, 4, 5},
})
So(err, ShouldBeNil)
So(ecRes.Id, ShouldNotEqual, 0)
Convey("Then UpdateExtraChannel updates this extra channel", func() {
_, err := api.UpdateExtraChannel(ctx, &ns.UpdateExtraChannelRequest{
Id: ecRes.Id,
ChannelConfigurationID: cfResp.Id,
Modulation: ns.Modulation_LORA,
Frequency: 867300000,
BandWidth: 250,
SpreadFactors: []int32{5},
})
So(err, ShouldBeNil)
extraChans, err := api.GetExtraChannelsForChannelConfigurationID(ctx, &ns.GetExtraChannelsForChannelConfigurationIDRequest{
Id: cfResp.Id,
})
So(err, ShouldBeNil)
So(extraChans.Result, ShouldHaveLength, 1)
So(extraChans.Result[0].Modulation, ShouldEqual, ns.Modulation_LORA)
So(extraChans.Result[0].Frequency, ShouldEqual, 867300000)
So(extraChans.Result[0].Bandwidth, ShouldEqual, 250)
So(extraChans.Result[0].SpreadFactors, ShouldResemble, []int32{5})
So(extraChans.Result[0].CreatedAt, ShouldNotEqual, "")
So(extraChans.Result[0].UpdatedAt, ShouldNotEqual, "")
})
Convey("Then DeleteExtraChannel deletes this extra channel", func() {
_, err := api.DeleteExtraChannel(ctx, &ns.DeleteExtraChannelRequest{
Id: ecRes.Id,
})
So(err, ShouldBeNil)
extraChans, err := api.GetExtraChannelsForChannelConfigurationID(ctx, &ns.GetExtraChannelsForChannelConfigurationIDRequest{
Id: cfResp.Id,
})
So(err, ShouldBeNil)
So(extraChans.Result, ShouldHaveLength, 0)
})
})
})
})
})
})
}
|
package toy
// baseFragmentShader is the GLSL 3.30 preamble prepended to every user
// fragment shader. It declares Shadertoy-compatible uniforms (iResolution,
// iTime, iMouse, four input channels, ...) and supplies a main() that
// delegates to the user's mainImage() and forces full opacity.
const baseFragmentShader = `
#version 330 core
out vec4 fragColor;
uniform vec3 iResolution; // viewport resolution (in pixels)
uniform float iTime; // shader playback time (in seconds)
uniform float iTimeDelta; // render time (in seconds)
uniform int iFrame; // shader playback frame
uniform sampler2D iChannel0; // input channel. 00 = 2D/Cube
uniform sampler2D iChannel1; // input channel. 01 = 2D/Cube
uniform sampler2D iChannel2; // input channel. 02 = 2D/Cube
uniform sampler2D iChannel3; // input channel. 03 = 2D/Cube
uniform vec3 iChannelResolution[4]; // channel resolution (in pixels)
uniform float iChannelTime[4]; // channel playback time (in seconds)
uniform vec4 iDate; // (year, month, day, time in seconds)
uniform vec4 iMouse; // mouse pixel coords. xy: current (if MLB down), zw: click
vec4 texture(sampler2D s, vec2 c) { return texture2D(s,c); }
void mainImage( out vec4 fragColor, in vec2 fragCoord );
void main() {
	fragColor = vec4(0.0, 0.0, 0.0, 1.0);
	mainImage(fragColor, gl_FragCoord.xy);
	fragColor.w = 1.0;
}`
// defaultVertexShader is a pass-through vertex shader that forwards the 2D
// vertex position unchanged (used for the full-screen quad).
const defaultVertexShader = `
#version 330 core
in vec2 position;
void main() {
	gl_Position = vec4(position, 0.0, 1.0);
}
`
// defaultFragmentShader is the no-op mainImage used when the user supplies
// no fragment shader of their own.
const defaultFragmentShader = `
void mainImage( out vec4 fragColor, in vec2 fragCoord ) {}
`
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package rgbkbd
import (
"context"
"io/ioutil"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/input"
"chromiumos/tast/local/rgbkbd"
"chromiumos/tast/local/upstart"
"chromiumos/tast/testing"
)
// init registers the test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:         CapslockColorChangePreventedForZonedKeyboards,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Verifies that the caps lock keys do not change colors when SetCapsLockState is called from a zoned keyboard",
		Contacts: []string{
			"michaelcheco@google.com",
			"cros-peripherals@google.com",
		},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome"},
		Timeout:      3 * time.Minute,
	})
}
// CapslockColorChangePreventedForZonedKeyboards verifies that toggling Caps
// Lock does NOT change key colors on a zoned RGB keyboard: the rgbkbd daemon
// log must be byte-identical before and after the Caps Lock key presses.
func CapslockColorChangePreventedForZonedKeyboards(ctx context.Context, s *testing.State) {
	const (
		dbusName      = "org.chromium.Rgbkbd"
		dbusPath      = "/org/chromium/Rgbkbd"
		dbusInterface = "org.chromium.Rgbkbd"
		// Capability value passed to SetTestingMode to simulate a
		// four-zone, forty-LED (i.e. zoned, not per-key) keyboard.
		fourZoneFortyLed uint32 = 2
		job                     = "rgbkbd"
	)
	// Reserve time at the end of the test for cleanup (Chrome close, UI dump).
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()
	s.Logf("Restarting %s job and waiting for %s service", job, dbusName)
	if err := upstart.RestartJob(ctx, job); err != nil {
		s.Fatalf("Failed to start %s: %v", job, err)
	}
	rgbkbdService, err := rgbkbd.NewRgbkbd(ctx)
	if err != nil {
		s.Fatalf("Failed to connect to %s: %v", dbusName, err)
	}
	// Put the daemon into testing mode so it behaves as a zoned keyboard.
	err = rgbkbdService.SetTestingMode(ctx, fourZoneFortyLed)
	if err != nil {
		s.Fatal("Failed to set testing mode: ", err)
	}
	cr, err := chrome.New(ctx, chrome.EnableFeatures("RgbKeyboard"))
	if err != nil {
		s.Fatal("Failed to start Chrome: ", err)
	}
	defer cr.Close(cleanupCtx)
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to connect Test API: ", err)
	}
	defer faillog.DumpUITreeOnError(cleanupCtx, s.OutDir(), s.HasError, tconn)
	kb, err := input.Keyboard(ctx)
	if err != nil {
		s.Fatal("Failed to find keyboard: ", err)
	}
	defer kb.Close()
	// Snapshot the daemon's log before touching Caps Lock.
	initialContent, err := ioutil.ReadFile("/run/rgbkbd/log")
	if err != nil {
		s.Fatal("Failed to read initial rgbkbd log contents: ", err)
	}
	// Enable and Disable Capslock.
	if err := kb.Accel(ctx, "alt+search"); err != nil {
		s.Fatal("Failed to press alt+search to enable caps lock: ", err)
	}
	if err := kb.Accel(ctx, "alt+search"); err != nil {
		s.Fatal("Failed to press alt+search to disable caps lock: ", err)
	}
	contentAfterCapsLockKeyPress, err := ioutil.ReadFile("/run/rgbkbd/log")
	if err != nil {
		s.Fatal("Failed to read rgbkbd log contents: ", err)
	}
	// For a zoned keyboard no caps-lock color updates may be logged, so the
	// log must be unchanged.
	if string(initialContent) != string(contentAfterCapsLockKeyPress) {
		s.Fatal("Caps lock change logs written for zoned keyboard")
	}
}
|
package data
import (
"fmt"
"net/url"
"os"
)
// NewSite builds a Site rooted at root, parses the root URL, and opens
// (truncating) the two CSV report files: one for crawl errors and one for
// visited URLs. CSV headers are written immediately. The caller must call
// CleanUp to close both files.
//
// Returns an error if the root URL does not parse or either file cannot be
// opened.
func NewSite(root string, virtualPaths []string, replaceRoots []string, sitemap string, headers Headers, urlFilename, errorFilename string, timeout, retryLimit int) (*Site, error) {
	site := new(Site)
	site.Root = root
	site.Timeout = timeout
	site.RetryLimit = retryLimit
	site.Sitemap = sitemap
	u, err := url.Parse(root)
	if err != nil {
		return nil, fmt.Errorf("parsing url %s. %w", root, err)
	}
	site.RootUrl = u
	site.VirtualPaths = virtualPaths
	site.Headers = headers
	site.ReplaceRoots = replaceRoots
	ef, err := os.OpenFile(errorFilename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return nil, fmt.Errorf("opening error file: %w", err)
	}
	uf, err := os.OpenFile(urlFilename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		// Don't leak the already-opened error file on the failure path.
		ef.Close()
		return nil, fmt.Errorf("opening urls file: %w", err)
	}
	site.errorFile = ef
	site.urlsFile = uf
	site.errorFile.WriteString("Url, Referrer, Code, Details\n")
	site.urlsFile.WriteString("Url, Referrer\n")
	return site, nil
}
// CleanUp closes the error and URL report files opened by NewSite.
// Call it once crawling is finished; Write* methods must not be used after.
func (s *Site) CleanUp() {
	s.errorFile.Close()
	s.urlsFile.Close()
}
// WriteError appends one CSV row to the error report describing a failed
// URL check: the URL, the page that referenced it, the status code, and a
// free-form detail string.
func (s Site) WriteError(url, referrer string, code int, details string) {
	row := fmt.Sprintf("%s, %s, %d, %s\n", url, referrer, code, details)
	s.errorFile.WriteString(row)
}
// WriteUrl appends one CSV row to the URL report: the visited URL and the
// page that referenced it.
func (s Site) WriteUrl(url, referrer string) {
	row := fmt.Sprintf("%s, %s\n", url, referrer)
	s.urlsFile.WriteString(row)
}
|
package main
import "fmt"
// person holds the identity details printed by speak.
type person struct {
	first string // given name
	last  string // family name
	age   int    // age in years
}
func main() {
x := person{
first: "Rudi",
last: "Visagie",
age: 25,
}
x.speak()
}
// speak prints a one-line introduction for p to stdout.
//
// Fix: the original passed "and I am " / " years old" (with embedded
// spaces) to Println, which also inserts a space between operands,
// producing doubled spaces in the output.
func (p person) speak() {
	fmt.Println("My Name is:", p.first, p.last, "and I am", p.age, "years old")
}
|
package main
import (
	"fmt"
	"io"
	"os"
	"strings"
)
// Echo prints its command line arguments (args[0], the program name, is
// skipped) to w as a single space-separated line.
//
// Fix: the original built the line with string += in a loop (quadratic);
// strings.Join does the same in one pass.
func Echo(w io.Writer, args []string) {
	if len(args) == 0 {
		// Preserve the original behavior for an empty argument list:
		// print just a newline.
		fmt.Fprintln(w)
		return
	}
	fmt.Fprintln(w, strings.Join(args[1:], " "))
}
// main echoes the process's command line arguments to stdout.
func main() {
	Echo(os.Stdout, os.Args)
}
|
// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trackerserver
import (
"testing"
"github.com/uber/kraken/core"
"github.com/uber/kraken/lib/hashring"
"github.com/uber/kraken/lib/hostlist"
"github.com/uber/kraken/tracker/metainfoclient"
"github.com/uber/kraken/utils/httputil"
"github.com/uber/kraken/utils/testutil"
"github.com/stretchr/testify/require"
)
// newMetaInfoClient builds a metainfo client that always talks to addr via a
// no-op passive hash ring (no TLS config).
func newMetaInfoClient(addr string) metainfoclient.Client {
	return metainfoclient.New(hashring.NoopPassiveRing(hostlist.Fixture(addr)), nil)
}
// TestGetMetaInfoHandlerFetchesFromOrigin verifies the happy path: a metainfo
// download through the tracker returns the metainfo served by the origin
// cluster.
func TestGetMetaInfoHandlerFetchesFromOrigin(t *testing.T) {
	require := require.New(t)
	mocks, cleanup := newServerMocks(t, Config{})
	defer cleanup()
	addr, stop := testutil.StartServer(mocks.handler())
	defer stop()
	namespace := core.TagFixture()
	mi := core.MetaInfoFixture()
	// The origin is expected to be asked exactly for this namespace/digest.
	mocks.originCluster.EXPECT().GetMetaInfo(namespace, mi.Digest()).Return(mi, nil)
	client := newMetaInfoClient(addr)
	result, err := client.Download(namespace, mi.Digest())
	require.NoError(err)
	require.Equal(mi, result)
}
// TestGetMetaInfoHandlerPropagatesOriginError verifies that an HTTP status
// error from the origin cluster (here 599) is surfaced to the client with
// the same status code.
func TestGetMetaInfoHandlerPropagatesOriginError(t *testing.T) {
	require := require.New(t)
	mocks, cleanup := newServerMocks(t, Config{})
	defer cleanup()
	addr, stop := testutil.StartServer(mocks.handler())
	defer stop()
	namespace := core.TagFixture()
	mi := core.MetaInfoFixture()
	// MinTimes(1): the client may retry, so allow repeated origin calls.
	mocks.originCluster.EXPECT().GetMetaInfo(
		namespace, mi.Digest()).Return(nil, httputil.StatusError{Status: 599}).MinTimes(1)
	client := newMetaInfoClient(addr)
	_, err := client.Download(namespace, mi.Digest())
	require.Error(err)
	require.True(httputil.IsStatus(err, 599))
}
|
package main
import (
"errors"
"fmt"
"os"
"os/signal"
"syscall"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/bwmarrin/discordgo"
)
var (
svcEC2 *ec2.EC2
)
// init creates the shared EC2 client from AWS_REGION (default ap-east-1).
//
// Fix: the original printed a session-creation error and then continued,
// calling ec2.New with an invalid session; now it fails fast instead.
func init() {
	region := os.Getenv("AWS_REGION")
	if len(region) == 0 {
		fmt.Println("AWS_REGION not set; using ap-east-1 as default")
		region = "ap-east-1"
	}
	sess, err := session.NewSession(&aws.Config{
		Region: aws.String(region),
	})
	if err != nil {
		fmt.Println("Error creating AWS session:", err)
		os.Exit(1)
	}
	svcEC2 = ec2.New(sess)
}
// main connects the Discord bot using the DISCORD_KEY token, registers the
// message handler, and blocks until an interrupt/terminate signal arrives.
func main() {
	key := os.Getenv("DISCORD_KEY")
	discord, err := discordgo.New("Bot " + key)
	if err != nil {
		fmt.Println("Error creating Discord session,", err)
		return
	}
	discord.AddHandler(messageCreate)
	// Only guild message events are needed.
	discord.Identify.Intents = discordgo.MakeIntent(discordgo.IntentsGuildMessages)
	err = discord.Open()
	if err != nil {
		fmt.Println("Error opening connection,", err)
		return
	}
	fmt.Println("Bot is now running. Press CTRL-C to exit.")
	// Block until a termination signal is received, then shut down cleanly.
	sc := make(chan os.Signal, 1)
	signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)
	<-sc
	discord.Close()
}
// messageCreate handles incoming Discord messages and replies to the few
// commands the bot understands. Messages from the bot itself are ignored.
//
// Fixes: unmatched messages no longer trigger a ChannelMessageSend with an
// empty string (which the Discord API rejects), and the send error is now
// logged instead of silently discarded.
func messageCreate(session *discordgo.Session, msg *discordgo.MessageCreate) {
	if msg.Author.ID == session.State.User.ID {
		return
	}
	var reply string
	// TODO: Move this or the entire function to a separate file?
	switch msg.Content {
	case "good morning":
		reply = "<:kys:620483919774744596>"
	case "gsgo":
		reply = "<@!199174365408002049> <@!199462593264484352> <@!357042093199196162> <@!199917417953099778> <@!379870473481355264>"
	case "mc.start":
		// Only members holding the required role may start the server.
		if !stringInSlice(msg.Member.Roles, "779331911944372225") {
			reply = "You are not allowed to run this command."
			break
		}
		instanceID, err := getInstanceIDWithNameTag("minecraft-server")
		if err != nil {
			fmt.Println("Error", err)
			reply = "Error starting minecraft server: " + err.Error()
			break
		}
		result, err := startInstanceWithID(instanceID)
		if err != nil {
			fmt.Println("Error", err)
			reply = "Error starting minecraft server: " + err.Error()
			break
		}
		fmt.Println(result.StartingInstances)
		reply = "Starting Minecraft server..."
	}
	// No reply for unrecognized content.
	if reply == "" {
		return
	}
	if _, err := session.ChannelMessageSend(msg.ChannelID, reply); err != nil {
		fmt.Println("Error sending reply:", err)
	}
}
// stringInSlice reports whether needle occurs in haystack.
func stringInSlice(haystack []string, needle string) bool {
	found := false
	for i := 0; i < len(haystack) && !found; i++ {
		found = haystack[i] == needle
	}
	return found
}
// getInstanceIDWithNameTag returns the instance ID of the first EC2 instance
// whose "Name" tag equals name, or an error if none is found.
//
// Fix: the original only checked len(Reservations); a reservation with an
// empty Instances slice would have made the final index expression panic.
func getInstanceIDWithNameTag(name string) (string, error) {
	input := &ec2.DescribeInstancesInput{
		Filters: []*ec2.Filter{
			{
				Name: aws.String("tag:Name"),
				Values: []*string{
					aws.String(name),
				},
			},
		},
	}
	result, err := svcEC2.DescribeInstances(input)
	if err != nil {
		fmt.Println("Error", err)
		return "", err
	}
	// Guard both levels before indexing.
	if len(result.Reservations) == 0 || len(result.Reservations[0].Instances) == 0 {
		err := errors.New("Instance with Name \"" + name + "\" not found")
		fmt.Println("Error", err)
		return "", err
	}
	return *result.Reservations[0].Instances[0].InstanceId, nil
}
// startInstanceWithID issues an EC2 StartInstances request for the single
// instance id and returns the raw API response.
func startInstanceWithID(id string) (*ec2.StartInstancesOutput, error) {
	input := &ec2.StartInstancesInput{
		InstanceIds: []*string{
			aws.String(id),
		},
	}
	return svcEC2.StartInstances(input)
}
|
package main
import (
	"fmt"
	"sync/atomic"
)
// main demonstrates sync/atomic: AddInt32 both updates the addend in place
// and returns the new value, so both prints show 6.
func main() {
	var counter int32 = 3
	updated := atomic.AddInt32(&counter, 3)
	fmt.Printf("a is %d, b is %d\n", counter, updated)
}
|
package webhook
import (
"testing"
"github.com/Dynatrace/dynatrace-operator/src/cmd/config"
cmdManager "github.com/Dynatrace/dynatrace-operator/src/cmd/manager"
"github.com/stretchr/testify/assert"
)
// TestWebhookCommandBuilder exercises the webhook command builder: command
// construction plus each fluent setter storing its value on the builder.
func TestWebhookCommandBuilder(t *testing.T) {
	t.Run("build command", func(t *testing.T) {
		builder := NewWebhookCommandBuilder()
		csiCommand := builder.Build()
		assert.NotNil(t, csiCommand)
		assert.Equal(t, use, csiCommand.Use)
		assert.NotNil(t, csiCommand.RunE)
	})
	t.Run("set config provider", func(t *testing.T) {
		builder := NewWebhookCommandBuilder()
		assert.NotNil(t, builder)
		expectedProvider := &config.MockProvider{}
		builder = builder.SetConfigProvider(expectedProvider)
		assert.Equal(t, expectedProvider, builder.configProvider)
	})
	t.Run("set manager provider", func(t *testing.T) {
		expectedProvider := &cmdManager.MockProvider{}
		builder := NewWebhookCommandBuilder().SetManagerProvider(expectedProvider)
		assert.Equal(t, expectedProvider, builder.managerProvider)
	})
	t.Run("set namespace", func(t *testing.T) {
		builder := NewWebhookCommandBuilder().SetNamespace("namespace")
		assert.Equal(t, "namespace", builder.namespace)
	})
}
|
package api
import (
"github.com/Tnze/go-mc/data"
pk "github.com/Tnze/go-mc/net/packet"
)
// Hand identifies which of the player's hands a packet refers to.
type Hand int32

const (
	MainHand Hand = iota // the player's main hand
	OffHand              // the player's off hand
)
// Chat sends a serverbound chat message packet containing msg.
func (c *Client) Chat(msg string) {
	c.SendPacket(pk.Marshal(data.ChatMessageServerbound, pk.String(msg)))
}

// ToggleFly sends a player-abilities packet: flags byte 2 when enabling
// flight, 0 when disabling it.
func (c *Client) ToggleFly(enable bool) {
	if enable {
		c.SendPacket(pk.Marshal(data.PlayerAbilitiesServerbound, pk.Byte(2)))
	} else {
		c.SendPacket(pk.Marshal(data.PlayerAbilitiesServerbound, pk.Byte(0)))
	}
}
// Move updates the client's cached position/on-ground state and sends a
// PlayerPosition packet with the new coordinates.
func (c *Client) Move(x, y, z float64, onGround bool) {
	c.SetX(x)
	c.SetY(y)
	c.SetZ(z)
	c.SetOnGround(onGround)
	c.SendPacket(pk.Marshal(
		data.PlayerPosition,
		pk.Double(x),
		pk.Double(y),
		pk.Double(z),
		pk.Boolean(onGround),
	))
}

// Rotation updates the cached look angles/on-ground state and sends a
// PlayerLook packet.
func (c *Client) Rotation(yaw, pitch float32, onGround bool) {
	c.SetYaw(yaw)
	c.SetPitch(pitch)
	c.SetOnGround(onGround)
	c.SendPacket(pk.Marshal(
		data.PlayerLook,
		pk.Float(yaw),
		pk.Float(pitch),
		pk.Boolean(onGround),
	))
}

// MoveAndRotation updates position and look angles together and sends a
// combined serverbound position-and-look packet.
func (c *Client) MoveAndRotation(x, y, z float64, yaw, pitch float32, onGround bool) {
	c.SetX(x)
	c.SetY(y)
	c.SetZ(z)
	c.SetYaw(yaw)
	c.SetPitch(pitch)
	c.SetOnGround(onGround)
	c.SendPacket(pk.Marshal(
		data.PlayerPositionAndLookServerbound,
		pk.Double(x),
		pk.Double(y),
		pk.Double(z),
		pk.Float(yaw),
		pk.Float(pitch),
		pk.Boolean(onGround),
	))
}
// sendDigging sends a PlayerDigging packet with the given status
// (0 = start, 1 = cancel, 2 = finish) for the block at (x, y, z) dug from
// the given face. Shared by the three Break* methods below, whose bodies
// previously differed only in this status value.
func (c *Client) sendDigging(status int32, x, y, z int, direction Direction) {
	c.SendPacket(pk.Marshal(
		data.PlayerDigging,
		pk.VarInt(status),
		pk.Position{X: x, Y: y, Z: z},
		pk.Byte(direction),
	))
}

// StartBreakBlock begins digging the block at (x, y, z) on the given face.
func (c *Client) StartBreakBlock(x, y, z int, direction Direction) {
	c.sendDigging(0, x, y, z, direction)
}

// CancelBreakBlock aborts an in-progress dig of the block at (x, y, z).
func (c *Client) CancelBreakBlock(x, y, z int, direction Direction) {
	c.sendDigging(1, x, y, z, direction)
}

// FinishBreakBlock completes digging the block at (x, y, z).
func (c *Client) FinishBreakBlock(x, y, z int, direction Direction) {
	c.sendDigging(2, x, y, z, direction)
}
// AttackEntity sends a UseEntity packet for the entity with the given ID
// (interaction type 1 = attack).
func (c *Client) AttackEntity(id int32) {
	c.SendPacket(pk.Marshal(
		data.UseEntity,
		pk.VarInt(id),
		pk.VarInt(1),
	))
}

// SwitchHotBar selects the active hotbar slot.
func (c *Client) SwitchHotBar(slot int16) {
	// Only hotbar slots 0-8 are accepted.
	if slot < 0 || slot > 8 {
		panic("switch hot bar error: unknown slot")
	}
	c.SendPacket(pk.Marshal(
		data.HeldItemChangeServerbound,
		pk.Short(slot),
	))
}

// CloseWindow tells the server the window with the given ID was closed.
func (c *Client) CloseWindow(id uint8) {
	c.SendPacket(pk.Marshal(
		data.CloseWindowServerbound,
		pk.UnsignedByte(id),
	))
}

// PlaceBlock places a block against the face of the block at (x, y, z)
// using the given hand; cursorX/Y/Z give the in-block cursor position.
func (c *Client) PlaceBlock(hand Hand, x, y, z int, face Direction, cursorX, cursorY, cursorZ float32, insideBlock bool) {
	c.SendPacket(pk.Marshal(
		data.PlayerBlockPlacement,
		pk.VarInt(hand),
		pk.Position{X: x, Y: y, Z: z},
		pk.VarInt(face),
		pk.Float(cursorX),
		pk.Float(cursorY),
		pk.Float(cursorZ),
		pk.Boolean(insideBlock),
	))
}

// SwingArm plays the arm-swing animation for the given hand.
func (c *Client) SwingArm(hand Hand) {
	c.SendPacket(pk.Marshal(
		data.AnimationServerbound,
		pk.VarInt(hand),
	))
}

// ClickWindow sends a window click for the given window/slot with the given
// mouse button and operation mode.
func (c *Client) ClickWindow(id uint8, slot int16, button int8, mode int32) {
	c.SendPacket(pk.Marshal(
		data.ClickWindow,
		pk.UnsignedByte(id),
		pk.Short(slot),
		pk.Byte(button),
		pk.Short(0),
		pk.VarInt(mode),
		pk.Byte(0),
	))
}

// ReSpawn sends a ClientStatus packet (action 0 = perform respawn).
func (c *Client) ReSpawn() {
	c.SendPacket(pk.Marshal(data.ClientStatus, pk.VarInt(0)))
}
|
package core
// Plugin modes: the orchestrator environments the plugin can run under.
const (
	Docker     = "docker"
	Kubernetes = "kubernetes"
	SwarmMode  = "swarm-mode"
	Test       = "test" // used by tests only
)
|
package main
import "fmt"
// main demonstrates basic Go array operations: zero values, indexing,
// literals, length, and two-dimensional arrays.
func main() {
	var a [3]string
	fmt.Println("a:", a)

	// Set using index
	a[1] = "Hello"
	fmt.Println("a:", a, "a[1]: ", a[1]) // Get using index

	a[0] = "Hi!"
	fmt.Println("a:", a, " Length: ", len(a)) // Get Length

	// Declare and initialize together
	b := [5]int{1, 2, 3, 4, 5}
	fmt.Println("b:", b)

	// Two dimensional Arrays
	var c [2][2]int
	for row := range c {
		for col := range c[row] {
			c[row][col] = row + col
		}
	}
	fmt.Println("c:", c)
}
|
package permission
import (
"log"
)
// Session interface hooks up registering Providers and answering questions
// about Roles and Permissions. Sessions form a hierarchy via NewSession,
// SetParent, and Parent.
type Session interface {
	// Name / SetName identify this session.
	Name() string
	SetName(name string)
	// Role provider registration and lookup.
	RoleProviders() []RoleProvider
	RegisterRoleProvider(roleProvider RoleProvider) error
	RoleProviderFor(profileName string, resourceName string) (RoleProvider, error)
	// Permission provider registration and lookup.
	PermissionProviders() []PermissionProvider
	RegisterPermissionProvider(permissionProvider PermissionProvider) error
	PermissionProviderFor(resourceName string) (PermissionProvider, error)
	// Logging hooks.
	Logger() *log.Logger
	SetLogger(logger *log.Logger)
	// Default role hooks (DefaultRoleFunc is declared elsewhere in the package).
	DefaultRole(profile Profile, resource Resource) (Role, error)
	SetDefaultRole(fn DefaultRoleFunc)
	// Core queries: resolve a role or a named permission for a profile/resource pair.
	GetRole(p Profile, r Resource) (Role, error)
	GetPermission(p Profile, r Resource, permission string) (Permission, error)
	// Arbitrary caller-supplied context value.
	SetContext(context interface{})
	Context() interface{}
	// Session hierarchy.
	NewSession(name string) Session
	SetParent(sess Session)
	Parent() Session
}
// Profile interface represents a requesting user, group, organizational unit, etc.
type Profile interface {
	ProfileName() string
	ProfileIdentifier() string
}

// Resource interface represents something that can have permissions.
type Resource interface {
	ResourceName() string
	ResourceIdentifier() string
}

// Role interface represents a role relationship between a Profile and a Resource.
type Role interface {
	RoleName() string
	Profile() Profile
	SetProfile(Profile)
	Resource() Resource
	SetResource(Resource)
	RoleProvider() RoleProvider
	SetRoleProvider(RoleProvider)
}
// RoleProviderAllRoles is the pluggable implementation behind
// RoleProvider.AllRoles: it returns every role the given profile could hold
// on the given resource.
type RoleProviderAllRoles func(roleProvider RoleProvider, p Profile, r Resource) ([]Role, error)

// RoleProviderBestRole is the pluggable implementation behind
// RoleProvider.BestRole. (Fix: parameter name typo "ropeProvider".)
type RoleProviderBestRole func(roleProvider RoleProvider, p Profile, r Resource) (Role, error)
// RoleProvider provides an interface to ask what role or roles a Profile and
// Resource matching would have.
type RoleProvider interface {
	HandledProfileName() string
	HandledResourceName() string
	// AllRoles returns all the applicable roles a Profile and Resource could
	// potentially have.
	AllRoles(profile Profile, resource Resource) ([]Role, error)
	SetAllRoles(roleProviderAllRoles RoleProviderAllRoles)
	BestRole(p Profile, r Resource) (Role, error)
	SetBestRole(roleProviderBestRole RoleProviderBestRole)
	SetSession(sess Session)
	Session() Session
}
// Permission represents the answer to "Does Role with Resource have this
// `permission`?".
type Permission interface {
	PermissionName() string
	Granted() bool
	SetGranted(bool)
	Role() Role
	SetRole(Role)
	PermissionProvider() PermissionProvider
	SetPermissionProvider(PermissionProvider)
}
// PermissionProviderGetPermission is the pluggable implementation behind
// PermissionProvider.GetPermission.
type PermissionProviderGetPermission func(permissionProvider PermissionProvider, role Role, permission string) (Permission, error)

// PermissionProvider answers permission queries for one resource type.
type PermissionProvider interface {
	HandledResourceName() string
	GetPermission(role Role, permission string) (Permission, error)
	SetGetPermission(getPermission PermissionProviderGetPermission)
	SetSession(sess Session)
	Session() Session
}
|
package echarge
// Charging mode identifiers.
const (
	ModeEco    = "eco"
	ModeManual = "manual"
)
|
package bigtable
import (
"cloud.google.com/go/bigtable"
"context"
"encoding/json"
"fmt"
"github.com/mattwelke/manydocs/utils"
)
const (
	// IDPropName is the document property under which the generated doc ID
	// is stored inside the document itself.
	IDPropName = "_id"
)
// SaveDoc persists newDoc into Bigtable and returns the generated doc ID.
//
// Three kinds of rows are written:
//  1. the JSON-encoded doc keyed by its new ID (docsByDocsIDTable),
//  2. one copy of the doc per entry in newDocQueryPrefixes, keyed by
//     prefix + fresh ID (docsByQueryPrefixTable), and
//  3. one reference row per row written above (addedDocRefsTable) so a later
//     "delete doc" operation can locate everything written here.
//
// NOTE(review): the writes are not transactional — a failure partway through
// leaves earlier rows in place; confirm callers tolerate partial writes.
func (service DocService) SaveDoc(newDoc map[string]interface{}, newDocQueryPrefixes []string) (string, error) {
	newDocID := utils.NewID()
	// The doc carries its own ID under the "_id" property.
	newDoc[IDPropName] = newDocID
	// JSON encode doc
	newDocJSON, err := json.Marshal(newDoc)
	if err != nil {
		return "", fmt.Errorf("could not JSON encode doc for save doc operation: %v", err)
	}
	insertMut := bigtable.NewMutation()
	insertMut.Set("value", "value", bigtable.Now(), newDocJSON)
	if err := service.docsByDocsIDTable.Apply(context.Background(), newDocID, insertMut); err != nil {
		return "", fmt.Errorf("could not apply Bigtable insert mutation for docs by doc ID table for save doc operation: %v", err)
	}
	addedDocRefs := make([]addedDocRef, 0)
	// Save ref to doc in docs by doc ID table for "delete doc" operation later
	addedDocRefs = append(addedDocRefs, addedDocRef{
		docID:   newDocID,
		refType: addedDocRefTypeByDocID,
		rowKey:  newDocID,
	})
	for _, prefix := range newDocQueryPrefixes {
		// Append a fresh ID so multiple docs can share the same prefix.
		finalRowKey := fmt.Sprintf("%s%s", prefix, utils.NewID())
		insertMut := bigtable.NewMutation()
		insertMut.Set("value", "value", bigtable.Now(), newDocJSON)
		if err := service.docsByQueryPrefixTable.Apply(context.Background(), finalRowKey, insertMut); err != nil {
			return "", fmt.Errorf("could not apply Bigtable insert mutation for docs by query prefix table for save doc operation: %v", err)
		}
		// Save ref to doc in docs by query prefix table for "delete doc" operation later
		addedDocRefs = append(addedDocRefs, addedDocRef{
			docID:   newDocID,
			refType: addedDocRefTypeByQueryPrefix,
			rowKey:  finalRowKey,
		})
	}
	for _, ref := range addedDocRefs {
		// row key is the doc ID with a UUID appended to it, so we can get all doc refs later by doc ID
		finalDocInsertPrimaryKey := fmt.Sprintf("%s%s", ref.docID, utils.NewID())
		insertMut := bigtable.NewMutation()
		insertMut.Set("data", "ref_type", bigtable.Now(), []byte(ref.refType))
		insertMut.Set("data", "row_key", bigtable.Now(), []byte(ref.rowKey))
		if err := service.addedDocRefsTable.Apply(context.Background(), finalDocInsertPrimaryKey, insertMut); err != nil {
			return "", fmt.Errorf("could not apply Bigtable insert mutation for added doc refs table for save doc operation: %v", err)
		}
	}
	return newDocID, nil
}
|
package generator
import (
"bytes"
"fmt"
"go/ast"
"go/parser"
"go/token"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"github.com/benbjohnson/megajson/generator/decoder"
"github.com/benbjohnson/megajson/generator/encoder"
)
// extregexp matches the ".go" suffix of a source path; used to derive the
// "_encoder.go"/"_decoder.go" output file names.
var extregexp = regexp.MustCompile(`\.go$`)
// Generator generates encoders and decoders for Go files matching a given path.
type Generator interface {
	// Generate walks path recursively and writes the generated encoder and
	// decoder files next to each Go source file.
	Generate(path string) error
}
// generator is the default Generator implementation; it delegates the actual
// code generation to a decoder and an encoder sub-generator.
type generator struct {
	decoder decoder.Generator
	encoder encoder.Generator
}
// New returns a Generator wired with the default decoder and encoder
// sub-generators.
func New() Generator {
	// gofmt fix: the original literal was missing the space after each key.
	return &generator{
		decoder: decoder.NewGenerator(),
		encoder: encoder.NewGenerator(),
	}
}
// Generate recursively iterates over a path and generates encoders and decoders.
// Per-file handling is delegated to g.walk.
func (g *generator) Generate(path string) error {
	return filepath.Walk(path, g.walk)
}
// walk is the callback used by Generate() for iterating over files and
// directories. Only regular .go files are processed; for each one an encoder
// and a decoder file are generated alongside it.
func (g *generator) walk(path string, info os.FileInfo, err error) error {
	// Bug fix: propagate traversal errors from filepath.Walk. Previously err
	// was ignored, which masked root stat failures behind a generic
	// "file not found" and silently skipped unreadable directories.
	if err != nil {
		return err
	}
	if info == nil {
		return fmt.Errorf("file not found: %s", path)
	}
	// Only Go files are used for generation.
	if info.IsDir() || filepath.Ext(path) != ".go" {
		return nil
	}
	// Parse Go file.
	file, err := parser.ParseFile(token.NewFileSet(), path, nil, 0)
	if err != nil {
		return err
	}
	if err := g.encode(file, extregexp.ReplaceAllString(path, "_encoder.go"), info.Mode()); err != nil {
		return err
	}
	return g.decode(file, extregexp.ReplaceAllString(path, "_decoder.go"), info.Mode())
}
// decode generates a decoder file from a given Go file. Nothing is written
// when the decoder generator produces no output.
func (g *generator) decode(file *ast.File, path string, mode os.FileMode) error {
	var buf bytes.Buffer
	if err := g.decoder.Generate(&buf, file); err != nil {
		return err
	}
	if buf.Len() == 0 {
		return nil
	}
	return ioutil.WriteFile(path, buf.Bytes(), mode)
}
// encode generates an encoder file from a given Go file. Nothing is written
// when the encoder generator produces no output.
func (g *generator) encode(file *ast.File, path string, mode os.FileMode) error {
	var buf bytes.Buffer
	if err := g.encoder.Generate(&buf, file); err != nil {
		return err
	}
	if buf.Len() == 0 {
		return nil
	}
	return ioutil.WriteFile(path, buf.Bytes(), mode)
}
|
package ws
import "github.com/gorilla/websocket"
// Hub is the registry of websocket clients, keyed by user name.
type Hub struct {
	clients   map[string]*user
	broadcast chan []byte // NOTE(review): unused by the methods visible here — confirm it is consumed elsewhere
}
// hub is the single package-wide Hub instance.
var hub = &Hub{clients: make(map[string]*user)}

// GetHub returns the shared hub.
func GetHub() *Hub {
	return hub
}
// IsConnect reports whether the given user currently has at least one open
// websocket connection registered in the hub.
func (hub Hub) IsConnect(user string) bool {
	u, ok := hub.clients[user]
	return ok && len(u.clients) > 0
}
// Send delivers mensaje as a text message to every open connection of the
// given user. Unknown users are ignored.
// NOTE(review): WriteMessage errors are discarded — confirm dropped writes
// are acceptable here.
func (hub Hub) Send(user string, mensaje []byte) {
	client, ok := hub.clients[user]
	if !ok {
		return
	}
	for c := range client.clients {
		c.conn.WriteMessage(websocket.TextMessage, mensaje)
	}
}
|
package gothic
import (
"net/http"
"syscall"
"testing"
"time"
"github.com/jrapoport/gothic/config"
"github.com/jrapoport/gothic/hosts"
"github.com/jrapoport/gothic/test/tconf"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Test_Main boots the full service on OS-assigned ports, waits for the hosts
// to come up, probes the health endpoint, then delivers SIGINT to itself and
// waits for shutdown.
func Test_Main(t *testing.T) {
	c := tconf.TempDB(t)
	// Bind every listener to port 0 so the OS picks free ports.
	c.Network.AdminAddress = "127.0.0.1:0"
	c.Network.RESTAddress = "127.0.0.1:0"
	c.Network.RPCAddress = "127.0.0.1:0"
	c.Network.RPCWebAddress = "127.0.0.1:0"
	c.Network.HealthAddress = "127.0.0.1:0"
	go func() {
		err := Main(c)
		assert.NoError(t, err)
	}()
	// Wait up to 5s for the servers to report running.
	assert.Eventually(t, func() bool {
		return hosts.Running()
	}, 5*time.Second, 100*time.Millisecond)
	healthURI := func() string {
		require.NotEmpty(t, c.Network.HealthAddress)
		return "http://" + c.Network.HealthAddress + config.HealthCheck
	}
	_, err := http.Get(healthURI())
	assert.NoError(t, err)
	// Trigger shutdown the way a supervisor would: SIGINT to our own pid.
	err = syscall.Kill(syscall.Getpid(), syscall.SIGINT)
	assert.NoError(t, err)
	// Wait up to 5s for the servers to stop.
	assert.Eventually(t, func() bool {
		return !hosts.Running()
	}, 5*time.Second, 100*time.Millisecond)
}
|
// Copyright (c) Red Hat, Inc.
// Copyright Contributors to the Open Cluster Management project
//Package managedcluster ...
package managedcluster
import (
"context"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"fmt"
"os"
"k8s.io/klog"
"net/url"
corev1 "k8s.io/api/core/v1"
clusterv1 "github.com/open-cluster-management/api/cluster/v1"
"github.com/open-cluster-management/applier/pkg/templateprocessor"
"github.com/open-cluster-management/managedcluster-import-controller/pkg/bindata"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
	// Environment variables that must carry the klusterlet image references
	// (checked in generateImportYAMLs).
	registrationOperatorImageEnvVarName = "REGISTRATION_OPERATOR_IMAGE"
	registrationImageEnvVarName         = "REGISTRATION_IMAGE"
	workImageEnvVarName                 = "WORK_IMAGE"
	// Namespace used for the klusterlet resources in the generated manifests.
	klusterletNamespace = "open-cluster-management-agent"
	// Error format used when a required environment variable is missing.
	envVarNotDefined = "environment variable %s not defined"
	// Name given to the image pull secret in the generated manifests.
	managedClusterImagePullSecretName = "open-cluster-management-image-pull-credentials"
)
// generateImportYAMLs renders the klusterlet import manifests for
// managedCluster and returns them split into CRDs (keyed by API version,
// "v1beta1" and "v1") and the remaining deployment YAMLs.
//
// Template inputs come from three places: the bootstrap secret (turned into a
// kubeconfig and embedded base64-encoded), the optional default image pull
// secret, and three required image environment variables. Template paths
// listed in excluded are skipped when rendering the klusterlet resources.
func generateImportYAMLs(
	client client.Client,
	managedCluster *clusterv1.ManagedCluster,
	excluded []string,
) (crds map[string][]*unstructured.Unstructured, yamls []*unstructured.Unstructured, err error) {
	klog.V(4).Info("Create templateProcessor")
	tp, err := templateprocessor.NewTemplateProcessor(bindata.NewBindataReader(), &templateprocessor.Options{})
	if err != nil {
		return nil, nil, err
	}
	crds = make(map[string][]*unstructured.Unstructured)
	klog.V(4).Info("TemplateResources klusterlet/crds/v1beta1/")
	crds["v1beta1"], err = tp.TemplateResourcesInPathUnstructured("klusterlet/crds/v1beta1/", nil, true, nil)
	if err != nil {
		return nil, nil, err
	}
	klog.V(4).Info("TemplateResources klusterlet/crds/v1/")
	crds["v1"], err = tp.TemplateResourcesInPathUnstructured("klusterlet/crds/v1/", nil, true, nil)
	if err != nil {
		return nil, nil, err
	}
	bootStrapSecret, err := getBootstrapSecret(client, managedCluster)
	if err != nil {
		return nil, nil, err
	}
	klog.V(4).Infof("createKubeconfigData for bootsrapSecret %s", bootStrapSecret.Name)
	bootstrapKubeconfigData, err := createKubeconfigData(client, bootStrapSecret)
	if err != nil {
		return nil, nil, err
	}
	// The image pull secret is optional: it is only embedded when it exists
	// and actually carries a .dockerconfigjson payload.
	useImagePullSecret := false
	imagePullSecretDataBase64 := ""
	imagePullSecret, err := getImagePullSecret(client)
	if err != nil {
		return nil, nil, err
	}
	if imagePullSecret != nil && len(imagePullSecret.Data[".dockerconfigjson"]) != 0 {
		imagePullSecretDataBase64 = base64.StdEncoding.EncodeToString(imagePullSecret.Data[".dockerconfigjson"])
		useImagePullSecret = true
	}
	// All three image references must be provided via the environment.
	registrationOperatorImageName := os.Getenv(registrationOperatorImageEnvVarName)
	if registrationOperatorImageName == "" {
		return nil, nil, fmt.Errorf(envVarNotDefined, registrationOperatorImageEnvVarName)
	}
	registrationImageName := os.Getenv(registrationImageEnvVarName)
	if registrationImageName == "" {
		return nil, nil, fmt.Errorf(envVarNotDefined, registrationImageEnvVarName)
	}
	workImageName := os.Getenv(workImageEnvVarName)
	if workImageName == "" {
		return nil, nil, fmt.Errorf(envVarNotDefined, workImageEnvVarName)
	}
	// Template context passed to the klusterlet manifests.
	config := struct {
		KlusterletNamespace       string
		ManagedClusterNamespace   string
		BootstrapKubeconfig       string
		UseImagePullSecret        bool
		ImagePullSecretName       string
		ImagePullSecretData       string
		ImagePullSecretType       corev1.SecretType
		RegistrationOperatorImage string
		RegistrationImageName     string
		WorkImageName             string
	}{
		ManagedClusterNamespace:   managedCluster.Name,
		KlusterletNamespace:       klusterletNamespace,
		BootstrapKubeconfig:       base64.StdEncoding.EncodeToString(bootstrapKubeconfigData),
		UseImagePullSecret:        useImagePullSecret,
		ImagePullSecretName:       managedClusterImagePullSecretName,
		ImagePullSecretData:       imagePullSecretDataBase64,
		ImagePullSecretType:       corev1.SecretTypeDockerConfigJson,
		RegistrationOperatorImage: registrationOperatorImageName,
		RegistrationImageName:     registrationImageName,
		WorkImageName:             workImageName,
	}
	// NOTE(review): tp is re-created here although the processor built at the
	// top of the function looks equivalent — confirm whether this second
	// NewTemplateProcessor call is actually required.
	tp, err = templateprocessor.NewTemplateProcessor(bindata.NewBindataReader(), &templateprocessor.Options{})
	if err != nil {
		return nil, nil, err
	}
	if !useImagePullSecret {
		excluded = append(excluded, "klusterlet/image_pull_secret.yaml")
	}
	klusterletYAMLs, err := tp.TemplateResourcesInPathUnstructured(
		"klusterlet",
		excluded,
		false,
		config,
	)
	if err != nil {
		return nil, nil, err
	}
	yamls = append(yamls, klusterletYAMLs...)
	return crds, yamls, nil
}
// getImagePullSecret fetches the secret named by the DEFAULT_IMAGE_PULL_SECRET
// environment variable from the POD_NAMESPACE namespace. It returns (nil, nil)
// when the variable is unset.
func getImagePullSecret(client client.Client) (*corev1.Secret, error) {
	name := os.Getenv("DEFAULT_IMAGE_PULL_SECRET")
	if name == "" {
		return nil, nil
	}
	secret := &corev1.Secret{}
	key := types.NamespacedName{
		Name:      name,
		Namespace: os.Getenv("POD_NAMESPACE"),
	}
	if err := client.Get(context.TODO(), key, secret); err != nil {
		return nil, err
	}
	return secret, nil
}
// getValidCertificatesFromURL dials serverURL over TLS (>= 1.2) and returns
// the peer certificates that verify against rootCAs (system roots when
// rootCAs is nil). A handshake failure caused by an unknown authority is not
// treated as an error: it yields (nil, nil). Other dial or parse failures are
// returned to the caller.
func getValidCertificatesFromURL(serverURL string, rootCAs *x509.CertPool) ([]*x509.Certificate, error) {
	u, err := url.Parse(serverURL)
	if err != nil {
		log.Error(err, "failed to parse url: "+serverURL)
		return nil, err
	}
	log.Info("getting certificate of " + u.Hostname() + ":" + u.Port())
	tlsConf := &tls.Config{
		// server should support tls1.2
		MinVersion: tls.VersionTLS12,
		ServerName: u.Hostname(),
	}
	if rootCAs != nil {
		tlsConf.RootCAs = rootCAs
	}
	conn, err := tls.Dial("tcp", u.Hostname()+":"+u.Port(), tlsConf)
	if err != nil {
		log.Error(err, "failed to dial "+serverURL)
		// ignore certificate signed by unknown authority error
		if _, ok := err.(x509.UnknownAuthorityError); ok {
			return nil, nil
		}
		return nil, err
	}
	defer conn.Close()

	// Keep only the presented certificates that verify against rootCAs.
	opts := x509.VerifyOptions{Roots: rootCAs}
	valid := []*x509.Certificate{}
	for _, cert := range conn.ConnectionState().PeerCertificates {
		if _, err := cert.Verify(opts); err != nil {
			log.V(2).Info("Skipping an invalid certificate")
			continue
		}
		log.V(2).Info("Adding a valid certificate")
		valid = append(valid, cert)
	}
	return valid, nil
}
// createKubeconfigData builds a serialized kubeconfig for bootstrapping the
// managed cluster, using the service-account token from bootStrapSecret and
// the best CA data it can determine for the hub API server.
//
// CA selection order:
//  1. the certificate from the API server's named cert secret, when one is
//     configured for the API server hostname;
//  2. otherwise the service account's own ca.crt;
//  3. on ROKS (OCP on IBM Cloud), if the API server presents certificates
//     that verify against trusted CAs, no CA data is embedded at all.
func createKubeconfigData(client client.Client, bootStrapSecret *corev1.Secret) ([]byte, error) {
	saToken := bootStrapSecret.Data["token"]
	kubeAPIServer, err := getKubeAPIServerAddress(client)
	if err != nil {
		return nil, err
	}
	var certData []byte
	// A parse failure here is deliberately non-fatal: we just skip the
	// named-cert lookup and fall through to the fallbacks below.
	if u, err := url.Parse(kubeAPIServer); err == nil {
		apiServerCertSecretName, err := getKubeAPIServerSecretName(client, u.Hostname())
		if err != nil {
			return nil, err
		}
		if len(apiServerCertSecretName) > 0 {
			apiServerCert, err := getKubeAPIServerCertificate(client, apiServerCertSecretName)
			if err != nil {
				return nil, err
			}
			certData = apiServerCert
		}
	}
	if len(certData) == 0 {
		// fallback to service account token ca.crt
		if _, ok := bootStrapSecret.Data["ca.crt"]; ok {
			certData = bootStrapSecret.Data["ca.crt"]
		}
		// check if it's roks
		// if it's ocp && it's on ibm cloud, we treat it as roks
		isROKS, err := checkIsIBMCloud(client)
		if err != nil {
			return nil, err
		}
		if isROKS {
			// ROKS should have a certificate that is signed by trusted CA
			if certs, err := getValidCertificatesFromURL(kubeAPIServer, nil); err != nil {
				// should retry if failed to connect to apiserver
				log.Error(err, fmt.Sprintf("failed to connect to %s", kubeAPIServer))
				return nil, err
			} else if len(certs) > 0 {
				// simply don't give any certs as the apiserver is using certs signed by known CAs
				certData = nil
			} else {
				log.Info("No additional valid certificate found for APIserver. Skipping.")
			}
		}
	}
	bootstrapConfig := clientcmdapi.Config{
		// Define a cluster stanza based on the bootstrap kubeconfig.
		Clusters: map[string]*clientcmdapi.Cluster{"default-cluster": {
			Server:                   kubeAPIServer,
			InsecureSkipTLSVerify:    false,
			CertificateAuthorityData: certData,
		}},
		// Define auth based on the obtained client cert.
		AuthInfos: map[string]*clientcmdapi.AuthInfo{"default-auth": {
			Token: string(saToken),
		}},
		// Define a context that connects the auth info and cluster, and set it as the default
		Contexts: map[string]*clientcmdapi.Context{"default-context": {
			Cluster:   "default-cluster",
			AuthInfo:  "default-auth",
			Namespace: "default",
		}},
		CurrentContext: "default-context",
	}
	return runtime.Encode(clientcmdlatest.Codec, &bootstrapConfig)
}
|
package myreplication
import (
"bytes"
"reflect"
"testing"
"time"
)
// TestReadPackTotal reads two consecutive packets from one stream and checks
// length, sequence number and payload of each.
func TestReadPackTotal(t *testing.T) {
	mockBuff := []byte{0x03, 0x00, 0x00, 0x0a, 0x01, 0x02, 0x03, 0x03, 0x00, 0x00, 0x0b, 0x04, 0x05, 0x06}
	reader := newPackReader(bytes.NewBuffer(mockBuff))
	var expectedLength uint32 = 3
	cases := []struct {
		sequence byte
		buff     []byte
	}{
		{10, []byte{0x01, 0x02, 0x03}},
		{11, []byte{0x04, 0x05, 0x06}},
	}
	for _, c := range cases {
		pack, err := reader.readNextPack()
		if err != nil {
			t.Error("Got error", err)
		}
		if pack.length != expectedLength {
			t.Error(
				"incorrect length",
				"expected", expectedLength,
				"got", pack.length,
			)
		}
		if pack.sequence != c.sequence {
			t.Error(
				"incorrect sequence",
				"expected", c.sequence,
				"got", pack.sequence,
			)
		}
		if !reflect.DeepEqual(c.buff, pack.buff) {
			t.Error(
				"incorrect buff",
				"expected", c.buff,
				"got", pack.buff,
			)
		}
	}
}
// TestReadPackByte checks reading a single byte from a packet body.
func TestReadPackByte(t *testing.T) {
	reader := newPackReader(bytes.NewBuffer([]byte{
		0x01, 0x00, 0x00,
		0x0a,
		0x10,
	}))
	pack, _ := reader.readNextPack()
	var result byte
	if err := pack.readByte(&result); err != nil {
		t.Error("Got error", err)
	}
	if expected := byte(16); result != expected {
		t.Error("Incorrect result", "expected", expected, "got", result)
	}
}
// TestReadUint16 checks decoding a 2-byte little-endian unsigned integer.
func TestReadUint16(t *testing.T) {
	reader := newPackReader(bytes.NewBuffer([]byte{
		0x02, 0x00, 0x00,
		0x0a,
		0x1D, 0x86,
	}))
	pack, _ := reader.readNextPack()
	var result uint16
	if err := pack.readUint16(&result); err != nil {
		t.Error("Got error", err)
	}
	if expected := uint16(34333); result != expected {
		t.Error("Incorrect result", "expected", expected, "got", result)
	}
}
// TestReadThreeByteUint32 checks decoding a 3-byte little-endian unsigned
// integer into a uint32.
func TestReadThreeByteUint32(t *testing.T) {
	reader := newPackReader(bytes.NewBuffer([]byte{
		0x03, 0x00, 0x00,
		0x0a,
		0x76, 0x8A, 0x34,
	}))
	pack, _ := reader.readNextPack()
	var result uint32
	if err := pack.readThreeByteUint32(&result); err != nil {
		t.Error("Got error", err)
	}
	if expected := uint32(3443318); result != expected {
		t.Error("Incorrect result", "expected", expected, "got", result)
	}
}
// TestReadUint32 checks decoding a 4-byte little-endian unsigned integer.
func TestReadUint32(t *testing.T) {
	reader := newPackReader(bytes.NewBuffer([]byte{
		0x04, 0x00, 0x00,
		0x0a,
		0xD6, 0x00, 0x77, 0x14,
	}))
	pack, _ := reader.readNextPack()
	var result uint32
	if err := pack.readUint32(&result); err != nil {
		t.Error("Got error", err)
	}
	if expected := uint32(343343318); result != expected {
		t.Error("Incorrect result", "expected", expected, "got", result)
	}
}
// TestPackReadUint64 checks decoding an 8-byte little-endian unsigned integer.
func TestPackReadUint64(t *testing.T) {
	reader := newPackReader(bytes.NewBuffer([]byte{
		0x08, 0x00, 0x00,
		0x0a,
		0xC4, 0x74, 0x77, 0xCE, 0xCF, 0x11, 0x5E, 0x20,
	}))
	pack, _ := reader.readNextPack()
	var result uint64
	if err := pack.readUint64(&result); err != nil {
		t.Error("Got error", err)
	}
	if expected := uint64(2332321241244333252); result != expected {
		t.Error("Incorrect result", "expected", expected, "got", result)
	}
}
// TestPackReadSixByteUint64 checks decoding a 6-byte little-endian unsigned
// integer into a uint64.
// NOTE(review): the packet header declares a length of 0x08 while the payload
// carries only 6 bytes — confirm whether readNextPack honors the declared
// length (the header probably should read 0x06).
func TestPackReadSixByteUint64(t *testing.T) {
	mockBuff := []byte{
		0x08, 0x00, 0x00,
		0x0a,
		0x8F, 0x7F, 0xE8, 0x44, 0x9A, 0x27,
	}
	reader := newPackReader(bytes.NewBuffer(mockBuff))
	pack, _ := reader.readNextPack()
	var expected uint64 = 43543534534543
	var result uint64
	err := pack.readSixByteUint64(&result)
	if err != nil {
		t.Error(
			"Got error", err,
		)
	}
	if result != expected {
		t.Error(
			"Incorrect result",
			"expected", expected,
			"got", result,
		)
	}
}
// TestReadIntLengthOrNil decodes length-encoded integers: 0xFB means NULL,
// values below 0xFB are the value itself, and 0xFC/0xFD/0xFE prefix 2-, 3-
// and 8-byte little-endian values respectively (as the packs below show).
func TestReadIntLengthOrNil(t *testing.T) {
	mockBuff := []byte{
		//pack 0, nil length encoded integer
		0x01, 0x00, 0x00,
		0x00,
		0xFB,
		//pack 1, 29 integer
		0x01, 0x00, 0x00,
		0x01,
		0x1D,
		//pack 2, 251 integer
		0x03, 0x00, 0x00,
		0x02,
		0xFC, 0xFB, 0x00,
		//pack 3, 3443318 integer
		0x04, 0x00, 0x00,
		0x03,
		0xFD, 0x76, 0x8A, 0x34,
		//pack 4, 2332321241244333252 integer
		// NOTE(review): this pack reuses sequence id 0x03 — probably meant 0x04.
		0x09, 0x00, 0x00,
		0x03,
		0xFE, 0xC4, 0x74, 0x77, 0xCE, 0xCF, 0x11, 0x5E, 0x20,
	}
	reader := newPackReader(bytes.NewBuffer(mockBuff))
	var (
		null   bool
		result uint64
	)
	type (
		expectedPair struct {
			null   bool
			result uint64
		}
	)
	testsCollection := []*expectedPair{
		&expectedPair{true, 0},
		&expectedPair{false, 29},
		&expectedPair{false, 251},
		&expectedPair{false, 3443318},
		&expectedPair{false, 2332321241244333252},
	}
	for i, test := range testsCollection {
		t.Log("length int. Test pack", i, "with nil:", test.null, "result:", test.result)
		// Reset outputs so a previous iteration cannot mask a failure.
		null = false
		result = 0
		pack, err := reader.readNextPack()
		if err != nil {
			t.Fatal(
				"Error read pack:", err,
			)
		}
		err = pack.readIntLengthOrNil(&result, &null)
		if err != nil {
			t.Error(
				"Got error", err,
			)
		}
		if null != test.null {
			t.Error(
				"Incorrect nil",
				"expected", test.null,
				"got", null,
			)
		}
		if result != test.result {
			t.Error(
				"Incorrect result",
				"expected", test.result,
				"got", result,
			)
		}
	}
}
// TestReadNilString checks reading a NUL-terminated string; bytes after the
// terminator must be left alone.
func TestReadNilString(t *testing.T) {
	reader := newPackReader(bytes.NewBuffer([]byte{
		0x20,
		0x00, 0x00, 0x0a,
		0x35, 0x2e, 0x35, 0x2e, 0x33, 0x38, 0x2d, 0x30, 0x75, 0x62, 0x75, 0x6e, 0x74, 0x75, 0x30, 0x2e, 0x31, 0x34,
		0x2e, 0x30, 0x34, 0x2e, 0x31, 0x2d, 0x6c, 0x6f, 0x67, 0x00,
		//garbage byte
		0x35, 0x2e, 0x35, 0x2e,
	}))
	pack, _ := reader.readNextPack()
	result, err := pack.readNilString()
	if err != nil {
		t.Error("Got error", err)
	}
	want := []byte("5.5.38-0ubuntu0.14.04.1-log")
	if !reflect.DeepEqual(want, result) {
		t.Error("Expected", string(want), "got", string(result))
	}
}
// TestReadStringLength checks reading a length-prefixed string (here 0x1B =
// 27 bytes); trailing garbage must be left alone.
func TestReadStringLength(t *testing.T) {
	reader := newPackReader(bytes.NewBuffer([]byte{
		0x1D,
		0x00, 0x00, 0x0a,
		0x1B, 0x35, 0x2e, 0x35, 0x2e, 0x33, 0x38, 0x2d, 0x30, 0x75, 0x62, 0x75, 0x6e, 0x74, 0x75, 0x30, 0x2e, 0x31, 0x34,
		0x2e, 0x30, 0x34, 0x2e, 0x31, 0x2d, 0x6c, 0x6f, 0x67,
		//garbage byte
		0xFF,
	}))
	pack, _ := reader.readNextPack()
	result, err := pack.readStringLength()
	if err != nil {
		t.Error("Got error", err)
	}
	want := []byte("5.5.38-0ubuntu0.14.04.1-log")
	if !reflect.DeepEqual(want, result) {
		t.Error("Expected", string(want), "got", string(result))
	}
}
// TestReadTotal reads several differently-sized integers back to back from a
// single packet body, verifying the read cursor advances correctly.
func TestReadTotal(t *testing.T) {
	reader := newPackReader(bytes.NewBuffer([]byte{
		0x14, 0x00, 0x00,
		0x0a,
		0x10,
		0x1D, 0x86,
		0x76, 0x8A, 0x34,
		0xD6, 0x00, 0x77, 0x14,
		0xC4, 0x74, 0x77, 0xCE, 0xCF, 0x11, 0x5E, 0x20,
	}))
	pack, _ := reader.readNextPack()

	var resultByte byte
	if err := pack.readByte(&resultByte); err != nil {
		t.Error("Got error", err)
	}
	if expected := byte(16); resultByte != expected {
		t.Error("Incorrect result", "expected", expected, "got", resultByte)
	}

	var result16 uint16
	if err := pack.readUint16(&result16); err != nil {
		t.Error("Got error", err)
	}
	if expected := uint16(34333); result16 != expected {
		t.Error("Incorrect result", "expected", expected, "got", result16)
	}

	var resulttb32 uint32
	if err := pack.readThreeByteUint32(&resulttb32); err != nil {
		t.Error("Got error", err)
	}
	if expected := uint32(3443318); resulttb32 != expected {
		t.Error("Incorrect result", "expected", expected, "got", resulttb32)
	}

	var result32 uint32
	if err := pack.readUint32(&result32); err != nil {
		t.Error("Got error", err)
	}
	if expected := uint32(343343318); result32 != expected {
		t.Error("Incorrect result", "expected", expected, "got", result32)
	}

	var result64 uint64
	if err := pack.readUint64(&result64); err != nil {
		t.Error("Got error", err)
	}
	if expected := uint64(2332321241244333252); result64 != expected {
		t.Error("Incorrect result", "expected", expected, "got", result64)
	}
}
// TestWritePackUint16 checks the little-endian encoding of a uint16 appended
// after the zeroed 4-byte header.
func TestWritePackUint16(t *testing.T) {
	pack := newPack()
	if err := pack.writeUInt16(33328); err != nil {
		t.Error("Got error", err)
	}
	expected := []byte{
		0x00, 0x00, 0x00,
		0x00,
		0x30, 0x82,
	}
	if !reflect.DeepEqual(expected, pack.Bytes()) {
		t.Error("Expected", expected, "got", pack.Bytes())
	}
}
// TestWritePackThreeByteUint32 checks the 3-byte little-endian encoding of a
// uint32 appended after the zeroed 4-byte header.
func TestWritePackThreeByteUint32(t *testing.T) {
	pack := newPack()
	if err := pack.writeThreeByteUInt32(3443318); err != nil {
		t.Error("Got error", err)
	}
	expected := []byte{
		0x00, 0x00, 0x00,
		0x00,
		0x76, 0x8A, 0x34,
	}
	if !reflect.DeepEqual(expected, pack.Bytes()) {
		t.Error("Expected", expected, "got", pack.Bytes())
	}
}
// TestWritePackUint32 checks the 4-byte little-endian encoding of a uint32
// appended after the zeroed 4-byte header.
func TestWritePackUint32(t *testing.T) {
	pack := newPack()
	if err := pack.writeUInt32(343343318); err != nil {
		t.Error("Got error", err)
	}
	expected := []byte{
		0x00, 0x00, 0x00,
		0x00,
		0xD6, 0x00, 0x77, 0x14,
	}
	if !reflect.DeepEqual(expected, pack.Bytes()) {
		t.Error("Expected", expected, "got", pack.Bytes())
	}
}
// TestWriteNilString checks that a string is written NUL-terminated after the
// zeroed 4-byte header.
func TestWriteNilString(t *testing.T) {
	pack := newPack()
	if err := pack.writeStringNil("hello"); err != nil {
		t.Error("Got error", err)
	}
	expected := []byte{
		0x00, 0x00, 0x00,
		0x00,
		0x68, 0x65, 0x6C, 0x6C, 0x6F, 0x00,
	}
	if !reflect.DeepEqual(expected, pack.Bytes()) {
		t.Error("Expected", expected, "got", pack.Bytes())
	}
}
// TestWriteStringLength checks that a string is written with its one-byte
// length prefix after the zeroed 4-byte header.
func TestWriteStringLength(t *testing.T) {
	pack := newPack()
	if err := pack.writeStringLength("hello"); err != nil {
		t.Error("Got error", err)
	}
	expected := []byte{
		0x00, 0x00, 0x00,
		0x00,
		0x05, 0x68, 0x65, 0x6C, 0x6C, 0x6F,
	}
	if !reflect.DeepEqual(expected, pack.Bytes()) {
		t.Error("Expected", expected, "got", pack.Bytes())
	}
}
// TestPackWithLength checks that packBytes() frames the payload with the
// 3-byte little-endian length and the sequence id.
func TestPackWithLength(t *testing.T) {
	pack := newPack()
	pack.setSequence(byte(10))
	expected := []byte{
		0x06, 0x00, 0x00,
		0x0A,
		0x68, 0x65, 0x6C, 0x6C, 0x6F, 0x00,
	}
	err := pack.writeStringNil("hello")
	if err != nil {
		t.Error("Got error", err)
	}
	// Bug fix: the failure message previously printed pack.Bytes() even
	// though the comparison is against pack.packBytes().
	if !reflect.DeepEqual(expected, pack.packBytes()) {
		t.Error("Expected", expected, "got", pack.packBytes())
	}
}
// TestPackFlush checks that flushing a pack through a pack writer emits the
// full framed packet into the destination buffer.
func TestPackFlush(t *testing.T) {
	mockBuff := bytes.NewBuffer([]byte{})
	packWriter := newPackWriter(mockBuff)
	pack := newPack()
	pack.setSequence(byte(10))
	// Bug fix: the writeStringNil error was previously discarded.
	if err := pack.writeStringNil("hello"); err != nil {
		t.Error("Got error", err)
	}
	err := packWriter.flush(pack)
	if err != nil {
		t.Error("Got error", err)
	}
	expected := []byte{
		0x06, 0x00, 0x00,
		0x0A,
		0x68, 0x65, 0x6C, 0x6C, 0x6F, 0x00,
	}
	// Bug fix: the failure message previously printed pack.Bytes() even
	// though the comparison is against mockBuff.Bytes().
	if !reflect.DeepEqual(expected, mockBuff.Bytes()) {
		t.Error("Expected", expected, "got", mockBuff.Bytes())
	}
}
// TestOkPacket checks that a packet whose first body byte is 0x00 (OK) is not
// reported as an error.
func TestOkPacket(t *testing.T) {
	reader := newPackReader(bytes.NewBuffer([]byte{
		//length
		0x07, 0x00, 0x00,
		//sequence id
		0x02,
		//code
		0x00,
		0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
	}))
	pack, _ := reader.readNextPack()
	if pack.isError() != nil {
		t.Error("Got error", pack.isError())
	}
}
// TestOkPacketError checks that an ERR packet (first body byte 0xff) is
// decoded into the expected error string.
func TestOkPacketError(t *testing.T) {
	mockBuff := []byte{
		//length
		0x17, 0x00, 0x00,
		//sequence
		0x01,
		//err code
		0xff,
		//error id
		0x48, 0x04,
		//error text
		0x23, 0x48, 0x59, 0x30, 0x30, 0x30, 0x4e, 0x6f, 0x20, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x20, 0x75, 0x73,
		0x65, 0x64,
	}
	reader := newPackReader(bytes.NewBuffer(mockBuff))
	pack, _ := reader.readNextPack()
	errorText := "#HY000No tables used"
	err := pack.isError()
	if err == nil || err.Error() != errorText {
		// Bug fix: err.Error() was previously called unconditionally in the
		// failure report, which panicked with a nil dereference whenever
		// isError returned nil.
		got := "<nil>"
		if err != nil {
			got = err.Error()
		}
		t.Error(
			"incorrect err packet",
			"expected", errorText,
			"got", got,
		)
	}
}
// TestEOFPacket checks that a packet whose first body byte is 0xFE is
// recognized as an EOF packet.
func TestEOFPacket(t *testing.T) {
	reader := newPackReader(bytes.NewBuffer([]byte{
		//length
		0x05, 0x00, 0x00,
		//sequence
		0x01,
		//EOF
		0xFE,
		//warning
		0x00, 0x00,
		//status
		0x02, 0x00,
	}))
	pack, _ := reader.readNextPack()
	if !pack.isEOF() {
		t.Error("packet is not EOF")
	}
}
// TestReadDateTime decodes binary DATETIME payloads of the four wire lengths
// seen here (11, 4, 0 and 7 body bytes) and compares against expected times
// in time.Local.
func TestReadDateTime(t *testing.T) {
	testCases := []struct {
		buff         []byte
		expectedTime time.Time
	}{
		{
			buff: []byte{
				0x10, 0x00, 0x00, 0x01,
				0x0b, 0xda, 0x07, 0x0a, 0x11, 0x13, 0x1b, 0x1e, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
			},
			expectedTime: time.Date(2010, 10, 17, 19, 27, 30, 1, time.Local),
		},
		{
			buff: []byte{
				0x09, 0x00, 0x00, 0x01,
				0x04, 0xda, 0x07, 0x0a, 0x11, 0x00, 0x00, 0x02, 0x00,
			},
			expectedTime: time.Date(2010, 10, 17, 0, 0, 0, 0, time.Local),
		},
		{
			buff: []byte{
				0x05, 0x00, 0x00, 0x01,
				0x00, 0x00, 0x00, 0x02, 0x00,
			},
			expectedTime: time.Time{}.In(time.Local),
		},
		{
			buff: []byte{
				0x0C, 0x00, 0x00, 0x01,
				0x07, 0xda, 0x07, 0x0a, 0x11, 0x13, 0x1b, 0x1e,
				0x00, 0x00, 0x02, 0x00,
			},
			expectedTime: time.Date(2010, 10, 17, 19, 27, 30, 0, time.Local),
		},
	}
	for i, testCase := range testCases {
		reader := newPackReader(bytes.NewBuffer(testCase.buff))
		pack, _ := reader.readNextPack()
		result := pack.readDateTime()
		if !testCase.expectedTime.Equal(result) {
			t.Fatal(
				"incorrect date time at test", i,
				"expected", testCase.expectedTime,
				"got", result,
			)
		}
	}
}
// TestReadTime decodes binary TIME payloads (12-byte form with a sign byte
// and microseconds, and the shorter 8-byte form) into time.Duration.
func TestReadTime(t *testing.T) {
	testCases := []struct {
		buff         []byte
		expectedTime time.Duration
	}{
		{
			// second body byte 0x01 flips the sign
			buff: []byte{
				0x11, 0x00, 0x00, 0x01,
				0x0c, 0x01, 0x78, 0x00, 0x00, 0x00, 0x13, 0x1b, 0x1e, 0x01, 0x00, 0x00, 0x00,
				0x00, 0x00, 0x02, 0x00,
			},
			expectedTime: -time.Duration(
				120*24*time.Hour + 19*time.Hour + 27*time.Minute + 30*time.Second + time.Microsecond,
			),
		},
		{
			buff: []byte{
				0x11, 0x00, 0x00, 0x01,
				0x0c, 0x00, 0x78, 0x00, 0x00, 0x00, 0x13, 0x1b, 0x1e, 0x01, 0x00, 0x00, 0x00,
				0x00, 0x00, 0x02, 0x00,
			},
			expectedTime: time.Duration(
				120*24*time.Hour + 19*time.Hour + 27*time.Minute + 30*time.Second + time.Microsecond,
			),
		},
		{
			buff: []byte{
				0x0D, 0x00, 0x00, 0x01,
				0x08, 0x00, 0x78, 0x00, 0x00, 0x00, 0x13, 0x1b, 0x1e,
				0x00, 0x00, 0x02, 0x00,
			},
			expectedTime: time.Duration(
				120*24*time.Hour + 19*time.Hour + 27*time.Minute + 30*time.Second,
			),
		},
	}
	for i, testCase := range testCases {
		reader := newPackReader(bytes.NewBuffer(testCase.buff))
		pack, _ := reader.readNextPack()
		result := pack.readTime()
		if result != testCase.expectedTime {
			t.Fatal(
				"incorrect time at test", i,
				"expected", testCase.expectedTime,
				"got", result,
			)
		}
	}
}
// TestNewDecimal decodes NEWDECIMAL binary values for several precision/scale
// combinations, including a negative value.
func TestNewDecimal(t *testing.T) {
	testCases := []struct {
		buff             []byte
		expectedDecimal  float64
		precision, scale int
	}{
		{
			buff: []byte{
				0x07, 0x00, 0x00, 0x01,
				0x9e, 0x61, 0x42,
				0x00, 0x00, 0x02, 0x00,
			},
			precision:       6,
			scale:           2,
			expectedDecimal: 7777.66,
		},
		{
			buff: []byte{
				0x07, 0x00, 0x00, 0x01,
				0x84, 0xd2, 0x38,
				0x00, 0x00, 0x02, 0x00,
			},
			precision:       6,
			scale:           2,
			expectedDecimal: 1234.56,
		},
		{
			buff: []byte{
				0x09, 0x00, 0x00, 0x01,
				0x80, 0x00, 0x00, 0x01, 0x4d,
				0x00, 0x00, 0x02, 0x00,
			},
			precision:       10,
			scale:           0,
			expectedDecimal: 333,
		},
		{
			buff: []byte{
				0x09, 0x00, 0x00, 0x01,
				0x7f, 0xff, 0xff, 0xfe, 0xb2,
				0x00, 0x00, 0x02, 0x00,
			},
			precision:       10,
			scale:           0,
			expectedDecimal: -333,
		},
	}
	for i, testCase := range testCases {
		reader := newPackReader(bytes.NewBuffer(testCase.buff))
		pack, _ := reader.readNextPack()
		result, _ := pack.readNewDecimal(testCase.precision, testCase.scale).Float64()
		if result != testCase.expectedDecimal {
			t.Fatal(
				"incorrect decimal at test", i,
				"expected", testCase.expectedDecimal,
				"got", result,
			)
		}
	}
}
// TestDecimalBinarySize checks the computed wire size of NEWDECIMAL values
// for two precision/scale pairs.
func TestDecimalBinarySize(t *testing.T) {
	testCases := []struct {
		expectedSize, precision, scale int
	}{
		{precision: 6, scale: 2, expectedSize: 3},
		{precision: 10, scale: 0, expectedSize: 5},
	}
	for i, testCase := range testCases {
		size := getDecimalBinarySize(testCase.precision, testCase.scale)
		if size != testCase.expectedSize {
			t.Fatal(
				"incorrect decimal binary size at test", i,
				"expected", testCase.expectedSize,
				"got", size,
			)
		}
	}
}
|
package source
// Identifiers for the available domain search sources (see Get).
const (
	DNSSource       = "dns"
	GoDaddySource   = "gds"
	NameCheapSource = "ncs"
)
// Source is the interface for domain search sources.
type Source interface {
	// IsAvailable reports whether the given domain name is available.
	IsAvailable(string) (bool, error)
}
// Get returns a search source for sourceType, casting config to the matching
// config struct. It panics on an unknown sourceType, and the type assertions
// below panic when config has the wrong concrete type.
func Get(config interface{}, sourceType string) Source {
	switch sourceType {
	case DNSSource:
		return NewDNS(config.(*DNSConfig)).(Source)
	case GoDaddySource:
		return NewGoDaddy(config.(*GoDaddyConfig)).(Source)
	case NameCheapSource:
		return NewNameCheap(config.(*NameCheapConfig)).(Source)
	default:
		panic("Invalid source: " + sourceType)
	}
}
|
package FIFO
import (
"fmt"
"testing"
)
// TestInit verifies the backing list is created with the requested capacity.
func TestInit(t *testing.T) {
	fifoCache := Init(3)
	if fifoCache.dList.Capacity != 3 {
		t.Error("FIFO Cache init failed")
	} else {
		t.Log("FIFO Cache init success")
	}
}
// TestGet exercises Get on a key that was never inserted.
// NOTE(review): the result is only printed, not asserted — this test cannot
// fail; consider asserting the expected miss behavior instead.
func TestGet(t *testing.T) {
	fifoCache := Init(3)
	fifoCache.Put(1)
	fifoCache.Put(2)
	fifoCache.Put(3)
	fmt.Println(fifoCache.Get(5))
}
// TestPut fills the 3-slot cache beyond capacity several times to exercise
// FIFO eviction, then prints the final state.
// NOTE(review): results are only printed, not asserted — the test cannot fail
// on wrong eviction behavior; consider asserting the expected final contents.
func TestPut(t *testing.T) {
	fifoCache := Init(3)
	fifoCache.Put(1)
	fifoCache.Put(2)
	fifoCache.Put(3)
	fifoCache.Put(4)
	fifoCache.Put(1)
	fifoCache.Put(2)
	fifoCache.Put(5)
	fifoCache.Put(1)
	fifoCache.Put(2)
	fifoCache.Put(3)
	fifoCache.Put(4)
	fifoCache.Put(5)
	fifoCache.Put(6)
	fifoCache.PrintFIFOCache()
	fmt.Println(fifoCache.Get(3))
}
package main
import "fmt"
// main demonstrates a type assertion on a []string wrapped in an empty
// interface.
func main() {
	var s []string
	s = append(s, "string1")
	s = append(s, "string2")
	value, ok := interface{}(s).([]string)
	// Idiom fix: compare booleans directly instead of `ok != true`.
	if !ok {
		fmt.Println("value is not a []string")
	} else {
		fmt.Println("value is []string")
		fmt.Println(value)
	}
}
|
package solutions
// moveZeroes moves every zero in nums to the end, in place, while preserving
// the relative order of the non-zero elements.
func moveZeroes(nums []int) {
	// Compact the non-zero elements to the front...
	insert := 0
	for _, v := range nums {
		if v != 0 {
			nums[insert] = v
			insert++
		}
	}
	// ...then zero-fill the remainder.
	for ; insert < len(nums); insert++ {
		nums[insert] = 0
	}
}
|
// The MIT License (MIT)
//
// Copyright (c) 2014 winlin
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package rtmp
import (
"fmt"
)
// AMF0 marker bytes: the first byte of every encoded AMF0 value identifies
// its type.
const amf0Number = 0x00
const amf0Boolean = 0x01
const amf0String = 0x02
const amf0Object = 0x03
const amf0MovieClip = 0x04 // reserved, not supported
const amf0Null = 0x05
const amf0Undefined = 0x06
const amf0Reference = 0x07
const amf0EcmaArray = 0x08
const amf0ObjectEnd = 0x09
const amf0StrictArray = 0x0A
const amf0Date = 0x0B
const amf0LongString = 0x0C
const amf0UnSupported = 0x0D
const amf0RecordSet = 0x0E // reserved, not supported
const amf0XMLDocument = 0x0F
const amf0TypedObject = 0x10

// AMF0AVMplusObject object is the AMF3 object.
const AMF0AVMplusObject = 0x11

// AMF0OriginStrictArray whose data takes the same form as LengthValueBytes
const AMF0OriginStrictArray = 0x20

// amf0Invalid marks an unrecognized/invalid AMF0 value.
const amf0Invalid = 0x3F
/**
 * amf0UnSortedHashtable keeps object properties in insertion order.
 * FMLE crashes when the properties of an AMF0 object are re-ordered
 * (e.g. into map-iteration / string-compare order) in the response to
 * connect(app), so both the key order and the key->value map are kept.
 */
// @see: SrsUnSortedHashtable
type amf0UnSortedHashtable struct {
	propertyIndex []string            // keys in insertion order
	properties map[string]*amf0Any    // key -> boxed value
}
// NewAmf0UnSortedHashtable creates an empty insertion-ordered property table.
func NewAmf0UnSortedHashtable() *amf0UnSortedHashtable {
	return &amf0UnSortedHashtable{
		properties: make(map[string]*amf0Any),
	}
}
// count reports the number of stored properties.
func (r *amf0UnSortedHashtable) count() (n int) {
	return len(r.properties)
}
// size returns the total wire size of all properties: for each property a
// 2-byte-prefixed UTF-8 key plus the encoded value.
//
// The previous early-return guard (`if r.count() <= 0 { return 0 }`) was
// redundant: ranging over an empty map already yields n == 0.
func (r *amf0UnSortedHashtable) size() (n int) {
	for k, v := range r.properties {
		n += Amf0SizeUtf8(k)
		n += v.size()
	}
	return
}
// Write encodes every property in insertion order: key as UTF-8, then the
// value. Encoding stops at the first error.
func (r *amf0UnSortedHashtable) Write(codec *Amf0Codec) (err error) {
	for _, name := range r.propertyIndex {
		if err = codec.WriteUtf8(name); err != nil {
			return
		}
		if err = r.properties[name].Write(codec); err != nil {
			return
		}
	}
	return
}
// set stores or overwrites property k. A nil value is rejected; a brand-new
// key is also recorded in the insertion-order index.
func (r *amf0UnSortedHashtable) set(k string, v *amf0Any) (err error) {
	if v == nil {
		return Error{code: errORGOAMF0NILPROPERTY, desc: "AMF0 object property value should never be nil"}
	}
	if _, exists := r.properties[k]; !exists {
		r.propertyIndex = append(r.propertyIndex, k)
	}
	r.properties[k] = v
	return nil
}
// getPropertyString returns property k as a string. ok is false when the
// key is missing or the stored value is not an AMF0 string.
func (r *amf0UnSortedHashtable) getPropertyString(k string) (v string, ok bool) {
	prop, found := r.properties[k]
	if !found {
		return "", false
	}
	return prop.String()
}
// getPropertyNumber returns property k as a float64. ok is false when the
// key is missing or the stored value is not an AMF0 number.
func (r *amf0UnSortedHashtable) getPropertyNumber(k string) (v float64, ok bool) {
	prop, found := r.properties[k]
	if !found {
		return 0, false
	}
	return prop.number()
}
/**
 * 2.5 Object Type
 * anonymous-object-type = object-marker *(object-property)
 * object-property = (UTF-8 value-type) | (UTF-8-empty object-end-marker)
 */
// @see: SrsAmf0Object
type Amf0Object struct {
	marker byte                       // always amf0Object (0x03)
	properties *amf0UnSortedHashtable // insertion-ordered properties
}
// NewAmf0Object creates an empty AMF0 anonymous object.
func NewAmf0Object() *Amf0Object {
	return &Amf0Object{
		marker:     amf0Object,
		properties: NewAmf0UnSortedHashtable(),
	}
}
// size returns the encoded byte size: 1-byte marker + properties + object
// EOF. NOTE(review): an empty object reports 0 here, yet Write still emits
// marker + EOF (4 bytes) — confirm callers never size an empty object.
func (r *Amf0Object) size() (n int) {
	if n = r.properties.size(); n <= 0 {
		return 0
	}
	n++
	n += Amf0SizeObjectEOF()
	return
}
// Read decodes an anonymous object from codec: a 1-byte object marker, then
// (UTF-8 name, any value) pairs until the empty-name/object-end sentinel is
// reached or the stream is exhausted.
func (r *Amf0Object) Read(codec *Amf0Codec) (err error) {
	// marker
	if !codec.stream.Requires(1) {
		err = Error{code: errORRTMPAMF0DECODE, desc: "amf0 object requires 1bytes marker"}
		return
	}
	if r.marker = codec.stream.ReadByte(); r.marker != amf0Object {
		err = Error{code: errORRTMPAMF0DECODE, desc: "amf0 object marker invalid"}
		return
	}
	for !codec.stream.Empty() {
		// property-name: utf8 string
		var propertyName string
		if propertyName, err = codec.ReadUtf8(); err != nil {
			return
		}
		// property-value: any
		var propertyValue amf0Any
		if err = propertyValue.Read(codec); err != nil {
			return
		}
		// AMF0 Object EOF: an empty name followed by the object-end marker
		// (or a value with no payload) terminates the object.
		if len(propertyName) <= 0 || propertyValue.isNil() || propertyValue.isObjectEOF() {
			break
		}
		// add property, preserving insertion order
		if err = r.set(propertyName, &propertyValue); err != nil {
			return
		}
	}
	return
}
// Write encodes the object: marker byte, properties in insertion order,
// then the 3-byte object EOF terminator.
func (r *Amf0Object) Write(codec *Amf0Codec) (err error) {
	// marker
	if !codec.stream.Requires(1) {
		err = Error{code: errORRTMPAMF0ENCODE, desc: "amf0 write object marker failed"}
		return
	}
	codec.stream.writeByte(byte(amf0Object))
	// properties
	if err = r.properties.Write(codec); err != nil {
		return
	}
	// object EOF
	return codec.WriteObjectEOF()
}
// set stores/overwrites property k; nil values are rejected.
func (r *Amf0Object) set(k string, v *amf0Any) (err error) {
	return r.properties.set(k, v)
}

// getPropertyString returns property k as a string; ok is false when the
// key is missing or the value is not an AMF0 string.
func (r *Amf0Object) getPropertyString(k string) (v string, ok bool) {
	return r.properties.getPropertyString(k)
}

// getPropertyNumber returns property k as a float64; ok is false when the
// key is missing or the value is not an AMF0 number.
func (r *Amf0Object) getPropertyNumber(k string) (v float64, ok bool) {
	return r.properties.getPropertyNumber(k)
}
/**
 * 2.10 ECMA Array Type
 * ecma-array-type = associative-count *(object-property)
 * associative-count = U32
 * object-property = (UTF-8 value-type) | (UTF-8-empty object-end-marker)
 */
// @see: SrsASrsAmf0EcmaArray
type Amf0EcmaArray struct {
	marker byte                       // always amf0EcmaArray (0x08)
	count uint32                      // associative-count written before the properties
	properties *amf0UnSortedHashtable // insertion-ordered properties
}
// NewAmf0EcmaArray creates an empty AMF0 ECMA (associative) array.
func NewAmf0EcmaArray() *Amf0EcmaArray {
	return &Amf0EcmaArray{
		marker:     amf0EcmaArray,
		properties: NewAmf0UnSortedHashtable(),
	}
}
// size returns the encoded byte size: 1-byte marker + 4-byte count +
// properties + object EOF. NOTE(review): an empty array reports 0 here,
// yet Write still emits marker + count + EOF — confirm callers never size
// an empty array.
func (r *Amf0EcmaArray) size() (n int) {
	if n = r.properties.size(); n <= 0 {
		return 0
	}
	n++
	n += 4
	n += Amf0SizeObjectEOF()
	return
}
// srs_amf0_read_ecma_array
// Read decodes an ECMA array: marker, 4-byte associative-count, then
// (name, value) pairs until the object-end sentinel. The declared count is
// kept but not trusted — parsing is driven by the EOF sentinel.
func (r *Amf0EcmaArray) Read(codec *Amf0Codec) (err error) {
	// marker
	if !codec.stream.Requires(1) {
		err = Error{code: errORRTMPAMF0DECODE, desc: "amf0 EcmaArray requires 1bytes marker"}
		return
	}
	if r.marker = codec.stream.ReadByte(); r.marker != amf0EcmaArray {
		err = Error{code: errORRTMPAMF0DECODE, desc: "amf0 EcmaArray marker invalid"}
		return
	}
	// count
	if !codec.stream.Requires(4) {
		err = Error{code: errORRTMPAMF0DECODE, desc: "amf0 read ecma_array count failed"}
		return
	}
	r.count = codec.stream.ReadUInt32()
	for !codec.stream.Empty() {
		// property-name: utf8 string
		var propertyName string
		if propertyName, err = codec.ReadUtf8(); err != nil {
			return
		}
		// property-value: any
		var propertyValue amf0Any
		if err = propertyValue.Read(codec); err != nil {
			return
		}
		// AMF0 Object EOF: empty name + object-end marker terminates the array.
		if len(propertyName) <= 0 || propertyValue.isNil() || propertyValue.isObjectEOF() {
			break
		}
		// add property (set also refreshes r.count)
		if err = r.set(propertyName, &propertyValue); err != nil {
			return
		}
	}
	return
}
// srs_amf0_write_ecma_array
// Write encodes the array: marker, 4-byte count, properties in insertion
// order, then the object EOF terminator.
func (r *Amf0EcmaArray) Write(codec *Amf0Codec) (err error) {
	// marker
	if !codec.stream.Requires(1) {
		err = Error{code: errORRTMPAMF0ENCODE, desc: "amf0 write EcmaArray marker failed"}
		return
	}
	codec.stream.writeByte(byte(amf0EcmaArray))
	// count
	if !codec.stream.Requires(4) {
		err = Error{code: errORRTMPAMF0ENCODE, desc: "amf0 write ecma_array count failed"}
		return
	}
	codec.stream.WriteUInt32(r.count)
	// properties
	if err = r.properties.Write(codec); err != nil {
		return
	}
	// object EOF
	return codec.WriteObjectEOF()
}
// set stores/overwrites property k and keeps count in sync with the
// underlying hashtable (count is what Write emits on the wire).
func (r *Amf0EcmaArray) set(k string, v *amf0Any) (err error) {
	err = r.properties.set(k, v)
	r.count = uint32(r.properties.count())
	return
}

// getPropertyString returns property k as a string; ok is false when the
// key is missing or the value is not an AMF0 string.
func (r *Amf0EcmaArray) getPropertyString(k string) (v string, ok bool) {
	return r.properties.getPropertyString(k)
}

// getPropertyNumber returns property k as a float64; ok is false when the
// key is missing or the value is not an AMF0 number.
func (r *Amf0EcmaArray) getPropertyNumber(k string) (v float64, ok bool) {
	return r.properties.getPropertyNumber(k)
}
/**
 * any amf0 value.
 * 2.1 Types Overview
 * value-type = number-type | boolean-type | string-type | object-type
 * | null-marker | undefined-marker | reference-type | ecma-array-type
 * | strict-array-type | date-type | long-string-type | xml-document-type
 * | typed-object-type
 * create any with NewAmf0(), or create a default one and Read from stream.
 */
// @see: SrsAmf0Any
type amf0Any struct {
	Marker byte       // AMF0 marker byte identifying the boxed type
	Value interface{} // string / bool / float64 / *Amf0Object / *Amf0EcmaArray, or nil
}
// NewAmf0 boxes a Go value into an amf0Any with the matching marker.
// Numbers are normalized to float64, the only numeric representation AMF0
// has. Generalized to also accept int32/int64/float32 (previously these
// returned nil); behavior for all previously supported types is unchanged.
// Unsupported types still return nil.
func NewAmf0(v interface{}) *amf0Any {
	switch t := v.(type) {
	case bool:
		return &amf0Any{Marker: amf0Boolean, Value: t}
	case string:
		return &amf0Any{Marker: amf0String, Value: t}
	case int:
		return &amf0Any{Marker: amf0Number, Value: float64(t)}
	case int32:
		return &amf0Any{Marker: amf0Number, Value: float64(t)}
	case int64:
		return &amf0Any{Marker: amf0Number, Value: float64(t)}
	case float32:
		return &amf0Any{Marker: amf0Number, Value: float64(t)}
	case float64:
		return &amf0Any{Marker: amf0Number, Value: t}
	case *Amf0Object:
		return &amf0Any{Marker: amf0Object, Value: t}
	case *Amf0EcmaArray:
		return &amf0Any{Marker: amf0EcmaArray, Value: t}
	}
	// unsupported type: caller receives nil, as before
	return nil
}
// NewAmf0Null creates an AMF0 null value (marker only, Value stays nil).
func NewAmf0Null() *amf0Any {
	return &amf0Any{Marker: amf0Null}
}

// NewAmf0Undefined creates an AMF0 undefined value (marker only, Value stays nil).
func NewAmf0Undefined() *amf0Any {
	return &amf0Any{Marker: amf0Undefined}
}
// size returns the encoded byte size of the boxed value. Markers without a
// size rule (date, strict array, ...) report 0.
// NOTE(review): the ok results of String()/object()/ecmaArray() are ignored;
// a Marker/Value mismatch would yield a nil v and panic on v.size() —
// assumes Marker and Value are always set consistently.
func (r *amf0Any) size() int {
	switch {
	case r.Marker == amf0String:
		v, _ := r.String()
		return Amf0SizeString(v)
	case r.Marker == amf0Boolean:
		return Amf0SizeBoolean()
	case r.Marker == amf0Number:
		return Amf0SizeNumber()
	case r.Marker == amf0Null || r.Marker == amf0Undefined:
		return Amf0SizeNullOrUndefined()
	case r.Marker == amf0ObjectEnd:
		return Amf0SizeObjectEOF()
	case r.Marker == amf0Object:
		v, _ := r.object()
		return v.size()
	case r.Marker == amf0EcmaArray:
		v, _ := r.ecmaArray()
		return v.size()
	// TODO: FIXME: implements it.
	}
	return 0
}
// Write encodes the boxed value by dispatching on Marker to the matching
// codec writer. Unsupported markers are silently skipped (err stays nil).
func (r *amf0Any) Write(codec *Amf0Codec) (err error) {
	switch {
	case r.Marker == amf0String:
		v, _ := r.String()
		return codec.WriteString(v)
	case r.Marker == amf0Boolean:
		v, _ := r.boolean()
		return codec.WriteBoolean(v)
	case r.Marker == amf0Number:
		v, _ := r.number()
		return codec.WriteNumber(v)
	case r.Marker == amf0Null:
		return codec.WriteNull()
	case r.Marker == amf0Undefined:
		return codec.WriteUndefined()
	case r.Marker == amf0ObjectEnd:
		return codec.WriteObjectEOF()
	case r.Marker == amf0Object:
		// ok ignored: assumes Marker/Value consistency (see size).
		v, _ := r.object()
		return v.Write(codec)
	case r.Marker == amf0EcmaArray:
		v, _ := r.ecmaArray()
		return v.Write(codec)
	// TODO: FIXME: implements it.
	}
	return
}
// Read decodes any AMF0 value: it peeks the marker byte (ReadByte followed
// by Skip(-1) rewinds the stream so the typed reader re-consumes and
// re-validates the marker) and dispatches to the matching codec reader.
// Null/undefined/object-end carry no payload, so only the marker is consumed.
func (r *amf0Any) Read(codec *Amf0Codec) (err error) {
	// marker
	if !codec.stream.Requires(1) {
		err = Error{code: errORRTMPAMF0DECODE, desc: "amf0 any requires 1bytes marker"}
		return
	}
	r.Marker = codec.stream.ReadByte()
	codec.stream.Skip(-1)
	switch {
	case r.Marker == amf0String:
		r.Value, err = codec.ReadString()
	case r.Marker == amf0Boolean:
		r.Value, err = codec.ReadBoolean()
	case r.Marker == amf0Number:
		r.Value, err = codec.ReadNumber()
	case r.Marker == amf0Null || r.Marker == amf0Undefined || r.Marker == amf0ObjectEnd:
		// payload-less values: consume the marker, Value stays nil
		codec.stream.ReadByte()
	case r.Marker == amf0Object:
		r.Value, err = codec.ReadObject()
	case r.Marker == amf0EcmaArray:
		r.Value, err = codec.ReadEcmaArray()
	// TODO: FIXME: implements it.
	default:
		err = Error{code: errORRTMPAMF0INVALID, desc: fmt.Sprintf("invalid amf0 message type. marker=%#x", r.Marker)}
	}
	return
}
// isNil reports whether no payload was decoded (null/undefined/object-end).
func (r *amf0Any) isNil() (v bool) {
	return r.Value == nil
}

// isObjectEOF reports whether this value is the object-end sentinel.
func (r *amf0Any) isObjectEOF() (v bool) {
	return r.Marker == amf0ObjectEnd
}

// object unboxes an *Amf0Object; ok is false for any other marker.
func (r *amf0Any) object() (v *Amf0Object, ok bool) {
	if r.Marker == amf0Object {
		v, ok = r.Value.(*Amf0Object), true
	}
	return
}

// ecmaArray unboxes an *Amf0EcmaArray; ok is false for any other marker.
func (r *amf0Any) ecmaArray() (v *Amf0EcmaArray, ok bool) {
	if r.Marker == amf0EcmaArray {
		v, ok = r.Value.(*Amf0EcmaArray), true
	}
	return
}

// String unboxes a string; ok is false for any other marker.
func (r *amf0Any) String() (v string, ok bool) {
	if r.Marker == amf0String {
		v, ok = r.Value.(string), true
	}
	return
}

// number unboxes a float64; ok is false for any other marker.
func (r *amf0Any) number() (v float64, ok bool) {
	if r.Marker == amf0Number {
		v, ok = r.Value.(float64), true
	}
	return
}

// boolean unboxes a bool; ok is false for any other marker.
func (r *amf0Any) boolean() (v bool, ok bool) {
	if r.Marker == amf0Boolean {
		v, ok = r.Value.(bool), true
	}
	return
}
// Amf0Codec reads and writes AMF0 values over a byte stream.
type Amf0Codec struct {
	stream *buffer
}

// NewAmf0Codec wraps stream in an AMF0 codec.
func NewAmf0Codec(stream *buffer) *Amf0Codec {
	return &Amf0Codec{stream: stream}
}
// Size helpers: the number of bytes each AMF0 primitive occupies on the wire.

// Amf0SizeString is a 1-byte marker plus the UTF-8 payload.
func Amf0SizeString(v string) int { return 1 + Amf0SizeUtf8(v) }

// Amf0SizeUtf8 is a 2-byte length prefix plus the raw bytes.
func Amf0SizeUtf8(v string) int { return 2 + len(v) }

// Amf0SizeNumber is a 1-byte marker plus an 8-byte IEEE-754 double.
func Amf0SizeNumber() int { return 1 + 8 }

// Amf0SizeNullOrUndefined is a bare 1-byte marker.
func Amf0SizeNullOrUndefined() int { return 1 }

// Amf0SizeBoolean is a 1-byte marker plus a 1-byte value.
func Amf0SizeBoolean() int { return 1 + 1 }

// Amf0SizeObjectEOF is a 2-byte empty UTF-8 plus the object-end marker.
func Amf0SizeObjectEOF() int { return 2 + 1 }
// srs_amf0_read_string
// ReadString consumes a string marker then the length-prefixed UTF-8 payload.
func (r *Amf0Codec) ReadString() (v string, err error) {
	// marker
	if !r.stream.Requires(1) {
		err = Error{code: errORRTMPAMF0DECODE, desc: "amf0 string requires 1bytes marker"}
		return
	}
	if marker := r.stream.ReadByte(); marker != amf0String {
		err = Error{code: errORRTMPAMF0DECODE, desc: "amf0 string marker invalid"}
		return
	}
	v, err = r.ReadUtf8()
	return
}
// srs_amf0_write_string
// WriteString emits the string marker then the length-prefixed UTF-8 payload.
func (r *Amf0Codec) WriteString(v string) (err error) {
	// marker
	if !r.stream.Requires(1) {
		err = Error{code: errORRTMPAMF0ENCODE, desc: "amf0 write string marker failed"}
		return
	}
	r.stream.writeByte(byte(amf0String))
	return r.WriteUtf8(v)
}
// srs_amf0_write_boolean
// WriteBoolean emits the boolean marker then a single value byte
// (0x01 for true, 0x00 for false).
func (r *Amf0Codec) WriteBoolean(v bool) (err error) {
	// marker
	if !r.stream.Requires(1) {
		return Error{code: errORRTMPAMF0ENCODE, desc: "amf0 write bool marker failed"}
	}
	r.stream.writeByte(byte(amf0Boolean))
	// value
	if !r.stream.Requires(1) {
		return Error{code: errORRTMPAMF0ENCODE, desc: "amf0 write bool value failed"}
	}
	var b byte
	if v {
		b = 0x01
	}
	r.stream.writeByte(b)
	return nil
}
// srs_amf0_read_utf8
// ReadUtf8 reads a 2-byte length prefix followed by that many bytes,
// returned as a string. A zero length yields the empty string.
//
// Fixes: the length variable was named `len`, shadowing the builtin; the
// trailing validation loop was a no-op (its only check was commented out)
// and has been removed.
func (r *Amf0Codec) ReadUtf8() (v string, err error) {
	// length prefix
	if !r.stream.Requires(2) {
		err = Error{code: errORRTMPAMF0DECODE, desc: "amf0 utf8 len requires 2bytes"}
		return
	}
	size := r.stream.readUInt16()
	// empty string
	if size <= 0 {
		return
	}
	// payload
	if !r.stream.Requires(int(size)) {
		err = Error{code: errORRTMPAMF0DECODE, desc: "amf0 utf8 data requires more bytes"}
		return
	}
	v = string(r.stream.Read(int(size)))
	// 1.3.1 Strings and UTF-8: the spec allows UTF8-1 (%x00-7F) only, but
	// non-ASCII bytes are deliberately tolerated here (as in SRS), so no
	// validation is performed.
	return
}
// srs_amf0_write_utf8
// WriteUtf8 emits the 2-byte length prefix then the raw bytes; an empty
// string is just the zero-length prefix.
func (r *Amf0Codec) WriteUtf8(v string) (err error) {
	// len
	if !r.stream.Requires(2) {
		err = Error{code: errORRTMPAMF0ENCODE, desc: "amf0 write string length failed"}
		return
	}
	r.stream.writeUInt16(uint16(len(v)))
	// empty string
	if len(v) <= 0 {
		return
	}
	// data
	if !r.stream.Requires(len(v)) {
		err = Error{code: errORRTMPAMF0ENCODE, desc: "amf0 write string data failed"}
		return
	}
	r.stream.Write([]byte(v))
	return
}
// srs_amf0_read_number
// ReadNumber consumes the number marker then an 8-byte IEEE-754 double.
func (r *Amf0Codec) ReadNumber() (v float64, err error) {
	// marker
	if !r.stream.Requires(1) {
		err = Error{code: errORRTMPAMF0DECODE, desc: "amf0 number requires 1bytes marker"}
		return
	}
	if marker := r.stream.ReadByte(); marker != amf0Number {
		err = Error{code: errORRTMPAMF0DECODE, desc: "amf0 number marker invalid"}
		return
	}
	// value
	if !r.stream.Requires(8) {
		err = Error{code: errORRTMPAMF0DECODE, desc: "amf0 number requires 8bytes value"}
		return
	}
	v = r.stream.ReadFloat64()
	return
}
// srs_amf0_write_number
// WriteNumber emits the number marker then an 8-byte IEEE-754 double.
func (r *Amf0Codec) WriteNumber(v float64) (err error) {
	// marker
	if !r.stream.Requires(1) {
		err = Error{code: errORRTMPAMF0ENCODE, desc: "amf0 write number marker failed"}
		return
	}
	r.stream.writeByte(byte(amf0Number))
	// value
	if !r.stream.Requires(8) {
		err = Error{code: errORRTMPAMF0ENCODE, desc: "amf0 write number value failed"}
		return
	}
	r.stream.writeFloat64(v)
	return
}
// srs_amf0_write_null
// WriteNull emits the bare null marker (no payload).
func (r *Amf0Codec) WriteNull() (err error) {
	// marker
	if !r.stream.Requires(1) {
		err = Error{code: errORRTMPAMF0ENCODE, desc: "amf0 write null marker failed"}
		return
	}
	r.stream.writeByte(byte(amf0Null))
	return
}
// srs_amf0_read_null
// ReadNull consumes the 1-byte null marker.
//
// Fix: the error used errORRTMPAMF0ENCODE even though this is a decode
// path; it now uses errORRTMPAMF0DECODE, consistent with ReadString,
// ReadNumber and ReadBoolean.
// NOTE(review): unlike the other readers, the marker byte is consumed
// without validating that it is amf0Null — presumably intentional; confirm.
func (r *Amf0Codec) ReadNull() (err error) {
	// marker
	if !r.stream.Requires(1) {
		err = Error{code: errORRTMPAMF0DECODE, desc: "amf0 read null marker failed"}
		return
	}
	r.stream.ReadByte()
	return
}
// srs_amf0_write_undefined (original comment said "read" — copy/paste slip)
// WriteUndefined emits the bare undefined marker (no payload).
func (r *Amf0Codec) WriteUndefined() (err error) {
	// marker
	if !r.stream.Requires(1) {
		err = Error{code: errORRTMPAMF0ENCODE, desc: "amf0 write undefined marker failed"}
		return
	}
	r.stream.writeByte(byte(amf0Undefined))
	return
}
// srs_amf0_read_boolean
// ReadBoolean consumes the boolean marker then a single value byte; any
// non-zero byte decodes as true.
//
// Fixes: the value-error message claimed "8bytes" for a 1-byte value (a
// copy/paste from ReadNumber); the if/else bool assignment is collapsed to
// a comparison.
func (r *Amf0Codec) ReadBoolean() (v bool, err error) {
	// marker
	if !r.stream.Requires(1) {
		err = Error{code: errORRTMPAMF0DECODE, desc: "amf0 bool requires 1bytes marker"}
		return
	}
	if marker := r.stream.ReadByte(); marker != amf0Boolean {
		err = Error{code: errORRTMPAMF0DECODE, desc: "amf0 bool marker invalid"}
		return
	}
	// value
	if !r.stream.Requires(1) {
		err = Error{code: errORRTMPAMF0DECODE, desc: "amf0 bool requires 1bytes value"}
		return
	}
	v = r.stream.ReadByte() != 0
	return
}
// srs_amf0_read_object
// ReadObject allocates an object and delegates decoding to Amf0Object.Read.
func (r *Amf0Codec) ReadObject() (v *Amf0Object, err error) {
	// value
	v = NewAmf0Object()
	return v, v.Read(r)
}
// srs_amf0_read_ecma_array
// ReadEcmaArray allocates an array and delegates decoding to Amf0EcmaArray.Read.
func (r *Amf0Codec) ReadEcmaArray() (v *Amf0EcmaArray, err error) {
	// value
	v = NewAmf0EcmaArray()
	return v, v.Read(r)
}
// srs_amf0_write_object
// WriteObject delegates encoding to Amf0Object.Write.
func (r *Amf0Codec) WriteObject(v *Amf0Object) (err error) {
	return v.Write(r)
}

// srs_amf0_write_ecma_array (original comment said "read" — copy/paste slip)
// WriteEcmaArray delegates encoding to Amf0EcmaArray.Write.
func (r *Amf0Codec) WriteEcmaArray(v *Amf0EcmaArray) (err error) {
	return v.Write(r)
}
// srs_amf0_write_object_eof
// WriteObjectEOF emits the 3-byte object terminator: an empty UTF-8 name
// (0x00 0x00) followed by the object-end marker (0x09).
func (r *Amf0Codec) WriteObjectEOF() (err error) {
	// value
	if !r.stream.Requires(2) {
		err = Error{code: errORRTMPAMF0ENCODE, desc: "amf0 write object eof value failed"}
		return
	}
	r.stream.writeUInt16(uint16(0))
	// marker
	if !r.stream.Requires(1) {
		err = Error{code: errORRTMPAMF0ENCODE, desc: "amf0 write object eof marker failed"}
		return
	}
	r.stream.writeByte(byte(amf0ObjectEnd))
	return
}
|
package utils
import (
"encoding/json"
"github.com/cosmos/cosmos-sdk/x/bank"
"io/ioutil"
"path/filepath"
)
// TempInput mirrors the JSON file layout: {"Input": [...]}.
type TempInput struct {
	In []bank.Input `json:"Input"`
}

// TempOutput mirrors the JSON file layout: {"Output": [...]}.
type TempOutput struct {
	Out []bank.Output `json:"Output"`
}
// ParseInput resolves fp to an absolute path, reads the file, and decodes
// the {"Input": [...]} JSON document into a slice of bank inputs.
func ParseInput(fp string) ([]bank.Input, error) {
	abs, err := filepath.Abs(fp)
	if err != nil {
		return nil, err
	}
	raw, err := ioutil.ReadFile(abs)
	if err != nil {
		return nil, err
	}
	var inputs TempInput
	if err := json.Unmarshal(raw, &inputs); err != nil {
		return nil, err
	}
	return inputs.In, nil
}
// ParseOutput resolves fp to an absolute path, reads the file, and decodes
// the {"Output": [...]} JSON document into a slice of bank outputs.
func ParseOutput(fp string) ([]bank.Output, error) {
	abs, err := filepath.Abs(fp)
	if err != nil {
		return nil, err
	}
	raw, err := ioutil.ReadFile(abs)
	if err != nil {
		return nil, err
	}
	var outputs TempOutput
	if err := json.Unmarshal(raw, &outputs); err != nil {
		return nil, err
	}
	return outputs.Out, nil
}
|
package form_test
import (
"strings"
"testing"
"github.com/moltin/gomo/form"
)
// boundary extracts the multipart boundary from a Content-Type header value
// ("...; boundary=XYZ") and returns it with the leading "--" delimiter
// prefix. It returns "" when no "=" is present.
func boundary(s string) (b string) {
	if i := strings.Index(s, "="); i >= 0 {
		b = "--" + s[i+1:]
	}
	return
}
// TestEncode drives form.Encode over a table of cases and compares the
// rendered multipart body (with CRs stripped and the generated boundary
// substituted for the BOUNDARY placeholder) against a golden string.
func TestEncode(t *testing.T) {
	for _, test := range []struct {
		name string
		object interface{}
		expected string
	}{
		{
			// plain string field; "-" is skipped, omitempty drops the empty field
			"simple",
			struct {
				Name string `form:"name"`
				Skip string `form:"-"`
				Empty string `form:"empty,omitempty"`
			}{
				Name: "test",
				Skip: "foo",
			},
			`BOUNDARY
Content-Disposition: form-data; name="name"
test
BOUNDARY--
`,
		},
		{
			// file upload: filename and octet-stream content type expected
			"reader",
			struct {
				Name *form.File `form:"name"`
			}{
				Name: &form.File{
					Name: "test.txt",
					Content: strings.NewReader("test"),
				},
			},
			`BOUNDARY
Content-Disposition: form-data; name="name"; filename="test.txt"
Content-Type: application/octet-stream
test
BOUNDARY--
`,
		},
		{
			// non-struct input: encoder emits an empty body with terminator only
			"bad type",
			true,
			`
BOUNDARY--
`,
		},
	} {
		t.Run(test.name, func(t *testing.T) {
			body, contentType, err := form.Encode(test.object)
			if err != nil {
				t.Fatal(err)
			}
			// substitute the randomly generated boundary into the golden text
			expected := strings.ReplaceAll(
				test.expected,
				"BOUNDARY",
				boundary(contentType),
			)
			// normalize CRLF to LF so the golden strings stay readable
			bodyS := strings.ReplaceAll(string(body), "\r", "")
			if string(bodyS) != expected {
				t.Errorf(
					"\nexpected:\n>%s<\ngot:\n>%s<",
					expected,
					bodyS,
				)
			}
		})
	}
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package kvserver
import (
"context"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/readsummary/rspb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/errors"
)
// MergeRange expands the left-hand replica, leftRepl, to absorb the right-hand
// replica, identified by rightDesc. freezeStart specifies the time at which the
// right-hand replica promised to stop serving traffic and is used to initialize
// the timestamp cache's low water mark for the right-hand keyspace. The
// right-hand replica must exist on this store and the raftMus for both the
// left-hand and right-hand replicas must be held.
func (s *Store) MergeRange(
	ctx context.Context,
	leftRepl *Replica,
	newLeftDesc, rightDesc roachpb.RangeDescriptor,
	freezeStart hlc.ClockTimestamp,
	rightClosedTS hlc.Timestamp,
	rightReadSum *rspb.ReadSummary,
) error {
	// Sanity-check that the merge actually extends the LHS.
	if oldLeftDesc := leftRepl.Desc(); !oldLeftDesc.EndKey.Less(newLeftDesc.EndKey) {
		return errors.Errorf("the new end key is not greater than the current one: %+v <= %+v",
			newLeftDesc.EndKey, oldLeftDesc.EndKey)
	}
	rightRepl, err := s.GetReplica(rightDesc.RangeID)
	if err != nil {
		return err
	}
	leftRepl.raftMu.AssertHeld()
	rightRepl.raftMu.AssertHeld()
	// Finish tearing down the RHS replica's in-memory state.
	if err := rightRepl.postDestroyRaftMuLocked(ctx, rightRepl.GetMVCCStats()); err != nil {
		return err
	}
	// Note that we were called (indirectly) from raft processing so we must
	// call removeInitializedReplicaRaftMuLocked directly to avoid deadlocking
	// on the right-hand replica's raftMu.
	if err := s.removeInitializedReplicaRaftMuLocked(ctx, rightRepl, rightDesc.NextReplicaID, RemoveOptions{
		// The replica was destroyed by the tombstones added to the batch in
		// runPreApplyTriggersAfterStagingWriteBatch.
		DestroyData: false,
	}); err != nil {
		return errors.Errorf("cannot remove range: %s", err)
	}
	if leftRepl.leaseholderStats != nil {
		leftRepl.leaseholderStats.resetRequestCounts()
	}
	if leftRepl.writeStats != nil {
		// Note: this could be drastically improved by adding a replicaStats method
		// that merges stats. Resetting stats is typically bad for the rebalancing
		// logic that depends on them.
		leftRepl.writeStats.resetRequestCounts()
	}
	// Clear the concurrency manager's lock and txn wait-queues to redirect the
	// queued transactions to the left-hand replica, if necessary.
	rightRepl.concMgr.OnRangeMerge()
	leftLease, _ := leftRepl.GetLease()
	rightLease, _ := rightRepl.GetLease()
	if leftLease.OwnedBy(s.Ident.StoreID) {
		if !rightLease.OwnedBy(s.Ident.StoreID) {
			// We hold the lease for the LHS, but do not hold the lease for the RHS.
			// That means we don't have up-to-date timestamp cache entries for the
			// keyspace previously owned by the RHS. Update the timestamp cache for
			// the RHS keyspace. If the merge trigger included a prior read summary
			// then we can use that directly to update the timestamp cache.
			// Otherwise, we pessimistically assume that the right-hand side served
			// reads all the way up to freezeStart, the time at which the RHS
			// promised to stop serving traffic.
			//
			// Note that we need to update our clock with freezeStart to preserve
			// the invariant that our clock is always greater than or equal to any
			// timestamps in the timestamp cache. For a full discussion, see the
			// comment on TestStoreRangeMergeTimestampCacheCausality.
			s.Clock().Update(freezeStart)
			var sum rspb.ReadSummary
			if rightReadSum != nil {
				sum = *rightReadSum
			} else {
				sum = rspb.FromTimestamp(freezeStart.ToTimestamp())
			}
			applyReadSummaryToTimestampCache(s.tsCache, &rightDesc, sum)
		}
		// When merging ranges, the closed timestamp of the RHS can regress. It's
		// possible that, at subsumption time, the RHS had a high closed timestamp.
		// Being ingested by the LHS, the closed timestamp of the RHS is lost, and
		// the LHS's closed timestamp takes over the respective keys. In order to
		// not violate reads that might have been performed by the RHS according to
		// the old closed ts (either by the leaseholder or by followers), we bump
		// the timestamp cache.
		// In the case when the RHS lease was not collocated with the LHS, this bump
		// is frequently (but not necessarily) redundant with the bumping to the
		// freeze time done above.
		sum := rspb.FromTimestamp(rightClosedTS)
		applyReadSummaryToTimestampCache(s.tsCache, &rightDesc, sum)
	}
	// Update the subsuming range's descriptor.
	leftRepl.setDescRaftMuLocked(ctx, &newLeftDesc)
	return nil
}
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package merger
import (
"fmt"
"testing"
"github.com/DataDog/datadog-operator/apis/datadoghq/v2alpha1"
apiutils "github.com/DataDog/datadog-operator/apis/utils"
"github.com/DataDog/datadog-operator/controllers/datadogagent/dependencies"
"github.com/DataDog/datadog-operator/pkg/kubernetes"
securityv1 "github.com/openshift/api/security/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// TestPodSecurityManager_AddSecurityContextConstraints exercises
// podSecurityManagerImpl.AddSecurityContextConstraints against three store
// states: an empty store, a store holding an unrelated SCC, and a store
// whose SCC with the same name must be merged/updated in place.
func TestPodSecurityManager_AddSecurityContextConstraints(t *testing.T) {
	ns := "bar"
	newSCCName := "foo"
	existingSCCName := "foo2"
	// newSCC is the SCC being added by each test case.
	newSCC := &securityv1.SecurityContextConstraints{
		Users: []string{
			fmt.Sprintf("system:serviceaccount:%s:%s", ns, newSCCName),
		},
		Priority: apiutils.NewInt32Pointer(8),
		AllowedCapabilities: []corev1.Capability{
			"SYS_ADMIN",
			"SYS_RESOURCE",
			"SYS_PTRACE",
			"NET_ADMIN",
			"NET_BROADCAST",
			"NET_RAW",
			"IPC_LOCK",
			"CHOWN",
			"AUDIT_CONTROL",
			"AUDIT_READ",
		},
		AllowHostDirVolumePlugin: true,
		AllowHostIPC: true,
		AllowPrivilegedContainer: false,
		FSGroup: securityv1.FSGroupStrategyOptions{
			Type: securityv1.FSGroupStrategyMustRunAs,
		},
	}
	// existingSCC is pre-loaded into the store for the non-empty cases; note
	// it disallows host dir volumes and declares 6 volume types.
	existingSCC := securityv1.SecurityContextConstraints{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name: existingSCCName,
		},
		Users: []string{
			fmt.Sprintf("system:serviceaccount:%s:%s", ns, existingSCCName),
		},
		AllowHostDirVolumePlugin: false,
		FSGroup: securityv1.FSGroupStrategyOptions{
			Type: securityv1.FSGroupStrategyMustRunAs,
		},
		Volumes: []securityv1.FSType{
			securityv1.FSTypeConfigMap,
			securityv1.FSTypeDownwardAPI,
			securityv1.FSTypeEmptyDir,
			securityv1.FSTypePersistentVolumeClaim,
			securityv1.FSProjected,
			securityv1.FSTypeSecret,
		},
	}
	// Store scaffolding: scheme, options and the owning DatadogAgent object.
	testScheme := runtime.NewScheme()
	testScheme.AddKnownTypes(v2alpha1.GroupVersion, &v2alpha1.DatadogAgent{})
	storeOptions := &dependencies.StoreOptions{
		Scheme: testScheme,
	}
	owner := &v2alpha1.DatadogAgent{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name: newSCCName,
		},
	}
	type args struct {
		namespace string
		name string
		scc *securityv1.SecurityContextConstraints
	}
	tests := []struct {
		name string
		store *dependencies.Store
		args args
		wantErr bool
		validateFunc func(*testing.T, *dependencies.Store)
	}{
		{
			name: "empty store",
			store: dependencies.NewStore(owner, storeOptions),
			args: args{
				namespace: ns,
				name: newSCCName,
				scc: newSCC,
			},
			wantErr: false,
			validateFunc: func(t *testing.T, store *dependencies.Store) {
				if _, found := store.Get(kubernetes.SecurityContextConstraintsKind, ns, newSCCName); !found {
					t.Errorf("missing SecurityContextConstraints %s/%s", ns, newSCCName)
				}
			},
		},
		{
			name: "another SecurityContextConstraints already exists",
			store: dependencies.NewStore(owner, storeOptions).AddOrUpdateStore(kubernetes.SecurityContextConstraintsKind, &existingSCC),
			args: args{
				namespace: ns,
				name: newSCCName,
				scc: newSCC,
			},
			wantErr: false,
			validateFunc: func(t *testing.T, store *dependencies.Store) {
				if _, found := store.Get(kubernetes.SecurityContextConstraintsKind, ns, newSCCName); !found {
					t.Errorf("missing SecurityContextConstraints %s/%s", ns, newSCCName)
				}
			},
		},
		{
			// Adds under the EXISTING name: the stored SCC must pick up
			// AllowHostDirVolumePlugin and the 10 capabilities from newSCC
			// while keeping its original 6 volume types.
			name: "update existing SecurityContextConstraints",
			store: dependencies.NewStore(owner, storeOptions).AddOrUpdateStore(kubernetes.SecurityContextConstraintsKind, &existingSCC),
			args: args{
				namespace: ns,
				name: existingSCCName,
				scc: newSCC,
			},
			wantErr: false,
			validateFunc: func(t *testing.T, store *dependencies.Store) {
				obj, found := store.Get(kubernetes.SecurityContextConstraintsKind, ns, existingSCCName)
				if !found {
					t.Errorf("missing SecurityContextConstraints %s/%s", ns, existingSCCName)
				}
				scc, ok := obj.(*securityv1.SecurityContextConstraints)
				if !ok || !scc.AllowHostDirVolumePlugin {
					t.Errorf("AllowHostDirVolumePlugin not updated in SecurityContextConstraints %s/%s", ns, existingSCCName)
				}
				if len(scc.Volumes) != 6 {
					t.Errorf("Volumes changed in SecurityContextConstraints %s/%s", ns, existingSCCName)
				}
				if len(scc.AllowedCapabilities) != 10 {
					t.Errorf("AllowedCapabilities not added in SecurityContextConstraints %s/%s", ns, existingSCCName)
				}
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			m := &podSecurityManagerImpl{
				store: tt.store,
			}
			if err := m.AddSecurityContextConstraints(tt.args.name, tt.args.namespace, tt.args.scc); (err != nil) != tt.wantErr {
				t.Errorf("PodSecurityManager.AddSecurityContextConstraints() error = %v, wantErr %v", err, tt.wantErr)
			}
			if tt.validateFunc != nil {
				tt.validateFunc(t, tt.store)
			}
		})
	}
}
|
package orm
import (
"github.com/jinzhu/gorm"
)
// ProductDataStore is the product data store backed by a gorm database handle.
type ProductDataStore struct {
	DB *gorm.DB // shared gorm connection; not owned by this struct
}
// GetAll returns every saved product, the number of rows affected, and any
// database error. An empty (non-nil) slice is returned when the table is
// empty, so JSON callers see [] rather than null.
func (store *ProductDataStore) GetAll() (interface{}, int64, error) {
	products := []Product{}
	result := store.DB.Find(&products)
	if result.Error != nil {
		return nil, result.RowsAffected, result.Error
	}
	return products, result.RowsAffected, nil
}
// GetByID returns a product based on its ID, wrapped in a one-element slice.
// A missing record is NOT an error: it returns (nil, rows, nil), so callers
// must check the first return value, not just the error.
func (store *ProductDataStore) GetByID(id uint) (interface{}, int64, error) {
	product := Product{}
	connection := store.DB.First(&product, id)
	if connection.RecordNotFound() {
		return nil, connection.RowsAffected, nil
	}
	if connection.Error != nil {
		return nil, connection.RowsAffected, connection.Error
	}
	return []Product{product}, connection.RowsAffected, nil
}
// Add creates a new product and returns it in a one-element slice.
// NOTE(review): the unchecked type assertion panics if item is not a
// *Product — callers are trusted to pass the right type.
func (store *ProductDataStore) Add(item interface{}) (interface{}, int64, error) {
	product := item.(*Product)
	connection := store.DB.Create(product)
	if connection.Error != nil {
		return nil, connection.RowsAffected, connection.Error
	}
	return []Product{*product}, connection.RowsAffected, nil
}
// DeleteByID removes the product with the given ID and returns the number
// of rows affected plus any database error.
func (store *ProductDataStore) DeleteByID(id uint) (int64, error) {
	target := Product{Model: Model{ID: id}}
	result := store.DB.Delete(&target)
	return result.RowsAffected, result.Error
}
// UpdateByID updates a product based on its ID and returns the updated row
// in a one-element slice.
// NOTE(review): the unchecked type assertion panics if item is not a
// *Product; also, gorm's Updates with a struct presumably skips zero-value
// fields — confirm whether clearing fields is ever required here.
func (store *ProductDataStore) UpdateByID(id uint, item interface{}) (interface{}, int64, error) {
	product := item.(*Product)
	targetProduct := Product{}
	targetProduct.ID = id
	connection := store.DB.Model(&targetProduct).Updates(*product)
	if connection.Error != nil {
		return nil, connection.RowsAffected, connection.Error
	}
	return []Product{targetProduct}, connection.RowsAffected, nil
}
|
package qmd
import (
	"crypto/sha1"
	"fmt"
	"strings"
	"sync"
	"time"
)
// Unique ID generator.
// idChan streams hex IDs produced by a single background generator.
var idChan chan string = make(chan string)

// idGenOnce ensures exactly one generator goroutine is ever started.
var idGenOnce sync.Once

// NewID returns the next unique ID as a 40-character hex string.
//
// Fix: the original spawned a fresh generateID goroutine on EVERY call;
// each such goroutine loops forever, so every call leaked one permanently
// blocked goroutine. A single shared generator (started lazily on first
// use) preserves the output format while bounding the goroutine count at
// one.
func NewID() string {
	idGenOnce.Do(func() { go generateID() })
	return <-idChan
}

// generateID repeatedly folds the startup timestamp into a running SHA-1
// state and emits each intermediate digest, so successive IDs differ.
func generateID() {
	h := sha1.New()
	c := []byte(time.Now().String())
	for {
		h.Write(c)
		idChan <- fmt.Sprintf("%x", h.Sum(nil))
	}
}
// Flag string arrays.
// StringFlagArray collects the values of a repeatable string flag; it
// implements the flag.Value interface (Set/String).
type StringFlagArray []string

// Set appends one more flag value; it never fails.
func (a *StringFlagArray) Set(s string) error {
	values := append(*a, s)
	*a = values
	return nil
}

// String renders the collected values as a comma-separated list.
func (a *StringFlagArray) String() string {
	return strings.Join(*a, ",")
}
|
package utils
import (
	"io/ioutil"
	"os"
)
// WriteTmpFile writes data to a temp file and returns the path.
func WriteTmpFile(data string) (string, error) {
f, err := ioutil.TempFile("", "*")
if err != nil {
return "", err
}
defer f.Close()
_, err = f.Write([]byte(data))
if err != nil {
return "", err
}
return f.Name(), nil
}
|
package main
import "fmt"
// removeDuplicates compacts nums in place so that each value appears at
// most twice (LeetCode 80), returning the compacted length. The input must
// be sorted.
//
// Fix: the original only special-cased len == 0, so a 1-element slice
// returned i+1 == 2 AND panicked printing nums[0:2]. Inputs of length <= 1
// are already valid and are returned as-is.
func removeDuplicates(nums []int) int {
	if len(nums) <= 1 {
		return len(nums)
	}
	// i is the last written position; nums[i-1] is the element two slots
	// back, so a candidate is kept unless it would be a third repeat.
	i := 1
	for j := 2; j < len(nums); j++ {
		if nums[j] != nums[i-1] {
			i++
			nums[i] = nums[j]
		}
	}
	fmt.Println(nums[0 : i+1]) // debug trace of the compacted prefix
	return i + 1
}
// main demonstrates removeDuplicates on two sorted sample slices, printing
// the compacted lengths.
func main() {
	first := []int{0, 0, 1, 1, 1, 2, 2, 3, 3, 4}
	second := []int{0, 0, 1, 1, 1, 1, 2, 3, 3}
	fmt.Println(removeDuplicates(first))
	fmt.Println(removeDuplicates(second))
}
|
package main
import (
"bufio"
"fmt"
"os"
"strconv"
)
// salesTax is the flat tax rate applied to the subtotal.
const salesTax = 0.06

// main runs an interactive cash-register loop on stdin: numeric entries
// accumulate a subtotal, "total" prints the running totals, "tender"
// settles the bill, "quit" exits.
//
// Fixes over the original: ReadString errors are now handled (on EOF the
// original would panic slicing an empty string, or spin forever); a parse
// failure now skips the iteration instead of silently adding $0.00; the
// shadowed outer `value` variable, redundant `break`s inside switch cases,
// dead assignments before os.Exit, and unreachable code after the infinite
// loop are removed.
func main() {
	subtotal := 0.00
	total := 0.00
	reader := bufio.NewReader(os.Stdin)
	for {
		fmt.Printf("Enter value or command: ")
		command, err := reader.ReadString('\n')
		if err != nil {
			// EOF or read failure: exit cleanly instead of panicking on an
			// empty string below.
			os.Exit(0)
		}
		switch command {
		case "quit\n":
			fmt.Printf("Quitting.\n")
			os.Exit(0)
		case "total\n":
			fmt.Printf("Subtotal: $%.2f\n", subtotal)
			fmt.Printf("Total: $%.2f\n", total)
		case "tender\n":
			fmt.Printf("Subtotal: $%.2f\n", subtotal)
			fmt.Printf("Total: $%.2f\n", total)
			fmt.Printf("Enter amount tendered: ")
			amount, err := reader.ReadString('\n')
			if err != nil {
				os.Exit(0)
			}
			// strip the trailing newline before parsing
			tendered, err := strconv.ParseFloat(amount[0:len(amount)-1], 64)
			if err != nil {
				fmt.Println(err)
				continue
			}
			if tendered < total {
				// partial payment: reduce the bill and recompute the total
				fmt.Println("We can take that off the bill.")
				subtotal = subtotal - tendered
				total = subtotal + (subtotal * salesTax)
				fmt.Printf("Subtotal: $%.2f\n", subtotal)
				fmt.Printf("Total: $%.2f\n", total)
			} else {
				change := tendered - total
				fmt.Printf("Thank you!\n")
				fmt.Printf("Change: $%.2f\n", change)
				os.Exit(0)
			}
		default:
			value, err := strconv.ParseFloat(command[0:len(command)-1], 64)
			if err != nil {
				fmt.Println(err)
				continue
			}
			subtotal += value
			fmt.Printf("Input: $%.2f\n", value)
			fmt.Printf("Subtotal: $%.2f\n", subtotal)
			total = subtotal + (subtotal * salesTax)
		}
	}
}
|
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package netutil
import (
"net"
"runtime"
)
// closeTrackingConn wraps a net.Conn and keeps track of if it was closed
// or if it was leaked (and closes it if it was leaked).
type closeTrackingConn struct {
	net.Conn // embedded: all Conn methods delegate except the overridden Close
}
// TrackClose wraps the conn and sets a finalizer on the returned value to
// close the conn and monitor that it was leaked.
func TrackClose(conn net.Conn) net.Conn {
	wrapped := &closeTrackingConn{Conn: conn}
	runtime.SetFinalizer(wrapped, (*closeTrackingConn).finalize)
	return wrapped
}
// Close clears the finalizer and closes the connection.
func (c *closeTrackingConn) Close() error {
	// Clear the finalizer first so a normally-closed connection is never
	// reported as leaked by finalize.
	runtime.SetFinalizer(c, nil)
	mon.Event("connection_closed")
	return c.Conn.Close()
}
// finalize monitors that a connection was leaked and closes the connection.
// It only runs if the GC collects the wrapper before Close was called,
// since Close removes this finalizer; the close error is deliberately
// discarded (best effort on an already-leaked conn).
func (c *closeTrackingConn) finalize() {
	mon.Event("connection_leaked")
	_ = c.Conn.Close()
}
|
package test
import (
"go_code/execrise/testexec/testexec01/model"
"testing"
)
func TestGetSum(t *testing.T){
flag := model.GetSum()
if flag {
t.Logf("答案正确")
}else {
t.Fatalf("答案错误")
}
} |
package main
import (
"org.milkyway/gravity/commands"
"runtime"
)
// main wires build-time release metadata into the commands package and
// dispatches the CLI.
func main() {
	// Use every available core (the default since Go 1.5; kept explicitly,
	// presumably for older toolchains — TODO confirm it is still needed).
	runtime.GOMAXPROCS(runtime.NumCPU())
	// make the release version and commit info available to the version command
	// (MercurialCommit / ReleaseVersion are package-level variables set
	// elsewhere in this package, e.g. via build flags — not visible here).
	commands.MercurialCommit = MercurialCommit
	commands.ReleaseVersion = ReleaseVersion
	commands.Execute()
}
|
// Copyright 2019 liuxiaodong Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"github.com/shirou/gopsutil/host"
"github.com/shirou/gopsutil/load"
"github.com/shirou/gopsutil/mem"
"knife-panel/internal/app/schema"
)
// NewSystemMonitor returns a ready-to-use SystemMonitor.
func NewSystemMonitor() *SystemMonitor {
	m := new(SystemMonitor)
	return m
}
// SystemMonitor collects host, memory, swap and load statistics via the
// gopsutil library. It is stateless; the zero value is usable.
type SystemMonitor struct {
}
// GetSystemInfo gathers host info, virtual memory, swap memory and load
// average statistics and returns them bundled in a schema.SystemInfo.
// It returns the first error encountered, in which case the result is nil.
//
// Rewritten with idiomatic early-return error handling instead of the
// if/else chains (keeps the happy path left-aligned).
func (a *SystemMonitor) GetSystemInfo() (*schema.SystemInfo, error) {
	systemInfo := schema.SystemInfo{}

	infoStat, err := host.Info()
	if err != nil {
		return nil, err
	}
	systemInfo.InfoStat = infoStat

	vMemStat, err := mem.VirtualMemory()
	if err != nil {
		return nil, err
	}
	systemInfo.VMemStat = vMemStat

	swapMemStat, err := mem.SwapMemory()
	if err != nil {
		return nil, err
	}
	systemInfo.SwapMemStat = swapMemStat

	loadStat, err := load.Avg()
	if err != nil {
		return nil, err
	}
	systemInfo.LoadStat = loadStat

	return &systemInfo, nil
}
|
package helpers
import (
"github.com/LiveSocket/bot/service"
"github.com/gammazero/nexus/v3/wamp"
)
// IgnoreChannel WAMP call helper for ignoring a channel.
//
// Fix: the first parameter was named "service", shadowing the imported
// service package inside the function body; renamed to svc (parameter
// names are not part of the call interface, so callers are unaffected).
func IgnoreChannel(svc *service.Service, name string) error {
	// Call the ignore-channel endpoint; only the error matters to callers.
	_, err := svc.SimpleCall("private.channel.ignore", nil, wamp.Dict{"name": name})
	return err
}
|
/*
* Created on Wed Feb 27 2019 20:54:22
* Author: WuLC
* EMail: liangchaowu5@gmail.com
*/
// gridIllumination solves LeetCode 1001 "Grid Illumination": for each query
// cell report 1 if its row, column or either diagonal contains a lit lamp,
// then turn off every lamp in the 3x3 neighborhood of the query cell.
// Counts are kept in hash maps (not N-sized arrays) to avoid memory limit
// exceeded for large N.
//
// Fixes: (1) a turned-off lamp was never removed from `source`, so a later
// query sweeping the same cell decremented the row/col/diagonal counters a
// second time, driving them negative and darkening unrelated cells;
// (2) duplicate entries in lamps inflated the counters while `source`
// recorded the lamp once, causing the same imbalance.
func gridIllumination(N int, lamps [][]int, queries [][]int) []int {
	row, col := make(map[int]int), make(map[int]int)
	left_diagonal, right_diagonal := make(map[int]int), make(map[int]int)
	source := make(map[int]map[int]int) // source[x][y] present iff lamp (x,y) is currently on
	for _, v := range lamps {
		if _, ok := source[v[0]]; !ok {
			source[v[0]] = make(map[int]int)
		}
		if _, ok := source[v[0]][v[1]]; ok {
			// Fix (2): lamps may contain duplicates; count each lamp once.
			continue
		}
		source[v[0]][v[1]] = 1
		row[v[0]]++
		col[v[1]]++
		left_diagonal[v[0]+v[1]]++
		right_diagonal[v[0]-v[1]]++
	}
	result := []int{}
	for _, v := range queries {
		// A missing key reads as 0, so existence checks are unnecessary.
		if row[v[0]] > 0 || col[v[1]] > 0 ||
			left_diagonal[v[0]+v[1]] > 0 || right_diagonal[v[0]-v[1]] > 0 {
			result = append(result, 1)
		} else {
			result = append(result, 0)
		}
		// Turn off all lamps in the 3x3 neighborhood of the query cell.
		for i := v[0] - 1; i <= v[0]+1; i++ {
			for j := v[1] - 1; j <= v[1]+1; j++ {
				if val, ok := source[i]; ok {
					if _, ok := val[j]; ok {
						// Fix (1): remove the lamp so its counters cannot
						// be decremented again by a later query.
						delete(val, j)
						row[i]--
						col[j]--
						left_diagonal[i+j]--
						right_diagonal[i-j]--
					}
				}
			}
		}
	}
	return result
}
package main
import "fmt"
// logMap prints every key/value pair of cityMap and then inserts the
// marker entry "demo" -> "ZYK". Maps are reference types in Go (like in
// JS), so the caller observes the inserted entry after the call.
func logMap(cityMap map[string]string) {
	for k, v := range cityMap {
		fmt.Println("key", k, " value:", v)
	}
	// Mutation is visible to the caller — this is the point of the demo.
	cityMap["demo"] = "ZYK"
	fmt.Println("-==--=--=-=-=--")
}
// main demonstrates that map arguments are passed by reference: logMap's
// insertion of the "demo" key is visible here after each call.
func main() {
	cityMap := map[string]string{
		"CHINA":   "NO_1",
		"AMERICA": "NO_100",
		"INDIA":   "NO_-1",
	}
	logMap(cityMap)
	delete(cityMap, "INDIA")
	cityMap["AMERICA"] = "NO_-100"
	logMap(cityMap)
}
|
package example
import (
"time"
"github.com/ipiao/metools/creator"
)
// User is user: a sample struct exercising the metools creator package,
// mixing plain fields, a json-tagged field, an embedded creator.People
// (whose fields and methods are promoted onto User) and an unexported
// pointer to the same type.
type User struct {
	Name string
	Age int `json:"age"`
	BirthDay time.Time
	creator.People
	people *creator.People
}
// Hello returns the fixed greeting "hello"; the string argument is
// currently ignored.
func (u User) Hello(s string) string {
	greeting := "hello"
	return greeting
}
|
package main
import "fmt"
// main builds a map of employee salaries grouped by the first letter of
// the name, removes the whole "P" group, and prints what remains.
func main() {
	byLetter := map[string]map[string]float64{
		"G": {
			"Gabriela Silva": 9564.56,
			"Guga Pereira":   4566.85,
		},
		"J": {
			"José João": 6566.84,
		},
		"P": {
			"Pedro Junior": 5948.56,
		},
	}
	// Deleting the "P" key discards every entry stored under it.
	delete(byLetter, "P")
	for letter, group := range byLetter {
		fmt.Println(letter)
		for name, salary := range group {
			fmt.Println(name, salary)
		}
	}
}
|
package main
import (
"fmt"
"strings"
"testing"
)
// tuple is an (a, b) integer pair used as a map key for range test cases.
type tuple struct {
	a, b int
}
// TestIsMagic checks isMagic against known magic and non-magic numbers.
func TestIsMagic(t *testing.T) {
	magicCases := []int{1, 9, 35, 37, 174, 1267, 3562, 6712, 6392, 9263, 9627}
	nonMagicCases := []int{10, 12, 18, 175, 1624, 2715, 3261, 6372, 7216, 9876}
	for _, n := range magicCases {
		if !isMagic(n) {
			t.Errorf("failed: isMagic %d is true, got false", n)
		}
	}
	for _, n := range nonMagicCases {
		if isMagic(n) {
			t.Errorf("failed: isMagic %d is false, got true", n)
		}
	}
}
// TestMagicNumbers checks magicNumbers over several [a, b] ranges,
// including a range containing no magic numbers ("-1").
func TestMagicNumbers(t *testing.T) {
	cases := map[tuple]string{
		{10, 100}:   "13 15 17 19 31 35 37 39 51 53 57 59 71 73 75 79 91 93 95 97",
		{8382, 8841}: "-1",
		{1, 10000}:  "1 2 3 4 5 6 7 8 9 13 15 17 19 31 35 37 39 51 53 57 59 71 73 75 79 91 93 95 97 147 174 258 285 417 471 528 582 714 741 825 852 1263 1267 1623 1627 2316 2356 2396 2631 2635 2639 2671 2675 2679 2716 2756 2796 3126 3162 3526 3562 3926 3962 5263 5267 5623 5627 6231 6235 6239 6271 6275 6279 6312 6352 6392 6712 6752 6792 7126 7162 7526 7562 7926 7962 9263 9267 9623 9627",
	}
	for k, want := range cases {
		if got := magicNumbers(k.a, k.b); got != want {
			t.Errorf("failed: magicNumbers %d, %d is %s, got %s",
				k.a, k.b, want, got)
		}
	}
}
// BenchmarkIsMagic measures isMagic across the inputs 1..10000.
func BenchmarkIsMagic(b *testing.B) {
	for n := 0; n < b.N; n++ {
		isMagic(n%10000 + 1)
	}
}
// BenchmarkMagicNumbers measures magicNumbers over varying ranges.
func BenchmarkMagicNumbers(b *testing.B) {
	for i := 0; i < b.N; i++ {
		lo := i%10000 + 1
		magicNumbers(lo, lo+(i/10000)%(10001-lo))
	}
}
// isMagic reports whether a is a "magic" number. A candidate must have
// non-zero, pairwise-distinct decimal digits; then, starting from
// position 0, a cursor repeatedly advances by the digit at the mirrored
// position (modulo the digit count), and every position must be visited
// exactly once with the walk ending back at position 0.
//
// Fix: `for _ = range ns` replaced with the idiomatic `for range` form
// (flagged by gofmt/go vet style checks); locals renamed for clarity.
func isMagic(a int) bool {
	var (
		seen uint   // bitmask of digit values / positions already used
		pos  uint   // current digit value, later the walk position
		digs []uint // decimal digits of a, least significant first
	)
	for a > 0 {
		pos = uint(a % 10)
		if pos == 0 || seen&(1<<pos) > 0 {
			// Zero digits and repeated digits are never magic.
			return false
		}
		seen |= 1 << pos
		digs = append(digs, pos)
		a /= 10
	}
	seen, pos = 0, 0
	n := uint(len(digs))
	for range digs {
		pos = (pos + digs[n-1-pos]) % n
		if seen&(1<<pos) > 0 {
			// A position was revisited before covering all digits.
			return false
		}
		seen |= 1 << pos
	}
	return pos == 0
}
// magic holds every magic number in ascending order, precomputed once at
// package init so magicNumbers can answer range queries with a scan.
var magic []int

// init fills the magic cache by scanning 1..9876.
// NOTE(review): 9876 is assumed to be a sufficient upper bound for all
// magic numbers relevant to callers — confirm before querying above it.
func init() {
	for i := 1; i <= 9876; i++ {
		if isMagic(i) {
			magic = append(magic, i)
		}
	}
}
// magicNumbers returns all magic numbers within [a, b] as a space-joined
// string, or "-1" when the range contains none. It relies on the package
// cache `magic` being sorted ascending.
func magicNumbers(a, b int) string {
	var parts []string
	for _, m := range magic {
		if m > b {
			break // cache is ascending: nothing further can match
		}
		if m >= a {
			parts = append(parts, fmt.Sprint(m))
		}
	}
	if len(parts) == 0 {
		return "-1"
	}
	return strings.Join(parts, " ")
}
|
package main
import (
"github.com/Rorical/NearDB/src/rpc"
"log"
)
// main constructs the NearDB RPC service and serves it on port 9888,
// aborting startup on construction failure.
func main() {
	svc, err := rpc.NewService()
	if err != nil {
		panic(err)
	}
	log.Println("Running Service at :9888")
	rpc.RunService(":9888", svc)
}
|
// +build !windows
package main
import (
"fmt"
"os"
"strings"
)
// GetDeviceNumber resolves the /sys/block/<deviceName> symlink and returns
// the first path segment of the link target that starts with "ata", or ""
// if the link cannot be read or no such segment exists (the error is
// printed, not returned — callers only see the empty string).
//
// Cleanup: removed the large blocks of dead commented-out code left from a
// previous LINQ-style implementation.
func GetDeviceNumber(deviceName string) string {
	target, err := os.Readlink(fmt.Sprintf("/sys/block/%s", deviceName))
	if err != nil {
		fmt.Println(err)
		return ""
	}
	// Scan the link target's path segments for the "ata*" component.
	for _, segment := range strings.Split(strings.TrimPrefix(target, "/"), "/") {
		if strings.HasPrefix(segment, "ata") {
			return segment
		}
	}
	return ""
}
|
package pgiface
/*
IConnManager manages the set of live connections: registration, removal,
lookup, counting and teardown.
*/
type IConnManager interface {
	// Add registers a connection.
	Add(conn IConnection)
	// Remove deregisters a connection.
	Remove(conn IConnection)
	// Get returns the connection for connID, or an error if it is unknown.
	Get(connID uint32) (IConnection, error)
	// Len returns the current number of managed connections.
	Len() int
	// ClearConn stops and removes all connections.
	ClearConn()
}
|
package viewservice
import "fmt"
import "time"
// main stores the current time under key "sf" and prints it back out.
func main() {
	timestamps := make(map[string]time.Time)
	timestamps["sf"] = time.Now()
	fmt.Printf("%v\n", timestamps["sf"])
}
package google
import (
"fmt"
"regexp"
"time"
"github.com/protofire/polkadot-failover-mechanism/pkg/helpers/validate"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
const (
	// Copied from the official Google Cloud auto-generated client.
	// ProjectRegex matches a GCP project identifier: an optionally
	// domain-scoped project ID, or a numeric project number.
	ProjectRegex = "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))"
	// ProjectRegexWildCard is ProjectRegex extended to also accept "-"
	// (used by some APIs as a wildcard project).
	ProjectRegexWildCard = "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?)|-)"
	// RegionRegex matches a region name (lowercase RFC1035-style label).
	RegionRegex = "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
	// SubnetworkRegex matches a subnetwork name (same label grammar).
	SubnetworkRegex = "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
)
// nolint
// validateRegexp returns a schema validation function that rejects string
// values not matching the regular expression re.
//
// Fix: the pattern is now compiled once when the validator is constructed
// instead of on every validation call (MustCompile panics on an invalid
// pattern either way, just earlier and closer to the mistake).
func validateRegexp(re string) schema.SchemaValidateDiagFunc {
	compiled := regexp.MustCompile(re)
	return validate.DiagFunc(func(v interface{}, k string) (ws []string, errors []error) {
		value := v.(string)
		if !compiled.MatchString(value) {
			errors = append(errors, fmt.Errorf(
				"%q (%q) doesn't match regexp %q", k, value, re))
		}
		return
	})
}
// nolint
// validateNonNegativeDuration returns a schema validation function that
// accepts only strings parseable as a non-negative time.Duration.
func validateNonNegativeDuration() schema.SchemaValidateDiagFunc {
	return validate.DiagFunc(func(i interface{}, k string) (s []string, es []error) {
		str, ok := i.(string)
		if !ok {
			es = append(es, fmt.Errorf("expected type of %s to be string", k))
			return
		}
		d, err := time.ParseDuration(str)
		if err != nil {
			es = append(es, fmt.Errorf("expected %s to be a duration, but parsing gave an error: %s", k, err.Error()))
			return
		}
		if d < 0 {
			es = append(es, fmt.Errorf("duration %v must be a non-negative duration", d))
		}
		return
	})
}
|
// Copyright (c) 2018 The MATRIX Authors
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php
package miner
import (
"math/big"
"github.com/MatrixAINetwork/go-matrix/common"
"github.com/MatrixAINetwork/go-matrix/core/types"
"github.com/MatrixAINetwork/go-matrix/log"
"github.com/MatrixAINetwork/go-matrix/params"
"github.com/pkg/errors"
)
// mineTaskManager tracks pending POW and AI mining tasks for the current
// chain height and role, plus mine headers whose parent block has not yet
// been inserted locally.
type mineTaskManager struct {
	curNumber uint64 // current chain height; tasks below it are pruned
	curRole common.RoleType // local node role; mining only for miner roles
	powTaskCache map[common.Hash]*powMineTask // pending POW tasks keyed by mine hash
	aiTaskCache map[common.Hash]*aiMineTask // pending AI tasks keyed by mine hash
	unusedMineHeader map[common.Hash]*types.Header // headers parked until their parent arrives
	bc ChainReader // chain access
	logInfo string // log message prefix
}
// newMineTaskManager builds an empty task manager bound to the given chain
// reader and log prefix.
func newMineTaskManager(bc ChainReader, logInfo string) *mineTaskManager {
	mgr := &mineTaskManager{
		bc:      bc,
		logInfo: logInfo,
	}
	mgr.curNumber = 0
	mgr.curRole = common.RoleNil
	mgr.powTaskCache = make(map[common.Hash]*powMineTask)
	mgr.aiTaskCache = make(map[common.Hash]*aiMineTask)
	mgr.unusedMineHeader = make(map[common.Hash]*types.Header)
	return mgr
}
// Clear resets the height and drops every cached task and parked header.
func (mgr *mineTaskManager) Clear() {
	mgr.curNumber = 0
	mgr.powTaskCache = map[common.Hash]*powMineTask{}
	mgr.aiTaskCache = map[common.Hash]*aiMineTask{}
	mgr.unusedMineHeader = map[common.Hash]*types.Header{}
}
// SetNewNumberAndRole advances the manager to a new chain height and role.
// Heights below the current one are ignored entirely; on a height increase
// stale cache entries are pruned, and the role is updated whenever the
// height is not going backwards.
func (mgr *mineTaskManager) SetNewNumberAndRole(number uint64, role common.RoleType) {
	switch {
	case number < mgr.curNumber:
		return
	case number > mgr.curNumber:
		mgr.curNumber = number
		mgr.fixMap() // TODO: split deletion and update into separate steps
	}
	mgr.curRole = role
}
// AddTaskByInsertedHeader derives mining tasks from a block that was just
// inserted into the local chain. If the inserted block is itself an AI
// block it is used directly; otherwise its most recent AI ancestor is
// looked up. All failures are logged and swallowed (best effort).
func (mgr *mineTaskManager) AddTaskByInsertedHeader(headerHash common.Hash) {
	insertedHeader := mgr.bc.GetHeaderByHash(headerHash)
	if nil == insertedHeader {
		log.Info(mgr.logInfo, "AddTaskByInsertedHeader", "get cur header failed")
		return
	}
	bcInterval, err := mgr.bc.GetBroadcastIntervalByHash(headerHash)
	if err != nil || bcInterval == nil {
		log.Info(mgr.logInfo, "AddTaskByInsertedHeader", "get broadcast interval failed")
		return
	}
	insertedNumber := insertedHeader.Number.Uint64()
	if bcInterval.IsReElectionNumber(insertedNumber) {
		// Re-election blocks never spawn mining tasks; skip them.
		log.Trace(mgr.logInfo, "AddTaskByInsertedHeader", "忽略区块", "插入区块是选举换届区块", insertedNumber)
		return
	}
	var aiHeader *types.Header = nil
	if insertedHeader.IsAIHeader(bcInterval.GetBroadcastInterval()) {
		// The inserted block is itself an AI block.
		aiHeader = insertedHeader
	} else {
		// Walk back to the previous AI block's header.
		aiHeaderNumber := params.GetCurAIBlockNumber(insertedHeader.Number.Uint64(), bcInterval.GetBroadcastInterval())
		aiHeaderHash, err := mgr.bc.GetAncestorHash(insertedHeader.ParentHash, aiHeaderNumber)
		if err != nil {
			log.Info(mgr.logInfo, "AddTaskByInsertedHeader", "获取pre ai header hash 失败", "err", err, "aiHeaderNumber", aiHeaderNumber, "cur header number", insertedHeader.Number)
			return
		}
		aiHeader = mgr.bc.GetHeaderByHash(aiHeaderHash)
		if aiHeader == nil {
			log.Info(mgr.logInfo, "AddTaskByInsertedHeader", "get pre ai header failed")
			return
		}
	}
	// verify=false: the header came from the local chain and is trusted.
	powTask, aiTask, err := mgr.createMineTask(aiHeader, false)
	if err != nil {
		log.Trace(mgr.logInfo, "AddTaskByInsertedHeader", "创建任务失败", "err", err)
		return
	}
	mgr.addPowTask(powTask)
	mgr.addAITask(aiTask)
}
// AddMineHeader accepts an externally received mine header. Headers that
// are too old or already known are rejected with an error. Headers whose
// parent block is not yet local are parked in unusedMineHeader (evicting
// the oldest entries while over OVERFLOWLEN, a package-level cap defined
// elsewhere); otherwise the header is converted into POW/AI mining tasks
// with full verification.
func (mgr *mineTaskManager) AddMineHeader(mineHeader *types.Header) error {
	if nil == mineHeader {
		return errors.New("mine header为nil")
	}
	if mineHeader.Number.Uint64()+params.PowBlockPeriod+1 < mgr.curNumber {
		// Header height is too far behind the current chain height.
		return errors.Errorf("mine header number(%d) is too less than cur number(%d)", mineHeader.Number.Uint64(), mgr.curNumber)
	}
	mineHash := mineHeader.HashNoSignsAndNonce()
	if mgr.isExistMineHash(mineHash) {
		return errors.Errorf("mine hash(%s) already exist", mineHash.TerminalString())
	}
	if mgr.bc.GetHeaderByHash(mineHeader.ParentHash) == nil {
		// Parent block unknown: park the header until the parent arrives.
		// While over capacity, evict the header with the earliest timestamp.
		for len(mgr.unusedMineHeader) > OVERFLOWLEN {
			var earliestHeaderTime *big.Int = nil
			var earliestHash common.Hash
			for hash, header := range mgr.unusedMineHeader {
				if earliestHeaderTime != nil && earliestHeaderTime.Cmp(header.Time) <= 0 {
					continue
				}
				earliestHeaderTime = header.Time
				earliestHash = hash
			}
			delete(mgr.unusedMineHeader, earliestHash)
		}
		mgr.unusedMineHeader[mineHash] = mineHeader
		return nil
	} else {
		// Parent present: convert the header into mining tasks (verified).
		powTask, aiTask, err := mgr.createMineTask(mineHeader, true)
		if err != nil {
			log.Info(mgr.logInfo, "create mine task err", err)
			return err
		}
		mgr.addPowTask(powTask)
		mgr.addAITask(aiTask)
		return nil
	}
}
// CanMining reports whether the current role is allowed to mine.
func (mgr *mineTaskManager) CanMining() bool {
	switch mgr.curRole {
	case common.RoleMiner, common.RoleInnerMiner:
		return true
	}
	return false
}
// GetBestPowTask returns the pending POW task with the newest mine-header
// timestamp, pruning stale entries along the way, or nil if none remains.
//
// Fix: stale entries were deleted from aiTaskCache while iterating
// powTaskCache (copy-paste from GetBestAITask), so stale POW tasks were
// never pruned and unrelated AI tasks could be dropped. Delete from
// powTaskCache instead (deleting during range is safe in Go).
func (mgr *mineTaskManager) GetBestPowTask() (bestTask *powMineTask) {
	bestTask = nil
	for hash, task := range mgr.powTaskCache {
		if task.minedPow {
			continue
		}
		if task.powMiningNumber < mgr.curNumber {
			log.Info(mgr.logInfo, "GetBestPowTask", "task mining number < cur number", "hash", hash.TerminalString(), "task mining number", task.powMiningNumber, "cur number", mgr.curNumber)
			delete(mgr.powTaskCache, hash)
			continue
		}
		// The task with the largest header timestamp is the best task.
		if bestTask != nil && bestTask.mineHeader.Time.Cmp(task.mineHeader.Time) >= 0 {
			continue
		}
		bestTask = task
	}
	return bestTask
}
// GetBestAITask returns the pending AI task with the newest mine-header
// timestamp, pruning entries below the current height, or nil if none.
func (mgr *mineTaskManager) GetBestAITask() (bestTask *aiMineTask) {
	bestTask = nil
	for hash, task := range mgr.aiTaskCache {
		if task.minedAI {
			continue
		}
		if task.aiMiningNumber < mgr.curNumber {
			// Stale task: drop it (deleting during range is safe in Go).
			log.Info(mgr.logInfo, "GetBestAITask", "task mining number < cur number", "hash", hash.TerminalString(), "task mining number", task.aiMiningNumber, "cur number", mgr.curNumber)
			delete(mgr.aiTaskCache, hash)
			continue
		}
		// The task with the largest header timestamp is the best task.
		if bestTask != nil && bestTask.mineHeader.Time.Cmp(task.mineHeader.Time) >= 0 {
			continue
		}
		bestTask = task
	}
	return bestTask
}
// createMineTask builds the POW task and — unless the next AI slot falls on
// a re-election block — the AI task for the given AI mine header. When
// verify is true the header's difficulty and DPOS validity are checked
// first (used for headers received from the network; local chain headers
// skip verification).
//
// Idiom fix: "== false" comparison replaced with "!".
func (mgr *mineTaskManager) createMineTask(mineHeader *types.Header, verify bool) (powTask *powMineTask, aiTask *aiMineTask, returnErr error) {
	bcInterval, err := mgr.bc.GetBroadcastIntervalByHash(mineHeader.ParentHash)
	if err != nil || bcInterval == nil {
		return nil, nil, errors.Errorf("get broadcast interval err: %v", err)
	}
	if verify {
		if mineHeader.Difficulty.Uint64() == 0 {
			return nil, nil, difficultyIsZero
		}
		if !mineHeader.IsAIHeader(bcInterval.GetBroadcastInterval()) {
			return nil, nil, errors.Errorf("mine header is not ai header")
		}
		err = mgr.bc.DPOSEngine(mineHeader.Version).VerifyBlock(mgr.bc, mineHeader)
		if err != nil {
			return nil, nil, errors.Errorf("verify mine header err: %v", err)
		}
	}
	powMiningNumber := mineHeader.Number.Uint64() + params.PowBlockPeriod - 1
	aiMiningNumber := params.GetNextAIBlockNumber(mineHeader.Number.Uint64(), bcInterval.GetBroadcastInterval())
	mineHash := mineHeader.HashNoSignsAndNonce()
	difficulty := mineHeader.Difficulty
	if mgr.curRole == common.RoleInnerMiner {
		// Inner miners mine with a fixed difficulty from params.
		difficulty = params.InnerMinerDifficulty
	}
	powTask = newPowMineTask(mineHash, mineHeader, powMiningNumber, bcInterval, difficulty)
	if bcInterval.IsReElectionNumber(aiMiningNumber - 1) {
		// No AI task when the AI slot immediately follows a re-election block.
		aiTask = nil
	} else {
		aiTask = newAIMineTask(mineHash, mineHeader, aiMiningNumber, bcInterval)
	}
	return powTask, aiTask, nil
}
// addPowTask caches a POW task unless it is nil, stale, or already present.
func (mgr *mineTaskManager) addPowTask(powTask *powMineTask) {
	if powTask == nil {
		return
	}
	if powTask.powMiningNumber < mgr.curNumber {
		log.Trace(mgr.logInfo, "add pow task failed", "task number < cur number", "task number", powTask.powMiningNumber, "cur number", mgr.curNumber)
		return
	}
	if _, exist := mgr.powTaskCache[powTask.mineHash]; exist {
		log.Trace(mgr.logInfo, "add pow task failed", "already exist", "task number", powTask.powMiningNumber, "mine hash", powTask.mineHash.TerminalString())
		return
	}
	mgr.powTaskCache[powTask.mineHash] = powTask
	log.Info(mgr.logInfo, "add pow task success", powTask.mineHash.TerminalString(), "mining number", powTask.powMiningNumber, "cur number", mgr.curNumber)
}
// addAITask caches an AI task unless it is nil, stale, or already present.
func (mgr *mineTaskManager) addAITask(aiTask *aiMineTask) {
	if aiTask == nil {
		return
	}
	if aiTask.aiMiningNumber < mgr.curNumber {
		log.Trace(mgr.logInfo, "add ai task failed", "task number < cur number", "task number", aiTask.aiMiningNumber, "cur number", mgr.curNumber)
		return
	}
	if _, exist := mgr.aiTaskCache[aiTask.mineHash]; exist {
		log.Trace(mgr.logInfo, "add ai task failed", "already exist", "task number", aiTask.aiMiningNumber, "mine hash", aiTask.mineHash.TerminalString())
		return
	}
	mgr.aiTaskCache[aiTask.mineHash] = aiTask
	log.Info(mgr.logInfo, "add ai task success", aiTask.mineHash.TerminalString(), "mining number", aiTask.aiMiningNumber, "cur number", mgr.curNumber)
}
// isExistMineHash reports whether the mine hash is already present in any
// of the three caches (POW tasks, AI tasks, parked headers).
func (mgr *mineTaskManager) isExistMineHash(mineHash common.Hash) bool {
	if _, inPow := mgr.powTaskCache[mineHash]; inPow {
		log.Trace(mgr.logInfo, "mine hash exist in pow task cache", mineHash.TerminalString())
		return true
	}
	if _, inAI := mgr.aiTaskCache[mineHash]; inAI {
		log.Trace(mgr.logInfo, "mine hash exist in ai task cache", mineHash.TerminalString())
		return true
	}
	if _, parked := mgr.unusedMineHeader[mineHash]; parked {
		log.Trace(mgr.logInfo, "mine hash exist in unusedMineHeader cache", mineHash.TerminalString())
		return true
	}
	return false
}
// fixMap runs after the chain height advances: it prunes tasks whose
// mining height fell below curNumber, and revisits parked headers —
// converting those whose parent has arrived into tasks and deleting those
// that became too old.
func (mgr *mineTaskManager) fixMap() {
	// Drop POW tasks below the current height.
	for hash, task := range mgr.powTaskCache {
		if task.powMiningNumber < mgr.curNumber {
			log.Trace(mgr.logInfo, "fix map", "delete pow task", "task number", task.powMiningNumber, "cur number", mgr.curNumber, "key hash", task.mineHash.TerminalString())
			delete(mgr.powTaskCache, hash)
		}
	}
	// Drop AI tasks below the current height.
	for hash, task := range mgr.aiTaskCache {
		if task.aiMiningNumber < mgr.curNumber {
			log.Trace(mgr.logInfo, "fix map", "delete ai task", "task number", task.aiMiningNumber, "cur number", mgr.curNumber, "key hash", task.mineHash.TerminalString())
			delete(mgr.aiTaskCache, hash)
		}
	}
	// Re-check parked headers for a now-available parent block.
	for hash, header := range mgr.unusedMineHeader {
		if mgr.bc.GetHeaderByHash(header.ParentHash) == nil {
			if header.Number.Uint64()+params.PowBlockPeriod+1 < mgr.curNumber {
				// Too old to ever become mineable: discard.
				delete(mgr.unusedMineHeader, hash)
			}
		} else {
			// Parent arrived: unpark and convert into tasks (verified).
			delete(mgr.unusedMineHeader, hash)
			powTask, aiTask, err := mgr.createMineTask(header, true)
			if err != nil {
				log.Trace(mgr.logInfo, "create mine task err", err)
				continue
			}
			mgr.addPowTask(powTask)
			mgr.addAITask(aiTask)
		}
	}
}
|
package rabbitmq
import (
"github.com/streadway/amqp"
)
// Subscriber consumes messages from an AMQP queue and dispatches each one
// to the handler supplied to Subscribe. Subscribe blocks the caller.
type Subscriber interface {
	Subscribe(handler func(message *Message))
}
// NewSubscriber opens a connection using the URI from options and returns
// a Subscriber bound to it.
func NewSubscriber(options *SubscriberOptions) Subscriber {
	return &subscriber{
		conn: NewConnection(&ConnectionOptions{
			URI: options.URI,
		}),
		subscriberOptions:         options,
		active:                    true,
		disconnectionErrorChannel: make(chan error),
	}
}
// subscriber is the concrete Subscriber: it owns the AMQP connection, the
// declared queue and the consumer delivery channel.
type subscriber struct {
	active bool // set true at construction; not read in this chunk
	disconnectionErrorChannel chan error // created at construction; usage not visible here
	subscriberOptions *SubscriberOptions // queue/exchange/consumer configuration
	subscriberHandler func(message *Message) // user callback set by Subscribe
	messageDeliveryChannel <-chan amqp.Delivery // live consumer stream from the broker
	queue amqp.Queue // queue as actually declared by the broker
	conn Connection // wrapped AMQP connection with reconnect hooks
}
// Subscribe wires the handler, declares the AMQP topology, opens the
// consumer channel and then blocks, delivering messages until the process
// exits. The call order below is significant.
func (s *subscriber) Subscribe(handler func(message *Message)) {
	s.registerSubscriberHandler(handler)
	s.setupSubscriber()
	s.openConsumerChannel()
	s.startSubscriber()
}
// registerSubscriberHandler stores the user callback for later dispatch.
func (s *subscriber) registerSubscriberHandler(handler func(message *Message)) {
	s.subscriberHandler = handler
}
// setupSubscriber declares the full AMQP topology in dependency order:
// QoS first, then the exchange, the queue, and finally the binding.
func (s *subscriber) setupSubscriber() {
	s.setupChannelQos()
	s.setupExchange()
	s.setupQueue()
	s.bindQueueToExchange()
}
// openConsumerChannel starts consuming from the declared queue with the
// configured consumer flags and stores the resulting delivery stream.
// Setup is fail-fast: any broker error panics.
func (s *subscriber) openConsumerChannel() {
	delivery, err := s.conn.GetChannel().Consume(
		s.queue.Name,
		s.subscriberOptions.Name,
		s.subscriberOptions.AutoAck,
		s.subscriberOptions.Exclusive,
		s.subscriberOptions.NoLocal,
		s.subscriberOptions.NoWait,
		s.subscriberOptions.Args)
	if err != nil {
		panic(err)
	}
	s.messageDeliveryChannel = delivery
}
// startSubscriber registers the reconnect hook and then consumes messages
// forever; handleConsume returns whenever the delivery channel closes.
// NOTE(review): if the channel closes and is not promptly replaced by the
// reconnect hook, this loop will spin re-entering handleConsume — confirm
// the reconnect path swaps messageDeliveryChannel before that can happen.
func (s *subscriber) startSubscriber() {
	s.conn.SetReconnectHooks(s.reconnectSubscriber)
	for {
		s.handleConsume()
	}
}
// setupChannelQos applies the configured prefetch count to the channel;
// setup is fail-fast, so any error panics.
func (s *subscriber) setupChannelQos() {
	if err := s.conn.GetChannel().Qos(s.subscriberOptions.PrefetchCount, 0, false); err != nil {
		panic(err)
	}
}
// setupQueue declares the queue from the configured options and remembers
// the queue as returned by the broker. Setup is fail-fast: errors panic.
func (s *subscriber) setupQueue() {
	opts := s.subscriberOptions.QueueOptions
	q, err := s.conn.GetChannel().QueueDeclare(
		opts.Name,
		opts.Durable,
		opts.AutoDelete,
		opts.Exclusive,
		opts.NoWait,
		opts.GetArgs())
	if err != nil {
		panic(err)
	}
	s.queue = q
}
// reconnectSubscriber re-declares the topology and reopens the consumer
// channel after the connection is re-established (the handler stays set).
func (s *subscriber) reconnectSubscriber() {
	s.setupSubscriber()
	s.openConsumerChannel()
}
// handleConsume drains the current delivery channel, handling each message
// on its own goroutine; it returns when the channel is closed (e.g. on
// disconnect).
func (s *subscriber) handleConsume() {
	for delivery := range s.messageDeliveryChannel {
		message := newMessageFromDelivery(delivery)
		go s.handleDelivery(message)
	}
}
// handleDelivery invokes the registered user handler for one message
// (runs on a dedicated goroutine per delivery).
func (s *subscriber) handleDelivery(message *Message) {
	s.subscriberHandler(message)
}
// setupExchange declares the configured exchange; nil ExchangeOptions
// means there is nothing to declare. Setup is fail-fast: errors panic.
func (s *subscriber) setupExchange() {
	opts := s.subscriberOptions.ExchangeOptions
	if opts == nil {
		return
	}
	err := s.conn.GetChannel().ExchangeDeclare(
		opts.Name,
		opts.Type.String(),
		opts.IsDurable,
		opts.IsAutoDeleted,
		opts.IsInternal,
		opts.NoWait,
		opts.Args)
	if err != nil {
		panic(err)
	}
}
// bindQueueToExchange binds the declared queue to the configured exchange
// with the configured routing key; nil ExchangeOptions means no binding is
// needed. Setup is fail-fast: errors panic.
func (s *subscriber) bindQueueToExchange() {
	exOpts := s.subscriberOptions.ExchangeOptions
	if exOpts == nil {
		return
	}
	qOpts := s.subscriberOptions.QueueOptions
	err := s.conn.GetChannel().QueueBind(
		s.queue.Name,
		qOpts.RoutingKey,
		exOpts.Name,
		qOpts.NoWait,
		qOpts.QueueBindArgs)
	if err != nil {
		panic(err)
	}
}
|
package internal
import (
	"crypto/md5"
	"fmt"
	"strings"
)
// trimPrefixPath strips any leading '.' and '/' characters from s
// (e.g. "../foo" -> "foo"); a string of only dots and slashes yields "".
//
// Rewritten: the manual rune scan is exactly strings.TrimLeft with the
// cutset "./".
func trimPrefixPath(s string) string {
	return strings.TrimLeft(s, "./")
}
// getMd5 returns the lowercase hex-encoded MD5 digest of s.
func getMd5(s []byte) string {
	digest := md5.Sum(s)
	return fmt.Sprintf("%x", digest)
}
// splitBySizeChat splits s into consecutive chunks of at most size runes
// (not bytes); the final chunk may be shorter. A non-positive size returns
// the whole string as a single chunk (nil for an empty string).
//
// Fix: when a chunk filled up, the rune that triggered the flush was
// discarded, so every chunk after the first lost its leading rune
// (e.g. "abcd"/2 produced ["ab" "d"]). That rune now begins the next chunk.
func splitBySizeChat(s string, size int) (res []string) {
	if size <= 0 {
		if s == "" {
			return nil
		}
		return []string{s}
	}
	tmp := []int32{}
	for _, v := range []int32(s) {
		if len(tmp) == size {
			res = append(res, string(tmp))
			tmp = []int32{}
		}
		tmp = append(tmp, v)
	}
	if len(tmp) > 0 {
		res = append(res, string(tmp))
	}
	return res
}
|
package est_utils
import (
//"github.com/application-research/filclient"
"github.com/libp2p/go-libp2p-core/peer"
//"github.com/application-research/filclient"
"github.com/ipfs/go-cid"
"time"
)
// AddResponse is the payload returned by an Estuary content-add call:
// the stored CID, Estuary's internal content id, and provider multiaddrs.
type AddResponse struct {
	Cid string
	EstuaryId uint64
	Providers []string
}
// ContentStatus mirrors an Estuary content-status response: the content
// record itself, the status of each storage deal, and a failure counter.
type ContentStatus struct {
	Content struct {
		Id int `json:"id"`
		Cid string `json:"cid"`
		Name string `json:"name"`
		UserId int `json:"userId"`
		Description string `json:"description"`
		Size int `json:"size"`
		Active bool `json:"active"`
		Offloaded bool `json:"offloaded"`
		Replication int `json:"replication"`
		AggregatedIn int `json:"aggregatedIn"`
		Aggregate bool `json:"aggregate"`
		Pinning bool `json:"pinning"`
		PinMeta string `json:"pinMeta"`
		Failed bool `json:"failed"`
		Location string `json:"location"`
		DagSplit bool `json:"dagSplit"`
	} `json:"content"`
	Deals []*DealStatus `json:"deals"`
	FailuresCount int `json:"failuresCount"`
}
// DealStatus bundles one storage deal with its data-transfer state and
// (when available) its on-chain state.
type DealStatus struct {
	Deal contentDeal `json:"deal"`
	TransferStatus *ChannelState `json:"transfer"`
	OnChainState *onChainDealState `json:"onChainState"`
}
// contentDeal mirrors Estuary's deal record for a piece of content,
// including the deal lifecycle timestamps (transfer, on-chain, sealing).
type contentDeal struct {
	Content uint `json:"content" gorm:"index:,option:CONCURRENTLY"`
	PropCid DbCID `json:"propCid"`
	Miner string `json:"miner"`
	DealID int64 `json:"dealId"`
	Failed bool `json:"failed"`
	Verified bool `json:"verified"`
	FailedAt time.Time `json:"failedAt,omitempty"`
	DTChan string `json:"dtChan" gorm:"index"`
	TransferStarted time.Time `json:"transferStarted"`
	TransferFinished time.Time `json:"transferFinished"`
	OnChainAt time.Time `json:"onChainAt"`
	SealedAt time.Time `json:"sealedAt"`
}
// DbCID wraps a cid.Cid for database (de)serialization.
type DbCID struct {
	CID cid.Cid
}
// onChainDealState holds the chain-level epochs of a storage deal.
type onChainDealState struct {
	SectorStartEpoch uint64 `json:"sectorStartEpoch"`
	LastUpdatedEpoch uint64 `json:"lastUpdatedEpoch"`
	SlashEpoch uint64 `json:"slashEpoch"`
}
// ChannelState describes the state of a data-transfer channel between two
// peers, including byte counts and a human-readable status message.
type ChannelState struct {
	//datatransfer.Channel
	// SelfPeer returns the peer this channel belongs to
	SelfPeer peer.ID `json:"selfPeer"`
	RemotePeer peer.ID `json:"remotePeer"`
	// Status is the current status of this channel
	Status uint64 `json:"status"`
	StatusStr string `json:"statusMessage"`
	// Sent returns the number of bytes sent
	Sent uint64 `json:"sent"`
	// Received returns the number of bytes received
	Received uint64 `json:"received"`
	// Message offers additional information about the current status
	Message string `json:"message"`
	BaseCid string `json:"baseCid"`
	ChannelID interface{} `json:"channelId"`
}
|
// Copyright 2021 The Mellium Contributors.
// Use of this source code is governed by the BSD 2-clause
// license that can be found in the LICENSE file.
package forward_test
import (
"encoding/xml"
"strings"
"testing"
"time"
"mellium.im/xmlstream"
"mellium.im/xmpp/forward"
"mellium.im/xmpp/stanza"
)
// TestWrap encodes a forwarded message built by forward.Wrap and compares
// the serialized XML against the expected literal.
func TestWrap(t *testing.T) {
	tokens := forward.Wrap(stanza.Message{
		Type: stanza.NormalMessage,
	}, "foo", time.Time{},
		xmlstream.Wrap(nil, xml.StartElement{Name: xml.Name{Local: "foo"}}),
	)
	var sb strings.Builder
	enc := xml.NewEncoder(&sb)
	if _, err := xmlstream.Copy(enc, tokens); err != nil {
		t.Fatalf("error encoding: %v", err)
	}
	if err := enc.Flush(); err != nil {
		t.Fatalf("error flushing: %v", err)
	}
	const expected = `<message type="normal"><body>foo</body><forwarded xmlns="urn:xmpp:forward:0"><delay xmlns="urn:xmpp:delay" stamp="0001-01-01T00:00:00Z"></delay><foo></foo></forwarded></message>`
	if out := sb.String(); out != expected {
		t.Fatalf("wrong output:\nwant=%s,\n got=%s", expected, out)
	}
}
// TestMarshal serializes an empty Forwarded element and compares the XML
// against the expected literal (zero time renders as the epoch stamp).
func TestMarshal(t *testing.T) {
	var fwd forward.Forwarded
	var sb strings.Builder
	enc := xml.NewEncoder(&sb)
	if _, err := fwd.WriteXML(enc); err != nil {
		t.Fatalf("error encoding: %v", err)
	}
	if err := enc.Flush(); err != nil {
		t.Fatalf("error flushing: %v", err)
	}
	const expected = `<forwarded xmlns="urn:xmpp:forward:0"><delay xmlns="urn:xmpp:delay" stamp="0001-01-01T00:00:00Z"></delay></forwarded>`
	if out := sb.String(); out != expected {
		t.Fatalf("wrong output:\nwant=%s,\n got=%s", expected, out)
	}
}
|
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package arc
import (
"context"
"math"
"path/filepath"
"time"
"chromiumos/tast/common/testexec"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/arc"
"chromiumos/tast/local/bundles/cros/arc/wm"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/display"
"chromiumos/tast/local/coords"
"chromiumos/tast/local/screenshot"
"chromiumos/tast/testing"
)
// init registers the WindowDefaultBounds test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:         WindowDefaultBounds,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Test default window size behavior",
		Contacts:     []string{"yhanada@chromium.org", "arc-framework+tast@google.com"},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"android_p", "chrome"},
		Fixture:      "arcBooted",
	})
}
// WindowDefaultBounds verifies default ARC window sizing in clamshell
// mode via two subtests (system-default size and manifest-specified
// size), capturing a screenshot when a subtest fails.
func WindowDefaultBounds(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(*arc.PreData).Chrome
	a := s.FixtValue().(*arc.PreData).ARC
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to create Test API connection: ", err)
	}
	if err := a.Install(ctx, arc.APKPath(wm.APKNameArcWMTestApp24)); err != nil {
		s.Fatal("Failed installing app: ", err)
	}
	// Reserve time for cleanup that runs on the original (unshortened) context.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 30*time.Second)
	defer cancel()
	// Force Chrome to be in clamshell mode.
	cleanup, err := ash.EnsureTabletModeEnabled(ctx, tconn, false)
	if err != nil {
		s.Fatal("Failed to set tablet mode disabled: ", err)
	}
	defer cleanup(cleanupCtx)
	// Reset WM state to default values.
	if err := a.Command(ctx, "am", "broadcast", "-a", "android.intent.action.arc.cleartaskstate").Run(testexec.DumpLogOnError); err != nil {
		s.Fatal("Failed to clear task states: ", err)
	}
	s.Run(ctx, "Default size", func(ctx context.Context, s *testing.State) {
		if err := wmSystemDefaultHandling(ctx, tconn, a); err != nil {
			// Screenshot aids debugging of layout failures.
			path := filepath.Join(s.OutDir(), "screenshot-default-size-failed-test.png")
			if err := screenshot.CaptureChrome(ctx, cr, path); err != nil {
				s.Log("Failed to capture screenshot: ", err)
			}
			s.Fatal("subtest failed: ", err)
		}
	})
	s.Run(ctx, "Manifest specified size", func(ctx context.Context, s *testing.State) {
		if err := wmSpecifiedSizeHandling(ctx, tconn, a); err != nil {
			path := filepath.Join(s.OutDir(), "screenshot-specified-size-failed-test.png")
			if err := screenshot.CaptureChrome(ctx, cr, path); err != nil {
				s.Log("Failed to capture screenshot: ", err)
			}
			s.Fatal("subtest failed: ", err)
		}
	})
}
// wmSystemDefaultHandling verifies that applications which use the metadata flag
// <meta-data android:name="WindowManagerPreference:FreeformWindowSize" android:value="system-default" />
// will restore to 80% of the screen size, while activities without the
// flag restore to the legacy phone size. For each activity it launches
// maximized, restores, checks the restored geometry, then re-maximizes.
func wmSystemDefaultHandling(ctx context.Context, tconn *chrome.TestConn, a *arc.ARC) error {
	const (
		// wmSystemDefaultActivity denotes an activity which follows the 'new system default size style' of 80% screen size.
		wmSystemDefaultActivity = "org.chromium.arc.testapp.windowmanager.NewDefaultSizeActivity"
		// wmNormalDefaultActivity denotes an activity which follows the 'normal restore size style' of phone size.
		wmNormalDefaultActivity = "org.chromium.arc.testapp.windowmanager.ResizeableUnspecifiedActivity"
	)
	// wmSizeTestFunc represents a function that tests if the window has a certain size.
	type wmSizeTestFunc func(context.Context, *chrome.TestConn, *arc.Activity) error
	for _, test := range []struct {
		name                string
		act                 string
		wantedRestoredState wmSizeTestFunc
	}{
		{"NormalSizeWindow", wmNormalDefaultActivity, checkPhoneSizeRestored},
		{"SystemDefaultSizeWindow", wmSystemDefaultActivity, check80PercentRestored},
	} {
		// Each subtest runs in a closure so its defers (Close/Stop) fire
		// before the next activity from the same package launches.
		if err := func() error {
			testing.ContextLogf(ctx, "Running subtest %q", test.name)
			act, err := arc.NewActivity(a, wm.Pkg24, test.act)
			if err != nil {
				return err
			}
			defer act.Close()
			if err := act.StartWithDefaultOptions(ctx, tconn); err != nil {
				return err
			}
			// Stop activity at exit time so that the next WM test can launch a different activity from the same package.
			defer act.Stop(ctx, tconn)
			// Activities are expected to start maximized in clamshell mode.
			if err := compareWindowState(ctx, act, arc.WindowStateMaximized); err != nil {
				return err
			}
			// Restore the window and verify the expected restored geometry.
			if _, err := ash.SetARCAppWindowState(ctx, tconn, act.PackageName(), ash.WMEventNormal); err != nil {
				return err
			}
			if err := ash.WaitForARCAppWindowState(ctx, tconn, act.PackageName(), ash.WindowStateNormal); err != nil {
				return err
			}
			if err := test.wantedRestoredState(ctx, tconn, act); err != nil {
				return err
			}
			// Maximize again and confirm both Ash and Android agree.
			if _, err := ash.SetARCAppWindowState(ctx, tconn, act.PackageName(), ash.WMEventMaximize); err != nil {
				return err
			}
			if err := ash.WaitForARCAppWindowState(ctx, tconn, act.PackageName(), ash.WindowStateMaximized); err != nil {
				return err
			}
			return compareWindowState(ctx, act, arc.WindowStateMaximized)
		}(); err != nil {
			return errors.Wrapf(err, "%q subtest failed", test.name)
		}
	}
	return nil
}
// checkPhoneSizeRestored checks that the window is in restored size portrait sized phone size.
func checkPhoneSizeRestored(ctx context.Context, tconn *chrome.TestConn, act *arc.Activity) error {
	if err := compareWindowState(ctx, act, arc.WindowStateNormal); err != nil {
		return err
	}
	winBounds, err := act.WindowBounds(ctx)
	if err != nil {
		return err
	}
	_, workArea, err := screenSizeAndInternalWorkArea(ctx, tconn)
	if err != nil {
		return err
	}
	if err := checkCentered(winBounds, *workArea); err != nil {
		return err
	}
	// A phone-shaped restore window must be taller than wide.
	if winBounds.Height <= winBounds.Width {
		return errors.Errorf("the phone sized window is not portrait sized: got (%d, %d)", winBounds.Width, winBounds.Height)
	}
	// We could consider checking now the phone window size (currently 412dp, 732dp).
	// However - beside the fact that this gets changed once in a while by UX +
	// there is a chance that the window gets cropped on low res devices. As such a
	// direct test for the size does not seem to be important enough.
	// => For now we are happy to simply see that it is portrait sized.
	return nil
}
// check80PercentRestored checks that the window has 80% of the screen size in the restored state.
func check80PercentRestored(ctx context.Context, tconn *chrome.TestConn, act *arc.Activity) error {
	if err := compareWindowState(ctx, act, arc.WindowStateNormal); err != nil {
		return err
	}
	bounds, err := act.WindowBounds(ctx)
	if err != nil {
		return err
	}
	screen, workArea, err := screenSizeAndInternalWorkArea(ctx, tconn)
	if err != nil {
		return err
	}
	if err := checkCentered(bounds, *workArea); err != nil {
		return err
	}
	const (
		// defaultSizePercentage is the size of a restored window in percents of the screen size.
		defaultSizePercentage = 80.0
		// epsilonFractionInPercent is the allowable derivation of the screensize in percent for the new default size handling.
		epsilonFractionInPercent = 2.0
	)
	// Check that the size is ~80% of the screen size (not the work space).
	// BUG FIX: the error messages used to report defaultSizePercentage (80%)
	// as the bound, but the value actually compared against is
	// epsilonFractionInPercent (2%).
	deltaFractionX := math.Abs(defaultSizePercentage - 100.0*float64(bounds.Width)/float64(screen.Width))
	if deltaFractionX > epsilonFractionInPercent {
		return errors.Errorf("the width of the window diverts too much: got %f%%; wants <= %f%%", deltaFractionX, epsilonFractionInPercent)
	}
	deltaFractionY := math.Abs(defaultSizePercentage - 100.0*float64(bounds.Height)/float64(screen.Height))
	if deltaFractionY > epsilonFractionInPercent {
		return errors.Errorf("the height of the window diverts too much: got %f%%; wants <= %f%%", deltaFractionY, epsilonFractionInPercent)
	}
	return nil
}
// wmSpecifiedSizeHandling verifies that applications which sets the layout parameter
// <layout android:defaultWidth="XXX" android:defaultHeight="XXX" />
// will be launched with the specified size.
func wmSpecifiedSizeHandling(ctx context.Context, tconn *chrome.TestConn, a *arc.ARC) error {
	const (
		wmSizeSpecifiedActivity = "org.chromium.arc.testapp.windowmanager.SizeSpecifiedActivity"
		epsilon                 = 2 // used to compare obtained bounds size and expected size in DP.
	)
	act, err := arc.NewActivity(a, wm.Pkg24, wmSizeSpecifiedActivity)
	if err != nil {
		return err
	}
	defer act.Close()
	if err := act.StartWithDefaultOptions(ctx, tconn); err != nil {
		return err
	}
	defer act.Stop(ctx, tconn)
	density, err := act.DisplayDensity(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to get physical display density")
	}
	if err := compareWindowState(ctx, act, arc.WindowStateNormal); err != nil {
		return err
	}
	pxBounds, err := act.SurfaceBounds(ctx)
	if err != nil {
		return err
	}
	// Translate the surface bounds from pixels to DP before comparing with
	// the manifest-specified size.
	gotSize := coords.ConvertBoundsFromPXToDP(pxBounds, density).Size()
	wantSize := coords.Size{Width: 600, Height: 500} // in DP, specified in AndroidManifest.xml
	if !similarSize(gotSize, wantSize, epsilon) {
		return errors.Errorf("the activity doesn't have an expected size: got %+v; want %+v", gotSize, wantSize)
	}
	return nil
}
// compareWindowState compares the activity window state with the wanted one.
// Returns nil only if they are equal.
func compareWindowState(ctx context.Context, act *arc.Activity, wanted arc.WindowState) error {
	actual, err := act.GetWindowState(ctx)
	if err != nil {
		return err
	}
	if actual == wanted {
		return nil
	}
	return errors.Errorf("invalid window state: got %v; want %v", actual, wanted)
}
// screenSizeAndInternalWorkArea returns the screen size and the workspace in pixels of the currently selected internal display.
func screenSizeAndInternalWorkArea(ctx context.Context, tconn *chrome.TestConn) (*coords.Size, *coords.Rect, error) {
	info, err := display.GetInternalInfo(ctx, tconn)
	if err != nil {
		// This could be fizz which does not have an internal screen.
		// Fall back to the primary display instead.
		all, err := display.GetInfo(ctx, tconn)
		if err != nil {
			return nil, nil, errors.Wrap(err, "failed to get any display info")
		}
		for i := range all {
			if all[i].IsPrimary {
				info = &all[i]
				break
			}
		}
		if info == nil {
			return nil, nil, errors.New("failed to get any display info")
		}
		testing.ContextLog(ctx, "Could not get an internal display. Trying with the primary one")
	}
	mode, err := info.GetSelectedMode()
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to get selected display mode")
	}
	// Convert both rectangles from DP to physical pixels before returning.
	size := coords.ConvertBoundsFromDPToPX(info.Bounds, mode.DeviceScaleFactor).Size()
	area := coords.ConvertBoundsFromDPToPX(info.WorkArea, mode.DeviceScaleFactor)
	return &size, &area, nil
}
// checkCentered is checking that a given rectangle is (roughly) in the middle of the screen.
// We cannot do an exact job here as we might see rounding issues in X because of dp/px translations.
// For Y we have the additional problem that the caption height is unknown to Android in Pi
// as it is not part of the window, and Android will guess a height.
func checkCentered(bounds, workArea coords.Rect) error {
	// BUG FIX: the two epsilon constants were previously named the wrong way
	// around (the "vertical" one gated the horizontal check and vice versa).
	const (
		// screenCenterHorizontalEpsilon allows for dp/px rounding in the
		// horizontal center position.
		screenCenterHorizontalEpsilon = 3
		// screenCenterVerticalEpsilon is larger since we need to allow for a
		// caption height delta between Chrome and Android.
		screenCenterVerticalEpsilon = 25
	)
	deltaX := int(math.Abs((float64(bounds.Left) + float64(bounds.Width)/2.0 - (float64(workArea.Left) + float64(workArea.Width)/2.0))))
	if deltaX > screenCenterHorizontalEpsilon {
		return errors.Errorf("window is not horizontally centered: got %dpx; want less than %dpx", deltaX, screenCenterHorizontalEpsilon)
	}
	deltaY := int(math.Abs((float64(bounds.Top) + float64(bounds.Height)/2.0) - (float64(workArea.Top) + float64(workArea.Height)/2.0)))
	if deltaY > screenCenterVerticalEpsilon {
		// BUG FIX: message used to read "not vertically not centered".
		return errors.Errorf("window is not vertically centered: got %dpx; want less than %dpx", deltaY, screenCenterVerticalEpsilon)
	}
	// This expects that the caption is not part of the window (P case, might not be true for R).
	if bounds.Top < 0 {
		return errors.Errorf("a window should never go negative, making the caption inaccessible: got %d", bounds.Top)
	}
	if bounds.Height >= workArea.Height || bounds.Width >= workArea.Width {
		return errors.Errorf("a window should never be bigger than the workspace: got (%d, %d); wants <= (%d, %d)", bounds.Width, bounds.Height, workArea.Width, workArea.Height)
	}
	return nil
}
// similarSize compares two coords.Size whether they are similar by epsilon.
func similarSize(l, r coords.Size, epsilon int) bool {
	dw := l.Width - r.Width
	if dw < 0 {
		dw = -dw
	}
	dh := l.Height - r.Height
	if dh < 0 {
		dh = -dh
	}
	return dw <= epsilon && dh <= epsilon
}
|
package rotationalcipher
import (
"unicode"
)
// RotationalCipher implements the Caesar cipher: every ASCII letter in plain
// is rotated forward by shiftKey positions (wrapping within its case); all
// other runes pass through unchanged. shiftKey may be any integer — negative
// values rotate backwards.
func RotationalCipher(plain string, shiftKey int) string {
	// Normalize the key into [0, 25]. Go's % keeps the sign of the dividend,
	// so a negative shiftKey would otherwise produce runes outside a-z/A-Z.
	shift := rune(((shiftKey % 26) + 26) % 26)
	res := make([]rune, 0, len(plain))
	for _, r := range plain {
		// Only shift ASCII letters: applying the ASCII arithmetic to other
		// Unicode letters (e.g. 'é') would produce garbage runes.
		if r <= unicode.MaxASCII && unicode.IsLetter(r) {
			if unicode.IsLower(r) {
				res = append(res, 'a'+(r-'a'+shift)%26)
			} else {
				res = append(res, 'A'+(r-'A'+shift)%26)
			}
		} else {
			res = append(res, r)
		}
	}
	return string(res)
}
|
// Copyright 2018 Istio Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package linter
import (
"log"
"os"
"path/filepath"
"strings"
)
// TestType is type ID of tests.
type TestType int

// All types of tests to parse. The explicit `TestType = iota` repetition on
// every line was redundant — within a const block the type and expression
// carry over implicitly. Values are unchanged.
const (
	UnitTest  TestType = iota // UnitTest == 0
	IntegTest                 // IntegTest == 1
	E2eTest                   // E2eTest == 2
	NonTest                   // NonTest == 3
)
// PathFilter filters out test files and detects test type.
type PathFilter struct {
	// WPaths maps a whitelisted path pattern to the set of rule names that
	// are skipped for files matching it (populated from WhitelistPath).
	WPaths map[string]map[string]bool // absolute paths that are whitelisted.
}
// NewPathFilter creates a new PathFilter object.
func NewPathFilter() PathFilter {
	pf := PathFilter{WPaths: map[string]map[string]bool{}}
	pf.getWhitelistedPathsMap()
	return pf
}
// getWhitelistedPathsMap converts whitelistedPaths to a map that maps path to rules.
func (pf *PathFilter) getWhitelistedPathsMap() {
	for path, rules := range WhitelistPath {
		ruleSet := make(map[string]bool, len(rules))
		for _, rule := range rules {
			ruleSet[rule] = true
		}
		pf.WPaths[path] = ruleSet
	}
}
// GetTestType checks path absp and decides whether absp is a test file. It returns the test type
// for a test file, together with the set of rules skipped for that path.
// If one of the following cases meet, path absp is a valid path to test file.
// (1) e2e test file
// .../e2e/.../*_test.go
// (2) integration test file
// .../integ/.../*_test.go
// .../integ/.../*_integ_test.go
// .../*_integ_test.go
// (3) unit test file
// .../*_test.go
func (pf *PathFilter) GetTestType(absp string, info os.FileInfo) (TestType, map[string]bool) {
	// sRules stores skipped rules for file path absp.
	sRules := map[string]bool{}
	paths := strings.Split(absp, "/")
	if len(paths) == 0 {
		return NonTest, sRules
	}
	// Skip path which is not a go file.
	if info.IsDir() || !strings.HasSuffix(absp, ".go") {
		return NonTest, sRules
	}
	// Check whether path is whitelisted; the last matching pattern wins.
	for wp, ruleMap := range pf.WPaths {
		matched, err := filepath.Match(wp, absp)
		if err != nil {
			log.Printf("file match returns error: %v", err)
		}
		if matched {
			sRules = ruleMap
		}
	}
	var isUnderE2eDir, isUnderIntegDir bool
	for _, p := range paths {
		switch p {
		case "e2e":
			isUnderE2eDir = true
		case "integ":
			isUnderIntegDir = true
		}
	}
	// base is the file name component, computed once instead of repeating
	// paths[len(paths)-1] in every branch.
	base := paths[len(paths)-1]
	switch {
	case isUnderE2eDir && isUnderIntegDir:
		log.Printf("Invalid path %q under both e2e directory and integ directory", absp)
		return NonTest, sRules
	case isUnderE2eDir && strings.HasSuffix(base, "_test.go"):
		return E2eTest, sRules
	case (isUnderIntegDir && strings.HasSuffix(base, "_test.go")) || strings.HasSuffix(base, "_integ_test.go"):
		return IntegTest, sRules
	case strings.HasSuffix(base, "_test.go"):
		// Any *_integ_test.go file was already classified by the branch
		// above, so the previous extra !HasSuffix check here was redundant.
		return UnitTest, sRules
	}
	return NonTest, sRules
}
|
// Created by Noy Hillel
// https://github.com/noy
// You are free to use/modify this software in your own projects
package tsql
import (
"testing"
"database/sql"
_ "github.com/go-sql-driver/mysql"
)
// TestNewSQLClient exercises a live query round trip against a local MySQL
// instance. Fixes: the sql.Open and QueryResult errors were previously
// ignored (a failure made rows.Next panic on a nil rows), and the rows
// handle was never closed (connection leak).
func TestNewSQLClient(t *testing.T) {
	database, err := sql.Open("mysql", "username:password@tcp(localhost)/database")
	if err != nil {
		t.Fatalf("sql.Open: %v", err)
	}
	database.SetMaxIdleConns(0)
	sqlClient := NewSQLClient(database, err)
	query := sqlClient.Select("username, id").From("users").Where("username").Like("test", true)
	rows, err := query.QueryResult(true)
	if err != nil {
		t.Fatalf("QueryResult: %v", err)
	}
	defer rows.Close()
	var id int
	var username, password string
	for rows.Next() {
		if err := rows.Scan(&username, &id); err != nil {
			t.Log(err.Error())
		} else {
			t.Log(id, username, password) // will not print password as we did not include it in our query
		}
	}
	// Surface any iteration error that terminated the loop early.
	if err := rows.Err(); err != nil {
		t.Log(err.Error())
	}
}
// TestClient_From checks that a simple Select/From/Where/Equals chain
// renders the expected SQL text.
func TestClient_From(t *testing.T) {
	// tests a little bit more than just from but still
	want := "SELECT * FROM test WHERE test = 'test'"
	client := NewSQLClient(nil, nil)
	stmt := client.Select("*").From("test").Where("test").Equals("test", true)
	if stmt.Query != want {
		t.Errorf("Something went wrong. TsqlStatement=%v and query string=%v", stmt.Query, want)
		return
	}
	t.Log("Tests passed: " + stmt.Query)
}
// TestClient_BigQuery renders a long chained statement (joins, grouping,
// ordering) and compares it against the expected SQL text.
func TestClient_BigQuery(t *testing.T) {
	// Don't even think this query makes sense lol
	want := "SELECT one,two,three FROM table.test WHERE test != 3 AND test1 = 5 AND test2 >= 50 " +
		"LEFT OUTER JOIN table.test3 ON table.test.test2 = table.test3.test2 GROUP BY table.test ORDER BY table.test.test1 DESC"
	client := NewSQLClient(nil, nil)
	stmt := client.Select("one,two,three").From("table.test").Where("test").NotEqual(3).And("test1").Equals("5", false).
		And("test2").GTE("50", false).LeftOuterJoin("table.test3").On("table.test.test2 = table.test3.test2").GroupBy("table.test").
		OrderBy("table.test.test1", false)
	if stmt.Query != want {
		t.Errorf("Something went wrong. TsqlStatement=%v and query string=%v", stmt.Query, want)
		return
	}
	t.Log("Tests passed: " + stmt.Query)
}
// TestClient_GroupBy checks GROUP BY rendering with multiple columns.
func TestClient_GroupBy(t *testing.T) {
	want := "SELECT * FROM test WHERE s = 1 GROUP BY one,two,three"
	client := NewSQLClient(nil, nil)
	stmt := client.Select("*").From("test").Where("s").Equals("1", false).GroupBy("one", "two", "three")
	if stmt.Query != want {
		t.Errorf("Something went wrong. TsqlStatement=%v and query string=%v", stmt.Query, want)
		return
	}
	t.Log("Tests passed: " + stmt.Query)
}
func TestClient_Values(t *testing.T) {
statement := "INSERT INTO test VALUES('one','two','three')"
client := NewSQLClient(nil, nil)
tSQLStatement := client.InsertInto("test").Values("'one'", "'two'", "'three'")
if tSQLStatement.Query == statement {
t.Log("Tests passed: " + tSQLStatement.Query)
} else {
t.Errorf("Something went wrong. TsqlStatement=%v and query string=%v", tSQLStatement.Query, statement)
}
} |
package main
import (
"flag"
"io"
"log"
"net/http"
"netunnel/message"
"time"
"github.com/gorilla/websocket"
)
var (
	// addr is the listen address of the websocket endpoint.
	// BUG FIX: the usage string read "wesocket".
	addr = flag.String("addr", ":88", "websocket address")

	upgrader = websocket.Upgrader{
		ReadBufferSize:  1024,
		WriteBufferSize: 1024,
		// CheckOrigin gates the handshake: only requests carrying a
		// PHPSESSID cookie are accepted.
		CheckOrigin: func(r *http.Request) bool {
			log.Println(r.Cookies())
			// NOTE(review): this delays every handshake by 5 seconds; it
			// looks like debugging/throttling leftover — confirm whether it
			// is intentional before removing.
			time.Sleep(time.Second * 5)
			if _, err := r.Cookie("PHPSESSID"); err != nil {
				return false
			}
			return true
		},
	}
)
// handleConn upgrades the HTTP request to a websocket and echoes JSON
// messages back; msg.Tag selects which part of the message is returned.
// FIX: WriteJSON errors were silently ignored, so the loop kept spinning on
// a dead connection; we now log and terminate the handler instead.
func handleConn(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Println("Upgrade:", err)
		return
	}
	defer conn.Close()
	log.Println(conn.RemoteAddr())
	for {
		msg := new(message.Format)
		if err := conn.ReadJSON(msg); err != nil {
			if err == io.EOF {
				// NOTE(review): retrying on raw io.EOF preserves the original
				// behavior, but confirm gorilla actually surfaces io.EOF here —
				// a permanently closed connection would spin in this loop.
				continue
			}
			log.Println(err)
			return
		}
		log.Println(msg)
		// Tag 0 echoes only the mandatory part; anything else echoes Only.
		var payload interface{}
		if msg.Tag == 0 {
			payload = msg.Only.Must
		} else {
			payload = msg.Only
		}
		if err := conn.WriteJSON(payload); err != nil {
			log.Println(err)
			return
		}
	}
}
// main wires the /ws endpoint and serves until the listener fails.
func main() {
	flag.Parse()
	http.HandleFunc("/ws", handleConn)
	if err := http.ListenAndServe(*addr, nil); err != nil {
		// BUG FIX: message previously said "ListenAndServer".
		log.Fatal("ListenAndServe: ", err)
	}
}
|
package handler
import (
"fmt"
"net/http"
"runtime/debug"
)
// StackTraceMiddlewareHandler wraps an http.Handler and converts any panic
// raised while serving a request into an HTTP 500 response containing the
// panic value and the goroutine's stack trace.
type StackTraceMiddlewareHandler struct {
	// Handler is the wrapped handler that serves the actual request.
	Handler http.Handler
}
// ServeHTTP delegates to the wrapped handler and recovers from panics,
// reporting them as an internal-server-error response.
// NOTE(review): the response body includes the full stack trace — handy in
// development, but it leaks internals if this middleware faces untrusted
// clients; confirm the deployment context.
func (h StackTraceMiddlewareHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	defer func() {
		p := recover()
		if p == nil {
			return
		}
		body := fmt.Sprintf("%v\n\n%v", p, string(debug.Stack()))
		http.Error(w, body, http.StatusInternalServerError)
	}()
	h.Handler.ServeHTTP(w, r)
}
|
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package extensions_test
import (
"bytes"
"crypto/x509"
"crypto/x509/pkix"
"encoding/gob"
"strconv"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zeebo/errs"
"storj.io/common/identity"
"storj.io/common/peertls/extensions"
"storj.io/common/peertls/testpeertls"
"storj.io/common/storj"
"storj.io/common/testrand"
)
// TestHandlers_Register checks that HandlerFactories.Register stores factories
// whose NewHandlerFunc yields a handler wired to exactly the options,
// extension and chains it was registered for.
func TestHandlers_Register(t *testing.T) {
	var (
		handlers = extensions.HandlerFactories{}
		ids      []*extensions.ExtensionID
		opts     []*extensions.Options
		exts     []pkix.Extension
		chains   [][][]*x509.Certificate
	)
	for idx := 0; idx < 5; idx++ {
		// Copy the loop variable so the closures below capture this
		// iteration's value (pre-Go 1.22 capture semantics).
		i := idx
		ids = append(ids, &extensions.ExtensionID{2, 999, 999, i})
		opts = append(opts, &extensions.Options{})
		exts = append(exts, pkix.Extension{Id: *ids[i]})
		_, chain, err := testpeertls.NewCertChain(2, storj.LatestIDVersion().Number)
		require.NoError(t, err)
		chains = append(chains, identity.ToChains(chain))
		testHandler := extensions.NewHandlerFactory(
			ids[i],
			func(opt *extensions.Options) extensions.HandlerFunc {
				// The factory must be handed exactly the options used below.
				assert.Equal(t, opts[i], opt)
				assert.NotNil(t, opt)
				return func(ext pkix.Extension, chain [][]*x509.Certificate) error {
					assert.NotNil(t, ext)
					assert.Equal(t, exts[i], ext)
					assert.NotNil(t, ext.Id)
					assert.Equal(t, *ids[i], ext.Id)
					assert.NotNil(t, chain)
					assert.Equal(t, chains[i], chain)
					// Sentinel error proves this handler actually ran.
					return errs.New(strconv.Itoa(i))
				}
			},
		)
		handlers.Register(testHandler)
		// Invoke the freshly registered factory and expect the sentinel error.
		err = handlers[i].NewHandlerFunc(opts[i])(exts[i], chains[i])
		assert.Errorf(t, err, strconv.Itoa(i))
	}
}
// TestHandlers_WithOptions checks that HandlerFactories.WithOptions builds a
// map from extension ID to handler func for every registered factory.
func TestHandlers_WithOptions(t *testing.T) {
	var (
		handlers = extensions.HandlerFactories{}
		ids      []*extensions.ExtensionID
		opts     []*extensions.Options
		exts     []pkix.Extension
		chains   [][][]*x509.Certificate
	)
	for idx := 0; idx < 5; idx++ {
		// Copy the loop variable so the closures below capture this
		// iteration's value (pre-Go 1.22 capture semantics).
		i := idx
		ids = append(ids, &extensions.ExtensionID{2, 999, 999, i})
		opts = append(opts, &extensions.Options{})
		exts = append(exts, pkix.Extension{Id: *ids[i]})
		_, chain, err := testpeertls.NewCertChain(2, storj.LatestIDVersion().Number)
		require.NoError(t, err)
		chains = append(chains, identity.ToChains(chain))
		testHandler := extensions.NewHandlerFactory(
			ids[i],
			func(opt *extensions.Options) extensions.HandlerFunc {
				assert.Equal(t, opts[i], opt)
				assert.NotNil(t, opt)
				return func(ext pkix.Extension, chain [][]*x509.Certificate) error {
					assert.NotNil(t, ext)
					assert.Equal(t, exts[i], ext)
					assert.NotNil(t, ext.Id)
					assert.Equal(t, *ids[i], ext.Id)
					assert.NotNil(t, chain)
					assert.Equal(t, chains[i], chain)
					return errs.New(strconv.Itoa(i))
				}
			},
		)
		handlers.Register(testHandler)
		// The map returned by WithOptions must contain an entry for every
		// registered factory, keyed by its extension ID.
		handlerFuncMap := handlers.WithOptions(&extensions.Options{})
		id := handlers[i].ID()
		require.NotNil(t, id)
		handleFunc, ok := handlerFuncMap[id]
		assert.True(t, ok)
		assert.NotNil(t, handleFunc)
	}
}
// TestRevocationMarshaling checks that the custom Revocation marshaler is
// byte-compatible with the legacy encoding/gob representation: the custom
// output must equal historical gob bytes, decode with a stock gob decoder,
// and round-trip through the custom Unmarshal.
func TestRevocationMarshaling(t *testing.T) {
	for _, tt := range []struct {
		// gob is the older version of Gob encoding
		gobbytes   []byte
		revocation extensions.Revocation
	}{
		{
			gobbytes:   []byte{0x40, 0xff, 0x81, 0x3, 0x1, 0x1, 0xa, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1, 0xff, 0x82, 0x0, 0x1, 0x3, 0x1, 0x9, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x1, 0x4, 0x0, 0x1, 0x7, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x1, 0xa, 0x0, 0x1, 0x9, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1, 0xa, 0x0, 0x0, 0x0, 0x3, 0xff, 0x82, 0x0},
			revocation: extensions.Revocation{},
		}, {
			gobbytes:   []byte{0x40, 0xff, 0x81, 0x3, 0x1, 0x1, 0xa, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1, 0xff, 0x82, 0x0, 0x1, 0x3, 0x1, 0x9, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x1, 0x4, 0x0, 0x1, 0x7, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x1, 0xa, 0x0, 0x1, 0x9, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1, 0xa, 0x0, 0x0, 0x0, 0x5, 0xff, 0x82, 0x1, 0x2, 0x0},
			revocation: extensions.Revocation{Timestamp: 1},
		}, {
			gobbytes:   []byte{0x40, 0xff, 0x81, 0x3, 0x1, 0x1, 0xa, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1, 0xff, 0x82, 0x0, 0x1, 0x3, 0x1, 0x9, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x1, 0x4, 0x0, 0x1, 0x7, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x1, 0xa, 0x0, 0x1, 0x9, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1, 0xa, 0x0, 0x0, 0x0, 0xd, 0xff, 0x82, 0x1, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x0},
			revocation: extensions.Revocation{Timestamp: 9223372036854775807},
		}, {
			gobbytes:   []byte{0x40, 0xff, 0x81, 0x3, 0x1, 0x1, 0xa, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1, 0xff, 0x82, 0x0, 0x1, 0x3, 0x1, 0x9, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x1, 0x4, 0x0, 0x1, 0x7, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x1, 0xa, 0x0, 0x1, 0x9, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1, 0xa, 0x0, 0x0, 0x0, 0x8, 0xff, 0x82, 0x2, 0x3, 0x1, 0x2, 0x3, 0x0},
			revocation: extensions.Revocation{KeyHash: []byte{1, 2, 3}},
		}, {
			gobbytes:   []byte{0x40, 0xff, 0x81, 0x3, 0x1, 0x1, 0xa, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1, 0xff, 0x82, 0x0, 0x1, 0x3, 0x1, 0x9, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x1, 0x4, 0x0, 0x1, 0x7, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x1, 0xa, 0x0, 0x1, 0x9, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1, 0xa, 0x0, 0x0, 0x0, 0x8, 0xff, 0x82, 0x3, 0x3, 0x5, 0x4, 0x3, 0x0},
			revocation: extensions.Revocation{Signature: []byte{5, 4, 3}},
		}, {
			gobbytes: []byte{0x40, 0xff, 0x81, 0x3, 0x1, 0x1, 0xa, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1, 0xff, 0x82, 0x0, 0x1, 0x3, 0x1, 0x9, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x1, 0x4, 0x0, 0x1, 0x7, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x1, 0xa, 0x0, 0x1, 0x9, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1, 0xa, 0x0, 0x0, 0x0, 0x12, 0xff, 0x82, 0x1, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x1, 0x3, 0x5, 0x4, 0x3, 0x0},
			revocation: extensions.Revocation{
				Timestamp: 9223372036854775807,
				KeyHash:   []byte{5, 4, 3}},
		}, {
			gobbytes: []byte{0x40, 0xff, 0x81, 0x3, 0x1, 0x1, 0xa, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1, 0xff, 0x82, 0x0, 0x1, 0x3, 0x1, 0x9, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x1, 0x4, 0x0, 0x1, 0x7, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x1, 0xa, 0x0, 0x1, 0x9, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1, 0xa, 0x0, 0x0, 0x0, 0x12, 0xff, 0x82, 0x1, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x2, 0x3, 0x5, 0x4, 0x3, 0x0},
			revocation: extensions.Revocation{
				Timestamp: 9223372036854775807,
				Signature: []byte{5, 4, 3}},
		}, {
			gobbytes: []byte{0x40, 0xff, 0x81, 0x3, 0x1, 0x1, 0xa, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1, 0xff, 0x82, 0x0, 0x1, 0x3, 0x1, 0x9, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x1, 0x4, 0x0, 0x1, 0x7, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x1, 0xa, 0x0, 0x1, 0x9, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1, 0xa, 0x0, 0x0, 0x0, 0xd, 0xff, 0x82, 0x2, 0x3, 0x1, 0x2, 0x3, 0x1, 0x3, 0x5, 0x4, 0x3, 0x0},
			revocation: extensions.Revocation{
				KeyHash:   []byte{1, 2, 3},
				Signature: []byte{5, 4, 3}},
		}, {
			gobbytes: nil, // skip the encoding test for this
			revocation: extensions.Revocation{
				Timestamp: testrand.Int63n(9223372036854775807),
				KeyHash:   testrand.BytesInt(testrand.Intn(600000)),
				Signature: testrand.BytesInt(testrand.Intn(500000))},
		},
	} {
		customEncoded, err := tt.revocation.Marshal()
		require.NoError(t, err)
		if tt.gobbytes != nil {
			// compare gob marshaler output with our marshaler
			require.Equal(t, tt.gobbytes, customEncoded)
		}
		// The custom output must still decode with a plain gob decoder.
		revocatationDocodeGob := &extensions.Revocation{}
		gobDecoder := gob.NewDecoder(bytes.NewBuffer(customEncoded))
		err = gobDecoder.Decode(revocatationDocodeGob)
		require.NoError(t, err)
		require.Equal(t, tt.revocation, *revocatationDocodeGob)
		// unmarshal data from our marshaler with our marshaler
		revocatationCustom := extensions.Revocation{}
		err = revocatationCustom.Unmarshal(customEncoded)
		require.NoError(t, err)
		require.Equal(t, tt.revocation, revocatationCustom)
		require.Equal(t, *revocatationDocodeGob, revocatationCustom)
	}
}
// TestRevocationMarshalingInvalid verifies that Unmarshal rejects bytes that
// are not a gob-encoded Revocation.
func TestRevocationMarshalingInvalid(t *testing.T) {
	// Gob-encode a value of a different type; Unmarshal must reject it.
	var encoded bytes.Buffer
	err := gob.NewEncoder(&encoded).Encode(extensions.Options{})
	require.NoError(t, err)

	revocation := extensions.Revocation{}
	require.Error(t, revocation.Unmarshal(encoded.Bytes()))

	// Random bytes must not unmarshal either.
	revocation = extensions.Revocation{}
	require.Error(t, revocation.Unmarshal(testrand.BytesInt(10000)))
}
// TestRevocationDecoderCrashers replays inputs that previously crashed the
// revocation decoder; Unmarshal may return an error but must not panic.
func TestRevocationDecoderCrashers(t *testing.T) {
	for _, crasher := range []string{
		"@\xff\x81\x03\x01\x01\nRevocation\x01\xff\x82\x00\x01\x03\x01\tTimestamp\x01\x04\x00\x01\aKeyHash\x01\n\x00\x01\tSignature\x01\n\x00\x00\x00\r\xff\x82\x02\xf8000000000",
	} {
		var rev extensions.Revocation
		_ = rev.Unmarshal([]byte(crasher)) // only checking for panics.
	}
}
|
package uintshamir
import (
"fmt"
"crypto/rand"
"math/big"
)
// The SecretSharingScheme defines the parameters for the Shamir secret sharing.
type SecretSharingScheme struct {
	p int64 // prime modulus of the field the shares live in.
	t int   // threshold: t+1 shares are required to reconstruct.
	n int   // total number of shares produced.
}

// Pre-canned parameter sets used for examples/tests.
var standardSetting = SecretSharingScheme{p: 5, t: 2, n: 3}
var wikipediaExample = SecretSharingScheme{p: 11, t: 2, n: 5}

// point is one share: an evaluation (x, h(x)) of the secret polynomial.
type point = struct {
	x, y int64
}

// polynomial holds coefficients in ascending-degree order; index 0 is the secret.
type polynomial = []int64
// Share splits a secret into ss.n shares (i, h(i)) of a random degree-t
// polynomial h with h(0) == secret, arithmetic modulo ss.p.
// Fixes: the error from crypto/rand was silently discarded, and debug
// fmt.Println calls printed the secret polynomial and every share to stdout —
// a secret-leak in a secret-sharing scheme — both removed.
func (ss *SecretSharingScheme) Share(secret int64) []point {
	secret = secret % ss.p

	// h(x) = secret + c1*x + ... + ct*x^t with random coefficients in [0, p).
	h := polynomial{secret}
	for coefficient := 1; coefficient <= ss.t; coefficient++ {
		randBigInt, err := rand.Int(rand.Reader, big.NewInt(ss.p))
		if err != nil {
			// A crypto/rand failure is unrecoverable; continuing with a
			// predictable coefficient would silently break the scheme.
			panic(err)
		}
		h = append(h, randBigInt.Int64())
	}

	var shares []point
	for i := 1; i <= ss.n; i++ { // TODO: optimize and make overflow safe.
		// Evaluate h at x=i using running powers of x.
		y := secret
		x := int64(i)
		xpower := x
		for coefficient := 1; coefficient <= ss.t; coefficient++ {
			y += h[coefficient] * xpower
			xpower = xpower * x
		}
		shares = append(shares, point{x: x, y: y % ss.p})
	}
	return shares
}
// Reconstruct extracts the secret from t or more shares using Lagrange
// interpolation at x=0 over the field Z_p. With fewer than t+1 shares the
// polynomial is underdetermined and 0 is returned.
func (ss *SecretSharingScheme) Reconstruct(shares []point) int64 {
	k := len(shares)
	if k < ss.t+1 {
		fmt.Println("Not enough shares to reconstruct")
		return 0
	}
	//todo check k distinct x's
	// nums[i]/dens[i] is the Lagrange basis value l_i(0) for share i:
	// product over j != i of (0 - x_j) / (x_i - x_j).
	var nums []int64
	var dens []int64
	for _, share := range shares {
		fmt.Println("Coordinates", share.x, share.y)
		num := int64(1)
		den := int64(1)
		for _, otherShare := range shares {
			if share.x == otherShare.x {
				continue
			}
			num = num * (0 - otherShare.x)
			den = den * (share.x - otherShare.x)
		}
		nums = append(nums, num)
		dens = append(dens, den)
	}
	// Sum y_i * l_i(0) with all divisions performed via modular inverses;
	// the common denominator trick keeps intermediate values integral.
	den := int64(1)
	for _, d := range dens {
		den *= d
	}
	num := int64(0)
	for i := range dens {
		num += divideMod(nums[i]*den*shares[i].y%ss.p, dens[i], ss.p)
	}
	// Normalize into [0, p) before returning.
	return (divideMod(num, den, ss.p) + ss.p) % ss.p
	// The commented-out variant below computed each delta_i directly with a
	// multiplicative inverse; kept for reference.
	// secret := int64(0)
	// for _, share := range(shares) {
	// fmt.Println("Coordinates", share.x, share.y)
	// num := int64(1)
	// den := int64(1)
	// for _, otherShare := range(shares) {
	// if share.x == otherShare.x { continue}
	// num = num * (otherShare.x)
	// den = den * (otherShare.x - share.x)
	// }
	// //num*(den)^-1= delta_i
	// secret += share.y * num * multiplicativeInverse(den, ss.p)
	// }
	// return secret % ss.p
}
//**********Number theory:
func extEuclid(a, b int64) (int64, int64) {
//fmt.Println("a", a, "b", b)
x := int64(0)
lastX := int64(1)
y := int64(1)
lastY := int64(0)
for b != 0 {
quot := a / b
//fmt.Println(quot)
a, b = b, a % b
//fmt.Println("a", a, "b", b)
x, lastX = lastX - quot * x, x
y, lastY = lastY - quot * y, y
}
//lastX is multiplicative inverse of a mod b
return lastX, lastY
}
// extGcd is the recursive extended Euclidean algorithm: it returns (g, x, y)
// such that a*x + b*y == g == gcd(a, b).
// FIX: this was an unimplemented stub that always returned (0, 1, 2) with
// the recursive call commented out.
func extGcd(a, b int64) (int64, int64, int64) {
	if a == 0 {
		return b, 0, 1
	}
	gcd, x, y := extGcd(b%a, a)
	return gcd, y - (b/a)*x, x
}
// divideMod computes dividend * divisor^-1, where the inverse is taken
// modulo p via the extended Euclidean algorithm.
// NOTE(review): the product is deliberately NOT reduced modulo p (the `% mod`
// is commented out) and the inverse may be negative — Reconstruct appears to
// rely on this; confirm with callers before "fixing".
func divideMod(dividend, divisor, p int64) int64 {
	divisorInv, _ := extEuclid(mod(divisor, p), p)
	//fmt.Println(divisorInv)
	//for divisorInv < 0 {divisorInv += int64(mod)}
	return (dividend * divisorInv) //% mod
}
// multiplicativeInverse returns element^-1 modulo m, normalized into [0, m).
// (The parameter is named m to avoid shadowing the package-level mod func.)
func multiplicativeInverse(element, m int64) int64 {
	inv, _ := extEuclid(element%m, m)
	for inv < 0 {
		inv += m
	}
	return inv % m
}
// lagrangeInterpolationAtZero evaluates, at x = 0, the unique polynomial
// through the given points, working modulo prime. For Shamir shares this
// recovers the secret h(0).
func lagrangeInterpolationAtZero(points []point, prime int64) int64 {
	res := int64(0)
	for _, pointI := range points {
		// num/den is the Lagrange basis value l_i(0) =
		// product over j != i of x_j / (x_j - x_i).
		num := int64(1)
		den := int64(1)
		for _, pointJ := range points {
			if pointI.x != pointJ.x {
				num = (num * pointJ.x)                // % prime
				den = (den * (pointJ.x - pointI.x))   // % prime
			}
		}
		//fmt.Println("num", num, "den", den)
		// res = (res + (pointI.y * num * multiplicativeInverse(den, prime)))// % prime
		// NOTE(review): intermediate values are intentionally not reduced
		// modulo prime (reductions are commented out) — overflow is possible
		// for large primes/point counts.
		delta := divideMod(num, den, prime)
		//fmt.Println("delta", delta)
		res = (res + (pointI.y * delta)) // % prime
		//fmt.Println(res )
	}
	// Only the final result is normalized into [0, prime).
	return mod(res, prime)
}
// mod returns the mathematical (non-negative for b > 0) remainder of a
// divided by b; Go's % operator keeps the sign of the dividend instead.
func mod(a, b int64) int64 {
	if r := a % b; r >= 0 {
		return r
	}
	return a%b + b
}
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package arcappcompat will have tast tests for android apps on Chromebooks.
package arcappcompat
import (
"context"
"time"
"chromiumos/tast/common/android/ui"
"chromiumos/tast/errors"
"chromiumos/tast/local/arc"
"chromiumos/tast/local/bundles/cros/arcappcompat/pre"
"chromiumos/tast/local/bundles/cros/arcappcompat/testutil"
"chromiumos/tast/local/chrome"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// clamshellLaunchForMicrosoftOnedrive lists the launch test case run for
// MicrosoftOnedrive in clamshell mode.
var clamshellLaunchForMicrosoftOnedrive = []testutil.TestCase{
	{Name: "Launch app in Clamshell", Fn: launchAppForMicrosoftOnedrive},
}

// touchviewLaunchForMicrosoftOnedrive lists the launch test case run for
// MicrosoftOnedrive in tablet (touchview) mode.
var touchviewLaunchForMicrosoftOnedrive = []testutil.TestCase{
	{Name: "Launch app in Touchview", Fn: launchAppForMicrosoftOnedrive},
}

// clamshellAppSpecificTestsForMicrosoftOnedrive are placed here.
var clamshellAppSpecificTestsForMicrosoftOnedrive = []testutil.TestCase{
	{Name: "Clamshell: Signout app", Fn: signOutOfMicrosoftOnedrive},
}

// touchviewAppSpecificTestsForMicrosoftOnedrive are placed here.
var touchviewAppSpecificTestsForMicrosoftOnedrive = []testutil.TestCase{
	{Name: "Touchview: Signout app", Fn: signOutOfMicrosoftOnedrive},
}
// init registers the MicrosoftOnedrive test with four parameterizations:
// clamshell/tablet on both container (android_p) and VM (android_vm) ARC.
func init() {
	testing.AddTest(&testing.Test{
		Func:         MicrosoftOnedrive,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Functional test for MicrosoftOnedrive that installs the app also verifies it is logged in and that the main page is open, checks MicrosoftOnedrive correctly changes the window state in both clamshell and touchview mode",
		Contacts:     []string{"mthiyagarajan@chromium.org", "cros-appcompat-test-team@google.com"},
		Attr:         []string{"group:appcompat"},
		SoftwareDeps: []string{"chrome"},
		Params: []testing.Param{{
			Name: "clamshell_mode_default",
			Val: testutil.TestParams{
				LaunchTests:      clamshellLaunchForMicrosoftOnedrive,
				CommonTests:      testutil.ClamshellCommonTests,
				AppSpecificTests: clamshellAppSpecificTestsForMicrosoftOnedrive,
			},
			ExtraAttr:         []string{"appcompat_default"},
			ExtraSoftwareDeps: []string{"android_p"},
			// TODO(b/189704585): Remove hwdep.SkipOnModel once the solution is found.
			// Skip on tablet only models.
			ExtraHardwareDeps: hwdep.D(hwdep.SkipOnModel(testutil.TabletOnlyModels...)),
			Pre:               pre.AppCompatBootedUsingTestAccountPool,
		}, {
			Name: "tablet_mode_default",
			Val: testutil.TestParams{
				LaunchTests:      touchviewLaunchForMicrosoftOnedrive,
				CommonTests:      testutil.TouchviewCommonTests,
				AppSpecificTests: touchviewAppSpecificTestsForMicrosoftOnedrive,
			},
			ExtraAttr:         []string{"appcompat_default"},
			ExtraSoftwareDeps: []string{"android_p"},
			// TODO(b/189704585): Remove hwdep.SkipOnModel once the solution is found.
			// Skip on clamshell only models.
			ExtraHardwareDeps: hwdep.D(hwdep.TouchScreen(), hwdep.SkipOnModel(testutil.ClamshellOnlyModels...)),
			Pre:               pre.AppCompatBootedInTabletModeUsingTestAccountPool,
		}, {
			Name: "vm_clamshell_mode_default",
			Val: testutil.TestParams{
				LaunchTests:      clamshellLaunchForMicrosoftOnedrive,
				CommonTests:      testutil.ClamshellCommonTests,
				AppSpecificTests: clamshellAppSpecificTestsForMicrosoftOnedrive,
			},
			ExtraAttr:         []string{"appcompat_default"},
			ExtraSoftwareDeps: []string{"android_vm"},
			// TODO(b/189704585): Remove hwdep.SkipOnModel once the solution is found.
			// Skip on tablet only models.
			ExtraHardwareDeps: hwdep.D(hwdep.SkipOnModel(testutil.TabletOnlyModels...)),
			Pre:               pre.AppCompatBootedUsingTestAccountPool,
		}, {
			Name: "vm_tablet_mode_default",
			Val: testutil.TestParams{
				LaunchTests:      touchviewLaunchForMicrosoftOnedrive,
				CommonTests:      testutil.TouchviewCommonTests,
				AppSpecificTests: touchviewAppSpecificTestsForMicrosoftOnedrive,
			},
			ExtraAttr:         []string{"appcompat_default"},
			ExtraSoftwareDeps: []string{"android_vm"},
			// TODO(b/189704585): Remove hwdep.SkipOnModel once the solution is found.
			// Skip on clamshell only models.
			ExtraHardwareDeps: hwdep.D(hwdep.TouchScreen(), hwdep.SkipOnModel(testutil.ClamshellOnlyModels...)),
			Pre:               pre.AppCompatBootedInTabletModeUsingTestAccountPool,
		}},
		Timeout: 10 * time.Minute,
		Vars:    []string{"arcappcompat.gaiaPoolDefault"},
		VarDeps: []string{"arcappcompat.MicrosoftOnedrive.emailid", "arcappcompat.MicrosoftOnedrive.password"},
	})
}
// MicrosoftOnedrive test uses library for opting into the playstore and installing app.
// Checks MicrosoftOnedrive correctly changes the window states in both clamshell and touchview mode.
// The actual test-case list comes from the registered parameter (see init).
func MicrosoftOnedrive(ctx context.Context, s *testing.State) {
	const (
		appPkgName  = "com.microsoft.skydrive"
		appActivity = ".MainActivity"
	)
	params := s.Param().(testutil.TestParams)
	testutil.RunTestCases(ctx, s, appPkgName, appActivity, params)
}
// launchAppForMicrosoftOnedrive verifies MicrosoftOnedrive is logged in and
// verify MicrosoftOnedrive reached main activity page of the app.
//
// Flow: SIGN IN -> email -> Next -> password -> SIGN IN, then a series of
// optional dialogs (autofill NOT NOW, GOT IT, camera upload, permissions,
// feedback, Personal Vault) dismissed best-effort. Only failures to click an
// element that was found, and the final launch verification, are fatal.
func launchAppForMicrosoftOnedrive(ctx context.Context, s *testing.State, tconn *chrome.TestConn, a *arc.ARC, d *ui.Device, appPkgName, appActivity string) {
	const (
		allowButtonText                = "ALLOW"
		cameraID                       = "com.microsoft.skydrive:id/fab_button"
		enterEmailAddressID            = "com.microsoft.skydrive:id/authentication_input_text"
		gotItButtonText                = "GOT IT"
		meetYourPersonalVaultClassName = "android.widget.TextView"
		meetYourPersonalVaulttext      = "Meet your Personal Vault"
		nextButtonDescription          = "Next"
		notNowID                       = "android:id/autofill_save_no"
		notnowText                     = "NOT NOW"
		okText                         = "OK"
		passwordClassName              = "android.widget.EditText"
		passwordID                     = "i0118"
		passwordText                   = "Password"
		signInClassName                = "android.widget.Button"
		signinText                     = "SIGN IN"
		turnOnCameraUploadText         = "TURN ON CAMERA UPLOAD"
	)
	// Click on signin button.
	signInButton := d.Object(ui.ClassName(testutil.AndroidButtonClassName), ui.TextMatches("(?i)"+signinText))
	if err := signInButton.WaitForExists(ctx, testutil.LongUITimeout); err != nil {
		s.Log("signInButton doesn't exists: ", err)
	} else if err := signInButton.Click(ctx); err != nil {
		s.Fatal("Failed to click on signInButton: ", err)
	}
	// Enter email id.
	enterEmailAddress := d.Object(ui.ID(enterEmailAddressID))
	if err := enterEmailAddress.WaitForExists(ctx, testutil.LongUITimeout); err != nil {
		s.Error("EnterEmailAddress doesn't exists: ", err)
	} else if err := enterEmailAddress.Click(ctx); err != nil {
		s.Fatal("Failed to click on enterEmailAddress: ", err)
	}
	// Click on enterEmailAddress until the email text field is focused.
	// Click errors inside the poll are deliberately ignored: the poll
	// re-checks focus and retries until ShortUITimeout expires.
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		if enterEmailAddressFocused, err := enterEmailAddress.IsFocused(ctx); err != nil {
			return errors.New("enterEmailAddress not focused yet")
		} else if !enterEmailAddressFocused {
			enterEmailAddress.Click(ctx)
			return errors.New("enterEmailAddress not focused yet")
		}
		return nil
	}, &testing.PollOptions{Timeout: testutil.ShortUITimeout}); err != nil {
		s.Fatal("Failed to focus enterEmailAddress: ", err)
	}
	emailAddress := s.RequiredVar("arcappcompat.MicrosoftOnedrive.emailid")
	if err := enterEmailAddress.SetText(ctx, emailAddress); err != nil {
		s.Fatal("Failed to enter EmailAddress: ", err)
	}
	s.Log("Entered EmailAddress")
	// Click on next button
	nextButton := d.Object(ui.ClassName(testutil.AndroidButtonClassName), ui.Description(nextButtonDescription))
	if err := nextButton.WaitForExists(ctx, testutil.DefaultUITimeout); err != nil {
		s.Log("Next Button doesn't exists: ", err)
	} else if err := nextButton.Click(ctx); err != nil {
		s.Fatal("Failed to click on nextButton: ", err)
	}
	// Enter password.
	enterPassword := d.Object(ui.ID(passwordID))
	if err := enterPassword.WaitForExists(ctx, testutil.LongUITimeout); err != nil {
		s.Error("EnterPassword doesn't exists: ", err)
	} else if err := enterPassword.Click(ctx); err != nil {
		s.Fatal("Failed to click on enterPassword: ", err)
	}
	// Click on password text field until the password text field is focused.
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		if pwdFocused, err := enterPassword.IsFocused(ctx); err != nil {
			return errors.New("password text field not focused yet")
		} else if !pwdFocused {
			enterPassword.Click(ctx)
			return errors.New("password text field not focused yet")
		}
		return nil
	}, &testing.PollOptions{Timeout: testutil.ShortUITimeout}); err != nil {
		s.Fatal("Failed to focus password: ", err)
	}
	password := s.RequiredVar("arcappcompat.MicrosoftOnedrive.password")
	if err := enterPassword.SetText(ctx, password); err != nil {
		s.Fatal("Failed to enter enterPassword: ", err)
	}
	s.Log("Entered password")
	// Click on Sign in button.
	signInButton = d.Object(ui.ClassName(testutil.AndroidButtonClassName), ui.TextMatches("(?i)"+signinText))
	if err := signInButton.WaitForExists(ctx, testutil.DefaultUITimeout); err != nil {
		s.Error("SignInButton doesn't exists: ", err)
	}
	// Click on signin Button until flip button exist.
	// NOTE(review): the comment above says "flip button", but the poll
	// actually waits for the autofill save dialog (notNowID) — confirm intent.
	signInButton = d.Object(ui.ClassName(testutil.AndroidButtonClassName), ui.TextMatches("(?i)"+signinText))
	notNowButton := d.Object(ui.ID(notNowID))
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		if err := notNowButton.Exists(ctx); err != nil {
			signInButton.Click(ctx)
			return err
		}
		return nil
	}, &testing.PollOptions{Timeout: testutil.ShortUITimeout}); err != nil {
		s.Log("notNowButton doesn't exist: ", err)
	} else if err := notNowButton.Click(ctx); err != nil {
		s.Fatal("Failed to click on notNowButton: ", err)
	}
	// click on got it button.
	gotItButton := d.Object(ui.ClassName(testutil.AndroidButtonClassName), ui.TextMatches("(?i)"+gotItButtonText))
	if err := gotItButton.WaitForExists(ctx, testutil.ShortUITimeout); err != nil {
		s.Log("gotItButton doesn't exists: ", err)
	} else if err := gotItButton.Click(ctx); err != nil {
		s.Fatal("Failed to click on gotItButton: ", err)
	}
	// Click on turnOn Camera upload button.
	turnOnCameraUploadButton := d.Object(ui.ClassName(testutil.AndroidButtonClassName), ui.TextMatches("(?i)"+turnOnCameraUploadText))
	if err := turnOnCameraUploadButton.WaitForExists(ctx, testutil.ShortUITimeout); err != nil {
		s.Log("turnOnCameraUploadButton doesn't exists: ", err)
	} else if err := turnOnCameraUploadButton.Click(ctx); err != nil {
		s.Fatal("Failed to click on turnOnCameraUploadButton: ", err)
	}
	// Click on allow button to access your photos, media and files.
	allowButton := d.Object(ui.ClassName(testutil.AndroidButtonClassName), ui.TextMatches("(?i)"+allowButtonText))
	if err := allowButton.WaitForExists(ctx, testutil.ShortUITimeout); err != nil {
		s.Log("Allow Button doesn't exists: ", err)
	} else if err := allowButton.Click(ctx); err != nil {
		s.Fatal("Failed to click on allowButton: ", err)
	}
	// Click on notnow button for feedback.
	notnowButton := d.Object(ui.ClassName(testutil.AndroidButtonClassName), ui.TextMatches("(?i)"+notnowText))
	if err := notnowButton.WaitForExists(ctx, testutil.ShortUITimeout); err != nil {
		s.Log("notnowButton doesn't exists: ", err)
	} else if err := notnowButton.Click(ctx); err != nil {
		s.Fatal("Failed to click on notnowButton: ", err)
	}
	// Click on meet your personal vault on homePage.
	clickOnMeetYourPersonalVault := d.Object(ui.ClassName(meetYourPersonalVaultClassName), ui.Text(meetYourPersonalVaulttext))
	if err := clickOnMeetYourPersonalVault.WaitForExists(ctx, testutil.ShortUITimeout); err != nil {
		s.Log("clickOnMeetYourPersonalVault doesn't exists: ", err)
	} else if err := clickOnMeetYourPersonalVault.Click(ctx); err != nil {
		s.Fatal("Failed to click on clickOnMeetYourPersonalVault: ", err)
	}
	testutil.HandleDialogBoxes(ctx, s, d, appPkgName)
	// Check for launch verifier.
	// Any window belonging to the app's package counts as a successful
	// launch; otherwise try to classify the failure (crash/ANR) and abort.
	launchVerifier := d.Object(ui.PackageName(appPkgName))
	if err := launchVerifier.WaitForExists(ctx, testutil.LongUITimeout); err != nil {
		testutil.DetectAndHandleCloseCrashOrAppNotResponding(ctx, s, d)
		s.Fatal("launchVerifier doesn't exists: ", err)
	}
}
// signOutOfMicrosoftOnedrive verifies app is signed out.
// If the profile icon is not present the logout is skipped rather than
// failed (the app may not be in a signed-in state at this point).
func signOutOfMicrosoftOnedrive(ctx context.Context, s *testing.State, tconn *chrome.TestConn, a *arc.ARC, d *ui.Device, appPkgName, appActivity string) {
	const (
		logOutOfOnedriveText = "Sign out"
		okButtonText         = "OK"
		profileID            = "com.microsoft.skydrive:id/pivot_me"
	)
	// Click on profile icon.
	profileIcon := d.Object(ui.ID(profileID))
	if err := profileIcon.WaitForExists(ctx, testutil.ShortUITimeout); err != nil {
		s.Log("profileIcon doesn't exists and skipped logout: ", err)
		return
	} else if err := profileIcon.Click(ctx); err != nil {
		s.Fatal("Failed to click on profileIcon: ", err)
	}
	// The "Sign out" button must exist once the profile page is open.
	logOutOfMicrosoftOnedrive := d.Object(ui.ClassName(testutil.AndroidButtonClassName), ui.TextMatches("(?i)"+logOutOfOnedriveText))
	if err := logOutOfMicrosoftOnedrive.WaitForExists(ctx, testutil.DefaultUITimeout); err != nil {
		s.Fatal("logOutOfMicrosoftOnedrive doesn't exist: ", err)
	} else if err := logOutOfMicrosoftOnedrive.Click(ctx); err != nil {
		s.Fatal("Failed to click on logOutOfMicrosoftOnedrive: ", err)
	}
	// Click on ok button to signout.
	okButton := d.Object(ui.ClassName(testutil.AndroidButtonClassName), ui.TextMatches("(?i)"+okButtonText))
	if err := okButton.WaitForExists(ctx, testutil.ShortUITimeout); err != nil {
		s.Error("okButton doesn't exists: ", err)
	} else if err := okButton.Click(ctx); err != nil {
		s.Fatal("Failed to click on okButton: ", err)
	}
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package smb
import (
	"fmt"
	"sort"
	"strings"
)
// Config represents the entire smb.conf file with a [global] section and 1 or
// more file shares.
type Config struct {
	global *Section   // the [global] section
	shares []*Section // one Section per file share, in insertion order
}
// Section represents either the [global] section or a file share.
// Each section is made up of parameters of the format:
// key = value
type Section struct {
	name   string            // rendered as the [name] header
	params map[string]string // configuration items for this section
}
// NewConfig creates a Config which represents a smb.conf file, containing
// an empty [global] section and no file shares.
func NewConfig() *Config {
	global := &Section{
		name:   "global",
		params: map[string]string{},
	}
	return &Config{global: global, shares: []*Section{}}
}
// SetGlobalParam sets a key value pair for the [global] section,
// overwriting any previous value for key.
func (c *Config) SetGlobalParam(key, value string) {
	c.global.SetParam(key, value)
}
// AddFileShare adds a share as a section to the Config object.
// Shares are rendered in the order they are added.
func (c *Config) AddFileShare(share *Section) {
	c.shares = append(c.shares, share)
}
// String returns a string representation of the samba config file as per:
// https://www.samba.org/samba/docs/current/man-html/smb.conf.5.html
// The [global] section is emitted first (only when it has at least one
// parameter), followed by every file share, each followed by a blank line.
func (c *Config) String() string {
	var smbConf strings.Builder
	if len(c.global.params) > 0 {
		smbConf.WriteString(c.global.String())
		smbConf.WriteString("\n")
	}
	// Ranging over an empty slice is a no-op, so the previous
	// len(c.shares) > 0 guard was redundant and has been removed.
	for _, section := range c.shares {
		smbConf.WriteString(section.String())
		smbConf.WriteString("\n")
	}
	return smbConf.String()
}
// NewFileShare creates a subsection that starts with [name] and holds the
// key value pairs describing a single file share.
func NewFileShare(name string) *Section {
	share := &Section{name: name}
	share.params = map[string]string{}
	return share
}
// SetParam sets a key value pair on a section, these are individual
// configuration items. An existing value for key is overwritten.
func (s *Section) SetParam(key, value string) {
	s.params[key] = value
}
// String returns the string representation of a section, starting with the
// section name as [name] then each configuration item as "\tkey = value".
//
// Keys are emitted in sorted order so the output is deterministic: Go map
// iteration order is randomized, which previously made repeated renders of
// the same section differ from run to run.
func (s *Section) String() string {
	var section strings.Builder
	section.WriteString(fmt.Sprintf("[%s]\n", s.name))
	keys := make([]string, 0, len(s.params))
	for key := range s.params {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	for _, key := range keys {
		section.WriteString(fmt.Sprintf("\t%s = %s\n", key, s.params[key]))
	}
	return section.String()
}
// CreateBasicShare creates a file share Section with the common parameters
// shared by all file shares, rooted at path.
func CreateBasicShare(name, path string) *Section {
	share := NewFileShare(name)
	defaults := map[string]string{
		"path":           path,
		"writeable":      "yes",
		"create mask":    "0644",
		"directory mask": "0755",
		"read only":      "no",
	}
	for key, value := range defaults {
		share.SetParam(key, value)
	}
	return share
}
|
package main
import (
"fmt"
"github.com/fvbock/endless"
"go-admin-starter/middleware/jwt"
"go-admin-starter/routers"
"go-admin-starter/utils"
"go-admin-starter/utils/config"
"log"
"net/http"
"runtime"
"syscall"
)
// main boots the HTTP server: it loads configuration, sets up logging and
// the JWT signing key, then serves the router. On Windows it uses the
// standard net/http server instead of endless (presumably because endless'
// graceful restart relies on Unix signals — confirm before changing).
func main() {
	conf := config.New()
	utils.LogSetup()
	jwt.SetSignKey(conf.App.JwtSecret)
	routersInit := routers.InitRouter()
	readTimeout := conf.Server.ReadTimeout
	writeTimeout := conf.Server.WriteTimeout
	endPoint := fmt.Sprintf(":%d", conf.Server.HttpPort)
	maxHeaderBytes := 1 << 20 // 1 MiB request-header cap.
	log.Printf("Server start at http port: %d", conf.Server.HttpPort)
	if runtime.GOOS == "windows" {
		server := &http.Server{
			Addr:           endPoint,
			Handler:        routersInit,
			ReadTimeout:    readTimeout,
			WriteTimeout:   writeTimeout,
			MaxHeaderBytes: maxHeaderBytes,
		}
		// Bug fix: the error from ListenAndServe was silently discarded,
		// hiding failures such as the port already being in use.
		if err := server.ListenAndServe(); err != nil {
			log.Printf("Server err: %v", err)
		}
		return
	}
	endless.DefaultReadTimeOut = readTimeout
	endless.DefaultWriteTimeOut = writeTimeout
	endless.DefaultMaxHeaderBytes = maxHeaderBytes
	server := endless.NewServer(endPoint, routersInit)
	server.BeforeBegin = func(addr string) {
		log.Printf("Actual pid is %d", syscall.Getpid())
	}
	if err := server.ListenAndServe(); err != nil {
		log.Printf("Server err: %v", err)
	}
}
|
// Package main ...
package main
import (
"path/filepath"
"github.com/go-rod/rod/lib/utils"
)
// slash converts a slash-separated path to the OS-specific separator.
var slash = filepath.FromSlash
// main regenerates lib/assets/assets.go, embedding the monitor pages and
// the mouse-pointer SVG as Go string constants.
func main() {
	// utils.S renders the template below, substituting each {{.name}}
	// placeholder with the escaped file contents supplied as pairs.
	build := utils.S(`// Package assets is generated by "lib/assets/generate"
package assets
// MousePointer for rod
const MousePointer = {{.mousePointer}}
// Monitor for rod
const Monitor = {{.monitor}}
// MonitorPage for rod
const MonitorPage = {{.monitorPage}}
`,
		"mousePointer", get("../../fixtures/mouse-pointer.svg"),
		"monitor", get("monitor.html"),
		"monitorPage", get("monitor-page.html"),
	)
	utils.E(utils.OutputFile(slash("lib/assets/assets.go"), build))
}
// get reads the asset at path (relative to lib/assets), panicking via
// utils.E on failure, and returns the contents escaped as a Go string
// literal suitable for embedding in generated source.
func get(path string) string {
	content, err := utils.ReadString(slash("lib/assets/" + path))
	utils.E(err)
	return utils.EscapeGoString(content)
}
|
package util
import "sort"
// copyInts returns a new slice with the same elements as ints; the result
// shares no backing storage with the input.
func copyInts(ints []int) []int {
	dup := make([]int, len(ints))
	copy(dup, ints)
	return dup
}
// reverseInts returns a copy of ints with the elements in reverse order;
// the input slice is left unmodified.
func reverseInts(ints []int) []int {
	out := copyInts(ints)
	for i, j := 0, len(out)-1; i < j; i, j = i+1, j-1 {
		out[i], out[j] = out[j], out[i]
	}
	return out
}
// PermInts returns a channel that yields every permutation of ints in
// lexicographic order, starting from the sorted order. Each value sent is
// a fresh copy, so callers may retain or mutate it freely.
//
// NOTE(review): if the consumer stops receiving before the channel is
// drained, the generator goroutine blocks forever on the unbuffered send —
// there is no cancellation mechanism; confirm callers always drain.
func PermInts(ints ...int) <-chan []int {
	ints = copyInts(ints)
	n := len(ints)
	sort.Ints(ints)
	ch := make(chan []int)
	go func() {
		defer close(ch)
		// Emit the first (sorted) permutation, then repeatedly apply the
		// classic next-permutation algorithm until no successor exists.
		ch <- copyInts(ints)
		for {
			// Find the rightmost index k with ints[k] < ints[k+1] (the pivot).
			k := n - 2
			for k >= 0 {
				if ints[k] < ints[k+1] {
					break
				}
				k--
			}
			// Entire slice is non-increasing: last permutation was emitted.
			if k < 0 {
				return
			}
			// Find the rightmost element strictly greater than the pivot.
			l := n - 1
			for l > k {
				if ints[l] > ints[k] {
					break
				}
				l--
			}
			// Swap pivot with its successor, then restore ascending order in
			// the suffix (reverseInts copies first, so the append is safe).
			ints[k], ints[l] = ints[l], ints[k]
			ints = append(ints[:k+1], reverseInts(ints[k+1:])...)
			ch <- copyInts(ints)
		}
	}()
	return ch
}
|
// RecentCounter counts the requests received within the most recent 3000
// milliseconds (LeetCode 933, "Number of Recent Calls").
type RecentCounter struct {
	reqs []int // ping timestamps still inside the window, ascending
}

// Constructor returns an empty RecentCounter.
// Bug fix: the original `RecentCounter{reqs[]{}}` did not compile — the
// composite literal needs the field name and the element type.
func Constructor() RecentCounter {
	return RecentCounter{reqs: []int{}}
}

// Ping records a request at time t (milliseconds, strictly increasing per
// the problem statement) and returns the number of requests in the
// inclusive window [t-3000, t].
//
// Bug fix: the original compared the slice index i against t-3000 instead
// of the stored timestamp this.reqs[i], and kept every stale entry when
// all timestamps fell outside the window (idx stayed 0).
func (this *RecentCounter) Ping(t int) int {
	this.reqs = append(this.reqs, t)
	// Drop timestamps older than the window start.
	idx := 0
	for idx < len(this.reqs) && this.reqs[idx] < t-3000 {
		idx++
	}
	this.reqs = this.reqs[idx:]
	return len(this.reqs)
}

/**
 * Your RecentCounter object will be instantiated and called as such:
 * obj := Constructor();
 * param_1 := obj.Ping(t);
 */
package main
import "fmt"
// demo demonstrates passing a function as a parameter (functions are
// reference types in Go): it prints a start marker and then invokes the
// supplied callback with a fixed name.
func demo(callback func(name string)) {
	fmt.Println("demo started")
	callback("lee")
}
// main wires a simple printing callback into demo.
func main() {
	printName := func(name string) {
		fmt.Println("name is ", name)
	}
	demo(printName)
}
|
package main
import (
"net/http"
"testing"
)
// TestHandler exercises Handler with a table of request bodies and checks
// only the HTTP status code of the response.
func TestHandler(t *testing.T) {
	tt := []struct {
		name    string
		request APIRequest
		status  int
	}{
		{
			// A body that is not valid JSON should be rejected.
			name:    "Invalid Body",
			request: APIRequest{Body: "invalid"},
			status:  http.StatusBadRequest,
		},
		{
			// A well-formed body with a name field should succeed.
			name:    "Check name",
			request: APIRequest{Body: `{"name":"Footters"}`},
			status:  http.StatusOK,
		},
	}
	for _, tc := range tt {
		t.Run(tc.name, func(t *testing.T) {
			// NOTE(review): Handler's error return is discarded here;
			// confirm Handler reports failures via the status code only.
			res, _ := Handler(tc.request)
			if res.StatusCode != tc.status {
				t.Errorf("Error status, expected %d, got %d", tc.status, res.StatusCode)
			}
		})
	}
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tenantrate_test
import (
"bufio"
"bytes"
"context"
"fmt"
"regexp"
"sort"
"strings"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/tenantrate"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/metric"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/datadriven"
"github.com/cockroachdb/errors"
"github.com/dustin/go-humanize"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v2"
)
// TestCloser verifies that closing the closer channel passed to GetTenant
// unblocks a pending Wait call with an error mentioning "closer".
func TestCloser(t *testing.T) {
	defer leaktest.AfterTest(t)()
	st := cluster.MakeTestingClusterSettings()
	start := timeutil.Now()
	timeSource := timeutil.NewManualTime(start)
	factory := tenantrate.NewLimiterFactory(st, &tenantrate.TestingKnobs{
		TimeSource: timeSource,
	})
	tenant := roachpb.MakeTenantID(2)
	closer := make(chan struct{})
	limiter := factory.GetTenant(tenant, closer)
	ctx := context.Background()
	// First Wait call will not block.
	require.NoError(t, limiter.Wait(ctx, true, 1))
	// A huge request (1<<30 write bytes) must block; the goroutine reports
	// the eventual Wait result on errCh.
	errCh := make(chan error, 1)
	go func() { errCh <- limiter.Wait(ctx, true, 1<<30) }()
	// Wait until the blocked request has registered its retry timer.
	testutils.SucceedsSoon(t, func() error {
		if timers := timeSource.Timers(); len(timers) != 1 {
			return errors.Errorf("expected 1 timer, found %d", len(timers))
		}
		return nil
	})
	close(closer)
	require.Regexp(t, "closer", <-errCh)
}
// TestDataDriven runs the data-driven test files under testdata; each file
// gets a fresh testState and is checked for leaked goroutines.
func TestDataDriven(t *testing.T) {
	defer leaktest.AfterTest(t)()
	datadriven.Walk(t, "testdata", func(t *testing.T, path string) {
		defer leaktest.AfterTest(t)()
		datadriven.RunTest(t, path, new(testState).run)
	})
}
// testState carries the state shared by the data-driven commands; it is
// populated by the init command.
type testState struct {
	initialized bool
	// tenants maps tenant id to the limiter references acquired via the
	// get_tenants command (one entry per acquisition).
	tenants map[roachpb.TenantID][]tenantrate.Limiter
	// running holds the outstanding requests started by the launch command.
	running  map[string]*launchState
	rl       *tenantrate.LimiterFactory
	m        *metric.Registry
	clock    *timeutil.ManualTime
	settings *cluster.Settings
	config   tenantrate.Config
}
// launchState tracks one in-flight Wait request started by the launch
// command.
type launchState struct {
	id         string           // test-assigned request id
	tenantID   roachpb.TenantID // tenant the request is issued under
	ctx        context.Context  // canceled by the cancel command
	cancel     context.CancelFunc
	isWrite    bool
	writeBytes int64
	reserveCh  chan error // receives the result of Limiter.Wait
}
// String renders the request as <id>@<tenant>, the format used by
// FormatRunning.
func (s launchState) String() string {
	return s.id + "@" + s.tenantID.String()
}
// testStateCommands maps each data-driven command name to its handler
// method; run dispatches through this table.
var testStateCommands = map[string]func(*testState, *testing.T, *datadriven.TestData) string{
	"init":            (*testState).init,
	"update_settings": (*testState).updateSettings,
	"advance":         (*testState).advance,
	"launch":          (*testState).launch,
	"await":           (*testState).await,
	"cancel":          (*testState).cancel,
	"record_read":     (*testState).recordRead,
	"timers":          (*testState).timers,
	"metrics":         (*testState).metrics,
	"get_tenants":     (*testState).getTenants,
	"release_tenants": (*testState).releaseTenants,
	"estimate_iops":   (*testState).estimateIOPS,
}
// run dispatches a single data-driven command to its handler, enforcing
// that init runs first (estimate_iops needs no state and is exempt).
func (ts *testState) run(t *testing.T, d *datadriven.TestData) string {
	if !ts.initialized && d.Cmd != "init" && d.Cmd != "estimate_iops" {
		d.Fatalf(t, "expected init as first command, got %q", d.Cmd)
	}
	if f, ok := testStateCommands[d.Cmd]; ok {
		return f(ts, t, d)
	}
	d.Fatalf(t, "unknown command %q", d.Cmd)
	return ""
}
// timeFormat renders clock readings as HH:MM:SS.mmm in command output.
const timeFormat = "15:04:05.000"

// t0 is the initial reading of the manual test clock (00:00:00.000).
var t0 = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)
// init is called at the beginning of a test. It must be the first command.
// The argument is a yaml serialization of LimitConfigs. It returns the time as
// of initialization (00:00:00.000). For example:
//
// init
// requests: { rate: 1, burst: 2 }
// readbytes: { rate: 1024, burst: 2048 }
// writebytes: { rate: 1024, burst: 2048 }
// ----
// 00:00:00.000
//
func (ts *testState) init(t *testing.T, d *datadriven.TestData) string {
	if ts.initialized {
		d.Fatalf(t, "already ran init")
	}
	ts.initialized = true
	ts.running = make(map[string]*launchState)
	ts.tenants = make(map[roachpb.TenantID][]tenantrate.Limiter)
	// Deterministic manual clock starting at t0 (00:00:00.000).
	ts.clock = timeutil.NewManualTime(t0)
	ts.settings = cluster.MakeTestingClusterSettings()
	ts.config = tenantrate.DefaultConfig()
	parseSettings(t, d, &ts.config)
	ts.rl = tenantrate.NewLimiterFactory(ts.settings, &tenantrate.TestingKnobs{
		TimeSource: ts.clock,
	})
	ts.rl.UpdateConfig(ts.config)
	// Register the factory's metrics so the metrics command can scrape them.
	ts.m = metric.NewRegistry()
	ts.m.AddMetricStruct(ts.rl.Metrics())
	return ts.clock.Now().Format(timeFormat)
}
// updateSettings allows setting the rate and burst limits. It takes as input
// yaml object representing the limits and updates accordingly. It returns
// the current time. See init for more details as the semantics are the same.
func (ts *testState) updateSettings(t *testing.T, d *datadriven.TestData) string {
	// Overlay the new limits onto the existing config, then push the merged
	// config to the limiter factory.
	parseSettings(t, d, &ts.config)
	ts.rl.UpdateConfig(ts.config)
	return ts.formatTime()
}
// advance advances the clock by the provided duration and returns the new
// current time.
//
// advance
// 2s
// ----
// 00:00:02.000
//
func (ts *testState) advance(t *testing.T, d *datadriven.TestData) string {
	dur, err := time.ParseDuration(d.Input)
	if err != nil {
		d.Fatalf(t, "failed to parse input as duration: %v", err)
	}
	// Move the deterministic manual clock forward by the parsed duration.
	ts.clock.Advance(dur)
	return ts.formatTime()
}
// launch will launch requests with provided id, tenant, and writebytes.
// The argument is a yaml list of such request to launch. These requests
// are launched in parallel, no ordering should be assumed between them.
// It is an error to launch a request with an id of an outstanding request or
// with a tenant id that has not been previously created with at least one call
// to get_tenant. The return value is a serialization of all of the currently
// outstanding requests. Requests can be removed from the outstanding set with
// await. The set of outstanding requests is serialized as a list of
// [<id>@<tenant>, ...]
//
// The below example would launch two requests with ids "a" and "b"
// corresponding to tenants 2 and 3 respectively.
//
// launch
// - { id: a, tenant: 2, writebytes: 3}
// - { id: b, tenant: 3}
// ----
// [a@2, b@3]
//
func (ts *testState) launch(t *testing.T, d *datadriven.TestData) string {
	var cmds []struct {
		ID         string
		Tenant     uint64
		IsWrite    bool
		WriteBytes int64
	}
	if err := yaml.UnmarshalStrict([]byte(d.Input), &cmds); err != nil {
		d.Fatalf(t, "failed to parse launch command: %v", err)
	}
	for _, cmd := range cmds {
		var s launchState
		s.id = cmd.ID
		s.tenantID = roachpb.MakeTenantID(cmd.Tenant)
		s.ctx, s.cancel = context.WithCancel(context.Background())
		// Buffered so the goroutine below can report its result and exit
		// even if no await/cancel ever reads from the channel.
		s.reserveCh = make(chan error, 1)
		s.isWrite = cmd.IsWrite
		s.writeBytes = cmd.WriteBytes
		ts.running[s.id] = &s
		lims := ts.tenants[s.tenantID]
		if len(lims) == 0 {
			d.Fatalf(t, "no limiter exists for tenant %v", s.tenantID)
		}
		go func() {
			// We'll not worry about ever releasing tenant Limiters.
			s.reserveCh <- lims[0].Wait(s.ctx, s.isWrite, s.writeBytes)
		}()
	}
	return ts.FormatRunning()
}
// await will wait for an outstanding requests to complete. It is an error if
// no request with the given id exists. The input is a yaml list of request ids.
// The set of remaining requests will be returned. See launch for details on
// the serialization of the output. If the requests do not proceed soon, the
// test will fail.
//
// For example:
//
// await
// [a]
// ----
// [b@3]
//
func (ts *testState) await(t *testing.T, d *datadriven.TestData) string {
	ids := parseStrings(t, d)
	// One shared deadline covers all awaited requests.
	const awaitTimeout = time.Second
	ctx, cancel := context.WithTimeout(context.Background(), awaitTimeout)
	defer cancel()
	for _, id := range ids {
		ls, ok := ts.running[id]
		if !ok {
			d.Fatalf(t, "no running goroutine with id %s", id)
		}
		select {
		case <-ctx.Done():
			d.Fatalf(t, "goroutined %s failed to finish in time", id)
		case err := <-ls.reserveCh:
			// An awaited request is expected to have succeeded.
			if err != nil {
				d.Fatalf(t, "expected no error for id %s, got %q", id, err)
			}
		}
		delete(ts.running, id)
	}
	return ts.FormatRunning()
}
// cancel will cancel an outstanding request. It is an error if no request
// with the given id exists. The input is a yaml list of request ids. Cancel
// will wait for the cancellation to take effect and will remove the request
// from the set of outstanding requests. The set of remaining requests will be
// returned. See launch for details on the serialization of the output.
//
// cancel
// [b]
// ----
// [a@2]
//
func (ts *testState) cancel(t *testing.T, d *datadriven.TestData) string {
	ids := parseStrings(t, d)
	for _, id := range ids {
		ls, ok := ts.running[id]
		if !ok {
			d.Fatalf(t, "no running goroutine with id %s", id)
		}
		// Cancel the request's context and require that Wait observed the
		// cancellation rather than completing some other way.
		ls.cancel()
		err := <-ls.reserveCh
		if !errors.Is(err, context.Canceled) {
			d.Fatalf(t, "expected %v for id %s, got %q",
				context.Canceled, id, err)
		}
		delete(ts.running, id)
	}
	return ts.FormatRunning()
}
// recordRead accounts for bytes read from a request. It takes as input a
// yaml list with fields tenant and readbytes. It returns the set of tasks
// currently running like launch, await, and cancel.
//
// For example:
//
// record_read
// - { tenant: 2, readbytes: 32 }
// ----
// [a@2]
//
func (ts *testState) recordRead(t *testing.T, d *datadriven.TestData) string {
	var reads []struct {
		Tenant    uint64
		ReadBytes int64
	}
	if err := yaml.UnmarshalStrict([]byte(d.Input), &reads); err != nil {
		d.Fatalf(t, "failed to unmarshal reads: %v", err)
	}
	for _, r := range reads {
		tid := roachpb.MakeTenantID(r.Tenant)
		lims := ts.tenants[tid]
		if len(lims) == 0 {
			d.Fatalf(t, "no outstanding limiters for %v", tid)
		}
		// Report read feedback to the tenant's first limiter reference.
		lims[0].RecordRead(context.Background(), r.ReadBytes)
	}
	return ts.FormatRunning()
}
// metrics will print out the prometheus metric values. The command takes an
// argument as a regular expression over the values. The metrics are printed in
// lexicographical order. The command will retry until the output matches to
// make it more robust to races in metric recording.
//
// For example:
//
// metrics
// ----
// kv_tenant_rate_limit_current_blocked 0
// kv_tenant_rate_limit_current_blocked{tenant_id="2"} 0
// kv_tenant_rate_limit_current_blocked{tenant_id="system"} 0
// kv_tenant_rate_limit_num_tenants 0
// kv_tenant_rate_limit_read_bytes_admitted 0
// kv_tenant_rate_limit_read_bytes_admitted{tenant_id="2"} 0
// kv_tenant_rate_limit_read_bytes_admitted{tenant_id="system"} 100
// kv_tenant_rate_limit_read_requests_admitted 0
// kv_tenant_rate_limit_read_requests_admitted{tenant_id="2"} 0
// kv_tenant_rate_limit_read_requests_admitted{tenant_id="system"} 0
// kv_tenant_rate_limit_write_bytes_admitted 50
// kv_tenant_rate_limit_write_bytes_admitted{tenant_id="2"} 50
// kv_tenant_rate_limit_write_bytes_admitted{tenant_id="system"} 0
// kv_tenant_rate_limit_write_requests_admitted 0
// kv_tenant_rate_limit_write_requests_admitted{tenant_id="2"} 0
// kv_tenant_rate_limit_write_requests_admitted{tenant_id="system"} 0
//
// Or with a regular expression:
//
// metrics
// write_bytes_admitted\{tenant_id="2"\}
// ----
// kv_tenant_rate_limit_write_bytes_admitted{tenant_id="2"} 50
//
func (ts *testState) metrics(t *testing.T, d *datadriven.TestData) string {
	exp := strings.TrimSpace(d.Expected)
	// Metric updates can race with this command, so retry the scrape until
	// the output matches the expectation (or SucceedsSoon times out).
	if err := testutils.SucceedsSoonError(func() error {
		got := ts.getMetricsText(t, d)
		if got != exp {
			return errors.Errorf("got:\n%s\nexp:\n%s\n", got, exp)
		}
		return nil
	}); err != nil {
		d.Fatalf(t, "failed to find expected metrics: %v", err)
	}
	return d.Expected
}
// getMetricsText scrapes the registry as prometheus text, drops comment
// lines, keeps only lines matching the regexp in d.Input, and returns them
// sorted lexicographically.
func (ts *testState) getMetricsText(t *testing.T, d *datadriven.TestData) string {
	ex := metric.MakePrometheusExporter()
	ex.ScrapeRegistry(ts.m, true /* includeChildMetrics */)
	var in bytes.Buffer
	if err := ex.PrintAsText(&in); err != nil {
		d.Fatalf(t, "failed to print prometheus data: %v", err)
	}
	// We want to compile the input into a regular expression.
	re, err := regexp.Compile(d.Input)
	if err != nil {
		d.Fatalf(t, "failed to compile pattern: %v", err)
	}
	sc := bufio.NewScanner(&in)
	var outLines []string
	for sc.Scan() {
		// Skip '#' comment lines and lines not selected by the pattern.
		if bytes.HasPrefix(sc.Bytes(), []byte{'#'}) || !re.Match(sc.Bytes()) {
			continue
		}
		outLines = append(outLines, sc.Text())
	}
	if err := sc.Err(); err != nil {
		d.Fatalf(t, "failed to process metrics: %v", err)
	}
	sort.Strings(outLines)
	metricsText := strings.Join(outLines, "\n")
	return metricsText
}
// timers waits for the set of open timers to match the expected output.
// timers is critical to avoid synchronization problems in testing. The command
// outputs the set of timers in increasing order with each timer's deadline on
// its own line.
//
// The following example would wait for there to be two outstanding timers at
// 00:00:01.000 and 00:00:02.000.
//
// timers
// ----
// 00:00:01.000
// 00:00:02.000
//
func (ts *testState) timers(t *testing.T, d *datadriven.TestData) string {
	exp := strings.TrimSpace(d.Expected)
	// Retry because timer registration races with this command.
	if err := testutils.SucceedsSoonError(func() error {
		got := timesToStrings(ts.clock.Timers())
		gotStr := strings.Join(got, "\n")
		if gotStr != exp {
			return errors.Errorf("got: %q, exp: %q", gotStr, exp)
		}
		return nil
	}); err != nil {
		d.Fatalf(t, "failed to find expected timers: %v", err)
	}
	return d.Expected
}
// timesToStrings formats each time with the test's timeFormat, preserving
// the input order.
func timesToStrings(times []time.Time) []string {
	out := make([]string, 0, len(times))
	for _, tm := range times {
		out = append(out, tm.Format(timeFormat))
	}
	return out
}
// getTenants acquires references to tenants. It is a prerequisite to launching
// requests. The input is a yaml list of tenant ids. It returns the currently
// allocated limiters and their reference counts. The serialization of the
// return is a list of [<tenant id>#<ref count>, ...].
//
// For example:
//
// get_tenants
// [2, 3, 2]
// ----
// [2#2, 3#1]
//
func (ts *testState) getTenants(t *testing.T, d *datadriven.TestData) string {
	tenantIDs := parseTenantIDs(t, d)
	for i := range tenantIDs {
		id := roachpb.MakeTenantID(tenantIDs[i])
		// Each acquisition appends one reference, so duplicate ids in the
		// input raise that tenant's reference count.
		ts.tenants[id] = append(ts.tenants[id], ts.rl.GetTenant(id, nil /* closer */))
	}
	return ts.FormatTenants()
}
// releaseTenants releases references to tenants. The input is a yaml list of
// tenant ids. It returns the currently allocated limiters and their reference
// counts. See getTenants for the serialization.
//
// For example:
//
// release_tenants
// [2, 3]
// ----
// [2#1]
//
func (ts *testState) releaseTenants(t *testing.T, d *datadriven.TestData) string {
	tenantIDs := parseTenantIDs(t, d)
	for i := range tenantIDs {
		id := roachpb.MakeTenantID(tenantIDs[i])
		lims := ts.tenants[id]
		if len(lims) == 0 {
			d.Fatalf(t, "no outstanding limiters for %v", id)
		}
		// Release the oldest reference; drop the map entry entirely once the
		// last reference is gone so FormatTenants omits the tenant.
		ts.rl.Release(lims[0])
		if lims = lims[1:]; len(lims) > 0 {
			ts.tenants[id] = lims
		} else {
			delete(ts.tenants, id)
		}
	}
	return ts.FormatTenants()
}
// estimateIOPS takes in the description of a workload and produces an estimate
// of the IOPS for that workload (under the default settings).
//
// For example:
//
// estimate_iops
// readpercentage: 50
// readsize: 4096
// writesize: 4096
// ----
// Mixed workload (50% reads; 4.0 KiB reads; 4.0 KiB writes): 256 sustained IOPS, 256 burst.
//
func (ts *testState) estimateIOPS(t *testing.T, d *datadriven.TestData) string {
	var workload struct {
		ReadPercentage int
		ReadSize       int
		WriteSize      int
	}
	if err := yaml.UnmarshalStrict([]byte(d.Input), &workload); err != nil {
		d.Fatalf(t, "failed to parse workload information: %v", err)
	}
	if workload.ReadPercentage < 0 || workload.ReadPercentage > 100 {
		d.Fatalf(t, "Invalid read percentage %d", workload.ReadPercentage)
	}
	config := tenantrate.DefaultConfig()
	// calculateIOPS converts a unit rate into operations per second by
	// dividing by the average per-operation cost of the read/write mix.
	calculateIOPS := func(rate float64) float64 {
		readCost := config.ReadRequestUnits + float64(workload.ReadSize)*config.ReadUnitsPerByte
		writeCost := config.WriteRequestUnits + float64(workload.WriteSize)*config.WriteUnitsPerByte
		readFraction := float64(workload.ReadPercentage) / 100.0
		avgCost := readFraction*readCost + (1-readFraction)*writeCost
		return rate / avgCost
	}
	sustained := calculateIOPS(config.Rate)
	burst := calculateIOPS(config.Burst)
	// fmtFloat keeps one decimal place for small values, none otherwise.
	fmtFloat := func(val float64) string {
		if val < 10 {
			return fmt.Sprintf("%.1f", val)
		}
		return fmt.Sprintf("%.0f", val)
	}
	switch workload.ReadPercentage {
	case 0:
		return fmt.Sprintf(
			"Write-only workload (%s writes): %s sustained IOPS, %s burst.",
			humanize.IBytes(uint64(workload.WriteSize)), fmtFloat(sustained), fmtFloat(burst),
		)
	case 100:
		return fmt.Sprintf(
			"Read-only workload (%s reads): %s sustained IOPS, %s burst.",
			humanize.IBytes(uint64(workload.ReadSize)), fmtFloat(sustained), fmtFloat(burst),
		)
	default:
		return fmt.Sprintf(
			"Mixed workload (%d%% reads; %s reads; %s writes): %s sustained IOPS, %s burst.",
			workload.ReadPercentage,
			humanize.IBytes(uint64(workload.ReadSize)), humanize.IBytes(uint64(workload.WriteSize)),
			fmtFloat(sustained), fmtFloat(burst),
		)
	}
}
// FormatRunning returns the sorted string forms of all currently-running
// launched requests, serialized as "[a, b, ...]".
//
// Fix: the receiver was named rs while every other testState method in this
// file uses ts; renamed for the required consistent receiver naming.
func (ts *testState) FormatRunning() string {
	var states []string
	for _, ls := range ts.running {
		states = append(states, ls.String())
	}
	// Sort for deterministic test output (map/collection order is not stable).
	sort.Strings(states)
	return "[" + strings.Join(states, ", ") + "]"
}
// FormatTenants serializes the outstanding tenant limiters as a sorted list
// of "<tenant id>#<ref count>" entries.
func (ts *testState) FormatTenants() string {
	counts := make([]string, 0, len(ts.tenants))
	for id, lims := range ts.tenants {
		counts = append(counts, fmt.Sprintf("%s#%d", id, len(lims)))
	}
	sort.Strings(counts)
	return "[" + strings.Join(counts, ", ") + "]"
}
// formatTime renders the test clock's current time using the package's
// timeFormat layout.
func (ts *testState) formatTime() string {
	return ts.clock.Now().Format(timeFormat)
}
// parseTenantIDs unmarshals the datadriven input as a YAML list of tenant ids.
func parseTenantIDs(t *testing.T, d *datadriven.TestData) []uint64 {
	var ids []uint64
	if err := yaml.UnmarshalStrict([]byte(d.Input), &ids); err != nil {
		d.Fatalf(t, "failed to parse getTenants command: %v", err)
	}
	return ids
}
// SettingValues is a struct that can be populated from test files, via YAML.
type SettingValues struct {
	Rate  float64 // sustained token refill rate
	Burst float64 // burst token allowance
	Read  Factors // read cost factors
	Write Factors // write cost factors
}
// Factors for reads and writes.
type Factors struct {
	Base    float64 // per-request cost
	PerByte float64 // additional cost per payload byte
}
// parseSettings parses a SettingValues yaml and updates the given config.
// Missing (zero) values are ignored.
func parseSettings(t *testing.T, d *datadriven.TestData, config *tenantrate.Config) {
	var vals SettingValues
	if err := yaml.UnmarshalStrict([]byte(d.Input), &vals); err != nil {
		d.Fatalf(t, "failed to unmarshal limits: %v", err)
	}
	// setIfNonZero applies only explicitly-provided (non-zero) overrides.
	setIfNonZero := func(dst *float64, v float64) {
		if v != 0 {
			*dst = v
		}
	}
	setIfNonZero(&config.Rate, vals.Rate)
	setIfNonZero(&config.Burst, vals.Burst)
	setIfNonZero(&config.ReadRequestUnits, vals.Read.Base)
	setIfNonZero(&config.ReadUnitsPerByte, vals.Read.PerByte)
	setIfNonZero(&config.WriteRequestUnits, vals.Write.Base)
	setIfNonZero(&config.WriteUnitsPerByte, vals.Write.PerByte)
}
// parseStrings unmarshals the datadriven input as a YAML list of strings.
func parseStrings(t *testing.T, d *datadriven.TestData) []string {
	var out []string
	if err := yaml.UnmarshalStrict([]byte(d.Input), &out); err != nil {
		d.Fatalf(t, "failed to parse strings: %v", err)
	}
	return out
}
|
package model
// BaseModel holds the common auto-increment primary key embedded by the
// package's GORM models.
type BaseModel struct {
	// Id is the primary-key column. NOTE(review): Go convention would spell
	// this "ID", but renaming would break existing callers and column mapping.
	Id int64 `gorm:"primary_key"`
}
|
package boot
import (
"fmt"
"strings"
"tax-calculator/core"
)
// Bootstrap wires up the application in dependency order: configuration
// first, then the database, then the HTTP server.
func Bootstrap() {
	BootConfig()
	BootDatabase()
	BootServer()
}
func Run() {
configPort := core.Globals.Config.GetStringSlice("services.app.ports")
ports := strings.Split(configPort[0],":")
port := fmt.Sprintf(":%s", ports[0])
core.Globals.Router.Run(port)
} |
package gelf
import (
"github.com/shanexu/logn/appender/encoder"
"github.com/shanexu/logn/common"
"go.uber.org/zap"
"go.uber.org/zap/buffer"
"go.uber.org/zap/zapcore"
"os"
)
// Encoder wraps a zapcore.Encoder and injects a fixed set of fields into
// every encoded entry (see EncodeEntry).
type Encoder struct {
	// Fields are emitted verbatim on every log entry (e.g. version, host,
	// configured key/value pairs).
	Fields []zapcore.Field
	zapcore.Encoder
}
// KeyValuePair is one user-configured static field attached to every entry.
type KeyValuePair struct {
	Key   string `logn-config:"key"`
	Value string `logn-config:"value"`
}
// Config is the gelf encoder configuration: extra key/value pairs to attach
// to every entry (each key is emitted with a leading "_" — see init below).
type Config struct {
	KeyValuePairs []KeyValuePair `logn-config:"key_value_pairs"`
}
// EncodeEntry prepends the encoder's static fields unchanged, prefixes every
// caller-supplied field key with "_" (GELF's marker for additional fields),
// and delegates to the wrapped zapcore.Encoder.
//
// Improvement: replaces the original pair of manual index loops with
// idiomatic prealloc + append; behavior is unchanged (each zapcore.Field is
// copied by value, so mutating f.Key does not touch the caller's slice).
func (e *Encoder) EncodeEntry(enc zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
	newFields := make([]zap.Field, 0, len(e.Fields)+len(fields))
	// Static fields (version, host, configured pairs) go first, as-is.
	newFields = append(newFields, e.Fields...)
	// Per-entry fields get the "_" additional-field prefix.
	for _, f := range fields {
		f.Key = "_" + f.Key
		newFields = append(newFields, f)
	}
	return e.Encoder.EncodeEntry(enc, newFields)
}
// LevelEncoder maps zap levels onto the numeric severities used by GELF
// (syslog style: 0 fatal/emergency ... 7 debug). Unknown levels fall back
// to 7, the same value as debug.
func LevelEncoder(l zapcore.Level, enc zapcore.PrimitiveArrayEncoder) {
	var level uint8
	switch l {
	case zapcore.FatalLevel:
		level = 0
	case zapcore.PanicLevel:
		level = 1
	case zapcore.DPanicLevel:
		level = 2
	case zapcore.ErrorLevel:
		level = 3
	case zapcore.WarnLevel:
		level = 4
	case zapcore.InfoLevel:
		level = 6
	default:
		// DebugLevel and anything unrecognized.
		level = 7
	}
	enc.AppendUint8(level)
}
// init registers the "gelf" encoder factory with the appender registry. The
// produced encoder emits GELF-style JSON: numeric levels via LevelEncoder,
// epoch timestamps, "short_message"/"full_message" keys, mandatory
// "version"/"host" fields, and any configured key/value pairs with a "_"
// key prefix.
func init() {
	encoder.RegisterType("gelf", func(config *common.Config) (encoder.Encoder, error) {
		cfg := Config{}
		if err := config.Unpack(&cfg); err != nil {
			return nil, err
		}
		// Key names chosen to match the GELF payload field names; non-standard
		// fields (_logger, _caller) carry the "_" prefix.
		encoderConfig := zapcore.EncoderConfig{
			TimeKey:        "timestamp",
			LevelKey:       "level",
			NameKey:        "_logger",
			CallerKey:      "_caller",
			MessageKey:     "short_message",
			StacktraceKey:  "full_message",
			LineEnding:     "\n",
			EncodeLevel:    LevelEncoder,
			EncodeTime:     zapcore.EpochTimeEncoder,
			EncodeDuration: zapcore.SecondsDurationEncoder,
			EncodeCaller:   zapcore.ShortCallerEncoder,
		}
		hostname, err := os.Hostname()
		if err != nil {
			return nil, err
		}
		// Static fields attached to every entry.
		fields := []zapcore.Field{
			zap.String("version", "1.1"),
			zap.String("host", hostname),
		}
		for _, kv := range cfg.KeyValuePairs {
			fields = append(fields, zap.String("_"+kv.Key, kv.Value))
		}
		return &Encoder{
			Fields:  fields,
			Encoder: zapcore.NewJSONEncoder(encoderConfig),
		}, nil
	})
}
|
package Models
import (
"github.com/jinzhu/gorm"
)
// Device is the GORM model for a hardware device record (table "device",
// see TableName). Field meanings below that are not evident from the code
// are marked for confirmation.
type Device struct {
	gorm.Model
	Udid   string `json:"udid"`
	Status int    `json:"status"` // NOTE(review): status values not visible here — confirm with callers
	Num    int    `json:"num"`
	Sn     string `json:"sn"`   // presumably serial number — confirm
	Imei   string `json:"imei"`
	Bt     string `json:"bt"`   // presumably Bluetooth address — confirm
	Wifu   string `json:"wifu"` // NOTE(review): likely a typo of "wifi"; the JSON key is part of the API, do not rename casually
	Ecid   string `json:"ecid"`
	Tp     string `json:"tp"`
	Nb     string `json:"nb"`
	Reg    string `json:"reg"`
	Ethernet string `json:"Ethernet"`
	ICCID    string `json:"ICCID"`
	Type     string `json:"type"`
}
// TableName overrides GORM's default pluralized table name so Device maps to
// the singular "device" table. (Original comment, translated: "the default
// table name would be the plural; override it here" — it mentioned
// "accounts", apparently a copy-paste leftover.)
func (Device) TableName() string {
	return "device"
}
// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errutil
import (
"errors"
"testing"
"github.com/stretchr/testify/require"
)
// TestMultiError checks MultiError's comma-joined formatting for zero, one,
// and several wrapped errors.
func TestMultiError(t *testing.T) {
	a := errors.New("a")
	b := errors.New("b")
	c := errors.New("c")
	for _, tc := range []struct {
		description string
		errs        []error
		result      string
	}{
		{"empty", nil, ""},
		{"one error", []error{a}, "a"},
		{"many errors", []error{a, b, c}, "a, b, c"},
	} {
		tc := tc
		t.Run(tc.description, func(t *testing.T) {
			require.Equal(t, tc.result, MultiError(tc.errs).Error())
		})
	}
}
func TestJoinNil(t *testing.T) {
f := func() error {
var errs []error
return Join(errs)
}
require.NoError(t, f())
}
func TestJoinNonNil(t *testing.T) {
f := func() error {
var errs []error
errs = append(errs, errors.New("some error"))
return Join(errs)
}
require.Error(t, f())
}
|
package provisioning
import (
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/services/provisioning/datasources"
)
var (
	// logger is the package-wide logger for the provisioning service.
	// NOTE(review): not referenced in this file's visible code — presumably
	// used by sibling files in the package; confirm before removing.
	logger log.Logger = log.New("services.provisioning")
)
// StartUp runs all provisioning steps; currently only datasource
// provisioning from the given path, whose error is returned directly.
func StartUp(datasourcePath string) error {
	return datasources.Provision(datasourcePath)
}
|
package cherryPacketSimple
// Encoder is a no-op packet encoder.
type Encoder struct {
}

// Encode currently ignores its inputs and returns (nil, nil) — presumably a
// stub satisfying an encoder interface; confirm before relying on its output.
func (s *Encoder) Encode(typ byte, data []byte) ([]byte, error) {
	return nil, nil
}
|
package wrtc
import (
"fmt"
"log"
"github.com/c0re100/go-tdlib"
"github.com/pion/webrtc/v2"
)
var (
	// peerConnection is the shared WebRTC peer connection, (re)created by setup.
	peerConnection *webrtc.PeerConnection
	// mediaEngine holds the registered codecs used to build the WebRTC API.
	mediaEngine webrtc.MediaEngine
	// closeRTC signals session teardown; buffered so one close request never
	// blocks the sender. NOTE(review): receiver not visible in this chunk.
	closeRTC = make(chan bool, 1)
	// userBot is the Telegram client associated with the current session.
	userBot *tdlib.Client
)
// setup (re)initializes the package-level media engine and peer connection:
// registers the Opus codec, builds an API with the engine, and creates a
// fresh PeerConnection before wiring media via setupMedia.
//
// Fix: removed the dead `peerConnection = &webrtc.PeerConnection{}`
// assignment, which was immediately overwritten by api.NewPeerConnection.
func setup() {
	mediaEngine = webrtc.MediaEngine{}
	mediaEngine.RegisterCodec(webrtc.NewRTPOpusCodec(111, 48000))
	api := webrtc.NewAPI(webrtc.WithMediaEngine(mediaEngine))
	// NOTE(review): the NewPeerConnection error is discarded, as in the
	// original; on failure peerConnection is nil and later calls will panic.
	// Consider propagating the error.
	peerConnection, _ = api.NewPeerConnection(webrtc.Configuration{})
	setupMedia()
}
// CreateOffer Create Local Description: initializes the shared peer
// connection, stores the bot client, registers ICE logging callbacks,
// creates and installs a local SDP offer, and returns the extracted
// description data. Panics on offer/description failure.
func CreateOffer(bot *tdlib.Client) *Data {
	setup()
	userBot = bot
	peerConnection.OnICEConnectionStateChange(func(state webrtc.ICEConnectionState) {
		log.Printf("Connection State has changed %s \n", state.String())
	})
	peerConnection.OnICECandidate(func(cand *webrtc.ICECandidate) {
		if cand != nil {
			fmt.Println(cand.ToJSON())
		}
	})
	offer, err := peerConnection.CreateOffer(nil)
	if err != nil {
		panic(err)
	}
	if err = peerConnection.SetLocalDescription(offer); err != nil {
		panic(err)
	}
	return extractDesc(peerConnection, "")
}
|
package main
import (
"./crawler"
)
// main kicks off a crawl of the BBC homepage to depth 3.
// NOTE(review): the "./crawler" relative import used by this file only works
// in pre-modules GOPATH builds.
func main() {
	crawler.StartCrawl("https://bbc.co.uk", 3)
	//crawler.StartCrawl("https://www.techcrunch.com/", 3)
}
|
package hard
import (
"testing"
"github.com/stretchr/testify/require"
"fmt"
"github.com/herlegs/programQ/golang/util"
"regexp"
)
// TestSubset verifies Subset against fixed-length combination fixtures,
// including inputs with repeated characters.
func TestSubset(t *testing.T) {
	cases := []struct {
		str      string
		len      int
		expected []string
	}{
		{"abc", 1, []string{"a", "b", "c"}},
		{"abcd", 2, []string{"ab", "bc", "cd", "ac", "ad", "bd"}},
		{"abb", 2, []string{"ab", "bb"}},
		{"bbc", 2, []string{"bb", "bc"}},
		{"abbc", 2, []string{"ab", "ac", "bb", "bc"}},
		{"abbc", 3, []string{"abb", "abc", "bbc"}},
	}
	for _, tc := range cases {
		got := Subset(tc.str, tc.len)
		fmt.Println(got)
		require.True(t, util.StringListEqual(got, tc.expected), "source: %v, out: %v", tc.str, got)
	}
}
// TestPermutation verifies Permutation against fixtures, including strings
// with duplicate characters (duplicates must not produce repeated results).
func TestPermutation(t *testing.T) {
	cases := []struct {
		str      string
		expected []string
	}{
		{"abc", []string{"abc", "acb", "bac", "bca", "cab", "cba"}},
		{"abb", []string{"abb", "bab", "bba"}},
		{"abba", []string{"aabb", "abab", "abba", "baab", "baba", "bbaa"}},
	}
	for _, tc := range cases {
		got := Permutation(tc.str)
		fmt.Println(got)
		require.True(t, util.StringListEqual(got, tc.expected), "source: %v, out: %v", tc.str, got)
	}
}
func TestSS(t *testing.T) {
s := "Mr. Leonard Spock"
re1, _ := regexp.Compile(`(Mr)(s)?\. (\w+) (\w+)`)
result:= re1.FindStringSubmatch(s)
fmt.Println(len(result))
for k, v := range result {
fmt.Printf("%d. %s\n", k, v)
}
}
|
package set3
import (
"bytes"
"cryptopals/set1"
"cryptopals/utils"
"encoding/base64"
"log"
"testing"
)
// base64strings holds the 40 base64-encoded plaintext lines for the
// fixed-nonce CTR exercise below; each decodes to one line of verse
// (they appear to be stanzas of a poem — see the crib-dragging comments in
// TestBreakFixedNonceCTR).
var base64strings = [40]string{
	"SSBoYXZlIG1ldCB0aGVtIGF0IGNsb3NlIG9mIGRheQ==",
	"Q29taW5nIHdpdGggdml2aWQgZmFjZXM=",
	"RnJvbSBjb3VudGVyIG9yIGRlc2sgYW1vbmcgZ3JleQ==",
	"RWlnaHRlZW50aC1jZW50dXJ5IGhvdXNlcy4=",
	"SSBoYXZlIHBhc3NlZCB3aXRoIGEgbm9kIG9mIHRoZSBoZWFk",
	"T3IgcG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==",
	"T3IgaGF2ZSBsaW5nZXJlZCBhd2hpbGUgYW5kIHNhaWQ=",
	"UG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==",
	"QW5kIHRob3VnaHQgYmVmb3JlIEkgaGFkIGRvbmU=",
	"T2YgYSBtb2NraW5nIHRhbGUgb3IgYSBnaWJl",
	"VG8gcGxlYXNlIGEgY29tcGFuaW9u",
	"QXJvdW5kIHRoZSBmaXJlIGF0IHRoZSBjbHViLA==",
	"QmVpbmcgY2VydGFpbiB0aGF0IHRoZXkgYW5kIEk=",
	"QnV0IGxpdmVkIHdoZXJlIG1vdGxleSBpcyB3b3JuOg==",
	"QWxsIGNoYW5nZWQsIGNoYW5nZWQgdXR0ZXJseTo=",
	"QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=",
	"VGhhdCB3b21hbidzIGRheXMgd2VyZSBzcGVudA==",
	"SW4gaWdub3JhbnQgZ29vZCB3aWxsLA==",
	"SGVyIG5pZ2h0cyBpbiBhcmd1bWVudA==",
	"VW50aWwgaGVyIHZvaWNlIGdyZXcgc2hyaWxsLg==",
	"V2hhdCB2b2ljZSBtb3JlIHN3ZWV0IHRoYW4gaGVycw==",
	"V2hlbiB5b3VuZyBhbmQgYmVhdXRpZnVsLA==",
	"U2hlIHJvZGUgdG8gaGFycmllcnM/",
	"VGhpcyBtYW4gaGFkIGtlcHQgYSBzY2hvb2w=",
	"QW5kIHJvZGUgb3VyIHdpbmdlZCBob3JzZS4=",
	"VGhpcyBvdGhlciBoaXMgaGVscGVyIGFuZCBmcmllbmQ=",
	"V2FzIGNvbWluZyBpbnRvIGhpcyBmb3JjZTs=",
	"SGUgbWlnaHQgaGF2ZSB3b24gZmFtZSBpbiB0aGUgZW5kLA==",
	"U28gc2Vuc2l0aXZlIGhpcyBuYXR1cmUgc2VlbWVkLA==",
	"U28gZGFyaW5nIGFuZCBzd2VldCBoaXMgdGhvdWdodC4=",
	"VGhpcyBvdGhlciBtYW4gSSBoYWQgZHJlYW1lZA==",
	"QSBkcnVua2VuLCB2YWluLWdsb3Jpb3VzIGxvdXQu",
	"SGUgaGFkIGRvbmUgbW9zdCBiaXR0ZXIgd3Jvbmc=",
	"VG8gc29tZSB3aG8gYXJlIG5lYXIgbXkgaGVhcnQs",
	"WWV0IEkgbnVtYmVyIGhpbSBpbiB0aGUgc29uZzs=",
	"SGUsIHRvbywgaGFzIHJlc2lnbmVkIGhpcyBwYXJ0",
	"SW4gdGhlIGNhc3VhbCBjb21lZHk7",
	"SGUsIHRvbywgaGFzIGJlZW4gY2hhbmdlZCBpbiBoaXMgdHVybiw=",
	"VHJhbnNmb3JtZWQgdXR0ZXJseTo=",
	"QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=",
}
// TestBreakFixedNonceCTR decodes the fixture lines, encrypts them all under
// AES-CTR with a single fixed nonce, then manually "crib-drags" guessed
// substrings across the XOR of the first two ciphertexts to recover both
// plaintexts. Because the keystream is identical for every message,
// c1 XOR c2 == p1 XOR p2, so a correct guess for part of one plaintext
// reveals the corresponding part of the other. The inline comments narrate
// each manual guess; the test has no assertions by design.
func TestBreakFixedNonceCTR(t *testing.T) {
	var plainTexts [][]byte
	for _, b64string := range base64strings {
		plainText, err := base64.StdEncoding.DecodeString(b64string)
		if err != nil {
			t.Error("Failed to base64 decode string", err)
		}
		// Lowercased to make crib guesses case-insensitive.
		plainTexts = append(plainTexts, bytes.ToLower(plainText))
	}
	key, err := utils.GenerateRandomAesKey()
	if err != nil {
		log.Println("Failed to generate random AES key:", err)
	}
	nonce := []byte{0, 0, 0, 0, 0, 0, 0, 0}
	cipherTexts := getCTRCipherTexts(plainTexts, key, nonce)
	// crib dragging technique for the first two ciphertexts
	// I'm doing it manually, so bear with me
	c1 := cipherTexts[0]
	c2 := cipherTexts[1]
	cipher := set1.FixedXOR(c1, c2)
	// https://en.wikipedia.org/wiki/Trigram
	// https://en.wikipedia.org/wiki/Most_common_words_in_English
	cribDrag([]byte(" have "), cipher) // => "oming " in 2dn position, so let's try with "coming"
	cribDrag([]byte("coming "), cipher) // => "i have " in 1st position, great
	// after a lot of other tries I tried with "nde"
	cribDrag([]byte("nde"), cipher) // => "the" in 8th position, so let's try with "the" (without any spaces) to see what is on the other side
	cribDrag([]byte("the"), cipher) // => " vi" in 12th position...don't know what words might start with these letters, let's try other cribs starting with "the"
	cribDrag([]byte("them "), cipher) // => " vivi" in 12th position...ok
	// i have ... met?
	cribDrag([]byte("met "), cipher) // => "with", perhaps now try "coming with"
	cribDrag([]byte("coming with"), cipher) // => "i have met", great. let's try "i have met her"
	cribDrag([]byte("i have met her "), cipher) // => nope, after "coming with" we get gibberish
	// i have met ... them ?
	cribDrag([]byte("i have met them "), cipher) // => "coming with vivi". cool
	cribDrag([]byte("i have met them at"), cipher) // "coming with vivid"
	// so far we have "i have met them at" and "coming with vivid "
	// at this point I don't know what other words could be there
	// so I searched for each of the two text and turns out they are from a poem...
	cribDrag([]byte("coming with vivid faces"), cipher) // returns nothing. ok, let's cut down the text a little bit
	cribDrag([]byte("coming with vivid face"), cipher) // => "i have met them at clo"
	// "i have met them at clo" and "coming with vivid face"
	// Turns out these are the two plain text messages that were
	// encrypted using CTR.
	// The length of each of the decrypted text is shorter than the
	// initial plain text because when the ciphertexts were xored
	// they were trimmed to the minimum length of the two.
}
|
package extra
import (
"time"
"webserver/models"
)
// Activity mirrors a row of the activity table: a promotional/announcement
// entry with image, link, copy, and lifecycle timestamps.
type Activity struct {
	Id       int    // primary key
	ImageUrl string // banner image URL
	PageUrl  string // landing-page URL
	Title    string
	Content  string
	Extra    string // NOTE(review): format/semantics not visible here — confirm with writers
	Status   int    // NOTE(review): status values not visible here — confirm with callers
	CreatedAt time.Time
	UpdatedAt time.Time
}
// GetActivityInfo loads the activity with the given primary key.
//
// On success it returns the populated record and a nil error. On lookup
// failure it returns (nil, err) — the original returned the empty struct
// alongside the error, which invites accidental use of a zero-valued record;
// per Go convention the value is meaningless when err != nil.
func GetActivityInfo(id interface{}) (*Activity, error) {
	activity := &Activity{}
	if err := models.GetDb().Where("id = ?", id).First(activity).Error; err != nil {
		return nil, err
	}
	return activity, nil
}
|
package api
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"golang.org/x/crypto/bcrypt"
"gorm.io/gorm"
"github.com/go-chi/chi"
"github.com/porter-dev/porter/internal/analytics"
"github.com/porter-dev/porter/internal/auth/token"
"github.com/porter-dev/porter/internal/forms"
"github.com/porter-dev/porter/internal/integrations/email"
"github.com/porter-dev/porter/internal/models"
"github.com/porter-dev/porter/internal/repository"
)
// Enumeration of user API error codes, represented as int64
const (
	// ErrUserDecode indicates a request body that could not be decoded.
	ErrUserDecode ErrorCode = iota + 600
	// ErrUserValidateFields indicates form field validation failed.
	ErrUserValidateFields
	// ErrUserDataRead indicates a read from the data store failed.
	ErrUserDataRead
)
// HandleCreateUser validates a user form entry, converts the user to a gorm
// model, and saves the user to the database. On success it marks the session
// authenticated, kicks off (non-fatal) email verification, and responds 201
// with the new user.
//
// Fix: added the missing `return` after handleErrorDataRead — the original
// fell through with an unusable session after the error response was already
// written (sibling HandleLoginUser returns here).
func (app *App) HandleCreateUser(w http.ResponseWriter, r *http.Request) {
	session, err := app.Store.Get(r, app.ServerConf.CookieName)
	if err != nil {
		app.handleErrorDataRead(err, w)
		return
	}
	form := &forms.CreateUserForm{
		// if app can send email verification, set the email verified to false
		EmailVerified: !app.Capabilities.Email,
	}
	// writeUser handles its own error responses; on error we simply stop.
	user, err := app.writeUser(
		form,
		app.Repo.User.CreateUser,
		w,
		r,
		doesUserExist,
	)
	if err == nil {
		// send to segment
		app.analyticsClient.Identify(analytics.CreateSegmentIdentifyNewUser(user, false))
		app.analyticsClient.Track(analytics.CreateSegmentNewUserTrack(user))
		app.Logger.Info().Msgf("New user created: %d", user.ID)
		// non-fatal email verification flow
		app.startEmailVerificationFlow(user)
		// Preserve any pre-login redirect target stored on the session.
		var redirect string
		if valR := session.Values["redirect"]; valR != nil {
			redirect = session.Values["redirect"].(string)
		}
		session.Values["authenticated"] = true
		session.Values["user_id"] = user.ID
		session.Values["email"] = user.Email
		session.Values["redirect"] = ""
		session.Save(r, w)
		w.WriteHeader(http.StatusCreated)
		if err := app.sendUser(w, user.ID, user.Email, false, redirect); err != nil {
			app.handleErrorFormDecoding(err, ErrUserDecode, w)
			return
		}
	}
}
// HandleAuthCheck checks whether current session is authenticated and returns user ID if so.
//
// Two auth paths are supported: a bearer token (checked first) and the cookie
// session. Both respond via sendUser with the user's ID/email/verified flag.
func (app *App) HandleAuthCheck(w http.ResponseWriter, r *http.Request) {
	// first, check for token
	tok := app.getTokenFromRequest(r)
	if tok != nil {
		// read the user
		user, err := app.Repo.User.ReadUser(tok.IBy)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		if err := app.sendUser(w, tok.IBy, user.Email, user.EmailVerified, ""); err != nil {
			app.handleErrorFormDecoding(err, ErrUserDecode, w)
			return
		}
		return
	}
	// Fall back to the cookie session.
	session, err := app.Store.Get(r, app.ServerConf.CookieName)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// NOTE(review): an unauthenticated session yields the zero userID here;
	// this relies on ReadUser(0) failing to reject the caller — confirm.
	userID, _ := session.Values["user_id"].(uint)
	email, _ := session.Values["email"].(string)
	user, err := app.Repo.User.ReadUser(userID)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusOK)
	if err := app.sendUser(w, userID, email, user.EmailVerified, ""); err != nil {
		app.handleErrorFormDecoding(err, ErrUserDecode, w)
		return
	}
}
// HandleCLILoginUser verifies that a user is logged in, and generates an access
// token for usage from the CLI. It mints a JWT for the session user, wraps it
// in a short-lived (30s) one-time authorization code, and redirects the
// caller back to the CLI's local server with that code attached.
//
// Fix: the original indexed queryParams["redirect"][0] unconditionally,
// which panics on any request missing the parameter (attacker-controlled
// input); a missing parameter now yields 400.
func (app *App) HandleCLILoginUser(w http.ResponseWriter, r *http.Request) {
	queryParams, _ := url.ParseQuery(r.URL.RawQuery)
	redirectVals, ok := queryParams["redirect"]
	if !ok || len(redirectVals) == 0 {
		http.Error(w, "redirect query parameter required", http.StatusBadRequest)
		return
	}
	redirect := redirectVals[0]
	session, err := app.Store.Get(r, app.ServerConf.CookieName)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	userID, _ := session.Values["user_id"].(uint)
	// generate the token
	jwt, err := token.GetTokenForUser(userID)
	if err != nil {
		app.handleErrorInternal(err, w)
		return
	}
	encoded, err := jwt.EncodeToken(&token.TokenGeneratorConf{
		TokenSecret: app.ServerConf.TokenGeneratorSecret,
	})
	if err != nil {
		app.handleErrorInternal(err, w)
		return
	}
	// generate 64 characters long authorization code
	code, err := repository.GenerateRandomBytes(32)
	if err != nil {
		app.handleErrorInternal(err, w)
		return
	}
	// Codes are single-use and expire quickly; 30s is enough for the CLI
	// round trip.
	expiry := time.Now().Add(30 * time.Second)
	// create auth code object and send back authorization code
	authCode := &models.AuthCode{
		Token:             encoded,
		AuthorizationCode: code,
		Expiry:            &expiry,
	}
	authCode, err = app.Repo.AuthCode.CreateAuthCode(authCode)
	if err != nil {
		app.handleErrorInternal(err, w)
		return
	}
	http.Redirect(w, r, fmt.Sprintf("%s/?code=%s", redirect, url.QueryEscape(authCode.AuthorizationCode)), 302)
}
// ExchangeRequest is the JSON body for trading a one-time authorization code
// for a token (see HandleCLILoginExchangeToken).
type ExchangeRequest struct {
	AuthorizationCode string `json:"authorization_code"`
}
// ExchangeResponse carries the token issued for a valid authorization code.
type ExchangeResponse struct {
	Token string `json:"token"`
}
// HandleCLILoginExchangeToken exchanges an authorization code for a token
func (app *App) HandleCLILoginExchangeToken(w http.ResponseWriter, r *http.Request) {
// read the request body and look up the authorization token
req := &ExchangeRequest{}
if err := json.NewDecoder(r.Body).Decode(req); err != nil {
app.handleErrorFormDecoding(err, ErrUserDecode, w)
return
}
authCode, err := app.Repo.AuthCode.ReadAuthCode(req.AuthorizationCode)
if err != nil || authCode.IsExpired() {
http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)
return
}
res := &ExchangeResponse{
Token: authCode.Token,
}
w.WriteHeader(http.StatusOK)
if err := json.NewEncoder(w).Encode(res); err != nil {
app.handleErrorFormDecoding(err, ErrUserDecode, w)
return
}
}
// HandleLoginUser checks the request header for cookie and validates the user.
// On a matching email/password pair it marks the session authenticated and
// responds 200 with the user (plus any stored post-login redirect target);
// unknown emails and wrong passwords both yield 401.
//
// Fix: the incorrect-password branch passed readErr to sendExternalError, but
// readErr is provably nil at that point (checked just above); it now passes
// the bcrypt comparison error.
func (app *App) HandleLoginUser(w http.ResponseWriter, r *http.Request) {
	session, err := app.Store.Get(r, app.ServerConf.CookieName)
	if err != nil {
		app.handleErrorDataRead(err, w)
		return
	}
	form := &forms.LoginUserForm{}
	// decode from JSON to form value
	if err := json.NewDecoder(r.Body).Decode(form); err != nil {
		app.handleErrorFormDecoding(err, ErrUserDecode, w)
		return
	}
	storedUser, readErr := app.Repo.User.ReadUserByEmail(form.Email)
	if readErr != nil {
		app.sendExternalError(readErr, http.StatusUnauthorized, HTTPError{
			Errors: []string{"email not registered"},
			Code:   http.StatusUnauthorized,
		}, w)
		return
	}
	if err := bcrypt.CompareHashAndPassword([]byte(storedUser.Password), []byte(form.Password)); err != nil {
		app.sendExternalError(err, http.StatusUnauthorized, HTTPError{
			Errors: []string{"incorrect password"},
			Code:   http.StatusUnauthorized,
		}, w)
		return
	}
	// Preserve any pre-login redirect target stored on the session.
	var redirect string
	if valR := session.Values["redirect"]; valR != nil {
		redirect = session.Values["redirect"].(string)
	}
	// Set user as authenticated
	session.Values["authenticated"] = true
	session.Values["user_id"] = storedUser.ID
	session.Values["email"] = storedUser.Email
	session.Values["redirect"] = ""
	if err := session.Save(r, w); err != nil {
		app.Logger.Warn().Err(err)
	}
	w.WriteHeader(http.StatusOK)
	if err := app.sendUser(w, storedUser.ID, storedUser.Email, storedUser.EmailVerified, redirect); err != nil {
		app.handleErrorFormDecoding(err, ErrUserDecode, w)
		return
	}
}
// HandleLogoutUser detaches the user from the session by clearing the
// authenticated flag and identity values, then responds 200.
//
// Fix: added the missing `return` after handleErrorDataRead — the original
// fell through and mutated/saved a session obtained under error, after an
// error response had already been written (sibling HandleLoginUser returns).
func (app *App) HandleLogoutUser(w http.ResponseWriter, r *http.Request) {
	session, err := app.Store.Get(r, app.ServerConf.CookieName)
	if err != nil {
		app.handleErrorDataRead(err, w)
		return
	}
	session.Values["authenticated"] = false
	session.Values["user_id"] = nil
	session.Values["email"] = nil
	session.Save(r, w)
	w.WriteHeader(http.StatusOK)
}
// HandleReadUser returns an externalized User (models.UserExternal)
// based on an ID.
//
// Fix: the original called WriteHeader(200) AFTER Encode had already written
// the response body — at that point the 200 header had been sent implicitly
// and the explicit call was a superfluous no-op (net/http logs a warning).
// The header is now written before the body, matching the other handlers.
func (app *App) HandleReadUser(w http.ResponseWriter, r *http.Request) {
	user, err := app.readUser(w, r)
	// error already handled by helper
	if err != nil {
		return
	}
	extUser := user.Externalize()
	w.WriteHeader(http.StatusOK)
	if err := json.NewEncoder(w).Encode(extUser); err != nil {
		app.handleErrorFormDecoding(err, ErrUserDecode, w)
		return
	}
}
// HandleListUserProjects lists all projects belonging to a given user,
// responding 200 with the externalized projects (an empty JSON array, never
// null, when the user has none).
//
// Fix: added the missing `return` after handleErrorRead — the original fell
// through after the error response was written and emitted a second 200
// status plus an empty list on the same connection.
func (app *App) HandleListUserProjects(w http.ResponseWriter, r *http.Request) {
	id, err := strconv.ParseUint(chi.URLParam(r, "user_id"), 0, 64)
	if err != nil || id == 0 {
		app.handleErrorFormDecoding(err, ErrUserDecode, w)
		return
	}
	projects, err := app.Repo.Project.ListProjectsByUserID(uint(id))
	if err != nil {
		app.handleErrorRead(err, ErrUserDataRead, w)
		return
	}
	// Non-nil slice so the JSON result is [] rather than null.
	projectsExt := make([]*models.ProjectExternal, 0)
	for _, project := range projects {
		projectsExt = append(projectsExt, project.Externalize())
	}
	w.WriteHeader(http.StatusOK)
	if err := json.NewEncoder(w).Encode(projectsExt); err != nil {
		app.handleErrorFormDecoding(err, ErrUserDecode, w)
		return
	}
}
// HandleDeleteUser removes a user after checking that the sent password is correct
func (app *App) HandleDeleteUser(w http.ResponseWriter, r *http.Request) {
	id, err := strconv.ParseUint(chi.URLParam(r, "user_id"), 0, 64)
	if err != nil || id == 0 {
		app.handleErrorFormDecoding(err, ErrUserDecode, w)
		return
	}
	// TODO -- HASH AND VERIFY PASSWORD BEFORE USER DELETION
	// NOTE(review): despite the doc comment above, no password verification
	// actually happens yet (see the TODO).
	form := &forms.DeleteUserForm{
		ID: uint(id),
	}
	// writeUser handles its own error responses; on error we simply stop.
	user, err := app.writeUser(form, app.Repo.User.DeleteUser, w, r)
	if err == nil {
		app.Logger.Info().Msgf("User deleted: %d", user.ID)
		w.WriteHeader(http.StatusNoContent)
	}
}
// InitiateEmailVerifyUser initiates the email verification flow for a
// logged-in user: it resolves the requesting user and kicks off the
// verification email, responding 200 on success and 500 on any failure.
func (app *App) InitiateEmailVerifyUser(w http.ResponseWriter, r *http.Request) {
	userID, err := app.getUserIDFromRequest(r)
	if err != nil {
		app.handleErrorInternal(err, w)
		return
	}
	user, err := app.Repo.User.ReadUser(userID)
	if err != nil {
		app.handleErrorInternal(err, w)
		return
	}
	if err := app.startEmailVerificationFlow(user); err != nil {
		app.handleErrorInternal(err, w)
		return
	}
	w.WriteHeader(http.StatusOK)
}
// FinalizEmailVerifyUser completes the email verification flow for a user.
// It validates the token/token_id query parameters against a stored
// (bcrypt-hashed) one-time token, marks the user's email verified, then
// invalidates the token. All failures redirect to /dashboard with an error
// message rather than returning an HTTP error status.
// NOTE(review): the name is missing an "e" ("Finalize"), but it is exported
// and referenced by the router — renaming would break callers.
func (app *App) FinalizEmailVerifyUser(w http.ResponseWriter, r *http.Request) {
	userID, err := app.getUserIDFromRequest(r)
	if err != nil {
		app.handleErrorInternal(err, w)
		return
	}
	user, err := app.Repo.User.ReadUser(userID)
	if err != nil {
		app.handleErrorInternal(err, w)
		return
	}
	vals, err := url.ParseQuery(r.URL.RawQuery)
	if err != nil {
		http.Redirect(w, r, "/dashboard?error="+url.QueryEscape("Invalid email verification URL"), 302)
		return
	}
	// Extract the raw token and its database id from the query string;
	// exactly one value each is required.
	var tokenStr string
	var tokenID uint
	if tokenArr, ok := vals["token"]; ok && len(tokenArr) == 1 {
		tokenStr = tokenArr[0]
	} else {
		http.Redirect(w, r, "/dashboard?error="+url.QueryEscape("Invalid email verification URL: token required"), 302)
		return
	}
	if tokenIDArr, ok := vals["token_id"]; ok && len(tokenIDArr) == 1 {
		id, err := strconv.ParseUint(tokenIDArr[0], 10, 64)
		if err != nil {
			http.Redirect(w, r, "/dashboard?error="+url.QueryEscape("Invalid email verification URL: valid token id required"), 302)
			return
		}
		tokenID = uint(id)
	} else {
		http.Redirect(w, r, "/dashboard?error="+url.QueryEscape("Invalid email verification URL: valid token id required"), 302)
		return
	}
	// verify the token is valid
	// NOTE(review): verification reuses the password-reset token store.
	token, err := app.Repo.PWResetToken.ReadPWResetToken(tokenID)
	if err != nil {
		http.Redirect(w, r, "/dashboard?error="+url.QueryEscape("Email verification error: valid token required"), 302)
		return
	}
	// make sure the token is still valid and has not expired
	if !token.IsValid || token.IsExpired() {
		http.Redirect(w, r, "/dashboard?error="+url.QueryEscape("Email verification error: valid token required"), 302)
		return
	}
	// make sure the token is correct
	if err := bcrypt.CompareHashAndPassword([]byte(token.Token), []byte(tokenStr)); err != nil {
		http.Redirect(w, r, "/dashboard?error="+url.QueryEscape("Email verification error: valid token required"), 302)
		return
	}
	user.EmailVerified = true
	user, err = app.Repo.User.UpdateUser(user)
	if err != nil {
		http.Redirect(w, r, "/dashboard?error="+url.QueryEscape("Could not verify email address"), 302)
		return
	}
	// invalidate the token
	token.IsValid = false
	_, err = app.Repo.PWResetToken.UpdatePWResetToken(token)
	if err != nil {
		http.Redirect(w, r, "/dashboard?error="+url.QueryEscape("Could not verify email address"), 302)
		return
	}
	http.Redirect(w, r, "/dashboard", 302)
	return
}
// InitiatePWResetUser initiates the password reset flow based on an email. The endpoint
// checks if the email exists, but returns a 200 status code regardless, since we don't
// want to leak in-use emails
func (app *App) InitiatePWResetUser(w http.ResponseWriter, r *http.Request) {
	form := &forms.InitiateResetUserPasswordForm{}
	// decode from JSON to form value
	// NOTE(review): this handler reports ErrProject* codes rather than the
	// ErrUser* codes used by the other user handlers — possibly copy-paste;
	// confirm whether intentional.
	if err := json.NewDecoder(r.Body).Decode(form); err != nil {
		app.handleErrorFormDecoding(err, ErrProjectDecode, w)
		return
	}
	// validate the form
	if err := app.validator.Struct(form); err != nil {
		app.handleErrorFormValidation(err, ErrProjectValidateFields, w)
		return
	}
	// check that the email exists; return 200 status code even if it doesn't
	user, err := app.Repo.User.ReadUserByEmail(form.Email)
	if err == gorm.ErrRecordNotFound {
		w.WriteHeader(http.StatusOK)
		return
	} else if err != nil {
		app.handleErrorDataRead(err, w)
		return
	}
	// if the user is a Github user, send them a Github email
	// (they have no password to reset; point them at the OAuth login instead)
	if user.GithubUserID != 0 {
		sgClient := email.SendgridClient{
			APIKey:         app.ServerConf.SendgridAPIKey,
			PWGHTemplateID: app.ServerConf.SendgridPWGHTemplateID,
			SenderEmail:    app.ServerConf.SendgridSenderEmail,
		}
		err = sgClient.SendGHPWEmail(
			fmt.Sprintf("%s/api/oauth/login/github", app.ServerConf.ServerURL),
			form.Email,
		)
		if err != nil {
			app.handleErrorInternal(err, w)
			return
		}
		w.WriteHeader(http.StatusOK)
		return
	}
	// convert the form to a project model
	pwReset, rawToken, err := form.ToPWResetToken()
	if err != nil {
		app.handleErrorFormDecoding(err, ErrProjectDecode, w)
		return
	}
	// handle write to the database
	pwReset, err = app.Repo.PWResetToken.CreatePWResetToken(pwReset)
	if err != nil {
		app.handleErrorDataWrite(err, w)
		return
	}
	// Build the finalize-reset link: raw token + its DB id + email.
	queryVals := url.Values{
		"token":    []string{rawToken},
		"email":    []string{form.Email},
		"token_id": []string{fmt.Sprintf("%d", pwReset.ID)},
	}
	sgClient := email.SendgridClient{
		APIKey:            app.ServerConf.SendgridAPIKey,
		PWResetTemplateID: app.ServerConf.SendgridPWResetTemplateID,
		SenderEmail:       app.ServerConf.SendgridSenderEmail,
	}
	err = sgClient.SendPWResetEmail(
		fmt.Sprintf("%s/password/reset/finalize?%s", app.ServerConf.ServerURL, queryVals.Encode()),
		form.Email,
	)
	if err != nil {
		app.handleErrorInternal(err, w)
		return
	}
	w.WriteHeader(http.StatusOK)
	return
}
// VerifyPWResetUser makes sure that the token is correct and still valid.
// Every failure mode — unknown token id, invalidated/expired token, email
// mismatch, or hash mismatch — responds 403 without detail; success is a
// bare 200.
func (app *App) VerifyPWResetUser(w http.ResponseWriter, r *http.Request) {
	form := &forms.VerifyResetUserPasswordForm{}
	// decode from JSON to form value
	if err := json.NewDecoder(r.Body).Decode(form); err != nil {
		app.handleErrorFormDecoding(err, ErrProjectDecode, w)
		return
	}
	// validate the form
	if err := app.validator.Struct(form); err != nil {
		app.handleErrorFormValidation(err, ErrProjectValidateFields, w)
		return
	}
	token, err := app.Repo.PWResetToken.ReadPWResetToken(form.PWResetTokenID)
	// Cases are evaluated in order, so token is only dereferenced when the
	// read succeeded.
	switch {
	case err != nil:
		w.WriteHeader(http.StatusForbidden)
	case !token.IsValid || token.IsExpired():
		w.WriteHeader(http.StatusForbidden)
	case token.Email != form.Email:
		w.WriteHeader(http.StatusForbidden)
	case bcrypt.CompareHashAndPassword([]byte(token.Token), []byte(form.Token)) != nil:
		w.WriteHeader(http.StatusForbidden)
	default:
		w.WriteHeader(http.StatusOK)
	}
}
// FinalizPWResetUser completes the password reset flow based on an email.
// It re-validates the token (same checks as VerifyPWResetUser), stores the
// bcrypt hash of the new password, and invalidates the token so it is
// single-use. All validation failures respond 403 without detail.
// NOTE(review): the name is missing an "e" ("Finalize"), but it is exported
// and referenced by the router — renaming would break callers.
func (app *App) FinalizPWResetUser(w http.ResponseWriter, r *http.Request) {
	form := &forms.FinalizeResetUserPasswordForm{}
	// decode from JSON to form value
	if err := json.NewDecoder(r.Body).Decode(form); err != nil {
		app.handleErrorFormDecoding(err, ErrProjectDecode, w)
		return
	}
	// validate the form
	if err := app.validator.Struct(form); err != nil {
		app.handleErrorFormValidation(err, ErrProjectValidateFields, w)
		return
	}
	// verify the token is valid
	token, err := app.Repo.PWResetToken.ReadPWResetToken(form.PWResetTokenID)
	if err != nil {
		w.WriteHeader(http.StatusForbidden)
		return
	}
	// make sure the token is still valid and has not expired
	if !token.IsValid || token.IsExpired() {
		w.WriteHeader(http.StatusForbidden)
		return
	}
	// check that the email matches
	if token.Email != form.Email {
		w.WriteHeader(http.StatusForbidden)
		return
	}
	// make sure the token is correct
	if err := bcrypt.CompareHashAndPassword([]byte(token.Token), []byte(form.Token)); err != nil {
		w.WriteHeader(http.StatusForbidden)
		return
	}
	// check that the email exists
	if user, err := app.Repo.User.ReadUserByEmail(form.Email); err != nil {
		w.WriteHeader(http.StatusForbidden)
		return
	}
	// NOTE(review): cost 8 is below bcrypt's DefaultCost (10) — confirm this
	// is intentional.
	hashedPW, err := bcrypt.GenerateFromPassword([]byte(form.NewPassword), 8)
	if err != nil {
		app.handleErrorDataWrite(err, w)
		return
	}
	user.Password = string(hashedPW)
	user, err = app.Repo.User.UpdateUser(user)
	if err != nil {
		app.handleErrorDataWrite(err, w)
		return
	}
	// invalidate the token
	token.IsValid = false
	_, err = app.Repo.PWResetToken.UpdatePWResetToken(token)
	if err != nil {
		app.handleErrorDataWrite(err, w)
		return
	}
	w.WriteHeader(http.StatusOK)
	return
}
// ------------------------ User handler helper functions ------------------------ //
// writeUser will take a POST or PUT request to the /api/users endpoint and decode
// the request into a forms.WriteUserForm model, convert it to a models.User, and
// write to the database.
// writeUser services POST/PUT requests to /api/users: it decodes the request
// body into the supplied forms.WriteUserForm, validates it, converts it to a
// models.User, runs any extra semantic validators, and persists the result
// via dbWrite. On any failure it writes the appropriate error response to w
// and returns a non-nil error; on success it returns the written user.
func (app *App) writeUser(
	form forms.WriteUserForm,
	dbWrite repository.WriteUser,
	w http.ResponseWriter,
	r *http.Request,
	validators ...func(repo *repository.Repository, user *models.User) *HTTPError,
) (*models.User, error) {
	// decode the JSON request body into the form
	if err := json.NewDecoder(r.Body).Decode(form); err != nil {
		app.handleErrorFormDecoding(err, ErrUserDecode, w)
		return nil, err
	}

	// run struct-tag validation on the decoded form
	if err := app.validator.Struct(form); err != nil {
		app.handleErrorFormValidation(err, ErrUserValidateFields, w)
		return nil, err
	}

	// convert the form to a user model -- WriteUserForm must implement ToUser
	userModel, err := form.ToUser(app.Repo.User)

	if err != nil {
		app.handleErrorFormDecoding(err, ErrUserDecode, w)
		return nil, err
	}

	// All syntax checks passed; semantic validator failures are sent with
	// http.StatusUnprocessableEntity (422) unless the validator reports an
	// internal server error (code 500).
	for _, validate := range validators {
		if httpErr := validate(app.Repo, userModel); httpErr != nil {
			goErr := errors.New(strings.Join(httpErr.Errors, ", "))

			status := http.StatusUnprocessableEntity

			if httpErr.Code == 500 {
				status = http.StatusInternalServerError
			}

			app.sendExternalError(goErr, status, *httpErr, w)

			return nil, goErr
		}
	}

	// persist the model to the database
	user, err := dbWrite(userModel)

	if err != nil {
		app.handleErrorDataWrite(err, w)
		return nil, err
	}

	return user, nil
}
// readUser reads the "user_id" URL parameter, parses it as a decimal ID, and
// fetches the corresponding user from the repository. On failure it writes an
// error response to w and returns a non-nil error.
func (app *App) readUser(w http.ResponseWriter, r *http.Request) (*models.User, error) {
	// Parse with an explicit base of 10: the previous base of 0 told strconv
	// to auto-detect the base, so inputs like "0x10" (hex) or "010" (octal)
	// were accepted with surprising values.
	id, err := strconv.ParseUint(chi.URLParam(r, "user_id"), 10, 64)

	// An ID of 0 is never valid. Previously this path fell through with a
	// nil error, making the function return (nil, nil) — a nil user with no
	// error signal for the caller. Synthesize an error instead.
	if err == nil && id == 0 {
		err = errors.New("user id cannot be 0")
	}

	if err != nil {
		app.handleErrorFormDecoding(err, ErrUserDecode, w)
		return nil, err
	}

	user, err := app.Repo.User.ReadUser(uint(id))

	if err != nil {
		app.handleErrorRead(err, ErrUserDataRead, w)
		return nil, err
	}

	return user, nil
}
// doesUserExist is a semantic validator for writeUser: it rejects creation of
// a user whose email address is already registered. It returns nil when the
// email is free (the lookup yields gorm.ErrRecordNotFound), a validation
// error when the email is taken, and ErrorDataRead for any other read failure.
func doesUserExist(repo *repository.Repository, user *models.User) *HTTPError {
	user, err := repo.User.ReadUserByEmail(user.Email)

	if user != nil && err == nil {
		return &HTTPError{
			Code: ErrUserValidateFields,
			Errors: []string{
				"email already taken",
			},
		}
	}

	// Use errors.Is rather than != so a wrapped gorm.ErrRecordNotFound
	// (e.g. via fmt.Errorf("...: %w", err)) is still treated as "not found".
	if !errors.Is(err, gorm.ErrRecordNotFound) {
		return &ErrorDataRead
	}

	return nil
}
// SendUserExt is the external (JSON) representation of a user written to
// clients by sendUser. Redirect is omitted from the payload when empty.
type SendUserExt struct {
	ID uint `json:"id"`
	Email string `json:"email"`
	EmailVerified bool `json:"email_verified"`
	Redirect string `json:"redirect,omitempty"`
}
// sendUser JSON-encodes the given user fields onto w as a SendUserExt
// payload, returning any encoding error.
func (app *App) sendUser(w http.ResponseWriter, userID uint, email string, emailVerified bool, redirect string) error {
	payload := &SendUserExt{
		ID: userID,
		Email: email,
		EmailVerified: emailVerified,
		Redirect: redirect,
	}

	return json.NewEncoder(w).Encode(payload)
}
// getUserIDFromRequest resolves the authenticated user's ID from the request,
// preferring an API token over the session cookie. It returns an error when
// neither source yields a usable user ID.
func (app *App) getUserIDFromRequest(r *http.Request) (uint, error) {
	// a token, when present, takes precedence over the session
	if tok := app.getTokenFromRequest(r); tok != nil {
		return tok.IBy, nil
	}

	session, err := app.Store.Get(r, app.ServerConf.CookieName)

	if err != nil {
		return 0, fmt.Errorf("could not get session: %s", err.Error())
	}

	raw, ok := session.Values["user_id"]

	if !ok {
		return 0, fmt.Errorf("could not get user id from session")
	}

	// session values are untyped; the stored ID must be a uint
	userID, ok := raw.(uint)

	if !ok {
		return 0, fmt.Errorf("could not get user id from session")
	}

	return userID, nil
}
// startEmailVerificationFlow creates a password-reset token for the given
// user and sends them a verification email whose link carries the raw token
// and the token's database ID as query parameters.
func (app *App) startEmailVerificationFlow(user *models.User) error {
	form := &forms.InitiateResetUserPasswordForm{
		Email: user.Email,
	}

	// build a pw reset token model (and its raw secret) from the form
	pwReset, rawToken, err := form.ToPWResetToken()

	if err != nil {
		return err
	}

	// persist the token so it can be looked up on finalize
	if pwReset, err = app.Repo.PWResetToken.CreatePWResetToken(pwReset); err != nil {
		return err
	}

	queryVals := url.Values{
		"token": []string{rawToken},
		"token_id": []string{fmt.Sprintf("%d", pwReset.ID)},
	}

	client := email.SendgridClient{
		APIKey: app.ServerConf.SendgridAPIKey,
		VerifyEmailTemplateID: app.ServerConf.SendgridVerifyEmailTemplateID,
		SenderEmail: app.ServerConf.SendgridSenderEmail,
	}

	verifyURL := fmt.Sprintf("%s/api/email/verify/finalize?%s", app.ServerConf.ServerURL, queryVals.Encode())

	return client.SendEmailVerification(verifyURL, form.Email)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.