text stringlengths 11 4.05M |
|---|
package metrics
// Metric name components shared by this package's collectors.
var (
	namespace = `taoblog`
	subsystem = `` // intentionally empty — metrics carry no subsystem segment; TODO confirm
)
// bool2string maps a boolean onto the metric label values "1" (true)
// and "0" (false).
func bool2string(v bool) string {
	s := `0`
	if v {
		s = `1`
	}
	return s
}
|
package vc
import (
"crypto/sha1"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
)
// HashObject reads the file at path, stores it in the object database as a
// "blob" object, and prints the resulting object ID. The process is
// terminated if the file cannot be read.
func HashObject(path string) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		log.Fatalf("Error reading file [%v] - %v", path, err)
	}
	fmt.Println(hashObject(data, "blob"))
}
// hashObject writes a git-like object of the given type to the object
// store and returns its hex-encoded SHA-1 object ID.
//
// The stored payload is "<type>\x00<data>"; the file is created under
// <VcDir>/objects/<sha1-hex>.
func hashObject(data []byte, type_ string) string {
	// Build the payload: type header, NUL separator, then the raw data.
	obj := append([]byte(type_), []byte("\x00")...)
	obj = append(obj, data...)
	hashFunc := sha1.New()
	hashFunc.Write(obj)
	oid := hashFunc.Sum(nil)
	oidHex := fmt.Sprintf("%x", oid)
	filePath := filepath.Join(VcDir, "objects", oidHex)
	f, err := os.Create(filePath)
	if err != nil {
		log.Fatalf("Error creating file [%v] - %v", filePath, err)
	}
	defer func() {
		if err := f.Close(); err != nil {
			panic(err)
		}
	}()
	if _, err := f.Write(obj); err != nil {
		panic(err)
	}
	return oidHex
}
|
package config
import (
"fmt"
"io/ioutil"
"gopkg.in/yaml.v3"
formatter_api "github.com/cyberark/secretless-broker/bin/juxtaposer/formatter/api"
)
// Config is the main structure used to define the perfagent parameters.
type Config struct {
	Backends   map[string]Backend                        `yaml:"backends"`   // benchmark targets, keyed by name
	Comparison Comparison                                `yaml:"comparison"` // how runs are compared to the baseline
	Driver     string                                    `yaml:"driver"`     // database driver identifier
	Formatters map[string]formatter_api.FormatterOptions `yaml:"formatters"` // output formatters, keyed by name
}
// Backend describes one database connection target.
type Backend struct {
	Database    string `yaml:"database"`
	Debug       bool   `yaml:"debug"`
	Description string `yaml:"description"`
	Host        string `yaml:"host"`
	Ignore      bool   `yaml:"ignore"` // when true, the backend is dropped during configuration load
	Password    string `yaml:"password"`
	Port        string `yaml:"port"`
	SslMode     string `yaml:"sslmode"`
	Socket      string `yaml:"socket"`
	Username    string `yaml:"username"`
}
// Comparison holds the parameters controlling how candidate backends are
// compared against the baseline backend.
type Comparison struct {
	BaselineBackend             string `yaml:"baselineBackend"`             // name of the backend used as the baseline
	BaselineMaxThresholdPercent int    `yaml:"baselineMaxThresholdPercent"` // allowed slowdown vs. baseline, in percent
	RecreateConnections         bool   `yaml:"recreateConnections"`
	Rounds                      string `yaml:"rounds"`
	Silent                      bool   `yaml:"silent"`
	SqlStatementType            string `yaml:"sqlStatementType"` // only "select" is accepted by verify()
	Threads                     int    `yaml:"threads"`
}
// verify checks that the loaded configuration is internally consistent:
// only "select" statements are supported, at least one thread and one
// formatter must be configured, and the baseline backend must be both
// named and present in the backend map.
func (configuration *Config) verify() error {
	if configuration.Comparison.SqlStatementType != "select" {
		// Bug fix: the old message read "comparison style supported: <x>",
		// wrongly presenting the rejected value as the supported one.
		return fmt.Errorf("unsupported comparison sqlStatementType '%s' (only 'select' is supported)",
			configuration.Comparison.SqlStatementType)
	}
	if configuration.Comparison.Threads < 1 {
		return fmt.Errorf("comparison.Threads must be >= 1. Current value: %d",
			configuration.Comparison.Threads)
	}
	if len(configuration.Formatters) == 0 {
		return fmt.Errorf("no formatters defined")
	}
	baselineBackend := configuration.Comparison.BaselineBackend
	if baselineBackend == "" {
		return fmt.Errorf("comparison baselineBackend must be specified")
	}
	if _, ok := configuration.Backends[baselineBackend]; !ok {
		return fmt.Errorf("comparison baseline backend '%s' not found",
			baselineBackend)
	}
	return nil
}
// NewConfiguration reads the YAML file at configFile, overlays it on top of
// sensible defaults, drops any backends marked `ignore: true`, validates
// the result, and returns it.
func NewConfiguration(configFile string) (*Config, error) {
	yamlFile, err := ioutil.ReadFile(configFile)
	if err != nil {
		return nil, err
	}
	// Default options; values present in the YAML file override these.
	configuration := Config{
		Comparison: Comparison{
			BaselineMaxThresholdPercent: 120,
			RecreateConnections:         false,
			Rounds:                      "1000",
			SqlStatementType:            "select",
			Threads:                     1,
		},
		Formatters: map[string]formatter_api.FormatterOptions{
			"stdout": {},
		},
	}
	if err := yaml.Unmarshal(yamlFile, &configuration); err != nil {
		return nil, err
	}
	// Slice out any backends which are ignored, so downstream code never
	// has to consult the Ignore flag.
	filteredBackends := map[string]Backend{}
	for backendName, backendConfig := range configuration.Backends {
		if !backendConfig.Ignore {
			filteredBackends[backendName] = backendConfig
		}
	}
	configuration.Backends = filteredBackends
	if err := configuration.verify(); err != nil {
		return nil, err
	}
	return &configuration, nil
}
|
package main
import (
idx "../index"
"bytes"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"strconv"
"net/http"
"os"
"os/exec"
"strings"
"path"
"time"
)
// Command-line flag values, assigned in main.
var SRC *string   // directory (list) to index; split on "," before indexing
var INDEX *string // path of the symlink pointing at the current index generation
var URL *string   // URL of the config.json listing repositories

// repository describes one git repository from the remote config.
type repository struct {
	Url string // clone URL
	Dir string // checkout directory, joined under *SRC
}
// exec runs the given command and returns its standard output. Any command
// failure aborts the whole process via log.Fatalf.
func (r *repository) exec(name string, arg ...string) []byte {
	log.Printf("%#v: exec %s %#v", r, name, arg)
	out, err := exec.Command(name, arg...).Output()
	if err != nil {
		log.Fatalf("%#v: failed return code: %+v, output: %s", r, err, string(out))
	}
	return out
}
// path returns the repository's checkout directory under *SRC.
// NOTE(review): *SRC may be a comma-separated list (it is split on ","
// before indexing); joining the raw value here looks suspect — TODO confirm.
func (r *repository) path() string {
	return path.Join(*SRC, r.Dir)
}
// exists reports whether the repository's checkout directory is present
// on disk.
func (r *repository) exists() bool {
	_, err := os.Stat(r.path())
	return err == nil
}
// clone_if_not_exists performs a shallow clone of the repository into its
// target directory unless that directory already exists.
func (r *repository) clone_if_not_exists() {
	if !r.exists() {
		os.MkdirAll(r.path(), 0755)
		out := r.exec("git", "clone", "--depth", "1", r.Url, r.path())
		// Bug fix: use log.Print, not log.Printf — git output containing a
		// '%' would otherwise be misinterpreted as a format string (go vet).
		log.Print(string(out))
	}
}
// pull runs "git pull" inside the repository's checkout directory,
// creating the directory first if needed.
func (r *repository) pull() {
	os.MkdirAll(r.path(), 0755)
	out := r.exec("git", fmt.Sprintf("--work-tree=%s", r.path()), fmt.Sprintf("--git-dir=%s/.git", r.path()), "pull")
	// Bug fix: use log.Print, not log.Printf — git output containing a '%'
	// would otherwise be misinterpreted as a format string (go vet).
	log.Print(string(out))
}
// exec_dont_care runs a command best-effort: the result and any error are
// only logged, never acted on.
func exec_dont_care(name string, arg ...string) []byte {
	out, err := exec.Command(name, arg...).Output()
	log.Printf("%s %#v = %s, [ %+v] ", name, arg, out, err)
	return out
}
// remove recursively deletes name, best-effort (failures are only logged).
func remove(name string) {
	exec_dont_care("rm", "-rvf", name)
}
// main polls the config URL for a repository list, keeps local clones up to
// date, and rebuilds the search index whenever the config payload changes.
func main() {
	current := 0
	old_body := []byte{}
	SRC = flag.String("dir-to-index", "/SRC", "directory to index")
	// Bug fix: INDEX and URL were previously declared with ":=", creating
	// main-local variables that shadowed the package-level pointers and
	// left the globals nil for any other code reading them.
	INDEX = flag.String("dir-to-store", "/tmp/zearch", "directory to store the index")
	URL = flag.String("url", "", "config url")
	flag.Parse()
	if len(*URL) == 0 {
		log.Fatalf("need -url argument for config.json (see https://raw.githubusercontent.com/jackdoe/zearch/master/zearch.io/config.json)")
	}
	// name_for_iteration returns the on-disk name of index generation i.
	name_for_iteration := func(i int) string {
		return fmt.Sprintf("%s.%d", *INDEX, i)
	}
	// Resume the generation counter from the symlink left by a previous run.
	currentLink, err := os.Readlink(*INDEX)
	if err == nil {
		parts := strings.Split(currentLink, ".")
		n, err := strconv.Atoi(parts[len(parts)-1])
		if err == nil && n >= 0 {
			current = n + 1
		}
	}
	for {
		time.Sleep(10000 * time.Millisecond)
		data := []repository{}
		resp, err := http.Get(*URL)
		if err != nil {
			log.Print(err)
			continue
		}
		body, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			log.Print(err)
			continue
		}
		if err := json.Unmarshal(body, &data); err != nil {
			log.Print(err)
			continue
		}
		// Only re-clone/pull and reindex when the config actually changed.
		if !bytes.Equal(body, old_body) {
			for _, repo := range data {
				repo.clone_if_not_exists()
				repo.pull()
			}
			name := name_for_iteration(current)
			remove(name_for_iteration(current - 2)) // prune an old generation
			remove(name)
			a := strings.Split(*SRC, ",")
			idx.Took(fmt.Sprintf("indexing %#v", a), func() {
				idx.DoIndex(name, a)
			})
			// Atomically repoint the INDEX symlink at the new generation:
			// create a temporary link, then rename it over the old one.
			tmp := fmt.Sprintf("%s.lnk", name)
			if err := os.Symlink(name, tmp); err != nil {
				log.Print(err)
			}
			if err := os.Rename(tmp, *INDEX); err != nil {
				log.Print(err)
			}
			// Signal running zearch servers to reopen the index.
			exec_dont_care("pkill", "--signal", "1", "zearch$")
			current++
			old_body = body
		}
	}
}
|
package main
import (
"fmt"
"image"
"os"
"github.com/borkshop/bork/internal/bitmap"
"github.com/borkshop/bork/internal/cops/braille"
"github.com/borkshop/bork/internal/cops/display"
)
// main runs the demo; on failure it reports the error on stderr and exits
// with a non-zero status (previously the error went to stdout and the
// process still exited 0).
func main() {
	if err := run(); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
// run draws two lines into a braille bitmap, renders it into a terminal
// display buffer, and writes the result to stdout.
func run() (err error) {
	w, h := 32, 16
	pb := image.Rect(0, 0, w, h)
	bb := braille.Bounds(pb, braille.Margin)
	front := display.New(pb)
	bmp := bitmap.New(bb)
	// Iterate over the braille dot grid; the w*3 x h*6 bounds suggest each
	// display cell maps to a 3x6 dot sub-grid — TODO confirm against the
	// braille package.
	for y := 0; y < h*6; y++ {
		for x := 0; x < w*3; x++ {
			if x == y || x+y*2/3 == 50 {
				bmp.Set(x, y, true)
			}
		}
	}
	braille.DrawBitmap(front, pb, bmp, image.ZP, braille.Margin, display.Colors[7])
	var buf []byte
	cur := display.Reset
	buf, cur = display.Render(buf, cur, front, display.Model0)
	buf = append(buf, "\r\n"...)
	_, err = os.Stdout.Write(buf)
	return err
}
|
/*
Copyright 2021 The KodeRover Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
// RenderSet holds one revision of a render configuration (key/value pairs
// and chart settings) for a product template or a specific environment.
type RenderSet struct {
	// Name = EnvName == "" ? ProductTmpl : (EnvName + "-" + ProductTempl)
	Name     string `bson:"name" json:"name"`
	Revision int64  `bson:"revision" json:"revision"`
	// May be empty: empty means the product template's default render set;
	// non-empty means the render set of a specific environment.
	EnvName     string         `bson:"env_name,omitempty" json:"env_name,omitempty"`
	ProductTmpl string         `bson:"product_tmpl" json:"product_tmpl"`
	Team        string         `bson:"team,omitempty" json:"team,omitempty"`
	UpdateTime  int64          `bson:"update_time" json:"update_time"`
	UpdateBy    string         `bson:"update_by" json:"update_by"`
	IsDefault   bool           `bson:"is_default" json:"is_default"` // whether this is the default configuration
	KVs         []*RenderKV    `bson:"kvs,omitempty" json:"kvs,omitempty"`
	ChartInfos  []*RenderChart `bson:"chart_infos,omitempty" json:"chart_infos,omitempty"`
	// NOTE(review): the field name is misspelled ("Descritpion") but is kept
	// as-is for backward compatibility; the bson/json tags are spelled
	// correctly.
	Descritpion string `bson:"description,omitempty" json:"description,omitempty"`
}
//// RenderChart ...
//type RenderChart struct {
// ServiceName string `bson:"service_name,omitempty" json:"service_name,omitempty"`
// ChartVersion string `bson:"chart_version,omitempty" json:"chart_version,omitempty"`
// ValuesYaml string `bson:"values_yaml,omitempty" json:"values_yaml,omitempty"`
//}
|
// Copyright (C) 2016-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package boshdirector
import (
"fmt"
"log"
"strings"
"github.com/blang/semver/v4"
"github.com/cloudfoundry/bosh-cli/v7/director"
"github.com/pkg/errors"
)
const (
	semiSemverVersionLength = 2 // "major.minor"
	semverVersionLength     = 3 // "major.minor.patch"
	stemcellVersionLength   = 4 // four components; the leading one is dropped before parsing
	uaaTypeString           = "uaa"
)
// GetInfo queries the BOSH director for its /info data and returns the
// director version plus, for UAA-authenticated directors, the UAA URL.
//
// NOTE(review): the logger parameter is currently unused; it is kept for
// signature compatibility.
func (c *Client) GetInfo(logger *log.Logger) (Info, error) {
	var boshInfo Info
	d, err := c.Director(director.NewNoopTaskReporter())
	if err != nil {
		return Info{}, errors.Wrap(err, "Failed to build director")
	}
	directorInfo, err := d.Info()
	if err != nil {
		return Info{}, err
	}
	boshInfo.Version = directorInfo.Version
	// Non-UAA directors carry no auth options this code cares about.
	if directorInfo.Auth.Type != uaaTypeString {
		return boshInfo, nil
	}
	uaaURL, ok := directorInfo.Auth.Options["url"].(string)
	if ok {
		boshInfo.UserAuthentication = UserAuthentication{
			Options: AuthenticationOptions{
				URL: uaaURL,
			},
		}
	} else {
		return Info{}, errors.New("Cannot retrieve UAA URL from info endpoint")
	}
	return boshInfo, nil
}
// GetDirectorVersion parses the director's reported version string into a
// structured Version. (newBoshDirectorVersion already returns the zero
// Version alongside any error, so the result can be forwarded directly.)
func (boshInfo *Info) GetDirectorVersion() (Version, error) {
	return newBoshDirectorVersion(boshInfo.Version)
}
// newBoshDirectorVersion parses a raw director version string into a
// Version, classifying it as semver-style or stemcell-style.
//
// Only the first whitespace-separated field is considered:
//   - 2 or 3 dot-separated numbers  -> semver director version
//   - 4 dot-separated numbers       -> stemcell version; the leading
//     component is dropped and the remaining three are parsed
func newBoshDirectorVersion(rawVersion string) (Version, error) {
	trimmedVersion := strings.Fields(rawVersion)
	if len(trimmedVersion) == 0 {
		return Version{}, unrecognisedBoshDirectorVersionError(rawVersion)
	}
	versionPart := trimmedVersion[0]
	versionNumbers := strings.Split(versionPart, ".")
	var versionType VersionType
	switch len(versionNumbers) {
	case semverVersionLength, semiSemverVersionLength:
		versionType = SemverDirectorVersionType
	case stemcellVersionLength:
		versionType = StemcellDirectorVersionType
		versionNumbers = versionNumbers[1:4]
	default:
		return Version{}, unrecognisedBoshDirectorVersionError(rawVersion)
	}
	version, err := semver.ParseTolerant(strings.Join(versionNumbers, "."))
	if err != nil {
		return Version{}, unrecognisedBoshDirectorVersionError(rawVersion)
	}
	return Version{Version: version, Type: versionType}, nil
}
// unrecognisedBoshDirectorVersionError builds the standard error for a
// version string that could not be parsed.
func unrecognisedBoshDirectorVersionError(rawVersion string) error {
	return fmt.Errorf("unrecognised BOSH Director version: %q", rawVersion)
}
|
package netmgr
import (
. "github.com/ajruckman/xlib"
"github.com/ajruckman/ContraCore/internal/db/contralog"
"github.com/ajruckman/ContraCore/internal/schema"
"github.com/ajruckman/ContraCore/internal/system"
)
// The maximum number of logs kept in the query log cache.
const cacheSize = 5000

// logCache holds the latest query logs so that they can be replayed to
// newly-connected ContraWeb clients.
var logCache []schema.Log
// loadCache reads at most cacheSize recent query logs from ContraLog into
// the in-memory cache. It is a no-op (with a warning) while ContraLog is
// offline.
func loadCache() {
	if !system.ContraLogOnline.Load() {
		system.Console.Warning("ContraLog is disconnected; not loading recent query log cache yet")
		return
	}
	logs, err := contralog.GetLastNLogs(cacheSize)
	Err(err) // xlib.Err presumably aborts/logs on a non-nil error — TODO confirm
	logCache = schema.LogsFromContraLogs(logs)
	system.Console.Infof("loaded %d recent logs from ContraLog", len(logCache))
}
|
package main
import (
"github.com/kr/pretty"
)
// generate returns the first numRows rows of Pascal's triangle. For
// numRows == 0 an empty triangle is returned.
func generate(numRows int) [][]int {
	triangle := make([][]int, 0, numRows)
	for row := 0; row < numRows; row++ {
		cur := make([]int, row+1)
		cur[0] = 1
		cur[row] = 1
		// Interior entries are the sum of the two entries above them.
		for k := 1; k < row; k++ {
			cur[k] = triangle[row-1][k-1] + triangle[row-1][k]
		}
		triangle = append(triangle, cur)
	}
	return triangle
}
// main pretty-prints the first five rows of Pascal's triangle.
func main() {
	pretty.Println(generate(5))
}
|
// A simple rest client
// based on:
// - https://medium.com/@marcus.olsson/writing-a-go-client-for-your-restful-api-c193a2f4998c
// - https://golang.org/pkg/net/http/
// - https://golang.org/pkg/io/ioutil/
// author: andreasl
package main
import (
"fmt"
"io/ioutil"
"net/http"
)
// get dispatches a GET request to example.com and prints the response body.
func get() {
	resp, err := http.Get("http://example.com/")
	if err != nil {
		// Bug fix: on a transport error resp is nil, so the previously
		// unconditional `defer resp.Body.Close()` would panic. Return early
		// instead.
		fmt.Println("Looks like an error: ", err)
		return
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body) // ioutil.ReadAll returns a byte slice
	if err != nil {
		fmt.Println("Looks like another error: ", err)
		return
	}
	fmt.Println(string(body))
}
// main greets, performs the demo GET request, and says goodbye.
func main() {
	fmt.Print("hi!\n\n")
	get()
	fmt.Println("\nbye!")
}
|
package handlers
import (
"fmt"
"net/http"
)
// ProjectedHandler is an http.Handler stub that dumps each incoming
// request to stdout and writes no response.
type ProjectedHandler struct {
}

// ServeHTTP implements http.Handler; it only prints the request.
func (handler *ProjectedHandler) ServeHTTP(writer http.ResponseWriter, httpRequest *http.Request) {
	fmt.Println(httpRequest)
}
|
package log
import (
"fmt"
"os"
"strconv"
"go.uber.org/zap"
)
var _logger *zap.SugaredLogger // process-wide sugared logger, set in init
var _debugMode bool            // true when APP_DEBUG parses as a true boolean
// init reads debug mode from the environment and builds the zap logger.
func init() {
	// Read debug mode from the APP_DEBUG environment variable; malformed
	// values leave debug mode off.
	if v, ok := os.LookupEnv("APP_DEBUG"); ok {
		if v, err := strconv.ParseBool(v); err == nil {
			_debugMode = v
		}
	}
	// Initialize the logger.
	_logger = newZapLogger()
}
// Terminate flushes any buffered log entries; call it on shutdown.
// A flush failure is reported on stderr.
func Terminate() {
	if err := _logger.Sync(); err != nil {
		_, _ = fmt.Fprintln(os.Stderr, "Terminate log error: ", err.Error())
	}
}
// Logger returns the process-wide sugared zap logger.
func Logger() *zap.SugaredLogger {
	return _logger
}
// IsDebug reports whether APP_DEBUG enabled debug mode at startup.
func IsDebug() bool {
	return _debugMode
}
|
package main
import (
"strings"
"time"
"intra-hub/confperso"
"intra-hub/db"
_ "intra-hub/models"
_ "intra-hub/routers"
_ "intra-hub/tasks"
"encoding/json"
"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
_ "github.com/astaxie/beego/session/mysql"
"github.com/beego/i18n"
"github.com/eknkc/dateformat"
_ "github.com/go-sql-driver/mysql"
"math/rand"
"regexp"
"strconv"
)
// Database connection and presentation constants. Credentials come from
// the personal confperso package.
const (
	driverSQL                  = "mysql"
	aliasDbName                = confperso.AliasDbName
	databaseName               = confperso.DatabaseName
	username                   = confperso.Username
	password                   = confperso.Password
	maxIdleConns               = 150
	maxOpenConns               = 150
	optionsDatabaseConnections = "?charset=utf8"
	dateFormat                 = "dddd DD MMMM YYYY HH:mm:ss" // eknkc/dateformat layout
)
// langType represents a language type: its code (e.g. "en") and its
// display name.
type langType struct {
	Lang, Name string
}
// init wires up the whole beego application: i18n message files, session
// storage, template delimiters, static paths, the ORM/database connection,
// and the custom template helper functions.
func init() {
	// This is just used to get every languages files (see app.conf files)
	langs := strings.Split(beego.AppConfig.String("lang::types"), "|")
	names := strings.Split(beego.AppConfig.String("lang::names"), "|")
	langTypes := make([]*langType, 0, len(langs))
	for i, v := range langs {
		langTypes = append(langTypes, &langType{
			Lang: v,
			Name: names[i],
		})
	}
	// NOTE(review): langTypes is built but not used anywhere in this
	// function — TODO confirm whether it is still needed.
	// Then we load every language files with i18n.SetMessage
	for _, lang := range langs {
		beego.Trace("Loading language: " + lang)
		if err := i18n.SetMessage(lang, "conf/"+"locale_"+lang+".ini"); err != nil {
			beego.Error("Fail to set message file: " + err.Error())
			return
		}
	}
	// Set session on; sessions are stored via the MySQL session provider
	// using the same credentials as the ORM connection below.
	beego.SessionOn = true
	beego.SessionProvider = driverSQL
	beego.SessionSavePath = username + ":" + password + "@/" + databaseName + optionsDatabaseConnections
	beego.EnableAdmin = true
	// Use "[[" / "]]" as template delimiters instead of the default braces.
	beego.TemplateLeft = "[["
	beego.TemplateRight = "]]"
	// Serve static files
	beego.SetStaticPath("/css", "static/css")
	beego.SetStaticPath("/js", "static/js")
	beego.SetStaticPath("/img", "static/img")
	beego.SetStaticPath("/favicon", "static/favicon")
	// Set the ORM parameters
	orm.RegisterDriver(driverSQL, orm.DR_MySQL)
	orm.RegisterDataBase(aliasDbName, driverSQL, username+":"+password+"@/"+databaseName+optionsDatabaseConnections)
	orm.SetMaxIdleConns(aliasDbName, maxIdleConns)
	orm.SetMaxOpenConns(aliasDbName, maxOpenConns)
	orm.DefaultTimeLoc = time.UTC
	orm.RunCommand()
	db.PopulateDatabase()
	// Add Default templating functions
	beego.AddFuncMap("i18n", i18n.Tr)
	// incr/decr render an int argument incremented/decremented by one.
	incr := func(arg int) string {
		return strconv.FormatInt(int64(arg+1), 10)
	}
	decr := func(arg int) string {
		return strconv.FormatInt(int64(arg-1), 10)
	}
	// randomizeLabel picks a random bootstrap-style label class.
	randomizeLabel := func() string {
		labels := []string{"success", "warning", "danger", "info", "primary", "default"}
		return labels[rand.Intn(len(labels))]
	}
	// toJSON marshals a value to JSON, silently ignoring marshal errors.
	toJSON := func(val interface{}) string {
		js, _ := json.Marshal(val)
		return string(js)
	}
	// datefr formats a time in French, localized to Europe/Paris.
	datefr := func(val time.Time) string {
		locale, _ := time.LoadLocation("Europe/Paris")
		return dateformat.FormatLocale(val.In(locale), dateFormat, dateformat.French)
	}
	// This func is compatible with markdown.
	// html2str strips tags, <style> and <script> blocks from an HTML
	// fragment, leaving plain text.
	html2str := func(html string) string {
		src := string(html) // NOTE(review): html is already a string; this conversion is a no-op
		re, _ := regexp.Compile("\\<[\\S\\s]+?\\>")
		src = re.ReplaceAllStringFunc(src, strings.ToLower)
		re, _ = regexp.Compile("\\<style[\\S\\s]+?\\</style\\>")
		src = re.ReplaceAllString(src, "")
		re, _ = regexp.Compile("\\<script[\\S\\s]+?\\</script\\>")
		src = re.ReplaceAllString(src, "")
		re, _ = regexp.Compile("\\<[\\S\\s]+?\\>")
		src = re.ReplaceAllString(src, "\n")
		return strings.TrimSpace(src)
	}
	beego.AddFuncMap("incr", incr)
	beego.AddFuncMap("decr", decr)
	beego.AddFuncMap("randLabel", randomizeLabel)
	beego.AddFuncMap("toJSON", toJSON)
	beego.AddFuncMap("datefr", datefr)
	beego.AddFuncMap("myhtml2str", html2str)
}
// main starts the beego application (all configuration happens in init).
func main() {
	// Run the app
	beego.Run()
}
|
package termite
import (
"fmt"
"log"
"os"
"sync"
"time"
"github.com/hanwen/go-fuse/fuse"
"github.com/hanwen/go-fuse/fuse/nodefs"
)
var _ = log.Println // keep "log" imported even when debug prints are removed

// lazyLoopbackFile is a nodefs.File whose backing OS file is opened only
// on first access.
type lazyLoopbackFile struct {
	nodefs.File

	mu   sync.Mutex  // guards f
	f    nodefs.File // lazily-opened loopback file; nil until first use
	Name string      // path of the backing file
}
// NewLazyLoopbackFile returns a loopback file for path n that defers
// opening the underlying OS file until it is first needed.
func NewLazyLoopbackFile(n string) nodefs.File {
	return &lazyLoopbackFile{
		File: nodefs.NewDefaultFile(),
		Name: n,
	}
}
// file opens the backing file on first use and caches it; subsequent calls
// return the cached handle. Open failures are converted to fuse statuses.
func (me *lazyLoopbackFile) file() (nodefs.File, fuse.Status) {
	me.mu.Lock()
	defer me.mu.Unlock()
	if me.f == nil {
		f, err := os.Open(me.Name)
		if err != nil {
			return nil, fuse.ToStatus(err)
		}
		me.f = nodefs.NewLoopbackFile(f)
	}
	return me.f, fuse.OK
}
// InnerFile exposes the lazily-opened backing file (nil if opening failed).
func (me *lazyLoopbackFile) InnerFile() nodefs.File {
	f, _ := me.file()
	return f
}

// String implements fmt.Stringer for debugging.
func (me *lazyLoopbackFile) String() string {
	return fmt.Sprintf("lazyLoopbackFile(%s)", me.Name)
}
// Read lazily opens the backing file and forwards the read to it.
func (me *lazyLoopbackFile) Read(buf []byte, off int64) (fuse.ReadResult, fuse.Status) {
	f, s := me.file()
	if s.Ok() {
		return f.Read(buf, off)
	}
	// Bug fix: propagate the open error instead of masking it as fuse.OK
	// (GetAttr already follows this pattern).
	return nil, s
}
// Release closes the cached backing file, if one was ever opened.
func (me *lazyLoopbackFile) Release() {
	me.mu.Lock()
	defer me.mu.Unlock()
	if me.f != nil {
		me.f.Release()
	}
}
// Write is rejected with EPERM: lazy loopback files are read-only.
func (me *lazyLoopbackFile) Write(s []byte, off int64) (uint32, fuse.Status) {
	return 0, fuse.EPERM
}
// GetAttr lazily opens the backing file and forwards the attribute query,
// propagating any open error.
func (me *lazyLoopbackFile) GetAttr(a *fuse.Attr) fuse.Status {
	f, s := me.file()
	if s.Ok() {
		return f.GetAttr(a)
	}
	return s
}
// The file is immutable through this wrapper: all mutating operations
// below are rejected with EPERM.

func (me *lazyLoopbackFile) Utimens(atimeNs, mtimeNs *time.Time) fuse.Status {
	return fuse.EPERM
}

func (me *lazyLoopbackFile) Truncate(size uint64) fuse.Status {
	return fuse.EPERM
}

func (me *lazyLoopbackFile) Chown(uid uint32, gid uint32) fuse.Status {
	return fuse.EPERM
}

func (me *lazyLoopbackFile) Chmod(perms uint32) fuse.Status {
	return fuse.EPERM
}
|
// Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"io/ioutil"
"os"
"k8s.io/apimachinery/pkg/runtime/serializer"
intconfig "github.com/gardener/test-infra/pkg/apis/config"
"github.com/gardener/test-infra/pkg/testmachinery"
flag "github.com/spf13/pflag"
"github.com/gardener/test-infra/pkg/apis/testmachinery/v1beta1"
"github.com/gardener/test-infra/pkg/logger"
"github.com/gardener/test-infra/pkg/testmachinery/testrun"
)
// Connection to remote is needed to validate remote testdefinitions.
//
// main loads the testmachinery configuration, parses the given testrun
// file, validates it, and exits non-zero on any failure.
func main() {
	logger.InitFlags(nil)
	configPath := flag.String("config", "", "Filepath to configuration")
	trFilePath := flag.String("testrun", "examples/int-testrun.yaml", "Filepath to the testrun")
	flag.Parse()
	data, err := ioutil.ReadFile(*configPath)
	if err != nil {
		fmt.Print(err.Error())
		os.Exit(1)
	}
	// Decode the on-disk config into the internal Configuration type.
	decoder := serializer.NewCodecFactory(testmachinery.ConfigScheme).UniversalDecoder()
	config := &intconfig.Configuration{}
	if _, _, err := decoder.Decode(data, nil, config); err != nil {
		fmt.Print(err.Error())
		os.Exit(1)
	}
	if err := testmachinery.Setup(config); err != nil {
		fmt.Print(err.Error())
		os.Exit(1)
	}
	log, err := logger.New(nil)
	if err != nil {
		fmt.Print(err.Error())
		os.Exit(1)
	}
	log.Info("Start Validator")
	// Verbosity probes to show which log levels are active.
	log.V(3).Info("test 3")
	log.V(4).Info("test 4")
	log.V(5).Info("test 5")
	tr, err := testmachinery.ParseTestrunFromFile(*trFilePath)
	if err != nil {
		log.Error(err, "unable to parse", "path", *trFilePath)
		os.Exit(1)
	}
	// NOTE(review): Validate appears to return the error as its FIRST
	// result — confirm against the testrun package signature.
	if err, _ := testrun.Validate(log.WithValues("testrun", internalName(tr)), tr); err != nil {
		log.Error(err, "invalid Testrun", "testrun", internalName(tr))
		os.Exit(1)
	}
	log.Info("successfully validated", "testrun", internalName(tr))
}
// internalName determines an internal name for the testrun: its Name if
// set, otherwise its GenerateName, otherwise the literal "noName".
func internalName(tr *v1beta1.Testrun) string {
	switch {
	case tr.Name != "":
		return tr.Name
	case tr.GenerateName != "":
		return tr.GenerateName
	default:
		return "noName"
	}
}
|
package Product_of_Array_Except_Self
// productExceptSelf returns, for each index i, the product of all elements
// of nums except nums[i], using two passes (prefix and suffix products)
// and no division. Inputs with fewer than two elements are returned as-is.
func productExceptSelf(nums []int) []int {
	n := len(nums)
	if n < 2 {
		return nums
	}
	out := make([]int, n)
	// First pass: out[i] = product of everything left of i.
	prefix := 1
	for i, v := range nums {
		out[i] = prefix
		prefix *= v
	}
	// Second pass: multiply in the product of everything right of i.
	suffix := 1
	for i := n - 1; i >= 0; i-- {
		out[i] *= suffix
		suffix *= nums[i]
	}
	return out
}
|
/*
Name : Kamil KAPLAN
Date : 25.07.2019
*/
package models
// AdministrativeArea models an administrative area record
// (state/province/district) with localized and English names — presumably
// mirroring a weather-API response shape; TODO confirm the upstream API.
type AdministrativeArea struct {
	ID            string `json:"ID,omitempty"`
	LocalizedName string `json:"LocalizedName,omitempty"`
	EnglishName   string `json:"EnglishName,omitempty"`
	Level         int32  `json:"Level,omitempty"`
	LocalizedType string `json:"LocalizedType,omitempty"`
	EnglishType   string `json:"EnglishType,omitempty"`
	CountryID     string `json:"CountryID,omitempty"`
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package testcat
import (
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/errors"
)
// DropTable is a partial implementation of the DROP TABLE statement.
//
// For every name in the statement it qualifies the table name, removes the
// table's outbound foreign keys from the referenced tables' inbound lists,
// panics if other tables still reference the table, and finally deletes it
// from the test schema.
func (tc *Catalog) DropTable(stmt *tree.DropTable) {
	for i := range stmt.Names {
		tn := &stmt.Names[i]
		// Update the table name to include catalog and schema if not provided.
		tc.qualifyTableName(tn)
		// Ensure that table with that name exists.
		t := tc.Table(tn)
		// Clean up FKs from tables referenced by t.
		for _, fk := range t.outboundFKs {
			for _, ds := range tc.testSchema.dataSources {
				if ds.ID() == fk.referencedTableID {
					ref := ds.(*Table)
					// Rebuild the referenced table's inbound FK list without
					// the entries originating from t.
					oldFKs := ref.inboundFKs
					ref.inboundFKs = nil
					for i := range oldFKs {
						if oldFKs[i].originTableID != t.ID() {
							ref.inboundFKs = append(ref.inboundFKs, oldFKs[i])
						}
					}
					break
				}
			}
		}
		// Dropping a table that is still referenced by other tables is an error.
		if len(t.inboundFKs) > 0 {
			panic(errors.Newf("table %s is referenced by FK constraints", tn))
		}
		// Remove the table from the catalog.
		delete(tc.testSchema.dataSources, tn.FQString())
	}
}
|
package assumeMethod
import "fmt"
// User is a minimal user record used to demonstrate method receivers.
type User struct {
	Name  string
	Email string
}
// Notify prints the user's name and email to stdout.
func (u *User) Notify() {
	fmt.Printf("%v : %v\n", u.Name, u.Email)
}
// Receiver semantics notes (translated from the original Chinese):
// When the receiver is a value type, the method operates on a copy of the
// receiver — even when invoked through a pointer. When the receiver is a
// pointer, the method operates on the original value — even when invoked
// on an (addressable) value.
func UseNotify() {
	u1 := User{
		"enoch",
		"213855@qq.com",
	}
	u1.Notify() // value call on a pointer-receiver method: Go takes &u1 automatically
	u2 := User{"go", "go@go.com"}
	u3 := &u2
	u3.Notify()
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package capturemode contains helper methods to work with Capture Mode.
package capturemode
import (
"context"
"strings"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/display"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/mouse"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/quicksettings"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/coords"
)
// ErrCaptureModeNotFound is returned by TakeAreaScreenshot if capture mode
// was not found in the UI.
//
// For example, capture mode might not be allowed by admin policy.
var ErrCaptureModeNotFound = errors.New("capture mode not found in the UI")
// enterCaptureMode opens the system tray and clicks the "Screen capture"
// quick-settings button, entering capture mode.
func enterCaptureMode(ctx context.Context, tconn *chrome.TestConn) error {
	if err := quicksettings.Show(ctx, tconn); err != nil {
		return errors.Wrap(err, "failed to show system tray")
	}
	ui := uiauto.New(tconn)
	if err := ui.WithTimeout(10 * time.Second).LeftClick(nodewith.Name("Screen capture").ClassName("FeaturePodIconButton"))(ctx); err != nil {
		return errors.Wrap(err, "failed to find and click capture mode button")
	}
	return nil
}
// TakeAreaScreenshot opens the system tray, enters capture mode, selects an
// area around the primary display's center, and takes a screenshot. It
// returns ErrCaptureModeNotFound when the capture UI is absent (e.g.
// disabled by policy).
func TakeAreaScreenshot(ctx context.Context, tconn *chrome.TestConn) error {
	if err := enterCaptureMode(ctx, tconn); err != nil {
		return errors.Wrap(err, "failed to enter capture mode")
	}
	info, err := display.GetPrimaryInfo(ctx, tconn)
	if err != nil {
		return errors.Wrap(err, "failed to get the primary display info")
	}
	screenCenter := info.WorkArea.CenterPoint()
	// We need to click outside of previous selected area, otherwise we might
	// resize selected area to an empty rectangle and won't see a capture button.
	if err := uiauto.Combine("click and drag",
		mouse.Click(tconn, screenCenter.Sub(coords.Point{X: 200, Y: 200}), mouse.LeftButton),
		mouse.Drag(tconn, screenCenter.Sub(coords.Point{X: 100, Y: 100}), screenCenter.Add(coords.Point{X: 100, Y: 100}), 0*time.Second),
	)(ctx); err != nil {
		return errors.Wrap(err, "failed to click outside of previously selected area and drag mouse")
	}
	ui := uiauto.New(tconn)
	captureMode := nodewith.Name("Capture").Role(role.Button)
	if err := ui.WithTimeout(10 * time.Second).LeftClick(captureMode)(ctx); err != nil {
		// Return ErrCaptureModeNotFound if capture mode UI does not exist, so caller can handle this case separately.
		if strings.Contains(err.Error(), nodewith.ErrNotFound) {
			return ErrCaptureModeNotFound
		}
		return errors.Wrap(err, "failed to find and click capture button")
	}
	return nil
}
|
package websocket
import (
"log"
"sync"
"time"
websocket "github.com/gorilla/websocket"
)
/*
This file can be used to send/receive messages across/from a websocket
connection. The supported binary format is protobuf.
*/

// WebSocket is a thin, write-serialized wrapper around a websocket
// connection.
type WebSocket interface {
	Send([]byte) error             // send a message to the browser
	Updates() chan (<-chan []byte) // notify the listener of an open websocket connection
	Close() error                  // try to send a websocket close message
}
// webSocket is the concrete WebSocket implementation backed by a gorilla
// connection.
type webSocket struct {
	conn     *websocket.Conn
	mu       *sync.Mutex          // protect the websocket writer
	updates  chan (<-chan []byte) // notify the listener of a new channel
	receiver chan []byte          // client messages arrive here
}
// NewWebSocket wraps conn, starts the read pump and heartbeat goroutines,
// and returns the wrapper.
func NewWebSocket(conn *websocket.Conn) WebSocket {
	ws := &webSocket{
		conn:     conn,
		mu:       &sync.Mutex{},
		updates:  make(chan (<-chan []byte)),
		receiver: make(chan []byte, 1024),
	}
	// Bound message size and require regular pongs so dead peers time out.
	ws.conn.SetReadLimit(32000)
	ws.conn.SetReadDeadline(time.Now().Add(60 * time.Second))
	ws.conn.SetPongHandler(func(string) error {
		ws.conn.SetReadDeadline(time.Now().Add(60 * time.Second))
		return nil
	})
	go ws.readPump()
	go ws.heartbeat()
	return ws
}
// Send writes msg to the websocket connection as a single binary message.
//
// Only one writer is allowed at a time, so the write path is serialized
// with a mutex.
func (ws *webSocket) Send(msg []byte) error {
	ws.mu.Lock()
	defer ws.mu.Unlock()
	w, err := ws.conn.NextWriter(websocket.BinaryMessage)
	if err != nil {
		return err
	}
	// Bug fix: the write error was previously discarded; report it so the
	// caller knows the message may not have been delivered.
	if _, err := w.Write(msg); err != nil {
		w.Close() // best effort: release the message writer before bailing out
		return err
	}
	if err = w.Close(); err != nil {
		// log.Printf("Error closing message: %s", err)
		return err
	}
	return nil
}
// Updates returns the channel on which a newly-opened connection publishes
// its receive-only message channel; the requester cannot close it.
func (ws *webSocket) Updates() chan (<-chan []byte) {
	return ws.updates
}
// Close tries to send a normal-closure websocket control message to the peer.
func (ws *webSocket) Close() error {
	return ws.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
}
// readPump forwards messages received from the websocket connection.
//
// There is at most one reader per websocket connection. On exit the
// connection and both channels are closed.
func (ws *webSocket) readPump() {
	defer func() {
		ws.conn.Close()
		close(ws.receiver)
		close(ws.updates)
		log.Println("closed ws conn")
	}()
	// Hand the receive channel to whoever is listening on Updates().
	ws.updates <- ws.receiver
	for {
		_, b, err := ws.conn.ReadMessage() // blocks until message read or error
		if err != nil {
			// Log an error if this websocket connection did not close properly
			if websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) {
				log.Printf("Closing Error: %s", err)
			}
			break
		}
		ws.receiver <- b
	}
}
// heartbeat keeps the websocket connection alive by sending a ping every
// 50 seconds; the goroutine ends as soon as a ping fails.
func (ws *webSocket) heartbeat() {
	ticker := time.NewTicker(50 * time.Second)
	defer ticker.Stop()
	// Idiom fix: a one-case select inside a for loop is just a channel
	// range (staticcheck S1000).
	for range ticker.C {
		ws.conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
		if err := ws.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
			return
		}
	}
}
|
package acmetool_account_thumbprint
import (
"fmt"
"github.com/hlandau/acme/acmeapi/acmeutils"
"github.com/hlandau/acme/acmetool"
"github.com/hlandau/acme/storage"
)
// Register wires the "account-thumbprint" subcommand into the app.
func Register(app *acmetool.App) {
	app.CommandLine.Command("account-thumbprint", "Prints account thumbprints")
	app.Commands["account-thumbprint"] = Main
}
// Main prints "<thumbprint>\t<account-id>" for every account in the state
// directory.
func Main(ctx acmetool.Ctx) {
	s, err := storage.NewFDB(ctx.StateDir)
	ctx.Logger.Fatale(err, "storage") // Fatale presumably aborts when err != nil — TODO confirm
	s.VisitAccounts(func(a *storage.Account) error {
		// Thumbprint errors are ignored; an empty thumbprint is printed.
		thumbprint, _ := acmeutils.Base64Thumbprint(a.PrivateKey)
		fmt.Printf("%s\t%s\n", thumbprint, a.ID())
		return nil
	})
}
|
package c
// Token type discriminators.
const (
	TokenTypeLogin = "login"
)

// How a token was obtained.
const (
	TokenObtainedBySignup = "sign-up"
	TokenObtainedByLogin  = "login"
)
|
package pkg
import (
"net/http"
"net/http/httptest"
"testing"
)
// TestSetupRouter checks that the router rejects every non-GET verb on
// /word with 405 and returns 404 for the bare root path.
func TestSetupRouter(t *testing.T) {
	tests := []struct {
		name  string
		verb  string
		route string
		want  int // expected HTTP status code
	}{
		{"POST not allowed", "POST", "/word", 405},
		{"PUT not allowed", "PUT", "/word", 405},
		{"DELETE not allowed", "DELETE", "/word", 405},
		{"PATCH not allowed", "PATCH", "/word", 405},
		{"POST not allowed", "HEAD", "/word", 405},
		{"OPTIONS not allowed", "OPTIONS", "/word", 405},
		{"Test no route", "GET", "/", 404},
	}
	r := SetupRouter()
	for _, tt := range tests {
		request, err := http.NewRequest(tt.verb, tt.route, nil)
		if err != nil {
			t.Errorf("%s request to %s failed", tt.verb, tt.route)
		}
		// Record the response without a real network listener.
		w := httptest.NewRecorder()
		r.ServeHTTP(w, request)
		got := w.Code
		t.Run(tt.name, func(t *testing.T) {
			if err != nil {
				t.Errorf("Request with verb %s failed", tt.verb)
			}
			if got != tt.want {
				t.Errorf("Request verb (%s) got %v, want %v", tt.verb, got, tt.want)
			}
		})
	}
}
// TestHelloServer verifies GET routing: the bare root is 404, a path
// with disallowed characters is 400, and a plain word is 200.
func TestHelloServer(t *testing.T) {
	tests := []struct {
		name     string
		testcase string
		want     int
	}{
		{"empty string", "/", http.StatusNotFound},
		{"string not allowed", "/string!", http.StatusBadRequest},
		{"valid string", "/word", http.StatusOK},
	}
	r := SetupRouter()
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			request, err := http.NewRequest("GET", tt.testcase, nil)
			if err != nil {
				// Fixes the original's garbled message "%s request to failed".
				t.Fatalf("GET request to %s failed: %v", tt.testcase, err)
			}
			w := httptest.NewRecorder()
			r.ServeHTTP(w, request)
			if got := w.Code; got != tt.want {
				t.Errorf("Request got %v, want %v", got, tt.want)
			}
		})
	}
}
|
package logdb
import (
"encoding/binary"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
// coderTypes maps a human-readable coder name to a factory producing a
// fresh in-memory CodingDB using that codec. The tests below run once
// per entry.
var coderTypes = map[string]func() *CodingDB{
	"id":     func() *CodingDB { return IdentityCoder(&InMemDB{}) },
	"binary": func() *CodingDB { return BinaryCoder(&InMemDB{}, binary.LittleEndian) },
	"gob":    func() *CodingDB { return GobCoder(&InMemDB{}) },
}
// TestCoding_AppendValue appends 255 entries one at a time through each
// coder and verifies IDs are sequential from 1 and values round-trip.
func TestCoding_AppendValue(t *testing.T) {
	for name, newCoder := range coderTypes {
		t.Logf("Coder: %s\n", name)
		coder := newCoder()
		entries := make([][]byte, 255)
		for n := range entries {
			entries[n] = []byte(fmt.Sprintf("entry %v", n))
		}
		for n, entry := range entries {
			id, err := coder.AppendValue(entry)
			assert.Nil(t, err, "expected no error in append")
			assert.Equal(t, uint64(n+1), id, "expected equal ID")
			out := make([]byte, len(entry))
			// The gob coder decodes into a pointer to the slice.
			if name == "gob" {
				err = coder.GetValue(id, &out)
			} else {
				err = coder.GetValue(id, out)
			}
			assert.Nil(t, err, "expected no error in get")
			assert.Equal(t, entry, out, "expected equal '[]byte' values")
		}
	}
}
// TestCoding_AppendValues appends 255 entries in a single batch call and
// verifies the batch starts at ID 1 and every entry reads back intact.
func TestCoding_AppendValues(t *testing.T) {
	for name, newCoder := range coderTypes {
		t.Logf("Coder: %s\n", name)
		coder := newCoder()
		entries := make([][]byte, 255)
		for n := range entries {
			entries[n] = []byte(fmt.Sprintf("entry %v", n))
		}
		firstID, err := coder.AppendValues(entries)
		assert.Nil(t, err, "expected no error in append")
		assert.Equal(t, uint64(1), firstID, "expected first ID")
		for n, entry := range entries {
			out := make([]byte, len(entry))
			// The gob coder decodes into a pointer to the slice.
			if name == "gob" {
				err = coder.GetValue(uint64(n+1), &out)
			} else {
				err = coder.GetValue(uint64(n+1), out)
			}
			assert.Nil(t, err, "expected no error in get")
			assert.Equal(t, entry, out, "expected equal '[]byte' values")
		}
	}
}
|
package helper
import (
"github.com/stretchr/testify/assert"
"runtime"
"testing"
)
// TestUtils_init checks that package init detected GNU tar on Linux and
// a non-GNU tar elsewhere.
func TestUtils_init(t *testing.T) {
	// testify's signature is assert.Equal(t, expected, actual); the
	// original swapped the last two arguments, which garbles failure
	// messages ("expected IsGnuTar, got true").
	if runtime.GOOS == "linux" {
		assert.Equal(t, true, IsGnuTar)
	} else {
		assert.Equal(t, false, IsGnuTar)
	}
}
|
package main
import "fmt"
/*
Given a string and an integer k,
you need to reverse the first k characters for every 2k characters counting
from the start of the string. If there are less than k characters left,
reverse all of them.
If there are less than 2k but greater than or equal to k characters,
then reverse the first k characters and left the other as original.
Example:
Input: s = "abcdefg", k = 2
Output: "bacdfeg"
Restrictions:
The string consists of lower English letters only.
Length of the given string and k will in the range [1, 10000]
*/
// reverseStr reverses the first k characters of every 2k-character
// window of s. In the final partial window, fewer than k remaining
// characters are all reversed; between k and 2k remaining, only the
// first k are reversed (LeetCode 541 semantics).
func reverseStr(s string, k int) string {
	// Work on a byte slice in place instead of the original's quadratic
	// string concatenation; this also removes a leftover debug
	// fmt.Println(i) that printed the loop index to stdout.
	b := []byte(s)
	for i := 0; i < len(b); i += 2 * k {
		end := i + k
		if end > len(b) {
			end = len(b)
		}
		for l, r := i, end-1; l < r; l, r = l+1, r-1 {
			b[l], b[r] = b[r], b[l]
		}
	}
	return string(b)
}
// reverse returns the substring s[i:j] with its characters reversed.
// An empty (or inverted) range yields the empty string.
func reverse(s string, i, j int) string {
	if j <= i {
		return ""
	}
	out := make([]byte, j-i)
	for n := range out {
		out[n] = s[j-1-n]
	}
	return string(out)
}
// main demonstrates reverseStr on a short example string.
func main() {
	result := reverseStr("abcd", 2)
	fmt.Println(result)
}
|
package main
import (
"fmt"
"io"
)
// MifWriter emits a MIF (Memory Initialization File) describing a
// 1024-word, 32-bit-wide memory with hexadecimal address and data
// radices.
type MifWriter struct {
	w io.Writer
}

// NewMifWriter writes the fixed MIF header to w and returns a writer
// positioned at the start of the content section.
func NewMifWriter(w io.Writer) (ww *MifWriter) {
	for _, line := range []string{
		"WIDTH=32;",
		"DEPTH=1024;",
		"ADDRESS_RADIX=HEX;",
		"DATA_RADIX=HEX;",
		"CONTENT BEGIN",
	} {
		fmt.Fprint(w, line+"\n")
	}
	return &MifWriter{w: w}
}

// WriteWord emits a single "address : data;" content line in hex.
func (ww *MifWriter) WriteWord(addr uint32, word uint32) {
	fmt.Fprintf(ww.w, "  %03X : %08X;\n", addr, word)
}

// Finish terminates the content section.
func (ww *MifWriter) Finish() {
	fmt.Fprint(ww.w, "END;\n")
}
|
package ui
import "image/color"
// Color is an alias for the standard library's color.Color interface.
type Color color.Color

// Predefined palette of common colors for the ui package.
var (
	Black       = color.Black
	White       = color.White
	Red         = color.RGBA{255, 0, 0, 255}
	Green       = color.RGBA{0, 255, 0, 255}
	Blue        = color.RGBA{0, 0, 255, 255}
	Yellow      = color.RGBA{255, 255, 0, 255}
	Purple      = color.RGBA{255, 0, 255, 255}
	Brown       = color.RGBA{139, 69, 19, 255}
	Orange      = color.RGBA{255, 165, 0, 255}
	Pink        = color.RGBA{255, 105, 180, 255}
	DarkGrey    = color.RGBA{169, 169, 169, 255}
	LightGrey   = color.RGBA{211, 211, 211, 255}
	Transparent = color.Transparent
	Opaque      = color.Opaque
)
|
package service
import (
"crypto/md5"
"fmt"
"io"
"scratch_maker_server/models"
"strconv"
)
// GetUserList returns one page of users whose user_name contains
// nameQuery, ordered by id. pageNum and pageSize arrive as strings
// (typically raw query parameters); a parse failure is logged and
// leaves the corresponding value at 0, matching prior behavior.
func GetUserList(pageNum, pageSize, nameQuery string) []models.User {
	models.Db.LogMode(true)
	var userList []models.User
	pageSizeInt, err := strconv.Atoi(pageSize)
	if err != nil {
		fmt.Println("err", err)
	}
	pageNumInt, err := strconv.Atoi(pageNum)
	if err != nil {
		fmt.Println("err", err)
	}
	// Pass the parsed integer to Limit; the original passed the raw
	// string while Offset used the parsed value.
	models.Db.Where("user_name LIKE ?", "%"+nameQuery+"%").Order("id asc").
		Offset((pageNumInt-1)*pageSizeInt).Limit(pageSizeInt).Find(&userList)
	return userList
}
// GetUserCount returns the total number of users, optionally restricted
// to those whose user_name contains nameQuery.
func GetUserCount(nameQuery string) int {
	query := models.Db.Model(&models.User{})
	if nameQuery != "" {
		query = query.Where("user_name LIKE ?", "%"+nameQuery+"%")
	}
	var total int
	query.Count(&total)
	return total
}
// UpdateUser hashes the user's (plain-text) Password field and persists
// the user's fields.
func UpdateUser(user models.User) {
	models.Db.LogMode(true)
	// Update.
	// NOTE(review): MD5 is not a safe password hash (fast, unsalted);
	// bcrypt/scrypt would be preferable, but switching requires migrating
	// existing hashes, so it is only flagged here.
	hash := md5.New()
	io.WriteString(hash, user.Password) // for safety, don't just save the plain text
	user.Password = fmt.Sprintf("%x", hash.Sum(nil))
	// NOTE(review): with a struct argument, gorm's Updates presumably
	// skips zero-valued fields — confirm that clearing a field to its
	// zero value is not expected here.
	models.Db.Model(&user).Updates(user)
}
// DeleteUser removes the user row identified by the given primary key.
func DeleteUser(id uint) {
	user := models.User{}
	user.ID = id
	models.Db.LogMode(true)
	models.Db.Delete(&user)
}
|
package web
import (
"bytes"
"encoding/json"
)
// Canned request-validation errors returned by the web handlers; each
// serializes to a {"error": ..., "message": ...} JSON body.
var (
	errMethodNotAllowed           = newJSONError("method_not_allowed", "method not allowed")
	errEmptyQuery                 = newJSONError("empty_query", "query cannot be empty")
	errMissingType                = newJSONError("missing_envelope_type", "query must provide at least one envelope type")
	errCounterNamePresentButEmpty = newJSONError("missing_counter_name", "counter.name is invalid without value")
	errGaugeNamePresentButEmpty   = newJSONError("missing_gauge_name", "gauge.name is invalid without value")
	errStreamingUnsupported       = newJSONError("streaming_unsupported", "request does not support streaming")
)
// jsonError is an error whose string form is a JSON object with "error"
// (a machine-readable name) and "message" (human-readable detail).
type jsonError struct {
	Name    string `json:"error"`
	Message string `json:"message"`
}

// newJSONError builds a jsonError from its name and message.
func newJSONError(name, msg string) jsonError {
	return jsonError{Name: name, Message: msg}
}

// Error renders the value as a JSON document (with trailing newline,
// as produced by json.Encoder).
func (e jsonError) Error() string {
	var out bytes.Buffer
	// We control this struct and it should never fail to encode.
	_ = json.NewEncoder(&out).Encode(&e)
	return out.String()
}
|
//
// Copyright (c) 2019-2021 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
package webhook
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"net/http"
"k8s.io/api/admission/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
// V1PodExecOptionKind identifies the core/v1 PodExecOptions kind carried
// by pod "exec" admission requests.
var V1PodExecOptionKind = metav1.GroupVersionKind{Kind: "PodExecOptions", Group: "", Version: "v1"}

const (
	// ValidateWebhookPath is the HTTP path the validating webhook serves on.
	ValidateWebhookPath = "/validate"
)
// WebhookHandler bundles the dependencies the controller runtime injects
// into webhook handlers: a cluster client and an admission decoder.
type WebhookHandler struct {
	Client  client.Client
	Decoder *admission.Decoder
}
// ResourcesValidator processes pod "exec" admission requests, denying
// exec access to pods labeled `exec-defender.sleshche.com: activated`.
type ResourcesValidator struct {
	*WebhookHandler
}

// NewResourcesValidator returns a validator with an empty handler; the
// client and decoder are injected later via InjectClient/InjectDecoder.
func NewResourcesValidator() *ResourcesValidator {
	return &ResourcesValidator{&WebhookHandler{}}
}
// Handle validates a CONNECT on core/v1 PodExecOptions: it fetches the
// target pod and denies the exec when the pod carries the label
// `exec-defender.sleshche.com: activated`. Any other request kind or
// operation is denied outright, since this controller is not configured
// to handle it.
func (v *ResourcesValidator) Handle(ctx context.Context, req admission.Request) admission.Response {
	if req.Kind == V1PodExecOptionKind && req.Operation == v1beta1.Connect {
		p := corev1.Pod{}
		err := v.Client.Get(ctx, types.NamespacedName{
			Name:      req.Name,
			Namespace: req.Namespace,
		}, &p)
		if err != nil {
			return admission.Errored(http.StatusInternalServerError, err)
		}
		// Use a distinct name for the label value: the original assigned it
		// to `v`, shadowing the method receiver.
		if label, ok := p.Labels["exec-defender.sleshche.com"]; ok && label == "activated" {
			return admission.Denied("You can't connect to pods which are labeled with `exec-defender.sleshche.com: activated`")
		}
		return admission.Allowed("Pod is not marked to prevent exec")
	}
	// Do not allow operation if the corresponding handler is not found.
	// It indicates that the webhooks configuration is not valid or is
	// incompatible with this version of the controller.
	return admission.Denied(fmt.Sprintf("This admission controller is not designed to handle %s operation for %s. Notify an administrator about this issue", req.Operation, req.Kind))
}
// ResourcesValidator implements inject.Client.
// A client will be automatically injected.
// InjectClient injects the client. (The original comment said
// "WorkspaceMutator", apparently copied from another handler.)
func (v *ResourcesValidator) InjectClient(c client.Client) error {
	v.Client = c
	return nil
}
// ResourcesValidator implements admission.DecoderInjector.
// A decoder will be automatically injected.
// InjectDecoder injects the decoder. (The original comment said
// "WorkspaceMutator", apparently copied from another handler.)
func (v *ResourcesValidator) InjectDecoder(d *admission.Decoder) error {
	v.Decoder = d
	return nil
}
|
package crypto
import "encoding/json"
// BinTicker is one symbol/price pair as returned by Binance's
// /api/v3/ticker/price endpoint.
type BinTicker struct {
	Name  string `json:"symbol"`
	Price string `json:"price"`
}

// GetData fetches the full ticker list from Binance and decodes it,
// returning a []*BinTicker (as interface{}) on success.
// NOTE(review): GetData uses a pointer receiver while the getters below
// use value receivers — pick one for consistency; changing them alters
// the type's method sets, so it is only flagged here.
func (*BinTicker) GetData() (interface{}, error) {
	data, err := getTickersData("https://api.binance.com/api/v3/ticker/price")
	if err != nil {
		return nil, err
	}
	ticks := make([]*BinTicker, 0)
	err = json.Unmarshal(data, &ticks)
	return ticks, err
}

// GetTickName returns the ticker's symbol.
func (t BinTicker) GetTickName() string {
	return t.Name
}

// GetPrice returns the ticker's latest price as a string.
func (t BinTicker) GetPrice() string {
	return t.Price
}
|
// SPDX-License-Identifier: MIT
package lsp
import (
"io/ioutil"
"log"
"testing"
"github.com/issue9/assert/v3"
"github.com/caixw/apidoc/v7/core"
"github.com/caixw/apidoc/v7/core/messagetest"
"github.com/caixw/apidoc/v7/internal/ast"
"github.com/caixw/apidoc/v7/internal/lsp/protocol"
)
// loadReferencesDoc parses an in-memory fixture document — two tag
// definitions (t1, t2) and two APIs that reference t1 — asserting that
// parsing reports no errors. The tests below assert exact source
// positions inside the literal, so its internal tab indentation must
// not change.
func loadReferencesDoc(a *assert.Assertion) *ast.APIDoc {
	const referenceDefinitionDoc = `<apidoc version="1.1.1">
	<title>标题</title>
	<mimetype>xml</mimetype>
	<tag name="t1" title="tag1" />
	<tag name="t2" title="tag2" />
	<api method="GET">
		<tag>t1</tag>
		<path path="/users" />
		<response status="200" />
	</api>
	<api method="POST">
		<tag>t1</tag>
		<tag>t2</tag>
		<path path="/users" />
		<response status="200" />
	</api>
</apidoc>`
	blk := core.Block{Data: []byte(referenceDefinitionDoc), Location: core.Location{URI: "file:///root/doc.go"}}
	rslt := messagetest.NewMessageHandler()
	doc := &ast.APIDoc{}
	doc.Parse(rslt.Handler, blk)
	rslt.Handler.Stop()
	a.Empty(rslt.Errors)
	return doc
}
// TestServer_textDocumentReferences checks that a references request is
// a no-op when no folder is loaded, and that a request at the
// <tag name="t1"> definition (fixture line 3) resolves to the two
// <tag>t1</tag> usages inside the <api> elements.
func TestServer_textDocumentReferences(t *testing.T) {
	a := assert.New(t, false)
	s := newTestServer(true, log.New(ioutil.Discard, "", 0), log.New(ioutil.Discard, "", 0))
	// No folders loaded yet: the request must succeed with no locations.
	var locs []core.Location
	err := s.textDocumentReferences(false, &protocol.ReferenceParams{}, &locs)
	a.Nil(err).Empty(locs)
	s.folders = []*folder{
		{
			WorkspaceFolder: protocol.WorkspaceFolder{Name: "test", URI: "file:///root"},
			doc:             loadReferencesDoc(a),
		},
	}
	err = s.textDocumentReferences(false, &protocol.ReferenceParams{TextDocumentPositionParams: protocol.TextDocumentPositionParams{
		TextDocument: protocol.TextDocumentIdentifier{URI: "file:///root/doc.go"},
		Position:     core.Position{Line: 3, Character: 16},
	}}, &locs)
	a.NotError(err).Equal(len(locs), 2)
	// First reference: the <tag>t1</tag> of the GET api (fixture line 6).
	a.Equal(locs[0], core.Location{
		URI: "file:///root/doc.go",
		Range: core.Range{
			Start: core.Position{Line: 6, Character: 2},
			End:   core.Position{Line: 6, Character: 15},
		},
	})
}
// TestServer_textDocumentDefinition checks that a definition request is
// a no-op with no folder loaded, and that a request at a <tag>t1</tag>
// usage (fixture line 6) resolves to the <tag name="t1"> definition
// spanning fixture line 3.
func TestServer_textDocumentDefinition(t *testing.T) {
	a := assert.New(t, false)
	s := newTestServer(true, log.New(ioutil.Discard, "", 0), log.New(ioutil.Discard, "", 0))
	// No folders loaded yet: the request must succeed with no locations.
	var locs []core.Location
	err := s.textDocumentDefinition(false, &protocol.DefinitionParams{}, &locs)
	a.Nil(err).Empty(locs)
	s.folders = []*folder{
		{
			WorkspaceFolder: protocol.WorkspaceFolder{Name: "test", URI: "file:///root"},
			doc:             loadReferencesDoc(a),
		},
	}
	err = s.textDocumentDefinition(false, &protocol.DefinitionParams{TextDocumentPositionParams: protocol.TextDocumentPositionParams{
		TextDocument: protocol.TextDocumentIdentifier{URI: "file:///root/doc.go"},
		Position:     core.Position{Line: 6, Character: 2},
	}}, &locs)
	a.NotError(err).Equal(len(locs), 1)
	a.Equal(locs[0], core.Location{
		URI: "file:///root/doc.go",
		Range: core.Range{
			Start: core.Position{Line: 3, Character: 1},
			End:   core.Position{Line: 3, Character: 31},
		},
	})
}
// TestReferences exercises the references helper directly: a position
// outside any definition yields nil; the t1 tag definition yields its
// two usages; and with declaration included, three locations.
func TestReferences(t *testing.T) {
	a := assert.New(t, false)
	doc := loadReferencesDoc(a)
	// Position (0,0) is not on any referencable symbol.
	pos := core.Position{}
	locs := references(doc, "file:///root/doc.go", pos, false)
	a.Nil(locs)
	// On the <tag name="t1"> definition: two usages, declaration excluded.
	pos = core.Position{Line: 3, Character: 16}
	locs = references(doc, "file:///root/doc.go", pos, false)
	a.Equal(len(locs), 2).
		Equal(locs[0], core.Location{
			URI: "file:///root/doc.go",
			Range: core.Range{
				Start: core.Position{Line: 6, Character: 2},
				End:   core.Position{Line: 6, Character: 15},
			},
		})
	// Same position, declaration included: one extra location.
	pos = core.Position{Line: 3, Character: 16}
	locs = references(doc, "file:///root/doc.go", pos, true)
	a.Equal(len(locs), 3)
}
|
package gueditor
import "fmt"
// log dumps an arbitrary value to stdout with struct field names
// included, for debugging.
func log(info interface{}) {
	msg := fmt.Sprintf("%+v", info)
	fmt.Println(msg)
}
package core
import (
"fmt"
)
// AOIManager manages a rectangular map region partitioned into a
// CntsX × CntsY grid of cells for area-of-interest queries.
type AOIManager struct {
	// MinX is the region's left boundary.
	MinX int
	// MaxX is the region's right boundary.
	MaxX int
	// CntsX is the number of grid cells along the X axis.
	CntsX int
	// MinY is the region's upper boundary.
	MinY int
	// MaxY is the region's lower boundary.
	MaxY int
	// CntsY is the number of grid cells along the Y axis.
	CntsY int
	// grids maps grid-cell ID to the cell object covering that area.
	grids map[int]*Grid
}
// GridWidth returns the width of a single grid cell along the X axis.
func (m *AOIManager) GridWidth() int {
	return (m.MaxX - m.MinX) / m.CntsX
}

// GridHeight returns the height of a single grid cell along the Y axis.
func (m *AOIManager) GridHeight() int {
	return (m.MaxY - m.MinY) / m.CntsY
}
// NewAOIManager builds a manager covering [minX, maxX) × [minY, maxY)
// split into cntsX × cntsY cells, creating every Grid cell up front.
func NewAOIManager(minX, maxX, cntsX, minY, maxY, cntsY int) *AOIManager {
	aoiMgr := &AOIManager{
		MinX:  minX,
		MaxX:  maxX,
		CntsX: cntsX,
		MinY:  minY,
		MaxY:  maxY,
		CntsY: cntsY,
		grids: make(map[int]*Grid),
	}
	// Initialize every grid cell belonging to this map.
	for y := 0; y < cntsY; y++ {
		for x := 0; x < cntsX; x++ {
			// Cell ID: gid = cntsX*y + x.
			gid := y*cntsX + x
			// Register the cell, with its coordinate bounds, in the manager.
			aoiMgr.grids[gid] = NewGid(gid,
				aoiMgr.MinX+x*aoiMgr.GridWidth(),
				aoiMgr.MinX+(x+1)*aoiMgr.GridWidth(),
				aoiMgr.MinY+y*aoiMgr.GridHeight(),
				aoiMgr.MinY+(y+1)*aoiMgr.GridHeight())
		}
	}
	return aoiMgr
}
// String renders the manager's bounds, cell counts, and every cell's
// own description, for debugging.
func (m *AOIManager) String() string {
	s := fmt.Sprintf("AOIManager:\n Minx:%d , MaxX:%d , cntsX:%d ,minY:%d, maxY:%d,cntsY:%d,Grids inManager:\n",
		m.MinX, m.MaxX, m.CntsX, m.MinY, m.MaxY, m.CntsY)
	// Append each grid cell's description.
	for _, grid := range m.grids {
		s += fmt.Sprintln(grid)
	}
	return s
}
// AddPidToGrid adds player PID to the AOI cell with ID gID.
func (m *AOIManager) AddPidToGrid(PID, gID int) {
	m.grids[gID].Add(PID, nil)
}

// RemovePidFromGrid removes player PID from the AOI cell with ID gID.
func (m *AOIManager) RemovePidFromGrid(PID, gID int) {
	m.grids[gID].Remove(PID)
}

// GetPidsByGid returns all player IDs currently inside cell gID.
func (m *AOIManager) GetPidsByGid(gID int) (playerIDs []int) {
	playerIDs = m.grids[gID].GetPlayerIDs()
	return
}
// GetSurroundGridsByGid returns the 3×3 neighborhood of cells around
// cell gID (including gID itself), clipped at the region edges and
// sorted by ascending cell ID. An unknown gID yields nil.
func (m *AOIManager) GetSurroundGridsByGid(gID int) (grids []*Grid) {
	// Bail out if gID is not managed here.
	if _, ok := m.grids[gID]; !ok {
		return
	}
	// Start with the center cell itself.
	grids = append(grids, m.grids[gID])
	// Check whether gID has neighbors to its left and right.
	// X-axis index of the cell: idx = gID % cntsX.
	idx := gID % m.CntsX
	// A cell to the left exists unless we are in the first column.
	if idx > 0 {
		grids = append(grids, m.grids[gID-1])
	}
	// A cell to the right exists unless we are in the last column.
	if idx < m.CntsX-1 {
		grids = append(grids, m.grids[gID+1])
	}
	// For each cell in the horizontal strip collected so far, check the
	// cells directly above and below it. Collect the strip's cell IDs
	// first so appending below does not extend the iteration.
	gidsX := make([]int, 0, len(grids))
	for _, v := range grids {
		gidsX = append(gidsX, v.GID)
	}
	for _, gid := range gidsX {
		// Y-axis index of the cell: idy = gid / cntsX.
		idy := gid / m.CntsX
		// A cell above exists unless we are in the first row.
		if idy > 0 {
			grids = append(grids, m.grids[gid-m.CntsX])
		}
		// A cell below exists unless we are in the last row.
		if idy < m.CntsY-1 {
			grids = append(grids, m.grids[gid+m.CntsX])
		}
	}
	// Bubble-sort the neighborhood by ascending cell ID.
	for i := 0; i < len(grids)-1; i++ {
		for j := 0; j < len(grids)-i-1; j++ {
			if grids[j].GID > grids[j+1].GID {
				grids[j], grids[j+1] = grids[j+1], grids[j]
			}
		}
	}
	return
}
// GetGidByPos returns the ID of the grid cell containing coordinate
// (x, y), or -1 when the point lies outside the managed region.
func (m *AOIManager) GetGidByPos(x, y float32) int {
	// Compare against the region's actual lower bounds; the original
	// tested `x < 0` / `y < 0`, which is wrong whenever MinX/MinY != 0
	// (and is equivalent when they are 0).
	if int(x) < m.MinX || int(x) >= m.MaxX {
		return -1
	}
	if int(y) < m.MinY || int(y) >= m.MaxY {
		return -1
	}
	// Translate the coordinate into cell indices, then into a cell ID.
	idx := (int(x) - m.MinX) / m.GridWidth()
	idy := (int(y) - m.MinY) / m.GridHeight()
	return idy*m.CntsX + idx
}
// GetSurroundPIDsByPos returns the IDs of every player inside the 3×3
// cell neighborhood around coordinate (x, y).
func (m *AOIManager) GetSurroundPIDsByPos(x, y float32) (playerIDs []int) {
	// Map the coordinate to its cell ID.
	gid := m.GetGidByPos(x, y)
	// Collect the surrounding 3×3 cells (empty for an invalid gid).
	grids := m.GetSurroundGridsByGid(gid)
	//fmt.Println("gid = ", gid)
	//fmt.Println("grids=", grids)
	// Gather every player from each cell in the neighborhood.
	for _, grid := range grids {
		playerIDs = append(playerIDs, grid.GetPlayerIDs()...)
	}
	//fmt.Println("playerIDs =", playerIDs)
	return
}
// AddToGridByPos adds player PID to the grid cell containing coordinate
// (x, y). Out-of-bounds coordinates are ignored.
func (m *AOIManager) AddToGridByPos(PID int, x, y float32) {
	gID := m.GetGidByPos(x, y)
	// GetGidByPos returns -1 for out-of-bounds positions; the original
	// then dereferenced the nil m.grids[-1] and panicked.
	grid, ok := m.grids[gID]
	if !ok {
		return
	}
	// Add the player to the cell.
	grid.Add(PID, nil)
}
// RemoteFromGridbyPos removes player PID from the grid cell containing
// coordinate (x, y). Out-of-bounds coordinates are ignored.
// NOTE(review): the name looks like a typo for RemoveFromGridByPos, but
// renaming would break callers, so it is kept.
func (m *AOIManager) RemoteFromGridbyPos(PID int, x, y float32) {
	gID := m.GetGidByPos(x, y)
	// GetGidByPos returns -1 for out-of-bounds positions; the original
	// then dereferenced the nil m.grids[-1] and panicked.
	grid, ok := m.grids[gID]
	if !ok {
		return
	}
	// Remove the player from the cell.
	grid.Remove(PID)
}
|
// project euler (projecteuler.net) problem 70
// solution by Kevin Retzke (retzkek@gmail.com), May 2012
package main
import (
"fmt"
"math"
)
// Primes is an extendable collection of prime numbers in ascending
// order; Last is the largest prime found so far.
type Primes struct {
	Primes []int
	Last   int
}

// Init seeds the collection with the first two primes.
func (p *Primes) Init() {
	p.Primes = []int{2, 3}
	p.Last = 3
}

// Next computes the next prime after Last by trial division against the
// stored primes, appends it, and returns it.
func (p *Primes) Next() int {
	candidate := p.Last + 2
	for {
		limit := math.Sqrt(float64(candidate))
		composite := false
		for _, q := range p.Primes {
			if candidate%q == 0 {
				composite = true
				break
			}
			// No divisor can exceed the square root.
			if float64(q) > limit {
				break
			}
		}
		if !composite {
			break
		}
		// Skip even numbers.
		candidate += 2
	}
	p.Primes = append(p.Primes, candidate)
	p.Last = candidate
	return candidate
}

// Eratosthenes replaces the collection with all primes below max,
// computed with the Sieve of Eratosthenes.
func (p *Primes) Eratosthenes(max int) {
	sieve := make([]bool, max)
	for n := 2; n*n < max; n++ {
		if sieve[n] {
			continue
		}
		for multiple := 2 * n; multiple < max; multiple += n {
			sieve[multiple] = true
		}
	}
	p.Primes = []int{2}
	for n := 3; n < max; n++ {
		if !sieve[n] {
			p.Last = n
			p.Primes = append(p.Primes, n)
		}
	}
}
// countDigits returns a 10-element histogram of the decimal digits of
// n; n <= 0 yields all zeros.
func countDigits(n int) []int {
	counts := make([]int, 10)
	for ; n > 0; n /= 10 {
		counts[n%10]++
	}
	return counts
}

// arePermutations reports whether n and m contain exactly the same
// multiset of decimal digits.
func arePermutations(n, m int) bool {
	a, b := countDigits(n), countDigits(m)
	for d := range a {
		if a[d] != b[d] {
			return false
		}
	}
	return true
}
// main searches odd n in [3, 10^7) for the value minimizing n/φ(n)
// subject to φ(n) being a digit permutation of n (Project Euler 70),
// printing each new best candidate as it is found.
func main() {
	const maxn = 10000000
	primes := new(Primes)
	primes.Eratosthenes(maxn)
	minn := 0
	// Sentinel larger than any achievable n/φ(n).
	minnphin := float64(9999999)
	for n := 3; n < maxn; n += 2 {
		// Euler's product formula: φ(n) = n · Π (1 − 1/p) over primes p | n.
		phi := float64(n)
		for _, m := range primes.Primes {
			if m > n {
				break
			}
			if n%m == 0 {
				phi *= (1 - 1/float64(m))
			}
		}
		// NOTE(review): phi is accumulated in floating point and truncated
		// via int(phi); rounding error could misclassify a candidate near
		// an integer boundary — confirm.
		if nphin := float64(n) / phi; nphin < minnphin &&
			arePermutations(n, int(phi)) {
			minn = n
			minnphin = nphin
			fmt.Println(minn, int(phi), minnphin)
		}
	}
}
|
package messaging
import (
"time"
)
// Advertisement message types: a component set was added, deleted, or
// modified.
const (
	ADD      string = "ADD"
	DELETE   string = "DEL"
	MODIFIED string = "MOD"
)
// AdvertisementMessage announces the components of an application from
// a base node, stamped with the creation time in Unix seconds.
type AdvertisementMessage struct {
	AppName    string      `json:"app_name"`
	BaseNode   string      `json:"base_node"`
	Type       string      `json:"type"`
	Components []Component `json:"components"`
	Timestamp  float64     `json:"timestamp"`
}

// Component describes one deployable unit: its function, parameters,
// boot ordering, and node placement constraints.
type Component struct {
	Name             string      `json:"name"`
	Function         Function    `json:"function"`
	Parameters       interface{} `json:"parameters"`
	BootDependencies []string    `json:"boot_dependencies"`
	NodesBlacklist   []string    `json:"nodes-blacklist"`
	NodesWhitelist   []string    `json:"nodes-whitelist"`
}

// Function is a container image plus its resource requirements.
type Function struct {
	Image     string    `json:"image"`
	Resources Resources `json:"resources"`
}

// Resources is a memory/CPU requirement pair.
type Resources struct {
	Memory float64 `json:"memory"`
	Cpu    float64 `json:"cpu"`
}

// NewAdvertisementMessage builds a message stamped with the current
// Unix time.
func NewAdvertisementMessage(appName string, baseNode string, typeMes string, components []Component) *AdvertisementMessage {
	return &AdvertisementMessage{
		AppName:    appName,
		BaseNode:   baseNode,
		Type:       typeMes,
		Components: components,
		Timestamp:  float64(time.Now().Unix()),
	}
}

// NewComponent builds a Component from its parts.
func NewComponent(name string, function Function, parameters interface{}, bootDependencies []string, NodesBlacklist []string, NodesWhitelist []string) *Component {
	return &Component{
		Name:             name,
		Function:         function,
		Parameters:       parameters,
		BootDependencies: bootDependencies,
		NodesBlacklist:   NodesBlacklist,
		NodesWhitelist:   NodesWhitelist,
	}
}

// NewFunction builds a Function from its image and resources.
func NewFunction(image string, resources Resources) *Function {
	return &Function{Image: image, Resources: resources}
}

// NewResources builds a Resources value.
func NewResources(memory float64, cpu float64) *Resources {
	return &Resources{Memory: memory, Cpu: cpu}
}

// Equal reports whether both messages describe the same application on
// the same base node; only AppName and BaseNode are compared.
func (m *AdvertisementMessage) Equal(message AdvertisementMessage) bool {
	return m.AppName == message.AppName && m.BaseNode == message.BaseNode
}
package _017_Letter_Combinations_of_a_Phone_Number
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestLetterCombinations checks phone-keypad letter expansion for a
// two-key input ("23") and the four-letter key "7".
// NOTE(review): the expected ordering for "23" ("ad","bd","cd","ae",...)
// differs from the usual DFS order ("ad","ae","af","bd",...); it
// presumably mirrors this implementation's traversal order — confirm.
func TestLetterCombinations(t *testing.T) {
	ast := assert.New(t)
	ast.EqualValues([]string{"ad", "bd", "cd", "ae", "af", "be", "bf", "ce", "cf"},
		letterCombinations("23"))
	ast.EqualValues([]string{"p", "q", "r", "s"}, letterCombinations("7"))
}
|
// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package builtins
import (
"math"
"time"
"github.com/cockroachdb/apd/v2"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/errors"
)
// initMathBuiltins registers every math builtin in the global builtins
// map, panicking if a name collides with an already-registered builtin.
func initMathBuiltins() {
	for name, def := range mathBuiltins {
		if _, dup := builtins[name]; dup {
			panic("duplicate builtin: " + name)
		}
		builtins[name] = def
	}
}
// Errors shared by the math builtins below.
var (
	errAbsOfMinInt64 = pgerror.New(pgcode.NumericValueOutOfRange, "abs of min integer value (-9223372036854775808) not defined")
	errLogOfNegNumber = pgerror.New(pgcode.InvalidArgumentForLogarithm, "cannot take logarithm of a negative number")
	errLogOfZero = pgerror.New(pgcode.InvalidArgumentForLogarithm, "cannot take logarithm of zero")
)

// Conversion factors between degrees and radians.
const (
	degToRad = math.Pi / 180.0
	radToDeg = 180.0 / math.Pi
)
// math builtins contains the math built-in functions indexed by name.
//
// For use in other packages, see AllBuiltinNames and GetBuiltinProperties().
var mathBuiltins = map[string]builtinDefinition{
"abs": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Abs(x))), nil
}, "Calculates the absolute value of `val`.", tree.VolatilityImmutable),
decimalOverload1(func(x *apd.Decimal) (tree.Datum, error) {
dd := &tree.DDecimal{}
dd.Abs(x)
return dd, nil
}, "Calculates the absolute value of `val`.", tree.VolatilityImmutable),
tree.Overload{
Types: tree.ArgTypes{{"val", types.Int}},
ReturnType: tree.FixedReturnType(types.Int),
Fn: func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
x := tree.MustBeDInt(args[0])
switch {
case x == math.MinInt64:
return nil, errAbsOfMinInt64
case x < 0:
return tree.NewDInt(-x), nil
}
return args[0], nil
},
Info: "Calculates the absolute value of `val`.",
Volatility: tree.VolatilityImmutable,
},
),
"acos": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Acos(x))), nil
}, "Calculates the inverse cosine of `val`.", tree.VolatilityImmutable),
),
"acosd": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(radToDeg * math.Acos(x))), nil
}, "Calculates the inverse cosine of `val` with the result in degrees", tree.VolatilityImmutable),
),
"acosh": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Acosh(x))), nil
}, "Calculates the inverse hyperbolic cosine of `val`.", tree.VolatilityImmutable),
),
"asin": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Asin(x))), nil
}, "Calculates the inverse sine of `val`.", tree.VolatilityImmutable),
),
"asind": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(radToDeg * math.Asin(x))), nil
}, "Calculates the inverse sine of `val` with the result in degrees.", tree.VolatilityImmutable),
),
"asinh": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Asinh(x))), nil
}, "Calculates the inverse hyperbolic sine of `val`.", tree.VolatilityImmutable),
),
"atan": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Atan(x))), nil
}, "Calculates the inverse tangent of `val`.", tree.VolatilityImmutable),
),
"atand": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(radToDeg * math.Atan(x))), nil
}, "Calculates the inverse tangent of `val` with the result in degrees.", tree.VolatilityImmutable),
),
"atanh": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Atanh(x))), nil
}, "Calculates the inverse hyperbolic tangent of `val`.", tree.VolatilityImmutable),
),
"atan2": makeBuiltin(defProps(),
floatOverload2("x", "y", func(x, y float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Atan2(x, y))), nil
}, "Calculates the inverse tangent of `x`/`y`.", tree.VolatilityImmutable),
),
"atan2d": makeBuiltin(defProps(),
floatOverload2("x", "y", func(x, y float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(radToDeg * math.Atan2(x, y))), nil
}, "Calculates the inverse tangent of `x`/`y` with the result in degrees", tree.VolatilityImmutable),
),
"cbrt": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.Cbrt(x)
}, "Calculates the cube root (∛) of `val`.", tree.VolatilityImmutable),
decimalOverload1(func(x *apd.Decimal) (tree.Datum, error) {
return tree.DecimalCbrt(x)
}, "Calculates the cube root (∛) of `val`.", tree.VolatilityImmutable),
),
"ceil": ceilImpl,
"ceiling": ceilImpl,
"cos": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Cos(x))), nil
}, "Calculates the cosine of `val`.", tree.VolatilityImmutable),
),
"cosd": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Cos(degToRad * x))), nil
}, "Calculates the cosine of `val` where `val` is in degrees.", tree.VolatilityImmutable),
),
"cosh": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Cosh(x))), nil
}, "Calculates the hyperbolic cosine of `val`.", tree.VolatilityImmutable),
),
"cot": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(1 / math.Tan(x))), nil
}, "Calculates the cotangent of `val`.", tree.VolatilityImmutable),
),
"cotd": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(1 / math.Tan(degToRad*x))), nil
}, "Calculates the cotangent of `val` where `val` is in degrees.", tree.VolatilityImmutable),
),
"degrees": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(radToDeg * x)), nil
}, "Converts `val` as a radian value to a degree value.", tree.VolatilityImmutable),
),
"div": makeBuiltin(defProps(),
floatOverload2("x", "y", func(x, y float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Trunc(x / y))), nil
}, "Calculates the integer quotient of `x`/`y`.", tree.VolatilityImmutable),
decimalOverload2("x", "y", func(x, y *apd.Decimal) (tree.Datum, error) {
if y.Sign() == 0 {
return nil, tree.ErrDivByZero
}
dd := &tree.DDecimal{}
_, err := tree.HighPrecisionCtx.QuoInteger(&dd.Decimal, x, y)
return dd, err
}, "Calculates the integer quotient of `x`/`y`.", tree.VolatilityImmutable),
tree.Overload{
Types: tree.ArgTypes{{"x", types.Int}, {"y", types.Int}},
ReturnType: tree.FixedReturnType(types.Int),
Fn: func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
y := tree.MustBeDInt(args[1])
if y == 0 {
return nil, tree.ErrDivByZero
}
x := tree.MustBeDInt(args[0])
return tree.NewDInt(x / y), nil
},
Info: "Calculates the integer quotient of `x`/`y`.",
Volatility: tree.VolatilityImmutable,
},
),
"exp": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Exp(x))), nil
}, "Calculates *e* ^ `val`.", tree.VolatilityImmutable),
decimalOverload1(func(x *apd.Decimal) (tree.Datum, error) {
dd := &tree.DDecimal{}
_, err := tree.DecimalCtx.Exp(&dd.Decimal, x)
return dd, err
}, "Calculates *e* ^ `val`.", tree.VolatilityImmutable),
),
"floor": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Floor(x))), nil
}, "Calculates the largest integer not greater than `val`.", tree.VolatilityImmutable),
decimalOverload1(func(x *apd.Decimal) (tree.Datum, error) {
dd := &tree.DDecimal{}
_, err := tree.ExactCtx.Floor(&dd.Decimal, x)
return dd, err
}, "Calculates the largest integer not greater than `val`.", tree.VolatilityImmutable),
tree.Overload{
Types: tree.ArgTypes{{"val", types.Int}},
ReturnType: tree.FixedReturnType(types.Float),
Fn: func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(float64(*args[0].(*tree.DInt)))), nil
},
Info: "Calculates the largest integer not greater than `val`.",
Volatility: tree.VolatilityImmutable,
},
),
"isnan": makeBuiltin(defProps(),
tree.Overload{
// Can't use floatBuiltin1 here because this one returns
// a boolean.
Types: tree.ArgTypes{{"val", types.Float}},
ReturnType: tree.FixedReturnType(types.Bool),
Fn: func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
return tree.MakeDBool(tree.DBool(math.IsNaN(float64(*args[0].(*tree.DFloat))))), nil
},
Info: "Returns true if `val` is NaN, false otherwise.",
Volatility: tree.VolatilityImmutable,
},
tree.Overload{
Types: tree.ArgTypes{{"val", types.Decimal}},
ReturnType: tree.FixedReturnType(types.Bool),
Fn: func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
isNaN := args[0].(*tree.DDecimal).Decimal.Form == apd.NaN
return tree.MakeDBool(tree.DBool(isNaN)), nil
},
Info: "Returns true if `val` is NaN, false otherwise.",
Volatility: tree.VolatilityImmutable,
},
),
"ln": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Log(x))), nil
}, "Calculates the natural log of `val`.", tree.VolatilityImmutable),
decimalLogFn(tree.DecimalCtx.Ln, "Calculates the natural log of `val`.", tree.VolatilityImmutable),
),
"log": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Log10(x))), nil
}, "Calculates the base 10 log of `val`.", tree.VolatilityImmutable),
floatOverload2("b", "x", func(b, x float64) (tree.Datum, error) {
switch {
case x < 0.0:
return nil, errLogOfNegNumber
case x == 0.0:
return nil, errLogOfZero
}
switch {
case b < 0.0:
return nil, errLogOfNegNumber
case b == 0.0:
return nil, errLogOfZero
}
return tree.NewDFloat(tree.DFloat(math.Log10(x) / math.Log10(b))), nil
}, "Calculates the base `b` log of `val`.", tree.VolatilityImmutable),
decimalLogFn(tree.DecimalCtx.Log10, "Calculates the base 10 log of `val`.", tree.VolatilityImmutable),
decimalOverload2("b", "x", func(b, x *apd.Decimal) (tree.Datum, error) {
switch x.Sign() {
case -1:
return nil, errLogOfNegNumber
case 0:
return nil, errLogOfZero
}
switch b.Sign() {
case -1:
return nil, errLogOfNegNumber
case 0:
return nil, errLogOfZero
}
top := new(apd.Decimal)
if _, err := tree.IntermediateCtx.Ln(top, x); err != nil {
return nil, err
}
bot := new(apd.Decimal)
if _, err := tree.IntermediateCtx.Ln(bot, b); err != nil {
return nil, err
}
dd := &tree.DDecimal{}
_, err := tree.DecimalCtx.Quo(&dd.Decimal, top, bot)
return dd, err
}, "Calculates the base `b` log of `val`.", tree.VolatilityImmutable),
),
"mod": makeBuiltin(defProps(),
floatOverload2("x", "y", func(x, y float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Mod(x, y))), nil
}, "Calculates `x`%`y`.", tree.VolatilityImmutable),
decimalOverload2("x", "y", func(x, y *apd.Decimal) (tree.Datum, error) {
if y.Sign() == 0 {
return nil, tree.ErrDivByZero
}
dd := &tree.DDecimal{}
_, err := tree.HighPrecisionCtx.Rem(&dd.Decimal, x, y)
return dd, err
}, "Calculates `x`%`y`.", tree.VolatilityImmutable),
tree.Overload{
Types: tree.ArgTypes{{"x", types.Int}, {"y", types.Int}},
ReturnType: tree.FixedReturnType(types.Int),
Fn: func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
y := tree.MustBeDInt(args[1])
if y == 0 {
return nil, tree.ErrDivByZero
}
x := tree.MustBeDInt(args[0])
return tree.NewDInt(x % y), nil
},
Info: "Calculates `x`%`y`.",
Volatility: tree.VolatilityImmutable,
},
),
"pi": makeBuiltin(defProps(),
tree.Overload{
Types: tree.ArgTypes{},
ReturnType: tree.FixedReturnType(types.Float),
Fn: func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
return tree.NewDFloat(math.Pi), nil
},
Info: "Returns the value for pi (3.141592653589793).",
Volatility: tree.VolatilityImmutable,
},
),
"pow": powImpls,
"power": powImpls,
"radians": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(x * degToRad)), nil
}, "Converts `val` as a degree value to a radians value.", tree.VolatilityImmutable),
),
"round": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.RoundToEven(x))), nil
}, "Rounds `val` to the nearest integer using half to even (banker's) rounding.", tree.VolatilityImmutable),
decimalOverload1(func(x *apd.Decimal) (tree.Datum, error) {
return roundDecimal(x, 0)
}, "Rounds `val` to the nearest integer, half away from zero: "+
"round(+/-2.4) = +/-2, round(+/-2.5) = +/-3.", tree.VolatilityImmutable),
tree.Overload{
Types: tree.ArgTypes{{"input", types.Float}, {"decimal_accuracy", types.Int}},
ReturnType: tree.FixedReturnType(types.Float),
Fn: func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
f := float64(*args[0].(*tree.DFloat))
if math.IsInf(f, 0) || math.IsNaN(f) {
return args[0], nil
}
var x apd.Decimal
if _, err := x.SetFloat64(f); err != nil {
return nil, err
}
// TODO(mjibson): make sure this fits in an int32.
scale := int32(tree.MustBeDInt(args[1]))
var d apd.Decimal
if _, err := tree.RoundCtx.Quantize(&d, &x, -scale); err != nil {
return nil, err
}
f, err := d.Float64()
if err != nil {
return nil, err
}
return tree.NewDFloat(tree.DFloat(f)), nil
},
Info: "Keeps `decimal_accuracy` number of figures to the right of the zero position " +
" in `input` using half to even (banker's) rounding.",
Volatility: tree.VolatilityImmutable,
},
tree.Overload{
Types: tree.ArgTypes{{"input", types.Decimal}, {"decimal_accuracy", types.Int}},
ReturnType: tree.FixedReturnType(types.Decimal),
Fn: func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
// TODO(mjibson): make sure this fits in an int32.
scale := int32(tree.MustBeDInt(args[1]))
return roundDecimal(&args[0].(*tree.DDecimal).Decimal, scale)
},
Info: "Keeps `decimal_accuracy` number of figures to the right of the zero position " +
"in `input` using half away from zero rounding. If `decimal_accuracy` " +
"is not in the range -2^31...(2^31-1), the results are undefined.",
Volatility: tree.VolatilityImmutable,
},
),
"sin": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Sin(x))), nil
}, "Calculates the sine of `val`.", tree.VolatilityImmutable),
),
"sind": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Sin(degToRad * x))), nil
}, "Calculates the sine of `val` where `val` is in degrees.", tree.VolatilityImmutable),
),
"sinh": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Sinh(x))), nil
}, "Calculates the hyperbolic sine of `val`.", tree.VolatilityImmutable),
),
"sign": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
switch {
case x < 0:
return tree.NewDFloat(-1), nil
case x == 0:
return tree.NewDFloat(0), nil
}
return tree.NewDFloat(1), nil
}, "Determines the sign of `val`: **1** for positive; **0** for 0 values; **-1** for "+
"negative.", tree.VolatilityImmutable),
decimalOverload1(func(x *apd.Decimal) (tree.Datum, error) {
d := &tree.DDecimal{}
d.Decimal.SetInt64(int64(x.Sign()))
return d, nil
}, "Determines the sign of `val`: **1** for positive; **0** for 0 values; **-1** for "+
"negative.", tree.VolatilityImmutable),
tree.Overload{
Types: tree.ArgTypes{{"val", types.Int}},
ReturnType: tree.FixedReturnType(types.Int),
Fn: func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
x := tree.MustBeDInt(args[0])
switch {
case x < 0:
return tree.NewDInt(-1), nil
case x == 0:
return tree.DZero, nil
}
return tree.NewDInt(1), nil
},
Info: "Determines the sign of `val`: **1** for positive; **0** for 0 values; **-1** " +
"for negative.",
Volatility: tree.VolatilityImmutable,
},
),
"sqrt": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.Sqrt(x)
}, "Calculates the square root of `val`.", tree.VolatilityImmutable),
decimalOverload1(func(x *apd.Decimal) (tree.Datum, error) {
return tree.DecimalSqrt(x)
}, "Calculates the square root of `val`.", tree.VolatilityImmutable),
),
"tan": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Tan(x))), nil
}, "Calculates the tangent of `val`.", tree.VolatilityImmutable),
),
"tand": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Tan(degToRad * x))), nil
}, "Calculates the tangent of `val` where `val` is in degrees.", tree.VolatilityImmutable),
),
"tanh": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Tanh(x))), nil
}, "Calculates the hyperbolic tangent of `val`.", tree.VolatilityImmutable),
),
"trunc": makeBuiltin(defProps(),
floatOverload1(func(x float64) (tree.Datum, error) {
return tree.NewDFloat(tree.DFloat(math.Trunc(x))), nil
}, "Truncates the decimal values of `val`.", tree.VolatilityImmutable),
decimalOverload1(func(x *apd.Decimal) (tree.Datum, error) {
dd := &tree.DDecimal{}
x.Modf(&dd.Decimal, nil)
return dd, nil
}, "Truncates the decimal values of `val`.", tree.VolatilityImmutable),
),
"width_bucket": makeBuiltin(defProps(),
tree.Overload{
Types: tree.ArgTypes{{"operand", types.Decimal}, {"b1", types.Decimal},
{"b2", types.Decimal}, {"count", types.Int}},
ReturnType: tree.FixedReturnType(types.Int),
Fn: func(ctx *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
operand, _ := args[0].(*tree.DDecimal).Float64()
b1, _ := args[1].(*tree.DDecimal).Float64()
b2, _ := args[2].(*tree.DDecimal).Float64()
count := int(tree.MustBeDInt(args[3]))
return tree.NewDInt(tree.DInt(widthBucket(operand, b1, b2, count))), nil
},
Info: "return the bucket number to which operand would be assigned in a histogram having count " +
"equal-width buckets spanning the range b1 to b2.",
Volatility: tree.VolatilityImmutable,
},
tree.Overload{
Types: tree.ArgTypes{{"operand", types.Int}, {"b1", types.Int},
{"b2", types.Int}, {"count", types.Int}},
ReturnType: tree.FixedReturnType(types.Int),
Fn: func(ctx *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
operand := float64(tree.MustBeDInt(args[0]))
b1 := float64(tree.MustBeDInt(args[1]))
b2 := float64(tree.MustBeDInt(args[2]))
count := int(tree.MustBeDInt(args[3]))
return tree.NewDInt(tree.DInt(widthBucket(operand, b1, b2, count))), nil
},
Info: "return the bucket number to which operand would be assigned in a histogram having count " +
"equal-width buckets spanning the range b1 to b2.",
Volatility: tree.VolatilityImmutable,
},
tree.Overload{
Types: tree.ArgTypes{{"operand", types.Any}, {"thresholds", types.AnyArray}},
ReturnType: tree.FixedReturnType(types.Int),
Fn: func(ctx *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
operand := args[0]
thresholds := tree.MustBeDArray(args[1])
if !operand.ResolvedType().Equivalent(thresholds.ParamTyp) {
return tree.NewDInt(0), errors.New("operand and thresholds must be of the same type")
}
for i, v := range thresholds.Array {
if operand.Compare(ctx, v) < 0 {
return tree.NewDInt(tree.DInt(i)), nil
}
}
return tree.NewDInt(tree.DInt(thresholds.Len())), nil
},
Info: "return the bucket number to which operand would be assigned given an array listing the " +
"lower bounds of the buckets; returns 0 for an input less than the first lower bound; the " +
"thresholds array must be sorted, smallest first, or unexpected results will be obtained",
Volatility: tree.VolatilityImmutable,
},
),
}
// ceilImpl implements the ceil/ceiling builtin for float, decimal, and int
// arguments. Every overload returns the smallest integer not smaller than
// the input.
var ceilImpl = makeBuiltin(defProps(),
	floatOverload1(func(x float64) (tree.Datum, error) {
		return tree.NewDFloat(tree.DFloat(math.Ceil(x))), nil
	}, "Calculates the smallest integer not smaller than `val`.", tree.VolatilityImmutable),
	decimalOverload1(func(x *apd.Decimal) (tree.Datum, error) {
		dd := &tree.DDecimal{}
		_, err := tree.ExactCtx.Ceil(&dd.Decimal, x)
		// Normalize a negative-zero result (e.g. from ceiling a small
		// negative fraction) so it doesn't render as "-0".
		if dd.IsZero() {
			dd.Negative = false
		}
		return dd, err
	}, "Calculates the smallest integer not smaller than `val`.", tree.VolatilityImmutable),
	tree.Overload{
		Types:      tree.ArgTypes{{"val", types.Int}},
		ReturnType: tree.FixedReturnType(types.Float),
		Fn: func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
			// An integer is its own ceiling; only a conversion to the
			// declared float return type is required.
			return tree.NewDFloat(tree.DFloat(float64(*args[0].(*tree.DInt)))), nil
		},
		Info:       "Calculates the smallest integer not smaller than `val`.",
		Volatility: tree.VolatilityImmutable,
	},
)
// powImpls implements the pow/power builtin for float, decimal, and int
// arguments, computing x^y in the argument type's own arithmetic.
var powImpls = makeBuiltin(defProps(),
	floatOverload2("x", "y", func(x, y float64) (tree.Datum, error) {
		return tree.NewDFloat(tree.DFloat(math.Pow(x, y))), nil
	}, "Calculates `x`^`y`.", tree.VolatilityImmutable),
	decimalOverload2("x", "y", func(x, y *apd.Decimal) (tree.Datum, error) {
		dd := &tree.DDecimal{}
		_, err := tree.DecimalCtx.Pow(&dd.Decimal, x, y)
		return dd, err
	}, "Calculates `x`^`y`.", tree.VolatilityImmutable),
	tree.Overload{
		Types: tree.ArgTypes{
			{"x", types.Int},
			{"y", types.Int},
		},
		ReturnType: tree.FixedReturnType(types.Int),
		Fn: func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
			// Integer exponentiation is delegated to tree.IntPow so that
			// error handling (e.g. for out-of-range results) stays
			// consistent with the rest of the integer math builtins.
			return tree.IntPow(tree.MustBeDInt(args[0]), tree.MustBeDInt(args[1]))
		},
		Info:       "Calculates `x`^`y`.",
		Volatility: tree.VolatilityImmutable,
	},
)
// decimalLogFn constructs a one-argument decimal overload around a
// logarithm function (e.g. Ln or Log10) from the apd decimal context.
// Non-positive inputs are rejected up front, since the logarithm is
// undefined there.
//
// Bug fix: the volatility parameter was previously ignored and
// tree.VolatilityImmutable was hard-coded; the parameter is now honored
// so callers can construct overloads with other volatilities.
func decimalLogFn(
	logFn func(*apd.Decimal, *apd.Decimal) (apd.Condition, error),
	info string,
	volatility tree.Volatility,
) tree.Overload {
	return decimalOverload1(func(x *apd.Decimal) (tree.Datum, error) {
		// Reject log of zero or a negative number, mirroring the float
		// overloads' error behavior.
		switch x.Sign() {
		case -1:
			return nil, errLogOfNegNumber
		case 0:
			return nil, errLogOfZero
		}
		dd := &tree.DDecimal{}
		_, err := logFn(&dd.Decimal, x)
		return dd, err
	}, info, volatility)
}
// floatOverload1 builds a one-argument float->float overload (argument
// named "val") whose result is computed by f.
func floatOverload1(
	f func(float64) (tree.Datum, error), info string, volatility tree.Volatility,
) tree.Overload {
	eval := func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
		return f(float64(*args[0].(*tree.DFloat)))
	}
	return tree.Overload{
		Types:      tree.ArgTypes{{"val", types.Float}},
		ReturnType: tree.FixedReturnType(types.Float),
		Fn:         eval,
		Info:       info,
		Volatility: volatility,
	}
}
// floatOverload2 builds a two-argument float->float overload with argument
// names a and b, whose result is computed by f.
func floatOverload2(
	a, b string,
	f func(float64, float64) (tree.Datum, error),
	info string,
	volatility tree.Volatility,
) tree.Overload {
	eval := func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
		lhs := float64(*args[0].(*tree.DFloat))
		rhs := float64(*args[1].(*tree.DFloat))
		return f(lhs, rhs)
	}
	return tree.Overload{
		Types:      tree.ArgTypes{{a, types.Float}, {b, types.Float}},
		ReturnType: tree.FixedReturnType(types.Float),
		Fn:         eval,
		Info:       info,
		Volatility: volatility,
	}
}
// decimalOverload1 builds a one-argument decimal->decimal overload
// (argument named "val") whose result is computed by f.
func decimalOverload1(
	f func(*apd.Decimal) (tree.Datum, error), info string, volatility tree.Volatility,
) tree.Overload {
	return tree.Overload{
		Types:      tree.ArgTypes{{"val", types.Decimal}},
		ReturnType: tree.FixedReturnType(types.Decimal),
		Fn: func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
			return f(&args[0].(*tree.DDecimal).Decimal)
		},
		Info:       info,
		Volatility: volatility,
	}
}
// decimalOverload2 builds a two-argument decimal->decimal overload with
// argument names a and b, whose result is computed by f.
func decimalOverload2(
	a, b string,
	f func(*apd.Decimal, *apd.Decimal) (tree.Datum, error),
	info string,
	volatility tree.Volatility,
) tree.Overload {
	return tree.Overload{
		Types:      tree.ArgTypes{{a, types.Decimal}, {b, types.Decimal}},
		ReturnType: tree.FixedReturnType(types.Decimal),
		Fn: func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) {
			return f(
				&args[0].(*tree.DDecimal).Decimal,
				&args[1].(*tree.DDecimal).Decimal,
			)
		},
		Info:       info,
		Volatility: volatility,
	}
}
// roundDDecimal rounds d to the given scale, but returns d unchanged when
// it already carries no more fractional digits than requested. This avoids
// allocating a new DDecimal in the common no-op case.
func roundDDecimal(d *tree.DDecimal, scale int32) (tree.Datum, error) {
	// An exponent of at least -scale means at most `scale` digits follow
	// the decimal point, so no rounding work is needed.
	if d.Exponent >= -scale {
		return d, nil
	}
	return roundDecimal(&d.Decimal, scale)
}
// roundDecimal quantizes x to `scale` digits after the decimal point using
// the high-precision context, returning the result as a fresh DDecimal.
func roundDecimal(x *apd.Decimal, scale int32) (tree.Datum, error) {
	res := &tree.DDecimal{}
	_, err := tree.HighPrecisionCtx.Quantize(&res.Decimal, x, -scale)
	return res, err
}
// uniqueIntState holds the last timestamp handed out by the unique-int
// generator; the embedded mutex serializes access across sessions.
// NOTE(review): the consumer of this state is outside this chunk — verify
// against the unique_rowid implementation.
var uniqueIntState struct {
	syncutil.Mutex
	timestamp uint64
}
// uniqueIntEpoch is 2015-01-01 UTC in nanoseconds — presumably the custom
// epoch subtracted from wall time by the unique-int generator (consumer not
// in view; confirm at the call site).
var uniqueIntEpoch = time.Date(2015, time.January, 1, 0, 0, 0, 0, time.UTC).UnixNano()
// widthBucket returns the bucket number (1..count) that operand falls into
// when the range b1..b2 is divided into count equal-width buckets. Operands
// below the range map to 0 and operands above it map to count+1. The bounds
// may be given in descending order (b1 > b2), which reverses the buckets.
func widthBucket(operand float64, b1 float64, b2 float64, count int) int {
	// Overflow/underflow buckets for operands outside [b1, b2].
	switch {
	case (b1 < b2 && operand > b2) || (b1 > b2 && operand < b2):
		return count + 1
	case (b1 < b2 && operand < b1) || (b1 > b2 && operand > b1):
		return 0
	}
	width := (b2 - b1) / float64(count)
	offset := operand - b1
	return int(math.Floor(offset/width) + 1)
}
|
package consensus
import (
"errors"
"runtime"
"github.com/NebulousLabs/Sia/crypto"
"github.com/NebulousLabs/Sia/encoding"
"github.com/NebulousLabs/Sia/modules"
"github.com/NebulousLabs/Sia/types"
)
const (
	// MaxCatchUpBlocks is the maximum number of blocks exchanged per round
	// of the SendBlocks RPC; it also bounds how much block data a single
	// read from a peer may allocate.
	MaxCatchUpBlocks = 10
	// MaxSynchronizeAttempts bounds repeated synchronization attempts.
	// NOTE(review): its consumer is outside this chunk — confirm usage.
	MaxSynchronizeAttempts = 8
)
// blockHistory returns up to 32 BlockIDs, starting with the 12 most recent
// BlockIDs and then doubling in step size until the genesis block is reached.
// The genesis block is always included. This array of BlockIDs is used to
// establish a shared commonality between peers during synchronization.
func (s *ConsensusSet) blockHistory() (blockIDs [32]types.BlockID) {
	knownBlocks := make([]types.BlockID, 0, 32)
	step := types.BlockHeight(1)
	for height := s.height(); ; height -= step {
		knownBlocks = append(knownBlocks, s.db.getPath(height))
		// After the 12 most recent blocks, double the gap between sampled
		// heights so the 32 entries can span an arbitrarily long chain.
		if len(knownBlocks) >= 12 {
			step *= 2
		}
		// This check has to come before height -= step; otherwise the
		// (unsigned) height could underflow past the genesis height.
		if height <= step {
			break
		}
	}
	// Always include the genesis block.
	knownBlocks = append(knownBlocks, s.db.getPath(0))
	// copy truncates to the first 32 entries if more were gathered.
	copy(blockIDs[:], knownBlocks)
	return
}
// receiveBlocks is the calling end of the SendBlocks RPC: it sends this
// node's recent block history to the peer and then ingests batches of
// blocks until the peer reports that none remain.
func (s *ConsensusSet) receiveBlocks(conn modules.PeerConn) error {
	// Send the peer our block history so it can find a common ancestor.
	lockID := s.mu.RLock()
	if !s.db.open {
		s.mu.RUnlock(lockID)
		return errors.New("database not open")
	}
	history := s.blockHistory()
	s.mu.RUnlock(lockID)
	if err := encoding.WriteObject(conn, history); err != nil {
		return err
	}
	// Loop until the peer indicates no more blocks are available.
	moreAvailable := true
	for moreAvailable {
		var newBlocks []types.Block
		// Bound the read so a malicious peer cannot make us allocate more
		// than one batch worth of block data.
		if err := encoding.ReadObject(conn, &newBlocks, MaxCatchUpBlocks*types.BlockSizeLimit); err != nil {
			return err
		}
		if err := encoding.ReadObject(conn, &moreAvailable, 1); err != nil {
			return err
		}
		// Integrate received blocks.
		for _, block := range newBlocks {
			// Blocks received during synchronize aren't trusted; activate full
			// verification. The write lock is re-acquired per block so other
			// work can interleave between acceptances.
			lockID := s.mu.Lock()
			if !s.db.open {
				s.mu.Unlock(lockID)
				return errors.New("database not open")
			}
			acceptErr := s.acceptBlock(block)
			s.mu.Unlock(lockID)
			// ErrNonExtendingBlock must be ignored until headers-first block
			// sharing is implemented.
			if acceptErr == modules.ErrNonExtendingBlock {
				acceptErr = nil
			}
			if acceptErr != nil {
				return acceptErr
			}
			// Yield the processor to give other processes time to grab a lock.
			// The Lock/Unlock cycle in this loop is very tight, and has been
			// known to prevent interrupts from getting lock access quickly.
			runtime.Gosched()
		}
	}
	return nil
}
// sendBlocks is the receiving end of the SendBlocks RPC. It returns a
// sequential set of blocks based on the 32 input block IDs. The most recent
// known ID is used as the starting point, and up to 'MaxCatchUpBlocks' from
// that BlockHeight onwards are returned. It also sends a boolean indicating
// whether more blocks are available.
func (s *ConsensusSet) sendBlocks(conn modules.PeerConn) error {
	// Read known blocks.
	var knownBlocks [32]types.BlockID
	err := encoding.ReadObject(conn, &knownBlocks, 32*crypto.HashSize)
	if err != nil {
		return err
	}
	// Find the most recent block from knownBlocks in the current path.
	found := false
	var start types.BlockHeight
	lockID := s.mu.RLock()
	if !s.db.open {
		s.mu.RUnlock(lockID)
		return errors.New("database not open")
	}
	for _, id := range knownBlocks {
		if s.db.inBlockMap(id) {
			pb := s.db.getBlockMap(id)
			// Only accept an ID that lies on the current path; a known block
			// on a side chain is not a usable common ancestor.
			if pb.Height <= s.height() && id == s.db.getPath(pb.Height) {
				found = true
				start = pb.Height + 1 // start at child
				break
			}
		}
	}
	// If no matching blocks are found, or if the caller has all known blocks,
	// don't send any blocks.
	h := s.height()
	s.mu.RUnlock(lockID)
	if !found || start > h {
		// Send 0 blocks.
		err = encoding.WriteObject(conn, []types.Block{})
		if err != nil {
			return err
		}
		// Indicate that no more blocks are available.
		return encoding.WriteObject(conn, false)
	}
	// Send the caller all of the blocks that they are missing.
	moreAvailable := true
	for moreAvailable {
		// Get the set of blocks to send. The read lock is re-acquired per
		// batch so the consensus set is not held locked during network I/O.
		var blocks []types.Block
		lockID = s.mu.RLock()
		if !s.db.open {
			s.mu.RUnlock(lockID)
			return errors.New("database not open")
		}
		{
			height := s.height()
			// TODO: unit test for off-by-one errors here
			for i := start; i <= height && i < start+MaxCatchUpBlocks; i++ {
				node := s.db.getBlockMap(s.db.getPath(i))
				blocks = append(blocks, node.Block)
			}
			// TODO: Check for off-by-one here too.
			moreAvailable = start+MaxCatchUpBlocks < height
			start += MaxCatchUpBlocks
		}
		s.mu.RUnlock(lockID)
		// Send a set of blocks to the caller + a flag indicating whether more
		// are available.
		if err = encoding.WriteObject(conn, blocks); err != nil {
			return err
		}
		if err = encoding.WriteObject(conn, moreAvailable); err != nil {
			return err
		}
	}
	return nil
}
|
package main
import (
"fmt"
"reflect"
)
// main runs a small self-check of permutation against hand-written
// expected outputs, printing pass/fail per case.
func main() {
	testCases := []struct {
		input string
		want  []string
	}{
		{"a", []string{"a"}},
		{"ab", []string{"ab", "ba"}},
		{"abc", []string{"abc", "acb", "bac", "bca", "cba", "cab"}},
	}
	for _, c := range testCases {
		got := permutation(c.input)
		if reflect.DeepEqual(c.want, got) {
			fmt.Printf("pass\n%v == %v\n", c.want, got)
		} else {
			fmt.Printf("fail:\npermutation(%v) = %v - want: %v\n", c.input, got, c.want)
		}
		fmt.Println()
	}
}
// permutation returns every permutation of str, in the order produced by
// recursively swapping each position with every later position (see
// permutationR).
func permutation(str string) []string {
	return permutationR(str, 0)
}
// permutationR returns all permutations of str that keep str[:idx] fixed,
// permuting only the suffix starting at position idx.
func permutationR(str string, idx int) []string {
	// A fully fixed string is its own single permutation.
	if idx >= len(str) {
		return []string{str}
	}
	var perms []string
	for i := idx; i < len(str); i++ {
		// Place str[i] at position idx, then permute the remainder.
		chars := []byte(str)
		chars[idx], chars[i] = chars[i], chars[idx]
		perms = append(perms, permutationR(string(chars), idx+1)...)
	}
	return perms
}
// swap returns a copy of s with the bytes at positions a and b exchanged.
// It operates on bytes, not runes, so it is only correct for ASCII input.
func swap(s string, a, b int) string {
	buf := []byte(s)
	buf[a], buf[b] = buf[b], buf[a]
	return string(buf)
}
|
/**
 * @description: Demonstrates the copy behavior of slices.
 * @author Administrator
 * @date 2020/7/11 0011 10:44
 */
package main
import "fmt"
// main demonstrates the difference between assigning a slice (which shares
// the backing array) and copying one with the built-in copy().
func main() {
	// Slices are reference types: a and b share the same backing array, so
	// a write through b is also visible through a.
	a := []int{1, 2, 3, 4, 5}
	b := a
	fmt.Println(a) //[1 2 3 4 5]
	fmt.Println(b) //[1 2 3 4 5]
	b[0] = 1000
	fmt.Println(a) //[1000 2 3 4 5]
	fmt.Println(b) //[1000 2 3 4 5]
	// The built-in copy() duplicates the elements into d's own backing
	// array, so the two slices can then be modified independently.
	c := []int{1, 2, 3, 4, 5}
	d := make([]int, 5, 5)
	copy(d, c) // copy the elements of slice c into slice d
	fmt.Println(c) //[1 2 3 4 5]
	fmt.Println(d) //[1 2 3 4 5]
	d[0] = 1000
	fmt.Println(c) //[1 2 3 4 5]
	fmt.Println(d) //[1000 2 3 4 5]
}
|
package leetcode
// imageSmoother replaces each cell with the floor of the average of itself
// and its in-bounds 8-neighbors. A 2D prefix-sum table makes each window
// sum O(1), for O(m*n) total work.
func imageSmoother(img [][]int) [][]int {
	rows, cols := len(img), len(img[0])
	// ps[r][c] holds the sum of img[0:r][0:c].
	ps := make([][]int, rows+1)
	ps[0] = make([]int, cols+1)
	out := make([][]int, rows)
	for r := 0; r < rows; r++ {
		ps[r+1] = make([]int, cols+1)
		out[r] = make([]int, cols)
		for c := 0; c < cols; c++ {
			ps[r+1][c+1] = ps[r+1][c] + ps[r][c+1] - ps[r][c] + img[r][c]
		}
	}
	for r := 0; r < rows; r++ {
		for c := 0; c < cols; c++ {
			// Clamp the 3x3 window to the image bounds (half-open ranges).
			top, bottom := r-1, r+2
			if top < 0 {
				top = 0
			}
			if bottom > rows {
				bottom = rows
			}
			left, right := c-1, c+2
			if left < 0 {
				left = 0
			}
			if right > cols {
				right = cols
			}
			total := ps[bottom][right] - ps[bottom][left] - ps[top][right] + ps[top][left]
			out[r][c] = total / ((bottom - top) * (right - left))
		}
	}
	return out
}
// flip toggles the lowest bit of bit: 0 -> 1 and 1 -> 0. (For other
// inputs it XORs the low bit, exactly as the original a ^ 1 form does.)
func flip(bit int) int {
	return bit ^ 1
}
// sign reports 1 when a is non-negative and 0 when a is negative.
//
// Bug fix: the previous branchless form flip(a >> 31 & 1) only inspected
// bit 31, so on 64-bit platforms negative values with magnitude >= 2^31
// (e.g. -(1<<40)) were misclassified as non-negative. A direct comparison
// is correct for the full int range.
func sign(a int) int {
	if a < 0 {
		return 0
	}
	return 1
}
// max returns the larger of a and b.
//
// Bug fix: the previous bit-trick (sign(a-b) based) was wrong in two ways:
// a-b can overflow (e.g. max(MaxInt, -1) returned -1), and sign's 31-bit
// shift misclassified large 64-bit differences. A plain comparison is
// overflow-free and correct for all ints.
func max(a, b int) int {
	if a > b {
		return a
	}
	return b
}
// min returns the smaller of a and b.
//
// Bug fix: the previous bit-trick a + (b-a)>>31&(b-a) shifted by 31 bits,
// which is wrong for 64-bit ints (e.g. min(0, -2147483649) returned
// -2147483650), and b-a can overflow. A plain comparison is correct for
// all ints.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
// imageSmoother1 is the straightforward alternative to imageSmoother: each
// output cell is computed independently by smooth, giving O(m*n) work with
// a constant-size window per cell.
func imageSmoother1(img [][]int) [][]int {
	out := make([][]int, len(img))
	for y := range img {
		out[y] = make([]int, len(img[0]))
		for x := range img[0] {
			out[y][x] = smooth(x, y, img)
		}
	}
	return out
}
// smooth returns the floor of the average of img[y][x] and its in-bounds
// 8-neighbors. It assumes img is rectangular (all rows the same length, as
// in the original's use of len(img[0]) for diagonal bounds).
func smooth(x, y int, img [][]int) int {
	sum, count := 0, 0
	for dy := -1; dy <= 1; dy++ {
		for dx := -1; dx <= 1; dx++ {
			ny, nx := y+dy, x+dx
			if ny < 0 || ny >= len(img) || nx < 0 || nx >= len(img[0]) {
				continue
			}
			sum += img[ny][nx]
			count++
		}
	}
	return sum / count
}
|
// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"sync"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr"
"github.com/cockroachdb/cockroach/pkg/sql/row"
"github.com/cockroachdb/cockroach/pkg/sql/rowcontainer"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqlerrors"
"github.com/cockroachdb/errors"
)
// updateNodePool recycles updateNode allocations across statements.
// NOTE(review): the Get/Put call sites are outside this chunk.
var updateNodePool = sync.Pool{
	New: func() interface{} {
		return &updateNode{}
	},
}
// updateNode is the planNode for UPDATE: it consumes rows from source,
// applies the mutation via run, and (optionally) surfaces RETURNING rows
// described by columns. See BatchedNext for the batching protocol.
type updateNode struct {
	// source supplies the rows to be updated (fetched columns followed by
	// the rendered RHS expressions; see processSourceRow).
	source planNode
	// columns is set if this UPDATE is returning any rows, to be
	// consumed by a renderNode upstream. This occurs when there is a
	// RETURNING clause with some scalar expressions.
	columns colinfo.ResultColumns
	// run holds the run-time state populated by startExec and advanced by
	// BatchedNext.
	run updateRun
}
// updateRun contains the run-time state of updateNode during local execution.
type updateRun struct {
	// tu is the table updater that accumulates row updates into batches and
	// flushes/finalizes them (see BatchedNext).
	tu         tableUpdater
	// rowsNeeded is true when RETURNING rows must be accumulated in tu.rows.
	rowsNeeded bool
	// checkOrds identifies the CHECK constraints to evaluate.
	// NOTE(review): its consumer is outside this chunk.
	checkOrds  checkSet

	// done informs a new call to BatchedNext() that the previous call to
	// BatchedNext() has completed the work already.
	done bool

	// traceKV caches the current KV tracing flag.
	traceKV bool

	// computedCols are the columns that need to be (re-)computed as
	// the result of updating some of the columns in updateCols.
	computedCols []descpb.ColumnDescriptor
	// computeExprs are the expressions to evaluate to re-compute the
	// columns in computedCols.
	computeExprs []tree.TypedExpr
	// iVarContainerForComputedCols is used as a temporary buffer that
	// holds the updated values for every column in the source, to
	// serve as input for indexed vars contained in the computeExprs.
	iVarContainerForComputedCols schemaexpr.RowIndexedVarContainer

	// sourceSlots is the helper that maps RHS expressions to LHS targets.
	// This is necessary because there may be fewer RHS expressions than
	// LHS targets. For example, SET (a, b) = (SELECT 1,2) has:
	// - 2 targets (a, b)
	// - 1 source slot, the subquery (SELECT 1, 2).
	// Each call to extractValues() on a sourceSlot will return 1 or more
	// datums suitable for assignments. In the example above, the
	// method would return 2 values.
	sourceSlots []sourceSlot

	// updateValues will hold the new values for every column
	// mentioned in the LHS of the SET expressions, in the
	// order specified by those SET expressions (thus potentially
	// a different order than the source).
	updateValues tree.Datums

	// During the update, the expressions provided by the source plan
	// contain the columns that are being assigned in the order
	// specified by the table descriptor.
	//
	// For example, with UPDATE kv SET v=3, k=2, the source plan will
	// provide the values in the order k, v (assuming this is the order
	// the columns are defined in kv's descriptor).
	//
	// Then during the update, the columns are updated in the order of
	// the setExprs (or, equivalently, the order of the sourceSlots),
	// for the example above that would be v, k. The results
	// are stored in updateValues above.
	//
	// Then at the end of the update, the values need to be presented
	// back to the TableRowUpdater in the order of the table descriptor
	// again.
	//
	// updateVals is the buffer for this 2nd stage.
	// updateColsIdx maps the order of the 2nd stage into the order of the 3rd stage.
	// This provides the inverse mapping of sourceSlots.
	//
	updateColsIdx catalog.TableColMap

	// rowIdxToRetIdx is the mapping from the columns in ru.FetchCols to the
	// columns in the resultRowBuffer. A value of -1 is used to indicate
	// that the column at that index is not part of the resultRowBuffer
	// of the mutation. Otherwise, the value at the i-th index refers to the
	// index of the resultRowBuffer where the i-th column is to be returned.
	rowIdxToRetIdx []int

	// numPassthrough is the number of columns in addition to the set of
	// columns of the target table being returned, that we must pass through
	// from the input node.
	numPassthrough int
}
// startExec caches the KV tracing flag, allocates the RETURNING row
// container when one is needed, and initializes the table updater.
func (u *updateNode) startExec(params runParams) error {
	// cache traceKV during execution, to avoid re-evaluating it for every row.
	u.run.traceKV = params.p.ExtendedEvalContext().Tracing.KVTracingEnabled()

	if u.run.rowsNeeded {
		// The container is typed from u.columns, the RETURNING columns.
		u.run.tu.rows = rowcontainer.NewRowContainer(
			params.EvalContext().Mon.MakeBoundAccount(),
			colinfo.ColTypeInfoFromResCols(u.columns),
		)
	}
	return u.run.tu.init(params.ctx, params.p.txn, params.EvalContext())
}
// Next is required because batchedPlanNode inherits from planNode, but
// batchedPlanNode doesn't really provide it. See the explanatory comments
// in plan_batch.go. Calling it is a programming error, hence the panic.
func (u *updateNode) Next(params runParams) (bool, error) { panic("not valid") }
// Values is required because batchedPlanNode inherits from planNode, but
// batchedPlanNode doesn't really provide it. See the explanatory comments
// in plan_batch.go. Calling it is a programming error, hence the panic.
func (u *updateNode) Values() tree.Datums { panic("not valid") }
// BatchedNext implements the batchedPlanNode interface. It consumes up to
// one batch of rows from the source, processes the update for each row,
// flushes (or finalizes) the KV batch, and reports whether any rows were
// processed in this batch.
func (u *updateNode) BatchedNext(params runParams) (bool, error) {
	if u.run.done {
		return false, nil
	}

	// Advance one batch. First, clear the last batch.
	u.run.tu.clearLastBatch(params.ctx)

	// Now consume/accumulate the rows for this batch.
	lastBatch := false
	for {
		// Check for query cancellation between rows.
		if err := params.p.cancelChecker.Check(); err != nil {
			return false, err
		}

		// Advance one individual row.
		if next, err := u.source.Next(params); !next {
			lastBatch = true
			if err != nil {
				return false, err
			}
			break
		}

		// Process the update for the current source row, potentially
		// accumulating the result row for later.
		if err := u.processSourceRow(params, u.source.Values()); err != nil {
			return false, err
		}

		// Are we done yet with the current batch?
		if u.run.tu.currentBatchSize >= u.run.tu.maxBatchSize {
			break
		}
	}

	if u.run.tu.currentBatchSize > 0 {
		if !lastBatch {
			// We only run/commit the batch if there were some rows processed
			// in this batch. The final batch is instead committed by
			// finalize() below.
			if err := u.run.tu.flushAndStartNewBatch(params.ctx); err != nil {
				return false, err
			}
		}
	}

	if lastBatch {
		if err := u.run.tu.finalize(params.ctx); err != nil {
			return false, err
		}
		// Remember we're done for the next call to BatchedNext().
		u.run.done = true
	}

	// Possibly initiate a run of CREATE STATISTICS.
	params.ExecCfg().StatsRefresher.NotifyMutation(
		u.run.tu.tableDesc().GetID(),
		u.run.tu.lastBatchSize,
	)

	return u.run.tu.lastBatchSize > 0, nil
}
// processSourceRow processes one row from the source for update and, if
// result rows are needed, saves it in the result row container.
func (u *updateNode) processSourceRow(params runParams, sourceVals tree.Datums) error {
	// sourceVals contains values for the columns from the table, in the order of the
	// table descriptor. (One per column in u.tw.ru.FetchCols)
	//
	// And then after that, all the extra expressions potentially added via
	// a renderNode for the RHS of the assignments.

	// oldValues is the prefix of sourceVals that corresponds to real
	// stored columns in the table, that is, excluding the RHS assignment
	// expressions.
	oldValues := sourceVals[:len(u.run.tu.ru.FetchCols)]

	// valueIdx is used in the loop below to map sourceSlots to
	// entries in updateValues.
	valueIdx := 0

	// Propagate the values computed for the RHS expressions into
	// updateValues at the right positions. The positions in
	// updateValues correspond to the columns named in the LHS
	// operands for SET.
	for _, slot := range u.run.sourceSlots {
		for _, value := range slot.extractValues(sourceVals) {
			u.run.updateValues[valueIdx] = value
			valueIdx++
		}
	}

	// At this point, we have populated updateValues with the result of
	// computing the RHS for every assignment.
	//
	if len(u.run.computeExprs) > 0 {
		// We now need to (re-)compute the computed column values, using
		// the updated values above as input.
		//
		// This needs to happen in the context of a row containing all the
		// table's columns as if they had been updated already. This is not
		// yet reflected neither by oldValues (which contain non-updated values)
		// nor updateValues (which contain only those columns mentioned in the SET LHS).
		//
		// So we need to construct a buffer that groups them together.
		// iVarContainerForComputedCols does this.
		copy(u.run.iVarContainerForComputedCols.CurSourceRow, oldValues)
		for i := range u.run.tu.ru.UpdateCols {
			id := u.run.tu.ru.UpdateCols[i].ID
			idx := u.run.tu.ru.FetchColIDtoRowIndex.GetDefault(id)
			u.run.iVarContainerForComputedCols.CurSourceRow[idx] = u.run.updateValues[i]
		}

		// Now (re-)compute the computed columns.
		// Note that it's safe to do this in any order, because we currently
		// prevent computed columns from depending on other computed columns.
		params.EvalContext().PushIVarContainer(&u.run.iVarContainerForComputedCols)
		for i := range u.run.computedCols {
			d, err := u.run.computeExprs[i].Eval(params.EvalContext())
			if err != nil {
				// Clear the container reference before returning so the
				// EvalContext does not keep pointing at our scratch row.
				params.EvalContext().IVarContainer = nil
				return errors.Wrapf(err, "computed column %s", tree.ErrString((*tree.Name)(&u.run.computedCols[i].Name)))
			}
			idx := u.run.updateColsIdx.GetDefault(u.run.computedCols[i].ID)
			u.run.updateValues[idx] = d
		}
		params.EvalContext().PopIVarContainer()
	}

	// Verify the schema constraints. For consistency with INSERT/UPSERT
	// and compatibility with PostgreSQL, we must do this before
	// processing the CHECK constraints.
	if err := enforceLocalColumnConstraints(u.run.updateValues, u.run.tu.ru.UpdateCols); err != nil {
		return err
	}

	// Run the CHECK constraints, if any. CheckHelper will either evaluate the
	// constraints itself, or else inspect boolean columns from the input that
	// contain the results of evaluation.
	if !u.run.checkOrds.Empty() {
		// CHECK results live immediately after the fetch columns, update
		// columns and passthrough columns in the source row.
		checkVals := sourceVals[len(u.run.tu.ru.FetchCols)+len(u.run.tu.ru.UpdateCols)+u.run.numPassthrough:]
		if err := checkMutationInput(
			params.ctx, &params.p.semaCtx, u.run.tu.tableDesc(), u.run.checkOrds, checkVals,
		); err != nil {
			return err
		}
	}

	// Create a set of partial index IDs to not add entries or remove entries
	// from.
	var pm row.PartialIndexUpdateHelper
	if n := len(u.run.tu.tableDesc().PartialIndexes()); n > 0 {
		// Partial-index put/del boolean columns trail everything else in the
		// source row: first n "put" values, then n "del" values.
		offset := len(u.run.tu.ru.FetchCols) + len(u.run.tu.ru.UpdateCols) + u.run.checkOrds.Len() + u.run.numPassthrough
		partialIndexVals := sourceVals[offset:]
		partialIndexPutVals := partialIndexVals[:n]
		partialIndexDelVals := partialIndexVals[n : n*2]

		err := pm.Init(partialIndexPutVals, partialIndexDelVals, u.run.tu.tableDesc())
		if err != nil {
			return err
		}
	}

	// Queue the insert in the KV batch.
	newValues, err := u.run.tu.rowForUpdate(params.ctx, oldValues, u.run.updateValues, pm, u.run.traceKV)
	if err != nil {
		return err
	}

	// If result rows need to be accumulated, do it.
	if u.run.tu.rows != nil {
		// The new values can include all columns, the construction of the
		// values has used execinfra.ScanVisibilityPublicAndNotPublic so the
		// values may contain additional columns for every newly added column
		// not yet visible. We do not want them to be available for RETURNING.
		//
		// MakeUpdater guarantees that the first columns of the new values
		// are those specified u.columns.
		resultValues := make([]tree.Datum, len(u.columns))
		largestRetIdx := -1
		for i := range u.run.rowIdxToRetIdx {
			retIdx := u.run.rowIdxToRetIdx[i]
			if retIdx >= 0 {
				if retIdx >= largestRetIdx {
					largestRetIdx = retIdx
				}
				resultValues[retIdx] = newValues[i]
			}
		}

		// At this point we've extracted all the RETURNING values that are part
		// of the target table. We must now extract the columns in the RETURNING
		// clause that refer to other tables (from the FROM clause of the update).
		if u.run.numPassthrough > 0 {
			passthroughBegin := len(u.run.tu.ru.FetchCols) + len(u.run.tu.ru.UpdateCols)
			passthroughEnd := passthroughBegin + u.run.numPassthrough
			passthroughValues := sourceVals[passthroughBegin:passthroughEnd]

			// Passthrough values are appended after the largest RETURNING
			// index used by the target table's own columns.
			for i := 0; i < u.run.numPassthrough; i++ {
				largestRetIdx++
				resultValues[largestRetIdx] = passthroughValues[i]
			}
		}

		if _, err := u.run.tu.rows.AddRow(params.ctx, resultValues); err != nil {
			return err
		}
	}
	return nil
}
// BatchedCount implements the batchedPlanNode interface.
// It reports how many rows the last batch processed.
func (u *updateNode) BatchedCount() int { return u.run.tu.lastBatchSize }
// BatchedValues implements the batchedPlanNode interface.
// (The original comment said "BatchedCount" — copy-paste slip.)
func (u *updateNode) BatchedValues(rowIdx int) tree.Datums { return u.run.tu.rows.At(rowIdx) }
// Close releases the node's resources: it closes the source plan and the
// table updater, then zeroes the node and returns it to the sync.Pool so
// the allocation can be reused by a later UPDATE.
func (u *updateNode) Close(ctx context.Context) {
	u.source.Close(ctx)
	u.run.tu.close(ctx)
	*u = updateNode{}
	updateNodePool.Put(u)
}
// enableAutoCommit forwards the auto-commit request to the table updater,
// allowing the final KV batch to commit the transaction directly.
func (u *updateNode) enableAutoCommit() {
	u.run.tu.enableAutoCommit()
}
// sourceSlot abstracts the idea that our update sources can either be tuples
// or scalars. Tuples are for cases such as SET (a, b) = (1, 2) or SET (a, b) =
// (SELECT 1, 2), and scalars are for situations like SET a = b. A sourceSlot
// represents how to extract and type-check the results of the right-hand side
// of a single SET statement. We could treat everything as tuples, including
// scalars as tuples of size 1, and eliminate this indirection, but that makes
// the query plan more complex.
type sourceSlot interface {
	// extractValues returns a slice of the values this slot is responsible for,
	// as extracted from the row of results.
	extractValues(resultRow tree.Datums) tree.Datums

	// checkColumnTypes compares the types of the results that this slot refers to to the types of
	// the columns those values will be assigned to. It returns an error if those types don't match up.
	checkColumnTypes(row []tree.TypedExpr) error
}
// scalarSlot implements sourceSlot for a single scalar assignment
// (SET a = b): exactly one render column feeds one target column.
type scalarSlot struct {
	column      descpb.ColumnDescriptor
	sourceIndex int
}

// extractValues returns the one render value owned by this slot as a
// single-element sub-slice of the source row.
func (ss scalarSlot) extractValues(row tree.Datums) tree.Datums {
	i := ss.sourceIndex
	return row[i : i+1]
}

// checkColumnTypes verifies that the rendered expression's resolved type
// is assignable to the target column's declared type.
func (ss scalarSlot) checkColumnTypes(row []tree.TypedExpr) error {
	typ := row[ss.sourceIndex].ResolvedType()
	return colinfo.CheckDatumTypeFitsColumnType(&ss.column, typ)
}
// enforceLocalColumnConstraints asserts the column constraints that
// do not require data validation from other sources than the row data
// itself. This includes:
// - rejecting null values in non-nullable columns;
// - checking width constraints from the column type;
// - truncating results to the requested precision (not width).
// Note: the second point is what distinguishes this operation
// from a regular SQL cast -- here widths are checked, not
// used to truncate the value silently.
//
// The row buffer is modified in-place with the result of the
// checks.
func enforceLocalColumnConstraints(row tree.Datums, cols []descpb.ColumnDescriptor) error {
	for i := range cols {
		c := &cols[i]
		// NULL into a NOT NULL column is a hard error.
		if row[i] == tree.DNull && !c.Nullable {
			return sqlerrors.NewNonNullViolationError(c.Name)
		}
		// Adjust the datum to the column's type (width check / precision
		// truncation) and write the result back in place.
		adjusted, err := tree.AdjustValueToType(c.Type, row[i])
		if err != nil {
			return err
		}
		row[i] = adjusted
	}
	return nil
}
|
package adminconsole
import (
"context"
"fmt"
"github.com/epmd-edp/admin-console-operator/v2/pkg/controller/helper"
"github.com/epmd-edp/admin-console-operator/v2/pkg/service/admin_console"
"github.com/epmd-edp/admin-console-operator/v2/pkg/service/platform"
"os"
"time"
edpv1alpha1 "github.com/epmd-edp/admin-console-operator/v2/pkg/apis/edp/v1alpha1"
errorsf "github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/source"
)
// Lifecycle values written to AdminConsole.Status.Status as the reconciler
// walks the install -> expose -> integrate pipeline.
const (
	StatusInstall          = "installing"
	StatusFailed           = "failed"
	StatusCreated          = "created"
	StatusExposeStart      = "exposing config"
	StatusExposeFinish     = "config exposed"
	StatusIntegrationStart = "integration started"
	StatusReady            = "ready"
	// DefaultRequeueTime is the requeue delay; call sites multiply it by
	// time.Second.
	DefaultRequeueTime = 30
)

// log is the package-level logger for this controller.
var log = logf.Log.WithName("controller_adminconsole")
/**
* USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
* business logic. Delete these comments after modifying this file.*
*/
// Add creates a new AdminConsole Controller and adds it to the Manager. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
// newReconciler builds the reconciler (with its platform service); add wires the watches.
func Add(mgr manager.Manager) error {
	return add(mgr, newReconciler(mgr))
}
// newReconciler returns a new reconcile.Reconciler wired with the platform
// service for the platform type found in the environment.
//
// NOTE: the process exits if the platform service cannot be constructed —
// the reconciler is useless without it.
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
	scheme := mgr.GetScheme()
	client := mgr.GetClient()

	platformType := helper.GetPlatformTypeEnv()
	platformService, err := platform.NewPlatformService(platformType, scheme, &client)
	if err != nil {
		// BUG FIX: previously logged with an empty message, which produced a
		// context-free error line.
		log.Error(err, "failed to create platform service")
		os.Exit(1)
	}

	adminConsoleService := admin_console.NewAdminConsoleService(platformService, client)
	return &ReconcileAdminConsole{
		client:  client,
		scheme:  scheme,
		service: adminConsoleService,
	}
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler.
// It watches the AdminConsole primary resource and owned Pods.
func add(mgr manager.Manager, r reconcile.Reconciler) error {
	// Create a new controller
	c, err := controller.New("adminconsole-controller", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}

	// Watch for changes to primary resource AdminConsole
	err = c.Watch(&source.Kind{Type: &edpv1alpha1.AdminConsole{}}, &handler.EnqueueRequestForObject{})
	if err != nil {
		return err
	}

	// TODO(user): Modify this to be the types you create that are owned by the primary resource
	// Watch for changes to secondary resource Pods and requeue the owner AdminConsole
	err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForOwner{
		IsController: true,
		OwnerType:    &edpv1alpha1.AdminConsole{},
	})
	if err != nil {
		return err
	}

	return nil
}
// blank assignment to verify that ReconcileAdminConsole implements reconcile.Reconciler
var _ reconcile.Reconciler = &ReconcileAdminConsole{}
// ReconcileAdminConsole reconciles a AdminConsole object
type ReconcileAdminConsole struct {
// This client, initialized using mgr.Client() above, is a split client
// that reads objects from the cache and writes to the apiserver
client client.Client
scheme *runtime.Scheme
service admin_console.AdminConsoleService
}
// Reconcile reads that state of the cluster for a AdminConsole object and makes changes based on the state read
// and what is in the AdminConsole.Spec
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
//
// Status machine: "" / failed -> installing -> created -> exposing config
// -> config exposed -> integration started -> ready.
func (r *ReconcileAdminConsole) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
	reqLogger.Info("Reconciling AdminConsole")

	// Fetch the AdminConsole instance
	instance := &edpv1alpha1.AdminConsole{}
	err := r.client.Get(context.TODO(), request.NamespacedName, instance)
	if err != nil {
		if errors.IsNotFound(err) {
			// Request object not found, could have been deleted after reconcile request.
			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
			// Return and don't requeue
			return reconcile.Result{}, nil
		}
		// Error reading the object - requeue the request.
		return reconcile.Result{}, err
	}

	if instance.Status.Status == "" || instance.Status.Status == StatusFailed {
		reqLogger.Info("Installation has been started")
		if err := r.updateStatus(instance, StatusInstall); err != nil {
			return reconcile.Result{RequeueAfter: DefaultRequeueTime * time.Second}, err
		}
	}

	if instance.Status.Status == StatusInstall {
		log.Info("Installation has finished")
		if err := r.updateStatus(instance, StatusCreated); err != nil {
			return reconcile.Result{RequeueAfter: DefaultRequeueTime * time.Second}, err
		}
	}

	if dcIsReady, err := r.service.IsDeploymentReady(*instance); err != nil {
		return reconcile.Result{RequeueAfter: DefaultRequeueTime * time.Second}, errorsf.Wrapf(err, "Checking if Deployment configs is ready has been failed")
	} else if !dcIsReady {
		reqLogger.Info("Deployment config is not ready for exposing configuration yet")
		return reconcile.Result{RequeueAfter: DefaultRequeueTime * time.Second}, nil
	}

	if instance.Status.Status == StatusCreated {
		reqLogger.Info("Exposing configuration has started")
		if err := r.updateStatus(instance, StatusExposeStart); err != nil {
			return reconcile.Result{RequeueAfter: DefaultRequeueTime * time.Second}, err
		}
	}

	instance, err = r.service.ExposeConfiguration(*instance)
	if err != nil {
		// BUG FIX: keep the original error. Previously `err` was overwritten by
		// updateStatus; a *successful* status update nilled it out, so
		// errorsf.Wrapf(nil, ...) returned nil and the failure was silently
		// reported as success.
		// NOTE(review): assumes ExposeConfiguration returns a usable instance
		// on error — confirm it never returns nil here.
		if updErr := r.updateStatus(instance, StatusFailed); updErr != nil {
			return reconcile.Result{RequeueAfter: DefaultRequeueTime * time.Second}, updErr
		}
		return reconcile.Result{RequeueAfter: DefaultRequeueTime * time.Second}, errorsf.Wrapf(err, "Exposing configuration failed")
	}

	if instance.Status.Status == StatusExposeStart {
		reqLogger.Info("Exposing configuration has finished")
		if err := r.updateStatus(instance, StatusExposeFinish); err != nil {
			return reconcile.Result{RequeueAfter: DefaultRequeueTime * time.Second}, err
		}
	}

	if instance.Status.Status == StatusExposeFinish {
		reqLogger.Info("Integration has started")
		if err := r.updateStatus(instance, StatusIntegrationStart); err != nil {
			return reconcile.Result{RequeueAfter: DefaultRequeueTime * time.Second}, err
		}
	}

	instance, err = r.service.Integrate(*instance)
	if err != nil {
		// Same pattern as above: preserve the Integrate error.
		if updErr := r.updateStatus(instance, StatusFailed); updErr != nil {
			return reconcile.Result{RequeueAfter: DefaultRequeueTime * time.Second}, updErr
		}
		return reconcile.Result{RequeueAfter: DefaultRequeueTime * time.Second}, errorsf.Wrapf(err, "Integration failed")
	}

	if instance.Status.Status == StatusIntegrationStart {
		// BUG FIX: this log line previously repeated "Exposing configuration
		// has started" (copy-paste), although this branch marks the end of
		// integration.
		reqLogger.Info("Integration has finished")
		if err := r.updateStatus(instance, StatusReady); err != nil {
			return reconcile.Result{RequeueAfter: DefaultRequeueTime * time.Second}, err
		}
	}

	err = r.updateAvailableStatus(instance, true)
	if err != nil {
		reqLogger.Info("Failed to update availability status")
		return reconcile.Result{RequeueAfter: DefaultRequeueTime * time.Second}, err
	}

	return reconcile.Result{}, nil
}
// updateStatus writes newStatus into the resource's Status.Status and stamps
// LastTimeUpdated. It tries the status subresource first and falls back to a
// regular Update when that fails.
func (r *ReconcileAdminConsole) updateStatus(instance *edpv1alpha1.AdminConsole, newStatus string) error {
	reqLogger := log.WithValues("Request.Namespace", instance.Namespace, "Request.Name", instance.Name).WithName("status_update")
	currentStatus := instance.Status.Status
	instance.Status.Status = newStatus
	instance.Status.LastTimeUpdated = time.Now()
	err := r.client.Status().Update(context.TODO(), instance)
	if err != nil {
		// Fall back to updating the whole object (e.g. when the status
		// subresource is not enabled for this CRD).
		err = r.client.Update(context.TODO(), instance)
		if err != nil {
			return errorsf.Wrapf(err, "Couldn't update status from '%v' to '%v'", currentStatus, newStatus)
		}
	}

	reqLogger.Info(fmt.Sprintf("Status has been updated to '%v'", newStatus))
	return nil
}
// resourceActionFailed marks the resource as failed (best effort) and always
// propagates the original error.
//
// The previous implementation had an if/else where BOTH branches returned
// err, so the conditional was dead code; the status-update error was (and
// still is, intentionally) dropped in favor of the original cause.
func (r *ReconcileAdminConsole) resourceActionFailed(instance *edpv1alpha1.AdminConsole, err error) error {
	// Best-effort status update; the original error is what callers need.
	_ = r.updateStatus(instance, StatusFailed)
	return err
}
// updateAvailableStatus flips the Available flag on the resource status and
// persists it, falling back to a plain Update when the status subresource
// update fails. No-op when the flag already has the requested value.
//
// FIX: receiver changed from value to pointer for consistency with every
// other ReconcileAdminConsole method (consistent receiver sets are the Go
// convention); the existing caller already holds a *ReconcileAdminConsole.
func (r *ReconcileAdminConsole) updateAvailableStatus(instance *edpv1alpha1.AdminConsole, value bool) error {
	if instance.Status.Available == value {
		return nil
	}
	instance.Status.Available = value
	instance.Status.LastTimeUpdated = time.Now()
	if err := r.client.Status().Update(context.TODO(), instance); err != nil {
		if err := r.client.Update(context.TODO(), instance); err != nil {
			return err
		}
	}
	return nil
}
|
package structs
import (
"github.com/UniversityRadioYork/myradio-go"
)
// TimeslotTemplateData carries the data needed to render a timeslot page.
type TimeslotTemplateData struct {
	Timeslot myradio.Timeslot
	Config   Config
}

// CalendarTemplateData carries the data needed to render a show calendar page.
type CalendarTemplateData struct {
	Show   myradio.ShowMeta
	User   myradio.User
	Config Config
}
|
package main
import (
"fmt"
"strconv"
"math"
)
// https://projecteuler.net/problem=36
// same_both_ways reports whether s reads the same forwards and backwards.
// Strings that begin or end with '0' are rejected: a valid palindromic
// numeral cannot have leading zeros (and a trailing zero would require one).
// Precondition: s is non-empty (callers pass strconv/fmt output, never "").
//
// Improvements over the original: compares bytes directly instead of
// converting each one to a string, and only walks half the string.
func same_both_ways(s string) bool {
	if s[0] == '0' || s[len(s)-1] == '0' {
		return false
	}
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		if s[i] != s[j] {
			return false
		}
	}
	return true
}
func main(){
sum := 0
for x := 0; float64(x) < math.Pow(float64(10), float64(6)) + 1; x++ {
y := strconv.Itoa(x)
z := fmt.Sprintf("%b", x)
if same_both_ways(y) && same_both_ways(z) {
sum = sum + x
// fmt.Println(x, y);
}
}
fmt.Printf("sum is %d\n", sum);
} |
package packets
import (
"io"
"bytes"
)
// PingreqPacket is an MQTT PINGREQ control packet: a fixed header only,
// with no variable header or payload.
type PingreqPacket struct {
	FixedHeader
}

// Write serializes the packet to w: the message type in the high nibble of
// the first byte, followed by a zero remaining-length byte.
func (pr *PingreqPacket) Write(w io.Writer) error {
	var buf bytes.Buffer
	buf.Write([]byte{pr.FixedHeader.MessageType << 4, 0})
	_, err := w.Write(buf.Bytes())
	return err
}

// Unpack is a no-op: PINGREQ carries no body to decode.
func (pr *PingreqPacket) Unpack(b io.Reader) error {
	return nil
}
|
package service
// Context holds interfaces of external services.
// Currently empty; fields are added as dependencies are introduced.
type Context struct {
}

// Request is a placeholder request object for the service layer.
type Request struct {
}

// Response is a placeholder response object for the service layer.
type Response struct {
}
|
package tools
import (
"runtime"
"strings"
)
/********************************************************************
created: 2020-06-13
author: lixianmin
Copyright (C) - All Rights Reserved
*********************************************************************/
// CallersFrames captures up to 16 stack frames of the calling goroutine,
// skipping `skip` frames. When fullStack is false only the innermost
// captured frame is resolved (as long as more than one was captured).
func CallersFrames(skip int, fullStack bool) []runtime.Frame {
	const depth = 16
	var pcs [depth]uintptr // program counters
	total := runtime.Callers(skip, pcs[:])

	fetch := total
	if !fullStack && total > 1 {
		fetch = 1
	}

	iter := runtime.CallersFrames(pcs[:fetch])
	frames := make([]runtime.Frame, 0, fetch)
	for more := true; more; {
		var f runtime.Frame
		f, more = iter.Next()
		frames = append(frames, f)
	}
	return frames
}
// AppendFrameInfo appends "file:line function()" for the given frame to
// buffer and returns the extended buffer (append-style API; the function
// part is omitted when the frame has no function name).
func AppendFrameInfo(buffer []byte, frame runtime.Frame) []byte {
	buffer = append(buffer, frame.File...)
	buffer = append(buffer, ':')
	Itoa(&buffer, frame.Line, -1)

	if frame.Function != "" {
		buffer = append(buffer, ' ')
		// Strip the URL-like import-path prefix, keeping pkg.Object.Func.
		buffer = append(buffer, trimUrlPath(frame.Function)...)
		buffer = append(buffer, '(', ')')
	}

	return buffer
}
// trimUrlPath strips the URL-style directory prefix from a fully qualified
// function name: e.g. "github.com/lixianmin/logo.TestConsoleHook" becomes
// "logo.TestConsoleHook" (package + object + function name are kept).
// Names without a '/' past position 0 are returned unchanged.
func trimUrlPath(function string) string {
	if i := strings.LastIndexByte(function, '/'); i > 0 {
		return function[i+1:]
	}
	return function
}
// Itoa appends the decimal form of i to *buf, zero-padding on the left to at
// least wid digits (wid <= 1 means no padding). Same contract as the private
// itoa helper in the standard log package.
func Itoa(buf *[]byte, i int, wid int) {
	// Assemble the decimal digits from least to most significant.
	var digits [20]byte
	pos := len(digits) - 1
	for i >= 10 || wid > 1 {
		wid--
		next := i / 10
		digits[pos] = byte('0' + i - next*10)
		pos--
		i = next
	}
	digits[pos] = byte('0' + i) // i < 10 at this point
	*buf = append(*buf, digits[pos:]...)
}
|
package main
import "gopkg.in/alecthomas/kingpin.v1"
var (
	// searchCommand registers the "search" subcommand and dispatches it to search.
	searchCommand = kingpin.Command("search", "search for packages").Dispatch(search)
)

// search handles the "search" subcommand. Currently a no-op placeholder.
func search(c *kingpin.ParseContext) error {
	return nil
}
|
// Copyright 2018 Saferwall. All rights reserved.
// Use of this source code is governed by Apache v2 license
// license that can be found in the LICENSE file.
// Package log provides context-aware and structured logging capabilities.
package log
import (
"context"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"go.uber.org/zap/zaptest/observer"
)
// Logger is a logger that supports log levels, context and structured logging.
// Implementations are expected to be safe for reuse across goroutines
// (the zap-backed implementation below delegates to zap.SugaredLogger).
type Logger interface {
	// With returns a logger based off the root logger and decorates it with
	// the given context and arguments.
	With(ctx context.Context, args ...interface{}) Logger

	// Debug uses fmt.Sprint to construct and log a message at DEBUG level
	Debug(args ...interface{})
	// Info uses fmt.Sprint to construct and log a message at INFO level
	Info(args ...interface{})
	// Error uses fmt.Sprint to construct and log a message at ERROR level
	Error(args ...interface{})

	// Debugf uses fmt.Sprintf to construct and log a message at DEBUG level
	Debugf(format string, args ...interface{})
	// Infof uses fmt.Sprintf to construct and log a message at INFO level
	Infof(format string, args ...interface{})
	// Errorf uses fmt.Sprintf to construct and log a message at ERROR level
	Errorf(format string, args ...interface{})
}
// logger is the zap-backed Logger implementation; it embeds the sugared
// logger so the level methods are satisfied by delegation.
type logger struct {
	*zap.SugaredLogger
}

// contextKey is a private key type to avoid collisions in context.Value.
type contextKey int

const (
	// requestIDKey stores a per-request ID in the context.
	requestIDKey contextKey = iota
	// correlationIDKey stores a cross-service correlation ID in the context.
	correlationIDKey
)
// New creates a new logger using the default configuration.
func New() Logger {
	// Build error deliberately ignored: zap.NewProduction uses a fixed,
	// known-good config here (no user-supplied paths or encoders).
	l, _ := zap.NewProduction()
	return NewWithZap(l)
}
// NewCustom creates a new logger using a custom configuration
// given a log level.
func NewCustom(level string) Logger {
	// NewProductionConfig is a reasonable production logging configuration
	// Uses JSON, writes to standard error, and enables sampling.
	// Stacktraces are automatically included on logs of ErrorLevel and above.
	config := zap.NewProductionConfig()
	config.EncoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
	config.EncoderConfig.TimeKey = "timestamp"
	config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
	config.Level = getLoggingLevel(level)

	// FIX: local renamed from `logger`, which shadowed the package-level
	// `logger` type. Build error intentionally ignored, matching New().
	zl, _ := config.Build()
	return NewWithZap(zl)
}
// NewCustomWithFile creates a new logger using a custom configuration
// given a log level and an output path.
func NewCustomWithFile(level string, outputPath string) Logger {
	// NewProductionConfig is a reasonable production logging configuration
	// Uses JSON, writes to standard error, and enables sampling.
	// Stacktraces are automatically included on logs of ErrorLevel and above.
	config := zap.NewProductionConfig()
	config.EncoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
	config.EncoderConfig.TimeKey = "timestamp"
	config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
	config.Level = getLoggingLevel(level)
	config.OutputPaths = []string{outputPath}

	// FIX: local renamed from `logger`, which shadowed the package-level
	// `logger` type. Build error intentionally ignored, matching New().
	zl, _ := config.Build()
	return NewWithZap(zl)
}
// NewWithZap creates a new logger using the pre-configured zap logger.
func NewWithZap(l *zap.Logger) Logger {
	return &logger{l.Sugar()}
}
// NewForTest returns a new logger and the corresponding observed logs which
// can be used in unit tests to verify log entries.
// The observer records at INFO level and above.
func NewForTest() (Logger, *observer.ObservedLogs) {
	core, recorded := observer.New(zapcore.InfoLevel)
	return NewWithZap(zap.New(core)), recorded
}
// With returns a logger based off the root logger and decorates it with
// the given context and arguments.
//
// If the context contains request ID and/or correlation ID information
// (recorded via WithRequestID() and WithCorrelationID()), they will be
// added to every log message generated by the new logger.
//
// The arguments should be specified as a sequence of name, value pairs
// with names being strings.
// The arguments will also be added to every log message generated by the logger.
func (l *logger) With(ctx context.Context, args ...interface{}) Logger {
	if ctx != nil {
		if id, ok := ctx.Value(requestIDKey).(string); ok {
			args = append(args, zap.String("request_id", id))
		}
		if id, ok := ctx.Value(correlationIDKey).(string); ok {
			args = append(args, zap.String("correlation_id", id))
		}
	}
	// Only fork a child logger when there is something to attach;
	// otherwise return the receiver unchanged.
	if len(args) > 0 {
		return &logger{l.SugaredLogger.With(args...)}
	}
	return l
}
|
package e7_5
import (
"io"
)
type limitReader struct {
reader io.Reader
limit int64
pos int64
}
func LimitReader(r io.Reader, n int64) io.Reader {
return &limitReader{reader: r, limit: n}
}
func (lr *limitReader) Read(p []byte) (n int, err error) {
var readLen int64
if lr.pos == lr.limit && len(p) > 0 {
return 0, io.EOF
}
if lr.limit < lr.pos+(int64(len(p))) {
readLen = lr.limit - lr.pos
} else {
readLen = int64(len(p))
}
n, err = lr.reader.Read(p[:readLen])
lr.pos += int64(n)
return
}
|
package powerblink
import "testing"
// Test色の加算テスト (color addition test) verifies channel-wise Color.Add.
func Test色の加算テスト(t *testing.T) {
	lhs := Color{0, 0, 0}
	rhs := Color{100, 100, 100}
	ans := lhs.Add(rhs)
	if ans.Red != 100 {
		t.Error("Red != 100", ans.Red)
	}
	if ans.Green != 100 {
		t.Error("Green != 100", ans.Green)
	}
	if ans.Blue != 100 {
		// BUG FIX: the diagnostic previously printed ans.Green here.
		t.Error("Blue != 100", ans.Blue)
	}
}
// Test色の減算テスト (color subtraction test) verifies channel-wise Color.Sub.
func Test色の減算テスト(t *testing.T) {
	lhs := Color{255, 255, 255}
	rhs := Color{55, 55, 55}
	ans := lhs.Sub(rhs)
	if ans.Red != 200 {
		t.Error("Red != 200", ans.Red)
	}
	if ans.Green != 200 {
		t.Error("Green != 200", ans.Green)
	}
	if ans.Blue != 200 {
		// BUG FIX: the diagnostic previously printed ans.Green here.
		t.Error("Blue != 200", ans.Blue)
	}
}
// Test色の乗算テスト (color scaling test) verifies Color.Mul by a scalar.
func Test色の乗算テスト(t *testing.T) {
	lhs := Color{100, 100, 100}
	ans := lhs.Mul(2.0)
	if ans.Red != 200 {
		// BUG FIX: messages previously claimed "!= 0" although the expected
		// value is 200.
		t.Error("Red != 200", ans.Red)
	}
	if ans.Green != 200 {
		t.Error("Green != 200", ans.Green)
	}
	if ans.Blue != 200 {
		// BUG FIX: also previously printed ans.Green for the Blue channel.
		t.Error("Blue != 200", ans.Blue)
	}
}
// Test色の丸めテスト (rounding test) verifies Round clamps channels to [0, 255].
func Test色の丸めテスト(t *testing.T) {
	over := Color{256, 256, 256}.Round()
	if over.Red != 255 {
		t.Error("Red != 255 : ", over.Red)
	}
	if over.Green != 255 {
		t.Error("Green != 255 : ", over.Green)
	}
	if over.Blue != 255 {
		t.Error("Blue != 255 : ", over.Blue)
	}

	under := Color{-1, -1, -1}.Round()
	if under.Red != 0 {
		t.Error("Red != 0 : ", under.Red)
	}
	if under.Green != 0 {
		t.Error("Green != 0 : ", under.Green)
	}
	if under.Blue != 0 {
		t.Error("Blue != 0 : ", under.Blue)
	}
}
|
package types
// InstallUbuntu is the JSON payload describing an Ubuntu install request:
// machine identity (hostname/domain), credentials, a provisioning profile,
// the serial console port, and a callback URI to hit on completion.
type InstallUbuntu struct {
	Comport       string `json:"comport"`
	CompletionUri string `json:"completionUri"`
	Domain        string `json:"domain"`
	Hostname      string `json:"hostname"`
	Password      string `json:"password"`
	Profile       string `json:"profile"`
	UID           int    `json:"uid"`
	Username      string `json:"username"`
}
|
package main
import "fmt"
// smallest returns the minimum element of xs. Precondition: xs is non-empty.
// Extracted from main so the scan is testable in isolation.
func smallest(xs []int) int {
	best := xs[0]
	for _, v := range xs[1:] {
		if v < best {
			best = v
		}
	}
	return best
}

// main prints the smallest number in a fixed sample array.
func main() {
	array := []int{48, 96, 86, 68, 57, 82, 63, 70, 37, 34, 83, 27, 19, 97, 9, 17}
	fmt.Println("Smallest number is ", smallest(array))
}
|
package ravendb
import (
"net/http"
)
var (
	// Compile-time check that GetSubscriptionStateCommand implements RavenCommand.
	_ RavenCommand = &GetSubscriptionStateCommand{}
)

// GetSubscriptionStateCommand describes "get subscription state" command
type GetSubscriptionStateCommand struct {
	RavenCommandBase

	subscriptionName string

	// Result holds the decoded subscription state after SetResponse runs.
	Result *SubscriptionState
}
// newGetSubscriptionStateCommand returns a read-only command that fetches
// the state of the named subscription.
func newGetSubscriptionStateCommand(subscriptionName string) *GetSubscriptionStateCommand {
	cmd := &GetSubscriptionStateCommand{
		RavenCommandBase: NewRavenCommandBase(),

		subscriptionName: subscriptionName,
	}
	// GET with no side effects, so it may be served by any node.
	cmd.IsReadRequest = true
	return cmd
}
// CreateRequest builds the HTTP GET for the node's subscription-state
// endpoint, escaping the subscription name for use in the query string.
func (c *GetSubscriptionStateCommand) CreateRequest(node *ServerNode) (*http.Request, error) {
	url := node.URL + "/databases/" + node.Database + "/subscriptions/state?name=" + urlUtilsEscapeDataString(c.subscriptionName)

	return newHttpGet(url)
}
// SetResponse decodes the server's JSON payload into c.Result. An empty
// body is treated as an invalid response.
func (c *GetSubscriptionStateCommand) SetResponse(response []byte, fromCache bool) error {
	if len(response) == 0 {
		return throwInvalidResponse()
	}
	return jsonUnmarshal(response, &c.Result)
}
|
package queue
import (
"context"
"fmt"
"time"
"github.com/mongodb/mongo-go-driver/bson"
"github.com/mongodb/mongo-go-driver/bson/primitive"
"github.com/mongodb/mongo-go-driver/mongo"
"github.com/mongodb/mongo-go-driver/mongo/options"
)
// QueueMessage is the queue message structure.
type QueueMessage struct {
	Id      *primitive.ObjectID `json:"id" bson:"_id"`
	// Version changes on every state transition; used for optimistic concurrency.
	Version *primitive.ObjectID `json:"version" bson:"version"`

	Visibility int `json:"visibility" bson:"visibility"` // Visibility timeout is in seconds

	Created *time.Time `json:"created" bson:"created"`
	Payload string     `json:"payload" bson:"payload"`

	Started  *time.Time `json:"started" bson:"started"`
	Dequeued *time.Time `json:"dequeued" bson:"dequeued"`
	Expire   *time.Time `json:"expire" bson:"expire"`

	// queue is the owning Queue; set on dequeue so Done/reset can reach
	// the backing collection.
	queue *Queue
}
// Done tries to delete the message from the queue. If the visibility expired and the entry was
// updated, the version will not match and this method will return an error. The error is simply
// informational because the entry will be made available for another worker/processor.
// Reminder, this queue is for idempotent workloads.
func (m *QueueMessage) Done(ctx context.Context) error {
	// Filter on both _id and version so an entry reclaimed by another
	// worker (new version) is not deleted out from under it.
	filter := bson.D{{"_id", m.Id}, {"version", m.Version}}
	if res, err := m.queue.collection.DeleteOne(ctx, filter); err != nil {
		return fmt.Errorf(
			"Unable to delete entry - db: %s - collection: %s - id: %s - version: %s - reason: %v",
			m.queue.collection.Database().Name(),
			m.queue.collection.Name(),
			m.Id.Hex(),
			m.Version.Hex(),
			err,
		)
	} else if res.DeletedCount != 1 {
		// No document matched: the version changed (entry was reclaimed).
		return fmt.Errorf(
			"Unable to delete entry - db: %s - collection: %s - id: %s - version: %s - reason: doc not found",
			m.queue.collection.Database().Name(),
			m.queue.collection.Name(),
			m.Id.Hex(),
			m.Version.Hex(),
		)
	}
	return nil
}
// reset makes an expired entry available again: it assigns a fresh version
// and clears the dequeued/started/expire timestamps, but only if the stored
// version still matches (optimistic concurrency). Returns
// mongo.ErrNoDocuments when another worker already reclaimed the entry.
func (m *QueueMessage) reset(ctx context.Context) error {
	opts := options.Update()
	opts.SetUpsert(false)
	filter := bson.D{{"_id", m.Id}, {"version", m.Version}}
	update := bson.D{{"$set", bson.D{{"version", objectId()}, {"dequeued", nil}, {"started", nil}, {"expire", nil}}}}
	if res, err := m.queue.collection.UpdateOne(ctx, filter, update, opts); err != nil {
		return fmt.Errorf(
			"Unable to reset queue entry after expiration - db: %s - collection: %s - id: %s - version: %s - reason: %v",
			m.queue.collection.Database().Name(),
			m.queue.collection.Name(),
			m.Id.Hex(),
			m.Version.Hex(),
			err,
		)
	} else if res.MatchedCount == 0 {
		return mongo.ErrNoDocuments
	}
	return nil
}
|
package graph
// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
import (
"BackEnd/models"
"context"
"errors"
)
// CreateCategory inserts a new category named after input.CategoryName and
// returns the stored record.
func (r *mutationResolver) CreateCategory(ctx context.Context, input *models.NewCategory) (*models.Kategori, error) {
	category := models.Kategori{CategoryName: input.CategoryName}
	if _, err := r.DB.Model(&category).Insert(); err != nil {
		return nil, errors.New("insert category failed")
	}
	return &category, nil
}
// UpdateCategory renames the category identified by id. It first verifies
// the category exists, then persists the new name.
func (r *mutationResolver) UpdateCategory(ctx context.Context, id string, input *models.NewCategory) (*models.Kategori, error) {
	var category models.Kategori
	if err := r.DB.Model(&category).Where("id=?", id).First(); err != nil {
		return nil, errors.New("category not found")
	}

	category.CategoryName = input.CategoryName
	if _, err := r.DB.Model(&category).Where("id=?", id).Update(); err != nil {
		return nil, errors.New("update category failed")
	}
	return &category, nil
}
// DeleteCategory removes the category identified by id, reporting whether
// the deletion succeeded. Verifies existence before deleting.
func (r *mutationResolver) DeleteCategory(ctx context.Context, id string) (bool, error) {
	var category models.Kategori
	if err := r.DB.Model(&category).Where("id=?", id).First(); err != nil {
		return false, errors.New("category not found")
	}
	if _, err := r.DB.Model(&category).Where("id=?", id).Delete(); err != nil {
		return false, errors.New("delete error")
	}
	return true, nil
}
// Categories returns every category ordered by id.
func (r *queryResolver) Categories(ctx context.Context) ([]*models.Kategori, error) {
	var result []*models.Kategori
	if err := r.DB.Model(&result).Order("id").Select(); err != nil {
		return nil, errors.New("query failed")
	}
	return result, nil
}
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package componentconfig
// TODO(authprovider-q): Should these be in pkg/apis/authprovider?
// or pkg/apis/auth or pkg/apis/authconfig or ???
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// TODO(authprovider-q): Is the Auth in AuthConfiguration redundant?
type AuthConfiguration struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
GenerateKubeconfig GenerateKubeconfig `json:"generateKubeconfig,omitempty"`
}
type GenerateKubeconfig struct {
Server string `json:"server,omitempty"`
Name string `json:"name,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type AuthConfigurationList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []AuthConfiguration `json:"items"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// AuthProvider describes a single authentication provider, including its
// OAuth configuration and the email addresses permitted to use it.
type AuthProvider struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata"`

	// Description is a human-friendly name
	Description string `json:"description,omitempty"`

	// OAuthConfig holds the provider's OAuth client settings.
	OAuthConfig OAuthConfig `json:"oAuthConfig,omitempty"`

	// Email addresses that are allowed to register using this provider
	PermitEmails []string `json:"permitEmails,omitempty"`
}
// OAuthConfig holds the OAuth client credentials for an AuthProvider.
type OAuthConfig struct {
	// ClientID is the OAuth client identifier.
	ClientID string `json:"clientID,omitempty"`
	// TODO(authprovider-q): What do we do about secrets? We presumably don't want this secret
	// in the configmap, because that might have a fairly permissive RBAC role. But do we want to
	// do a layerable configuration? Keep the secret in a second configuration object? Have the
	// name of the secret here, and just runtime error until the secret is loaded?
	// ClientSecret is the OAuth secret
	ClientSecret string `json:"clientSecret,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// AuthProviderList is a list of AuthProvider objects.
type AuthProviderList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items           []AuthProvider `json:"items"`
}
|
package client
import (
"fmt"
"github.com/DataDog/datadog-agent/pkg/util/log"
"github.com/gorilla/websocket"
"proxy/config"
"proxy/impl"
"time"
)
// retryTimes counts connection attempts across retries; the client gives up
// after 10 attempts.
var retryTimes = 1

// StartClient connects to the transfer server over a websocket and relays
// data for the client. On dial failure it retries every five seconds, up to
// 10 attempts in total, then gives up.
func StartClient() {
	log.Info("client start.....")
	fmt.Println(" client start .....")
	conf := config.ConfigVal("config")
	conn, _, err := websocket.DefaultDialer.Dial(conf.TransferPath, nil)
	if err != nil {
		log.Info("connect to conserver fail: ", err)
		log.Info("start to retry connect ....")
		// Retry after five seconds.
		time.Sleep(5 * time.Second)
		retryTimes++
		if retryTimes < 10 {
			StartClient()
		} else {
			log.Info("reconnect conserver failed")
		}
		// Bug fix: return here so we never fall through and hand a nil
		// connection to InitClientParam — previously this happened both after
		// a failed dial and once more for every unwinding recursive retry.
		return
	}
	client := impl.InitClientParam(conn)
	client.ClientInitConn()
}
|
// Package thisconf is a simple package to do things that Viper doesn't.
package thisconf
import (
"github.com/spf13/viper"
"github.com/utrack/goroadie"
)
// Load loads app config from files & env variables. Config structure
// is whatever you like (and whatever the libs are able to unmarshal into).
// Load loads app config from files & env variables. Config structure
// is whatever you like (and whatever the libs are able to unmarshal into).
func Load(conf interface{}, envPrefix string) (err error) {
	// First, read stuff from config file using Viper.
	viper.SetConfigName("config")
	viper.SetConfigType("toml")
	viper.AddConfigPath(".")
	if err = viper.ReadInConfig(); err != nil {
		return err
	}
	if err = viper.Unmarshal(conf); err != nil {
		return err
	}
	// Then, overwrite them with env variables using goroadie.
	goroadie.Process(envPrefix, conf)
	return nil
}
|
package main
import (
"html/template"
"io"
"net/http"
"os"
"github.com/julienschmidt/httprouter"
)
// main serves a Hugo-generated static site: /posts/:title renders a post's
// index.html and /static serves the raw files.
func main() {
	r := httprouter.New()
	//curl -XGET -vvv http://127.0.0.1:9999/posts/my-second-post
	//curl -XGET -vvv http://127.0.0.1:9999/posts/wrong_path
	r.GET("/posts/:title", func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
		title := params.ByName("title")
		if _, err := os.Stat("static/posts/" + title); err != nil {
			if os.IsNotExist(err) {
				w.WriteHeader(http.StatusNotFound)
				io.WriteString(w, "no path")
				return
			}
		}
		// Bug fix: template.Must panics on a parse error, which would crash
		// the whole server on one bad request; report a 500 instead.
		t, err := template.ParseFiles("./static/posts/" + title + "/index.html")
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			io.WriteString(w, "template error")
			return
		}
		// Output may be partially written on an Execute error, so it can only
		// be ignored here (headers are already sent).
		_ = t.Execute(w, nil)
	})
	//if you take auto create static site by Hugo.
	//you should set baseURL = "http://127.0.0.1:9999/static/" in 'config.toml' configuration file before you take 'hugo -D' command line for auto creation.
	r.ServeFiles("/static/*filepath", http.Dir("./static"))
	// Bug fix: surface listen/serve errors (previously silently discarded).
	if err := http.ListenAndServe(":9999", r); err != nil {
		panic(err)
	}
}
|
package service
// ExamPaperAnswerSelectAllCount returns the total number of exam paper
// answers, delegating to the underlying exam service.
func (s *Service) ExamPaperAnswerSelectAllCount() int {
	return s.examService.ExamPaperAnswerSelectAllCount()
}
|
package editor
import (
"api/base"
"encoding/json"
"fmt"
)
// User aliases base.User so editor-specific methods can be attached to it.
type User base.User
// Save serializes the user to JSON and stores it in Redis under the user's
// id. On a marshal failure nothing is written.
func (user *User) Save() {
	jsonStr, err := json.Marshal(user)
	if err != nil {
		fmt.Println("json user is error")
		// Bug fix: previously execution fell through and stored an empty
		// string built from the nil jsonStr; bail out instead.
		return
	}
	baseProvider.User.RedisProvider.SetUserInfo(user.UserId, string(jsonStr))
	// TODO: also persist to mongo.
}
// GetUserEditor loads the user with the given id from Redis.
// NOTE(review): the Unmarshal error is ignored, so a missing or corrupt
// Redis entry yields a zero-value *User — confirm this best-effort behavior
// is what callers expect.
func GetUserEditor(userId string) *User {
	user := new(User)
	editorData := baseProvider.User.RedisProvider.GetUserInfo(userId)
	json.Unmarshal([]byte(editorData), user)
	return user
}
// GetUserEditors returns the users for the given ids.
// TODO: not implemented — currently always returns an empty slice.
func GetUserEditors(userIds []string) []*User {
	result := make([]*User, 0)
	return result
}
|
package main
import (
"net/http"
"github.com/gin-gonic/gin"
)
// IndexHandler renders the index page template with a greeting message.
func IndexHandler(c *gin.Context) {
	c.HTML(http.StatusOK, "index.html", gin.H{
		"msg": "嘿嘿嘿",
	})
}
// main wires up the gin engine (templates, static files, index route) and
// serves on :8001.
func main() {
	engine := gin.Default()
	engine.LoadHTMLGlob("templates/*")
	engine.Static("/static", "./statics")
	engine.GET("/index", IndexHandler)
	// Bug fix: Run's error (e.g. port already in use) was silently discarded.
	if err := engine.Run(":8001"); err != nil {
		panic(err)
	}
}
|
package gs15
// Lengths used by the gs15 package.
// NOTE(review): semantics inferred from names only — FullLen appears to be
// the full length and MasterLen the master portion; confirm against callers.
const (
	FullLen   = 15
	MasterLen = 14
)
|
package replacespaces
import (
"fmt"
"golangexercises/arraysandstrings/util"
"strings"
"testing"
"time"
)
const (
	// myName is the input fixture; note the trailing spaces that pad it out
	// for the in-place replacement algorithm.
	myName = "My name is a secret and I live in Utah "
	// myNameReplaced is the expected URL-encoded result.
	myNameReplaced = "My%20name%20is%20a%20secret%20and%20I%20live%20in%20Utah"
)
// TestReplace checks ReplaceSpaces against a known fixture and then prints
// rough timings for ReplaceSpaces vs. the stdlib strings.Replacer on a
// random input of util.Length runes (informational, not asserted).
func TestReplace(t *testing.T) {
	if r := ReplaceSpaces(myName); r != myNameReplaced {
		t.Errorf("%s is not correct replacement for %s\n", r, myName)
	}
	randomString := util.GenerateRandomRune(util.Length)
	start := time.Now()
	ReplaceSpaces(randomString)
	fmt.Printf("Replaced spaces in %d elements in %s\n", util.Length, time.Since(start))
	start = time.Now()
	r := strings.NewReplacer(" ", "%20")
	r.Replace(randomString)
	fmt.Printf("Replaced spaces in %d elements in %s using Go strings Replacer\n", util.Length, time.Since(start))
}
|
package raft
import (
"bytes"
"encoding/gob"
"fmt"
"log"
"math/rand"
"os"
"sync"
"sync/atomic"
"time"
)
// DebugCM enables verbose consensus-module logging (via dlog) when > 0.
const DebugCM = 1
/*
CommitEntry is the data reported by Raft on the commit channel. Each commit
entry notifies the client that a command has reached consensus and can be
applied to the client's state machine.
*/
type CommitEntry struct {
	// Command is the client command being committed.
	Command interface{}

	// Index is the log index at which the client command is committed.
	Index int

	// Term is the Raft term at which the client command is committed.
	Term int
}
// LogEntry is a single Raft log record: a client command plus the term in
// which the leader received it.
type LogEntry struct {
	Command interface{}
	Term    int
}
// CMState enumerates the roles a consensus module can take on.
type CMState int

const (
	Follower  CMState = iota // follower
	Candidate                // candidate
	Leader                   // leader
	Dead                     // stopped module
)

// String returns a human-readable name for the state; it panics on any value
// outside the defined states.
func (s CMState) String() string {
	names := map[CMState]string{
		Follower:  "Follower",
		Candidate: "Candidate",
		Leader:    "Leader",
		Dead:      "Dead",
	}
	name, ok := names[s]
	if !ok {
		panic("unreachable")
	}
	return name
}
// ConsensusModule (CM) implements a single node of Raft consensus.
type ConsensusModule struct {
	// mu protects concurrent access to a CM.
	mu sync.Mutex

	// id is the server ID of this CM.
	id int

	// peerIds lists the IDs of our peers in the cluster.
	peerIds []int

	// server is the server containing this CM. It's used to issue RPC calls
	// to peers.
	server *Server

	// storage is used to persist state.
	storage Storage

	// commitChan is the channel where this CM is going to report committed
	// log entries. It's passed in by the client during construction.
	commitChan chan<- CommitEntry

	// newCommitReadyChan is an internal notification channel used by
	// goroutines that commit new entries to the log to notify that these
	// entries may be sent on commitChan.
	newCommitReadyChan chan struct{}

	// triggerAEChan is an internal notification channel used to trigger
	// sending new AEs to followers when interesting changes occurred.
	triggerAEChan chan struct{}

	// Persistent Raft state on all servers.
	currentTerm int        // latest term this server has seen (starts at 0, increases monotonically)
	votedFor    int        // candidate that received this server's vote in the current term (-1 if none)
	log         []LogEntry // log entries; each holds a state-machine command and the term when the leader received it (first index is 1)

	// Volatile Raft state on all servers.
	commitIndex        int       // index of the highest log entry known to be committed (increases monotonically)
	lastApplied        int       // index of the highest log entry applied to the state machine (increases monotonically)
	state              CMState   // current role of this CM
	electionResetEvent time.Time // last time the election timer was reset

	// Volatile Raft state on leaders.
	nextIndex  map[int]int // per server, index of the next log entry to send to it (initialized to leader's last log index + 1)
	matchIndex map[int]int // per server, index of the highest log entry known to be replicated on it (increases monotonically)
}
// NewConsensusModule creates a new CM with the given ID, list of peer IDs and
// server. The ready channel signals the CM that all peers are connected and
// it's safe to start its state machine. commitChan is where committed entries
// will be reported to the client.
func NewConsensusModule(id int, peerIds []int, server *Server, storage Storage, ready <-chan interface{}, commitChan chan<- CommitEntry) *ConsensusModule {
	cm := new(ConsensusModule)
	cm.id = id
	cm.peerIds = peerIds
	cm.server = server
	cm.storage = storage
	cm.commitChan = commitChan
	cm.newCommitReadyChan = make(chan struct{}, 16)
	cm.triggerAEChan = make(chan struct{}, 1)
	cm.state = Follower
	cm.votedFor = -1
	cm.commitIndex = -1
	cm.lastApplied = -1
	cm.nextIndex = make(map[int]int)
	cm.matchIndex = make(map[int]int)
	// Restore persistent state (term, vote, log) when this is a restart.
	if cm.storage.HasData() {
		cm.restoreFromStorage(cm.storage)
	}
	go func() {
		// The CM is quiescent until ready is signaled; then it starts a
		// countdown for leader election.
		<-ready
		cm.mu.Lock()
		cm.electionResetEvent = time.Now()
		cm.mu.Unlock()
		cm.runElectionTimer()
	}()
	go cm.commitChanSender()
	return cm
}
// Report returns this CM's ID, current term and whether it is the leader.
func (cm *ConsensusModule) Report() (id int, term int, isLeader bool) {
	cm.mu.Lock()
	defer cm.mu.Unlock()
	return cm.id, cm.currentTerm, cm.state == Leader
}
/*
Submit submits a new command to the CM. This function doesn't block; clients
read the commit channel passed to the constructor to be notified of newly
committed entries.
Returns true iff this CM is the leader — in which case the command is
accepted. If false is returned, the client should find a different server to
submit the command to.
*/
func (cm *ConsensusModule) Submit(command interface{}) bool {
	cm.mu.Lock()
	cm.dlog("Submit received by %v: %v", cm.state, command)
	if cm.state == Leader {
		cm.log = append(cm.log, LogEntry{Command: command, Term: cm.currentTerm})
		cm.persistToStorage()
		cm.dlog("... log=%v", cm.log)
		// Unlock before the channel send so we never block holding the lock.
		cm.mu.Unlock()
		cm.triggerAEChan <- struct{}{}
		return true
	}
	cm.mu.Unlock()
	return false
}
// Stop stops this CM, cleaning up its state. This method returns quickly, but
// it may take a bit of time (up to ~election timeout) for all goroutines to
// exit.
func (cm *ConsensusModule) Stop() {
	cm.mu.Lock()
	defer cm.mu.Unlock()
	cm.state = Dead
	cm.dlog("becomes Dead")
	// Closing this channel makes commitChanSender exit.
	close(cm.newCommitReadyChan)
}
// restoreFromStorage restores the persistent Raft state (currentTerm,
// votedFor, log) of this CM from the given storage. Every key is required;
// a missing key or gob decode failure is fatal.
//
// Fixes: the storage parameter was previously ignored in favor of cm.storage,
// and the three identical decode stanzas are collapsed into one helper.
func (cm *ConsensusModule) restoreFromStorage(storage Storage) {
	// restore fatally exits unless key exists and gob-decodes into out.
	restore := func(key string, out interface{}) {
		data, found := storage.Get(key)
		if !found {
			log.Fatalf("%s not found in storage", key)
		}
		if err := gob.NewDecoder(bytes.NewBuffer(data)).Decode(out); err != nil {
			log.Fatal(err)
		}
	}
	restore("currentTerm", &cm.currentTerm)
	restore("votedFor", &cm.votedFor)
	restore("log", &cm.log)
}
// persistToStorage saves all of the CM's persistent Raft state (currentTerm,
// votedFor, log) to cm.storage; a gob encode failure is fatal.
//
// The three identical encode stanzas are collapsed into one helper.
func (cm *ConsensusModule) persistToStorage() {
	// persist gob-encodes v and stores the bytes under key.
	persist := func(key string, v interface{}) {
		var buf bytes.Buffer
		if err := gob.NewEncoder(&buf).Encode(v); err != nil {
			log.Fatal(err)
		}
		cm.storage.Set(key, buf.Bytes())
	}
	persist("currentTerm", cm.currentTerm)
	persist("votedFor", cm.votedFor)
	persist("log", cm.log)
}
// dlog logs a debugging message when DebugCM > 0, prefixed with the CM's id.
func (cm *ConsensusModule) dlog(format string, args ...interface{}) {
	if DebugCM <= 0 {
		return
	}
	log.Printf(fmt.Sprintf("[%d] ", cm.id)+format, args...)
}
// RequestVoteArgs are the arguments for the RequestVote RPC.
type RequestVoteArgs struct {
	Term         int // candidate's term
	CandidateId  int // candidate requesting the vote
	LastLogIndex int // index of the candidate's last log entry
	LastLogTerm  int // term of the candidate's last log entry
}
// RequestVoteReply is the reply for the RequestVote RPC.
type RequestVoteReply struct {
	Term        int  // currentTerm, returned so the candidate can update itself
	VoteGranted bool // true means the candidate received the vote
}
// RequestVote implements the RequestVote RPC, invoked by candidates to
// gather votes.
func (cm *ConsensusModule) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) error {
	cm.mu.Lock()
	defer cm.mu.Unlock()
	if cm.state == Dead {
		return nil
	}
	lastLogIndex, lastLogTerm := cm.lastLogIndexAndTerm()
	cm.dlog("RequestVote: %+v [currentTerm=%d, votedFor=%d, log index/term=(%d, %d)]", args, cm.currentTerm, cm.votedFor, lastLogIndex, lastLogTerm)
	// If the request carries a newer term, step down to follower.
	if args.Term > cm.currentTerm {
		cm.dlog("... term out of date in RequestVote")
		cm.becomeFollower(args.Term)
	}
	// Grant the vote when the terms match, we haven't voted yet (or already
	// voted for this candidate), and the candidate's log is at least as
	// up-to-date as ours; otherwise refuse.
	if cm.currentTerm == args.Term && (cm.votedFor == -1 || cm.votedFor == args.CandidateId) &&
		(args.LastLogTerm > lastLogTerm || (args.LastLogTerm == lastLogTerm && args.LastLogIndex >= lastLogIndex)) {
		reply.VoteGranted = true
		cm.votedFor = args.CandidateId
		cm.electionResetEvent = time.Now()
	} else {
		reply.VoteGranted = false
	}
	reply.Term = cm.currentTerm
	cm.persistToStorage()
	cm.dlog("... RequestVote reply: %+v", reply)
	return nil
}
// AppendEntriesArgs are the arguments for the AppendEntries RPC.
type AppendEntriesArgs struct {
	Term     int // leader's term
	LeaderId int // leader's ID, so followers can redirect clients

	PrevLogIndex int        // index of the log entry immediately preceding the new ones
	PrevLogTerm  int        // term of the entry at PrevLogIndex
	Entries      []LogEntry // log entries to store (empty for a heartbeat; may batch several for efficiency)
	LeaderCommit int        // leader's commitIndex
}
// AppendEntriesReply is the reply for the AppendEntries RPC.
type AppendEntriesReply struct {
	Term    int  // currentTerm, returned so the leader can update itself
	Success bool // true if the follower had an entry matching PrevLogIndex and PrevLogTerm

	// Conflict hints allowing the leader to back up nextIndex quickly.
	ConflictIndex int // first index stored for ConflictTerm, or the follower's log length
	ConflictTerm  int // term of the conflicting entry at PrevLogIndex, or -1
}
// AppendEntries implements the AppendEntries RPC. It is invoked by the leader
// to replicate client commands to followers, and also serves as the heartbeat.
func (cm *ConsensusModule) AppendEntries(args AppendEntriesArgs, reply *AppendEntriesReply) error {
	cm.mu.Lock()
	defer cm.mu.Unlock()
	if cm.state == Dead {
		return nil
	}
	cm.dlog("AppendEntries: %+v", args)
	// If the request carries a newer term, revert to follower.
	if args.Term > cm.currentTerm {
		cm.dlog("... term out of date in AppendEntries")
		cm.becomeFollower(args.Term)
	}
	reply.Success = false
	if args.Term == cm.currentTerm {
		// Same term but not currently a follower: become one.
		if cm.state != Follower {
			cm.becomeFollower(args.Term)
		}
		cm.electionResetEvent = time.Now()
		// Does our log contain an entry at PrevLogIndex whose term matches
		// PrevLogTerm? Note that in the extreme case of PrevLogIndex=-1 this
		// is vacuously true.
		if args.PrevLogIndex == -1 || (args.PrevLogIndex < len(cm.log) && args.PrevLogTerm == cm.log[args.PrevLogIndex].Term) {
			reply.Success = true
			// Find the insertion point — where the local log (starting at
			// PrevLogIndex+1) and the new entries sent in the RPC first
			// disagree on the term.
			logInsertIndex := args.PrevLogIndex + 1
			newEntriesIndex := 0
			for {
				if logInsertIndex >= len(cm.log) || newEntriesIndex >= len(args.Entries) {
					break
				}
				if cm.log[logInsertIndex].Term != args.Entries[newEntriesIndex].Term {
					break
				}
				logInsertIndex++
				newEntriesIndex++
			}
			/*
				At the end of this loop:
				- logInsertIndex points at the end of the local log, or the index
				  where the term conflicts with an entry sent by the leader
				- newEntriesIndex points at the end of Entries, or the index where
				  the term conflicts with the corresponding local log entry
			*/
			if newEntriesIndex < len(args.Entries) {
				cm.dlog("... inserting entries %v from index %d", args.Entries[newEntriesIndex:], logInsertIndex)
				cm.log = append(cm.log[:logInsertIndex], args.Entries[newEntriesIndex:]...)
				cm.dlog("... log is now: %v", cm.log)
			}
			// Advance our commit index toward the leader's (bounded by our log).
			if args.LeaderCommit > cm.commitIndex {
				cm.commitIndex = intMin(args.LeaderCommit, len(cm.log)-1)
				cm.dlog("... setting commitIndex=%d", cm.commitIndex)
				cm.newCommitReadyChan <- struct{}{}
			}
		} else {
			// PrevLogIndex/PrevLogTerm mismatch: fill in the conflict hints so
			// the leader can back up nextIndex quickly.
			if args.PrevLogIndex >= len(cm.log) {
				reply.ConflictIndex = len(cm.log)
				reply.ConflictTerm = -1
			} else {
				// PrevLogIndex points within our log, but PrevLogTerm doesn't match
				// cm.log[PrevLogIndex].
				reply.ConflictTerm = cm.log[args.PrevLogIndex].Term
				var i int
				for i = args.PrevLogIndex - 1; i >= 0; i-- {
					if cm.log[i].Term != reply.ConflictTerm {
						break
					}
				}
				reply.ConflictIndex = i + 1
			}
		}
	}
	reply.Term = cm.currentTerm
	cm.persistToStorage()
	cm.dlog("AppendEntries reply: %+v", *reply)
	return nil
}
// electionTimeout generates a pseudo-random election timeout duration.
func (cm *ConsensusModule) electionTimeout() time.Duration {
	// If RAFT_FORCE_MORE_REELECTION is set, deliberately stress-test by
	// generating a hard-coded timeout fairly often. This creates collisions
	// between different servers and forces more re-elections.
	if len(os.Getenv("RAFT_FORCE_MORE_REELECTION")) > 0 && rand.Intn(3) == 0 {
		return 150 * time.Millisecond
	}
	return time.Duration(150+rand.Intn(150)) * time.Millisecond
}
// runElectionTimer implements an election timer. It should be launched
// whenever we want to start a timer towards becoming a candidate in a new
// election.
//
// This function is blocking and should be launched in a separate goroutine;
// it's designed to work for a single (one-shot) election timer, as it exits
// whenever the CM state changes from follower/candidate or the term changes.
func (cm *ConsensusModule) runElectionTimer() {
	timeoutDuration := cm.electionTimeout()
	cm.mu.Lock()
	termStarted := cm.currentTerm
	cm.mu.Unlock()
	cm.dlog("election timer started (%v), term=%d", timeoutDuration, termStarted)
	/*
		This loop ends under one of these conditions:
		1 - it discovers the election timer is no longer needed
		2 - the election timer expires and this CM becomes a candidate
		For a follower, the timer typically keeps running in the background
		for the CM's whole lifetime.
	*/
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	for {
		<-ticker.C
		cm.mu.Lock()
		// The CM no longer needs the timer.
		if cm.state != Candidate && cm.state != Follower {
			cm.dlog("in election timer state=%s, bailing out", cm.state)
			cm.mu.Unlock()
			return
		}
		// The term changed.
		if termStarted != cm.currentTerm {
			cm.dlog("in election timer term changed from %d to %d, bailing out", termStarted, cm.currentTerm)
			cm.mu.Unlock()
			return
		}
		// Start an election if we haven't heard from a leader or voted for
		// another candidate within the timeout.
		if elapsed := time.Since(cm.electionResetEvent); elapsed >= timeoutDuration {
			cm.startElection()
			cm.mu.Unlock()
			return
		}
		cm.mu.Unlock()
	}
}
/*
startElection starts a new election with this CM as a candidate. A follower
does this when it hasn't heard from a leader or another candidate for a
while. It switches the state to candidate and increments the term (as the
algorithm requires for every election), sends RequestVote RPCs to all peers
asking for their vote in this round, and tallies replies to decide whether
we got enough votes to become the leader.
Expects cm.mu to be locked.
*/
func (cm *ConsensusModule) startElection() {
	cm.state = Candidate
	cm.currentTerm += 1
	savedCurrentTerm := cm.currentTerm
	cm.electionResetEvent = time.Now()
	// Vote for ourselves.
	cm.votedFor = cm.id
	cm.dlog("becomes Candidate (currentTerm=%d); log=%v", savedCurrentTerm, cm.log)
	var votesReceived int32 = 1
	for _, peerId := range cm.peerIds {
		go func(peerId int) {
			cm.mu.Lock()
			savedLastLogIndex, savedLastLogTerm := cm.lastLogIndexAndTerm()
			cm.mu.Unlock()
			args := RequestVoteArgs{
				Term:         savedCurrentTerm,
				CandidateId:  cm.id,
				LastLogIndex: savedLastLogIndex,
				LastLogTerm:  savedLastLogTerm,
			}
			var reply RequestVoteReply
			cm.dlog("sending RequestVote to %d: %+v", peerId, args)
			if err := cm.server.Call(peerId, "ConsensusModule.RequestVote", args, &reply); err == nil {
				cm.mu.Lock()
				defer cm.mu.Unlock()
				cm.dlog("received RequestVoteReply %+v", reply)
				// Not a candidate any more: bail out (we may have reverted to
				// follower, or already won and become the leader).
				if cm.state != Candidate {
					cm.dlog("while waiting for reply, state = %v", cm.state)
					return
				}
				// A higher term exists (a new leader): become a follower.
				if reply.Term > savedCurrentTerm {
					cm.dlog("term out of date in RequestVoteReply")
					cm.becomeFollower(reply.Term)
					return
				} else if reply.Term == savedCurrentTerm {
					if reply.VoteGranted {
						votes := int(atomic.AddInt32(&votesReceived, 1))
						if votes*2 > len(cm.peerIds)+1 {
							// Won the election with a majority of votes.
							cm.dlog("wins election with %d votes", votes)
							cm.startLeader()
							return
						}
					}
				}
			}
		}(peerId)
	}
	// Run another election timer, in case this election is not successful.
	go cm.runElectionTimer()
}
// becomeFollower makes cm a follower and resets its state.
// Expects cm.mu to be locked.
func (cm *ConsensusModule) becomeFollower(term int) {
	cm.dlog("becomes Follower with term=%d; log=%v", term, cm.log)
	cm.state = Follower
	cm.currentTerm = term
	cm.votedFor = -1
	cm.electionResetEvent = time.Now()
	// Followers need an election timer running in the background.
	go cm.runElectionTimer()
}
// startLeader switches cm into the leader state and begins the heartbeat
// goroutine. Expects cm.mu to be locked.
func (cm *ConsensusModule) startLeader() {
	cm.state = Leader
	for _, peerId := range cm.peerIds {
		cm.nextIndex[peerId] = len(cm.log)
		cm.matchIndex[peerId] = -1
	}
	cm.dlog("becomes Leader; term=%d, nextIndex=%v, matchIndex=%v; log=%v", cm.currentTerm, cm.nextIndex, cm.matchIndex, cm.log)
	/*
		This goroutine runs in the background and sends AEs to peers:
		- whenever something is sent on triggerAEChan
		- or every 50 ms, if no events occur on triggerAEChan
	*/
	go func(heartbeatTimeout time.Duration) {
		// Immediately send AEs to peers.
		cm.leaderSendAEs()
		t := time.NewTimer(heartbeatTimeout)
		defer t.Stop()
		// Keep sending heartbeats periodically as long as we're the leader.
		for {
			doSend := false
			select {
			case <-t.C:
				doSend = true
				// Reset timer to fire again after heartbeatTimeout.
				t.Stop()
				t.Reset(heartbeatTimeout)
			case _, ok := <-cm.triggerAEChan:
				if ok {
					doSend = true
				} else {
					// triggerAEChan was closed: stop the heartbeat loop.
					return
				}
				// Reset timer for heartbeatTimeout.
				if !t.Stop() {
					<-t.C
				}
				t.Reset(heartbeatTimeout)
			}
			if doSend {
				// Stop sending if we lost leadership in the meantime.
				cm.mu.Lock()
				if cm.state != Leader {
					cm.mu.Unlock()
					return
				}
				cm.mu.Unlock()
				cm.leaderSendAEs()
			}
		}
	}(50 * time.Millisecond)
}
// leaderSendAEs sends a round of AppendEntries RPCs to all peers, collects
// their replies and adjusts cm's state (nextIndex/matchIndex/commitIndex).
func (cm *ConsensusModule) leaderSendAEs() {
	cm.mu.Lock()
	savedCurrentTerm := cm.currentTerm
	cm.mu.Unlock()
	// Send an AE request to every follower, concurrently.
	for _, peerId := range cm.peerIds {
		go func(peerId int) {
			cm.mu.Lock()
			ni := cm.nextIndex[peerId]
			prevLogIndex := ni - 1
			prevLogTerm := -1
			if prevLogIndex >= 0 {
				prevLogTerm = cm.log[prevLogIndex].Term
			}
			entries := cm.log[ni:]
			args := AppendEntriesArgs{
				Term:         savedCurrentTerm,
				LeaderId:     cm.id,
				PrevLogIndex: prevLogIndex,
				PrevLogTerm:  prevLogTerm,
				Entries:      entries,
				LeaderCommit: cm.commitIndex,
			}
			cm.mu.Unlock()
			cm.dlog("sending AppendEntries to %v: ni=%d, args=%+v", peerId, ni, args)
			var reply AppendEntriesReply
			if err := cm.server.Call(peerId, "ConsensusModule.AppendEntries", args, &reply); err == nil {
				cm.mu.Lock()
				defer cm.mu.Unlock()
				// A newer term in the reply: step down to follower.
				if reply.Term > savedCurrentTerm {
					cm.dlog("term out of date in heartbeat reply")
					cm.becomeFollower(reply.Term)
					return
				}
				if cm.state == Leader && savedCurrentTerm == reply.Term {
					if reply.Success {
						cm.nextIndex[peerId] = ni + len(entries)
						cm.matchIndex[peerId] = cm.nextIndex[peerId] - 1
						// Advance commitIndex to the highest current-term entry
						// replicated on a majority of the cluster.
						savedCommitIndex := cm.commitIndex
						for i := cm.commitIndex + 1; i < len(cm.log); i++ {
							if cm.log[i].Term == cm.currentTerm {
								matchCount := 1
								for _, peerId := range cm.peerIds {
									if cm.matchIndex[peerId] >= i {
										matchCount++
									}
								}
								if matchCount*2 > len(cm.peerIds)+1 {
									cm.commitIndex = i
								}
							}
						}
						cm.dlog("AppendEntries reply from %d success: nextIndex := %v, matchIndex := %v; commitIndex := %d", peerId, cm.nextIndex, cm.matchIndex, cm.commitIndex)
						if cm.commitIndex != savedCommitIndex {
							// New entries became committed: notify the apply
							// loop and trigger another AE round so followers
							// learn the new commit index promptly.
							cm.dlog("leader sets commitIndex := %d", cm.commitIndex)
							cm.newCommitReadyChan <- struct{}{}
							cm.triggerAEChan <- struct{}{}
						}
					} else {
						// Rejected: back up nextIndex using the conflict hints.
						if reply.ConflictTerm >= 0 {
							lastIndexOfTerm := -1
							for i := len(cm.log) - 1; i >= 0; i-- {
								if cm.log[i].Term == reply.ConflictTerm {
									lastIndexOfTerm = i
									break
								}
							}
							if lastIndexOfTerm >= 0 {
								cm.nextIndex[peerId] = lastIndexOfTerm + 1
							} else {
								cm.nextIndex[peerId] = reply.ConflictIndex
							}
						} else {
							cm.nextIndex[peerId] = reply.ConflictIndex
						}
						// NOTE(review): this log line prints ni-1, but nextIndex
						// was actually set from the conflict hints above.
						cm.dlog("AppendEntries reply from %d !success: nextIndex := %d", peerId, ni-1)
					}
				}
			}
		}(peerId)
	}
}
/*
commitChanSender (defined further below) is responsible for sending committed
log entries on cm.commitChan. It watches newCommitReadyChan for notifications
and determines which entries can be sent to the client. It should run in a
dedicated background goroutine; cm.commitChan may be buffered to limit how
fast the client consumes committed commands. The method returns when
newCommitReadyChan is closed.
NOTE(review): this comment sits above lastLogIndexAndTerm but describes
commitChanSender — consider moving it next to that function.
*/
// lastLogIndexAndTerm returns the last log index and the last log entry's
// term (or -1 for both if the log is empty). Expects cm.mu to be locked.
func (cm *ConsensusModule) lastLogIndexAndTerm() (int, int) {
	if len(cm.log) == 0 {
		return -1, -1
	}
	last := len(cm.log) - 1
	return last, cm.log[last].Term
}
// commitChanSender sends committed log entries on cm.commitChan. It watches
// newCommitReadyChan for notifications and computes which new entries are
// ready to be sent. It should run in a separate background goroutine and
// returns when newCommitReadyChan is closed (see Stop).
func (cm *ConsensusModule) commitChanSender() {
	for range cm.newCommitReadyChan {
		// Find which entries we have to apply.
		cm.mu.Lock()
		savedTerm := cm.currentTerm
		savedLastApplied := cm.lastApplied
		var entries []LogEntry
		if cm.commitIndex > cm.lastApplied {
			entries = cm.log[cm.lastApplied+1 : cm.commitIndex+1]
			cm.lastApplied = cm.commitIndex
		}
		cm.mu.Unlock()
		cm.dlog("commitChanSender entries=%v, savedLastApplied=%d", entries, savedLastApplied)
		// Deliver outside the lock; commitChan may block on a slow client.
		for i, entry := range entries {
			cm.commitChan <- CommitEntry{
				Command: entry.Command,
				Index:   savedLastApplied + i + 1,
				Term:    savedTerm,
			}
		}
	}
	cm.dlog("commitChanSender done")
}
// intMin returns the smaller of a and b.
func intMin(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
package form3client
import (
"encoding/json"
"io/ioutil"
"net/http"
"net/http/httptest"
"strconv"
"testing"
)
// test is a table-driven test case carrying the HTTP status the stub server
// should return.
type test struct {
	status int
}
// createClient returns a freshly initialized Form3Client for tests.
func createClient() Form3Client {
	client := Form3Client{}
	client.New()
	return client
}
// readAccountSampleData reads the sample account JSON fixture from testdata.
func readAccountSampleData() ([]byte, error) {
	return ioutil.ReadFile("testdata/account_sample.json")
}
// getAccount decodes the sample JSON into a ResponseData envelope and
// returns the inner AccountData. Decode errors are deliberately ignored —
// the fixture is a trusted test asset.
func getAccount(sampleData []byte) *AccountData {
	data := ResponseData{}
	json.Unmarshal(sampleData, &data)
	return data.Data
}
// TestClient_Fetch verifies that Fetch issues a GET and returns the account
// whose id was requested.
func TestClient_Fetch(t *testing.T) {
	expectedAccountData, err := readAccountSampleData()
	if err != nil {
		t.Errorf("Error to read account sample data: %s", err)
	}
	// Stub server: always replies 200 with the fixture, and checks the verb.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		w.Write(expectedAccountData)
		if r.Method != "GET" {
			t.Errorf("Expected 'GET' request, got '%s'", r.Method)
		}
	}))
	defer server.Close()
	accountId := getAccount(expectedAccountData).ID
	client := createClient()
	accountData, err := client.Fetch(server.URL, accountId)
	if err != nil {
		t.Errorf("Expected err == nil, got '%s'", err)
	}
	if accountData.ID != accountId {
		t.Errorf("Expected account id == %s, got %s", accountId, accountData.ID)
	}
}
// TestClient_Fetch_Fail verifies that Fetch returns an error and no account
// for every failing HTTP status.
func TestClient_Fetch_Fail(t *testing.T) {
	tests := []test{
		{http.StatusBadRequest},
		{http.StatusUnauthorized},
		{http.StatusForbidden},
		{http.StatusNotFound},
		{http.StatusInternalServerError},
		{http.StatusServiceUnavailable},
		{http.StatusGatewayTimeout},
	}
	expectedAccountData, err := readAccountSampleData()
	if err != nil {
		t.Errorf("Error to read account sample data: %s", err)
	}
	for _, tc := range tests {
		// Run each case in a closure so the server is closed at the end of
		// the iteration instead of accumulating defers until the test ends.
		func() {
			server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(tc.status)
			}))
			defer server.Close()
			accountId := getAccount(expectedAccountData).ID
			client := createClient()
			accountData, err := client.Fetch(server.URL, accountId)
			if err == nil {
				// Fix: the old message said "Expected err == nil" — the
				// inverse of what this branch actually checks.
				t.Errorf("Expected err != nil for status %d, got nil", tc.status)
			}
			if accountData != nil {
				t.Errorf("Expected account == nil")
			}
		}()
	}
}
// TestClient_Delete verifies that Delete issues a DELETE with the account's
// version header and reports success on a 204 response.
func TestClient_Delete(t *testing.T) {
	expectedAccountData, err := readAccountSampleData()
	if err != nil {
		t.Errorf("Error to read account sample data: %s", err)
	}
	account := getAccount(expectedAccountData)
	// Stub server: replies 204 and checks the verb.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNoContent)
		if r.Method != "DELETE" {
			t.Errorf("Expected 'DELETE' request, got '%s'", r.Method)
		}
	}))
	defer server.Close()
	client := createClient()
	response, err := client.Delete(server.URL, account.ID,
		map[string]string{"Version": strconv.FormatInt(*account.Version, 10)})
	if err != nil {
		t.Errorf("Expected err == nil, got '%s'", err)
	}
	if response == false {
		t.Errorf("Expected response == true, got %t", response)
	}
}
// TestClient_Delete_Fail verifies that Delete reports an error and false for
// every failing HTTP status.
func TestClient_Delete_Fail(t *testing.T) {
	tests := []test{
		{http.StatusBadRequest},
		{http.StatusUnauthorized},
		{http.StatusForbidden},
		{http.StatusNotFound},
		{http.StatusInternalServerError},
		{http.StatusServiceUnavailable},
		{http.StatusGatewayTimeout},
	}
	expectedAccountData, err := readAccountSampleData()
	if err != nil {
		t.Errorf("Error to read account sample data: %s", err)
	}
	account := getAccount(expectedAccountData)
	for _, tc := range tests {
		// Close each stub server at the end of its own iteration rather than
		// deferring every close to the end of the whole test.
		func() {
			server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(tc.status)
			}))
			defer server.Close()
			client := createClient()
			response, err := client.Delete(server.URL, account.ID,
				map[string]string{"Version": strconv.FormatInt(*account.Version, 10)})
			if err == nil {
				// Fix: the old message said "Expected err == nil" — the
				// inverse of what this branch actually checks.
				t.Errorf("Expected err != nil for status %d, got nil", tc.status)
			}
			if response != false {
				// Fix: the old message referred to "account == nil".
				t.Errorf("Expected response == false, got %t", response)
			}
		}()
	}
}
// TestClient_Create verifies that Create issues a POST and returns the
// created account.
func TestClient_Create(t *testing.T) {
	expectedAccountData, err := readAccountSampleData()
	if err != nil {
		t.Errorf("Error to read account sample data: %s", err)
	}
	account := getAccount(expectedAccountData)
	// Stub server: replies 201 with the fixture, and checks the verb.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusCreated)
		w.Write(expectedAccountData)
		if r.Method != "POST" {
			t.Errorf("Expected 'POST' request, got '%s'", r.Method)
		}
	}))
	defer server.Close()
	client := createClient()
	response, err := client.Create(server.URL, *account)
	if err != nil {
		t.Errorf("Expected err == nil, got '%s'", err)
	}
	if response.ID != account.ID {
		// Fix: expected and got values were swapped in the message args.
		t.Errorf("Expected account == %s, got %s", account.ID, response.ID)
	}
}
// TestClient_Create_Fail verifies that Create returns an error and no
// account for every failing HTTP status.
func TestClient_Create_Fail(t *testing.T) {
	tests := []test{
		{http.StatusBadRequest},
		{http.StatusUnauthorized},
		{http.StatusForbidden},
		{http.StatusNotFound},
		{http.StatusRequestEntityTooLarge},
		{http.StatusTooManyRequests},
		{http.StatusInternalServerError},
		{http.StatusServiceUnavailable},
		{http.StatusGatewayTimeout},
	}
	expectedAccountData, err := readAccountSampleData()
	if err != nil {
		t.Errorf("Error to read account sample data: %s", err)
	}
	account := getAccount(expectedAccountData)
	for _, tc := range tests {
		// Close each stub server at the end of its own iteration rather than
		// deferring every close to the end of the whole test.
		func() {
			server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(tc.status)
			}))
			defer server.Close()
			client := createClient()
			accountData, err := client.Create(server.URL, *account)
			if err == nil {
				// Fix: the old message said "Expected err == nil" — the
				// inverse of what this branch actually checks.
				t.Errorf("Expected err != nil for status %d, got nil", tc.status)
			}
			if accountData != nil {
				t.Errorf("Expected account == nil")
			}
		}()
	}
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package firmware
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"time"
"chromiumos/tast/common/testexec"
"chromiumos/tast/local/bundles/cros/firmware/fwupd"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers FwupdInstallRemote with the tast test framework, declaring
// its contacts, dependencies (fwupd software; battery plus Chrome EC
// hardware) and overall timeout.
func init() {
	testing.AddTest(&testing.Test{
		Func: FwupdInstallRemote,
		Desc: "Checks that fwupd can install using a remote repository",
		Contacts: []string{
			"campello@chromium.org",     // Test Author
			"chromeos-fwupd@google.com", // CrOS FWUPD
		},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"fwupd"},
		HardwareDeps: hwdep.D(
			hwdep.Battery(),  // Test doesn't run on ChromeOS devices without a battery.
			hwdep.ChromeEC(), // Test requires Chrome EC to set battery to charge via ectool.
		),
		Timeout: fwupd.ChargingStateTimeout + 1*time.Minute,
	})
}
// FwupdInstallRemote runs the fwupdtool utility and verifies that it
// can update a device in the system using a remote repository.
func FwupdInstallRemote(ctx context.Context, s *testing.State) {
	// Make sure the DUT battery is charging/charged before flashing.
	if cleanup, err := fwupd.SetFwupdChargingState(ctx, true); err != nil {
		s.Fatal("Failed to set charging state: ", err)
	} else {
		defer func() {
			if err := cleanup(ctx); err != nil {
				s.Fatal("Failed to cleanup: ", err)
			}
		}()
	}
	// --allow-reinstall lets the test run even when this release is already
	// installed on the device.
	cmd := testexec.CommandContext(ctx, "/usr/bin/fwupdmgr", "install", "--allow-reinstall", "-v", fwupd.ReleaseURI)
	cmd.Env = append(os.Environ(), "CACHE_DIRECTORY=/var/cache/fwupd")
	output, err := cmd.Output(testexec.DumpLogOnError)
	if err != nil {
		s.Errorf("%q failed: %v", cmd.Args, err)
	}
	// Preserve the tool's output alongside the test results for debugging.
	if err := ioutil.WriteFile(filepath.Join(s.OutDir(), "fwupdmgr.txt"), output, 0644); err != nil {
		s.Error("Failed to write output from update: ", err)
	}
}
|
package requests
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/url"
"strings"
"github.com/google/go-querystring/query"
"github.com/atomicjolt/canvasapi"
"github.com/atomicjolt/canvasapi/models"
)
// CreateLineItem Create a new Line Item
// https://canvas.instructure.com/doc/api/line_items.html
//
// Path Parameters:
// # Path.CourseID (Required) ID
//
// Form Parameters:
// # Form.ScoreMaximum (Required) The maximum score for the line item. Scores created for the Line Item may exceed this value.
// # Form.Label (Required) The label for the Line Item. If no resourceLinkId is specified this value will also be used
// as the name of the placeholder assignment.
// # Form.ResourceID (Optional) A Tool Provider specified id for the Line Item. Multiple line items may
// share the same resourceId within a given context.
// # Form.Tag (Optional) A value used to qualify a line Item beyond its ids. Line Items may be queried
// by this value in the List endpoint. Multiple line items can share the same tag
// within a given context.
// # Form.ResourceLinkID (Optional) The resource link id the Line Item should be attached to. This value should
// match the LTI id of the Canvas assignment associated with the tool.
// # Form.CanvasLTISubmissionType (Optional) (EXTENSION) - Optional block to set Assignment Submission Type when creating a new assignment is created.
// type - 'none' or 'external_tool'::
// external_tool_url - Submission URL only used when type: 'external_tool'::
//
// CreateLineItem carries the path and form parameters for the "Create a new
// Line Item" Canvas endpoint; see the comment above for per-parameter docs.
type CreateLineItem struct {
	Path struct {
		CourseID string `json:"course_id" url:"course_id,omitempty"` // (Required)
	} `json:"path"`
	Form struct {
		ScoreMaximum            float64                  `json:"score_maximum" url:"score_maximum,omitempty"`                                                                                 // (Required)
		Label                   string                   `json:"label" url:"label,omitempty"`                                                                                                 // (Required)
		ResourceID              string                   `json:"resource_id" url:"resource_id,omitempty"`                                                                                     // (Optional)
		Tag                     string                   `json:"tag" url:"tag,omitempty"`                                                                                                     // (Optional)
		ResourceLinkID          string                   `json:"resource_link_id" url:"resource_link_id,omitempty"`                                                                           // (Optional)
		CanvasLTISubmissionType map[string](interface{}) `json:"https://canvas.instructure.com/lti/submission_type" url:"https://canvas.instructure.com/lti/submission_type,omitempty"` // (Optional)
	} `json:"form"`
}
// GetMethod returns the HTTP method used by this request.
func (t *CreateLineItem) GetMethod() string {
	return "POST"
}
// GetURLPath builds the endpoint path with the course id substituted in.
func (t *CreateLineItem) GetURLPath() string {
	const template = "/lti/courses/{course_id}/line_items"
	return strings.ReplaceAll(template, "{course_id}", fmt.Sprintf("%v", t.Path.CourseID))
}
// GetQuery returns the query string for this request (always empty).
func (t *CreateLineItem) GetQuery() (string, error) {
	return "", nil
}
// GetBody encodes the form parameters as URL values.
func (t *CreateLineItem) GetBody() (url.Values, error) {
	return query.Values(t.Form)
}
// GetJSON marshals the form parameters to JSON.
func (t *CreateLineItem) GetJSON() ([]byte, error) {
	j, err := json.Marshal(t.Form)
	if err != nil {
		// Fix: the marshal error was previously swallowed (returned nil, nil),
		// making callers treat a failed marshal as an empty success.
		return nil, err
	}
	return j, nil
}
// HasErrors validates the required parameters and returns a combined error
// naming every missing field, or nil when the request is valid.
func (t *CreateLineItem) HasErrors() error {
	errs := []string{}
	if t.Path.CourseID == "" {
		errs = append(errs, "'Path.CourseID' is required")
	}
	if t.Form.Label == "" {
		errs = append(errs, "'Form.Label' is required")
	}
	if len(errs) > 0 {
		// Fix: pass the joined message as a value rather than a non-constant
		// format string — flagged by go vet and unsafe if a message ever
		// contained a '%' verb.
		return fmt.Errorf("%s", strings.Join(errs, ", "))
	}
	return nil
}
// Do sends the request via the given Canvas client and decodes the JSON
// response body into a LineItem.
func (t *CreateLineItem) Do(c *canvasapi.Canvas) (*models.LineItem, error) {
	response, err := c.SendRequest(t)
	if err != nil {
		return nil, err
	}
	body, err := ioutil.ReadAll(response.Body)
	// Close before the error check so the body is released on either path.
	response.Body.Close()
	if err != nil {
		return nil, err
	}
	ret := models.LineItem{}
	err = json.Unmarshal(body, &ret)
	if err != nil {
		return nil, err
	}
	return &ret, nil
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package watchdog is responsible for monitoring the sentry for tasks that may
// potentially be stuck or looping indeterminately, causing hard-to-debug hangs in
// the untrusted app.
//
// It works by periodically querying all tasks to check whether they are in user
// mode (RunUser), kernel mode (RunSys), or blocked in the kernel (OffCPU). Tasks
// that have been running in kernel mode for a long time in the same syscall
// without blocking are considered stuck and are reported.
//
// When a stuck task is detected, the watchdog can take one of the following actions:
// 1. LogWarning: Logs a warning message followed by a stack dump of all goroutines.
// If a tasks continues to be stuck, the message will repeat every minute, unless
// a new stuck task is detected
// 2. Panic: same as above, followed by panic()
package watchdog
import (
"bytes"
"fmt"
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/metric"
"gvisor.dev/gvisor/pkg/sentry/kernel"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sync"
)
// Opts configures the watchdog.
type Opts struct {
	// TaskTimeout is the amount of time to allow a task to execute the
	// same syscall without blocking before it's declared stuck.
	TaskTimeout time.Duration
	// TaskTimeoutAction indicates what action to take when a stuck task
	// is detected.
	TaskTimeoutAction Action
	// StartupTimeout is the amount of time to allow between watchdog
	// creation and calling watchdog.Start.
	StartupTimeout time.Duration
	// StartupTimeoutAction indicates what action to take when
	// watchdog.Start is not called within the timeout.
	StartupTimeoutAction Action
}

// DefaultOpts is a default set of options for the watchdog.
var DefaultOpts = Opts{
	// Task timeout.
	TaskTimeout:       3 * time.Minute,
	TaskTimeoutAction: LogWarning,

	// Startup timeout.
	StartupTimeout:       30 * time.Second,
	StartupTimeoutAction: LogWarning,
}

// descheduleThreshold is the amount of time scheduling needs to be off before the entire wait period
// is discounted from task's last update time. It's set high enough that small scheduling delays won't
// trigger it.
const descheduleThreshold = 1 * time.Second

// stackDumpSameTaskPeriod is the amount of time to wait before dumping the
// stack to the log again when the same task(s) remains stuck.
var stackDumpSameTaskPeriod = time.Minute
// Action defines what action to take when a stuck task is detected.
type Action int

const (
	// LogWarning logs a warning message followed by a stack trace.
	LogWarning Action = iota
	// Panic does the same logging as LogWarning and then calls panic().
	Panic
)

// Set implements flag.Value. It accepts "log"/"logwarning" and "panic".
func (a *Action) Set(v string) error {
	if v == "log" || v == "logwarning" {
		*a = LogWarning
		return nil
	}
	if v == "panic" {
		*a = Panic
		return nil
	}
	return fmt.Errorf("invalid watchdog action %q", v)
}

// Get implements flag.Value.
func (a *Action) Get() any {
	return *a
}

// String returns Action's string representation.
func (a Action) String() string {
	if a == LogWarning {
		return "logWarning"
	}
	if a == Panic {
		return "panic"
	}
	panic(fmt.Sprintf("Invalid watchdog action: %d", a))
}
// Watchdog is the main watchdog class. It controls a goroutine that periodically
// analyses all tasks and reports if any of them appear to be stuck.
type Watchdog struct {
	// Configuration options are embedded.
	Opts

	// period indicates how often to check all tasks. It's calculated based on
	// opts.TaskTimeout.
	period time.Duration

	// k is where the tasks come from.
	k *kernel.Kernel

	// stop is used to notify the watchdog that it should stop.
	stop chan struct{}

	// done is used to notify when the watchdog has stopped.
	done chan struct{}

	// offenders map contains all tasks that are currently stuck.
	offenders map[*kernel.Task]*offender

	// lastStackDump tracks the last time a stack dump was generated to prevent
	// spamming the log.
	lastStackDump time.Time

	// lastRun is set to the last time the watchdog executed a monitoring loop.
	lastRun ktime.Time

	// mu protects the fields below.
	mu sync.Mutex

	// running is true if the watchdog is running.
	running bool

	// startCalled is true if Start has ever been called. It remains true
	// even if Stop is called.
	startCalled bool
}

// offender records a stuck task and when it was last seen making progress.
type offender struct {
	// lastUpdateTime is the task's scheduling timestamp when it was first
	// flagged as stuck.
	lastUpdateTime ktime.Time
}
// New creates a new watchdog with the given options. If a startup timeout is
// configured, a background goroutine immediately begins waiting for Start to
// be called.
func New(k *kernel.Kernel, opts Opts) *Watchdog {
	// 4 is arbitrary, just don't want to prolong 'TaskTimeout' too much.
	period := opts.TaskTimeout / 4
	w := &Watchdog{
		Opts:      opts,
		k:         k,
		period:    period,
		offenders: make(map[*kernel.Task]*offender),
		stop:      make(chan struct{}),
		done:      make(chan struct{}),
	}

	// Handle StartupTimeout if it exists.
	if w.StartupTimeout > 0 {
		log.Infof("Watchdog waiting %v for startup", w.StartupTimeout)
		go w.waitForStart() // S/R-SAFE: watchdog is stopped during save and restarted after restore.
	}

	return w
}
// Start starts the watchdog monitoring loop. It is a no-op if the watchdog is
// already running or if TaskTimeout is zero (monitoring disabled). It always
// records that Start was called, which satisfies waitForStart.
func (w *Watchdog) Start() {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.startCalled = true

	if w.running {
		return
	}

	if w.TaskTimeout == 0 {
		log.Infof("Watchdog task timeout disabled")
		return
	}
	w.lastRun = w.k.MonotonicClock().Now()

	log.Infof("Starting watchdog, period: %v, timeout: %v, action: %v", w.period, w.TaskTimeout, w.TaskTimeoutAction)
	go w.loop() // S/R-SAFE: watchdog is stopped during save and restarted after restore.
	w.running = true
}
// Stop requests the watchdog to stop and waits until the monitoring loop has
// exited. It is a no-op when monitoring is disabled or not running.
func (w *Watchdog) Stop() {
	// A zero TaskTimeout means the loop was never started (see Start).
	if w.TaskTimeout == 0 {
		return
	}

	w.mu.Lock()
	defer w.mu.Unlock()
	if !w.running {
		return
	}
	log.Infof("Stopping watchdog")
	// Signal the loop and wait for its acknowledgement before returning.
	w.stop <- struct{}{}
	<-w.done
	w.running = false
	log.Infof("Watchdog stopped")
}
// waitForStart waits for Start to be called and takes action if it does not
// happen within the startup timeout.
func (w *Watchdog) waitForStart() {
	<-time.After(w.StartupTimeout)
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.startCalled {
		// We are fine.
		return
	}

	metric.WeirdnessMetric.Increment(&metric.WeirdnessTypeWatchdogStuckStartup)

	var buf bytes.Buffer
	buf.WriteString(fmt.Sprintf("Watchdog.Start() not called within %s", w.StartupTimeout))
	w.doAction(w.StartupTimeoutAction, false, &buf)
}
// loop is the main watchdog routine. It only returns when 'Stop()' is called.
func (w *Watchdog) loop() {
	// Loop until someone stops it.
	for {
		select {
		case <-w.stop:
			// Acknowledge the stop request and exit.
			w.done <- struct{}{}
			return
		case <-time.After(w.period):
			w.runTurn()
		}
	}
}
// runTurn runs a single pass over all tasks and reports anything it finds.
func (w *Watchdog) runTurn() {
	// Someone needs to watch the watchdog. The call below can get stuck if there
	// is a deadlock affecting root's PID namespace mutex. Run it in a goroutine
	// and report if it takes too long to return.
	var tasks []*kernel.Task
	done := make(chan struct{})
	go func() { // S/R-SAFE: watchdog is stopped and restarted during S/R.
		tasks = w.k.TaskSet().Root.Tasks()
		close(done)
	}()

	select {
	case <-done:
	case <-time.After(w.TaskTimeout):
		// Report if the watchdog is not making progress.
		// No one is watching the watchdog watcher though.
		w.reportStuckWatchdog()
		// Still wait for the task list; the rest of the pass needs it.
		<-done
	}

	newOffenders := make(map[*kernel.Task]*offender)
	newTaskFound := false
	now := ktime.FromNanoseconds(int64(w.k.CPUClockNow() * uint64(linux.ClockTick)))

	// The process may be running with a low CPU limit, making tasks appear
	// stuck because they are starved of CPU cycles. An estimate is that tasks
	// could have been starved since the last time the watchdog ran. If the
	// watchdog detects that scheduling is off, it will discount the entire
	// duration since last run from 'lastUpdateTime'.
	discount := time.Duration(0)
	if now.Sub(w.lastRun.Add(w.period)) > descheduleThreshold {
		discount = now.Sub(w.lastRun)
	}
	w.lastRun = now

	log.Infof("Watchdog starting loop, tasks: %d, discount: %v", len(tasks), discount)
	for _, t := range tasks {
		tsched := t.TaskGoroutineSchedInfo()

		// An offender is a task running inside the kernel for longer than the specified timeout.
		if tsched.State == kernel.TaskGoroutineRunningSys {
			lastUpdateTime := ktime.FromNanoseconds(int64(tsched.Timestamp * uint64(linux.ClockTick)))
			elapsed := now.Sub(lastUpdateTime) - discount
			if elapsed > w.TaskTimeout {
				tc, ok := w.offenders[t]
				if !ok {
					// New stuck task detected.
					//
					// Note that tasks blocked doing IO may be considered stuck in kernel,
					// unless they are surrounded by
					// Task.UninterruptibleSleepStart/Finish.
					tc = &offender{lastUpdateTime: lastUpdateTime}
					metric.WeirdnessMetric.Increment(&metric.WeirdnessTypeWatchdogStuckTasks)
					newTaskFound = true
				}
				newOffenders[t] = tc
			}
		}
	}
	if len(newOffenders) > 0 {
		w.report(newOffenders, newTaskFound, now)
	}

	// Remember which tasks have been reported.
	w.offenders = newOffenders
}
// report takes appropriate action when a stuck task is detected: it formats a
// summary of every offender and hands it to doAction with the configured
// TaskTimeoutAction. A full stack dump is forced only when a new task is seen.
func (w *Watchdog) report(offenders map[*kernel.Task]*offender, newTaskFound bool, now ktime.Time) {
	var buf bytes.Buffer
	buf.WriteString(fmt.Sprintf("Sentry detected %d stuck task(s):\n", len(offenders)))
	for t, o := range offenders {
		tid := w.k.TaskSet().Root.IDOfTask(t)
		buf.WriteString(fmt.Sprintf("\tTask tid: %v (goroutine %d), entered RunSys state %v ago.\n", tid, t.GoroutineID(), now.Sub(o.lastUpdateTime)))
	}
	buf.WriteString("Search for 'goroutine <id>' in the stack dump to find the offending goroutine(s)")

	// Force stack dump only if a new task is detected.
	w.doAction(w.TaskTimeoutAction, newTaskFound, &buf)
}

// reportStuckWatchdog reports that the watchdog's own task-enumeration pass
// failed to complete in time (see runTurn).
func (w *Watchdog) reportStuckWatchdog() {
	var buf bytes.Buffer
	buf.WriteString("Watchdog goroutine is stuck")
	w.doAction(w.TaskTimeoutAction, false, &buf)
}
// doAction will take the given action. If the action is LogWarning, the stack
// is not always dumped to the log to prevent log flooding. "forceStack"
// guarantees that the stack will be dumped regardless.
func (w *Watchdog) doAction(action Action, forceStack bool, msg *bytes.Buffer) {
	switch action {
	case LogWarning:
		// Dump stack only if forced or some time has passed since the last
		// time a stack dump was generated.
		if !forceStack && time.Since(w.lastStackDump) < stackDumpSameTaskPeriod {
			msg.WriteString("\n...[stack dump skipped]...")
			// Fix: pass the message as an argument, not as the format string,
			// so a '%' inside task data cannot corrupt the log output
			// (go vet: printf).
			log.Warningf("%s", msg.String())
			return
		}
		log.TracebackAll(msg.String())
		w.lastStackDump = time.Now()

	case Panic:
		// Panic will skip over running tasks, which is likely the culprit here. So manually
		// dump all stacks before panic'ing.
		log.TracebackAll(msg.String())

		// Attempt to flush metrics, timeout and move on in case metrics are stuck as well.
		metricsEmitted := make(chan struct{}, 1)
		go func() { // S/R-SAFE: watchdog is stopped during save and restarted after restore.
			// Flush metrics before killing process.
			metric.EmitMetricUpdate()
			metricsEmitted <- struct{}{}
		}()
		select {
		case <-metricsEmitted:
		case <-time.After(1 * time.Second):
		}
		panic(fmt.Sprintf("%s\nStack for running G's are skipped while panicking.", msg.String()))

	default:
		panic(fmt.Sprintf("Unknown watchdog action %v", action))
	}
}
|
package proto2gql
import (
"fmt"
"reflect"
"strings"
"github.com/pkg/errors"
"github.com/EGT-Ukraine/go2gql/generator/plugins/graphql"
"github.com/EGT-Ukraine/go2gql/generator/plugins/proto2gql/parser"
)
// serviceMethodArguments builds the GraphQL argument list for an RPC method
// by flattening the fields of its input message, carrying each field's name,
// resolved type and comment through unchanged.
func (g Proto2GraphQL) serviceMethodArguments(file *parsedFile, method *parser.Method) ([]graphql.MethodArgument, error) {
	var args []graphql.MethodArgument
	messageFields, err := g.getMessageFields(file, method.InputMessage)
	if err != nil {
		return nil, err
	}

	for _, messageField := range messageFields {
		args = append(args, graphql.MethodArgument{
			Name:          messageField.Name,
			Type:          messageField.Type,
			QuotedComment: messageField.QuotedComment,
		})
	}

	return args, nil
}
// messagePayloadErrorParams resolves, from the message's configuration, how
// generated code should detect and extract an application-level error carried
// inside an RPC response payload.
//
// It returns a checker builder (produces "is there an error?" expressions)
// and an accessor builder (produces "fetch the error" expressions). A
// (nil, nil, nil) result means the message has no usable error field:
// either none is configured, or the configured field is a scalar/enum,
// which is not supported.
func (g Proto2GraphQL) messagePayloadErrorParams(message *parser.Message) (checker graphql.PayloadErrorChecker, accessor graphql.PayloadErrorAccessor, err error) {
	outMsgCfg, err := g.fileConfig(message.File()).MessageConfig(message.Name)
	if err != nil {
		err = errors.Wrap(err, "failed to resolve output message config")
		return
	}
	// No error field configured: nothing to generate.
	if outMsgCfg.ErrorField == "" {
		return
	}
	// Accessor expression: payload.Get<ErrorField>().
	errorAccessor := func(arg string) string {
		return arg + ".Get" + camelCase(outMsgCfg.ErrorField) + "()"
	}
	// The checker shape depends on the field kind: repeated/map fields test
	// length, message fields test for nil, scalars/enums are unsupported.
	errorCheckerByType := func(repeated bool, p parser.Type) graphql.PayloadErrorChecker {
		if repeated || p.Kind() == parser.TypeMap {
			return func(arg string) string {
				return "len(" + arg + ".Get" + camelCase(outMsgCfg.ErrorField) + "())>0"
			}
		}
		if p.Kind() == parser.TypeScalar || p.Kind() == parser.TypeEnum {
			fmt.Println("Warning: scalars and enums is not supported as payload error fields")
			return nil
		}
		if p.Kind() == parser.TypeMessage {
			return func(arg string) string {
				return arg + ".Get" + camelCase(outMsgCfg.ErrorField) + "() != nil"
			}
		}
		return nil
	}
	// Look for the configured field among normal, map, and oneof fields in turn.
	for _, fld := range message.NormalFields {
		if fld.Name == outMsgCfg.ErrorField {
			errorChecker := errorCheckerByType(fld.Repeated, fld.Type)
			if errorChecker == nil {
				return nil, nil, nil
			}
			return errorChecker, errorAccessor, nil
		}
	}
	for _, fld := range message.MapFields {
		if fld.Name == outMsgCfg.ErrorField {
			errorChecker := errorCheckerByType(false, fld.Map)
			if errorChecker == nil {
				return nil, nil, nil
			}
			return errorChecker, errorAccessor, nil
		}
	}
	for _, of := range message.OneOffs {
		for _, fld := range of.Fields {
			if fld.Name == outMsgCfg.ErrorField {
				errorChecker := errorCheckerByType(false, fld.Type)
				if errorChecker == nil {
					return nil, nil, nil
				}
				return errorChecker, errorAccessor, nil
			}
		}
	}
	// Configured field not found on the message.
	return nil, nil, nil
}
// methodName returns the GraphQL name for an RPC method: the configured alias
// when one is set, otherwise the proto method name.
func (g Proto2GraphQL) methodName(cfg MethodConfig, method *parser.Method) string {
	name := method.Name
	if cfg.Alias != "" {
		name = cfg.Alias
	}
	return name
}
// serviceMethod assembles the full graphql.Method descriptor for one RPC:
// its output type (optionally unwrapped to its single field), the client-call
// expression, input arguments, payload-error hooks, and request resolvers.
func (g Proto2GraphQL) serviceMethod(sc ServiceConfig, cfg MethodConfig, file *parsedFile, method *parser.Method) (*graphql.Method, error) {
	outputMsgTypeFile, err := g.parsedFile(method.OutputMessage.File())
	if err != nil {
		return nil, errors.Wrap(err, "failed to resolve file type file")
	}
	// Default call expression: client.Method(ctx, arg).
	clientMethodCaller := func(client, arg string, ctx graphql.BodyContext) string {
		return client + "." + camelCase(method.Name) + "(ctx," + arg + ")"
	}
	var outProtoType parser.Type
	var outType graphql.TypeResolver
	// NOTE(review): outProtoTypeRepeated is never assigned true anywhere in
	// this function, so the list-wrapping branch below is dead code — confirm
	// whether repeated outputs were meant to be handled here.
	var outProtoTypeRepeated bool
	outputMessageConfig, err := file.Config.MessageConfig(method.OutputMessage.Name)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to resolve message %s config", method.OutputMessage.Name)
	}
	if outputMessageConfig.UnwrapField {
		// Unwrapping replaces the output message with its single field and
		// wraps the client call so the field is extracted from the response.
		if len(method.OutputMessage.NormalFields) != 1 {
			return nil, errors.Errorf(
				"can't unwrap `%s` service `%s` method response. Output message must have 1 field.",
				method.Service.Name,
				method.Name,
			)
		}
		unwrapFieldName := method.OutputMessage.NormalFields[0].Name
		resolver, err := g.FieldOutputValueResolver(method.OutputMessage, unwrapFieldName)
		if err != nil {
			return nil, errors.Wrap(err, "failed to build output value resolver")
		}
		outType, err = g.FieldOutputGraphQLTypeResolver(method.OutputMessage, unwrapFieldName)
		if err != nil {
			return nil, errors.Wrap(err, "failed to build output type resovler")
		}
		clientMethodCaller = func(client, arg string, ctx graphql.BodyContext) string {
			return `func() (interface{}, error) {
				res, err := ` + client + "." + camelCase(method.Name) + `(ctx,` + arg + `)
				if err != nil {
					return nil, err
				}
				return ` + resolver("res", ctx) + `, nil
			}()`
		}
	} else {
		if len(method.OutputMessage.NormalFields) == 1 {
			fmt.Printf(
				"Suggestion: service `%s` method `%s` in file `%s` has 1 output field. Can be unwrapped.\n",
				method.Service.Name,
				method.Name,
				file.File.FilePath,
			)
		}
		outProtoType = method.OutputMessage
		outType, err = g.TypeOutputGraphQLTypeResolver(outputMsgTypeFile, outProtoType)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to get output type resolver for method: %s", method.Name)
		}
		if outProtoTypeRepeated {
			outType = graphql.GqlListTypeResolver(graphql.GqlNonNullTypeResolver(outType))
		}
	}
	requestType, err := g.goTypeByParserType(method.InputMessage)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to get request go type for method: %s", method.Name)
	}
	args, err := g.serviceMethodArguments(file, method)
	if err != nil {
		return nil, errors.Wrap(err, "failed to prepare service method arguments")
	}
	payloadErrChecker, payloadErrAccessor, err := g.messagePayloadErrorParams(method.OutputMessage)
	if err != nil {
		return nil, errors.Wrap(err, "failed to resolve message payload error params")
	}
	inputMessageFile, err := g.parsedFile(method.InputMessage.File())
	if err != nil {
		return nil, errors.Wrapf(err, "failed to resolve message '%s' parsed file", dotedTypeName(method.InputMessage.TypeName))
	}
	valueResolver, valueResolverWithErr, _, err := g.TypeValueResolver(inputMessageFile, method.InputMessage, "", false)
	if err != nil {
		return nil, errors.Wrap(err, "failed to resolve message value resolver")
	}
	if err := g.registerMethodDataLoaders(sc, cfg, file, method); err != nil {
		return nil, errors.Wrap(err, "failed add data loader provider")
	}
	return &graphql.Method{
		OriginalName:           method.Name,
		Name:                   g.methodName(cfg, method),
		QuotedComment:          method.QuotedComment,
		GraphQLOutputType:      outType,
		RequestType:            requestType,
		ClientMethodCaller:     clientMethodCaller,
		RequestResolver:        valueResolver,
		RequestResolverWithErr: valueResolverWithErr,
		Arguments:              args,
		PayloadErrorChecker:    payloadErrChecker,
		PayloadErrorAccessor:   payloadErrAccessor,
	}, nil
}
// serviceQueryMethods builds descriptors for every configured method of the
// service that classifies as a GraphQL query (see methodIsQuery).
func (g Proto2GraphQL) serviceQueryMethods(sc ServiceConfig, file *parsedFile, service *parser.Service) ([]graphql.Method, error) {
	var res []graphql.Method
	for methodName, methodConfig := range sc.Methods {
		method, ok := service.Methods[methodName]
		if !ok {
			return nil, errors.Errorf("Method with name '%s' not found in service '%s'", methodName, service.Name)
		}

		if !g.methodIsQuery(methodConfig, method) {
			continue
		}

		met, err := g.serviceMethod(sc, methodConfig, file, method)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to prepare service method %s", method.Name)
		}

		res = append(res, *met)
	}
	return res, nil
}
// methodIsQuery reports whether a method should be exposed as a GraphQL
// query. An explicit request type in the config wins; otherwise methods whose
// names start with "get" (case-insensitively) are treated as queries.
func (g Proto2GraphQL) methodIsQuery(cfg MethodConfig, method *parser.Method) bool {
	if cfg.RequestType == RequestTypeQuery {
		return true
	}
	if cfg.RequestType == RequestTypeMutation {
		return false
	}
	return strings.HasPrefix(strings.ToLower(method.Name), "get")
}
// serviceMutationsMethods builds descriptors for every configured method of
// the service that does NOT classify as a query — the mutation complement of
// serviceQueryMethods.
func (g Proto2GraphQL) serviceMutationsMethods(cfg ServiceConfig, file *parsedFile, service *parser.Service) ([]graphql.Method, error) {
	var res []graphql.Method
	for methodName, methodConfig := range cfg.Methods {
		method, ok := service.Methods[methodName]
		if !ok {
			return nil, errors.Errorf("Method with name '%s' not found in service '%s'", methodName, service.Name)
		}

		if g.methodIsQuery(methodConfig, method) {
			continue
		}

		met, err := g.serviceMethod(cfg, methodConfig, file, method)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to prepare service method %s", method.Name)
		}

		res = append(res, *met)
	}
	return res, nil
}
// serviceName returns the GraphQL name for a service: the configured override
// when present, otherwise the proto service name.
func (g Proto2GraphQL) serviceName(sc ServiceConfig, service *parser.Service) string {
	name := service.Name
	if sc.ServiceName != "" {
		name = sc.ServiceName
	}
	return name
}
// fileServices builds one graphql.Service descriptor per configured service
// in the parsed proto file, splitting its methods into queries and mutations.
func (g Proto2GraphQL) fileServices(file *parsedFile) ([]graphql.Service, error) {
	var res []graphql.Service
	for serviceName, sc := range file.Config.GetServices() {
		service, ok := file.File.Services[serviceName]
		if !ok {
			return nil, errors.Errorf("Service '%s' not found in file '%s'", serviceName, file.File.FilePath)
		}
		queryMethods, err := g.serviceQueryMethods(sc, file, service)
		if err != nil {
			return nil, errors.Wrap(err, "failed to resolve service methods")
		}
		mutationsMethods, err := g.serviceMutationsMethods(sc, file, service)
		if err != nil {
			return nil, errors.Wrap(err, "failed to resolve service methods")
		}
		res = append(res, graphql.Service{
			OriginalName:    service.Name,
			Name:            g.serviceName(sc, service),
			QuotedComment:   service.QuotedComment,
			CallInterface:   g.serviceCallInterface(file, service.Name),
			QueryMethods:    queryMethods,
			MutationMethods: mutationsMethods,
		})
	}
	return res, nil
}
// serviceCallInterface returns the Go type of the generated gRPC client
// interface ("<Service>Client") for the given service, located in the file's
// gRPC sources package.
func (g Proto2GraphQL) serviceCallInterface(file *parsedFile, serviceName string) graphql.GoType {
	return graphql.GoType{
		Kind: reflect.Interface,
		Pkg:  file.GRPCSourcesPkg,
		Name: serviceName + "Client",
	}
}
|
// Copyright 2018 xgfone
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package django supplies a html template similar to django in Python.
//
// It uses the third-party package, github.com/flosch/pongo2, to implement it.
package django
import (
"io"
"github.com/flosch/pongo2"
)
// Type Aliases from pongo2.
type (
	// A Context type provides constants, variables, instances or functions
	// to a template.
	Context = pongo2.Context

	// The Error type is being used to address an error during lexing,
	// parsing or execution.
	Error = pongo2.Error

	// FilterFunction is the type filter functions must fulfil.
	FilterFunction = pongo2.FilterFunction

	// TagParser is the function signature of the tag's parser you will have to
	// implement in order to create a new tag.
	TagParser = pongo2.TagParser

	// Template is a template type.
	Template = pongo2.Template
)

// Some functions from pongo2.
var (
	// FilterExists returns true if the given filter is already registered.
	FilterExists = pongo2.FilterExists

	// RegisterFilter registers a new filter. If there's already a filter
	// with the same name, RegisterFilter will panic. You usually want
	// to call this function in the filter's init() function.
	RegisterFilter = pongo2.RegisterFilter

	// RegisterTag registers a new tag. You usually want to call this function
	// in the tag's init() function.
	RegisterTag = pongo2.RegisterTag

	// ReplaceFilter replaces an already registered filter with a new
	// implementation. Use this function with caution since it allows you
	// to change existing filter behaviour.
	ReplaceFilter = pongo2.ReplaceFilter

	// ReplaceTag replaces an already registered tag with a new implementation.
	// Use this function with caution since it allows you to change existing
	// tag behaviour.
	ReplaceTag = pongo2.ReplaceTag

	// SetAutoescape sets whether or not to escape automatically.
	SetAutoescape = pongo2.SetAutoescape
)

// Engine adapts the pongo2 engine.
type Engine struct {
	*pongo2.TemplateSet

	// directory is the root directory templates are loaded from.
	directory string
	// extension is the template file extension this engine renders.
	extension string
}
// New returns a new django engine rendering templates from dir. An optional
// file extension may be supplied; it defaults to ".html".
func New(dir string, extension ...string) *Engine {
	e := &Engine{
		directory: dir,
		extension: ".html",
	}
	if len(extension) > 0 {
		e.extension = extension[0]
	}
	e.TemplateSet = pongo2.NewSet("django", pongo2.MustNewLocalFileSystemLoader(dir))
	return e
}
// Ext returns the file extension which this django engine is responsible to render.
func (e *Engine) Ext() string {
	return e.extension
}
// Execute renders the django template named by filename into w.
//
// data is expected to be a map[string]interface{} (a pongo2.Context);
// metadata is accepted for interface compatibility but not used by pongo2.
func (e *Engine) Execute(w io.Writer, filename string, data interface{}, metadata map[string]interface{}) error {
	tpl, err := e.FromCache(filename)
	if err != nil {
		return err
	}
	// Use a checked assertion so nil or non-map data renders with an empty
	// context instead of panicking (the original assertion was unchecked).
	ctx, _ := data.(map[string]interface{})
	return tpl.ExecuteWriterUnbuffered(ctx, w)
}
// Load reloads all the django templates by clearing the template cache, so
// the next Execute re-parses them from disk.
//
// The receiver is a pointer for consistency with the other Engine methods
// (mixing value and pointer receivers on one type is confusing; the embedded
// *pongo2.TemplateSet made both forms work).
func (e *Engine) Load() error {
	e.CleanCache()
	return nil
}
|
package list
import (
	// Blank-imported: an ordinary unused import fails Go compilation.
	// NOTE(review): presumably List was meant to reference containers
	// (e.g. embed containers.Container) — confirm and drop the blank
	// identifier once it does.
	_ "github.com/JiBadBoy/gods/containers"
)
// List is the interface all list implementations satisfy.
//
// NOTE(review): it currently declares no methods, so every type satisfies it;
// the intended method set (e.g. from the containers package) is not visible here.
type List interface {
}
package hive
import "gmf"
// CreateEncoder builds a gmf.Encoder configured from the given codec: the
// codec id is set first, then every codec parameter is applied in order.
func CreateEncoder(codec *Codec) *gmf.Encoder {
	enc := new(gmf.Encoder)
	enc.SetParameter("codecid", codec.Id)
	for _, p := range codec.Param {
		enc.SetParameter(p.Name, p.Value)
	}
	return enc
}
|
/**
a small chat room
can use @ to talk to someone
➜ ~ nc localhost 8888
[2017-07-20 17:54:30][admin] hi tourist-1, welcome to chat room!
大家好
[2017-07-20 17:54:35][tourist-1] 大家好
@tourist-0 你好啊
[2017-07-20 17:55:03][tourist-0] 哈哈,你好
大家好
[2017-07-20 17:56:06][tourist-1] 大家好
*/
package main
import (
"bufio"
"log"
"net"
"strconv"
"strings"
"time"
)
// MsgChan is the channel type inbound messages are routed through.
type MsgChan chan msg

// clients maps nickname -> connection for everyone in the room.
//
// NOTE(review): this map is written by the accept loop in main and read by
// per-message goroutines in msgRoute without any synchronization — a data
// race. Guard it with a mutex or confine access to a single goroutine.
var clients map[string]*con = make(map[string]*con)

// msgQueue carries every inbound message to the routing goroutine.
var msgQueue MsgChan = make(MsgChan)

// con is one connected chat client.
type con struct {
	name       string // nickname
	conn       *net.TCPConn
	r          *bufio.Reader
	w          *bufio.Writer
	remoteAddr net.Addr
}

// msg is a single chat message.
type msg struct {
	frmName string    // sender's nickname
	toName  string    // recipient's nickname (empty for broadcast)
	broad   bool      // whether to broadcast to everyone
	time    time.Time // when the message was sent
	msg     string    // message body
}
// Start reads newline-terminated messages from the client and routes them
// onto the global message queue until the connection fails.
func (con *con) Start() {
	for {
		line, err := con.r.ReadString('\n')
		if err != nil {
			// Fix: the original looped and retried on error, which spins at
			// 100% CPU forever once the peer disconnects (a failed reader
			// keeps failing). Stop reading instead.
			//
			// NOTE(review): the client is left in the clients map here;
			// removing it needs synchronized access to the map.
			return
		}
		// Also renamed the original variable, which shadowed the builtin
		// `string` identifier.
		m := decode(line)
		m.frmName = con.name
		msgQueue <- m
	}
}
// Send encodes m and writes it to this client's connection.
//
// NOTE(review): write and flush errors are ignored; delivery to a broken
// connection fails silently.
func (con *con) Send(m msg) {
	ms := encode(m)
	con.w.WriteString(ms)
	con.w.Flush()
}

// encode formats a message for the wire as "[timestamp][sender] body".
func encode(m msg) string {
	return "[" + m.time.Format("2006-01-02 15:04:05") + "][" + m.frmName + "] " + m.msg
}
// decode parses one raw input line into a msg. A line of the form
// "@name body" is a direct message to that user; anything else is a
// broadcast carrying the whole line as the body.
func decode(m string) msg {
	toName := ""
	body := m
	if strings.HasPrefix(m, "@") {
		if index := strings.Index(m, " "); index > 0 {
			toName = m[1:index]
			body = m[index+1:]
		}
	}
	return msg{
		toName: toName,
		broad:  toName == "",
		time:   time.Now(),
		msg:    body,
	}
}
// main listens on TCP port 8888, starts the message router, and accepts
// clients forever, naming each one "tourist-<n>" in arrival order.
func main() {
	addr, rerr := net.ResolveTCPAddr("tcp4", ":8888")
	if rerr != nil {
		log.Fatal("error resolve tcp addr", rerr)
	}
	tcp, lerr := net.ListenTCP("tcp4", addr)
	if lerr != nil {
		log.Fatal("err listen tcp", lerr)
	}
	// Single router goroutine consumes everything sent to msgQueue.
	go msgRoute(msgQueue)
	i := 0
	for {
		conn, cerr := tcp.AcceptTCP()
		if cerr != nil {
			log.Println("err accept", cerr)
			continue
		}
		cl := &con{
			name:       "tourist-" + strconv.Itoa(i),
			conn:       conn,
			r:          bufio.NewReader(conn),
			w:          bufio.NewWriter(conn),
			remoteAddr: conn.RemoteAddr(),
		}
		// NOTE(review): unsynchronized write to clients while msgRoute
		// goroutines read it — see the note on the clients declaration.
		clients[cl.name] = cl
		onNewClient(cl)
		// One reader goroutine per client.
		go cl.Start()
		i++
	}
}
// onNewClient greets a newly connected client and announces the arrival to
// everyone else in the room.
func onNewClient(con *con) {
	// Greet the client directly after they enter the room.
	con.Send(msg{
		frmName: "admin",
		toName:  con.name,
		time:    time.Now(),
		msg:     "hi " + con.name + ", welcome to chat room! \n",
	})
	// Notify everyone else that a new client entered the room.
	m := msg{
		frmName: "admin",
		time:    time.Now(),
		msg:     con.name + " entered into chat room! \n",
	}
	for _, cl := range clients {
		if cl == con {
			continue
		}
		go cl.Send(m)
	}
}
// msgRoute consumes the message queue forever, delivering each message in its
// own goroutine: broadcasts go to every client except the sender, direct
// messages only to the named recipient (silently dropped if unknown).
//
// NOTE(review): these goroutines read the clients map while main writes it —
// unsynchronized; see the note on the clients declaration.
func msgRoute(msgQueue MsgChan) {
	for {
		m := <-msgQueue
		go func(m msg) {
			if m.broad {
				for _, cl := range clients {
					if cl.name == m.frmName {
						continue
					}
					cl.Send(m)
				}
			} else {
				cl := clients[m.toName]
				if cl != nil {
					cl.Send(m)
				}
			}
		}(m)
	}
}
|
package graph
import (
"context"
"graphqltest/graph/generated"
"graphqltest/models"
)
// User resolves the Document.user field through the per-request user loader,
// batching lookups by the document's UserId.
func (r *documentResolver) User(ctx context.Context, obj *models.Document) (*models.User, error) {
	return GetUserLoader(ctx).Load(obj.UserId)
}
// Description resolves the Document.description field. Not implemented yet.
//
// The receiver is a pointer for consistency with User and with
// Resolver.Document, which always hands out a *documentResolver; mixing
// value and pointer receivers on one type is confusing.
func (r *documentResolver) Description(ctx context.Context, obj *models.Document) (string, error) {
	panic("implement me")
}
// Document returns the resolver implementation for the Document type.
func (r *Resolver) Document() generated.DocumentResolver { return &documentResolver{r} }

// documentResolver resolves Document fields; it embeds the root resolver.
type documentResolver struct{ *Resolver }
|
package innerrpc
// This module initializes the RPC connection to the data provider.
import (
"time"
"splitter/config"
)
var (
	// DataClient is the shared RPC client to the data provider.
	DataClient *Client
	// err records the most recent connection error from Start.
	//
	// NOTE(review): nothing visible here ever reads this, so callers cannot
	// tell that Start failed — consider returning the error from Start.
	err error
	// dns is the data provider address (server IP + RPC port).
	dns = config.DATASERVER_IP + config.DBSERVER_RPC_PORT
)

// Start dials the data provider with a 500ms timeout and stores the
// resulting client in DataClient. A dial failure is only recorded in the
// package-level err variable.
func Start() {
	//Dial Data Provider
	DataClient, err = NewClient(dns, time.Millisecond*500)
}
|
package plusone
import (
"reflect"
"testing"
)
// TestPlusOne checks plusOne against a table of digit slices covering the
// no-carry, partial-carry, zero, and all-nines (length-growing) cases.
func TestPlusOne(t *testing.T) {
	cases := []struct {
		in   []int
		want []int
	}{
		{[]int{1, 2, 3}, []int{1, 2, 4}},
		{[]int{4, 3, 2, 1}, []int{4, 3, 2, 2}},
		{[]int{0}, []int{1}},
		{[]int{1, 0, 9}, []int{1, 1, 0}},
		{[]int{9, 9, 9, 9, 9, 9, 9, 9, 9}, []int{1, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
	}
	for _, tc := range cases {
		if got := plusOne(tc.in); !reflect.DeepEqual(got, tc.want) {
			t.Errorf("plusOne(%v)=%v, want %v", tc.in, got, tc.want)
		}
	}
}
|
package solutions
// reverseVowels returns s with its vowels (a, e, i, o, u, either case)
// reversed among themselves while every other character keeps its position.
func reverseVowels(s string) string {
	out := []byte(s)
	j := len(s) - 1
	for i := 0; i < len(s) && j > i; i++ {
		if !isVowel(s[i]) {
			continue
		}
		// Walk j left to the nearest vowel at or after position i.
		for j >= i && !isVowel(s[j]) {
			j--
		}
		out[i], out[j] = out[j], out[i]
		j--
	}
	return string(out)
}

// isVowel reports whether c is an ASCII vowel in either case.
func isVowel(c byte) bool {
	switch c {
	case 'a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U':
		return true
	}
	return false
}
|
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package checkescape allows recursive escape analysis for hot paths.
//
// The analysis tracks multiple types of escapes, in two categories. First,
// 'hard' escapes are explicit allocations. Second, 'soft' escapes are
// interface dispatches or dynamic function dispatches; these don't necessarily
// escape but they *may* escape. The analysis is capable of making assertions
// recursively: soft escapes cannot be analyzed in this way, and therefore
// count as escapes for recursive purposes.
//
// The different types of escapes are as follows, with the category in
// parentheses:
//
// heap: A direct allocation is made on the heap (hard).
// builtin: A call is made to a built-in allocation function (hard).
// stack: A stack split as part of a function preamble (soft).
// interface: A call is made via an interface which *may* escape (soft).
// dynamic: A dynamic function is dispatched which *may* escape (soft).
//
// To the use the package, annotate a function-level comment with either the
// line "// +checkescape" or "// +checkescape:OPTION[,OPTION]". In the second
// case, the OPTION field is either a type above, or one of:
//
// local: Escape analysis is limited to local hard escapes only.
// all: All the escapes are included.
// hard: All hard escapes are included.
//
// If the "// +checkescape" annotation is provided, this is equivalent to
// provided the local and hard options.
//
// Some examples of this syntax are:
//
// +checkescape:all - Analyzes for all escapes in this function and all calls.
// +checkescape:local - Analyzes only for default local hard escapes.
// +checkescape:heap - Only analyzes for heap escapes.
// +checkescape:interface,dynamic - Only checks for dynamic calls and interface calls.
// +checkescape - Does the same as +checkescape:local,hard.
//
// Note that all of the above can be inverted by using +mustescape. The
// +checkescape keyword will ensure failure if the class of escape occurs,
// whereas +mustescape will fail if the given class of escape does not occur.
//
// Local exemptions can be made by a comment of the form "// escapes: reason."
// This must appear on the line of the escape and will also apply to callers of
// the function as well (for non-local escape analysis).
package checkescape
import (
"bufio"
"bytes"
"fmt"
"go/ast"
"go/token"
"go/types"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/buildssa"
"golang.org/x/tools/go/ssa"
"gvisor.dev/gvisor/tools/nogo/flags"
)
// Annotation strings recognized in function-level comments.
const (
	// magic is the magic annotation.
	magic = "// +checkescape"

	// Bad versions of `magic` observed in the wilderness of the codebase;
	// kept so they can be detected and flagged rather than silently ignored.
	badMagicNoSpace = "//+checkescape"
	badMagicPlural  = "// +checkescapes"

	// magicParams is the magic annotation with specific parameters.
	magicParams = magic + ":"

	// testMagic is the test magic annotation (parameters required).
	testMagic = "// +mustescape:"

	// exempt is the exemption annotation.
	exempt = "// escapes"
)
// EscapeReason is an escape reason.
//
// This is a simple enum.
type EscapeReason int

const (
	allocation EscapeReason = iota
	builtin
	interfaceInvoke
	dynamicCall
	stackSplit
	unknownPackage
	reasonCount // Count for below.
)

// String returns the string for the EscapeReason.
//
// Note that this also implicitly defines the reverse string -> EscapeReason
// mapping, which is the word before the colon (computed below).
func (e EscapeReason) String() string {
	switch e {
	case allocation:
		return "heap: explicit allocation"
	case builtin:
		return "builtin: call to potentially allocating builtin"
	case interfaceInvoke:
		return "interface: call to potentially allocating function"
	case dynamicCall:
		return "dynamic: call to potentially allocating function"
	case stackSplit:
		return "stack: possible split on function entry"
	case unknownPackage:
		return "unknown: no package information available"
	}
	panic(fmt.Sprintf("unknown reason: %d", e))
}
// hardReasons are escapes that definitely allocate; the bare "+checkescape"
// annotation checks exactly these.
var hardReasons = []EscapeReason{
	allocation,
	builtin,
}

// softReasons are escapes that may or may not allocate, depending on
// information unavailable to static analysis (interface/dynamic call targets,
// missing package facts, stack growth).
var softReasons = []EscapeReason{
	interfaceInvoke,
	unknownPackage,
	dynamicCall,
	stackSplit,
}

// allReasons is the union of hard and soft reasons.
var allReasons = append(hardReasons, softReasons...)

// escapeTypes maps the leading word of each reason's String() output (the
// part before the ':') back to the EscapeReason, for parsing annotations.
var escapeTypes = func() map[string]EscapeReason {
	result := make(map[string]EscapeReason)
	for _, r := range allReasons {
		parts := strings.Split(r.String(), ":")
		result[parts[0]] = r // Key before ':'.
	}
	return result
}()

// escapingBuiltins are builtins known to escape.
//
// These are lowered at an earlier stage of compilation to explicit function
// calls, but are not available for recursive analysis.
var escapingBuiltins = []string{
	"append",
	"makemap",
	"newobject",
	"mallocgc",
}
// objdumpAnalyzer accepts the objdump parameter.
type objdumpAnalyzer struct {
	analysis.Analyzer
}

// Run implements nogo.binaryAnalyzer.Run.
//
// binary is the compiled output for the package under analysis; presumably
// the nogo driver supplies it — confirm against the driver.
func (ob *objdumpAnalyzer) Run(pass *analysis.Pass, binary io.Reader) (any, error) {
	return run(pass, binary)
}

// Legacy implements nogo.analyzer.Legacy.
func (ob *objdumpAnalyzer) Legacy() *analysis.Analyzer {
	return &ob.Analyzer
}

// Analyzer includes specific results.
var Analyzer = &objdumpAnalyzer{
	Analyzer: analysis.Analyzer{
		Name:      "checkescape",
		Doc:       "escape analysis checks based on +checkescape annotations",
		Run:       nil, // Must be invoked via Run above.
		Requires:  []*analysis.Analyzer{buildssa.Analyzer},
		FactTypes: []analysis.Fact{(*Escapes)(nil)},
	},
}
// LinePosition is a low-resolution token.Position.
//
// This is used to match against possible exemptions placed in the source.
type LinePosition struct {
	Filename string
	Line     int
}

// String implements fmt.Stringer.String.
func (e LinePosition) String() string {
	return fmt.Sprintf("%s:%d", e.Filename, e.Line)
}

// Simplified returns the simplified name: the file's basename plus the line,
// matching the "<basename.go>:<line>" keys produced by loadObjdump.
func (e LinePosition) Simplified() string {
	return fmt.Sprintf("%s:%d", filepath.Base(e.Filename), e.Line)
}
// CallSite is a single call site.
//
// These can be chained.
type CallSite struct {
	LocalPos token.Pos    // Position within the current package.
	Resolved LinePosition // Fully resolved file:line.
}

// IsValid indicates whether the CallSite is valid or not.
func (cs *CallSite) IsValid() bool {
	return cs.LocalPos.IsValid()
}
// Escapes is a collection of escapes.
//
// We record at most one escape for each reason, but record the number of
// escapes that were omitted.
//
// This object should be used to summarize all escapes for a single line (local
// analysis) or a single function (package facts).
//
// All fields are exported for gob.
type Escapes struct {
	CallSites [reasonCount][]CallSite // Call chain per reason; nil => no escape recorded.
	Details   [reasonCount]string     // Human-readable detail per reason.
	Omitted   [reasonCount]int        // Number of additional escapes not recorded.
}

// AFact implements analysis.Fact.AFact.
func (*Escapes) AFact() {}
// add is called by Add and MergeWithCall.
//
// It records a single escape for reason r, keeping at most one escape per
// reason; anything displaced or dropped is accounted for in Omitted.
func (es *Escapes) add(r EscapeReason, detail string, omitted int, callSites ...CallSite) {
	if es.CallSites[r] != nil {
		// We will either be replacing the current escape or dropping
		// the added one. Either way, we increment omitted by the
		// appropriate amount.
		es.Omitted[r]++
		// If the existing call chain is already a single (local) call
		// site, or the new one is not, keep the existing escape. This
		// universally favors single-site chains, which provide the
		// cleanest set of escapes to summarize.
		//
		// Bug fix: this previously tested len(es.CallSites), which is
		// the fixed array length (always reasonCount), never 1 — so
		// the intended preference was never applied. The per-reason
		// chain length is the correct operand.
		if len(es.CallSites[r]) == 1 || len(callSites) != 1 {
			return
		}
	}
	es.Details[r] = detail
	es.CallSites[r] = callSites
	es.Omitted[r] += omitted
}
// Add adds a single escape.
//
// This is a thin wrapper around add with no pre-existing omitted count.
func (es *Escapes) Add(r EscapeReason, detail string, callSites ...CallSite) {
	es.add(r, detail, 0, callSites...)
}
// IsEmpty returns true iff this Escapes is empty.
//
// An Escapes is empty when no reason has a recorded call chain.
func (es *Escapes) IsEmpty() bool {
	for r := range es.CallSites {
		if es.CallSites[r] != nil {
			return false
		}
	}
	return true
}
// Filter filters out all escapes except those matching the given reasons.
//
// If local is set, then non-local escapes will also be filtered.
func (es *Escapes) Filter(reasons []EscapeReason, local bool) {
FilterReasons:
	for r := EscapeReason(0); r < reasonCount; r++ {
		for i := 0; i < len(reasons); i++ {
			if r == reasons[i] {
				continue FilterReasons
			}
		}

		// Zap this reason.
		es.CallSites[r] = nil
		es.Details[r] = ""
		es.Omitted[r] = 0
	}

	if !local {
		return
	}

	for r := EscapeReason(0); r < reasonCount; r++ {
		// Does this meet our local requirement? A single-element call
		// chain means the escape occurred directly in this function;
		// a longer chain came through a callee and is non-local.
		if len(es.CallSites[r]) > 1 {
			es.CallSites[r] = nil
			es.Details[r] = ""
			es.Omitted[r] = 0
		}
	}
}
// MergeWithCall merges these escapes with another.
//
// If callSite is nil, no call is added.
func (es *Escapes) MergeWithCall(other Escapes, callSite CallSite) {
	for r := EscapeReason(0); r < reasonCount; r++ {
		if other.CallSites[r] != nil {
			// Construct our new call chain by prepending the given
			// call site (when valid) to the callee's chain.
			newCallSites := other.CallSites[r]
			if callSite.IsValid() {
				newCallSites = append([]CallSite{callSite}, newCallSites...)
			}
			// Add (potentially replacing) the underlying escape.
			es.add(r, other.Details[r], other.Omitted[r], newCallSites...)
		}
	}
}
// Reportf will call pass.Reportf once for each class of escapes, rendering
// the call chain and detail for that class.
func (es *Escapes) Reportf(pass *analysis.Pass) {
	var b bytes.Buffer // Reused for all escapes.
	for r := EscapeReason(0); r < reasonCount; r++ {
		if es.CallSites[r] == nil {
			continue
		}
		b.Reset()
		fmt.Fprintf(&b, "%s ", r.String())
		if es.Omitted[r] > 0 {
			fmt.Fprintf(&b, "(%d omitted) ", es.Omitted[r])
		}
		for _, cs := range es.CallSites[r][1:] {
			fmt.Fprintf(&b, "→ %s ", cs.Resolved.String())
		}
		fmt.Fprintf(&b, "→ %s", es.Details[r])
		// Pass the assembled message through an explicit "%s" verb:
		// Reportf is printf-style, and the message may contain '%'
		// characters (e.g. in symbol names) that would otherwise be
		// misinterpreted as formatting directives.
		pass.Reportf(es.CallSites[r][0].LocalPos, "%s", b.String())
	}
}
// MergeAll merges a sequence of escapes into a single summary.
//
// No call site is attached to any of the merged entries.
func MergeAll(others []Escapes) (es Escapes) {
	for i := range others {
		es.MergeWithCall(others[i], CallSite{})
	}
	return
}
// loadObjdump reads the objdump output.
//
// This records if there is a call to any function for every source line. It
// is used only to remove false positives for escape analysis. The call will
// be elided if escape analysis is able to put the object on the heap
// exclusively.
//
// Note that the map uses <basename.go>:<line> because that is all that is
// provided in the objdump format. Since this is all local, it is sufficient.
func loadObjdump(binary io.Reader) (finalResults map[string][]string, finalErr error) {
	// Do we have a binary? If it's missing, then the nil will simply be
	// plumbed all the way down here.
	if binary == nil {
		return nil, fmt.Errorf("no binary provided")
	}

	// Construct & start our command. The 'go tool objdump' command
	// requires a seekable input passed on the command line. Therefore, we
	// may need to generate a temporary file here.
	input, ok := binary.(*os.File)
	if ok {
		// Ensure that the file is seekable and that the offset is
		// zero, since we can't control that.
		//
		// io.SeekCurrent replaces the deprecated os.SEEK_CUR.
		if offset, err := input.Seek(0, io.SeekCurrent); err != nil || offset != 0 {
			ok = false // Not usable.
		}
	}
	if !ok {
		// Copy to a temporary path.
		f, err := ioutil.TempFile("", "")
		if err != nil {
			return nil, fmt.Errorf("unable to create temp file: %w", err)
		}
		// Ensure the file is deleted and its descriptor released on
		// return (previously the descriptor was leaked). objdump opens
		// the file by name, so closing our handle is safe; the Close
		// defer runs before the Remove defer (LIFO).
		defer os.Remove(f.Name())
		defer f.Close()
		// Populate the file contents.
		if _, err := io.Copy(f, binary); err != nil {
			return nil, fmt.Errorf("unable to populate temp file: %w", err)
		}
		// Seek to the beginning (io.SeekStart replaces os.SEEK_SET).
		if _, err := f.Seek(0, io.SeekStart); err != nil {
			return nil, fmt.Errorf("unable to seek in temp file: %w", err)
		}
		input = f
	}

	// Execute go tool objdump given the input.
	cmd := exec.Command(flags.Go, "tool", "objdump", input.Name())
	pipeOut, err := cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("unable to load objdump: %w", err)
	}
	defer pipeOut.Close()
	pipeErr, err := cmd.StderrPipe()
	if err != nil {
		return nil, fmt.Errorf("unable to load objdump: %w", err)
	}
	defer pipeErr.Close()
	if startErr := cmd.Start(); startErr != nil {
		return nil, fmt.Errorf("unable to start objdump: %w", startErr)
	}

	// Ensure that the command has finished successfully. Note that even if
	// we parse the first few lines correctly, an early exit could indicate
	// that the dump was incomplete and we could have missed some escapes
	// that would have appeared. We need to force failure.
	defer func() {
		var (
			wg  sync.WaitGroup
			buf bytes.Buffer
		)
		wg.Add(1)
		go func() {
			defer wg.Done()
			io.Copy(&buf, pipeErr)
		}()
		waitErr := cmd.Wait()
		wg.Wait()
		if finalErr == nil && waitErr != nil {
			// Override the function's return value in this case.
			finalErr = fmt.Errorf("error running objdump %s: %v (%s)", input.Name(), waitErr, buf.Bytes())
		}
	}()

	// Identify calls by address or name. Note that the list of allowed addresses
	// -- not the list of allowed function names -- is also constructed
	// dynamically below, as we encounter the addresses. This is because some of
	// the functions (duffzero) may have jump targets in the middle of the
	// function itself.
	funcsAllowed := map[string]struct{}{
		"runtime.duffzero":       {},
		"runtime.duffcopy":       {},
		"runtime.racefuncenter":  {},
		"runtime.gcWriteBarrier": {},
		"runtime.retpolineAX":    {},
		"runtime.retpolineBP":    {},
		"runtime.retpolineBX":    {},
		"runtime.retpolineCX":    {},
		"runtime.retpolineDI":    {},
		"runtime.retpolineDX":    {},
		"runtime.retpolineR10":   {},
		"runtime.retpolineR11":   {},
		"runtime.retpolineR12":   {},
		"runtime.retpolineR13":   {},
		"runtime.retpolineR14":   {},
		"runtime.retpolineR15":   {},
		"runtime.retpolineR8":    {},
		"runtime.retpolineR9":    {},
		"runtime.retpolineSI":    {},
		"runtime.stackcheck":     {},
		"runtime.settls":         {},
	}

	// addrsAllowed lists every address that can be jumped to within the
	// funcsAllowed functions.
	addrsAllowed := make(map[string]struct{})

	// Build the map.
	nextFunc := "" // For funcsAllowed.
	m := make(map[string][]string)
	r := bufio.NewReader(pipeOut)
NextLine:
	for {
		line, err := r.ReadString('\n')
		if err != nil && err != io.EOF {
			return nil, err
		}
		fields := strings.Fields(line)

		// Is this an "allowed" function definition? If so, record every address of
		// the function body.
		if len(fields) >= 2 && fields[0] == "TEXT" {
			nextFunc = strings.TrimSuffix(fields[1], "(SB)")
			if _, ok := funcsAllowed[nextFunc]; !ok {
				nextFunc = "" // Don't record addresses.
			}
		}
		if nextFunc != "" && len(fields) > 2 {
			// We're inside an allowed function. Save the given address (in hex form,
			// as it appears).
			addrsAllowed[fields[1]] = struct{}{}
		}

		// We recognize lines corresponding to actual code (not the
		// symbol name or other metadata) and annotate them if they
		// correspond to an explicit CALL instruction. We assume that
		// the lack of a CALL for a given line is evidence that escape
		// analysis has eliminated an allocation.
		//
		// Lines look like this (including the first space):
		//  gohacks_unsafe.go:33  0xa39                   488b442408              MOVQ 0x8(SP), AX
		if len(fields) >= 5 && line[0] == ' ' {
			if !strings.Contains(fields[3], "CALL") {
				continue
			}
			site := fields[0]
			target := strings.TrimSuffix(fields[4], "(SB)")
			target, err := fixOffset(fields, target)
			if err != nil {
				return nil, err
			}

			// Ignore strings containing allowed functions.
			if _, ok := funcsAllowed[target]; ok {
				continue
			}
			if _, ok := addrsAllowed[target]; ok {
				continue
			}
			if len(fields) > 5 {
				// This may be a future relocation. Some
				// objdump versions describe this differently.
				// If it contains any of the functions allowed
				// above as a string, we let it go.
				softTarget := strings.Join(fields[5:], " ")
				for name := range funcsAllowed {
					if strings.Contains(softTarget, name) {
						continue NextLine
					}
				}
			}

			// Does this exist already?
			existing, ok := m[site]
			if !ok {
				existing = make([]string, 0, 1)
			}
			for _, other := range existing {
				if target == other {
					continue NextLine
				}
			}
			existing = append(existing, target)
			m[site] = existing // Update.
		}
		if err == io.EOF {
			break
		}
	}

	// Zap any accidental false positives.
	final := make(map[string][]string)
	for site, calls := range m {
		filteredCalls := make([]string, 0, len(calls))
		for _, call := range calls {
			if _, ok := addrsAllowed[call]; ok {
				continue // Omit this call.
			}
			filteredCalls = append(filteredCalls, call)
		}
		final[site] = filteredCalls
	}

	return final, nil
}
// poser is a type that implements Pos.
//
// This abstracts over ssa values (instructions and functions are both used
// with it below) so position resolution can be shared.
type poser interface {
	Pos() token.Pos
}
// findReasons extracts reasons from the function.
//
// It scans the function's doc comment for +checkescape/+mustescape
// annotations and returns:
//   - the escape reasons to check,
//   - whether checking is restricted to local escapes, and
//   - the set of test (+mustescape) reasons, mapped to their local flag.
func findReasons(pass *analysis.Pass, fdecl *ast.FuncDecl) ([]EscapeReason, bool, map[EscapeReason]bool) {
	// Is there a comment?
	if fdecl.Doc == nil {
		return nil, false, nil
	}
	var (
		reasons     []EscapeReason
		local       bool
		testReasons = make(map[EscapeReason]bool) // reason -> local?
	)
	// Scan all lines.
	found := false
	for _, c := range fdecl.Doc.List {
		// Report known misspellings instead of silently skipping them.
		if strings.HasPrefix(c.Text, badMagicNoSpace) || strings.HasPrefix(c.Text, badMagicPlural) {
			pass.Reportf(fdecl.Pos(), "misspelled checkescape prefix: please use %q instead", magic)
			continue
		}
		// Does the comment contain a +checkescape line?
		if !strings.HasPrefix(c.Text, magic) && !strings.HasPrefix(c.Text, testMagic) {
			continue
		}
		if c.Text == magic {
			// Default: hard reasons, local only.
			reasons = hardReasons
			local = true
		} else if strings.HasPrefix(c.Text, magicParams) {
			// Extract specific reasons.
			types := strings.Split(c.Text[len(magicParams):], ",")
			found = true // For below.
			for i := 0; i < len(types); i++ {
				if types[i] == "local" {
					// Limit search to local escapes.
					local = true
				} else if types[i] == "all" {
					// Append all reasons.
					reasons = append(reasons, allReasons...)
				} else if types[i] == "hard" {
					// Append all hard reasons.
					reasons = append(reasons, hardReasons...)
				} else {
					r, ok := escapeTypes[types[i]]
					if !ok {
						// This is not a valid escape reason.
						pass.Reportf(fdecl.Pos(), "unknown reason: %v", types[i])
						continue
					}
					reasons = append(reasons, r)
				}
			}
		} else if strings.HasPrefix(c.Text, testMagic) {
			types := strings.Split(c.Text[len(testMagic):], ",")
			// Deliberately shadows the outer local: each
			// +mustescape line carries its own local flag, tracked
			// per reason in testReasons.
			local := false
			for i := 0; i < len(types); i++ {
				if types[i] == "local" {
					local = true
				} else {
					r, ok := escapeTypes[types[i]]
					if !ok {
						// This is not a valid escape reason.
						pass.Reportf(fdecl.Pos(), "unknown reason: %v", types[i])
						continue
					}
					if v, ok := testReasons[r]; ok && v {
						// Already registered as local.
						continue
					}
					testReasons[r] = local
				}
			}
		}
	}
	if len(reasons) == 0 && found {
		// A magic annotation was provided, but no reasons.
		pass.Reportf(fdecl.Pos(), "no reasons provided")
	}
	return reasons, local, testReasons
}
// run performs the analysis.
//
// It loads the objdump output for the package binary (to filter out escapes
// the compiler eliminated), recursively analyzes every function's SSA for
// escapes, exports per-function facts, and finally reports violations of the
// +checkescape/+mustescape annotations.
func run(pass *analysis.Pass, binary io.Reader) (any, error) {
	// Note that if this analysis fails, then we don't actually
	// fail the analyzer itself. We simply report every possible
	// escape. In most cases this will work just fine.
	calls, callsErr := loadObjdump(binary)
	allEscapes := make(map[string][]Escapes)
	mergedEscapes := make(map[string]Escapes)
	linePosition := func(inst, parent poser) LinePosition {
		p := pass.Fset.Position(inst.Pos())
		if (p.Filename == "" || p.Line == 0) && parent != nil {
			p = pass.Fset.Position(parent.Pos())
		}
		return LinePosition{
			Filename: p.Filename,
			Line:     p.Line,
		}
	}
	callSite := func(inst ssa.Instruction) CallSite {
		return CallSite{
			LocalPos: inst.Pos(),
			Resolved: linePosition(inst, inst.Parent()),
		}
	}
	hasCall := func(inst poser) (string, bool) {
		p := linePosition(inst, nil)
		if callsErr != nil {
			// See above: we don't have access to the binary
			// itself, so need to include every possible call.
			return fmt.Sprintf("(possible, unable to load objdump: %v)", callsErr), true
		}
		s, ok := calls[p.Simplified()]
		if !ok {
			return "", false
		}
		// Join all calls together.
		return strings.Join(s, " or "), true
	}
	state := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA)

	// Build the exception list.
	exemptions := make(map[LinePosition]string)
	for _, f := range pass.Files {
		for _, cg := range f.Comments {
			for _, c := range cg.List {
				p := pass.Fset.Position(c.Slash)
				if strings.HasPrefix(strings.ToLower(c.Text), exempt) {
					exemptions[LinePosition{
						Filename: p.Filename,
						Line:     p.Line,
					}] = c.Text[len(exempt):]
				}
			}
		}
	}

	var loadFunc func(*ssa.Function) Escapes // Used below.
	analyzeInstruction := func(inst ssa.Instruction) (es Escapes) {
		cs := callSite(inst)
		if _, ok := exemptions[cs.Resolved]; ok {
			return // No escape.
		}
		switch x := inst.(type) {
		case *ssa.Call:
			if x.Call.IsInvoke() {
				// This is an interface dispatch. There is no
				// way to know if this is actually escaping or
				// not, since we don't know the underlying
				// type.
				call, _ := hasCall(inst)
				es.Add(interfaceInvoke, call, cs)
				return
			}
			switch x := x.Call.Value.(type) {
			case *ssa.Function:
				// Is this a local function? If yes, call the
				// function to load the local function. The
				// local escapes are the escapes found in the
				// local function.
				if x.Pkg != nil && x.Pkg.Pkg == pass.Pkg {
					es.MergeWithCall(loadFunc(x), cs)
					return
				}

				// If this package is the atomic package, the implementation
				// may be replaced by instrinsics that don't have analysis.
				if x.Pkg != nil && x.Pkg.Pkg.Path() == "sync/atomic" {
					return
				}

				// Recursively collect information.
				var funcEscapes Escapes
				if !pass.ImportObjectFact(x.Object(), &funcEscapes) {
					// If this is the unix or syscall
					// package, and the function is
					// RawSyscall, we can also ignore this
					// case.
					pkgIsUnixOrSyscall := x.Pkg != nil && (x.Pkg.Pkg.Name() == "unix" || x.Pkg.Pkg.Name() == "syscall")
					methodIsRawSyscall := x.Name() == "RawSyscall" || x.Name() == "RawSyscall6"
					if pkgIsUnixOrSyscall && methodIsRawSyscall {
						return
					}

					// Unable to import the dependency; we must
					// declare these as escaping.
					message := fmt.Sprintf("no analysis for %q", x.Object().String())
					es.Add(unknownPackage, message, cs)
					return
				}

				// The escapes of this instruction are the
				// escapes of the called function directly.
				// Note that this may record many escapes.
				es.MergeWithCall(funcEscapes, cs)
				return
			case *ssa.Builtin:
				// Ignore elided escapes.
				if _, has := hasCall(inst); !has {
					return
				}

				// Check if the builtin is escaping.
				for _, name := range escapingBuiltins {
					if x.Name() == name {
						es.Add(builtin, name, cs)
						return
					}
				}
			default:
				// All dynamic calls are counted as soft
				// escapes. They are similar to interface
				// dispatches. We cannot actually look up what
				// this refers to using static analysis alone.
				call, _ := hasCall(inst)
				es.Add(dynamicCall, call, cs)
			}
		case *ssa.Alloc:
			// Ignore non-heap allocations.
			if !x.Heap {
				return
			}

			// Ignore elided escapes.
			call, has := hasCall(inst)
			if !has {
				return
			}

			// This is a real heap allocation.
			es.Add(allocation, call, cs)
		case *ssa.MakeMap:
			es.Add(builtin, "makemap", cs)
		case *ssa.MakeSlice:
			es.Add(builtin, "makeslice", cs)
		case *ssa.MakeClosure:
			es.Add(builtin, "makeclosure", cs)
		case *ssa.MakeChan:
			es.Add(builtin, "makechan", cs)
		}
		return
	}

	var analyzeBasicBlock func(*ssa.BasicBlock) []Escapes // Recursive.
	analyzeBasicBlock = func(block *ssa.BasicBlock) (rval []Escapes) {
		for _, inst := range block.Instrs {
			if es := analyzeInstruction(inst); !es.IsEmpty() {
				rval = append(rval, es)
			}
		}
		return
	}

	loadFunc = func(fn *ssa.Function) Escapes {
		// Is this already available?
		name := fn.RelString(pass.Pkg)
		if es, ok := mergedEscapes[name]; ok {
			return es
		}

		// In the case of a true cycle, we assume that the current
		// function itself has no escapes.
		//
		// When evaluating the function again, the proper escapes will
		// be filled in here.
		allEscapes[name] = nil
		mergedEscapes[name] = Escapes{}

		// Perform the basic analysis.
		var es []Escapes
		if fn.Recover != nil {
			es = append(es, analyzeBasicBlock(fn.Recover)...)
		}
		for _, block := range fn.Blocks {
			es = append(es, analyzeBasicBlock(block)...)
		}

		// Check for a stack split.
		if call, has := hasCall(fn); has {
			var ss Escapes
			ss.Add(stackSplit, call, CallSite{
				LocalPos: fn.Pos(),
				Resolved: linePosition(fn, fn.Parent()),
			})
			es = append(es, ss)
		}

		// Save the result and return.
		//
		// Note that we merge the result when saving to the facts. It
		// doesn't really matter the specific escapes, as long as we
		// have recorded all the appropriate classes of escapes.
		summary := MergeAll(es)
		allEscapes[name] = es
		mergedEscapes[name] = summary
		return summary
	}

	// Complete all local functions.
	for _, fn := range state.SrcFuncs {
		funcEscapes := loadFunc(fn)
		if obj := fn.Object(); obj != nil {
			pass.ExportObjectFact(obj, &funcEscapes)
		}
	}

	// Scan all functions for violations.
	for _, f := range pass.Files {
		// Scan all declarations.
		for _, decl := range f.Decls {
			// Function declaration?
			fdecl, ok := decl.(*ast.FuncDecl)
			if !ok {
				continue
			}
			// Find all declared reasons.
			reasons, local, testReasons := findReasons(pass, fdecl)
			// Scan for matches.
			fn := pass.TypesInfo.Defs[fdecl.Name].(*types.Func)
			fv := state.Pkg.Prog.FuncValue(fn)
			if fv == nil {
				continue
			}
			name := fv.RelString(pass.Pkg)
			all, allOk := allEscapes[name]
			merged, mergedOk := mergedEscapes[name]
			if !allOk || !mergedOk {
				pass.Reportf(fdecl.Pos(), "internal error: function %s not found.", name)
				continue
			}

			// Filter reasons and report.
			//
			// For the findings, we use all escapes.
			for _, es := range all {
				es.Filter(reasons, local)
				es.Reportf(pass)
			}

			// Scan for test (required) matches.
			//
			// For tests we need only the merged escapes.
			testReasonsFound := make(map[EscapeReason]bool)
			for r := EscapeReason(0); r < reasonCount; r++ {
				if merged.CallSites[r] == nil {
					continue
				}
				// Is this local?
				wantLocal, ok := testReasons[r]
				isLocal := len(merged.CallSites[r]) == 1
				testReasonsFound[r] = isLocal
				if !ok {
					continue
				}
				if isLocal == wantLocal {
					delete(testReasons, r)
				}
			}
			for reason, local := range testReasons {
				// We didn't find the escapes we wanted. Pass the
				// arguments directly rather than via Sprintf:
				// Reportf is printf-style, and a pre-formatted
				// string would be misinterpreted as a format.
				pass.Reportf(fdecl.Pos(), "testescapes not found: reason=%s, local=%t", reason, local)
			}
			if len(testReasons) > 0 {
				// Report for debugging.
				merged.Reportf(pass)
			}
		}
	}

	return nil, nil
}
|
package main
import (
"fmt"
)
type (
	// Category groups posts under a named heading.
	Category struct {
		Name string
	}
	// Post is a single article.
	Post struct {
		Title string
	}
)

// lihatData ("view data") prints the category's contents.
func (c Category) lihatData() {
	fmt.Printf("%v\n", c)
}

// lihatData prints the post's contents.
func (p Post) lihatData() {
	fmt.Printf("%v\n", p)
}

// main demonstrates that both types expose the same method name.
func main() {
	fmt.Printf("From Category\n")
	berita := Category{Name: "Berita"}
	berita.lihatData()

	fmt.Printf("From Post\n")
	artikel := Post{Title: "Belajar Golang"}
	artikel.lihatData()
}
package db
import (
"errors"
"fmt"
)
// Config holds the connection parameters for a Postgres database.
type Config struct {
	Host     string `json:"host"`
	Port     int    `json:"port"`
	Username string `json:"username"`
	Password string `json:"password"`
	Name     string `json:"name"`
	SSLMode  string `json:"ssl_mode"`
}

// PostgresURL renders the config as a postgres:// connection URL.
//
// Host, Port, Username, Name and SSLMode are required; Password may be empty.
// Values are interpolated verbatim (no URL escaping).
func (c Config) PostgresURL() (string, error) {
	// Validate required fields in a fixed order so the first missing
	// field is the one reported.
	checks := []struct {
		missing bool
		msg     string
	}{
		{c.Host == "", `"host" is required`},
		{c.Port == 0, `"port" is required`},
		{c.Username == "", `"username" is required`},
		{c.Name == "", `"name" is required`},
		{c.SSLMode == "", `"ssl_mode" is required`},
	}
	for _, check := range checks {
		if check.missing {
			return "", errors.New(check.msg)
		}
	}

	url := fmt.Sprintf(
		"postgres://%s:%s@%s:%d/%s?sslmode=%s",
		c.Username,
		c.Password,
		c.Host,
		c.Port,
		c.Name,
		c.SSLMode,
	)
	return url, nil
}
|
package siprocket
/*
RFC 3261 - https://www.ietf.org/rfc/rfc3261.txt - 8.1.1.7 Via
The Via header field indicates the transport used for the transaction
and identifies the location where the response is to be sent. A Via
header field value is added only after the transport that will be
used to reach the next hop has been selected (which may involve the
usage of the procedures in [4]).
*/
// sipVia holds the parsed fields of a single SIP Via header line.
type sipVia struct {
	Trans  string // Type of Transport udp, tcp, tls, sctp etc
	Host   []byte // Host part
	Port   []byte // Port number
	Branch []byte // branch= parameter value
	Rport  []byte // rport= parameter value
	Maddr  []byte // maddr= parameter value
	Ttl    []byte // ttl= parameter value
	Rcvd   []byte // received= parameter value
	Src    []byte // Full source if needed
}
// parseSipVia parses a single SIP Via header line into out.
//
// The parser is a byte-at-a-time finite state machine: FIELD_BASE scans for
// the "SIP/2.0/" transport prefix and for "name=" parameter markers, and each
// other FIELD_* state accumulates bytes into one output field until ';' (or,
// for the host, ':' introducing the port) switches state. Any previous
// contents of out are discarded.
func parseSipVia(v []byte, out *sipVia) {
	pos := 0
	state := FIELD_BASE

	// Init the output area
	out.Trans = ""
	out.Host = nil
	out.Port = nil
	out.Branch = nil
	out.Rport = nil
	out.Maddr = nil
	out.Ttl = nil
	out.Rcvd = nil
	out.Src = nil

	// Keep the source line if needed. NOTE(review): this aliases v rather
	// than copying it — confirm callers do not reuse the buffer.
	if keep_src {
		out.Src = v
	}

	// Loop through the bytes making up the line
	for pos < len(v) {
		// FSM
		switch state {
		case FIELD_BASE:
			if v[pos] != ' ' {
				// Not a space
				if getString(v, pos, pos+8) == "SIP/2.0/" {
					// Transport type. NOTE(review): an
					// unrecognized transport leaves Trans
					// empty and the token is consumed as
					// host bytes — confirm intended.
					state = FIELD_HOST
					pos = pos + 8
					if getString(v, pos, pos+3) == "UDP" {
						out.Trans = "udp"
						pos = pos + 3
						continue
					}
					if getString(v, pos, pos+3) == "TCP" {
						out.Trans = "tcp"
						pos = pos + 3
						continue
					}
					if getString(v, pos, pos+3) == "TLS" {
						out.Trans = "tls"
						pos = pos + 3
						continue
					}
					if getString(v, pos, pos+4) == "SCTP" {
						out.Trans = "sctp"
						pos = pos + 4
						continue
					}
				}
				// Look for a Branch identifier
				if getString(v, pos, pos+7) == "branch=" {
					state = FIELD_BRANCH
					pos = pos + 7
					continue
				}
				// Look for a Rport identifier. NOTE(review): a
				// bare ";rport" (no '=', as sent in requests)
				// is not matched here — confirm.
				if getString(v, pos, pos+6) == "rport=" {
					state = FIELD_RPORT
					pos = pos + 6
					continue
				}
				// Look for a maddr identifier
				if getString(v, pos, pos+6) == "maddr=" {
					state = FIELD_MADDR
					pos = pos + 6
					continue
				}
				// Look for a ttl identifier
				if getString(v, pos, pos+4) == "ttl=" {
					state = FIELD_TTL
					pos = pos + 4
					continue
				}
				// Look for a received identifier
				if getString(v, pos, pos+9) == "received=" {
					state = FIELD_REC
					pos = pos + 9
					continue
				}
			}

		case FIELD_HOST:
			if v[pos] == ':' {
				// ':' introduces the port number.
				state = FIELD_PORT
				pos++
				continue
			}
			if v[pos] == ';' {
				// ';' starts a parameter; back to scanning.
				state = FIELD_BASE
				pos++
				continue
			}
			if v[pos] == ' ' {
				// Spaces within the host are skipped.
				pos++
				continue
			}
			out.Host = append(out.Host, v[pos])

		case FIELD_PORT:
			if v[pos] == ';' {
				state = FIELD_BASE
				pos++
				continue
			}
			out.Port = append(out.Port, v[pos])

		case FIELD_BRANCH:
			if v[pos] == ';' {
				state = FIELD_BASE
				pos++
				continue
			}
			out.Branch = append(out.Branch, v[pos])

		case FIELD_RPORT:
			if v[pos] == ';' {
				state = FIELD_BASE
				pos++
				continue
			}
			out.Rport = append(out.Rport, v[pos])

		case FIELD_MADDR:
			if v[pos] == ';' {
				state = FIELD_BASE
				pos++
				continue
			}
			out.Maddr = append(out.Maddr, v[pos])

		case FIELD_TTL:
			if v[pos] == ';' {
				state = FIELD_BASE
				pos++
				continue
			}
			out.Ttl = append(out.Ttl, v[pos])

		case FIELD_REC:
			if v[pos] == ';' {
				state = FIELD_BASE
				pos++
				continue
			}
			out.Rcvd = append(out.Rcvd, v[pos])
		}
		pos++
	}
}
|
package cli
import (
"context"
"fmt"
"log"
"time"
"github.com/fatih/color"
"github.com/mattn/go-colorable"
"github.com/spf13/cobra"
"github.com/tilt-dev/tilt/internal/analytics"
"github.com/tilt-dev/tilt/internal/hud/prompt"
"github.com/tilt-dev/tilt/internal/store"
"github.com/tilt-dev/tilt/pkg/logger"
"github.com/tilt-dev/tilt/pkg/model"
)
// ciCmd implements the `tilt ci` subcommand.
type ciCmd struct {
	fileName             string // Path to the Tiltfile (set via addTiltfileFlag).
	outputSnapshotOnExit string // --output-snapshot-on-exit: snapshot dump path.
}

// name identifies this subcommand for registration/analytics.
func (c *ciCmd) name() model.TiltSubcommand { return "ci" }
// register builds the cobra command for `tilt ci`, wiring all of its flags.
func (c *ciCmd) register() *cobra.Command {
	cmd := &cobra.Command{
		Use:                   "ci [<tilt flags>] [-- <Tiltfile args>]",
		DisableFlagsInUseLine: true,
		Short:                 "Start Tilt in CI/batch mode with the given Tiltfile args",
		Long: fmt.Sprintf(`
Starts Tilt and runs resources defined in the Tiltfile.

Exits with failure if any build fails or any server crashes.

Exits with success if all tasks have completed successfully
and all servers are healthy.

While Tilt is running, you can view the UI at %s:%d
(configurable with --host and --port).

See blog post for additional information: https://blog.tilt.dev/2020/04/16/how-to-not-break-server-startup.html
`, defaultWebHost, defaultWebPort),
	}

	// Shared flag groups used by other subcommands as well.
	addStartServerFlags(cmd)
	addDevServerFlags(cmd)
	addTiltfileFlag(cmd, &c.fileName)
	addKubeContextFlag(cmd)
	// --logactions is kept hidden; it is a debugging aid.
	cmd.Flags().BoolVar(&logActionsFlag, "logactions", false, "log all actions and state changes")
	cmd.Flags().Lookup("logactions").Hidden = true
	cmd.Flags().StringVar(&c.outputSnapshotOnExit, "output-snapshot-on-exit", "",
		"If specified, Tilt will dump a snapshot of its state to the specified path when it exits")
	cmd.Flags().DurationVar(&ciTimeout, "timeout", model.CITimeoutDefault,
		"Timeout to wait for CI to pass. Set to 0 for no timeout.")
	return cmd
}
// run executes `tilt ci`: it wires up the Tilt upper loop, streams logs, and
// returns a non-nil error if any build fails or server crashes.
func (c *ciCmd) run(ctx context.Context, args []string) error {
	a := analytics.Get(ctx)
	a.Incr("cmd.ci", nil)
	defer a.Flush(time.Second)

	// Buffer logs until the real log sink exists, then replay into it.
	deferred := logger.NewDeferredLogger(ctx)
	ctx = redirectLogs(ctx, deferred)

	// Strip date/time prefixes; Tilt's logger adds its own formatting.
	log.SetFlags(log.Flags() &^ (log.Ldate | log.Ltime))

	webHost := provideWebHost()
	webURL, _ := provideWebURL(webHost, provideWebPort())
	startLine := prompt.StartStatusLine(webURL, webHost)
	log.Print(startLine)
	log.Print(buildStamp())

	if ok, reason := analytics.IsAnalyticsDisabledFromEnv(); ok {
		log.Printf("Tilt analytics disabled: %s", reason)
	}

	cmdCIDeps, err := wireCmdCI(ctx, a, "ci")
	if err != nil {
		// Dependency wiring failed: flush buffered logs to the
		// original destination before bailing out.
		deferred.SetOutput(deferred.Original())
		return err
	}

	upper := cmdCIDeps.Upper

	// Swap the deferred logger for the real action-dispatching logger.
	l := store.NewLogActionLogger(ctx, upper.Dispatch)
	deferred.SetOutput(l)
	ctx = redirectLogs(ctx, l)

	if c.outputSnapshotOnExit != "" {
		// NOTE(review): WriteSnapshot's error (if any) is discarded by
		// this defer — confirm that is acceptable.
		defer cmdCIDeps.Snapshotter.WriteSnapshot(ctx, c.outputSnapshotOnExit)
	}

	err = upper.Start(ctx, args, cmdCIDeps.TiltBuild,
		c.fileName, store.TerminalModeStream, a.UserOpt(), cmdCIDeps.Token,
		string(cmdCIDeps.CloudAddress))
	if err == nil {
		_, _ = fmt.Fprintln(colorable.NewColorableStdout(),
			color.GreenString("SUCCESS. All workloads are healthy."))
	}
	return err
}
// ciTimeout bounds how long `tilt ci` waits for CI to pass; it is bound to
// the --timeout flag in (*ciCmd).register. Zero means no timeout.
var ciTimeout time.Duration
|
package store_test
import (
"testing"
"github.com/dhui/dktest"
"github.com/stretchr/testify/require"
. "nidavellir/services/store"
)
// TestNewSecret checks NewSecret's input validation: a valid source id, key
// and value are all required.
func TestNewSecret(t *testing.T) {
	t.Parallel()
	assert := require.New(t)

	cases := []struct {
		sourceId int
		key      string
		value    string
		wantErr  bool
	}{
		{sourceId: 0, key: "Key", value: "Value", wantErr: true},
		{sourceId: 1, key: "", value: "Value", wantErr: true},
		{sourceId: 1, key: "Key", value: "", wantErr: true},
		{sourceId: 1, key: "Key", value: "Value", wantErr: false},
	}

	for _, tc := range cases {
		secret, err := NewSecret(tc.sourceId, tc.key, tc.value)
		if tc.wantErr {
			assert.Error(err)
			assert.Nil(secret)
			continue
		}
		assert.NoError(err)
		assert.IsType(Secret{}, *secret)
	}
}
// TestPostgres_GetSecrets verifies that secrets seeded for a source can be
// fetched back by source id (seedSecrets adds exactly one secret to source 1).
func TestPostgres_GetSecrets(t *testing.T) {
	t.Parallel()
	assert := require.New(t)
	dktest.Run(t, imageName, postgresImageOptions, func(t *testing.T, info dktest.ContainerInfo) {
		db, err := newTestDb(info, seedSources, seedSecrets)
		assert.NoError(err)

		sourceId := 1
		secrets, err := db.GetSecrets(sourceId)
		assert.NoError(err)
		assert.Len(secrets, 1)
	})
}
// TestPostgres_AddSecret exercises AddSecret indirectly: the seedSecrets
// seeder passed to newTestDb calls db.AddSecret, so a clean seed implies the
// insert succeeded.
func TestPostgres_AddSecret(t *testing.T) {
	t.Parallel()
	assert := require.New(t)
	dktest.Run(t, imageName, postgresImageOptions, func(t *testing.T, info dktest.ContainerInfo) {
		_, err := newTestDb(info, seedSources, seedSecrets)
		assert.NoError(err)
	})
}
// TestPostgres_UpdateSecret updates a seeded secret's value and verifies the
// change is persisted by reading it back.
func TestPostgres_UpdateSecret(t *testing.T) {
	t.Parallel()
	assert := require.New(t)
	dktest.Run(t, imageName, postgresImageOptions, func(t *testing.T, info dktest.ContainerInfo) {
		db, err := newTestDb(info, seedSources, seedSecrets)
		assert.NoError(err)

		s, err := db.GetSecret(1)
		assert.NoError(err)

		// Change the value and write it back.
		s.Value = "ABC123"
		s, err = db.UpdateSecret(s)
		assert.NoError(err)

		// A fresh read must reflect the update.
		s2, err := db.GetSecret(1)
		assert.NoError(err)
		assert.EqualValues(s2.Value, s.Value)
	})
}
// TestPostgres_RemoveSecret checks that RemoveSecret succeeds.
//
// NOTE(review): only seedSources runs here (no seedSecrets), so secret 1 may
// not exist; this presumably relies on RemoveSecret succeeding for absent
// rows — confirm against the implementation.
func TestPostgres_RemoveSecret(t *testing.T) {
	t.Parallel()
	assert := require.New(t)
	dktest.Run(t, imageName, postgresImageOptions, func(t *testing.T, info dktest.ContainerInfo) {
		db, err := newTestDb(info, seedSources)
		assert.NoError(err)

		err = db.RemoveSecret(1)
		assert.NoError(err)
	})
}
// seedSecrets seeds a single secret onto the first source for use by the
// tests above.
func seedSecrets(db *Postgres) error {
	// seed secret to first source
	s, err := NewSecret(1, "key", "value")
	if err == nil {
		_, err = db.AddSecret(s)
	}
	return err
}
|
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// 6.7.18 - changed `newHandler` to public `NewHandler` - chief@beefdisciple.com
// Package handler serves Go vanity URLs.
package handler
import (
"errors"
"fmt"
"html/template"
"log"
"net/http"
"sort"
"strings"
"sync"
"time"
)
// ConfigFetcher defines an interface in which to fetch a configuration file to be used when setting up the handler. Fetch() is called immediately
// and then spawns a background goroutine that loops forever, but sleeps for the given fetch_interval defined in the config. If no fetch_interval
// is set then it defaults to 24 hours.
type ConfigFetcher interface {
	// Fetch returns the current Config or an error. It is called once at
	// handler construction and then periodically from the background
	// refresh goroutine started by NewHandler.
	Fetch() (*Config, error)
}
// Config is the data needed to set up the Handler
type Config struct {
	// Host is the vanity host advertised in the go-import meta tags;
	// when empty, each request's own Host is used instead (see getHost).
	Host string `yaml:"host,omitempty"`
	// FetchInterval is the config refresh period in seconds; NewHandler
	// falls back to 86400 (24h) when nil or non-positive.
	FetchInterval *int `yaml:"fetch_interval,omitempty"`
	// CacheAge is the Cache-Control max-age in seconds; defaults to
	// 86400 when nil and must not be negative (configure rejects it).
	CacheAge *int `yaml:"cache_max_age,omitempty"`
	// Paths maps an import-path suffix to its backing repository,
	// optional go-source display template, and VCS kind.
	Paths map[string]struct {
		Repo    string `yaml:"repo,omitempty"`
		Display string `yaml:"display,omitempty"`
		VCS     string `yaml:"vcs,omitempty"`
	} `yaml:"paths,omitempty"`
}
// Handler holds the logic for performing the redirects and satisfies the http interface.
type Handler struct {
	mu           sync.Mutex    // guards the fields below, which configure() replaces on refresh
	host         string        // vanity host; empty means "use the request's Host"
	cacheControl string        // precomputed Cache-Control header value
	paths        pathConfigSet // path configs sorted by path, searched via find()
}

// pathConfig is one resolved entry from Config.Paths.
type pathConfig struct {
	path    string // import-path suffix with any trailing "/" stripped
	repo    string // backing repository URL
	display string // go-source display template
	vcs     string // version control system: bzr, git, hg, or svn
}
// NewHandler creates a new Handler from the fetcher's initial Config and
// starts a background goroutine that re-fetches and re-applies the config
// every fetch_interval seconds (default 24h). The initial Fetch or
// configure error is returned; refresh errors are only logged and leave
// the previous configuration in effect.
func NewHandler(fetcher ConfigFetcher) (*Handler, error) {
	config, err := fetcher.Fetch()
	if err != nil {
		return nil, err
	}
	fetchInterval := 86400 // seconds; default refresh period of 24h
	if config.FetchInterval != nil && *config.FetchInterval > 0 {
		fetchInterval = *config.FetchInterval
	}
	h := &Handler{}
	if err := h.configure(config); err != nil {
		return nil, err
	}
	go func() {
		for {
			time.Sleep(time.Duration(fetchInterval) * time.Second)
			// Fix: use goroutine-local variables instead of reassigning
			// the captured outer `config`/`err`.
			cfg, err := fetcher.Fetch()
			if err != nil {
				log.Println(err)
				continue
			}
			if err := h.configure(cfg); err != nil {
				log.Println(err)
			}
		}
	}()
	return h, nil
}
// configure validates c and atomically replaces the handler's state.
// It rejects a negative cache_max_age and unknown/uninferable VCS values.
// Fix: all derived state is built into locals and committed only on
// success, so a mid-loop error no longer leaves the handler partially
// reconfigured; the redundant `*c.CacheAge >= 0` re-check was removed.
func (h *Handler) configure(c *Config) error {
	h.mu.Lock()
	defer h.mu.Unlock()
	cacheAge := 86400 // default Cache-Control max-age: 24h
	if c.CacheAge != nil {
		if *c.CacheAge < 0 {
			return errors.New("cache_max_age is negative")
		}
		cacheAge = *c.CacheAge
	}
	paths := make(pathConfigSet, 0, len(c.Paths))
	for path, e := range c.Paths {
		pc := pathConfig{
			path:    strings.TrimSuffix(path, "/"),
			repo:    e.Repo,
			display: e.Display,
			vcs:     e.VCS,
		}
		switch {
		case e.Display != "":
			// Already filled in.
		case strings.HasPrefix(e.Repo, "https://github.com/"):
			pc.display = fmt.Sprintf("%v %v/tree/master{/dir} %v/blob/master{/dir}/{file}#L{line}", e.Repo, e.Repo, e.Repo)
		case strings.HasPrefix(e.Repo, "https://bitbucket.org"):
			pc.display = fmt.Sprintf("%v %v/src/default{/dir} %v/src/default{/dir}/{file}#{file}-{line}", e.Repo, e.Repo, e.Repo)
		}
		switch {
		case e.VCS != "":
			// Explicitly configured; validate against the supported set.
			if e.VCS != "bzr" && e.VCS != "git" && e.VCS != "hg" && e.VCS != "svn" {
				return fmt.Errorf("configuration for %v: unknown VCS %s", path, e.VCS)
			}
		case strings.HasPrefix(e.Repo, "https://github.com/"):
			pc.vcs = "git"
		default:
			return fmt.Errorf("configuration for %v: cannot infer VCS from %s", path, e.Repo)
		}
		paths = append(paths, pc)
	}
	sort.Sort(paths)
	// Commit the validated configuration.
	h.host = c.Host
	h.cacheControl = fmt.Sprintf("public, max-age=%d", cacheAge)
	h.paths = paths
	return nil
}
// ServeHTTP renders the go-import/go-source meta page for a configured
// import path, the index page for "/", or a 404 otherwise.
// Fix: the handler fields are snapshotted under h.mu — the background
// refresh goroutine calls configure(), which mutates host/cacheControl/
// paths, so the previous lock-free reads were a data race.
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	h.mu.Lock()
	host, cacheControl, paths := h.host, h.cacheControl, h.paths
	h.mu.Unlock()
	current := r.URL.Path
	pc, subpath := paths.find(current)
	if pc == nil {
		if current == "/" {
			h.serveIndex(w, r)
			return
		}
		http.NotFound(w, r)
		return
	}
	if host == "" {
		host = r.Host // same fallback as getHost
	}
	w.Header().Set("Cache-Control", cacheControl)
	if err := vanityTmpl.Execute(w, struct {
		Import  string
		Subpath string
		Repo    string
		Display string
		VCS     string
	}{
		Import:  host + pc.path,
		Subpath: subpath,
		Repo:    pc.repo,
		Display: pc.display,
		VCS:     pc.vcs,
	}); err != nil {
		http.Error(w, "cannot render the page", http.StatusInternalServerError)
	}
}
// serveIndex renders the root page: a list of every configured import
// path prefixed with the vanity host.
// Fix: h.host and h.paths are read under h.mu because the background
// refresh goroutine may call configure() concurrently.
func (h *Handler) serveIndex(w http.ResponseWriter, r *http.Request) {
	h.mu.Lock()
	host, paths := h.host, h.paths
	h.mu.Unlock()
	if host == "" {
		host = r.Host // same fallback as getHost
	}
	handlers := make([]string, len(paths))
	for i, p := range paths {
		handlers[i] = host + p.path
	}
	if err := indexTmpl.Execute(w, struct {
		Host     string
		Handlers []string
	}{
		Host:     host,
		Handlers: handlers,
	}); err != nil {
		http.Error(w, "cannot render the page", http.StatusInternalServerError)
	}
}
// getHost returns the configured vanity host, falling back to the
// request's Host header when none is configured.
// Fix: h.host is read under h.mu because configure() may rewrite it
// concurrently from the background refresh goroutine.
func (h *Handler) getHost(r *http.Request) string {
	h.mu.Lock()
	host := h.host
	h.mu.Unlock()
	if host == "" {
		host = r.Host
	}
	return host
}
// indexTmpl renders the root ("/") page: the vanity host as a heading
// plus one godoc.org link per configured import path.
var indexTmpl = template.Must(template.New("index").Parse(`<!DOCTYPE html>
<html>
<h1>{{.Host}}</h1>
<ul>
{{range .Handlers}}<li><a href="https://godoc.org/{{.}}">{{.}}</a></li>{{end}}
</ul>
</html>
`))
// vanityTmpl renders the per-path vanity page: go-import/go-source meta
// tags for the `go get` toolchain, plus a browser redirect to godoc.org.
var vanityTmpl = template.Must(template.New("vanity").Parse(`<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
<meta name="go-import" content="{{.Import}} {{.VCS}} {{.Repo}}">
<meta name="go-source" content="{{.Import}} {{.Display}}">
<meta http-equiv="refresh" content="0; url=https://godoc.org/{{.Import}}/{{.Subpath}}">
</head>
<body>
Nothing to see here; <a href="https://godoc.org/{{.Import}}/{{.Subpath}}">see the package on godoc</a>.
</body>
</html>`))
// pathConfigSet is a slice of pathConfig kept sorted by path (see
// configure) so find() can binary-search it; it implements sort.Interface.
type pathConfigSet []pathConfig

// Len implements sort.Interface.
func (pset pathConfigSet) Len() int {
	return len(pset)
}

// Less implements sort.Interface; ordering is lexicographic by path.
func (pset pathConfigSet) Less(i, j int) bool {
	return pset[i].path < pset[j].path
}

// Swap implements sort.Interface.
func (pset pathConfigSet) Swap(i, j int) {
	pset[i], pset[j] = pset[j], pset[i]
}
// find returns the pathConfig whose path is the longest prefix of the
// request path, plus the remaining subpath after that prefix. It returns
// a nil pc when nothing matches. pset must be sorted (see configure).
func (pset pathConfigSet) find(path string) (pc *pathConfig, subpath string) {
	// Fast path with binary search to retrieve exact matches
	// e.g. given pset ["/", "/abc", "/xyz"], path "/def" won't match.
	i := sort.Search(len(pset), func(i int) bool {
		return pset[i].path >= path
	})
	if i < len(pset) && pset[i].path == path {
		// Exact match: no subpath.
		return &pset[i], ""
	}
	if i > 0 && strings.HasPrefix(path, pset[i-1].path+"/") {
		// The immediate lexicographic predecessor is a path-component
		// prefix; the "/" separator is excluded from the subpath.
		return &pset[i-1], path[len(pset[i-1].path)+1:]
	}
	// Slow path, now looking for the longest prefix/shortest subpath i.e.
	// e.g. given pset ["/", "/abc/", "/abc/def/", "/xyz"/]
	// * query "/abc/foo" returns "/abc/" with a subpath of "foo"
	// * query "/x" returns "/" with a subpath of "x"
	lenShortestSubpath := len(path)
	var bestMatchConfig *pathConfig
	// After binary search with the >= lexicographic comparison,
	// nothing greater than i will be a prefix of path.
	max := i
	for i := 0; i < max; i++ {
		ps := pset[i]
		if len(ps.path) >= len(path) {
			// We previously didn't find the path by search, so any
			// route with equal or greater length is NOT a match.
			continue
		}
		sSubpath := strings.TrimPrefix(path, ps.path)
		if len(sSubpath) < lenShortestSubpath {
			// Shorter subpath means a longer (better) matching prefix.
			subpath = sSubpath
			lenShortestSubpath = len(sSubpath)
			bestMatchConfig = &pset[i]
		}
	}
	return bestMatchConfig, subpath
}
|
package gormzap
import (
"fmt"
"time"
"github.com/jinzhu/gorm"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// log is one gorm logger record, normalized from the raw values gorm
// passes to its logger before conversion into zap fields.
type log struct {
	occurredAt time.Time     // when the record was created
	source     string        // caller location reported by gorm
	duration   time.Duration // query execution time (sql records only)
	sql        string        // the SQL statement (sql records only)
	values     []string      // formatted bind values (sql records only)
	other      []string      // raw payload for non-sql records
}
// toZapFields converts the record into its zap field representation,
// one field per struct member.
func (l *log) toZapFields() []zapcore.Field {
	fields := make([]zapcore.Field, 0, 6)
	fields = append(fields, zap.Time("occurredAt", l.occurredAt))
	fields = append(fields, zap.String("source", l.source))
	fields = append(fields, zap.Duration("duration", l.duration))
	fields = append(fields, zap.String("sql", l.sql))
	fields = append(fields, zap.Strings("values", l.values))
	fields = append(fields, zap.Strings("other", l.other))
	return fields
}
// createLog builds a log record from the raw values gorm hands to its
// logger callback.
// NOTE(review): this assumes gorm's logger layout — values[0] is the
// record kind ("sql" or other), values[3] the SQL string for sql
// records, and values[2:] the payload otherwise; a short or differently
// shaped slice would panic on the index/assertion. Confirm against the
// gorm version in use.
func createLog(values []interface{}) *log {
	ret := &log{}
	ret.occurredAt = gorm.NowFunc()
	if len(values) > 1 {
		var level = values[0]
		ret.source = getSource(values)
		if level == "sql" {
			ret.duration = getDuration(values)
			ret.values = getFormattedValues(values)
			ret.sql = values[3].(string)
		} else {
			// Non-sql records: keep the remaining payload verbatim.
			ret.other = append(ret.other, fmt.Sprint(values[2:]))
		}
	}
	return ret
}
|
package funcmod
// IterChan returns an Iterable backed by the given channel.
// NOTE: receives from the channel can block, so iteration may block too.
func IterChan(ch <-chan interface{}) Iterable {
	return iterable(ch)
}
|
package runtime
import (
"fmt"
"github.com/healthy-tiger/scalc/parser"
)
// Definitions of the common runtime-error IDs. Each ID is assigned by
// RegisterEvalError and indexes a format string in errorMessages.
var (
	ErrorTheNumberOfArgumentsDoesNotMatch                           int
	ErrorUndefinedSymbol                                            int
	ErrorAnEmptyListIsNotAllowed                                    int
	ErrorTheFirstElementOfTheListToBeEvaluatedMustBeACallableObject int
	ErrorFunctionCannotBePassedAsFunctionArgument                   int
	ErrorInsufficientNumberOfArguments                              int
	ErrorTooManyArguments                                           int
	ErrorInvalidOperation                                           int
	ErrorValueOutOfRange                                            int
)

// errorMessages maps an error ID to its fmt-style message template.
var errorMessages map[int]string = make(map[int]string)
// init registers the message templates for the common runtime errors and
// stores the resulting IDs in the package-level error variables.
func init() {
	ErrorTheNumberOfArgumentsDoesNotMatch = RegisterEvalError("The number of arguments does not match(%v given, %v need)")
	ErrorUndefinedSymbol = RegisterEvalError("Undefined symbol %v")
	ErrorAnEmptyListIsNotAllowed = RegisterEvalError("An empty list is not allowed")
	ErrorTheFirstElementOfTheListToBeEvaluatedMustBeACallableObject = RegisterEvalError("The first element of the list to be evaluated must be a callable object: %v ")
	ErrorFunctionCannotBePassedAsFunctionArgument = RegisterEvalError("Function cannot be passed as function argument")
	ErrorInsufficientNumberOfArguments = RegisterEvalError("Insufficient number of arguments(%v given, %v need)")
	// Fix: ErrorTooManyArguments was declared but never registered,
	// leaving it 0 and colliding with the first registered error ID.
	ErrorTooManyArguments = RegisterEvalError("Too many arguments(%v given, %v need)")
	ErrorInvalidOperation = RegisterEvalError("Invalid Operation")
	ErrorValueOutOfRange = RegisterEvalError("Value out of range %v(%v to %v)")
}
// EvalError represents a runtime evaluation error.
type EvalError struct {
	ErrorLocation parser.Position // where in the source the error occurred
	Message       string          // fully formatted error message
}
// NewEvalError creates an object describing an error that occurred while
// evaluating an expression. id must be a value returned by
// RegisterEvalError; an unknown id panics (a programming error, not a
// runtime condition). args are interpolated into the registered
// fmt-style message template.
func NewEvalError(loc parser.Position, id int, args ...interface{}) *EvalError {
	msg, ok := errorMessages[id]
	if !ok {
		panic("Undefined error id")
	}
	e := new(EvalError)
	e.ErrorLocation = loc
	e.Message = fmt.Sprintf(msg, args...)
	return e
}
// Error implements the error interface, prefixing the message with the
// "file:line:column " location header.
func (err *EvalError) Error() string {
	loc := err.ErrorLocation
	return fmt.Sprintf("%s:%d:%d %s", loc.Filename, loc.Line, loc.Column, err.Message)
}
// RegisterEvalError registers a runtime-error message template and
// returns its message ID. IDs are assigned sequentially from the current
// map size, so each registration gets a fresh ID.
func RegisterEvalError(msg string) int {
	n := len(errorMessages)
	errorMessages[n] = msg
	return n
}
|
package main
import (
"context"
"log"
"time"
firebase "firebase.google.com/go"
"google.golang.org/api/option"
msgbroker "audiman/gu-project/recharger-ser/messageBroker"
"audiman/gu-project/recharger-ser/services"
)
// main wires up the recharger service against RabbitMQ and then runs an
// experimental ("try") Firestore write of a sample quote document.
func main() {
	mb := setupRabbitMQ()
	rs := services.NewRechargerService(mb)
	go rs.InitRechargesListener()
	defer rs.CloseListener()
	// try
	sa := option.WithCredentialsFile("./config/audiman.json")
	app, err := firebase.NewApp(context.Background(), nil, sa)
	if err != nil {
		// Fix: previously only logged and continued, which dereferenced
		// the nil app below.
		log.Fatalf("initializing firebase app: %v", err)
	}
	client, err := app.Firestore(context.Background())
	if err != nil {
		log.Fatalf("creating firestore client: %v", err)
	}
	defer client.Close()
	quote := getQoute()
	log.Println(quote)
	result, err := client.Collection("sampleData").Doc("inspiration").Set(context.Background(), quote)
	if err != nil {
		// Fix: don't print a meaningless result after a failed write.
		log.Printf("writing sample quote: %v", err)
	} else {
		log.Println(result)
	}
	//end try
}
// getQoute builds the hard-coded sample quote that main writes to
// Firestore. (The "Qoute" spelling is kept because main calls it by this
// name.)
func getQoute() *Quote {
	myQuote := Quote{
		"Audiman 22 ",
		"Karem is tha best singer",
	}
	return &myQuote
}
// Quote is the sample document written to Firestore.
// Fix: the fields are now exported — the Firestore client (like
// encoding/json) serializes only exported struct fields, so the previous
// unexported fields were silently dropped from the stored document.
// (Field spellings are otherwise kept; getQoute's positional literal
// still compiles unchanged.)
type Quote struct {
	Autor   string
	Message string
}
// setupRabbitMQ connects to the RabbitMQ broker, retrying every two
// seconds until the connection succeeds.
// Fix: retries now use a loop instead of unbounded recursion (which grew
// the stack on every attempt), and the connection error is logged so
// failures are diagnosable.
func setupRabbitMQ() msgbroker.MessageBroker {
	//rabbitURL := os.Getenv("RABBIT_URL")
	rabbitURL := "amqp://guest:guest@localhost:5672/"
	for {
		brk, err := msgbroker.NewRabbitMqBroker(rabbitURL)
		if err == nil {
			return brk
		}
		log.Printf("Trying to connect: %s (%v)\n", rabbitURL, err)
		time.Sleep(time.Second * 2)
	}
}
|
package api
import (
"coding-challenge-go/pkg/api/product"
"coding-challenge-go/pkg/api/seller"
"database/sql"
"github.com/gin-gonic/gin"
)
// CreateAPIEngine creates engine instance that serves API endpoints,
// consider it as a router for incoming requests.
func CreateAPIEngine(db *sql.DB) (*gin.Engine, error) {
r := gin.New()
v1 := r.Group("api/v1")
productRepository := product.NewRepository(db)
sellerRepository := seller.NewRepository(db)
emailProvider := seller.NewEmailProvider()
productController := product.NewController(productRepository, sellerRepository, emailProvider)
v1.GET("products", productController.List)
v1.GET("product", productController.Get)
v1.POST("product", productController.Post)
v1.PUT("product", productController.Put)
v1.DELETE("product", productController.Delete)
sellerController := seller.NewController(sellerRepository)
v1.GET("sellers", sellerController.List)
return r, nil
} |
package main
import "fmt"
// A prints its exit message, then on return runs its defers in LIFO
// order: first B() — which recurses back into A via B's own defer — then
// the Println. Together with B this is unbounded mutual recursion
// through defers; presumably a deliberate demonstration that ends in a
// stack overflow.
func A() {
	defer fmt.Println("Hello from A!") //
	defer B()
	fmt.Println("Exiting A...") //
}
// B mirrors A: its deferred A() call re-enters the mutual recursion when
// B returns, so the chain never terminates normally.
func B() {
	defer fmt.Println("Hello from B!") //
	defer A()
	fmt.Println("Exiting B...") //
}
// main defers B, so the A/B mutual defer recursion starts when main
// returns and the program eventually crashes with a stack overflow.
func main() {
	defer B()
}
|
package routers
import (
"github.com/astaxie/beego"
"homework/backend/controllers"
"homework/common/mysql"
"homework/common/redis"
"homework/models/repositories"
"homework/models/services"
)
// init opens the MySQL and Redis connections, builds the repository/
// service/controller stack, and registers all backend routes with beego.
func init() {
	db, err := mysql.NewMysqlConn()
	if err != nil {
		// Fix: the connection error was silently swallowed, leaving all
		// routes unregistered with no diagnostic.
		beego.Error("mysql connection failed:", err)
		return
	}
	conn := redis.NewRedisConn()
	/*
		/order
	*/
	orderrepo := repositories.NewOrderManagerRepository("order", db)
	ordersevice := services.NewOrderService(orderrepo)
	ordercontroller := &controllers.OrderController{OrderService: ordersevice}
	beego.Router("/order/list", ordercontroller, "get:GetList")
	beego.Router("/order/deliver", ordercontroller, "get:GetUpdate")
	/*
		/product
	*/
	productrepo := repositories.NewProductManager("product", db, conn)
	productservice := services.NewProductService(productrepo)
	productcontroller := &controllers.ProductController{ProductService: productservice}
	beego.Router("/", productcontroller, "get:GetAll")
	beego.Router("/product/list", productcontroller, "get:GetAll")
	beego.Router("/product/update", productcontroller, "get:GetManager;post:PostManager")
	beego.Router("/product/insert", productcontroller, "get:GetInsert;post:PostInsert")
	beego.Router("/product/delete", productcontroller, "get:GetDelete")
	/*
		/shop
	*/
	shoprepo := repositories.NewShopRepository(db, "shop")
	shopservice := services.NewShopService(shoprepo)
	shopController := &controllers.ShopController{ShopService: shopservice}
	beego.Router("/shop/register", shopController, "get:GetRegister;post:PostRegister")
	beego.Router("/shop/login", shopController, "get:GetLogin;post:PostLogin")
	beego.Router("/shop/logout", shopController, "get:GetLogout")
}
|
/*
Copyright 2019 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package stats
import (
"context"
"math"
"strings"
"sync"
"testing"
"time"
"github.com/GoogleCloudPlatform/cloud-ingest/agent/common"
taskpb "github.com/GoogleCloudPlatform/cloud-ingest/proto/task_go_proto"
)
// TestTrackerRecordBWLimit verifies that RecordBWLimit updates the
// tracker's lifetime bandwidth limit from its MaxInt32 default.
func TestTrackerRecordBWLimit(t *testing.T) {
	// Create an unused mock ticker to prevent accidental calls to selectDone.
	unusedMockTicker := common.NewMockTicker()
	accumulatorTickerMaker = func() common.Ticker { return unusedMockTicker }
	displayTickerMaker = func() common.Ticker { return unusedMockTicker }
	st := NewTracker(context.Background())
	var wg sync.WaitGroup
	st.selectDone = func() { wg.Done() } // The test hook.
	if got, want := st.lifetime.bwLimit, int64(math.MaxInt32); got != want {
		t.Fatalf("initial bwLimit = %v, want:%v", got, want)
	}
	wg.Add(1)
	st.RecordBWLimit(123456)
	wg.Wait() // Force the Tracker to collect the recorded stats.
	if got, want := st.lifetime.bwLimit, int64(123456); got != want {
		t.Errorf("bwLimit = %v, want:%v", got, want)
	}
}
// Shared PulseStats fixtures for the arithmetic tests below. Each
// literal fills all 13 numeric fields positionally; the trailing
// comments record the intended arithmetic relationships between them.
// NOTE(review): these are shared package-level pointers — tests must
// copy before mutating (see TestPulseStatsAdd).
var (
	psEmpty = &PulseStats{}
	ps1     = &PulseStats{1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0}
	ps2     = &PulseStats{0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1}
	ps3     = &PulseStats{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} // ps3 = ps1 + ps2
	ps4     = &PulseStats{1, 2, 3, 1, 2, 3, 1, 2, 2, 3, 1, 2, 3}
	ps5     = &PulseStats{2, 4, 6, 2, 4, 6, 2, 4, 4, 6, 2, 4, 6} // ps5 = ps4 + ps4
	ps6     = &PulseStats{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}
	ps7     = &PulseStats{8, 7, 6, 8, 7, 6, 8, 7, 7, 6, 8, 7, 6} // ps7 = ps6 - ps4
)
// TestTrackerAccumulatedPulseStats drives the tracker with interleaved
// PulseStats records and accumulator ticks (the "t" markers) and checks
// that AccumulatedPulseStats returns — and then resets — the running sum.
func TestTrackerAccumulatedPulseStats(t *testing.T) {
	tests := []struct {
		desc   string
		inputs []interface{} // *PulseStats to record, or "t" for an accumulator tick
		want   *PulseStats
	}{
		{"Empty", []interface{}{"t"}, psEmpty},
		{"Empty, no accumulator tick", []interface{}{ps1}, psEmpty},
		{"Basic 1", []interface{}{ps1, "t"}, ps1},
		{"Basic 2", []interface{}{ps1, ps2, "t"}, ps3},
		{"Basic 3", []interface{}{ps1, ps2, "t", ps7}, ps3},
		{"Basic 4", []interface{}{ps3, "t", ps3, ps3, "t", ps3, ps3, ps3, "t", ps3, ps3, ps3, "t"}, ps6},
	}
	for _, tc := range tests {
		// Must be done before creating the Tracker.
		mockAccumulatorTicker := common.NewMockTicker()
		accumulatorTickerMaker = func() common.Ticker { return mockAccumulatorTicker }
		unusedMockDisplayTicker := common.NewMockTicker()
		displayTickerMaker = func() common.Ticker { return unusedMockDisplayTicker }
		st := NewTracker(context.Background())
		var wg sync.WaitGroup
		st.selectDone = func() { wg.Done() }
		// AccumulatedPulseStats must start empty.
		if got := st.AccumulatedPulseStats(); *got != *psEmpty {
			t.Errorf("AccumulatedPulseStats got %v, want %v", got, psEmpty)
			continue
		}
		// Record all of the PulseStats and accumulator ticks.
		for _, i := range tc.inputs {
			wg.Add(1)
			switch v := i.(type) {
			case string:
				mockAccumulatorTicker.Tick()
			case *PulseStats:
				st.pulseStatsChan <- v
			default:
				t.Fatalf("Unrecognized input type: %T %v", i, i)
			}
			wg.Wait() // Allow the Tracker to collect the input.
		}
		// Validate AccumulatedPulseStats.
		if got := st.AccumulatedPulseStats(); *got != *tc.want {
			t.Errorf("AccumulatedPulseStats() got %v, want %v", got, tc.want)
		}
		// AccumulatedPulseStats should be empty again (reading resets it).
		if got := st.AccumulatedPulseStats(); *got != *psEmpty {
			t.Errorf("AccumulatedPulseStats got %v, want %v", got, psEmpty)
		}
	}
}
// TestPulseStatsAdd verifies PulseStats.add sums field-by-field.
func TestPulseStatsAdd(t *testing.T) {
	tests := []struct {
		a    *PulseStats
		b    *PulseStats
		want *PulseStats
	}{
		{psEmpty, psEmpty, psEmpty},
		{ps1, ps2, ps3},
		{ps4, ps4, ps5},
	}
	for i, tc := range tests {
		got := *tc.a // Create a copy to not interfere with other tests.
		if got.add(tc.b); got != *tc.want {
			t.Errorf("%d: PulseStats.add got %v, want %v", i, got, tc.want)
		}
	}
}
// TestPulseStatsSub verifies PulseStats.sub subtracts field-by-field.
func TestPulseStatsSub(t *testing.T) {
	tests := []struct {
		a    *PulseStats
		b    *PulseStats
		want *PulseStats
	}{
		{psEmpty, psEmpty, psEmpty},
		{ps3, ps2, ps1},
		{ps6, ps4, ps7},
	}
	for i, tc := range tests {
		// Fix: operate on a copy — sub mutates its receiver, and tc.a
		// points at the shared package-level fixtures (ps3, ps6), so the
		// previous in-place call corrupted them for any later use.
		// (TestPulseStatsAdd already copies for exactly this reason.)
		got := *tc.a
		got.sub(tc.b)
		if got != *tc.want {
			t.Errorf("%d: PulseStats.sub got %v, want %v", i, got, tc.want)
		}
	}
}
// Canned task responses: one carrying a copy spec and one a list spec,
// used to exercise the per-type response counters.
var (
	copyTaskRespMsg = &taskpb.TaskRespMsg{ReqSpec: &taskpb.Spec{Spec: &taskpb.Spec_CopySpec{CopySpec: &taskpb.CopySpec{}}}}
	listTaskRespMsg = &taskpb.TaskRespMsg{ReqSpec: &taskpb.Spec{Spec: &taskpb.Spec_ListSpec{ListSpec: &taskpb.ListSpec{}}}}
)
// TestTrackerDisplayStats feeds the tracker mixed inputs — task
// responses, byte counts, and control-message timestamps (dispatched by
// dynamic type below) — and checks that displayStats renders the
// expected substrings.
func TestTrackerDisplayStats(t *testing.T) {
	tests := []struct {
		desc        string
		inputs      []interface{} // *taskpb.TaskRespMsg, int (bytes), or time.Time (ctrl msg)
		wantSubStrs []string
	}{
		{
			"No inputs",
			[]interface{}{},
			[]string{
				"txRate: 0B/s",
				"txSum: 0B",
				"taskResps[copy:0 list:0]",
				"ctrlMsgAge:0s (ok)",
			},
		},
		{
			"Responded tasks",
			[]interface{}{copyTaskRespMsg, copyTaskRespMsg, listTaskRespMsg},
			[]string{
				"txRate: 0B/s",
				"txSum: 0B",
				"taskResps[copy:2 list:1]",
				"ctrlMsgAge:0s (ok)",
			},
		},
		{
			"Bytes sent",
			[]interface{}{500 * 1024, 500 * 1024, 1000 * 1024},
			[]string{
				"txRate: 0B/s",
				"txSum: 2.0MiB",
				"taskResps[copy:0 list:0]",
				"ctrlMsgAge:0s (ok)",
			},
		},
		{
			"Bytes sent",
			[]interface{}{500 * 1024, 500 * 1024, 1000 * 1024},
			[]string{
				"txRate: 0B/s",
				"txSum: 2.0MiB",
				"taskResps[copy:0 list:0]",
				"ctrlMsgAge:0s (ok)",
			},
		},
		{
			"Control message (ok)",
			[]interface{}{time.Now().Add(-2 * time.Second)},
			[]string{
				"txRate: 0B/s",
				"txSum: 0B",
				"taskResps[copy:0 list:0]",
				"ctrlMsgAge:2s (ok)",
			},
		},
		{
			"Control message (??)",
			[]interface{}{time.Now().Add(-32 * time.Second)},
			[]string{
				"txRate: 0B/s",
				"txSum: 0B",
				"taskResps[copy:0 list:0]",
				"ctrlMsgAge:32s (??)",
			},
		},
		{
			"Combined",
			[]interface{}{500 * 1024, 500 * 1024, 1000 * 1024, time.Now().Add(-2 * time.Second), copyTaskRespMsg, copyTaskRespMsg, listTaskRespMsg},
			[]string{
				"txRate: 0B/s",
				"txSum: 2.0MiB",
				"taskResps[copy:2 list:1]",
				"ctrlMsgAge:2s (ok)",
			},
		},
	}
	for _, tc := range tests {
		// Create an unused mock ticker to prevent accidental calls to selectDone.
		unusedMockTicker := common.NewMockTicker()
		accumulatorTickerMaker = func() common.Ticker { return unusedMockTicker }
		displayTickerMaker = func() common.Ticker { return unusedMockTicker }
		st := NewTracker(context.Background())
		// Set up the test hooks.
		var wg sync.WaitGroup
		st.selectDone = func() { wg.Done() } // The test hook.
		// Record all the mocked inputs and ticks.
		for _, i := range tc.inputs {
			wg.Add(1)
			switch v := i.(type) {
			case *taskpb.TaskRespMsg:
				st.RecordTaskResp(v)
			case int:
				st.tpTracker.RecordBytesSent(int64(v))
				st.pulseStatsChan <- &PulseStats{CopyBytes: int64(v)}
			case time.Time:
				st.RecordCtrlMsg(v)
			default:
				t.Fatalf("Unrecognized input type: %T %v", i, i)
			}
			wg.Wait() // Allow the Tracker to collect the input.
		}
		got := st.displayStats()
		for _, want := range tc.wantSubStrs {
			if !strings.Contains(got, want) {
				t.Errorf("displayStats = %q, want to contain %q", got, want)
			}
		}
	}
}
// TestByteCountBinary table-tests the binary (1024-based) human-readable
// byte formatter across all unit magnitudes and left-padding widths.
func TestByteCountBinary(t *testing.T) {
	tests := []struct {
		b    int64
		pad  int // minimum output width; result is left-padded with spaces
		want string
	}{
		// Various byte size tests.
		{0, 0, "0B"},
		{10, 0, "10B"},
		{210, 0, "210B"},
		{3210, 0, "3.1KiB"},
		{43210, 0, "42.2KiB"},
		{543210, 0, "530.5KiB"},
		{6543210, 0, "6.2MiB"},
		{76543210, 0, "73.0MiB"},
		{876543210, 0, "835.9MiB"},
		{9876543210, 0, "9.2GiB"},
		{19876543210, 0, "18.5GiB"},
		{109876543210, 0, "102.3GiB"},
		{2109876543210, 0, "1.9TiB"},
		{32109876543210, 0, "29.2TiB"},
		{432109876543210, 0, "393.0TiB"},
		{5432109876543210, 0, "4.8PiB"},
		{65432109876543210, 0, "58.1PiB"},
		{765432109876543210, 0, "679.8PiB"},
		{8765432109876543210, 0, "7.6EiB"},
		// {98765432109876543210, 0, "98.8EB"}, int64 overflow.
		// Pad tests.
		{1, 3, "  1B"},
		{1, 5, "    1B"},
		{12340, 7, " 12.1KiB"},
		{12340000, 7, " 11.8MiB"},
		{2109876543210, 10, "     1.9TiB"},
	}
	for _, tc := range tests {
		got := byteCountBinary(tc.b, tc.pad)
		if got != tc.want {
			t.Errorf("byteCountBinary(%v, %v) = %q, want: %q", tc.b, tc.pad, got, tc.want)
		}
	}
}
|
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"context"
"encoding/csv"
"encoding/json"
"fmt"
"io"
"os"
"sort"
"strconv"
"text/tabwriter"
"github.com/google/subcommands"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/runsc/cmd/util"
"gvisor.dev/gvisor/runsc/flag"
)
// Syscalls implements subcommands.Command for the "syscalls" command.
type Syscalls struct {
	format   string // output format name: "table", "json", or "csv"
	os       string // OS to report on, or "all"
	arch     string // CPU architecture to report on, or "all"
	filename string // output file path; empty means stdout
}
// CompatibilityInfo is a map of system and architecture to compatibility doc.
// Maps operating system to architecture to ArchInfo.
type CompatibilityInfo map[string]map[string]ArchInfo

// ArchInfo is compatibility doc for an architecture.
type ArchInfo struct {
	// Syscalls maps syscall number for the architecture to the doc.
	Syscalls map[uintptr]SyscallDoc `json:"syscalls"`
}

// SyscallDoc represents a single item of syscall documentation.
type SyscallDoc struct {
	Name string `json:"name"`
	num  uintptr // syscall number; unexported, used only for sorting output
	Support string   `json:"support"`
	Note    string   `json:"note,omitempty"`
	URLs    []string `json:"urls,omitempty"`
}
// outputFunc renders a CompatibilityInfo to the writer in one format.
type outputFunc func(io.Writer, CompatibilityInfo) error

var (
	// The string name to use for printing compatibility for all OSes.
	osAll = "all"
	// The string name to use for printing compatibility for all architectures.
	archAll = "all"
	// A map of OS name to map of architecture name to syscall table,
	// populated by Execute from kernel.SyscallTables().
	syscallTableMap = make(map[string]map[string]*kernel.SyscallTable)
	// A map of output type names to output functions.
	outputMap = map[string]outputFunc{
		"table": outputTable,
		"json":  outputJSON,
		"csv":   outputCSV,
	}
)
// Name implements subcommands.Command.Name.
func (*Syscalls) Name() string {
	return "syscalls"
}

// Synopsis implements subcommands.Command.Synopsis.
func (*Syscalls) Synopsis() string {
	return "Print compatibility information for syscalls."
}

// Usage implements subcommands.Command.Usage.
func (*Syscalls) Usage() string {
	return `syscalls [options] - Print compatibility information for syscalls.
`
}
// SetFlags implements subcommands.Command.SetFlags. Defaults: table
// output for all OSes and all architectures, written to stdout.
func (s *Syscalls) SetFlags(f *flag.FlagSet) {
	f.StringVar(&s.format, "format", "table", "Output format (table, csv, json).")
	f.StringVar(&s.os, "os", osAll, "The OS (e.g. linux)")
	f.StringVar(&s.arch, "arch", archAll, "The CPU architecture (e.g. amd64).")
	f.StringVar(&s.filename, "filename", "", "Output filename (otherwise stdout).")
}
// Execute implements subcommands.Command.Execute. It builds the OS/arch
// syscall table index, gathers the requested compatibility info, and
// renders it with the selected output function.
func (s *Syscalls) Execute(context.Context, *flag.FlagSet, ...any) subcommands.ExitStatus {
	out, ok := outputMap[s.format]
	if !ok {
		util.Fatalf("Unsupported output format %q", s.format)
	}
	// Build map of all supported architectures.
	tables := kernel.SyscallTables()
	for _, t := range tables {
		osMap, ok := syscallTableMap[t.OS.String()]
		if !ok {
			osMap = make(map[string]*kernel.SyscallTable)
			syscallTableMap[t.OS.String()] = osMap
		}
		osMap[t.Arch.String()] = t
	}
	// Build a map of the architectures we want to output.
	info, err := getCompatibilityInfo(s.os, s.arch)
	if err != nil {
		util.Fatalf("%v", err)
	}
	var w io.Writer = os.Stdout // Default.
	if s.filename != "" {
		f, err := os.OpenFile(s.filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
		if err != nil {
			util.Fatalf("Error opening %q: %v", s.filename, err)
		}
		// Fix: the output file was never closed; close it and surface
		// close errors, which can carry deferred write failures.
		defer func() {
			if err := f.Close(); err != nil {
				util.Fatalf("Error closing %q: %v", s.filename, err)
			}
		}()
		w = f
	}
	if err := out(w, info); err != nil {
		util.Fatalf("Error writing output: %v", err)
	}
	return subcommands.ExitSuccess
}
// getCompatibilityInfo returns compatibility info for the given OS name and
// architecture name. Supports the special name 'all' for OS and architecture that
// specifies that all supported OSes or architectures should be included.
func getCompatibilityInfo(osName string, archName string) (CompatibilityInfo, error) {
	info := CompatibilityInfo(make(map[string]map[string]ArchInfo))
	// Expand the special 'all' OS name into every known OS; otherwise
	// process just the one requested name.
	osNames := []string{osName}
	if osName == osAll {
		osNames = osNames[:0]
		for name := range syscallTableMap {
			osNames = append(osNames, name)
		}
	}
	for _, name := range osNames {
		info[name] = make(map[string]ArchInfo)
		if err := addToCompatibilityInfo(info, name, archName); err != nil {
			return info, err
		}
	}
	return info, nil
}
// addToCompatibilityInfo adds ArchInfo for the given specific OS name and
// architecture name. Supports the special architecture name 'all' to specify
// that all supported architectures for the OS should be included.
func addToCompatibilityInfo(info CompatibilityInfo, osName string, archName string) error {
	// Expand the special 'all' architecture name into every architecture
	// known for this OS; otherwise process just the one requested name.
	archNames := []string{archName}
	if archName == archAll {
		archNames = archNames[:0]
		for name := range syscallTableMap[osName] {
			archNames = append(archNames, name)
		}
	}
	for _, name := range archNames {
		archInfo, err := getArchInfo(osName, name)
		if err != nil {
			return err
		}
		info[osName][name] = archInfo
	}
	return nil
}
// getArchInfo returns compatibility info for a specific OS and architecture.
// It errors when no syscall table is registered for the pair.
func getArchInfo(osName string, archName string) (ArchInfo, error) {
	info := ArchInfo{}
	info.Syscalls = make(map[uintptr]SyscallDoc)
	t, ok := syscallTableMap[osName][archName]
	if !ok {
		return info, fmt.Errorf("syscall table for %s/%s not found", osName, archName)
	}
	// Convert each table entry into its documentation form, keeping the
	// number in the unexported field for later sorting.
	for num, sc := range t.Table {
		info.Syscalls[num] = SyscallDoc{
			Name:    sc.Name,
			num:     num,
			Support: sc.SupportLevel.String(),
			Note:    sc.Note,
			URLs:    sc.URLs,
		}
	}
	return info, nil
}
// outputTable outputs the syscall info in tabular format: for each
// OS/arch pair, an un-tabbed heading followed by a tab-aligned table of
// syscalls sorted by number, with issue URLs on continuation rows.
func outputTable(w io.Writer, info CompatibilityInfo) error {
	tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
	// Linux
	for osName, osInfo := range info {
		for archName, archInfo := range osInfo {
			// Print the OS/arch heading straight to w (not through the
			// tabwriter); the buffered table is flushed to w afterwards.
			fmt.Fprintf(w, "%s/%s:\n\n", osName, archName)
			// Sort the syscalls for output in the table.
			sortedCalls := []SyscallDoc{}
			for _, sc := range archInfo.Syscalls {
				sortedCalls = append(sortedCalls, sc)
			}
			sort.Slice(sortedCalls, func(i, j int) bool {
				return sortedCalls[i].num < sortedCalls[j].num
			})
			// Write the header
			_, err := fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n",
				"NUM",
				"NAME",
				"SUPPORT",
				"NOTE",
			)
			if err != nil {
				return err
			}
			// Write each syscall entry
			for _, sc := range sortedCalls {
				_, err = fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n",
					strconv.FormatInt(int64(sc.num), 10),
					sc.Name,
					sc.Support,
					sc.Note,
				)
				if err != nil {
					return err
				}
				// Add issue urls to note.
				for _, url := range sc.URLs {
					_, err = fmt.Fprintf(tw, "%s\t%s\t%s\tSee: %s\t\n",
						"",
						"",
						"",
						url,
					)
					if err != nil {
						return err
					}
				}
			}
			err = tw.Flush()
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// outputJSON outputs the syscall info in JSON format, indented with two
// spaces per level.
func outputJSON(w io.Writer, info CompatibilityInfo) error {
	e := json.NewEncoder(w)
	e.SetIndent("", "  ")
	return e.Encode(info)
}
// numberedRow is a CSV row annotated by syscall number (used for sorting).
type numberedRow struct {
	num uintptr  // syscall number; the sort key
	row []string // the rendered CSV cells
}
// outputCSV outputs the syscall info in CSV format: per OS/arch pair, a
// header row followed by syscall rows sorted by number, with issue URLs
// folded into the note column.
func outputCSV(w io.Writer, info CompatibilityInfo) error {
	csvWriter := csv.NewWriter(w)
	// Linux
	for osName, osInfo := range info {
		for archName, archInfo := range osInfo {
			// Sort the syscalls for output in the table.
			sortedCalls := []numberedRow{}
			for _, sc := range archInfo.Syscalls {
				// Add issue urls to note.
				note := sc.Note
				for _, url := range sc.URLs {
					note = fmt.Sprintf("%s\nSee: %s", note, url)
				}
				sortedCalls = append(sortedCalls, numberedRow{
					num: sc.num,
					row: []string{
						osName,
						archName,
						strconv.FormatInt(int64(sc.num), 10),
						sc.Name,
						sc.Support,
						note,
					},
				})
			}
			sort.Slice(sortedCalls, func(i, j int) bool {
				return sortedCalls[i].num < sortedCalls[j].num
			})
			// Write the header
			err := csvWriter.Write([]string{
				"OS",
				"Arch",
				"Num",
				"Name",
				"Support",
				"Note",
			})
			if err != nil {
				return err
			}
			// Write each syscall entry
			for _, sc := range sortedCalls {
				err = csvWriter.Write(sc.row)
				if err != nil {
					return err
				}
			}
			// Flush per pair and surface any buffered write error.
			csvWriter.Flush()
			err = csvWriter.Error()
			if err != nil {
				return err
			}
		}
	}
	return nil
}
|
package main
import (
"context"
"fmt"
"io"
"os"
"strings"
"mvdan.cc/sh/interp"
"mvdan.cc/sh/syntax"
)
// main runs the shell driver and maps its error types onto process exit
// codes: child exits propagate their code (or 128+signal), internal
// errors exit 125, and anything unexpected panics.
func main() {
	ctx := context.Background()
	if err := Main(ctx, os.Args, os.Stdin, os.Stdout, os.Stderr); err != nil {
		switch e2 := err.(type) {
		case ErrChildExit:
			fmt.Fprintf(os.Stderr, "smsh: %s\n", err)
			if e2.Signal != 0 {
				// Conventional shell encoding for signal death.
				os.Exit(128 + e2.Signal)
			}
			os.Exit(e2.Code)
		case ErrInternal:
			fmt.Fprintf(os.Stderr, "smsh: %s\n", err)
			os.Exit(125)
		default:
			panic(err)
		}
	}
}
// Main is the testable entry point: it strips the program name from args
// and runs the remaining arguments as command strings.
func Main(ctx context.Context, args []string, stdin io.Reader, stdout, stderr io.Writer) (halt error) {
	return Run(ctx, args[1:], stdin, stdout, stderr)
}
// Run parses each command string and executes it in order, stopping at
// the first parse or execution error.
func Run(ctx context.Context, cmdStrs []string, stdin io.Reader, stdout, stderr io.Writer) (halt error) {
	p := syntax.NewParser()
	for _, src := range cmdStrs {
		prog, err := p.Parse(strings.NewReader(src), "")
		if err != nil {
			return err
		}
		if err := RunOne(ctx, prog, stdin, stdout, stderr); err != nil {
			return err
		}
	}
	return nil
}
// RunOne executes a single parsed shell file statement-by-statement on a
// fresh interpreter, wiring the given stdio streams and the custom exec
// module. Errors are normalized: known ErrInternal/ErrChildExit values
// pass through, anything else is wrapped as ErrInternal.
func RunOne(ctx context.Context, cmd *syntax.File, stdin io.Reader, stdout, stderr io.Writer) (halt error) {
	// interp.New's error is ignored here; with only these static options
	// it is not expected to fail — NOTE(review): confirm against the
	// interp package version in use.
	runner, _ := interp.New(
		interp.StdIO(stdin, stdout, stderr),
		interp.Module(execTool),
		interp.Params("-e"), // TODO this doesn't do anything?
		interp.Params("-u"), // TODO check if this does either
	)
	for _, stmt := range cmd.Stmts {
		//fmt.Printf(":: %#v\n", stmt)
		if err := runner.Run(ctx, stmt); err != nil {
			switch err.(type) {
			case ErrInternal, ErrChildExit:
				return err
			default:
				return ErrInternal{err}
			}
		}
	}
	return nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.